diff --git "a/2336.jsonl" "b/2336.jsonl"
new file mode 100644
--- /dev/null
+++ "b/2336.jsonl"
@@ -0,0 +1,704 @@
+{"seq_id":"51683508","text":"# -*- coding: utf-8 -*-\n\nfrom math import sqrt\n\nimport numpy as np\n\nfrom pyfr.bases.base import BaseBasis\nfrom pyfr.polys import get_polybasis\nfrom pyfr.quadrules import get_quadrule\nfrom pyfr.util import lazyprop\n\n\nclass PriBasis(BaseBasis):\n    name = 'pri'\n    ndims = 3\n\n    # nspts = n^2*(n + 1)/2\n    nspts_coeffs = [1, 1, 0, 0]\n    nspts_cdenom = 2\n\n    @classmethod\n    def std_ele(cls, sptord):\n        pts1d = get_quadrule('line', 'equi-spaced', sptord + 1).points\n        sele = [(p, q, r)\n                for r in pts1d\n                for i, q in enumerate(pts1d)\n                for p in pts1d[:(sptord + 1 - i)]]\n\n        return np.array(sele)\n\n    @property\n    def nupts(self):\n        return (self._order + 1)**2*(self._order + 2) // 2\n\n    @lazyprop\n    def fpts(self):\n        n = self._order + 1\n\n        # Tri face points\n        tname = self._cfg.get('solver-interfaces-tri', 'flux-pts')\n        ts, tt = get_quadrule('tri', tname, n*(n + 1) // 2).np_points.T\n\n        # Quad face points\n        qname = self._cfg.get('solver-interfaces-quad', 'flux-pts')\n        qs, qt = get_quadrule('quad', qname, n**2).np_points.T\n\n        # Project\n        proj = [(ts, tt, -1), (ts, tt, 1), (qs, -1, qt), (-qs, qs, qt),\n                (-1, qs, qt)]\n\n        return np.vstack(list(np.broadcast(*p)) for p in proj)\n\n    @property\n    def facenorms(self):\n        c = 1 / sqrt(2)\n        return [(0, 0, -1), (0, 0, 1), (0, -1, 0), (c, c, 0), (-1, 0, 0)]\n\n    @property\n    def facefpts(self):\n        n = self._order + 1\n\n        tpts = np.arange(n*(n + 1)).reshape(2, -1)\n        qpts = np.arange(3*n**2).reshape(3, -1) + n*(n + 1)\n\n        return tpts.tolist() + qpts.tolist()\n\n    @lazyprop\n    def fbasis_coeffs(self):\n        n = self._order + 1\n\n        tfproj = lambda s, t: [(s, t, -1), (s, t, 1)]\n        qfproj = lambda s, t: [(s, -1, t), (-s, s, t), (-1, s, t)]\n\n        tS = self._fbasis_coeffs_for('tri', tfproj, [1, 1], n*(n + 1) // 2)\n        qS = self._fbasis_coeffs_for('quad', qfproj, [1, sqrt(2), 1], n**2)\n\n        return np.vstack([tS, qS])\n","sub_path":"pyfr/bases/mixed.py","file_name":"mixed.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"543045820","text":"#!/usr/bin/env python\n##############\n# (c) CarlosM carlos@xt6labs.io\n# \n# 2019-04-09\n##############\n\nimport pytricia\nimport sqlite3\nimport logging\nimport fire\nimport collections\n\ndbfile = \"data/netdata-20190325.db\"\ncc = 'BR'\ntype = 'ipv6'\n\nstats = collections.Counter()\n\nclass netdatadb:\n\tdef __init__(self, wdb):\n\t\ttry:\n\t\t\tself.con = sqlite3.connect(wdb)\n\t\t\tself.con.row_factory = sqlite3.Row\n\t\texcept:\n\t\t\tself.conf = False\n\t\t\traise\n\t# end init\n\n\tdef runsql(self, wsql):\n\t\tself.cur = self.con.cursor()\n\t\tself.cur.execute(wsql)\n\t\treturn self.cur\n\t# end runsql\n# end netdatadb\n\ndef main(cc, type='ipv6', dbfile='data/netdata-20190325.db', verbose='0'):\n\tndb = netdatadb(dbfile) \n\tpyt = pytricia.PyTricia(48)\n\n\tif verbose == 1:\n\t\tlogging.basicConfig(filename='rt.log', filemode='w', level=logging.DEBUG)\n\telif verbose == 0: \n\t\tlogging.basicConfig(level=logging.INFO)\n\telse:\n\t\tlogging.basicConfig(level=logging.INFO)\n\n\tlogging.info(\"Loading {} resources into PyTricia\".format(cc))\n\tlogging.info(\"Looking for {} routes into PyTricia\".format(type))\n\tsql_cc_pfx = \"SELECT * FROM numres WHERE cc='{}' AND type='{}' \". 
\\\n\t\t\t\t\tformat(cc, type)\n\tfor x in ndb.runsql(sql_cc_pfx):\n\t\tpfx = str(x['prefix'])\n\t\tlogging.debug(pfx)\n\t\tpyt[pfx] = cc\n\n\t# go over routing table and get VE routes\n\tstats = collections.Counter({'total': 0, 'ccroutes': 0})\n\tsql_routes = \"SELECT * FROM riswhois WHERE type='{}' \".format(type)\n\tfor x in ndb.runsql(sql_routes):\n\t\trpfx = str(x['prefix'])\n\t\trcc = pyt.get(rpfx, None)\n\t\tstats['total'] += 1\n\t\tif rcc == cc:\n\t\t\tcovering_pfx = pyt.get_key(rpfx)\n\t\t\tlogging.info(\"Found {} route: {}, under allocation {}\".format(cc, rpfx, covering_pfx))\n\t\t\tstats['ccroutes'] += 1\n\t\t# end if\n\t# end for\n\n\tlogging.info(\"Found {} {} routes for type {}\". \\\n\t\tformat(stats['ccroutes'], cc, type))\n\tlogging.info(\"Total routes: {}\".format(stats['total']))\n## end main\n\nif __name__ == \"__main__\":\n\tfire.Fire(main)\n\t\n# END","sub_path":"apps/30-routingtable_cc/get_rt_cc.py","file_name":"get_rt_cc.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"615675648","text":"\"\"\"\nEventually this will be an example showing how we support z-ordering.\n\"\"\"\nimport arcade\n\nSCALE = 0.75\n\nSCREEN_HEIGHT = 320\nSCREEN_WIDTH = 512\n\n\nclass MyApplication(arcade.Window):\n \"\"\" Main application class. \"\"\"\n\n def __init__(self, width, height):\n \"\"\" Set up the game and initialize the variables. \"\"\"\n\n super().__init__(width, height)\n\n # Sprite lists\n self.all_sprites_list = arcade.SpriteList()\n\n # Set up the player\n self.score = 0\n self.player_sprite = arcade.Sprite(\"images/playerShip1_orange.png\", SCALE)\n self.player_sprite.center_x = 200\n self.player_sprite.center_y = 200\n self.all_sprites_list.append(self.player_sprite)\n\n # Make the asteroids\n enemy_sprite = arcade.Sprite(\"images/meteorGrey_big1.png\", SCALE)\n enemy_sprite.center_y = 200\n enemy_sprite.center_x = 150\n enemy_sprite.size = 4\n self.all_sprites_list.append(enemy_sprite)\n\n enemy_sprite = arcade.Sprite(\"images/meteorGrey_big2.png\", SCALE)\n enemy_sprite.center_y = 200\n enemy_sprite.center_x = 250\n enemy_sprite.size = 4\n self.all_sprites_list.append(enemy_sprite)\n\n enemy_sprite = arcade.Sprite(\"images/meteorGrey_big3.png\", SCALE)\n enemy_sprite.center_y = 150\n enemy_sprite.center_x = 200\n enemy_sprite.size = 4\n self.all_sprites_list.append(enemy_sprite)\n\n self.background = arcade.load_texture(\"stars.jpg\")\n\n def on_draw(self):\n \"\"\"\n Render the screen.\n \"\"\"\n\n # Draw the background\n arcade.draw_xywh_rectangle_textured(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, self.background)\n\n # Draw all the sprites.\n self.all_sprites_list.draw()\n\n\n\nwindow = MyApplication(SCREEN_WIDTH, SCREEN_HEIGHT)\narcade.run()\n","sub_path":"examples/zorder.py","file_name":"zorder.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"653907683","text":"import sys\nsys.path.insert(0, '.')\n\nimport pandas as pd\nimport Functions as fxns\nimport numpy as np\nfrom joblib import dump\n\n# load original CSV files\nbeneficiary = pd.read_csv('./data/Train_Beneficiarydata-1542865627584.csv')\ninpatient = pd.read_csv('./data/Train_Inpatientdata-1542865627584.csv')\noutpatient = pd.read_csv('./data/Train_Outpatientdata-1542865627584.csv')\ntarget = pd.read_csv('./data/Train-1542865627584.csv')\n\n# change numeric encoding from 1/2 to 0/1\nfxns.re_encode(beneficiary)\n\n# 
numerically encode RenalDiseaseIndicator\nbeneficiary.loc[beneficiary.RenalDiseaseIndicator == '0', 'RenalDiseaseIndicator'] = 0\nbeneficiary.loc[beneficiary.RenalDiseaseIndicator == 'Y', 'RenalDiseaseIndicator'] = 1\n\n# convert procedure code cols to str\nfor df in [inpatient, outpatient]:\n procedure_cols = df.columns[df.columns\n .str.contains('Procedure')].to_list()\n df[procedure_cols] = \\\n df[procedure_cols].apply(lambda x: x.astype('str'))\n \n for col in procedure_cols:\n df.loc[df[col] == 'nan', [col]] = np.nan\n\n# # encode patient type in prep for merge\ninpatient['IsOutpatient'] = 0\noutpatient['IsOutpatient'] = 1\n\n# numerically encode PotentialFraud\ntarget.loc[target.PotentialFraud == 'No', 'PotentialFraud'] = 0\ntarget.loc[target.PotentialFraud == 'Yes', 'PotentialFraud'] = 1\n\n# merge dfs\nclaims = pd.concat([inpatient, outpatient])\nclaims = pd.merge(claims, beneficiary, on='BeneID')\nclaims = pd.merge(claims, target, on='Provider')\n\n# parse dates\nfxns.date_parser(claims,\n ['ClaimStartDt', 'ClaimEndDt', 'AdmissionDt', 'DischargeDt', 'DOB', 'DOD'])\n\n# replace ChronicCond_ prefix from applicable cols with _Chronic suffix\nfxns.drop_chronic_prefix(claims)\n\n# add date cols containing only day, week, year for each column\n# fxns.split_date(claims, ['ClaimStartDt', 'ClaimEndDt',\n# 'AdmissionDt', 'DischargeDt'])\n\n# change applicable cols to dtype category\nfxns.to_category_dtype(claims)\n\nclaims.RenalDisease = claims.RenalDisease.astype(int)\nclaims.PotentialFraud = claims.PotentialFraud.astype(int)\n\n# pickle pre-processed file\ndump(claims, '../claims.pkl')\n\n\n\n\n\n\n\n\n\n","sub_path":"Sita/Sita_Preprocessing.py","file_name":"Sita_Preprocessing.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"40926653","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('catalogue', '0001_initial'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='product',\n name='technical_manual',\n ),\n migrations.AddField(\n model_name='productphoto',\n name='alt_text',\n field=models.CharField(max_length=255, null=True, blank=True),\n ),\n ]\n","sub_path":"catalogue/migrations/0002_auto_20150715_1621.py","file_name":"0002_auto_20150715_1621.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"553441222","text":"import sys\ninput=sys.stdin.readline\n\nclass Electric:\n def read(self):\n self.n=int(input())\n self.info=[]\n for _ in range(self.n):\n new=list(map(int,input().split()))\n self.info.append(new)\n \n def solve(self):\n self.info=sorted(self.info,key=lambda x:x[0])\n self.sum=[]\n self.cur=[]\n for i in range(self.n):\n if i==0:\n self.sum.append(1)\n self.cur.append(self.info[0][1])\n else:\n S=0\n for j in range(len(self.sum)):\n if self.cur[j]S:\n S=self.sum[j]\n self.sum.append(S+1)\n self.cur.append(self.info[i][1])\n \n \n print(self.n-max(self.sum))\n \nelec=Electric()\nelec.read()\nelec.solve()","sub_path":"jiyoon🐫/동적 계획법 1/백준_2565.py","file_name":"백준_2565.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"444185607","text":"from numpy import array\nfrom matplotlib import pyplot as plt\n\nf = array([-20,120]) #grader fahrenheit\nc = 
(f-32)*5/9 #grader celsius\nca = (f-30)/2 #grader celsius (estimat)\n\nplt.xlabel(\"Fahrenheit\")\nplt.ylabel(\"Celsius\")\nplt.plot(f,c)\nplt.plot(f,ca)\nplt.show()","sub_path":"IN1900/6/f2c_shortcut_plot.py","file_name":"f2c_shortcut_plot.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"444791060","text":"import pygame\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\nimport random\n\nvertices = [\n (1, -1, -1),\n (1, 1, -1),\n (-1, 1, -1),\n (-1, -1, -1),\n (1, -1, 1),\n (1, 1, 1),\n (-1, 1, 1),\n (-1, -1, 1)\n]\n\nedges = [\n (0, 1),\n (1, 2),\n (2, 3),\n (3, 0),\n (0, 4),\n (1, 5),\n (2, 6),\n (3, 7),\n (4, 5),\n (5, 6),\n (6, 7),\n (7, 4)\n]\n\n#ORDERING MATTERS FOR THESE A LOT\nfaces = [\n (0, 1, 2, 3),\n (0, 4, 5, 1),\n (2, 1, 5, 6),\n (3, 7, 6, 2),\n (3, 0, 4, 7),\n (7, 6, 5, 4)\n]\n\ncolors = [\n (1, 0, 0),\n (0, 1, 0),\n (0, 0, 1),\n (0, 0, 0),\n (1, 1, 1),\n (0, 1, 1),\n (1, 0, 0),\n (0, 1, 0),\n (0, 0, 1),\n (0, 0, 0),\n (1, 1, 1),\n (0, 1, 1),\n]\n\ndef Cube():\n\n glBegin(GL_QUADS)\n for face in faces:\n x = 0\n glColor3fv(colors[x])\n for vertex in face:\n glColor3fv(colors[x])\n x += 1\n glVertex3fv(vertices[vertex])\n glEnd()\n\n glBegin(GL_LINES)\n glColor3fv(colors[4])\n for edge in edges:\n for vertex in edge:\n glVertex3fv(vertices[vertex])\n glEnd()\n\ndef Main():\n pygame.init()\n display = (800, 600)\n pygame.display.set_mode(display, pygame.DOUBLEBUF | pygame.OPENGL)\n\n gluPerspective(45, display[0] / display[1], 0.1, 50)\n\n glTranslatef(random.randrange(-5, 5), random.randrange(-5, 5), -40)\n # glRotatef(25, 2, 1, 0)\n\n object_passed = False\n\n # glEnable(GL_DEPTH_TEST)\n while not object_passed:\n for event in pygame.event.get():\n if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_q):\n pygame.quit()\n quit()\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n glTranslatef(0.5, 0, 0)\n if event.key == pygame.K_RIGHT:\n glTranslatef(-0.5, 0, 0)\n if event.key == pygame.K_DOWN:\n glTranslatef(0, 0.5, 0)\n if event.key == pygame.K_UP:\n glTranslatef(0, -0.5, 0)\n\n # if event.type == pygame.MOUSEBUTTONDOWN:\n # if event.button == 4:\n # glTranslatef(0, 0, 1)\n # if event.button == 5:\n # glTranslatef(0, 0, -1)\n\n #glRotatef(1, 3, 1, 1)\n\n x = glGetDoublev(GL_MODELVIEW_MATRIX)\n\n camera_x = x[3][0]\n camera_y = x[3][1]\n camera_z = x[3][2]\n print(camera_z)\n\n if camera_z < -1:\n object_passed = True\n\n glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)\n\n glTranslatef(0, 0, 0.5)\n Cube()\n pygame.display.flip()\n pygame.time.wait(10)\n\nfor x in range(10):\n Main()\npygame.quit()\nquit()","sub_path":"OpenGLCubeLesson4.py","file_name":"OpenGLCubeLesson4.py","file_ext":"py","file_size_in_byte":2790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"252939132","text":"##Muhammad Usman Ahmed\n##CMPT141 Arlin\n##11275853\n##mua942\n\nimport math\nimport random\nimport matplotlib.pyplot as mp\n\ndef fileOpen(fileName):\n '''\n A function that opens a area file and assigns each weed to a dicionary element\n :param: name of text file\n :Return: A dictionary of the weeds WITHOUT any groupings\n '''\n database = {}\n raw = open(fileName,\"r\")\n lineData = raw.readlines()\n counter = 1\n ##clean up and seperate the input data\n for x in lineData:\n line = x.rstrip()\n line = line.split(\",\")\n ##at this point line is a list of the Vit C and GLA\n \n ##code 
for adding VIT C and GLA into a dict\n name = \"Weed \"+ str(counter)\n database[counter] = {\"name\":name,\"Species\":\"\",\"values\":line}\n counter +=1\n if(len(database) <= 0):\n return \"error\"\n return database\n \ndef distance(pair1,pair2):\n '''\n Calculates the eucldian distance. takes two lists and splits them into xy cords\n :param: 2 list contain xy pairs\n :return: Returns a distance value\n '''\n \n if min(pair1) < 0 or min(pair2) < 0:\n return \"error\"\n \n theDistance = 0\n ##pair1 is sample\n \n u = pair1[0]\n v = pair1[1]\n \n ##pair2 is rep\n x = pair2[0]\n y = pair2[1]\n \n \n sum1 = (u-x) ** 2\n sum2 = (v-y) ** 2\n \n theDistance = math.sqrt( (sum1 + sum2) )\n return theDistance\n\n\n\ndef getValues(database,point):\n \n ''' A function to search a database of weeds and pull x,y values of that weed at a given point\n\n :param: a database of weeds\n :return: a list containg x,y point\n \n\n '''\n weed = database[point]\n values = weed[\"values\"]\n vitC = int(values[0])\n GLA = int(values[1])\n \n valuesSet = []\n valuesSet.append(vitC)\n valuesSet.append(GLA)\n \n \n return valuesSet\n\n \ndef getAverage(groupList,valuesList,s):\n '''\n A function to avg of a groupings of weed GLA and VitC values \n \n param: groupList: a list containg a list of intergers where each int represents a weed's group\n param: valuesList: a list of list containing vitc and GLA values of each weed\n return:a list of lists containg each new shadow rep \n '''\n returnList = []\n for t in range(s):\n counter = 0\n avgx = 0\n avgy = 0\n returnListIndividual = []\n for y in range( len(groupList) ):\n averageOfx = 0\n averageOfy = 0\n \n if groupList[y] == t+1:\n counter +=1\n x = valuesList[y][0]\n y = valuesList[y][1]\n avgx += x\n avgy += y\n averageOfx = avgx/counter\n averageOfy = avgy/counter\n \n returnListIndividual.append(averageOfx)\n returnListIndividual.append(averageOfy)\n \n returnList.append(returnListIndividual)\n \n return returnList\n \n \n \ndef grouper(weedValues,repValues):\n '''goes through and groups the weeds based on their distance to the reps.\n \n param: weedValues: a list of list containing vitc and GLA values of each weed\n \n param: repValues: where are the reps located x,y\n return: a list if integers where each int is a number corpsonding to a group\n \n '''\n rawList = []\n for x in weedValues:\n distanceList = []\n for y in repValues:\n \n distanceList.append( distance(x,y) )\n rawList.append(distanceList)\n groupList = []\n \n \n for t in rawList:\n indice = t.index( min(t) )\n groupList.append(indice+1)\n \n return groupList\n\n\n\n \n\n\ndef groupingMainFunction(database,s):\n '''\n A function that takes a databse of weeds and clusters them according to a set desired groups. 
Makes use of a lot of other functions to\n make cleaner code\n :param: database: contain agricultural data without a grouping\n :param: s: number of groups we need to create\n :return: a list containg the groups\n '''\n\n listOfReps = []\n ##randomly pick\n \n length = len(database)\n \n ##use this to get two distinct random ints to refer to points in a dictionary\n for t in range(s):\n x = random.randint(1,length)\n if(x in listOfReps): ## making sure we do not the same reps for each one\n while x in listOfReps:\n x = random.randint(1,length)\n listOfReps.append(x)\n \n \n ##get x,y for the reps using function getValues() using the distinct ints from previous loops\n valuesOfRepresentives = []\n for x in listOfReps:\n valuesOfRepresentives.append( getValues(database,x) )\n \n ##at this point you have a list containing some lists which have the x,y for randomized points\n \n ##put weeds c,gla values in a list from their database version for ease of use\n theWeedList = []\n \n for x in database:\n ##running through each weed\n theWeed = getValues(database,x) ##get that weeds value\n theWeedList.append(theWeed) ## add list to the bigger\n \n ##this returns the a list of the groupings based on the randoms values used\n oldGroupings = grouper(theWeedList,valuesOfRepresentives)\n \n ##this is step 2 using that random groupings to get a shadow rep\n oldReps = getAverage(oldGroupings,theWeedList,s)\n \n \n ##set up vars for while loop\n firstTime = True\n switch = True\n \n newReps = getAverage(oldGroupings,theWeedList,s)\n \n newGroupings = grouper(theWeedList,valuesOfRepresentives)\n \n while switch:\n ## check if old run is equal to new run\n if( (oldReps == newReps) and firstTime == False):\n switch = False\n ##if true break from function and return\n return newGroupings, newReps\n \n else:\n firstTime = False\n ##assign the new values as old\n oldReps = []\n ##list are mutable lol\n for x in newReps:\n oldReps.append(x.copy())\n \n oldGroupings = newGroupings\n \n \n ##now run the program again and again until we get the same reps and groupings\n newReps = getAverage(newGroupings,theWeedList,s)\n\n newGroupings = grouper(theWeedList, newReps)\n\n \n \n \n \ndef databaseUpdater(groupList, database):\n '''\n A function to go and update the orginal text-based databse\n param groupList: a list of intergers with each int representing a group\n param database: a database of weeds without type being assingned\n return: a new database containg assingned type\n '''\n for z in range( len(groupList) ):\n database[z+1][\"Species\"] = groupList[z]\n \n\n return database\n \ndef plotter(database,s,reps,filename):\n '''\n uses matplotlib to make a graph of the complete database\n param database: a database containg weeds and their values including their species\n param: s: is the number of groups there are\n param: reps: a list of lists containg the location of the shadow reps\n '''\n length = len(database)\n mp.figure()\n ##these lims can change depending on data set\n mp.xlim(0,600)\n mp.ylim(0,600)\n mp.xlabel(\"Vitamen C Levels\")\n mp.ylabel(\"GLA levels\")\n mp.title(\"Filename: \"+ filename + \" total groupings: \"+str(s))\n \n ##a list of colors possible for 4 total groups\n colors = ['b','g','r','c']\n ##color+shape\n \n for x in range(s):\n ## first loop runs through for each group\n for y in range(length):\n if( database[y+1][\"Species\"] == x+1 ):\n value = getValues(database,y+1)\n xRep = reps[x][0]\n yRep = reps[x][1]\n \n \n color = colors[x]+\"o\"\n repColor = colors[x]+\"x\"\n xPoint = 
value[0]\n yPoint = value[1]\n \n mp.plot(xRep,yRep,repColor,linewidth=5,markersize=17)\n mp.plot(xPoint,yPoint,color,linewidth=5,markersize=5)\n \n \n mp.show()\n \n \n \n \n\n\n######## main part of the code #############################\n\ntotalGroups = 4\nfile = \"backyard.txt\"\n\n##Open File and make a database\ntestBase = fileOpen(file)\n\nprint(testBase)\n\n\n\n## will have a list returned, with [0] being the groupings and [1] being the shadow reps\nanswer = groupingMainFunction(testBase,totalGroups)\n\n##pass in the groupings list and the inital database to get final answer\nfinalDatabase = databaseUpdater(answer[0],testBase)\n\n\n##send for plotting\nplotter(finalDatabase,totalGroups,answer[1],file)\n\n","sub_path":"a9q1.py","file_name":"a9q1.py","file_ext":"py","file_size_in_byte":8736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"117230522","text":"import pandas as pd\nimport argparse\n\nparser = argparse.ArgumentParser(description=\"\"\"This script creates an new index out of the difference between two\n existing indexes. The new index contains new posts and posts with \n different number of replies.\"\"\")\n\nparser.add_argument(\"--index_old\", dest=\"index_old\", type=str, default=\"./data/forums/incels/index_old.csv\",\n help=\"Location of old index file.\")\n\nparser.add_argument(\"--index_new\", dest=\"index_new\", type=str, default=\"./data/forums/incels/index_new.csv\",\n help=\"Location of old index file.\")\n\nparser.add_argument(\"--dst\", dest=\"dst\", type=str, default=\"./data/forums/incels/index_new.csv\",\n help=\"Location to save the diff index.\")\n\nargs = parser.parse_args()\n\nindex_old = pd.read_csv(args.index_old)\n\nindex_new = pd.read_csv(args.index_new)\n\nexisting_links = list(index_old.link.values)\n\nlink_to_replies_old = {link: num_replies for link, num_replies in zip(index_old.link.values, index_old.replies.values)}\n\nflag_new = index_new.link.apply(lambda x: x not in existing_links)\n\nflag_replies = index_new.apply(lambda x: x[\"link\"] in existing_links and\n x[\"replies\"] != link_to_replies_old[x[\"link\"]], axis=1)\n\nindex_new[flag_new | flag_replies].to_csv(args.dst, index=False)\n","sub_path":"forums_tools/diff_index.py","file_name":"diff_index.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"553493335","text":"#! 
/usr/bin/env python3\n# RegexSearch.py\nimport re\n\nRegexPattern = re.compile(r'\\d\\d\\d-\\d\\d\\d-\\d\\d\\d\\d')\nmo = RegexPattern.search('My number is 415-555-4242.')\nprint('Phone number found: ' + mo.group())\n\nRegexPattern = re.compile(r'(\\d\\d\\d)-(\\d\\d\\d-\\d\\d\\d\\d)')\nmo = RegexPattern.search('My number is 415-555-4242.')\nprint('Phone number found: ' + mo.group(2))\nprint('Phone number found: ' + mo.group(1))\nprint('Phone number found: ' + mo.group(0))\nprint('Phone number found: ' + mo.group())\nprint(mo.groups())\nareaCode, mainNumber = mo.groups()\n\nphoneNumRegex = re.compile(r'(\\(\\d\\d\\d\\)) (\\d\\d\\d-\\d\\d\\d\\d)')\nmo = phoneNumRegex.search('My phone number is (415) 555-4242.')\nprint('Indicatif: ' + mo.group(1))\n\nbatRegex = re.compile(r'Bat(man|mobile|copter|bat)')\nmo = batRegex.search('Batmobile lost a wheel batbat')\nprint(mo.group(1))\nprint(mo.group())\n\nbatRegex = re.compile(r'Bat(wo)?man')\nbatRegex = re.compile(r'Bat(wo)*man')\nbatRegex = re.compile(r'Bat(wo)+man')\nhaRegex = re.compile(r'(Ha){3}')\ngreedyHaRegex = re.compile(r'(Ha){3,5}')\nmo1 = greedyHaRegex.search('HaHaHaHaHa')\nprint(mo1.group())\nnongreedyHaRegex = re.compile(r'(Ha){3,5}?')\nmo2 = nongreedyHaRegex.search('HaHaHaHaHa')\nprint(mo2.group())\n\nphoneNumRegex = re.compile(r'\\d\\d\\d-\\d\\d\\d-\\d\\d\\d\\d') # has no groups\nmo = phoneNumRegex.findall('Cell: 415-555-9999 Work: 212-555-0000')\nprint(mo)\n\nxmasRegex = re.compile(r'\\d+\\s\\w+') # cf. p158\natRegex = re.compile(r'.at')\nnameRegex = re.compile(r'First Name: (.*) Last Name: (.*)')\nnewlineRegex = re.compile('.*', re.DOTALL)\n\nrobocop = re.compile(r'robocop', re.I) # Case-Insensitive Matching\nmo = robocop.search('RoboCop is part man, part machine, all cop.').group()\nprint(mo)\n\nnamesRegex = re.compile(r'Agent \\w+')\nmo = namesRegex.sub('CENSORED', 'Agent Alice gave the secret documents to Agent Bob.')\nprint(mo)\n\n","sub_path":"RegexSearch.py","file_name":"RegexSearch.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"287444725","text":"from rest_framework import serializers\n\nfrom extras.models import CF_TYPE_SELECT, CustomFieldChoice, Graph\n\n\nclass CustomFieldSerializer(serializers.Serializer):\n \"\"\"\n Extends a ModelSerializer to render any CustomFields and their values associated with an object.\n \"\"\"\n custom_fields = serializers.SerializerMethodField()\n\n def get_custom_fields(self, obj):\n\n # Gather all CustomFields applicable to this object\n fields = {cf.name: None for cf in self.context['view'].custom_fields}\n\n # Attach any defined CustomFieldValues to their respective CustomFields\n for cfv in obj.custom_field_values.all():\n\n # Attempt to suppress database lookups for CustomFieldChoices by using the cached choice set from the view\n # context.\n if cfv.field.type == CF_TYPE_SELECT and hasattr(self, 'custom_field_choices'):\n cfc = {\n 'id': int(cfv.serialized_value),\n 'value': self.context['view'].custom_field_choices[int(cfv.serialized_value)]\n }\n fields[cfv.field.name] = CustomFieldChoiceSerializer(instance=cfc).data\n # Fall back to hitting the database in case we're in a view that doesn't inherit CustomFieldModelAPIView.\n elif cfv.field.type == CF_TYPE_SELECT:\n fields[cfv.field.name] = CustomFieldChoiceSerializer(instance=cfv.value).data\n else:\n fields[cfv.field.name] = cfv.value\n\n return fields\n\n\nclass CustomFieldChoiceSerializer(serializers.ModelSerializer):\n\n class 
Meta:\n model = CustomFieldChoice\n fields = ['id', 'value']\n\n\nclass GraphSerializer(serializers.ModelSerializer):\n embed_url = serializers.SerializerMethodField()\n embed_link = serializers.SerializerMethodField()\n\n class Meta:\n model = Graph\n fields = ['name', 'embed_url', 'embed_link']\n\n def get_embed_url(self, obj):\n return obj.embed_url(self.context['graphed_object'])\n\n def get_embed_link(self, obj):\n return obj.embed_link(self.context['graphed_object'])\n","sub_path":"netbox/extras/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"592257152","text":"import rclpy\nfrom visualization_msgs.msg import Marker\nfrom visualization_msgs.msg import MarkerArray\nfrom rclpy.node import Node\n\nfrom builtin_interfaces.msg import Duration\n\nfrom scipy.spatial.transform import Rotation\nimport numpy as np\n\n\n\nclass MyMarker(Node):\n \n TOPIC = 'maker_pub'\n\n def __init__(self):\n\n super().__init__(self.TOPIC)\n\n self.get_logger().info(\"%s initializing...\" % (self.TOPIC))\n\n self.pub = self.create_publisher(MarkerArray, self.TOPIC, 10)\n\n timer_period = 1 # seconds\n self.timer = self.create_timer(timer_period, self.timer_callback)\n\n def timer_callback(self):\n marker_data = Marker()\n marker_array = MarkerArray()\n rot = Rotation.from_rotvec(np.array([0, 0, np.pi/3]))\n quo = rot.as_quat()\n\n for i in range(5):\n marker_data.header.frame_id = \"odom\"\n #marker_data.header.stamp = rclpy.time\n\n marker_data.ns = \"basic_shapes\"\n marker_data.id = i\n\n marker_data.action = Marker.ADD\n\n marker_data.pose.position.x = 0.0\n marker_data.pose.position.y = 0.0\n marker_data.pose.position.z = float(i)\n\n marker_data.pose.orientation.x = quo[0]\n marker_data.pose.orientation.y = quo[1]\n marker_data.pose.orientation.z = quo[2]\n marker_data.pose.orientation.w = quo[3]\n\n marker_data.color.r = 1.0\n marker_data.color.g = 0.0\n marker_data.color.b = 0.0\n marker_data.color.a = 1.0\n\n marker_data.scale.x = 0.5\n marker_data.scale.y = 0.05\n marker_data.scale.z = 0.05\n\n marker_data.lifetime = Duration()\n\n marker_data.type = 0\n\n marker_array.markers.append(marker_data)\n \n self.pub.publish(marker_array)\n\n #self.get_logger().info('Publishing: \"%s\"' % marker_array.markers[0].pose)\n self.get_logger().info('Debug: \"%f\"' % quo[1])\n\n\n\ndef main(args=None):\n \n rclpy.init(args=args)\n\n my_marker = MyMarker()\n\n rclpy.spin(my_marker)\n\n my_marker.destroy_node()\n rclpy.shutdown()\n\n\n\nif __name__ == '__main__':\n main()\n\n\n\n","sub_path":"build/py_turtlebot3_gazebo/build/lib/py_turtlebot3_gazebo/marker_pub.py","file_name":"marker_pub.py","file_ext":"py","file_size_in_byte":2196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"445635130","text":"####################################################\r\n# MeshWorks Loopback/Range Test Remote Node Device\r\n####################################################\r\n# we have 3 Control Points: 2 LEDs and 1 PWM buzzer \r\n \r\n# 1. green LED is on PA6 \r\ngreenLed = [\"greenLed\", \"PA6\", \"digital\", \"grLedF\", 1]\r\ngreenValues = [\"discrete\", 2, \"off\", \"on\"]\r\n \r\n# 2. red LED is on PA7 \r\nredLed = [\"redLed\", \"PA7\", \"digital\", \"rdLedF\", 1]\r\nredValues = [\"discrete\", 2, \"off\", \"on\"]\r\n \r\n# 3. 
buzzer on PB7 \r\nbuzzer = [\"buzzer\", \"PB7\", \"PWM\", \"buzzerF\", 1] \r\nbuzzerVal = [\"range\", 1, 12, \"tone\"] \r\n \r\n####################################################\r\n# 2 digital Data Points: button and Reed switch \r\n \r\n# 1. button on PB6 \r\nbutton = [\"button\", \"PB6\", \"digital\", \"buttonF\", 1] \r\nbValues = [\"discrete\", 2, \"up\", \"down\"] \r\n \r\nprevButtonValue = 0 \r\n \r\ndef buttonF():\r\n value = readDigital() \r\n # on different value, send report \r\n if (value != prevButtonValue): \r\n sendDataReport(value, \"button state\")\r\n prevButtonValue = value \r\n \r\n# 2. reed switch on PB4 \r\nreedSw = [\"reedSw\", \"PB4\", \"digital\", \"reedSwF\", 1] \r\nrsValues = [\"discrete\", 2, \"no contact\", \"contact\"] \r\n \r\nprevReedSwValue = 0 \r\n \r\ndef reedSwF():\r\n value = readDigital() \r\n # on different value, send report \r\n if (value != prevReedSwValue): \r\n sendDataReport(value, \"reedsw state\")\r\n prevReedSwValue = value \r\n\r\n# When GW tells node to change LED state, ack back with same state\r\ndef cpCallbackVariableUpdate(variableName, value):\r\n if (variableName == \"redLedState\"):\r\n celPy.AdjustLocalControlPoint(\"redLed\", value)\r\n celPy.setRemoteVariable(\"Gateway\", \"ackBack\", value)\r\n\r\n\r\n# Blink green LED every 2 seconds\r\ncelPy.addTickFunction(heartbeatLed, 20) \r\n \r\nledState = 0\r\n \r\ndef heartbeatLed(): \r\n if (ledState == 0): \r\n celPy.AdjustLocalControlPoint(\"greenLed\", 1)\r\n ledState = 1\r\n return\r\n if (ledState == 1): \r\n celPy.AdjustLocalControlPoint(\"greenLed\", 0)\r\n ledState = 0\r\n \r\n####################################################\r\n# device configuration\r\ncelPy.ApplicationName = \"Loopback Test\"\r\ncelPy.DeviceName = \"Node\" \r\ncelPy.IsSleepyDevice = False\r\ncelPy.DataCollectionPoints = [button, reedSw] \r\ncelPy.DataCollectionValues = [bValues, rsValues]\r\ncelPy.ControlPoints = [greenLed, redLed, buzzer]\r\ncelPy.ControlValues = [greenValues, redValues, buzzerVal] \r\n \r\ndef main(): \r\n pass\r\n","sub_path":"Loopback Test/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"173467051","text":"from PyQt5.QtWidgets import QDialog, QMessageBox\nfrom .login_dialog import Ui_LoginForm\nfrom lib import MatrixAPI\n\nclass LoginForm(Ui_LoginForm, QDialog):\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self.setupUi(self)\n self.buttonBox.accepted.connect(self.is_accepted)\n\n\n def is_accepted(self) -> None:\n self.access_token = self.txtAccessToken.toPlainText().replace(r'\\n', '').strip()\n self.homeserver = self.txtHomeserver.toPlainText().replace(r'\\n', '').strip()\n self.user_id = self.txtUserID.toPlainText().replace(r'\\n', '').strip()\n\n def updateDefaultsFromMatrix(self, matrix: MatrixAPI) -> None:\n self.txtAccessToken.setPlainText(matrix.access_token)\n self.txtUserID.setPlainText(matrix.user_id)\n self.txtHomeserver.setPlainText(matrix.homeserver)\n\n","sub_path":"ui/py_login_dialog.py","file_name":"py_login_dialog.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"255176906","text":"import torch\nimport torch.nn as nn\nimport torchvision\n\n\nclass _netG(nn.Module):\n def __init__(self,opt):\n super(_netG, self).__init__()\n self.main_encoder = nn.Sequential(\n nn.Conv2d(3, 96, kernel_size=11, 
stride=4,bias=False), \n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(96, 256, kernel_size=5, padding=2,bias=False),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n nn.Conv2d(256, 384, kernel_size=3, padding=1,bias=False),\n nn.ReLU(inplace=True),\n nn.Conv2d(384, 384, kernel_size=3, padding=1,bias=False),\n nn.ReLU(inplace=True),\n nn.Conv2d(384, 256, kernel_size=3, padding=1,bias=False),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=3, stride=2),\n )\n self.main_decoder = nn.Sequential(\n nn.ConvTranspose2d(256,128,kernel_size=5,stride=2,padding=2,bias=False).cuda(),\n nn.BatchNorm2d(128).cuda(),\n nn.ReLU(True),\n nn.ConvTranspose2d(128,64,kernel_size=5,stride=2,padding=2,bias=False).cuda(),\n nn.BatchNorm2d(64).cuda(),\n nn.ReLU(True),\n nn.ConvTranspose2d(64,64,kernel_size=5,stride=2,padding=2,bias=False).cuda(),\n nn.BatchNorm2d(64).cuda(),\n nn.ReLU(True),\n nn.ConvTranspose2d(64,32,kernel_size=5,stride=2,padding=2,bias=False).cuda(),\n nn.BatchNorm2d(32).cuda(),\n nn.ReLU(True),\n nn.ConvTranspose2d(32,3,kernel_size=5,stride=2,padding=2,bias=False).cuda(),\n nn.Tanh()\n \n )\n\n def forward(self, input):\n var = input.shape[0]\n temp = self.main_encoder(input)\n temp = torch.reshape(temp, (temp.shape[0],-1))\n temp = torch.reshape(temp, (var,9216,1)).cuda()\n filt = nn.Conv1d(9216,9216,kernel_size=1,bias=False).cuda()\n temp = filt(temp)\n temp = torch.reshape(temp,(var,256,6,6))\n output = self.main_decoder(temp)\n output = torchvision.transforms.functional.resize(output,[113])\n return output\n\n\nclass _netlocalD(nn.Module):\n def __init__(self, opt):\n super(_netlocalD, self).__init__()\n self.ngpu = opt.ngpu\n self.main = nn.Sequential(\n # input is (nc) x 64 x 64\n nn.Conv2d(opt.nc, opt.ndf, 4, 2, 1, bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf) x 32 x 32\n nn.Conv2d(opt.ndf, opt.ndf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(opt.ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*2) x 16 x 16\n nn.Conv2d(opt.ndf * 2, opt.ndf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(opt.ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*4) x 8 x 8\n nn.Conv2d(opt.ndf * 4, opt.ndf * 8, 4, 2, 1, bias=False),\n nn.BatchNorm2d(opt.ndf * 8),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. 
(ndf*8) x 4 x 4\n nn.Conv2d(opt.ndf * 8, 1, 4, 1, 0, bias=False),\n nn.Sigmoid()\n )\n\n def forward(self, input):\n if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:\n output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))\n else:\n output = self.main(input)\n\n return output.view(-1, 1)\n\n","sub_path":"src2/model_alexnet_l2.py","file_name":"model_alexnet_l2.py","file_ext":"py","file_size_in_byte":3487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"359647785","text":"\n\nfrom js9 import j\nfrom .Tags import Tags\n\nJSBASE = j.application.jsbase_get_class()\nclass TagsFactory(JSBASE):\n \"\"\"\n Factory Class of dealing with TAGS\n \"\"\"\n\n def __init__(self):\n self.__jslocation__ = \"j.data.tags\"\n JSBASE.__init__(self)\n\n def getObject(self, tagstring=\"\", setFunction4Tagstring=None, keepcase=False):\n \"\"\"\n check whether labelname exists in the labels\n\n @param tagstring: example \"important customer:kristof\"\n @type tagstring: string\n \"\"\"\n return Tags(tagstring, setFunction4Tagstring, keepcase=keepcase)\n\n def getTagString(self, labels=None, tags=None):\n \"\"\"\n Return a valid tags string, it's recommended to use this function\n and not to build the script manually to skip reserved letters.\n\n @param labels: A set of labels\n @param tags: A dict with key values\n \"\"\"\n labels = labels or set()\n tags = tags or dict()\n if not isinstance(labels, set):\n raise TypeError(\"labels must be of type set\")\n\n if not isinstance(tags, dict):\n raise TypeError(\"tags must be of type dict\")\n\n t = Tags()\n t.labels = labels\n t.tags = tags\n return str(t)\n","sub_path":"JumpScale9/data/tags/TagsFactory.py","file_name":"TagsFactory.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"652513563","text":"from turtle import Turtle\nfrom random import randint\n\n#-- Fruit Class --#\nclass Fruit(Turtle):\n\n #-- Constructor --#\n def __init__(self, snake):\n Turtle.__init__(self)\n\n self.snake = snake\n\n self.TILESX = int(1088 / 32)\n self.TILESY = int(736 / 32)\n\n self.startX = -16 # These are used to orient the fruit.\n self.startY = 0 # These are used in all of the position oriented classes.\n\n self.x, self.y = self.get_specific_num(32, self.snake)\n\n self.shape(\"square\")\n self.color(\"red\")\n self.pu()\n self.speed(0)\n self.shapesize(32 / 22)\n self.goto(self.startX + (self.x * 32), self.startY + (self.y * 32))\n\n #-- Main Functions --#\n def move(self, array):\n for i in array:\n if i.pos() == self.pos():\n self.x, self.y = self.get_specific_num(32, self.snake)\n self.goto(self.startX + (self.x * 32), self.startY + (self.y * 32))\n\n #-- Helper Functions --#\n def get_specific_num(self, range, snake): # Basically gets a random number\n num1 = randint(int(-self.TILESX / 2) + 1, 5) # if that number is a multiple of\n num2 = randint(int(-self.TILESY / 2), int(self.TILESY / 2)) # 32\n\n while num1 % range != 0 and num2 % range != 0:\n num1 = randint(int(-self.TILESX / 2) + 1, 5)\n num2 = randint(int(-self.TILESY / 2), int(self.TILESY / 2))\n\n return num1, num2\n","sub_path":"src/fruit.py","file_name":"fruit.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"189906102","text":"\"\"\"\n LeetCode \n By Solmaz Ebrahimi\n \n ZigZag Conversion Problem\n 
https://leetcode.com/problems/zigzag-conversion\n \n Time Submitted | Status | Runtime | Memory | Language\n 2020/06/07 | Accepted | 60 ms\t| 13.7 MB | python3\n\"\"\"\n\n\n\n\nclass Solution:\n def convert(self, s: str, numRows: int) -> str:\n \n if numRows == 1:\n return s\n \n data = ['' for x in range(numRows)]\n down = False\n i = 0\n \n for item in s:\n data[i] = data[i] + item\n if i == 0 or i == numRows-1:\n down = not down\n i = i + 1 if down else i-1\n \n out = ''\n for char in data:\n if char != '':\n out = out + char\n return out","sub_path":"6.ZigZag Conversion.py","file_name":"6.ZigZag Conversion.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"383353827","text":"#!/usr/bin/python3\n\n#from fabric.api import env, run\nfrom fabric import Connection\nimport os\nimport vagrant\n\n\ncsiTestVM = \"fedora29-csi-test-0.2\"\n\n\ndef cleanVM(root):\n \n v = vagrant.Vagrant(root=root)\n print(\" - Cleanig VM \", root) \n\n try:\n v.destroy()\n except Exception as err:\n print(err)\n\n try:\n os.remove(root + \"/Vagrantfile\")\n except FileNotFoundError:\n pass\n\n \n\ndef initVM(vmName, root):\n buildPath = root + \"/build\"\n v = vagrant.Vagrant(root=root)\n\n if not os.path.exists(root):\n os.makedirs(root)\n\n print(\" - Setting up VM \", root) \n if not os.path.exists(buildPath):\n os.makedirs(buildPath)\n v.init(box_name=vmName)\n\ndef copyBins(bins, root):\n\n cmd = \"cp -R {0}/* {1}/build/\".format(bins,root)\n print(\" - Copying binaries: \", cmd)\n os.system(cmd)\n\n\ndef runVM(root):\n v = vagrant.Vagrant(root=root)\n print(\" - Starting VM \", root) \n v.up()\n\ndef runPlugin(root):\n v = vagrant.Vagrant(root=root)\n\n # Start plugin\n cmd = \"nohup /home/vagrant/build/jdss-csi-plugin --csi-address=127.0.0.1:15947 --soc-type=tcp --config ./build/controller-cfg.yaml >& /dev/null < /dev/null &\"\n con = Connection(v.user_hostname_port(),\n connect_kwargs={\n \"key_filename\": v.keyfile(),\n })\n out = con.sudo(cmd)\n \n\ndef runCSISanity(root):\n v = vagrant.Vagrant(root=root)\n \n # Run tests\n print(\"Starting sanity tests.\")\n #out = v.ssh(command=\"/home/vagrant/go/src/csi-test/cmd/csi-sanity/csi-sanity -ginkgo.failFast -csi.endpoint 127.0.0.1:15947\")\n cmd = \"/home/vagrant/go/src/csi-test/cmd/csi-sanity/csi-sanity -ginkgo.failFast -csi.endpoint 127.0.0.1:15947\"\n print(\"Running: \", cmd)\n con = Connection(v.user_hostname_port(),\n connect_kwargs={\n \"key_filename\": v.keyfile(),\n })\n \n out = con.run(cmd)\n \n\ndef main():\n root = \"csi-sanity\"\n cleanVM(root)\n initVM(csiTestVM,root)\n copyBins(\"bins\", root)\n try:\n runVM(root)\n runPlugin(root)\n runCSISanity(root)\n except Exception as err:\n print(err)\n raise err\n\n cleanVM(root)\n print(\"Success!\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"test/scripts/sanity_test.py","file_name":"sanity_test.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"349290186","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom .forms import CreateCustomUserForm\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom qa.models import questions, answers\n\n# Create your views here.\ndef UserSignup(request):\n signup_form = CreateCustomUserForm()\n if request.method == \"POST\":\n signup_form = 
CreateCustomUserForm(request.POST)\n if signup_form.is_valid():\n signup_form.save()\n return HttpResponse(\"User Created\")\n else:\n print(\"not valid\")\n else:\n print(\"method is not post\")\n return render(request,'user/signup.html',{\"form\":signup_form})\n\n\ndef UserSignin(request):\n if request.method == \"POST\":\n email = request.POST['email']\n password = request.POST['password']\n user = authenticate(request,email=email,password=password)\n if user is not None:\n login(request,user)\n return HttpResponse(\"Login Success\")\n else:\n return HttpResponse(\"User Does not Exist\")\n return render(request,'user/signin.html',{})\n\n\nlogin_required(login_url=\"/user/signin\")\ndef UserSignout(request):\n logout(request)\n return redirect('/user/signin')\n\n\nlogin_required(login_url=\"/user/signin\")\ndef UserProfile(request):\n context = {}\n profile = request.user\n context['profile'] = profile\n try:\n context['questions'] = questions.objects.filter(asked_by=profile)\n except:\n context['questions'] = None\n return render(request,\"user/profile.html\",context)\n","sub_path":"ReturnZero/user/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"224455689","text":"from typing import Tuple, Dict, List\nimport copy\nimport random\n\ninitial_state = {\n 'tab': [\n [1, 2, 3, 4, 5, 6, 7, 8],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0]\n ],\n\n 'queen_position': [0, 0, 0, 0, 0, 0, 0, 0]\n}\n\n\n\n\n\n\nclass NODE:\n def __init__(self, state: Dict[str, list], father_node: 'NODE' = None, action: Tuple[int, int] = None):\n\n self.state = state\n self.father_node = father_node\n self.action = action\n\n def Print_Tree(self):\n\n node = self\n print('start')\n while not node is None:\n print('State', node.state)\n print('Action', node.action)\n node = node.father_node\n print('end')\n return\n\n\ndef replace_queen(father_node: NODE, action: Tuple[int, int])->NODE:\n\n # Salva a rainha e a linha para que ela será movimentada\n\n queen = action[0]\n new_line = action[1]\n\n # Instancia um novo NÓ\n copia_state = copy.deepcopy(father_node.state)\n child = NODE(copia_state, father_node, action)\n\n # Troca a rainha de posição\n\n father_queen_line = father_node.state['queen_position'][queen-1] # Linha que a rainha se encontra no estado atual\n child.state['tab'][father_queen_line][queen-1] = 0 # Remove a rainha da posição antiga\n child.state['tab'][new_line][queen-1] = queen # Poe a raina na nova linha\n child.state['queen_position'][queen-1] = new_line # Atualiza a posição da rainha no novo estado.\n\n return child\n\n\nclass Problem:\n\n h = 0\n attacking_queen_pairs = []\n\n def goal_test(self, node: NODE):\n\n if self.compute_h(node) == 0:\n return True\n else:\n return False\n\n @staticmethod\n def neigthbour(node: NODE)->List[NODE]:\n\n vizinhos = []\n for queen_column in range(8):\n for line in range(8):\n queen = queen_column + 1\n vizinhos.append(replace_queen(node, (queen, line)))\n\n return vizinhos\n\n @staticmethod\n def random_neigthbour(node: NODE)->NODE:\n lista = Problem.neigthbour(node)\n return random.choice(lista)\n\n def compute_h(self, node: NODE)->int:\n\n self.h = 0\n self.attacking_queen_pairs = []\n queen = 1\n for queen_line in node.state['queen_position']:\n self.attack(node, queen, queen_line)\n 
queen+=1\n\n return self.h\n\n def attack(self, node:NODE, queen: int, queen_line: int):\n\n # vetor tem o incremento nas posições de i e j no tabuleiro\n # [1, 1] é mover na diagonal para baixo\n # [-1, -1] é mover na diagonal para cima\n # [0,1] é mover na horizontal para direita\n # etc.\n\n for vetor in [[1,1], [-1, -1], [0, 1], [0, -1], [1, -1], [-1, 1]]:\n\n i = int(queen_line)\n j = int(queen-1)\n\n while i !=-1 and i!=8 and j !=-1 and j !=8:\n\n i+=vetor[0]\n j+=vetor[1]\n\n if i == -1 or i == 8 or j == -1 or j == 8:\n continue\n\n if node.state['tab'][i][j] != 0:\n\n attacked_queen = node.state['tab'][i][j]\n pair1 = (int(queen), int(attacked_queen))\n pair2 = (int(attacked_queen), int(queen))\n\n if pair1 in self.attacking_queen_pairs or pair2 in self.attacking_queen_pairs:\n\n continue\n\n else:\n\n self.attacking_queen_pairs.append(pair1)\n self.h+=1\n\n\n\n","sub_path":"queens.py","file_name":"queens.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"102158112","text":"# ------------------------------------------------------------------------ #\n# Title: Assignment 08\n# Description: Working with classes\n\n# ChangeLog (Who,When,What):\n# RRoot,1.1.2030,Created started script\n# RRoot,1.1.2030,Added pseudo-code to start assignment 8\n# MKim, 11.26.2019, Modified code to complete assignment 8\n# ------------------------------------------------------------------------ #\n\n# Data -------------------------------------------------------------------- #\nstrFileName = 'products.txt'\nlstOfProductObjects = []\n\nclass Product():\n \"\"\"Stores data about a product:\n properties:\n ProductName: (string) with the products's name\n ProductPrice: (float) with the products's standard price\n changelog: (When,Who,What)\n RRoot,1.1.2030,Created Class\n MKim, 11.26.2019, Modified code to complete assignment 8\n \"\"\"\n # -- Constrictor --\n # Each object instance can hold the product name and price\n def __init__(self, ProductName, ProductPrice):\n self.name = ProductName\n self.price = ProductPrice\n\n# End of Data -------------------------------------------------------------------- #\n\n# Processing ------------------------------------------------------------- #\nclass FileProcessor:\n \"\"\"Processes data to and from a file and a list of product objects:\n methods:\n SaveDataToFile(file_name, list_of_product_objects):\n ReadDataFromFile(file_name): -> (a list of product objects)\n changelog: (When,Who,What)\n RRoot,1.1.2030,Created Class\n MKim, 11/26.2019, Modified code to complete assignment 8\n \"\"\"\n\n # -- Methods --\n @staticmethod\n def ReadDataFromFile(filename):\n lstOfProductObjects = []\n file = open(filename,\"r\")\n for line in file:\n data = line.split(\",\")\n ProductObject = Product(data[0].strip(), float(data[1].strip()))\n row = {\"Name\":ProductObject.name, \"Price\":ProductObject.price}\n lstOfProductObjects.append(row)\n file.close()\n return lstOfProductObjects\n\n @staticmethod\n def SaveDataToFile(filename, lstOfProductObjects):\n with open(filename, 'w') as file:\n for row in lstOfProductObjects:\n file.write(row[\"Name\"] + \",\" + str(row[\"Price\"]) + \"\\n\")\n\n# End of Processing ------------------------------------------------------------- #\n\n# Presentation (Input/Output) -------------------------------------------- #\nclass IO:\n\n '''\n Methods: ShowMenu: Showing the user options\n GetUserChoice: Accepting the user's choice\n ShowCurrentData: Showing 
the current data in file\n GetProductData: Getting a new product name and price from the user\n changelog: (When,Who,What)\n MKim, 11/26.2019, Modified code to complete assignment 8\n '''\n\n @staticmethod\n def ShowMenu():\n print('''\n Menu of Options\n 1)Show current data from the file\n 2)Add a product to the list of product objects\n 3)Save current data to file and exit\n ''')\n print()\n\n @staticmethod\n def GetUserChoice():\n choice = input(\"Which option would you like to perform? \")\n return choice\n\n def ShowCurrentDataFromFile(self):\n lstOfProductObjects = FileProcessor.ReadDataFromFile(self)\n for row in lstOfProductObjects:\n print(row[\"Name\"] + \",\" + str(row[\"Price\"]) + \"\\n\")\n\n @staticmethod\n def GetProductData(lstOfProductObjects):\n newProductName = input(\"Which product would you like to add? \")\n newProductPrice = float(input(\"What is the price for the product? \"))\n row = {\"Name\": newProductName, \"Price\": newProductPrice}\n lstOfProductObjects.append(row)\n return lstOfProductObjects\n\n# End of Presentation (Input/Output) -------------------------------------------- #\n\n# Main Body of Script ---------------------------------------------------- #\n\n# Load data from file into a list of product objects when script starts\nlstOfProductObjects = FileProcessor.ReadDataFromFile(strFileName)\n\nwhile True:\n # Show user a menu of options\n IO.ShowMenu()\n # Get user's menu option choice\n choice = IO.GetUserChoice()\n\n # Show user current data in the list of product objects\n if choice == \"1\":\n IO.ShowCurrentDataFromFile(strFileName)\n\n # Let user add data to the list of product objects\n elif choice == \"2\":\n lstOfProductObjects = IO.GetProductData(lstOfProductObjects)\n\n # let user save current data to file and exit program\n elif choice == \"3\":\n FileProcessor.SaveDataToFile(\"products2.txt\",lstOfProductObjects)\n break\n else:\n try:\n raise TypeError(\"Invalid Choice\")\n except TypeError as te:\n print (te, \"Type 1-3 only!\")\n\n# End of Main Body of Script ---------------------------------------------------- #\n\n","sub_path":"Assigment08.py","file_name":"Assigment08.py","file_ext":"py","file_size_in_byte":4898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"141877068","text":"from django.urls import path, include\nfrom django.conf.urls import url\n\n\nurlpatterns = [\n \n #route api ipasn bj\n path('ipasn/', include('apps.ipasnbj.urls')),\n \n #route api domain bj\n path('domain/', include('apps.domainbj.urls')),\n \n #route api penetration bj\n path('penetration/', include('apps.penetrationbj.urls')),\n \n #route api sonde bj\n path('sonde/', include('apps.sondebj.urls')),\n \n \n #route api evolution fai bj\n path('evolution-fai/', include('apps.evolution_fai.urls')),\n \n #route api evolution fixe bj\n path('evolution-fixe/', include('apps.evolution_fixe.urls')),\n \n #route api teledensite bj\n path('teledensite/', include('apps.teledensitebj.urls')),\n \n #route api trafic echange bj\n path('trafic/', include('apps.trafic_echange.urls')),\n \n #route api pour openresolver au benin\n path('openresolver/', include('apps.openresolver.urls')),\n\n]\n","sub_path":"apps/disponibilite.py","file_name":"disponibilite.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"270901131","text":"import json\nimport os\n\nimport cv2\n\ntest_data = 
json.load(open('./data10/annotations/fabric_testa_round2.json'))\nanns = json.load(open('./result_data10.bbox.json'))\n\nimages_name = dict()\nfor image in test_data.get('images'):\n images_name[image.get('id')] = image.get('file_name')\ndata_home = '/tcdata/guangdong1_round2_testB_20191024/'\nimage_with_anns = dict()\nfor a in anns:\n score = a.get('score')\n name = images_name.get(a.get('image_id'))\n bbox = a.get('bbox')\n image_name = os.path.split(name)[-1]\n base_name = image_name.split('.')[0]\n image_path = f'{data_home}{base_name}/{image_name}'\n\n if image_with_anns.get(image_path):\n image_with_anns[image_path].append(a)\n else:\n image_with_anns[image_path] = [a]\n\nfor image_path, anns in image_with_anns.items():\n img = cv2.imread(image_path)\n for ann in anns:\n score = ann.get('score')\n name = images_name.get(ann.get('image_id'))\n bbox = ann.get('bbox')\n image_name = os.path.split(name)[-1]\n cv2.rectangle(img, pt1=(int(bbox[0]), int(bbox[1])), pt2=(int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3])),\n color=(0, 255, 0),\n thickness=4)\n cv2.imwrite(f'./result/{os.path.split(image_path)[-1]}', img)\n","sub_path":"data/init_detect_result.py","file_name":"init_detect_result.py","file_ext":"py","file_size_in_byte":1275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"545765178","text":"import os\nimport re\nfrom multiprocessing import Pool\nfrom os import listdir\nfrom os.path import isdir, isfile, join\nfrom typing import AnyStr, Callable, Dict, Generator, List, Union\n\nimport numpy as np\nimport requests\nfrom fastdist import fastdist\nfrom tqdm import tqdm\n\n\ndef download_from_url(\n url: str, destination_filepath: str, chunk_size: int = 1024\n) -> None:\n \"\"\"\n Downloads a file from url to a specific destination filepath\n\n Parameters\n ----------\n url : str\n URL to download file from.\n destination_filepath : str\n Where to save the file after downloading it.\n chunk_size : int, optional\n Chunk size for downloading (default 1024).\n \"\"\"\n file_size = int(\n requests.head(url, headers={\"Accept-Encoding\": \"identity\"}).headers[\n \"Content-Length\"\n ]\n )\n with tqdm(total=file_size, initial=0, unit=\"B\", unit_scale=True) as progressbar:\n req = requests.get(url, stream=True)\n req.encoding = \"utf-8\"\n with (open(destination_filepath, \"ab\")) as f:\n for chunk in req.iter_content(chunk_size=chunk_size):\n if chunk:\n f.write(chunk)\n progressbar.update(chunk_size)\n\n\ndef get_cached_download_text_file(\n url: str,\n target_dir: str,\n filename: str,\n) -> str:\n \"\"\"\n Downloads and caches text file from url.\n\n Parameters\n ----------\n url : str\n URL to download file from.\n target_dir : str\n Which directory to save the file\n filename : str\n Name of file to save (including extension).\n\n Returns\n -------\n file_content : str\n Raw content of file as a string.\n \"\"\"\n # Create target directory if it does not exist.\n os.makedirs(target_dir, exist_ok=True)\n destination_filepath = join(target_dir, filename)\n\n if not os.path.exists(destination_filepath):\n\n # Download file from url to destination filepath\n download_from_url(url, destination_filepath)\n\n # Read cached content from file\n with open(destination_filepath, \"r\") as file:\n file_content = file.read()\n\n return file_content\n\n\ndef batch_list_gen(lst: List, batch_size: int) -> Generator[List, None, None]:\n \"\"\"\n Creates a generator for batching list into chunks of `batch_size`.\n\n Parameters\n ----------\n lst : 
List\n List of elements.\n batch_size : int\n Size of batches.\n\n Yields\n ------\n sub_lst : List\n Batches sublist of `lst`.\n \"\"\"\n lst_len = len(lst)\n for i in range(0, lst_len, batch_size):\n yield lst[i : min(i + batch_size, lst_len)]\n\n\ndef text_file_into_texts(filepath: str) -> List[str]:\n \"\"\"\n Reads a text file from disk and splits it into texts delimited by new line.\n\n Parameters\n ----------\n filepath : str\n Path of file to read\n\n Returns\n -------\n texts : list of str\n Text content of file split into a list of texts delimited by a new line.\n \"\"\"\n # Read file\n with open(filepath, \"r\") as file:\n text_content = file.read()\n\n # Split into texts\n texts = text_content.split(\"\\n\")\n\n return texts\n\n\ndef _make_file_gen(\n reader: Callable[[int], AnyStr], buffer_size: int = 1024 * 1024\n) -> Generator[AnyStr, None, None]:\n \"\"\"\n Helper function for reading file in batches (used in `text_files_total_line_count`).\n\n Parameters\n ----------\n reader : Callable[[int], AnyStr]\n file.read function.\n buffer_size : int\n Buffer size.\n\n Returns\n -------\n b : AnyStr\n File buffer.\n \"\"\"\n b = reader(buffer_size)\n while b:\n yield b\n b = reader(buffer_size)\n\n\ndef text_file_total_line_count(filepath: str) -> int:\n \"\"\"\n Counts number of lines in text file.\n\n Parameters\n ----------\n filepath : str\n Filepath of text file to count.\n\n Returns\n -------\n line_count : int\n Number of lines in text file\n \"\"\"\n f = open(filepath, \"rb\")\n f_gen = _make_file_gen(f.read)\n line_count = sum(buf.count(b\"\\n\") for buf in f_gen)\n f.close()\n return line_count\n\n\ndef text_files_total_line_count(filepaths: List[str]) -> int:\n \"\"\"\n Counts number of lines in text files.\n\n Parameters\n ----------\n filepaths : str\n Filepaths of text files to count\n\n Returns\n -------\n line_count : int\n Number of lines in text files\n \"\"\"\n total = 0\n with Pool() as pool:\n results = pool.imap_unordered(text_file_total_line_count, filepaths)\n for result in results:\n total += result\n return total\n\n\ndef get_all_filepaths(file_dir: str, file_ext: str) -> List[str]:\n \"\"\"\n Gets all paths of files of a specific file extension in a directory.\n\n Parameters\n ----------\n file_dir : str\n Directory containing files.\n file_ext : str\n File extension (including dot).\n\n Returns\n -------\n filepaths : list of str\n List of filepaths in file directory with given file extension.\n \"\"\"\n filepaths = [\n join(file_dir, f)\n for f in listdir(file_dir)\n if isfile(join(file_dir, f)) and f.endswith(file_ext)\n ]\n return filepaths\n\n\ndef get_all_filepaths_recursively(root_dir: str, file_ext: str) -> List[str]:\n \"\"\"\n Gets all paths of files of a specific file extension recursively in a directory.\n\n Parameters\n ----------\n root_dir : str\n Root directory to start the search from.\n file_ext : str\n File extension (including dot).\n\n Returns\n -------\n filepaths : list of str\n List of filepaths in root directory with given file extension.\n \"\"\"\n filepaths = get_all_filepaths(root_dir, file_ext)\n dirs = [\n dir_in_root\n for dir_in_root in listdir(root_dir)\n if isdir(join(root_dir, dir_in_root))\n ]\n for dir in dirs:\n files_in_dir = get_all_filepaths_recursively(join(root_dir, dir), file_ext)\n if files_in_dir:\n for filepath in files_in_dir:\n\n # Ensure the file size is > 0\n if os.stat(filepath).st_size == 0:\n continue\n\n filepaths.append(join(filepath))\n return filepaths\n\n\ndef get_model_checkpoint_filepaths(\n 
output_dir: str, model_name: str, dataset_name: str\n) -> Dict[str, Union[str, List[str], None]]:\n \"\"\"\n Gets model checkpoint filepaths of a specific model (trained on a specific dataset)\n in an output directory.\n\n Parameters\n ----------\n output_dir : str\n Output directory.\n model_name : str\n Name of the model.\n dataset_name : str\n Name of the dataset which the model has been trained on.\n\n Returns\n -------\n filepaths_dict : dict\n Dictionary containing filepaths to trained models, intermediate weight embeddings,\n words used during training and training log.\n \"\"\"\n # List files in output directory\n output_filenames = listdir(output_dir)\n\n # Filter by model_name and dataset_name entries only\n model_id = f\"{model_name}_{dataset_name}\"\n output_filenames = [fn for fn in output_filenames if fn.startswith(model_id)]\n\n # Get model training configuration filepath\n model_training_conf_filepath = join(output_dir, f\"{model_id}.conf\")\n\n # Get model filenames and sort them by epoch numbers (from first to last).\n model_filenames = np.array([fn for fn in output_filenames if fn.endswith(\".model\")])\n model_epoch_nrs = np.array(\n [int(re.findall(r\"_(\\d{2}).model\", fn)[0]) for fn in model_filenames]\n )\n model_filenames = model_filenames[np.argsort(model_epoch_nrs)]\n\n # Append output directory to filenames\n model_filepaths = [join(output_dir, fn) for fn in model_filenames]\n\n # Get intermediate embedding weights sorted by first to last\n intermediate_embedding_weight_filenames = np.array(\n [fn for fn in output_filenames if fn.endswith(\"weights.npy\")]\n )\n intermediate_embedding_weight_filepaths = None\n intermediate_embedding_weight_normalized_filepaths = None\n intermediate_embedding_weight_annoy_index_filepaths = None\n intermediate_embedding_weight_scann_artifact_dirs = None\n if len(intermediate_embedding_weight_filenames) > 0:\n\n # Extract combined epoch/embedding nrs and sort by them.\n epoch_embedding_nrs = []\n for fn in intermediate_embedding_weight_filenames:\n epoch_nr, embedding_nr = re.findall(r\"_(\\d{2})_(\\d{2})_weights.npy\", fn)[0]\n epoch_embedding_nr = int(f\"{epoch_nr}{embedding_nr}\")\n epoch_embedding_nrs.append(epoch_embedding_nr)\n epoch_embedding_nrs = np.array(epoch_embedding_nrs)\n intermediate_embedding_weight_filenames = (\n intermediate_embedding_weight_filenames[np.argsort(epoch_embedding_nrs)]\n )\n\n # Append output directory to filenames\n intermediate_embedding_weight_filepaths = [\n join(output_dir, fn) for fn in intermediate_embedding_weight_filenames\n ]\n\n # Check for normalized/Annoy index filepaths\n for fn in intermediate_embedding_weight_filenames:\n fn_no_ext = fn.rsplit(\".\", 1)[0]\n for output_fn in output_filenames:\n output_filepath = join(output_dir, output_fn)\n if output_fn.startswith(fn_no_ext):\n if output_fn.endswith(\"_normalized.npy\"):\n if intermediate_embedding_weight_normalized_filepaths is None:\n intermediate_embedding_weight_normalized_filepaths = []\n intermediate_embedding_weight_normalized_filepaths.append(\n output_filepath\n )\n elif output_fn.endswith(\"_annoy_index.ann\"):\n if intermediate_embedding_weight_annoy_index_filepaths is None:\n intermediate_embedding_weight_annoy_index_filepaths = []\n intermediate_embedding_weight_annoy_index_filepaths.append(\n output_filepath\n )\n elif output_fn.endswith(\"_scann_artifacts\"):\n if intermediate_embedding_weight_scann_artifact_dirs is None:\n intermediate_embedding_weight_scann_artifact_dirs = []\n 
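# record the directory holding this checkpoint's serialized ScaNN artifacts\n 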
intermediate_embedding_weight_scann_artifact_dirs.append(\n output_filepath\n )\n\n train_words_filename = f\"{model_id}_words.txt\"\n train_words_filepath = None\n if train_words_filename in output_filenames:\n train_words_filepath = join(output_dir, train_words_filename)\n\n train_word_counts_filename = f\"{model_id}_word_counts.txt\"\n train_word_counts_filepath = None\n if train_word_counts_filename in output_filenames:\n train_word_counts_filepath = join(output_dir, train_word_counts_filename)\n\n # Add path to train logs\n train_logs_filename = f\"{model_id}_logs.csv\"\n train_logs_filepath = None\n if train_logs_filename in output_filenames:\n train_logs_filepath = join(output_dir, train_logs_filename)\n\n return {\n \"model_training_conf_filepath\": model_training_conf_filepath,\n \"model_filepaths\": model_filepaths,\n \"intermediate_embedding_weight_filepaths\": intermediate_embedding_weight_filepaths,\n \"intermediate_embedding_weight_normalized_filepaths\": intermediate_embedding_weight_normalized_filepaths,\n \"intermediate_embedding_weight_annoy_index_filepaths\": intermediate_embedding_weight_annoy_index_filepaths,\n \"intermediate_embedding_weight_scann_artifact_dirs\": intermediate_embedding_weight_scann_artifact_dirs,\n \"train_words_filepath\": train_words_filepath,\n \"train_word_counts_filepath\": train_word_counts_filepath,\n \"train_logs_filepath\": train_logs_filepath,\n }\n\n\ndef pairwise_cosine_distances(X: np.ndarray) -> np.ndarray:\n \"\"\"\n Computes pairwise cosine distances.\n\n Parameters\n ----------\n X : np.ndarray\n Numpy (n-m)-matrix to compute pairwise cosine distances of.\n\n Returns\n -------\n X_cosine_dists : np.ndarray\n Square (n-n) Numpy matrix containing pairwise cosine distance.\n \"\"\"\n # Compute pairwise cosine distances (1 - similarity) in X\n X_cosine_dists = 1 - fastdist.cosine_matrix_to_matrix(X, X)\n\n # Ensure diagonal is filled with zeros\n np.fill_diagonal(X_cosine_dists, 0)\n\n # Clip values between 0 and 1 to ensure validity\n X_cosine_dists = np.clip(X_cosine_dists, 0, 1)\n\n return X_cosine_dists\n\n\ndef create_word_embeddings_distances_matrix(\n word_embeddings: np.ndarray,\n vocabulary: np.ndarray,\n) -> np.ndarray:\n \"\"\"\n Creates distance matrix for word embeddings\n\n Parameters\n ----------\n word_embeddings : np.ndarray\n Word embeddings\n vocabulary : np.ndarray\n Array consisting of word integers to use as vocabulary\n\n Returns\n -------\n word_embeddings_distances : np.ndarray\n Pairwise cosine distances between word embeddings from vocabulary\n \"\"\"\n # Compute cosine distance matrix\n word_embeddings_to_precompute = word_embeddings[vocabulary]\n word_embeddings_distances = pairwise_cosine_distances(word_embeddings_to_precompute)\n return word_embeddings_distances\n\n\ndef cosine_distance(x: np.ndarray, y: np.ndarray) -> float:\n \"\"\"\n Computes the cosine distance between two points x and y.\n\n Parameters\n ----------\n x : np.ndarray\n First point\n y : np.ndarray\n Second point\n\n Returns\n -------\n cosine_dist : float\n Cosine distance between x and y\n \"\"\"\n return 1 - fastdist.cosine(x, y)\n\n\ndef cosine_vector_to_matrix_distance(x: np.ndarray, y: np.ndarray) -> np.ndarray:\n \"\"\"\n Computes the cosine distance between a 1D vector x and a\n matrix of vectors y.\n\n Parameters\n ----------\n x : np.ndarray\n 1D vector\n y : np.ndarray\n Matrix\n\n Returns\n -------\n cosine_dist : np.ndarray\n Cosine distance between x and y as a vector\n \"\"\"\n return 1 - fastdist.cosine_vector_to_matrix(x, 
y)\n\n\ndef words_to_vectors(\n words_vocabulary: list,\n word_to_int: dict,\n word_embeddings: np.ndarray,\n) -> np.ndarray:\n \"\"\"\n Gets word embeddings for a list of words.\n\n Parameters\n ----------\n words_vocabulary : list\n List of words/integers from vocabulary to find word embeddings from\n word_to_int : dict\n Dictionary mapping from word to integer\n word_embeddings : np.ndarray\n Word embeddings\n\n Returns\n -------\n word_vectors : np.ndarray\n Word embeddings of input words\n \"\"\"\n # Create word vectors from given words/vocabulary\n if all(isinstance(elem, str) for elem in words_vocabulary):\n word_vectors = np.zeros((len(words_vocabulary), word_embeddings.shape[1]))\n for i, word in enumerate(words_vocabulary):\n word_vectors[i] = word_embeddings[word_to_int[word]]\n elif all(isinstance(elem, int) for elem in words_vocabulary):\n word_vectors = word_embeddings[words_vocabulary]\n else:\n raise TypeError(\n \"words_vocabulary argument must contain list of words or integers representing the vocabulary.\"\n )\n return word_vectors\n\n\ndef normalize_array(arr: np.ndarray):\n \"\"\"\n Normalizes a vector to be unit-length if 1D or unit-length rows if 2D.\n\n Parameters\n ----------\n arr : np.ndarray\n Array to normalize\n\n Returns\n -------\n arr_norm : np.ndarray\n Normalized array\n \"\"\"\n # Do not perform normalization if array is null-vector as it leads to division by zero.\n if np.allclose(arr, np.zeros(arr.shape)):\n return arr\n\n if len(arr.shape) == 1:\n arr_norm = arr / np.linalg.norm(arr)\n else:\n arr_norm = np.empty(arr.shape)\n for i, row in enumerate(arr):\n if np.allclose(row, np.zeros(row.shape)):\n arr_norm[i] = row\n else:\n arr_norm[i] = row / np.linalg.norm(row)\n\n return arr_norm\n","sub_path":"code/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":16130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"608850512","text":"\"\"\"\nNon-decreasing Array with Single Modification\n\nHi, here's your problem today. This problem was recently asked by Microsoft:\n\nYou are given an array of integers in an arbitrary order. 
Return whether or not it is possible to make the array non-decreasing by modifying at most 1 element to any value.\n\nWe define an array as non-decreasing if array[i] <= array[i + 1] holds for every i (1 <= i < n).\n\nExample:\n\n[13, 4, 7] should return true, since we can modify 13 to any value 4 or less, to make it non-decreasing.\n\n[13, 4, 1] however, should return false, since there is no way to modify just one element to make the array non-decreasing.\n\nCan you find a solution in O(n) time?\n\n\"\"\"\nfrom typing import List\n\n\ndef solution(array: List[int]) -> bool:\n # Track whether the single allowed modification has been spent.\n modified = False\n\n for index in range(1, len(array)):\n if array[index - 1] > array[index]:\n if modified:\n return False\n modified = True\n # Prefer (conceptually) lowering array[index - 1]; raise array[index] instead when lowering would break the order with array[index - 2].\n if index >= 2 and array[index] < array[index - 2]:\n array[index] = array[index - 1]\n\n return True\n\n\nif __name__ == \"__main__\":\n array = [13, 4, 7, 9, 10, 11, 12, 15]\n print(f\"Solution({array}) -> \", solution(array))\n\n array = [5, 1, 3, 2, 5]\n print(f\"Solution({array}) -> \", solution(array))\n","sub_path":"python/daily_interview_pro/202002/20200211.py","file_name":"20200211.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"311521380","text":"import torch\nfrom torch.autograd import Variable\nimport time\n\nclass NN(torch.nn.Module):\n \n def __init__(self, n_inputs, network, n_outputs, relu=False, gpu=True):\n super(NN, self).__init__()\n network_layers = [torch.nn.Linear(n_inputs, network[0])]\n if len(network) > 1:\n network_layers.append(torch.nn.Tanh() if not relu else torch.nn.ReLU())\n for i in range(len(network)-1):\n network_layers.append(torch.nn.Linear(network[i], network[i+1]))\n network_layers.append(torch.nn.Tanh() if not relu else torch.nn.ReLU())\n network_layers.append(torch.nn.Linear(network[-1], n_outputs))\n self.model = torch.nn.Sequential(*network_layers)\n self.to(torch.device('cuda' if torch.cuda.is_available() else 'cpu')).double()\n self.processed = False\n \n def tensor(self, np_array):\n return torch.from_numpy(np_array.astype('double')).cuda() if torch.cuda.is_available() else torch.from_numpy(np_array.astype('double')) # Return tensor for Torch\n \n def standardise(self, data, mean, sd):\n return (data-mean)/sd\n \n def process(self, X, T):\n X, T = self.tensor(X), self.tensor(T)\n if not self.processed:\n self.processed = True\n self.Xmeans, self.Xstds, self.Tmeans, self.Tstds = X.mean(dim=0), X.std(dim=0), T.mean(dim=0), T.std(dim=0)\n return self.standardise(X, self.Xmeans, self.Xstds), self.standardise(T, self.Tmeans, self.Tstds) # Return standardised inputs\n \n def forward(self, X):\n return self.model(X) # Output of forward pass is passing data through the model\n \n def train_pytorch(self, X, T, n_iterations, batch_size, learning_rate=10**-3, use_SGD=False, verbose=False):\n start_time = time.time()\n X, T = self.process(X, T)\n optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate) if not use_SGD else torch.optim.SGD(self.parameters(), lr=learning_rate)\n loss_func = torch.nn.MSELoss()\n errors = []\n n_examples = X.shape[0]\n for i in range(n_iterations):\n num_batches = n_examples//batch_size\n for j in range(num_batches):\n start, end = j*batch_size, (j+1)*batch_size\n X_batch, T_batch = Variable(X[start:end, ...], requires_grad=False), Variable(T[start:end, ...], requires_grad=False)\n # Forward pass\n outputs = self(X_batch)\n loss = loss_func(outputs, T_batch)\n # Backward and optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n errors.append(torch.sqrt(loss)) # Error 
at end of iteration\n if verbose:\n print(f'Iteration {i+1} training completed. Error rate: {errors[-1]}')\n self.time = time.time()-start_time\n return self, errors\n \n def use_pytorch(self, X):\n X = self.tensor(X)\n with torch.no_grad():\n return self(X).cpu().numpy() if torch.cuda.is_available() else self(X).numpy() # Return Y\n","sub_path":"DistributedML/neuralnetworks.py","file_name":"neuralnetworks.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"538234482","text":"from bs4 import BeautifulSoup\n\nfrom kik_unofficial.message.message import Message, Response\nfrom kik_unofficial.peer import Group, User\n\n\nclass RosterMessage(Message):\n def __init__(self):\n super().__init__()\n\n def serialize(self) -> bytes:\n data = (''\n ''\n '').format(self.message_id)\n return data.encode()\n\n\nclass RosterResponse(Response):\n def __init__(self, data: BeautifulSoup):\n super().__init__(data)\n self.members = [self.parse_member(element) for element in iter(data.query)]\n\n @staticmethod\n def parse_member(element):\n if element.name == \"g\":\n return Group(element)\n elif element.name == \"item\":\n return User(element)\n\n\nclass FriendMesssage(Message):\n def __init__(self, username):\n super().__init__()\n self.username = username\n\n def serialize(self) -> bytes:\n data = (''\n ''\n ''\n ''\n '').format(self.message_id, self.username)\n return data.encode()\n\n\nclass FriendMessageResponse(Response):\n def __init__(self, data: BeautifulSoup):\n super().__init__(data)\n self.user = User(data.query.item)\n\n\nclass BatchFriendMesssage(Message):\n def __init__(self, peer_jid):\n super().__init__()\n self.peer_jid = peer_jid\n\n def serialize(self) -> bytes:\n data = (''\n ''\n ''\n ''\n '').format(self.message_id, self.peer_jid)\n return data.encode()\n\n\nclass AddFriendMessage(Message):\n def __init__(self, peer_jid):\n super().__init__()\n self.peer_jid = peer_jid\n\n def serialize(self):\n data = '' \\\n '' \\\n '' \\\n '' \\\n ''.format(self.message_id, self.peer_jid)\n return data.encode()\n","sub_path":"kik_unofficial/message/roster.py","file_name":"roster.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"188721889","text":"#!/usr/bin/python\nimport os\nimport sys\nimport urllib\nimport urllib2\nimport ssl\nimport optparse\nimport subprocess\nimport time\nimport re\nimport logging\n\nlogging.basicConfig()\nlog = logging.getLogger(__name__)\n\n\ndef fix_path(fn):\n def decorator(*args, **kw):\n pathmap = {\n '/sbin': '/usr/sbin',\n '/bin': '/usr/bin',\n '/usr/sbin': '/sbin',\n '/usr/bin': '/bin'\n }\n\n curr_path = args[0].split()[0]\n\n if os.path.exists(curr_path):\n return fn(*args, **kw)\n\n args = list(args)\n\n for path in pathmap.keys():\n if curr_path.startswith(path):\n new_path = curr_path.replace(path, pathmap[path])\n if os.path.exists(new_path):\n args[0] = args[0].replace(path, pathmap[path])\n return fn(*args, **kw)\n\n raise OSError('path was not found')\n\n return decorator\n\n\nsubprocess.call = fix_path(subprocess.call)\n\n\ndef url_join(*parts):\n return '/'.join(part.strip('/') for part in parts)\n\n\ndef main(args=None):\n parser = optparse.OptionParser()\n parser.add_option('-k', '--ksurl', dest='ksurl',\n help='kickstart url')\n parser.add_option('-e', '--efi', dest='efi', action='store_true',\n help='use efi')\n\n options, args = parser.parse_args()\n\n 
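# scan the kickstart file for the 'url --url=...' line that points at the install tree\n 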
install_url = None\n\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n\n for line in urllib2.urlopen(options.ksurl, context=ctx).readlines():\n urlre = re.search('^url\\s+--url=(https?://[^\\s]+)', line)\n if urlre:\n install_url = urlre.group(1)\n break\n\n assert install_url is not None, 'install_url is nil!!!'\n\n boot_files = 'vmlinuz', 'initrd.img'\n\n for name in boot_files:\n urllib.urlretrieve(\n url_join(install_url, 'images', 'pxeboot', name),\n '/boot/%s-kickself' % name\n )\n\n # call grubby\n subprocess.call(' '.join([\n '/usr/sbin/grubby --title=\\\"Kickself %s\\\"' % time.ctime(),\n '--make-default --add-kernel=%s --initrd=%s' % tuple(\n '/boot/%s-kickself' % x for x in boot_files\n ),\n '--args=\\\"ksdevice=link lang= text noverifyssl inst.ks=%s\\\"' % options.ksurl,\n options.efi and '--efi --copy-default' or ''\n ]), shell=True)\n\n # remove root argument\n subprocess.call(' '.join([\n '/usr/sbin/grubby',\n '--update-kernel=/boot/vmlinuz-kickself',\n '--remove-args=\\\"root rhgb quiet\\\"',\n ]), shell=True)\n\n if options.efi:\n log.info('you may need to remove the root argument')\n\nif __name__ == '__main__':\n sys.exit(main())\n\n","sub_path":"kickself.py","file_name":"kickself.py","file_ext":"py","file_size_in_byte":2715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"408716110","text":"from configparser import ConfigParser\n\nfrom azure.cosmos import cosmos_client, PartitionKey, exceptions as cosmos_except\nfrom azure.mgmt.managementgroups import ManagementGroupsAPI\nfrom dotenv import load_dotenv\n\nfrom mop.azure.utils.create_configuration import change_dir, OPERATIONSPATH, CONFVARIABLES\nfrom mop.azure.comprehension.operations.subscriptions import Subscriptions\nfrom mop.framework.azure_connections import AzureConnections\n\n\nclass AggregateCosmosDb:\n def __init__(self, client_id, key, tenant_id, url, cosmos_key):\n load_dotenv()\n self.url = url\n self.__comos_key = cosmos_key\n self.cosmos_client = cosmos_client.CosmosClient(url, cosmos_key)\n\n self.credentials = AzureConnections().authenticate_device_code(CLIENT=client_id, KEY=key,\n TENANT_ID=tenant_id)\n\n with change_dir(OPERATIONSPATH):\n self.config = ConfigParser()\n self.config.read(CONFVARIABLES)\n\n def delete_database(self, id):\n self.cosmos_client.delete_database(id)\n\n def get_cosmos_container(self, database, container_name, partition, offer_throughput=400):\n partition_key = PartitionKey(path=partition, kind='Hash')\n container = database.create_container_if_not_exists(\n id=container_name,\n partition_key=partition_key,\n offer_throughput=offer_throughput\n )\n return container\n\n def get_cosmos_db(self, cosmos_db_name):\n databases = list(self.cosmos_client.list_databases())\n for database in databases:\n if cosmos_db_name in database['id']:\n return self.cosmos_client.get_database_client(cosmos_db_name)\n\n return self.cosmos_client.create_database(cosmos_db_name)\n\n\nclass AggregateCosmosDbSubscriptions(AggregateCosmosDb):\n\n def get_management_group_entities(self, management_grp, subscriptions_only = True):\n entity_list = list()\n management_client = ManagementGroupsAPI(self.credentials)\n mngrp_subscriptions = management_client.entities.list(group_name=management_grp)\n for entity in mngrp_subscriptions:\n if subscriptions_only:\n if \"/subscriptions\" in entity.type:\n sub_entity = dict()\n sub_entity['name'] = entity.name\n sub_entity['display_name'] = entity.display_name\n 
sub_entity['resource_id'] = entity.id\n sub_entity['tenant_id'] = entity.tenant_id\n entity_list.append(sub_entity)\n else:\n raise NotImplementedError\n\n return entity_list\n\n\n def publish_subscription_info(self, cosmos_db_name, container_name, partition, offer_throughput=400):\n\n database = self.get_cosmos_db(cosmos_db_name)\n container = self.get_cosmos_container(database, container_name, partition, offer_throughput)\n\n\n # try:\n # database.create_container(id=id, partition_key=partition_key)\n # print('Container with id \\'{0}\\' created'.format(id))\n #\n # except cosmos_except.CosmosResourceExistsError:\n # pass\n\n\n\n","sub_path":"src/mop/azure/analysis/baseline/aggregate_cosmos_subscriptions.py","file_name":"aggregate_cosmos_subscriptions.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"267933825","text":"\"\"\"src URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom profileapp import views\n\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^$', views.index , name='index'),\n url(r'^helper/', views.helper , name='helper'),\n url(r'^seeker/', views.seeker , name='seeker'),\n url(r'^browse/', views.browse , name='browse'),\n url(r'^request/', views.request , name='request'),\n url(r'^post/(?P<id>\\d+)/$', views.post_detail, name='post_detail'),\n url(r'^post_edit/(?P<id>\\d+)/$', views.post_edit, name='post_edit'),\n url(r'^offer/(?P<id>\\d+)/$', views.offer, name='offer'),\n url(r'^seeker_commitments/', views.seeker_commitments , name='seeker_commitments'),\n]\n ","sub_path":"src/src/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"544178044","text":"import boto3\r\n\r\ndef get_secret_client(environment,aws_key,aws_secret):\r\n local_url = \"http://host.docker.internal:4566\"\r\n current_url = local_url if environment == 'local' else None\r\n\r\n session = boto3.session.Session()\r\n return session.client(\r\n service_name='secretsmanager',\r\n aws_access_key_id=aws_key,\r\n aws_secret_access_key=aws_secret,\r\n endpoint_url=current_url\r\n )\r\n","sub_path":"layers/secrets_manager_generator/secrets_manager_generator.py","file_name":"secrets_manager_generator.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"573300520","text":"#!/usr/bin/env python\nfrom unittest import main, TestCase\nfrom assert_is import ok_, not_\nfrom launchdplist import launchdplist\n\nclass TestCase(TestCase):\n def test_unlink(self):\n plist=launchdplist(\"file.plist\")\n plist.create()\n ok_(plist.exists)\n plist.unlink()\n not_(plist.exists)\n\nif __name__ == \"__main__\":\n 
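# run the unittest test runner for the TestCase defined above\n 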
main()","sub_path":"tests/test_unlink.py","file_name":"test_unlink.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"556288111","text":"# -*- coding: utf-8 *-*\n'''\nCreated on 05/06/2014\n\n@author: Admin\n'''\n# -----------\n# Librerias\n# -----------\nimport pygame\n# -----------\n# Constantes\n# -----------\n# ------------------------------\n# Clases y Funciones utilizadas\n# ------------------------------\nimport UserConfigView\nimport Clases.encriptador\n# ------------------------------\n# Funcion principal del Programa\n\"\"\" Controlador de la Interfaz del Usuario\"\"\"\n# ------------------------------\n\n\nclass UserController:\n def __init__(self,sistemaop):\n # Guardamos el SO\n self.sistemaop = sistemaop \n \n # Instancia para la VISTA\n self.vista = UserConfigView.UserConfigView(sistemaop)\n\n if sistemaop == \"linux2\":\n archivo = \"/opt/BitacoraL/src/files/profile1\"\n else:\n archivo = \"C:/Program Files/Bitacora/src/files/profile1\"\n\n # Configuracion del Objeto a la Instancia de la Clase\n lista_tags = [\"[user]\",\"[pwd]\"]\n # Instancia para el Encriptador\n self.encriptador = Clases.encriptador.Encriptador(sistemaop,lista_tags,archivo)\n\n # Cargamos todo lo relacionado a pygame\n pygame.init() \n\n \"\"\"---------------------------------------Metodos-------------------------------------------------------\"\"\"\n def crear_interfaz(self):\n self.vista.crear_interfaz()\n \n def actualizar_datos(self):\n \"Metodo para Configurar Usuario y Pwd del Super Usuario\"\n mensaje = \"\"\n user = self.vista.t_usuario.getTxt()\n if user != \"\":\n tmp = \"\"\n for caracter in user:\n entero = ord(caracter)\n tmp += chr(entero)\n pwd = self.vista.t_pwd.getTxt()\n if pwd != \"\":\n tmp2 = \"\"\n for caracter in pwd:\n entero = ord(caracter)\n tmp2 += chr(entero)\n \n lista_newdata = [tmp,tmp2]\n self.encriptador.actualizar_archivo(lista_newdata)\n mensaje = \"Usuario Actualizado\"\n else:\n mensaje = \"Ingresar Pwd\"\n else:\n mensaje = \"Ingresar Usuario\"\n self.vista.mensaje.update_prompt(mensaje)\n return\n #tmp =\"cadena\"\n\n\n \"\"\"--------------------------------------Eventos-------------------------------------------------------\"\"\"\n def eventos_config(self):\n \"Metodo para Los Eventos en la Vista del Usuario\"\n # Iniciamos con el foco en el textbox del Usuario\n band_write = 1\n \n res = \"\"\n while True:\n # Empezamos a capturar la lista de Eventos\n events = pygame.event.get()\n for event in events:\n if event.type == pygame.KEYDOWN:\n # Para iterar entre los dos TextBox de Usuario y Pwd\n if event.key == pygame.K_TAB:\n #print \"Click en tecla TAB\"\n if band_write < 2:\n band_write += 1\n elif band_write == 2:\n band_write = 1\n \n elif event.type == pygame.MOUSEBUTTONDOWN:\n self.vista.mensaje.update_prompt(\"\") \n # Dependiendo de la zona donde se hizo click se realiza una accion\n x, y = event.pos\n if x>= 45 and x <= 295 and y >= 75 and y<= 100:\n # Click en Textbox usuario\n band_write = 1\n elif x>= 45 and x <= 295 and y >= 125 and y<= 150:\n # Click en Textbox pwd\n band_write = 2\n \n elif self.vista.actualizar.collidepoint(x, y):\n # Click en Boton Actualizar\n self.actualizar_datos()\n\n elif self.vista.regresar.collidepoint(x, y):\n # Click en Boton Regresar\n return\n \n if band_write == 1:\n # Se ingresan datos en el TextBox del Usuario\n self.vista.t_usuario.update(events,self.sistemaop)\n elif band_write == 2:\n # Se ingresan datos en el TextBox 
for the pwd\n self.vista.t_pwd.update(events,self.sistemaop)\n self.vista.surface()\n self.vista.refresh_display()","sub_path":"Configuracion/Usercontroller.py","file_name":"Usercontroller.py","file_ext":"py","file_size_in_byte":4464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"471401140","text":"from handle_sms import Handle_SMS_HTTP_Response_Builder\nfrom model.user_model import User\nfrom model.db_session import DB_Session_Factory\nfrom model.interviewer import Interviewer\nimport re\nfrom string import strip, lower\nfrom datetime import datetime, timedelta\n\nclass Handle_Score_SMS_HTTP_Response_Builder(Handle_SMS_HTTP_Response_Builder):\n number_map = {\n \"one\" : 1,\n \"two\" : 2,\n \"three\" : 3,\n \"four\" : 4,\n }\n\n modifier_map = {\n \"+\" : .25,\n \"-\" : -.25\n }\n\n @staticmethod\n def parse_score(message):\n message = strip(lower(message))\n match = re.match('^([1-4]|one|two|three|four)\\\s*(\\\+|\\\-)?$', message)\n result = None\n if match is not None:\n number = int(Handle_Score_SMS_HTTP_Response_Builder.number_map.get(match.group(1), match.group(1)))\n modifier = Handle_Score_SMS_HTTP_Response_Builder.modifier_map.get(match.group(2), 0)\n result = number + modifier\n return result\n\n def process_sms(self):\n self.from_phone_number = self.from_phone_number[2:]\n self.to_phone_number = self.to_phone_number[2:]\n db_session = DB_Session_Factory.get_db_session()\n interviewer = Interviewer.get_interviewer_by_phone_number(self.from_phone_number)\n interview = None\n response_msg = \"Thanks for your feedback\"\n if interviewer is None:\n response_msg = \"I don't know who you are or what you want from me.\"\n else:\n interview = interviewer.get_most_recently_completed_interview(self.to_phone_number, for_update = True)\n if interview is None:\n response_msg = \"You haven't done an interview recently so we have nothing to talk about.\"\n\n if interview is not None and interview.technical_score is None and not interview.is_coffee_break():\n # The user should be trying to send in the technical score.\n score = Handle_Score_SMS_HTTP_Response_Builder.parse_score(self.sms_body)\n if score is None:\n response_msg = \"Invalid technical score. Valid input is 1, 2, 3, 4 or one, two, three, four. You can also use +/-. Please try again.\"\n else:\n interview.technical_score = score\n response_msg = \"What's the cultural score?\"\n elif interview is not None and interview.cultural_score is None:\n # The user should be trying to send in the cultural score.\n score = Handle_Score_SMS_HTTP_Response_Builder.parse_score(self.sms_body)\n if score is None:\n response_msg = \"Invalid cultural score. Valid input is 1, 2, 3, 4 or one, two, three, four. You can also use +/-. Please try again.\"\n else:\n interview.cultural_score = score\n response_msg = \"Thanks. Feel free to send in any notes you have about \" + interview.candidate_name + \" in subsequent texts.\"\n elif interview is not None:\n # The user should be trying to send in notes for the interview.\n if interview.notes is None:\n interview.notes = \"\"\n interview.notes = interview.notes + self.sms_body\n if interview.notes_ts is None or datetime.now() - interview.notes_ts >= timedelta(seconds=3):\n response_msg = \"Thanks. 
Your feedback was added to \" + interview.candidate_name + \"'s file.\"\n else:\n response_msg = None\n db_session.commit()\n return response_msg\n","sub_path":"api/http_response_builder/sms/handle_score_sms.py","file_name":"handle_score_sms.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"290220428","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nInteger ordering problem, in brief: given any three integers x, y, z. 
Task: output these three numbers in ascending order\nPython solution approach: first, find a way to get the smallest number into x, then compare x with y;
\nIf x > y, swap the values of x and y;
然后再用x与z进行比较,如果x>z则将x与z的值进行交换,这样能使x最小。\n\"\"\"\n\nl = []\n\nfor i in range(3):\n x = int(input(\"输入一个数字:\"))\n l.append(x)\n\nl.sort()\n\nprint('从小到大顺序为:',l)\n","sub_path":"lt-test/it-05-20190117.py","file_name":"it-05-20190117.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"246789826","text":"# 学号: 2016012963\n# 函数:y = cos(20x+63)\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef func(x):\n return math.cos(20 * x + 63)\n\nif __name__ == \"__main__\":\n x = np.linspace(-63.0 / 20, float(2 * math.pi - 63) / 20, 2000)\n y = [func(i) for i in x]\n\n f1 = np.polyfit(x, y, 3)\n p = np.poly1d(f1)\n\n y2 = p(x)\n\n plt.scatter(x, y)\n plt.scatter(x, y2)\n plt.show()","sub_path":"tensorflow2/class1/work1/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"354164190","text":"import sys\nimport csv\n\nsys.path.insert(0, './social_id_fetcher')\nsys.path.insert(0, './score_calculator')\nsys.path.insert(0, './social_trail_fetcher')\n\nfrom pycpfcnpj.gen import cpf as cpf_generator\n\nfrom base_a.manager import Manager as ManagerA\nfrom base_a.utils import address_generator\nfrom base_b.manager import Manager as ManagerB\nfrom base_c.manager import Manager as ManagerC\n\n\nPEOPLE_COUNT = 1000\n\ndef generate_data():\n id_list = []\n for _ in range(PEOPLE_COUNT):\n id_list.append({\n 'cpf': cpf_generator(),\n 'address': address_generator()\n })\n\n print('Gerando dados para Base A')\n manager_a = ManagerA()\n manager_a.restart_models_tables()\n manager_a.generate_people(id_list)\n\n print('Gerando dados para Base B')\n manager_b = ManagerB()\n manager_b.restart_models_tables()\n manager_b.generate_people(id_list)\n\n print('Gerando dados para Base C')\n manager_c = ManagerC()\n manager_c.drop_collection()\n manager_c.generate_people(id_list)\n\n with open('people.csv', 'w', newline='') as csvfile:\n peoplewriter = csv.writer(csvfile, delimiter=' ',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n peoplewriter.writerow(['csv', 'address'])\n for person_info in id_list:\n peoplewriter.writerow(person_info.values())\n\nif __name__ == \"__main__\":\n # execute only if run as a script\n generate_data()","sub_path":"db_admin.py","file_name":"db_admin.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"509655780","text":"#import standart modules\nimport os\nimport copy\nimport logging\nimport threading\nimport queue\nimport random\nimport time\nimport typing as T\n\nimport settings\nfrom getMessages import Updater\nfrom sendMessages import Sender\nfrom msgToProtokol import MessageToProtocol\nfrom user import User, BannedUserSearch\nfrom storages import DataBase, UIDS\nimport location\nfrom protokol import Stop\n#linter imp\nimport inMessage\n\nclass Bus:\n BOT_ID : int \n PATH : str\n users : T.List[User] = []\n usersData : list = []\n chatSettings : 'settings.Settings' = settings.Settings()\n uDB : DataBase\n _error_count : int = 0\n _message_cycle_count: int = 0\n _message_time_count : float = 0.0\n\n def __init__(self, token: str):\n\n self.token = token\n self.BOT_ID = int(token.split(':',-1)[0])\n self.PATH = 'BData/{0}/'.format(token.split(':',-1)[0])\n self.logger = logging.getLogger('MainBus')\n\n try:\n os.mkdir('BData', mode=0o777, dir_fd=None)\n except OSError:\n 
self.logger.info('Directory {0} already created'.format('BData'))\n \n try:\n os.mkdir(self.PATH, mode=0o777, dir_fd=None)\n except OSError:\n self.logger.info('Directory {0} already created'.format(self.PATH))\n\n self.worklist = []\n\n self.in_queue = queue.Queue()\n self.out_queue = queue.Queue()\n self.uDB = DataBase(self)\n self.uidGen = UIDS()\n\n self.offset = None\n\n self.upd = Updater(self.token, self.in_queue)\n self.upd.daemon = True\n self.worklist.append(self.upd)\n self.upd.start()\n\n self.snd = Sender(self.token, self.out_queue)\n self.snd.daemon = False\n self.worklist.append(self.snd)\n self.snd.start()\n\n self.startLocation = location.NewbieIncome(self)\n self.mTp = MessageToProtocol(self)\n\n def run(self):\n self.logger.debug(f'Start Core {self.BOT_ID}')\n while True:\n try:\n self.logger.debug(str(self))\n msg: 'inMessage.InMessage' = self.in_queue.get()\n self.mTp.form_protocol(msg)\n self._message_cycle_count += 1\n except KeyboardInterrupt:\n self.logger.info('Stop Core')\n [i.incomeQueue.put(Stop()) for i in self.users]\n time.sleep(3)\n self.out_queue.put('STOP')\n break\n except BannedUserSearch:\n self.logger.info('Banned user sent msg')\n pass\n except Exception as ex:\n self.logger.error(ex, exc_info=True)\n self._error_count += 1\n finally:\n if self.uDB.session.dirty:\n self.logger.debug('commit session')\n self.uDB.session.commit()\n\n def userFromMsgID(self, msg: 'inMessage.InMessage') -> 'User':\n searchedUsers = [i for i in self.users if i.tID == msg.chat_id]\n if searchedUsers:\n return searchedUsers[0]\n else:\n newUser = User(self)\n newUser.init(msg.chat_id)\n self.uDB.get_userData(newUser)\n if newUser.data.flagBanned:\n raise BannedUserSearch(newUser.tID)\n self.users.append(newUser)\n newUser.start()\n # newUser.incomeQueue.put(protokol.EnterLocation(self.startLocation))\n return newUser\n\n def __str__(self) -> str:\n return f'users: {len(self.users)} - errors {self._error_count}'\n ","sub_path":"core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":3668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"374999872","text":"import numpy as np\nimport pytest\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import estimator_checks\nfrom sklearn.utils.estimator_checks import check_transformers_unfitted\n\nfrom sklego.common import flatten\nfrom sklego.preprocessing import RandomAdder\nfrom tests.conftest import nonmeta_checks\n\n\n@pytest.mark.parametrize(\n \"test_fn\",\n flatten(\n [\n nonmeta_checks,\n # Transformer checks\n check_transformers_unfitted,\n # General checks\n estimator_checks.check_fit2d_predict1d,\n estimator_checks.check_fit2d_1sample,\n estimator_checks.check_fit2d_1feature,\n estimator_checks.check_fit1d,\n estimator_checks.check_get_params_invariance,\n estimator_checks.check_set_params,\n estimator_checks.check_dict_unchanged,\n estimator_checks.check_dont_overwrite_parameters,\n ]\n ),\n)\ndef test_estimator_checks(test_fn):\n # Tests that are skipped:\n # check_methods_subset_invariance: Since we add noise, the method is not invariant on a subset\n # check_transformer_data_not_an_array: tests with `NotAnArray` as X for which we don't have a hashing function\n # check_transformer_general: tests with lists as X for which we don't have a hashing function\n adder = RandomAdder()\n test_fn(RandomAdder.__name__, adder)\n\n\ndef test_dtype_regression(random_xy_dataset_regr):\n X, y = random_xy_dataset_regr\n assert RandomAdder().fit(X, 
y).transform(X).dtype == np.float\n\n\ndef test_dtype_classification(random_xy_dataset_clf):\n X, y = random_xy_dataset_clf\n assert RandomAdder().fit(X, y).transform(X).dtype == np.float\n\n\ndef test_only_transform_train(random_xy_dataset_clf):\n X, y = random_xy_dataset_clf\n X_train, X_test, y_train, y_test = train_test_split(X, y)\n\n random_adder = RandomAdder()\n random_adder.fit(X_train, y_train)\n\n assert np.all(random_adder.transform(X_train) != X_train)\n assert np.all(random_adder.transform(X_test) == X_test)\n","sub_path":"tests/test_preprocessing/test_randomadder.py","file_name":"test_randomadder.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"405566681","text":"from tkinter import *\nfrom tkinter import ttk\nfrom PIL import Image\nimport os\nimport json\n\ndef convertFormatToFormat(*args):\n # determine the chosen formats\n if cb_1.get() != \"None\":\n listFormats = cb_1.get()\n listFormats = listFormats.lower().split('-')\n listFormat_1 = listFormats[0]\n listFormat_2 = listFormats[1]\n formatSave = listFormat_2\n if listFormat_2 == \"jpg\":\n formatSave = \"jpeg\"\n path = entry_1.get()\n listFiles = os.listdir(path)\n for file in listFiles:\n file = file.split('.')[0]\n try:\n Image.open(r'{1}\\{0}.{2}'.format(file, path, listFormat_1)).save(r'{1}\\{0}.{2}'.format(file, path, listFormat_2))#, \"{0}\".format(formatSave.upper()))\n print('Convert success')\n except FileNotFoundError:\n pass\n else:\n print(\">>Err. None\")\n\nroot =Tk()\nroot.title(\"Convert\")\n\n# App\n\n# North (N)\n# South (S)\n# West (W)\n# East (E)\n\n#listFormat = [u'None', u'PNG-JPG', u'JPG-PNG', u'PNG-BMP', u'JPG-BMP', u'PNG-PNG', u'JPG-JPG']\n# reading the formats from a TXT file\n'''\nwith open('formatListImage.txt', 'r') as txt_Data:\n txt_Data = str(txt_Data.read().split('\\n')).replace(chr(44), chr(10)).replace(chr(10), chr(39)).replace(chr(39), chr(91)).replace(chr(91), chr(93)).replace(chr(93), chr(32))\n '''\n# reading the formats from json\nwith open('formatListImage.json', 'r', encoding='utf-8') as file_data:\n data = json.load(file_data)\nprint(data)\n\nlabel_1 = Label(root, text=\"Format: \")\ncb_1 = ttk.Combobox(root, values=data)\ncb_1.current(0)\nlabel_2 = Label(root, text = \"Path: \")\nentry_1 = Entry(root)\nbutton_1 = Button(root, text=\"Convert\", command=convertFormatToFormat)\n\nlabel_1.grid(row=0, column=0, sticky=(N, S, W, E))\ncb_1.grid(row=0, column=1, sticky=(N, S, W, E))\nlabel_2.grid(row=1, column=0, sticky=(N, S, W, E))\nentry_1.grid(row=1, column=1, sticky=(N, S, W, E))\n#cb_1.bind(\"<
 30:\n M = 60 - M\n if M == 1:\n s = time[M] + ' minute to ' + time[H+1]\n else:\n s = time[M] + ' minutes to ' + time[H+1]\nelif M < 30:\n if M == 1:\n s = time[M] + ' minute past ' + time[H]\n else:\n s = time[M] + ' minutes past ' + time[H]\nprint(s)\n","sub_path":"OnlineCoding/The Time in words.py","file_name":"The Time in words.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"563089731","text":"'''\nDan Bartram\n\nYou think you’ve narrowed down the server that might be delivering slow responses,\nwrite a script that:\na. makes a hundred requests (1 per second)\nb. 
shows the average and maximum response times\n\n'''\n\nimport argparse\nimport requests\nimport time\n\n\ndef send_request(target):\n r = requests.get(target + '/main/channels.cgi?url=http%3A%2F%2Fwww.telegraph.co.uk%2Fsport%2Frugbyunion%2F')\n rtt = r.elapsed.total_seconds()\n return rtt # keep the float seconds; converted to ms for the report\n\nconvert = lambda n: n * 1000 # seconds -> milliseconds\navg = lambda values: sum(values) / len(values)\n\nif __name__ == '__main__':\n #Allows the user to specify which server they think is the problem\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--server\")\n args = parser.parse_args()\n count = 0\n results = [] #Could use a dict here if we were interested in preserving the timings of the requests\n while count < 100: #Loop round for the 100 requests\n count += 1\n print(\"Sending request {} out of 100\".format(count))\n rtt = send_request(args.server)\n results.append(rtt) #Sticking all values into a list to eval later\n time.sleep(1)\n\n maximum = max(results)\n average = avg(results)\n print(\"Performance report:\\nMax: {} ms\\nAverage: {} ms\".format(convert(maximum), convert(average)))\n","sub_path":"Script 3.py","file_name":"Script 3.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"387701217","text":"#!/usr/bin/env python3\n\nimport pytest\nimport coverage\nimport os\n\ncov = coverage.coverage()\ncov.start()\n\npytest.main()\n\ncov.stop()\ncov.save()\n\nprint('Coverage Report:')\ncov.report()\n\nreport_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'covreport')\ncov.html_report(directory = report_dir)\nprint(f'HTML Report at {os.path.join(report_dir, \"index.html\")}')\n\ncov.erase()\n","sub_path":"covtest.py","file_name":"covtest.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"403630401","text":"import scipy.io.wavfile as sio \nimport os \nimport re\n# import numpy as np \n# import tensorflow as tf \n# from tensorflow.contrib.framework.python.ops import audio_ops as contrib_audio\n\n#### DATA \n# sound as numpy array | letter that is pressed | length of audio \n\n\n### maybe pad audio sequences so all are the longest in the batch? 
\n### or use length of audio as a data point \n\ndirectory = \"./spacebackspace/\"\n\n# files1 = os.listdir(directories[0])\n# files2 = os.listdir(directories[1])\n# files3 = os.listdir(directories[2])\n# files4 = os.listdir(directories[3])\n# files5 = os.listdir(directories[4])\n\ndef atoi(text):\n return int(text) if text.isdigit() else text\n\ndef natural_keys(text):\n return [ atoi(c) for c in re.split('(\\d+)',text) ]\n\n# files1.sort(key=natural_keys)\n# files2.sort(key=natural_keys)\n# files3.sort(key=natural_keys)\n# files4.sort(key=natural_keys)\n# files5.sort(key=natural_keys)\n\nfiles = os.listdir(directory)\nfiles.sort(key=natural_keys)\n\nwith open(\"meta.csv\", \"w\") as f: \n\tf.write(\"file_path,label\\n\")\n\tfor file in files: \n\t\t# print(\"Current Directory is: \" + directories[i])\n\t\tfilename = file.split(\".\")[0]\n\t\tfilename = re.sub(\"\\d\", \"\", filename)\n\t\t# print(filename)\n\t\tif filename == \"backspace\":\n\t\t\tfilename = \"0\"\n\t\telif filename == \"space\": \n\t\t\tfilename = \"1\"\n\t\telse:\n\t\t\tfilename = str(ord(filename))\n\t\tf.write(directory + file + \", \" + filename + \"\\n\")\n\n# for file in files:\n# \trate, data = sio.read(directory + file)\n# \tprint(data)\n# \taudio_binary = tf.read_file(directory + filename)\n# \tdesired_channels = 1\n# \twav_decoder = contrib_audio.decode_wav(audio_binary, desired_channels=desired_channels)\n\n","sub_path":"numbers/makemeta.py","file_name":"makemeta.py","file_ext":"py","file_size_in_byte":1646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"365419065","text":"\"\"\"users, invoices tables\n\nRevision ID: da18a4e8da81\nRevises: \nCreate Date: 2019-10-05 13:58:04.966620\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'da18a4e8da81'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('invoice',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=128), nullable=True),\n sa.Column('text', sa.String(length=5000), nullable=True),\n sa.Column('full_text', sa.String(length=5000), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('username', sa.String(length=64), nullable=True),\n sa.Column('password_hash', sa.String(length=128), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)\n op.create_table('image',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('path', sa.String(), nullable=True),\n sa.Column('invoice_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['invoice_id'], ['invoice.id'], ),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('path')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('image')\n op.drop_index(op.f('ix_user_username'), table_name='user')\n op.drop_table('user')\n op.drop_table('invoice')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/da18a4e8da81_users_invoices_tables.py","file_name":"da18a4e8da81_users_invoices_tables.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"483916708","text":"class Solution:\n def checkIfPrerequisite(self, numCourses: int, prerequisites, queries):\n g = {i: set() for i in range(numCourses)}\n for p in prerequisites:\n g[p[0]].add(p[1])\n dp = {}\n\n def dfs(u):\n dp[u] = {u}\n for v in g[u]:\n if v in dp:\n f = dp[v]\n else:\n f = dfs(v)\n for w in f:\n dp[u].add(w)\n return dp[u]\n\n for i in range(numCourses):\n if i not in dp:\n dfs(i)\n res = []\n for a, b in queries:\n res.append(b in dp[a])\n return res\n\n\ns = Solution()\nprint(s.checkIfPrerequisite(3, [[1, 2], [1, 0], [2, 0]], [[1, 0], [1, 2]]))\n","sub_path":"leetcode/2021/course-schedule-iv.py","file_name":"course-schedule-iv.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"205858310","text":"# -*- coding: utf-8 -*-\n\"\"\"Clowder test subprocess execution utilities\n\n.. codeauthor:: Joe Decapo \n\n\"\"\"\n\nfrom __future__ import print_function\n\nimport atexit\nimport os\nimport subprocess\nfrom multiprocessing.pool import ThreadPool\n\nfrom clowder_test import ROOT_DIR\nfrom clowder_test.clowder_test_error import ClowderTestError\n\n\n# Disable errors shown by pylint for catching too general exception\n# pylint: disable=W0703\n\n\ndef execute_test_command(command, path, **kwargs):\n \"\"\"Execute test command\n\n .. 
py:function:: execute_test_command(command, path, parallel=False, write=False, coverage=False, test_env=None, debug=False, quiet=False, ssh=False)\n\n :param command: Command to run\n :type command: str\n :param str path: Path to set as ``cwd``\n\n Keyword Args:\n parallel (bool): Whether to run tests in parallel\n write (bool): Whether to run tests requiring write permission\n coverage (bool): Whether to run tests with code coverage\n test_env (dict): Custom dict of environment variables\n debug (bool): Toggle debug output\n quiet (bool): Suppress all output\n ssh (bool): Whether to run test scripts requiring ssh credentials\n\n :return: Subprocess return code\n :rtype: int\n \"\"\"\n\n parallel = kwargs.get('parallel', False)\n write = kwargs.get('write', False)\n coverage = kwargs.get('coverage', False)\n test_env = kwargs.get('test_env', {})\n debug = kwargs.get('debug', False)\n quiet = kwargs.get('quiet', False)\n\n test_env['ACCESS_LEVEL'] = 'write' if write else 'read'\n\n if parallel:\n test_env['PARALLEL'] = '--parallel'\n\n if coverage:\n rc_file = os.path.join(ROOT_DIR, '.coveragerc')\n test_env['COVERAGE_PROCESS_START'] = rc_file\n test_env['COMMAND'] = 'coverage run --rcfile=' + rc_file + ' -m clowder.clowder_app'\n else:\n test_env['COMMAND'] = 'clowder'\n\n if debug:\n test_env['COMMAND'] = test_env['COMMAND'] + ' --debug'\n\n if quiet:\n test_env['COMMAND'] = test_env['COMMAND'] + ' --quiet'\n execute_command(command, path, print_output=False, env=test_env)\n else:\n execute_command(command, path, env=test_env)\n\n\ndef subprocess_exit_handler(process):\n \"\"\"terminate subprocess\"\"\"\n\n try:\n process.terminate()\n except Exception as err:\n del err\n\n\ndef execute_subprocess_command(command, path, **kwargs):\n \"\"\"Execute subprocess command\n\n .. py:function:: execute_subprocess_command(command, path, shell=True, env=None, stdout=None, stderr=None)\n\n :param command: Command to run\n :type command: str or list[str]\n :param str path: Path to set as ``cwd``\n\n Keyword Args:\n shell (bool): Whether to execute subprocess as ``shell``\n env (dict): Environment to set as ``env``\n stdout (int): Value to set as ``stdout``\n stderr (int): Value to set as ``stderr``\n\n :return: Subprocess return code\n :rtype: int\n :raise ClowderTestError:\n \"\"\"\n\n shell = kwargs.get('shell', True)\n env = kwargs.get('env', None)\n stdout = kwargs.get('stdout', None)\n stderr = kwargs.get('stderr', None)\n\n if isinstance(command, list):\n cmd = ' '.join(command)\n else:\n cmd = command\n\n try:\n process = subprocess.Popen(cmd, shell=shell, env=env, cwd=path,\n stdout=stdout, stderr=stderr)\n atexit.register(subprocess_exit_handler, process)\n process.communicate()\n if process.returncode != 0:\n raise ClowderTestError\n except (KeyboardInterrupt, SystemExit):\n raise\n except Exception as err:\n raise ClowderTestError(err)\n\n\ndef execute_command(command, path, **kwargs):\n \"\"\"Execute command via thread\n\n .. 
py:function:: execute_command(command, path, shell=True, env=None, print_output=True)\n\n :param command: Command to run\n :type command: str or list[str]\n :param str path: Path to set as ``cwd``\n\n Keyword Args:\n shell (bool): Whether to execute subprocess as ``shell``\n env (dict): Environment to set as ``env``\n print_output (bool): Whether to print output\n\n :return: Command return code\n :rtype: int\n :raise ClowderTestError:\n \"\"\"\n\n shell = kwargs.get('shell', True)\n env = kwargs.get('env', None)\n print_output = kwargs.get('print_output', True)\n\n cmd_env = os.environ.copy()\n if env:\n cmd_env.update(env)\n\n if print_output:\n pipe = None\n else:\n pipe = subprocess.PIPE\n\n pool = ThreadPool()\n\n try:\n result = pool.apply(execute_subprocess_command,\n args=(command, path),\n kwds={'shell': shell, 'env': cmd_env, 'stdout': pipe, 'stderr': pipe})\n pool.close()\n pool.join()\n return result\n except (KeyboardInterrupt, SystemExit):\n if pool:\n pool.close()\n pool.terminate()\n raise ClowderTestError('Command interrupted')\n except Exception as err:\n if pool:\n pool.close()\n pool.terminate()\n raise ClowderTestError(err)\n","sub_path":"clowder_test/clowder_test/execute.py","file_name":"execute.py","file_ext":"py","file_size_in_byte":5134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"538631393","text":"from random import randint\n\n\nclass RPS:\n \"\"\"\"\"\"\n\n def __init__(self, message):\n self.channel = message.channel\n self.author = message.author\n # tab = ['Rock','Paper','Scissors']\n self.tab = [\"🪨\", \"📰\", \"✂\"]\n\n async def launch(self, client):\n botplay = self.tab[randint(0, 2)]\n\n def check(reaction, user):\n return user == self.author and (\n str(reaction.emoji) == \"🪨\"\n or str(reaction.emoji) == \"📰\"\n or str(reaction.emoji) == \"✂\"\n )\n\n while True:\n respons = await self.channel.send(\"Rock, Paper or Scissors?\")\n for emot in self.tab:\n await respons.add_reaction(emot)\n guess = await client.wait_for(event=\"reaction_add\", check=check)\n guess = str(guess[0].emoji)\n if guess == botplay:\n await self.channel.send(\n f\"{self.author.mention}\\nIt's a tie, I had picked {botplay}.\"\n )\n botplay = self.tab[randint(0, 2)]\n\n elif (\n (guess == \"🪨\" and botplay == \"✂\")\n or (guess == \"📰\" and botplay == \"🪨\")\n or (guess == \"✂\" and botplay == \"📰\")\n ):\n await self.channel.send(\n f\"{self.author.mention}\\nGG, I had picked {botplay}, so you won \"\n )\n break\n\n else:\n await self.channel.send(\n f\"{self.author.mention}\\nSorry, I had picked {botplay}, so you lost \"\n )\n break","sub_path":"nymeria/message/games/rps.py","file_name":"rps.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"654032204","text":"n = int(input())\npeople = list(map(int, input().split()))\npeople.sort()\ngroup = 0\nj = 0\nfor i in people:\n j+=1\n if i <= j:\n group+=1\n j = 0\nprint(group)","sub_path":"doyeon/BOJ/★그리디 알고리즘/20210212_book_모험가 길드.py","file_name":"20210212_book_모험가 길드.py","file_ext":"py","file_size_in_byte":172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"169307489","text":"def main():\n n = int(input())\n cut = list(map(int, input().split()))\n wood = int(input())\n INF = float('inf')\n dp = [INF for _ in range(wood+1)]\n \n dp[0] = 0\n\n cut.sort() # sort in ascending order\n\n for i in range(n):\n for j in range(cut[i], 
wood+1):\n dp[j] = min(dp[j], dp[j-cut[i]] + 1)\n print(dp[wood]) \n\nif __name__ == \"__main__\":\n main()\n","sub_path":"13주차_코테 (dp)/prac3.py","file_name":"prac3.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"185177160","text":"import numpy as np\nimport time\nimport logging\nimport traceback\nimport nidaqmx\nimport matplotlib.pyplot as plt\n\n\nclass PCIe6351_do:\n def __init__(self, time_offset, *constr_param):\n self.time_offset = time_offset\n self.constr_param = constr_param\n self.channel = self.constr_param[0]\n self.trig_channel = self.constr_param[1]\n self.samp_rate = round(float(self.constr_param[2])*1000) # in S/s\n self.ctrl_param = []\n for key in self.constr_param[3]:\n l = []\n for elem in self.constr_param[3][key]:\n l.append(float(elem))\n self.ctrl_param.append(l)\n print(f\"Constructor got passed the following parameter: {self.constr_param}\")\n\n # generate a waveform for analog output\n self.update_waveform()\n\n try:\n self.daq_init()\n self.task.close()\n except Exception as err:\n self.init_error = [\"error\", \"DAQ initialization failed.\"]\n print(err)\n logging.error(traceback.format_exc())\n self.task.close()\n return\n\n self.init_error = \"\"\n\n # HDF attributes generated when constructor is run\n self.new_attributes = []\n\n # shape and type of the array of returned data\n self.dtype = 'f'\n # self.shape updated in self.update.waveform()\n # self.shape = (1, 2, self.samp_num)\n\n # each element in self.warnings should be in format: [time.time()-self.time_offset, \"warning content\"]\n self.warnings = []\n self.explicitly_start = False\n\n def __enter__(self):\n # when opened in the main file by with...as... statement, __enter__ will be called right after __init__\n return self\n\n def __exit__(self, *exc):\n # when with...as... 
statement finished running, __exit__ will be called\n # self.task.close()\n pass\n\n def daq_init(self):\n\n self.task = nidaqmx.Task()\n self.task.do_channels.add_do_chan(\n self.channel,\n line_grouping=nidaqmx.constants.LineGrouping.CHAN_FOR_ALL_LINES\n )\n self.task.timing.cfg_samp_clk_timing(\n rate = self.samp_rate,\n # source = \"/Dev1/ai/SampleClock\", # same source from this channel\n active_edge = nidaqmx.constants.Edge.RISING,\n sample_mode = nidaqmx.constants.AcquisitionType.FINITE,\n samps_per_chan = self.samp_num\n )\n self.task.triggers.start_trigger.cfg_dig_edge_start_trig(\n trigger_source = self.trig_channel,\n trigger_edge = nidaqmx.constants.Edge.RISING\n )\n self.task.triggers.start_trigger.retriggerable = False\n # self.task.out_stream.output_buf_size = self.samp_num\n\n def ReadValue(self):\n try:\n self.daq_init()\n num_write = self.task.write(self.writing, auto_start=True, timeout=10.0)\n writing_sample = self.writing\n self.task.wait_until_done(timeout=10.0)\n self.task.close()\n # task.write() returns the actual number of samples successfully written\n # print(\"actual number of samples successfully written: {:d}\".format(num_write))\n # print(time.time()-self.time_offset)\n\n except Exception as err:\n logging.error(\"PCIe6351 writing error!\")\n logging.error(traceback.format_exc())\n writing_sample = [np.NaN]*self.samp_num\n self.task.close()\n\n data = np.append(self.timestamp, writing_sample)\n data = np.array(data).reshape(self.shape)\n attr = {\"source\": \"Teensy with DDS\", \"trigger\": \"function generator\"}\n\n return [data, [attr]]\n\n def update_channel(self, arg):\n self.channel = arg\n\n def update_trig_channel(self, arg):\n self.trig_channel = arg\n\n def update_samp_rate(self, arg):\n self.samp_rate = round(float(arg)*1000)\n\n def update_waveform(self):\n self.writing = np.array([])\n for i, timing in enumerate(self.ctrl_param[0]):\n samp_num_part = round(float(timing)/1000.0*self.samp_rate)\n output_part = np.zeros(samp_num_part)\n for j in range(len(self.ctrl_param)-1):\n output_part += np.ones(samp_num_part)*int(self.ctrl_param[j+1][i])*np.power(2, j)\n self.writing = np.append(self.writing, output_part)\n self.writing = np.append(self.writing, np.array([0]))\n self.writing = [int(elem) for elem in self.writing]\n self.samp_num = len(self.writing)\n self.timestamp = np.arange(self.samp_num)*(1/self.samp_rate)*1000 # in ms\n self.shape = (1, 2, self.samp_num)\n\n def update_control(self, i, j, arg):\n if int(i) == 0:\n self.ctrl_param[int(i)][int(j)] = float(arg)\n else:\n self.ctrl_param[int(i)][int(j)] = 1 if arg in [\"1\", \"2\", 1, 2] else 0\n self.update_waveform()\n\n def GetWarnings(self):\n warnings = self.warnings\n self.warnings = []\n return warnings\n\n# samp_rate = 20 # in kS/s\n# channel = \"Dev1/port0/line0:2\"\n# trig_channel = \"/Dev1/PFI1\"\n# ctrl_param = {\"timing\": [10, 10, 10, 10, 10], \"ch0\": [1, 0, 1, 0, 0], \"ch1\": [0, 1, 0, 1, 0], \"ch2\": [1, 0, 1, 0, 1]}\n#\n# with PCIe6351_do(0, channel, trig_channel, samp_rate, ctrl_param) as obj:\n# first_time = time.time()\n# data = obj.ReadValue()\n# print(time.time()-first_time)\n# data = obj.ReadValue()\n# print(time.time()-first_time)\n# data = obj.ReadValue()\n# print(time.time()-first_time)\n# data = obj.ReadValue()\n# print(time.time()-first_time)\n# t = data[0][0,0]\n# writing = data[0][0,1]\n#\n# plt.plot(t, writing)\n# 
plt.show()\n","sub_path":"drivers/PCIe6351_do.py","file_name":"PCIe6351_do.py","file_ext":"py","file_size_in_byte":5752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"641819447","text":"#author: Gilles Maillot, L3 Computer Science, student no. 35004091\n# coding: utf-8\nfrom threading import Thread, Condition, current_thread\nimport time\nimport random\n\n#Queue class\nclass Queue():\n def __init__(self):\n self.queue=[]\n self.condition = Condition()\n print(u'File:' + str(self.queue) + u'')\n\n #Method removing the integer at the front of the queue\n def removeBeg(self):\n with self.condition:\n while len(self.queue) == 0:\n self.condition.wait()\n print(u'File:' + str(self.queue) + u'')\n self.queue.pop(0)\n self.condition.notifyAll()\n #Method appending a random integer at the end\n def addEnd(self):\n with self.condition:\n while len(self.queue) >= 20:\n self.condition.wait()\n print(u'File:' + str(self.queue) + u'')\n self.queue.append(random.randint(1, 100))\n self.condition.notifyAll()\n\n#Producer thread\nclass ProducerThread(Thread):\n def __init__(self,queue,producer_sleep):\n Thread.__init__(self)\n self.queue = queue\n self.producer_sleep = float(producer_sleep) / 1000.0\n self.daemon = True\n\n def run(self):\n while(True):\n self.queue.addEnd()\n time.sleep(self.producer_sleep)\n\n#Consumer thread\nclass ConsumerThread(Thread):\n def __init__(self,queue,consumer_sleep):\n Thread.__init__(self)\n self.queue = queue\n self.consumer_sleep = float(consumer_sleep) / 1000.0\n self.daemon = True\n\n def run(self):\n while (True):\n self.queue.removeBeg()\n time.sleep(self.consumer_sleep)\n\n#Main function\nif __name__ == \"__main__\":\n\n producer_sleep = 50 # in milliseconds\n consumer_sleep = 200 # in milliseconds\n queue = Queue()\n\n producer = ProducerThread(queue,producer_sleep)\n consumer = ConsumerThread(queue,consumer_sleep)\n\n producer.start()\n consumer.start()\n\n input(\"\")\n","sub_path":"TP_Note_Exercice2.3/Python_35004091/Queue_Integer.py","file_name":"Queue_Integer.py","file_ext":"py","file_size_in_byte":1991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"476022871","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cms', '0016_auto_20160608_1535'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='SnlmtContainerSetting',\n fields=[\n ('cmsplugin_ptr', models.OneToOneField(serialize=False, primary_key=True, to='cms.CMSPlugin', related_name='snlmt_container_plugin_snlmtcontainersetting', parent_link=True, auto_created=True)),\n ('container_type', models.CharField(default='container', choices=[('container', 'фиксированный контейнер'), ('container-fluid', 'адаптивный контейнер')], verbose_name='тип контейнера', max_length=30)),\n ('teg', models.CharField(default='div', choices=[('div', 'div'), ('span', 'span'), ('main', 'main'), ('section', 'section'), ('aside', 'aside'), ('figure', 'figure'), ('header', 'header'), ('footer', 'footer'), ('address', 'address')], verbose_name='тег', max_length=30)),\n ('classes', models.CharField(null=True, max_length=255, verbose_name='классы', blank=True)),\n ('style', models.TextField(null=True, verbose_name='inline стили', blank=True)),\n ],\n options={\n 'abstract': False,\n },\n bases=('cms.cmsplugin',),\n ),\n 
]\n","sub_path":"snlmt_plugins/snlmt_container_plugin/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"72016837","text":"from lxml import html\nimport requests, os\n\n# scrape rooster tooths for RvB scripts\nPATH = 'scripts.txt'\nCONVERSATION = False\n\nwith open(PATH, 'w') as f:\n for i in [x for x in range(347)]:\n page = requests.get('http://roostertooths.com/transcripts.php?eid={}'.format(i+1))\n tree = html.fromstring(page.content)\n lines = []\n f.write('\\n\\n'+tree.xpath('//p[@class=\"breadcrumbs\"]/a//text()')[1]\n +'\\n'+tree.xpath('//h1//text()')[0]+'\\n\\n')\n for row in tree.xpath('//table[@class=\"script\"]/tr'):\n f.write(''.join(row.xpath('.//td//text()'))+'\\n')\n\ntext = open(PATH).read()\nfor line in text.split('\\n'):\n # only get spoken lines from RvB\n if line.startswith(' '):\n line = line[1:]\n # remove last line if captioned\n if line.startswith('caption'):\n line = lines[-1].split(':',1)[0].upper() + ':' + line.split(':',1)[1].lower()\n lines = lines[:-1]\n else:\n line = line.split(':',1)[0].upper() + ':' + line.split(':',1)[1].lower()\n lines.append(line)\n elif not CONVERSATION:\n lines.append(line)\n\nif CONVERSATION:\n line_groups = []\n for group in zip(lines, lines[1:], lines[2:]):\n line_groups.append(' '.join(group))\n text = '\\n'.join(line_groups)\nelse:\n text = '\\n'.join(lines)\n\nreplacemap = {\"\\x91\": '\"', \"\\x93\": '\"', \"\\x92\": \"'\", \"\\x94\": \"'\",\n '[': '(', ']': ')', '\\x85': '\\n', '\\xa0': ' ', '\\x96': ''}\nfor k, v in replacemap.items():\n text = text.replace(k, v)\n\nwith open(PATH, 'w') as f:\n f.write(text)\n","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"514078026","text":"import sys\nimport signal\nimport threading\nimport asyncio\nimport conf_loader\nimport notifier\nimport bili_statistics\nfrom bili_console import Biliconsole\nimport printer\nfrom user import User\nfrom tasks.login import LoginTask\nfrom tasks.live_daily_job import (\n HeartBeatTask,\n RecvHeartGiftTask,\n OpenSilverBoxTask,\n RecvDailyBagTask,\n SignTask,\n WatchTvTask,\n SignFansGroupsTask,\n SendGiftTask,\n ExchangeSilverCoinTask,\n)\nfrom tasks.main_daily_job import (\n JudgeCaseTask,\n BiliMainTask\n \n)\nfrom danmu import monitor_danmu_raffle\nfrom dyn.monitor_dyn_raffle import DynRaffleMonitor\nfrom substance.monitor_substance_raffle import SubstanceRaffleMonitor\n\n\nloop = asyncio.get_event_loop()\n \ndict_user = conf_loader.read_user()\ndict_bili = conf_loader.read_bili()\ndict_color = conf_loader.read_color()\ndict_ctrl = conf_loader.read_ctrl()\nprinter.init_config(dict_color, dict_ctrl['print_control']['danmu'])\n\n# user设置\nusers = []\nglobal_task_control = dict_ctrl['global_task_control']\ncustom_task_control = dict_ctrl['custom_task_control']\nfor i, user_info in enumerate(dict_user['users']):\n username = user_info['username']\n if username in custom_task_control:\n task_control = {**global_task_control, **custom_task_control[username]}\n else:\n task_control = global_task_control\n users.append(User(i, user_info, task_control, dict_bili))\nnotifier.set_values(loop)\nnotifier.set_users(users)\n \nloop.run_until_complete(notifier.exec_func(-2, LoginTask.handle_login_status))\n\nother_control = dict_ctrl['other_control']\narea_ids = 
other_control['area_ids']\nbili_statistics.init_area_num(len(area_ids))\ndefault_roomid = other_control['default_monitor_roomid']\n\nasync def get_printer_danmu():\n future = asyncio.Future()\n yjmonitor_danmu_roomid = other_control['yjmonitor_danmu_roomid']\n yjmonitor_tcp_addr = other_control['yjmonitor_tcp_addr']\n yjmonitor_tcp_key = other_control['yjmonitor_tcp_key']\n asyncio.ensure_future(monitor_danmu_raffle.run_danmu_monitor(\n raffle_danmu_areaids=area_ids,\n yjmonitor_danmu_roomid=yjmonitor_danmu_roomid,\n printer_danmu_roomid=default_roomid,\n yjmonitor_tcp_addr=yjmonitor_tcp_addr,\n yjmonitor_tcp_key=yjmonitor_tcp_key,\n future=future))\n await future\n return future.result()\n\nprinter_danmu = loop.run_until_complete(get_printer_danmu())\n\nif sys.platform != 'linux' or signal.getsignal(signal.SIGHUP) == signal.SIG_DFL:\n console_thread = threading.Thread(\n target=Biliconsole(loop, default_roomid, printer_danmu).cmdloop)\n console_thread.start()\nelse:\n console_thread = None\n\n\nnotifier.exec_task(-2, HeartBeatTask, 0, delay_range=(0, 5))\nnotifier.exec_task(-2, RecvHeartGiftTask, 0, delay_range=(0, 5))\nnotifier.exec_task(-2, OpenSilverBoxTask, 0, delay_range=(0, 5))\nnotifier.exec_task(-2, RecvDailyBagTask, 0, delay_range=(0, 5))\nnotifier.exec_task(-2, SignTask, 0, delay_range=(0, 5))\nnotifier.exec_task(-2, WatchTvTask, 0, delay_range=(0, 5))\nnotifier.exec_task(-2, SignFansGroupsTask, 0, delay_range=(0, 5))\nnotifier.exec_task(-2, SendGiftTask, 0, delay_range=(0, 5))\nnotifier.exec_task(-2, ExchangeSilverCoinTask, 0, delay_range=(0, 5))\nnotifier.exec_task(-2, JudgeCaseTask, 0, delay_range=(0, 5))\nnotifier.exec_task(-2, BiliMainTask, 0, delay_range=(0, 5))\n\n\nother_tasks = [\n SubstanceRaffleMonitor().run(),\n # DynRaffleMonitor(should_join_immediately=True).run(),\n ]\nif other_tasks:\n loop.run_until_complete(asyncio.wait(other_tasks))\nloop.run_forever()\nif console_thread is not None:\n console_thread.join()\n\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"443435735","text":"# -*- coding: utf-8 -*-\nimport json\nfrom sqlalchemy import create_engine, text, func\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy_utils import database_exists, create_database, drop_database\nfrom common.base.db.interfaceDB import IDatabases\nfrom common.base.db.configureConnection import ConfigureConnection\n\n\"\"\"\n Class that implements a Basic DataBase Connection\n * class Connection\n * requires python 3.+\n * version 1.0.0\n * package icity-BlockChain\n * author Alcindo Schleder \n * copyright Vocatio Telecom \n\"\"\"\nclass Connection(IDatabases):\n\n def __init__(self, app):\n super(Connection, self).__init__()\n self._app = app\n self._appConfig = None\n self._engine = None\n self._session = None\n self._dbTable = None\n self._DATABASE_URI = None\n self._setDriver(self._app.config['DATABASE_DRIVER'])\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if (not self._engine.closed):\n self._engine.dispose()\n\n def _setDriver(self, driver) -> dict:\n self._appConfig = ConfigureConnection()\n self._app.config['ICITY_SECURITY_DATA'] = self._appConfig.globalConfig['ICITY_SECURITY_DATA']\n try:\n if (self._appConfig.result['state']['sttCode'] == 200):\n self._appConfig.dbDriver = driver\n self._DATABASE_URI = self._appConfig.connectionUri()\n self._config_db()\n except Exception as e:\n msg = f'A internal unexpected error 
occurred: ({e.args})'\n self.resultStatusCode = 500\n self.resultStatusMessage = msg\n raise Exception(msg)\n\n def _config_db(self):\n self.resultStatusCode = 200\n msg = f'Database {self._appConfig.databaseName} on {self._appConfig.databaseDriver} '\n msg += f'({self._DATABASE_URI}) not found. Plase verify with your sysdba!'\n try:\n if not database_exists(self._DATABASE_URI):\n self.resultStatusCode = 404\n self.resultStatusMessage = msg\n except Exception as e:\n self.resultStatusCode = 500\n self.resultStatusMessage = f'{msg}: ({e.args})'\n raise Exception(f'{msg}: ({e.args})')\n \n def _createSession(self):\n try:\n if (self._engine is None):\n msg = 'Engine not created ou closed!' \n self.resultStatusCode = 301\n self.resultStatusMessage = 'Database not connected!'\n raise Exception(msg)\n Session = sessionmaker(bind=self._engine)\n Session.configure(bind=self._engine)\n self._session = Session(autocommit=True)\n except Exception as e:\n if (self._session) and (self._session.is_active):\n self._session.close()\n msg = f\"Can't create a session into Database {self._dbTable.name}: ({e.args})\"\n self.resultStatusCode = 500\n self.resultStatusMessage = msg\n raise Exception(msg)\n return True\n\n def connect(self):\n self.resultStatusCode = 200\n if (not self.isConnected):\n self._app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n self._app.config['SQLALCHEMY_DATABASE_URI'] = self._DATABASE_URI\n try:\n self._engine = create_engine(self._DATABASE_URI, echo=False)\n self._createSession()\n except Exception as e:\n if (self._engine):\n self._engine.dispose()\n msg = f'A Unexpected error occurred on connect database, please contact network admin! ({e.args})'\n self.resultStatusCode = 500\n self.resultStatusMessage = msg\n raise Exception(msg)\n\n def disconnect(self):\n if ((self._session is not None) and (self._session.is_active)):\n self._session.close()\n return True\n\n @property\n def isConnected(self):\n return ((self._session is not None) and (self._session.is_active))\n\n @property\n def engine(self):\n return self._engine\n \n @property\n def session(self):\n return self._session\n\n @property\n def db(self):\n return self.session\n\n @property\n def dbTable(self):\n return self._dbTable\n\n @dbTable.setter\n def dbTable(self, table):\n self._dbTable = table\n\n @property\n def dbTableName(self) -> str:\n return self._dbTable.tableName\n\n def execCommand(self, aQuery: str, aParams: dict = None) -> dict:\n if (not self._session.is_active):\n self._session.begin()\n if (self.resultStatusCode != 200):\n return False\n try:\n dbObj = self._session.query(self.dbTable).from_statement(text(aQuery)).params(aParams).all()\n self._session.commit()\n self.resultData = dbObj\n except Exception as e:\n self._session.rollback()\n self.resultStatusCode = 500\n self.resultStatusMessage = 'Erro on execute sql command!' 
+ str(e.args)\n finally:\n self._session.close()\n return self.result\n\n def browseRecord(self, filters=None, orderby=None, start:int=0, limit:int=0):\n\n self.resultStatusCode = 200\n try:\n if (not self._session.is_active):\n self._session.begin()\n\n self._result['data'] = []\n if ((start > 0) and (limit > 0)):\n self.result['page'] = {\n 'count': self.session.query(func.count(self._dbTable)),\n 'start': start,\n 'limit': limit,\n 'url': '/'\n }\n if (filters is None):\n if ((start > 0) and (limit > 0)):\n rows = self._session.query(self._dbTable).limit(limit).offset(start * limit)\n else:\n rows = self._session.query(self._dbTable).all().order_by(orderby)\n for row in rows:\n self._result['data'].append(row._asdict())\n else:\n for row in self._session.query(self._dbTable).filter(filters).all():\n self._result['data'].append(row._asdict())\n\n self._session.commit()\n except Exception as e:\n self._session.rollback()\n self.resultStatusCode = 500\n self.resultStatusMessage = f'Erro ao pesquisar registros na tabela {self.dbTableName}! : {e.args}'\n finally:\n self._session.close()\n\n def insertRecord(self):\n self.resultStatusCode = 200\n try:\n if (not self._session.is_active):\n self._session.begin()\n data = self._session.add(self._dbTable)\n self.resultData = data\n self._session.commit()\n except Exception as e:\n self._session.rollback()\n self.resultStatusCode = 500\n self.resultStatusMessage = 'Erro ao inserir um registro na tabela %s! : %s' %(self.dbTableName, str(e.args))\n finally:\n self._session.close()\n\n def updateRecord(self):\n self.resultStatusCode = 200\n try:\n if (not self._session.is_active):\n self._session.begin()\n data = self._session.update(self._dbTable)\n self.resultData = data\n self._session.commit()\n except Exception as e:\n self._session.rollback()\n self.resultStatusCode = 500\n self.resultStatusMessage = 'Erro ao editar um registro na tabela %s! 
: %s' %(self.dbTableName, str(e.args))\n finally:\n self._session.close()\n\n def deleteRecord(self):\n self.resultStatusCode = 200\n try:\n if (not self._session.is_active):\n self._session.begin()\n self._session.delete(self._dbTable)\n self.resultData = {}\n self._session.commit()\n except Exception as e:\n self._session.rollback()\n self.resultStatusCode = 500\n self.resultStatusMessage = 'Erro ao deletar um registro na tabela %s!: %s' %(self.dbTableName, str(e.args))\n finally:\n self._session.close()\n","sub_path":"common/base/db/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":8332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"255535645","text":"import numpy as np\r\nimport cv2\r\n\r\n\r\ndef xywh2cs(x, y, w, h):\r\n center = np.zeros((2), dtype=np.float32)\r\n center[0] = x + w * 0.5\r\n center[1] = y + h * 0.5\r\n aspect_ratio = 1\r\n if w > aspect_ratio * h:\r\n h = w * 1.0 / aspect_ratio\r\n elif w < aspect_ratio * h:\r\n w = h * aspect_ratio\r\n scale = np.array([w, h], dtype=np.float32)\r\n return center, scale\r\n\r\n\r\ndef _get_3rd_point(a, b):\r\n direct = a - b\r\n return b + np.array([-direct[1], direct[0]], dtype=np.float32)\r\n\r\n\r\ndef _get_dir(src_point, rot_rad):\r\n sn, cs = np.sin(rot_rad), np.cos(rot_rad)\r\n\r\n src_result = [0, 0]\r\n src_result[0] = src_point[0] * cs - src_point[1] * sn\r\n src_result[1] = src_point[0] * sn + src_point[1] * cs\r\n\r\n return src_result\r\n\r\n\r\ndef transform_logits(logits, center, scale, width, height, input_size):\r\n trans = get_affine_transform(center, scale, 0, input_size, inv=1)\r\n channel = logits.shape[2]\r\n target_logits = []\r\n for i in range(channel):\r\n target_logit = cv2.warpAffine(\r\n logits[:, :, i],\r\n trans,\r\n (int(width), int(height)), # (int(width), int(height)),\r\n flags=cv2.INTER_LINEAR,\r\n borderMode=cv2.BORDER_CONSTANT,\r\n borderValue=(0))\r\n target_logits.append(target_logit)\r\n target_logits = np.stack(target_logits, axis=2)\r\n\r\n return target_logits\r\n\r\n\r\ndef get_affine_transform(center,\r\n scale,\r\n rot,\r\n output_size,\r\n shift=np.array([0, 0], dtype=np.float32),\r\n inv=0):\r\n if not isinstance(scale, np.ndarray) and not isinstance(scale, list):\r\n print(scale)\r\n scale = np.array([scale, scale])\r\n\r\n scale_tmp = scale\r\n\r\n src_w = scale_tmp[0]\r\n dst_w = output_size[1]\r\n dst_h = output_size[0]\r\n\r\n rot_rad = np.pi * rot / 180\r\n src_dir = _get_dir([0, src_w * -0.5], rot_rad)\r\n dst_dir = np.array([0, (dst_w - 1) * -0.5], np.float32)\r\n\r\n src = np.zeros((3, 2), dtype=np.float32)\r\n dst = np.zeros((3, 2), dtype=np.float32)\r\n src[0, :] = center + scale_tmp * shift\r\n src[1, :] = center + src_dir + scale_tmp * shift\r\n dst[0, :] = [(dst_w - 1) * 0.5, (dst_h - 1) * 0.5]\r\n dst[1, :] = np.array([(dst_w - 1) * 0.5, (dst_h - 1) * 0.5]) + dst_dir\r\n\r\n src[2:, :] = _get_3rd_point(src[0, :], src[1, :])\r\n dst[2:, :] = _get_3rd_point(dst[0, :], dst[1, :])\r\n\r\n if inv:\r\n trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))\r\n else:\r\n trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))\r\n\r\n return trans\r\n","sub_path":"image_segmentation/human_part_segmentation/hps_utils.py","file_name":"hps_utils.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"81178456","text":"__author__ = 'Zhenwei Wang, wangzw1@live.unc.edu, Onyen = 
wangzw1'\n__copyright__ = 'Copyright 2014, 2015 UNC VACLab'\n\nimport csvkit\nimport pymysql as db\nconnect = db.connect(host='127.0.0.1', port = 3306, user='vaclab',passwd='vaclab206', db='UNC3')\ncursor= connect.cursor()\nwith open('/home/aml14/DIAGNOSES.csv', newline='') as file:\n reader = csvkit.DictReader(file, delimiter='|')\n list = []\n for row in reader:\n list.append(row)\n for i in range(len(list)):\n while True:\n try:\n cursor.execute('Insert into event_dict(class, code, description) VALUES (\"'+format(list[i]['type'])+'\",\"'+format(list[i]['DGNS_ICD9_CODE'])+'\",\"'+format(list[i]['DGNS_ICD9_DESCR'])+'\")')\n connect.commit()\n break\n except db.MySQLError as error:\n message = error\n print(message)\n break\nconnect.close()\n","sub_path":"DIAG_DICT.py","file_name":"DIAG_DICT.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"136379955","text":"from django.shortcuts import render\nfrom django.views import generic\nfrom django.utils.translation import gettext as _\nfrom django.urls import reverse_lazy\nfrom django.core.mail import send_mail\nfrom django.template.loader import render_to_string\n\nfrom painless.medicine import treatment\n\nfrom django.contrib.messages.views import SuccessMessageMixin\n\nfrom .models import Sample\nfrom .models import Contact\nfrom .models import FAQ\nfrom blog.models import Post\nfrom blog.models import Category\nfrom blog.models import Tag\n\nfrom .forms import ContactForm\n# Create your views here.\n\nclass HomeTemplate(generic.TemplateView):\n template_name = 'site/home.html'\n title = _('خانه')\n\nclass ContactTemplate(SuccessMessageMixin, generic.CreateView):\n template_name = 'site/contact.html'\n form_class = ContactForm\n title = _('تماس با ما')\n success_url = reverse_lazy('site:contact')\n success_message = 'پیام شما با موفقیت ارسال شد.'\n\n def form_valid(self, form):\n to_email = ['sa.goldeneagle@gmail.com']\n to_email.append(form.cleaned_data.get('from_email', None))\n \n send_mail(\n subject = 'پیام شما ارسال شد.',\n message = '',\n from_email = 'animateidea@gmail.com',\n recipient_list = [to_email[1]],\n html_message = treatment.load_partial(self.request, 'mail/welcome.html', 'email'),\n )\n \n send_mail(\n subject = 'شخصی قصد ارتباط با شما دارد.',\n message = '',\n from_email = 'animateidea@gmail.com',\n recipient_list = [to_email[0]],\n html_message = treatment.load_partial(self.request, 'mail/welcome.html', 'email'),\n )\n\n return super(ContactTemplate, self).form_valid(form)\n\nclass AboutTemplate(generic.TemplateView):\n template_name = 'site/about-us.html'\n title = 'درباره ما'\n\nclass ServiceTemplate(generic.TemplateView):\n template_name = 'site/service-traffis.html'\n title = 'خدمات'\n\nclass TestimonialsTemplate(generic.TemplateView):\n template_name = 'site/testimonials.html'\n title = 'درباره ما'\n\nclass SamplesTemplate(generic.ListView):\n template_name = 'site/samples.html'\n title = 'نمونه کار'\n model = Sample\n paginate_by = 4\n context_object_name = 'samples'\n\n\nclass FAQTemplate(generic.ListView):\n template_name = 'site/faq.html'\n title = 'پرسش و پاسخ متداول'\n context_object_name = 'issues'\n model = FAQ\n\nclass PageNotFoundTemplate(generic.TemplateView):\n template_name = 'site/404.html'\n title = '404'\n\nclass ServerErrorTemplate(generic.TemplateView):\n template_name = 'site/500.html'\n title = '500'\n\nclass PermissionErrorTemplate(generic.TemplateView):\n template_name = 'site/403.html'\n title = 
'403'","sub_path":"kernel/mysite/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"393086436","text":"import pandas as pd\nimport numpy as np\n\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import MinMaxScaler, RobustScaler\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.feature_selection import SelectKBest, f_regression\nfrom sklearn.feature_selection import RFE as rfe\n\ndef impute_missing_data(train, validate, test):\n '''\n \n '''\n numerical_columns = [\n 'calculatedfinishedsquarefeet',\n 'lotsizesquarefeet',\n 'structuretaxvaluedollarcnt',\n 'taxvaluedollarcnt',\n 'landtaxvaluedollarcnt',\n 'taxamount'\n]\n \n categorical_columns = [\n \"regionidcity\",\n \"regionidzip\",\n \"yearbuilt\",\n \"regionidcity\"\n]\n \n imputer = SimpleImputer(strategy='median')\n \n train[numerical_columns] = imputer.fit_transform(train[numerical_columns])\n validate[numerical_columns] = imputer.transform(validate[numerical_columns])\n test[numerical_columns] = imputer.transform(test[numerical_columns])\n \n \n imputer = SimpleImputer(strategy='most_frequent')\n \n train[categorical_columns] = imputer.fit_transform(train[categorical_columns])\n validate[categorical_columns] = imputer.transform(validate[categorical_columns])\n test[categorical_columns] = imputer.transform(test[categorical_columns])\n \n return train, validate, test\n\ndef features_for_modeling(predictors, target, k_features):\n '''\n Signature: features_for_modeling(predictors, target, k_features)\n Docstring:\n\n Parameters\n ----------\n\n Returns\n -------\n\n '''\n df_best = pd.DataFrame(select_kbest(predictors, target, k_features))\n df_rfe = pd.DataFrame(select_rfe(predictors, target, k_features))\n \n df_features = pd.concat([df_best, df_rfe], axis=1)\n return df_features\n\n\ndef select_kbest(predictors, target, k_features=3):\n '''\n Signature: select_kbest(predictors, target, k_features=3)\n Docstring:\n\n Parameters\n ----------\n pandas.core.frame.DataFrame\n\n Returns\n -------\n\n '''\n f_selector = SelectKBest(f_regression, k=k_features)\n f_selector.fit(predictors, target)\n \n f_mask = f_selector.get_support()\n f_features = predictors.iloc[:,f_mask].columns.to_list()\n \n print(f\"Select K Best: {len(f_features)} features\")\n print(f_features)\n return None\n # return predictors[f_features]\n \n \ndef select_rfe(X, y, k_features=3):\n '''\n Signature: rfe(predictors, target, k_features=3)\n Docstring:\n\n Parameters\n ----------\n pandas.core.frame.DataFrame\n\n Returns\n -------\n\n '''\n lm = LinearRegression()\n rfe_init = rfe(lm, k_features)\n\n rfe_init.fit(X, y)\n rfe_mask = rfe_init.support_ \n rfe_features = X.iloc[:, rfe_mask].columns.to_list()\n\n print(f\"Recursive Feature Elimination: {len(rfe_features)} features\")\n print(rfe_features)\n return None\n #return X[rfe_features]\n\n","sub_path":"src/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"69260605","text":"from django.conf.urls import url, include\nfrom rest_framework import routers\n\nfrom .api import UserList, UserDetail, SocialPageViewSet\nfrom .api import PublicationList, PublicationDetail, UserPublicationList\nfrom .api import PostCategoryDetail, PostCategoryList, UserPostCategoryList\nfrom .api import TagDetail, TagList\nfrom .api import 
TagCategoryDetail, TagCategoryList, UserTagCategoryList\nfrom .api import PostList, PostDetail, UserPostList\nfrom .api import PhotoList, PhotoDetail, PostPhotoList\n\nrouter = routers.DefaultRouter()\nrouter.register(r'users/(?P[0-9a-zA-Z_-]+)/accounts', SocialPageViewSet) #, name='accounts') #, base_name='social_accounts')\n\nuser_urls = [\n url(r'(?P[0-9a-zA-Z_-]+)/posts$', UserPostList.as_view(), name='userpost-list'),\n url(r'(?P[0-9a-zA-Z_-]+)/post_categories$', UserPostCategoryList.as_view(), name='userpostcategory-list'),\n url(r'(?P[0-9a-zA-Z_-]+)/tags$', UserTagCategoryList.as_view(), name='usertag-list'),\n url(r'(?P[0-9a-zA-Z_-]+)/publications$', UserPublicationList.as_view(), name='userpublication-list'),\n url(r'(?P[0-9a-zA-Z_-]+)/accounts$', SocialPageViewSet, name='useraccount-list'),\n url(r'(?P[0-9a-zA-Z_-]+)$', UserDetail.as_view(), name='user-detail'),\n url(r'^$', UserList.as_view(), name='user-list'),\n]\n\npost_urls = [\n url(r'(?P\\d+)/photos$', PostPhotoList.as_view(), name='postphoto-list'),\n url(r'(?P\\d+)$', PostDetail.as_view(), name='post-detail'),\n url(r'^$', PostList.as_view(), name='post-list')\n]\n\npost_category_urls = [\n url(r'(?P\\d+)$', PostCategoryDetail.as_view(), name='post-category-detail'),\n url(r'^$', PostCategoryList.as_view(), name='post-category-list')\n]\n\ntag_urls = [\n url(r'(?P\\d+)$', TagDetail.as_view(), name='tag-detail'),\n url(r'^$', TagList.as_view(), name='tag-list')\n]\n\ntag_category_urls = [\n url(r'(?P\\d+)$', TagCategoryDetail.as_view(), name='tag-category-detail'),\n url(r'^$', TagCategoryList.as_view(), name='tag-category-list')\n]\n\nphoto_urls = [\n url(r'(?P\\d+)$', PhotoDetail.as_view(), name='photo-detail'),\n url(r'^$', PhotoList.as_view(), name='photo-list')\n]\n\n\npublication_urls = [\n url(r'(?P\\d+)$', PublicationDetail.as_view(), name='publication-detail'),\n url(r'^$', PublicationList.as_view(), name='publication-list')\n]\n\n\nurlpatterns = [\n url(r'^users/', include(user_urls)),\n url(r'^posts/', include(post_urls)),\n url(r'^post_categories/', include(post_category_urls)),\n url(r'^tag_categories/', include(tag_category_urls)),\n url(r'^tags/', include(tag_urls)),\n url(r'^photos/', include(photo_urls)),\n url(r'^publications/', include(publication_urls)),\n url(r'^accounts', include(router.urls)),\n url(r'', include(router.urls)),\n]\n","sub_path":"backend/smm/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"491465401","text":"import turtle\r\n\r\n# Retrieve the Turtle window from the software and store it.\r\n# This may be useful later.\r\nwindow = turtle.Screen()\r\n\r\n# Create a turtle named Turtz\r\nturtz = turtle.Turtle(shape='turtle')\r\n\r\n# Turtz is a green turtle who leaves a blue trail\r\nturtz.color(\"blue\", \"green\")\r\n\r\n# We need to move and turn our turtle once for every side\r\nfor i in range(4):\r\n # Make the turtle walk forward\r\n turtz.forward(100)\r\n\r\n # Turn the turtle to the left\r\n turtz.left(90)\r\n","sub_path":"Solutions for lab 1-B nd assing1 nd test/Lab 1/turtle_square_for.py","file_name":"turtle_square_for.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"116064667","text":"# -*- coding: UTF-8 -*-\n\nimport os,sys,time\nfrom datetime import datetime\n\nSN = '151000010000002C'\nlocalmd5 = 'cd573cfaace07e7949bc0c46028904ff'\n\nf = 
'/mnt/internal_sd/'\nt = '/data/'\nname = '1G.txt'\na = []\n\nc0 = datetime.now().strftime('%Y%m%d_%H%M%S')\n\ndef Logfile(str):\n c1 = datetime.now().strftime('%Y%m%d-%H:%M:%S')\n lirun = open(c0 + \"_emmc.log\",\"a+\")\n print(c1 + \" \" + str)\n lirun.write(c1 + \" \" + str + \"\\n\")\n lirun.flush()\n lirun.close()\n\n\ndef CopyCheck(p1,p2):\n\n Logfile(\"start copy \" + p1 + name + \" to \" + p2)\n t1 = time.time()\n os.popen(\"adb -s \" + SN + \" shell cp \" + p1 + name + \" \" + p2)\n t2 = time.time()\n t3 = t2 - t1\n su = 1024/float(t3)\n a.append(su)\n \n Logfile(\"copy finish: %.2fM/s\"%su)\n cpmd5 = os.popen(\"adb -s \" + SN + \" shell busybox md5sum \" + p2 + name).read()\n cpmd51 = cpmd5.split()[0]\n\n if cpmd51 == localmd5:\n Logfile(\"md5 check pass\")\n else:\n Logfile(\"local file md5 is : \" + localmd5)\n Logfile(\"copy file md5 is : \" + cpmd51)\n Logfile (\"md5 check fail\")\n exit()\n \n Logfile (\"delete \" + p1 + name + \"\\n\")\n os.popen(\"adb -s \" + SN + \" shell rm \" + p1 + name)\n\nif __name__ =='__main__':\n\n for i in range(1,201):\n Logfile (\"========== testtime: %d\"%i)\n CopyCheck(t,f)\n time.sleep(0.5)\n CopyCheck(f,t)\n\n ave = float(sum(a))/len(a)\n Logfile (\"average: %.2fM/s\"%ave)\n \n\n","sub_path":"mypython/PD801/data_sdcard.py","file_name":"data_sdcard.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"477100711","text":"import pygame\nimport sys\nimport time\nimport random\nfrom pygame.locals import * \n\npygame.init()\nsize = width, height = 800, 800\nscreen = pygame.display.set_mode(size)\npygame.display.set_caption(\"Snake Change\")\n# Colors\nred = (255, 0, 0)\ngreen = (0, 255, 0)\nblack = (0, 0, 0)\nwhite = (255, 255, 255)\nbrown = (165, 42, 42)\n\n# FPS controller\ntimes = pygame.time.Clock()\n\n\ndirection = 'RIGHT'\nblock=10\nsnake_pos=[100,50]\nsnake_body=[[100,50],[90,50],[80,50]] \ngame_over = False\n\n\ndef move(): \n if direction == 'RIGHT':\n snake_pos[0] += block\n elif direction == 'LEFT':\n snake_pos[0] -= block\n elif direction == 'DOWN':\n snake_pos[1] += block\n elif direction == 'UP':\n snake_pos[1] -= block\n\n########### ex3: the code below relates to the exercise on page 36 of the PPT #########################\n\ndef gameover():\n font = pygame.font.SysFont(None, 88)\n fontimg = font.render('game over', True, red) \n screen.blit(fontimg, (250,250))\n pygame.display.update()\n #time.sleep(5)\n #pygame.quit() #the code on lines 46-47 can be kept or left out\n\n\ndef control2(aa):\n if event.key == K_RIGHT or event.key == K_d:\n if aa != 'LEFT':\n aa = 'RIGHT'\n \n if event.key == K_LEFT or event.key == K_a:\n if aa != 'RIGHT':\n aa = 'LEFT'\n \n if event.key == K_UP or event.key == K_w:\n if aa != 'DOWN':\n aa = 'UP'\n \n if event.key == K_DOWN or event.key == K_s:\n if aa != 'UP':\n aa = 'DOWN'\n return aa #what goes wrong if the return is omitted at first?\n\ndef get_color():\n red = random.randint(1,255)\n green = random.randint(1,255)\n blue = random.randint(1,255)\n color = (red,green,blue)\n \n\n return color\n\n\n\nwhile True:\n screen.fill(white)\n event = pygame.event.poll()\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n elif event.type == pygame.KEYDOWN: \n direction = control2(direction) \n move() \n\n\n if snake_pos[0] >= width or snake_pos[0] < 0:\n game_over = True\n if snake_pos[1] >= height or snake_pos[1] < 0:\n game_over = True #collision\n\n if game_over:\n gameover()\n \n snake_body.insert(0, list(snake_pos))\n snake_body.pop() \n color=get_color()\n for pos in snake_body:\n 
pygame.draw.rect(screen, color, (pos[0], pos[1], block, block))\n\n\n keys = pygame.key.get_pressed()\n if keys[K_ESCAPE]:\n pygame.quit()\n sys.exit() \n \n pygame.display.flip()\n times.tick(20)","sub_path":"example/test/L20_homework.py","file_name":"L20_homework.py","file_ext":"py","file_size_in_byte":2546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"82815954","text":"from django.core.exceptions import ValidationError\n\nfrom .exceptions import LunchbreakException\n\n\nclass CleanModelMixin:\n\n def clean(self):\n super().clean()\n\n fields = self.__class__._meta.get_fields()\n\n for field in fields:\n clean_method_name = 'clean_{}'.format(field.name)\n if hasattr(self, clean_method_name):\n try:\n getattr(self, clean_method_name)()\n except Exception as e:\n form = getattr(self, '_form', None)\n if form is None:\n raise\n\n if isinstance(e, LunchbreakException):\n e = e.django_validation_error\n elif not isinstance(e, ValidationError):\n raise\n\n form.add_error(\n field.name,\n e\n )\n","sub_path":"lunchbreak/Lunchbreak/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"631064151","text":"#!/usr/bin/env python\nimport time\nfrom copy import deepcopy\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom . import matrix\n\n\ndef propagateLocalInfection(m, p):\n rows = m.shape[0]\n cols = m.shape[1]\n n = deepcopy(m)\n expansion = False\n for y in range(0, rows):\n for x in range(0, cols):\n if m[y][x] == 1.4:\n if y - 1 >= 0:\n if m[y - 1][x] < p:\n n[y - 1, x] = 1.4\n expansion = True\n if y + 1 < rows:\n if m[y + 1][x] < p:\n n[y + 1, x] = 1.4\n expansion = True\n if x - 1 >= 0:\n if m[y][x - 1] < p:\n n[y][x - 1] = 1.4\n expansion = True\n if x + 1 < cols:\n if m[y][x + 1] < p:\n n[y, x + 1] = 1.4\n expansion = True\n return expansion, n\n\n\ndef propagateInfection(m, p):\n l = len(m)\n for i in range(l):\n for j in range(len(m[i])):\n if m[i][j] == 2:\n if i - 1 > 0 and m[i - 1][j] == 1:\n if np.random.random() < p:\n m[i - 1][j] = 8\n if i + 1 < l and m[i + 1][j] == 1:\n if np.random.random() < p:\n m[i + 1][j] = 8\n if j - 1 > 0 and m[i][j - 1] == 1:\n if np.random.random() < p:\n m[i][j - 1] = 8\n if j + 1 < l and m[i][j + 1] == 1:\n if np.random.random() < p:\n m[i][j + 1] = 8\n for i in range(l):\n for j in range(len(m[i])):\n if m[i][j] == 8:\n m[i][j] = 2\n return m\n\n\ndef findSuccessfulEnd(m):\n l = len(m)\n for i in range(l):\n if m[i][l - 1] == 2:\n return True\n return False\n\n\ndef findEnd(m):\n l = len(m)\n for i in range(l):\n if m[i][l - 1] == 2:\n return 1 # stop / matrix percolates\n for i in range(l):\n for j in range(len(m[i])):\n if m[i][j] == 2:\n if (i - 1 > 0 and m[i - 1][j] == 1) or \\\n (i + 1 < l and m[i + 1][j] == 1) or \\\n (j - 1 > 0 and m[i][j - 1] == 1) or \\\n (j + 1 < l and m[i][j + 1] == 1):\n return 2 # dont stop / matrix do not percolate\n return 3 # stop / matric cant percolate\n\n\ndef horizontal(n, p, display):\n m, pp = matrix.initInfectedMatrix(n, True)\n if display is True:\n plt.rcParams['image.cmap'] = 'binary'\n plt.ion()\n endStatus = findEnd(m)\n while endStatus is 2:\n m = propagateInfection(m, p)\n if display is True:\n plt.imshow(m)\n plt.show()\n plt.pause(0.0001)\n endStatus = findEnd(m)\n if display is True:\n plt.show(block=True)\n im = plt.matshow(m, cmap=plt.cm.binary, aspect='auto')\n plt.colorbar(im)\n plt.savefig('horizontal.png', 
bbox_inches='tight')\n if endStatus is 1: # matrix percolates\n return m, True, pp\n return m, False, pp\n\n\ndef local(n, p, display):\n m, pp = matrix.initInfectedMatrix(n, False)\n if display:\n plt.rcParams['image.cmap'] = 'binary'\n plt.ion()\n expansion = True\n while expansion is True:\n expansion, m = propagateLocalInfection(m, p)\n if display:\n plt.imshow(m)\n plt.show()\n plt.pause(0.0001)\n if display:\n plt.show(block=True)\n im = plt.matshow(m, cmap=plt.cm.binary, aspect='auto')\n plt.colorbar(im)\n plt.savefig('local.png', bbox_inches='tight')\n return m\n","sub_path":"infection/infect.py","file_name":"infect.py","file_ext":"py","file_size_in_byte":3745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"312806796","text":"import urllib.request\nimport urllib.parse\nimport json\nimport csv\nimport codecs\nimport os\nimport threading, queue, time, hashlib\n\n\nclass TheardPoll(object):\n def __init__(self, maxsize):\n self.masize = maxsize\n self._q = queue.Queue(self.masize)\n for i in range(self.masize):\n self._q.put(threading.Thread)\n\n def getThread(self):\n return self._q.get()\n\n def addThread(self):\n return self._q.put(threading.Thread)\n\n\ndef crawer(page, p):\n print('this is thread [%s]' % page)\n\n referer = 'https://sh.zu.anjuke.com/ditie/dt56-fx1-p' + str(page) + '-x1/'\n url = 'https://sh.zu.anjuke.com/v3/ajax/listrecommend?rec_type=list&guid=BBF91347-C216-7EDE-3216-79F82C72F3FD&city_id=11&num=5'\n\n headers = {\n 'referer': referer,\n 'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36'\n }\n\n request = urllib.request.Request(url=url, headers=headers, method='GET')\n\n response = urllib.request.urlopen(request)\n\n data = response.read().decode('utf8')\n data = json.loads(data)\n\n data = data['data']['guess_like']\n\n datarow = list()\n for item in data:\n datarow.append(\n [item['id'], item['title'], item['type'], item['img'], item['room_num'], item['hall_num'],\n item['fitment_name'],\n item['price'], item['comm_name'], item['from'], item['is_hp'], item['has_video']])\n\n if not os.path.exists('data.csv'):\n\n f = codecs.open('data.csv', 'wb', 'utf8') # 用gb2312编码写入,否则Excel打开乱码\n write = csv.writer(f, dialect='excel')\n write.writerow(\n ['id', 'title', 'type', 'img', 'room_num', 'hall_num', 'fitment_name', 'price', 'comm_name', 'from',\n 'is_hp',\n 'has_video'])\n else:\n f = codecs.open('data.csv', 'ab', 'utf8')\n write = csv.writer(f, dialect='excel')\n\n write.writerows(datarow)\n\n p.addThread()\n\n\nclass gaoDe(object):\n def __init__(self):\n self.searchapi = 'http://restapi.amap.com/v3/place/text'\n self.key = '70b207be5618472335f9f1c6ac1af857'\n\n def search(self, addr):\n data = {'key': self.key, 'keywords': addr, 'city': 'shanghai', 'types': '1203'}\n sign = self.sign(data)\n data['sign'] = sign\n data = urllib.parse.urlencode(data)\n url = self.searchapi + '?' 
+ data\n print(url)\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36'\n }\n request = urllib.request.Request(url, headers=headers)\n response = urllib.request.urlopen(request)\n response = json.loads(response.read().decode('utf8'))\n location = response['pois'][0]['location']\n return location\n\n def sign(self, paramas):\n paramas = sorted(paramas.items(), key=lambda e: e[1], reverse=True)\n parastr = ''\n for parama in paramas:\n tmp = '%s=%s&' % (parama[0], parama[1])\n parastr += '%s=%s&' % (parama[0], parama[1])\n\n return hashlib.md5(parastr.encode(encoding='UTF-8')).hexdigest()\n\n\nif __name__ == '__main__':\n # poll = TheardPoll(3)\n # for page in range(50):\n # t = poll.getThread()\n # a = t(target=crawer, args=(page, poll))\n # a.start()\n\n test = gaoDe()\n print(test.search('南京东路'))\n","sub_path":"anjuke.py","file_name":"anjuke.py","file_ext":"py","file_size_in_byte":3449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"127439428","text":"from flask import (\n Flask,\n)\nfrom flask_socketio import SocketIO\n\nsocketio = SocketIO()\n\n\ndef create_app(debug=False):\n app = Flask(__name__)\n app.debug = debug\n app.secret_key = 'flask secret key test info'\n\n from .main import bp_main\n app.register_blueprint(bp_main)\n\n socketio.init_app(app)\n return app\n\n\n","sub_path":"web23-chat/app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"371192629","text":"#!/usr/bin/env python\n\n__all__ = ['doi_to_wos', 'query', 'single', 'doi_to_wos_full', 'multi_doi']\n\nfrom xml.etree import ElementTree as _ET\nfrom xml.dom import minidom as _minidom\nimport re as _re\nfrom multiprocessing.dummy import Pool as ThreadPool\nimport itertools\nimport time\nimport logging\nlogger = logging.getLogger(__name__)\n\nrecord_limit = 100\nspeed_limit = 10 # Requests per n seconds\ndelay = 1 # in n seconds (throttle limit)\npool = ThreadPool(speed_limit) \n\ndef single(wosclient, wos_query, xml_query=None, count=5, offset=1):\n \"\"\"Perform a single Web of Science query and then XML query the results.\"\"\"\n logger.debug('Query: {}'.format(wos_query))\n result = wosclient.search(wos_query, count, offset)\n xml = _re.sub(' xmlns=\"[^\"]+\"', '', result.records, count=1).encode('utf-8')\n if xml_query:\n xml = _ET.fromstring(xml)\n return [el.text for el in xml.findall(xml_query)]\n else:\n return _minidom.parseString(xml).toprettyxml()\n\n\ndef query(wosclient, wos_query, xml_query=None, count=5, offset=1, limit=100):\n \"\"\"Query Web of Science and XML query results with multiple requests.\"\"\"\n results = [single(wosclient, wos_query, xml_query, min(limit, count-x+1), x)\n for x in range(offset, count+1, limit)]\n if xml_query:\n return [el for res in results for el in res]\n else:\n pattern = _re.compile(r'.*?|.*', _re.DOTALL)\n return ('\\n' +\n '\\n'.join(pattern.sub('', res) for res in results) +\n '')\n\n\ndef doi_to_wos(wosclient, doi):\n \"\"\"Convert DOI to WOS identifier.\"\"\"\n results = query(wosclient, 'DO=\"{}\"'.format(doi) , './REC/UID', count=1)\n time.sleep(delay)\n if results:\n return ('{},{}'.format(doi, results[0].lstrip('WOS:')))\n else:\n return ('{},'.format(doi))\n\n\ndef doi_to_wos_full(wosclient, query):\n \"\"\"Handle queries from multi_doi.\"\"\"\n results = single(wosclient, query , 
None, count=record_limit)\n time.sleep(delay)\n print(results)\n\n\n\ndef multi_doi(wosclient, doifile, onlyid):\n \"\"\"Query many DOIs in a CSV file.\"\"\"\n with open(doifile, 'r') as f:\n doi_list = f.readline().strip().split(',')\n\n if onlyid:\n logger.info('Retrieving WOS IDs for {} DOIs, please '\n 'wait...'.format(len(doi_list)))\n results = pool.starmap(doi_to_wos,\n zip(itertools.repeat(wosclient), doi_list)) \n if results:\n print('doi,wos id')\n for line in results:\n print(line) if line else None\n else:\n logger.info('Querying WOS for {} DOIs, please '\n 'wait...'.format(len(doi_list))) \n i=0\n queries=[]\n while i < len(doi_list): \n # Chunk into combined DOI queries equivalent to the\n # max records returned\n chunk = doi_list[i:i+record_limit]\n chunk = 'DO=(' + ' OR '.join(chunk) + ')'\n queries.append(chunk)\n i+=record_limit\n\n results = pool.starmap(doi_to_wos_full,\n zip(itertools.repeat(wosclient), queries))\n \n\n\n","sub_path":"wos/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"33182809","text":"import pandas as pd\nfrom sklearn.tree import DecisionTreeClassifier\n\n\nclass DecisionTreeExtClassifier:\n def __init__(self, *args, **kwargs):\n self.max_round = kwargs.pop('max_round')\n self.p_value = kwargs.pop('p_value')\n self.kwargs = kwargs\n self.args = args\n\n def fit(self, src_x, src_y):\n fit_list = []\n u_list = []\n rest = len(src_y)\n\n x = src_x.copy().reset_index(drop=True)\n y = src_y.copy().reset_index(drop=True)\n\n full_x, full_y, = x, y\n for idx in range(self.max_round):\n if idx == self.max_round - 1:\n if idx == 0:\n fit_list.append(DecisionTreeClassifier(*self.args, **self.kwargs).fit(full_x, full_y))\n else:\n fit_list.append(fit_list[0])\n ext_len = rest\n u_value = 1\n else:\n model = DecisionTreeClassifier(*self.args, **self.kwargs).fit(x, y)\n fit_list.append(model)\n\n pred = model.predict_proba(x)\n df_pred = pd.DataFrame(pred)\n df_pred.columns = ['false', 'true']\n\n criterion = (df_pred.true * df_pred.false).to_frame()\n criterion.columns = ['criterion']\n df_ext = criterion.sort_values('criterion', ascending=True)\n\n cut_len = round(rest * self.p_value)\n df_cut_ext = df_ext.iloc[: cut_len]\n u_value = df_cut_ext.iloc[-1].criterion\n df_cut_ext = df_cut_ext[df_cut_ext.criterion < u_value]\n\n x = x.drop(df_cut_ext.index).reset_index(drop=True)\n y = y.drop(df_cut_ext.index).reset_index(drop=True)\n ext_len = len(df_cut_ext)\n\n rest -= ext_len\n u_list.append(u_value)\n # print('Train) Round, u value, rest, number of extraction:', idx, u_value, rest, ext_len)\n\n return DecisionTreeExtModel(self.max_round, self.p_value, zip(fit_list, u_list))\n\n\nclass DecisionTreeExtModel:\n def __init__(self, max_round, p_value, obj):\n self.max_round = max_round\n self.p_value = p_value\n self.obj = obj\n\n def predict(self, x):\n meet_list = []\n final_pred = None\n rest = len(x)\n stored_pred = []\n stored_criterion = []\n\n for idx, (model, u_value) in enumerate(self.obj):\n pred = model.predict_proba(x)\n df_pred = pd.DataFrame(pred)\n df_pred.columns = ['false', 'true']\n criterion = df_pred.true * df_pred.false\n meet = (criterion < u_value)\n\n if idx == 0:\n final_pred = df_pred.true\n df_pred.true[~meet] = None\n else:\n for prev_idx in range(idx):\n prev_meet = meet_list[prev_idx]\n meet[prev_meet] = False\n\n final_pred[meet] = df_pred.true\n for prev_idx in range(idx):\n final_pred[meet & (criterion > 
stored_criterion[prev_idx])] = stored_pred[prev_idx].true\n criterion[meet & (criterion > stored_criterion[prev_idx])] = stored_criterion[prev_idx]\n\n meet_list.append(meet)\n if idx == self.max_round - 1:\n rest = 0\n else:\n stored_pred.append(df_pred)\n stored_criterion.append(criterion)\n rest -= meet.sum()\n # print('Prediction) Round, u value, rest, meet count:', idx, u_value, rest, meet.sum())\n\n final_pred[final_pred >= 0.5] = 1\n final_pred[final_pred < 0.5] = 0\n\n return final_pred.to_numpy()\n","sub_path":"train/model_dt_ext.py","file_name":"model_dt_ext.py","file_ext":"py","file_size_in_byte":3683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"461250526","text":"import dask.dataframe as dd\nimport packaging.version\nimport pandas as pd\nimport pytest\nimport sklearn.preprocessing\nfrom sklearn.base import clone\n\nimport dask_ml.preprocessing\n\ntry:\n import sklearn.compose\n import dask_ml.compose\nexcept ImportError:\n from dask_ml._compat import SK_VERSION\n\n pytestmark = pytest.mark.skipif(\n SK_VERSION < packaging.version.parse(\"0.20.0.dev0\"),\n reason=\"sklearn.compose added in 0.20.0\",\n )\n\n\ndf = pd.DataFrame({\"A\": pd.Categorical([\"a\", \"a\", \"b\", \"a\"]), \"B\": [1.0, 2, 4, 5]})\nddf = dd.from_pandas(df, npartitions=2)\n\n\ndef test_column_transformer():\n a = sklearn.compose.make_column_transformer(\n ([\"A\"], sklearn.preprocessing.OneHotEncoder(sparse=False)),\n ([\"B\"], sklearn.preprocessing.StandardScaler()),\n )\n\n b = dask_ml.compose.make_column_transformer(\n ([\"A\"], dask_ml.preprocessing.OneHotEncoder(sparse=False)),\n ([\"B\"], dask_ml.preprocessing.StandardScaler()),\n )\n\n a.fit(df)\n b.fit(ddf)\n\n expected = a.transform(df)\n result = b.transform(ddf)\n\n assert isinstance(result, dd.DataFrame)\n expected = pd.DataFrame(expected, index=result.index, columns=result.columns)\n dd.utils.assert_eq(result, expected)\n\n # fit-transform\n result = clone(b).fit_transform(ddf)\n expected = clone(a).fit_transform(df)\n expected = pd.DataFrame(expected, index=result.index, columns=result.columns)\n dd.utils.assert_eq(result, expected)\n","sub_path":"tests/compose/test_column_transformer.py","file_name":"test_column_transformer.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"466276916","text":"class Intcode(object):\n def __init__(self, input_values):\n self.input = 0\n self.original_input_values = input_values\n self.input_values = self.to_dictionary(self.original_input_values)\n self.input_range = len(input_values)\n self.output_stream = []\n self.index = 0\n self.operator_returned = 0\n self.input_next = False\n self.relative_base = 0\n\n def reset(self):\n self.input = 0\n self.input_values = self.to_dictionary(self.original_input_values)\n self.input_range = len(self.input_values)\n self.output_stream = []\n self.index = 0\n self.operator_returned = 0\n self.input_next = False\n self.relative_base = 0\n\n def to_dictionary(self, input_values):\n res_dct = {i: input_values[i] for i in range(len(input_values))}\n return res_dct\n\n def get_value(self, value_index):\n if value_index in self.input_values:\n return self.input_values[value_index]\n else:\n return 0\n\n def set_value(self, value_index, value):\n self.input_values[value_index] = value\n\n def solveOpcode(self, input):\n self.input = input\n elements = len(self.input_values)\n self.operator_returned = 0\n\n while self.index < 
elements and self.operator_returned != 99:\n self.operator_returned = self.operatorInterpreter()\n\n def operatorInterpreter(self):\n A, B, C, operator = self.getParameterModes()\n if operator == 99:\n return operator\n\n # Parameter1 should always be readable.\n if C == 0:\n parameter1 = self.get_value(self.get_value(self.index + 1))\n elif C == 1:\n parameter1 = self.get_value(self.index + 1)\n elif C == 2:\n parameter1 = self.get_value(self.relative_base + self.get_value(self.index + 1))\n\n # Operators 3 and 4 only support one parameter. Therefore, it is not sure, wether the input is long enough.\n if self.parameters_required(operator) >= 2:\n if B == 0:\n parameter2 = self.get_value(self.get_value(self.index + 2))\n elif B == 1:\n parameter2 = self.get_value(self.index + 2)\n elif B == 2:\n parameter2 = self.get_value(self.relative_base + self.get_value(self.index + 2))\n\n if self.parameters_required(operator) >= 3:\n if A == 0:\n #parameter3 = self.get_value(self.get_value(self.index + 3))\n parameter3 = self.get_value(self.index + 3)\n elif A == 1:\n raise Exception(\"Parameter 3 shall never be in immediate mode!\")\n elif A == 2:\n #parameter3 = self.get_value(self.relative_base + self.get_value(self.index + 3))\n parameter3 = self.relative_base + self.get_value(self.index + 3)\n\n # 1: Addition\n if operator == 1:\n self.set_value(parameter3, parameter1 + parameter2)\n self.index += 4\n\n # 2: Multiplication\n elif operator == 2:\n self.set_value(parameter3, parameter1 * parameter2)\n self.index += 4\n\n # 3: Write\n elif operator == 3:\n to_write = self.input\n position = self.get_value(self.index + 1)\n if C == 0:\n self.set_value(position, to_write)\n elif C == 2:\n self.set_value(self.relative_base + position, to_write)\n self.index += 2\n\n # 4: Print\n elif operator == 4:\n self.output_stream.append(parameter1)\n self.index += 2\n\n # 5: Jump-If-True\n elif operator == 5:\n if parameter1 != 0:\n self.index = parameter2\n else:\n self.index += 3\n\n # 6: Jump-If-False\n elif operator == 6:\n if parameter1 == 0:\n self.index = parameter2\n else:\n self.index += 3\n\n # 7: Less Than\n elif operator == 7:\n if parameter1 < parameter2:\n self.set_value(parameter3, 1)\n else:\n self.set_value(parameter3, 0)\n self.index += 4\n\n # 8: Equals\n elif operator == 8:\n if parameter1 == parameter2:\n self.set_value(parameter3, 1)\n else:\n self.set_value(parameter3, 0)\n self.index += 4\n\n # 9: Opcode 9 adjusts the relative base by the value of its only parameter. 
The relative base increases (or decreases, if the value is negative) by the value of the parameter.\n elif operator == 9:\n self.relative_base += parameter1\n self.index += 2\n\n # 99: End of operation\n else:\n self.index += 1\n\n return operator\n\n def getParameterModes(self):\n ABC, DE = divmod(self.get_value(self.index), 100)\n AB, C = divmod(ABC, 10)\n A, B = divmod(AB, 10)\n return A, B, C, DE\n\n def parameters_required(self, operator):\n par_req = 3\n if operator == 3 or operator == 4:\n par_req = 1\n if operator == 5 or operator == 6:\n par_req = 2\n return par_req","sub_path":"2019/AoC2019Day09/intcode.py","file_name":"intcode.py","file_ext":"py","file_size_in_byte":5231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"116400884","text":"import yaml\nimport jieba\nimport os\nimport json\nimport pandas as pd\nfrom config.setting import config_path\nfrom config.setting import OUTPUT_PATH\nfrom config.setting import INPUT_PATH\nfrom config.setting import column_name_list\n# 存在如下行业分类\n# str = '''IT/通信/电子/互联网 金融业 房地产/建筑业 法律 商业服务 医疗/健康 贸易/批发/零售/租赁业 生产/加工/制造 交通/运输/物流/仓储 服务业 体育/休闲/旅游/娱乐 能源/矿产/环保 政府/非盈利机构 媒体 教育 农/林/牧/渔 跨领域经营 其他'''\n# for item in str.split(' '):\n# print(item+\":\")\n# for n in item.split(\"/\"):\n# print(\" {}: true\".format(n))\n\n################################################################################\n# 行业分类_关键词列表人工获取\n# 行业分页脚本, 根据配置文件industry.yaml中每个行业的关键词 去匹配行业类型\n################################################################################\n\nclass industry_classify(object):\n \"\"\"\n 读取分词关键字对应行业分类的配置文件\n \"\"\"\n def __init__(self, yaml_path=config_path + os.sep + 'industry.yaml'):\n stream = open(yaml_path, 'r', encoding='utf-8')\n self.industry_dict = yaml.load(stream)\n self.index = 0\n\n ignore_word_str = '''|\n 公司\n 有限\n 责任\n 政府\n ’\n ?\n ,\n 、\n 注册\n 其他\n 未分类\n 分类\n 未分\n (\n )\n '''.split('\\n')\n ignore_word_list = [x.strip() for x in ignore_word_str if x.strip() != \"\" ]\n self.ignore_word = dict.fromkeys(ignore_word_list, True)\n self.industry_keys = sorted(self.industry_dict.keys())\n\n def insert(self, key, value):\n key = key.strip()\n value = value.strip()\n match_key_dict = self.industry_dict.get(key, None)\n if match_key_dict is not None:\n match_key_dict.update({value: True})\n\n def save(self):\n tmp_yaml_path = OUTPUT_PATH + os.sep +'industry'+str(self.index)+'.yaml'\n self.index += 1\n with open(tmp_yaml_path, mode='w', encoding='utf-8') as f:\n yaml.dump(self.industry_dict, f, default_flow_style=False, encoding='UTF-8', allow_unicode = True)\n\n def industry_judge(self, word):\n for key in self.industry_dict.keys():\n if word in self.industry_dict[key].keys():\n # print(word, \" 命中分类 \", key)\n return key\n else:\n return None\n pass\n\n def interaction_industry_judge(self, word_list):\n \"\"\"\n 输入词列表 ,人工输入属于那个分类\n :param word_list:\n :return:\n \"\"\"\n word_list = [x for x in word_list if x.strip() != \"\"]\n if len(word_list) == 0:\n return None\n\n print(\"该公司 分词结果列表:\", word_list)\n print(\"存在一下分类,请选择你分类结果,\")\n for keyn, keyname in enumerate(self.industry_keys):\n print(keyn, keyname, end=' ')\n print()\n\n for word in word_list:\n input_key = input(\"word:\"+word+\" 你要作为那个分类的关键字 不能作为关键字请回车\").strip()\n\n if input_key == \"save\":\n self.save()\n input_key = input(\"word:\" + word + \" 你要作为那个分类的关键字 不能作为关键字请回车\").strip()\n\n if input_key.isdigit():\n input_key = int(input_key)\n if input_key in range(0, keyn):\n key_name = self.industry_keys[input_key]\n 
self.insert(key_name, word)\n return key_name\n else:\n print(\"输入数据没有对应枚举项\")\n continue\n else:\n return None\n\n def interaction_industry_judge_continue(self, word_list):\n \"\"\"\n 输入词列表 ,人工输入属于那个分类\n :param word_list:\n :param continue_flag 单个词匹配以后 ,其他词是否继续提示 手工 输入 匹配分类\n :return:\n \"\"\"\n word_list = [x for x in word_list if x.strip() != \"\"]\n if len(word_list) == 0:\n return None\n\n # print(\"该公司 分词结果列表:\", word_list)\n print(\"存在一下分类,请选择你分类结果,\")\n for keyn, keyname in enumerate(self.industry_keys):\n print(keyn, keyname, end=' ')\n print()\n\n for index, word in enumerate(word_list):\n input_key = input(str(index)+\" word:\" + word + \" 你要作为那个分类的关键字 不能作为关键字请回车\").strip()\n\n if input_key == \"save\":\n self.save()\n input_key = input(str(index) + \"word:\" + word + \" 你要作为那个分类的关键字 不能作为关键字请回车\").strip()\n\n if input_key.isdigit():\n input_key = int(input_key)\n if input_key in range(0, keyn+1):\n key_name = self.industry_keys[input_key]\n self.insert(key_name, word)\n else:\n print(\"输入数据 {} 没有对应枚举项\".format(word))\n\n if divmod(index,10)[1] == 0:\n print(\"存在一下分类,请选择你分类结果,\")\n for keyn, keyname in enumerate(self.industry_keys):\n print(keyn, keyname, end=' ')\n print()\n else:\n self.save()\n return None\n\n\ndef industry_to_stand(industry_tmp, industry_object):\n \"\"\"\n 对行业分类 做枚举化\n :param industry_tmp:\n :param industry_object:\n :return:\n \"\"\"\n ignore_word = industry_object.ignore_word\n seg_list = jieba.cut_for_search(industry_tmp) # 搜索引擎模式\n seg_list = [x for x in seg_list if x not in ignore_word]\n for word in seg_list:\n key_industry = industry_object.industry_judge(word)\n if key_industry is not None:\n return key_industry\n else:\n # 手工设置 分类 与 对应关键词\n # key_industry = industry_object.interaction_industry_judge(seg_list)\n # return key_industry\n return \"生产/加工/制造\"\n\n\n\ndef run(startid=None, endid=None, filerootpath=OUTPUT_PATH):\n \"\"\"\n 读取csv文件, 将对应行业字段取出,进行分词,将分词列表 与 行业分类关键词配置文件数据 做匹配\n 匹配不到数据, 提示交互界面, 人工新增关键字匹配规则\n :param startid: csv文件读取的起始行\n :param endid: csv文件读取的终止行\n :return:\n \"\"\"\n industry_object = industry_classify()\n with open(os.path.join(filerootpath, 'all_new.csv'), mode='r', encoding='utf-8') as f, \\\n open(os.path.join(filerootpath, 'fgood_all_new.csv'), mode='w', encoding='utf-8') as fgood, \\\n open(os.path.join(filerootpath, 'fbad_all_new.csv'), mode='w', encoding='utf-8') as fbad:\n\n index_NAME = column_name_list.index('NAME')\n index_INDUSTRY = column_name_list.index('INDUSTRY')\n\n indexn = 1\n index_feild = index_INDUSTRY # 需要修改的字段\n index_industry = index_INDUSTRY # 行业字段\n index_name = index_NAME # 企业名字段\n\n for iline in f:\n\n iline_list = iline.split(',')\n industry_tmp = iline_list[index_feild]\n # print(indexn, \" industry_tmp:\", industry_tmp)\n\n industry_key = industry_to_stand(iline_list[index_name], industry_object)\n if industry_key == \"生产/加工/制造\":\n industry_key = industry_to_stand(industry_tmp, industry_object)\n\n #print(\"industry_key:\", industry_key)\n if industry_key is not None:\n iline_list[index_industry] = industry_key\n data = ','.join(iline_list)\n # print(\"fgood write:\", data)\n fgood.write(data)\n fgood.flush()\n else:\n data = ','.join(iline_list)\n # print(\"fbad write:\", data)\n fbad.write(data)\n fbad.flush()\n indexn += 1\n\n\n\ndef run_wordlist(filepath='ind_new.txt'):\n \"\"\"\n 读取将分词列表 与 行业分类关键词配置文件数据 做匹配\n 匹配不到数据, 提示交互界面, 人工新增关键字匹配规则\n :param filepath: 分词列表的 txt 文件 ,分割数据\n :return:\n \"\"\"\n\n with open(INPUT_PATH + os.sep + 'ind.txt', mode='r', encoding='utf-8') as f:\n data = 
f.read()\n data = data.replace(\"[\", \"\").replace(\"]\", \"\").replace(\"\\'\", \"\")\n word_list = data.split(',')\n industry_object = industry_classify()\n industry_object.interaction_industry_judge_continue(word_list)\n\n\ndef merge_yaml(file_path_list=[INPUT_PATH + os.sep + 'INDUSTRY_qq.yaml',\n INPUT_PATH + os.sep + 'industry_zy.yaml',\n INPUT_PATH + os.sep + 'industry_c.yaml']):\n industry_dict_all = industry_classify(file_path_list[0]).industry_dict\n\n for index, yml_file_path in enumerate(file_path_list[1:]):\n industry_dict_tmp = industry_classify(yml_file_path).industry_dict\n for key in industry_dict_all.keys():\n key_valueskey_1 = industry_dict_all[key].keys()\n key_valueskey_2 = industry_dict_tmp[key].keys()\n key_valueskey = list(set(key_valueskey_1) | set(key_valueskey_2))\n industry_dict_all[key] = dict.fromkeys(key_valueskey, True)\n\n tmp_yaml_path = OUTPUT_PATH + os.sep + 'industry' + \"merge\" + '.yaml'\n with open(tmp_yaml_path, mode='w', encoding='utf-8') as f:\n yaml.dump(industry_dict_all, f, default_flow_style=False, encoding='UTF-8', allow_unicode=True)\n\n\n\n# with open('ind.txt', mode='r', encoding='utf-8') as f:\n# data = f.read()\n# data = data.replace(\"[\", \"\").replace(\"]\", \"\").replace(\"\\'\", \"\")\n# word_list1 = data.split(',')\n# word_list1 = [x.strip() for x in word_list1 if x.strip() != \"\"]\n# print(len(word_list1))\n# with open('ind2.txt', mode='r', encoding='utf-8') as f:\n# data = f.read()\n# word_list2 = data.split('\\n')\n# word_list2 = [x.strip() for x in word_list2 if x.strip() !=\"\"]\n# print(len(word_list2))\n# word_list = list( set(word_list2) - set(word_list1) )\n# print(len(word_list))\n\nif __name__ == \"__main__\":\n pass\n run()\n #\n # run_wordlist()\n # merge_yaml()","sub_path":"code_src/Industry_keyword_interactive.py","file_name":"Industry_keyword_interactive.py","file_ext":"py","file_size_in_byte":10595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"86085045","text":"class Solution():\n \"\"\"快排类\"\"\"\n def __init__(self):\n pass\n\n # end 初始为 None\n def quicksorted(self, head, end):\n if head != end:\n node = self.partition(head, end) # 先挖坑填数\n self.quicksorted(head, node) # 递归调用\n self.quicksorted(node.next, end) # 递归调用\n\n def partition(self, head, end):\n p1, p2 = head, head.next # p2是遍历指针,p1是小数的指针\n\n while p2 != end:\n if p2.value < head.value:\n p1 = p1.next\n\n tmp = p2.value\n p2.value = p1.value\n p1.value = tmp\n p2 = p2.next\n\n tmp = head.value\n head.value = p1.value\n p1.value = tmp\n\n return p1\n","sub_path":"classics/链表快排.py","file_name":"链表快排.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"62027261","text":"\"\"\"\nNGSolve\n=======\n\nA high order finite element library\n\nModules:\nngsolve.bla .... simple vectors and matrices\nngsolve.fem .... finite elements and integrators\nngsolve.comp ... 
function spaces, forms\n\"\"\"\n\nfrom ngsolve.ngslib import *\n\nstoremyinit = None\n\ndef __empty_init(x, *args, **kwargs):\n return\n\ndef __empty_init_reset_init(x,*args,**kwargs):\n global storemyinit\n x.__class__.__init__ = storemyinit\n storemyinit = None\n return\n\ndef __monkeypatch_new(thisclass, creatorfunction):\n pybind_constructor = thisclass.__new__\n def patched_new(class_t, *args,**kwargs):\n global storemyinit\n # if called from subclass which has a __init__ implementation, call the pybind\n # __new__ instead\n if class_t is not thisclass and hasattr(class_t,\"__init__\"):\n return pybind_constructor(class_t,*args,**kwargs)\n else:\n result = creatorfunction(class_t,*args,**kwargs)\n if hasattr(result.__class__, \"__init__\"):\n storemyinit = result.__class__.__init__\n result.__class__.__init__ = __empty_init_reset_init\n else:\n result.__class__.__init__ = __empty_init\n return result\n return patched_new\n\n\n# assign creator functions to __new__\ncreator_functions = {\n fem.ElementTransformation : fem.CreateElementTransformation,\n fem.CoefficientFunction : fem.CreateCoefficientFunction,\n fem.BFI : fem.CreateBilinearFormIntegrator,\n fem.LFI : fem.CreateLinearFormIntegrator,\n comp.BilinearForm : comp.CreateBilinearForm,\n comp.LinearForm : comp.CreateLinearForm,\n comp.Preconditioner : comp.CreatePreconditioner,\n comp.GridFunction : comp.CreateGridFunction,\n comp.PDE : comp.CreatePDE,\n comp.VTKOutput : comp.CreateVTKOutput,\n comp.FESpace : comp.CreateFESpace,\n comp.Periodic : comp.CreatePeriodicFESpace\n }\n\nfor pclass, creator in creator_functions.items():\n pclass.__new__ = __monkeypatch_new(pclass,creator)\n\n\n# creator function for unpickling of BaseVector\ndef CreateBaseVector(size,iscomplex,entrysize, entries,_dict):\n vec = la.CreateVVector(size,iscomplex,entrysize)\n for i,val in enumerate(entries):\n vec[i] = val\n vec.__dict__ = _dict\n return vec\n\ndef TmpRedraw(*args, **kwargs):\n solve._Redraw(*args, **kwargs)\n try:\n import netgen\n import tkinter\n while(netgen.gui.win.tk.dooneevent(tkinter._tkinter.DONT_WAIT)):\n pass\n except:\n pass\n\nsolve.Redraw = TmpRedraw\ndel TmpRedraw\n\n\n\nngstd.__all__ = ['ArrayD', 'ArrayI', 'BitArray', 'Flags', 'HeapReset', 'IntRange', 'LocalHeap', 'Timers', 'RunWithTaskManager', 'TaskManager', 'SetNumThreads']\nbla.__all__ = ['Matrix', 'Vector', 'InnerProduct', 'Norm']\nla.__all__ = ['BaseMatrix', 'BaseVector', 'CreateVVector', 'InnerProduct', 'CGSolver', 'QMRSolver', 'GMRESSolver', 'ArnoldiSolver', 'Projector']\nfem.__all__ = ['BFI', 'CoefficientFunction', 'Parameter', 'CoordCF', 'ET', 'ElementTransformation', 'ElementTopology', 'FiniteElement', 'ScalarFE', 'H1FE', 'HEX', 'L2FE', 'LFI', 'POINT', 'PRISM', 'PYRAMID', 'QUAD', 'SEGM', 'TET', 'TRIG', 'VERTEX', 'EDGE', 'FACE', 'CELL', 'ELEMENT', 'FACET', 'SetPMLParameters', 'sin', 'cos', 'tan', 'atan', 'exp', 'log', 'sqrt', 'floor', 'ceil', 'Conj', 'atan2', 'pow', 'specialcf', \\\n 'BlockBFI', 'BlockLFI', 'CompoundBFI', 'CompoundLFI', 'BSpline', \\\n 'IntegrationRule', 'IfPos' \\\n ]\n# TODO: fem:'PythonCF' comp:'PyNumProc'\ncomp.__all__ = ['BBND','BND', 'BilinearForm', 'COUPLING_TYPE', 'ElementId', 'BndElementId', 'FESpace','HCurl' , 'GridFunction', 'LinearForm', 'Mesh', 'NodeId', 'ORDER_POLICY', 'Preconditioner', 'VOL', 'NumProc', 'PDE', 'Integrate', 'SymbolicLFI', 'SymbolicBFI', 'SymbolicEnergy', 'VTKOutput', 'SetHeapSize', 'SetTestoutFile', 'ngsglobals','pml','Periodic','HDiv','HCurl'] \nsolve.__all__ = ['Redraw', 'BVP', 'CalcFlux', 'Draw', 'DrawFlux', 
'SetVisualization']\n\nfrom ngsolve.ngstd import *\nfrom ngsolve.bla import *\nfrom ngsolve.la import *\nfrom ngsolve.fem import *\nfrom ngsolve.comp import *\nfrom ngsolve.solve import *\nfrom ngsolve.utils import *\nfrom . import timing\n\n# add flags docu to docstring\nall_classes = comp.__dict__\nfor classname in all_classes:\n instance = all_classes[classname]\n try:\n flags_doc = instance.__flags_doc__()\n if instance.__doc__ == None:\n instance.__doc__ = \"\"\n instance.__doc__ += \"\\n Keyword arguments can be:\\n\"\n for name in flags_doc:\n instance.__doc__ += name + \": \" + flags_doc[name] + \"\\n\"\n except AttributeError:\n pass\n\nfrom ngsolve.ngstd import MPIManager\nMPIManager.InitMPI()\n\nfrom . import __expr\nBaseVector.expr = property(__expr.VecExpr)\nBaseVector.data = property(__expr.Expr, __expr.expr_data)\nBaseVector.__add__ = __expr.expr_add\nBaseVector.__sub__ = __expr.expr_sub\nBaseVector.__neg__ = __expr.expr_neg\nBaseVector.__rmul__ = __expr.expr_rmul\n\nBaseMatrix.expr = property(__expr.MatExpr)\nBaseMatrix.data = property(__expr.Expr, __expr.expr_data)\nBaseMatrix.T = property(__expr.TransExpr)\nBaseMatrix.__mul__ = __expr.expr_mul\nBaseMatrix.__rmul__ = __expr.expr_rmul\nBaseMatrix.__neg__ = __expr.expr_neg\n\nTiming = timing.Timing\n\nfem.__doc__ = \\\n\"\"\"Finite Elements\n===============\n\nfinite element shape functions, and element-matrix/vector integrators\n\"\"\"\n\n\n__all__ = ngstd.__all__ + bla.__all__ +la.__all__ + fem.__all__ + comp.__all__ + solve.__all__ + utils.__all__ + [\"Timing\"]\n\n\n\n\n","sub_path":"python/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"306194671","text":"#!/usr/bin/env Python\n# coding=utf-8\n\nfrom selenium import webdriver\nfrom time import sleep\n\ndriver=webdriver.Firefox()\n# 设置网页路径,r代表转义\nfile_path=r'F:\\selenium lianxi\\webdriver\\Frame.html'\n# 路径转义另一种方法\n# file_path='F:\\\\selenium lianxi\\\\webdriver\\\\Frame.html'\ndriver.get(file_path)\n\n# 切换到frame页面内******\ndriver.switch_to.frame(\"search\")\n\ndriver.find_element_by_css_selector(\"#query\").send_keys(\"Python\")\nsleep(2)\ndriver.find_element_by_css_selector(\"#stb\").click()\n\nsleep(2)\ndriver.quit()\n","sub_path":"webdriver/4-22~4-32/frame_find_element_p1.py","file_name":"frame_find_element_p1.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"175748160","text":"from django.conf.urls import url\nfrom . 
import views\n\nurlpatterns =[\n    url(r'^$', views.index),\n    url(r'^register$', views.register),\n    url(r'^login$', views.login),\n    url(r'^pokes$', views.dash),\n    url(r'^logout$', views.log_out),\n    url(r'^createpoke/(?P\\d+)$', views.create_poke)\n]\n","sub_path":"apps/poke/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"428270357","text":"# RVAR_FF3\n# Fama-French three-factor (FF3) residual variance\n\nimport pandas as pd\nimport numpy as np\nimport datetime as dt\nimport wrds\nimport psycopg2\nfrom dateutil.relativedelta import *\nfrom pandas.tseries.offsets import *\nimport datetime\n\n###################\n# Connect to WRDS #\n###################\nconn=wrds.Connection()\n\n# CRSP Block\ncrsp = conn.raw_sql(\"\"\"\n                      select a.permno, a.date, a.ret, (a.ret - b.rf) as exret, b.mktrf, b.smb, b.hml\n                      from crsp.dsf as a\n                      left join ff.factors_daily as b\n                      on a.date=b.date\n                      where a.date > '01/01/1959'\n                      \"\"\")\n\ncrsp = crsp.sort_values(by=['permno', 'date'])\n\n# change variable format to int\ncrsp['permno'] = crsp['permno'].astype(int)\n\n# Line up date to be end of month\ncrsp['date'] = pd.to_datetime(crsp['date'])\n\n################################\n# Calculate the beta for mktrf #\n################################\n\ndf = crsp.groupby('permno')['exret', 'mktrf'].rolling(60).cov() # 60 trading days\n\ndf.index.names = ['permno', 'index', 'type'] # rename the multiple keys in index\ndf = df.xs('exret', level='type') # takes a key argument to select data at a particular level of a MultiIndex\ndf.rename(columns={'exret': 'var', 'mktrf': 'cov'}, inplace=True)\n\ndf = df.reset_index() # extract permno from index\ndf = df[['permno', 'var', 'cov']]\ndf['date'] = crsp['date']\n\ncrsp_final = df\ncrsp_final['beta_mktrf'] = crsp_final['cov']/crsp_final['var']\ncrsp_final = crsp_final[['permno', 'date', 'beta_mktrf']]\n\n##############################\n# Calculate the beta for smb #\n##############################\n\ndf = crsp.groupby('permno')['exret', 'smb'].rolling(60).cov() # 60 trading days\n\ndf.index.names = ['permno', 'index', 'type'] # rename the multiple keys in index\ndf = df.xs('exret', level='type') # takes a key argument to select data at a particular level of a MultiIndex\ndf.rename(columns={'exret': 'var', 'smb': 'cov'}, inplace=True)\n\ndf = df.reset_index() # extract permno from index\ndf = df[['permno', 'var', 'cov']]\ndf['beta_smb'] = df['cov']/df['var']\n\ncrsp_final['beta_smb'] = df['beta_smb']\n\n##############################\n# Calculate the beta for hml #\n##############################\n\ndf = crsp.groupby('permno')['exret', 'hml'].rolling(60).cov() # 60 trading days\n\ndf.index.names = ['permno', 'index', 'type'] # rename the multiple keys in index\ndf = df.xs('exret', level='type') # takes a key argument to select data at a particular level of a MultiIndex\ndf.rename(columns={'exret': 'var', 'hml': 'cov'}, inplace=True)\n\ndf = df.reset_index() # extract permno from index\ndf = df[['permno', 'var', 'cov']]\ndf['beta_hml'] = df['cov']/df['var']\n\ncrsp_final['beta_hml'] = df['beta_hml']\n\n##############################\n# Calculate residual         #\n##############################\ncrsp_final[['exret', 'mktrf', 'smb', 'hml']] = crsp[['exret', 'mktrf', 'smb', 'hml']]\ncrsp_final['rvar_ff3'] = crsp_final['exret'] - crsp_final['beta_mktrf']*crsp_final['mktrf'] - \\\n                         crsp_final['beta_smb']*crsp_final['smb'] - crsp_final['beta_hml']*crsp_final['hml']\ncrsp_final = 
crsp_final[['permno', 'date', 'rvar_ff3']]\ncrsp_final = crsp_final.dropna()\n\n","sub_path":"pychars/rvar_ff3.py","file_name":"rvar_ff3.py","file_ext":"py","file_size_in_byte":3220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"356342967","text":"import discord\r\nfrom discord.ext import commands\r\nfrom discord.ext.commands import Bot\r\nimport asyncio\r\n\r\n# ön ek\r\nbot = commands.Bot(command_prefix = 'sau')\r\nSORU_DOGRULUK_ORANI=10 # %93 Oraninda 1 ssecenegin on plana cikmasi gerekiyor\r\nVERILEN_OY_SINIRI=1 # En az 30 oy kullanilmasi gerekiyor\r\nclass Soru():\r\n def __init__(self):\r\n self.soru = [[1000000000000,1,1,1,1,1]]\r\n def soru_sifirla(self):\r\n self.soru=[[1000000000000,1,1,1,1,1]]\r\n def soru_guncelle(self,message_id,number,count):\r\n for n,simdiki_soru in enumerate(self.soru, start=1):\r\n if len(self.soru) < simdiki_soru[0] and message_id == simdiki_soru[0]:\r\n simdiki_soru[number] = count\r\n\r\n def soru_secenekleri_say(self, message):\r\n for react in message.reactions:\r\n for secenek in enumerate(list(\"🇦🇧🇨🇩🇪\"), start =1):\r\n print(secenek)\r\n if react.emoji == secenek[1]:\r\n print(\"A secenek:\"+str(react.count))\r\n self.soru_guncelle(message.id,secenek[0],react.count)\r\n if react.emoji == secenek[1]:\r\n print(\"B secenek:\"+str(react.count))\r\n self.soru_guncelle(message.id,secenek[0],react.count)\r\n if react.emoji == secenek[1]:\r\n print(\"C secenek:\"+str(react.count))\r\n self.soru_guncelle(message.id,secenek[0],react.count)\r\n if react.emoji == secenek[1]:\r\n print(\"D secenek:\"+str(react.count))\r\n self.soru_guncelle(message.id,secenek[0],react.count)\r\n if react.emoji == secenek[1]:\r\n print(\"E secenek:\"+str(react.count))\r\n self.soru_guncelle(message.id,secenek[0],react.count)\r\n\r\n def soru_analiz(self,message_id):\r\n total_reaction_counts = 0\r\n new_rate = 0\r\n max_percent_reaction = 0,0,0 # number_of_selection, count_of_selection, percent_of_selection\r\n for soru_x in self.soru:\r\n if message_id in soru_x:\r\n for answers in soru_x[1:]: # toplam reaksiyon sayisi\r\n total_reaction_counts += answers\r\n for answers in enumerate(soru_x[1:], start=1):\r\n new_rate = (answers[1]/total_reaction_counts)*100\r\n if max_percent_reaction[2] < new_rate:\r\n max_percent_reaction = answers[0],answers[1],new_rate\r\n \"\"\"\r\n if total_reaction_counts >= 2:\r\n print(max_percent_reaction[2])\r\n if max_percent_reaction[2] > ((total_reaction_counts*10)/100): # %80 den fazlasi dogru sikki ayni dusunuyorsa\r\n print(\"Maksimum Secenek Yuzdesi:\")\r\n print(max_percent_reaction)\r\n \"\"\"\r\n return max_percent_reaction\r\n def soru_ekle(self,message_id,count_a,count_b,count_c,count_d,count_e): \r\n self.soru.append([message_id, count_a, count_b, count_c, count_d, count_e])\r\n\r\nsoru = Soru() # Soru sayimi icin nesne turetildi\r\n\r\n@bot.event\r\nasync def on_ready():\r\n print('bot is ready')\r\n\r\n# log\r\n@bot.event\r\nasync def on_member_join(member):\r\n channel = bot.get_channel(717402901017788417)\r\n users = bot.users \r\n guild = member.guild\r\n lst = len(list(guild.members))\r\n\r\n await channel.send(f'{member.mention} sunucuya katıldı 🤙 Üye sayısı: {lst}')\r\n\r\n# log\r\n@bot.event\r\nasync def on_member_remove(member):\r\n channel = bot.get_channel(717402901017788417)\r\n users = bot.users \r\n guild = member.guild\r\n lst = len(list(guild.members))\r\n\r\n await channel.send(f'{member.mention} sunucudan ayrıldı 👋 Üye sayısı: {lst+2}')\r\n 
\r\n\r\n\r\n#sorular\r\n@bot.event\r\nasync def on_message(message):\r\n message_id=message.id\r\n if message.channel.id == 716895157194194984 or message.channel.id == 717708011602313276 or message.channel.id == 717746193186422825 :\r\n await message.add_reaction(\"🇦\")\r\n await message.add_reaction(\"🇧\")\r\n await message.add_reaction(\"🇨\")\r\n await message.add_reaction(\"🇩\")\r\n await message.add_reaction(\"🇪\")\r\n soru.soru_ekle(message_id,1,1,1,1,1)\r\n\r\n# toplu mesaj silme\r\n if message.content.startswith('sausil'):\r\n soru.soru_sifirla()\r\n if message.author.permissions_in(message.channel).manage_messages:\r\n args = message.content.split(' ')\r\n if len(args) == 2:\r\n if args[1].isdigit():\r\n count = int(args[1]) + 1\r\n deleted = await message.channel.purge(limit = count)\r\n await message.channel.send('{} mesaj silindi'.format(len(deleted)-1))\r\n\r\n\r\n\r\n# dogrulama\r\n@bot.event\r\nasync def on_raw_reaction_add(payload):\r\n message_id = payload.message_id\r\n channel_id = payload.channel_id\r\n channel = bot.get_channel(channel_id)\r\n message = await channel.fetch_message(message_id)\r\n soru.soru_secenekleri_say(message)\r\n durum=soru.soru_analiz(message_id)\r\n if durum[1] > VERILEN_OY_SINIRI and durum[2] > SORU_DOGRULUK_ORANI:\r\n await message.add_reaction(\"✅\")\r\n if message_id == 717786402728837120:\r\n guild_id = payload.guild_id\r\n guild = discord.utils.find(lambda g : g.id == guild_id, bot.guilds)\r\n\r\n if payload.emoji.name == 'sau':\r\n print('sau role ok')\r\n role = discord.utils.get(guild.roles, name='Member')\r\n\r\n if role is not None:\r\n member = discord.utils.find(lambda m : m.id == payload.user_id, guild.members)\r\n if member is not None:\r\n await member.add_roles(role)\r\n print('done')\r\n\r\n\r\n\r\n# dogrulama\r\n@bot.event\r\nasync def on_raw_reaction_remove(payload):\r\n message_id = payload.message_id\r\n channel_id = payload.channel_id\r\n channel = bot.get_channel(channel_id)\r\n message = await channel.fetch_message(message_id)\r\n soru.soru_secenekleri_say(message)\r\n durum=soru.soru_analiz(message_id)\r\n if durum[1] > VERILEN_OY_SINIRI and durum[2] > SORU_DOGRULUK_ORANI:\r\n await message.add_reaction(\"✅\")\r\n if message_id == 717786402728837120:\r\n guild_id = payload.guild_id\r\n guild = discord.utils.find(lambda g : g.id == guild_id, bot.guilds)\r\n\r\n if payload.emoji.name == 'sau':\r\n print('sau role deleted')\r\n role = discord.utils.get(guild.roles, name='Member')\r\n\r\n if role is not None:\r\n member = discord.utils.find(lambda m : m.id == payload.user_id, guild.members)\r\n if member is not None:\r\n await member.remove_roles(role)\r\n print('delete done')\r\n\r\nbot.run('TOKEN')","sub_path":"real_bot.py","file_name":"real_bot.py","file_ext":"py","file_size_in_byte":6696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"8517032","text":"#!/usr/bin/python\n# -*- coding:UTF-8 -*-\nimport sys, os, re\n\nCHINESE = u\"\\u4e00-\\u9fa5\"\nKR = u\"\\uAC00-\\uD7AF\\u1100-\\u11FF\\u3130-\\u318F\"\n\nchar_range = KR\nregex = re.compile(u\"\\\"[^\\\"\\n]*[\"+char_range+\"]+[^\\\"\\n]*\\\"\")\nregex_xml = re.compile(u\">([^\\\"\\n]*[\"+char_range+\"]+[^\\\"\\n]*)<\")\n\ndef main(srcPath):\n segment = []\n for root, dirs, files in os.walk( srcPath ):\n for fn in files:\n p = os.path.join(root,fn)\n\n if not os.path.isfile(p):\n continue\n if not p.endswith(\".lua\") and not p.endswith(\".as\") and not p.endswith(\".xml\"):continue\n fp = open(p)\n content = 
fp.read()\n fp.close()\n try:\n content = content.decode(\"utf-8\")\n except:\n pass\n m = regex.findall(content)\n segment.extend(m)\n if p.endswith(\".xml\"):\n m = regex_xml.findall(content)\n segment.extend( [\"\\\"\"+i+\"\\\"\" for i in m] )\n segment = set(segment)\n print(\"\\r\\n\".join([a.encode(\"utf-8\") for a in segment]))\nif __name__ == '__main__':\n if len(sys.argv) <= 1:\n print(\"test.py path\")\n exit(1)\n main(sys.argv[1])\n","sub_path":"workspace/genLangSrc.py","file_name":"genLangSrc.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"159315067","text":"\"\"\"\n Tor2web\n Copyright (C) 2012 Hermes No Profit Association - GlobaLeaks Project\n\n This program is free software: you can redistribute it and/or modify\n it under the terms of the GNU Affero General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU Affero General Public License for more details.\n\n You should have received a copy of the GNU Affero General Public License\n along with this program. If not, see .\n\"\"\"\n\n\"\"\"\n\n:mod:`Tor2Web`\n=====================================================\n\n.. automodule:: Tor2Web\n :synopsis: [GLOBALEAKS_MODULE_DESCRIPTION]\n\n.. moduleauthor:: Arturo Filasto' \n.. moduleauthor:: Giovanni Pellerano \n\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\nfrom twisted.internet import reactor, ssl\nfrom twisted.internet.task import LoopingCall\nfrom twisted.internet.defer import Deferred\nfrom twisted.web.client import HTTPPageGetter, HTTPClientFactory, _parse\n\nimport gzip\nimport json\n\nfrom StringIO import StringIO\n\nclass HTTPCacheDownloader(HTTPPageGetter):\n \n def connectionMade(self, isCached=False):\n\n self.content_is_gzip = False\n\n if self.factory.url in self.factory.cache and 'response' in self.factory.cache[self.factory.url]:\n self.cache = self.factory.cache[self.factory.url]\n else:\n self.cache = None\n\n self.cachetemp = {}\n\n method = getattr(self.factory, 'method', 'GET')\n self.sendCommand(method, self.factory.path)\n if self.factory.scheme == 'http' and self.factory.port != 80:\n host = '%s:%s' % (self.factory.host, self.factory.port)\n elif self.factory.scheme == 'https' and self.factory.port != 443:\n host = '%s:%s' % (self.factory.host, self.factory.port)\n else:\n host = self.factory.host\n\n self.sendHeader('host', self.factory.headers.get('host', host))\n self.sendHeader('user-agent', self.factory.agent)\n self.sendHeader('accept-encoding', 'gzip')\n\n if self.cache and 'if-modified-since' in self.cache:\n self.sendHeader('if-modified-since', self.cache['if-modified-since'])\n\n data = getattr(self.factory, 'postdata', None)\n if data is not None:\n self.sendHeader('content-length', str(len(data)))\n\n cookieData = []\n for (key, value) in self.factory.headers.items():\n if key.lower() not in self._specialHeaders:\n # we calculated it on our own\n self.sendHeader(key, value)\n if key.lower() == 'cookie':\n cookieData.append(value)\n for cookie, cookval in self.factory.cookies.items():\n cookieData.append('%s=%s' % (cookie, cookval))\n if cookieData:\n self.sendHeader('cookie', '; '.join(cookieData))\n\n self.endHeaders()\n self.headers = {}\n\n if data is not None:\n 
self.transport.write(data)\n\n def handleHeader(self, key, value):\n key = key.lower()\n\n if key == 'date' or key == 'last-modified':\n self.cachetemp[key] = value\n\n if key == 'content-encoding' and value == 'gzip':\n self.content_is_gzip = True\n \n HTTPPageGetter.handleHeader(self, key, value)\n\n def handleResponse(self, response):\n if self.content_is_gzip:\n c_f = StringIO(response)\n response = gzip.GzipFile(fileobj=c_f).read()\n\n self.cachetemp['response'] = response\n self.factory.cache[self.factory.url] = self.cachetemp\n HTTPPageGetter.handleResponse(self, response)\n\n def handleStatus(self, version, status, message):\n HTTPPageGetter.handleStatus(self, version, status, message)\n \n def handleStatus_304(self):\n # content not modified\n pass\n\nclass HTTPClientCacheFactory(HTTPClientFactory):\n protocol = HTTPCacheDownloader\n cache = {}\n\n def __init__(self, url, method='GET', postdata=None, headers=None,\n agent=\"Tor2Web (https://github.com/globaleaks/tor2web-3.0)\", timeout=0, cookies=None,\n followRedirect=1):\n\n headers = {}\n\n if url in self.cache:\n if 'last-modified' in self.cache[url]:\n headers['if-modified-since'] = self.cache[url]['last-modified']\n elif 'date' in self.cache[url]:\n headers['if-modified-since'] = self.cache[url]['date']\n\n HTTPClientFactory.__init__(self, url=url, method=method,\n postdata=postdata, headers=headers, agent=agent,\n timeout=timeout, cookies=cookies, followRedirect=followRedirect)\n self.deferred = Deferred()\n\ndef getPageCached(url, contextFactory=None, *args, **kwargs):\n \"\"\"download a web page as a string, keep a cache of already downloaded pages\n\n Download a page. Return a deferred, which will callback with a\n page (as a string) or errback with a description of the error.\n\n See HTTPClientCacheFactory to see what extra args can be passed.\n \"\"\" \n scheme, host, port, path = _parse(url)\n factory = HTTPClientCacheFactory(url, *args, **kwargs)\n if scheme == 'https':\n if contextFactory is None:\n contextFactory = ssl.ClientContextFactory()\n reactor.connectSSL(host, port, factory, contextFactory)\n else:\n reactor.connectTCP(host, port, factory)\n\n return factory.deferred\n\nclass torExitNodeList(set):\n def __init__(self, refreshPeriod):\n set.__init__(self)\n self.lc = LoopingCall(self.update)\n self.lc.start(refreshPeriod)\n\n def processData(self, data, d):\n if(len(data) != 0):\n try:\n data = json.loads(data)\n except:\n d.callback(False)\n\n self.clear()\n for relay in data['relays']:\n for ip in relay['a']:\n self.add(ip)\n\n d.callback(True)\n\n def handleError(self, error, d):\n d.errback()\n\n def update(self):\n update_finished = Deferred()\n pageFetchedDeferred = getPageCached(\"https://onionoo.torproject.org/summary?type=relay\")\n pageFetchedDeferred.addCallback(self.processData, update_finished)\n pageFetchedDeferred.addErrback(self.handleError, update_finished)\n return update_finished\n","sub_path":"torExitNodes.py","file_name":"torExitNodes.py","file_ext":"py","file_size_in_byte":6613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"335771258","text":"from typing import Optional\n\nfrom sklearn.preprocessing import normalize\n\nfrom ..definitions import InteractionMatrix\nfrom ._knn import P3alphaComputer\nfrom .base import BaseRecommenderWithThreadingSupport, BaseSimilarityRecommender\n\n\nclass P3alphaRecommender(\n BaseSimilarityRecommender, BaseRecommenderWithThreadingSupport\n):\n def __init__(\n self,\n X_all: InteractionMatrix,\n 
alpha: float = 1,\n top_k: Optional[int] = None,\n normalize_weight: bool = False,\n n_thread: Optional[int] = 1,\n ):\n super().__init__(X_all, n_thread=n_thread)\n self.alpha = alpha\n self.top_k = top_k\n self.normalize_weight = normalize_weight\n\n def _learn(self) -> None:\n computer = P3alphaComputer(\n self.X_all.T,\n alpha=self.alpha,\n n_thread=self.n_thread,\n )\n top_k = self.X_all.shape[1] if self.top_k is None else self.top_k\n self.W_ = computer.compute_W(self.X_all.T, top_k)\n if self.normalize_weight:\n self.W_ = normalize(self.W_, norm=\"l1\", axis=1)\n","sub_path":"irspack/recommenders/p3.py","file_name":"p3.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"113889577","text":"# %%\nimport cv2\nimport numpy as np\nimport dlib\nfrom imutils import face_utils\n\nimport imutils\nimport deepgaze\nfrom deepgaze.head_pose_estimation import CnnHeadPoseEstimator\nimport time\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' \nimport tensorflow as tf\ntf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)\n# os.system(\"clear\")\n\n# %%\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor('shape_predictor_68_face_landmarks.dat')\n\n# %%\nsess = tf.Session()\nhead_pose_estimator = CnnHeadPoseEstimator(sess)\nhead_pose_estimator.load_pitch_variables('pitch.tf')\nhead_pose_estimator.load_yaw_variables('yaw.tf')\nhead_pose_estimator.load_roll_variables('roll.tf')\n\n# %%\ncap = cv2.VideoCapture(0)\n\nwhile(True):\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n (fh, fw) = frame.shape[:2]\n\n if not ret:\n break\n\n faces = detector(gray, 0)\n\n for face in faces:\n (x, y, w, h) = face_utils.rect_to_bb(face)\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n image = frame[y:y + h, x:x + w]\n\n try:\n image = cv2.resize(image, (480,480))\n except:\n print('Exception')\n continue\n\n pitch = head_pose_estimator.return_pitch(image,radians=True)[0][0][0]\n yaw = head_pose_estimator.return_yaw(image,radians=True)[0][0][0]\n roll = head_pose_estimator.return_roll(image,radians=True)[0][0][0]\n \n print('data points ', 'pitch ', pitch, ' roll ', roll, ' yaw ', yaw)\n \n FONT = cv2.FONT_HERSHEY_DUPLEX\n cv2.putText(frame, 'pitch = {:.2f}'.format(pitch), (20,25), FONT, 0.7, (0,255,0), 1)\n cv2.putText(frame, 'roll = {:.2f}'.format(roll), (20,50), FONT, 0.7, (0,255,0), 1)\n cv2.putText(frame, 'yaw = {:.2f}'.format(yaw), (20,75), FONT, 0.7, (0,255,0), 1)\n\n if pitch < -0.15 or pitch > 0:\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)\n if yaw < -0.5 or yaw > 0.5:\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)\n\n cv2.imshow('pose', frame) \n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# %%\ncap.release()\ncv2.destroyAllWindows() \n\n# %%\n","sub_path":"pose.py","file_name":"pose.py","file_ext":"py","file_size_in_byte":2264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"19375679","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 4 15:36:03 2018\n\n@author: HuQiang\n\"\"\"\nimport numpy as np\nfrom sklearn.metrics.pairwise import euclidean_distances\nimport pandas as pd\nimport time\n\ndef DSPFP(A_ori,B_ori,k):\n '''\n input:\n A,B: np.2darray, adjacent matrix\n output:\n [0]: np.1darray, matching results\n [1]: float, runtime\n '''\n A = A_ori/np.max(A_ori)*k\n B = B_ori/np.max(B_ori)*k\n 
\n start = time.clock() #timestamp, since \n n = A.shape[0]\n E = np.ones((n,1))\n I = np.identity(n) \n \n #Initialize X,Y \n X = np.dot(E,E.T)/n**2\n Y = np.zeros((n,n))\n XX = np.ones((n,n))*float('inf')\n YY = np.ones((n,n))*float('inf')\n \n #Iteratuin\n alpha = 0.5\n I1 = 1000000\n I2 = 1000000\n t1 = 0.0000000001\n t2 = 0.0000000001\n\n\n countX = 0\n while((np.max(np.abs(X - XX)) > t1) and (countX < I1)):\n countX += 1\n XX = X.copy()\n \n Y = np.dot(np.dot(A,X),B)\n countY = 0\n while((np.max(np.abs(Y - YY)) > t2) and (countY < I2)):\n countY += 1\n YY = Y.copy()\n Y = Y + np.dot(I/n + np.dot(np.dot(E.T,Y),E)*I/n**2 - Y/n,np.dot(E,E.T)) - np.dot(np.dot(E,E.T),Y)/n\n Y = (Y + np.abs(Y))/2\n \n X = (1-alpha)*X + alpha*Y\n \n P = X\n XX = X.copy()\n\n M = np.zeros(n)\n for count in range(n):\n mindex = np.argmax(XX)\n i = int(np.floor(mindex/n))\n j = int(mindex - i*n)\n M[i] = j\n XX[i,:] = -1\n XX[:,j] = -1\n \n end = time.clock()\n \n return P, M, end-start\n\ndef PRG(real_order, numrow, numcol):\n \n L = numrow * numcol\n PR = np.zeros((4, L))\n \n tmpMat = np.array(real_order).reshape((numrow, numcol))\n PR[0,:] = tmpMat.ravel()\n \n #交换上下\n for i in range(numrow//2):\n tmpMat[[i,numrow-1-i],:] = tmpMat[[numrow-1-i,i],:]\n PR[1,:] = tmpMat.ravel()\n \n #交换左右\n for i in range(numcol//2):\n tmpMat[:,[i,numcol-1-i]] = tmpMat[:,[numcol-1-i,i]]\n PR[2,:] = tmpMat.ravel()\n \n #交换上下\n for i in range(numrow//2):\n tmpMat[[i,numrow-1-i],:] = tmpMat[[numrow-1-i,i],:]\n PR[3,:] = tmpMat.ravel()\n \n return PR\n \n \n\ndef matchingAccuracy(M, real_order):\n \n Acc = np.zeros(4)\n print(\"Have you updated the numrow and numcol?\")\n PR = PRG(real_order, 4, 4)\n for i in range(4):\n Acc[i] = np.sum(M==PR[i,:])\n \n \n return np.max(Acc)\n \n\n\n\ndef simClusterColumn(df,xlist,ylist):\n subdf = df.query('x in '+str(xlist)+'& y in '+str(ylist))\n subdf_pivot = pd.pivot_table(subdf,index='BSSID',values='RSSI')\n pivot_dict = subdf_pivot.to_dict()\n return pivot_dict['RSSI']\n\ndef simClusterColumns(df):\n '''\n 12点实验\n xlists = [[50,150,250],[250,350,450],[450,550,650],[650,750,850]]\n ylists = [[520,620,720],[320,420,520],[120,220,320]]\n '''\n xlists = [[50+i*100,150+i*100] for i in range(9)]\n ylists = [[20+i*100,120+i*100] for i in range(7)]\n S_LOD = []\n for ylist in ylists:\n for xlist in xlists:\n S_LOD.extend([simClusterColumn(df,xlist,ylist)])\n \n return S_LOD\n\ndef intersection(S_LOD):\n '''\n input:\n S_LOD: list of dict, each dict denotes a RP\n output:\n APSet: intersection of AP for all RP in S\n '''\n APSetList = [set(x.keys()) for x in S_LOD]\n APSet = APSetList[0]\n for apset in APSetList:\n APSet = APSet&apset\n return APSet\n\ndef setToArray(S_LOD,APSet):\n #build AP Matrix according to Set from dict_S\n Array=[]\n for i in range(len(S_LOD)):\n Array.append([S_LOD[i][x] for x in APSet])\n \n Array=np.array(Array)\n \n return Array\n\n\ndef rowToMatrix(row):\n M = np.zeros((8,10))\n \n for i in range(10):\n M[:,9-i] = row[8*i:8*i+8]\n \n M[[6,7],:] = M[[7,6],:]\n \n for i in range(4):\n M[:,[4-i,3-i]] = M[:,[3-i,4-i]]\n \n return M\n\n\n###generate physical coordinate\ndef PCG(area_size, numrow, numcol):\n (x_length, y_length) = area_size\n x_stride = x_length /(2*numcol)\n y_stride = y_length / (2*numrow)\n x = [x_stride+i*2*x_stride for i in range(numcol)]\n y = [y_stride+i*2*y_stride for i in range(numrow)]\n \n pcoord = []\n for i in x:\n for j in y:\n pcoord.append([i,j])\n \n return np.array(pcoord)\n \n 
","sub_path":"LAB_DJ/GraphMatching.py","file_name":"GraphMatching.py","file_ext":"py","file_size_in_byte":4385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"232827735","text":"\"\"\"\nHelper classes for reading cached objects from Git's Object Database.\n\"\"\"\n\nimport subprocess\nimport hashlib\nimport re\nfrom typing import TypeVar, Type, Dict, Union, Sequence, Optional, Mapping, Tuple, cast\nfrom enum import Enum\n\n\nclass MissingObject(Exception):\n def __init__(self, ref: str):\n Exception.__init__(self, f\"Object {ref} does not exist\")\n\n\nclass Oid(bytes):\n def __new__(cls, b: bytes) -> 'Oid':\n if len(b) != 20:\n raise ValueError(\"Expected 160-bit SHA1 hash\")\n return super().__new__(cls, b) # type: ignore\n\n @classmethod\n def fromhex(cls, hex: str) -> 'Oid':\n return Oid(bytes.fromhex(hex))\n\n @classmethod\n def null(cls) -> 'Oid':\n return cls(b'\\0' * 20)\n\n @classmethod\n def for_object(cls, tag: str, body: bytes):\n m = hashlib.sha1()\n m.update(tag.encode('ascii') + b' ' + str(len(body)).encode('ascii') + b'\\0' + body)\n return cls(m.digest())\n\n def __repr__(self) -> str:\n return self.hex()\n\n def __str__(self) -> str:\n return self.hex()\n\n\nclass Signature:\n name: bytes\n email: bytes\n timestamp: bytes\n offset: bytes\n\n __slots__ = ('name', 'email', 'timestamp', 'offset')\n\n _default_author: Optional['Signature'] = None\n _default_committer: Optional['Signature'] = None\n\n sig_re = re.compile(rb'''\n (?P[^<>]+)<(?P[^<>]+)>[ ]\n (?P[0-9]+)\n (?:[ ](?P[\\+\\-][0-9]+))?\n ''', re.X)\n\n @classmethod\n def parse(cls, spec: bytes) -> 'Signature':\n match = cls.sig_re.fullmatch(spec)\n assert match is not None, \"Invalid Signature\"\n\n return Signature(\n match.group('name').strip(),\n match.group('email').strip(),\n match.group('timestamp').strip(),\n match.group('offset').strip(),\n )\n\n @classmethod\n def default_author(cls) -> 'Signature':\n if Signature._default_author is None:\n rv = subprocess.run(['git', 'var', 'GIT_AUTHOR_IDENT'],\n check=True, stdout=subprocess.PIPE)\n Signature._default_author = Signature.parse(rv.stdout.rstrip())\n return Signature._default_author\n\n @classmethod\n def default_committer(cls) -> 'Signature':\n if Signature._default_committer is None:\n rv = subprocess.run(['git', 'var', 'GIT_COMMITTER_IDENT'],\n check=True, stdout=subprocess.PIPE)\n Signature._default_committer = Signature.parse(rv.stdout.rstrip())\n return Signature._default_committer\n\n def __init__(self, name: bytes, email: bytes, timestamp: bytes, offset: bytes):\n self.name = name\n self.email = email\n self.timestamp = timestamp\n self.offset = offset\n\n def raw(self) -> bytes:\n return self.name + b' <' + self.email + b'> ' + self.timestamp + b' ' + self.offset\n\n def __repr__(self):\n return f\"\"\n\n\nGitObjT = TypeVar('GitObjT', bound='GitObj')\n\n\nclass GitObj:\n tag: str\n body: bytes\n oid: Oid\n persisted: bool\n\n __slots__ = ('tag', 'body', 'persisted', 'oid')\n\n o_cache: Dict[Oid, 'GitObj'] = {}\n catfile: Optional[subprocess.Popen] = None\n\n def __new__(cls, body: bytes):\n tag = cls.__name__.lower()\n oid = Oid.for_object(tag, body)\n if oid in GitObj.o_cache:\n return GitObj.o_cache[oid]\n\n self = super().__new__(cls)\n self.tag = tag\n self.body = body\n self.oid = oid\n self.persisted = False\n GitObj.o_cache[oid] = self\n self.parse_body()\n return self\n\n @classmethod\n def get(cls: Type[GitObjT], ref: Union[str, Oid]) -> GitObjT:\n # If we have an OID, check the cache 
first, otherwise, convert it to a\n # hex string for passing to cat-file.\n if isinstance(ref, Oid):\n if ref in GitObj.o_cache:\n obj: GitObj = GitObj.o_cache[ref]\n if not isinstance(obj, cls):\n raise ValueError(f\"Unexpected {type(obj).__name__} \"\n f\"{obj.oid} (expected {cls.__name__})\")\n return obj\n\n ref = ref.hex()\n\n # Spawn cat-file subprocess if it isn't running already.\n if GitObj.catfile is None:\n GitObj.catfile = subprocess.Popen(['git', 'cat-file', '--batch'],\n bufsize=-1,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n catfile = GitObj.catfile\n\n # Write out an object descriptor.\n catfile.stdin.write(ref.encode('ascii') + b'\\n')\n catfile.stdin.flush()\n\n # Read in the response.\n response = catfile.stdout.readline().split()\n if len(response) < 3:\n assert response[1] == b'missing'\n raise MissingObject(ref)\n\n oid_hex, kind, size = response\n oid = Oid.fromhex(oid_hex.decode('ascii'))\n body = catfile.stdout.read(int(size) + 1)[:-1]\n assert int(size) == len(body), \"bad size?\"\n\n # Create a corresponding git object. This will re-use the item in the\n # cache, if found, and add the item to the cache otherwise.\n if kind == b'commit':\n obj = Commit(body)\n elif kind == b'tree':\n obj = Tree(body)\n elif kind == b'blob':\n obj = Blob(body)\n else:\n raise ValueError(f\"Unknown object kind: {kind}\")\n\n obj.persisted = True\n assert obj.oid == oid, \"miscomputed oid\"\n if not isinstance(obj, cls):\n raise ValueError(f\"Unexpected {type(obj).__name__} \"\n f\"{obj.oid} (expected {cls.__name__})\")\n return obj\n\n def persist(self):\n if self.persisted:\n return\n\n self.persist_deps()\n new_oid = subprocess.run(['git', 'hash-object', '--no-filters',\n '-t', self.tag, '-w', '--stdin'],\n input=self.body, check=True,\n stdout=subprocess.PIPE).stdout.rstrip()\n assert Oid.fromhex(new_oid.decode('ascii')) == self.oid\n self.persisted = True\n\n def persist_deps(self): ...\n\n def parse_body(self): ...\n\n def __eq__(self, other: object) -> bool:\n if isinstance(other, GitObj):\n return self.oid == other.oid\n return False\n\n\nclass Commit(GitObj):\n tree_oid: Oid\n parent_oids: Sequence[Oid]\n author: Signature\n committer: Signature\n message: bytes\n\n __slots__ = ('tree_oid', 'parent_oids', 'author', 'committer', 'message')\n\n @classmethod\n def create(cls,\n tree_oid: Oid,\n parent_oids: Sequence[Oid],\n message: bytes,\n author: Optional[Signature] = None,\n committer: Optional[Signature] = None) -> 'Commit':\n \"\"\"Directly create an in-memory commit object, without persisting it.\n If a commit object with these properties already exists, it will be\n returned instead.\"\"\"\n if author is None:\n author = Signature.default_author()\n if committer is None:\n committer = Signature.default_committer()\n\n body = b'tree ' + tree_oid.hex().encode('ascii') + b'\\n'\n for parent in parent_oids:\n body += b'parent ' + parent.hex().encode('ascii') + b'\\n'\n body += b'author ' + author.raw() + b'\\n'\n body += b'committer ' + committer.raw() + b'\\n'\n body += b'\\n'\n body += message\n return Commit(body)\n\n @classmethod\n def head(cls) -> 'Commit':\n return Commit.get('HEAD')\n\n @classmethod\n def from_index(cls, message: bytes = b'') -> 'Commit':\n return Commit.create(Tree.from_index().oid, [Commit.head().oid], message)\n\n def parse_body(self):\n # Split the header from the core commit message.\n hdrs, self.message = self.body.split(b'\\n\\n', maxsplit=1)\n\n # Parse the header to populate header metadata fields.\n self.parent_oids = []\n for hdr 
in re.split(br'\\n(?! )', hdrs):\n # Parse out the key-value pairs from the header, handling\n # continuation lines.\n key, value = hdr.split(maxsplit=1)\n value = value.replace(b'\\n ', b'\\n')\n\n if key == b'tree':\n self.tree_oid = Oid.fromhex(value.decode())\n elif key == b'parent':\n self.parent_oids.append(Oid.fromhex(value.decode()))\n elif key == b'author':\n self.author = Signature.parse(value)\n elif key == b'committer':\n self.committer = Signature.parse(value)\n else:\n raise ValueError('Unknown commit header: ' + key.decode())\n\n def tree(self) -> 'Tree':\n return Tree.get(self.tree_oid)\n\n def parents(self) -> Sequence['Commit']:\n return [Commit.get(parent) for parent in self.parent_oids]\n\n def parent(self) -> 'Commit':\n if len(self.parents()) != 1:\n raise ValueError(\n f\"Commit {self.oid} has {len(self.parents())} parents\")\n return self.parents()[0]\n\n def rebase(self, parent: 'Commit') -> 'Commit':\n from .merge import rebase\n return rebase(self, parent)\n\n def update(self,\n tree: Optional['Tree'] = None,\n parents: Optional[Sequence['Commit']] = None,\n message: Optional[bytes] = None,\n author: Optional[Signature] = None) -> 'Commit':\n # Compute parameters used to create the new object.\n tree_oid = tree.oid if tree else self.tree_oid\n parent_oids = [p.oid for p in parents] \\\n if parents else self.parent_oids\n if message is None:\n message = self.message\n if author is None:\n author = self.author\n\n # Check if the commit was unchanged.\n unchanged = (tree_oid == self.tree_oid and\n parent_oids == self.parent_oids and\n message == self.message and\n author == self.author)\n if unchanged:\n return self\n return Commit.create(tree_oid, parent_oids, message, author)\n\n def update_ref(self, ref: str, reason: str, current: Optional[Oid]):\n self.persist()\n args = ['git', 'update-ref', '-m', reason, ref, str(self.oid)]\n if current is not None:\n args.append(str(current))\n subprocess.run(args, check=True)\n\n def persist_deps(self):\n self.tree().persist()\n for parent in self.parents():\n parent.persist()\n\n def __repr__(self) -> str:\n return (f\"\")\n\n\nclass Mode(Enum):\n GITLINK = b'160000'\n SYMLINK = b'120000'\n DIR = b'40000'\n REGULAR = b'100644'\n EXEC = b'100755'\n\n def is_file(self) -> bool:\n return self in (Mode.REGULAR, Mode.EXEC)\n\n\nclass Entry(object):\n mode: Mode\n oid: Oid\n\n __slots__ = ('mode', 'oid')\n\n def __init__(self, mode: Mode, oid: Oid):\n self.mode = mode\n self.oid = oid\n\n def blob(self) -> 'Blob':\n if self.mode in (Mode.REGULAR, Mode.EXEC):\n return Blob.get(self.oid)\n return Blob.empty()\n\n def symlink(self) -> bytes:\n if self.mode == Mode.SYMLINK:\n return Blob.get(self.oid).body\n return b''\n\n def tree(self) -> 'Tree':\n if self.mode == Mode.DIR:\n return Tree.get(self.oid)\n return Tree.empty()\n\n def persist(self):\n if self.mode != Mode.GITLINK:\n GitObj.get(self.oid).persist()\n\n def __repr__(self):\n return f\"\"\n\n def __eq__(self, other: object) -> bool:\n if isinstance(other, Entry):\n return self.mode == other.mode and self.oid == other.oid\n return False\n\n\nclass Tree(GitObj):\n entries: Dict[bytes, Entry]\n\n __slots__ = ('entries',)\n\n @classmethod\n def create(cls, entries: Mapping[bytes, Entry]) -> 'Tree':\n def entry_key(pair: Tuple[bytes, Entry]) -> bytes:\n name, entry = pair\n # Directories are sorted in the tree listing as though they have a\n # trailing slash in their name.\n if entry.mode == Mode.DIR:\n return name + b'/'\n return name\n\n body = b''\n for name, entry in 
sorted(entries.items(), key=entry_key):\n            body += cast(bytes, entry.mode.value) + b' ' + name + b'\\0' + entry.oid\n        return Tree(body)\n\n    @classmethod\n    def empty(cls) -> 'Tree':\n        return Tree(b'')\n\n    @classmethod\n    def from_index(cls) -> 'Tree':\n        written = subprocess.run(['git', 'write-tree'],\n                                 check=True,\n                                 stdout=subprocess.PIPE)\n        oid = Oid.fromhex(written.stdout.rstrip().decode())\n        return Tree.get(oid)\n\n    def parse_body(self):\n        self.entries = {}\n        rest = self.body\n        while len(rest) > 0:\n            mode, rest = rest.split(b' ', maxsplit=1)\n            name, rest = rest.split(b'\\0', maxsplit=1)\n            entry_oid = Oid(rest[:20])\n            rest = rest[20:]\n            self.entries[name] = Entry(Mode(mode), entry_oid)\n\n    def persist_deps(self):\n        for entry in self.entries.values():\n            entry.persist()\n\n    def __repr__(self) -> str:\n        return f\"\"\n\n\nclass Blob(GitObj):\n    __slots__ = ()\n\n    @classmethod\n    def empty(cls) -> 'Blob':\n        return Blob(b'')\n\n    def __repr__(self) -> str:\n        return f\"\"\n","sub_path":"zipfix/odb.py","file_name":"odb.py","file_ext":"py","file_size_in_byte":13987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"277999731","text":"class employee:\n    def __init__(self,name,age,salary,code):\n        self.name=name\n        self.age=age\n        self.salary=salary\n        self.code=code\n\n    def printdata(self):\n        print(\"employee name :\",self.name)\n        print(\"employee age :\",self.age)\n        print(\"employee salary:\",self.salary)\n        print(\"employee code :\",self.code)\n\n\nn=input(\"enter the name :\")\na=int(input(\"enter age :\"))\ns=int(input(\"enter salary :\"))\nc=int(input(\"enter code :\"))\n\ne=employee(n,a,s,c)\n\ne.printdata()","sub_path":"python/employee.py","file_name":"employee.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"59159930","text":"ucnum = int(input())\nans = list()\nfor i in range(ucnum):\n    num = int(input())\n    strs = input().split()\n    lists = [int(k) for k in strs]\n    leader = list()\n    while len(lists)>0:\n        if lists[0]==max(lists):\n            leader.append(lists[0])\n        lists.pop(0)\n    strss = [str(k) for k in leader]\n    ans.append(' '.join(strss))\nfor i in ans:\n    print(i)","sub_path":"Code/CodeRecords/2281/60716/270192.py","file_name":"270192.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"348425016","text":"#Exercício Python 050: Desenvolva um programa que leia seis números\r\n#inteiros e mostre a soma apenas daqueles que forem pares.\r\n#Se o valor digitado for ímpar, desconsidere-o.\r\nprint('\\033[1;32m--=DESAFIO 50=--\\033[m')\r\ns = 0\r\nfor c in range(1,7):\r\n    número = int(input('Digite o {}º número: '.format(c)))\r\n    if número % 2 == 0:\r\n        s += número\r\nprint('A soma desses números, desconsiderando os ímpares é: {}.'.format(s))\r\n","sub_path":"Mundo 02/ex050.py","file_name":"ex050.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"626974663","text":"import numpy as np\nimport pandas as pd\nfrom dfply import *\n#adjust display to get a better view of columns and rows\npd.options.display.max_rows = 100\npd.options.display.max_columns = 500\nprint(pd.options.display.max_rows)\nprint(pd.options.display.max_columns)\n\ntrain_0 = pd.read_csv(\"train.csv\")\ntest_0 = pd.read_csv(\"test.csv\")\n\n#briefly checking the training data\ntrain_0.head()\n\n#check data 
types\ntypes=np.array(train_0.dtypes)\ntypes_df=pd.DataFrame(train_0.dtypes)\ntypes_df.columns=['types']\ntypes_df_count=pd.DataFrame(types_df['types'].value_counts())\ntypes_df_count.columns=['counts']\ntypes_df_count\n\nlen(train_0.columns)\n\ntrain_0=train_0.set_index(['Id'])\ntest_0= test_0.set_index(['Id'])\n\ntrain_0_X = train_0.iloc[:, 1:79]\ntrain_0_y = train_0['SalePrice']\ntest_0_X =test_0.copy()\n\nall=pd.concat([train_0_X,test_0_X])\n\nall.fillna(0,inplace=True)\n\ncategorical = pd.DataFrame()\nnumeric = pd.DataFrame()\nfor k in all.columns:\n dtype_k = all[k].dtype\n if dtype_k == 'object':\n categorical[k] = all[k]\n else:\n numeric[k] = all[k]\n\ncategorical.head()\nprint(categorical.columns)\nprint(categorical.shape)\nnumeric.head()\n\nimport sklearn as sk\nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import OneHotEncoder,PolynomialFeatures\nfrom sklearn import preprocessing\nColumns = ['Alley', 'BldgType', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1',\n 'BsmtFinType2', 'BsmtQual', 'CentralAir', 'Condition1', 'Condition2',\n 'Electrical', 'ExterCond', 'ExterQual', 'Exterior1st', 'Exterior2nd',\n 'Fence', 'FireplaceQu', 'Foundation', 'Functional', 'GarageCond',\n 'GarageFinish', 'GarageQual', 'GarageType', 'Heating', 'HeatingQC',\n 'HouseStyle', 'KitchenQual', 'LandContour', 'LandSlope', 'LotConfig',\n 'LotShape', 'MSZoning', 'MasVnrType', 'MiscFeature', 'Neighborhood',\n 'PavedDrive', 'PoolQC', 'RoofMatl', 'RoofStyle', 'SaleCondition',\n 'SaleType', 'Street', 'Utilities']\nfor col in Columns:\n labelencoder = preprocessing.LabelEncoder()\n labelencoder.fit(list(categorical[col].values))\n categorical[col] = labelencoder.transform(list(categorical[col].values))\n \nprint(\"Shape of categorical data: {}\".format(categorical.shape))\nprint(categorical.head())\n\nfrom sklearn.preprocessing import OneHotEncoder, PolynomialFeatures\nonehotencoder = OneHotEncoder(sparse=False,dtype=np.int)\ncategorical = pd.DataFrame(onehotencoder.fit_transform(categorical), index = categorical.index)\n\nprint(categorical.head())\n\npf = PolynomialFeatures(degree=2,interaction_only=True,include_bias=False)\nnumeric = pd.DataFrame(pf.fit_transform(numeric), index = numeric.index)\n\nall = pd.concat([categorical, numeric], axis=1)\nall.columns = np.arange(all.shape[1])\nall.head()\n\ntrain_1_X=all.loc[train_0_X.index,:]\ntest_1_X=all.loc[test_0_X.index,:]\ntrain_1_y=train_0_y.copy()\n\nprint(train_1_X.shape)\nprint(test_1_X.shape)\n\nimport xgboost as xgb\nD_train = xgb.DMatrix(train_1_X, label=train_1_y)\nD_test = xgb.DMatrix(test_1_X)\n\nparams = {\n 'objective': 'reg:gamma',\n 'eta': 0.002,\n 'seed': 0,\n 'missing': -999,\n 'silent' : 1,\n 'gamma' : 0.02,\n 'subsample' : 0.5,\n 'alpha' : 0.045,\n 'max_depth':4,\n 'min_child_weight':1\n }\nnum_rounds=20000\n\n#attention: The eta parameter is importaant! It gives us a chance to prevent overfitting.\n#objective = the loss function being used\n\nmodel = xgb.train(params, D_train, num_rounds)\n\npreds = pd.Series(model.predict(D_test),index=test_1_X.index)\nresults = pd.DataFrame(preds,columns=['SalePrice'])\nresults.to_csv(\"results.csv\", index=False)\nprint(pd.read_csv(\"results.csv\"))","sub_path":"House_Prices.py","file_name":"House_Prices.py","file_ext":"py","file_size_in_byte":3694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"377257927","text":"# This file is part of Tryton. 
The COPYRIGHT file at the top level of\n# this repository contains the full copyright notices and license terms.\nfrom trytond.model import fields\nfrom trytond.pool import PoolMeta, Pool\nfrom trytond.pyson import Bool, Eval\n\n\nclass TaxCodeTemplate(metaclass=PoolMeta):\n __name__ = 'account.tax.code.template'\n\n aeat_report = fields.Selection([\n (None, ''),\n ('111', \"Model 111\"),\n ('115', \"Model 115\"),\n ('303', \"Model 303\"),\n ], \"AEAT Report\")\n\n def _get_tax_code_value(self, code=None):\n value = super(TaxCodeTemplate, self)._get_tax_code_value(code=code)\n value['aeat_report'] = self.aeat_report\n return value\n\n\nclass TaxCode(metaclass=PoolMeta):\n __name__ = 'account.tax.code'\n\n aeat_report = fields.Selection([\n (None, ''),\n ('111', \"Model 111\"),\n ('115', \"Model 115\"),\n ('303', \"Model 303\"),\n ], \"AEAT Report\",\n states={\n 'readonly': (Bool(Eval('template', -1))\n & ~Eval('template_override', False)),\n },\n depends=['template', 'template_override'])\n\n\nclass TaxTemplate(metaclass=PoolMeta):\n __name__ = 'account.tax.template'\n\n es_vat_list_code = fields.Char(\"Spanish VAT List Code\")\n es_ec_purchases_list_code = fields.Char(\"Spanish EC Purchase List Code\")\n es_reported_with = fields.Many2One('account.tax.template', \"Reported With\")\n\n def _get_tax_value(self, tax=None):\n value = super()._get_tax_value(tax=tax)\n for name in ['es_vat_list_code', 'es_ec_purchases_list_code']:\n if not tax or getattr(tax, name) != getattr(self, name):\n value[name] = getattr(self, name)\n return value\n\n @classmethod\n def create_tax(\n cls, account_id, company_id, template2account, template2tax=None):\n pool = Pool()\n Tax = pool.get('account.tax')\n super().create_tax(\n account_id, company_id, template2account, template2tax)\n\n to_write = []\n\n for template_id, tax_id in template2tax.items():\n template = cls(template_id)\n if not template.es_reported_with:\n continue\n reported_with = template2tax[template.es_reported_with.id]\n to_write.append([Tax(tax_id)])\n to_write.append({\n 'es_reported_with': reported_with,\n })\n\n if to_write:\n Tax.write(*to_write)\n\n\nclass Tax(metaclass=PoolMeta):\n __name__ = 'account.tax'\n\n es_vat_list_code = fields.Char(\"Spanish VAT List Code\",\n states={\n 'readonly': (Bool(Eval('template', -1))\n & ~Eval('template_override', False)),\n },\n depends=['template', 'template_override'])\n es_ec_purchases_list_code = fields.Char(\"Spanish EC Purchases List Code\",\n states={\n 'readonly': (Bool(Eval('template', -1))\n & ~Eval('template_override', False)),\n },\n depends=['template', 'template_override'])\n es_reported_with = fields.Many2One('account.tax', \"Reported with\",\n states={\n 'readonly': (Bool(Eval('template', -1))\n & ~Eval('template_override', False)),\n },\n depends=['template', 'template_override'])\n\n @classmethod\n def update_tax(cls, company_id, template2account, template2tax=None):\n super().update_tax(company_id, template2account, template2tax)\n\n to_write = []\n\n for template_id, tax_id in template2tax.items():\n tax = cls(tax_id)\n if not tax.template_override:\n values = {}\n reported_with = (tax.es_reported_with.id\n if tax.es_reported_with else None)\n if (tax.template.es_reported_with\n and reported_with != template2tax.get(\n tax.template.es_reported_with.id)):\n values['es_reported_with'] = template2tax.get(\n tax.template.es_reported_with.id)\n elif (not tax.template.es_reported_with\n and tax.es_reported_with):\n values['es_reported_with'] = None\n if values:\n 
to_write.append([tax])\n to_write.append(values)\n\n if to_write:\n cls.write(*to_write)\n","sub_path":"account_es/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":4452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"402187221","text":"from utils.dataset import get_datasets\nimport hparams as hp\nfrom models.fatchord_wavernn import Model\nfrom utils.generation import gen_testset\nfrom utils.paths import Paths\nimport argparse\n\nparser = argparse.ArgumentParser(description='Generate WaveRNN Samples')\nparser.add_argument('--batched', '-b', dest='batched', action='store_true')\nparser.add_argument('--unbatched', '-u', dest='batched', action='store_false')\nparser.add_argument('--samples', '-s', type=int, help='[int] number of samples to generate')\nparser.add_argument('--target', '-t', type=int, help='[int] number of samples in each batch index')\nparser.add_argument('--overlap', '-o', type=int, help='[int] number of crossover samples')\nparser.set_defaults(batched=hp.batched)\nparser.set_defaults(samples=hp.gen_at_checkpoint)\nparser.set_defaults(target=hp.target)\nparser.set_defaults(overlap=hp.overlap)\nargs = parser.parse_args()\n\nbatched = args.batched\nsamples = args.samples\ntarget = args.target\noverlap = args.overlap\n\nprint('\\nInitialising Model...\\n')\n\nmodel = Model(rnn_dims=hp.rnn_dims,\n fc_dims=hp.fc_dims,\n bits=hp.bits,\n pad=hp.pad,\n upsample_factors=hp.upsample_factors,\n feat_dims=hp.num_mels,\n compute_dims=hp.compute_dims,\n res_out_dims=hp.res_out_dims,\n res_blocks=hp.res_blocks,\n hop_length=hp.hop_length,\n sample_rate=hp.sample_rate).cuda()\n\npaths = Paths(hp.data_path, hp.model_id)\n\nmodel.restore(paths.latest_weights)\n\n_, test_set = get_datasets(paths.data)\n\ngen_testset(model, test_set, samples, batched, target, overlap, paths.output)\n\nprint('\\n\\nExiting...\\n')\n","sub_path":"generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"560576125","text":"import timeit\nimport random\n\ndef insertionSort(array):\n for index in range(1,len(array)):\n\n currentvalue = array[index]\n position = index\n\n while position>0 and array[position-1]>currentvalue:\n array[position]=array[position-1]\n position = position-1\n\n array[position]=currentvalue\n\narray = list(range(60000))\nrandom.shuffle(array)\n\ninsertionSort(array)\n\ntempo = timeit.timeit(\"insertionSort({})\".format(array), \\\nsetup= \"from __main__ import insertionSort\",number=1)\n\nprint(array)\nprint(tempo)\n","sub_path":"InsertionSort/time_insertion.py","file_name":"time_insertion.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"109575327","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2019/5/21\n\n@author: Robin\n\nKeep files in directory according to mode and criterion, and delete the remaining.\n\"\"\"\n\nimport os\nimport sys\nimport time\nimport random\nimport argparse\n\n\ndef main(work_dir, num, mode, criterion):\n \"\"\"\n main function\n :param work_dir: the directory to operate\n :param num: number of files to keep\n :param mode: judgement\n :param criterion: criterion to keep files\n :return: None\n \"\"\"\n fs = [f for f in os.listdir(work_dir) if os.path.isfile(os.path.join(work_dir, f))]\n\n if num > len(fs):\n raise Exception('Number out of range.')\n\n ks = {\n 
'name': None,\n        'time': lambda f: os.stat(os.path.join(work_dir, f)).st_mtime\n    }\n\n    rv = False if criterion in ['new', 'big'] else True\n\n    if criterion == 'random':\n        random.shuffle(fs)\n    else:\n        fs = sorted(fs, key=ks[mode], reverse=rv)\n\n    fs_del = fs[: -num]\n    msg = 'remove {} files from {} files, and remain {} files. Proceed (y/n)? '\n    ans = input(msg.format(len(fs_del), len(fs), len(fs) - len(fs_del)))\n\n    while True:\n        if ans == 'y':\n            for f in fs_del:\n                os.remove(os.path.join(work_dir, f))\n            print('done')\n            break\n        elif ans == 'n':\n            print('abort')\n            sys.exit()\n        else:\n            ans = input('choose y or n. ')\n\n\nif __name__ == '__main__':\n    usage = 'python keep_file_conditionally.py --dir [DIR] --num [NUM] --mode [MODE] --criterion [CRI]'\n    description = 'Keep files in directory according to mode and criterion, and delete the remaining.'\n\n    argparser = argparse.ArgumentParser(usage=usage, description=description)\n    argparser.add_argument('-d', '--dir', type=str, required=True, help='Working directory')\n    argparser.add_argument('-n', '--num', type=int, required=True, help='Number of reserved files')\n    argparser.add_argument('-m', '--mode', type=str, default='name', choices=['name', 'time'],\n                           help='Working mode, default is name')\n    argparser.add_argument('-c', '--criterion', type=str, default='big',\n                           choices=['big', 'small', 'new', 'old', 'random'],\n                           help='Criterion, default is big')\n    args = argparser.parse_args()\n\n    main(args.dir, args.num, args.mode, args.criterion)\n","sub_path":"keep_file_conditionally.py","file_name":"keep_file_conditionally.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"179718229","text":"\"\"\"Generators differ from list comprehensions, in that they do\nnot create a final container object, i.e. a list, set or \ndictionary. Instead, they iterate through an iterable and \ninstantly return their values. Generators consume far less\nmemory space than comprehensions, and are invaluable when dealing\nwith very large iterables (e.g. a log file with millions of\nlines of code). This example shows a program for iterating through\na log file and writing any warning messages into another file.\"\"\"\n\n\"\"\"Generator expression written using list-comprehension-style\nsyntax with round brackets () \"\"\"\ninfile = \"\"  # placeholder; in practice this would be an open log file\nwarnings = (line for line in infile if 'WARNING' in line)\n\n\n\"\"\"More complete generator example, showing the special 'yield'\nfunction\"\"\"\n\n\"\"\"Generator function\"\"\"\ndef warnings_filter(insequence):\n    for l in insequence:\n        if \"WARNING\" in l:\n            \"\"\"'yield' returns a value but pauses\n            the iteration at its current position, \n            ready to pick up again when called again\n            by '__next__'\"\"\"\n            yield l.replace(\"\\tWARNING\", \"\")\n\n\n\"\"\"Outer function that takes in a logfile, uses the\ngenerator to find all 'warning' messages, and writes \nthem to a different file\"\"\"\ndef filter_logs(inname, outname):\n    with open(inname) as infile:\n        with open(outname, \"w\") as outfile:\n            filter = warnings_filter(infile)\n            for l in filter:\n                outfile.write(l)\n","sub_path":"design_patterns/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"74723441","text":"\"\"\"\r\nProgrammer: Chris Blanks\r\nLast Edited: 11/3/2018\r\nProject: Automated Self-Serving System\r\nPurpose: This script defines the KeyboardWindow Class. 
It directly supports\r\nthe inter-GUI processes that need text input from the user.\r\n\"\"\"\r\n\r\n\r\nfrom tkinter import messagebox\r\nimport tkinter as tk\r\n\r\n\r\nclass KeyboardWindow:\r\n\r\n buttons = [\r\n 'q','w','e','r','t','y','u','i','o','p','<-','7','8','9','-',\r\n 'a','s','d','f','g','h','j','k','l','[',']','4','5','6','+',\r\n 'z','x','c','v','b','n','m',',','.','?',' Enter ','1','2','3','/',' Space ']\r\n \r\n def __init__(self,main_app_instance):\r\n self.main_app = main_app_instance\r\n self.master = self.main_app.keyboard_top_lvl\r\n \r\n self.frame = tk.Frame(self.master)\r\n self.frame.grid()\r\n \r\n self.entry = tk.Entry(self.frame,width= 138)\r\n self.entry.grid(row= 0, columnspan = 20,sticky=\"n\")\r\n \r\n #self.frame.grid_rowconfigure(0,weight=1)\r\n #self.frame.grid_columnconfigure(0,weight=1)\r\n \r\n self.initializeKeyboard()\r\n \r\n self.master.protocol(\"WM_DELETE_WINDOW\",self.deployExitMessageBox)\r\n\r\n\r\n def initializeKeyboard(self):\r\n \"\"\"Creates the keyboard window.\"\"\"\r\n varRow = 1\r\n varColumn = 0\r\n \r\n for button in self.buttons:\r\n \r\n command = lambda x=button: self.select(x)\r\n\r\n if button != \" Space \":\r\n tk.Button(self.frame, text= button, width = 5, bg=\"#000000\",fg=\"#ffffff\",\r\n activebackground=\"#ffffff\",activeforeground=\"#000000\", relief=\"raised\",\r\n padx= 8, pady=8, bd=8,command=command).grid(row=varRow,column=varColumn)\r\n else:\r\n tk.Button(self.frame, text= button, width = 60, bg=\"#000000\",fg=\"#ffffff\",\r\n activebackground=\"#ffffff\",activeforeground=\"#000000\", relief=\"raised\",\r\n padx= 4, pady=4, bd=4,command=command).grid(row=6,columnspan= 16)\r\n \r\n varColumn += 1\r\n \r\n if varColumn > 14 and varRow == 1:\r\n varColumn = 0\r\n varRow += 1\r\n if varColumn > 14 and varRow == 2:\r\n varColumn = 0\r\n varRow += 1\r\n\r\n\r\n def select(self,value):\r\n \"\"\"Defines the action for each button in the keyboard.\"\"\"\r\n if value == \"<-\":\r\n entry2 = self.entry.get()\r\n pos = self.entry.index(tk.INSERT)\r\n if pos != 0:\r\n self.entry.delete(pos-1)\r\n elif value == \" Space \":\r\n self.entry.insert(tk.END,\" \")\r\n elif value == \" Tab \":\r\n self.entry.insert(tk.END,\" \")\r\n elif value == \" Enter \":\r\n entry2 = self.entry.get()\r\n print(entry2)\r\n #use the string in entry2 as input to other programs\r\n else:\r\n self.entry.insert(tk.END, value)\r\n\r\n\r\n def deployExitMessageBox(self):\r\n if messagebox.askokcancel(\"Quit\",\"Are you sure?\"):\r\n self.master.destroy()\r\n","sub_path":"Senior_Design_Code_Copy/KeyboardWindow.py","file_name":"KeyboardWindow.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"185135847","text":"import re\nfrom django.views import View\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import render\nfrom django.shortcuts import redirect, reverse\nfrom django.contrib.auth import authenticate, login\nfrom experiment.models import Experiment\nfrom experiment.models import UserExperiment\n\nclass ConsentView(View):\n\n def get(self, request, experiment_id):\n experiment = Experiment.objects.get(id=experiment_id)\n context_dict = {'experiment': experiment}\n return render(request, template_name='experiment/experiment_consent.html', context=context_dict)\n\n def post(self, request, experiment_id):\n experiment = Experiment.objects.get(id=experiment_id)\n context_dict = {'experiment': experiment}\n\n user_details = 
self.check_consent_form(request)\n context_dict.update(user_details)\n print(user_details)\n if user_details['error']:\n return render(request, template_name='experiment/experiment_consent.html', context=context_dict)\n else:\n # consented, valid email, username\n # create user if not created\n u = User.objects.get_or_create( username=user_details['username'])[0]\n u.set_password(user_details['email'])\n u.email = user_details['email']\n u.save()\n\n # create consent record for user\n ue = UserExperiment.objects.get_or_create(user=u, experiment=experiment)[0]\n ue.email = user_details['email']\n ue.consent = True\n ue.save()\n\n # Maybe check if steps complete is greater than 0, if so they have already started experiment.\n\n # Otherwise, assign the next condition / rotation to the user\n\n\n user = authenticate(request, username=u.username, password=u.email)\n if user is not None:\n login(request, user)\n return redirect(reverse('experiment:next_experiment_step', kwargs={'experiment_id': experiment_id}))\n else:\n context_dict['username_error'] = \"User error problems. Try registering with a different username and email.\"\n return render(request, template_name='experiment/experiment_consent.html', context=context_dict)\n\n def check_consent_form(self, request):\n regex = '^[a-z0-9]+[\\._]?[a-z0-9]+[@]\\w+[.]\\w{2,3}$'\n\n user_details = {}\n error = False\n\n fields = ['consent', 'username', 'email']\n for field in fields:\n if (field in request.POST) and len(request.POST[field].strip() )>0:\n user_details[field] = request.POST[field]\n else:\n error = True\n user_details[field+'_error'] = '{} not provided'.format(field.capitalize())\n\n if 'email' in user_details:\n if not (re.search(regex, user_details['email'])):\n error = True\n user_details['email_error'] = 'Email is not valid.'\n\n user_details[\"error\"] = error\n return user_details","sub_path":"experiment/views_consent.py","file_name":"views_consent.py","file_ext":"py","file_size_in_byte":3586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"491766316","text":"# Author: Yvette WANG\n# Last edited: 18 DEC 2017 SAT\n\n# 17. Letter Combinations of a Phone Number\n# Medium\n\n# Given a digit string, return all possible letter combinations that the number could represent.\n#\n# A mapping of digit to letters (just like on the telephone buttons) is given below.\n\n# 2 -> abc\n# 3 -> def\n# 4 -> ghi\n# 5 -> jkl\n# 6 -> mno\n# 7 -> pqrs\n# 8 -> tuv\n# 9 -> wxyz\n\n# Version 1 -> Accepted\n# Beats 60%\n\n# possible improvements\n# using yield\n# using recursion\n\nclass Solution:\n def letterCombinations(self, digits):\n \"\"\"\n :type digits: str\n :rtype: List[str]\n \"\"\"\n if len(digits) == 0:\n return []\n\n dig_let = {}\n dig_let[2] = 'abc'\n dig_let[3] = 'def'\n dig_let[4] = 'ghi'\n dig_let[5] = 'jkl'\n dig_let[6] = 'mno'\n dig_let[7] = 'pqrs'\n dig_let[8] = 'tuv'\n dig_let[9] = 'wxyz'\n\n com = [letter for letter in dig_let[int(digits[0])]]\n\n if len(digits) > 1:\n for dig in digits[1::]:\n copy = com\n com = []\n for letter in dig_let[int(dig)]:\n for content in copy:\n content += letter\n com.append(content)\n return com\n\nif __name__ == '__main__':\n print(Solution().letterCombinations('23'))\n\n\n\n","sub_path":"17. Letter Combinations of a Phone Number.py","file_name":"17. 
Letter Combinations of a Phone Number.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"237652787","text":"\"\"\" Tests for the Player class. \"\"\"\n\nfrom hypothesis import given\nfrom hypothesis.strategies import lists, text\n\nfrom matching import Player\n\n\n@given(name=text())\ndef test_init(name):\n \"\"\" Make an instance of Player and check their attributes are correct. \"\"\"\n\n player = Player(name)\n\n assert player.name == name\n assert player.prefs is None\n assert player.pref_names is None\n assert player.matching is None\n\n\n@given(name=text())\ndef test_repr(name):\n \"\"\" Verify that a Player instance is represented by their name. \"\"\"\n\n player = Player(name)\n\n assert repr(player) == name\n\n\n@given(name=text(), pref_names=lists(text(), min_size=1))\ndef test_set_prefs(name, pref_names):\n \"\"\" Verify a Player can set its preferences correctly. \"\"\"\n\n player = Player(name)\n others = [Player(other) for other in pref_names]\n\n player.set_prefs(others)\n assert player.prefs == others\n assert player.pref_names == [other.name for other in others]\n\n\n@given(name=text(), pref_names=lists(text(), min_size=1))\ndef test_get_favourite(name, pref_names):\n \"\"\" Check the correct player is returned as the favourite of a player. \"\"\"\n\n player = Player(name)\n others = [Player(other) for other in pref_names]\n\n player.set_prefs(others)\n favourite = others[0]\n assert player.get_favourite() == favourite\n\n\n@given(name=text(), pref_names=lists(text(), min_size=1))\ndef test_match(name, pref_names):\n \"\"\" Check that a player can match to another player correctly. \"\"\"\n\n player = Player(name)\n other = Player(pref_names[0])\n\n player.match(other)\n assert player.matching == other\n\n\n@given(name=text(), pref_names=lists(text(), min_size=1))\ndef test_unmatch(name, pref_names):\n \"\"\" Check that a player can unmatch from another player correctly. \"\"\"\n\n player = Player(name)\n other = Player(pref_names[0])\n\n player.matching = other\n player.unmatch()\n assert player.matching is None\n\n\n@given(name=text(), pref_names=lists(text(), min_size=1))\ndef test_forget(name, pref_names):\n \"\"\" Test that a player can forget somebody. \"\"\"\n\n player = Player(name)\n others = [Player(other) for other in pref_names]\n\n player.set_prefs(others)\n for i, other in enumerate(others[:-1]):\n player.forget(other)\n assert player.prefs == others[i + 1 :]\n\n player.forget(others[-1])\n assert player.prefs == []\n assert player.pref_names == pref_names\n\n\n@given(name=text(), pref_names=lists(text(), min_size=1))\ndef test_get_successors(name, pref_names):\n \"\"\" Test that the correct successors to another player in a player's\n preference list are found. \"\"\"\n\n player = Player(name)\n others = [Player(other) for other in pref_names]\n\n player.set_prefs(others)\n player.matching = others[0]\n if len(player.pref_names) > 1:\n successors = others[1:]\n assert player.get_successors() == successors\n else:\n assert player.get_successors() == []\n\n\n@given(name=text(), pref_names=lists(text(), min_size=1, unique=True))\ndef test_prefers(name, pref_names):\n \"\"\" Test that a comparison of preference between two other players can be\n found for a player. 
\"\"\"\n\n player = Player(name)\n others = [Player(other) for other in pref_names]\n\n player.set_prefs(others)\n for i, other in enumerate(others[:-1]):\n assert player.prefers(other, others[i + 1])\n","sub_path":"tests/players/test_player.py","file_name":"test_player.py","file_ext":"py","file_size_in_byte":3369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"414097645","text":"\nfrom Tkinter import *\nimport ttk\nfrom time import sleep\nfrom Libraries import LoaderLibrary #Imported from sub directory with blank __init__.py in it\n#import font\n\n#Building the main window\n'''The namespace 'Main' is being assigned to an object of type\nTK from the tkinter library, that object is essentially a form or\nwindow and we will create many more objects to populate it'''\nMain = Tk()\n'''Here we change a property of the object 'Main' of type TK,\nthat property is the title which if looked up in the documentation\ncontains what is written at the top of the window'''\nMain.title('Puzzle Solver')\n'''Here we change the initial size of the calculator window'''\nMain.minsize(width = 200,height=200)\n\n#Setting the setup variables\n'''\nx elements would be; number of rows,\nnumber of discriptive elements for each row,\nthe arrays of those descriptives,\nideally this would be an initializer that\ncould be dynamically changed\n'''\nx_elements = [7,[[1],[2,1],[2,1],[1],[2,1],[2,1],[1]]]\ny_elements = [6,[[2,2],[2,2],[-1],[1,1],[1,1],[3]]] \n'''\nThe tile grid is merely a 2d array, Y\nencases X, it contains the tile objects\n'''\ntile_grid = []\nsolutions = [0,[[[0,1,0,1,0],[0,1,0,1,0],[0,0,0,0,0],[1,0,0,0,1],[0,1,1,1,0]]]]\n'''\nThe x_solve holds proposed solutions,\nfirst we lay out the most basic pattern of x,\nthen we iterate, cycling the right most line first\nall the way through, if no solve then we iterate the next one\nonce and reiterate the right most etc until we find a solve.\nIdeally we iterate past the solve to see if there are multiple\nsolves and store them, but for now we go till solve.\n'''\nsolve = [[],[]]\ngrid_solve = []\nlabels = [[],[],0,0]\nupdated = False\nfinished = -1\nStep = -1; i = 0; state = False\nfont_size = 6\nsolve_check = []\n\n#Settings stuff\nx_elem_label = Label(Main, font = (\"Times\", font_size, \"bold\"), text = 'No. X Elements')\ny_elem_label = Label(Main, font = (\"Times\", font_size, \"bold\"), text = 'No. 
Y Elements')\nelem_updt_label = Label(Main, font = (\"Times\", font_size, \"bold\"), text = 'X/Y, index 0->n')\nelem_num_updt_label = Label(Main, font = (\"Times\", font_size, \"bold\"), text = 'Run length to add')\nspacer_label = Label(Main, font = (\"Times\", font_size, \"bold\"), text = '', bd = 5, padx=9, pady=3)\nx_plus = Button(Main, text=\"+\", font = (\"Times\", font_size, \"bold\"))\nx_neg = Button(Main, text=\"-\", font = (\"Times\", font_size, \"bold\"))\ny_plus = Button(Main, text=\"+\", font = (\"Times\", font_size, \"bold\"))\ny_neg = Button(Main, text=\"-\", font = (\"Times\", font_size, \"bold\"))\nxy_select_var = StringVar()\nxy_select = ttk.Combobox(Main, font = (\"Times\", font_size, \"bold\"), width = 10, textvariable = xy_select_var, state = 'readonly')\nxy_select['values'] = ('X axis', 'Y axis')\nxy_index_select_var = StringVar()\nxy_index_select = ttk.Combobox(Main, font = (\"Times\", font_size, \"bold\"), width = 10, textvariable = xy_index_select_var, state = 'readonly')\nxy_num_select_var = StringVar()\nxy_num_select = ttk.Combobox(Main, font = (\"Times\", font_size, \"bold\"), textvariable = xy_num_select_var, state = 'readonly')\ngo_solve = Button(Main, font = (\"Times\", font_size, \"bold\"), text=\"solve\")\nview_solve = Button(Main, font = (\"Times\", font_size, \"bold\"), text=\"view\")\nStepper = Button(Main, font = (\"Times\", font_size, \"bold\"), text=\"stp\")\nxy_add = Button(Main, font = (\"Times\", font_size, \"bold\"), text=\"Add\")\nxy_del = Button(Main, font = (\"Times\", font_size, \"bold\"), text=\"Delete\")\nfile_selector_var = StringVar()\nfile_selector = ttk.Combobox(Main, font = (\"Times\", font_size, \"bold\"), width = 40, textvariable = file_selector_var, state = 'readonly')\nnew = Button(Main, font = (\"Times\", font_size, \"bold\"), text=\"New\")\nsave = Button(Main, font = (\"Times\", font_size, \"bold\"), text=\"Save\")\nload = Button(Main, font = (\"Times\", font_size, \"bold\"), text=\"Load\")\n\n#The File Read/Write Libraries\ndef Refresh_Save_List(k = 0):\n global x_elements, y_elements, solutions, file_selector_var\n file_name = \"Data\"\n file_data = []\n file_data = LoaderLibrary.Read_file(file_name)\n temp1 = ()\n '''\n for y in xrange(len(file_data)):\n if y == 0:\n temp1 = (file_data[0][0],)\n else:\n temp1 = temp1 + (file_data[0][y],)\n '''\n if k == 0 and file_data != []:\n temp1 = ()\n for y in xrange(len(file_data[0])):\n if y == 0:\n temp1 = (file_data[0][0],)\n else:\n temp1 = temp1 + (file_data[0][y],)\n file_selector['values'] = temp1\n elif k == 1:\n temp1 = str(len(file_data[0]))\n if len(temp1) < 3:\n for j in xrange(3-len(temp1)):\n temp1 = '0' + temp1\n temp1 = 'Save' + temp1\n file_data[0].append(temp1)\n LoaderLibrary.Write_file(file_data, file_name)\n LoaderLibrary.Write_file([x_elements, y_elements, solutions], temp1)\n elif k == 2:\n LoaderLibrary.Write_file([x_elements, y_elements, solutions], file_selector_var.get())\n elif k == 3:\n temp2 = LoaderLibrary.Read_file(file_selector_var.get())\n x_elements = temp2[0]\n y_elements = temp2[1]\n solutions = temp2[2]\n reinitialize(5)\n \n\n#Creating the Labels\ndef Create_Labels():\n global x_elements, y_elements, tile_grid, labels, updated, solutions\n labels = [[],[],0,0]; tile_grid = []\n if isinstance(x_elements[1], list):\n for i in xrange(len(x_elements[1])):\n if isinstance(x_elements[1][i], list):\n for j in xrange(len(x_elements[1][i])):\n if j > labels[2]:\n labels[2] = j\n if isinstance(y_elements[1], list):\n for i in xrange(len(y_elements[1])):\n if 
isinstance(y_elements[1][i], list):\n                for j in xrange(len(y_elements[1][i])):\n                    if j > labels[3]:\n                        labels[3] = j\n    k = 0\n    for i in xrange(len(x_elements[1])):\n        if isinstance(x_elements[1][i], list):\n            for j in xrange(len(x_elements[1][i])):\n                labels[0].append(0)\n                labels[0][k] = StringVar()\n                labels[0][k].set(x_elements[1][i][len(x_elements[1][i])-j-1])\n                labels[0].append(Label(Main, font = (\"Times\", font_size, \"bold\"), textvariable=labels[0][k]))\n                labels[0][k+1].grid(row = labels[2]-j, column = labels[3]+1+i)\n                k += 2\n    for i in xrange(len(y_elements[1])):\n        if isinstance(y_elements[1][i], list):\n            for j in xrange(len(y_elements[1][i])):\n                labels[0].append(0)\n                labels[0][k] = StringVar()\n                labels[0][k].set(y_elements[1][i][len(y_elements[1][i])-j-1])\n                labels[0].append(Label(Main, font = (\"Times\", font_size, \"bold\"), textvariable=labels[0][k]))\n                labels[0][k+1].grid(column = labels[3]-j, row = labels[2]+1+i)\n                k += 2\n    #Creating the tiles\n    for i in xrange(y_elements[0]):\n        tile_grid.append([0])\n        if i >= len(solutions[1][0]):\n            solutions[1][0].append([0])\n        for j in xrange(x_elements[0]):\n            if j != 0: tile_grid[i].append(0)\n            tile_grid[i][j] = Label(Main, relief = SUNKEN, font = (\"Times\", font_size, \"bold\"), bg = 'white', bd = 5, padx=5, pady=0)\n            tile_grid[i][j].grid(row = i+labels[2]+1, column = j+labels[3]+1)\n            tile_grid[i][j].bind(\"<Button-1>\",lambda e,y=i,x=j:Flip(x,y))\n            if j >= len(solutions[1][0][i]):\n                solutions[1][0][i].append(0)\n    x_elem_label.grid(row = 0, column = labels[3] + x_elements[0]+1, columnspan = 2)\n    y_elem_label.grid(row = 2, column = labels[3] + x_elements[0]+1, columnspan = 2)\n    spacer_label.grid(row = 0, column = labels[3] + x_elements[0]+3)\n    elem_updt_label.grid(row = 0, column = labels[3] + x_elements[0]+4, columnspan = 2)\n    x_plus.grid(row = 1, column = labels[3] + x_elements[0]+2)\n    x_neg.grid(row = 1, column = labels[3] + x_elements[0]+1)\n    y_plus.grid(row = 3, column = labels[3] + x_elements[0]+2)\n    y_neg.grid(row = 3, column = labels[3] + x_elements[0]+1)\n    xy_select.grid(row = 1, column = labels[3] + x_elements[0]+4, columnspan = 1)\n    xy_index_select.grid(row = 1, column = labels[3] + x_elements[0]+5, columnspan = 1)\n    elem_num_updt_label.grid(row = 2, column = labels[3] + x_elements[0]+4, columnspan = 2)\n    xy_num_select.grid(row = 3, column = labels[3] + x_elements[0]+4, columnspan = 2)\n    go_solve.grid(row = 4, column = labels[3] + x_elements[0]+1)\n    view_solve.grid(row = 5, column = labels[3] + x_elements[0]+1)\n    Stepper.grid(row = 6, column = labels[3] + x_elements[0]+1)\n    xy_add.grid(row = 4, column = labels[3] + x_elements[0]+4)\n    xy_del.grid(row = 4, column = labels[3] + x_elements[0]+5)\n    file_selector.grid(row = 5, column = labels[3] + x_elements[0]+3, columnspan = 3)\n    new.grid(row = 6, column = labels[3] + x_elements[0]+3)\n    save.grid(row = 6, column = labels[3] + x_elements[0]+4)\n    load.grid(row = 6, column = labels[3] + x_elements[0]+5)\n    updated = True\n\ndef Flip(x,y):\n    global tile_grid, solutions\n    View_Solution(-1)\n    if tile_grid[y][x].cget(\"bg\") == 'white':\n        tile_grid[y][x].configure(bg = 'black')\n        solutions[1][0][y][x] = 1\n    else:\n        tile_grid[y][x].configure(bg = 'white')\n        solutions[1][0][y][x] = 0\n\ndef View_Solution(k = 0):\n    global solutions, tile_grid, updated\n    if not updated: Create_Labels()\n    if k == 0:\n        if solutions[0] >= (len(solutions[1])-1):\n            solutions[0] = 0\n        else:\n            solutions[0] += 1\n        k = solutions[0]\n    elif k == -1:\n        k = 0\n    for i in xrange(len(solutions[1][k])):\n        for j in 
xrange(len(solutions[1][k][i])):\n if solutions[1][k][i][j] == 1:\n tile_grid[i][j].configure(bg = 'black')\n else:\n tile_grid[i][j].configure(bg = 'white')\n Main.update()\n\ndef Populate_Solves():\n global x_elements, y_elements, grid_solve, solve\n solve = [[],[]]\n for x in xrange(x_elements[0]):\n solve[0].append([])\n for i in xrange(len(x_elements[1][x])):\n if x_elements[1][x][i] != -1:\n for j in xrange(x_elements[1][x][i]):\n solve[0][x].append(1)\n if len(solve[0][x]) < y_elements[0]:\n solve[0][x].append(0)\n if len(solve[0][x]) < y_elements[0]:\n for k in xrange(y_elements[0]-len(solve[0][x])):\n solve[0][x].append(0)\n #print len(solve[0][x]), solve[0][x]\n for y in xrange(y_elements[0]):\n solve[1].append([])\n for i in xrange(len(y_elements[1][y])):\n if y_elements[1][y][i] != -1:\n for j in xrange(y_elements[1][y][i]):\n solve[1][y].append(1)\n if len(solve[1][y]) < x_elements[0]:\n solve[1][y].append(0)\n if len(solve[1][y]) < x_elements[0]:\n for k in xrange(x_elements[0]-len(solve[1][y])):\n solve[1][y].append(0)\n #print len(solve[1][y]), solve[1][y]\n #print solve[1]\n \ndef Check_Solution():\n global solve, solve_check\n state = False; column = -1; column2 = -1\n for x in xrange(len(solve[0])):\n for y in xrange(len(solve[0][x])):\n if solutions[1][0][y][x] == 1 and solve[0][x][y] != 1:\n column = x\n break\n state = False\n while not state:\n for x1 in xrange(x+1):\n if solve[0][x1][y] != solve[1][y][x1]:\n state = Line_Shift(1,y)\n break\n else:\n column = -1\n break\n column = x\n Line_Reset(1,y)\n if column != -1:\n break\n if column != -1:\n break\n solve_check = [column, y]\n return column\n \ndef Line_Shift(index, RowCol):\n global solve\n Line = solve[index][RowCol]; Line1 = []; Line2 = []\n if Line[-1] == 0:\n Line1 = Line\n Line2 = []\n else:\n for i in xrange(len(Line)-2,-1,-1):\n if i != 0:\n if Line[i] == 0 and Line[i-1] == 0:\n Line1 = Line[0:i+1]\n Line2 = Line[i+1:len(Line)]\n break\n else:\n Line1 = Line\n if Line1.count(1) != 0:\n Line1 = Line1[0:len(Line1)-1]\n for i in xrange(len(Line1)-1,-1,-1):\n if (i == 0 or (i != 0 and Line1[i] == 1 and Line1[i-1] == 0)):\n Line1.insert(i, 0)\n break\n if Line[-1] != 0:\n for j in xrange(len(Line1)-1,-1,-1):\n if (j == 0 or (j != 0 and Line1[j] == 0 and Line1[j-1] == 1)):\n break\n for i in xrange(len(Line2)-1,-1,-1):\n Line1.insert(j+1, Line2[i])\n solve[index][RowCol] = Line1\n return False\n else:\n solve[index][RowCol] = Line2 + Line1\n return True\n\ndef Line_Reset(index, RowCol, element = -1):\n global solve\n #First filter\n if element == -1:\n temp1 = []\n temp2 = solve[index][RowCol]\n else:\n temp1 = solve[index][RowCol][0:element]\n temp2 = solve[index][RowCol][element:]\n if temp2[0] == 0:\n for j in xrange(len(temp2)-1):\n if temp2[j] == 1:\n temp2 = temp2[j:] + temp2[0:j]\n break \n #Second filter\n i = 1\n while True:\n #Second Filter Part 1\n for j in xrange(i, len(temp2)-1):\n if temp2[j] == 0 and temp2[j-1] == 0:\n i = j\n break\n else:\n break\n #Second Filter Part 2\n for k in xrange(i, len(temp2)-1):\n if temp2[k] == 1 and temp2[i:k] != []:\n temp2 = temp2[0:i]+temp2[k:]+temp2[i:k]\n break\n else:\n break\n temp = temp1+temp2\n temp1 = False\n if element != -1:\n while True:\n for i in xrange(element,-1,-1):\n if temp[i] == 0:\n temp = temp[0:i]+temp[len(temp)-1:]+temp[i:len(temp)-1]\n break\n if temp[element] == 0:\n temp1 = False\n break\n elif temp[-1] == 0:\n temp1 = True\n break\n solve[index][RowCol] = temp\n return temp1\n\ndef Step_Solve():\n global Step\n if Step == -1:\n Step = 
0\n else:\n Step = -1\n\ndef Brute_Solve_1():\n global solutions, solve, Step, i, state\n solutions[0] = len(solutions[1])-1\n if Step == -1 or Step == 0 or i == -1: \n i = len(solve[0][0])-1; state = False\n if solutions[0] != 0:\n for k in xrange(len(solutions[1])-2):\n if solutions[1][len(solutions[1])-1] == solutions[1][k]:\n del solutions[1][len(solutions[1])-1]\n solutions[0] -= 1\n break\n solutions[1].append([])\n solutions[0] += 1\n for m in xrange(len(solve[0][0])):\n solutions[1][solutions[0]].append([])\n for n in xrange(len(solve[0])):\n solutions[1][solutions[0]][m].append(0)\n if Step == 0:\n Step = 1\n monk = True\n while (Step == -1 and i != -1) or monk:\n if state:\n state = False\n for j in xrange(len(solve[0])-1,i,-1):\n Line_Reset(0, j)\n if i != 0:\n state = Line_Shift(0, i-1)\n if state:\n i -= 1\n else:\n if solve_check != []:\n if Line_Reset(0, solve_check[0], solve_check[1]):\n state = Line_Shift(0, i)\n if not state:\n i = Check_Solution()\n for m in xrange(len(solve[0][0])):\n for n in xrange(len(solve[0])):\n solutions[1][solutions[0]][m][n] = solve[0][n][m]\n View_Solution(solutions[0])\n monk = False\n\ndef xy_select_update(e):\n global xy_select_var, xy_index_select_var, x_elements, y_elements\n if xy_select_var.get() == 'X axis':\n temp = (0,)\n for x in xrange(1,x_elements[0]):\n temp = temp + (x,)\n xy_index_select['values'] = temp\n del temp\n temp = 0\n if len(xy_index_select_var.get()) != 0:\n for x in xrange(len(x_elements[1][int(xy_index_select_var.get())])):\n if x_elements[1][int(xy_index_select_var.get())] != -1:\n temp = temp + x_elements[1][int(xy_index_select_var.get())][x] + 1\n temp1 = (1,)\n for x in xrange(1,y_elements[0]-temp):\n temp1 = temp1 + (x+1,)\n xy_num_select['values'] = temp1\n elif xy_select_var.get() == 'Y axis':\n temp = (0,)\n for y in xrange(1,y_elements[0]):\n temp = temp + (y,)\n xy_index_select['values'] = temp\n del temp\n temp = 0\n if len(xy_index_select_var.get()) != 0:\n for y in xrange(len(y_elements[1][int(xy_index_select_var.get())])):\n if y_elements[1][int(xy_index_select_var.get())] != -1:\n temp = temp + y_elements[1][int(xy_index_select_var.get())][y] + 1\n temp1 = (1,)\n for y in xrange(1,x_elements[0]-temp):\n temp1 = temp1 + (y+1,)\n xy_num_select['values'] = temp1\n\ndef XY_Add():\n global xy_select_var, xy_index_select_var, xy_num_select_var, x_elements, y_elements\n if len(xy_select_var.get()) != 0 and len(xy_index_select_var.get()) != 0 and len(xy_num_select_var.get()) != 0:\n if xy_select_var.get() == 'X axis':\n if x_elements[1][int(xy_index_select_var.get())] == [-1]:\n x_elements[1][int(xy_index_select_var.get())] = [int(xy_num_select_var.get())]\n else:\n x_elements[1][int(xy_index_select_var.get())].append(int(xy_num_select_var.get()))\n elif xy_select_var.get() == 'Y axis':\n if y_elements[1][int(xy_index_select_var.get())] == [-1]:\n y_elements[1][int(xy_index_select_var.get())] = [int(xy_num_select_var.get())]\n else:\n y_elements[1][int(xy_index_select_var.get())].append(int(xy_num_select_var.get()))\n reinitialize(5)\n \ndef XY_Del():\n global xy_select_var, xy_index_select_var, xy_num_select_var\n if len(xy_select_var.get()) != 0 and len(xy_index_select_var.get()) != 0 and len(xy_num_select_var.get()) != 0:\n if xy_select_var.get() == 'X axis':\n if len(x_elements[1][int(xy_index_select_var.get())]) == 1:\n x_elements[1][int(xy_index_select_var.get())] = [-1]\n else:\n x_elements[1][int(xy_index_select_var.get())].pop()\n elif xy_select_var.get() == 'Y axis':\n if 
len(y_elements[1][int(xy_index_select_var.get())]) == 1:\n                y_elements[1][int(xy_index_select_var.get())] = [-1]\n            else:\n                y_elements[1][int(xy_index_select_var.get())].pop()\n\n    reinitialize(5)\n\ndef reinitialize(z=0):\n    global x_elements, y_elements, solve, labels, updated, finished, Step, i, state, tile_grid, solutions\n    if z == 1:\n        x_elements[0] += 1\n        while True:\n            if len(x_elements[1]) < x_elements[0]:\n                x_elements[1].append([-1])\n            else:\n                break\n    elif z == 2:\n        if x_elements[0] > 3:\n            x_elements[0] -= 1\n            while True:\n                if len(x_elements[1]) > x_elements[0]:\n                    del x_elements[1][len(x_elements[1])-1]\n                else:\n                    break\n    elif z == 3:\n        y_elements[0] += 1\n        while True:\n            if len(y_elements[1]) < y_elements[0]:\n                y_elements[1].append([-1])\n            else:\n                break\n    elif z == 4:\n        if y_elements[0] > 3:\n            y_elements[0] -= 1\n            while True:\n                if len(y_elements[1]) > y_elements[0]:\n                    del y_elements[1][len(y_elements[1])-1]\n                else:\n                    break\n    if z != 0:\n        del solve[0]; del solve[0]\n        solve = [[],[]]\n    for j in xrange(2):\n        for k in xrange(len(labels[j])):\n            if k%2 == 1:\n                labels[j][k].destroy()\n    labels = [[],[],0,0]\n    updated = False\n    finished = -1\n    Step = -1; i = 0; state = False\n    if len(tile_grid) != 0:\n        for j in xrange(len(tile_grid)):\n            for k in xrange(len(tile_grid[j])):\n                tile_grid[j][k].destroy()\n        tile_grid = []\n    Create_Labels()\n    Populate_Solves()\n    View_Solution(0)\n    Refresh_Save_List(0)\n    \nreinitialize(0)\n\nx_plus.configure(command=lambda var1=1:reinitialize(var1))\nx_neg.configure(command=lambda var2=2:reinitialize(var2))\ny_plus.configure(command=lambda var3=3:reinitialize(var3))\ny_neg.configure(command=lambda var4=4:reinitialize(var4))\nnew.configure(command=lambda var6=1:Refresh_Save_List(var6))\nsave.configure(command=lambda var7=2:Refresh_Save_List(var7))\nload.configure(command=lambda var8=3:Refresh_Save_List(var8))\nxy_select.bind('<<ComboboxSelected>>', xy_select_update)\nxy_index_select.bind('<<ComboboxSelected>>', xy_select_update)\nfile_selector.bind('<<ComboboxSelected>>', lambda e,var5=0:Refresh_Save_List(var5))\ngo_solve.configure(command=Brute_Solve_1)\nview_solve.configure(command=View_Solution)\nStepper.configure(command=Step_Solve)\nxy_add.configure(command=XY_Add)\nxy_del.configure(command=XY_Del)\nMain.mainloop() #The final Thread\n","sub_path":"Dev/Crypto Shade Breaker 012a.py","file_name":"Crypto Shade Breaker 012a.py","file_ext":"py","file_size_in_byte":21749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"183318806","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport os\n\nfrom django.db import models, migrations\nfrom django.core.management import call_command\n\nfixture_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../fixtures'))\nfixture_filename = 'initial_data.json'\n\ndef load_fixture(apps, schema_editor):\n    fixture_file = os.path.join(fixture_dir, fixture_filename)\n    call_command('loaddata', fixture_file)\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('findbook', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.RunPython(load_fixture)\n    ]\n","sub_path":"FindBook/findbook/migrations/0002_load_intial_data.py","file_name":"0002_load_intial_data.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"486873134","text":"number = int(input(\"Enter the number \"))\nlastDigit = number%10\nleftDigits = number//10\nleftDigitsSum = 0\n
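# a number is handsome when the sum of its leading digits equals its last digit\n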
while leftDigits:\n    leftDigitsSum += leftDigits%10\n    leftDigits = leftDigits//10\n\nif leftDigitsSum == lastDigit:\n    print(str(number)+\" is a handsome number.\")\nelse:\n    print(str(number)+\" is not a handsome number.\")\n","sub_path":"handsomeNumber.py","file_name":"handsomeNumber.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"203991804","text":"#!Python\r\n\"\"\"\r\nDeletes Windows %TEMP% and logs actions in specified log file\r\n\"\"\"\r\n__author__ = 'mramirez'\r\n\r\nimport sys, os\r\n# adding Utilities holding directory to PYTHONPATH and importing it\r\nsys.path.append(r'C:\\Scripts\\Python')\r\nimport Utilities.Common_Utils as CU\r\n\r\n# cfg parameters\r\n_LOGFILE = ''\r\n_CLEANDIR = os.environ['temp']\r\n\r\n# uses default log location %userprofile%\\Reports\\CleanTemp if _LOGFILE is null\r\nif not _LOGFILE:\r\n    _LOGFILE = CU.create_logDirs(os.path.join(os.environ['userprofile'], 'Reports/CleanTemp'), True, '%Y-%m-%d %H%M%S', 'CleanTempLog.log')\r\n    \r\n# executes cleanup and logs results\r\norigNumDirs, origNumFiles = CU.get_num_dirs_files(_CLEANDIR) # retrieves # of dirs and files before cleanup\r\nCU.logger_datetime(_LOGFILE, 'STARTING CLEANUP: ' + _CLEANDIR) # starts cleanup log \r\nfor item in os.scandir(_CLEANDIR): # creates iterator using os.scandir() and _CLEANDIR path\r\n    CU.logger_datetime(_LOGFILE, CU.remove_dir_file(item, verbose=True)) # for item in iterator, attempt delete and log action\r\nfinalNumDirs, finalNumFiles = CU.get_num_dirs_files(_CLEANDIR) # retrieves # of dirs and files after cleanup\r\n\r\n# calculating deleted dirs/files\r\ndeletedDirs = origNumDirs - finalNumDirs ; errorDirs = origNumDirs - deletedDirs\r\ndeletedFiles = origNumFiles - finalNumFiles ; errorFiles = origNumFiles - deletedFiles\r\n\r\n# continues and finishes logging\r\nCU.logger_empty_line(_LOGFILE, 'Summary:\\n') # starts summary log section\r\nCU.logger(_LOGFILE, '-' * 26)\r\nCU.logger(_LOGFILE, 'Dirs: [{}] Attempted to delete: [{}] SUCCESS, [{}] ERROR'.format(origNumDirs, deletedDirs, errorDirs))\r\nCU.logger(_LOGFILE, 'Files: [{}] Attempted to delete: [{}] SUCCESS, [{}] ERROR'.format(origNumFiles, deletedFiles, errorFiles))\r\nCU.logger_empty_line(_LOGFILE)\r\nCU.logger_datetime(_LOGFILE, 'FINISHED CLEANUP: ' + _CLEANDIR) # ends cleanup log\r\n","sub_path":"CleanTemp.py","file_name":"CleanTemp.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"370141460","text":"# Rock-paper-scissors game\n\n# player makes a move\n# computer makes a move\n# compare the moves to decide the winner\n\nplayer = int(input(\"Make your move (scissors 0, rock 1, paper 2): \"))\ncomputer = 1\n# compare player and computer\n
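# scissors (0) beats paper (2), rock (1) beats scissors (0), paper (2) beats rock (1)\n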
if (player == 0 and computer == 2) or (player == 1 and computer == 0) or (player == 2 and computer == 1):\n    print(\"Player wins\")\nelif (player == computer):\n    print(\"Tie\")\nelse:\n    print(\"Computer wins\")\n\n","sub_path":"day02/if猜拳游戏-基础版.py","file_name":"if猜拳游戏-基础版.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"509386420","text":"import time\nimport datetime\nimport logging\nimport cronex\nfrom data_sources.hippo_base import HippoDataSource\n\n\nclass CronQueue(HippoDataSource):\n    namespace = 'cron'\n    label = 'Cron Scheduler'\n    inputs = {\n        'cronstring': {'input':'text','label':'Cron String (UTC)','default':'* * * * *'},\n        'maxbacklog': {'input':'number','label':'Maximum Job Backlog','default':1}\n    }\n\n    def __init__(self, *args):\n        super().__init__(*args, namespace=CronQueue.namespace, inputs=CronQueue.inputs)\n\n    def process(self):\n        if self.last_task_queued_tstamp and time.time() - self.last_task_queued_tstamp < 60:\n            # finest granularity is one minute\n            return\n        cur_tstamp = int(time.time())\n        dt = datetime.datetime.fromtimestamp(cur_tstamp)\n        cur_dt_tuple = (dt.year,dt.month,dt.day,dt.hour,dt.minute)\n        cex = cronex.CronExpression(self.cronstring)\n        needs_to_run = cex.check_trigger(cur_dt_tuple)\n\n        if not needs_to_run and self.last_task_queued_tstamp and cur_tstamp - self.last_task_queued_tstamp < 86400 * 3:\n            # make sure we didn't miss last run if last queuing was less than three days ago\n            s_tstamp = self.last_task_queued_tstamp + 60\n            while s_tstamp < cur_tstamp:\n                last_qdt = datetime.datetime.fromtimestamp(int(s_tstamp))\n                last_qdt_tuple = (last_qdt.year,last_qdt.month,last_qdt.day,last_qdt.hour,last_qdt.minute)\n                if cex.check_trigger(last_qdt_tuple):\n                    needs_to_run = True\n                    break\n                s_tstamp += 60\n\n        if needs_to_run:\n            self.create_tasks([str(cur_tstamp)])\n","sub_path":"data_sources/cron.py","file_name":"cron.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"188836207","text":"import cv2\r\nfrom cv2 import imread\r\nimport numpy as np\t\r\nfrom numpy import array\r\nfrom cv2 import NORM_MINMAX, CV_8UC1, imshow, normalize, drawContours, waitKey, CV_32SC1\r\n\r\ndef get_contrasted(image, type=\"dark\", level=2):\r\n    maxIntensity = 255.0 # depends on dtype of image data\r\n    phi = 1\r\n    theta = 1\r\n\r\n    if type == \"light\":\r\n        newImage0 = (maxIntensity/phi)*(image/(maxIntensity/theta))**0.5\r\n        newImage0 = array(newImage0,dtype=np.uint8)\r\n        return newImage0\r\n    elif type == \"dark\":\r\n        newImage1 = (maxIntensity/phi)*(image/(maxIntensity/theta))**level\r\n        np.clip(newImage1, 0, 255, out=newImage1)\r\n        newImage1 = newImage1.astype('uint8')\r\n        #newImage1 = array(newImage1,dtype=uint8)\r\n\r\n        return newImage1\r\n\r\ndef sharp(image, level=3):\r\n    f = cv2.GaussianBlur(image, (level,level), level)\r\n    f = cv2.addWeighted(image, 1.5, f, -0.5, 0)\r\n    return f\r\n\r\noriginal_image = imread('../../images/imagen.jpg')\r\n#original_image = imread('resframe.jpg')\r\n\r\n#Alternatives grayscale\r\ngray_img = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)\r\nimshow('color_image',original_image)\r\nimshow('gray_image',gray_img)\r\n\r\n# 1 Convert to gray & Normalize\r\n#gray_img = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)\r\n#gray_img = sharp(get_contrasted(gray_img))\r\n#gray_img = normalize(gray_img, None, 0, 255, NORM_MINMAX, CV_8UC1)\r\n#imshow(\"Gray\", gray_img)\r\n\r\n# 2 Find Threshold\r\ngray_blur = cv2.GaussianBlur(gray_img, (7, 7), 0)\r\nadapt_thresh_im = cv2.adaptiveThreshold(gray_blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 1)\r\nmax_thresh, thresh_im = cv2.threshold(gray_img, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)\r\nthresh = cv2.bitwise_or(adapt_thresh_im, thresh_im)\r\n\r\n# 3 Dilate\r\ngray = cv2.Canny(thresh, 88, 400, apertureSize=3)\r\ngray = cv2.dilate(gray, None, iterations=8)\r\ngray = cv2.erode(gray, None, iterations=8)\r\nimshow(\"Threshold\", gray)\r\n\r\n# 4 Flood\r\n(_, contours, _) = cv2.findContours(gray, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\r\n#(_, contours, _) = cv2.findContours(gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\n\r\ncontour_info = []\r\nfor c in contours:\r\n    contour_info.append((\r\n        c,\r\n        cv2.isContourConvex(c),\r\n        cv2.contourArea(c),\r\n        ))\r\n
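# sort by area so contour_info[0] is the largest contour, used as the mask region below\r\n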
contour_info = sorted(contour_info, key=lambda c: c[2], reverse=True)\r\nmax_contour = contour_info[0]\r\nholes = np.zeros(gray_img.shape, np.uint8)\r\ndrawContours(holes, [max_contour[0]], 0, 255, -1)\r\nimshow(\"Holes\", holes)\r\n\r\nmask = cv2.GaussianBlur(holes, (15, 15), 0)\r\nmask = np.dstack([mask] * 3) # Create 3-channel alpha mask\r\n\r\nmask = mask.astype('float32') / 255.0 # Use float matrices,\r\nimg = original_image.astype('float32') / 255.0 # for easy blending\r\nmasked = (mask * img) + ((1 - mask) * (0,0,1)) # Blend\r\nmasked = (masked * 255).astype('uint8')\r\n\r\nimshow(\"Masked\", masked)\r\nwaitKey()\r\n","sub_path":"Project_18.py","file_name":"Project_18.py","file_ext":"py","file_size_in_byte":2853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"451281526","text":"import re\n\n\nCOMMIT_MESSAGE = \"\"\"Fixed #27533 -- Fixed inspectdb crash if a unique constraint uses an … #31337\n …unsupported type.\"\"\"\n\nre.findall(r'#[0-9]+', COMMIT_MESSAGE, flags=re.MULTILINE)\n# ['#27533', '#31337']\n\nissues = re.compile(r'#[0-9]+', flags=re.MULTILINE)\nissues.findall(COMMIT_MESSAGE)\n# ['#27533', '#31337']\n","sub_path":"regex/src/re-multiline.py","file_name":"re-multiline.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"322495215","text":"from flask import Flask\nfrom flask import request\nimport threading\nimport mainframe\nimport os\napp = Flask(__name__)\n\ndownload_manager = mainframe.downloader()\nconfig = download_manager.config\n\ndownload_manager.start_background_sync_server()\ndownload_manager.start_background_sync_client()\n\n@app.route(\"/home\")\ndef home():\n
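\t# serve a minimal HTML form that posts url/filename to the /distributor route\n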
\tcontent = \"\"\"\n\t\t\t<html>\n\t\t\t<head><title>DOWNLOADER</title></head>\n\t\t\t<body>\n\t\t\t<h3>Enter url/filename here</h3>\n\t\t\t<form action=\"/distributor\" method=\"post\">\n
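\t\t\t<!-- both fields are read by the /distributor handler defined below -->\n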
\t\t\tURL - <input type=\"text\" name=\"url\">\n\t\t\tFILE - <input type=\"text\" name=\"filename\">\n\t\t\t<input type=\"submit\" value=\"Submit\">\n\t\t\t</form>\n\t\t\t</body>\n\t\t\t</html>\n\t\t\t\"\"\"\n\treturn content\n\n\n@app.route(\"/distributor\",methods=['POST'])\ndef distributor():\n\turl = request.form[\"url\"]\n\tfilename = request.form[\"filename\"]\n\tthreading.Thread(target=download_manager.distributor,args=(url,filename)).start()\n\treturn '...'\n\n@app.route(\"/local_init\",methods=['POST'])\ndef local_init():\n\tstart = int(request.form['start'])\n\tend = int(request.form['end'])\n\treporting_ip = request.form['reporting_ip']\n\turl = request.form[\"url\"]\n\tfile_name = request.form[\"file_name\"]\n\tthreading.Thread(target=download_manager.local_init,args=(start,end,url,file_name,reporting_ip)).start()\n\treturn '...'\n\n@app.route(\"/fetch_local_data\",methods=['POST'])\ndef fetch_local_data():\n\tstart = int(request.form['start'])\n\tlocal_id = request.form['local_id']\n\tip = request.form['ip']\n\tfile_name = request.form[\"file_name\"]\n\tthreading.Thread(target=download_manager.fetch_local_data,args=(local_id,ip,start,file_name)).start()\n\treturn '...'\n\n@app.route(\"/local_transfer\",methods=['POST'])\ndef local_transfer():\n\tlocal_id = request.form['local_id']\n\twith open(os.path.join(os.getcwd(),local_id),'rb') as f:\n\t\tcontent = f.read()\n\tos.unlink(os.path.join(os.getcwd(),local_id))\n\treturn content\n\napp.run(host=config['server_ip'], port=config['server_port'],debug=True)\t","sub_path":"http_server.py","file_name":"http_server.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"34183329","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Exe_5_33.py\n# \n# Copyright 2019 chee <983184728@qq.com>\n# \n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\n# MA 02110-1301, USA.\n# \n# Exe 33\n# 题目:(财务应用程序:计算 CD 价值)\n\n\ndef main(args):\n deposit = eval(input(\"Enter the initial deposit amount: \"))\n yield_rate = eval(input(\"Enter annual percentage yield: \")) / 100\n months = eval(input(\"Enter maturity period (number of months): \"))\n\n print(\"Month\\tCD Value\")\n for i in range(1, months + 1):\n deposit = deposit * (1 + yield_rate / 12)\n print(\"{}\\t{:.2f}\".format(i, deposit))\n return 0\n\nif __name__ == '__main__':\n import sys\n sys.exit(main(sys.argv))\n","sub_path":"unit05/Exe_5_33.py","file_name":"Exe_5_33.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"452638920","text":"import json\nfrom collections import Counter\nfrom sklearn import tree\nimport timeit\nimport numpy as np\n\ndef run() :\n with open('train.json') as f :\n start = timeit.default_timer()\n data = json.load(f)\n ingredientList = []\n for jsonItem in data :\n for ingredient in jsonItem[\"ingredients\"]:\n ingredientList.append(ingredient)\n\n c= Counter(ingredientList).most_common(3500)\n ingredientList = []\n for item in c:\n ingredientList.append(item[0])\n # print(ingredientList)\n\n datasetAttr = []\n datasetCuisine = []\n for jsonItem in data :\n # add the ingredients list\n tempList = []\n for item in ingredientList :\n if item in jsonItem[\"ingredients\"]:\n tempList.append(1)\n else:\n tempList.append(0)\n datasetAttr.append(tempList)\n datasetCuisine.append(jsonItem[\"cuisine\"])\n\n # c = Counter(item for item in datasetAttr[0])\n # print(c[1])\n end = timeit.default_timer()\n print(\"Training Dataset Conversion Time : \",end - start, \" Seconds\")\n\n start = timeit.default_timer()\n testData = []\n testID = []\n with open('test.json') as f :\n data = json.load(f)\n for jsonItem in data :\n tempList = []\n for item in ingredientList :\n if item in jsonItem[\"ingredients\"]:\n tempList.append(1)\n else:\n tempList.append(0)\n testData.append(tempList)\n testID.append(jsonItem[\"id\"])\n\n end = timeit.default_timer()\n print(\"Testing Dataset Conversion Time : \",end - start, \" Seconds\")\n\n start = timeit.default_timer()\n classifier = tree.DecisionTreeClassifier(min_impurity_split = 0.2)\n classifier.fit(datasetAttr,datasetCuisine)\n\n end = timeit.default_timer()\n print(\"Model Training Time : \",end - start, \" Seconds\")\n\n\n start = timeit.default_timer()\n i = 0\n # result = {}\n f = open(\"DecisionTree-results.csv\",\"w\")\n f.write(\"id,cuisine\\n\")\n while i< len(testID):\n f.write(str(testID[i]))\n f.write(\",\"),\n f.write(classifier.predict(np.reshape(testData[i],(1,-1)))[0])\n f.write(\"\\n\")\n i+=1\n end = timeit.default_timer()\n print(\"Testing Time : \",end - start, \" Seconds\")\n print(\"Results written to DecisionTree-results.csv file\")\n\n\nif __name__ == \"__main__\":\n run()\n","sub_path":"HW1/Q4/DecisionTree.py","file_name":"DecisionTree.py","file_ext":"py","file_size_in_byte":2659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"412792122","text":"# https://stackoverflow.com/questions/4798654/modular-multiplicative-inverse-function-in-python\n\n\ndef egcd(a, b):\n if a == 0:\n return b, 0, 1\n else:\n g, y, x = 
egcd(b % a, a)\n        return g, x - (b // a) * y, y\n\n\ndef modinv(a, m):\n    a = a % m\n    g, x, y = egcd(a, m)\n    if g != 1:\n        raise Exception(\"modular inverse does not exist\")\n    else:\n        return x % m\n","sub_path":"ecc/math_utils/mod_inverse.py","file_name":"mod_inverse.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"286836954","text":"import bisect\nimport collections\nimport unittest\nfrom enum import Enum\nfrom datetime import datetime, timedelta\n\n\n\nPriceEvent = collections.namedtuple(\"PriceEvent\", [\"timestamp\", \"price\"])\n\nclass StockSignal(Enum):\n    buy = 1\n    neutral = 0\n    sell = -1\n\n\nclass Stock:\n\n    LONG_TERM_TIMESPAN = 10\n    SHORT_TERM_TIMESPAN = 5\n\n    def __init__(self, symbol):\n        self.symbol = symbol\n        self.price_history = []\n\n    @property\n    def price(self):\n        return self.price_history[-1].price if self.price_history else None\n\n    def update(self, timestamp, price):\n        if price < 0:\n            raise ValueError(\"price should not be negative\")\n        bisect.insort_left(self.price_history, PriceEvent(timestamp, price))\n\n    def is_increasing_trend(self):\n        return self.price_history[-3].price < self.price_history[-2].price < self.price_history[-1].price\n\n    def get_crossover_signal(self, on_date):\n        closing_price_list = []\n        NUM_DAYS = self.LONG_TERM_TIMESPAN + 1\n        for i in range(NUM_DAYS):\n            chk = on_date.date() - timedelta(i)\n            for price_event in reversed(self.price_history):\n                if price_event.timestamp.date() > chk:\n                    pass\n                if price_event.timestamp.date() == chk:\n                    closing_price_list.insert(0, price_event)\n                    break\n                if price_event.timestamp.date() < chk:\n                    closing_price_list.insert(0, price_event)\n                    break\n\n        # Return NEUTRAL signal\n        if len(closing_price_list) < 11:\n            return StockSignal.neutral\n\n        # BUY signal\n        if sum([update.price for update in closing_price_list[-11:-1]]) / 10 \\\n            > sum([update.price for update in closing_price_list[-6:-1]]) / 5 \\\n            and sum([update.price for update in closing_price_list[-10:]])/10 \\\n            < sum([update.price for update in closing_price_list[-5:]]) / 5:\n            return StockSignal.buy\n\n        # SELL signal\n        if sum([update.price for update in closing_price_list[-11:-1]]) / 10 \\\n            < sum([update.price for update in closing_price_list[-6:-1]]) / 5 \\\n            and sum([update.price for update in closing_price_list[-10:]])/10 \\\n            > sum([update.price for update in closing_price_list[-5:]]) / 5:\n            return StockSignal.sell\n\n        # NEUTRAL signal\n        return StockSignal.neutral\n\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"src/stock_alerter/stock.py","file_name":"stock.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"406869871","text":"import RPi.GPIO as GPIO\nimport time\nimport sys\nimport requests\nGPIO.setmode(GPIO.BCM)\nprint(\"Request Remote Condition:\")\nGPIO.setup(18,GPIO.OUT)\nGPIO.setup(12,GPIO.OUT)\nwhile True:\n    time.sleep(1)\n\n    url=\"http://192.168.200.45:8000/\"\n    led = requests.get(url).text\n    GPIO.output(18, True if led == 'ON' else False)\n\n    url=\"http://192.168.200.17:8000/\"\n    led = requests.get(url).text\n    GPIO.output(12, True if led == 'ON' else False)\n\nGPIO.cleanup()\nprint(\"End of Test\")\n","sub_path":"Book/RPi3+/Ch.11/control_ldr.py","file_name":"control_ldr.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"66998129","text":"import math\n\nclass Gas:\n
def __init__(self, model, **args):\n # initialize the super class\n super().__init__()\n \n self.target_models = [\"Compliance\"]\n\n self.p_atm = 760\n self.temp_settings = {}\n self.dry_air = {}\n self.compounds = {}\n \n # set the independent properties\n for key, value in args.items():\n setattr(self, key, value)\n \n # get a reference to the whole model\n self.model = model\n \n # define a list which contains all components holding gas\n self.gas_components = []\n \n # now transform the components with content gas into gas components\n for comp_name, comp in model.components.items():\n if ((comp.model_type in self.target_models) and comp.content == 'gas'):\n self.gas_components.append(comp)\n # now prepare the object for being a gas containing object\n setattr(comp, 'mix_gas', self.mix_gas)\n setattr(comp, 'p_atm', self.p_atm)\n setattr(comp, 'c_total', 0)\n setattr(comp, 'c_total_dry', 0)\n # set the gas compounds as attributes of the model component\n for compound, value in self.compounds.items():\n setattr(comp, \"f\" + compound, 0)\n setattr(comp, \"c\" + compound, 0)\n setattr(comp, \"p\" + compound, 0)\n # set the temperature and the water vapour pressure which is temperature dependent\n for temp in self.temp_settings:\n if temp == comp.name:\n setattr(comp, \"temp\", self.temp_settings[temp])\n setattr(comp, \"ph2o\", self.calculate_water_vapour_pressure(self.temp_settings[temp]))\n # set the dry air compensation\n for compound, value in self.dry_air.items():\n setattr(comp, compound, value)\n \n \n \n def model_step(self):\n if self.is_enabled:\n for comp in self.gas_components:\n self.calculate_gas_composition(comp)\n \n def calculate_water_vapour_pressure(self, temp):\n # calculate the water vapour pressure in air with temperature temp\n return math.pow(math.e, (20.386 - (5132 / (temp + 273)))) \n \n \n def calculate_gas_composition(self, comp):\n # calculate the concentration of all molecules (including h2o) in the air at the current pressure, volume and temperarure of \n comp.c_total = ((comp.pres * comp.vol) / (self.gas_constant * (273.15 + comp.temp)) / comp.vol) * 1000 \n comp.c_total_dry = (((comp.pres - comp.ph2o) * comp.vol) / (self.gas_constant * (273.15 + comp.temp)) / comp.vol) * 1000\n \n # now calculate the partial pressures and concentrations of the compounds\n for compound in self.compounds.keys():\n # get the fraction\n fraction = getattr(comp, \"f\" + compound)\n # calculate the partial pressures\n setattr(comp, \"p\" + compound, fraction * (comp.pres - comp.ph2o))\n # calculate the gas concentrations\n setattr(comp, \"c\" + compound, fraction * comp.c_total_dry)\n \n def mix_gas(self, dvol, comp_to, comp_from):\n # mix the blood only if the blood model is enabled\n if self.is_enabled:\n # iterate over all the blood compounds\n for compound in self.compounds.keys():\n fraction = \"f\" + compound\n # get the compound concenrtation in the components\n comp_to_fraction = getattr(comp_to, fraction)\n comp_from_fraction = getattr(comp_from, fraction)\n # get the volume\n volume = comp_to.vol\n # calculate the change in compound concentration\n d_fraction = (comp_from_fraction - comp_to_fraction) * dvol\n # update the new compound concentration in the component receiving the blood\n if volume > 0:\n new_fraction = (comp_to_fraction * volume + d_fraction) / volume\n else:\n new_fraction = 0\n # store the new concentration in the component receiving the blood\n setattr(comp_to, fraction, new_fraction)\n\n \n \n \n 
","sub_path":"core_models/.ipynb_checkpoints/Gas-checkpoint.py","file_name":"Gas-checkpoint.py","file_ext":"py","file_size_in_byte":4448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"517187125","text":"from __future__ import (\n division,\n absolute_import,\n with_statement,\n print_function,\n unicode_literals,\n)\nimport os, sys\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nsys.path.append(os.path.join(BASE_DIR, \"../../\"))\nimport torch\nimport torch.optim as optim\nimport torch.optim.lr_scheduler as lr_sched\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nfrom ClassArch2.utils import pytorch_utils as pt_utils\nfrom ClassArch2.utils import viz as v\nimport pprint\nimport os.path as osp\nimport argparse\n\nfrom ClassArch2.models.rscnn_ssn_cls import RSCNN_SSN as RSCNN\nfrom ClassArch2.models.rscnn_ssn_cls import model_fn_decorator\nfrom ClassArch2.data.ModelNet40Loader import ModelNet40\nimport ClassArch2.data.data_utils as d_utils\n# from RandAugment3D.augmentation import RandAugment3D\n\ntorch.backends.cudnn.enabled = True\ntorch.backends.cudnn.benchmark = True\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description=\"Arguments for cls training\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n parser.add_argument(\"-batch_size\", type=int, default=32, help=\"Batch size\")\n parser.add_argument(\n \"-num_points\", type=int, default=1024, help=\"Number of points to train with\"\n )\n parser.add_argument(\n \"-weight_decay\", type=float, default=1e-5, help=\"L2 regularization coeff\"\n )\n parser.add_argument(\"-lr\", type=float, default=0.001, help=\"Initial learning rate\")\n parser.add_argument(\n \"-lr_decay\", type=float, default=0.7, help=\"Learning rate decay gamma\"\n )\n parser.add_argument(\n \"-decay_step\", type=float, default=21, help=\"Learning rate decay step\"\n )\n parser.add_argument(\n \"-bn_momentum\", type=float, default=0.9, help=\"Initial batch norm momentum\"\n )\n parser.add_argument(\n \"-bnm_decay\", type=float, default=0.5, help=\"Batch norm momentum decay gamma\"\n )\n parser.add_argument(\n \"-checkpoint\", type=str, default=None, help=\"Checkpoint to start from\"\n )\n parser.add_argument(\n \"-epochs\", type=int, default=400, help=\"Number of epochs to train for\"\n )\n parser.add_argument(\n \"-run_name\",\n type=str,\n default=\"cls_run_1\",\n help=\"Name for run in tensorboard_logger\",\n )\n parser.add_argument(\"-visdom-port\", type=int, default=8097)\n parser.add_argument(\"-visdom\", action=\"store_true\")\n\n return parser.parse_args()\n\n\nlr_clip = 0.00001\nbnm_clip = 0.01\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n transforms = transforms.Compose(\n [\n d_utils.PointcloudToTensor(),\n # RandAugment3D(1, 1),\n ]\n )\n\n test_set = ModelNet40(args.num_points, transforms=transforms, split='test')\n test_loader = DataLoader(\n test_set,\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=4,\n pin_memory=True,\n )\n\n train_set = ModelNet40(args.num_points, transforms=transforms, split='train')\n train_loader = DataLoader(\n train_set,\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=4,\n pin_memory=True,\n )\n\n model = RSCNN(input_channels=0, num_classes=40, use_xyz=True)\n \n # Multi GPU\n os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'\n model = torch.nn.DataParallel(model, output_device=1)\n model.cuda()\n optimizer = optim.Adam(\n model.parameters(), 
lr=args.lr, weight_decay=args.weight_decay\n )\n lr_lbmd = lambda it: max(\n args.lr_decay ** (int(it // args.decay_step)),\n lr_clip / args.lr,\n )\n bn_lbmd = lambda it: max(\n args.bn_momentum\n * args.bnm_decay ** (int(it // args.decay_step)),\n bnm_clip,\n )\n\n # default value\n it = -1 # for the initialize value of `LambdaLR` and `BNMomentumScheduler`\n best_loss = 1e10\n start_epoch = 1\n\n # load status from checkpoint\n if args.checkpoint is not None:\n checkpoint_status = pt_utils.load_checkpoint(\n model, optimizer, filename=args.checkpoint.split(\".\")[0]\n )\n if checkpoint_status is not None:\n it, start_epoch, best_loss = checkpoint_status\n\n lr_scheduler = lr_sched.LambdaLR(optimizer, lr_lambda=lr_lbmd, last_epoch=it)\n bnm_scheduler = pt_utils.BNMomentumScheduler(\n model, bn_lambda=bn_lbmd, last_epoch=it\n )\n\n it = max(it, 0) # for the initialize value of `trainer.train`\n\n model_fn = model_fn_decorator(nn.CrossEntropyLoss())\n\n if args.visdom:\n viz = v.VisdomViz(port=args.visdom_port)\n else:\n viz = v.CmdLineViz()\n\n viz.text(pprint.pformat(vars(args)))\n\n if not osp.isdir(\"checkpoints\"):\n os.makedirs(\"checkpoints\")\n\n trainer = pt_utils.Trainer(\n model,\n model_fn,\n optimizer,\n checkpoint_name=\"checkpoints/rscnn_cls\",\n best_name=\"checkpoints/rscnn_cls_best\",\n lr_scheduler=lr_scheduler,\n bnm_scheduler=bnm_scheduler,\n viz=viz,\n )\n\n trainer.train(\n it, start_epoch, args.epochs, train_loader, test_loader, best_loss=best_loss\n )\n\n if start_epoch == args.epochs:\n _ = trainer.eval_epoch(test_loader)\n","sub_path":"ClassArch2/train/teacher_train.py","file_name":"teacher_train.py","file_ext":"py","file_size_in_byte":5218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"6394188","text":"import tcod as libtcod\nfrom map_objects.game_map import CGameMap\nfrom constants import Const as C\n\n\nclass CGVars:\n def __init__(self):\n self.timer = 0\n self.events = []\n self.screen_width = 72\n self.screen_height = 50\n\n self.map_width = 50\n self.map_height = 50\n\n self.inv_x = self.map_width // 2 - 13\n self.inv_y = self.map_height //2 - 13\n\n self.colors = {\n -4: libtcod.Color(255, 39, 39),\n -3: libtcod.Color(255, 85, 85),\n -2: libtcod.Color(255, 131, 131),\n -1: libtcod.Color(255, 180, 180),\n 0: libtcod.white,\n 1: libtcod.Color(208, 224, 255),\n 2: libtcod.Color(192, 213, 255),\n 3: libtcod.Color(154, 187, 255),\n 4: libtcod.Color(115, 161, 255),\n 5: libtcod.Color(95, 140, 255)\n }\n\n self.entities = []\n self.game_map = CGameMap(self.map_width, self.map_height)\n\n self.GAME_STATE = C.GS_PLAY\n\n self.redraw_map = True\n\n\ngv = CGVars()\n","sub_path":"source/global_vars.py","file_name":"global_vars.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"54559104","text":"#aggregationHelper.py:\ndef get_policy_for_claim_no(sno):\n pipeline = [\n {\n \"$match\": {\n \"sno\":sno\n }\n },\n {\n \"$lookup\": {\n \"from\" : \"policydata\",\n \"localField\" : \"policyNumber\",\n \"foreignField\" : \"policyNumber\",\n \"as\" : \"policyDataMatch\"\n }\n },\n {\n \"$project\": {\n \"policyDataMatchForSno\":{\"$arrayElemAt\": [ \"$policyDataMatch\", 0 ]},\n \"policyNumber\":\"$policyNumber\"\n }\n },\n {\n \"$project\": {\n \"customerEmail\":\"$policyDataMatchForSno.customerEmail\",\n \"policyNumber\":\"$policyNumber\"\n }\n }\n ]\n return pipeline\n\ndef get_weekly_premium():\n pipeline = [\n {\n 
\"$project\": {\n \"year\":{\"$year\":\"$datetime\"},\n \"month\":{\"$month\":\"$datetime\"},\n \"week\":{\"$week\":\"$datetime\"},\n \"policyType\":\"$policyType\",\n \"premium\":\"$premium\"\n }\n },\n {\n \"$group\": {\n \"_id\":{\n \"policyType\":\"$policyType\",\n \"year\":\"$year\",\n \"month\":\"$month\",\n \"week\":\"$week\",\n },\n \"premium\":{\"$sum\":\"$premium\"}\n }\n },\n {\n \"$sort\": {\n \"_id.policyType\":1,\n \"_id.year\":1,\n \"_id.month\":1,\n \"_id.week\":1\n }\n },\n {\n \"$group\": {\n \"_id\":{\"policyType\":\"$_id.policyType\"},\n \"premiumForDuration\":{\"$push\":{\"year\":\"$_id.year\",\"month\":\"$_id.month\",\"week\":\"$_id.week\",\"premium\":\"$premium\"}}\n }\n },\n {\n \"$project\": {\n \"policyType\":\"$_id.policyType\",\n \"premiumForDuration\":\"$premiumForDuration\"\n }\n },\n]\n return pipeline\n\ndef get_monthly_premium():\n pipeline = [\n {\n\t\t\t\"$project\": {\n\t\t\t \"year\":{\"$year\":\"$datetime\"},\n\t\t\t \"month\":{\"$month\":\"$datetime\"},\n\t\t\t \"policyType\":\"$policyType\",\n\t\t\t \"premium\":\"$premium\"\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"$group\": {\n\t\t\t \"_id\":{\n\t\t\t\"policyType\":\"$policyType\",\n\t\t\t\"year\":\"$year\",\n\t\t\t\"month\":\"$month\",\n\t\t\t },\n\t\t\t\"premium\":{\"$sum\":\"$premium\"}\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"$sort\": {\n\t\t\t\"_id.policyType\":1,\n\t\t\t\"_id.year\":1,\n\t\t\t\"_id.month\":1\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"$group\": {\n\t\t\t \"_id\":{\"policyType\":\"$_id.policyType\"},\n\t\t\t \"premiumForDuration\":{\"$push\":{\"year\":\"$_id.year\",\"month\":\"$_id.month\",\"premium\":\"$premium\"}}\n\t\t\t}\n\t\t},\n\t\t{\n\t\t\t\"$project\": {\n\t\t\t\"policyType\":\"$_id.policyType\",\n\t\t\t\"premiumForDuration\":\"$premiumForDuration\"\n\t\t\t}\n\t\t},\n\t] \n return pipeline ","sub_path":"python-flask-api/aggregationHelper.py","file_name":"aggregationHelper.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"634442588","text":"def testBig(subs,k):\n permStr(subs,k)\n return words[0]\n\nwords = []\ndef permStr(subs,k):\n if(k>0):\n for i,ch in enumerate(subs):\n new_subs = subs[0:i] + subs[i+1:]\n #print(new_subs)\n permStr(new_subs,k-1)\n else:\n #print(subs)\n if (len(words) == 0):\n words.append(subs)\n else:\n temp = []\n temp.append(words[0])\n temp.append(subs)\n temp.sort()\n words[0] = temp[1]\n del temp\n\n#x = 'zyxedcba'\nx = 'rim'\nz = testBig(x,2)\nprint(\"ip\"+\" \"+x)\nprint('result')\nprint(z)\n","sub_path":"dict_order_str.py","file_name":"dict_order_str.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"379188367","text":"import json\nimport socket\nimport threading\n\nimport conf\nfrom model.logger import logging, setup_logging\n\nsetup_logging()\n\"\"\"\n终端的 socket 接收模块\n\"\"\"\n\nsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nip = conf.params['platform_ip']\nport = conf.params['platform_port']\nterminal_id = None\n\n\ndef connect(recv_queue, send_queue, terminal_type):\n # 进行 socket 连接\n socket.connect((ip, port))\n socket.send(json.dumps({'type': terminal_type}).encode())\n try:\n buffer = socket.recv(1024).decode()\n obj = json.loads(buffer)\n if obj['ok']:\n # 绑定终端 id\n global terminal_id\n terminal_id = obj['id']\n thread = threading.Thread(\n target=receive_message_thread, args=(recv_queue, ))\n thread.start()\n thread = threading.Thread(\n 
target=send_message_thread, args=(send_queue, ))\n thread.start()\n else:\n logging.warning('[%s] 无法连接服务器' % terminal_id)\n print('[%s] 无法连接服务器' % terminal_id)\n except Exception:\n logging.warning('[%s] 无法从服务器获取数据' % terminal_id)\n print('[%s] 无法从服务器获取数据' % terminal_id)\n\n\ndef receive_message_thread(recv_queue):\n print(\"[%s] 接收线程启动\" % terminal_id)\n while True:\n try:\n buffer = socket.recv(1024).decode()\n obj = json.loads(buffer)\n recv_queue.put(obj)\n logging.info('[%s] receive message %s' % (terminal_id, obj))\n print('[%s] receive message %s' % (terminal_id, obj))\n except Exception:\n logging.warning('[%s] 无法从服务器获取数据' % terminal_id)\n print('[%s] 无法从服务器获取数据' % terminal_id)\n\n\ndef send_message_thread(send_queue):\n print(\"[%s] 发送线程启动\" % terminal_id)\n while True:\n try:\n message = send_queue.get()\n logging.info('[%s] send message to %s message %s' %\n (terminal_id, message['target'], message))\n print('[%s] send message to %s message %s' %\n (terminal_id, message['target'], message))\n socket.send(json.dumps(message).encode())\n send_queue.task_done()\n except Exception:\n logging.warning('[%s] 无法向服务器发送数据' % terminal_id)\n print('[%s] 无法向服务器发送数据' % terminal_id)\n","sub_path":"src/network/terminal/socket_client.py","file_name":"socket_client.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"615960464","text":"from django.conf.urls import patterns, include, url\nimport settings\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n #url(r'^$', 'css_django.views.home', name='home'),\n #url(r'^css_django/', include('css_django.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n)\n\nurlpatterns += patterns('',\n ('^pages/', include('django.contrib.flatpages.urls')),\n\t(r'css/(?P.*)$', 'django.views.static.serve',\n\t{'document_root': settings.STATIC_ROOT + 'templates/css'}),\n\t(r'images/(?P.*)$', 'django.views.static.serve', \n\t{'document_root': settings.STATIC_ROOT + 'templates/images'}),\n\t(r'js/(?P.*)$', 'django.views.static.serve',\n\t{'document_root': settings.STATIC_ROOT + 'templates/js'}),\n)\n","sub_path":"css_django/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"393543077","text":"#from optparse import OptionParser\nimport os\n\nfrom argparse import ArgumentParser\n\ndef parse_input(argv):\n\tif len(argv)<2:\n\t\targv = ['-h']\n\n\tparser = ArgumentParser(description='json validator to check given schema')\n\tparser.add_argument('-i', '--input', dest='input', default=\"\", help='input file or dir')\n\tparser.add_argument('-o', '--out_dir', dest='out_dir', default=\"\", help='')\n\tparser.add_argument('-s', '--schema', dest='json_schema', \n\t\tdefault=\"\", help='expected schema')\n\n\tadv_option = parser.add_argument_group(title='advanced options',\n\t\tdescription='should not be changed by users')\n\tadv_option.add_argument('--out_file_name', dest='out_file_name', default=\"json_report.txt\", help='')\n\n\targs = parser.parse_args(argv)\n\targs.input = args.input.split(\",\")\n\tif not type(args.input) is list:\n\t\targs.input = 
[args.input]\n\n\tif not args.out_dir:\n\t\targs.out_dir = os.path.dirname(args.input[0])\n\n\treturn args","sub_path":"src/json_options.py","file_name":"json_options.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"356649531","text":"import pandas as pd\nimport numpy as np\nfrom scipy.sparse import csr_matrix\nimport time\n\ndef read_csv(csv):\n x = pd.read_csv(csv, sep = ',', encoding = 'utf-8')\n y = x.iloc[:,-1]\n x = x.iloc[:,0:3]\n X = np.array(x)\n Y = np.array(y)\n return X, Y\n\ndef read_normal_csv(csv):\n x = pd.read_csv(csv, sep = '\\:\\:', encoding = 'latin-1', engine='python')\n x = x.iloc[:,:]\n X = np.array(x)\n return X\n\ndef main():\n name_1 = input('ingrese el nombre de ratings: ')\n temp = time.time()\n X = read_normal_csv(name_1)\n read_time = time.time() - temp\n print('Tiempo de lectura: ', read_time)\n Xt = np.transpose(X)\n Sparsa_ = csr_matrix((Xt[2], (Xt[0].astype(int), Xt[1].astype(int))), dtype=np.int8).toarray()\n name_2 = input('ingrese el nombre de movies: ')\n movies = read_normal_csv(name_2)\n answer = []\n movie_pos = 0\n for i in range(len(Sparsa_)):\n if movie_pos < len(movies) and movies[movie_pos][0] == i:\n answer.append([movies[movie_pos][1], movies[movie_pos][2], Sparsa_[i]])\n movie_pos += 1\n else:\n answer.append([-1, -1, Sparsa_[i]])\n print(answer)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"ml-10M100K/sparsa.py","file_name":"sparsa.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"445135904","text":"from django.core.management.base import BaseCommand\nfrom app.models import Data\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument(\"input\")\n\n def handle(self, *args, **options):\n input = options[\"input\"]\n f_obj = open(input)\n count = 0\n recs = []\n for line in f_obj:\n count += 1\n line = line.strip()\n word, occur = line.split(\"\\t\")\n recs.append(Data(word=word, count=occur))\n if count == 500:\n Data.objects.bulk_create(recs)\n recs = []\n count = 0\n\n f_obj.close()\n","sub_path":"web/app/management/commands/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"476773510","text":"from random import choice\nfrom time import time\nfrom hashlib import sha512\n\n\nchars = [chr(i) for i in range(97, 97 + 26)] + [str(i) for i in range(10)]\n\ndef randstr(len = 6):\n res = str()\n for _ in range(len):\n res += choice(chars)\n return res\n\ndef make_query(args, methodName, open, secret):\n args['apiKey'] = open\n args['time'] = str(round(time()))\n\n args = sorted(args.items())\n\n rand = randstr()\n apiSig = rand + '/' + methodName + '?'\n for i in range(len(args)):\n apiSig += args[i][0]\n apiSig += '='\n apiSig += args[i][1]\n if i != len(args) - 1:\n apiSig += '&'\n\n apiSig += '#'\n apiSig += secret\n\n hash = sha512(apiSig.encode('utf-8'))\n args.append(('apiSig', rand + hash.hexdigest()))\n\n url = 'https://codeforces.com/api/{0}?'.format(methodName)\n\n for i in range(len(args)):\n url += args[i][0]\n url += '='\n url += args[i][1]\n if i != len(args) - 1:\n url += '&'\n\n return 
url\n","sub_path":"src/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"343107513","text":"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom functools import partial\nimport argparse\nimport os\nimport random\nimport time\n\nimport numpy as np\nimport paddle\nimport paddle.nn.functional as F\nimport paddlenlp as ppnlp\nfrom paddlenlp.data import Stack, Tuple, Pad\nfrom paddlenlp.datasets import load_dataset\nfrom paddlenlp.transformers import LinearDecayWithWarmup\n\nfrom utils import convert_example\n\n# yapf: disable\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--save_dir\", default='./checkpoint', type=str, help=\"The output directory where the model checkpoints will be written.\")\nparser.add_argument(\"--max_seq_length\", default=128, type=int, help=\"The maximum total input sequence length after tokenization. \"\n \"Sequences longer than this will be truncated, sequences shorter will be padded.\")\nparser.add_argument(\"--batch_size\", default=32, type=int, help=\"Batch size per GPU/CPU for training.\")\nparser.add_argument(\"--learning_rate\", default=5e-5, type=float, help=\"The initial learning rate for Adam.\")\nparser.add_argument(\"--weight_decay\", default=0.0, type=float, help=\"Weight decay if we apply some.\")\nparser.add_argument(\"--epochs\", default=3, type=int, help=\"Total number of training epochs to perform.\")\nparser.add_argument(\"--warmup_proportion\", default=0.0, type=float, help=\"Linear warmup proption over the training process.\")\nparser.add_argument(\"--init_from_ckpt\", type=str, default=None, help=\"The path of checkpoint to be loaded.\")\nparser.add_argument(\"--seed\", type=int, default=1000, help=\"random seed for initialization\")\nparser.add_argument('--device', choices=['cpu', 'gpu', 'xpu'], default=\"gpu\", help=\"Select which device to train model, defaults to gpu.\")\nargs = parser.parse_args()\n# yapf: enable\n\n\ndef set_seed(seed):\n \"\"\"sets random seed\"\"\"\n random.seed(seed)\n np.random.seed(seed)\n paddle.seed(seed)\n\n\n@paddle.no_grad()\ndef evaluate(model, criterion, metric, data_loader):\n \"\"\"\n Given a dataset, it evals model and computes the metric.\n\n Args:\n model(obj:`paddle.nn.Layer`): A model to classify texts.\n data_loader(obj:`paddle.io.DataLoader`): The dataset loader which generates batches.\n criterion(obj:`paddle.nn.Layer`): It can compute the loss.\n metric(obj:`paddle.metric.Metric`): The evaluation metric.\n \"\"\"\n model.eval()\n metric.reset()\n losses = []\n for batch in data_loader:\n input_ids, token_type_ids, labels = batch\n logits = model(input_ids, token_type_ids)\n loss = criterion(logits, labels)\n losses.append(loss.numpy())\n correct = metric.compute(logits, labels)\n metric.update(correct)\n accu = metric.accumulate()\n print(\"eval loss: %.5f, accu: %.5f\" % (np.mean(losses), accu))\n 
model.train()\n metric.reset()\n\n\ndef create_dataloader(dataset,\n mode='train',\n batch_size=1,\n batchify_fn=None,\n trans_fn=None):\n if trans_fn:\n dataset = dataset.map(trans_fn)\n\n shuffle = True if mode == 'train' else False\n if mode == 'train':\n batch_sampler = paddle.io.DistributedBatchSampler(\n dataset, batch_size=batch_size, shuffle=shuffle)\n else:\n batch_sampler = paddle.io.BatchSampler(\n dataset, batch_size=batch_size, shuffle=shuffle)\n\n return paddle.io.DataLoader(\n dataset=dataset,\n batch_sampler=batch_sampler,\n collate_fn=batchify_fn,\n return_list=True)\n\n\ndef do_train():\n paddle.set_device(args.device)\n rank = paddle.distributed.get_rank()\n if paddle.distributed.get_world_size() > 1:\n paddle.distributed.init_parallel_env()\n\n set_seed(args.seed)\n\n train_ds, dev_ds = load_dataset(\"chnsenticorp\", splits=[\"train\", \"dev\"])\n\n # If you want to use a bert/roberta/electra pretrained model,\n # model = ppnlp.transformers.BertForSequenceClassification.from_pretrained('bert-base-chinese', num_classes=2)\n # model = ppnlp.transformers.RobertaForSequenceClassification.from_pretrained('roberta-wwm-ext', num_classes=2)\n # model = ppnlp.transformers.ElectraForSequenceClassification.from_pretrained('chinese-electra-small', num_classes=2)\n model = ppnlp.transformers.ErnieForSequenceClassification.from_pretrained(\n 'ernie-tiny', num_classes=len(train_ds.label_list))\n\n # If you want to use a bert/roberta/electra pretrained model,\n # tokenizer = ppnlp.transformers.BertTokenizer.from_pretrained('bert-base-chinese')\n # tokenizer = ppnlp.transformers.RobertaTokenizer.from_pretrained('roberta-wwm-ext')\n # tokenizer = ppnlp.transformers.ElectraTokenizer.from_pretrained('chinese-electra-small')\n # ErnieTinyTokenizer is special for the ernie-tiny pretrained model.\n tokenizer = ppnlp.transformers.ErnieTinyTokenizer.from_pretrained(\n 'ernie-tiny')\n\n trans_func = partial(\n convert_example,\n tokenizer=tokenizer,\n max_seq_length=args.max_seq_length)\n batchify_fn = lambda samples, fn=Tuple(\n Pad(axis=0, pad_val=tokenizer.pad_token_id), # input\n Pad(axis=0, pad_val=tokenizer.pad_token_type_id), # segment\n Stack(dtype=\"int64\") # label\n ): [data for data in fn(samples)]\n train_data_loader = create_dataloader(\n train_ds,\n mode='train',\n batch_size=args.batch_size,\n batchify_fn=batchify_fn,\n trans_fn=trans_func)\n dev_data_loader = create_dataloader(\n dev_ds,\n mode='dev',\n batch_size=args.batch_size,\n batchify_fn=batchify_fn,\n trans_fn=trans_func)\n\n if args.init_from_ckpt and os.path.isfile(args.init_from_ckpt):\n state_dict = paddle.load(args.init_from_ckpt)\n model.set_dict(state_dict)\n model = paddle.DataParallel(model)\n\n num_training_steps = len(train_data_loader) * args.epochs\n\n lr_scheduler = LinearDecayWithWarmup(args.learning_rate, num_training_steps,\n args.warmup_proportion)\n\n # Generate parameter names needed to perform weight decay.\n # All bias and LayerNorm parameters are excluded.\n decay_params = [\n p.name for n, p in model.named_parameters()\n if not any(nd in n for nd in [\"bias\", \"norm\"])\n ]\n optimizer = paddle.optimizer.AdamW(\n learning_rate=lr_scheduler,\n parameters=model.parameters(),\n weight_decay=args.weight_decay,\n apply_decay_param_fun=lambda x: x in decay_params)\n\n criterion = paddle.nn.loss.CrossEntropyLoss()\n metric = paddle.metric.Accuracy()\n\n global_step = 0\n tic_train = time.time()\n for epoch in range(1, args.epochs + 1):\n for step, batch in enumerate(train_data_loader, start=1):\n 
input_ids, token_type_ids, labels = batch\n logits = model(input_ids, token_type_ids)\n loss = criterion(logits, labels)\n probs = F.softmax(logits, axis=1)\n correct = metric.compute(probs, labels)\n metric.update(correct)\n acc = metric.accumulate()\n\n global_step += 1\n if global_step % 10 == 0 and rank == 0:\n print(\n \"global step %d, epoch: %d, batch: %d, loss: %.5f, accu: %.5f, speed: %.2f step/s\"\n % (global_step, epoch, step, loss, acc,\n 10 / (time.time() - tic_train)))\n tic_train = time.time()\n loss.backward()\n optimizer.step()\n lr_scheduler.step()\n optimizer.clear_grad()\n if global_step % 100 == 0 and rank == 0:\n save_dir = os.path.join(args.save_dir, \"model_%d\" % global_step)\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n evaluate(model, criterion, metric, dev_data_loader)\n model._layers.save_pretrained(save_dir)\n tokenizer.save_pretrained(save_dir)\n\n\nif __name__ == \"__main__\":\n do_train()\n","sub_path":"Paddle_ChineseBert/PaddleNLP/examples/text_classification/pretrained_models/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"461021377","text":"import sys\n_module = sys.modules[__name__]\ndel sys\nRandAugment = _module\naugmentations = _module\ncommon = _module\ndata = _module\nimagenet = _module\nlr_scheduler = _module\nmetrics = _module\nnetworks = _module\npyramidnet = _module\nresnet = _module\nshakedrop = _module\nshakeshake = _module\nshake_resnet = _module\nshake_resnext = _module\nshakeshake = _module\nwideresnet = _module\nsmooth_ce = _module\ntrain = _module\nsetup = _module\n\nfrom _paritybench_helpers import _mock_config, patch_functional\nfrom unittest.mock import mock_open, MagicMock\nfrom torch.autograd import Function\nfrom torch.nn import Module\nimport abc, collections, copy, enum, functools, inspect, itertools, logging, math, matplotlib, numbers, numpy, pandas, queue, random, re, scipy, sklearn, string, tensorflow, time, torch, torchaudio, torchtext, torchvision, types, typing, uuid, warnings\nimport numpy as np\nfrom torch import Tensor\npatch_functional()\nopen = mock_open()\nyaml = logging = sys = argparse = MagicMock()\nArgumentParser = argparse.ArgumentParser\n_global_config = args = argv = cfg = config = params = _mock_config()\nargparse.ArgumentParser.return_value.parse_args.return_value = _global_config\nyaml.load.return_value = _global_config\nsys.argv = _global_config\n__version__ = '1.0.0'\nxrange = range\nwraps = functools.wraps\n\n\nimport random\n\n\nimport numpy as np\n\n\nimport torch\n\n\nimport logging\n\n\nimport torchvision\n\n\nfrom torch.utils.data import SubsetRandomSampler\n\n\nfrom torch.utils.data import Sampler\n\n\nfrom torch.utils.data.dataset import ConcatDataset\n\n\nfrom torchvision.transforms import transforms\n\n\nfrom sklearn.model_selection import StratifiedShuffleSplit\n\n\nfrom torchvision.datasets.utils import check_integrity\n\n\nfrom torchvision.datasets.utils import download_url\n\n\nimport copy\n\n\nfrom collections import defaultdict\n\n\nfrom torch import nn\n\n\nfrom torch.nn import DataParallel\n\n\nimport torch.backends.cudnn as cudnn\n\n\nimport torch.nn as nn\n\n\nimport math\n\n\nimport torch.nn.functional as F\n\n\nfrom torch.autograd import Variable\n\n\nimport torch.nn.init as init\n\n\nfrom torch.nn.modules.module import Module\n\n\nimport itertools\n\n\nfrom collections import OrderedDict\n\n\nfrom torch import optim\n\n\nfrom 
torch.nn.parallel.data_parallel import DataParallel\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.conv2(out)\n out = self.bn2(out)\n if self.downsample is not None:\n residual = self.downsample(x)\n out += residual\n out = self.relu(out)\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * Bottleneck.expansion, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * Bottleneck.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n out = self.conv3(out)\n out = self.bn3(out)\n if self.downsample is not None:\n residual = self.downsample(x)\n out += residual\n out = self.relu(out)\n return out\n\n\nclass PyramidNet(nn.Module):\n\n def __init__(self, dataset, depth, alpha, num_classes, bottleneck=True):\n super(PyramidNet, self).__init__()\n self.dataset = dataset\n if self.dataset.startswith('cifar'):\n self.inplanes = 16\n if bottleneck:\n n = int((depth - 2) / 9)\n block = Bottleneck\n else:\n n = int((depth - 2) / 6)\n block = BasicBlock\n self.addrate = alpha / (3 * n * 1.0)\n self.ps_shakedrop = [(1.0 - (1.0 - 0.5 / (3 * n) * (i + 1))) for i in range(3 * n)]\n self.input_featuremap_dim = self.inplanes\n self.conv1 = nn.Conv2d(3, self.input_featuremap_dim, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(self.input_featuremap_dim)\n self.featuremap_dim = self.input_featuremap_dim\n self.layer1 = self.pyramidal_make_layer(block, n)\n self.layer2 = self.pyramidal_make_layer(block, n, stride=2)\n self.layer3 = self.pyramidal_make_layer(block, n, stride=2)\n self.final_featuremap_dim = self.input_featuremap_dim\n self.bn_final = nn.BatchNorm2d(self.final_featuremap_dim)\n self.relu_final = nn.ReLU(inplace=True)\n self.avgpool = nn.AvgPool2d(8)\n self.fc = nn.Linear(self.final_featuremap_dim, num_classes)\n elif dataset == 'imagenet':\n blocks = {(18): BasicBlock, (34): BasicBlock, (50): Bottleneck, (101): Bottleneck, (152): Bottleneck, (200): Bottleneck}\n layers = {(18): [2, 2, 2, 2], (34): [3, 4, 6, 3], (50): [3, 4, 6, 3], (101): [3, 4, 23, 3], (152): [3, 8, 36, 3], (200): [3, 24, 36, 3]}\n if layers.get(depth) is None:\n if bottleneck == True:\n blocks[depth] = Bottleneck\n temp_cfg = int((depth - 2) / 12)\n else:\n blocks[depth] = BasicBlock\n temp_cfg = int((depth - 2) / 8)\n layers[depth] = [temp_cfg, temp_cfg, temp_cfg, 
temp_cfg]\n None\n self.inplanes = 64\n self.addrate = alpha / (sum(layers[depth]) * 1.0)\n self.input_featuremap_dim = self.inplanes\n self.conv1 = nn.Conv2d(3, self.input_featuremap_dim, kernel_size=7, stride=2, padding=3, bias=False)\n self.bn1 = nn.BatchNorm2d(self.input_featuremap_dim)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.featuremap_dim = self.input_featuremap_dim\n self.layer1 = self.pyramidal_make_layer(blocks[depth], layers[depth][0])\n self.layer2 = self.pyramidal_make_layer(blocks[depth], layers[depth][1], stride=2)\n self.layer3 = self.pyramidal_make_layer(blocks[depth], layers[depth][2], stride=2)\n self.layer4 = self.pyramidal_make_layer(blocks[depth], layers[depth][3], stride=2)\n self.final_featuremap_dim = self.input_featuremap_dim\n self.bn_final = nn.BatchNorm2d(self.final_featuremap_dim)\n self.relu_final = nn.ReLU(inplace=True)\n self.avgpool = nn.AvgPool2d(7)\n self.fc = nn.Linear(self.final_featuremap_dim, num_classes)\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2.0 / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n assert len(self.ps_shakedrop) == 0, self.ps_shakedrop\n\n def pyramidal_make_layer(self, block, block_depth, stride=1):\n downsample = None\n if stride != 1:\n downsample = nn.AvgPool2d((2, 2), stride=(2, 2), ceil_mode=True)\n layers = []\n self.featuremap_dim = self.featuremap_dim + self.addrate\n layers.append(block(self.input_featuremap_dim, int(round(self.featuremap_dim)), stride, downsample, p_shakedrop=self.ps_shakedrop.pop(0)))\n for i in range(1, block_depth):\n temp_featuremap_dim = self.featuremap_dim + self.addrate\n layers.append(block(int(round(self.featuremap_dim)) * block.outchannel_ratio, int(round(temp_featuremap_dim)), 1, p_shakedrop=self.ps_shakedrop.pop(0)))\n self.featuremap_dim = temp_featuremap_dim\n self.input_featuremap_dim = int(round(self.featuremap_dim)) * block.outchannel_ratio\n return nn.Sequential(*layers)\n\n def forward(self, x):\n if self.dataset == 'cifar10' or self.dataset == 'cifar100':\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.bn_final(x)\n x = self.relu_final(x)\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n elif self.dataset == 'imagenet':\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n x = self.bn_final(x)\n x = self.relu_final(x)\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n return x\n\n\nclass ResNet(nn.Module):\n\n def __init__(self, dataset, depth, num_classes, bottleneck=False):\n super(ResNet, self).__init__()\n self.dataset = dataset\n if self.dataset.startswith('cifar'):\n self.inplanes = 16\n None\n if bottleneck == True:\n n = int((depth - 2) / 9)\n block = Bottleneck\n else:\n n = int((depth - 2) / 6)\n block = BasicBlock\n self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(self.inplanes)\n self.relu = nn.ReLU(inplace=True)\n self.layer1 = self._make_layer(block, 16, n)\n self.layer2 = self._make_layer(block, 32, n, stride=2)\n self.layer3 = self._make_layer(block, 64, n, stride=2)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(64 * block.expansion, num_classes)\n elif 
dataset == 'imagenet':\n blocks = {(18): BasicBlock, (34): BasicBlock, (50): Bottleneck, (101): Bottleneck, (152): Bottleneck, (200): Bottleneck}\n layers = {(18): [2, 2, 2, 2], (34): [3, 4, 6, 3], (50): [3, 4, 6, 3], (101): [3, 4, 23, 3], (152): [3, 8, 36, 3], (200): [3, 24, 36, 3]}\n assert layers[depth], 'invalid detph for ResNet (depth should be one of 18, 34, 50, 101, 152, and 200)'\n self.inplanes = 64\n self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(blocks[depth], 64, layers[depth][0])\n self.layer2 = self._make_layer(blocks[depth], 128, layers[depth][1], stride=2)\n self.layer3 = self._make_layer(blocks[depth], 256, layers[depth][2], stride=2)\n self.layer4 = self._make_layer(blocks[depth], 512, layers[depth][3], stride=2)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(512 * blocks[depth].expansion, num_classes)\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2.0 / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d(planes * block.expansion))\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n return nn.Sequential(*layers)\n\n def forward(self, x):\n if self.dataset == 'cifar10' or self.dataset == 'cifar100':\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n elif self.dataset == 'imagenet':\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n return x\n\n\nclass ShakeDropFunction(torch.autograd.Function):\n\n @staticmethod\n def forward(ctx, x, training=True, p_drop=0.5, alpha_range=[-1, 1]):\n if training:\n gate = torch.FloatTensor([0]).bernoulli_(1 - p_drop)\n ctx.save_for_backward(gate)\n if gate.item() == 0:\n alpha = torch.FloatTensor(x.size(0)).uniform_(*alpha_range)\n alpha = alpha.view(alpha.size(0), 1, 1, 1).expand_as(x)\n return alpha * x\n else:\n return x\n else:\n return (1 - p_drop) * x\n\n @staticmethod\n def backward(ctx, grad_output):\n gate = ctx.saved_tensors[0]\n if gate.item() == 0:\n beta = torch.FloatTensor(grad_output.size(0)).uniform_(0, 1)\n beta = beta.view(beta.size(0), 1, 1, 1).expand_as(grad_output)\n beta = Variable(beta)\n return beta * grad_output, None, None, None\n else:\n return grad_output, None, None, None\n\n\nclass ShakeDrop(nn.Module):\n\n def __init__(self, p_drop=0.5, alpha_range=[-1, 1]):\n super(ShakeDrop, self).__init__()\n self.p_drop = p_drop\n self.alpha_range = alpha_range\n\n def forward(self, x):\n return ShakeDropFunction.apply(x, self.training, self.p_drop, self.alpha_range)\n\n\nclass ShakeShake(torch.autograd.Function):\n\n 
@staticmethod\n def forward(ctx, x1, x2, training=True):\n if training:\n alpha = torch.FloatTensor(x1.size(0)).uniform_()\n alpha = alpha.view(alpha.size(0), 1, 1, 1).expand_as(x1)\n else:\n alpha = 0.5\n return alpha * x1 + (1 - alpha) * x2\n\n @staticmethod\n def backward(ctx, grad_output):\n beta = torch.FloatTensor(grad_output.size(0)).uniform_()\n beta = beta.view(beta.size(0), 1, 1, 1).expand_as(grad_output)\n beta = Variable(beta)\n return beta * grad_output, (1 - beta) * grad_output, None\n\n\nclass Shortcut(nn.Module):\n\n def __init__(self, in_ch, out_ch, stride):\n super(Shortcut, self).__init__()\n self.stride = stride\n self.conv1 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0, bias=False)\n self.conv2 = nn.Conv2d(in_ch, out_ch // 2, 1, stride=1, padding=0, bias=False)\n self.bn = nn.BatchNorm2d(out_ch)\n\n def forward(self, x):\n h = F.relu(x)\n h1 = F.avg_pool2d(h, 1, self.stride)\n h1 = self.conv1(h1)\n h2 = F.avg_pool2d(F.pad(h, (-1, 1, -1, 1)), 1, self.stride)\n h2 = self.conv2(h2)\n h = torch.cat((h1, h2), 1)\n return self.bn(h)\n\n\nclass ShakeBlock(nn.Module):\n\n def __init__(self, in_ch, out_ch, stride=1):\n super(ShakeBlock, self).__init__()\n self.equal_io = in_ch == out_ch\n self.shortcut = self.equal_io and None or Shortcut(in_ch, out_ch, stride=stride)\n self.branch1 = self._make_branch(in_ch, out_ch, stride)\n self.branch2 = self._make_branch(in_ch, out_ch, stride)\n\n def forward(self, x):\n h1 = self.branch1(x)\n h2 = self.branch2(x)\n h = ShakeShake.apply(h1, h2, self.training)\n h0 = x if self.equal_io else self.shortcut(x)\n return h + h0\n\n def _make_branch(self, in_ch, out_ch, stride=1):\n return nn.Sequential(nn.ReLU(inplace=False), nn.Conv2d(in_ch, out_ch, 3, padding=1, stride=stride, bias=False), nn.BatchNorm2d(out_ch), nn.ReLU(inplace=False), nn.Conv2d(out_ch, out_ch, 3, padding=1, stride=1, bias=False), nn.BatchNorm2d(out_ch))\n\n\nclass ShakeResNet(nn.Module):\n\n def __init__(self, depth, w_base, label):\n super(ShakeResNet, self).__init__()\n n_units = (depth - 2) / 6\n in_chs = [16, w_base, w_base * 2, w_base * 4]\n self.in_chs = in_chs\n self.c_in = nn.Conv2d(3, in_chs[0], 3, padding=1)\n self.layer1 = self._make_layer(n_units, in_chs[0], in_chs[1])\n self.layer2 = self._make_layer(n_units, in_chs[1], in_chs[2], 2)\n self.layer3 = self._make_layer(n_units, in_chs[2], in_chs[3], 2)\n self.fc_out = nn.Linear(in_chs[3], label)\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2.0 / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.bias.data.zero_()\n\n def forward(self, x):\n h = self.c_in(x)\n h = self.layer1(h)\n h = self.layer2(h)\n h = self.layer3(h)\n h = F.relu(h)\n h = F.avg_pool2d(h, 8)\n h = h.view(-1, self.in_chs[3])\n h = self.fc_out(h)\n return h\n\n def _make_layer(self, n_units, in_ch, out_ch, stride=1):\n layers = []\n for i in range(int(n_units)):\n layers.append(ShakeBlock(in_ch, out_ch, stride=stride))\n in_ch, stride = out_ch, 1\n return nn.Sequential(*layers)\n\n\nclass ShakeBottleNeck(nn.Module):\n\n def __init__(self, in_ch, mid_ch, out_ch, cardinary, stride=1):\n super(ShakeBottleNeck, self).__init__()\n self.equal_io = in_ch == out_ch\n self.shortcut = None if self.equal_io else Shortcut(in_ch, out_ch, stride=stride)\n self.branch1 = self._make_branch(in_ch, mid_ch, out_ch, cardinary, stride)\n self.branch2 = self._make_branch(in_ch, 
mid_ch, out_ch, cardinary, stride)\n\n def forward(self, x):\n h1 = self.branch1(x)\n h2 = self.branch2(x)\n h = ShakeShake.apply(h1, h2, self.training)\n h0 = x if self.equal_io else self.shortcut(x)\n return h + h0\n\n def _make_branch(self, in_ch, mid_ch, out_ch, cardinary, stride=1):\n return nn.Sequential(nn.Conv2d(in_ch, mid_ch, 1, padding=0, bias=False), nn.BatchNorm2d(mid_ch), nn.ReLU(inplace=False), nn.Conv2d(mid_ch, mid_ch, 3, padding=1, stride=stride, groups=cardinary, bias=False), nn.BatchNorm2d(mid_ch), nn.ReLU(inplace=False), nn.Conv2d(mid_ch, out_ch, 1, padding=0, bias=False), nn.BatchNorm2d(out_ch))\n\n\nclass ShakeResNeXt(nn.Module):\n\n def __init__(self, depth, w_base, cardinary, label):\n super(ShakeResNeXt, self).__init__()\n n_units = (depth - 2) // 9\n n_chs = [64, 128, 256, 1024]\n self.n_chs = n_chs\n self.in_ch = n_chs[0]\n self.c_in = nn.Conv2d(3, n_chs[0], 3, padding=1)\n self.layer1 = self._make_layer(n_units, n_chs[0], w_base, cardinary)\n self.layer2 = self._make_layer(n_units, n_chs[1], w_base, cardinary, 2)\n self.layer3 = self._make_layer(n_units, n_chs[2], w_base, cardinary, 2)\n self.fc_out = nn.Linear(n_chs[3], label)\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2.0 / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.bias.data.zero_()\n\n def forward(self, x):\n h = self.c_in(x)\n h = self.layer1(h)\n h = self.layer2(h)\n h = self.layer3(h)\n h = F.relu(h)\n h = F.avg_pool2d(h, 8)\n h = h.view(-1, self.n_chs[3])\n h = self.fc_out(h)\n return h\n\n def _make_layer(self, n_units, n_ch, w_base, cardinary, stride=1):\n layers = []\n mid_ch, out_ch = n_ch * (w_base // 64) * cardinary, n_ch * 4\n for i in range(n_units):\n layers.append(ShakeBottleNeck(self.in_ch, mid_ch, out_ch, cardinary, stride=stride))\n self.in_ch, stride = out_ch, 1\n return nn.Sequential(*layers)\n\n\n_bn_momentum = 0.1\n\n\nclass WideBasic(nn.Module):\n\n def __init__(self, in_planes, planes, dropout_rate, stride=1):\n super(WideBasic, self).__init__()\n self.bn1 = nn.BatchNorm2d(in_planes, momentum=_bn_momentum)\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, bias=True)\n self.dropout = nn.Dropout(p=dropout_rate)\n self.bn2 = nn.BatchNorm2d(planes, momentum=_bn_momentum)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != planes:\n self.shortcut = nn.Sequential(nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True))\n\n def forward(self, x):\n out = self.dropout(self.conv1(F.relu(self.bn1(x))))\n out = self.conv2(F.relu(self.bn2(out)))\n out += self.shortcut(x)\n return out\n\n\nclass WideResNet(nn.Module):\n\n def __init__(self, depth, widen_factor, dropout_rate, num_classes):\n super(WideResNet, self).__init__()\n self.in_planes = 16\n assert (depth - 4) % 6 == 0, 'Wide-resnet depth should be 6n+4'\n n = int((depth - 4) / 6)\n k = widen_factor\n nStages = [16, 16 * k, 32 * k, 64 * k]\n self.conv1 = conv3x3(3, nStages[0])\n self.layer1 = self._wide_layer(WideBasic, nStages[1], n, dropout_rate, stride=1)\n self.layer2 = self._wide_layer(WideBasic, nStages[2], n, dropout_rate, stride=2)\n self.layer3 = self._wide_layer(WideBasic, nStages[3], n, dropout_rate, stride=2)\n self.bn1 = nn.BatchNorm2d(nStages[3], momentum=_bn_momentum)\n self.linear = 
nn.Linear(nStages[3], num_classes)\n\n def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):\n strides = [stride] + [1] * (num_blocks - 1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, dropout_rate, stride))\n self.in_planes = planes\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = F.relu(self.bn1(out))\n out = F.adaptive_avg_pool2d(out, (1, 1))\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n return out\n\n\ndef cross_entropy(input, target, size_average=True):\n \"\"\" Cross entropy that accepts soft targets\n Args:\n pred: predictions for neural network\n targets: targets, can be soft\n size_average: if false, sum is returned instead of mean\n Examples::\n input = torch.FloatTensor([[1.1, 2.8, 1.3], [1.1, 2.1, 4.8]])\n input = torch.autograd.Variable(out, requires_grad=True)\n target = torch.FloatTensor([[0.05, 0.9, 0.05], [0.05, 0.05, 0.9]])\n target = torch.autograd.Variable(y1)\n loss = cross_entropy(input, target)\n loss.backward()\n \"\"\"\n logsoftmax = torch.nn.LogSoftmax(dim=1)\n if size_average:\n return torch.mean(torch.sum(-target * logsoftmax(input), dim=1))\n else:\n return torch.sum(torch.sum(-target * logsoftmax(input), dim=1))\n\n\nclass SmoothCrossEntropyLoss(Module):\n\n def __init__(self, label_smoothing=0.0, size_average=True):\n super().__init__()\n self.label_smoothing = label_smoothing\n self.size_average = size_average\n\n def forward(self, input, target):\n if len(target.size()) == 1:\n target = torch.nn.functional.one_hot(target, num_classes=input.size(-1))\n target = target.float()\n if self.label_smoothing > 0.0:\n s_by_c = self.label_smoothing / len(input[0])\n smooth = torch.zeros_like(target)\n smooth = smooth + s_by_c\n target = target * (1.0 - s_by_c) + smooth\n return cross_entropy(input, target, self.size_average)\n\n\nimport torch\nfrom torch.nn import MSELoss, ReLU\nfrom _paritybench_helpers import _mock_config, _mock_layer, _paritybench_base, _fails_compile\n\n\nTESTCASES = [\n # (nn.Module, init_args, forward_args, jit_compiles)\n (BasicBlock,\n lambda: ([], {'inplanes': 4, 'planes': 4}),\n lambda: ([torch.rand([4, 4, 4, 4])], {}),\n True),\n (ShakeBlock,\n lambda: ([], {'in_ch': 4, 'out_ch': 4}),\n lambda: ([torch.rand([4, 4, 4, 4])], {}),\n False),\n (ShakeBottleNeck,\n lambda: ([], {'in_ch': 4, 'mid_ch': 4, 'out_ch': 4, 'cardinary': 4}),\n lambda: ([torch.rand([4, 4, 4, 4])], {}),\n False),\n (ShakeDrop,\n lambda: ([], {}),\n lambda: ([torch.rand([4, 4, 4, 4])], {}),\n False),\n (ShakeResNeXt,\n lambda: ([], {'depth': 1, 'w_base': 4, 'cardinary': 4, 'label': 4}),\n lambda: ([torch.rand([4, 3, 64, 64])], {}),\n True),\n (ShakeResNet,\n lambda: ([], {'depth': 1, 'w_base': 4, 'label': 4}),\n lambda: ([torch.rand([4, 3, 64, 64])], {}),\n True),\n (Shortcut,\n lambda: ([], {'in_ch': 4, 'out_ch': 4, 'stride': 1}),\n lambda: ([torch.rand([4, 4, 4, 4])], {}),\n True),\n (SmoothCrossEntropyLoss,\n lambda: ([], {}),\n lambda: ([torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])], {}),\n False),\n (WideBasic,\n lambda: ([], {'in_planes': 4, 'planes': 4, 'dropout_rate': 0.5}),\n lambda: ([torch.rand([4, 4, 4, 4])], {}),\n True),\n]\n\nclass Test_ildoonet_pytorch_randaugment(_paritybench_base):\n def test_000(self):\n self._check(*TESTCASES[0])\n\n def test_001(self):\n self._check(*TESTCASES[1])\n\n def test_002(self):\n self._check(*TESTCASES[2])\n\n def test_003(self):\n 
self._check(*TESTCASES[3])\n\n def test_004(self):\n self._check(*TESTCASES[4])\n\n def test_005(self):\n self._check(*TESTCASES[5])\n\n def test_006(self):\n self._check(*TESTCASES[6])\n\n def test_007(self):\n self._check(*TESTCASES[7])\n\n def test_008(self):\n self._check(*TESTCASES[8])\n\n","sub_path":"generated/test_ildoonet_pytorch_randaugment.py","file_name":"test_ildoonet_pytorch_randaugment.py","file_ext":"py","file_size_in_byte":27308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"547303380","text":"feast = ['lambs',\n 'sloths',\n 'orangutans',\n 'breakfast cereals',\n 'fruit bats']\n\ncomprehension = [delicacy.capitalize() for delicacy in feast]\n\n\n# prints \"Lambs\"\nprint(comprehension[0])\n\n# prints \"Orangutans\"\nprint(comprehension[2])\n\ncomp = [delicacy for delicacy in feast if len(delicacy) > 6]\n\n# prints 5\nprint(len(feast))\n\n# prints 3\nprint(len(comp))\n\nlist_of_tuples = [(1, 'lumberjack'), (2, 'inquisition'), (4, 'spam')]\n\ncomprehension = [skit * number for number, skit in list_of_tuples]\n\n# prints \"lumberjack\"\nprint(comprehension[0])\n\n# prints \"spamspamspamspam\"\nprint(len(comprehension[2]))\n\neggs = ['poached egg', 'fried egg']\n\nmeats = ['lite spam', 'ham spam', 'fried spam']\n\ncomprehension = ['{0} and {1}'.format(egg, meat) for egg in eggs for meat in meats]\n\n# prints 6\nprint(len(comprehension))\n\n# prints \"poached egg and lite spam\"\nprint(comprehension[0])\n\ncomprehension = {x for x in 'aabbbcccc'}\n\n# prints {'a', 'b', 'c'}\nprint(comprehension)\n\ndict_of_weapons = {'first': 'fear',\n 'second': 'surprise',\n 'third': 'ruthless efficiency',\n 'forth': 'fanatical devotion',\n 'fifth': None}\n\ndict_comprehension = {k.upper(): weapon for k, weapon in dict_of_weapons.items() if weapon}\n\n# prints False\nprint('first' in dict_comprehension)\n\n# prints True\nprint('FIRST' in dict_comprehension)\n\n# prints 5\nprint(len(dict_of_weapons))\n\n# prints 4\nprint(len(dict_comprehension))\n\n\ndef count_evens(arr):\n return len([x for x in arr if x % 2 == 0])\n\n\nfood_prefs = {\"name\": \"Chris\",\n \"city\": \"Seattle\",\n \"cake\": \"chocolate\",\n \"fruit\": \"mango\",\n \"salad\": \"greek\",\n \"pasta\": \"lasagna\"}\n\n# prints “Chris is from Seattle, and he likes chocolate cake, mango fruit, greek salad, and lasagna pasta”\nprint(\"{} is from {}, and he likes {} cake, {} fruit, {} salad, and {} pasta.\"\n .format(food_prefs[\"name\"], food_prefs[\"city\"], food_prefs[\"cake\"], food_prefs[\"fruit\"], food_prefs[\"salad\"], food_prefs[\"pasta\"]))\n\nhexed_dictionary = {x:hex(x) for x in range(16)}\nprint(hexed_dictionary)\n\na_food_prefs = {k:\"a\"*len(v) for k, v in food_prefs.items()}\nprint(a_food_prefs)\n\ns2 = set([x for x in range(21) if x % 2 == 0])\nprint(s2)\n\ns3 = set([x for x in range(21) if x % 3 == 0])\nprint(s3)\n\ns4 = set([x for x in range(21) if x % 4 == 0])\nprint(s4)\n\nsets = [s2, s3, s4]\n\nnested_set = [set([x for x in range(21) if x % n == 0]) for n in range(2, 5)]\nprint(nested_set)\n","sub_path":"students/cowhey/session05/comprehensions.py","file_name":"comprehensions.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"463101569","text":"import os\nimport jinja2\nimport webapp2\nfrom mwEntities import Quote\nfrom mwEntities import Category\nfrom mwEntities import quoteExists\nfrom mwEntities import getCategoryKey\nfrom google.appengine.ext import 
ndb\nfrom google.appengine.api import users\n\nJINJA_ENVIRONMENT = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),\n extensions=['jinja2.ext.autoescape'],\n autoescape=True)\n \nadminIDs = ['185804764220139124118', '106916172306015446665']\n\nclass QuoteDeleteHandler(webapp2.RequestHandler):\n def get(self):\n try:\n user = users.get_current_user()\n if user.user_id() in adminIDs or user.user_id() == oldQuote.userID:\n messageText = \"You do not have permissions to delete that.\"\n else:\n quoteKey = self.request.get(\"key\")\n quoteKey = ndb.Key(urlsafe=quoteKey)\n quoteKey.delete();\n messageText = \"The quote was deleted.\"\n except:\n messageText = \"There was an error editing the quote.\"\n \n template = JINJA_ENVIRONMENT.get_template('quoteSubmit.html')\n self.response.write(template.render({'messageText': messageText}))\n\n\nclass QuoteEditHandler(webapp2.RequestHandler):\n def post(self):\n try:\n quoteKey = self.request.get(\"key\")\n quoteKey = ndb.Key(urlsafe=quoteKey)\n except:\n messageText = \"There was an error editing the quote.\"\n \n user = users.get_current_user()\n oldQuote = quoteKey.get()\n if user.user_id() not in adminIDs and user.user_id() != oldQuote.userID: \n messageText = \"You do not have permission to edit that quote\" \n else: \n author = self.request.get(\"author\")\n quote = self.request.get(\"quote\")\n messageText = \"Error\"\n if not author or not quote:\n messageText = \"You are missing the quote or author text.\"\n else:\n category = self.request.get(\"category\")\n try:\n categoryKey = getCategoryKey(category)\n quoteKey.delete();\n newQuote = Quote(quote=quote,\n parent=categoryKey,\n userID=user.user_id(),\n author=author)\n newQuote.put()\n messageText = \"You edited the quote '%s' by the author '%s' at (%s)\" % (quote, author, newQuote.createdTime.strftime(\"%m-%d-%y %H:%M\"))\n except:\n messageText = \"That category is not in the database.\"\n template = JINJA_ENVIRONMENT.get_template('quoteSubmit.html')\n self.response.write(template.render({'messageText': messageText}))\n\nclass QuoteSubmitHandler(webapp2.RequestHandler):\n def post(self):\n author = self.request.get(\"author\")\n quote = self.request.get(\"quote\")\n messageText = \"Error\"\n if not author or not quote:\n messageText = \"You are missing the quote or author text.\"\n else:\n if quoteExists(quote):\n messageText = \"That quote is already in the database.\"\n else:\n category = self.request.get(\"category\")\n try:\n if category == \"create\":\n category = self.request.get(\"newCategory\")\n newCategory = Category(name=category)\n categoryKey = newCategory.put()\n else:\n categoryKey = getCategoryKey(category)\n user = users.get_current_user()\n newQuote = Quote(quote=quote,\n parent=categoryKey,\n userID=user.user_id(),\n author=author)\n newQuote.put()\n messageText = \"You added the quote '%s' by the author '%s' at (%s)\" % (quote, author, newQuote.createdTime.strftime(\"%m-%d-%y %H:%M\"))\n except:\n messageText = \"That category is not in the database.\"\n template = JINJA_ENVIRONMENT.get_template('quoteSubmit.html')\n self.response.write(template.render({'messageText': messageText}))\n\n","sub_path":"quoteHandler.py","file_name":"quoteHandler.py","file_ext":"py","file_size_in_byte":4367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"63328504","text":"import csv\nimport sys\nsys.path.append(\"../tools/\")\n\nfrom work_data_proc import *\nfrom work_parameters import *\n\ndef 
export_to_csv(data_dict, filename, features):\n    with open(filename, "wb") as csvfile:\n        cwriter = csv.writer(csvfile, delimiter=',',quotechar='"', quoting=csv.QUOTE_MINIMAL)\n        cwriter.writerow(['Name','poi']+features)\n        for k, v in data_dict.items():\n            row = [k,v['poi']]\n            row = row + [v[feat] for feat in features]\n            cwriter.writerow(row)\n\nif __name__ == "__main__":\n    data_dict = load_data_set()\n    export_to_csv(data_dict, "csv/enron_2.csv", ana_features)","sub_path":"final_project/work_export_to_csv.py","file_name":"work_export_to_csv.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"615893968","text":"from math import gcd \r\np = int(input())\r\nq = int(input())\r\nplain = int(input())\r\nn = p*q \r\nphi = (p-1) * (q-1) \r\nfor e in range(2,phi): \r\n    if gcd(e,phi) == 1 : break \r\nfor i in range(1,phi): \r\n    x = i*phi + 1 \r\n    if x%e == 0:\r\n        d = int(x/e)\r\n        break \r\ncipher = pow(plain,e)%n \r\ndecipher = pow(cipher,d) %n","sub_path":"securityyy main/rsashort.py","file_name":"rsashort.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"326955961","text":"import os\nimport csv\n\nclass csvMerger(object):\n    \"\"\"\n    Take a directory of csv files and merge them into one file. The file will\n    be saved in the same directory.\n    A list of ignored_row_identifiers is supplied when object is created.\n    The list is compared to the second cell of the header row.\n    \"\"\"\n    \n    def __init__(self, directory, file_out, ignored_row_identifiers):\n        self.ignored_row_identifiers = ignored_row_identifiers\n        self.directory = directory\n        self.file_out = open(os.path.join(directory, file_out), 'a')\n        self.csv_writer = csv.writer(self.file_out)\n        self.paths = []\n        self.ignored_files = ['.DS_Store', file_out]\n\n        for r, d, f in os.walk(self.directory):\n            self.root = r\n            self.dirs = d\n            self.files = f\n        for file in self.files:\n            if not file in self.ignored_files:\n                self.paths.append(os.path.join(r, file))\n        \n        for csv_file in self.paths:\n            self.csv_reader = csv.reader(open(csv_file, 'rU'))\n            for row in self.csv_reader:\n                if row[1] not in self.ignored_row_identifiers:\n                    self.csv_writer.writerow(row) \n        self.file_out.close()\n    \n\n\n#csvMerger(\"/Users/danr/Desktop/csv_test/\", 'combined.csv',\\\n#          ['Description', 'Job Part'])\n","sub_path":"FileMerger.py","file_name":"FileMerger.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"641582901","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import Article\n\ndef index (request):\n    articles = Article.objects.all()\n    context = {\n        'message': 'Welcome my BBS',\n        'articles': articles,\n    }\n    return render(request, 'bbs/index.html', context)","sub_path":"bbs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"649854869","text":"#-*- coding:utf-8 -*-\n\nimport sys\nfrom PyQt4 import QtGui,QtCore\nclass Window(QtGui.QWidget):\n    def __init__(self,parent=None):\n        QtGui.QWidget.__init__(self,parent)\n        self.resize(800,600)\n        self.setWindowTitle("simpleQtDemo")\n        self.setWindowIcon(QtGui.QIcon('icon\earth.ico'))\n        self.center()\n        self.tooltip()\n        self.outBtn()\n\n    def center(self):\n        
screen=QtGui.QDesktopWidget().screenGeometry()\n        size=self.geometry()\n        self.move((screen.width()-size.width())/2,(screen.height()-size.height())/2)\n\n    def tooltip(self):\n        self.setToolTip('this is tooltip')\n\n    def closeEvent(self,event):\n        repty = QtGui.QMessageBox.question(self,"box","do you want to quit to the desktop?"\\\n            ,QtGui.QMessageBox.Yes,QtGui.QMessageBox.No)\n        if repty == QtGui.QMessageBox.Yes:\n            event.accept()\n        else:\n            event.ignore()\n    def outBtn(self):\n        btn=QtGui.QPushButton("Close",self)\n        btn.setGeometry(18,18,36,48)\n        self.connect(btn,QtCore.SIGNAL('clicked()'),QtGui.qApp,QtCore.SLOT('quit()'))\n\napp=QtGui.QApplication(sys.argv)\nw=Window()\nw.show()\nsys.exit(app.exec_())\n","sub_path":"pyqt_1.py","file_name":"pyqt_1.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"333851861","text":"import numpy as np\nfrom scipy.integrate import solve_ivp\nimport matplotlib.pyplot as plt\nfrom typing import List\n\nclass Data:\n\n    def __init__(self, **kwargs):\n        for k, v in kwargs.items():\n            setattr(self, k, v)\n\n\nclass BaseModel:\n\n    def __init__(self, data=None):\n        if data:\n            self.data = data\n\n    def input_function(self, t) -> np.array:\n        \"\"\"\n        You must override this in subclass!!\n        \"\"\"\n        return np.array([])\n\n    def derivative(self, t: float, y: np.array)->np.array:\n        \"\"\"\n        You must override this in subclass!!\n        \"\"\"\n        return np.array([])\n\n    def compute_parameters(self, **kwargs):\n        \"\"\"\n        You must override this in subclass!!\n        You can compute the model parameters from data here and set them as class attributes.\n        They will be accessible in the derivative method, which is the method where you should\n        place the model dynamical equations.\n        Example:\n        def compute_parameters(self, Pressure=P, Temperature=T, **kwargs):\n            self.gas_density = self.data.MolarMass*Pressure/(R*Temperature)\n\n        Now, inside derivative:\n\n        def derivative(t, y):\n            ...\n            ...\n            ...\n\n            density = self.gas_density\n\n            ...\n            ...\n            ...\n\n        \"\"\"\n        return\n\n\nclass BaseSystem:\n\n    last_solution = None\n\n    def __init__(self, model: BaseModel):\n        self.model = model\n\n    def solve(self, t_span: list, y0:np.array, parameters_data=None)->list:\n        \"\"\"\n        :param t_span: [t0, t_final] interval that the solution will be computed numerically\n        :param parameters_data: a dictionary with extra variables you want to pass to the compute_parameters function\n        :return solution: (t, y) system solution\n        \"\"\"\n        if parameters_data:\n            param_dict = parameters_data\n        else:\n            param_dict = {}\n\n        self.model.compute_parameters(**param_dict)\n\n        result = solve_ivp(self.model.derivative, t_span,y0)\n\n        if not result.success:\n            raise RuntimeError('Not solved!')\n\n        solution = (result.t, result.y)\n\n        self.last_solution = tuple(solution)\n\n        return solution\n\n    def plot(self, format_strings: List[str] = None,\n             title:str = None, text:list = None, xlabel:str = None, ylabel:str = None, font=None):\n\n\n        if not self.last_solution:\n            raise RuntimeError("Solution not computed yet!!")\n\n        t = self.last_solution[0]\n        y = self.last_solution[1]\n\n        for i in range(len(y)):\n\n            n = str(i + 1)\n            name = 'y'+n\n\n            if format_strings:\n                plt.plot( t, y[i],format_strings[i])\n            else:\n                plt.plot(t, y[i] )\n            if font:\n                font_dict = {'fontdict': font}\n            else:\n                font_dict = {}\n\n            if title:\n                plt.title(title, **font_dict)\n            if text:\n                plt.text(*text, **font_dict)\n            if xlabel:\n                plt.xlabel(xlabel, **font_dict)\n            if ylabel:\n                plt.ylabel(ylabel, **font_dict)\n\n        
plt.show()\n\n\n\nclass TwoTanks(BaseModel):\n\n A = 5.0\n Cv_1=1.0\n Cv_2=5.0\n h1_st=8.0\n h2_st=2.0\n\n data = Data(\n A=A,\n Cv_1=Cv_1,\n Cv_2=Cv_2,\n h1_st=h1_st,\n h2_st=h2_st,\n )\n\n def compute_parameters(self, *args , **kwargs):\n\n b = self.data.Cv_1/self.data.A\n a = 1/self.data.A\n c = self.data.Cv_2/self.data.A\n\n self.a = a\n self.B = 0.5*b*(self.data.h1_st-self.data.h2_st)**(-0.5)\n self.C = 0.5*c*(self.data.h2_st)**(-0.5)\n\n def derivative(self, t: float, y:np.array)->np.array:\n\n \"\"\"\n :param t = time, y1 = h1, y2 = h2\n :return ddydt: dy1dt = dh1dt, dy2dt = dh2dt\n \"\"\"\n u = self.input_function(t)\n\n fi = u[0]\n fo = u[1]\n h1 = y[0]\n h2 = y[1]\n\n dh1dt = self.a*(fi - fo) - self.B*(h1-h2)\n\n dh2dt = self.B*h1 + (self.B-self.C)*h2\n\n return [dh1dt, dh2dt]\n\n def input_function(self, t):\n return 1.0, 0.5\n\n\n\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"364542462","text":"\"\"\"\nAuthor: \nAslak Per Audun Scheie Anti & Thomas Birk\n\n\"\"\"\n\nSCREEN_WIDTH = 1920\nSCREEN_HEIGHT = 1080\nFPS_LIMIT = 60\n\nGAME_GRAVITY = 1.2\nGAME_FORCESTRENGHT = 0.3\nGAME_POINTS_KILL = 3\n\nPLAYER_MAX_SAFE_VELOCITY = 10\nPLAYER_SIZE = 0.9\nPLAYER_SPEED = 50\nPLAYER_ROTATESPEED = 20\nPLAYER_FUEL = 200\nPLAYER_HEALTH = 100\nPLAYER_THRUST = 5\nPLAYER_REFUEL_SPEED = 10\nPLAYER_LANDING_RANGE = 20\nPLAYER_FUEL_CONSUMPTION = 2\nPLAYER_REFUEL_VALUE = 5\n\nBULLET_DAMAGE = 10\nBULLET_SPEED = 65\nBULLET_RATE = 12\nBULLET_OFFSET = 12\nBULLET_SIZE = 5\nBULLET_COLOR = (0, 200, 0)\n\nBG_COLOR = (50, 50, 50)\nOBJECT_COLOR = (0, 225, 0)\nPLATFORM_COLOR = (255, 200, 0)\nHEALTH_COLOR = (0, 255, 0)\nHEALTH_BG_COLOR = (255, 0, 0)\nFUEL_COLOR = (255, 150, 0)\nFUEL_BG_COLOR = (255, 0, 0)\nSCORE_COLOR = (255,255,255)","sub_path":"assignments_19v/assignment_3/inf1400-tbi019-3/src/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"155177334","text":"#!/usr/bin/python\nimport sys\n\nclass Solution(object):\n def partition(self, s):\n \"\"\"\n :type s: str\n :rtype: List[List[str]]\n \"\"\"\n if not s:\n return []\n self.result = []\n self.dfs(s, 0, [])\n return self.result\n\n def dfs(self, s, sidx, can):\n if len(s) == sidx:\n self.result.append(can[:])\n return\n for i in range(sidx, len(s)):\n new_s = s[sidx: i + 1]\n if not self.isPal(new_s):\n continue\n can.append(new_s)\n self.dfs(s, i + 1, can)\n can.pop()\n\n def isPal(self, s):\n i = 0\n j = len(s) - 1\n while i < j:\n if s[i] != s[j]:\n return False\n i += 1\n j -= 1\n return True\n\ndef main():\n aa = Solution()\n return 0\n\nif __name__ == \"__main__\":\n sys.exit(main())","sub_path":"LeetCode/palindromePartition.py","file_name":"palindromePartition.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"611296023","text":"import inspect\nimport sys\nfrom contextlib import contextmanager\nfrom functools import wraps, partial\nfrom types import MethodType\nimport warnings\nfrom threading import RLock\nfrom operator import attrgetter\n\nfrom easypy.collections import intersected_dict, ilistify\n\n\ndef deprecated(func=None, message=None):\n if not callable(func):\n return partial(deprecated, message=func)\n message = (\" \"+message) if message else \"\"\n message = \"Hey! 
'%s' is deprecated!%s\" % (func.__name__, message)\n\n @wraps(func)\n def inner(*args, **kwargs):\n warnings.warn(message, DeprecationWarning, stacklevel=2)\n return func(*args, **kwargs)\n return inner\n\n\ndef deprecated_arguments(**argmap):\n def wrapper(func):\n \"\"\"\n Renames arguments while emitting deprecation warning\n\n @deprecated_arguments(old_name='new_name')\n def func(new_name):\n # ...\n\n func(old_name='value meant for new name')\n \"\"\"\n\n @wraps(func)\n def inner(*args, **kwargs):\n deprecation_warnings = []\n for name, map_to in argmap.items():\n try:\n value = kwargs.pop(name)\n except KeyError:\n pass # deprecated argument was not used\n else:\n if map_to in kwargs:\n raise TypeError(\"%s is deprecated for %s - can't use both in %s()\" % (\n name, map_to, func.__name__))\n deprecation_warnings.append('%s is deprecated - use %s instead' % (name, map_to))\n kwargs[map_to] = value\n\n if deprecation_warnings:\n message = 'Hey! In %s, %s' % (func.__name__, ', '.join(deprecation_warnings))\n warnings.warn(message, DeprecationWarning, stacklevel=2)\n\n return func(*args, **kwargs)\n return inner\n return wrapper\n\n\ndef parametrizeable_decorator(deco):\n @wraps(deco)\n def inner(func=None, **kwargs):\n if func is None:\n return partial(deco, **kwargs)\n else:\n return wraps(func)(deco(func, **kwargs))\n return inner\n\n\ndef singleton_contextmanager(func):\n class CtxManager():\n def __init__(self, func):\n self.count = 0\n self.func_cm = contextmanager(func)\n self._lock = RLock()\n\n def __enter__(self):\n with self._lock:\n if self.count == 0:\n self.ctm = self.func_cm()\n self.obj = self.ctm.__enter__()\n self.count += 1\n\n def __exit__(self, *args):\n with self._lock:\n self.count -= 1\n if self.count > 0:\n return\n self.ctm.__exit__(*sys.exc_info())\n del self.ctm\n del self.obj\n\n return CtxManager(func)\n\n\n_singleton_contextmanager_method_attr_lock = RLock()\n\n\ndef singleton_contextmanager_method(func):\n cached_attr_name = '__singleton_contextmanager_method__' + func.__name__\n\n # Wrap with a context manager to get proper IPython documentation\n @contextmanager\n @wraps(func)\n def inner(self):\n with _singleton_contextmanager_method_attr_lock:\n try:\n cm = getattr(self, cached_attr_name)\n except AttributeError:\n cm = singleton_contextmanager(partial(func, self))\n setattr(self, cached_attr_name, cm)\n with cm as val:\n yield val\n\n return inner\n\n\n@parametrizeable_decorator\ndef kwargs_resilient(func, negligible=None):\n \"\"\"\n If function does not specify **kwargs, pass only params which it can accept\n\n negligible: If set, only be resilient to these specific parameters:\n - Other parameters will be passed normally, even if they don't appear in the signature.\n - If a specified parameter is not in the signature, don't pass it even if there are **kwargs.\n \"\"\"\n spec = inspect.getfullargspec(inspect.unwrap(func))\n acceptable_args = set(spec.args or ())\n if isinstance(func, MethodType):\n acceptable_args -= {spec.args[0]}\n\n if negligible is None:\n @wraps(func)\n def inner(*args, **kwargs):\n if spec.varkw is None:\n kwargs = intersected_dict(kwargs, acceptable_args)\n return func(*args, **kwargs)\n else:\n negligible = set(ilistify(negligible))\n\n @wraps(func)\n def inner(*args, **kwargs):\n kwargs = {k: v for k, v in kwargs.items()\n if k in acceptable_args\n or k not in negligible}\n return func(*args, **kwargs)\n\n return inner\n\n\ndef reusable_contextmanager(context_manager):\n if not hasattr(context_manager, '_recreate_cm'):\n 
return context_manager  # context manager is already reusable (was not created using a yield function)\n\n    class ReusableCtx:\n        def __enter__(self):\n            self.cm = context_manager._recreate_cm()\n            return self.cm.__enter__()\n\n        def __exit__(self, *args):\n            self.cm.__exit__(*args)\n\n    return ReusableCtx()\n\n\n@parametrizeable_decorator\ndef as_list(generator, sort_by=None):\n    \"\"\"\n    Forces a generator to output a list.\n    Useful when writing a generator is more convenient.\n\n    @as_list(sort_by=lambda n: -n)\n    def g():\n        yield 1\n        yield 2\n        yield from range(2)\n\n    >>> g()\n    [2, 1, 1, 0]\n\n    \"\"\"\n    @wraps(generator)\n    def inner(*args, **kwargs):\n        l = list(generator(*args, **kwargs))\n        if sort_by:\n            l.sort(key=sort_by)\n        return l\n    return inner\n\n\nclass LazyDecoratorDescriptor:\n    def __init__(self, decorator_factory, func):\n        self.decorator_factory = decorator_factory\n        self.func = func\n\n    def __get__(self, instance, owner):\n        method = self.func.__get__(instance, owner)\n        if instance is None:\n            return method\n        else:\n            decorator = self.decorator_factory(instance)\n            return decorator(method)\n\n\ndef lazy_decorator(decorator_factory):\n    \"\"\"\n    Create and apply a decorator only after the method is instantiated.\n\n    class UsageWithLambda:\n        @lazy_decorator(lambda self: some_decorator_that_needs_the_object(self))\n        def foo(self):\n            # ...\n\n    class UsageWithAttribute:\n        def decorator_method(self, func):\n            # ...\n\n        @lazy_decorator('decorator_method')\n        def foo(self):\n            # ...\n    \"\"\"\n\n    if callable(decorator_factory):\n        pass\n    elif isinstance(decorator_factory, str):\n        decorator_factory = attrgetter(decorator_factory)\n    else:\n        raise TypeError('decorator_factory must be callable or string, not %s' % type(decorator_factory))\n\n    def wrapper(func):\n        return LazyDecoratorDescriptor(decorator_factory, func)\n    return wrapper\n","sub_path":"easypy/decorations.py","file_name":"decorations.py","file_ext":"py","file_size_in_byte":6966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"313742906","text":"import os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom math import sqrt\nimport numpy as np\nimport time\n\n# ==============================================================\n# =================== IMPORT THE DATA* =========================\n# ==============================================================\n\nprint("Initial phase: importing data from the QuickDraw dataset...")\ndata_dir = "./QuickDraw/"\n\n# *Part of this first phase, for the data import, was reworked starting from\n# https://github.com/ankonzoid/Google-QuickDraw/blob/master/QuickDraw_noisy_classifier.py\n\nl = {0: 'airplane', 1: 'banana', 2: 'hourglass', 3: 'icecream', 4: 'mountain',\n\t5: 'mug', 6: 'mushroom', 7: 'pineapple', 8: 'pizza', 9: 'rabbit'}\nlabels = list(l.values())\n\nprint("Categories:")\nfor i in range(10):\n\ttime.sleep(0.5)\n\tprint(list(l.keys())[i], "-", labels[i])\n\ndef dense_to_one_hot(labels_dense, num_classi=10):\n\tnum_labels = labels_dense.shape[0]\n\tindex_offset = np.arange(num_labels) * num_classi\n\tlabels_one_hot = np.zeros((num_labels, num_classi))\n\tlabels_one_hot.flat[index_offset + labels_dense.ravel()] = 1\n\treturn labels_one_hot\n\ncategory_filenames = []\nn_remaining_category = []\n\nfor catname in labels:\n\tfilename = os.path.join(data_dir, "full_numpy_bitmap_" + catname + ".npy")\n\tcategory_filenames.append(filename)\n\nfor i_category, category in 
enumerate(labels):\n\tdata = np.load(category_filenames[i_category])\n\tn_total = len(data)\n\tn_remaining_category.append(n_total)\n\ndim_immagine = int(sqrt(data.shape[1]))\n\nn_take_train = min([40000, min(n_remaining_category)])\nn_take_test = min([8000, min(n_remaining_category)])\n\t\nx_train = []\ny_train = []\nx_test = []\ny_test = []\n\nfor i_category, category in enumerate(labels):\n\tdata = np.load(category_filenames[i_category])\t\t\n\tn_data = len(data)\n\t\t\n\tfor j, data_j in enumerate(data):\n\t\timg = np.array(data_j).reshape((dim_immagine, dim_immagine))\n\t\tif j < n_take_train:\n\t\t\tx_train.append(img)\n\t\t\ty_train.append(i_category)\n\t\telif j - n_take_train < n_take_test:\n\t\t\tx_test.append(img)\n\t\t\ty_test.append(i_category)\n\t\telse:\n\t\t\tbreak\n\nx_train = np.array(x_train)\ny_train = np.array(y_train)\nx_test = np.array(x_test)\ny_test = np.array(y_test)\n\nx_train = x_train.astype('float32') / 255\nx_test = x_test.astype('float32') / 255\ny_train = dense_to_one_hot(y_train)\ny_test = dense_to_one_hot(y_test)\n\n# ==============================================================\n# =================== SET THE PARAMETERS =======================\n# ==============================================================\n\nnum_classi = len(labels)\nlearning_rate = 0.001\nnum_epoche = 100\n\n# ==============================================================\n# ==================== UTILITY FUNCTIONS =======================\n# ==============================================================\n\ndef crea_pesi(dim, nome):\n\tres = tf.Variable(tf.zeros(dim), name = nome)\n\treturn res\n\ndef crea_fc(inp, weight, bias):\n\tres = tf.matmul(inp, weight) + bias\n\treturn res\n\ndef crea_costo(out, y):\n\tres = -tf.reduce_sum(y*tf.log(out + 1e-8), name="costo")\n\treturn res\n\ndef crea_optimizer(opt, costo):\n\tif opt == "gd":\n\t\tres = tf.train.GradientDescentOptimizer(learning_rate).minimize(costo)\n\telif opt == "adam":\n\t\tres = tf.train.AdamOptimizer(learning_rate).minimize(costo)\n\telif opt == "adagrad":\n\t\tres = tf.train.AdagradOptimizer(learning_rate).minimize(costo)\n\treturn res\n\t\ndef calcola_accuracy(out, y):\n\tcor = tf.equal(tf.argmax(out, 1), tf.argmax(y, 1))\n\tres = tf.reduce_mean(tf.cast(cor, tf.float32), name="accuracy")\n\treturn res\n\ndef stampa_img(inp, txt):\n    s = (np.reshape(inp, (dim_immagine, dim_immagine)) * 255).astype(np.uint8)\n    plt.title("The label is {}".format(txt))\n    plt.imshow(s, cmap="gray")\n    return plt\n\n# ==============================================================\n# ===================== DISPLAY IMAGES =========================\n# ==============================================================\n \nc = False\n\nwhile(c == False):\n\trisposta = input("Display a sample image from the dataset? ")\n\tif(risposta.lower() == "yes"):\n\t\tn = np.argmax(y_test[0], 0)\n\t\tstampa_img(x_test[0], l[n]).show()\n\t\tc = True\n\telif(risposta.lower() == "no"):\n\t\tc = True\n\telse:\n\t\tprint("Invalid input. 
Choose between 'yes' and 'no'")\n\n# ==============================================================\n# ================= CREATE THE NEURAL NETWORK ==================\n# ==============================================================\n\nprint("Creating the neural network...")\n\nc = False\n\nwhile(c == False):\n\trisposta = input("Enter the optimization method: ")\n\tif risposta.lower() == "gradient descent":\n\t\topt = "gd"\n\t\tc = True\n\telif risposta.lower() == "adam":\n\t\topt = "adam"\n\t\tc = True\n\telif risposta.lower() == "adagrad":\n\t\topt = "adagrad"\n\t\tc = True\n\telse:\n\t\tprint("Invalid input. Choose between 'Gradient Descent', 'AdaGrad' and 'Adam'")\n\nx = tf.placeholder(tf.float32, [None, dim_immagine, dim_immagine])\ninp = tf.reshape(x, [-1, dim_immagine * dim_immagine])\ny = tf.placeholder(tf.float32, [None, num_classi])\n\ndim_W1 = [dim_immagine * dim_immagine, num_classi]\ndim_b1 = [num_classi]\n\nW1 = crea_pesi(dim_W1, "W1")\nb1 = crea_pesi(dim_b1, "b1")\n\nwith tf.name_scope("output_layer") as scope:\n\tfc_out = crea_fc(inp, W1, b1)\n\tout = tf.nn.softmax(fc_out)\n\nwith tf.name_scope("funzione_costo") as scope:\n    costo = crea_costo(out, y)\n\nwith tf.name_scope("train") as scope:\n    optimizer = crea_optimizer(opt, costo)\n\nwith tf.name_scope("accuracy") as scope:\n\tacc = calcola_accuracy(out, y)\n\n# ==============================================================\n# ===================== SESSION STARTUP ========================\n# ==============================================================\n\nfor elem in [W1, b1]:\n    tf.summary.histogram(elem.op.name, elem)\n    \nfor elem in [costo, acc]:\n    tf.summary.scalar(elem.op.name, elem)\n\ninit = tf.global_variables_initializer()\nsummaries = tf.summary.merge_all()\n\nprint("Starting the tensorflow session...")\ntime.sleep(2)\n\n# start the session\nwith tf.Session() as sess:\n    sess.run(init)\n\n    summary_writer = tf.summary.FileWriter("modulo2", sess.graph)\n    \n    # training\n    print("Starting the training process...")\n\t\n    for i in range(num_epoche):\n        fd = {x: x_train, y: y_train}\n        acc_esec, _ = sess.run([acc, optimizer], feed_dict = fd)\n\n        if i % 5 == 0 or i == num_epoche-1:\n            print("Iteration:", i, "- Training Accuracy: {:.3f} %".format(acc_esec * 100))\n            summary_writer.add_summary(sess.run(summaries, feed_dict = fd), i)\n    \n    print("Training process finished, ran", num_epoche, "epochs!")\n    print("Evaluating performance on the test set")\n    time.sleep(2)\n\n\t# print the results on the test set\n    print("Test Accuracy: {:.3f} %".format(acc.eval({x: x_test, y: y_test}) * 100))\n\n","sub_path":"modulo2.py","file_name":"modulo2.py","file_ext":"py","file_size_in_byte":6867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"244563262","text":"import re\n\nimport pandas as pd\nfrom openpyxl import load_workbook\n\nfrom package.apps.core.models import Profile, Emcexception, week\nfrom package.apps.core.utils import Exception_type\n# IMPORT DataObject from xlsx file\n# Subsystem 1\nfrom package.apps.Upload.models import Document\nfrom xlrd import open_workbook, XLRDError\n\n\nclass ExcelData:\n    # TODO change sheet_name to an input field\n    def __init__(self, filepath, docid):\n        self.filepath = filepath\n        self.docid = docid\n        self.docfile = Document.objects.filter(id=self.docid)[0]\n\n    def excelfileparser(self):\n        df = pd.read_excel(self.filepath, sheet_name="Sheet1")\n        return df\n\n    def 
validate_format(self):\n if not (test_book(self.filepath)):\n print('File format is wrong')\n return False\n xl = pd.ExcelFile(self.filepath)\n sheets = xl.sheet_names\n sheet_name = 'Sheet1'\n if sheet_name not in sheets:\n print('Sheet1 not found')\n return False\n head_list = ['IDJV', 'PJ_Name', 'week number', 'Date de reception ', 'Motif ', 'Sujet ', 'Destinataire ',\n 'Expediteur ']\n\n file_head = list(xl.parse(sheet_name))\n Li = [c for c in head_list if c in file_head]\n print(Li)\n if not (Li == head_list):\n print('headers problem')\n print(file_head)\n return False\n return True\n\n def getemaillist(self, dataframe):\n return list(set(dataframe['Expediteur ']))\n\n def name_and_surnameFromemail(self, emailaddr):\n emailaddr_list = emailaddr.split('@')\n if '.' in emailaddr_list[0]:\n fullname = emailaddr_list[0].split('.')\n Name = fullname[0].capitalize()\n second_name = fullname[1].capitalize()\n else:\n Name = emailaddr_list[0].capitalize()\n second_name = \"NoSurname\"\n Org = emailaddr_list[1].split('.')[0]\n return {'name': Name, 'secondname': second_name, 'org': Org}\n\n def createProfiles(self, ):\n listofEmails = self.getemaillist(self.excelfileparser())\n for emailaddr in listofEmails:\n res = self.name_and_surnameFromemail(emailaddr)\n Name = res['name']\n second_name = res['secondname']\n Org = res['org']\n existing = Profile.objects.filter(emailaddress=emailaddr)\n if not existing:\n profileobj = Profile(name=Name, surname=second_name, emailaddress=emailaddr, organisation=Org)\n profileobj.save()\n profileobj.data_file.add(self.docfile)\n\n def getCellColor(self, sheetpage, cell):\n colorobj = sheetpage[cell].fill.start_color\n if colorobj.rgb is None:\n return \"grey\"\n elif colorobj.rgb == 'FF92D050':\n return \"green\"\n elif colorobj.rgb == '00000000':\n return \"white\"\n else:\n return \"invalid color\"\n\n def Colorstatement(self, color):\n colors = {\n \"green\": \"Traite\"\n , \"white\": \"Non traite\"\n }\n return colors.get(color, 'Archivée')\n\n def getExceptions(self):\n wb = load_workbook(self.filepath, data_only=True)\n sheetpage = wb[\"Sheet1\"]\n nan = 'Nan'\n nexcept = int(re.findall(r\"\\d+\", sheetpage.dimensions)[-1])\n exceptionstates = []\n for case in range(2, nexcept + 1):\n exceptionstate = dict()\n color = self.getCellColor(sheetpage, \"B\" + str(case))\n exceptionstate[\"idjv\"] = str(sheetpage[\"A\" + str(case)]._value)\n exceptionstate[\"state\"] = str(self.Colorstatement(color))\n exceptionstates.append(exceptionstate)\n data = self.excelfileparser()\n IDJV = data['IDJV']\n PJ_Name = data['PJ_Name']\n WN = data['week number']\n DATE = data['Date de reception ']\n MOTIF = data['Motif ']\n SUJET = data['Sujet ']\n DESTINATIRE = data['Destinataire ']\n EXPED = data['Expediteur ']\n for i in range(0, nexcept - 1):\n print(EXPED[i])\n if not test_week_exists(WN[i]):\n W = week(id=WN[i])\n W.save()\n existing = Emcexception.objects.filter(idjv=IDJV[i], pj_name=PJ_Name[i],\n motif=MOTIF[i], sujet=SUJET[i],\n destinataire=DESTINATIRE[i], expediteur=\n Profile.objects.filter(emailaddress=EXPED[i])[\n 0]\n , etat=exceptionstates[i]['state'], wn=week.objects.get(id=WN[i]))\n if not existing:\n util = Exception_type(IDJV[i], PJ_Name[i], SUJET[i], MOTIF[i])\n print(util)\n phase = util[0]\n emc_type = util[1]\n emcobj = Emcexception(idjv=IDJV[i], pj_name=PJ_Name[i], wn=week.objects.filter(id=WN[i])[0],\n recpdate=DATE[i],\n motif=MOTIF[i],\n sujet=SUJET[i], destinataire=DESTINATIRE[i],\n 
expediteur=Profile.objects.filter(emailaddress=EXPED[i])[0],\n etat=exceptionstates[i]['state'], data_file=self.docfile, phase=phase,\n type=emc_type)\n emcobj.save()\n\n\ndef test_book(filename):\n try:\n open_workbook(filename)\n except XLRDError:\n return False\n else:\n return True\n\n\ndef test_week_exists(id):\n try:\n week.objects.get(id=id)\n except Exception:\n return False\n else:\n return True\n","sub_path":"LP20KPI/package/package/apps/Upload/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"120619066","text":"#!/usr/bin/env python3\nimport math, collections, itertools\nfrom sys import stdin\n\n\ndef readValue(valueType):\n return valueType(stdin.readline())\n\n\ndef readValues(valueType):\n return list(map(valueType, stdin.readline().split()))\n\n\nclass Mouth():\n count = 1\n\n @classmethod\n def answer(cls, answer):\n print(\"Case #{}: {}\".format(cls.count, answer))\n cls.count += 1\n\n\ndef readInput():\n n = readValue(int)\n lines = [readValues(int) for _ in range(2*n-1)]\n return n, lines\n\n\ndef solve(n, lines):\n global crtRow, crtCol, missingRow\n lines = sorted(lines)\n final = [[None] * n for _ in range(n)]\n\n crtRow = 0\n crtCol = 0\n missingRow = None\n\n def placeOriz(lineNo):\n global crtRow, crtCol, missingRow\n if crtRow >= n or lineNo >= 2 * n - 1:\n return False\n\n if crtRow:\n for i in range(n):\n if missingRow != crtRow - 1 and lines[lineNo][i] <= final[crtRow - 1][i]:\n return False\n for i in range(n):\n if crtCol > i and lines[lineNo][i] != final[crtRow][i]:\n return False\n for i in range(n):\n final[crtRow][i] = lines[lineNo][i]\n return True\n\n def placeVert(lineNo):\n global crtRow, crtCol, missingRow\n\n if crtCol >= n or lineNo >= 2 * n - 1:\n return False\n\n if crtCol:\n for i in range(n):\n if lines[lineNo][i] <= final[i][crtCol - 1]:\n return False\n for i in range(n):\n if crtRow > i and missingRow != i and lines[lineNo][i] != final[i][crtCol]:\n return False\n\n for i in range(n):\n final[i][crtCol] = lines[lineNo][i]\n return True\n\n #print(lines)\n def solve(lineNo):\n global crtRow, crtCol, missingRow\n\n if crtRow == n and crtCol == n:\n return True\n\n if placeOriz(lineNo):\n crtRow += 1\n #print(\"oriz: \" + str(lines[lineNo]) + \", \" + str(lineNo))\n if solve(lineNo + 1):\n return True\n crtRow -= 1\n\n if placeVert(lineNo):\n crtCol += 1\n #print(\"vert: \" + str(lines[lineNo]) + \", \" + str(lineNo))\n if solve(lineNo + 1):\n return True\n crtCol -= 1\n\n if crtRow != n and missingRow is None:\n missingRow = crtRow\n #print(\"missing row: \" + str(crtRow))\n crtRow += 1\n if solve(lineNo):\n return True\n crtRow -= 1\n missingRow = None\n\n return False\n\n solve(0)\n\n return \" \".join([str(x) for x in final[missingRow]])\n\n\nif __name__ == '__main__':\n for _ in range(readValue(int)):\n Mouth.answer(solve(*readInput()))\n","sub_path":"codes/BuildLinks1.10/test_input/CJ_16_1/16_1_2_saca_B.py","file_name":"16_1_2_saca_B.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"353814854","text":"\"\"\"\nWithout recursion\n\"\"\"\n\nimport sys\n\nfrom . 
import bigint\nfrom .lang import eval_string\n\n\ndef main():\n if len(sys.argv) < 2:\n _print_error(sys.stderr, 'invalid arg count')\n return 1\n\n try:\n assigns = [s.strip() for s in sys.argv[1].split(';')]\n args = dict([a.split('=') for a in assigns])\n except:\n _print_error(sys.stderr, 'invalid args format')\n return 1\n\n try:\n input_path = args['input']\n except KeyError:\n _print_error(sys.stderr, '\\'input\\' is a required arg')\n return 1\n\n try:\n nodesize = int(args['digitsPerNode'])\n except KeyError:\n _print_error(sys.stderr, '\\'digitsPerNode\\' is a required arg')\n sys.exit(1)\n except TypeError:\n _print_error(sys.stderr, '\\'digitsPerNode\\' is not an int')\n sys.exit(1)\n\n with open(input_path) as inputfile:\n stripped = (l.strip() for l in inputfile)\n exprs = (l for l in stripped if l)\n\n equations = [(e, _safe_call(eval_string, e, nodesize)) for e in exprs]\n str_results = [_fmt_result(e[0], e[1]) for e in equations]\n output = '\\n'.join(str_results)\n\n print(output)\n\n return 0\n\n\ndef _fmt_result(expr, result):\n success, val = result\n if success:\n return '%s = %s' % (expr, bigint.tostring(val))\n else:\n return '%s = invalid expression' % (expr)\n\n\ndef _print_error(file, msg):\n print('infinitearithmetic: %s' % (msg), file=file)\n\n\ndef _safe_call(fun, *args):\n try:\n return (True, fun(*args))\n except BaseException as e:\n return (False, e)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"infinitearithmetic/infinitearithmetic.py","file_name":"infinitearithmetic.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"216933887","text":"from flask import Flask, render_template, jsonify, request,Response,redirect,url_for\r\n\r\nimport argparse\r\nimport sys\r\n\r\nimport json\r\nimport dynamodb\r\nimport jsonconverter as jsonc\r\n\r\nimport cv2\r\n\r\nimport pandas as pd\r\nfrom pandas import DataFrame\r\nimport numpy as np\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.decomposition import PCA\r\nimport botocore\r\nimport boto3\r\nfrom boto3.dynamodb.conditions import Key, Attr\r\n\r\nimport mysql.connector\r\nfrom gevent.pywsgi import WSGIServer\r\n\r\nfrom datetime import datetime\r\nfrom twilio.rest import Client\r\n\r\nfrom IOTAssignmentUtilitiesdorachua.MySQLManager import MySQLManager\r\n\r\nfrom IOTAssignmentUtilitiesdorachua.MySQLManager import QUERYTYPE_DELETE, QUERYTYPE_INSERT\r\n\r\nimport boto3\r\nfrom boto3.dynamodb.conditions import Key, Attr\r\nfrom AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient\r\nimport pandas as pd\r\n\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route(\"/api/getdata\",methods=['GET', 'POST'])\r\ndef apidata_getdata():\r\n try:\r\n maxdatetimestart = dynamodb.get_maxdatetimestart_from_dynamodb()\r\n data_reversed = dynamodb.get_top10data_from_dynamodb(maxdatetimestart)\r\n datarows = jsonc.data_to_json(data_reversed)\r\n \r\n #return data_reversed'''\r\n\r\n return jsonify(json.loads(datarows))\r\n \r\n except:\r\n print(sys.exc_info()[0])\r\n print(sys.exc_info()[1])\r\n\r\n@app.route(\"/api/getDashboardDetails\",methods=['GET', 'POST'])\r\ndef apidata_getTotalNumOfVehicles():\r\n try:\r\n datarows = []\r\n maxdatetimestart = dynamodb.get_maxdatetimestart_from_dynamodb() \r\n datarows.append(dynamodb.get_numofbid_and_maxspeed(maxdatetimestart))\r\n 
datarows.append(dynamodb.getDistinctBidandAvgSpeedandCount(maxdatetimestart))\r\n\r\n        return jsonify(json.loads(jsonc.data_to_json(datarows))) \r\n\r\n    except:\r\n        print(sys.exc_info()[0])\r\n        print(sys.exc_info()[1])\r\n\r\n@app.route("/api/getSummaryTable",methods=['GET', 'POST'])\r\ndef apidata_getSummaryTable():\r\n    try:\r\n        data = dynamodb.getSummaryTableData()\r\n        return jsonify(json.loads(jsonc.data_to_json(data)))\r\n    except:\r\n        print(sys.exc_info()[0])\r\n        print(sys.exc_info()[1]) \r\n\r\n@app.route("/api/getIndividualBooking/<bid>",methods=['GET', 'POST'])\r\ndef getBooking(bid):\r\n    try:\r\n        dataRows = dynamodb.getindividualBookingData(bid)\r\n\r\n        return jsonify(json.loads(jsonc.data_to_json(dataRows)))\r\n\r\n    except:\r\n        print(sys.exc_info()[0])\r\n        print(sys.exc_info()[1]) \r\n\r\n@app.route("/individualBooking/<bid>",methods=['GET', 'POST'])\r\ndef detailedtable(bid):\r\n    try:\r\n        bookingid = bid\r\n\r\n        return render_template('detailed-table.html',bookingid=bookingid) \r\n\r\n    except:\r\n        print(sys.exc_info()[0])\r\n        print(sys.exc_info()[1]) \r\n\r\n@app.route("/api/getIndividualSpeedDetails/<bid>",methods=['GET', 'POST'])\r\ndef getIndividualSpeedDetails(bid):\r\n    try:\r\n        bookingid = bid\r\n        data = dynamodb.getIndivividualDashboard(bookingid)\r\n        \r\n        return jsonify(json.loads(jsonc.data_to_json(data))) \r\n\r\n    except:\r\n        print(sys.exc_info()[0])\r\n        print(sys.exc_info()[1])\r\n\r\n@app.route("/api/getIndividualMapData/<bid>", methods=['GET', 'POST'])\r\ndef getIndividualMapData(bid):\r\n    try:\r\n        bookingid = bid\r\n        mapdata = dynamodb.getIndividualMapData(bookingid)\r\n        \r\n        return jsonify(json.loads(jsonc.data_to_json(mapdata)))\r\n\r\n    except:\r\n        print(sys.exc_info()[0])\r\n        print(sys.exc_info()[1])\r\n\r\n@app.route("/api/captureDriverImage/<bid>",methods=['GET', 'POST'])\r\ndef captureDriverImage(bid):\r\n    try:\r\n        bookingid = bid\r\n        cam = cv2.VideoCapture("http://27.104.173.113:8081/out.jpg") \r\n        ret, frame = cam.read()\r\n        if not ret:\r\n            exit()\r\n        currentdatetime = datetime.now().strftime("%Y-%m-%d %H:%M:%S")\r\n        imgpathaddon = currentdatetime.replace(":",".") \r\n        imgpathaddon = imgpathaddon.replace(" ","_")+ ".jpg"\r\n        imgname = bookingid+"_"+imgpathaddon\r\n        success = dynamodb.uploadImageFileToS3(frame,imgname)\r\n        print(success)\r\n        #cv2.imwrite(imgname,frame)\r\n        print(imgname)\r\n        cam.release()\r\n\r\n        host = "a3kfg2bxlkhyhy-ats.iot.us-east-1.amazonaws.com"\r\n        rootCAPath = "certs/rootca.pem"\r\n        certificatePath = "certs/certificate.pem.crt"\r\n        privateKeyPath = "certs/private.pem.key"\r\n\r\n        my_rpi = AWSIoTMQTTClient("p1828430")\r\n        my_rpi.configureEndpoint(host, 8883)\r\n        my_rpi.configureCredentials(rootCAPath, privateKeyPath, certificatePath)\r\n\r\n        my_rpi.configureOfflinePublishQueueing(-1)  # Infinite offline Publish queueing\r\n        my_rpi.configureDrainingFrequency(2)  # Draining: 2 Hz\r\n        my_rpi.configureConnectDisconnectTimeout(10)  # 10 sec\r\n        my_rpi.configureMQTTOperationTimeout(5)  # 5 sec\r\n\r\n        # Connect and subscribe to AWS IoT\r\n        my_rpi.connect()\r\n        imageData = {'VehicleType':'GrabCar', 'datetime_value':currentdatetime, 'bookingid':bookingid, 'KeyName':imgname}\r\n        my_rpi.publish("iotdatabase/image", json.dumps(imageData), 1)\r\n\r\n        return jsonify(json.loads(jsonc.data_to_json(success)))\r\n    except:\r\n        print(sys.exc_info()[0])\r\n        print(sys.exc_info()[1])\r\n\r\n@app.route("/api/getDriverImage/<bid>",methods=['GET', 'POST'])\r\ndef getDriverImage(bid):\r\n    try:\r\n        bookingid = bid\r\n        data = []\r\n        image, facesimilarity, 
ageDetails = dynamodb.retrieveImageFromS3(bookingid)\r\n        data = [image, facesimilarity, ageDetails]\r\n        return jsonify(json.loads(jsonc.data_to_json(data)))\r\n    except:\r\n        print(sys.exc_info()[0])\r\n        print(sys.exc_info()[1])\r\n\r\n@app.route("/api/checkCapturedImageIndex/<bid>",methods=['GET', 'POST'])\r\ndef checkLatestImage(bid):\r\n    try:\r\n        bookingid = bid\r\n        data = []\r\n        image, facesimilarity, ageDetails = dynamodb.retrieveImageFromS3forIndex(bookingid)\r\n        data = [image, facesimilarity, ageDetails]\r\n        return jsonify(json.loads(jsonc.data_to_json(data)))\r\n    except:\r\n        print(sys.exc_info()[0])\r\n        print(sys.exc_info()[1])\r\n\r\n@app.route("/api/getDriverSpeedingDetails", methods=['GET', 'POST'])\r\ndef getDriverSpeedingDetails():\r\n    try:\r\n        driversImages = dynamodb.getLatest3DriversthatSped()\r\n\r\n        return jsonify(json.loads(jsonc.data_to_json(driversImages)))\r\n\r\n    except:\r\n        print(sys.exc_info()[0])\r\n        print(sys.exc_info()[1])\r\n\r\n@app.route("/api/getMapData", methods=['GET', 'POST'])\r\ndef getMapData():\r\n    try:\r\n        data = []\r\n        maxdatetimestart = dynamodb.get_maxdatetimestart_from_dynamodb()\r\n        listofBID = dynamodb.getListofBIDforLatestDTS()\r\n        for bid in listofBID:\r\n            data.append(dynamodb.getMapData(bid,maxdatetimestart))\r\n\r\n        return jsonify(json.loads(jsonc.data_to_json(data)))\r\n\r\n    except:\r\n        print(sys.exc_info()[0])\r\n        print(sys.exc_info()[1])\r\n\r\n@app.route("/api/sendAlert/<bid>/<speed>",methods=['GET', 'POST'])\r\ndef sendAlert(bid = None,speed = None):\r\n    try:\r\n        bookingid = bid\r\n        carspeed = speed\r\n\r\n        # INSERT ACCOUNT SID AND AUTH TOKEN HERE AND PHONE NUMBER\r\n\r\n        account_sid = ""\r\n        auth_token = ""\r\n\r\n        client = Client(account_sid, auth_token)\r\n        sms = "It was identified that you drove at a speed of *{}*, which is beyond the maximum speed limit of 90km/hr. Please drive slower so as to provide the passenger a more pleasant ride. 
\\nBy Grab\\nBooking ID : *{}*\".format(carspeed,bookingid)\r\n #message = client.messages.create(to=my_hp,from_=twilio_hp,body=sms, mediaUrl=imgname) \r\n client.messages.create(body=sms,\r\n from_='whatsapp:+14155238886',\r\n to='whatsapp:+6598421399')\r\n\r\n return(\"message sent successfully\")\r\n \r\n except:\r\n print(sys.exc_info()[0])\r\n print(sys.exc_info()[1])\r\n\r\n@app.route(\"/api/predictSafety/\",methods=['GET', 'POST'])\r\ndef predictSafety(bid):\r\n try:\r\n # Obtain SQL Reading for selected BookingID (SQLQuery)\r\n # https://www.kite.com/python/answers/how-to-convert-an-sql-query-result-to-a-pandas-dataframe-in-python\r\n # https://stackoverflow.com/questions/12047193/how-to-convert-sql-query-result-to-pandas-data-structure\r\n\r\n dynamodb = boto3.resource('dynamodb', region_name='us-east-1')\r\n table = dynamodb.Table('modedata')\r\n response = table.query(\r\n KeyConditionExpression = Key('bookingid').eq(bid)\r\n )\r\n datarows = response['Items']\r\n\r\n df = pd.DataFrame(datarows,dtype=float)\r\n\r\n #resoverall is yr sql statement\r\n # df = DataFrame(resoverall.fetchall())\r\n # df.columns = resoverall.keys()\r\n df.drop(['accuracy'], axis=1, inplace=True)\r\n\r\n df['acceleration'] = np.sqrt((df.loc[:, ('acceleration_x', 'acceleration_y', 'acceleration_z')] ** 2).sum(axis=1))\r\n df.drop(['acceleration_x', 'acceleration_y','acceleration_z','speedkmhour'], axis=1, inplace=True)\r\n\r\n pca_gyro = PCA(n_components=1).fit(df.loc[:, ['gyro_x', 'gyro_y', 'gyro_z']])\r\n df['gyro'] = pca_gyro.transform(df.loc[:, ('gyro_x', 'gyro_y', 'gyro_z')])\r\n df.drop(['gyro_x', 'gyro_y','gyro_z'], axis=1, inplace=True)\r\n\r\n data = pd.DataFrame()\r\n for col in df.columns:\r\n if col != \"bookingid\" and col != \"label\":\r\n temp = df.groupby(\"bookingid\")[col].agg([\"mean\", \"sum\", \"max\", \"min\"])\r\n data[col + \"_mean\"] = temp[\"mean\"]\r\n data[col + \"_sum\"] = temp[\"sum\"]\r\n data[col + \"_max\"] = temp[\"max\"]\r\n data[col + \"_min\"] = temp[\"min\"]\r\n #data = data.drop(columns=[\"bookingid\"]).reset_index(drop=True)\r\n data.drop(columns=[\"seconds_min\"], inplace=True)\r\n\r\n # generate distance, velocity and angle features\r\n for col in data.columns:\r\n if col.startswith(\"seconds\"):\r\n agg_method = col.split(\"_\")[1]\r\n data[\"distance_\" + agg_method] = data[col] * data[\"speed_\" + agg_method]\r\n data[\"velocity_\" + agg_method] = data[col] * data[\"acceleration_\" + agg_method]\r\n data[\"angle_\" + agg_method] = data[col] * data[\"gyro_\" + agg_method]\r\n\r\n data = data.drop(columns=['seconds_sum','seconds_mean']).reset_index(drop=True)\r\n\r\n # Once Obtain model after hyper tuning save weights here and predict\r\n\r\n from joblib import load\r\n\r\n sr = load(\"GBR.joblib\")\r\n y_pred = sr.predict(data)\r\n print(\"regression value: \", y_pred[0])\r\n return jsonify(json.loads(jsonc.data_to_json(y_pred[0])))\r\n\r\n except:\r\n print(sys.exc_info()[0])\r\n print(sys.exc_info()[1])\r\n\r\n@app.route(\"/summary-table\")\r\ndef summarytable():\r\n return render_template('summary-table.html')\r\n\r\n@app.route(\"/map\")\r\ndef map():\r\n return render_template('map-google.html')\r\n\r\n@app.route(\"/\")\r\ndef home():\r\n return render_template('index.html')\r\n\r\nif __name__ == '__main__':\r\n try:\r\n host = '0.0.0.0'\r\n port = 80\r\n parser = argparse.ArgumentParser() \r\n parser.add_argument('port',type=int)\r\n \r\n args = parser.parse_args()\r\n if args.port:\r\n port = args.port\r\n \r\n http_server = WSGIServer((host, port), 
app)\r\n app.debug = True\r\n print('Web server waiting for requests')\r\n http_server.serve_forever()\r\n\r\n \r\n\r\n except:\r\n print(\"Exception while running web server\")\r\n print(sys.exc_info()[0])\r\n print(sys.exc_info()[1])\r\n","sub_path":"web_server.py","file_name":"web_server.py","file_ext":"py","file_size_in_byte":11777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"400396856","text":"#!/usr/bin/python\nimport csv\nimport random\n\nrecords=90\n#print(\"Making %d records\\n\" % records)\n\nfieldnames=['id','name','age','city']\nwriter = csv.DictWriter(open(\"peopleWithFault.csv\", \"w\"), fieldnames=fieldnames)\n\nnames=['Deepak', 'Sangeeta', 'Geetika', 'Anubhav', 'Sahil', 'Akshay']\ncities=['Delhi', 'Kolkata', 'Chennai', 'Mumbai']\n\nwriter.writerow(dict(zip(fieldnames, fieldnames)))\nfor i in range(0, records):\n writer.writerow(dict([\n ('id', i),\n ('name', random.choice(names)),\n ('age', str(random.randint(24,26))),\n ('city', random.choice(cities))]))\nprint(i)","sub_path":"GenerateWithFault.py","file_name":"GenerateWithFault.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"199365375","text":"# This source code is part of the Biotite package and is distributed\n# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further\n# information.\n\nimport biotite.structure as struc\nimport biotite.structure.io.pdbx as pdbx\nimport biotite.database.rcsb as rcsb\nimport biotite.structure as struc\nimport numpy as np\nimport glob\nfrom os.path import join\nfrom .util import data_dir\nimport pytest\n\n\n@pytest.mark.parametrize(\"path\", glob.glob(join(data_dir, \"*.cif\")))\ndef test_superimposition_array(path):\n pdbx_file = pdbx.PDBxFile()\n pdbx_file.read(path)\n fixed = pdbx.get_structure(pdbx_file, model=1)\n mobile = fixed.copy()\n mobile = struc.rotate(mobile, (1,2,3))\n mobile = struc.translate(mobile, (1,2,3))\n fitted, transformation = struc.superimpose(\n fixed, mobile, (mobile.atom_name == \"CA\")\n )\n assert struc.rmsd(fixed, fitted) == pytest.approx(0, abs=5e-4)\n fitted = struc.superimpose_apply(mobile, transformation)\n assert struc.rmsd(fixed, fitted) == pytest.approx(0, abs=5e-4)\n\n@pytest.mark.parametrize(\"ca_only\", (True, False))\ndef test_superimposition_stack(ca_only):\n path = join(data_dir, \"1l2y.cif\")\n pdbx_file = pdbx.PDBxFile()\n pdbx_file.read(path)\n stack = pdbx.get_structure(pdbx_file)\n fixed = stack[0]\n mobile = stack[1:]\n if ca_only:\n mask = (mobile.atom_name == \"CA\")\n else:\n mask = None\n fitted, transformation = struc.superimpose(fixed, mobile, mask)\n if ca_only:\n # The superimpositions are better for most cases than the\n # superimpositions in the structure file\n # -> Use average\n assert np.mean(struc.rmsd(fixed, fitted)) \\\n < np.mean(struc.rmsd(fixed, mobile))\n else:\n # The superimpositions are better than the superimpositions\n # in the structure file\n assert (struc.rmsd(fixed, fitted) < struc.rmsd(fixed, mobile)).all()\n","sub_path":"tests/structure/test_superimpose.py","file_name":"test_superimpose.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"247505020","text":"# Get the number of nums\nn = int(input())\n\n# Get numbers\nnums = list(map(int, input().split()))\n\n# Initialize list\nsums = [0] * n\nsums[0] = nums\n\n# Sum\nfor i in range(1, n):\n # 
Initialize elem of the list\n elem = [0] * (n - i)\n for j in range(n - i):\n elem[j] = sums[i-1][j] + nums[j + i]\n sums[i] = elem\n\n# Get max sum\nprint(max(map(max, sums)))","sub_path":"practice_coding/boj/dp/1912-memory-exceeded.py","file_name":"1912-memory-exceeded.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"599198167","text":"import os\n\nfrom conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.build import check_min_cppstd\nfrom conan.tools.files import copy, get\nfrom conan.tools.scm import Version\n\nrequired_conan_version = \">=1.53.0\"\n\n\nclass XoshiroCppConan(ConanFile):\n name = \"xoshiro-cpp\"\n description = \"Header-only Xoshiro/Xoroshiro PRNG wrapper library for modern C++ (C++17/C++20)\"\n license = \"MIT\"\n homepage = \"https://github.com/Reputeless/Xoshiro-cpp\"\n url = \"https://github.com/conan-io/conan-center-index\"\n topics = (\"prng\", \"xoshiro\", \"header-only\")\n package_type = \"header-library\"\n settings = \"arch\", \"build_type\", \"compiler\", \"os\"\n\n @property\n def _minimum_compilers_version(self):\n return {\n \"apple-clang\": \"10\",\n \"clang\": \"6\",\n \"gcc\": \"7\",\n \"Visual Studio\": \"16\"\n }\n\n @property\n def _minimum_cpp_standard(self):\n return 17\n\n def package_id(self):\n self.info.clear()\n\n def validate(self):\n if self.settings.get_safe(\"compiler.cppstd\"):\n check_min_cppstd(self, self._minimum_cpp_standard)\n\n compiler = str(self.settings.compiler)\n version = Version(self.settings.compiler.version)\n try:\n min_version = self._minimum_compilers_version[compiler]\n if version < min_version:\n msg = (\n f\"{self.name} requires C++{self._minimum_cpp_standard} features \"\n f\"which are not supported by compiler {compiler} {version}.\"\n )\n raise ConanInvalidConfiguration(msg)\n except KeyError:\n msg = (\n f\"{self.ref} recipe lacks information about the {compiler} compiler, \"\n f\"support for the required C++{self._minimum_cpp_standard} features is assumed\"\n )\n self.output.warn(msg)\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version],\n destination=self.source_folder, strip_root=True)\n\n def package(self):\n copy(self, \"*.hpp\", src=self.source_folder,\n dst=os.path.join(self.package_folder, \"include/xoshiro-cpp/\"))\n copy(self, \"LICENSE\", src=self.source_folder,\n dst=os.path.join(self.package_folder, \"licenses\"))\n\n def package_info(self):\n self.cpp_info.set_property(\"cmake_file_name\", \"xoshiro-cpp\")\n self.cpp_info.set_property(\n \"cmake_target_name\", \"xoshiro-cpp::xoshiro-cpp\")\n self.cpp_info.bindirs = []\n self.cpp_info.libdirs = []\n","sub_path":"recipes/xoshiro-cpp/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"500190704","text":"import os, sys\n\n\nFILE_NAME = 'microwave-heating'\nFILE_EXT = 'm4a'\nnumbering_from = 10\n\n\nfp=r'./'\nos.chdir(fp)\n\n\ndef rename(folder):\n files=[]\n filedir = os.listdir(folder)\n index = numbering_from\n for fl in filedir:\n if os.path.isfile(fl):\n if os.path.splitext(fl)[-1] != \".py\":\n print(fl)\n os.rename(fl, FILE_NAME + '-' + str(index) + '.' 
+ FILE_EXT)\n                index = index + 1\n\n\n\n#rename files in fp\nrename(fp)\n\n#convert .m4a to wav\nimport argparse\nfrom pydub import AudioSegment\n\n\nfor filename in os.listdir(fp):\n    infilename = os.path.join(fp,filename)\n    if not os.path.isfile(infilename): continue\n    oldbase = os.path.splitext(filename)\n    newname = infilename.replace('.tmp', '.m4a')\n    output = os.rename(infilename, newname)\n\t \nformats_to_convert = ['.m4a']\n\nfor (dirpath, dirnames, filenames) in os.walk(fp):\n    for filename in filenames:\n        if filename.endswith(tuple(formats_to_convert)):\n\n            filepath = dirpath + '/' + filename\n            (path, file_extension) = os.path.splitext(filepath)\n            file_extension_final = file_extension.replace('.', '')\n            try:\n                track = AudioSegment.from_file(filepath,\n                        file_extension_final)\n                wav_filename = filename.replace(file_extension_final, 'wav')\n                wav_path = dirpath + '/' + wav_filename\n                print('CONVERTING: ' + str(filepath))\n                file_handle = track.export(wav_path, format='wav')\n                os.remove(filepath)\n            except:\n                print("ERROR CONVERTING " + str(filepath))","sub_path":"data/homedata-test/microwave-heating/fast.py","file_name":"fast.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"267996108","text":"# doable with a stack or with recursion\n# https://kingpodo.tistory.com/47?category=805745\n\ngraph = [\n[0,1,1,1,0,0,0], #0\n[1,0,0,0,1,0,0], #1\n[1,0,0,0,1,1,0], #2\n[1,0,0,0,0,1,0], #3\n[0,1,1,0,0,0,1], #4\n[0,0,1,1,0,0,1], #5\n[0,0,0,0,1,1,0]] #6\n\nrow = len(graph)\ncolumn = len(graph[0])\n\nvisited = [False] * column\n\n# recursive implementation\n# def dfs(graph, node, visited):\n#     visited[node] = True\n#     print(node)\n    \n#     for i in range(column):\n#         if graph[node][i] is 1 and visited[i] is False:\n#             dfs(graph, i, visited)\n\n# stack-based implementation\n# In the visited[] array, every vertex starts out as False (not yet visited), and stack[] is seeded with the vertex where the traversal starts.\n# Pop 0 from stack[], visit vertex 0, and set visited[0] to True.\n# This part is important!!! Push every not-yet-visited node among all nodes adjacent to vertex 0 onto stack[].\n# Pop 1 from stack[], visit vertex 1, and set visited[1] to True. \n# ...\n# If data is still left on the stack after every vertex has been visited, the remaining entries are popped as well, but since no unvisited vertices remain in visited[], only pop operations keep happening, and once everything has been popped the traversal ends. 
\n\ndef dfs(graph, root, visited):\n stack = [root]\n while stack:\n node = stack.pop()\n if visited[node] is False:\n print(node)\n visited[node] = True\n\n for i in reversed(range(row)):\n if graph[node][i] is 1 and visited[i] is False:\n stack.append(i)\n\ndfs(graph, 0, visited)\n","sub_path":"algorithm/search/dfs.py","file_name":"dfs.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"558256399","text":"import cv2\r\nimport numpy\r\nimport math\r\nimport sys\r\n\r\n## for adaptive maximum supression\r\ndef adaptive_sus(points,resize):\r\n sorted_points=list()\r\n for point_1 in points:\r\n small_dist=sys.float_info.max\r\n for point_2 in points:\r\n distance=(((point_1.pt[0]-point_2.pt[0])**2)+((point_1.pt[1]-point_2.pt[1])**2))**0.5\r\n if(distance!=0) and point_1.response<(0.9*point_2.response) and distanceresize:\r\n sorted_points=sorted_points[:resize]\r\n return sorted_points\r\n\t\r\n\r\n## for rotation invariance\r\ndef rotateinvariance(mag,angle):\r\n histogram=numpy.zeros(36)\r\n ret_angles=list()\r\n for row in range(0,angle.shape[0]):\r\n for col in range(0,angle.shape[1]):\r\n angle[row][col]%=360\r\n key=int(math.floor(angle[row][col]/10))\r\n histogram[key]+=mag[row][col]\r\n maxval=max(histogram)\r\n dst=list()\r\n for loop in range(0,len(histogram)):\r\n if histogram[loop]>=(maxval*0.8):\r\n dst.append(loop)\r\n for loc in dst:\r\n new_angle=angle\r\n for row in range(0,new_angle.shape[0]):\r\n for col in range(0,new_angle.shape[1]):\r\n new_angle[row][col]-=(loc*10)\r\n angle[row][col]%=360\r\n ret_angles.append(new_angle)\r\n return ret_angles\r\n\r\n## to match features\r\ndef create_matchings(pts_1, pts_2):\r\n final_features1=list()\r\n final_features2=list()\r\n distances=list()\r\n for img1 in pts_1:\r\n feat_desp1=pts_1[img1]\r\n totals_pt=dict()\r\n totals=list()\r\n for img2 in pts_2:\r\n total=0\r\n feat_desp2=pts_2[img2]\r\n ft_desp=(feat_desp1-feat_desp2)**2\r\n for loop in ft_desp:\r\n total+=loop\r\n \r\n totals.append(total)\r\n totals_pt[img2]=total\r\n totals=sorted(totals)\r\n \r\n if totals[0]<0.5 and (totals[0]/totals[1])<0.6: \r\n print(totals[0])\r\n final_features1.append(cv2.KeyPoint(int(img1.split()[0]),int(img1.split()[1]),1))\r\n ans=\"\"\r\n for key in totals_pt:\r\n if totals_pt[key]==totals[0]:\r\n ans=key\r\n final_features2.append(cv2.KeyPoint(int(ans.split()[0]),int(ans.split()[1]),1))\r\n distances.append(totals[0])\r\n \r\n\t## remove duplicates\r\n loop1=0\r\n removeEle=set()\r\n for key1 in final_features1:\r\n loop2=0\r\n key1=key1.pt\r\n for key2 in final_features1:\r\n key2=key2.pt\r\n if loop1!=loop2 and key1[0]==key2[0] and key1[1]==key2[1]:\r\n if distances[loop1]>distances[loop2]:\r\n removeEle.add(loop1)\r\n else:\r\n removeEle.add(loop2)\r\n loop2+=1\r\n loop1+=1\r\n \r\n loop1=0\r\n for key1 in final_features2:\r\n loop2=0\r\n key1=key1.pt\r\n for key2 in final_features2:\r\n key2=key2.pt\r\n if loop1!=loop2 and key1[0]==key2[0] and key1[1]==key2[1]:\r\n if distances[loop1]>distances[loop2]:\r\n removeEle.add(loop1)\r\n else:\r\n removeEle.add(loop2)\r\n loop2+=1\r\n loop1+=1\r\n \r\n res_features1=list()\r\n res_features2=list()\r\n matchings=list()\r\n out_loop=0\r\n for loop in range(0,len(distances)):\r\n if loop not in removeEle:\r\n res_features1.append(final_features1[loop])\r\n res_features2.append(final_features2[loop])\r\n matchings.append(cv2.DMatch(out_loop,out_loop,distances[loop]))\r\n out_loop+=1\r\n \r\n return 
res_features1, res_features2, matchings\r\n\r\n\r\n## to create 16x16 magnitude and angle matrix\r\ndef create_mag_angle(inp_img):\r\n rows, cols=inp_img.shape\r\n inp_img = numpy.float32(inp_img)\r\n g_x=numpy.zeros(inp_img.shape)\r\n g_y=numpy.zeros(inp_img.shape)\r\n for row in range(1,rows-1):\r\n for col in range(1,cols-1):\r\n g_x[row][col]=inp_img[row][col+1]-inp_img[row][col-1]\r\n g_y[row][col]=inp_img[row+1][col]-inp_img[row-1][col]\r\n \r\n magnitude=((g_x**2)+(g_y**2))**0.5\r\n degrees=numpy.degrees(numpy.arctan2(g_y,g_x))\r\n return magnitude, degrees\r\n\t\r\n\r\n## sift working\r\ndef sift(input_img, points):\r\n features=dict()\r\n grey_inp = cv2.GaussianBlur(cv2.cvtColor(input_img,cv2.COLOR_BGR2GRAY),(0,0),1.5)\r\n magnitude,degrees=create_mag_angle(grey_inp)\r\n key_loop=0\r\n for point in points:\r\n pt_c=int(point.pt[0])\r\n pt_r=int(point.pt[1])\r\n if pt_r-8>=0 and pt_c-8>=0 and pt_r+8<=magnitude.shape[0] and pt_c+8<=magnitude.shape[1]:\r\n mag16=magnitude[pt_r-8:pt_r+8,pt_c-8:pt_c+8]\r\n mag16=cv2.normalize(mag16,None,norm_type=cv2.NORM_L2)\r\n angleout=degrees[pt_r-8:pt_r+8,pt_c-8:pt_c+8]\r\n array=[0,4,8,12]\r\n ret_angles=rotateinvariance(mag16,angleout)\r\n ##multiple keypoints\r\n for angle16 in ret_angles:\r\n descrip_128=list()\r\n for r in array:\r\n for c in array:\r\n histogram=numpy.zeros(8)\r\n window_mag=mag16[r:r+4,c:c+4]\r\n window_angle=angle16[r:r+4,c:c+4]\r\n for i in range(0,4):\r\n for j in range(0,4):\r\n window_angle[i][j]%=360\r\n key=int(math.floor(window_angle[i][j]/45))\r\n histogram[key]+=window_mag[i][j]\r\n\r\n descrip_128.extend(list(histogram))\r\n ##normalise descriptor\r\n descrip_128 = numpy.clip(descrip_128, a_min=0,a_max=0.2)\r\n descrip_128= numpy.array(descrip_128)\r\n descrip_128 = cv2.normalize(descrip_128, None, norm_type=cv2.NORM_L2)\r\n keyName=str(pt_c)+\" \"+str(pt_r)+\" \"+str(++key_loop)\r\n features[keyName]=descrip_128\r\n \r\n return features\r\n\t\r\n\r\n## for local maximal supression\r\ndef max_suppression(input_img,threshold):\r\n rows,cols=input_img.shape\r\n for row in range(0,rows-3):\r\n for col in range(0,cols-3):\r\n a_max=input_img[row:row+3,col:col+3]\r\n _,max_val,_,(loc_c,loc_r)=cv2.minMaxLoc(a_max)\r\n if max_val>threshold:\r\n input_img[row:row+3,col:col+3]=0\r\n input_img[row+loc_r][col+loc_c]=max_val\r\n \r\n return input_img\r\n\t\r\n\r\n## for corner detection\r\ndef harris_points(input_img,smallval,threshold,resize):\r\n grey_inp=cv2.cvtColor(input_img,cv2.COLOR_BGR2GRAY)\r\n ix=cv2.Sobel(grey_inp,cv2.CV_32F,1,0,ksize=5)\r\n iy=cv2.Sobel(grey_inp,cv2.CV_32F,0,1,ksize=5)\r\n Ix2=ix**2\r\n gx2=cv2.GaussianBlur(Ix2,(3,3),0)\r\n Iy2=iy**2\r\n gy2=cv2.GaussianBlur(Iy2,(3,3),0)\r\n Ixy=ix*iy\r\n gxy=cv2.GaussianBlur(Ixy,(3,3),0)\r\n I_x2_paded=cv2.copyMakeBorder(gx2, 2, 2, 2, 2, cv2.BORDER_CONSTANT, 0)\r\n I_y2_paded=cv2.copyMakeBorder(gy2, 2, 2, 2, 2, cv2.BORDER_CONSTANT, 0)\r\n I_xy_paded=cv2.copyMakeBorder(gxy, 2, 2, 2, 2, cv2.BORDER_CONSTANT, 0)\r\n corner_mat=numpy.zeros(Ixy.shape)\r\n ret_points=list()\r\n for row in range(2,I_x2_paded.shape[0]-2):\r\n for col in range(2,I_x2_paded.shape[1]-2):\r\n sum_x2=numpy.sum(I_x2_paded[row-2:row+3,col-2:col+3])\r\n sum_y2=numpy.sum(I_y2_paded[row-2:row+3,col-2:col+3])\r\n sum_xy=numpy.sum(I_xy_paded[row-2:row+3,col-2:col+3])\r\n \r\n determinant=(sum_x2*sum_y2)-(sum_xy**2)\r\n trace=sum_x2+sum_y2\r\n corner=determinant/(smallval+trace)\r\n \r\n if corner>threshold:\r\n corner_mat[row-2][col-2]=corner\r\n \r\n paded_corner=cv2.copyMakeBorder(corner_mat, 1, 
1, 1, 1, cv2.BORDER_CONSTANT, 0)\r\n paded_corner=max_suppression(paded_corner,threshold)\r\n for row in range(0,paded_corner.shape[0]):\r\n for col in range(0,paded_corner.shape[1]):\r\n if paded_corner[row][col]>threshold:\r\n keyPt=cv2.KeyPoint(col-1,row-1,1)\r\n keyPt.response=paded_corner[row][col]\r\n ret_points.append(keyPt)\r\n \r\n return adaptive_sus(ret_points,resize)\r\n \r\n\r\n## to get image features\r\ndef image_features(threshold, small_val, adapt_resize, imgName, var):\r\n img=cv2.imread(imgName)\r\n points=harris_points(img,small_val,threshold,adapt_resize)\r\n features=sift(img,points)\r\n out_img=cv2.drawKeypoints(img, points, None, color=(0,255,0), flags=0)\r\n cv2.imwrite(\"points\"+var+\".png\",out_img)\r\n return img, features\r\n\t\r\n\r\n## constants\r\nthreshold=20000000\r\nsmall_val=0.000000000000000001\r\nadapt_resize=500\r\nimgName1=\"pano1_0008.png\"\r\nimgName2=\"pano1_0009.png\"\r\n\r\n## to starts working\r\nimg1, features1=image_features(threshold, small_val, adapt_resize, imgName1, \"1\")\r\nimg2, features2=image_features(threshold, small_val, adapt_resize, imgName2, \"2\")\r\nfeatures1, features2, matchings=create_matchings(features1, features2)\r\nprint(\"Correct pts:\",len(features1))\r\nout_img=cv2.drawMatches(img1,features1,img2,features2, matchings, None, flags=2)\r\ncv2.imwrite(\"Output.png\",out_img)","sub_path":"code/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":9416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"6058918","text":"import os\nimport json\nimport random\n\n\ndef get_datasets(dir_path):\n datasets = {}\n for file in os.listdir(dir_path):\n name = None\n for x in [\"train\", \"valid\", \"dev\", \"test\"]:\n if x in file:\n name = x\n if not name:\n continue\n\n path = os.path.join(dir_path, file)\n if \"json\" in path:\n with open(path, encoding='UTF-8') as f:\n datasets[name] = json.load(f)\n else:\n with open(path, encoding='UTF-8', errors='ignore') as f:\n datasets[name] = [i.strip() for i in f.readlines() if len(i) > 0]\n return datasets\n\n\ndef extrac_data(path):\n with open(path, encoding=\"utf-8\") as f:\n data = [x.strip() for x in f.readlines() if len(x) > 0]\n data_pos = []\n data_neg = []\n for line in data:\n seq = line.split(\"\\t\")\n if seq[-1] == \"0\":\n if seq[1] != \"NULL\":\n if len(seq[0]) == len(seq[1]):\n data_neg.append(\"\\t\".join(seq))\n else:\n seq[1] = seq[0]\n data_pos.append(\"\\t\".join(seq))\n data_neg = list(set(data_neg))\n print(\"neg len\", len(data_neg))\n random.shuffle(data_neg)\n neg_train = data_neg[:int(len(data_neg) * 0.8)]\n neg_valid = data_neg[int(len(data_neg) * 0.8): int(len(data_neg) * 0.9)]\n neg_test = data_neg[int(len(data_neg) * 0.9):]\n\n data_pos = list(set(data_pos))\n print(\"pos len\", len(data_pos))\n random.shuffle(data_pos)\n pos_train = data_pos[:int(len(data_pos) * 0.8)]\n pos_valid = data_pos[int(len(data_pos) * 0.8): int(len(data_pos) * 0.9)]\n pos_test = data_pos[int(len(data_pos) * 0.9):]\n\n train = neg_train + pos_train\n valid = neg_valid + pos_valid\n test = neg_test + pos_test\n for k, v in {\"train\": train, \"valid\": valid, \"test\": test}.items():\n dirpath = \"data/xiaowei/all/\"\n if not os.path.exists(dirpath):\n os.mkdir(dirpath)\n print(k, \"len\", len(v))\n with open(dirpath + k + \".txt\", \"w\", encoding=\"utf-8\") as f:\n f.write(\"\\n\".join(v))\n\n for k, v in {\"neg_train\": neg_train, \"neg_valid\": neg_valid, \"neg_test\": neg_test}.items():\n dirpath = 
\"data/xiaowei/neg/\"\n if not os.path.exists(dirpath):\n os.mkdir(dirpath)\n print(k, \"len\", len(v))\n with open(dirpath + k + \".txt\", \"w\", encoding=\"utf-8\") as f:\n f.write(\"\\n\".join(v))\n\n# if __name__ == '__main__':\n# extrac_data(\"TransformerBaselines/data/raw/xiaowei.txt\")\n","sub_path":"agents/seq2seq/T5/data_process.py","file_name":"data_process.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"191231016","text":"#!/usr/bin/env python\n\nimport argparse\nimport sys\n\nfrom runplaybook import run_playbook\nfrom inventory.inventory_query import get_instance_by_name\n\n\ndef provision(opts):\n extra_vars = dict()\n extra_vars['server_name'] = opts.name\n full_server_name = '%s.parkme.com' % extra_vars['server_name']\n run_playbook('provision.yml', opts.inventory, subset=full_server_name, extra_vars=extra_vars)\n\n\ndef build(opts):\n extra_vars = dict()\n extra_vars['server_name'] = opts.name\n full_server_name = '%s.parkme.com' % extra_vars['server_name']\n if opts.server_class == 'appservers':\n run_playbook('appservers.yml', opts.inventory, subset=full_server_name)\n elif opts.server_class == 'opsservers':\n run_playbook('opsservers.yml', opts.inventory, subset=full_server_name)\n else:\n raise ValueError('A valid server_class was not specified')\n\n\ndef deprovision(opts):\n extra_vars = dict()\n if opts.extra_vars is not None:\n extra_vars.update(opts.extra_vars)\n if opts.name is None:\n print('You cannot deprovision the entire domain at once. Please specify a server')\n sys.exit(1)\n full_server_name = '%s.parkme.com' % opts.name\n instance = get_instance_by_name(opts.name)\n instance_obj = instance[full_server_name]['instance']\n extra_vars['ec2_id'] = instance_obj.id\n extra_vars['region'] = instance_obj.region.name\n extra_vars['public_dns'] = instance_obj.public_dns_name\n run_playbook('deprovision.yml', 'production', subset=full_server_name, extra_vars=extra_vars)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"command\", help=\"The action which you wish to carry out\",\n choices=['create', 'destroy', 'build'])\n parser.add_argument(\"server_class\", help=\"The server type to create\", choices=['appservers', 'opsservers'])\n parser.add_argument(\"name\", help=\"The short name of the server to create or destroy\")\n parser.add_argument('-v', '--verbosity', help='increase output verbosity', action='count', default=0)\n parser.add_argument('-e', '--extra_vars', help='Any Additional variables to pass in (as key, value pairs)')\n parser.add_argument('-i', '--inventory', help='Override the default inventory file (production)',\n default='production', choices=['production', 'development'])\n options = parser.parse_args()\n\n if options.command == 'create':\n provision(options)\n elif options.command == 'destroy':\n deprovision(options)\n elif options.command == 'build':\n build(options)\n\n","sub_path":"devops/provision.py","file_name":"provision.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"56990513","text":"from urllib.request import urlopen\nimport xml.etree.ElementTree as et\n\nurl = \"https://gdata.youtube.com/feeds/api/standardfeeds/top_rated\"\nresponse = urlopen(url)\ncontents = response.read()\ntext = contents.decode('utf8')\nroot = et.fromstring(text)\nfor vid in 
root.findall('{http://www.w3.org/2005/Atom}entry'):\n print(vid.find('{http://www.w3.org/2005/Atom}title').text)\n","sub_path":"code/web/youtube_easy.py","file_name":"youtube_easy.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"645717191","text":"\"\"\"\nTesting for transistor models\n\"\"\"\nimport unittest\nfrom SemiPy.Extractors.TLM.TLMExtractor import TLMExtractor\nfrom physics.value import Value, ureg\nimport numpy as np\n\n\nclass TestTLMExtractors(unittest.TestCase):\n\n def test_tlmextraction(self):\n\n # path = '/home/connor/Documents/Stanford_Projects/Extractions/src/SampleData/FETExampleData/nano_patterning.csv'\n # idvg_path = '/home/connor/Documents/Stanford_Projects/Extractions/fetextraction/SemiPy/SampleData/TLMExampleData'\n idvg_path = '/home/connor/Documents/Stanford_Projects/Extractions/SemiPy/SampleData/TLMExampleDataShort'\n # idvg_path = '/home/connor/Documents/Stanford_Projects/Extractions/fetextraction/SemiPy/SampleData/TLMExampleDataShort'\n\n # idvd_path = '/home/connor/Documents/Stanford_Projects/Extractions/fetextraction/SemiPy/SampleData/FETExampleData/WSe2_Sample_4_Id_Vd.txt'\n # idvg_path = '/home/connor/Documents/Stanford_Projects/Extractions/fetextraction/SemiPy/SampleData/FETExampleData/WSe2_Sample_4_Id_Vg.txt'\n widths = Value(4.0, ureg.micrometer)\n # lengths = Value.array_like(np.array([0.5, 1.0, 2.0, 2.5, 3.0, 3.5]), unit=ureg.micrometer)\n lengths = Value.array_like(np.array([1.0, 0.5, 2.0]), unit=ureg.micrometer)\n tox = Value(90, ureg.nanometer)\n\n result = TLMExtractor(widths=widths, lengths=lengths, tox=tox, epiox=3.9,\n device_polarity='n', idvg_path=idvg_path,\n vd_values=[1.0, 2.0])\n\n result.save_tlm_plots()\n\n a = 5\n\n\n# if __name__ == '__main__':\n# test = TestFETExtractors()\n# test.test_fetextraction()\n\n","sub_path":"SemiPy/Extractors/TLM/test_tlmextractor.py","file_name":"test_tlmextractor.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"605525735","text":"\"\"\"\nldstorapi\n\n\"\"\"\n\nfrom flask import Flask, jsonify, make_response\nfrom flask_restful import request\nfrom ldstorapi.views import bp_api\nfrom ldstorapi.models import db, User\nfrom mongoengine.connection import ConnectionError\n\n\ndef create_app(config_filename):\n app = Flask(__name__)\n app.config.from_pyfile(config_filename)\n\n # This allows us to fire up the app without connecting to the database\n # note the app wont do much and will spit out errors constantly\n # but useful for checking docs...\n\n if not app.config.get('MONGODB_CONNECTION_DISABLED'):\n try:\n db.init_app(app)\n # Check that the admin user exists...\n # This is needed for registration of new apps\n # might remove this capability in later production versions\n # of the app...\n if not User.objects(username=\"admin\").first():\n user = User(username=\"admin\")\n user.password_hash = r'pbkdf2:sha1:1000$NfgQVyRr$a10aa2e345339090a7483f07ed90e4cc85a3ba0c'\n user.save()\n except ConnectionError:\n host = app.config.get('MONGODB_HOST')\n print(\"ERROR - Connection to mongodb running on: \" + host + \" failed\")\n exit(1)\n\n # Lock in the blueprint and set the URL prefix for the api version\n app.register_blueprint(bp_api, url_prefix=app.config.get('API_PREFIX'))\n\n return 
app\n","sub_path":"ldstorapi/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"250492074","text":"import copy\nimport hashlib\nimport pickle\nimport statistics\n\nfrom bson.json_util import loads\n\nfrom dataproc.model import Row, RowCreator\n\n\nclass Join():\n items = []\n\n def __init__(self, items=[]):\n self.items = items\n\n def appendjoin_inner_left_full(self, alias='join', data=[], where=lambda v1, v2: v1 == v2, row={}):\n app = False\n row[alias] = row.get(alias, [])\n for item in data:\n if where(row, item) is True:\n row[alias].append(item)\n self.items.append(Row(row))\n elif type in ['left', 'full']:\n self.items.append(Row(row))\n if app is False and type != 'inner':\n self.items.append(Row(row))\n\n def appendjoin_right_full(self, alias='join', data=[], where=lambda v1, v2: v1 == v2, item={}):\n app = False\n for row in data:\n row[alias] = row.get(alias, [])\n if where(row, item) is True:\n row[alias].append(item)\n self.items.append(Row(row))\n else:\n self.items.append(Row({alias: [item]}))\n if app is False:\n self.items.append(Row({alias: [item]}))\n\n def join_inner_left_full(self, alias='join', data=[], where=lambda v1, v2: v1 == v2, row={}):\n app = False\n row[alias] = row.get(alias, {})\n for item in data:\n if where(row, item) is True:\n row[alias] = item\n self.items.append(Row(row))\n elif type in ['left', 'full']:\n row[alias] = {}\n self.items.append(Row(row))\n if app is False and type != 'inner':\n self.items.append(Row(row))\n\n def join_right_full(self, alias='join', data=[], where=lambda v1, v2: v1 == v2, item={}):\n app = False\n for row in data:\n row[alias] = row.get(alias, {})\n if where(row, item) is True:\n tmp = copy.deepcopy(row)\n tmp[alias] = item\n self.items.append(Row(tmp))\n else:\n self.items.append(Row({alias: item}))\n if app is False:\n self.items.append(Row({alias: item}))\n\n def get_items(self):\n return self.items\n\n\nclass Executor():\n __SKIP_ = 0\n __PAGE__ = 1\n __DATA__ = []\n __SELECT__ = []\n __PIPELINE__ = []\n\n def __init__(self, pipeline=[]):\n if hasattr(pipeline, 'pipeline') and callable(getattr(pipeline, 'pipeline')):\n pipeline = pipeline.pipeline()\n self.__PIPELINE__ = list(pipeline)\n for row in self.__PIPELINE__:\n call = getattr(self, row[0], None)\n if callable(call):\n if len(row) == 1:\n call()\n elif isinstance(row[1], dict):\n call(**row[1])\n elif isinstance(row[1], (tuple, list)):\n call(*tuple(row[1]))\n\n def from_list(self, data: list = []):\n try:\n self.__SELECT__ = []\n self.__DATA__ = list(map(Row, data))\n except Exception:\n pass\n finally:\n return self\n\n def from_bson(self, path):\n try:\n data = []\n with open(path, 'r') as file:\n for line in file:\n try:\n tmp = loads(line)\n except Exception as e:\n print(e)\n tmp = None\n if tmp is not None:\n if isinstance(tmp, list):\n for row in tmp:\n data.append(row)\n else:\n data.append(tmp)\n self.from_list(data)\n except Exception:\n pass\n finally:\n return self\n\n def __getattr__(self, attr):\n row = RowCreator()\n getattr(row, attr)\n return row\n\n def select(self, *args):\n self.__DATA__ = list(map(lambda row: Row(row).select(*args), self.__DATA__))\n self.__SELECT__ = []\n return self\n\n def reduce(self, *args):\n data = {}\n args = list(map(str, args))\n for item in self.__DATA__:\n row = {\"__COUNT__\": 0}\n for key in item.keys():\n if key in args:\n continue\n row[key] = item.get(key, None)\n hash = 
self.hash(row)\n data[hash] = data.get(hash, row)\n data[hash]['__COUNT__'] = data[hash].get('__COUNT__', 0)\n data[hash]['__COUNT__'] += 1\n for key in args:\n data[hash]['__VALUES__'] = data[hash].get('__VALUES__', {})\n data[hash]['__VALUES__'][key] = data[hash]['__VALUES__'].get(key, [])\n data[hash]['__VALUES__'][key].append(item.get(key, None))\n self.__DATA__ = list(map(Row, data.values()))\n self.__SELECT__ = []\n return self\n\n def function(self, *args):\n def _map(fields: list, fnc: lambda v: v, data: list = []):\n if callable(fnc):\n for row in data:\n for field in fields:\n field = str(field)\n row['__VALUES__'] = row.get('__VALUES__', {})\n row[field] = fnc(row['__VALUES__'].get(field, None))\n return data\n\n pipe = list(args)\n fnc = pipe.pop(-1)\n self.__DATA__ = _map(pipe, fnc, self.__DATA__)\n return self\n\n def custom(self, alias='custom', fnc=lambda v: v):\n alias = str(alias)\n\n def _map(row):\n if callable(fnc):\n try:\n row[alias] = fnc(Row(row))\n except Exception:\n row[alias] = row.get(alias, None)\n else:\n row[alias] = fnc\n return Row(row)\n\n self.__DATA__ = list(map(_map, self.__DATA__))\n self.__SELECT__ = []\n return self\n\n def map(self, *args):\n def _map(row):\n for fnc in args:\n if callable(fnc):\n try:\n row = fnc(Row(row))\n except Exception:\n continue\n return Row(row)\n\n self.__DATA__ = list(map(_map, self.__DATA__))\n self.__SELECT__ = []\n return self\n\n def count(self, name):\n name = str(name)\n for row in self.__DATA__:\n row[name] = row.get('__COUNT__', 0)\n return self\n\n def list(self, *args):\n for row in self.__DATA__:\n for field in args:\n alias = field.get_alias() if isinstance(field, RowCreator) else str(field)\n field = str(field)\n alias = field if alias is None else alias\n row['__VALUES__'] = row.get('__VALUES__', {})\n items = [i for i in row['__VALUES__'].get(field, []) if i is not None]\n row[alias] = list(items) if len(items) > 0 else None\n return self\n\n def distinct(self, *args):\n for row in self.__DATA__:\n for field in args:\n alias = field.get_alias() if isinstance(field, RowCreator) else str(field)\n field = str(field)\n alias = field if alias is None else alias\n row['__VALUES__'] = row.get('__VALUES__', {})\n items = [i for i in row['__VALUES__'].get(field, []) if i is not None]\n row[alias] = list(set(list(items))) if len(items) > 0 else None\n return self\n\n def min(self, *args):\n for row in self.__DATA__:\n for field in args:\n alias = field.get_alias() if isinstance(field, RowCreator) else str(field)\n field = str(field)\n alias = field if alias is None else alias\n row['__VALUES__'] = row.get('__VALUES__', {})\n items = [i for i in row['__VALUES__'].get(field, []) if i is not None]\n row[alias] = min(items) if len(items) > 0 else None\n return self\n\n def max(self, *args):\n for row in self.__DATA__:\n for field in args:\n alias = field.get_alias() if isinstance(field, RowCreator) else str(field)\n field = str(field)\n alias = field if alias is None else alias\n row['__VALUES__'] = row.get('__VALUES__', {})\n items = [i for i in row['__VALUES__'].get(field, []) if i is not None]\n row[alias] = max(items) if len(items) > 0 else None\n return self\n\n def sum(self, *args):\n for row in self.__DATA__:\n for field in args:\n alias = field.get_alias() if isinstance(field, RowCreator) else str(field)\n field = str(field)\n alias = field if alias is None else alias\n row['__VALUES__'] = row.get('__VALUES__', {})\n items = [i for i in row['__VALUES__'].get(field, []) if i is not None]\n row[alias] = 
sum(items) if len(items) > 0 else None\n return self\n\n def mean(self, *args):\n for row in self.__DATA__:\n for field in args:\n alias = field.get_alias() if isinstance(field, RowCreator) else str(field)\n field = str(field)\n alias = field if alias is None else alias\n row['__VALUES__'] = row.get('__VALUES__', {})\n items = [i for i in row['__VALUES__'].get(field, []) if i is not None]\n row[alias] = statistics.mean(items) if len(items) > 0 else None\n return self\n\n def median(self, *args):\n for row in self.__DATA__:\n for field in args:\n alias = field.get_alias() if isinstance(field, RowCreator) else str(field)\n field = str(field)\n alias = field if alias is None else alias\n row['__VALUES__'] = row.get('__VALUES__', {})\n items = [i for i in row['__VALUES__'].get(field, []) if i is not None]\n row[alias] = statistics.median(items) if len(items) > 0 else None\n return self\n\n def data(self):\n for row in self.__DATA__:\n for index in ['__COUNT__', '__VALUES__']:\n try:\n del row[index]\n except Exception:\n continue\n return self.__DATA__\n\n def page(self, page: int = 1):\n self.__PAGE__ = page\n return self\n\n def skip(self, skip: int = 0):\n self.__SKIP__ = skip\n return self\n\n def filter(self, *args):\n cls = []\n for fnc in args:\n if callable(fnc):\n cls.append(fnc)\n\n def _filter(row):\n for fnc in cls:\n if fnc(row):\n return True\n return False\n\n if len(args) > 0:\n self.__DATA__ = list(filter(_filter, self.__DATA__))\n return self\n\n def limit(self, limit: int = 1000):\n self.__LIMIT__ = limit\n _len = len(self.__DATA__)\n if isinstance(self.__SKIP__, int) and self.__SKIP__ > 0:\n if _len > self.__SKIP__:\n _start = (self.__SKIP__ - 1)\n self.__DATA__ = self.__DATA__[_start:(_start + limit)]\n else:\n self.__DATA__ = []\n self.__SKIP__ = 0\n elif isinstance(self.__PAGE__, int) and self.__PAGE__ > 1:\n self.__SKIP__ = ((self.__PAGE__ - 1) * limit) + 1\n self.__PAGE__ = 1\n return self.limit(limit)\n else:\n self.__DATA__ = self.__DATA__[0:limit]\n self.__SELECT__ = []\n return self\n\n def inner_appendjoin(self, alias='inner_appendjoin', data=[], where=lambda v1, v2: v1 == v2):\n return self.appendjoin(alias, data, where, 'inner')\n\n def left_appendjoin(self, alias='left_appendjoin', data=[], where=lambda v1, v2: v1 == v2):\n return self.appendjoin(alias, data, where, 'left')\n\n def right_appendjoin(self, alias='right_appendjoin', data=[], where=lambda v1, v2: v1 == v2):\n return self.appendjoin(alias, data, where, 'right')\n\n def full_appendjoin(self, alias='full_appendjoin', data=[], where=lambda v1, v2: v1 == v2):\n return self.appendjoin(alias, data, where, 'full')\n\n def inner_join(self, alias='inner_join', data=[], where=lambda v1, v2: v1 == v2):\n return self.join(alias, data, where, 'inner')\n\n def left_join(self, alias='left_join', data=[], where=lambda v1, v2: v1 == v2):\n return self.join(alias, data, where, 'left')\n\n def right_join(self, alias='right_join', data=[], where=lambda v1, v2: v1 == v2):\n return self.join(alias, data, where, 'right')\n\n def full_join(self, alias='full_join', data=[], where=lambda v1, v2: v1 == v2):\n return self.join(alias, data, where, 'full')\n\n def appendjoin(self, alias='join', data=[], where=lambda v1, v2: v1 == v2, type='inner'):\n alias = str(alias)\n items = []\n if hasattr(data, 'data') and callable(getattr(data, 'data')):\n data = data.data()\n\n if not callable(where):\n return self\n\n join = Join(items)\n\n if type in ['inner', 'left', 'full']:\n list(map(lambda row: 
join.appendjoin_inner_left_full(alias, data, where, row), self.__DATA__))\n\n if type in ['right', 'full']:\n list(map(lambda row: join.appendjoin_right_full(alias, self.__DATA__, where, row), data))\n\n self.__DATA__ = join.get_items()\n self.__SELECT__ = []\n return self\n\n def join(self, alias='join', data=[], where=lambda v1, v2: v1 == v2, type='inner'):\n alias = str(alias)\n items = []\n if hasattr(data, 'data') and callable(getattr(data, 'data')):\n data = data.data()\n\n if not callable(where):\n return self\n\n join = Join(items)\n\n if type in ['inner', 'left', 'full']:\n list(map(lambda row: join.join_inner_left_full(alias, data, where, row), self.__DATA__))\n\n if type in ['right', 'full']:\n list(map(lambda row: join.join_right_full(alias, self.__DATA__, where, row), data))\n\n self.__DATA__ = join.get_items()\n self.__SELECT__ = []\n return self\n\n def sort(self, **sort):\n order = []\n asc = set()\n desc = set()\n\n for k in sort.keys():\n if sort[k] in ['0', '-1', 'desc', 'DESC']:\n if len(asc) > 0:\n order.append([tuple(asc), False])\n asc = set()\n desc.add(k)\n else:\n if len(desc) > 0:\n order.append([tuple(desc), True])\n desc = set()\n asc.add(k)\n\n if len(asc) > 0:\n order.append([tuple(asc), False])\n if len(desc) > 0:\n order.append([tuple(desc), True])\n\n for sort in order:\n self.__DATA__ = sorted(self.__DATA__, key=Row.values_getter(*sort[0]), reverse=sort[1])\n\n return self\n\n def hash(self, data):\n return hashlib.sha1(str(pickle.dumps(data)).encode()).hexdigest()\n","sub_path":"dataproc/executor/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":15171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"123748149","text":"#Merge Sort\n\nimport random\n\ndef merge(lst0, lst1):\n ret = []\n while lst0 and lst1:\n if lst0[0] <= lst1[0]:\n ret.append(lst0.pop(0))\n else:\n ret.append(lst1.pop(0))\n ret.extend(lst0)\n ret.extend(lst1)\n return ret\n\ndef mergesort(lst):\n if len(lst) <= 1:\n return lst\n # random to avoid dead loop for special sequence\n r = lst[random.randint(0, len(lst) - 1)]\n left, mid, right = [], [], []\n for i in lst:\n if i < r:\n left.append(i)\n elif i == r:\n mid.append(i)\n else:\n right.append(i)\n left = mergesort(left)\n left.extend(mid)\n right = mergesort(right)\n ret = merge(left, right)\n return ret\n\nx = [1,5,8,7,9,2,3]\nmergesort(x)\n","sub_path":"Merge Sort.py","file_name":"Merge Sort.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"172646483","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse, JsonResponse, HttpResponseRedirect\nfrom django.core import serializers\nimport datetime, time\nfrom apps.insumos.models import Insumos\nfrom apps.Propietarios.models import Propietarios\nfrom apps.fabricas.models import Fabricas\nfrom .models import Compras, Insumos_Compras\nfrom django.contrib.auth.models import User\n\n# Create your views here.\n\ndef ComprasHome(request):\n user_id = request.session.get('user_id')\n compras = Compras.objects.raw('SELECT * FROM compras com INNER JOIN fabricas fa ON fa.id_fab = com.fabrica_id INNER JOIN propietarios pro ON pro.id_pro = com.propietario_id WHERE pro.user_id = %s ', [user_id])\n\n return render(request, 'compras/compras.html', {'compras': compras})\n \n\ndef GetCompras(request):\n if request.method == \"POST\":\n search_type = request.POST.get('search_type') \n 
from_date = request.POST.get('from_date')\n to_date = request.POST.get('to_date')\n filtro_cp_mes = request.POST.get('filtro_cp_mes') \n def aplicar(): \n repetido, unico_id = [], []\n for x in compras:\n if x.id_ins not in unico_id:\n unico_id.append(x.id_ins)\n else:\n if x.id_ins not in repetido:\n repetido.append(x.id_ins)\n print (\"unico_id \" + str(unico_id))\n insumos = Insumos.objects.filter(id_ins__in = unico_id)\n return render(request, 'compras/filtros.html', {'compras': compras, 'insumos': insumos})\n user_id = request.session.get('user_id')\n if search_type == 'by_month':\n #return HttpResponse(\"xmes\")\n compras = Compras.objects.raw('SELECT * FROM compras c INNER JOIN insumos_compras ic ON ic.compra_id = c.id_compra INNER JOIN fabricas f ON c.fabrica_id = f.id_fab INNER JOIN propietarios p ON p.id_pro = c.propietario_id INNER JOIN insumos ins ON ins.id_ins = ic.insumo_id WHERE EXTRACT(month FROM fecha_compra) = %s AND p.user_id = %s', [filtro_cp_mes, user_id])\n return aplicar() \n elif search_type == 'by_range':\n #return HttpResponse(\"xrango\")\n compras = Compras.objects.raw('SELECT * FROM compras c INNER JOIN insumos_compras ic ON ic.compra_id = c.id_compra INNER JOIN fabricas f ON c.fabrica_id = f.id_fab INNER JOIN propietarios p ON p.id_pro = c.propietario_id INNER JOIN insumos ins ON ins.id_ins = ic.insumo_id WHERE c.fecha_compra BETWEEN %s AND %s AND p.user_id = %s',[from_date, to_date, user_id])\n return aplicar() \n else:\n return HttpResponse(\"No hay criterios de busqueda\") \n \n for obj in compras:\n return HttpResponse(obj.id_compra) \n #return HttpResponse(filtro_cp_mes)\n \n \n \n \n \ndef GetInsID(request):\n if request.method == \"GET\" and request.is_ajax(): \n id_insumo = request.GET.get(\"id_insumo\")\n insumos = Insumos.objects.filter(id_ins = id_insumo)\n for ins in insumos:\n return HttpResponse(ins.id_ins)\n \ndef CrearCompra(request):\n if request.method == 'POST':\n action = request.POST.get('add_in_editar')\n id_compra = request.POST.get('referencia')\n fabrica_id = request.POST.get('fabrica_id')\n propietario_id = request.POST.get('propietario_id')\n fecha_compra = request.POST.get('fecha_compra')\n compra = Compras(id_compra = id_compra, fabrica_id = fabrica_id, propietario_id = propietario_id, fecha_compra = fecha_compra)\n if action == 'edi':\n print(\"=> edit\")\n else:\n compra.save()\n \n insumo_id = request.POST.getlist('insumo_id[]') \n cantidad = request.POST.getlist('cantidad[]')\n precio = request.POST.getlist('precio[]')\n \n gramos_kl, gramos_lb, militros = 0,0,0\n j_kilos , j_libras, j_litros = [],[],[]\n va, und_min = [] , []\n dt = []\n for i, c, p in zip(insumo_id, cantidad, precio):\n print(\"ID del insumo es: \"+str(i)+ \" Los precios son: \"+str(p))\n price = float(p)\n valor = Insumos.objects.filter(id_ins = i)\n for ins in valor:\n print(ins.nombre_ins)\n print(ins.unidad_medida_comercial_ins)\n if ins.unidad_medida_comercial_ins == 'kilos':\n print(\"Cantidad es: \"+str(ins.cantidad_comercial_ins))\n gramos_kl = price / (float(ins.cantidad_comercial_ins) * 1000)\n j_kilos += [gramos_kl]\n va += [gramos_kl]\n und_min += ['kl => gr']\n elif ins.unidad_medida_comercial_ins == 'libras':\n print(\"Cantidad es: \"+str(ins.cantidad_comercial_ins))\n gramos_lb = price / (float(ins.cantidad_comercial_ins) * 500)\n j_libras += [gramos_lb]\n va += [gramos_lb]\n und_min += ['lb => gr']\n #print(\"Valor de libras por gramo \" + str(gramos_lb))\n elif ins.unidad_medida_comercial_ins == 'litros':\n print(\"Cantidad litro: 
\"+str(ins.cantidad_comercial_ins))\n militros = price (float(ins.cantidad_comercial_ins * 1000) )\n j_litros += [militros]\n va += [militros]\n und_min += ['lt => ml']\n print(\"Militros \" + str(militros))\n if gramos_kl != '' and gramos_lb == '' and militros == '':\n dt = j_kilos\n elif gramos_lb != '' and gramos_kl == '' and militros == '':\n dt = j_libras\n elif militros != '' and gramos_kl == '' and gramos_lb == '': \n dt = j_litros\n elif gramos_kl != '' and gramos_lb != '' and militros != '':\n dt = va\n for insu, canti, prec, pumi, und_min_x in zip(insumo_id, cantidad, precio, dt, und_min):\n entry = Insumos_Compras(cantidad = canti, precio = prec, compra_id = id_compra, precio_und_min_ico = pumi, und_min_ico = und_min_x, insumo_id = insu)\n entry.save()\n if action == 'edi':\n return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))\n else:\n return redirect('compras:listar')\n else: \n insumos = Insumos.objects.all()\n user_id = request.session.get('user_id')\n propietario = Propietarios.objects.raw('SELECT * FROM propietarios WHERE user_id = %s', [user_id])\n fabricas = Fabricas.objects.raw('SELECT * FROM fabricas fab INNER JOIN propietarios prop ON fab.propietario_id = prop.id_pro INNER JOIN auth_user u ON prop.user_id = u.id WHERE u.id = %s', [user_id])\n date_time = datetime.datetime.now()\n date = date_time.date()\n time = date_time.time()\n print (date)\n #print (time.hour,time.minute,time.second)\n tiempo = time.strftime(\"%S%f\")\n my_time = [date, tiempo]\n print (my_time)\n return render(request, 'compras/crear.html', {'insumos': insumos, 'fabricas':fabricas, 'propietario':propietario, 'tiempo':tiempo})\n \ndef EditarCompra(request, id_compra):\n if request.method == 'POST':\n id_compra = request.POST.get('id_compra')\n insumo_id = request.POST.getlist('insumo_id[]') \n cantidad = request.POST.getlist('cantidad[]')\n precio = request.POST.getlist('precio[]')\n id_ico = request.POST.getlist('id_ico[]') #id de la compra del insumo\n val_und_min_ins = request.POST.getlist('val_und_min_ins[]') \n \n total_und = 0\n unds_arr = []\n for val_und, pric in zip(val_und_min_ins, precio):\n val_un = float(val_und)\n pri = float(pric)\n print(\"los valores \" + str(val_un) + \" los precios \" + str(pri)) \n total_und = pri / val_un \n print(total_und)\n unds_arr.append(total_und)\n print(unds_arr) \n for cant, precio, t_un, insumo, id_ic in zip(cantidad, precio,unds_arr, insumo_id, id_ico):\n Insumos_Compras.objects.filter(id_ico=id_ic).update(cantidad= cant, precio = precio, precio_und_min_ico = t_un, insumo_id = insumo)\n return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))\n else:\n compra = Compras.objects.raw('SELECT * FROM compras c INNER JOIN propietarios p ON p.id_pro = c.propietario_id INNER JOIN fabricas f ON c.fabrica_id = f.id_fab WHERE c.id_compra = %s', [id_compra])\n detalles_compra = Compras.objects.raw('SELECT * FROM compras c INNER JOIN propietarios p ON p.id_pro = c.propietario_id INNER JOIN fabricas f ON c.fabrica_id = f.id_fab INNER JOIN insumos_compras ci ON c.id_compra = ci.compra_id INNER JOIN insumos ins ON ci.insumo_id = ins.id_ins WHERE c.id_compra = %s', [id_compra])\n insumos_all = Insumos.objects.all()\n return render(request, 'compras/detalles.html', {'compra':compra, 'detalles_compra':detalles_compra, 'insumos_all':insumos_all})\n \n\ndef DetallesCompra(request):\n \n first_name = \"SENA VELEZ\"\n last_name = \"SEN\"\n username = \"prueba\"\n password = \"prueba\"\n user = User.objects.create_user(username=username, 
password=password)\n #return render(request, 'register.html') \n if request.method == \"GET\" and request.is_ajax(): \n compra_id = request.GET.get(\"compra_id\")\n #lista = serializers.serialize('json', rooms)\n rooms = list(Insumos_Compras.objects.all().values())\n data = dict()\n data['rooms'] = rooms\n return JsonResponse(data)\n \n \n else:\n print(\"something no GET cause its post\")\n \n ''' \n if request.method == 'POST':\n #planta = Compras.objects.get(id_compra = 43019594)\n nombre = request.POST.getlist('nombre[]')\n cedula = ['139704638','1099342753','1005363794']\n telefono = ['3202632051','3125346587','3148789965']\n datos = {\n\t\t\t\"nombres\": nombre,\n\t\t\t\"cedulas\": cedula\n\t\t}\n data = dict()\n data['datos'] = datos\n \n for nombre, cedula,telefono in zip(nombre, cedula,telefono):\n print(nombre, end=\" \")\n entry = Propietarios(nombre_propietario = nombre, cc_propietario =cedula, tel_propietario = telefono)\n entry.save()\n \n array = [\"Hola\", \"Buenas\", \"Adios\"]\n print (array[2]) \n \n'''\ndef EliminarCompra(request, id_compra):\n if request.method == 'POST':\n print(\"Eliminar compra\")\n\ndef EliminarDetallesCompra(request, id_ico, compra):\n insumos_compras = Insumos_Compras.objects.get(id_ico = id_ico)\n if request.method == 'POST':\n insumos_compras.delete()\n return redirect('compras:editar', compra)\n return render(request, 'compras/eliminar.html', {'insumos_compras': insumos_compras})\n ","sub_path":"apps/compras/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"370452826","text":"# -*- coding: utf-8 -*-\n# @Author: Marte\n# @Date: 2018-09-30 21:28:05\n# @Last Modified by: Marte\n# @Last Modified time: 2018-09-30 22:10:10\nfrom collections import Iterable\nfrom collections import Iterator\nimport time\n\n\nclass Classmate(object):\n def __init__(self):\n self.names = list()\n\n def add(self,name):\n self.names.append(name)\n\n def __iter__(self):\n # 如果想要一个对象称为一个可以迭代的对象,即可以使用for,那么必须实习__iter__方法\n return ClassIterator(self)\n\nclass ClassIterator(object):\n def __init__(self,obj):\n self.obj = obj\n self.current_num = 0\n def __iter__(self):\n pass\n def __next__(self):\n if self.current_num < len(self.obj.names):\n\n ret = self.obj.names[self.current_num]\n self.current_num += 1\n return ret\n\n else:\n raise StopIteration(\" error\")\n\nclassmate = Classmate()\n\nclassmate.add(\"老三\")\nclassmate.add(\"王三\")\nclassmate.add(\"张三\")\n\n# print(\"判断classmate是否是可以迭代的对象:\",isinstance(classmate,Iterable))\n\n# classmate_iterator = iter(classmate)\n\n# print(\"判断classmate是否是可以迭代的对象:\",isinstance(classmate_iterator,Iterator))\n\n# iter(classmate)\n\n\nfor name in classmate:\n print(name)\n time.sleep(1)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# for temp in xxx_obj:\n# pass\n\n# 1. 判断xxx_obj是否可以迭代:只要创建它的类里边有__iter__这个方法,那么它就可以迭代\n# 2. 在第一步成立的前提下,调用iter函数,得到xxx_obj对象的__iter__方法得到的返回值(引用):\n# 如果__iter__返回的那个引用(对象)里边实现了__iter__和__next__方法,那么这个就是生成器(ClassIterator) 每一次for循环,就是调用__next__方法\n#\n# 3. __iter__ 方法的返回值就是一个迭代器","sub_path":"python 多任务/协程(迭代器,生成器等)/模拟实现迭代器的类的方法.py","file_name":"模拟实现迭代器的类的方法.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"525963375","text":"#\n# Copyright (c) 2020-2021 Arm Limited and Contributors. 
All rights reserved.\n# SPDX-License-Identifier: Apache-2.0\n#\n\"\"\"Synchronise the offline target database with the online database.\n\nThis utility performs the following actions:\n* Downloads the latest online target database\n* Saves the database to a local file in the mbed-tools repository\n* Writes a news file detailing any added, removed or modified boards\n\"\"\"\n\nimport argparse\nimport logging\nimport sys\n\nfrom dataclasses import dataclass\nfrom pathlib import Path\n\nfrom mbed_tools_ci_scripts.create_news_file import create_news_file, NewsType\nfrom mbed_tools.lib.exceptions import ToolsError\nfrom mbed_tools.lib.logging import log_exception, set_log_level\n\nfrom mbed_tools.targets._internal.board_database import get_board_database_path\nfrom mbed_tools.targets.boards import Boards\n\nlogger = logging.getLogger()\n\nBOARD_DATABASE_PATH = get_board_database_path()\n\n\n@dataclass(frozen=True)\nclass DatabaseComparisonResult:\n \"\"\"Result of the database comparison.\"\"\"\n\n boards_added: set\n boards_removed: set\n boards_modified: set\n\n\ndef save_board_database(board_database_text: str, output_file_path: Path) -> None:\n \"\"\"Save a snapshot of the board database to a local file.\n\n Args:\n board_database_text: json formatted text containing the board data returned from the online database\n output_file_path: the path to the output file\n \"\"\"\n output_file_path.parent.mkdir(exist_ok=True)\n output_file_path.write_text(board_database_text)\n\n\ndef compare_databases(offline_boards: Boards, online_boards: Boards) -> DatabaseComparisonResult:\n \"\"\"Compare offline and online board databases.\"\"\"\n added = online_boards - offline_boards\n removed = offline_boards - online_boards\n added_board_names = set(b.board_name for b in added)\n removed_board_names = set(b.board_name for b in removed)\n modified_board_names = added_board_names & removed_board_names\n added_board_names -= modified_board_names\n removed_board_names -= modified_board_names\n return DatabaseComparisonResult(added_board_names, removed_board_names, modified_board_names)\n\n\ndef create_news_item_text_from_boards(prefix: str, board_names: set) -> str:\n \"\"\"Create a news item string from the list of boards.\"\"\"\n return f\"{prefix} {', '.join(sorted(board_names))}.\\n\"\n\n\ndef create_news_file_text_from_result(result: DatabaseComparisonResult) -> str:\n \"\"\"Creates and writes a news file from the result of the database update.\n\n Args:\n result: Result of the database update\n \"\"\"\n news_item_text = \"\"\n if result.boards_added:\n news_item_text = create_news_item_text_from_boards(\"Targets added:\", result.boards_added)\n\n if result.boards_removed:\n news_item_text = create_news_item_text_from_boards(f\"{news_item_text}Targets removed:\", result.boards_removed)\n\n if result.boards_modified:\n news_item_text = create_news_item_text_from_boards(f\"{news_item_text}Targets modified:\", result.boards_modified)\n\n return news_item_text\n\n\ndef parse_args() -> argparse.Namespace:\n \"\"\"Parse the command line.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--verbose\", \"-v\", action=\"count\", default=0)\n return parser.parse_args()\n\n\ndef main(args: argparse.Namespace) -> int:\n \"\"\"Main entry point.\"\"\"\n set_log_level(args.verbose)\n try:\n online_boards = Boards.from_online_database()\n offline_boards = Boards.from_offline_database()\n result = compare_databases(offline_boards, online_boards)\n if not (result.boards_added or result.boards_removed or 
result.boards_modified):\n logger.info(\"No changes to commit. Exiting.\")\n return 0\n\n news_file_text = create_news_file_text_from_result(result)\n create_news_file(news_file_text, NewsType.feature)\n save_board_database(online_boards.json_dump(), BOARD_DATABASE_PATH)\n return 0\n except ToolsError as tools_error:\n log_exception(logger, tools_error)\n return 1\n\n\nif __name__ == \"__main__\":\n sys.exit(main(parse_args()))\n","sub_path":"ci_scripts/sync_board_database.py","file_name":"sync_board_database.py","file_ext":"py","file_size_in_byte":4102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"179907445","text":"from pymongo import MongoClient\nfrom Usuarios import users\nimport sys\n#Database users\n\nusuarios = [\n users('Carlos','Esparza','Medel','carlos@ipn.mx'),\n users('Andy','Perez','Martinez','andy@ipn.mx')\n] \n\n#Connection to the DB\nclient = MongoClient('mongodb://localhost:27017/')\n\n#Create the DB\ndb = client['conexion']\n\n#Create a new collection\ncollection = db[\"Usuarios\"]\n\n#Insert elements into the collection (table)(CREATE)\n\n\ndef create():\n for user in usuarios:\n collection.insert_one(user.toDBCollection())\n return \n\ncreate()\n\ndef read():\n cursor = collection.find()\n for usr in cursor:\n print(usr)\n\ndef update():\n myquery = { \"nombre\": {\"$regex\": \"Carlos\"}}\n newvalues = {\"$set\": {\"nombre\": \"Carlitros\"}}\n collection.update_many(myquery, newvalues)\n\n collection.update_one(myquery, newvalues)\n\n return read()\n\ndef delete():\n myquery2 = {\"nombre\": {\"$regex\": \"Carlitros\"}}\n collection.delete_many(myquery2)\n return read()\n \nread()\nupdate()\ndelete()","sub_path":"MongoDB/funciones.py","file_name":"funciones.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"201953122","text":"\"\"\"\nSome utilities for working with spiders\n\"\"\"\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom itertools import izip_longest\nfrom scrapy.crawler import Crawler\nfrom scrapy.utils.project import get_project_settings\nimport magic\nimport subprocess\nimport pydocx\nimport os\nimport lxml.etree\nimport lxml.html\nfrom lxml.html import HTMLParser\nfrom lxml.html.clean import clean_html,Cleaner\nfrom logging import raiseExceptions\n\n\nHTML = 1\nDOC = 2\nDOCX = 3\nPDF = 4\n#ZIP = 5\n\ndef list_spiders():\n settings = get_project_settings()\n crawler = Crawler(settings)\n return crawler.spiders.list()\n\n\ndef check_file_type(filepath, as_string=False):\n filetype = magic.from_file(filepath)\n if not filetype:\n # Filetype Could Not Be Determined\n return None\n elif filetype == 'empty':\n # Filetype Could Not Be Determined (file looks empty)\n return None\n elif filetype == 'very short file (no magic)':\n # Filetype Could Not Be Determined (very short file)\n return None\n elif \"Microsoft Office Word\" in filetype:\n return DOC if not as_string else 'DOC'\n elif filetype[0:4] == 'HTML':\n return HTML if not as_string else 'HTML'\n elif filetype == 'Microsoft Word 2007+':\n return DOCX if not as_string else 'DOCX'\n elif 'PDF' in filetype:\n return PDF if not as_string else 'PDF'\n elif filetype[0:3] == 'Zip':\n # a lot of hansards are found to be in ZIP format, but can be opened with python-docx\n return DOCX if not as_string else 'DOCX'\n else:\n # some other filetype that we don't account for\n return None\n\n\ndef doc_to_html(filepath, 
overwrite=False):\n \"\"\"\n Converts a doc file to in-memory html string.\n\n :param filepath: full filepath to the file to convert\n :return: unicode string\n \"\"\"\n html_file = '{}.html'.format(filepath)\n if not os.path.exists(html_file) or overwrite:\n cmd = ['abiword', '--to=html', '--to-name=fd://1', filepath]\n try:\n res = subprocess.check_output(cmd)\n except:\n return None\n with open(html_file, 'wb') as tmp:\n tmp.write(res)\n else:\n with open(html_file, 'rb') as tmp:\n res = tmp.read()\n return res.decode('utf-8')\n\n\ndef docx_to_html(filepath, overwrite=False):\n \"\"\"\n Converts docx file to in-memory html string\n\n :param filepath: full path to the file to convert\n :return: unicode string\n \"\"\"\n html_file = '{}.html'.format(filepath)\n if not os.path.exists(html_file) or overwrite:\n #res = pydocx.docx2html(filepath)\n res = pydocx.PyDocX.to_html(filepath)\n with open(html_file, 'wb') as tmp:\n tmp.write(res.encode('utf-8'))\n else:\n with open(html_file, 'rb') as tmp:\n res = tmp.read().decode('utf-8')\n return res\n\n\ndef get_file_path(rel_path):\n \"\"\"\n Given a relative path for a file downloaded by scrapy, get the absolute path\n \"\"\"\n files_folder = getattr(settings, 'SCRAPY_FILES_PATH', None)\n if files_folder is None:\n raise ImproperlyConfigured(\"No SCRAPY_FILES_PATH defined\")\n\n file_path = os.path.join(files_folder, rel_path)\n if not os.path.exists(file_path):\n raise RuntimeError(\"Could not find file at {}\".format(file_path))\n\n return file_path\n\n\ndef to_string(obj, encoding='utf-8'):\n \"\"\"\n Converts unicode objects to strings, and returns strings directly\n \"\"\"\n if isinstance(obj, basestring):\n if isinstance(obj, unicode):\n obj = obj.encode(encoding)\n return obj\n\n\ndef to_unicode(obj, encoding='utf-8'):\n if isinstance(obj, basestring):\n if not isinstance(obj, unicode):\n obj = unicode(obj, encoding)\n return obj\n\n\ndef grouper(iterable, n, fillvalue=None):\n \"\"\"\n Collect data into fixed-length chunks or blocks\n \"\"\"\n # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx\n args = [iter(iterable)] * n\n return izip_longest(fillvalue=fillvalue, *args)\n\ndef utf2html(ustring):\n ustring = ustring.replace('\\r\\n','

 ')\n ustring = ustring.replace('\n',' 
')\n ustring = ustring.replace('\\t',' ')\n return ustring\n\ndef merge_docx(docx_list=None, out_htmlpath=None):\n \"\"\"\n docx_list is a list of strings which contains the (absolute) path of DOC/DOCX files to be merged.\n MERGE_DOCX() will follow the index order of docx_list for appending.\n Returns the HTML file as string. \n If OUT_HTMLPATH is given, write the HTML file out as well.\n \"\"\"\n if docx_list is None:\n return None\n \n cleaner = Cleaner()\n parser = HTMLParser(encoding='utf-8')\n html_list = []\n for path in docx_list:\n try:\n tmp_html = pydocx.PyDocX.to_html(path)\n html_list.append(cleaner.clean_html(lxml.html.fromstring(tmp_html, parser=parser)))\n except:\n #'MalformedDocxException'\n try:\n # Pretend it is a html\n html_file = '{}.html'.format(path)\n with open(html_file, 'rb') as tmp:\n tmp_html = tmp.read()\n tmp_html = tmp_html.decode('utf-8')\n html_list.append(cleaner.clean_html(lxml.html.fromstring(tmp_html, parser=parser)))\n except:\n # Cannot convert\n continue\n \n #print html_list\n if len(html_list)>1:\n #Append element at the end of first body\n main_body = html_list[0].xpath('./body')[0]\n for tree in html_list[1:]:\n elem_list = tree.xpath('./body/*')\n for elem in elem_list:\n main_body.append(elem)\n elif len(html_list)==1:\n main_body = html_list[0].xpath('./body')[0]\n else:\n try:\n main_body = html_list[0].xpath('./body')[0]\n except IndexError:\n # no body content. Most likely just an image/appendix\n return None\n \n # Convert ElementTree back to string\n # in this way we will lose the 'style' info in html_list[0][0], which is usually in header,\n # but not sure if it will cause any differences to parser later on. Probably not.\n html_str = lxml.etree.tostring(main_body)\n \n if out_htmlpath is not None:\n with open(out_htmlpath, 'wb') as tmp:\n tmp.write(html_str.encode('utf-8'))\n \n return html_str\n \n","sub_path":"app/raw/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"309544891","text":"def calculate_fuel(mass):\n\treturn (mass // 3) - 2\n\n# Part 1\ntotal = 0\nwith open('input.txt', 'r') as file:\n\t# calculate the fuel required for every module, add it to the total\n\tfor line in file:\n\t\tmass = int(line.strip('/n'))\n\t\ttotal += calculate_fuel(mass)\n\nprint(\"Soluton 1: {}\".format(total))\n\n# Part 2\ndef calculate_total(mass):\n\t# calculate the initial mass fuel required\n\tmodule_mass = calculate_fuel(mass)\n\tfuel_total = 0\n\tx = module_mass\n\t# now calculate the mass of fuel required for the fuel added\n\twhile x >= 0 and calculate_fuel(x) > 0:\n\t\tfuel_total += calculate_fuel(x)\n\t\tx = calculate_fuel(x)\n\treturn module_mass + fuel_total\n\n# pre define 100 modules\nmodules = [0] * 100\nwith open('input.txt', 'r') as file:\n\ti = 0\n\t# for every module, calculate the base mass and the fuel mass\n\tfor line in file:\n\t\tmass = int(line.strip('/n'))\n\t\tmodules[i] += calculate_total(mass)\n\t\ti += 1\n\nprint(\"Soluton 2: {}\".format(sum(modules)))\n\n","sub_path":"day_1/day_1.py","file_name":"day_1.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"123092572","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 11 15:26:10 2017\n\n@author: 01550070\n\"\"\"\nimport random\n\n#Создадим список и заполним его случайными целыми числами\na = random.sample(range(-1000000, 1000000), 
10000)\n\n#The problem can be solved by looping over the array elements in forward order,\n#running an inner loop over the same array in reverse order,\n#and adding the product of the elements to a separate list whenever\n#the elements differ or an element occurs more than once.\n#This is the brute-force approach.\n#Its main drawback is that the list of products will contain a great many duplicates,\n#which causes significant slowdowns for long input arrays\nprint(min ((item * item_rev for item in a for item_rev in reversed(a) if item != item_rev or a.count(item) > 1)))\n\n\n\n#Approaching this task logically, it is obvious\n#that the minimal product of two elements of an array\n#containing both positive and negative numbers\n#is the product of the smallest negative number and the largest positive one.\n#If the array contains only positive or only negative numbers,\n#the minimal product of two numbers is the product of the two numbers smallest in absolute value.\n\ndef get_min(l):\n pos = [x for x in l if x > 0]\n neg = [x for x in l if x <= 0]\n if len(pos) > 0 and len(neg) > 0:\n return min(neg) * max(pos)\n else:\n abs_list = list(map(abs, l))\n min_abs = min(abs_list)\n if abs_list.count(min_abs) > 1:\n return min_abs * min_abs\n else:\n abs_list.remove(min_abs)\n return min_abs * min(abs_list)\n \nprint(get_min(a))\n\n","sub_path":"helpfull scripts/min.py","file_name":"min.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"123930332","text":"import ast\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport os\nimport sys\n\nfrom joblib import Memory\n\n# Auto-detect terminal width.\npd.options.display.width = None\npd.options.display.max_rows = 1000\npd.options.display.max_colwidth = 200\n\n# Initialize a persistent memcache.\nmem_hist = Memory(cachedir='./.cached_plot_hist', verbose=0)\nmem_sim = Memory(cachedir='./.cached_plot_sim', verbose=0)\n\n# We could use some good argparse parameters here instead of\n# a bunch of constants to fiddle with.\nPRINT_HISTORICAL = False\nPRINT_BASELINE = False\nPRINT_VOLUME = False\n\nBETWEEN_START = pd.to_datetime('09:30').time()\nBETWEEN_END = pd.to_datetime('09:30:00.000001').time()\n\n# Linewidth for plots.\nLW = 2\n\n# Used to read and cache real historical trades.\n#@mem_hist.cache\ndef read_historical_trades (file, symbol):\n print (\"Historical trades were not cached. 
This will take a minute.\")\n df = pd.read_pickle(file, compression='bz2')\n df = df[df['EventType'] == 'LAST_TRADE']\n\n if len(df) <= 0:\n print (\"There appear to be no simulated trades.\")\n sys.exit()\n\n df['PRICE'] = [y for x,y in df['Event'].str.split(',')]\n df['SIZE'] = [x for x,y in df['Event'].str.split(',')]\n\n df['PRICE'] = df['PRICE'].str.replace('$','').astype('float64')\n df['SIZE'] = df['SIZE'].astype('float64')\n\n return df\n\n\n# Main program starts here.\n\nif len(sys.argv) < 3:\n print (\"Usage: python ticker_plot.py [agent trade log]\")\n sys.exit()\n\n# TODO: only really works for one symbol right now.\n\nsymbol = sys.argv[1]\nsim_file = sys.argv[2]\n\nagent_log = None\nif len(sys.argv) >= 4: agent_log = sys.argv[3]\n\nprint (\"Visualizing simulated {} from {}\".format(symbol, sim_file))\n\ndf_sim = read_simulated_trades(sim_file, symbol)\n\nif PRINT_BASELINE:\n baseline_file = os.path.join(os.path.dirname(sim_file) + '_baseline', os.path.basename(sim_file))\n print (baseline_file)\n df_baseline = read_simulated_trades(baseline_file, symbol)\n\n# Take the date from the first index and use that to pick the correct historical date for comparison.\nif PRINT_HISTORICAL: \n hist_date = pd.to_datetime(df_sim.index[0])\n hist_year = hist_date.strftime('%Y')\n hist_date = hist_date.strftime('%Y%m%d')\n hist_file = \"/nethome/cb107/emh/data/trades/trades_{}/ct{}_{}.bgz\".format(hist_year, 'm' if int(hist_year) > 2014 else '', hist_date)\n\n print (\"Visualizing historical {} from {}\".format(symbol, hist_file)) \n df_hist = read_historical_trades(hist_file, symbol)\n\nplt.rcParams.update({'font.size': 12})\n\n\n\n# Use to restrict time to plot.\ndf_sim = df_sim.between_time(BETWEEN_START, BETWEEN_END)\nprint (\"Total simulated volume:\", df_sim['SIZE'].sum())\n\nif PRINT_BASELINE:\n df_baseline = df_baseline.between_time(BETWEEN_START, BETWEEN_END)\n print (\"Total baseline volume:\", df_baseline['SIZE'].sum())\n\nif PRINT_VOLUME:\n fig,axes = plt.subplots(figsize=(12,9), nrows=2, ncols=1)\nelse:\n fig,ax = plt.subplots(figsize=(12,9), nrows=1, ncols=1)\n axes = [ax]\n\n# Crop figures to desired times and price scales.\n#df_hist = df_hist.between_time('9:46', '13:30')\n\n# For smoothing...\n#hist_window = 100\n#sim_window = 100\n\nhist_window = 1\nsim_window = 1\n\nif PRINT_HISTORICAL:\n df_hist = df_hist.between_time(BETWEEN_START, BETWEEN_END)\n print (\"Total historical volume:\", df_hist['SIZE'].sum())\n\n df_hist['PRICE'] = df_hist['PRICE'].rolling(window=hist_window).mean()\n df_sim['PRICE'] = df_sim['PRICE'].rolling(window=sim_window).mean()\n\n df_hist['PRICE'].plot(color='C0', grid=True, linewidth=LW, ax=axes[0])\n df_sim['PRICE'].plot(color='C1', grid=True, linewidth=LW, alpha=0.9, ax=axes[0])\n axes[0].legend(['Historical', 'Simulated'])\n\n if PRINT_VOLUME:\n df_hist['SIZE'].plot(color='C0', linewidth=LW, ax=axes[1])\n df_sim['SIZE'].plot(color='C1', linewidth=LW, alpha=0.9, ax=axes[1])\n axes[1].legend(['Historical Vol', 'Simulated Vol'])\nelif PRINT_BASELINE:\n # For nanosecond experiments, turn it into int index. 
Pandas gets weird if all\n # the times vary only by a few nanoseconds.\n rng = pd.date_range(start=df_sim.index[0], end=df_sim.index[-1], freq='1N')\n\n df_baseline = df_baseline[~df_baseline.index.duplicated(keep='last')]\n df_baseline = df_baseline.reindex(rng,method='ffill')\n df_baseline = df_baseline.reset_index(drop=True)\n\n df_sim = df_sim[~df_sim.index.duplicated(keep='last')]\n df_sim = df_sim.reindex(rng,method='ffill')\n df_sim = df_sim.reset_index(drop=True)\n\n df_baseline['PRICE'].plot(color='C0', grid=True, linewidth=LW, ax=axes[0])\n df_sim['PRICE'].plot(color='C1', grid=True, linewidth=LW, alpha=0.9, ax=axes[0])\n\n axes[0].legend(['Baseline', 'With Impact'])\n\nelse:\n #df_sim['PRICE'] = df_sim['PRICE'].rolling(window=sim_window).mean()\n\n # For nanosecond experiments, turn it into int index. Pandas gets weird if all\n # the times vary only by a few nanoseconds.\n\n rng = pd.date_range(start=df_sim.index[0], end=df_sim.index[-1], freq='1N')\n df_sim = df_sim[~df_sim.index.duplicated(keep='last')]\n df_sim = df_sim.reindex(rng,method='ffill')\n df_sim = df_sim.reset_index(drop=True)\n df_sim['PRICE'].plot(color='C1', grid=True, linewidth=LW, alpha=0.9, ax=axes[0])\n axes[0].legend(['Simulated'])\n\n if PRINT_VOLUME:\n df_sim['SIZE'].plot(color='C1', linewidth=LW, alpha=0.9, ax=axes[1])\n axes[1].legend(['Simulated Vol'])\n\n# Superimpose a particular trading agent's trade decisions on top of the ticker\n# plot to make it easy to visually see if it is making sensible choices.\nif agent_log:\n df_agent = pd.read_pickle(agent_log, compression='bz2')\n df_agent = df_agent.between_time(BETWEEN_START, BETWEEN_END)\n df_agent = df_agent[df_agent.EventType == 'HOLDINGS_UPDATED']\n\n first = True\n\n for idx in df_agent.index:\n event = df_agent.loc[idx,'Event']\n if symbol in event:\n shares = event[symbol]\n if shares > 0:\n print (\"LONG at {}\".format(idx))\n axes[0].axvline(x=idx, linewidth=LW, color='g')\n elif shares < 0:\n print (\"SHORT at {}\".format(idx))\n axes[0].axvline(x=idx, linewidth=LW, color='r')\n else:\n print (\"EXIT at {}\".format(idx))\n axes[0].axvline(x=idx, linewidth=LW, color='k')\n else:\n print (\"EXIT at {}\".format(idx))\n axes[0].axvline(x=idx, linewidth=LW, color='k')\n\nplt.suptitle('Execution Price/Volume: {}'.format(symbol))\n\naxes[0].set_ylabel('Executed Price')\n\nif PRINT_VOLUME:\n axes[1].set_xlabel('Execution Time')\n axes[1].set_ylabel('Executed Volume')\n axes[0].get_xaxis().set_visible(False)\nelse:\n axes[0].set_xlabel('Execution Time')\n\n#plt.savefig('background_{}.png'.format(b))\n\nplt.show()\n\n","sub_path":"cli/ticker_plot.py","file_name":"ticker_plot.py","file_ext":"py","file_size_in_byte":6972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"234935239","text":"\"\"\"\nHangman.\n\nAuthors: Jack Franey and Jake Lauteri and Josiah Hasegawa.\n\"\"\" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.\n\n# TODO: 2. Implement Hangman using your Iterative Enhancement Plan.\n\n####### Do NOT attempt this assignment before class! 
#######\n\nimport random\n\ndef main():\n min_length = get_min_length()\n secret_word = get_word(min_length)\n game_loop(secret_word)\n\n\n # list = [spaces(secret_word)]\n # print(list)\n #\n # lst1 = []\n # for j in range(5+ len(secret_word)):\n # lst1 = lst1 + [guess(secret_word)]\n #\n # for q in range(len(lst1)):\n # for t in range(len(lst1[q])):\n # change_word(lst1, list, secret_word[lst1[q][t]])\n\n\ndef get_word(min_length):\n\n with open('words.txt') as f:\n f.readline()\n string = f.read()\n words = string.split()\n # min_length = get_min_length()\n while True:\n\n r = random.randrange(0,len(words))\n secret_word = words[r]\n print(secret_word)\n if len(secret_word) >= min_length:\n return secret_word\n\ndef get_min_length():\n\n min_length = int(input('Enter the minimum length of the secret word:'))\n print(min_length)\n return min_length\n\ndef game_loop(secret_word):\n wrong = 0\n spaces = []\n for _ in range(len(secret_word)):\n spaces = spaces + ['_']\n while True:\n letter = guess()\n letter, result, k = check_guess(secret_word,letter,wrong)\n progress, right = stuff_right(secret_word,letter,spaces)\n change_word(progress)\n message, wrong = end_of_game(progress, right, result, wrong)\n print(message)\n if message == 'Loser':\n print('The word was:', secret_word)\n break\n elif message == 'Winner':\n break\n\ndef guess():\n letter = str(input('Enter a letter: '))\n return letter\n\ndef check_guess(secret_word,letter,chances):\n result = 'Correct'\n for k in range(len(secret_word)):\n if secret_word[k] == letter:\n print('Wrong guesses left: ', chances)\n return letter, result, k\n else:\n chances = chances - 1\n result = 'Wrong'\n print(result, 'Wrong guesses left: ', chances)\n return chances, result, None\n\ndef stuff_right(secret_word,letter,spaces):\n right = []\n for k in range(len(secret_word)):\n right = right + [secret_word[k]]\n for j in range(len(secret_word)):\n if letter == right[j]:\n spaces[j] = letter\n return spaces, right\n\n # for k in range(len(some_list)):\n # secret_word[some_list[k]] = (w)\n # print(secret_word)\n # return secret_word\n\ndef change_word(spaces):\n blanks = ''\n for k in range(len(spaces)):\n blanks = blanks + spaces[k] + ' '\n print(blanks)\n\ndef end_of_game(progress,right,result,wrong):\n message = ''\n if win(progress,right) == True: #problem might be with win function?\n message = 'Winner'\n if result == 'Wrong':\n wrong = wrong + 1\n if wrong == 5:\n message = 'Loser'\n return message\n\n# def win(spaces,right):\n# number_right = 0\n# for k in range(len(spaces)):\n# if spaces[k] == right[k]:\n# number_right = number_right +1\n# if number_right == len(right):\n# return True\n# else:\n# return False\n\ndef win(spaces, right):\n count = 0\n for k in range(len(spaces)):\n if spaces[k] == right[k]:\n count = count + 1\n if count == len(right):\n return True\n else:\n return False\n\n\n\n\nmain()","sub_path":"src/m1_hangman.py","file_name":"m1_hangman.py","file_ext":"py","file_size_in_byte":3551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"152531769","text":"\n\"\"\"\nGiven a sorted linked list, delete all nodes that have duplicate numbers, leaving only distinct numbers from the original list.\n\nExample 1:\n\nInput: 1->2->3->3->4->4->5\nOutput: 1->2->5\nExample 2:\n\nInput: 1->1->1->2->3\nOutput: 2->3\n\"\"\"\n\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def deleteDuplicates(self, head: ListNode) -> ListNode:\n\n dummy = 
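One catch in the hangman record above: end_of_game returns a bare string only when wrong reaches 5 and returns None otherwise, so the `message, wrong = end_of_game(...)` unpack in game_loop raises on every turn. A corrected sketch that always returns the pair the loop expects (names follow the assignment code):

def end_of_game(progress, right, result, wrong):
    if result == 'Wrong':
        wrong = wrong + 1
    if win(progress, right):
        return 'Winner', wrong
    if wrong >= 5:
        return 'Loser', wrong
    return '', wrong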
pre = ListNode(0)\n dummy.next = head\n while head and head.next:\n if head.val == head.next.val:\n while head and head.next and head.val == head.next.val:\n head = head.next\n head = head.next\n pre.next = head\n else:\n pre = pre.next\n head = head.next\n return dummy.next\n\n\nclass Solution2:\n def deleteDuplicates(self, head):\n pre = dummy = ListNode(0) # construct a dummy node\n dummy.next = head\n cur = head\n while cur:\n if cur.next and cur.val == cur.next.val:\n # loop until cur point to the last duplicates\n while cur and cur.next and cur.val == cur.next.val:\n cur = cur.next\n pre.next = cur.next # propose the next for pre\n # this will be verified by next line\n else:\n pre = pre.next\n cur = cur.next\n return dummy.next\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"LeetcodeNew/python/LC_082.py","file_name":"LC_082.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"308672028","text":"import sys\nimport time\nimport getopt\nfrom st3d.control.load_miscdf import *\n\nfrom multiprocessing import Pool\n\nfrom st3d.control.save_miscdf import *\nfrom st3d.model.slice_dataframe import slice_dataframe\n\n############################################################################\n# logic codes\n#############################################################################\ndef gem2bfm_one_slice(data:[]):\n gem_file_name = data[0]\n z_index = data[1]\n #one_slice = data[0]\n prefix = data[2]\n binsize = data[3]\n\n one_slice = slice_dataframe()\n one_slice.init_from_file(gem_file_name,z_index)\n slice_index = one_slice.slice_index\n init_gem2bfm_slice(prefix,slice_index)\n\n print(\"build gene maps for slice {} ...\".format(slice_index))\n print(time.strftime(\"%Y-%m-%d %H:%M:%S\"),flush=True)\n gene_maps = one_slice.get_gene_ids()\n\n print(\"get bins of slice {} ...\".format(slice_index))\n print(time.strftime(\"%Y-%m-%d %H:%M:%S\"),flush=True)\n slices_info, bos = one_slice.get_bins_of_slice(binsize=binsize)\n\n print(\"get mtx of slice {}...\".format(slice_index))\n print(time.strftime(\"%Y-%m-%d %H:%M:%S\"),flush=True)\n mtx = one_slice.get_mtx(gene_maps,bos)\n\n print('save slice {} data ...'.format(slice_index))\n print(time.strftime(\"%Y-%m-%d %H:%M:%S\"),flush=True)\n print_features_tsv(gene_maps.keys(),prefix,slice_index)\n print_barcodes_tsv(bos ,prefix,slice_index)\n print_tissue_positions_list(bos,prefix,slice_index)\n print_gem2bfm_slices_json(slices_info,prefix,slice_index)\n print_matrix_mtx(mtx,prefix,slice_index,len(gene_maps),bos.bin_num())\n print('gem2bfm done for {}'.format(slice_index))\n\n# multi-processing run all slices\ndef gem2bfm_slices_one_by_one(slices,prefix,binsize=50,tasks=8):\n init_gem2bfm_output(prefix)\n args=[]\n #for slice_id in range(0,slices.slices_num):\n for slice_name in slices:\n z_index = slices[slice_name]\n #one_slice = slices.slices[slice_id]\n args.append([slice_name,z_index,prefix,binsize])\n with Pool(tasks) as p:\n p.map(gem2bfm_one_slice, args)\n\n\n############################################################################\n# section 1 : gem2bfm\n#############################################################################\n\n# usage of gem2bfm\ndef gem2bfm_usage():\n print(\"\"\"\nUsage : GEM_toolkit.py gem2bfm -c \\\\\n -o \\\\\n -b [bin-size (default 50)] \\\\\n -t [threads (default 8)]\n\nNotice : Since one gem file will be handled only in one thread,\n there is no need to set -t greater than slice number.\n\"\"\")\n\n# main of 
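A quick check of the dummy-node dedup above (the helper builders are hypothetical, not part of the record):

def build(values):
    dummy = tail = ListNode(0)
    for v in values:
        tail.next = ListNode(v)
        tail = tail.next
    return dummy.next

def to_list(node):
    out = []
    while node:
        out.append(node.val)
        node = node.next
    return out

print(to_list(Solution().deleteDuplicates(build([1, 2, 3, 3, 4, 4, 5]))))  # [1, 2, 5]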
gem2bfm\ndef gem2bfm_main(argv):\n config = ''\n prefix = ''\n binsize= 50\n threads=8\n try:\n opts, args = getopt.getopt(argv,\"hc:o:b:t:\",[\"help\",\"iconf=\",\"ofile=\",\"bin=\",\"threads=\"])\n except getopt.GetoptError:\n gem2bfm_usage()\n sys.exit(2)\n for opt, arg in opts:\n if opt in ('-h' ,'--help'):\n gem2bfm_usage()\n sys.exit(0)\n elif opt in (\"-b\", \"--bin\"):\n binsize = int(arg)\n elif opt in (\"-c\", \"--iconf\"):\n config = arg\n elif opt in (\"-t\", \"--threads\"):\n threads= int(arg)\n elif opt in (\"-o\", \"--ofile\"):\n prefix = arg\n\n if config == \"\" or prefix == \"\" or binsize<1 or threads <1:\n gem2bfm_usage()\n sys.exit(3)\n\n print(\"config file is {}\".format(config))\n print(\"output prefix is {}\".format( prefix))\n print(\"binsize is {}\".format(binsize))\n print(\"threads is {}\".format(threads))\n\n print('start loading slice(s)...')\n print(time.strftime(\"%Y-%m-%d %H:%M:%S\"),flush=True)\n slice_data = load_slices(config)\n print('handle slice(s)...')\n print(time.strftime(\"%Y-%m-%d %H:%M:%S\"),flush=True)\n gem2bfm_slices_one_by_one(slice_data,prefix,binsize,threads)\n print('gem2bfm, all done ...')\n print(time.strftime(\"%Y-%m-%d %H:%M:%S\"),flush=True)\n\n","sub_path":"st3d/control/gem2bfm.py","file_name":"gem2bfm.py","file_ext":"py","file_size_in_byte":4041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"532973270","text":"# leary1.py\n# Richard Leary\n# Assignment submission for CPSC3400\n\nimport re\n\ndef get_alnum_string(word):\n ret = ''\n for i in word:\n if i.isalnum():\n ret += i\n return ret.capitalize()\n\ndef main():\n print('Program 2 - keyword indexing')\n file1 = open('titles.txt')\n file2 = open('keywords.txt')\n titleSet = set()\n indexDict = {}\n \n for line in file2:\n for word in line.split():\n item = get_alnum_string(word)\n if(item not in titleSet):\n titleSet.add(item)\n for line in file1:\n for word in line.split():\n item = get_alnum_string(word)\n if(item in titleSet):\n indexDict[item] = indexDict.get(item, 0) + 1\n for a, b in indexDict.items():\n print(a+':')\n print(b)\n\nif __name__ == '__main__':\n main()\n","sub_path":"Languages and Computation/HW1 - Keyword Indexing/leary1.py","file_name":"leary1.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"562289556","text":"from django.shortcuts import render ,redirect\n\nfrom django.contrib.auth.hashers import make_password, check_password\n#from django.http import HttpResponse\nfrom .models import *\nfrom .forms import ContactForm\n\nfrom django.views import View\n# Create your views here.\nfrom .forms import CreateUserForm \nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth import authenticate , login, logout\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\n\ndef register(request):\n form = CreateUserForm()\n\n if request.method == \"POST\":\n form = CreateUserForm(request.POST)\n if form.is_valid():\n form.save()\n user = form.cleaned_data.get('username')\n messages.success(request, 'Votre compte a été créé '+ user)\n\n return redirect('loginPage')\n\n context={'form':form}\n return render(request, 'account/register.html', context) \n\ndef loginPage(request):\n if request.method == \"POST\":\n username = request.POST.get('username')\n password = request.POST.get('password')\n \n user = authenticate(request, username=username, 
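gem2bfm_slices_one_by_one above packs one argument list per slice and fans them out with multiprocessing.Pool.map; the dispatch pattern in isolation (worker and task names invented):

from multiprocessing import Pool

def worker(args):
    name, z_index = args              # one bundle per task, as in gem2bfm
    return '{}:{}'.format(name, z_index)

if __name__ == '__main__':
    tasks = [('slice_a', 0), ('slice_b', 1), ('slice_c', 2)]
    with Pool(2) as pool:
        print(pool.map(worker, tasks))   # results come back in input order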
password=password)\n if user is not None:\n login(request, user) \n return redirect('index') \n else:\n messages.info(request, 'Nom d utilisateur ou mot de pass incorrect')\n context={}\n return render(request, 'account/login.html', context)\n\n\ndef logoutUser(request):\n logout(request)\n return redirect('loginPage') \n\n\n@login_required(login_url='loginPage')\ndef index(request):\n ecoles = Ecole.objects.all();\n actulignes = Actuligne.objects.all();\n data = {}\n data['ecoles'] = ecoles\n data['actulignes'] = actulignes\n return render(request, 'index.html' , data)\n\n\ndef utilisateur(request):\n ecoles = Ecole.objects.all()\n data = {}\n data['ecoles'] = ecoles\n return render(request, 'account/utilisateur.html', data)\n\n\n\n# Create your views here.\n\ndef contact(request):\n form = ContactForm()\n context={'form':form}\n message= \"\"\n error=\"\"\n if request.method == 'POST':\n form = ContactForm(request.POST,request.FILES)\n if form.is_valid():\n form.save(commit=True)\n message=\"Inscription validée .\"\n return redirect('index')\n else:\n print(form.errors) \n error=\"ok\"\n form = FormulaireForm() \n \n context={\n 'form':form,\n 'message':message,\n 'error':error,\n }\n return render(request , 'blog/contact.html', context)\n\n\n\ndef boutique(request):\n return render(request, 'blog/boutique.html')\n\n\ndef actualite(request):\n actualites = None\n ecoles = Ecole.objects.all()\n universites = Universite.get_all_universites()\n universiteID = request.GET.get('universite')\n if universiteID:\n actualites = Actualite.objects.filter(universite=universiteID)\n else:\n actualites = Actualite.objects.all();\n data = {}\n data['ecoles'] = ecoles\n data['actualites'] = actualites\n data['universites'] = universites\n return render(request, 'blog/actualite.html', data)\n\n\ndef detailactualite(request , actualite_id):\n id= int(actualite_id)\n actualite = Actualite.objects.get(pk=actualite_id)\n \n context={\n 'actualite':actualite,\n }\n return render(request, 'blog/detailactualite.html' , context)\n\n\ndef homeufhb(request , slug):\n #id= int(ecole_id)\n ecoles = Ecole.objects.all()\n ecole = Ecole.objects.get(slug=slug)\n departement=ecole.departement.all()\n \n context={\n 'ecole':ecole,\n 'departement':departement,\n }\n return render(request, 'homeufhb.html' , context)\n\n\ndef depart(request , departement_id):\n id = int(departement_id)\n departement = Departement.objects.get(id=departement_id)\n niveau=departement.niveau.all()\n\n context = {\n 'departement':departement,\n 'niveau' :niveau\n }\n return render(request, 'depart.html', context)\n\n\ndef lesmatieres(request , niveau_id):\n id = int(niveau_id)\n niveau = Niveau.objects.get(id=niveau_id)\n matiere=niveau.matiere.all()\n\n context = {\n 'niveau' :niveau,\n 'matiere' :matiere\n }\n return render(request, 'lesmatieres.html', context)\n\n\n\ndef cour(request, matiere_id):\n id = int(matiere_id)\n matiere = Matiere.objects.get(id=id)\n cour=matiere.cour.all()\n ecoles = Ecole.objects.all();\n if request.method ==\"POST\":\n idcour = request.GET.get(\"document\")\n for cou in cour:\n if cou.id == idcour:\n cou.inscriptions.add(request.inscription.id)\n print(cou.id)\n else:\n error = \"erreur\"\n context= {\n 'ecoles':ecoles,\n 'matiere' :matiere,\n 'cour' :cour\n }\n return render(request, 'cour.html' , context)\n\n ","sub_path":"bvci/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} 
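The loginPage view above is Django's stock authenticate -> login -> redirect flow; condensed to its skeleton (the URL name and template path reuse this app's, the rest is standard Django):

from django.contrib.auth import authenticate, login
from django.shortcuts import redirect, render

def sign_in(request):
    if request.method == 'POST':
        user = authenticate(request,
                            username=request.POST.get('username'),
                            password=request.POST.get('password'))
        if user is not None:
            login(request, user)      # attaches the user to the session
            return redirect('index')
    return render(request, 'account/login.html', {})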
+{"seq_id":"133633944","text":"### Question 1\n# (a)\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nwarnings.filterwarnings('ignore')\nfrom prettytable import PrettyTable\nfrom tabulate import tabulate\n\n# Question2\nX = [[-0.8, 1],\n [3.9, 0.4],\n [1.4, 1],\n [0.1, -3.3],\n [1.2, 2.7],\n [-2.45, 0.1],\n [-1.5, -0.5],\n [1.2, -1.5]]\ny = [1, -1, 1, -1, -1, -1, 1, 1]\nX = np.array(X)\ny = np.array(y)\n\ncolumn = np.array([0, 0, 0, 0, 0, 0, 0, 0])\nX = np.column_stack((X, column))\n\nX_temp = X\n\nx1 = X[:, 0]\nx2 = X[:, 1]\n\nX[:, 2] = pow(2, 0.5) * x1 * x2\nX[:, 0] = x1 * x1\nX[:, 1] = x2 * x2\n\nw = np.array([1, 1, 1, 1], dtype='float64')\n# learning rate = 0.2\nlr = 0.2\n\n\n\n\n\n\nconverged = 0\nm = 1\niteration = []\niteration.append(0)\n\n\niteration_total = []\nn = 1\niteration_total.append(0)\nweights = np.array([[1, 1, 1, 1]], dtype='float64')\n\nweight_total = np.array([[1, 1, 1, 1]], dtype='float64')\n\nwhile converged == 0:\n converged = 1\n for i in range(8):\n w0 = w[0]\n w1 = w[1]\n w2 = w[2]\n w3 = w[3]\n y_ = w0 + w1 * X[:, 0][i] + w2 * X[:, 1][i] + w3 * X[:, 2][i]\n signal = y_ * y[i]\n if signal <= 0:\n w[0] = w0 + (y[i] * 0.2 * 1)\n w[1] = w1 + (y[i] * 0.2 * X[:, 0][i])\n w[2] = w2 + (y[i] * 0.2 * X[:, 1][i])\n w[3] = w3 + (y[i] * 0.2 * X[:, 2][i])\n converged = 0\n iteration.append(m)\n weight_total = np.concatenate((weight_total, [w]))\n m = m + 1\n iteration_total.append(n)\n weights = np.concatenate((weights, [w]))\n n = n+1\n\nprint(f\"The final weight vector is {w}.\\n\")\n\nt = PrettyTable(['Iter_No. of each update', 'w0', 'w1', 'w2', 'w3'])\nfor i in range(len(iteration)):\n t.add_row([iteration[i], weight_total[i][0], weight_total[i][1], weight_total[i][2], weight_total[i][3] ])\n\nprint(t)\n\nt_2 = PrettyTable(['Total iteration', 'w0', 'w1', 'w2', 'w3'])\nfor i in range(len(iteration_total)):\n t_2.add_row([iteration_total[i], weights[i][0], weights[i][1], weights[i][2], weights[i][3] ])\n\nprint(t_2)\n\n\n\n","sub_path":"hw2/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"264012301","text":"from flask import render_template, Request, Flask, request, redirect\nfrom textblob import TextBlob\n\n\napp = Flask(__name__)\n\ndef extract_sentiment(text):\n blob = TextBlob(text)\n senti_value = 0\n for sentence in blob.sentences:\n senti_value += sentence.sentiment.polarity\n\n overall_sentiment = senti_value/len(blob.sentences)\n\n if overall_sentiment<=-0.5:\n return \"Negative\"\n elif overall_sentiment >=0.5:\n return \"Positive\"\n elif overall_sentiment>-0.5 and overall_sentiment<0.5:\n return \"Neutral\"\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n # check if the post request has the file part\n textvalue = request.form['w3review']\n sentiment = extract_sentiment(textvalue)\n return redirect('/sentimentanalysis/'+sentiment)\n return render_template('index.html')\n\n@app.route(\"/sentimentanalysis/\", methods = ['GET', 'POST'])\ndef sentiment_value(sentiment):\n return render_template('sentiment.html', sentiment=sentiment)\n\n\nif __name__ == \"__main__\":\n app.run(debug=False, 
threaded=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"615297985","text":"from collections import Counter\n\nnum = []\n\n\ndef findPrime(n1, n2) :\n for x in range(n1, n2) :\n if x > 1 :\n for n in range(2, x) :\n if (x % n) == 0 :\n break\n else :\n print(x)\n num.append(x)\n\n\nstart = 0\nend = 1000\nfindPrime(start, end)\n\n\ndef findAnagram(s1):\n # Counter() returns a dictionary data\n for i in range(len(s1)) :\n if Counter(i) == Counter(i + 1) :\n print(\"! anagram\")\n else :\n print(\": are not anagram\")\n\n\nfindAnagram(num)\n","sub_path":"new/week1/Alogrith/primanagra.py","file_name":"primanagra.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"3802041","text":"from django.http import Http404\nfrom django.shortcuts import render\nfrom gazteaApp.models import Herriak\nfrom django.http import JsonResponse\nfrom django.core import serializers\nfrom django.views.decorators.csrf import csrf_exempt\nimport json\n\n\ndef detail(request):\n try:\n herriak = Herriak.objects.all().order_by('izena_eus')\n except Herriak.DoesNotExist:\n raise Http404(\"Poll does not exist\")\n return render(request, 'gazteaApp/detail.html', {'herriak': herriak})\n\n\n@csrf_exempt\ndef validate_herriak(request):\n body_unicode = request.body.decode('utf-8')\n body = json.loads(body_unicode)\n\n izenaEus = body['izenaEus']\n izenaCast = body['izenaCast']\n izenaCastEz = body['izenaEzCast']\n izenaEusEz = body['izenaEzEus']\n probintzia = body['probintzia']\n probintziaEz = body['probintziaEz']\n\n q = Herriak.objects.all()\n\n for letrak in range(0, len(izenaEus)):\n q = q.filter(izena_eus__icontains=izenaEus[letrak])\n\n for letrak in range(0, len(izenaCast)):\n q = q.filter(izena_cast__icontains=izenaCast[letrak])\n\n for letrak in range(0, len(izenaCastEz)):\n q = q.exclude(izena_cast__icontains=izenaCastEz[letrak])\n\n for letrak in range(0, len(izenaEusEz)):\n q = q.exclude(izena_eus__icontains=izenaEusEz[letrak])\n for letrak in range(0, len(probintzia)):\n q = q.filter(probintzia__icontains=probintzia[letrak])\n for letrak in range(0, len(probintziaEz)):\n q = q.exclude(probintzia__icontains=probintziaEz[letrak])\n return JsonResponse(serializers.serialize('json', q), safe=False)\n","sub_path":"gazteaApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"531858803","text":"def sortString(s: str) -> str:\n num = [0] * 26\n for c in s:\n num[ord(c) -ord('a')] += 1\n\n ret = []\n while len(ret) \nimport collections\nimport copy\nimport inspect\nimport os\nimport sys\nfrom ShareYourSystem.Functions import Tool\n\n\n\n\n\n\nfrom ShareYourSystem.Classors import Classor\n\n\n\n\nfrom ShareYourSystem.Object import Caller\nimport 
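The sortString fragment above builds per-letter counts before its text is cut off; assuming it is headed for a plain counting-sort reconstruction, a complete sketch:

def sort_lower(s):
    num = [0] * 26
    for c in s:
        num[ord(c) - ord('a')] += 1
    return ''.join(chr(i + ord('a')) * n for i, n in enumerate(num))

print(sort_lower('leetcode'))   # cdeeelot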
importlib\nBaseModule=importlib.import_module(\"ShareYourSystem.Functers.Functer\")\nDecorationModule=importlib.import_module(\"ShareYourSystem.Classors.Tester\")\n#\n\n#\nBaseNameString=Classor.getNameStringWithModuleString(BaseModule.__name__)\nBaseClass=getattr(BaseModule,Classor.getClassStringWithNameString(BaseNameString))\nDecorationNameString=Classor.getNameStringWithModuleString(DecorationModule.__name__)\nDecorationClass=getattr(DecorationModule,Classor.getClassStringWithNameString(DecorationNameString))\nArgumentingString=\"_\"\nHookingBeforeString='Before'\nHookingAfterString='After'\n#\n\n#\ndef getSettedHookedListWithInstanceVariable(_HookedList,_InstanceVariable):\n\n\t#Debug\n\t'''\n\tprint('HookedList is '+str(_HookedList))\n\tprint('')\n\t'''\n\n\t#Set first the class \n\tif _HookedList[0]==\"\":\n\t\t_HookedList[0]=_InstanceVariable.__class__\n\telif _HookedList[0]==\"\":\t\n\t\t_HookedList[0]=_InstanceVariable.__class__.__bases__[0]\n\telif type(_HookedList[0])==str:\n\t\t_HookedList[0]=getattr(\n\t\t\t\t\t\t\t\tSYS,\n\t\t\t\t\t\t\t\tClassor.getClassStringWithNameString(_HookedList[0])\n\t\t\t\t\t\t\t\t)\n\t#Set then the method\n\tif type(_HookedList[1])==str and hasattr(_HookedList[0],_HookedList[1]):\n\t\t_HookedList[1]=getattr(_HookedList[0],_HookedList[1])\n\n\t#Return the list\n\treturn _HookedList\n\ndef setHookerWithHookingInstanceVariable(_Hooker,_InstanceVariable):\n\n\t#Set to the HookerPointer the corresponding class\n\t_Hooker.HookedClass=_InstanceVariable.__class__\n\n\t#Debug\n\t'''\n\tprint('We have to first set in the class the hooking classes and corresponding functions')\n\tprint(str([self.HookingBeforeVariablesList,self.HookingAfterVariablesList]))\n\tprint('')\n\t'''\n\n\t#Define a HookedCaller\n\tHookedCaller=Caller.CallerClass(**{'CallingInstanceVariable':_InstanceVariable})\n\n\t#Set the Hooked functions\n\t[_Hooker.HookedBeforeFunctionsList,_Hooker.HookedAfterFunctionsList]=map(\n\t\tlambda __HookingDictsList:\n\t\tmap(\n\t\t\t\tlambda __HookingDict:\n\t\t\t\tHookedCaller.call(__HookingDict).CallingVariable,\n\t\t\t\t__HookingDictsList\n\t\t),\n\t\t[_Hooker.HookingBeforeVariablesList,_Hooker.HookingAfterVariablesList]\n\t)\n\n\t#Set the hooking functions\n\t[_Hooker.HookedBeforeStringsList,_Hooker.HookedAfterStringsList]=map(\n\t\tlambda __HookedBeforeFunctionsList:\n\t\tmap(\n\t\t\t\tlambda __HookedBeforeFunction:\n\t\t\t\t__HookedBeforeFunction.im_func.__repr__() \n\t\t\t\tif hasattr(__HookedBeforeFunction,'im_func')\n\t\t\t\telse __HookedBeforeFunction.__repr__()\n\t\t\t\t,__HookedBeforeFunctionsList\n\t\t\t),\n\t\t\t[_Hooker.HookedBeforeFunctionsList,_Hooker.HookedAfterFunctionsList]\n\t\t)\n\n\t#Debug\n\t'''\n\tprint('Hooker l.158 : hooked functions are setted and they are : ')\n\tprint(str([_Hooker.HookedBeforeStringsList,_Hooker.HookedAfterStringsList]))\n\tprint('')\n\t'''\n\n\t#Say ok for the setting\n\t_Hooker.HookedIsBool=True\n\n#\n\n#\n@DecorationClass()\nclass HookerClass(BaseClass):\n\n\tdef __init__(self,**_KwargVariablesDict):\n\n\t\t#\n\t\tself.HookingBeforeVariablesList=[] \t\t\t#\n\t\tself.HookingAfterVariablesList=[]\t\t\t#\n\t\tself.HookingUniqueBool=True \t\t\t\t#\n\t\tself.HookedFunction=None \t\t\t\t\t#\n\t\tself.HookedBeforeVariablesList=[] \t\t\t#\n\t\tself.HookedAfterVariablesList=[] \t\t\t#\n\t\tself.HookedBeforeFunctionsList=[] \t\t\t#\n\t\tself.HookedAfterFunctionsList=[] \t\t\t#\n\t\tself.HookedIsBool=False \t\t\t\t\t#\n\t\t#\n\n\t\t#Call the parent init 
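The module header above resolves base and decoration classes from dotted paths at import time; the importlib/getattr pattern on its own (the module and attribute here are stdlib stand-ins):

import importlib

def load_class(module_path, class_name):
    module = importlib.import_module(module_path)
    return getattr(module, class_name)

OrderedDict = load_class('collections', 'OrderedDict')
print(OrderedDict([('a', 1)]))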
method\n\t\tBaseClass.__init__(self,**_KwargVariablesDict)\n\n\tdef __call__(self,_Function):\n\t\t\n\t\t#Debug\n\t\t'''\n\t\tprint('Hooker l.127')\n\t\tprint(\"_Function is \"+str(_Function))\n\t\tprint('')\n\t\t'''\n\n\t\t#Hook\n\t\tself.hook(_Function)\n\n\t\t#Call the __call__ parent method\n\t\tself.FunctedFunction=self.HookedFunction\n\t\tBaseClass.__call__(self,_Function)\n\n\t\t#Return the FunctedFunction\n\t\treturn self.FunctedFunction\n\n\tdef hook(self,_Function):\n\n\t\t#Define the FunctedFunction\n\t\tdef HookedFunction(*_LiargVariablesList,**_KwargVariablesDict):\n\n\t\t\t#Debug\n\t\t\t'''\n\t\t\tprint('Hooker l.85 : Start of the method')\n\t\t\tprint('')\n\t\t\t'''\n\n\t\t\t#Define an alias of the instance\n\t\t\tInstanceVariable=_LiargVariablesList[0]\n\n\t\t\t#Init maybe the _KwargVariablesDict\n\t\t\tif 'HookingIsBool' not in _KwargVariablesDict:\n\t\t\t\t_KwargVariablesDict['HookingIsBool']=True\n\t\t\t\t_KwargVariablesDict['HookedFunctionsList']=[]\n\t\t\t\t_KwargVariablesDict['HookingUniqueBool']=self.HookingUniqueBool\t\n\n\t\t\t#Init maybe the hooking classes and functions for the first call\n\t\t\tif self.HookedIsBool==False:\n\n\t\t\t\t#Set the HookedFunction\n\t\t\t\tsetHookerWithHookingInstanceVariable(self,InstanceVariable)\n\n\t\t\t#After hooks (integrativ loop)\n\t\t\tfor __HookedAfterFunction in self.HookedAfterFunctionsList:\n\n\t\t\t\t#Check\n\t\t\t\tif _KwargVariablesDict['HookingIsBool']:\n\t\n\t\t\t\t\t#Check\n\t\t\t\t\tif callable(__HookedAfterFunction):\n\t\t\t\t\t\n\t\t\t\t\t\t#Check if it is a unique call or not\n\t\t\t\t\t\tHookedIsBool=True\n\t\t\t\t\t\tif _KwargVariablesDict['HookingUniqueBool']:\n\t\t\t\t\t\t\tif __HookedAfterFunction in _KwargVariablesDict['HookedFunctionsList']:\n\t\t\t\t\t\t\t\tHookedIsBool=False\n\n\t\t\t\t\t\t#Append \n\t\t\t\t\t\t_KwargVariablesDict['HookedFunctionsList'].append(__HookedAfterFunction)\n\n\t\t\t\t\t\t#Check for calling\n\t\t\t\t\t\tif HookedIsBool:\n\n\t\t\t\t\t\t\t#Debug\n\t\t\t\t\t\t\t'''\n\t\t\t\t\t\t\tprint('__HookedAfterFunction is called '+str(\n\t\t\t\t\t\t\t\t\t\t\t\t__HookedAfterFunction))\n\t\t\t\t\t\t\tprint('From Module '+str(inspect.getmodule(__HookedAfterFunction)))\n\t\t\t\t\t\t\tprint('')\n\t\t\t\t\t\t\t'''\n\n\t\t\t\t\t\t\t#Call and try with or without _KwargVariablesDict\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tOutputVariable=__HookedAfterFunction(*_LiargVariablesList,**_KwargVariablesDict)\n\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\tOutputVariable=__HookedAfterFunction(*_LiargVariablesList)\n\n\t\t\t\t\t\t\t#Update maybe the _KwargVariablesDict\n\t\t\t\t\t\t\tif type(OutputVariable)==dict:\n\t\t\t\t\t\t\t\t_KwargVariablesDict.update(OutputVariable)\n\n\t\t\t\telse:\n\n\t\t\t\t\t#Return the instance\n\t\t\t\t\treturn InstanceVariable\n\n\t\t\t#Check if it is a unique call or not\n\t\t\tHookedIsBool=True\n\t\t\tif _KwargVariablesDict['HookingUniqueBool']:\n\t\t\t\tif _Function in _KwargVariablesDict['HookedFunctionsList']:\n\t\t\t\t\tHookedIsBool=False\n\n\t\t\t#Append \n\t\t\t_KwargVariablesDict['HookedFunctionsList'].append(_Function)\n\n\t\t\tif _KwargVariablesDict['HookingIsBool']:\n\n\t\t\t\t#Debug\n\t\t\t\t'''\n\t\t\t\tprint('_Function is called '+str(_Function))\n\t\t\t\tprint('From Module '+str(inspect.getmodule(_Function)))\n\t\t\t\tprint('')\n\t\t\t\t'''\n\n\t\t\t\t#Call and try with or without 
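HookedFunction above fires the 'After' hook list first, then the wrapped function, then the 'Before' list, skipping callables already recorded in HookedFunctionsList when HookingUniqueBool is set. A compact sketch of that shape, with the hook lists passed explicitly instead of resolved from the instance:

import functools

def with_hooks(func, after=(), before=()):
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        fired, result = [], None
        for hook in (*after, func, *before):    # same firing order as above
            if hook not in fired:               # HookingUniqueBool behaviour
                fired.append(hook)
                out = hook(*args, **kwargs)
                if hook is func:
                    result = out                # keep the wrapped function's result
        return result
    return wrapped

traced = with_hooks(lambda x: x * 2,
                    after=[lambda x: print('after-hook fires first:', x)],
                    before=[lambda x: print('before-hook fires last:', x)])
print(traced(3))   # ... then 6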
_KwargVariablesDict\n\t\t\t\ttry:\n\t\t\t\t\tOutputVariable=_Function(*_LiargVariablesList,**_KwargVariablesDict)\n\t\t\t\texcept:\n\t\t\t\t\tOutputVariable=_Function(*_LiargVariablesList)\n\n\t\t\t\t#Update maybe the _KwargVariablesDict\n\t\t\t\tif type(OutputVariable)==dict:\n\t\t\t\t\t_KwargVariablesDict.update(OutputVariable)\n\n\t\t\telse:\n\n\t\t\t\t#Return the instance\n\t\t\t\treturn InstanceVariable\n\n\t\t\t#Before hooks (integrativ loop)\n\t\t\tfor __HookedBeforeFunction in self.HookedBeforeFunctionsList:\n\n\t\t\t\t#Check\n\t\t\t\tif _KwargVariablesDict['HookingIsBool']:\n\t\t\t\t\t\n\t\t\t\t\t#Check\n\t\t\t\t\tif callable(__HookedBeforeFunction):\n\n\t\t\t\t\t\t#Check if it is a unique call or not\n\t\t\t\t\t\tHookedIsBool=True\n\t\t\t\t\t\tif _KwargVariablesDict['HookingUniqueBool']:\n\t\t\t\t\t\t\tif __HookedBeforeFunction in _KwargVariablesDict['HookedFunctionsList']:\n\t\t\t\t\t\t\t\tHookedIsBool=False\n\n\t\t\t\t\t\t#Append \n\t\t\t\t\t\t_KwargVariablesDict['HookedFunctionsList'].append(__HookedBeforeFunction)\n\n\t\t\t\t\t\t#Check for calling\n\t\t\t\t\t\tif HookedIsBool:\n\n\t\t\t\t\t\t\t#Debug\n\t\t\t\t\t\t\t'''\n\t\t\t\t\t\t\tprint('__HookedBeforeFunction is called '+str(__HookedBeforeFunction))\n\t\t\t\t\t\t\tprint('From Module '+str(inspect.getmodule(__HookedBeforeFunction)))\n\t\t\t\t\t\t\tprint('')\n\t\t\t\t\t\t\t'''\n\n\t\t\t\t\t\t\t#Call and try with or without _KwargVariablesDict\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tOutputVariable=__HookedBeforeFunction(*_LiargVariablesList,**_KwargVariablesDict)\n\t\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\t\tOutputVariable=__HookedBeforeFunction(*_LiargVariablesList)\n\n\t\t\t\t\t\t\t#Update maybe the _KwargVariablesDict\n\t\t\t\t\t\t\tif type(OutputVariable)==dict:\n\t\t\t\t\t\t\t\t_KwargVariablesDict.update(OutputVariable)\n\n\t\t\t\telse:\n\n\t\t\t\t\t#Return the Instance\n\t\t\t\t\treturn InstanceVariable\n\n\t\t\t#Return self for the wrapped method call\n\t\t\treturn InstanceVariable\n\n\t\t#Debug\n\t\t'''\n\t\tprint('HookedFunction is '+str(HookedFunction))\n\t\tprint('')\n\t\t'''\n\n\t\t#Set\n\t\tself.HookedFunction=HookedFunction\n\n\t\t#Return self\n\t\treturn self\n\n#\n\n","sub_path":"Install/build/lib/ShareYourSystem/Functers/Hooker/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"45559726","text":"# -*- coding: utf-8 -*-\nfrom DateTime import DateTime\nfrom pkg_resources import get_distribution\nfrom pkg_resources import parse_version\nfrom plone.app.testing import PLONE_INTEGRATION_TESTING\nfrom plone.registry.interfaces import IRegistry\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone.interfaces import IMarkupSchema\nfrom zope.component import getUtility\n\nimport unittest\n\n\nclass UpgradeMemberData51to52Test(unittest.TestCase):\n layer = PLONE_INTEGRATION_TESTING\n\n def test_rebuild_member_data(self):\n portal = self.layer['portal']\n from plone.app.upgrade.v52.alphas import rebuild_memberdata\n\n rebuild_memberdata(portal)\n tool = getToolByName(portal, 'portal_memberdata')\n self.assertIn('test_user_1_', tool._members.keys())\n\n\nclass Various52Test(unittest.TestCase):\n layer = PLONE_INTEGRATION_TESTING\n\n def test_rebuild_redirections(self):\n # Until at least 5.2rc1, redirection values were simple paths,\n # now they are tuples. 
The upgrade step rebuilds the information.\n # (The code can at the moment handle old-style and new-style,\n # but rebuilding is still good.)\n from plone.app.redirector.interfaces import IRedirectionStorage\n from plone.app.upgrade.v52.final import rebuild_redirections\n\n storage = getUtility(IRedirectionStorage)\n # add old-style redirect directly in internal structure:\n old = '/plone/old'\n new = '/plone/new'\n storage._paths[old] = new\n # get_full mocks a new-style redirect,\n # though with None instead of a DateTime, and manual always True.\n self.assertTupleEqual(storage.get_full(old), (new, None, True))\n portal = self.layer['portal']\n # Run the rebuild, and keep track of time before and after.\n time1 = DateTime()\n rebuild_redirections(portal.portal_setup)\n time2 = DateTime()\n # The basic information and usage has not changed:\n self.assertIn(old, storage)\n self.assertListEqual(storage.redirects(new), [old])\n self.assertEqual(storage.get(old), new)\n self.assertEqual(storage[old], new)\n # The internal structure is now a tuple:\n redirect = storage._paths[old]\n self.assertIsInstance(redirect, tuple)\n # The first item in the tuple is the target path.\n self.assertEqual(redirect[0], new)\n # The current DateTime is set as the creation time of the redirect.\n self.assertIsInstance(redirect[1], DateTime)\n self.assertTrue(time1 < redirect[1] < time2)\n # Existing migrations are marked as manual,\n # because we have no way of knowing if it is automatic or nor.\n self.assertEqual(redirect[2], True)\n # get_full now returns the real information\n self.assertTupleEqual(storage.get_full(old), redirect)\n\n\nclass UpgradePortalTransforms521to522Test(unittest.TestCase):\n layer = PLONE_INTEGRATION_TESTING\n\n def setUp(self):\n self.portal = self.layer['portal']\n self.request = self.layer['request']\n self.pt = self.portal.portal_transforms\n registry = getUtility(IRegistry)\n self.settings = registry.forInterface(IMarkupSchema, prefix='plone')\n\n def test_migrate_markup_settings(self):\n from plone.app.upgrade.v52.final import \\\n move_markdown_transform_settings_to_registry\n self.pt.markdown_to_html._config['enabled_extensions'] = [\n 'markdown.extensions.fenced_code',\n 'markdown.extensions.nl2br',\n 'markdown.extensions.extra',\n ]\n move_markdown_transform_settings_to_registry(self.portal)\n if getattr(self.settings, 'markdown_extensions', None):\n self.assertEqual(\n self.settings.markdown_extensions,\n [\n 'markdown.extensions.fenced_code',\n 'markdown.extensions.nl2br',\n 'markdown.extensions.extra',\n ]\n )\n\n\ndef test_suite():\n # Skip these tests on Plone < 5.2a1\n plone_version = get_distribution('Products.CMFPlone').version\n if not parse_version(plone_version) >= parse_version('5.2a1'):\n return unittest.TestSuite()\n\n suite = unittest.TestSuite()\n suite.addTest(unittest.makeSuite(UpgradeMemberData51to52Test))\n suite.addTest(unittest.makeSuite(Various52Test))\n suite.addTest(unittest.makeSuite(UpgradePortalTransforms521to522Test))\n return suite\n","sub_path":"plone/app/upgrade/v52/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"520562377","text":"import json\nfrom datetime import datetime\nwith open('files/profile_read.json', 'r') as rf:\n json_data = json.load(rf)\n # chang format of birthday\n birthday = datetime.strptime(json_data['birthday'], '%m-%d-%Y')\n new_date = birthday.strftime('%B %d, %Y')\n json_data['birthday'] = 
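The redirect upgrade being tested above turns bare target paths into (target, created, manual) tuples; the rebuild idea in miniature (datetime stands in for Zope's DateTime):

from datetime import datetime

def rebuild(paths):
    for old, value in list(paths.items()):
        if not isinstance(value, tuple):
            paths[old] = (value, datetime.now(), True)   # migrated entries count as manual
    return paths

paths = rebuild({'/plone/old': '/plone/new'})
print(paths['/plone/old'][0])   # /plone/new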
new_date\n # update date_released and sort movies by latest released\n for movie in json_data['movies']:\n if movie['date_released'] == '--':\n movie['date_released'] = 'Upcoming'\n # sort movies list\n json_data['movies'].sort(key=lambda k: k['date_released'], reverse=True)\n# create a json file and write the new data\nwith open('files/new_emelyn_json.json', 'w') as wf:\n json.dump(json_data, wf, ensure_ascii=False, indent=2)\n","sub_path":"activities/seatiel.py","file_name":"seatiel.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"475471872","text":"from threading import Timer\ndef t(a):\n p,s=[],[]\n for e in a:\n o=Timer(0.01*e,lambda x:s.append(x),args=(e,))\n p+=[o]\n o.start()\n [t.join()for t in p]\n return s\ndef r(g):\n s={sum(t(l)):t(l)for l in g for j in range(len(g[0]))}\n return[s[k]for k in t([*s])]\n","sub_path":"challenge3.py","file_name":"challenge3.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"139629671","text":"from pyramid.security import ALL_PERMISSIONS, Allow, Authenticated, Everyone\n\nfrom app.models.user import UserModel\n\n\nclass RootFactory(object):\n __acl__ = [\n (Allow, 'group:admin', ALL_PERMISSIONS),\n (Allow, 'group:editor', 'edit'),\n (Allow, Authenticated, 'add'),\n (Allow, Everyone, 'read'),\n ]\n\n def __init__(self, request):\n self.request = request\n\n\ndef group_finder(userid, request):\n query = request.dbsession.query(UserModel)\n user = query.filter_by(id=userid).first()\n\n if user:\n group = user.group\n group_ = ['group:{}'.format(group)]\n return group_\n","sub_path":"py/alchemified/app/app/securities/authorization.py","file_name":"authorization.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"273057770","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nonemapsg.utils\n~~~~~~~~~~~~~~\n\nThis module contains utilities shared across the package.\n\"\"\"\n\nfrom typing import Any, Callable, List, Optional, Type, Union\nfrom urllib.parse import urlencode\n\nimport requests\nfrom requests import Response as RequestsResponse\n\nfrom .api import API\nfrom .response import GeocodeInfo, Response, RouteResult, SearchResult\n\nSAFE_METHODS: List[str] = [\"get\", \"options\"]\n\n\ndef to_dict(obj: Any) -> dict:\n \"\"\"Converts class instances to dictionaries.\n Handles nested objects as well.\"\"\"\n if not hasattr(obj, \"__dict__\"):\n return obj\n result: dict = {}\n for key, val in obj.__dict__.items():\n element: Union[List[Union[dict, List[dict]]], dict] = []\n if not key.startswith(\"__\"):\n if isinstance(val, list) and isinstance(element, list):\n for item in val:\n element.append(to_dict(item))\n else:\n element = to_dict(val)\n result[key] = element\n return result\n\n\ndef make_request(\n endpoint: str, method: str = \"get\", data: Optional[dict] = None, timeout: int = 15\n) -> Response:\n \"\"\"Makes a request to the given endpoint and maps the response\n to a Response class\"\"\"\n method = method.lower()\n request_method: Callable = getattr(requests, method)\n if method not in SAFE_METHODS and data is None:\n raise ValueError(\"Data must be provided for POST, PUT and PATCH requests.\")\n\n r: RequestsResponse\n if method not in SAFE_METHODS:\n r = request_method(endpoint, json=data, timeout=timeout)\n else:\n r = request_method(endpoint, 
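The birthday rewrite above round-trips a date string through strptime/strftime; the same conversion on a throwaway value:

from datetime import datetime

birthday = datetime.strptime('07-04-1990', '%m-%d-%Y')
print(birthday.strftime('%B %d, %Y'))   # July 04, 1990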
timeout=timeout)\n return Response(status_code=r.status_code, data=r.json())\n\n\ndef construct_search_query(\n search_val: str, return_geometry: str, get_address_details: str, page_number: int\n) -> str:\n \"\"\"Constructs search query URL compliant with OneMap's requirements.\"\"\"\n search_params: dict = {\n \"searchVal\": search_val,\n \"returnGeom\": \"Y\" if return_geometry else \"N\",\n \"getAddrDetails\": \"Y\" if get_address_details else \"N\",\n \"pageNum\": page_number if page_number else 1,\n }\n search_params_str: str = urlencode(search_params)\n search_url: str = f\"{API.search}?{search_params_str}\"\n return search_url\n\n\ndef get_search_class() -> Type[SearchResult]:\n \"\"\"Returns SearchResult class.\"\"\"\n return SearchResult\n\n\ndef validate_address_type(address_type: str) -> str:\n if address_type.lower() not in [\"all\", \"hdb\"]:\n raise ValueError(\"Invalid `addressType` value - can only be `HDB` or `All`\")\n return address_type.lower()\n\n\ndef construct_reverse_geocode_svy21_query(\n location: str,\n token: str,\n buffer: int = 10,\n address_type: str = \"all\",\n other_features: bool = False,\n) -> str:\n \"\"\"Constructs Reverse Geocode (SVY21) query URL compliant with\n OneMap's requirements.\"\"\"\n search_params: dict = {\n \"location\": \",\".join([str(loc).strip() for loc in location]),\n \"token\": token,\n \"buffer\": buffer,\n \"addressType\": validate_address_type(address_type),\n \"otherFeatures\": \"Y\" if other_features else \"N\",\n }\n search_params_str: str = urlencode(search_params, safe=\",:-\")\n search_url: str = f\"{API.reverse_geocode}?{search_params_str}\"\n return search_url\n\n\ndef get_reverse_geocode_svy21_class() -> Type[GeocodeInfo]:\n \"\"\"Returns GeocodeInfo class.\"\"\"\n return GeocodeInfo\n\n\nconstruct_reverse_geocode_wgs84_query: Callable = construct_reverse_geocode_svy21_query\nget_reverse_geocode_wgs84_class: Callable = get_reverse_geocode_svy21_class\n\n\ndef construct_route_query(\n start: str, end: str, route_type: str, public_transport_options: dict, token: str\n) -> str:\n \"\"\"Constructs route query URL compliant with OneMap's requirements.\"\"\"\n route_params: dict = {\"start\": start, \"end\": end, \"routeType\": route_type}\n if route_type == \"pt\" and public_transport_options:\n route_params.update(public_transport_options)\n route_params_str: str = urlencode(route_params, safe=\",:-\")\n route_url: str = f\"{API.route}?{route_params_str}&token={token}\"\n return route_url\n\n\ndef get_route_class() -> Type[RouteResult]:\n \"\"\"Returns RouteResult class.\"\"\"\n return RouteResult\n\n\ndef coerce_response(cls: Type[Any], data: dict) -> Any:\n \"\"\"Creates a class object out of given response data and class.\"\"\"\n return cls(**data)\n","sub_path":"onemapsg/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"432273221","text":"infile = open('D-small-attempt0.in', 'r')\noutfile = open('fractiles_results.txt', 'w')\nT = int(infile.readline())\n\nfor i in range(T):\n\tresult = 'Case #' + str(i+1) + ':'\n\tK, C, S = map(int, infile.readline().strip().split())\n\tfor j in range(K):\n\t\tresult += ' ' + str(j+1)\n\toutfile.write(result + 
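construct_search_query and friends above lean on urlencode; two details worth seeing in isolation -- plain encoding, and safe=',:-' keeping coordinate commas literal (the endpoint is a placeholder, not OneMap's):

from urllib.parse import urlencode

params = {'searchVal': 'City Hall', 'returnGeom': 'Y', 'pageNum': 1}
print('https://example.com/search?' + urlencode(params))
print(urlencode({'location': '1.30,103.85'}, safe=',:-'))   # location=1.30,103.85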
'\\n')\n\ninfile.close()\noutfile.close()","sub_path":"codes/CodeJamCrawler/16_0_4_neat/16_0_4_jmak_fractiles.py","file_name":"16_0_4_jmak_fractiles.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"254916140","text":"from flask import Blueprint, render_template, redirect, request\nfrom flask_login import login_required, logout_user, current_user\n\n\n\ncalculate = Blueprint('calculate', __name__)\n\n@calculate.route('/calc', methods=['GET','POST'])\n@login_required\ndef calc():\n if request.method == 'POST':\n var1 = request.form.get('var1', type=int)\n var2 = request.form.get('var2', type=int)\n operate = request.form.get('operation')\n if operate == 'add':\n result = var1 + var2\n elif operate == 'sub':\n result = var1 - var2\n elif operate == 'div':\n result = var1 / var2\n elif operate == 'mul':\n result = var1 * var2\n\n return render_template('result.html', entry=result)\n return render_template('chome.html')\n\n","sub_path":"apps/calc/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"347988322","text":"\"\"\"\nMagnetostatic Vector Potentials: Dipole and Loop Sources\n========================================================\n\nIn this example, we plot the vector potential for a dipole and a loop source\nin a wholespace.\n\nWe can vary the magnetic permeability of the wholespace, location and\norientation of the sources. For the dipole source, we can vary the moment, and\nfor the loop source, we can vary the radius and current through the loop.\n\n:author: Lindsey Heagy (`@lheagy `_)\n:date: June 6, 2018\n\n\"\"\"\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm\nfrom scipy.constants import mu_0, epsilon_0\n\nfrom geoana import utils, spatial\nfrom geoana.em import static\n\n###############################################################################\n# Setup\n# -----\n#\n# define the location orientation and source, physical properties of the\n# wholespace and source parameters\n\nmu = mu_0 # permeability of free space (this is the default)\nlocation=np.r_[0., 0., 0.] 
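The fractiles solver above emits Code Jam's 'Case #i: ...' lines; the output convention as a runnable stub (file name and answers invented):

cases = [[1, 2, 3], [1, 2]]
with open('out.txt', 'w') as out:
    for i, answer in enumerate(cases, start=1):
        out.write('Case #{}: {}\n'.format(i, ' '.join(map(str, answer))))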
# location of the dipole\norientation='Z' # vertical dipole (can also be a unit-vector)\n\n# dipole parameters\nmoment = 1\n\n# loop source parameters\ncurrent = 1\nradius = 20\n\n\n###############################################################################\n# Magnetostatic Dipole and Loop\n# -----------------------------\n#\n# Here, we build the geoana magnetic dipole in a wholespace and circular loop\n# in a wholespace using the parameters defined above.\n# For a full list of the properties you can set on a dipole, see the\n# :class:`geoana.em.static.MagneticDipoleWholeSpace` docs and for the\n# circular loop source, see the\n# :class:`geoana.em.static.CircularLoopWholeSpace` docs\n\ndipole = static.MagneticDipoleWholeSpace(\n mu=mu, location=location,\n orientation=orientation , moment=moment\n)\n\nloop = static.CircularLoopWholeSpace(\n mu=mu, location=location,\n orientation=orientation, current=current,\n radius=radius\n)\n\n###############################################################################\n# Evaluate vector potential\n# --------------------------\n#\n# Next, we construct a grid where we want to plot the vector potential and\n# evaluate\n\nx = np.linspace(-50, 50, 100)\ny = np.linspace(-50, 50, 100)\nxyz = utils.ndgrid([x, y, np.r_[0]])\n\n# evaluate the vector potential\na_dipole = dipole.vector_potential(xyz)\na_loop = loop.vector_potential(xyz)\n\n###############################################################################\n#\n# and define plotting code to plot an image of the amplitude of the vector\n# field / flux as well as the streamlines\n\ndef plot_amplitude(ax, v):\n v = spatial.vector_magnitude(v)\n plt.colorbar(\n ax.pcolormesh(\n x, y, v.reshape(len(x), len(y), order='F'), norm=LogNorm()\n ), ax=ax\n )\n ax.axis('square')\n ax.set_xlabel('x (m)')\n ax.set_ylabel('y (m)')\n\n\n# plot streamlines\ndef plot_streamlines(ax, v):\n vx = v[:, 0].reshape(len(x), len(y), order='F')\n vy = v[:, 1].reshape(len(x), len(y), order='F')\n ax.streamplot(x, y, vx.T, vy.T, color='k')\n\n\n###############################################################################\n#\n# Create subplots for plotting the results. 
Loop over frequencies and plot the\n# electric and magnetic fields along a slice through the center of the dipole.\n\nfig, ax = plt.subplots(1, 2, figsize=(12, 5))\n\n# plot dipole vector potential\nplot_amplitude(ax[0], a_dipole)\nplot_streamlines(ax[0], a_dipole)\n\n# plot loop vector potential\nplot_amplitude(ax[1], a_loop)\nplot_streamlines(ax[1], a_loop)\n\n\n# set the titles\nax[0].set_title(\"$\\\\vec{A}$: dipole\")\nax[1].set_title(\"$\\\\vec{A}$: loop\")\n\n# format so text doesn't overlap\nfig.tight_layout()\nplt.show()\n\n\n","sub_path":"examples/em/plot_static_vector_potentials.py","file_name":"plot_static_vector_potentials.py","file_ext":"py","file_size_in_byte":3702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"398236322","text":"# given an experiment ID, find the adult \"new diagnostic prevalence\" at the date of the prevalence survey by pulling\n# from an appropriately specified summary report.\n\n# also: pull received_treatment time series and population to make incidence chart\n\nimport pandas as pd\nimport os\nimport json\nimport numpy as np\n\nfrom simtools.Utilities.COMPSUtilities import COMPS_login\nfrom simtools.DataAccess.ExperimentDataStore import ExperimentDataStore\n\nCOMPS_login(\"https://comps.idmod.org\")\n\nexp_id = \"31e5032c-efb0-e711-9414-f0921c16b9e5\"\n\nexpt = ExperimentDataStore.get_most_recent_experiment(exp_id)\n\ndf = pd.DataFrame([x.tags for x in expt.simulations])\ndf['outpath'] = pd.Series([sim.get_path() for sim in expt.simulations])\n\nprev_vec = []\n\nprint(\"retrieving treated pop\")\nfor idx in df.index:\n\n main_dir = os.path.join(df['outpath'][idx], \"output\")\n treat_dir = os.path.join(main_dir, \"ReportEventCounter.json\")\n inset_dir = os.path.join(main_dir, \"InsetChart.json\")\n\n # incidence\n with open(treat_dir) as treat_rep:\n treated = json.loads(treat_rep.read())\n\n with open(inset_dir) as inset_rep:\n inset = json.loads(inset_rep.read())\n\n inc_df = pd.DataFrame({\"treated\": treated[\"Channels\"][\"Received_Treatment\"][\"Data\"],\n \"pop\": inset[\"Channels\"][\"Statistical Population\"][\"Data\"]\n })\n inc_df[\"day\"] = range(len(inc_df))\n inc_df[\"week\"] = np.floor(inc_df[\"day\"]/7)\n\n agg_funs = {\"pop\": [\"mean\"],\n \"treated\": [\"sum\"]}\n\n inc_df = inc_df.groupby(\"week\").agg(agg_funs)\n inc_df.columns = inc_df.columns.droplevel(1)\n inc_df[\"inc_rate\"] = (inc_df[\"treated\"]/inc_df[\"pop\"])*1000\n\n\n\n\n\n\ndf['prev'] = prev_vec\n\nsummary_df = df[['Run_Number', 'minimus.WATER_VEGETATION', 'prev']]\nsummary_df = summary_df.groupby('minimus.WATER_VEGETATION').mean()\n\nprint(summary_df)\n","sub_path":"02_view_outputs/received_treatmet_inc.py","file_name":"received_treatmet_inc.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"176886567","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n#Load data\nloan_df = pd.read_csv(\"loan_data.csv\")\nhome_ownership_df = pd.read_csv(\"home_ownership_data.csv\")\n\n#Merge separate dataframes into single dataframe\nmergedData = pd.merge(loan_df, home_ownership_df, on=\"member_id\")\n\n#Calculate mean loan amount by home ownership status\nmeanByHomeOwnershipStatus = mergedData.groupby(\"home_ownership\", as_index=False)[\"loan_amnt\"].mean()\n\n#Graph data\nax = meanByHomeOwnershipStatus.plot.bar(\"home_ownership\", \"loan_amnt\")\nax.get_legend().remove()\nplt.title(\"Average 
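The incidence script above buckets days into weeks and aggregates each column differently; the roll-up in isolation (toy numbers):

import numpy as np
import pandas as pd

inc_df = pd.DataFrame({'day': range(14), 'pop': 1000, 'treated': np.arange(14)})
inc_df['week'] = np.floor(inc_df['day'] / 7)
weekly = inc_df.groupby('week').agg({'pop': 'mean', 'treated': 'sum'})
weekly['inc_rate'] = weekly['treated'] / weekly['pop'] * 1000
print(weekly)   # week 0 treats 21, week 1 treats 70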
loan amounts per home ownership\")\nplt.xlabel(\"Home ownership\")\nplt.ylabel(\"Average loan amount ($)\")\nplt.xticks(rotation=\"horizontal\")\nplt.show()\n","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"242888040","text":"import argparse\nimport sys\nfrom .bork import bork\nfrom .utils import whatsmyip\nfrom .spriteit import spriteit\nfrom .gifit import gifit\nfrom .network_monitor import testwork\nimport os\n\n\ndef main(argv=None):\n argv = (argv or sys.argv)\n # Grab script name and location.\n argv[0] = os.path.splitext(os.path.basename(argv[0]))[0]\n\n # Create ArgumentParser\n parser = argparse.ArgumentParser(prog=argv[0])\n subparsers = parser.add_subparsers()\n\n # Init the different SubParsers for the Command Line Tool.\n bork_parser = subparsers.add_parser('bork')\n bork_parser.set_defaults(func=bork)\n\n whatsmyip_parser = subparsers.add_parser('whatsmyip')\n whatsmyip_parser.add_argument('--local', '-l', action='store_true', help='get local IP instead of public IP')\n whatsmyip_parser.set_defaults(func=whatsmyip)\n\n spriteit_parser = subparsers.add_parser('spriteit')\n spriteit_parser.add_argument('source')\n spriteit_parser.add_argument('output', nargs='?', default='sprites')\n spriteit_parser.add_argument('-x', '--width', dest='width', type=int, default=None)\n spriteit_parser.add_argument('-y', '--height', dest='height', type=int, default=None)\n spriteit_parser.add_argument('-xy', '--size', dest='size', type=int, default=None)\n spriteit_parser.set_defaults(func=spriteit)\n\n gifit_parser = subparsers.add_parser('gifit')\n gifit_parser.add_argument('source', type=str, help='source directory to make the gif with')\n gifit_parser.add_argument('output', type=str, help='the desired final output filename')\n gifit_parser.set_defaults(func=gifit)\n\n testwork_parser = subparsers.add_parser('whatsmyspeed')\n testwork_parser.add_argument('--quiet', '-q', dest='quiet', action='store_true', help=\"don't show any output\")\n testwork_parser.add_argument('--repeat', '-r', dest='repeat', default=3, type=int, help='best of N')\n testwork_parser.add_argument('--last', default=0, type=int, help=\"output the last N results\")\n testwork_parser.add_argument('--debug', action='store_true')\n testwork_parser.set_defaults(func=testwork)\n\n args = parser.parse_args(argv)\n return args.func(args)\n","sub_path":"hackytools/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"120652905","text":"from random import seed\nfrom random import choice\nfrom random import randint\nfrom string import ascii_letters\n\nseed(1120117)\n\no = []\nl = []\ndef random_string_generator():\n v = \"\"\n for b in range(randint(1,15)):\n v += choice(ascii_letters)\n return v\n\n\nfor t in range(18354):\n o.append(randint(0, 104))\n l.append(random_string_generator())\n\n\nk = o\n\nfor t in range(55062):\n o[t%len(o)] = o[t%len(o)] * 3\nanswer_1_true = o[1]\nanswer_2_true = o[2]\nanswer_3_true = o[3]\nanswer_4_true = o[4]\nanswer_5_true = 
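cli.py above wires each subcommand to its handler via set_defaults(func=...); the dispatch pattern reduced to one subcommand (names invented):

import argparse

parser = argparse.ArgumentParser(prog='demo')
subparsers = parser.add_subparsers()
greet = subparsers.add_parser('greet')
greet.add_argument('name')
greet.set_defaults(func=lambda args: print('hello', args.name))

args = parser.parse_args(['greet', 'world'])
args.func(args)   # hello world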
o[5]\n\nprint(answer_1_true)\nprint(answer_2_true)\nprint(answer_3_true)\nprint(answer_4_true)\nprint(answer_5_true)\n","sub_path":"03_Implementacao/DataBase/true_or_false_question_working_with_lists/question/version_1/full_program.py","file_name":"full_program.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"350933572","text":"\"\"\"\nBinary search trees are a data structure that enforce an ordering over \nthe data they store. That ordering in turn makes it a lot more efficient \nat searching for a particular piece of data in the tree. \n\nThis part of the project comprises two days:\n1. Implement the methods `insert`, `contains`, `get_max`, and `for_each`\n on the BSTNode class.\n2. Implement the `in_order_print`, `bft_print`, and `dft_print` methods\n on the BSTNode class.\n\"\"\"\nclass BSTNode:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\n # Insert the given value into the tree\n def insert(self, value):\n if value < self.value:\n # check if the new node's value is less than the current node value.\n if self.left:\n # if there is already another node on the left, run the insert function\n # recursively on the left node.\n return self.left.insert(value)\n else:\n # otherwise - create a new node and assign it as left child.\n self.left = BSTNode(value)\n return True\n else:\n # do the same on the right node, if the new node value is equal or greater than\n # the current node value.\n if self.right:\n return self.right.insert(value)\n else:\n self.right = BSTNode(value)\n return True\n\n # Return True if the tree contains the value\n # False if it does not\n def contains(self, target):\n if target == self.value:\n # if the value of the current node matches target - return True\n return True\n else:\n if target < self.value and self.left:\n # if the target is lesser than current node, and there is a left child\n # run the contians method on the left node.\n return self.left.contains(target)\n elif target > self.value and self.right:\n # same logic on the right node.\n return self.right.contains(target)\n else:\n # if there is no child nodes, we reached the leaf node and \n # didn't find the value. 
Return False.\n return False","sub_path":"names/binary_tree.py","file_name":"binary_tree.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"624317605","text":"from django.conf.urls import include, url, patterns\nfrom apps.myauth import views\n\nurlpatterns = patterns('',\n url(r'^$', views.index, name='index'),\n url(r'^registration', views.registration, name='registration'),\n url(r'^login', views.login, name='login'),\n url(r'^logout$', views.logout, name='logout'),\n # url(r'^logout_auth', views.logout_auth, name='logout_auth'),\n\n # url(r'^success/(?P[\\w|\\W]+)/$', views.success, name='success'),\n url(r'^success', views.success, name='success'),\n )","sub_path":"apps/myauth/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"266879298","text":"# Copyright (c) 2018, Olmo Kramer \n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n'''\nFind command.\n'''\n\nimport enum\nimport typing as t\n\nfrom dataclasses import dataclass\n\nfrom fb.command import FindCommand\nfrom fb.context import public_api, public_apis\nfrom fb.options import options\nfrom fb.tab import Tab\n\nif t.TYPE_CHECKING:\n from fb.window import Window # pylint: disable=unused-import\n from fb.tab import File # pylint: disable=unused-import\n\n\n@public_api\nclass FindOption(enum.IntEnum):\n '''\n Options for the Find command.\n '''\n\n SORT_COLUMN = 1\n\n\n@public_api\n@public_apis('get_find_query', 'set_find_query')\n@Tab.keymap.bind('f')\n@Tab.keymap.bind('F', direction=-1)\n@Tab.keymap.bind(';', repeat=True)\n@Tab.keymap.bind(',', repeat=True, direction=-1)\n@Tab.keymap.bind('sf', column=FindOption.SORT_COLUMN)\n@Tab.keymap.bind('sF', column=FindOption.SORT_COLUMN, direction=-1)\nclass Find(FindCommand):\n '''\n Go to the next file whose name starts with a user-input character.\n\n :param char:\n Character to search for.\n :param column:\n Column to search. ``FindOption.SORT_COLUMN`` may also be given to\n search the column by which the tab is sorted.\n :param repeat:\n Repeat the previous find command.\n :param direction:\n Search direction. 
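Exercising the BSTNode insert/contains pair above (values invented):

root = BSTNode(8)
for v in (3, 10, 1, 6):
    root.insert(v)
print(root.contains(6))   # True
print(root.contains(7))   # False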
When repeat is ``True``, -1 reverses the direction of\n the previous search command, while 1 preserves it.\n '''\n\n @dataclass\n class _Query:\n '''\n :ivar char:\n Character to search for.\n :ivar column:\n Colum to search.\n :ivar direction:\n Search direction.\n '''\n\n char: str\n column: str\n direction: int\n\n _query: t.ClassVar[t.Optional['_Query']] = None\n\n @classmethod\n def get_find_query(cls) -> '_Query':\n '''\n Get the previous find query.\n '''\n if cls._query is None:\n raise RuntimeWarning('No Find query.')\n\n return cls._query\n\n @classmethod\n def set_find_query(\n cls, char: str, column: str = 'name', direction: int = 1\n ) -> None:\n '''\n Set a new find query that will be used when the find command is\n repeated.\n\n :param char:\n Char to search for.\n :param column:\n Column to search.\n :param direction:\n Search direction.\n '''\n\n cls._query = cls._Query(char, column, direction)\n\n _char: str\n _column: t.Union[str, 'FindOption']\n _repeat: bool\n\n def __init__(\n self,\n char: str = '',\n column: t.Union[str, 'FindOption'] = 'name',\n repeat: bool = False,\n **kwargs: t.Any,\n ) -> None:\n super().__init__(**kwargs)\n self._char = char\n self._column = column\n self._repeat = repeat\n\n def __help__(self) -> str:\n if self._repeat:\n direction = 'same' if self.direction > 0 else 'opposite'\n\n return f'''\n Repeat the previous find command in the {direction} direction.\n '''\n else:\n direction = 'next' if self.direction > 0 else 'previous'\n\n if self._char:\n if self._column is FindOption.SORT_COLUMN:\n return f'''\n Move to the {{count}}th {direction} file where the first\n character of the column by which the tab is sorted is\n `{self._char}`.\n '''\n else:\n return f'''\n Move to the {{count}}th {direction} file where the first\n character of the {self._column} column is `{self._char}`.\n '''\n else:\n if self._column is FindOption.SORT_COLUMN:\n return f'''\n Wait for a single character input, then move to the\n {{count}}th {direction} file where the first character of\n the column by which the tab is sorted is that character.\n '''\n else:\n return f'''\n Wait for a single character input, then move to the\n {{count}}th {direction} file where the first character of\n the {self._column} column is that character.\n '''\n\n def modify_count(self, count: int) -> int:\n if self._repeat:\n query = self.get_find_query()\n direction = query.direction * self.direction\n else:\n direction = self.direction\n\n return direction * count\n\n def make_predicate(self, win: 'Window') -> t.Callable[['File'], t.Any]:\n if not self._repeat:\n if self._char:\n char = self._char\n else:\n char = win.get_key().name\n\n if self._column is FindOption.SORT_COLUMN:\n column = win.tab.sort_key\n else:\n column = str(self._column)\n\n self.set_find_query(char, column, self.direction)\n\n query = self.get_find_query()\n\n if options.ignore_case: # type: ignore\n char = query.char.upper()\n\n if query.column == 'name':\n return lambda e: e.path.name[0].upper() == char\n else:\n return lambda e: e.columns[query.column][0].upper() == char\n else:\n if query.column == 'name':\n return lambda e: e.path.name[0] == query.char\n else:\n return lambda e: e.columns[query.column][0] == query.char\n","sub_path":"fb/commands/find.py","file_name":"find.py","file_ext":"py","file_size_in_byte":6645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"237630532","text":"\r\ndef lengthOfLongestSubstring(s: str) -> int:\r\n dic, back_index, 
max_count = {}, 0, 0\r\n for front_index, character in enumerate(s):# stores the index at dictionary location\r\n if character in dic and dic[character] + 1 > back_index: \r\n back_index = dic[character] +1 # sets the new back index to the index of the duplicate char in the substring\r\n if (front_index - back_index + 1) > max_count:# computes the current length of the non-repeating substring\r\n max_count = front_index - back_index + 1\r\n dic[character] = front_index\r\n return max_count\r\n \r\n \r\nprint(lengthOfLongestSubstring(\"ckilbkd\"))\r\n\r\n\r\n\r\n'''\r\nOriginal Attempt\r\ndic = {}\r\n order = 0\r\n count = 0\r\n max_count = 0\r\n last_char = ''\r\n for char in s:\r\n if char in dic:\r\n if dic[char] != order:\r\n count += 1\r\n else:\r\n if char == last_char:\r\n if count > max_count:\r\n max_count = count\r\n count = 1\r\n order += 1\r\n else:\r\n if count > max_count:\r\n max_count = count \r\n \r\n dic[char] = order\r\n else:\r\n dic[char] = order\r\n count += 1\r\n last_char = char\r\n\r\n if count > max_count:\r\n max_count = count\r\n return max_count\r\n'''","sub_path":"LeetCode/Longest_Nonrepeating_Substring.py","file_name":"Longest_Nonrepeating_Substring.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"210167762","text":"import RPi.GPIO as GPIO\nimport time\nGPIO.setmode(GPIO.BCM) # set up the board\n\nTRIG = 23 ## pin constants\nECHO = 24\nLED = 25\n\nprint(\"Distance measurement in progress\")\n\nGPIO.setup(TRIG,GPIO.OUT)\nGPIO.setup(ECHO,GPIO.IN)\nGPIO.setup(LED,GPIO.OUT)\ndef calcul_distanta():\n GPIO.output(TRIG, False)\n GPIO.output(LED, False)\n print(\"Waiting 3 seconds for the sensor to calibrate\")\n time.sleep(3)\n\n GPIO.output(TRIG, True)\n time.sleep(0.00001) ##10 us\n GPIO.output(TRIG, False)\n\n while GPIO.input(ECHO)==0:\n pulse_start = time.time()\n\n while GPIO.input(ECHO)==1:\n pulse_end = time.time()\n\n pulse_duration = pulse_end - pulse_start\n\n distance=pulse_duration * 17150\n distance=round( distance, 2)\n\n if distance > 2 and distance < 400:\n print(\"The distance is:\",distance - 0.5,\"cm\")\n GPIO.output(LED, True)\n time.sleep(1)\n return distance\n else:\n print(\"Out of range\")\n\nGPIO.cleanup()","sub_path":"modul_senzor.py","file_name":"modul_senzor.py","file_ext":"py","file_size_in_byte":962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"227094511","text":"import serval\nimport imp\nimport sqlmaker\nimport managedb1\nimport citations\n\n\ninserts_of_serval = 'serval_inserts.sql'\ndata_of_serval = 'serval_data.xml'\n\ninserts_of_rh = 'employees_inserts.sql'\ndata_of_rh = 'rh.html'\n\nurls_citation = 'urls_citation_cut.txt'\ndata_citations = 'citations.xml'\nupdates_citations = 'citations_updates.sql'\n\n\ndef fetch_citations():\n imp.reload(citations)\n \n cits = citations.Citation() \n #cits.create_urls(urls_citation)\n r = cits.fetch_wos(data_citations) \n return r\n\ndef fetch_serval():\n imp.reload(serval)\n\n ser = serval.Serval(data_of_serval)\n ser.connect()\n ser.imp()\n ser.load_file()\n\n\ndef create_serval_inserts():\n\n imp.reload(sqlmaker)\n ssm = sqlmaker.ServalSqlMaker(data_of_serval, inserts_of_serval)\n ssm.servalinsertsfile()\n del ssm\n\ndef create_employees_inserts():\n\n imp.reload(sqlmaker)\n esm = sqlmaker.EmployeesSqlMaker(data_of_rh, inserts_of_rh)\n esm.createinsertsfile()\n\ndef create_citations_updates():\n imp.reload(sqlmaker)\n csm = 
sqlmaker.CitationSqlMaker(data_citations, updates_citations )\n csm.create_updatefile()\n\n \ndef load_serval():\n imp.reload(managedb1)\n \n lhost = 'localhost'\n luser = 'root'\n lpsw = 'peaceandlove'\n ldbn = 'publi_acad'\n ser_exceptions = []\n \n db = managedb1.mydb(lhost, luser, lpsw, ldbn)\n db.connect()\n ser_exceptions = db.import_sql(inserts_of_serval)\n db.close()\n \n return ser_exceptions\n\ndef load_employees():\n \"\"\" load employees into the DB\n \"\"\"\n imp.reload(managedb1)\n \n lhost = 'localhost'\n luser = 'root'\n lpsw = 'peaceandlove'\n ldbn = 'publi_acad'\n ser_exceptions = []\n \n db = managedb1.mydb(lhost, luser, lpsw, ldbn)\n db.connect()\n ser_exceptions = db.import_sql(inserts_of_rh)\n\n db.close()\n \n return ser_exceptions\n\ndef load_citations():\n\n\n imp.reload(managedb1)\n \n lhost = 'localhost'\n luser = 'root'\n lpsw = 'peaceandlove'\n ldbn = 'publi_acad'\n ser_exceptions = []\n \n db = managedb1.mydb(lhost, luser, lpsw, ldbn)\n db.connect()\n ser_exceptions = db.import_sql(updates_citations)\n\n db.close()\n \n return ser_exceptions\n\n\n\n\n\n\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"195317505","text":"# Create a class named Movie, which inherits from object.\nclass Movie(object):\n\n \"\"\" This class provides a way to store movie related information,\n as well as a self-updating list to keep track of its instances. \"\"\"\n\n # Creating a list to keep track of all instances of this class\n movies = []\n\n # Define the __init__ method, which takes the title, synopsis, box art,\n # trailer, director and release date for a Movie.\n\n def __init__(self, title, synopsis, poster_image_url, trailer_youtube_url,\n director, release_date):\n\n # Define the object's attributes as the arguments that are passed in\n # during instantiation.\n self.title = title\n self.synopsis = synopsis\n self.poster_image_url = poster_image_url\n self.trailer_youtube_url = trailer_youtube_url\n self.director = director\n self.release_date = release_date\n\n # Append each new instance of the class to the list movies.\n self.movies.append(self)\n","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"444257542","text":"from utils import batchify\nfrom losses import cross_entropy_loss\nimport os\nimport os.path as osp\nimport torch\nimport json\nimport math\nfrom utils import RandomStateContextManager, device\nfrom fully_connected import FullyConnectedClassifier\nfrom cnn import CNNClassifier\nfrom typing import *\n\n\ndef train_and_eval_fully_connected_model(X_train, y_train, X_test, y_test, class_names, save_dir, model_name,\n weight_decay=0., dropout_drop_probability=0.,\n optimizer_type=\"sgd\", learning_rate=0.001, sgd_momentum=0.9,\n init_type=\"xavier\", init_gaussian_std=0.01,\n hidden_size=256, num_hidden_layers=1, activation=\"relu\",\n epochs=100, batch_size=32, seed=34, num_reports=5):\n with RandomStateContextManager(seed):\n num_classes = len(class_names)\n input_size = X_train.shape[1]\n\n net = FullyConnectedClassifier(num_classes,\n input_size,\n hidden_size,\n num_hidden_layers,\n activation,\n init_type,\n init_gaussian_std,\n dropout_drop_probability)\n\n if optimizer_type.lower() == \"sgd\":\n optimizer = torch.optim.SGD(net.trainable_params(),\n 
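# weight_decay here is PyTorch's optimizer-level L2 penalty applied to every trainable parameter\n 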
lr=learning_rate,\n momentum=sgd_momentum,\n weight_decay=weight_decay)\n elif optimizer_type.lower() == \"adam\":\n optimizer = torch.optim.Adam(net.trainable_params(),\n lr=learning_rate)\n else:\n raise ValueError('optimizer_type must be one of [\"sgd\", \"adam\"]')\n\n metrics = fit_classifier(net,\n optimizer,\n X_train,\n y_train,\n epochs,\n batch_size,\n seed,\n X_test,\n y_test,\n num_reports)\n\n hyper_params = dict(\n model_name=model_name,\n optimizer_type=optimizer_type,\n dropout_drop_probability=dropout_drop_probability,\n weight_decay=weight_decay,\n init_gaussian_std=init_gaussian_std,\n learning_rate=learning_rate,\n sgd_momentum=sgd_momentum,\n epochs=epochs,\n num_classes=num_classes,\n input_size=input_size,\n hidden_size=hidden_size,\n num_hidden_layers=num_hidden_layers,\n activation=activation,\n init_type=init_type,\n batch_size=batch_size,\n seed=seed)\n\n save_model(net,\n metrics,\n hyper_params,\n model_name,\n save_dir)\n\n\ndef train_and_eval_cnn_model(X_train, y_train, X_test, y_test, class_names, save_dir, model_name,\n conv_layers_params,\n weight_decay=0., dropout_drop_probability=0.,\n optimizer_type=\"sgd\", learning_rate=0.005, sgd_momentum=0.9,\n init_type='xavier', init_gaussian_std=0.1,\n hidden_size=784, epochs=100, batch_size=32, seed=34, num_reports=5, residual_net=False):\n with RandomStateContextManager(seed):\n num_classes = len(class_names)\n input_size = X_train.shape[1]\n\n net = CNNClassifier(num_classes, hidden_size, conv_layers_params,\n init_type,\n init_gaussian_std,\n dropout_drop_probability, residual_net=residual_net)\n\n if optimizer_type.lower() == \"sgd\":\n optimizer = torch.optim.SGD(net.trainable_params(),\n lr=learning_rate,\n momentum=sgd_momentum,\n weight_decay=weight_decay)\n elif optimizer_type.lower() == \"adam\":\n optimizer = torch.optim.Adam(net.trainable_params(),\n lr=learning_rate)\n else:\n raise ValueError('optimizer_type must be one of [\"sgd\", \"adam\"]')\n\n metrics = fit_classifier(net,\n optimizer,\n X_train,\n y_train,\n epochs,\n batch_size,\n seed,\n X_test,\n y_test,\n num_reports)\n\n hyper_params = dict(\n model_name=model_name,\n optimizer_type=optimizer_type,\n dropout_drop_probability=dropout_drop_probability,\n weight_decay=weight_decay,\n init_gaussian_std=init_gaussian_std,\n learning_rate=learning_rate,\n sgd_momentum=sgd_momentum,\n epochs=epochs,\n num_classes=num_classes,\n input_size=input_size,\n hidden_size=hidden_size,\n init_type=init_type,\n batch_size=batch_size,\n conv_layers_params=conv_layers_params,\n seed=seed)\n\n save_model(net,\n metrics,\n hyper_params,\n model_name,\n save_dir)\n\n\ndef fit_classifier(net,\n optimizer,\n X_train,\n y_train,\n epochs,\n batch_size=32,\n seed=None,\n X_test=None,\n y_test=None,\n num_reports=10):\n metrics = {\n \"train_loss\": [],\n \"train_accuracy\": []\n }\n if (X_test is not None) and (y_test is not None):\n metrics.update({\n \"test_loss\": [],\n \"test_accuracy\": []\n })\n metrics.update({\n \"weights_l2\": [],\n \"grad_l2\": []\n })\n\n for curr_epoch in range(1, epochs + 1):\n epoch_seed = seed + curr_epoch if seed is not None else None\n batches = batchify(X_train, y_train, batch_size, seed=epoch_seed)\n\n for i_batch, (X_batch, y_batch) in enumerate(batches):\n # forward\n X_batch = torch.from_numpy(X_batch).float().to(device)\n y_batch = torch.from_numpy(y_batch).long().to(device)\n probs = net.forward(X_batch)\n loss = cross_entropy_loss(probs, y_batch)\n\n # gradient step\n loss.backward()\n optimizer.step()\n 
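# clear the gradients before the next batch; PyTorch accumulates them by default\n 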
optimizer.zero_grad()\n # if device == 'cuda':\n # torch.cuda.empty_cache()\n\n # update metrics\n weights_l2, grad_l2 = _calculate_l2_norms(optimizer)\n metrics[\"weights_l2\"].append(weights_l2)\n metrics[\"grad_l2\"].append(grad_l2)\n\n train_loss, train_accuracy = eval_classifier(net, X_train, y_train)\n metrics[\"train_loss\"].append(train_loss)\n metrics[\"train_accuracy\"].append(train_accuracy)\n\n if (X_test is not None) and (y_test is not None):\n test_loss, test_accuracy = eval_classifier(net, X_test, y_test)\n metrics[\"test_loss\"].append(test_loss)\n metrics[\"test_accuracy\"].append(test_accuracy)\n\n if _should_report_progress(curr_epoch, epochs, num_reports):\n _report_progress(curr_epoch, epochs, metrics)\n\n return metrics\n\n\ndef _should_report_progress(curr_epoch: int,\n epochs: int,\n num_reports: int):\n should_report_at_all = (num_reports > 0)\n report_every = math.ceil(epochs / num_reports)\n is_reportable_epoch = ((curr_epoch % report_every == 0)\n or (curr_epoch == 1)\n or (curr_epoch == epochs))\n should_report = should_report_at_all and is_reportable_epoch\n return should_report\n\n\ndef _report_progress(curr_epoch: int,\n epochs: int,\n metrics: Dict[str, List[float]]):\n curr_metrics = {name: values[-1] for name, values in metrics.items()}\n report = f\"epoch: {curr_epoch}/{epochs} | \"\n report += \" | \".join([f\"{name}: {value:.2f}\" for name, value in curr_metrics.items()])\n print(report)\n\n\ndef _calculate_l2_norms(optimizer) -> Tuple[float, float]:\n weights = optimizer.param_groups[0][\"params\"]\n flat_weights = torch.cat([torch.flatten(x) for x in weights])\n weights_l2 = torch.sqrt(torch.sum(flat_weights ** 2)).item()\n flat_grad = torch.cat([torch.flatten(x.grad) for x in weights])\n grad_l2 = torch.sqrt(torch.sum(flat_grad ** 2)).item()\n return weights_l2, grad_l2\n\n\ndef eval_classifier(net, X, y):\n X, y = torch.from_numpy(X).float().to(device), torch.from_numpy(y).long().to(device)\n probs = net.predict_proba(X)\n pred_labels = probs.argmax(axis=1)\n\n loss = cross_entropy_loss(probs, y).item()\n accuracy = (pred_labels == y).float().mean().item()\n return loss, accuracy\n\n\ndef save_model(net, metrics, hyper_params, model_name, save_dir):\n model_dir = osp.join(save_dir, model_name)\n os.makedirs(model_dir, exist_ok=True)\n\n with open(osp.join(model_dir, \"net.pkl\"), 'wb') as f:\n torch.save(net, f)\n\n with open(osp.join(model_dir, \"metrics.json\"), 'w') as f:\n json.dump(metrics, f, indent=2)\n\n with open(osp.join(model_dir, \"hyper_params.json\"), 'w') as f:\n json.dump(hyper_params, f, indent=2)\n","sub_path":"ex1/training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":9895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"112512981","text":"import numpy as np\nimport csv\nimport sys\ndef readtest(filedir):\n text = open(filedir, \"r\")\n rows = csv.reader(text, delimiter= \",\")\n x = list()\n for i, row in enumerate(rows):\n if i != 0:\n data = row[1].split(' ')\n x.append(list(map(int, data))) \n # if i == 3:\n # break\n\n #x = np.array(x, float)\n # x = np.concatenate((np.ones((x.shape[0],1)),x), axis=1)\n text.close()\n return np.array(x)\n\na = readtest(sys.argv[1])\nb = readtest(sys.argv[2])\nn = 0\nfor i,j in zip(a,b):\n if i - j != 0:\n n += 
1\nprint(n)","sub_path":"final/src/ensemblefile/150over2_0.50869/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"205939280","text":"\r\n\r\n\r\ndef e_sieve(value):\r\n ''''Sieve_of_Eratosthenes'\r\n Returns a sorted list of all the prime numbers below 'value'\r\n >>> print(e_sieve(20))\r\n [2, 3, 5, 7, 11, 13, 17, 19]\r\n '''\r\n # https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes\r\n prime_list = []\r\n p = 2\r\n prime_list.append(p)\r\n prime_list.append(5)\r\n for num in range(p,value):\r\n if num % p != 0 and num != p and num % 5 != 0 and num != 5:\r\n prime_list.append(num)\r\n\r\n p = 3\r\n while p*p < value:\r\n # rebuild the list instead of removing elements while iterating over it,\r\n # which would silently skip the element after each removal\r\n prime_list = [number for number in prime_list if number == p or number % p != 0]\r\n \r\n p +=1\r\n\r\n return sorted(prime_list)\r\n\r\n\r\nprint(e_sieve(1000))\r\n\r\n\r\n\r\n# for p in my_primes:\r\n# print(f\"{my_num} \\\\ {p} = {my_num/p}\")","sub_path":"Sieve_of_Eratosthenes.py","file_name":"Sieve_of_Eratosthenes.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"569103036","text":"import calendar\nfrom datetime import date\n\ndef meetup_day(year, month, day_str, num_str):\n\n cal = calendar.Calendar()\n\n day_index = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"].index(day_str)\n \n extract = [day for day, week_day in cal.itermonthdays2(year, month) if week_day == day_index and day > 0]\n\n if num_str == \"teenth\":\n for day in extract:\n if day > 12 and day < 20:\n return date(year, month, day)\n\n if num_str == \"last\":\n return date(year, month, extract[-1])\n\n num_index = [\"1st\", \"2nd\", \"3rd\", \"4th\"].index(num_str)\n \n return date(year, month, extract[num_index])\n \n \n","sub_path":"all_data/exercism_data/python/meetup/c289d5ed8fef412abb820b6634ef255f.py","file_name":"c289d5ed8fef412abb820b6634ef255f.py","file_ext":"py","file_size_in_byte":710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"36081165","text":"import os\nimport numpy as np\nimport pandas as pd\n\ndef str2bool(s):\n if s.lower() == 'true':\n return True\n elif s.lower() == 'false':\n return False\n else:\n raise ValueError('cannot convert {!r} to bool'.format(s))\n\ndef loadData(whichData, targetVar, seed=1):\n '''\n Loads the specified dataframe and, if not already done, \n splits up the data into training and test. 
\n Drops unneeded variables (non-numerical or non-boolean)\n \n Parameters:\n whichData: str ('house_geo_binominal' or 'data')\n targetVar: str (target variable)\n seed: int (specifies random seed)\n Return:\n data: pandas DataFrame (training data)\n target: pandas DataFrame (values of target variable)\n '''\n \n if not os.path.isfile('{}_train.csv'.format(whichData)):\n rawdataset = pd.read_csv('{}.csv'.format(whichData))\n rand_perm = rawdataset.sample(rawdataset.shape[0], random_state = seed) # random_state: seed for reproducability\n data_test = rand_perm[:5].sort_index()\n data_train = rand_perm[5:].sort_index()\n \n data_test.to_csv('{}_test.csv'.format(whichData), sep=',',header=True,)\n data_train.to_csv('{}_train.csv'.format(whichData), sep=',',header=True,)\n \n dataset = pd.read_csv('{}_train.csv'.format(whichData))\n try:\n dataset = dataset.drop(labels='Unnamed: 0', axis=1)\n except Exception:\n pass\n try:\n dataset = dataset.drop(labels='Id', axis=1)\n except Exception:\n pass\n \n if whichData == 'house_geo_binominal':\n # process non-numerical data\n for feature in dataset.columns:\n if not dataset[feature].dtype == np.float64 \\\n and not dataset[feature].dtype == np.int64 \\\n and not dataset[feature].dtype == bool:\n try:\n dataset[feature] = dataset[feature].apply(str2bool)\n except ValueError:\n print('feature {} is neither numerical nor could be converted to boolean\\n'.format(feature) +\n 'feature {} will be removed from dataset'.format(feature))\n dataset = dataset.drop(feature, axis=1)\n \n # to make all data non-negative\n dataset['Hood_Y'] = dataset['Hood_Y'].abs()\n \n target = dataset[targetVar]\n data = dataset.drop(targetVar, axis=1)\n return data, target\n","sub_path":"loadData.py","file_name":"loadData.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"567032308","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"This script runs experiments on the sample BEL and *-omics* data then uploads it.\n\nPrerequisites\n-------------\n1. Must have run load_omics.py\n2. Must have run load_networks.py\n\"\"\"\n\nimport json\nimport logging\nimport os\nfrom typing import Any, Dict, List, Mapping, Optional\n\nfrom bel_commons.manager_utils import run_heat_diffusion_helper\nfrom bel_commons.models import Experiment, Omic, Query\nfrom bel_commons.resources.constants import BMS_BASE, OMICS_DATA_DIR\nfrom pybel.manager import Manager\n\nlogger = logging.getLogger(__name__)\n\n\n# 1. build query from all of alzheimer's disease using manifest from AD folder\n# 2. 
run experiment and upload\n\ndef get_manifest(directory: str) -> List[Dict]:\n \"\"\"Get the manifest from a directory.\"\"\"\n manifest_path = os.path.join(directory, 'manifest.json')\n if not os.path.exists(manifest_path):\n raise RuntimeError('manifest missing from {}'.format(directory))\n\n with open(manifest_path) as f:\n return json.load(f)\n\n\ndef build_query(directory: str, manager: Manager) -> Query:\n \"\"\"Build a query for the Alzheimer's disease network.\n\n :param directory: Directory containing *-omics* data and a manifest\n \"\"\"\n manifest = get_manifest(directory)\n\n network_ids = [\n network['id']\n for network in manifest\n ]\n\n query = Query.from_query_args(manager=manager, network_ids=network_ids)\n query.public = True\n manager.session.add(query)\n manager.session.commit()\n\n return query\n\n\ndef create_experiment(\n query: Query,\n directory: str,\n manager: Manager,\n permutations: Optional[int] = None,\n) -> List[Experiment]:\n \"\"\"Create experiment models.\n\n :param query:\n :param directory: the directory of -*omics* data resources\n :param manager:\n :param permutations: Number of permutations to run (defaults to 200)\n \"\"\"\n omics_manifest = get_manifest(directory)\n\n return [\n Experiment(\n public=True,\n omic=manager.session.query(Omic).get(omic_metadata['id']),\n query=query,\n permutations=permutations or 200,\n )\n for omic_metadata in omics_manifest\n ]\n\n\ndef upload_experiments(experiments: List[Experiment], manager: Manager):\n \"\"\"Upload experiments models.\"\"\"\n logger.info('adding experiments to session')\n manager.session.add_all(experiments)\n\n logger.info('committing experiments')\n manager.session.commit()\n\n\ndef run_experiments(\n experiments: List[Experiment],\n manager: Manager,\n use_tqdm: bool = True,\n tqdm_kwargs: Optional[Mapping[str, Any]] = None,\n) -> None:\n \"\"\"Run experiments and commits after each.\"\"\"\n logger.info('running %d experiments', len(experiments))\n\n for experiment in experiments:\n run_heat_diffusion_helper(manager, experiment, use_tqdm=use_tqdm, tqdm_kwargs=tqdm_kwargs)\n logger.info('done in %.2f seconds', experiment.time)\n manager.session.add(experiment)\n manager.session.commit()\n\n\ndef work_directory(\n query: Query,\n omic_directory: str,\n manager: Manager,\n permutations: Optional[int] = None,\n use_tqdm: bool = True,\n) -> None:\n \"\"\"Make models, upload, and run experiments for all data in a given directory.\"\"\"\n logger.info(f'making experiments for directory: {omic_directory}')\n experiments = create_experiment(query, directory=omic_directory, manager=manager, permutations=permutations)\n\n logger.info(f'uploading experiments for directory: {omic_directory}')\n upload_experiments(experiments, manager=manager)\n\n logger.info(f'running experiments for directory: {omic_directory}')\n run_experiments(experiments, manager=manager, use_tqdm=use_tqdm)\n\n\ndef work_group(\n network_directory: str,\n omics_directories: List[str],\n manager: Manager,\n permutations: Optional[int] = None,\n) -> None:\n \"\"\"Make models, upload, and run experiments for all data in several directories.\n\n :param network_directory:\n :param omics_directories:\n :param manager: database connection string to cache, pre-built :class:`Manager`, or None to use default cache\n :param permutations: Number of permutations to run (defaults to 200)\n \"\"\"\n query = build_query(directory=network_directory, manager=manager)\n logger.info('made query %s for %s', query, network_directory)\n\n for 
omic_directory in omics_directories:\n work_directory(\n query=query,\n omic_directory=omic_directory,\n manager=manager,\n permutations=permutations,\n )\n\n\ndef main(manager: Manager, permutations: int = 25):\n \"\"\"Run the experiments and uploads them.\"\"\"\n network_directory = os.path.join(BMS_BASE, 'aetionomy', 'neurommsig')\n\n gse1297_directory = os.path.join(OMICS_DATA_DIR, 'GSE1297')\n gse28146_directory = os.path.join(OMICS_DATA_DIR, 'GSE28146')\n gse63063_directory = os.path.join(OMICS_DATA_DIR, 'GSE63063')\n\n omics_directories = [\n gse1297_directory,\n gse28146_directory,\n gse63063_directory,\n ]\n\n work_group(\n network_directory=network_directory,\n omics_directories=omics_directories,\n manager=manager,\n permutations=permutations,\n )\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO, format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\")\n logger.setLevel(logging.INFO)\n main(Manager())\n","sub_path":"src/bel_commons/resources/load_experiments.py","file_name":"load_experiments.py","file_ext":"py","file_size_in_byte":5427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"378902324","text":"# -*- coding: utf-8 -*-\n#\n# PySceneDetect: Python-Based Video Scene Detector\n# ---------------------------------------------------------------\n# [ Site: http://www.scenedetect.scenedetect.com/ ]\n# [ Docs: http://manual.scenedetect.scenedetect.com/ ]\n# [ Github: https://github.com/Breakthrough/PySceneDetect/ ]\n#\n# Copyright (C) 2014-2022 Brandon Castellano .\n# PySceneDetect is licensed under the BSD 3-Clause License; see the\n# included LICENSE file, or visit one of the above pages for details.\n#\n\"\"\" PySceneDetect Scene Detection Tests\n\nThese tests ensure that the detection algorithms deliver consistent\nresults by using known ground truths of scene cut locations in the\ntest case material.\n\"\"\"\n\nimport time\n\nfrom scenedetect import detect, SceneManager, FrameTimecode, StatsManager\nfrom scenedetect.detectors import AdaptiveDetector, ContentDetector, ThresholdDetector\nfrom scenedetect.backends.opencv import VideoStreamCv2\n\n# TODO(v1.0): Parameterize these tests like VideoStreams are.\n# Current test output cannot be used for profiling cases which iterate over multiple detectors.\n\n# TODO(v1.0): Add new test video.\n\nTEST_MOVIE_CLIP_GROUND_TRUTH_CONTENT = [(30, [1199, 1226, 1260, 1281, 1334, 1365, 1697, 1871]),\n (27, [1199, 1226, 1260, 1281, 1334, 1365, 1590, 1697,\n 1871])]\n\"\"\"Ground truth for `test_movie_clip` with ContentDetector as (threshold, [scene start frame]).\"\"\"\n\nTEST_VIDEO_FILE_GROUND_TRUTH_THRESHOLD = [0, 15, 198, 376]\n\"\"\"Results for `test_video_file` with default ThresholdDetector values.\"\"\"\n\n\ndef test_detect(test_video_file):\n \"\"\" Test scenedetect.detect and ThresholdDetector. \"\"\"\n scene_list = detect(video_path=test_video_file, detector=ThresholdDetector())\n assert len(scene_list) == len(TEST_VIDEO_FILE_GROUND_TRUTH_THRESHOLD)\n detected_start_frames = [timecode.get_frames() for timecode, _ in scene_list]\n assert all(\n x == y for (x, y) in zip(TEST_VIDEO_FILE_GROUND_TRUTH_THRESHOLD, detected_start_frames))\n\n\ndef test_content_detector(test_movie_clip):\n \"\"\" Test SceneManager with VideoStreamCv2 and ContentDetector. 
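Detected cut frames are checked against TEST_MOVIE_CLIP_GROUND_TRUTH_CONTENT for each threshold. 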
\"\"\"\n for threshold, start_frames in TEST_MOVIE_CLIP_GROUND_TRUTH_CONTENT:\n video = VideoStreamCv2(test_movie_clip)\n scene_manager = SceneManager()\n scene_manager.add_detector(ContentDetector(threshold=threshold))\n\n video_fps = video.frame_rate\n start_time = FrameTimecode('00:00:50', video_fps)\n end_time = FrameTimecode('00:01:19', video_fps)\n\n video.seek(start_time)\n scene_manager.auto_downscale = True\n\n scene_manager.detect_scenes(video=video, end_time=end_time)\n scene_list = scene_manager.get_scene_list()\n assert len(scene_list) == len(start_frames)\n detected_start_frames = [timecode.get_frames() for timecode, _ in scene_list]\n assert start_frames == detected_start_frames\n\n\ndef test_adaptive_detector(test_movie_clip):\n \"\"\" Test SceneManager with VideoStreamCv2 and AdaptiveDetector. \"\"\"\n # We use the ground truth of ContentDetector with threshold=27.\n start_frames = TEST_MOVIE_CLIP_GROUND_TRUTH_CONTENT[1][1]\n video = VideoStreamCv2(test_movie_clip)\n scene_manager = SceneManager()\n assert scene_manager.stats_manager is None\n # The SceneManager should implicitly create a StatsManager since this\n # detector requires it.\n scene_manager.add_detector(AdaptiveDetector())\n assert scene_manager.stats_manager is not None\n scene_manager.auto_downscale = True\n\n video_fps = video.frame_rate\n start_time = FrameTimecode('00:00:50', video_fps)\n end_time = FrameTimecode('00:01:19', video_fps)\n\n video.seek(start_time)\n scene_manager.detect_scenes(video=video, end_time=end_time)\n\n scene_list = scene_manager.get_scene_list()\n assert len(scene_list) == len(start_frames)\n detected_start_frames = [timecode.get_frames() for timecode, _ in scene_list]\n assert start_frames == detected_start_frames\n\n\ndef test_threshold_detector(test_video_file):\n \"\"\" Test SceneManager with VideoStreamCv2 and ThresholdDetector. \"\"\"\n video = VideoStreamCv2(test_video_file)\n scene_manager = SceneManager()\n scene_manager.add_detector(ThresholdDetector())\n scene_manager.auto_downscale = True\n scene_manager.detect_scenes(video)\n scene_list = scene_manager.get_scene_list()\n assert len(scene_list) == len(TEST_VIDEO_FILE_GROUND_TRUTH_THRESHOLD)\n detected_start_frames = [timecode.get_frames() for timecode, _ in scene_list]\n assert all(\n x == y for (x, y) in zip(TEST_VIDEO_FILE_GROUND_TRUTH_THRESHOLD, detected_start_frames))\n\n\ndef test_detectors_with_stats(test_video_file):\n \"\"\" Test all detectors functionality with a StatsManager. 
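Each detector is run twice with the same StatsManager; the second, timed pass reuses the cached frame metrics and must find the same number of scenes. 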
\"\"\"\n # TODO(v1.0): Parameterize this test case (move fixture from cli to test config).\n for detector in [ContentDetector, ThresholdDetector, ThresholdDetector]:\n video = VideoStreamCv2(test_video_file)\n stats = StatsManager()\n scene_manager = SceneManager(stats_manager=stats)\n scene_manager.add_detector(detector())\n scene_manager.auto_downscale = True\n end_time = FrameTimecode('00:00:15', video.frame_rate)\n benchmark_start = time.time()\n scene_manager.detect_scenes(video=video, end_time=end_time)\n benchmark_end = time.time()\n time_no_stats = benchmark_end - benchmark_start\n initial_scene_len = len(scene_manager.get_scene_list())\n assert initial_scene_len > 0 # test case must have at least one scene!\n # Re-analyze using existing stats manager.\n scene_manager = SceneManager(stats_manager=stats)\n scene_manager.add_detector(detector())\n\n video.reset()\n scene_manager.auto_downscale = True\n\n benchmark_start = time.time()\n scene_manager.detect_scenes(video=video, end_time=end_time)\n benchmark_end = time.time()\n time_with_stats = benchmark_end - benchmark_start\n scene_list = scene_manager.get_scene_list()\n assert len(scene_list) == initial_scene_len\n\n print(\"--------------------------------------------------------------------\")\n print(\"StatsManager Benchmark For %s\" % (detector.__name__))\n print(\"--------------------------------------------------------------------\")\n print(\"No Stats:\\t%2.1fs\" % time_no_stats)\n print(\"With Stats:\\t%2.1fs\" % time_with_stats)\n print(\"--------------------------------------------------------------------\")\n","sub_path":"tests/test_detectors.py","file_name":"test_detectors.py","file_ext":"py","file_size_in_byte":6636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"632236404","text":"#-*- coding: utf-8 -*-\n\n# Copyright 2008-2012 Calculate Ltd. 
http://www.calculate-linux.org\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom files import ( process, checkUtils, readFile, listDirectory,readLinesFile, \n getRunCommands )\nimport sys,os\nimport re\nimport struct,fcntl,socket,math,ctypes\nfrom os import path\nimport select, time\n\nfrom calculate.lib.cl_lang import setLocalTranslate\nsetLocalTranslate('cl_lib3',sys.modules[__name__])\n\nSYSFS_NET_PATH = \"/sys/class/net\"\nPROCFS_NET_PATH = \"/proc/net/dev\"\n\n# From linux/sockios.h\nSIOCGIFINDEX = 0x8933\nSIOCGIFFLAGS = 0x8913\nSIOCSIFFLAGS = 0x8914\nSIOCSIFHWADDR = 0x8924\nSIOCSIFADDR = 0x8916\nSIOCSIFNETMASK = 0x891C\nSIOCETHTOOL = 0x8946\n\nSIOCGIFADDR = 0x8915\nSIOCGIFNETMASK = 0x891B\nSIOCGIFHWADDR = 0x8927\n\n# ip digit from 0|1-255|254 (template)\nIP_DIG = \"[%s-9]|(?:1[0-9]|[1-9])[0-9]|2[0-4][0-9]|25[0-%s]\"\n# ip net 0-32\nIP_NET_SUFFIX = \"[0-9]|[12][0-9]|3[012]\"\n# ip digs 1-254,0-254,0-255\nIP_DIGS = { 'dig1_254' : IP_DIG % (1,4), 'dig0_254' : IP_DIG % (0,4),\n 'dig0_255' : IP_DIG % (0,5), }\n# ip addr 10.0.0.12\nIP_ADDR = \"(%(dig1_254)s)\\.(%(dig0_254)s)\\.(%(dig0_254)s)\\.(%(dig1_254)s)\"%\\\n IP_DIGS\nIP_MASK = \"(%(dig0_255)s)\\.(%(dig0_255)s)\\.(%(dig0_255)s)\\.(%(dig0_255)s)\"%\\\n IP_DIGS\n# ip addr for net 10.0.0.0\nIP_NET = \"(%(dig1_254)s)\\.(%(dig0_254)s)\\.(%(dig0_254)s)\\.(%(dig0_254)s)\"%\\\n IP_DIGS\n# ip and net 192.168.0.0/16\nIP_ADDR_NET = \"(%(ipaddr)s)/((%(ipnet)s))\"%{'ipaddr':IP_NET,\n 'ipnet':IP_NET_SUFFIX}\n\nreIp = re.compile(\"^{0}$\".format(IP_ADDR))\nreNetSuffix = re.compile(\"^{0}$\".format(IP_NET_SUFFIX))\nreNet = re.compile(\"^{0}$\".format(IP_ADDR_NET))\nreMask = re.compile(\"^{0}$\".format(IP_MASK))\n\ndef checkIp(ip):\n \"\"\"Check ip\"\"\"\n return reIp.match(ip)\n\ndef checkNetSuffix(netSuffix):\n \"\"\"Check net suffix\"\"\"\n return reNetSuffix.match(netSuffix)\n\ndef checkNet(net):\n \"\"\"Check net\"\"\"\n if not reNet.match(net):\n return False\n ip,op,cidr = net.partition('/')\n mask = strIpToIntIp(cidrToMask(int(cidr)))\n return (strIpToIntIp(ip)&mask) == (strIpToIntIp(ip))\n\nmaskDigs = map(lambda x:str(x),(0b10000000,0b11000000,0b11100000,0b11110000,\n 0b11111000,0b11111100,0b11111110,0b11111111))\n\ndef checkMask(mask):\n \"\"\"Check net\"\"\"\n if not mask:\n return False\n if mask.count('.') != 3:\n return False\n zero = False\n for dig in mask.split('.'):\n if zero or not dig in maskDigs:\n if dig == \"0\":\n zero = True\n else:\n return False\n return True\n\ndef getIpAndMask(interface=\"eth0\"):\n \"\"\"Get ip and mask from interface\"\"\"\n ifconfig = process('/sbin/ifconfig',interface)\n res = re.search(r\"inet addr:(\\S+)\\s.*Mask:(\\S+)\",ifconfig.read(),re.S)\n if res:\n return res.groups()\n else:\n return (\"\",\"\")\n\ndef strIpToIntIp(addr):\n \"\"\"Convert ip specified by string to integer\"\"\"\n addr = addr.split('.')\n return ((int(addr[0])<<24)|\n (int(addr[1])<<16)|\n (int(addr[2])<<8)|\n (int(addr[3])))\n return reduce(lambda x,y:x+(int(y[1])<<(y[0]*8)),\n 
enumerate(reversed(addr.split(\".\"))),0)\n\ndef intIpToStrIp(addr):\n \"\"\"Convert ip specified by integer to string\"\"\"\n return \"{0}.{1}.{2}.{3}\".format(\n addr>>24,(addr>>16)&0xff,(addr>>8)&0xff,addr&0xff)\n\ndef maskToCidr(mask):\n \"\"\"Convert mask specified by str to net\"\"\"\n mask = strIpToIntIp(mask)\n return 32-int(math.log(((~mask) & 0xffffffff)+1,2))\n\ndef cidrToMask(cidr):\n \"\"\"Convert net to mask specified by str\"\"\"\n return intIpToStrIp((2**cidr-1)<<(32-cidr))\n\ndef getIpNet(ip,mask=None,cidr=None):\n \"\"\"Get net (xx.xx.xx.xx/xx) by ip address and mask\"\"\"\n ip = strIpToIntIp(ip)\n if not mask is None:\n net = maskToCidr(mask)\n else:\n net = int(cidr)\n mask = cidrToMask(net)\n mask = strIpToIntIp(mask)\n return \"{ip}/{net}\".format(ip=intIpToStrIp(ip&mask),\n net=net)\n\ndef isIpInNet(checkip,*ipnets):\n \"\"\"Check is ip in specified nets\"\"\"\n return map(lambda x:x[0],\n filter(lambda x:strIpToIntIp(checkip)&x[2] == strIpToIntIp(x[1])&x[2],\n map(lambda x:(x[0],x[1][0],strIpToIntIp(cidrToMask(int(x[1][1])))),\n map(lambda x:(x,x.partition('/')[0::2]),\n ipnets))))\n\ndef isDhcpIp(interface=\"eth0\"):\n \"\"\"Get ip by dhcp or static\"\"\"\n # dhclients (dhcpcd, dhclient (dhcp), udhcpc (busybox)\n dhcpProgs = (\"dhcpcd\",\"dhclient\",\"udhcpc\")\n if filter(lambda x:interface in x and any(prog in x for prog in dhcpProgs),\n getRunCommands()):\n return True\n else:\n return False\n\ndef getRouteTable(onlyIface=[]):\n \"\"\"Get route table, exclude specifed iface\"\"\"\n ipProg = checkUtils('/sbin/ip')\n routes = process(ipProg,\"route\")\n if onlyIface:\n filterRe = re.compile(\"|\".join(map(lambda x:r\"dev %s\"%x,onlyIface)))\n routes = filter(filterRe.search,routes)\n for line in routes:\n network,op,line = line.partition(\" \")\n routeParams = map(lambda x:x.strip(),line.split())\n # (network,{'via':value,'dev':value})\n if network:\n yield (network,dict(zip(routeParams[0::2],routeParams[1::2])))\n\ndef getInterfaces():\n \"\"\"\n Get available interfaces (discard which hasn't device)\n \"\"\"\n return filter(lambda x:path.exists(path.join(SYSFS_NET_PATH,x,\"device\")),\n listDirectory(SYSFS_NET_PATH))\n\ndef getIp(iface):\n sockfd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n ifreq = struct.pack('16sH14s', iface, socket.AF_INET, '\\x00'*14)\n try:\n res = fcntl.ioctl(sockfd, SIOCGIFADDR, ifreq)\n except IOError:\n return \"\"\n finally:\n sockfd.close()\n ip = struct.unpack('16sH2x4s8x', res)[2]\n return socket.inet_ntoa(ip)\n\ndef getMask(iface):\n \"\"\"\n Get mask for interface\n \"\"\"\n sockfd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n ifreq = struct.pack('16sH14s', iface, socket.AF_INET, '\\x00'*14)\n try:\n res = fcntl.ioctl(sockfd, SIOCGIFNETMASK, ifreq)\n except IOError:\n return 0\n finally:\n sockfd.close()\n netmask = socket.ntohl(struct.unpack('16sH2xI8x', res)[2])\n return 32 - int(math.log(ctypes.c_uint32(~netmask).value + 1, 2))\n\ndef getMac(iface):\n \"\"\"\n Get mac for interface\n \"\"\"\n sockfd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n ifreq = struct.pack('16sH14s', iface, socket.AF_UNIX, '\\x00'*14)\n res = fcntl.ioctl(sockfd, SIOCGIFHWADDR, ifreq)\n address = struct.unpack('16sH14s', res)[2]\n mac = struct.unpack('6B8x', address)\n sockfd.close()\n return \":\".join(['%02X' % i for i in mac])\n\ndef getOperState(iface):\n \"\"\"\n Get interface state up or down\n \"\"\"\n if readFile(\"/sys/class/net/%s/operstate\"%iface) == \"down\":\n return \"down\"\n return \"up\"\n\ndef 
isOpenPort(ip,port):\n \"\"\"\n Test if an [ip:port] is open\n \"\"\"\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n s.connect((ip,int(port)))\n s.shutdown(2)\n return True\n except:\n return False\n\nclass IPError(Exception):\n \"\"\"Error received on work with ip\"\"\"\n pass\n \nclass Pinger:\n # ICMP parameters\n ICMP_ECHO = 8 # Echo request (per RFC792)\n ICMP_MAX_RECV = 2048 # Max size of incoming buffer\n\n def checksum(self,source_string):\n \"\"\"\n A port of the functionality of in_cksum() from ping.c\n Ideally this would act on the string as a series of 16-bit ints (host\n packed), but this works.\n Network data is big-endian, hosts are typically little-endian\n \"\"\"\n countTo = (int(len(source_string)/2))*2\n sum = 0\n count = 0\n\n # Handle bytes in pairs (decoding as short ints)\n loByte = 0\n hiByte = 0\n while count < countTo:\n if (sys.byteorder == \"little\"):\n loByte = ord(source_string[count])\n hiByte = ord(source_string[count + 1])\n else:\n loByte = ord(source_string[count + 1])\n hiByte = ord(source_string[count])\n sum = sum + ((hiByte) * 256 + (loByte))\n count += 2\n\n # Handle last byte if applicable (odd-number of bytes)\n # Endianness should be irrelevant in this case\n if countTo < len(source_string): # Check for odd length\n loByte = ord(source_string[len(source_string)-1])\n sum += loByte\n\n sum &= 0xffffffff #Truncate sum to 32 bits (a variance from ping.c,which\n #uses signed ints, but overflow is unlikely in ping)\n\n sum = (sum >> 16) + (sum & 0xffff) # Add high 16 bits to low 16 bits\n sum += (sum >> 16) # Add carry from above (if any)\n answer = ~sum & 0xffff # Invert and truncate to 16 bits\n answer = socket.htons(answer)\n\n return answer\n\n def ping(self, destIP, timeout, numDataBytes):\n \"\"\"\n Returns either the delay (in ms) or None on timeout.\n \"\"\"\n delay = None\n\n try: # One could use UDP here, but it's obscure\n mySocket = socket.socket(socket.AF_INET, socket.SOCK_RAW,\n socket.getprotobyname(\"icmp\"))\n except socket.error as e:\n raise IPError(_(\"failed. (socket error: '%s')\" % e.args[1]))\n\n my_ID = os.getpid() & 0xFFFF\n\n sentTime = self.send_one_ping(mySocket, destIP, my_ID, 0,\n numDataBytes)\n if sentTime == None:\n mySocket.close()\n return delay\n\n recvTime, dataSize, iphSrcIP, icmpSeqNumber, iphTTL = \\\n self.receive_one_ping(mySocket, my_ID, timeout)\n\n mySocket.close()\n\n if recvTime:\n delay = (recvTime-sentTime)*1000\n return (dataSize,socket.inet_ntoa(struct.pack(\"!I\", iphSrcIP)),\n iphTTL,delay)\n else:\n raise IPError(_(\"Request timed out\"))\n\n def send_one_ping(self,mySocket, destIP, myID, mySeqNumber, numDataBytes):\n \"\"\"\n Send one ping to the given >destIP<.\n \"\"\"\n destIP = socket.gethostbyname(destIP)\n\n # Header is type (8), code (8), checksum (16), id (16), sequence (16)\n myChecksum = 0\n\n # Make a dummy heder with a 0 checksum.\n header = struct.pack(\n \"!BBHHH\", self.ICMP_ECHO, 0, myChecksum, myID, mySeqNumber\n )\n\n padBytes = []\n startVal = 0x42\n for i in range(startVal, startVal + (numDataBytes)):\n padBytes += [(i & 0xff)] # Keep chars in the 0-255 range\n data = bytes(padBytes)\n\n # Calculate the checksum on the data and the dummy header.\n # Checksum is in network order\n myChecksum = self.checksum(header + data)\n\n # Now that we have the right checksum, we put that in. 
It's just easier\n # to make up a new header than to stuff it into the dummy.\n header = struct.pack(\n \"!BBHHH\", self.ICMP_ECHO, 0, myChecksum, myID, mySeqNumber\n )\n\n packet = header + data\n\n sendTime = time.time()\n\n try:\n # Port number is irrelevant for ICMP\n mySocket.sendto(packet, (destIP, 1))\n except socket.error as e:\n raise IPError(\"General failure (%s)\" % (e.args[1]))\n\n return sendTime\n\n def receive_one_ping(self, mySocket, myID, timeout):\n \"\"\"\n Receive the ping from the socket. Timeout = in ms\n \"\"\"\n timeLeft = timeout/1000.0\n\n while True: # Loop while waiting for packet or timeout\n startedSelect = time.time()\n whatReady = select.select([mySocket], [], [], timeLeft)\n howLongInSelect = (time.time() - startedSelect)\n if whatReady[0] == []: # Timeout\n return None, 0, 0, 0, 0\n\n timeReceived = time.time()\n\n recPacket, addr = mySocket.recvfrom(self.ICMP_MAX_RECV)\n\n ipHeader = recPacket[:20]\n iphVersion, iphTypeOfSvc, iphLength, \\\n iphID, iphFlags, iphTTL, iphProtocol, \\\n iphChecksum, iphSrcIP, iphDestIP = struct.unpack(\n \"!BBHHHBBHII\", ipHeader\n )\n\n icmpHeader = recPacket[20:28]\n icmpType, icmpCode, icmpChecksum, \\\n icmpPacketID, icmpSeqNumber = struct.unpack(\n \"!BBHHH\", icmpHeader\n )\n\n if icmpPacketID == myID: # Our packet\n dataSize = len(recPacket) - 28\n return timeReceived, dataSize, iphSrcIP, icmpSeqNumber, iphTTL\n\n timeLeft = timeLeft - howLongInSelect\n if timeLeft <= 0:\n return None, 0, 0, 0, 0\n","sub_path":"calculate/lib/utils/ip.py","file_name":"ip.py","file_ext":"py","file_size_in_byte":13360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"288281523","text":"# -*- coding: utf-8 -*-\n\n# EFILTER Forensic Query Language\n#\n# Copyright 2015 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"EFILTER abstract type system.\n\nThe repeated protocol concerns itself with variables that have more than one\nvalue, such as repeated fields on protocol buffers.\n\"\"\"\n\nfrom efilter import dispatch\nfrom efilter import protocol\n\nfrom efilter.protocols import counted\n\n# Declarations:\n# pylint: disable=unused-argument\n\n\n@dispatch.multimethod\ndef repeated(*values):\n \"\"\"Build a repeated variable from values.\n\n Repeated values usually [1] preserve order and always allow a single value\n to appear more than once. 
Order of repeated values is NOT significant even\n when it is preserved.\n\n 1: Order is always preserved for repetead values created with 'repeated' or\n 'meld' but not for repeated values created with other functions.\n \"\"\"\n return values\n\n\ndef meld(*values):\n \"\"\"Return the repeated value, or the first value if there's only one.\n\n This is a convenience function, equivalent to calling\n getvalue(repeated(x)) to get x.\n\n This function skips over instances of None in values (None is not allowed\n in repeated variables).\n\n Examples:\n meld(\"foo\", \"bar\") # => ListRepetition(\"foo\", \"bar\")\n meld(\"foo\", \"foo\") # => ListRepetition(\"foo\", \"foo\")\n meld(\"foo\", None) # => \"foo\"\n meld(None) # => None\n \"\"\"\n values = [x for x in values if x is not None]\n if not values:\n return None\n\n if len(values) == 1:\n return values[0]\n\n return values\n\n\n@dispatch.multimethod\ndef lazy(generator_func):\n \"\"\"Return a lazy repeated value of 'generator_func', which must be stable.\n\n For large datasets, it's useful to use lazy repeated values, because they\n avoid storing all the values of the repetition in memory.\n\n EFILTER ships a default implementation of this multimethod, found in\n efilter.ext.lazy_repetition.\n\n Arguments:\n generator_func: A function that returns a generator of the values that\n constitute this repeated value.\n\n IMPORTANT: This function MUST be stable, meaning the values in the\n generator MUST be the same each time the function is called.\n \"\"\"\n raise NotImplementedError()\n\n\n@dispatch.multimethod\ndef lines(fd):\n \"\"\"Return a lazy repeated value of lines in 'fd' which is a File object.\n\n EFILTER ships a default implementation of this multimethod, found in\n efilter.ext.line_reader.\n\n Argument:\n fd: A File object that represents a text file.\n \"\"\"\n raise NotImplementedError()\n\n\n@dispatch.multimethod\ndef getvalues(x):\n \"\"\"Return a collection of the values of x.\"\"\"\n raise NotImplementedError()\n\n\ndef getvalue(x):\n \"\"\"Return the single value of x or the first value in the list.\"\"\"\n if not isrepeating(x):\n return x\n\n for value in getvalues(x):\n return value\n\n\n@dispatch.multimethod\ndef isrepeating(x):\n \"\"\"Optional: Is x a repeated var?\"\"\"\n return isinstance(x, IRepeated)\n\n\nclass IRepeated(protocol.Protocol):\n _required_functions = (getvalues,)\n _optional_functions = (isrepeating,)\n\n\n# If you're repeated, you automatically implement ICounted.\ncounted.ICounted.implement(\n for_type=IRepeated,\n implementations={\n counted.count: lambda r: len(getvalues(r))\n }\n)\n\n\n# Implementation for scalars:\n# pylint: disable=unnecessary-lambda\nIRepeated.implement(\n for_type=protocol.AnyType,\n implementations={\n getvalues: lambda x: (x,) if x is not None else (),\n }\n)\n\n\nIRepeated.implement(\n for_types=(tuple, list),\n implementations={\n getvalues: lambda x: x,\n }\n)\n","sub_path":"efilter/protocols/repeated.py","file_name":"repeated.py","file_ext":"py","file_size_in_byte":4203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"408162637","text":"# 2018-12-23\n# Screen and event control\n\nfrom flask import Flask, Response, request\nfrom PIL import ImageGrab\nfrom io import BytesIO\nimport logging\nimport pyautogui\n\napp = Flask(__name__)\n\nlogger = logging.getLogger('werkzeug')\nlogger.disabled = True\napp.logger.disabled = True\n\nwith open('2.html', 'r') as f:\n index_html = f.read()\n\n@app.route('/', 
methods=['GET'])\ndef index():\n return index_html\n\ndef capture_screen():\n while True:\n im = ImageGrab.grab()\n with BytesIO() as out:\n im.save(out, 'JPEG')\n im = out.getvalue()\n yield b'--frame\\r\\nContent-Type: image/jpeg\\r\\n\\r\\n' + im + b'\\r\\n'\n\n@app.route('/screen', methods=['GET'])\ndef screen():\n return Response(capture_screen(),\n mimetype='multipart/x-mixed-replace; boundary=frame')\n\n@app.route('/event', methods=['POST'])\ndef event():\n event = request.json\n print(event)\n buttons = ['left', 'middle', 'right']\n if event['type'] == 'mousemove':\n pyautogui.moveTo(event['x'], event['y'])\n elif event['type'] == 'click':\n pyautogui.click(\n x=event['x'], y=event['y'],\n button=buttons[event['button']])\n elif event['type'] == 'keydown':\n pyautogui.keyDown(event['key'])\n elif event['type'] == 'keyup':\n pyautogui.keyUp(event['key'])\n return ''\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=5000, debug=True)\n","sub_path":"RemoteDesktop/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"282281954","text":"#!/usr/bin/python3\nimport logging\nlogger = logging.getLogger(__name__)\nlogger.debug('Importing')\nimport cv2\nimport cv2.aruco\nimport numpy as np\nlogger.debug('Done')\n\nclass StereoCalibrator:\n def __init__(self, calPatternDims=(8, 8), calDotSpacingMm=(25.877, 25.877), detector=None):\n \"\"\"\n calPatternDims : (int, int) - the rows,cols indicating the number of dots on the target\n calDotSpacingMm : (float,float) - in x,y, the number of millimeters between dots in x and y\n detector : a cv2.SimpleBlobDetector, or None for the default\n \"\"\"\n self._cal_target_dot_det = self.make_detector()\n \n # Set up the calibration pattern \n self._calPatternDims = calPatternDims\n # self._calPatternDims = (24, 48) # in dots, row,col\n self._calDotSpacingMm = calDotSpacingMm\n # self._calDotSpacingMm = (25.4, 25.4) # in mm, x,y\n self._IMAGE_SIZE = (800,600) # in px, x,y\n self._SENSOR_DIMS = (4*0.707107,4*0.707107) # in mm, row,col\n self._cal_3space_pattern = [] #[(x,y), ...]\n # OpenCV coordinate convention: x+ rightward, y+ downward, z+ out away from camera.\n for y in range(0, self._calPatternDims[0]):\n for x in range(0, self._calPatternDims[1]):\n self._cal_3space_pattern += [(x * self._calDotSpacingMm[0], y * self._calDotSpacingMm[1], 0)]\n # logger.debug(\"self._cal_3space_pattern: \" + repr(self._cal_3space_pattern))\n \n def make_detector(self):\n # Setup SimpleBlobDetector parameters.\n parms = cv2.SimpleBlobDetector_Params()\n \n # Change thresholds\n parms.minThreshold = 0;\n parms.maxThreshold = 128;\n \n # Filter by Area.\n parms.filterByArea = True\n parms.minArea = 5\n \n # Filter by Circularity\n parms.filterByCircularity = True\n parms.minCircularity = 0.25\n \n # Filter by Convexity\n parms.filterByConvexity = False\n parms.minConvexity = 0.9\n parms.maxConvexity = 1\n \n # Filter by Inertia\n parms.filterByInertia = True\n parms.minInertiaRatio = 0.5\n \n # logger.debug(\"Orig minDistBetweenBlobs: \" + str(parms.minDistBetweenBlobs))\n parms.minDistBetweenBlobs = 5\n parms.blobColor = 0\n \n # Create a detector with the parameters\n return cv2.SimpleBlobDetector_create(parms)\n\n def find_single_cam_calibration(self, image_paths):\n \"\"\"\n image_paths : list of image file paths\n return : cameraMatrix,distCoeffs if successful, or None,None if not\n \"\"\"\n all_points_in_3space, all_points_in_images = 
self._find_point_vectors(image_paths)\n if len(all_points_in_3space) > 0:\n # logger.debug(\"np.array(all_points_in_3space) = \" + repr(np.array(all_points_in_3space)))\n all_points_in_3space = np.array(all_points_in_3space, dtype=np.float32)\n # logger.debug(\"all_points_in_3space = \" + str(all_points_in_3space))\n # logger.debug(\"all_points_in_images = \" + str(all_points_in_images))\n found,cameraMatrix,distCoeffs,rvecs,tvecs = cv2.calibrateCamera(all_points_in_3space, all_points_in_images, self._IMAGE_SIZE, None, None)\n \n # Debug by projecting the points in the calibration pattern onto the image\n # for img_path,points,rvec,tvec in zip(image_paths, all_points_in_3space, rvecs, tvecs):\n # img = cv2.imread(img_path)\n # imagePoints, jacobian = cv2.projectPoints(points, rvec, tvec, cameraMatrix, distCoeffs)\n # self._draw_points_on_image(img, imagePoints)\n # cv2.imshow('reprojected on %s'%img_path, img)\n # cv2.waitKey()\n \n # logger.debug(\"found: \" + repr(found) + \",\\n cameraMatrix: \" + repr(cameraMatrix) + \",\\n distCoeffs: \" + repr(distCoeffs) + \",\\n rvecs: \" + repr(rvecs) + \",\\n tvecs: \" + repr(tvecs))\n # logger.debug(\"found: \" + repr(found) + \",\\n rvecs: \" + repr(rvecs) + \",\\n tvecs: \" + repr(tvecs))\n return cameraMatrix,distCoeffs\n else: \n logger.error(\"Can't find any calibration patterns in any of the supplied images. Can't compute single camera calibration.\")\n return None,None\n \n def _draw_points_on_image(self, image, points): \n \"\"\"\n Annotate an image with points for debugging\n image : color opencv image\n points : list of coordinates in image\n \"\"\"\n RADIUS = 1\n COLOR = (0,0,255)\n i = 0\n for point in points:\n # point is x,y, like : np.array([[697.77185, 396.0037 ]], dtype=float32\n # logger.debug(\"point: %s\"%repr(point))\n cv2.circle(image, tuple(point[0]), RADIUS, COLOR, -1)\n cv2.putText(image, '%d'%i, tuple(point[0]), cv2.FONT_HERSHEY_SIMPLEX, 0.33, COLOR)\n i += 1\n \n def _find_point_vectors(self, image_paths, rowCol=False):\n \"\"\"\n Get the coordinates of the dots on the calibration target\n \n image_paths : list of N image file paths\n rowCol : True to return points in row,col convention. False to use x,y convention.\n returns : (all_points_in_3space, all_points_in_images)\n \"\"\"\n all_points_in_images = []\n all_points_in_3space = []\n \n first_loop = True\n for image_path in image_paths:\n img = cv2.imread(image_path)\n points = np.array([[]])\n found,points = cv2.findCirclesGrid(img, self._calPatternDims, points, cv2.CALIB_CB_SYMMETRIC_GRID, self._cal_target_dot_det)\n if found:\n # logger.debug(\"points.shape: %s\"%repr(points.shape))\n points = points[:,0,:] # This doesn't seem to actually change anything, it seems to be just a spare dimension?\n # findCirclesGrid returns x,y convention. 
Convert to row,col\n if rowCol:\n points = points[:,[1,0]]\n # logger.debug(\"points.shape: %s\"%repr(points.shape))\n # logger.debug((\"Found \" + str(len(points)) + \" cal points in \" + image_path) if found else \"No cal pattern found in \" + image_path)\n all_points_in_images += [points]\n all_points_in_3space += [self._cal_3space_pattern]\n \n # self._draw_points_on_image(img, points)\n # cv2.imshow(image_path, img)\n else:\n logger.warning(\"Didn't find calibration pattern in this image: %s\"%image_path)\n # cv2.waitKey()\n \n return all_points_in_3space, all_points_in_images\n \n def find_stereo_pair_calibration(self, pair_image_paths):\n \"\"\"\n \n pair_image_paths : list of twoples, of the form (\"/path/to/one/left/image\", \"/path/to/one/right/image\"),\n \n returns : If failure, None. If success, projection matrices for cv2.triangulatePoints, stored in a dictionary like this:\n {\n 'leftProjMat':leftProjMat ,\n 'rightProjMat':rightProjMat,\n }\n \"\"\"\n left_image_paths = [pair[0] for pair in pair_image_paths]\n right_image_paths = [pair[1] for pair in pair_image_paths]\n\n # First must calibrate individual cameras\n logger.info(\"Computing left camera calibration\")\n lCameraMatrix, lDistCoeffs = self.find_single_cam_calibration(left_image_paths)\n logger.info(\"lCameraMatrix: \" + repr(lCameraMatrix))\n logger.info(\"Computing right camera calibration\")\n rCameraMatrix, rDistCoeffs = self.find_single_cam_calibration(right_image_paths)\n logger.info(\"rCameraMatrix: \" + repr(rCameraMatrix))\n \n if lCameraMatrix is None or rCameraMatrix is None:\n logger.error(\"Failed to find one or both camera matrices.\")\n return None\n \n # Find individual dots in all the images\n logger.info(\"Finding points in left images from pairs\")\n all_points_in_3space, all_points_in_left_images = self._find_point_vectors(left_image_paths, True)\n logger.info(\"Finding points in right images from pairs\")\n all_points_in_3space, all_points_in_right_images = self._find_point_vectors(right_image_paths, True)\n all_points_in_3space = np.array(all_points_in_3space, dtype=np.float32)\n # logger.debug(\"all_points_in_3space: \" + repr(all_points_in_3space))\n logger.debug(\"all_points_in_left_images: \" + repr(all_points_in_left_images))\n logger.debug(\"all_points_in_right_images: \" + repr(all_points_in_right_images))\n # logger.debug(\"self._IMAGE_SIZE: \" + repr(self._IMAGE_SIZE))\n logger.debug(\"len(all_points_in_3space): \" + str(len(all_points_in_3space)))\n logger.debug(\"len(all_points_in_left_images): \" + str(len(all_points_in_left_images)))\n logger.debug(\"len(all_points_in_right_images): \" + str(len(all_points_in_right_images)))\n logger.info(\"Computing stereo calibration\")\n flags = 0\n flags |= cv2.CALIB_FIX_INTRINSIC\n # flags |= cv2.CALIB_FIX_PRINCIPAL_POINT\n flags |= cv2.CALIB_USE_INTRINSIC_GUESS\n flags |= cv2.CALIB_FIX_FOCAL_LENGTH\n # flags |= cv2.CALIB_FIX_ASPECT_RATIO\n flags |= cv2.CALIB_ZERO_TANGENT_DIST\n # flags |= cv2.CALIB_RATIONAL_MODEL\n # flags |= cv2.CALIB_SAME_FOCAL_LENGTH\n # flags |= cv2.CALIB_FIX_K3\n # flags |= cv2.CALIB_FIX_K4\n # flags |= cv2.CALIB_FIX_K5\n\n stereocalib_criteria = (cv2.TERM_CRITERIA_MAX_ITER +\n cv2.TERM_CRITERIA_EPS, 100, 1e-5)\n try:\n minError, lCameraMatrix, lDistCoeffs, rCameraMatrix, rDistCoeffs, R, T, E, F = 
cv2.stereoCalibrate(all_points_in_3space, all_points_in_left_images, all_points_in_right_images, lCameraMatrix, lDistCoeffs, rCameraMatrix, rDistCoeffs, self._IMAGE_SIZE, criteria=stereocalib_criteria, flags=flags)\n except:\n logger.error(\"Failed to find stereo calibration.\", exc_info=True)\n return None\n logger.debug(\"minError: \" + repr(minError))\n logger.debug(\"lCameraMatrix: \" + repr(lCameraMatrix))\n logger.debug(\"lDistCoeffs: \" + repr(lDistCoeffs))\n logger.debug(\"rCameraMatrix: \" + repr(rCameraMatrix))\n logger.debug(\"rDistCoeffs: \" + repr(rDistCoeffs))\n logger.debug(\"R: \" + repr(R))\n logger.debug(\"T: \" + repr(T))\n logger.debug(\"E: \" + repr(E))\n logger.debug(\"F: \" + repr(F))\n \n # For debugging only\n # imageL = cv2.imread(left_image_paths[0])\n # imageR = cv2.imread(right_image_paths[0])\n # lUd = cv2.undistort(imageL, lCameraMatrix, lDistCoeffs)\n # rUd = cv2.undistort(imageR, rCameraMatrix, rDistCoeffs)\n # cv2.imshow('left', imageL)\n # cv2.imshow('right', imageR)\n # cv2.imshow('left undistorted', lUd)\n # cv2.imshow('right undistorted', rUd)\n # cv2.waitKey()\n \n # Compute projection matrices\n #https://docs.opencv.org/2.4/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html#stereorectify\n (leftRectXform, rightRectXform, leftProjMat, rightProjMat, Q, leftRoi, rightRoi) = cv2.stereoRectify(lCameraMatrix, lDistCoeffs, rCameraMatrix, rDistCoeffs, self._IMAGE_SIZE, R, T)\n Txf = rightProjMat[0,3]\n Tyf = rightProjMat[1,3]\n if(Txf != 0 and Tyf == 0):\n logger.info(\"Horizontal stereo configuration detected.\")\n elif(Txf == 0 and Tyf != 0):\n logger.info(\"Vertical stereo configuration detected.\")\n else:\n logger.error(\"Invalid stereo configuration detected. Txf=%f, Tyf=%f\"%(Txf,Tyf))\n \n # TODO: ROI rectangles are x,y,width,height. 
Check if we got a reasonable fraction of each image\n \n logger.debug(\"leftRectXform : \" + repr(leftRectXform ))\n logger.debug(\"rightRectXform: \" + repr(rightRectXform))\n logger.debug(\"leftProjMat : \" + repr(leftProjMat ))\n logger.debug(\"rightProjMat : \" + repr(rightProjMat ))\n logger.debug(\"Q: \" + repr(Q))\n logger.debug(\"leftRoi: \" + repr(leftRoi))\n logger.debug(\"rightRoi: \" + repr(rightRoi))\n\n retDict = {\n 'minError':minError,\n 'leftProjMat':leftProjMat ,\n 'rightProjMat':rightProjMat,\n }\n \n return retDict\n \n def find_cal_pattern_in_3space(self, stereo_cal, pair_image_path):\n \"\"\"\n stereo_cal: Projection matrices for cv2.triangulatePoints, stored in a dictionary like this:\n {\n 'leftProjMat':leftProjMat ,\n 'rightProjMat':rightProjMat,\n }\n pair_image_path : a twople, of the form (\"/path/to/one/left/image\", \"/path/to/one/right/image\"),\n \n returns: a list of [x,y,z] coordinates in real-world space, in the form:\n np.array([[ 7549.84 , -184252.69 , 40687.215 ],\n [ 7626.0737, -185671.55 , 41133.258 ],\n [ 7643.9023, -186005.36 , 41351.223 ]])\n \"\"\"\n \n # Find individual dots in all the images\n logger.info(\"Finding points in left images from pairs\")\n all_points_in_3space, all_points_in_left_images = self._find_point_vectors([pair_image_path[0]])\n logger.info(\"Finding points in right images from pairs\")\n all_points_in_3space, all_points_in_right_images = self._find_point_vectors([pair_image_path[1]])\n \n # for image_path,points in zip(pair_image_path, [all_points_in_left_images[0], all_points_in_right_images[0]]):\n # img = cv2.imread(image_path)\n # self._draw_points_on_image(img, points)\n # cv2.imshow(image_path, img)\n all_points_in_left_images = all_points_in_left_images[0]\n # logger.debug(\"Shape: %s\"%repr(all_points_in_left_images.shape))\n # all_points_in_left_images = all_points_in_left_images[:,0,:]\n # logger.debug(\"Shape: %s\"%repr(all_points_in_left_images.shape))\n all_points_in_right_images = all_points_in_right_images[0]\n # all_points_in_right_images = all_points_in_right_images[:,0,:]\n # Switch from x,y to row,col\n # all_points_in_left_images = all_points_in_left_images[:,[1,0]]\n # all_points_in_right_images = all_points_in_right_images[:,[1,0]]\n all_points_in_left_images = all_points_in_left_images.transpose()\n # logger.debug(\"Shape: %s\"%repr(all_points_in_left_images.shape))\n all_points_in_right_images = all_points_in_right_images.transpose()\n \n # logger.debug('all_points_in_left_images: ' + repr(all_points_in_left_images))\n \n points4d = cv2.triangulatePoints(stereo_cal['leftProjMat'], stereo_cal['rightProjMat'], all_points_in_left_images, all_points_in_right_images)\n points3d = cv2.convertPointsFromHomogeneous(points4d.transpose())\n \n # logger.debug('points4d: ' + repr(points4d))\n logger.debug('points3d: ' + repr(points3d))\n return points3d[:,0,:]\n \n\ndef mark_dots(infilepath, outfilepath, detector):\n \"\"\"\n Test routine for debugging blob finder params\n \"\"\"\n image = cv2.imread(infilepath)\n blobs = detector.detect(image)\n print(\"Found \"+str(len(blobs))+\" blobs \" + infilepath + \" -> \" + outfilepath)\n # Draw detected blobs as red circles.\n # cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob\n annotated = cv2.drawKeypoints(image, blobs, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n cv2.imwrite(outfilepath, annotated)\n\n \nif __name__ == '__main__':\n logging.basicConfig(level=logging.WARNING)\n 
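# root logger stays at WARNING to keep library noise down; only this module's logger is raised to DEBUG for calibration tracing\n 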
logger.setLevel(logging.DEBUG)\n\n sc = StereoCalibrator()\n cal_img_dir = 'test images/2019-10-18 stereo cal images/'\n pair_image_names = [\n ('left/left-00001.png','right/right-00001.png'),\n ('left/left-00002.png','right/right-00002.png'),\n # ('left/left-00003.png','right/right-00003.png'),\n ('left/left-00004.png','right/right-00004.png'),\n ('left/left-00005.png','right/right-00005.png'),\n ('left/left-00006.png','right/right-00006.png'),\n # ('left/left-00007.png','right/right-00007.png'),\n # ('left/left-00008.png','right/right-00008.png'),\n # ('left/left-00009.png','right/right-00009.png'),\n ('left/left-00010.png','right/right-00010.png'),\n ('left/left-00011.png','right/right-00011.png'),\n ('left/left-00012.png','right/right-00012.png'),\n ('left/left-00013.png','right/right-00013.png'),\n # ('left/left-00014.png','right/right-00014.png'),\n # ('left/left-00015.png','right/right-00015.png'),\n ('left/left-00019.png','right/right-00019.png'),\n # ('left/left-00020.png','right/right-00020.png'),\n ('left/left-00021.png','right/right-00021.png'),\n ('left/left-00022.png','right/right-00022.png'),\n ('left/left-00023.png','right/right-00023.png'),\n # ('left/left-00025.png','right/right-00025.png'),\n ]\n pair_cal_images = [(cal_img_dir + pair[0], cal_img_dir + pair[1]) for pair in pair_image_names]\n # det = sc.make_detector()\n # for img in all_images:\n # outfile = 'dotted_' + img;\n # mark_dots(cal_img_dir + img, cal_img_dir + outfile, det)\n \n if False:\n # Progressively include more pairs\n for numPairs in range(1, len(pair_cal_images) + 1):\n logger.debug(\"=============================================================================\")\n logger.debug(\"Trying with %d pairs.\"%numPairs)\n pairsToUse = pair_cal_images[0:numPairs]\n stereo_cal = sc.find_stereo_pair_calibration(pairsToUse )\n input('Press enter to continue...')\n elif False:\n # Do just the first two. 
Those are in the same relative position, so if it doesn't work on these, it never will\n stereo_cal = sc.find_stereo_pair_calibration(pair_cal_images[0:2])\n elif True:\n # Just use two images\n stereo_cal = sc.find_stereo_pair_calibration(\n [(cal_img_dir + 'left/left-00012.png', cal_img_dir + 'right/right-00012.png')])\n else:\n # Do em all at once\n stereo_cal = sc.find_stereo_pair_calibration( pair_cal_images)\n \n output_fn = 'src/stereo_cal.py'\n with open(output_fn, 'w+') as outfile:\n outfile.write('from numpy import array\\nstereo_cal = ' + repr(stereo_cal) + '\\n')\n logger.info('Done, saved calibration to ' + output_fn)\n\n # test\n points3d = sc.find_cal_pattern_in_3space(stereo_cal, pair_cal_images[0])\n \n if True:\n # https://matplotlib.org/3.1.1/gallery/mplot3d/scatter3d.html\n # This import registers the 3D projection, but is otherwise unused.\n from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import\n\n import matplotlib.pyplot as plt\n import numpy as np\n \n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n \n ax.scatter(points3d[:,0], points3d[:,1], points3d[:,2], marker='o')\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n\n plt.show()\n\n cv2.waitKey()\n","sub_path":"src/stereo_calibrator.py","file_name":"stereo_calibrator.py","file_ext":"py","file_size_in_byte":19578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"437026351","text":"from lcd_api import LcdApi\nfrom machine import Pin\nfrom utime import sleep_ms, sleep_us\n\nclass GpioLcd(LcdApi):\n def __init__(self, rs_pin, enable_pin, d0_pin=None, d1_pin=None,\n d2_pin=None, d3_pin=None, d4_pin=None, d5_pin=None,\n d6_pin=None, d7_pin=None, rw_pin=None, backlight_pin=None,\n num_lines=2, num_columns=16):\n self.rs_pin = rs_pin\n self.enable_pin = enable_pin\n self.rw_pin = rw_pin\n self.backlight_pin = backlight_pin\n self._4bit = True\n if d4_pin and d5_pin and d6_pin and d7_pin:\n self.d0_pin = d0_pin\n self.d1_pin = d1_pin\n self.d2_pin = d2_pin\n self.d3_pin = d3_pin\n self.d4_pin = d4_pin\n self.d5_pin = d5_pin\n self.d6_pin = d6_pin\n self.d7_pin = d7_pin\n if self.d0_pin and self.d1_pin and self.d2_pin and self.d3_pin:\n self._4bit = False\n else:\n self.d0_pin = None\n self.d1_pin = None\n self.d2_pin = None\n self.d3_pin = None\n self.d4_pin = d0_pin\n self.d5_pin = d1_pin\n self.d6_pin = d2_pin\n self.d7_pin = d3_pin\n self.rs_pin.init(Pin.OUT)\n self.rs_pin.off()\n if self.rw_pin:\n self.rw_pin.init(Pin.OUT)\n self.rw_pin.off()\n self.enable_pin.init(Pin.OUT)\n self.enable_pin.off()\n self.d4_pin.init(Pin.OUT)\n self.d5_pin.init(Pin.OUT)\n self.d6_pin.init(Pin.OUT)\n self.d7_pin.init(Pin.OUT)\n self.d4_pin.off()\n self.d5_pin.off()\n self.d6_pin.off()\n self.d7_pin.off()\n if not self._4bit:\n self.d0_pin.init(Pin.OUT)\n self.d1_pin.init(Pin.OUT)\n self.d2_pin.init(Pin.OUT)\n self.d3_pin.init(Pin.OUT)\n self.d0_pin.off()\n self.d1_pin.off()\n self.d2_pin.off()\n self.d3_pin.off()\n if self.backlight_pin is not None:\n self.backlight_pin.init(Pin.OUT)\n self.backlight_pin.off()\n sleep_ms(20)\n self.hal_write_init_nibble(self.LCD_FUNCTION_RESET)\n sleep_ms(5)\n self.hal_write_init_nibble(self.LCD_FUNCTION_RESET)\n sleep_ms(1)\n self.hal_write_init_nibble(self.LCD_FUNCTION_RESET)\n sleep_ms(1)\n cmd = self.LCD_FUNCTION\n if not self._4bit:\n cmd |= self.LCD_FUNCTION_8BIT\n self.hal_write_init_nibble(cmd)\n sleep_ms(1)\n LcdApi.__init__(self, num_lines, num_columns)\n if num_lines > 1:\n cmd |= 
self.LCD_FUNCTION_2LINES\n self.hal_write_command(cmd)\n def hal_pulse_enable(self):\n self.enable_pin.off()\n sleep_us(1)\n self.enable_pin.on()\n sleep_us(1)\n self.enable_pin.off()\n sleep_us(100)\n def hal_write_init_nibble(self, nibble):\n self.hal_write_4bits(nibble >> 4)\n def hal_backlight_on(self):\n if self.backlight_pin:\n self.backlight_pin.on()\n def hal_backlight_off(self):\n if self.backlight_pin:\n self.backlight_pin.off()\n def hal_write_command(self, cmd):\n self.rs_pin.off()\n self.hal_write_8bits(cmd)\n if cmd <= 3:\n sleep_ms(5)\n def hal_write_data(self, data):\n self.rs_pin.on()\n self.hal_write_8bits(data)\n def hal_write_8bits(self, value):\n if self.rw_pin:\n self.rw_pin.off()\n if self._4bit:\n self.hal_write_4bits(value >> 4)\n self.hal_write_4bits(value)\n else:\n self.d3_pin.value(value & 0x08)\n self.d2_pin.value(value & 0x04)\n self.d1_pin.value(value & 0x02)\n self.d0_pin.value(value & 0x01)\n self.hal_write_4bits(value >> 4)\n def hal_write_4bits(self, nibble):\n self.d7_pin.value(nibble & 0x08)\n self.d6_pin.value(nibble & 0x04)\n self.d5_pin.value(nibble & 0x02)\n self.d4_pin.value(nibble & 0x01)\n self.hal_pulse_enable()\n","sub_path":"lcd/nodemcu_gpio_lcd.py","file_name":"nodemcu_gpio_lcd.py","file_ext":"py","file_size_in_byte":4079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"63540923","text":"from random import randint\r\n\r\n#a\r\ntall=[]\r\nfor i in range(1,35):\r\n tall.append(i)\r\n\r\n#b\r\ndef velgtall():\r\n #b=[]\r\n #while len(b)<7:\r\n # b.append(input('A number please (7 total): '))\r\n #d=[int(i) for i in b]\r\n return [3,8,24,31,7,9,1]\r\n\r\nb=[3,8,24,31,7,9,1]\r\n\r\ndef trekk(n):\r\n t=[]\r\n for i in range(n):\r\n rand=randint(0,len(tall)-1)\r\n t.append(tall[rand])\r\n tall.pop(rand)\r\n t.sort()\r\n return t\r\n\r\ndef comp(a,b):\r\n k=0\r\n for i in range(len(a)):\r\n if a[i] in b:\r\n k+=1\r\n return k\r\n\r\n\r\ndef vunnet(a,b):\r\n if a==7:\r\n return \"You have won 2 749 455kr\"\r\n if a==6 and b>=1:\r\n return \"You have won 102 110kr\"\r\n if a==6:\r\n return \"You have won 3385kr\"\r\n if a==5:\r\n return \"You have won 95kr\"\r\n if a==4 and b>=1:\r\n return \"You have won 45kr\"\r\n return \"Sorry, unfortunately you won nothing.\"\r\n\r\n\r\ndef main():\r\n a=velgtall()\r\n b=trekk(7)\r\n c=trekk(3)\r\n print(a,b,c)\r\n return vunnet(comp(b,a),comp(c,a))\r\n\r\n\"\"\"\r\ndef vunnet2(a,b):\r\n if a==7:\r\n return 2749455\r\n if a==6 and b>=1:\r\n return 102110\r\n if a==6:\r\n return 3385\r\n if a==5:\r\n return 95\r\n if a==4 and b>=1:\r\n return 45\r\n return 0\r\n\r\n\r\nfo=0\r\ntj=0\r\n\r\nfor i in range(10**6):\r\n tall=[]\r\n for i in range(1,35):\r\n tall.append(i)\r\n fo+=5\r\n ab=trekk(7)\r\n bc=trekk(3)\r\n cd=comp(ab,b)\r\n de=comp(bc,b)\r\n tj+=vunnet2(cd,de)\r\nprint(tj,fo)\r\n\"\"\"\r\n\r\n#if __name__==\"__main__\":\r\n # print(main())\r\n","sub_path":"Øving6/lotto.py","file_name":"lotto.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"452896456","text":"from typing import Tuple, Type, Union\n\nfrom formulaic.errors import FormulaSyntaxError\nfrom .types.ast_node import ASTNode\nfrom .types.token import Token\n\n\ndef exc_for_token(\n token: Union[Token, ASTNode],\n message: str,\n errcls: Type[Exception] = FormulaSyntaxError,\n) -> Exception:\n \"\"\"\n Return an exception ready to be raised with a helpful token/source context.\n\n Args:\n token: The `Token` or `ASTNode` instance about which an exception should\n be raised.\n message: The message to be included in the exception.\n errcls: The type of the 
exception to be returned.\n \"\"\"\n token = __get_token_for_ast(token)\n token_context = token.get_source_context(colorize=True)\n if token_context:\n return errcls(f\"{message}\\n\\n{token_context}\")\n return errcls(message)\n\n\ndef exc_for_missing_operator(\n lhs: Union[Token, ASTNode],\n rhs: Union[Token, ASTNode],\n errcls: Type[Exception] = FormulaSyntaxError,\n):\n \"\"\"\n Return an exception ready to be raised about a missing operator token\n between the `lhs` and `rhs` tokens/ast-nodes.\n\n Args:\n lhs: The `Token` or `ASTNode` instance to the left of where an operator\n should be placed.\n rhs: The `Token` or `ASTNode` instance to the right of where an operator\n should be placed.\n errcls: The type of the exception to be returned.\n \"\"\"\n lhs_token, rhs_token, error_token = __get_tokens_for_gap(lhs, rhs)\n raise exc_for_token(\n error_token,\n f\"Missing operator between `{lhs_token.token}` and `{rhs_token.token}`.\",\n errcls=errcls,\n )\n\n\ndef __get_token_for_ast(ast: Union[Token, ASTNode]) -> Token:\n \"\"\"\n Ensure that incoming `ast` is a `Token`, or else generate one for debugging\n purposes (note that this token will not be valid `Token` for use other than\n in reporting errors).\n \"\"\"\n if isinstance(ast, Token):\n return ast\n lhs_token = ast\n while isinstance(lhs_token, ASTNode):\n lhs_token = lhs_token.args[0]\n rhs_token = ast\n while isinstance(rhs_token, ASTNode):\n rhs_token = rhs_token.args[-1]\n return Token(\n token=lhs_token.source[lhs_token.source_start : rhs_token.source_end + 1]\n if lhs_token.source\n else \"\",\n source=lhs_token.source,\n source_start=lhs_token.source_start,\n source_end=rhs_token.source_end,\n )\n\n\ndef __get_tokens_for_gap(\n lhs: Union[Token, ASTNode], rhs: Union[Token, ASTNode]\n) -> Tuple[Token, Token, Token]:\n \"\"\"\n Ensure that incoming `lhs` and `rhs` objects are `Token`s, or else generate\n some for debugging purposes (note that these tokens will not be valid\n `Token`s for use other than in reporting errors). 
Three tokens will be\n returned: the left-hand side token, the right-hand-side token, and the\n \"middle\" token where a new operator/token should be inserted (may not\n be empty depending on context).\n \"\"\"\n lhs_token = lhs\n while isinstance(lhs_token, ASTNode):\n lhs_token = lhs_token.args[-1]\n rhs_token = rhs or lhs\n while isinstance(rhs_token, ASTNode):\n rhs_token = rhs_token.args[0]\n return (\n lhs_token,\n rhs_token,\n Token(\n lhs_token.source[lhs_token.source_start : rhs_token.source_end + 1]\n if lhs_token.source\n else \"\",\n source=lhs_token.source,\n source_start=lhs_token.source_start,\n source_end=rhs_token.source_end,\n ),\n )\n","sub_path":"formulaic/parser/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"505076203","text":"import decimal\nimport unittest\nfrom basic_operations import *\n\nDATABASE.init('unittest.db')\n\n\nclass CustomerTests(unittest.TestCase):\n def setUp(self):\n DATABASE.drop_tables([Customer])\n DATABASE.create_tables([Customer])\n\n def test_add_customer(self):\n added = add_customer(\n customer_id=111,\n name=\"Billy\",\n lastname=\"Bones\",\n home_address=\"123 4th Ave, Seattle WA\",\n phone_number=1234567890,\n email_address=\"spam@gmail.com\",\n status=True,\n credit_limit=1234.56\n )\n retrieved = Customer.get(Customer.customer_id == 111)\n self.assertEqual(added, retrieved)\n\n def test_add_duplicate_key(self):\n first = add_customer(\n customer_id=111,\n name=\"Billy\",\n lastname=\"Bones\",\n home_address=\"123 4th Ave, Seattle WA\",\n phone_number=1234567890,\n email_address=\"spam@gmail.com\",\n status=True,\n credit_limit=1234.56\n )\n second = add_customer(\n customer_id=111,\n name=\"Roger\"\n )\n self.assertIsNone(second)\n retrieved = Customer.get(Customer.customer_id == 111)\n self.assertEqual(retrieved, first)\n self.assertEqual(len(Customer), 1)\n\n def test_search_customer(self):\n added = add_customer(customer_id=111, name=\"Aaron\")\n retrieved = search_customer(111)\n self.assertEqual(retrieved, added)\n\n def test_search_customer_not_found(self):\n add_customer(customer_id=111, name=\"Aaron\")\n retrieved = search_customer(222)\n self.assertIsNone(retrieved)\n\n def test_delete_customer(self):\n add_customer(customer_id=111, name=\"Aaron\")\n delete_customer(111)\n retrieved = search_customer(111)\n self.assertIsNone(retrieved)\n\n def test_update_credit(self):\n add_customer(customer_id=111, name=\"Aaron\", credit_limit=123.45)\n update_customer_credit(111, 246.90)\n retrieved = search_customer(111)\n self.assertAlmostEqual(retrieved.credit_limit, decimal.Decimal(246.90))\n\n def test_update_credit_not_found(self):\n self.assertRaises(ValueError, lambda: update_customer_credit(111, 7.1))\n\n def test_list_active_customers(self):\n add_customer(customer_id=111, name=\"Amy\", status=True)\n add_customer(customer_id=222, name=\"Ben\", status=False)\n add_customer(customer_id=333, name=\"Carrie\", status=True)\n add_customer(customer_id=444, name=\"David\") # status unset\n add_customer(customer_id=555, name=\"Erin\", status=True)\n self.assertEqual(list_active_customers(), 3)\n","sub_path":"students/joshuabone/lesson03/test_basic_operations.py","file_name":"test_basic_operations.py","file_ext":"py","file_size_in_byte":2699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"426687316","text":"from collections import defaultdict\nfrom 
operator import itemgetter\nfrom .. import Item, Recipe\nfrom ..ingredients.raw import *\n\n\nclass TrainFull(Exception):\n pass\n\n\nclass Cargo():\n\n def __init__(self, cargo_size: int = 40):\n self.available_slots = cargo_size\n self.cargo = defaultdict(int)\n\n\n def fill_with(self, recipe: Recipe):\n self.check_fuel(recipe)\n for i, n in recipe.raw_ingredients():\n self.add_item(i)\n limiting = []\n while len(limiting) <= self.available_slots and self.available_slots:\n for i in limiting:\n self.add_item(i)\n limiting = self.get_limiting_items(recipe)\n for i, slots in sorted(list(self.cargo.items()), key=itemgetter(1)):\n print(\"{:<3d}x {} = {} {}\".format(slots, i.stack_size, slots * i.stack_size, i))\n if self.available_slots:\n print(\"{:<3d} EXTRA\".format(self.available_slots))\n\n\n def check_fuel(self, recipe: Recipe):\n if recipe.require_fuel() and WoodBlock not in self.cargo:\n self.add_item(WoodBlock)\n\n\n def add_item(self, item: Item):\n if not self.available_slots:\n raise TrainFull()\n self.available_slots -= 1\n self.cargo[item] += 1\n\n\n\n def get_limiting_items(self, item: Recipe):\n counts = [\n (i, self.cargo[i] * i.stack_size / n)\n for i, n in item.raw_ingredients()\n ]\n least = min(x[1] for x in counts)\n return [i for i, n in counts if n == least]\n\n\ndef make_train(item, cargo_slots=40):\n t = Cargo(cargo_slots)\n t.fill_with(item)\n return t\n","sub_path":"factorio/planning/cargo.py","file_name":"cargo.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"565081289","text":"# Copyright (C) 2017 Paolo Galeone \n#\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, you can obtain one at http://mozilla.org/MPL/2.0/.\n# Exhibit B is not attached; this software is compatible with the\n# licenses expressed under Section 1.12 of the MPL v2.\n\"\"\"Use DyTB to define and train a model. Then redefines it, changing the input\nwhile restoring the learned weights of the best model. Then export it in a protobuf.\"\"\"\n\nimport sys\n\nimport tensorflow as tf\nfrom dytb.inputs.predefined.MNIST import MNIST\nfrom dytb.models.predefined.LeNetDropout import LeNetDropout\nfrom dytb.train import train\n\n\ndef main():\n \"\"\"main executes the operations described in the module docstring\"\"\"\n lenet = LeNetDropout()\n mnist = MNIST()\n\n info = train(model=lenet, dataset=mnist, hyperparameters={\"epochs\": 1})\n\n checkpoint_path = info[\"paths\"][\"best\"]\n\n with tf.Session() as sess:\n # Define a new model, import the weights from best model trained\n # Change the input structure to use a placeholder\n images = tf.placeholder(tf.float32, shape=(None, 28, 28, 1), name=\"input_\")\n # define in the default graph the model that uses placeholder as input\n _ = lenet.get(images, mnist.num_classes)\n\n # The best checkpoint path contains just one checkpoint, thus the last is the best\n saver = tf.train.Saver()\n saver.restore(sess, tf.train.latest_checkpoint(checkpoint_path))\n\n # Create a builder to export the model\n builder = tf.saved_model.builder.SavedModelBuilder(\"export\")\n # Tag the model in order to be capable of restoring it specifying the tag set\n builder.add_meta_graph_and_variables(sess, [\"tag\"])\n builder.save()\n\n # save the checkpoint files. 
Those are needed to freeze the graph.\n tf.gfile.MakeDirs(\"models\")\n saver.save(sess, \"models/model\")\n\n # Write the serialized form of the graph\n tf.train.write_graph(sess.graph, \"models\", \"model.pb\")\n\n return 0\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","sub_path":"test_models/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"101563618","text":"\"\"\"\nIn LeetCode Store, there are some kinds of items to sell. Each item has a price.\n\nHowever, there are some special offers, and a special offer consists of one or more different kinds of items with a sale price.\n\nYou are given the each item's price, a set of special offers, and the number we need to buy for each item. The job is to output the lowest\nprice you have to pay for exactly certain items as given, where you could make optimal use of the special offers.\n\nEach special offer is represented in the form of an array, the last number represents the price you need to pay for this special offer,\nother numbers represents how many specific items you could get if you buy this offer.\n\nYou could use any of special offers as many times as you want.\n\nExample 1:\nInput: [2,5], [[3,0,5],[1,2,10]], [3,2]\nOutput: 14\nExplanation: \nThere are two kinds of items, A and B. Their prices are $2 and $5 respectively. \nIn special offer 1, you can pay $5 for 3A and 0B\nIn special offer 2, you can pay $10 for 1A and 2B. \nYou need to buy 3A and 2B, so you may pay $10 for 1A and 2B (special offer #2), and $4 for 2A.\nExample 2:\nInput: [2,3,4], [[1,1,0,4],[2,2,1,9]], [1,2,1]\nOutput: 11\nExplanation: \nThe price of A is $2, and $3 for B, $4 for C. \nYou may pay $4 for 1A and 1B, and $9 for 2A ,2B and 1C. \nYou need to buy 1A ,2B and 1C, so you may pay $4 for 1A and 1B (special offer #1), and $3 for 1B, $4 for 1C. 
\nYou cannot add more items, though only $9 for 2A ,2B and 1C.\nNote:\nThere are at most 6 kinds of items, 100 special offers.\nFor each item, you need to buy at most 6 of them.\nYou are not allowed to buy more items than you want, even if that would lower the overall price.\n\"\"\"\nclass Solution:\n def shoppingOffers(self, price, special, needs):\n \"\"\"\n :type price: List[int]\n :type special: List[List[int]]\n :type needs: List[int]\n :rtype: int\n \"\"\"\n def can_apply(offer, left_needs):\n return all([left_needs[i] >= offer[i] for i in range(len(left_needs))])\n\n def dp(left_needs):\n if str(left_needs) in mem:\n return mem[str(left_needs)]\n\n pay = 0 # does not use offer\n for i, [p, n] in enumerate(zip(price, left_needs)):\n pay += p * n\n for offer in special: \n tmp_pay = 0 \n tmp_needs = left_needs[:]\n while can_apply(offer, tmp_needs):\n for i in range(len(left_needs)):\n tmp_needs[i] -= offer[i]\n tmp_pay += offer[-1]\n pay = min(pay, dp(tmp_needs[:]) + tmp_pay)\n mem[str(left_needs)] = pay\n return pay\n\n mem = dict()\n return dp(needs)\n\ns = Solution()\nprint(s.shoppingOffers([2,5], [[3,0,5],[1,2,10]], [3,2])) #14\nprint(s.shoppingOffers([2,3,4], [[1,1,0,4],[2,2,1,9]], [1,2,1])) # 11\n","sub_path":"leetcode/shoppingOffers/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"110959783","text":"import base64\nimport logging\nimport os\n\nimport mapnik\nimport tempdir\n\nfrom static_map_generator.renderer import Renderer\nfrom static_map_generator.utils import merge_dicts\nfrom static_map_generator.utils import rescale_bbox\n\nlog = logging.getLogger(__name__)\n\n\nclass Generator:\n\n @staticmethod\n def generate(config):\n \"\"\"\n Creates a map based on a configuration file\n :param config\n :return:\n \"\"\"\n\n # generate map\n mapnik_map = mapnik.Map(config['params']['width'], config['params']['height'],\n '+init=epsg:31370')\n mapnik_map.background = mapnik.Color('white')\n\n s = mapnik.Style()\n r = mapnik.Rule()\n polygon_symbolizer = mapnik.PolygonSymbolizer()\n polygon_symbolizer.fill = mapnik.Color('steelblue')\n polygon_symbolizer.fill_opacity = 0.5\n r.symbols.append(polygon_symbolizer)\n line_symbolizer = mapnik.LineSymbolizer()\n line_symbolizer.fill = mapnik.Color('rgb(50%,50%,50%)')\n r.symbols.append(line_symbolizer)\n s.rules.append(r)\n mapnik_map.append_style('default', s)\n s = mapnik.Style()\n r = mapnik.Rule()\n point_symbolizer = mapnik.PointSymbolizer()\n point_symbolizer.file = os.path.abspath(\n os.path.dirname(__file__)) + '/fixtures/pointer.svg'\n r.symbols.append(point_symbolizer)\n s.rules.append(r)\n mapnik_map.append_style('point', s)\n\n # render layers\n layers = [layer for layer in config['layers'] if layer['type'] in ['geojson']]\n for idx, layer in enumerate(layers):\n renderer = Renderer.factory(layer['type'])\n layer['idx'] = idx\n kwargs = merge_dicts(config['params'], layer)\n try:\n rendered_layer = renderer.render(**kwargs)\n rendered_layer.styles.append('default')\n mapnik_map.layers.append(rendered_layer)\n except Exception as e:\n log.error('Following layer could not be rendered: ' + str(idx))\n log.error(e, exc_info=True)\n raise\n\n # bbox is the given bbox or the bbox of the layers with a buffer value\n if config['params'].get('bbox') is None:\n mapnik_map.zoom_all()\n extend = mapnik_map.envelope()\n min_width = int(min(extend.maxx - extend.minx, extend.maxy - extend.miny))\n 
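# pad the auto-zoomed extent by one power of ten below its smaller dimension (at least 100 map units) so geometry is not flush with the border\n 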
min_width_param = max(len(str(min_width)) - 1, 2)\n extend.width(extend.width() + 10 ** min_width_param)\n bbox_layers = [extend.minx, extend.miny, extend.maxx, extend.maxy]\n else:\n bbox_layers = config['params']['bbox']\n extend = mapnik.Box2d(bbox_layers[0], bbox_layers[1], bbox_layers[2],\n bbox_layers[3])\n mapnik_map.zoom_to_box(extend)\n\n # render background\n background_layers = [layer for layer in config['layers'] if\n layer['type'] == 'wms']\n background = background_layers[0] if len(background_layers) > 0 else None\n if background:\n # printing map to image works differently for wms in comparison to Mapnik rendering\n # rescaling of the bbox is necessary to avoid deformations of the background image\n bbox = rescale_bbox(config['params']['height'], config['params']['width'],\n bbox_layers)\n config['params']['bbox'] = bbox\n mapnik_map.zoom_to_box(mapnik.Box2d(bbox[0], bbox[1], bbox[2], bbox[3]))\n # rendering the background image\n renderer = Renderer.factory('wms')\n kwargs = merge_dicts(config['params'], background)\n try:\n rendered_layer = renderer.render(**kwargs)\n background = os.path.join(str(config['params']['tempdir']),\n \"background.png\")\n with open(background, 'wb') as im:\n im.write(rendered_layer)\n mapnik_map.background_image = background\n except Exception as e:\n log.error('Background wms could not be rendered')\n log.error(e, exc_info=True)\n raise\n\n im = mapnik.Image(mapnik_map.width, mapnik_map.height)\n mapnik.render(mapnik_map, im)\n filename = os.path.join(str(config['params']['tempdir']), \"result\")\n im.save(filename, 'png')\n\n # add text\n text_layers = [layer for layer in config['layers'] if layer['type'] == 'text']\n for text_layer in text_layers:\n renderer = Renderer.factory('text')\n config['params']['filename'] = filename\n kwargs = merge_dicts(config['params'], text_layer)\n try:\n renderer.render(**kwargs)\n except Exception as e:\n log.error('Text could not be rendered')\n log.error(e, exc_info=True)\n raise\n\n # add scale\n renderer = Renderer.factory('scale')\n config['params']['filename'] = filename\n scale = {\n \"map_scale\": mapnik_map.scale(),\n \"gravity\": \"south_west\",\n \"font_size\": 3\n }\n kwargs = merge_dicts(config['params'], scale)\n try:\n renderer.render(**kwargs)\n except Exception as e:\n log.error('Scale could not be rendered')\n log.error(e, exc_info=True)\n raise\n\n return filename\n\n @staticmethod\n def generate_stream(config):\n temp = tempdir.TempDir()\n config['params']['tempdir'] = temp.name\n image_file = Generator.generate(config)\n\n with open(image_file, 'rb') as f:\n return f.read()\n\n @staticmethod\n def generate_base64(config):\n temp = tempdir.TempDir()\n config['params']['tempdir'] = temp.name\n image_file = Generator.generate(config)\n\n with open(image_file, \"rb\") as image_file:\n return base64.b64encode(image_file.read())\n","sub_path":"static_map_generator/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":6075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"150909533","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 26 20:11:44 2019\n\n@author: yogesh\n\"\"\"\n\nimport re\n\nline = \"hi This is yogesh mehra\";\n\nmatchObj = re.match( r'mehra', line, re.M|re.I)\nif matchObj:\n print (\"match --> matchObj.group() : \", matchObj.group())\nelse:\n print (\"No match!!\")\n\nsearchObj = re.search( r'mehra', line, re.M|re.I)\nif searchObj:\n print (\"search --> searchObj.group() : \", 
searchObj.group())\nelse:\n print (\"Nothing found!!\")","sub_path":"re3.py","file_name":"re3.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"628895015","text":"# HTTP请求头\r\nfrom urllib3 import *\r\nimport re\r\ndisable_warnings()\r\n\r\nhttp = PoolManager()\r\nurl = 'https://list.tmall.com/search_product.htm?q=%B5%E7%C4%D4&type=p&vmarket=&spm=875.7931836%2FB.a2227oh.d100&from=mallfp..pc_1_searchbutton'\r\ndef str2Headers(file):\r\n headerDict = {}\r\n f = open(file,'r')\r\n headersText = f.read()\r\n headers = re.split('\\n',headersText)\r\n for header in headers:\r\n result = re.split(':',header,maxsplit = 1)\r\n headerDict[result[0]] = result[1]\r\n f.close()\r\n return headerDict\r\nheaders = str2Headers('header.txt')\r\n#print(headers)\r\nresponse = http.request('GET', url,headers = headers)\r\ndata = response.data.decode('GB18030')\r\nprint(data)","sub_path":"Python学习基础知识/高级python篇/第16章:网络高级编程/发送HTTP请求头.py","file_name":"发送HTTP请求头.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"241421495","text":"# coding:utf-8\n\nfrom tornado.web import Application, StaticFileHandler\n\nfrom pytornado_web.handler.demo import *\nfrom pytornado_web.handler.handler import *\n\n\ndef make_app():\n settings = {\n \"static_path\": os.path.join(os.path.dirname(__file__), \"static\"),\n \"debug\": True,\n \"cookie_secret\": \"pytornado\",\n # \"xsrf_cookies\": True, # 跨站攻击,需要配合xsrf_form_html()使用\n }\n\n handler = [\n (r'/', WelcomeHandler),\n (r'/example/([^/]*)', ExampleHandler),\n (r'/ga', GAHandler),\n (r'/dga/([^/]*)', DGAHandler),\n (r'/sync', SyncHandler),\n (r'/async', AsyncHandler),\n (r'/async/task', AsyncTaskHandler),\n (r'/future', FutureHandler),\n (r'/future/exec', FutureResponseHandler),\n\n (r\"/(robots\\.txt)\", StaticFileHandler, dict(path=settings['static_path'])),\n ]\n return Application(handler, **settings)\n\n\napp = make_app()\n\n# gunicorn 整合模式启动\n\n# tornado 无法多线程(单进程多线程无效),只能多进程,可热加载文件\n#\n# gunicorn -k tornado -w 10 wsgi:app\n","sub_path":"pyx_tornado/web/pytornado_web/wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"590841406","text":"# Definition for a point.\nclass Point:\n def __init__(self, a=0, b=0):\n self.x = a\n self.y = b\n \ndef create_points(l):\n return [Point(x,y) for x, y in l]\n\nclass Solution:\n def maxPoints(self, points):\n \"\"\"\n :type points: List[Point]\n :rtype: int\n \"\"\"\n from collections import defaultdict\n if not points: return 0\n n = len(points)\n counts = defaultdict(lambda : 0)\n for i in range(n):\n pi = points[i]\n counts[(pi.x, pi.y)] += 1\n point_list = list(counts.keys())\n pn = len(point_list)\n res = max(counts.values())\n for i in range(pn-1):\n xi,yi = point_list[i]\n mem = defaultdict(lambda : counts[(xi, yi)])\n # print(mem)\n for j in range(i+1, pn):\n xj,yj = point_list[j]\n if xi == xj:\n k = float(\"inf\")\n b = xi\n else:\n k = (yi-yj) / (xi - xj)\n # k.p()\n b = round(yi + yj - k * (xi + xj), 8)\n # (point_list[i], point_list[j], k, b).p()\n mem[(k, b)] += counts[(xj, yj)]\n res = max(res, max(mem.values()))\n # print(res, mem)\n return res\n \nif __name__ == '__main__':\n from minitest import *\n\n with test(Solution):\n # Solution().maxPoints(create_points(\n # 
[[1,1],[3,2],[5,3],[4,1],[2,3],[1,4]])).must_equal(4)\n # Solution().maxPoints(create_points(\n # [[1,1],[1,1],[2,3]])).must_equal(3)\n Solution().maxPoints(create_points(\n [[3,1],[12,3],[3,1],[-6,-1]])).must_equal(4)\n\n\n ","sub_path":"python/leetcode/data_struct/149_Max_Points_on_a_Line.py","file_name":"149_Max_Points_on_a_Line.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"374462705","text":"\nfrom flask import Flask, request, jsonify\n\n#from app.torch_utils import transform_image,get_prediction\n\napp = Flask(__name__)\nimport io\nimport torch \nimport torch.nn as nn \nimport torchvision.transforms as transforms \nfrom PIL import Image\n\n# load model\n\nclass NeuralNet(nn.Module):\n def __init__(self, input_size, hidden_size, num_classes):\n super(NeuralNet, self).__init__()\n self.input_size = input_size\n self.l1 = nn.Linear(input_size, hidden_size) \n self.relu = nn.ReLU()\n self.l2 = nn.Linear(hidden_size, num_classes) \n \n def forward(self, x):\n out = self.l1(x)\n out = self.relu(out)\n out = self.l2(out)\n # no activation and no softmax at the end\n return out\n\ninput_size = 784 # 28x28\nhidden_size = 500 \nnum_classes = 10\nmodel = NeuralNet(input_size, hidden_size, num_classes)\n\nPATH = \"app/mnist_ffn.pth\"\nmodel.load_state_dict(torch.load(PATH))\nmodel.eval()\n\n# image -> tensor\ndef transform_image(image_bytes):\n transform = transforms.Compose([transforms.Grayscale(num_output_channels=1),\n transforms.Resize((28,28)),\n transforms.ToTensor(),\n transforms.Normalize((0.1307,),(0.3081,))])\n\n image = Image.open(io.BytesIO(image_bytes))\n return transform(image).unsqueeze(0)\n\n# predict\ndef get_prediction(image_tensor):\n images = image_tensor.reshape(-1, 28*28)\n outputs = model(images)\n # max returns (value ,index)\n _, predicted = torch.max(outputs.data, 1)\n return predicted\n\nALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}\ndef allowed_file(filename):\n # xxx.png\n return '.' 
in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n if request.method == 'POST':\n file = request.files.get('file')\n if file is None or file.filename == \"\":\n return jsonify({'error': 'no file'})\n if not allowed_file(file.filename):\n return jsonify({'error': 'format not supported'})\n\n try:\n img_bytes = file.read()\n tensor = transform_image(img_bytes)\n prediction = get_prediction(tensor)\n data = {'prediction': prediction.item(), 'class_name': str(prediction.item())}\n return jsonify(data)\n except:\n return jsonify({'error': 'error during prediction'})","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"267596950","text":"#This is a QA system by (Lakshmi Prasanna Gundabolu)\r\n#It will try to answer questions that start with Who, What, When or Where.\r\n#please download mylogfile.txt and qa-system.py files into the same folder\r\n#importing the required packages\r\nimport sys\r\nimport nltk\r\nimport numpy as np\r\nimport random\r\nimport string # to process standard python strings\r\nimport re, string, unicodedata\r\nimport pandas as pd\r\nfrom nltk.corpus import wordnet as wn\r\nfrom nltk.stem.wordnet import WordNetLemmatizer\r\nimport wikipedia as wk\r\nfrom collections import defaultdict\r\nnltk.download('punkt') # first-time use only\r\nnltk.download('wordnet') # first-time use only\r\n#creating a definition to return the response\r\ndef response(user_response):\r\n robo_response=''\r\n x=0\r\n if(x==0) :\r\n robo_response = wikipedia_data(user_response)\r\n return robo_response\r\n else:\r\n robo_response = 'I am sorry, I dont know the answer.'\r\n return robo_response\r\n#definition for wikipedia search\r\ndef wikipedia_data(input):\r\n reg_ex = re.search('(.*)', input)\r\n try:\r\n if reg_ex:#using regular expressions to search the question in wikipedia\r\n topic = reg_ex.group(1)\r\n ny = wk.summary(topic, sentences = 1)\r\n mylogfile.write(\"Answer \"+\":\\n\" + ny+ \"\\n\")\r\n return ny\r\n except Exception as e:\r\n #To return the exception message when the program is unable to find the question in wikipedia\r\n exception='I am sorry, I dont know the answer.'\r\n mylogfile.write(\"Answer \"+\":\\n\" + exception+ \"\\n\")\r\n print(exception)\r\n#creating the dummy variable flag to separate the response for exit\r\nflag=True\r\n#creating a logfile to record the questions\r\nmylogfile = open(sys.argv[1], \"r+\")\r\nmylogfile.truncate(0)\r\n#printing the welcome message onto console\r\nprint(\" This is a QA system by Team 1. It will try to answer questions that start with Who, What, When or Where. Enter exit to leave the program.\")\r\nwhile(flag==True):\r\n #taking the input to user_response variable\r\n user_response = input()\r\n user_response_input=user_response\r\n user_response=user_response.lower()\r\n #if the user enters exit the program enters into this loop\r\n if(user_response!='exit'):\r\n #writing the question to logfile\r\n mylogfile.write(\"Question\" + \":\\n\"+ user_response_input+ \"\\n\")\r\n #printing the answer to console\r\n print(response(user_response))\r\n else:\r\n flag=False\r\n #printing the thanks message to console when the user enters exit\r\n print(\"Thank you! 
GoodBye\")","sub_path":"qa-system_gundabolu.py","file_name":"qa-system_gundabolu.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"209434546","text":"x = 0\nfinalBMIGrades = []\nBMIGrades = [\"under\",\"normal\",\"over\",\"obese\"]\n\ndef CalculateBMI(personData):\n\t\tBMI = personData[0]/(personData[1]**2)\n\t\tif BMI >= 30.0:\n\t\t\treturn BMIGrades[3]\n\t\telif BMI >= 25.0:\n\t\t\tif BMI < 30.0:\n\t\t\t\treturn BMIGrades[2]\n\t\telif BMI >= 18.5:\n\t\t\tif BMI < 25.0:\n\t\t\t\treturn BMIGrades[1]\n\t\telif BMI < 18.5:\n\t\t\treturn BMIGrades[0]\n\n\nnumPeople = int(input())\nwhile x < numPeople:\n\tnumList = input().split()\n\tpersonData = list(map(float, numList))\n\tfinalBMIGrades.append(CalculateBMI(personData))\n\tx += 1\n\n\n#for printing the final result\nfor i in finalBMIGrades:\n\tprint(i, end= \" \")","sub_path":"BMI.py","file_name":"BMI.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"215711017","text":"import json\nfrom bqconnection import bqdata\n\ndef testquery():\n sql = \"\"\"\n SELECT * FROM `itamarapp.Fraud_detection_example.FD_example_view` LIMIT 10\n \"\"\"\n df = bqdata(sql)\n jsn = df.to_json(orient='records')\n parsed = json.loads(jsn)\n\n return parsed\n\n","sub_path":"server/queries.py","file_name":"queries.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"421974272","text":"\n\n#calss header\nclass _BREADTH():\n\tdef __init__(self,): \n\t\tself.name = \"BREADTH\"\n\t\tself.definitions = [u'the distance from one side to another: ', u'the fact of including many different things, features, subjects, or qualities: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_breadth.py","file_name":"_breadth.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"577981193","text":"import traceback\nfrom flask import Flask, request, jsonify\nimport json\nfrom testing import predict\n\napp = Flask(__name__)\n\n@app.route('/api/classify_products', methods=['POST'])\ndef article_recommend():\n if request.method == 'POST':\n try:\n data = json.loads(request.data)\n title = data.get('title')\n text = data.get('text')\n response = predict(title=title, text=text)\n\n except Exception as ex:\n response = {\n 'error': 'exception returning categories - {0}'.format(str(traceback.format_exc())),\n 'res': []}\n else:\n response = {'res': \"request not sent using POST\"}\n\n\n return jsonify(**{'response':response})\n\n\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=7000)\n\n\n","sub_path":"TF_TF_NODE_based/Flask.py","file_name":"Flask.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"646075613","text":"# Create your views here.\nfrom django.http import HttpResponse\nfrom wechat.official import WxRequest, WxTextResponse\nfrom wechat.official import WxClient\n\n\ndef wechat(request):\n cli = WxClient(\"your official account token\")\n ret = cli.is_valid_params(request.GET) # validate the request\n if not 
ret:\n return HttpResponse('invalid request')\n if request.method == 'GET':\n return HttpResponse(ret[1]) # for interface validation\n else:\n req = WxRequest(request.body)\n if req.MsgType == 'text':\n return HttpResponse(WxTextResponse(req.Content, req).as_xml())\n elif req.MsgType == 'event' and req.Event == 'subscribe':\n return HttpResponse(WxTextResponse('welcome to echo bot!',\n req).as_xml())\n else:\n return HttpResponse(WxTextResponse('support text only',\n req).as_xml())\n","sub_path":"demo/echo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"468170824","text":"import sys, time\n\nfrom PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QListWidget, QGridLayout\n\n\nclass WinForm(QWidget):\n \"\"\"主窗口\"\"\"\n def __init__(self, parent=None):\n super().__init__(parent)\n self.setWindowTitle(\"processEvents 使用\")\n\n self.list_file = QListWidget()\n self.button_start = QPushButton(\"开始\")\n \n grid_layout = QGridLayout()\n grid_layout.addWidget(self.list_file, 0, 0, 1, 2) # 0行0列, 占1行, 占2列\n grid_layout.addWidget(self.button_start, 1, 1) # 1行1列\n self.setLayout(grid_layout)\n\n self.button_start.clicked.connect(self.slot_add)\n \n def slot_add(self):\n for n in range(10):\n str_n = \"file index{}\".format(n)\n self.list_file.addItem(str_n)\n QApplication.processEvents()\n time.sleep(2)\n\n\nif __name__ == \"__main__\":\n\n app = QApplication(sys.argv)\n win = WinForm()\n win.show()\n sys.exit(app.exec())","sub_path":"03_高级界面控件/示例内容/09_6_事件处理processEvents()手动刷新页面.py","file_name":"09_6_事件处理processEvents()手动刷新页面.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"593089736","text":"import intrinsic_value_calculator as ivc\nimport unittest\n\n\nclass TestIntrinsicValueCalculator(unittest.TestCase):\n def setUp(self):\n self.stock = \"FB\"\n self.operational_cash_flow = 27956 * 1e6\n self.total_debt = 0\n self.cash_on_hand = 42309 * 1e6\n self.growth_rate_5Y = 22.2 / 100\n self.growth_rate_after_5Y = 22.2 / 100\n self.beta = 0.5\n self.no_shares = 2894.6 * 1e6\n\n def test_projected_growth(self):\n actual_projected_growths = [34162.23, 41746.25, 51013.91, 62339.00, 76178.26, 93089.84, 113755.78, 139009.56,\n 169869.69, 207580.76]\n projected_growth = ivc.get_projected_cash_flow(\n self.operational_cash_flow, self.growth_rate_5Y, self.growth_rate_after_5Y)\n for i in range(len(actual_projected_growths)):\n p = projected_growth[i]\n q = actual_projected_growths[i] * 1e6\n self.assertTrue(abs(round(p, 2) - q) <= 4500)\n\n def test_discount_rate_from_beta(self):\n actual_discount_rate = 0.05\n discount_rate = ivc.get_discount_from_beta(self.beta)\n self.assertEqual(discount_rate, actual_discount_rate)\n\n def test_discounted_rates(self):\n actual_discount_rates = [0.95, 0.91, 0.86, 0.82, 0.78, 0.75, 0.71, 0.68, 0.64, 0.61]\n discounted_rates = ivc.calculate_discount_rates(\n ivc.get_discount_from_beta(self.beta))\n for i in range(len(actual_discount_rates)):\n p = actual_discount_rates[i]\n q = discounted_rates[i]\n self.assertTrue(abs(round(p, 2) - q) <= 0.01)\n\n def test_discounted_cash(self):\n actual_discounted_values = [32535.46, 37865.08, 44067.74, 51286.45, 59687.66, 69465.07, 80844.11, 94087.14,\n 109499.51, 127436.58]\n projected_growth = ivc.get_projected_cash_flow(\n self.operational_cash_flow, self.growth_rate_5Y, self.growth_rate_after_5Y)\n 
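# derive the per-year discount factors from beta via the same helpers the calculator itself uses\n 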
discounted_rates = ivc.calculate_discount_rates(ivc.get_discount_from_beta(self.beta))\n discounted_cash_flows = ivc.calculate_discounted_values(\n projected_growth, discounted_rates)\n\n for i in range(len(actual_discounted_values)):\n p = discounted_cash_flows[i]\n q = actual_discounted_values[i] * 1e6\n self.assertTrue(abs(round(p, 2) - q) <= 4500)\n\n def test_intrinsic_value_calculation(self):\n actual_intrinsic_value = 258.79\n projected_growth = ivc.get_projected_cash_flow(\n self.operational_cash_flow, self.growth_rate_5Y, self.growth_rate_after_5Y)\n discounted_rates = ivc.calculate_discount_rates(\n ivc.get_discount_from_beta(self.beta))\n discounted_cash_flows = ivc.calculate_discounted_values(\n projected_growth, discounted_rates)\n\n intrinsic_value = ivc.calculate_intrinsic_value(sum(discounted_cash_flows), self.no_shares, self.total_debt,\n self.cash_on_hand)\n\n p = intrinsic_value[\"Intrinsic Value\"]\n q = actual_intrinsic_value\n self.assertTrue(abs(round(p, 2) - q) <= 2)\n","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"255419400","text":"# -*- coding: utf-8 -*-\nfrom unittest import TestCase\n\nimport requests\n\nfrom siteparser.parser import Parser\n\n\nclass TestRSS(TestCase):\n\n def test_telega_in(self):\n parser = Parser()\n parser = parser.html('https://telega.in/orders/new')\n # # import ipdb; ipdb.set_trace()\n # for subj_parser in parser.foreach('//select[@id=\"channel_theme\"]/option'):\n # subj_parser = subj_parser.take(subject='.')\n # subject_id = subj_parser.get('./@value')\n # if not subject_id:\n # continue\n # page = 1\n # print('subj_id={}'.format(subject_id))\n # while True:\n # print(page)\n # channels_json = requests.get(\n # 'https://telega.in/orders/new.json?theme={}&page={}'.format(subject_id, page)\n # ).json()\n # channels_html = channels_json['html']\n # print(channels_html)\n # channels_parser = subj_parser.html(channels_html)\n # for channel_parser in channels_parser.foreach('//tr'):\n # channel_parser = channel_parser.take(\n # url='./td[1]/a/@href',\n # cost='.//span[@class=\"cost\"]'\n # )\n # print(channel_parser.document)\n # if channels_json['show_more_btn']:\n # page += 1\n # else:\n # break\n\n def test_tg_channel(self):\n parser = Parser()\n parser = parser.html('https://t.me/crypto_world_nevs')\n parser = parser.take(\n title='//div[@class=\"tgme_page_title\"]',\n members_text='//div[@class=\"tgme_page_extra\"]',\n description='//div[@class=\"tgme_page_description\"]',\n photo='//img[@class=\"tgme_page_photo_image\"]/@src'\n )\n print(parser.document)\n\n def test_get(self):\n parser = Parser()\n parser = parser.html('https://t.me/tvkinoradio/416?embed=1')\n self.assertEquals(\n parser.get('//div[@class=\"tgme_widget_message_link\"]'),\n 't.me/tvkinoradio/416'\n )\n self.assertSequenceEqual(\n parser.get_list('//div[@class=\"tgme_widget_message_text\"]//a/@href'),\n ['https://t.me/jump_cut', 'https://t.me/jump_cut']\n )\n\n def test_exists(self):\n parser = Parser()\n parser = parser.html('https://t.me/elonmusknewsru/1?embed=1')\n self.assertFalse(parser.exists('//div[@class=\"tgme_widget_message_error\" and text()=\"Post not found\"]'))\n parser = parser.html('https://t.me/elonmusknewsru/100500?embed=1')\n self.assertTrue(parser.exists('//div[@class=\"tgme_widget_message_error\" and text()=\"Post not 
found\"]'))\n\n\n","sub_path":"tests/test_web.py","file_name":"test_web.py","file_ext":"py","file_size_in_byte":2784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"394285339","text":"'''\n\n astopa\n\n dynamo interaction file\n\n'''\n\ndef convert_to_dynamo(data):\n\n converted_data = {}\n\n # --- Loop through the highest level\n for i in data:\n # if the data is a string, create the object and put the data to a string\n # if it's not a string then we gotta loop again\n # this method is then repeated for every subsequent loop\n if type(data[i]) is str:\n converted_data[i] = str(data[i])\n else:\n converted_data[i] = {}\n for j in data[i]:\n if type(data[i][j]) is str:\n converted_data[i][j] = str(data[i][j])\n elif type(data[i][j]) is list:\n count = 0\n converted_data[i][j] = []\n for x in data[i][j]:\n lo = {}\n for y in data[i][j][count]:\n lo[y] = str(data[i][j][count][y])\n count += 1\n converted_data[i][j].append(lo)\n elif type(data[i][j]) is dict:\n converted_data[i][j] = {}\n for k in data[i][j]:\n if type(data[i][j][k]) is str:\n converted_data[i][j][k] = str(data[i][j][k])\n elif type(data[i][j][k]) is int:\n converted_data[i][j][k] = str(data[i][j][k])\n elif type(data[i][j][k]) is float:\n converted_data[i][j][k] = str(data[i][j][k])\n\n return converted_data\n\n\ndef convert_from_dynamo(data):\n print('converting---------------------------')\n\n converted_data = {}\n\n for i in data:\n if type(data[i]) is str:\n converted_data[i] = data[i]\n else:\n converted_data[i] = {}\n for j in data[i]:\n if type(data[i][j]) is str:\n converted_data[i][j] = data[i][j]\n\n elif type(data[i][j]) is list:\n count = 0\n converted_data[i][j] = []\n for x in data[i][j]:\n lo = {}\n for y in data[i][j][count]:\n try:\n a = int(data[i][j][count][y])\n lo[y] = a\n except Exception as a:\n lo[y] = data[i][j][count][y]\n count += 1\n converted_data[i][j].append(lo)\n\n elif type(data[i][j]) is dict:\n converted_data[i][j] = {}\n for k in data[i][j]:\n try:\n a = int(data[i][j][k])\n converted_data[i][j][k] = a\n except Exception as a:\n try:\n b = float(data[i][j][k])\n converted_data[i][j][k] = b\n except Exception as b:\n converted_data[i][j][k] = data[i][j][k]\n\n return converted_data\n\ndef convert_output_to_dynamo(data):\n print('-------------------- converting')\n # print(data)\n \n converted_data = {}\n for i in data:\n if type(data[i]) is str:\n converted_data[i] = data[i]\n elif type(data[i]) is dict:\n # print(' type:')\n # print(type(data[i]))\n converted_data[i] = {}\n for j in data[i]:\n # print(str(type(data[i][j])))\n # print(data[i][j])\n if type(data[i][j]) is int:\n converted_data[i][j] = str(data[i][j])\n # --- type is numpy.float?\n # --- string it? 
I actually don't know what to do here\n                elif type(data[i][j]) is float:\n                    converted_data[i][j] = str(data[i][j])\n                elif str(type(data[i][j])) == \"\":\n                    converted_data[i][j] = str(data[i][j])\n                elif type(data[i][j]) is dict:\n                    # initialise the nested dict before filling it (was a KeyError)\n                    converted_data[i][j] = {}\n                    for k in data[i][j]:\n                        converted_data[i][j][k] = str(data[i][j][k])\n                else:\n                    print('oh fuck') \n                    # TODO || type is returning error because of NaN\n                    # FIXME || plz\n                    print(converted_data[i])\n        else:\n            # print(data[i])\n            rowCount = 0\n            headers = []\n            converted_data[i] = []\n\n            for row in data[i]:\n                # print(row)\n                if rowCount == 0:\n                    headers = row\n                else:\n                    rowObject = {}\n                    colCount = 0\n                    for col in data[i][rowCount]:\n                        rowObject[headers[colCount]] = str(data[i][rowCount][colCount])\n                        colCount += 1\n                    converted_data[i].append(rowObject)\n                rowCount += 1\n\n    print('--- end of dynamo conversion')\n    print(converted_data)\n    return converted_data","sub_path":"lambda_mgmt/dev_/run_plan/dynamoInteraction.py","file_name":"dynamoInteraction.py","file_ext":"py","file_size_in_byte":5132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"277852439","text":"# coding=utf-8\nfrom utils import FifoList, BoundedPriorityQueue\nfrom models import (SearchNode, SearchNodeHeuristicOrdered,\n                    SearchNodeStarOrdered, SearchNodeCostOrdered,\n                    SearchNodeValueOrdered)\nimport copy\nimport math\nimport random\nfrom itertools import count\n\n\ndef breadth_first_search(problem, graph_search=False):\n    return _search(problem,\n                   FifoList(),\n                   graph_search=graph_search)\n\n\ndef depth_first_search(problem, graph_search=False):\n    return _search(problem,\n                   [],\n                   graph_search=graph_search)\n\n\ndef limited_depth_first_search(problem, depth_limit, graph_search=False):\n    return _search(problem,\n                   [],\n                   graph_search=graph_search,\n                   depth_limit=depth_limit)\n\n\ndef iterative_limited_depth_first_search(problem, graph_search=False):\n    return _iterative_limited_search(problem,\n                                     limited_depth_first_search,\n                                     graph_search=graph_search)\n\n\ndef uniform_cost_search(problem, graph_search=False):\n    return _search(problem,\n                   BoundedPriorityQueue(),\n                   graph_search=graph_search,\n                   node_factory=SearchNodeCostOrdered)\n\n\ndef greedy_search(problem, graph_search=False):\n    return _search(problem,\n                   BoundedPriorityQueue(),\n                   graph_search=graph_search,\n                   node_factory=SearchNodeHeuristicOrdered)\n\n\ndef astar_search(problem, graph_search=False):\n    return _search(problem,\n                   BoundedPriorityQueue(),\n                   graph_search=graph_search,\n                   node_factory=SearchNodeStarOrdered)\n\n\ndef beam_search(problem, beam_size=100):\n    fringe = BoundedPriorityQueue(beam_size)\n    fringe.append(SearchNodeValueOrdered(state=problem.initial_state,\n                                         problem=problem))\n    while fringe:\n        successors = BoundedPriorityQueue(beam_size)\n        for node in fringe:\n            if problem.is_goal(node.state):\n                return node\n            successors.extend(node.expand())\n        fringe = successors\n\n\ndef beam_search_best_first(problem, beam_size=100, graph_search=False, node_filter=None):\n    return _search(problem,\n                   BoundedPriorityQueue(beam_size),\n                   node_factory=SearchNodeValueOrdered,\n                   local_search=True)\n\n\ndef hill_climbing(problem, graph_search=False, node_filter=None):\n    return beam_search_best_first(problem,\n                                  beam_size=1,\n                                  graph_search=graph_search,\n                                  node_filter=node_filter)\n\n\ndef _filter_random_uphill_neighbor(problem, node, childs):\n    neighbor = None\n    is_uphill = lambda x: problem.value(x.state) > problem.value(node.state)\n    uphill = list(filter(is_uphill, childs))\n    if uphill:\n        random.shuffle(uphill)\n        neighbor = uphill[0]\n    return [neighbor, 
]\n\n\ndef hill_climbing_stochastic(problem, graph_search=False):\n '''Stochastic hill climbing, where a random neighbor is chosen among\n those that have a better value'''\n return hill_climbing(problem,\n graph_search=graph_search,\n node_filter=_filter_random_uphill_neighbor)\n\n\ndef _filter_first_choice_random(problem, node, childs):\n neighbor = None\n eligible = copy.copy(childs)\n current_value = problem.value(node.state)\n while eligible:\n candidate = eligible.pop()\n if problem.value(candidate.state) > current_value:\n neighbor = candidate\n break\n return [neighbor, ]\n\n\ndef hill_climbing_first_choice(problem, graph_search=False):\n '''First-choice hill climbing, where neighbors are randomly taken and the\n first with a better value is chosen'''\n return hill_climbing(problem,\n graph_search=graph_search,\n node_filter=_filter_first_choice_random)\n\n\n# Quite literally copied from aima\ndef simulated_annealing(problem, schedule=None):\n if not schedule:\n schedule = _exp_schedule()\n current = SearchNode(problem.initial_state,\n problem=problem)\n for t in count():\n T = schedule(t)\n if T == 0:\n return current\n neighbors = current.expand()\n if not neighbors:\n return current\n succ = random.choice(neighbors)\n delta_e = problem.value(succ.state) - problem.value(current.state)\n if delta_e > 0 or random.random() < math.exp(delta_e / T):\n current = succ\n\n\ndef _iterative_limited_search(problem, search_method, graph_search=False):\n solution = None\n limit = 0\n\n while not solution:\n solution = search_method(problem, limit, graph_search)\n limit += 1\n\n return solution\n\n\ndef _search(problem, fringe, graph_search=False, depth_limit=None,\n node_factory=SearchNode, local_search=False, node_filter=None):\n memory = set()\n fringe.append(node_factory(state=problem.initial_state,\n problem=problem))\n\n while fringe:\n node = fringe.pop()\n if problem.is_goal(node.state):\n return node\n if depth_limit is None or node.depth < depth_limit:\n childs = []\n for n in node.expand():\n if graph_search:\n if n.state not in memory:\n memory.add(n.state)\n childs.append(n)\n else:\n childs.append(n)\n\n if node_filter:\n childs = node_filter(problem, node, childs)\n\n for n in childs:\n fringe.append(n)\n\n\n# Math literally copied from aima-python\ndef _exp_schedule(k=20, lam=0.005, limit=100):\n \"One possible schedule function for simulated annealing\"\n def f(t):\n if t < limit:\n return k * math.exp(-lam * t)\n return 0\n return f\n","sub_path":"simple_ai/methods.py","file_name":"methods.py","file_ext":"py","file_size_in_byte":6071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"399311267","text":"#3.7.X\r\n#getList --> get first 1000 proteinfilenames\r\n#downloadFiles --> itterate over list, download and unzip files\r\n#saveFilesInDB --> etablish connection to DB, create Table, iterate over list\r\n #cut datatype of proteinnames, iterate over file and\r\n #check first word in line\r\n #Header --> save in header when the data is no date\r\n #Title --> save all data which is no digit\r\n #Compnd --> save all data after CHAIN\r\n #Atom --> save first 3 float numbers, if float numbers are concatenede with \"-\" instead of \" \" those are splitted\r\n #pdbID --> is last value of header\r\n #INSERT values into Table\r\n#printProteins --> select all atom coordinates of proteins that contain a '5'\r\n\r\nimport gzip\r\nimport os\r\nimport urllib.request\r\nimport re\r\nimport sqlite3\r\n\r\n\r\ndef getList(l):\r\n f = 
open(\"ftpSourceView.txt\",\"r\")\r\n file_count = 0\r\n while(file_count < 1000):\r\n i = f.readline()\r\n pattern = re.compile(r\"\\\"(.*)\\\".*\")\r\n result = pattern.search(i)\r\n if result != None:\r\n l.append(result.group(1))\r\n file_count = file_count +1\r\n return l\r\ndef downloadFiles():\r\n l = list()\r\n getList(l)\r\n for name in l:\r\n \r\n f = urllib.request.urlretrieve('ftp://ftp.wwpdb.org/pub/pdb/data/structures/all/pdb/' + name ,name)\r\n \r\n zipFile = gzip.open(os.getcwd() + \"\\\\\" + name ,\"rb\")\r\n\r\n print(str(name)[:-7])\r\n unCompressedFile = open(os.getcwd() + \"\\\\\" + name[:-7],\"wb\")\r\n\r\n decoded = zipFile.read()\r\n\r\n unCompressedFile.write(decoded)\r\n\r\n zipFile.close()\r\n\r\n unCompressedFile.close()\r\n\r\ndef saveFilesInDB():\r\n l = list()\r\n getList(l)\r\n conn = sqlite3.connect(\"protein.db\")\r\n cur = conn.cursor()\r\n cur.execute(\"CREATE TABLE if not exists protein(Id, header, title, compnd_chain, atom_x, atom_y, atom_z)\")\r\n for protein in l:\r\n pattern = re.compile(r\"(.*)\\..*\\..*\")\r\n result = pattern.search(protein)\r\n prot = result.group(1)\r\n f = open(prot, \"r\")\r\n header = \"\"\r\n title = \"\"\r\n compnd = \"\"\r\n coord = [0.0,0.0,0.0]\r\n for i in f:\r\n if str(i)[:6] == \"HEADER\":\r\n words = i.split(\" \")\r\n for j in words:\r\n pattern = re.compile(r\"\\d\\d-[A-Z]*-\\d\\d\")\r\n \r\n if j != \"\" and j != \"\\n\" and j != \"HEADER\" and pattern.search(str(j)) == None:\r\n header = header + \" \" +j\r\n if str(i)[:5] == \"TITLE\":\r\n words = i.split(\" \")\r\n for j in words:\r\n pattern = re.compile(r\"\\d\")\r\n if j != \"\" and pattern.search(str(j)) == None and j != 'TITLE':\r\n title = title + \" \" + j\r\n if str(i)[:6] == \"COMPND\":\r\n save = False\r\n words = i.split(\" \")\r\n for j in words:\r\n if save:\r\n if j != \"\":\r\n compnd = compnd + \" \" + j\r\n else:\r\n save = False\r\n if str(j).find(\"CHAIN\") != -1:\r\n save = True\r\n if str(i)[:4] == \"ATOM\":\r\n words = i.split(\" \")\r\n count = 0\r\n for j in words:\r\n pattern = re.compile(r\"[\\d]+\\.[\\d]+$\")\r\n if pattern.search(str(j)) != None and count < 3:\r\n if str(j).find(\"-\",1) != -1:\r\n print(j)\r\n yz = str(j).split(\"-\")\r\n coord[count] = float(coord[count]) + float(yz[0])\r\n count = count +1\r\n coord[count] = float(coord[count]) + float(yz[1])\r\n else:\r\n coord[count] = float(coord[count]) + float(str(j))\r\n count = count +1\r\n f.close()\r\n pdbID = header.split()[-1]\r\n \r\n cur.execute(\"INSERT INTO protein VALUES(?,?,?,?,?,?,?)\", (pdbID, header, title, compnd, str(coord[0]), str(coord[1]), str(coord[2])))\r\n cur.close()\r\n conn.commit()\r\ndef printProteins():\r\n conn = sqlite3.connect(\"protein.db\")\r\n cur = conn.cursor()\r\n cur.execute(\"SELECT atom_x, atom_y, atom_z FROM protein WHERE Id LIKE '%5%'\")\r\n rows = cur.fetchall()\r\n for row in rows:\r\n print(row)\r\n cur.close()\r\n conn.commit()\r\nprintProteins()\r\n","sub_path":"3.Semester/Skriptsprachen/Übung/3/3_1.py","file_name":"3_1.py","file_ext":"py","file_size_in_byte":4636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"622027474","text":"'''convert_to_coreml.py\n\nThis function converts a keras model that was trained using this repo to CoreML\n'''\nimport tensorflow as tf \nimport coremltools\nimport argparse\nimport keras\nimport numpy as np\nimport cv2\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nfrom model.utils import _preprocess_numpy_input\n\n# 
print(dir(tf.keras.applications.mobilenet.MobileNet))\nfrom tensorflow.python.keras.applications import mobilenet\n# print(dir(mobilenet))\n# from keras.applications.mobilenet import DepthwiseConv2D# Tensorflow 1.5 does not have built in\n# 2.01b version\n\nparser=argparse.ArgumentParser()\n# model_path\nparser.add_argument(\"--model_path\",default=\"experiment/test/best_weights/after-epoch-1/model_acc_0.8611111111111112.h5\",\n help=\"path to train .h5 keras model\")\n# author\nparser.add_argument(\"--author\",default=\"Andrew Mendez\",\n help=\"Name of Author that will be saved in CoreML model\")\n# license\nparser.add_argument(\"--license\",default='Copyright @Andrew Mendez 2018')\n# name_of_coreml_model\nparser.add_argument(\"--name_of_coreml_model\",default='Hand_Sign_Recognition.mlmodel',\n help=\"Name of CoreML model\")\n\ndef model_fn(NUM_CAT=6):\n input = keras.layers.Input(shape=(224,224,3))\n m = keras.applications.MobileNet(include_top=False,weights=None)\n # m.summary() \n\n x = m(input)# (None, 7, 7, 1024)\n x = keras.layers.GlobalAveragePooling2D()(x)\n # x = tf.keras.layers.Dense(512,activation=tf.nn.relu)(x)\n # x = tf.keras.layers.Dense(NUM_CAT,activation=tf.nn.softmax)(x)\n x = keras.layers.Reshape((1, 1, int(1024)), name='reshape_1')(x)\n x = keras.layers.Dropout(1e-3, name='dropout')(x)\n x = keras.layers.Conv2D(NUM_CAT, (1, 1),\n padding='same',\n name='conv_preds')(x)\n x = keras.layers.Activation('softmax', name='act_softmax')(x)\n x = keras.layers.Reshape((NUM_CAT,), name='reshape_2')(x)\n model = keras.models.Model(inputs=input,outputs=x)\n return model\ndef load_model(model_path):\n # print(dir(tf.keras.applications.mobilenet.MobileNet))\n # tf.keras.models.mobilenet.re\n # with keras.utils.generic_utils.CustomObjectScope({'relu6': keras.applications.mobilenet.relu6,'DepthwiseConv2D': keras.applications.mobilenet.DepthwiseConv2D}):\n # with tf.keras.utils.CustomObjectScope(custom_objects={ 'relu6': tf.keras.applications.mobilenet.relu6, 'DepthwiseConv2D': mobilenet.DepthwiseConv2D}):\n \n # \n model2 = tf.keras.models.load_model(model_path, custom_objects={ 'relu6': mobilenet.relu6, 'DepthwiseConv2D': mobilenet.DepthwiseConv2D})\n model=model_fn()# Keras Model!\n #\n model.load_weights(model_path)# Transfering weights from tf.keras to keras\n\n return model\ndef preprocess_for_keras_model(image,data_format):\n '''\n Function handles resizing, preprocessing, and expanding dimension for keras model\n '''\n image = cv2.resize(image,dsize=(224,224))\n image = image.astype(np.float32)\n image = _preprocess_numpy_input(image,data_format,'tf')\n image = image[np.newaxis,...]# keras expect 4D tensor\n return image\n\n\n\nif __name__=='__main__':\n args = parser.parse_args()\n \n keras_model = load_model(args.model_path)\n keras_model.summary()\n fcn_mlmodel = coremltools.converters.keras.convert(\n keras_model,\n input_names = 'image',\n image_input_names = 'image',\n output_names = 'class_label',\n class_labels=[\"0\",\"1\",\"2\",\"3\",\"4\",\"5\"],\n image_scale = 1/127.5,\n red_bias=-1.0,\n blue_bias=-1.0,\n green_bias=-1.0\n )\n fcn_mlmodel.author =args.author\n fcn_mlmodel.license=args.license\n fcn_mlmodel.short_description=\"Outputs Hand Sign class given input image\"\n fcn_mlmodel.input_description['image']=\"Image size (224,224,3)\"\n fcn_mlmodel.output_description['class_label']=\" Class label 0-5\"\n fcn_mlmodel.save(args.name_of_coreml_model)\n\n model = coremltools.models.MLModel(\"Hand_Sign_Recognition_224_0.85.mlmodel\")\n img 
=Image.open(\"/Users/andrewmendez1/Documents/Hand Sign Recognition/data/224x224_SIGNS/dev_signs/0_IMG_5864.jpg\")\n\n # data_format = tf.keras.backend.image_data_format()\n # img = preprocess_for_keras_model(img,data_format)\n \n '''\n https://apple.github.io/coremltools/generated/coremltools.models.MLModel.html\n\n 'has_key', 'items', 'iteritems', 'iterkeys', 'itervalues', 'keys', 'pop',\n 'popitem', 'setdefault', 'update', 'values', 'viewitems', 'viewkeys', 'viewvalues'\n \n Properties:\n {\n classLabel:\n class_label:{...confidence scores}\n }\n '''\n #ToDo(Andrew): Make sure this model correctly predicts image from training set\n res = model.predict({\"image\":img})\n\n img = np.asarray(img)\n plt.imshow(img)\n plt.show()\n print(res[\"classLabel\"], res[\"class_label\"][res[\"classLabel\"]])\n\n","sub_path":"convert_to_coreml.py","file_name":"convert_to_coreml.py","file_ext":"py","file_size_in_byte":4878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"324711798","text":"#Deepcopy와 재귀 이용\nimport copy\n\nfishes = [[-1, -1]] + [[0, 0] for _ in range(16)]\ndx = [-1, -1, 0, 1, 1, 1, 0, -1]\ndy = [0, -1, -1, -1, 0, 1, 1, 1]\n\ndata = [[0] * 4 for _ in range(4)]\n\nfor i in range(4):\n temp = list(map(int, input().split()))\n for j in range(4):\n data[i][j] = [temp[j * 2], temp[j * 2 + 1] - 1]\n\n\ndef find_fish(data, index):\n for i in range(4):\n for j in range(4):\n if data[i][j][0] == index:\n return (i, j)\n return None\n\ndef turn_left(direc):\n return (direc + 1) % 8\n\ndef move_fish(sx, sy, data):\n for i in range(1, 17):\n pos = find_fish(data, i) #i번째 물고기의 위치\n if pos != None:\n x, y, direc = pos[0], pos[1], data[pos[0]][pos[1]][1]\n for j in range(8):\n nx = x + dx[direc]\n ny = y + dy[direc]\n if 0 <= nx < 4 and 0 <= ny< 4:\n if not(nx == sx and ny == sy):\n data[x][y][1] = direc\n data[x][y], data[nx][ny] = data[nx][ny], data[x][y]\n break\n direc = turn_left(direc)\n\n\ndef move_shark(sx, sy, data):\n q = []\n direc = data[sx][sy][1]\n for i in range(4):\n sx += dx[direc]\n sy += dy[direc]\n if sx < 0 or sx >= 4 or sy < 0 or sy >= 4:\n continue\n if data[sx][sy][0] != -1:\n q.append((sx, sy))\n return q\n\nresult = 0\ndef solve(sx, sy, data, score):\n global result\n data = copy.deepcopy(data)\n score += data[sx][sy][0]\n data[sx][sy][0] = -1\n move_fish(sx, sy, data)\n q = move_shark(sx, sy, data)\n if len(q) == 0:\n result = max(score, result)\n return\n for x, y in q:\n solve(x, y, data, score)\n\nsolve(0, 0, data, 0)\nprint(result)","sub_path":"DFS BFS/[Q47]청소년 상어.py","file_name":"[Q47]청소년 상어.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"29180670","text":"#!/usr/bin/env python\n# vim: ai ts=4 sts=4 et sw=4\n\n\nimport re\nimport rapidsms\nfrom rapidsms.parsers import Matcher\nfrom persistance.models import *\nfrom models import *\nfrom locations.models import *\nfrom tags.models import *\nfrom people.models import *\nfrom rwanda.models import *\nfrom rwanda.utils import *\n\n\nclass App(rapidsms.App):\n def parse(self, msg):\n msg.text = msg.text.replace(\".\", \" \")\n\n def handle(self, msg):\n if msg.text.strip() == \"\":\n msg.error(\"Your message was empty. 
You must enter some text.\")\n return True\n\n def catch(self, msg):\n if not msg.responses:\n msg.error(\"Sorry, we could not understand that message.\")\n return True\n\n\nclass Appx(object):\n MSG = {\n \"en\": {\n \"bad-alias\": \"Sorry, I don't know anyone by that name.\",\n \"first-login\": \"Nice to meet you, %(name)s. Your alias is %(alias)s.\",\n \"login\": \"Hello, %(name)s. It has been %(days)d days since I last heard from you.\",\n \"reminder\": \"I think you are %(name)s.\",\n \"dont-know\": \"Please register your phone with RapidSMS.\",\n \"list\": \"I have %(num)d %(noun)s: %(items)s\",\n \"empty-list\": \"I don't have any %(noun)s.\",\n \"lang-set\": \"I will now speak to you in English, where possible.\",\n \"denied\": \"Sorry, you must identify yourself before you can do that.\",\n \"disabled\": \"Sorry, but that functionality is disabled.\" },\n\n # worst german translations _ever_\n # just an example. all of this stuff\n # should be moved to an i18n app!\n \"de\": {\n \"bad-alias\": \"Tut mir leit, ich weiss nicht diesen Namen\",\n \"first-login\": \"%(name)s hallo! Ich habe nicht gesehen, bevor Sie\",\n \"login\": \"%(name)s hallo! Ich habe nicht gesehen, Sie sich fur %(days)d Tag\",\n \"reminder\": \"Sie sind %(name)s.\",\n \"lang-set\": \"Sie sind Deutsche.\" }}\n\n HELP = [\n (\"identify\", \"To identify yourself to RapidSMS, reply: IDENTIFY \")\n ]\n\n datesep = r\"(\\.|\\/|\\\\|\\-)\"\n date = r\"\\d\\d?\"\n month = r\"\\d\\d?\"\n year = r\"\\d{2}(\\d{2})?\"\n datepattern = r\"^\\d\\d?[\\.|\\/|\\\\|\\-]\\d\\d?[\\.|\\/|\\\\|\\-]\\d{2}(\\d{2})?$\"\n \n def __str(self, key, reporter=None, lang=None):\n\n # if no language was explicitly requested,\n # inherit it from the reporter, or fall\n # back to english. because everyone in the\n # world speaks english... right?\n if lang is None:\n if reporter is not None:\n lang = reporter.language\n\n # fall back\n if lang is None:\n lang = \"en\"\n\n # look for an exact match, in the language\n # that the reporter has chosen as preferred\n if lang is not None:\n if lang in self.MSG:\n if key in self.MSG[lang]:\n return self.MSG[lang][key]\n\n # not found in localized language. try again in english\n # TODO: allow the default to be set in rapidsms.ini\n return self.__str(key, lang=\"en\") if lang != \"en\" else None\n\n\n def __deny(self, msg):\n \"\"\"Responds to an incoming message with a localizable\n error message to instruct the caller to identify.\"\"\"\n return msg.respond(self.__str(\"denied\", msg.reporter))\n\n\n # def configure(self, allow_join, allow_list, **kwargs):\n # self.allow_join = allow_join\n # self.allow_list = allow_list\n\n\n def handle(self, msg):\n matcher = Matcher(msg)\n\n # TODO: this is sort of a lightweight implementation\n # of the keyworder. it wasn't supposed to be. 
maybe\n        # replace it *with* the keyworder, or extract it\n        # into a parser of its own\n        map = {\n            \"registerChild\": [\"(?:born) (whatever)\"], \n            \"registerMother\": [\"(?:preg) (whatever)\"], \n            \"reporterChild\": [\"(?:crep|mrep) (whatever)\"],\n            \"reporterMother\": [\"(?:crep|mrep) (whatever)\"]\n        }\n        self.info(\"Entered mother\")\n        # if the user is unidentified, don't add pregnancies or births.\n        \n        \n        # search the map for a match, dispatch\n        # the message to it, and return/stop\n        for method, patterns in map.items():\n            if matcher(*patterns) and hasattr(self, method): \n                getattr(self, method)(msg, *matcher.groups)\n                return True\n\n        # no matches, so this message is not\n        # for us; allow processing to continue\n        return False\n    \n    def parse_person(self,msg,text):\n        allwords = text.split()\n        tagsfound = []\n        person = Person()\n        # Find all occurrences of the tags/codes and save them, or send alerts. \n        for word in allwords:\n            if len(word)==2 and Tag.objects.filter(code__iexact=word).count():\n                tagsfound.append(word)\n        \n        setattr(person,'tags',tagsfound) \n        \n        if len(allwords) < 1:\n            msg.respond(\"missing national id\")\n            return None\n        \n        # Determine if the word is a National id \n        m = re.match(r\"^(\\d+)$\", allwords[0], re.IGNORECASE)\n        if m is not None:\n            MatchCode = m.group(0)\n            setattr(person,'uniqueid', MatchCode) \n        else:\n            msg.respond(\"missing or invalid national id\")\n            return None\n        \n        if len(allwords) < 2:\n            msg.respond(\"missing national id and date\")\n            return None  \n        \n        # Determine if the word is a Date \n        m = re.match( self.datepattern, allwords[1], re.IGNORECASE)\n        if m is not None:\n            MatchCode = m.group(0)\n            setattr(person,'date', util.get_good_date(MatchCode))\n        else:\n            msg.respond(\"missing or invalid date\")\n            return None  \n        \n        # Determine if the word is weight \n        m = re.match( r\"(\\d+(?:\\.\\d+))(kg|lb)$\", allwords[-1], re.IGNORECASE)\n        if m is not None:\n            MatchCode = m.group(0)\n            setattr(person,'weight', MatchCode.replace(\"kg\",\"\").replace(\"lb\",\"\")) \n            self.info(\" weight %s\" % MatchCode)\n        \n        return person  \n\n\n    def registerChild(self, msg, name):\n        \n        try:\n            if msg.reporter is None:\n                msg.respond(\"you are not registered\")\n                return False\n            \n            child = self.parse_person(msg,name)\n            if child is None:\n                return False\n            \n            personid = child.uniqueid\n            DOB = child.date\n            self.info(\"Dob %s\"% DOB)\n            weight = child.weight\n            persontype ,isno = PersonType.objects.get_or_create(singular=\"Child\" , plural=\"Children\")\n            \n            if personid is not None:  \n                child , dontcare = Child.objects.get_or_create( code=personid,name=personid ,date_of_birth = DOB ,weight=weight, type=persontype)  \n                child.save()  \n                self.info(\"Successfully added/updated child\")  \n                msg.respond(\"Birth was added successfully\")\n            \n            return True\n        \n        except:\n            msg.respond(\"Sorry, I couldn't add child.\")\n            raise  \n\n    def registerMother(self, msg, name):\n        \n        try:\n            if msg.reporter is None:\n                msg.respond(\"you are not registered\")\n                return False  \n            person = self.parse_person(msg,name)\n            if person is None:\n                return False  \n            # getattr(person,'uniqueid')\n            personid = person.uniqueid\n            # getattr(person,\"date\",date_of_m)\n            date_of_m = person.date\n            self.info(\"Date field %s\" % date_of_m)\n            persontype ,isno = PersonType.objects.get_or_create(singular=\"Pregnant Woman\" , plural=\"Pregnant Women\")\n            \n            \n            self.info(\"Personid %s \" % personid)  \n            if personid is not None:  \n                pregnant ,dontcare = Pregnant.objects.get_or_create( code=personid , name=personid, gender =\"F\" , \n                                                                    date_last_menses = date_of_m 
,type=persontype) \n pregnant.save()\n self.info(\"Successfully added or updated mother\") \n msg.respond(\"pregnancy was added successfully\")\n\n return True\n \n except:\n msg.respond(\"Sorry, I couldn't add pregnant woman.\")\n raise \n \n def identify(self, msg, alias):\n try:\n\n # give me reporter.\n # if no alias will match,\n # exception must raise\n rep = Reporter.objects.get(alias=alias)\n\n # no such alias, but we can be pretty sure that the message\n # was for us, since it matched a pretty specific pattern\n # TODO: levenshtein spell-checking from rapidsms/ethiopia\n except Reporter.DoesNotExist:\n msg.respond(self.__str(\"bad-alias\"))\n return True\n\n\n # before updating the connection, take note\n # of the last time that we saw this reporter\n ls = rep.last_seen()\n\n # assign the reporter to this message's connection\n # (it may currently be assigned to someone else)\n msg.persistant_connection.reporter = rep\n msg.persistant_connection.save()\n msg.reporter = rep\n\n\n # send a welcome message back to the now-registered reporter,\n # depending on how long it's been since their last visit\n if ls is not None:\n msg.respond(\n self.__str(\"login\", rep) % {\n \"name\": unicode(rep),\n \"days\": (datetime.now() - ls).days })\n\n # or a slightly different welcome message\n else:\n msg.respond(\n self.__str(\"first-login\", rep) % {\n \"name\": unicode(rep),\n \"alias\": rep.alias })\n\n # re-call this app's prepare, so other apps can\n # get hold of the reporter's info right away\n self.parse(msg)\n\n\n def remind(self, msg):\n\n # if a reporter object was attached to the\n # message by self.parse, respond with a reminder\n if msg.reporter is not None:\n msg.respond(\n self.__str(\"reminder\", msg.reporter) % {\n \"name\": unicode(msg.reporter) })\n\n # if not, we have no idea\n # who the message was from\n else:\n msg.respond(self.__str(\n \"dont-know\",\n msg.reporter))\n\n\n def reporters(self, msg):\n\n # abort if listing reporters isn't allowed\n # (it can get rather long and expensive)\n if not self.allow_join:\n msg.respond(self.__str(\"disabled\"))\n return True\n\n # not identified yet; reject, so\n # we don't allow random people to\n # query our reporters list\n if msg.reporter is None:\n msg.respond(self.__str(\"denied\"))\n return True\n\n # collate all reporters, with their full name,\n # username, and current connection.\n items = [\n \"%s (%s) %s\" % (\n rep.full_name(),\n rep.alias,\n rep.connection().identity)\n for rep in Reporter.objects.all()\n if rep.connection()]\n\n # respond with the concatenated list.\n # no need to check for empty _items_. there will\n # always be at least one reporter, because only\n # identified reporters can trigger this handler\n msg.respond(\n self.__str(\"list\", msg.reporter) % {\n \"items\": \", \".join(items),\n \"noun\": \"reporters\",\n \"num\": len(items) })\n\n\n def lang(self, msg, code):\n\n # reqiure identification to continue\n # TODO: make this check a decorator, so other apps\n # can easily indicate that methods need a valid login\n if msg.reporter is not None:\n\n # if the language code was valid, save it\n # TODO: obviously, this is not cross-app\n if code in self.MSG:\n msg.reporter.language = code\n msg.reporter.save()\n resp = \"lang-set\"\n\n # invalid language code. 
don't do\n            # anything, just send an error message\n            else: resp = \"bad-lang\"\n\n        # if the caller isn't logged in, send\n        # an error message, and halt processing\n        else: resp = \"denied\"\n\n        # always send *some*\n        # kind of response\n        msg.respond(\n            self.__str(\n                resp, msg.reporter))\n","sub_path":"rwanda/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":12980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"518994977","text":"from sklearn.cluster import DBSCAN\nimport numpy as np\n\ndef recursive_dbscan(orders, min_radius, max_radius):\n    clus = list()\n    max_av_cluster_size = 2\n    max_len_cluster = 10\n    min_no_clusters = 2\n    best_res=list()\n    while min_radius < max_radius:\n        curr_radius = (min_radius+max_radius)/2\n        # cluster with the current radius as the DBSCAN neighbourhood size\n        clusters = DBSCAN(eps=curr_radius).fit(orders)\n        db1_labels = clusters.labels_\n        # cluster labels and the size of each cluster (label -1 is noise)\n        labels, counts = np.unique(db1_labels[db1_labels>=0], return_counts=True)\n        no_of_clusters = len(labels)\n\n        if no_of_clusters < min_no_clusters:\n            max_radius = curr_radius - 1\n        else:\n            min_radius = curr_radius + 1\n        \n        av_cluster_size = counts.mean() if no_of_clusters else 0\n        if av_cluster_size > max_av_cluster_size:\n            clus = [list() for k in range(no_of_clusters)]\n            # group the points by their cluster label, skipping noise\n            for i in range(len(db1_labels)):\n                if db1_labels[i] >= 0:\n                    clus[db1_labels[i]].append(orders[i])\n            best_res = clus\n            max_av_cluster_size = av_cluster_size\n        # split clusters that are still too big by clustering them again\n        for cluster in list(best_res):\n            if len(cluster) > max_len_cluster:\n                best_res.remove(cluster)\n                best_res.append(recursive_dbscan(cluster,50,100))\n    return clus\n\ndef dbScan(orders):\n    clusters = DBSCAN().fit(orders)\n    return clusters.labels_\n\no = list([[13.0481,77.6284],[13.1049,77.6723],[13.0897,77.6884],[13.1216,77.6754],[13.0922,77.6654],[13.0775,77.5758],[13.0684,77.5506]])\nprint(len(recursive_dbscan(o,50,100)))\nprint(dbScan(o))","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"557550857","text":"import click\nimport psycopg2\n\nfrom typing import Set, List, Tuple\nfrom datetime import datetime, timedelta\n\nDS_NODASH = \"%Y%m%d\"\n\n\ndef extract_ds_nodash(tablename):\n    return tablename.split(\"_\")[-1]\n\n\ndef retention_date_range(base: str, period: int = 365, buffer: int = 7) -> Set[str]:\n    \"\"\"Create a set of dates between [base-period, base]. 
The date format is ds_nodash.\"\"\"\n base = datetime.strptime(base, DS_NODASH)\n num_days = period + buffer\n dates = set(\n [\n datetime.strftime(base - timedelta(period) + timedelta(x), DS_NODASH)\n for x in range(num_days)\n ]\n )\n return dates\n\n\ndef create_connection(dbname, user, password, host):\n conn_str = f\"dbname={dbname} user={user} password={password} host={host}\"\n conn = psycopg2.connect(conn_str)\n return conn\n\n\ndef display_summary(action: str, table_set: Set[str], tables_to_show: int = 10):\n tables = list(sorted(table_set, key=extract_ds_nodash))\n print(f\"To {action} {len(tables)} tables...\")\n print(\"-\" * 40)\n if len(tables) > tables_to_show:\n show = tables[: tables_to_show // 2] + [\"...\"] + tables[-tables_to_show // 2 :]\n else:\n show = tables\n print(\"\\n\".join(show))\n print(\"=\" * 40)\n\n\ndef partition_set_by_filter(\n full_set: Set[str], retain_suffix_set: Set[str]\n) -> Tuple[Set[str], Set[str]]:\n retain_set = {\n table for table in full_set if extract_ds_nodash(table) in retain_suffix_set\n }\n trim_set = full_set - retain_set\n return retain_set, trim_set\n\n\ndef query_submission_date(\n cursor, retain_suffix_set: Set[str]\n) -> Tuple[Set[str], Set[str]]:\n submission_date_query = \"\"\"\n select tablename\n from pg_catalog.pg_tables\n where schemaname='public' and tablename like 'submission_date%';\n \"\"\"\n cursor.execute(submission_date_query)\n submission_retain, submission_trim = partition_set_by_filter(\n {row[0] for row in cursor.fetchall()}, retain_suffix_set\n )\n display_summary(\"retain\", submission_retain)\n display_summary(\"trim\", submission_trim)\n return submission_retain, submission_trim\n\n\ndef query_build_id(cursor, retain_suffix_set: Set[str]) -> Tuple[Set[str], Set[str]]:\n build_id_query = \"\"\"\n select tablename\n from pg_catalog.pg_tables\n where schemaname='public' and tablename like 'build_id%';\n \"\"\"\n cursor.execute(build_id_query)\n build_id_retain, build_id_trim = partition_set_by_filter(\n {row[0] for row in cursor.fetchall()}, retain_suffix_set\n )\n display_summary(\"retain\", build_id_retain)\n display_summary(\"trim\", build_id_trim)\n return build_id_retain, build_id_trim\n\n\ndef trim_tables(conn, trim_set: Set[str], batch_size=100):\n cursor = conn.cursor()\n trim_list = list(trim_set)\n num_batches = (len(trim_list) // batch_size) + 1\n for i in range(num_batches):\n trim_subset = trim_list[i * batch_size : (i + 1) * batch_size]\n if not trim_subset:\n continue\n print(f\"dropping {i+1} out of {num_batches} batches in groups of {batch_size}\")\n tables = \", \".join(trim_subset)\n query = f\"drop table {tables};\"\n cursor.execute(query)\n conn.commit()\n\n\n@click.command()\n@click.option(\n \"--base-date\", type=str, default=datetime.strftime(datetime.today(), DS_NODASH)\n)\n@click.option(\"--retention-period\", type=int, default=365 * 2)\n@click.option(\"--dry-run/--no-dry-run\", default=True)\n@click.option(\"--postgres-db\", type=str, envvar=\"POSTGRES_DB\", default=\"telemetry\")\n@click.option(\"--postgres-user\", type=str, envvar=\"POSTGRES_USER\", default=\"root\")\n@click.option(\"--postgres-pass\", type=str, envvar=\"POSTGRES_PASS\", required=True)\n@click.option(\"--postgres-host\", type=str, envvar=\"POSTGRES_HOST\", required=True)\ndef main(\n base_date,\n retention_period,\n dry_run,\n postgres_db,\n postgres_user,\n postgres_pass,\n postgres_host,\n):\n conn = create_connection(postgres_db, postgres_user, postgres_pass, postgres_host)\n cursor = conn.cursor()\n\n 
retain_suffix_set = retention_date_range(base_date, retention_period)\n submission_retain, submission_trim = query_submission_date(\n cursor, retain_suffix_set\n )\n build_id_retain, build_id_trim = query_build_id(cursor, retain_suffix_set)\n\n if not dry_run:\n print(\"Dropping tables...\")\n trim_tables(conn, submission_trim | build_id_trim)\n else:\n print(\"Dry run enabled, not dropping tables...\")\n conn.close()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"mozaggregator/trim_db.py","file_name":"trim_db.py","file_ext":"py","file_size_in_byte":4509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"217246761","text":"import numpy as np\nfrom scipy.ndimage import *\nimport matplotlib.pyplot as plt\n\nimport seaborn as sns\n\ndef plotResultCurve(_metrics:list,att_names:list,title=''):\n res = np.array(list(map(lambda x:[x[a] for a in att_names],_metrics)))\n ax = plt.figure()\n \n handles = []\n for i,a in enumerate(att_names,0):\n line, = plt.plot(res[:,i],label=a)\n handles.append(line)\n \n plt.title(title)\n legend = plt.legend(handles=handles,loc=4)\n plt.show()\n \n return","sub_path":"utility/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"615102163","text":"import pytest\nimport tempfile\nfrom unittest import mock\nfrom andriller import adb_conn\n\n\nfake_adb = tempfile.NamedTemporaryFile()\n\n\n@pytest.fixture\ndef ADB():\n with mock.patch('andriller.adb_conn.ADBConn.cmd_shell', return_value=fake_adb.name):\n adb = adb_conn.ADBConn()\n return adb\n\n\n@pytest.fixture\ndef ADB_win():\n with mock.patch('sys.platform', return_value='win32'):\n adb = adb_conn.ADBConn()\n return adb\n\n\n@mock.Mock('subprocess.STARTUPINFO')\ndef test_init_windows(ADB_win):\n assert ADB_win.startupinfo is not None\n\n\n@pytest.mark.parametrize('file_path, result', [\n ('/some/file.txt', '/some/file.txt\\n'),\n ('/some/my file.txt', '/some/my file.txt\\n'),\n ('some/file.txt', 'some/file.txt\\n'),\n])\ndef test_file_regex(file_path, result):\n assert adb_conn.ADBConn._file_regex(file_path).match(result)\n","sub_path":"tests/test_adb.py","file_name":"test_adb.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"5733951","text":"import time\n\nfrom matplotlib.pyplot import figure\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nfrom alpha_vantage.timeseries import TimeSeries\nfrom alpha_vantage.techindicators import TechIndicators\n\n\ndef join_dataframes(df1, df2, on, how='inner'):\n return df1.join(df2, on=on, how=how)\n\n\ndef get_daily_time_series_df(ts, stock):\n data, meta_data = ts.get_daily(symbol=stock)\n data.rename(columns={'1. open': 'open', '2. high': 'high', '3. low': 'low', \n '4. close': 'close', '5. 
volume': 'volume'}, inplace=True)\n return data\n\n\ndef get_technical_indicators_df(ti, stock, time_sleep=61):\n # Simple Moving Average - 9 days\n sma_9, meta_sma_9 = ti.get_sma(stock, interval='daily', time_period=9)\n sma_9.rename(columns={'SMA': 'sma_9'}, inplace=True)\n # Simple Moving Average - 13 days\n sma_13, meta_sma_13 = ti.get_sma(stock, interval='daily', time_period=13)\n sma_13.rename(columns={'SMA': 'sma_13'}, inplace=True)\n # Simple Moving Average - 26 days\n sma_26, meta_sma_26 = ti.get_sma(stock, interval='daily', time_period=26)\n sma_26.rename(columns={'SMA': 'sma_26'}, inplace=True)\n # Momentum - 1 day\n mom_1, meta_mom_1 = ti.get_mom(stock, interval='daily', time_period=1)\n mom_1.rename(columns={'MOM': 'mom_1'}, inplace=True)\n # Momentum - 8 day\n mom_8, meta_mom_8 = ti.get_mom(stock, interval='daily', time_period=8)\n mom_8.rename(columns={'MOM': 'mom_8'}, inplace=True)\n time.sleep(time_sleep)\n # Momentum - 15 day\n mom_15, meta_mom_15 = ti.get_mom(stock, interval='daily', time_period=15)\n mom_15.rename(columns={'MOM': 'mom_15'}, inplace=True)\n # RSI - 14 day\n rsi_14, meta_rsi_14 = ti.get_rsi(stock, interval='daily', time_period=14)\n rsi_14.rename(columns={'RSI': 'rsi_14'}, inplace=True)\n # RSI - 7 day\n rsi_7, meta_rsi_7 = ti.get_rsi(stock, interval='daily', time_period=7)\n rsi_7.rename(columns={'RSI': 'rsi_7'}, inplace=True)\n # Stochastic oscillator - 14 days faskK, 3 days fastD/slowK, 3 days slowD\n # https://commodity.com/technical-analysis/stochastics/\n # https://www.fmlabs.com/reference/default.htm?url=StochasticOscillator.htm\n stoch_14_3_3, meta_stoch_14_3_3 = ti.get_stoch(stock, interval='daily', fastkperiod=14, slowkperiod=3, \n slowdperiod=3, slowkmatype=0, slowdmatype=0)\n # Double Exponential Moving Average - 13 days\n dema_13, meta_dema_13 = ti.get_dema(stock, interval='daily', time_period=13)\n dema_13.rename(columns={'DEMA': 'dema_13'}, inplace=True)\n time.sleep(time_sleep)\n # Double Exponential Moving Average - 26 days\n dema_26, meta_dema_26 = ti.get_dema(stock, interval='daily', time_period=26)\n dema_26.rename(columns={'DEMA': 'dema_26'}, inplace=True)\n # Average Directional Movement Index (ADX) - 7 days\n adx_7, meta_adx_7 = ti.get_adx(stock, interval='daily', time_period=7)\n adx_7.rename(columns={'ADX': 'adx_7'}, inplace=True)\n # Average Directional Movement Index (ADX) - 14 days\n adx_14, meta_adx_14 = ti.get_adx(stock, interval='daily', time_period=14)\n adx_14.rename(columns={'ADX': 'adx_14'}, inplace=True)\n # Commodity Channel Index (CCI) - 7 days\n cci_7, meta_cci_7 = ti.get_cci(stock, interval='daily', time_period=7)\n cci_7.rename(columns={'CCI': 'cci_7'}, inplace=True)\n # Commodity Channel Index (CCI) - 14 days\n cci_14, meta_cci_14 = ti.get_cci(stock, interval='daily', time_period=14)\n cci_14.rename(columns={'CCI': 'cci_14'}, inplace=True)\n time.sleep(time_sleep)\n # Aroon (AROON) values (AroonUp/AroonDown) - 14 days\n aroon_14, meta_aroon_14 = ti.get_aroon(stock, interval='daily', time_period=14)\n # Money Flow Index (MFI) - 7 days\n mfi_7, meta_mfi_7 = ti.get_mfi(stock, interval='daily', time_period=7)\n mfi_7.rename(columns={'MFI': 'mfi_7'}, inplace=True)\n # Money Flow Index (MFI) - 14 days\n mfi_14, meta_mfi_14 = ti.get_mfi(stock, interval='daily', time_period=14)\n mfi_14.rename(columns={'MFI': 'mfi_14'}, inplace=True)\n # Accumulation/Distribution Line / Chaikin A/D (AD)\n ad, meta_ad = ti.get_ad(stock, interval='daily')\n # On Balance Volume (OBV)\n obv, meta_obv = ti.get_obv(stock, interval='daily')\n \n 
ti_dfs = [sma_9,sma_13, sma_26, mom_1, mom_8, mom_15, rsi_7, rsi_14, stoch_14_3_3, \n              dema_13, dema_26, adx_7, adx_14, cci_7, cci_14, aroon_14, mfi_7, mfi_14, ad, obv]\n    \n    stock_ti_df = join_dataframes(ti_dfs[0], ti_dfs[1], 'date')\n    for ti_df_ind in range(2, len(ti_dfs)):\n        stock_ti_df = join_dataframes(stock_ti_df, ti_dfs[ti_df_ind], 'date')\n\n    stock_ti_df.columns = map(lambda c: '_'.join(str.lower(c).split()), stock_ti_df.columns)\n    return stock_ti_df\n\n\ndef check_if_should_buy(ti, stock):\n    try:\n        # Moving Average Convergence Divergence - 9 days\n        macd_9, macd_sma_9 = ti.get_macd(stock, interval='daily')\n        # RSI - 14 day\n        rsi_14, meta_rsi_14 = ti.get_rsi(stock, interval='daily', time_period=14)\n        # Accumulation/Distribution Line / Chaikin A/D (AD)\n        ad, meta_ad = ti.get_ad(stock, interval='daily')\n\n        stock_ti_df = join_dataframes(macd_9[-2:], rsi_14[-2:], 'date')\n        stock_ti_df = join_dataframes(stock_ti_df, ad[-2:], 'date') \n        stock_ti_df.columns = map(lambda c: '_'.join(str.lower(c).split()), stock_ti_df.columns)\n        \n        print('Stock: {}'.format(stock))\n        print(stock_ti_df)\n        macd_is_good = stock_ti_df['macd_hist'][-1] > 0 and stock_ti_df['macd_hist'][-2] < 0\n        rsi_is_good = stock_ti_df['rsi'][-1] > 30 and stock_ti_df['rsi'][-2] < 30\n        ad_is_good = stock_ti_df['chaikin_a/d'][-1] > 0 and stock_ti_df['chaikin_a/d'][-2] < 0\n\n        return stock_ti_df, sum([macd_is_good, rsi_is_good, ad_is_good]) > 0\n    except:\n        print('Stock: {} - error'.format(stock)) \n        return None, False\n    ","sub_path":"src/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":5890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"435494239","text":"# Body mass index calculation\r\n\r\ndef bmi_calc():\r\n\r\n    question = input('Do you use metric system?: Y/N> ')\r\n    metric_system = None\r\n\r\n    if question == 'Y' or question == 'y' or question == 'yes': \r\n        metric_system = True\r\n\r\n        height = float(input('Enter your height in meters: '))\r\n        weight = float(input('Enter your weight in kilograms: '))\r\n\r\n    elif question == 'N' or question == 'n' or question == 'no':\r\n        metric_system = False\r\n\r\n        height = float(input('Enter your height in feet: '))\r\n        weight = float(input('Enter your weight in pounds: '))\r\n\r\n        #converting to inches\r\n        height = height * 12 \r\n\r\n    else:\r\n        print('incorrect answer')\r\n        # retry and return, so we don't fall through with undefined values\r\n        return bmi_calc()\r\n\r\n    bmi = None\r\n\r\n    if metric_system == True:\r\n        bmi = weight / (height ** 2)\r\n    elif metric_system == False:\r\n        bmi = (weight / (height ** 2)) * 703\r\n\r\n    print(f'Your body mass index is {bmi:.2f}')\r\n\r\nbmi_calc()","sub_path":"python-workbook-solutions/27.py","file_name":"27.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"557550857","text":"import numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\ndef add_layer(inputs, in_size, out_size, activation_function=None):\n\tWeights = tf.Variable(tf.random_normal([in_size, out_size]))\n\tbiases = tf.Variable(tf.zeros([1, out_size]) + 0.1)\n\tWx_plus_b = tf.matmul(inputs, Weights) + biases #matrix multiply\n\n\tif activation_function is None:\n\t\toutputs = Wx_plus_b\n\telse:\n\t\toutputs = activation_function(Wx_plus_b)\n\n\treturn outputs\n\n### Create Data : poly. 
+ noise\nx_data = np.linspace(-2,2,600)[:,np.newaxis]\nnoise = np.random.normal(0,0.8,x_data.shape)\n# y_data_ori = 1.5*np.power(x_data,3) + 2*np.power(x_data,2) + 1\ny_data_ori = 2*x_data + 2 * np.sin(x_data/0.5)\ny_data = y_data_ori + noise\n\nxs = tf.placeholder(tf.float32,[None,1])\nys = tf.placeholder(tf.float32,[None,1])\n\n### Create NN graph\nl1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)\nprediction = add_layer(l1 , 10 , 1 , activation_function=None)\n\n# Set Learning Parameter\nloss = tf.reduce_mean(tf.reduce_sum(tf.square(y_data - prediction),reduction_indices=[1]))\ntrain_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)\n\n# initial tensorflow variables\ninit = tf.initialize_all_variables()\n\n# initial graph\nfig = plt.figure()\nax = fig.add_subplot(1,1,1)\nax.scatter(x_data, y_data)\nplt.ion()\nplt.show()\n\nwith tf.Session() as sess:\n\tsess.run(init)\n\n\tfor i in range(1000):\n\t\tsess.run(train_step, feed_dict={xs:x_data, ys:y_data})\n\n\t\t# predict and plot the result\n\t\tif i % 50 ==0:\n\t\t\tprint(sess.run(loss,feed_dict={xs:x_data, ys:y_data}))\n\t\t\ttry:\n\t\t\t\tax.lines.remove(lines[0])\n\t\t\texcept Exception:\n\t\t\t\tpass\n\n\t\t\tprediction_value = sess.run(prediction, feed_dict={xs: x_data})\n\t\t\t# plot the prediction\n\t\t\tlines = ax.plot(x_data, prediction_value, 'r-', lw=5)\n\t\t\tplt.pause(0.2)\n\n\tlines = ax.plot(x_data, y_data_ori,'g-', lw=3)\n\tplt.pause(30)\n\n# Model parameters\nW = tf.Variable([.3], dtype=tf.float32)\nb = tf.Variable([-.3], dtype=tf.float32)\n# Model input and output\nx = tf.placeholder(tf.float32)\nlinear_model = W*x + b\ny = tf.placeholder(tf.float32)\n\n# loss\nloss = tf.reduce_sum(tf.square(linear_model - y)) # sum of the squares\n# optimizer\noptimizer = tf.train.GradientDescentOptimizer(0.01)\ntrain = optimizer.minimize(loss)\n\n# training data\nx_train = [1, 2, 3, 4]\ny_train = [1, 1, 1, 1]\n# training loop\ninit = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init) # reset values to wrong\nfor i in range(1000):\n sess.run(train, {x: x_train, y: y_train})\n\n# evaluate training accuracy\ncurr_W, curr_b, curr_loss = sess.run([W, b, loss], {x: x_train, y: y_train})\nprint(\"W: %s b: %s loss: %s\"%(curr_W, curr_b, curr_loss))","sub_path":"Module/TensorFlow.py","file_name":"TensorFlow.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"274561365","text":"#--------------------------------------------------------------------------------------------\n#\n# This script contains functions and constants to analyze pictures from the scan. \n#\n# Mike Boch, Isaac Walker, Matt Howard, Chris Shaffer, George Montgomery\n#\n# 2015\n#\n#--------------------------------------------------------------------------------------------\n\nimport numpy as np\nfrom scipy import ndimage\nfrom scipy.misc import imread\nfrom skimage.color import rgb2gray\nfrom skimage import measure\nfrom scipy.signal import butter, lfilter\n\nW = 2592. 
#Image pixel sizes\nH = 1944.\n\ndistance = 15 #Distance between camera and laser\nfocal = 18.45 #Focal point (Location where camera plane and laser plane intersect)\nFy = 7 #Location where camera angle in y intersects zero plane\n\nalpha = 26.5*np.pi/180 #Camera half-angle in x\nbeta = 20*np.pi/180 #Camera half-angle in y\nthetav = 2*np.pi/180 #Tilt of camera in y\n\nd = np.sqrt(distance*distance+focal*focal) #Refer to figure about trig for where following comes from\ntheta = np.arctan(focal/distance)-alpha\nm = distance*np.tan(2*alpha+theta)\n\nx = np.linspace(0,W,W) #Used for plots, z can be used as a lookup table\nn = np.linspace(0,H,H) #Used to plot y at a given depth\n\nz = (-2*x*np.tan(alpha)*d/np.cos(2*alpha+theta)+2*focal*x*np.tan(alpha)*np.sin(alpha+theta)/np.cos(2*alpha+theta)+m*W/np.cos(alpha))/(2*x*np.tan(alpha)*np.sin(alpha+theta)/np.cos(2*alpha+theta)+W/np.cos(alpha))\n\ny = []\n\nfor zCo in z:\n\tyCo = np.sqrt(distance*distance+zCo*zCo)*(np.sin(beta)*(2*(H/2-n)/H)+np.sin(thetav)/np.cos(beta))+Fy\n\ty.append(yCo)\n\ndef butter_lowpass(cutoff, fs, order=5):\n\tnyq = 0.5 * fs\n\tnormal_cutoff = cutoff / nyq\n\tb, a = butter(order, normal_cutoff, btype='low', analog=False)\n\n\treturn b, a\n\ndef butter_lowpass_filter(data, cutoff, fs, order=5):\n\tb, a = butter_lowpass(cutoff, fs, order=order)\n\ty = lfilter(b, a, data)\n\n\treturn y\n\ndef getPoints(filename):\n\timg = imread(filename, flatten=True) #Open image in grayscale\n\tcontours = measure.find_contours(img, 255 * .99) #find contours with threshold of 99% gray level\n\tcontours = sorted(enumerate(contours),key=lambda x:len(x[1])) #Sort contours by length to find\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t #important ones\n\tk=-1\n\tfinvals=[]\n\tif(not contours): #End analysis of picture if no contours found\n\t\treturn\n\tif(not contours[k]):\n\t\treturn\n\n\twhile -k < len(contours) and len(contours[k][1]) > 90: #Analyse only contours that are longer than 90 points\n\t\tn, contour = contours[k] #Get contour data\n\t\tskel = []\n\t\tleftPoints = []\n\t\trightPoints = []\n\n\t\tfor i in range(len(img)): #Create 3 lists to hold points\n\t\t\tskel.append([0,0])\n\t\t\tleftPoints.append([0,0])\n\t\t\trightPoints.append([0,0])\n\n\t\tmaxY = max(contour, key=lambda x:x[0])[0] #Get max and min to be used to figure out which\n\t\tminY = min(contour, key=lambda x:x[0])[0] #side of the contour you're on\n\n\t\tmaxHit = False\n\t\tminHit = False\n\t\tcurrent = False #False -> Left, True -> Right\n\t\tfor point in contour: #Puts points in contour on each side into respective lists\n\t\t\ty = point[0]\n\t\t\tif y == maxY and not maxHit:\n\t\t\t\tmaxHit = True\n\t\t\t\tcurrent ^= True\n\t\t\telif y == minY and not minHit:\n\t\t\t\tminHit = True\n\t\t\t\tcurrent ^= True\n\t\t\tif current:\n\t\t\t\ty = int(round(y))\n\t\t\t\trightPoints[y][1] = (rightPoints[y][1] * rightPoints[y][0] + point[1]) / (rightPoints[y][0] + 1.)\n\t\t\t\trightPoints[y][0] += 1\n\t\t\telse:\n\t\t\t\ty = int(round(y))\n\t\t\t\tleftPoints[y][1] = (leftPoints[y][1] * leftPoints[y][0] + point[1]) / (leftPoints[y][0] + 1.)\n\t\t\t\tleftPoints[y][0] += 1\n\n\t\tj=0\n\t\tpoints=[]\n\t\tfor i in range(0, len(img)): #Computes skeleton of contour by averaging points on left and right\n\t\t\tleftPoint = leftPoints[i]\n\t\t\trightPoint = rightPoints[i]\n\t\t\tskelX = 0\n\t\t\tif(leftPoint[0]):\n\t\t\t\tif rightPoint[0]:\n\t\t\t\t\tskelX = (leftPoint[1] + rightPoint[1]) / 2.\n\t\t\t\telse:\n\t\t\t\t\tskelX = leftPoint[1]\n\t\t\telif rightPoint[0]:\n\t\t\t\tskelX = 
rightPoint[1]\n\t\t\tif skelX:\n\t\t\t\tpoints.append((skelX,j))\n\t\t\tj+=1\n\t\n\t\tx,y=zip(*points)\n\t\n\t\t# Filter requirements.\n\t\torder = 6\n\t\tfs = 1000.0 # sample rate, Hz\n\t\tcutoff = 750./np.sqrt(len(x)) # desired cutoff frequency of the filter, Hz\n \n\t\tfil = butter_lowpass_filter(x, cutoff, fs, order) #Get butterworth filter\n\t\tfinvals+=zip(fil[30::10], y[30::10]) #Zip points together using every 10th and ignoring first 30\n\t\t\t\t\t\t\t\t\t\t\t #Avoids issues with tail effects\n\t\tfor i in range(len(finvals)): #Rounds each value to integers to index later\n\t\t\tfinvals[i] = [int(round(finvals[i][0])),int(round(finvals[i][1]))]\n\t\tk-=1\n\n\treturn finvals","sub_path":"scanner_utils.py","file_name":"scanner_utils.py","file_ext":"py","file_size_in_byte":4458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"589017886","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 18 17:43:17 2019\n\n@author: Sameer Gupta\n\"\"\"\nfrom random import *\nfrom datetime import date\nimport numpy as np\nimport pandas as pd\nimport pymysql\n\ndef water_level_check (records) :\n    date1=date(2019,7,16)#actual start date of the crop\n    date2=date.today()#current date\n    day_count=abs(date2-date1).days\n    \n    \n    \n    wl=int(records[1])\n    \n    \n    #print(\"water_level_data = \", records)\n    list=[] \n    df=pd.read_csv(\"water_level_data.csv\",header=None)\n\n    r,c=df.shape\n\n    # build independent rows (a repeated [[0]*c]*r would alias every row)\n    array_cm = [[0]*c for _ in range(r)]\n\n    list = [[]]*r\n\n    list=df.values.tolist()\n    for i in range(0,r):\n        for j in range(0,c):\n            if list[i][j] <= 13 :\n                array_cm[i][j]=0.2\n\n            elif (list[i][j] > 13 and list[i][j]<= 43) :\n                array_cm[i][j]=0.5\n\n            elif (list[i][j] > 44 and list[i][j]<= 65):\n                array_cm[i][j]=0.5+(list[i][j]/65)\n\n            elif (list[i][j] > 66 and list[i][j]<= 113):\n                array_cm[i][j]=1.0+(list[i][j]/113)\n\n            elif (list[i][j] > 114 and list[i][j]<= 143):\n                array_cm[i][j]=1.5+(list[i][j]/143)\n\n            elif (list[i][j] > 144 and list[i][j]<= 157):\n                array_cm[i][j]=2.0+(list[i][j]/157)\n\n            elif (list[i][j] > 157 and list[i][j]<= 158):\n                array_cm[i][j]=2.5+(list[i][j]/157)\n\n            elif (list[i][j] > 158 and list[i][j]< 162):\n                array_cm[i][j]=3.5+(1.2*(list[i][j]/162))\n\n            else :\n                a=(randint(33,150)/100)\n                array_cm[i][j]=4+a\n\n    X_min=np.amin(array_cm,axis=0) \n    X_max=np.amax(array_cm,axis=0)\n    X_mean=np.mean(array_cm,axis=0)\n\n    X_mid=[]\n    for i in range(0,134) :\n        X_mid.append((X_max[i]+X_min[i])/2)\n\n\n\n    for i in range(day_count-1,day_count) :\n        if ( X_mean[i] >= X_mid[i] ) :\n            if ( X_mean[i] >= wl ):\n                upto_level=X_max[i]\n            else :\n                upto_level=0;\n        elif ( X_mean[i] < X_mid[i] ) :\n            if ( X_mean[i] >= wl ):\n                upto_level=(X_mid[i]+X_max[i])/2\n            else :\n                upto_level=0\n\n    upto_level=(round(upto_level/0.5))*0.5\n    return upto_level,array_cm\n\n\n\n","sub_path":"sih_fnode_Desktop/final_agrotech/old/water_level_pattern.py","file_name":"water_level_pattern.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"613393492","text":"import torch\nimport torch.nn as nn\nimport torch.nn.init as init\nimport torch.nn.functional as F\n\nfrom torch.utils import model_zoo\nfrom torchvision import models\n\n\nclass Mobile_Unet(nn.Module):\n\n    def __init__(self, num_classes,alpha=0.15, alpha_up=0.25):\n        super(Mobile_Unet, self).__init__()\n        def conv_bn(inp, oup, stride):\n            return nn.Sequential(\n                nn.Conv2d(inp, oup, 3, stride, 1, bias=False),\n                nn.BatchNorm2d(oup),\n                nn.ReLU(inplace=True)\n            )\n\n        def conv_dw(inp, 
oup, stride):\n return nn.Sequential(\n nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False),\n nn.BatchNorm2d(inp),\n nn.ReLU(inplace=True),\n\n nn.Conv2d(inp, int(oup), 1, 1, 0, bias=False),\n nn.BatchNorm2d(int(oup)),\n nn.ReLU(inplace=True),\n )\n self.b001 = nn.Sequential(\n conv_bn(3, int(32 * alpha), 1),\n )\n\n self.b002 = nn.Sequential(\n conv_bn(int(32 * alpha), int(32 * alpha), 1),\n )\n\n self.b00 = nn.Sequential(\n conv_bn(int(32*alpha), int(32*alpha), 2),\n )\n\n self.b01 = nn.Sequential(\n conv_dw(int(32*alpha), int(64*alpha), 1),)\n\n self.b03 = nn.Sequential(\n conv_dw(int(64*alpha), int(96*alpha), 2),\n conv_dw(int(96*alpha), int(96*alpha), 1),)\n\n self.b05 = nn.Sequential(\n conv_dw(int(96*alpha), int(192*alpha), 2),\n conv_dw(int(192*alpha), int(192*alpha), 1),)\n\n self.b11 = nn.Sequential(\n conv_dw(int(192*alpha), int(384*alpha), 2),\n conv_dw(int(384*alpha), int(384*alpha), 1),\n conv_dw(int(384*alpha), int(384*alpha), 1),\n conv_dw(int(384*alpha), int(384*alpha), 1),\n conv_dw(int(384*alpha), int(384*alpha), 1),\n conv_dw(int(384*alpha), int(384*alpha), 1),\n )\n\n self.b13 = nn.Sequential(\n conv_dw(int(384*alpha), int(768*alpha), 2),\n conv_dw(int(768*alpha), int(768*alpha), 1),\n )\n\n self.ConvTranspose1= nn.ConvTranspose2d(int(768*alpha), int(384*alpha)+1, 2, stride=2)\n self.b14 = conv_dw(int(768*alpha), int(384*alpha*alpha_up), 1)\n\n self.ConvTranspose2= nn.ConvTranspose2d(int(384*alpha*alpha_up), int(192*alpha), 2, stride=2)\n self.b15 = conv_dw(int(384*alpha)-1, int(192*alpha*alpha_up), 1)\n\n self.ConvTranspose3= nn.ConvTranspose2d(int(192*alpha*alpha_up), int(96*alpha), 2, stride=2)\n self.b16 = conv_dw(int(192*alpha), int(96*alpha*alpha_up), 1)\n\n self.ConvTranspose4= nn.ConvTranspose2d(int(96*alpha*alpha_up), int(64*alpha), 2, stride=2)\n self.b17 = conv_dw(int(96*alpha)+4, int(64*alpha*alpha_up), 1)\n\n self.b18 = conv_bn( int(64*alpha*alpha_up)+int(32*alpha), int(32*alpha*alpha_up)+1, 1)\n\n self.final_cat = nn.Conv2d(int(32 * alpha * alpha_up) + 1, int(32 * alpha), 1)\n self.final = nn.Conv2d(int(32 * alpha) * 2, num_classes, 1)\n\n # self.init_params()\n\n def init_params(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n init.kaiming_normal(m.weight, mode='fan_out')\n if m.bias is not None:\n init.constant(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n init.constant(m.weight, 1)\n init.constant(m.bias, 0)\n elif isinstance(m, nn.Linear):\n init.normal(m.weight, std=0.001)\n if m.bias is not None:\n init.constant(m.bias, 0)\n\n def forward(self, x):\n b001 =self.b001(x)\n b002 = self.b002(b001)\n b00 = self.b00(b002)\n # print b00.size()\n b01 =self.b01(b00)\n # print b01.size()\n\n b03 =self.b03(b01)\n # print b03.size()\n\n b05 =self.b05(b03)\n # print b05.size()\n b11 =self.b11(b05)\n # print b11.size()\n b13 =self.b13(b11)\n # print b13.size()\n\n up1 = torch.cat([self.ConvTranspose1(b13),b11],1)\n # print up1.size()\n b14 = self.b14(up1)\n # print b14.size()\n\n up2 = torch.cat([self.ConvTranspose2(b14),b05],1)\n # print up2.size()\n b15 = self.b15(up2)\n # print b15.size()\n\n up3 = torch.cat([self.ConvTranspose3(b15),b03],1)\n # print up3.size()\n b16 = self.b16(up3)\n # print b16.size()\n up4 = torch.cat([self.ConvTranspose4(b16),b01],1)\n # print up4.size()\n b17 = self.b17(up4)\n\n # print b17.size()\n up5 = torch.cat([b17,b00],1)\n # print up5.size()\n b18=self.b18(up5)\n # print b18.size()\n b19 = F.upsample_bilinear(self.final_cat(b18),scale_factor=2)\n b20 = torch.cat([b19, b002], 1)\n b21 = 
self.final(b20)\n        return F.sigmoid(b21)\nif __name__=='__main__':\n    from torch.autograd import Variable\n\n    x = torch.FloatTensor(1,3,768,768)\n    x = Variable(x)\n    model = Mobile_Unet(num_classes=1, alpha=0.15)\n    print(model)\n    y = model(x)\n    print(y.view(-1,768,768).size())\n","sub_path":"JNetV3/models/imJNetV3.py","file_name":"imJNetV3.py","file_ext":"py","file_size_in_byte":5161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"159741418","text":"# 1. Think of this as Java's StringBuffer???\nfrom io import StringIO, BytesIO\n\nf = StringIO()\nf.write('hello')\nf.write(' ')\nf.write('world')\nprint(f.getvalue())\n\n# 2. Reading from a StringIO\nf1 = StringIO('Hello!\\nHi!\\nGoodbye!')\nwhile True:\n    s = f1.readline()\n    if s == '':\n        break\n    print(s.strip())\n\n# 3. Working with BytesIO\nf2 = BytesIO()\nf2.write('王琨'.encode('utf-8'))\nprint(f2.getvalue())","sub_path":"com/wk/io/StringIOAndBytesIO/StringIODemo.py","file_name":"StringIODemo.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"504342673","text":"from django.http import HttpResponseRedirect\nfrom django.template import loader\nfrom django.shortcuts import render\nfrom .forms import Nomeform, Questionarioform1, Questionarioform2, Questionarioform3, Questionarioform4, Questionarioform5, Questionarioform6, Questionarioform7\nfrom .models import cursos, dadosaluno, disciplina\n\ndef index(request):\n    curso=cursos.objects.all()\n    return render(request, 'index.html', {'cursos': curso})\n\ndef dados(request, curso_id):\n    if request.method == 'POST':\n        form = Nomeform(curso_id, request.POST)\n        if form.is_valid():\n            data = form.save()\n            return HttpResponseRedirect(\"/caa/%s/formularios/\" % (data.id))\n    else:\n        form = Nomeform(curso_id)\n    return render(request, 'Iniciar.html', {'form': form}, )\n\ndef formularios(request, dadosaluno_id):\n    aluno=dadosaluno.objects.get(id=dadosaluno_id)\n    materia=aluno.materias.all()\n    return render(request, 'formularios.html', {'materias': materia,\n                                                'aluno' : aluno.id})\n\ndef questionario(request, dadosaluno_id, disciplina_id):\n    disc=disciplina.objects.get(id=disciplina_id)\n    materia=disc.nome\n    if request.method == 'POST':\n        form1 = Questionarioform1(request.POST)\n        form2 = Questionarioform2(request.POST)\n        form3 = Questionarioform3(request.POST)\n        form4 = Questionarioform4(request.POST)\n        form5 = Questionarioform5(request.POST)\n        form6 = Questionarioform6(request.POST)\n        form7 = Questionarioform7(request.POST)\n        if form1.is_valid() and form2.is_valid() and form3.is_valid() and form4.is_valid() and form5.is_valid() and form6.is_valid() and form7.is_valid():\n            data1 = form1.save(commit=False)\n            data1.código=disc\n            data1.Curso=disc.Cursoo\n            data1.nome=dadosaluno.objects.get(id=dadosaluno_id)\n            data1.save()\n            data2 = form2.save(commit=False)\n            data2.código = disc\n            data2.Curso = disc.Cursoo\n            data2.nome = dadosaluno.objects.get(id=dadosaluno_id)\n            data2.save()\n            data3 = form3.save(commit=False)\n            data3.código = disc\n            data3.Curso = disc.Cursoo\n            data3.nome = dadosaluno.objects.get(id=dadosaluno_id)\n            data3.save()\n            data4 = form4.save(commit=False)\n            data4.código = disc\n            data4.Curso = disc.Cursoo\n            data4.nome = dadosaluno.objects.get(id=dadosaluno_id)\n            data4.save()\n            data5 = form5.save(commit=False)\n            data5.código = disc\n            data5.Curso = disc.Cursoo\n            data5.nome = dadosaluno.objects.get(id=dadosaluno_id)\n            data5.save()\n            data6 = form6.save(commit=False)\n            data6.código = disc\n            data6.Curso = 
disc.Cursoo\n            data6.nome = dadosaluno.objects.get(id=dadosaluno_id)\n            data6.save()\n            data7 = form7.save(commit=False)\n            data7.código = disc\n            data7.Curso = disc.Cursoo\n            data7.nome = dadosaluno.objects.get(id=dadosaluno_id)\n            data7.save()\n            return HttpResponseRedirect(\"/caa/%s/formularios/\" % (dadosaluno_id))\n    else:\n        form1 = Questionarioform1()\n        form2 = Questionarioform2()\n        form3 = Questionarioform3()\n        form4 = Questionarioform4()\n        form5 = Questionarioform5()\n        form6 = Questionarioform6()\n        form7 = Questionarioform7()\n    return render(request, 'questionario.html', {'form1': form1,\n                                                  'form2': form2,\n                                                  'form3': form3,\n                                                  'form4': form4,\n                                                  'form5': form5,\n                                                  'form6': form6,\n                                                  'form7': form7,\n                                                  'disciplina': materia\n                                                  })","sub_path":"mysite/caa/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"607321876","text":"\ndef convert(s):\n\ts = list(s)\n\tif s[0] in \"aeiou\":\n\t\treturn ''.join(s) + \"-\" + str('way')\n\telif s[0] == \"q\":\n\t\treturn ''.join(s)[2:] + '-' + ''.join(s)[0:2] + str('ay')\n\telse:\n\t\tvowelindex = list()\n\t\tfor i in range(0,len(s)):\n\t\t\tif s[i] in \"aeiou\":\n\t\t\t\tvowelindex.append(i)\n\t\tfirstvowelindex = min(vowelindex)\n\t\treturn ''.join(s)[firstvowelindex:] + \"-\" + ''.join(s)[:firstvowelindex] + 'ay'\t\t\t\n\n\n","sub_path":"Piglatin.py","file_name":"Piglatin.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"190443308","text":"import logging\nimport json\n\nfrom tornado import websocket, ioloop, web\n\nfrom player import Player\n\n\nlogging.basicConfig(level=logging.INFO)\n\n\nclass GameWebSocketHandler(websocket.WebSocketHandler):\n    connected_players = set()\n\n    def check_origin(self, origin):\n        # woohoo! 
CORS is allowed\n        return True\n\n    def __init__(self, *args, **kwargs):\n        super(GameWebSocketHandler, self).__init__(*args, **kwargs)\n        self.player = None\n\n    def open(self):\n        self.player = Player(self)\n        GameWebSocketHandler.connected_players.add(self.player)\n        logging.info('Player {} connected'.format(hash(self)))\n        self.write_message({'action': 'init', 'data': self.player.to_json()})\n        self.send_update()\n\n    def on_message(self, message):\n        data = json.loads(message)\n        logging.info('Received message: {}'.format(message))\n        action = data.get('action')\n        if not action:\n            logging.exception('No action.')\n            return\n        handler = self.handlers.get(action)\n        if not handler:\n            logging.exception('No handler for action {}'.format(action))\n            return\n        params = data.get('params')\n        logging.info('Running action {}.'.format(action))\n        handler(self, **params) if params else handler(self)\n\n    @property\n    def players(self) -> list:\n        return GameWebSocketHandler.connected_players\n\n    def on_close(self):\n        logging.info('Player {} disconnected.'.format(hash(self)))\n        GameWebSocketHandler.connected_players.remove(self.player)\n        self.send_update()\n\n    def move(self, direction):\n        self.player.move(direction)\n        self.send_update()\n        return True\n\n    def fire(self, direction):\n        # currently identical to move(); this snippet defines no separate fire behaviour on Player\n        self.player.move(direction)\n        self.send_update()\n        return True\n\n    def send_update(self):\n        map_state = []\n\n        for user in self.players:\n            map_state.append(user.to_json())\n        for user in self.players:\n            user.write_message({'action': 'update', 'map': map_state})\n\n    handlers = {\n        'move': move,\n        'fire': fire,\n    }\n\napplication = web.Application([\n    (r'/ws/', GameWebSocketHandler),\n], debug=True, static_path='static')\n\nif __name__ == \"__main__\":\n    address = '0.0.0.0'\n    # address = '127.0.0.1'\n    port = 8888\n    logging.info('Starting application on %s:%s' % (address, port))\n    application.listen(port=port, address=address)\n    ioloop.IOLoop.instance().start()\n","sub_path":"backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"428201033","text":"import random\nimport statistics\nfrom collections import Counter\n\n\ndef roll(count, die, roll_list=False):\n    count = int(count)\n    die = int(die)\n\n    rolls = []\n    for i in range(count):\n        rolls.append(random.randint(1, die))\n    if roll_list:\n        return sum(rolls), rolls\n    else:\n        return sum(rolls)\n\n\ndef testing():\n    ten_ten = [roll(10, 10) for i in range(100_000)]\n    one_one_hundred = [roll(1, 100) for i in range(100_000)]\n    five_twenty = [roll(5, 20) for i in range(100_000)]\n\n    print(Counter(ten_ten))\n    print(statistics.median(ten_ten))\n\n    print(Counter(one_one_hundred))\n    print(statistics.median(one_one_hundred))\n\n    print(Counter(five_twenty))\n    print(statistics.median(five_twenty))\n\n\nif __name__ == '__main__':\n    testing()\n","sub_path":"game/dice.py","file_name":"dice.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"78539555","text":"from dao.db import OracleDb\r\nimport cx_Oracle\r\n\r\n\r\nclass Helper:\r\n\r\n    def __init__(self):\r\n        self.db = OracleDb()\r\n\r\n    def Get_phone(self, phone_id=None):\r\n\r\n        if phone_id:\r\n            phone_id = \"'{0}'\".format(phone_id)\r\n        else:\r\n            phone_id = 'null'\r\n\r\n        query = \"select * from table(pkg_hr.get_phone({0}))\".format(phone_id)\r\n        result = self.db.execute(query)\r\n\r\n        return result.fetchall()\r\n\r\n    def Update_phone(self, 
phone_id, phone_model, phone_vendor, phone_price, phone_date):\r\n\r\n        cursor = self.db.cursor\r\n        status = cursor.var(cx_Oracle.STRING)\r\n\r\n        cursor.callproc(\"pkg_hr.update_phone\", [phone_id, phone_model, phone_vendor, phone_price, phone_date, status])\r\n\r\n        return status.getvalue()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    helper = Helper()\r\n","sub_path":"myhelper.py","file_name":"myhelper.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"173160594","text":"import numpy as np\nimport theano\nimport theano.tensor as T\n\nfrom .. import nonlinearities, init, utils\n\nfrom .base import Layer\nfrom .input import InputLayer\nfrom .dense import DenseLayer\nimport helper\nfrom settings import *\n\nclass CustomRecurrentLayer(Layer):\n\n    def __init__(self, incoming,name ,input_to_hid, hid_to_hid,\n                 nonlinearity=nonlinearities.rectify,\n                 hid_init=init.Constant(0.), backwards=False,trace_steps=-1):\n        '''\n        A bottom layer for RecurrentLayer.\n\n        Parameters\n        ----------\n        From slide p.6\n        - input_to_hid\n        - hid_to_hid\n        - backwards : boolean\n            If True, process the sequence backwards (used in biDirectional)\n        - trace_steps : int\n            Number of timesteps to include in backpropagated gradient\n            If -1, backpropagate through the entire sequence\n        '''\n        super(CustomRecurrentLayer, self).__init__(incoming,name)\n\n        self.input_to_hid = input_to_hid\n        self.hid_to_hid = hid_to_hid\n        self.nonlinearity = nonlinearity\n        self.backwards = backwards\n        self.trace_steps = trace_steps\n\n        # Get batchSize and num_units at high level\n        # num_batches == input_shape[0]\n        # Initialize hidden state\n        (n_batch,self.num_units) = self.input_to_hid.get_output_shape()\n        self.hid_init = self.add_param(hid_init,self.input_to_hid.get_output_shape())\n\n    def get_params(self):\n        params = (helper.get_all_params(self.input_to_hid) +\n                  helper.get_all_params(self.hid_to_hid) + [self.hid_init] )\n        return params #return a list\n\n    def get_all_non_bias_params(self):\n        return (helper.get_all_non_bias_params(self.input_to_hid) +\n                helper.get_all_non_bias_params(self.hid_to_hid) + [self.hid_init] )\n\n    def get_bias_params(self):\n        return (helper.get_all_bias_params(self.input_to_hid) +\n                helper.get_all_bias_params(self.hid_to_hid))\n\n    def get_output_shape_for(self, input_shape):\n        return (input_shape[0],input_shape[1],self.num_units)\n\n    def get_output_for(self, input,*args, **kwargs):\n\n        # Single recurrent computation\n        def step(layer_input, prev_hidden_state):\n            return self.nonlinearity(\n                self.input_to_hid.get_output(layer_input) +\n                self.hid_to_hid.get_output(prev_hidden_state))\n        #ref:http://deeplearning.net/software/theano/library/scan.html\n\n        #No non-changing variable -> thus,no non_sequence\n        #outputs_info is used for initialization\n        #truncate_gradient is the number of steps to use in truncated BPTT.\n        #If you compute gradients through a scan op,\n        #they are computed using backpropagation through time. 
\n        #By providing a different value than -1, you choose to use truncated BPTT\n        #instead of classical BPTT,\n        #where you go for only truncate_gradient number of steps back in time.\n\n        # Input should be provided as (n_batch, nGrams, n_features)\n        # but scan requires the iterable dimension to be first\n        # So, we need to dimshuffle to (nGrams, n_batch, n_features)\n        #print(shape(input))\n        input = input.dimshuffle(1, 0, 2)\n\n        #Refer to the order of theano.scan ~ seqs -> output_info -> nonseqs\n        output = theano.scan(fn=step, sequences=input,\n                             go_backwards=self.backwards,\n                             outputs_info=[self.hid_init],\n                             truncate_gradient=self.trace_steps)[0]\n        # Now, dimshuffle back to (n_batch, nGrams, n_features)\n        output = output.dimshuffle(1, 0, 2)\n\n        if self.backwards:\n            output = output[:, ::-1, :] # reverse the grams back to normal index order\n\n        return output\n\nclass RecurrentLayer(CustomRecurrentLayer):\n    def __init__(self, incoming,name ,num_units, W_i=init.Uniform(),\n                 W_h=init.Uniform(), b=init.Constant(0.),\n                 nonlinearity=nonlinearities.rectify,\n                 hid_init=init.Constant(0.), backwards=False,trace_steps=-1):\n        '''\n        A top layer for RecurrentLayer.\n\n        Parameters\n        ----------\n        Create a recurrent layer.\n        - W_i\n        - W_h\n        - hid_init : function or np.ndarray or theano.shared\n            Initial hidden state\n        - backwards : If True, process the sequence backwards\n        - trace_steps :\n            Number of steps to trace in BPTT\n            If -1 -> through whole sequence\n        '''\n\n        input_shape = incoming.get_output_shape()\n\n        #One gram in each step\n        input_to_hid = DenseLayer(InputLayer((input_shape[0],input_shape[2])),\n                                  num_units,W = W_i,b=b,nonlinearity = nonlinearity)\n\n        hid_to_hid = DenseLayer(InputLayer((input_shape[0], num_units)),\n                                num_units,W = W_h,nonlinearity=nonlinearity)\n\n        super(RecurrentLayer, self).__init__(\n            incoming, name,input_to_hid, hid_to_hid, nonlinearity=nonlinearity,\n            hid_init=hid_init, backwards=backwards,trace_steps=trace_steps)\n","sub_path":"network/layers/recurrent.py","file_name":"recurrent.py","file_ext":"py","file_size_in_byte":5216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"32984479","text":"import os\r\nimport time\r\nimport pika\r\nimport pymysql\r\nimport json\r\nimport subprocess\r\nimport collections\r\n\r\n\r\nsupported_agent = [\"xxx\", \"YangJun\", \"LiShuokai\", \"QianTao\", \"CFRAgent\", \"MappingAgent\", \"NFSPAgent\"]\r\n\r\ndef callback(ch, method, properties, body):\r\n    data = json.loads(body)\r\n    print(data)\r\n    room_id = data[\"room_id\"]\r\n    room_number = data[\"room_number\"]\r\n    game_number = data[\"game_number\"]\r\n    bot_name = data[\"bot_name\"]\r\n    bot_name_suffix = data[\"bot_name_suffix\"]\r\n\r\n    if bot_name == \"xxx\":\r\n        command = (\r\n            'docker run -d --rm registry.cn-hangzhou.aliyuncs.com/xuhang/agent:demo bash -c \"cd /root/project && python demo.py {} {} {} {}\"'.format(\r\n                room_id, room_number, bot_name + bot_name_suffix, game_number)\r\n        )\r\n        print(command)\r\n        subprocess.call(command, shell=True)\r\n\r\n    if bot_name == \"YangJun\" or bot_name == \"CFRAgent\":\r\n        # repo_name = \"hub.kce.ksyun.com/cxxuhang/agent:thu_v1.2\"\r\n        repo_name = \"registry.cn-beijing.aliyuncs.com/liuqh/texas2:v1.2\"\r\n        command = (\r\n            'docker run -d {} bash -c \"cd /root/poker && export PATH=/root/miniconda3/bin:$PATH && python run_this.py {} {} {} {}\"'.format(\r\n                repo_name, room_id, room_number, bot_name + bot_name_suffix, game_number)\r\n        )\r\n        print(command)\r\n        subprocess.call(command, 
shell=True)\r\n\r\n    if bot_name == \"LiShuokai\":\r\n        command = (\r\n            'docker run -d registry.cn-beijing.aliyuncs.com/xuehongyan/ict_agent:v0.1.2 bash -c \"cd /root/project && python py_player_new.py {} {} {} {}\"'.format(\r\n                room_id, room_number, bot_name + bot_name_suffix, game_number)\r\n        )\r\n        print(command)\r\n        subprocess.call(command, shell=True)\r\n\r\n    if bot_name == \"QianTao\":\r\n        command = (\r\n            'docker run -d registry.cn-shenzhen.aliyuncs.com/hitszcs/hitsz6p:v1.4 bash -c \"cd /root/project && python agent_cas.py {} {} {} {}\"'.format(\r\n                room_id, room_number, bot_name + bot_name_suffix, game_number)\r\n        )\r\n        print(command)\r\n        subprocess.call(command, shell=True)\r\n\r\n    if bot_name == \"MappingAgent\":\r\n        command = (\r\n            'docker run -d hub.kce.ksyun.com/cxxuhang/agent:MappingAgent-v0 bash -c \"cd /root/MappingAgent && python MappingAgent.py {} {} {} {}\"'.format(\r\n                room_id, room_number, bot_name + bot_name_suffix, game_number)\r\n        )\r\n        print(command)\r\n        subprocess.call(command, shell=True)\r\n\r\n    if bot_name == \"NFSPAgent\":\r\n        command = (\r\n            'docker run -d hub.kce.ksyun.com/cxxuhang/agent:nfsp_agent_v0 bash -c \"cd /root/code; python play_with_openholdem.py {} {} {} {} \"'.format(\r\n                room_id, room_number, bot_name + bot_name_suffix, game_number)\r\n        )\r\n        print(command)\r\n        subprocess.call(command, shell=True)\r\n\r\n    ch.basic_ack(method.delivery_tag)\r\n\r\n\r\ndef declare_queue():\r\n    credentials = pika.PlainCredentials(\"root\", \"root\")\r\n    connection = pika.BlockingConnection(\r\n        pika.ConnectionParameters(\r\n            host=\"172.18.40.65\",\r\n            credentials=credentials,\r\n            heartbeat=0\r\n        ))\r\n    channel = connection.channel()\r\n    for agent in supported_agent:\r\n        channel.queue_declare(queue=agent)\r\n    for agent in supported_agent:\r\n        channel.basic_consume(queue=agent, on_message_callback=callback)\r\n    channel.start_consuming()\r\n\r\ndef update_database():\r\n    connect = pymysql.Connect(\r\n        host=\"172.18.40.65\",  # MySQL host IP\r\n        port=3306,  # port\r\n        user=\"root\",  # username\r\n        passwd=\"root\",  # database password\r\n        db=\"poker\",  # database name\r\n        charset='utf8',  # character set\r\n    )\r\n    cursor = connect.cursor()\r\n    select_sql = 'select name from agent'\r\n    cursor.execute(select_sql)\r\n    cursor.connection.commit()\r\n    results = cursor.fetchall()\r\n    agents = [result[0] for result in results]\r\n    update_sql = \"insert into agent(name) values (%s)\"\r\n    for agent in supported_agent:\r\n        if agent not in agents:\r\n            cursor.execute(update_sql, agent)\r\n            cursor.connection.commit()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    update_database()\r\n    declare_queue()\r\n","sub_path":"multi_gev_server/agent/docker_agent_receiver.py","file_name":"docker_agent_receiver.py","file_ext":"py","file_size_in_byte":4272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"352958694","text":"# See LICENSE file for full copyright and licensing details.\n\nfrom odoo import models\nfrom odoo.osv.expression import OR, AND\n\n\nclass ResPartner(models.Model):\n    _inherit = 'res.partner'\n\n    def _get_domain_search_name(self, term):\n\n        name_field, operator, search_name = term\n\n        if name_field not in ('name', 'display_name'):\n            return [term]\n        if not search_name:\n            return [term]\n        if operator not in ('=', 'ilike', '=ilike', 'like', '=like'):\n            return [term]\n#        if ' ' not in search_name:\n#            return [term]\n        # else\n\n        parts = search_name.split(' ')\n        search_fields = (\n            'name',\n            'display_name',\n            'email',\n            'city',\n            'street',\n        )\n        return AND(\n            [\n                OR([(f, 
operator, part)] for f in search_fields)\n                for part in parts\n            ],\n        )\n\n    def search(self, args, offset=0, limit=None, order=None, count=False):\n        new_domain = []\n        for term in args:\n            if type(term) in [list, tuple] \\\n                    and term[0] in ('name', 'display_name'):\n                term = self._get_domain_search_name(term)\n                new_domain.extend(term)\n            else:\n                new_domain.append(term)\n        return super(ResPartner, self).search(\n            new_domain, offset=offset, limit=limit, order=order, count=count\n        )\n","sub_path":"partner_search/models/res_partner.py","file_name":"res_partner.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"504873309","text":"# SUMMARY: plot_pertube.py\n# USAGE: plot flux averaging results\n# ORG: Pacific Northwest National Laboratory\n# AUTHOR: Xuehang Song\n# E-MAIL: xuehang.song@pnnl.gov\n# ORIG-DATE: July-2018\n# DESCRIPTION:\n# DESCRIP-END.\n# COMMENTS: only deals with cartesian structured grids\n#\n# Last Change: 2018-08-01\n\n\nimport numpy as np\nimport math\nimport csv\nimport matplotlib.pyplot as plt\nimport pickle\nimport os.path\n\nsimu_dir = \"/pic/scratch/song884/bcomplex/uq/\"\nimg_dir = '/pic/projects/dvz/BComplex/FY18/figures/uq_wells2/'\n\n\nscale_units = {}\nscale_units['solute aqueous concentration NO3[kg]'] = 10**9\nscale_units['solute aqueous concentration Tc-99[ci]'] = 10**12\nscale_units['solute aqueous concentration U-Total[kg]'] = 10**9\n\nylabels = {}\nylabels['solute aqueous concentration NO3[kg]'] = \"NO3[ug/L]\"\nylabels['solute aqueous concentration Tc-99[ci]'] = \"Tc-99[pci/L]\"\nylabels['solute aqueous concentration U-Total[kg]'] = \"Uranium[ug/L]\"\n\n\nobs_data_dir = {}\nobs_data_dir['solute aqueous concentration NO3[kg]'] = \\\n    \"/pic/projects/dvz/BComplex/FY18/UQmodel/obsdata/nitrate2/\"\nobs_data_dir['solute aqueous concentration Tc-99[ci]'] = \\\n    \"/pic/projects/dvz/BComplex/FY18/UQmodel/obsdata/tc-99/\"\nobs_data_dir['solute aqueous concentration U-Total[kg]'] = \\\n    \"/pic/projects/dvz/BComplex/FY18/UQmodel/obsdata/uranium/\"\n\n\nncase = 147\n\ncases = []\ncase_name = {}\nfor icase in range(ncase):\n    cases.append(str(icase))\n    case_name[cases[icase]] = str(icase)\n\ncase_data = {}\nfor icase in cases:\n    print(simu_dir+icase)\n    fname = open(simu_dir+icase+\"/tec_data/averaged_varis.pk\", \"rb\")\n    case_data[icase] = pickle.load(fname)\n    fname.close()\nvaris = list(case_data[icase].keys())\nvaris.remove('Time')\n\nwells = list(case_data[icase][varis[0]].keys())\nnwell = len(wells)\n\nraw_data = {}\nfor icase in cases:\n    print(simu_dir+icase)\n    fname = open(simu_dir+icase+\"/tec_data/raw_varis.pk\", \"rb\")\n    raw_data[icase] = pickle.load(fname)\n    fname.close()\n\n\n# read observation data\nobs_time_range = [1e4, -1e4]\nobs_data = {}\nfor ivari in varis:\n    obs_data[ivari] = {}\n    for iwell in wells:\n        data_file = obs_data_dir[ivari] + iwell + \".dat\"\n        if os.path.exists(data_file):\n            obs_data[ivari][iwell] = []\n            with open(data_file, \"r\") as infile:\n                reader = csv.reader(infile)\n                next(reader, None)\n                next(reader, None)\n                for row in reader:\n                    obs_data[ivari][iwell].append(\n                        [float(row[0]), float(row[2])])\n            obs_data[ivari][iwell] = np.asarray(obs_data[ivari][iwell])\n            obs_time_range = [min(obs_time_range[0], np.min(obs_data[ivari][iwell][:, 0])),\n                              max(obs_time_range[1], np.max(obs_data[ivari][iwell][:, 
0]))]\n","sub_path":"fy18/best_reaz.py","file_name":"best_reaz.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"33101894","text":"#! /usr/bin/env python\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom fun import *\nimport pickle\nimport pdb\nfrom dynamicfitter import *\n\nxs = np.linspace(-10,10,21)\nassert(xs[np.argmax(gaussian(xs,0,1))] == 0.0)\n\nstar0 = [1, 1, 1, 1]\ntime = 1\nfor time in range(10):\n    assert(project_star(star0, time)[0] == star0[0] + star0[2]*time)\n    assert(project_star(star0, time)[1] == star0[1] + star0[3]*time)\n\ntrace_back, n_time_steps, nstars, times, orig =\\\n    pickle.load(open(\"data.pkl\", 'rb'))\n\nassert np.shape(trace_back) == (n_time_steps, nstars, 4)\n\nassert gaussian_fitter((0,2), nstars, trace_back[-1]) > 0\n\ninit_pars = [0,1]\nbnds = ((None, None), (0.1,None))\n\nstandard_fit = opt.minimize(\n    gaussian_fitter, init_pars, (nstars, trace_back[-1]),\n    bounds=bnds )\nbayesian_fit = opt.minimize(overlap, init_pars, (nstars, trace_back[-1]),\n    bounds=bnds )\n\nst_fitted_mu = standard_fit.x[0]\nst_fitted_sig = standard_fit.x[1]\n\nb_fitted_mu = bayesian_fit.x[0]\nb_fitted_sig = bayesian_fit.x[1]\n\nxs = np.linspace(-100,100,1000)\nplt.plot(xs, nstars * gaussian(xs, st_fitted_mu, st_fitted_sig), label=\"Gaussian fit\")\nplt.plot(xs, nstars * gaussian(xs, b_fitted_mu, b_fitted_sig), label=\"Bayesian fit\")\nplt.plot(xs, group_pdf(xs, orig), label=\"True origin\")\nplt.legend(loc='best')\n#plt.show()\nplt.clf()\n\nst_fitted_sigs = np.zeros(n_time_steps)\nba_fitted_sigs = np.zeros(n_time_steps)\nba_fitted_mus = np.zeros(n_time_steps)\ninit_pars = [0,2]\nfor i, time in enumerate(times):\n    st_fit = opt.minimize(gaussian_fitter, init_pars, (nstars, trace_back[i]))\n    ba_fit = opt.minimize(overlap, init_pars, (nstars, trace_back[i]))\n\n    st_fitted_sigs[i] = np.abs(st_fit.x[1])\n    ba_fitted_sigs[i] = ba_fit.x[1]\n    ba_fitted_mus[i] = ba_fit.x[0]\n\nplt.plot(st_fitted_sigs, label=\"Standard fit\")\nplt.plot(ba_fitted_sigs, label=\"Bayesian fit\")\nplt.xlabel(\"Age [Myrs]\")\nplt.ylabel(r\"Fitted $\\sigma$\")\nplt.title(\"Positional variance of group fit\")\nplt.legend(loc='best')\nplt.show()\n\ninit_skip = 2\n\nbest_ix = np.argmin(ba_fitted_sigs[init_skip:40]) + init_skip\nprint(times[best_ix])\nprint(ba_fitted_sigs[best_ix])\nprint(ba_fitted_mus[best_ix])\npdb.set_trace()\n","sub_path":"playground/bayesian_demo/unit_test_fun.py","file_name":"unit_test_fun.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"459303783","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Software: PyCharm\n# @Time : 2019/5/27 20:28\n# @Author : linjinting\n# @Site :\n# @Software: CommunicationTool\n# @File : UDPServer.py\n# @Function:\n\nfrom UDPServerBase import UDPServerBase\nfrom PyQt5.QtCore import QObject, pyqtSignal\n\n\nclass UDPServer(QObject, UDPServerBase):\n    signal_show_statusmsg = pyqtSignal(str)\n    signal_show_status = pyqtSignal(str)\n    signal_write_msg = pyqtSignal(str)\n    signal_client_change = pyqtSignal()\n    def __init__(self):\n        super(UDPServer, self).__init__()\n        UDPServerBase.__init__(self)\n\n        self.isStopDisplay = False\n\n    def channel_change(self, type_):\n        if type_ == \"client\":\n            self.signal_client_change.emit()\n\n    def show_msg(self, type_, msg=\"\"):\n        \"\"\"\n        Utility function: display different kinds of message content depending on type_\n        params --- type_: statusmsg, status, write_msg, addclient, print\n               --- msg: str data, empty by default\n        :return:\n        \"\"\"\n        # print \"--TCPServer---show-msg\"\n\n        if self.isStopDisplay:\n            return\n        if msg == \"\":\n            return\n        # print msg\n        if type_ == \"print\":\n            print(msg)\n        if type_ == \"statusmsg\":\n            self.signal_show_statusmsg.emit(msg)\n        if type_ == \"status\":\n            self.signal_show_status.emit(msg)\n        if type_ == \"write\":\n            self.signal_write_msg.emit(msg)\n\n    def setStopDisplay(self, isStopDisplay):\n        self.isStopDisplay = isStopDisplay\n\n\nif __name__ == '__main__':\n    s = UDPServer()\n    s.setAddress(\"127.0.0.1\", 5566)\n    s.open()\n","sub_path":"UDPServer.py","file_name":"UDPServer.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"410677900","text":"\"\"\"\nCompare the initial values of the parameters a CNN uses (filter W, bias b) with W, b after training\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.image import imread\n\nfrom lab_dl.lab_dl.ch07.simple_convnet import SimpleConvNet\nfrom lab_dl.lab_dl.common.layers import Convolution\n\n\ndef show_filter(filters, num_filters, ncols=8):\n    # using subplots\n    # ncols = number of columns\n    # num_filters // ncols = nrows\n    # lay the plots out on an nrows * ncols grid\n    nrows = int(np.ceil(num_filters / ncols)) # ceil -> 30 / 8 = 3.75 rounds up to 4\n    for i in range(num_filters):\n        # pick the subplot position\n        plt.subplot(nrows, ncols, (i+1), xticks=[], yticks=[])\n        # draw the filter\n        plt.imshow(filters[i, 0], cmap='gray')\n        # filters[i, 0] means: pull a 2-d array out of the 4-d array\n        # index 0 of the 4-d array -> index 0 of the 3-d array -> a (5, 5) 2-d array\n    plt.show()\n\n\nif __name__ == '__main__':\n    # create a simple CNN\n    cnn = SimpleConvNet()\n    # parameters before training - made up of random numbers\n    before_filters = cnn.params['W1']\n    print(before_filters.shape)\n    # (30, 1, 5, 5) -> 30 filters, each of shape (1, 5, 5)\n    # i.e. 30 filters\n    show_filter(before_filters, 30, ncols=8) # values are rendered with 0 as black, 255 as white..\n\n    # parameters after training - stored in the pkl file\n    # load the parameters saved in the pickle file into the cnn's fields.\n    cnn.load_params('cnn_params.pkl')\n\n    after_filters = cnn.params['W1']\n    # plot the parameters as updated at the end of training\n    show_filter(after_filters, 16, ncols=4)\n\n    # apply the trained parameters to an actual image file\n    # pyplot.imread: converts a png file to an np.array\n    # jpeg files must be converted to np.array through the PIL library\n    lena = imread('lena_gray.png')\n    print(lena.shape)\n\n    # pass the image data to the Convolution layer's forward() method.\n    # lena is 2-d <-> the layer always expects 4-d\n    # -> so the 2-d array is reshaped to 4-d\n    # *lena.shape : unpacks the numbers from the shape tuple one by one\n    lena = lena.reshape(1, 1, *lena.shape)\n    for i in range(16):\n        # filter\n        w = cnn.params['W1'][i] # updated filter\n        # b = cnn.params['b1'][i] # updated bias\n        b = 0\n        w = w.reshape(1, *w.shape) # reshape 3-d -> 4-d\n        conv = Convolution(w, b) # create a Convolution layer\n        out = conv.forward(lena) # forward-pass the image through filter i.\n        # reshape 4-d to 2-d so pyplot can draw it\n        out = out.reshape(out.shape[2], out.shape[3])\n        plt.subplot(4, 4, i+1, xticks=[], yticks=[])\n        plt.imshow(out, cmap='gray')\n    plt.show()\n\n\n\n","sub_path":"lab_dl/ch07/ex15_cnn_params.py","file_name":"ex15_cnn_params.py","file_ext":"py","file_size_in_byte":2932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"477727169","text":"import math\ndef siete():\n\n    suma=0\n    cad=\"\"\n    n = int(input(\"n numeros para elevar al cuadrado: \"))\n    for q in range(1,n+1):\n        c=pow(q,2)\n        cad=cad+\" \"+str(c)\n        suma+=c\n    print(cad)\n    print(\"La suma de n cuadrados es: \", 
suma)\nsiete()\n","sub_path":"10._RecursivoC.py","file_name":"10._RecursivoC.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"26348484","text":"from urllib.parse import urlencode\n\nimport requests\nfrom jose import jwt\n\nfrom ._property import overwritable_property\nfrom ._request_auth import HTTPBasicAuth, HTTPBearerAuth\n\nfrom openid_connect._verify import verify\n\n\nclass TokenResponse(object):\n\tdef __init__(self, data, client=None):\n\t\tself._data = data\n\t\tself._client = client\n\n\t@property\n\tdef access_token(self):\n\t\treturn self._data.get(\"access_token\")\n\n\t@property\n\tdef id_token(self):\n\t\treturn self._data.get(\"id_token\")\n\n\t@property\n\tdef userinfo(self):\n\t\ttry:\n\t\t\treturn self._userinfo\n\t\texcept AttributeError:\n\t\t\tuserinfo = self._client.get_userinfo(self.access_token)\n\t\t\tif self.id[\"sub\"] != userinfo[\"sub\"]:\n\t\t\t\treturn None\n\t\t\tself._userinfo = userinfo\n\t\t\treturn self._userinfo\n\nclass OpenIDClient(object):\n\tdef __init__(self, url, client_id=None, client_secret=None, initial_auth=None, initial_access_token=None, registration_client_uri=None, registration_auth=None, registration_access_token=None):\n\t\tself.url = url\n\t\tself.client_id = client_id\n\t\tself.client_secret = client_secret\n\n\t\tif initial_auth:\n\t\t\tself.initial_auth = initial_auth\n\t\tself.initial_access_token = initial_access_token\n\n\t\tself.registration_client_uri = registration_client_uri\n\t\tif registration_auth:\n\t\t\tself.registration_auth = registration_auth\n\t\tself.registration_access_token = registration_access_token\n\n\t\tself._configuration = self.get_configuration()\n\n\t@overwritable_property\n\tdef auth(self):\n\t\tif self.client_secret:\n\t\t\treturn HTTPBasicAuth(self.client_id, self.client_secret)\n\t\telse:\n\t\t\treturn None\n\n\tdef get_configuration(self):\n\t\tr = requests.get(self.url + \"/.well-known/openid-configuration\")\n\t\tr.raise_for_status()\n\t\treturn r.json()\n\n\tdef translate_scope_in(self, scope):\n\t\treturn scope\n\n\tdef translate_scope_out(self, scope):\n\t\treturn scope\n\n\tdef translate_userinfo(self, userinfo):\n\t\treturn userinfo\n\n\tverify = verify\n\n\tdef get_id(self, token_response):\n\t\tif not token_response.id_token:\n\t\t\treturn None\n\n\t\treturn self.verify(**token_response._data)\n\n\t@property\n\tdef issuer(self):\n\t\treturn self._configuration[\"issuer\"]\n\n\t@property\n\tdef authorization_endpoint(self):\n\t\treturn self._configuration[\"authorization_endpoint\"]\n\n\t@property\n\tdef token_endpoint(self):\n\t\treturn self._configuration[\"token_endpoint\"]\n\n\t@property\n\tdef jwks_uri(self):\n\t\treturn self._configuration[\"jwks_uri\"]\n\n\t@property\n\tdef userinfo_endpoint(self):\n\t\treturn self._configuration[\"userinfo_endpoint\"]\n\n\t@property\n\tdef end_session_endpoint(self):\n\t\treturn self._configuration.get(\"end_session_endpoint\")\n\n\t@property\n\tdef registration_endpoint(self):\n\t\treturn self._configuration.get(\"registration_endpoint\")\n\n\t@property\n\tdef keys(self):\n\t\tr = requests.get(self._configuration[\"jwks_uri\"])\n\t\tr.raise_for_status()\n\t\treturn r.json()\n\n\tdef get_userinfo(self, access_token):\n\t\tr = requests.get(self.userinfo_endpoint, headers=dict(\n\t\t\tAuthorization = \"Bearer \" + access_token,\n\t\t))\n\t\tr.raise_for_status()\n\t\tdata = r.json()\n\t\treturn self.translate_userinfo(data)\n\n\tdef authorize(self, 
redirect_uri, state='', scope=('openid',)):\n\t\tscope = set(self.translate_scope_in(scope))\n\t\treturn self.authorization_endpoint + \"?\" + urlencode(dict(\n\t\t\tclient_id=self.client_id,\n\t\t\tresponse_type=\"code\",\n\t\t\tredirect_uri=redirect_uri,\n\t\t\tstate=state,\n\t\t\tscope=\" \".join(scope),\n\t\t))\n\n\tdef request_token(self, redirect_uri, code):\n\t\tr = requests.post(self.token_endpoint, auth=self.auth, data=dict(\n\t\t\tgrant_type=\"authorization_code\",\n\t\t\tredirect_uri=redirect_uri,\n\t\t\tcode=code,\n\t\t), headers={'Accept': 'application/json'})\n\t\tr.raise_for_status()\n\t\tresp = TokenResponse(r.json(), self)\n\n\t\tif \"scope\" in resp._data:\n\t\t\tresp.scope = set(self.translate_scope_out(set(resp._data[\"scope\"].split(\" \"))))\n\t\tresp.id = self.get_id(resp)\n\n\t\treturn resp\n\n\tdef end_session(self, post_logout_redirect_uri, state, id_token_hint=''):\n\t\tif not self.end_session_endpoint:\n\t\t\traise NotImplementedError(\"This OP does not support RP-initiated logout.\")\n\n\t\treturn self.end_session_endpoint + \"?\" + urlencode(dict(\n\t\t\tclient_id=self.client_id, # See https://bitbucket.org/openid/connect/issues/914/session-5-missing-client_id-parameter\n\t\t\tpost_logout_redirect_uri=post_logout_redirect_uri,\n\t\t\tstate=state,\n\t\t\tid_token_hint=id_token_hint,\n\t\t))\n\n\t@overwritable_property\n\tdef initial_auth(self):\n\t\tif self.initial_access_token:\n\t\t\treturn HTTPBearerAuth(self.initial_access_token)\n\t\telse:\n\t\t\t# TODO? Write an OAuth extension for using Basic auth for registration_endpoint.\n\t\t\treturn self.auth\n\n\tdef register(self, auth=None, access_token=None, **client_config):\n\t\tif not self.registration_endpoint:\n\t\t\traise NotImplementedError(\"This OP does not support Dynamic Client Registration.\")\n\n\t\tif not auth and access_token:\n\t\t\tauth = HTTPBearerAuth(access_token)\n\n\t\tif not auth:\n\t\t\tauth = self.initial_auth\n\n\t\tr = requests.post(self.registration_endpoint, auth=auth, json=client_config)\n\t\tr.raise_for_status()\n\t\tdata = r.json()\n\t\tclient = type(self)(self.url, client_id=data['client_id'], client_secret=data.get('client_secret'), registration_client_uri=data.get('registration_client_uri'), registration_access_token=data.get('registration_access_token'))\n\t\treturn client, data\n\n\t@overwritable_property\n\tdef registration_auth(self):\n\t\tif self.registration_access_token:\n\t\t\treturn HTTPBearerAuth(self.registration_access_token)\n\t\telse:\n\t\t\t# TODO Write an OAuth extension for using normal auth for registration_client_uri.\n\t\t\treturn self.auth\n\n\t@property\n\tdef client(self):\n\t\tif not self.registration_client_uri:\n\t\t\traise NotImplementedError(\"registration_client_uri was not provided.\")\n\n\t\tr = requests.get(self.registration_client_uri, auth=self.registration_auth)\n\t\tr.raise_for_status()\n\t\treturn r.json()\n\n\t@client.setter\n\tdef client(self, config):\n\t\tif not self.registration_client_uri:\n\t\t\traise NotImplementedError(\"registration_client_uri was not provided.\")\n\n\t\tr = requests.put(self.registration_client_uri, auth=self.registration_auth, json=config)\n\t\tr.raise_for_status()\n\t\treturn r.json()\n\n\t@client.deleter\n\tdef client(self):\n\t\tif not self.registration_client_uri:\n\t\t\traise NotImplementedError(\"registration_client_uri was not provided.\")\n\n\t\tr = requests.delete(self.registration_client_uri, 
auth=self.registration_auth)\n\t\tr.raise_for_status()\n","sub_path":"openid_connect/_oidc.py","file_name":"_oidc.py","file_ext":"py","file_size_in_byte":6216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"603773816","text":"################################################################################\n# File: .py\n# Date: 2020.11.30\n# Author: HS\n#\n# Description: \n#\n################################################################################\nimport serial # Import pySerial for serial communication\nimport struct\nimport time\n\nser = serial.Serial(None,9600,bytesize = serial.EIGHTBITS,parity = serial.PARITY_EVEN,stopbits = serial.STOPBITS_ONE, timeout = 0.5)\n\n# CRC-8 calculation Maxim / Dallas\ndef Crc8(b, crc):\n b2 = b\n \n if (b < 0): b2 = b + 256\n for i in range(8):\n odd = ((b2^crc) & 1) == 1\n crc >>= 1\n b2 >>= 1\n if (odd): crc ^= 0x8C \n return crc\n\n# CalculateCrc calls Crc8 (Dallas/Maxim crc) for one byte \n# A list of hex numbers is passed as parameter \ndef CalculateCrc(hex_liste):\n i=0\n crc_i = 0\n for i in hex_liste: crc_i=Crc8(i,crc_i)\n return crc_i \n \n\n\nser.port='COM4' # add your com port here\nser.open()# Open serial port \n\n### define the used commands ###\ncmd_both_off = [0x01,0x03, 0x00, 0x00, 0x00] # all off: speed=0, offset=0\ncmd_both_off.append(CalculateCrc(cmd_both_off))\n\ncmd_both_fwd_8000_offset_0 = [0x01,0x03, 0x40, 0x1F, 0x00] # FWD: speed=8000 rpm, offset=0\ncmd_both_fwd_8000_offset_0.append(CalculateCrc(cmd_both_fwd_8000_offset_0))\n\ncmd_both_fwd_8000_offset_100 = [0x01,0x03, 0x40, 0x1F, 0x64] # FWD: speed=8000 rpm, offset=100\ncmd_both_fwd_8000_offset_100.append(CalculateCrc(cmd_both_fwd_8000_offset_100))\n\ncmd_both_fwd_8000_offset_50 = [0x01,0x03, 0x40, 0x1F, 0x32] # FWD: speed=8000 rpm, offset=100\ncmd_both_fwd_8000_offset_50.append(CalculateCrc(cmd_both_fwd_8000_offset_50))\n\ncmd_both_fwd_8000_offset_minus_100 = [0x01,0x03, 0x70, 0x17, 0x9C] # FWD: speed=8000 rpm, offset=-100\ncmd_both_fwd_8000_offset_minus_100.append(CalculateCrc(cmd_both_fwd_8000_offset_minus_100))\n\ncmd_both_bwd_8000_offset_100 = [0x01,0x03, 0xC0, 0xE0, 0x64] # BWD: speed=-8000 rpm, offset=100\ncmd_both_bwd_8000_offset_100.append(CalculateCrc(cmd_both_bwd_8000_offset_100))\n\ncmd_both_bwd_8000_offset_50 = [0x01,0x03, 0xC0, 0xE0, 0x32] # BWD: speed=-8000 rpm, offset=50\ncmd_both_bwd_8000_offset_50.append(CalculateCrc(cmd_both_bwd_8000_offset_50))\n\ncmd_both_bwd_8000_offset_minus_50 = [0x01, 0x03, 0xC0, 0xE0, 0xCE] # BWD: speed=-8000 rpm, offset=-50\ncmd_both_bwd_8000_offset_minus_50.append(CalculateCrc(cmd_both_bwd_8000_offset_minus_50))\n\ncmd_both_set_current_limit_200mA = [0x03, 0x03, 0xC8, 0x00] # both motors, 200 mA\ncmd_both_set_current_limit_200mA.append(CalculateCrc(cmd_both_set_current_limit_200mA))\n\ncmd_both_set_current_limit_300mA = [0x03, 0x03, 0x2C, 0x01] # both motors, 300 mA\ncmd_both_set_current_limit_300mA.append(CalculateCrc(cmd_both_set_current_limit_300mA))\n\ncmd_mot1_fwd_8000_offset_0 = [0x01,0x01, 0x40, 0x1F, 0x00] # FWD: speed=8000 rpm, offset=0\ncmd_mot1_fwd_8000_offset_0.append(CalculateCrc(cmd_mot1_fwd_8000_offset_0))\n\ncmd_mot2_fwd_8000_offset_0 = [0x01,0x02, 0x40, 0x1F, 0x00] # FWD: speed=8000 rpm, offset=0\ncmd_mot2_fwd_8000_offset_0.append(CalculateCrc(cmd_mot2_fwd_8000_offset_0))\n\ncmd_mot1_bwd_8000_offset_0 = [0x01,0x01, 0xC0, 0xE0, 0x00] # BWD: speed=-8000 rpm, 
offset=0\ncmd_mot1_bwd_8000_offset_0.append(CalculateCrc(cmd_mot1_bwd_8000_offset_0))\n\ncmd_mot2_bwd_8000_offset_0 = [0x01,0x02, 0xC0, 0xE0, 0x00] # BWD: speed=-8000 rpm, offset=0\ncmd_mot2_bwd_8000_offset_0.append(CalculateCrc(cmd_mot2_bwd_8000_offset_0))\n\ncmd_both_reset_errors = [0x09,0x03] # reset errors\ncmd_both_reset_errors.append(CalculateCrc(cmd_both_reset_errors))\n\nturn_time =2.3\nfwd_time =5\nrotate_time=1\n\n\n#### demonstration sequence\nprint(\"START\")\nser.write(serial.to_bytes(cmd_both_reset_errors))\ntime.sleep(1)\nser.write(serial.to_bytes(cmd_both_off))\ntime.sleep(0.1)\nser.write(serial.to_bytes(cmd_both_set_current_limit_200mA))\ntime.sleep(0.1)\n\nprint(\"FWD\")\nser.write(serial.to_bytes(cmd_both_fwd_8000_offset_0))\ntime.sleep(fwd_time)\n\nprint(\"TURN\")\nser.write(serial.to_bytes(cmd_both_fwd_8000_offset_100))\ntime.sleep(turn_time)\n\nprint(\"FWD\")\nser.write(serial.to_bytes(cmd_both_fwd_8000_offset_0))\ntime.sleep(fwd_time)\n\nprint(\"TURN\")\nser.write(serial.to_bytes(cmd_both_fwd_8000_offset_100))\ntime.sleep(turn_time)\n\nprint(\"FWD\")\nser.write(serial.to_bytes(cmd_both_fwd_8000_offset_0))\ntime.sleep(fwd_time)\n\nprint(\"TURN\")\nser.write(serial.to_bytes(cmd_both_fwd_8000_offset_100))\ntime.sleep(turn_time)\n\nprint(\"FWD\")\nser.write(serial.to_bytes(cmd_both_fwd_8000_offset_0))\ntime.sleep(fwd_time)\n\nprint(\"TURN\")\nser.write(serial.to_bytes(cmd_both_fwd_8000_offset_100))\ntime.sleep(turn_time)\n\nser.write(serial.to_bytes(cmd_both_off))\ntime.sleep(1)\n\nprint(\"ROTATE\")\nser.write(serial.to_bytes(cmd_mot2_bwd_8000_offset_0))\ntime.sleep(0.1)\nser.write(serial.to_bytes(cmd_mot1_fwd_8000_offset_0))\ntime.sleep(rotate_time)\nser.write(serial.to_bytes(cmd_both_off))\ntime.sleep(0.1)\nser.write(serial.to_bytes(cmd_mot2_fwd_8000_offset_0))\ntime.sleep(0.1)\nser.write(serial.to_bytes(cmd_mot1_bwd_8000_offset_0))\ntime.sleep(rotate_time)\nser.write(serial.to_bytes(cmd_both_off))\n\nprint(\"Finish\")\n\nser.close() # Close serial port\n","sub_path":"tdk-hvc4223f-scripts/files/demos/drive_square.py","file_name":"drive_square.py","file_ext":"py","file_size_in_byte":5118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"171335203","text":"import cv2\nimport os\n\ndef extract_frames(video_path):\n    \"Take a video and output all frames to folder\"\n    success = True\n    count = 0\n    new_dir = video_path[:-4]\n    if not os.path.exists(new_dir):\n        os.mkdir(new_dir)\n    vidcap = cv2.VideoCapture(video_path)\n\n    num_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))\n    print('Video has been read and it has {} frames'.format(num_frames))\n\n    while success:\n        success, image = vidcap.read()\n        if not success: # the final read fails, so do not write a None frame\n            break\n        img_path = new_dir+'/'+str(count)+'.jpg'\n        cv2.imwrite(img_path, image)\n        count += 1\n        if count % 5000 == 0:\n            print('Processed {:0.1f}% of frames'.format(count/num_frames * 100))\n\n    print('Done! 
Processed {} frames from video'.format(count))\n return\n\nif __name__ == \"__main__\":\n extract_frames('data/test_video.mp4')\n","sub_path":"video_proc.py","file_name":"video_proc.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"420507179","text":"import pandas as pd\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.model_selection import train_test_split \nfrom sklearn.metrics import mean_absolute_error\ndata=pd.read_csv(\"melbourne_housing_FULL.csv\")\nprint(data.columns)\n\ndata=data.dropna(axis=0)\ny=data.Price\n\nfeatures=['Rooms','Bathroom','Landsize','Lattitude','Longtitude']\nX=data[features]\n\ntrain_X,val_X,train_y,val_y=train_test_split(X,y,random_state=0)\n\nmodel=DecisionTreeRegressor()\nmodel.fit(train_X,train_y)\npredictions=model.predict(val_X)\nprint(predictions)\nprint(mean_absolute_error(val_y,predictions))","sub_path":"Melbourne Housing Prices/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"144926859","text":"from django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^$', 'cotaskme.views.home', name='home'),\n url(r'^new-task-list$', 'cotaskme.views.newlist', name='newlist'),\n\n url(r'^t/([^/]+)$', 'cotaskme.views.tasklist', name='tasklist'),\n url(r'^t/([^/]+)/_action$', 'cotaskme.views.tasklist_action', name='tasklist_action'),\n url(r'^t/([^/]+)/_post$', 'cotaskme.views.tasklist_post', name='tasklist_post'),\n\n url('', include('social.apps.django_app.urls', namespace='social')),\n url('logout$', 'cotaskme.views.logout_view'),\n\n url(r'^admin/', include(admin.site.urls)),\n)\n","sub_path":"cotaskme/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"467759753","text":"#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\n# Hive Colony Framework\r\n# Copyright (c) 2008-2022 Hive Solutions Lda.\r\n#\r\n# This file is part of Hive Colony Framework\r\n#\r\n# Hive Colony Framework is free software: you can redistribute it and/or modify\r\n# it under the terms of the Apache License as published by the Apache\r\n# Foundation, either version 2.0 of the License, or (at your option) any\r\n# later version.\r\n#\r\n# Hive Colony Framework is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\r\n# Apache License for more details.\r\n#\r\n# You should have received a copy of the Apache License along with\r\n# Hive Colony Framework If not, see .\r\n\r\n__author__ = \"João Magalhães \"\r\n\"\"\" The author(s) of the module \"\"\"\r\n\r\n__version__ = \"1.0.0\"\r\n\"\"\" The version of the module \"\"\"\r\n\r\n__revision__ = \"$LastChangedRevision$\"\r\n\"\"\" The revision number of the module \"\"\"\r\n\r\n__date__ = \"$LastChangedDate$\"\r\n\"\"\" The last change date of the module \"\"\"\r\n\r\n__copyright__ = \"Copyright (c) 2008-2022 Hive Solutions Lda.\"\r\n\"\"\" The copyright for the module \"\"\"\r\n\r\n__license__ = \"Apache License, Version 2.0\"\r\n\"\"\" The license for the module \"\"\"\r\n\r\nimport colony\r\n\r\nclass LazyClassTest(colony.ColonyTestCase):\r\n \"\"\"\r\n Class that tests the lazy loading mechanisms.\r\n \"\"\"\r\n\r\n def test_equals(self):\r\n \"\"\"\r\n Tests the \"equals\" value function.\r\n \"\"\"\r\n\r\n # verifies that the global lazy reference is\r\n # \"equivalent\" to a null reference\r\n self.assertEqual(colony.Lazy, None)\r\n\r\n # creates a new lazy class instance to check the\r\n # comparisons against it\r\n lazy = colony.LazyClass()\r\n\r\n # verifies that the new instance is equivalent to\r\n # a null object, itself and the global lazy reference\r\n self.assertEqual(lazy, None)\r\n self.assertEqual(lazy, lazy)\r\n self.assertEqual(lazy, colony.Lazy)\r\n","sub_path":"src/colony/test/libs/lazy_util.py","file_name":"lazy_util.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"507078714","text":"def localization(self):\n \"\"\" French internationalization \"\"\"\n\n self.name = \"FactoQuizBot\"\n self.dateTimeFormat = \"%d/%m/%Y à %H:%M:%S\"\n\n # Support command ==============================================\n self.cmdHelp_details = \"= Liste des commandes =\\n\\n[{0}help pour plus de détails]\\n\"\n self.cmdHelp_alias = \"\\nalias :: \"\n\n # help / Admin\n self.cmdHelp_load = \"Active un module.\"\n self.cmdHelp_loadExt = \"usage :: load #\\ndétails :: Active un module.\"\n self.cmdHelp_unload = \"Désactive un module.\"\n self.cmdHelp_unloadExt = \"usage :: unload #\\ndétails :: Désactive un module.\"\n self.cmdHelp_reload = \"Met à jour un module.\"\n self.cmdHelp_reloadExt = \"usage :: reload #\\ndétails :: Met à jour un module.\"\n\n # help / General\n self.cmdHelp_ping = \"Latence & temps de réponse de l'API.\"\n self.cmdHelp_pingExt = \"usage :: ping#\\ndétails :: Cette commande aide à détecter s'il y a de la latence (\" \\\n \"lag) du coté de la connectivité du bot, ou de l'API.\"\n self.cmdHelp_serverInfo = \"Infos & stats du serveur.\"\n self.cmdHelp_serverInfoExt = \"usage :: serverInfo#\\ndétails :: Cette commande affiche diverses informations \" \\\n \"et statistiques du serveur.\"\n self.cmdHelp_userInfo = \"Infos détaillées pour un utilisateur donné.\"\n self.cmdHelp_userInfoExt = \"usage :: userInfo [@mention|userid]#\\ndétails :: Cette commande affiche des \" \\\n \"informations sur un utilisateur en particulier ou sur vous-même.\"\n self.cmdHelp_botInfo = \"Infos & stats du bot.\"\n self.cmdHelp_botInfoExt = \"usage :: botInfo#\\ndétails :: Cette commande affiche diverses informations et \" \\\n \"statistiques du bot.\"\n\n # help / Support\n\n # help / Quiz\n self.cmdHelp_startQuiz = \"Démarre le quiz d'entraînement.\"\n self.cmdHelp_startQuizExt = \"usage :: startquiz#\\ndétails :: Cette commande permet de 
démarrer un quiz \" \\\n \"d'entraînement. Tout le monde peut y participer, mais seul le plus rapide pourra \" \\\n \"y répondre.\"\n self.cmdHelp_startExam = \"Démarre la session d'examen.\"\n self.cmdHelp_startExamExt = \"usage :: startExam#\\ndétails :: Cette commande permet de démarrer la session \" \\\n \"d'examen. Cet examen est individuel et le participant peut évaluer ses connaissances\" \\\n \" sur le jeu Factorio.\"\n\n # help / Admin\n self.cmdHelp_config = \"Affiche la configuration du serveur.\"\n self.cmdHelp_configExt = \"usage :: config#\\ndétails :: Cette commande permet d'afficher la configuration du \" \\\n \"serveur.\"\n self.cmdHelp_edit = \"Edite al configuration du serveur.\"\n self.cmdHelp_editExt = \"usage :: edit