diff --git "a/1461.jsonl" "b/1461.jsonl" new file mode 100644--- /dev/null +++ "b/1461.jsonl" @@ -0,0 +1,749 @@ +{"seq_id":"22507966","text":"'''\nCreated on Jul 3, 2014\n\n@author: roj-com0419\n'''\n\nimport os,sys\nimport time,datetime\nimport h5py\nimport numpy\nimport fnmatch\nimport re\n\nfrom schainpy.model.data.jroheaderIO import RadarControllerHeader, SystemHeader\nfrom schainpy.model.data.jrodata import Voltage\nfrom schainpy.model.proc.jroproc_base import ProcessingUnit, Operation\n\n\ndef isNumber(str):\n \"\"\"\n Chequea si el conjunto de caracteres que componen un string puede ser convertidos a un numero.\n\n Excepciones:\n Si un determinado string no puede ser convertido a numero\n Input:\n str, string al cual se le analiza para determinar si convertible a un numero o no\n\n Return:\n True : si el string es uno numerico\n False : no es un string numerico\n \"\"\"\n try:\n float( str )\n return True\n except:\n return False\n\ndef getFileFromSet(path, ext, set=None):\n validFilelist = []\n fileList = os.listdir(path)\n\n\n if len(fileList) < 1:\n return None\n\n # 0 1234 567 89A BCDE\n # H YYYY DDD SSS .ext\n\n for thisFile in fileList:\n try:\n number= int(thisFile[6:16])\n\n # year = int(thisFile[1:5])\n # doy = int(thisFile[5:8])\n except:\n continue\n\n if (os.path.splitext(thisFile)[-1].lower() != ext.lower()):\n continue\n\n validFilelist.append(thisFile)\n\n if len(validFilelist) < 1:\n return None\n\n validFilelist = sorted( validFilelist, key=str.lower )\n\n if set == None:\n return validFilelist[-1]\n\n print(\"set =\" ,set)\n for thisFile in validFilelist:\n if set <= int(thisFile[6:16]):\n print(thisFile,int(thisFile[6:16]))\n return thisFile\n\n return validFilelist[-1]\n\n myfile = fnmatch.filter(validFilelist,'*%10d*'%(set))\n #myfile = fnmatch.filter(validFilelist,'*%4.4d%3.3d%3.3d*'%(year,doy,set))\n\n if len(myfile)!= 0:\n return myfile[0]\n else:\n filename = '*%10.10d%s'%(set,ext.lower())\n print('the filename %s does not 
exist'%filename)\n print('...going to the last file: ')\n\n if validFilelist:\n validFilelist = sorted( validFilelist, key=str.lower )\n return validFilelist[-1]\n\n return None\n\ndef getlastFileFromPath(path, ext):\n \"\"\"\nDepura el fileList dejando solo los que cumplan el formato de \"res-xxxxxx.ext\"\n al final de la depuracion devuelve el ultimo file de la lista que quedo.\n\n Input:\n fileList : lista conteniendo todos los files (sin path) que componen una determinada carpeta\n ext : extension de los files contenidos en una carpeta\n\n Return:\n El ultimo file de una determinada carpeta, no se considera el path.\n \"\"\"\n validFilelist = []\n fileList = os.listdir(path)\n\n # 0 1234 567 89A BCDE\n # H YYYY DDD SSS .ext\n\n for thisFile in fileList:\n\n try:\n number= int(thisFile[6:16])\n except:\n print(\"There is a file or folder with different format\")\n if not isNumber(number):\n continue\n\n# year = thisFile[1:5]\n# if not isNumber(year):\n# continue\n\n# doy = thisFile[5:8]\n# if not isNumber(doy):\n# continue\n\n number= int(number)\n# year = int(year)\n# doy = int(doy)\n\n if (os.path.splitext(thisFile)[-1].lower() != ext.lower()):\n continue\n\n\n validFilelist.append(thisFile)\n\n\n if validFilelist:\n validFilelist = sorted( validFilelist, key=str.lower )\n return validFilelist[-1]\n\n return None\n\n\n\nclass HFReader(ProcessingUnit):\n '''\n classdocs\n '''\n path = None\n startDate= None\n endDate = None\n startTime= None\n endTime = None\n walk = None\n isConfig = False\n dataOut=None\n nTries = 3\n ext = \".hdf5\"\n\n def __init__(self, **kwargs):\n '''\n Constructor\n '''\n ProcessingUnit.__init__(self, **kwargs)\n\n self.isConfig =False\n\n self.datablock = None\n\n self.filename_current=None\n\n self.utc = 0\n\n self.ext='.hdf5'\n\n self.flagIsNewFile = 1\n\n #-------------------------------------------------\n self.fileIndex=None\n\n self.profileIndex_offset=None\n\n self.filenameList=[]\n\n self.hfFilePointer= None\n\n 
self.filename_online = None\n\n self.status=True\n\n self.flagNoMoreFiles= False\n\n self.__waitForNewFile = 20\n\n\n #--------------------------------------------------\n\n self.dataOut = self.createObjByDefault()\n\n\n def createObjByDefault(self):\n\n dataObj = Voltage()\n\n return dataObj\n\n def setObjProperties(self):\n\n pass\n\n def getBlockDimension(self):\n \"\"\"\n Obtiene la cantidad de puntos a leer por cada bloque de datos\n\n Affected:\n self.blocksize\n\n Return:\n None\n \"\"\"\n pts2read =self.nChannels*self.nHeights*self.nProfiles\n self.blocksize = pts2read\n\n def __readHeader(self):\n\n self.nProfiles = 100\n self.nHeights = 1000\n self.nChannels = 2\n self.__firstHeigth=0\n self.__nSamples=1000\n self.__deltaHeigth=1.5\n self.__sample_rate=1e5\n #self.__frequency=2.72e6\n #self.__frequency=3.64e6\n self.__frequency=None\n self.__online = False\n self.filename_next_set=None\n\n #print \"Frequency of Operation:\", self.__frequency\n\n\n def __setParameters(self,path='', startDate='',endDate='',startTime='', endTime='', walk=''):\n self.path = path\n self.startDate = startDate\n self.endDate = endDate\n self.startTime = startTime\n self.endTime = endTime\n self.walk = walk\n\n def __checkPath(self):\n if os.path.exists(self.path):\n self.status=1\n else:\n self.status=0\n print('Path %s does not exits'%self.path)\n return\n return\n\n def __selDates(self, hf_dirname_format):\n try:\n dir_hf_filename= self.path+\"/\"+hf_dirname_format\n fp= h5py.File(dir_hf_filename,'r')\n hipoc=fp['t'].value\n fp.close()\n date_time=datetime.datetime.utcfromtimestamp(hipoc)\n year =int(date_time[0:4])\n month=int(date_time[5:7])\n dom =int(date_time[8:10])\n thisDate= datetime.date(year,month,dom)\n if (thisDate>=self.startDate and thisDate <= self.endDate):\n return hf_dirname_format\n except:\n return None\n\n def __findDataForDates(self,online=False):\n if not(self.status):\n return None\n\n pat = '\\d+.\\d+'\n dirnameList = [re.search(pat,x) for x in 
os.listdir(self.path)]\n dirnameList = [x for x in dirnameList if x!=None]\n dirnameList = [x.string for x in dirnameList]\n if not(online):\n\n dirnameList = [self.__selDates(x) for x in dirnameList]\n dirnameList = [x for x in dirnameList if x!=None]\n\n if len(dirnameList)>0:\n self.status = 1\n self.dirnameList = dirnameList\n self.dirnameList.sort()\n\n else:\n self.status = 0\n return None\n\n def __getTimeFromData(self):\n startDateTime_Reader = datetime.datetime.combine(self.startDate,self.startTime)\n endDateTime_Reader = datetime.datetime.combine(self.endDate,self.endTime)\n print('Filtering Files from %s to %s'%(startDateTime_Reader, endDateTime_Reader))\n print('........................................')\n filter_filenameList=[]\n self.filenameList.sort()\n for i in range(len(self.filenameList)-1):\n filename=self.filenameList[i]\n dir_hf_filename= filename\n fp= h5py.File(dir_hf_filename,'r')\n hipoc=fp['t'].value\n hipoc=hipoc+self.timezone\n date_time=datetime.datetime.utcfromtimestamp(hipoc)\n fp.close()\n year =int(date_time[0:4])\n month=int(date_time[5:7])\n dom =int(date_time[8:10])\n hour =int(date_time[11:13])\n min =int(date_time[14:16])\n sec =int(date_time[17:19])\n this_time=datetime.datetime(year,month,dom,hour,min,sec)\n if (this_time>=startDateTime_Reader and this_time <= endDateTime_Reader):\n filter_filenameList.append(filename)\n filter_filenameList.sort()\n self.filenameList = filter_filenameList\n return 1\n\n def __getFilenameList(self):\n #print \"hola\"\n #print self.dirnameList\n dirList = [os.path.join(self.path,x) for x in self.dirnameList]\n self.filenameList= dirList\n #print self.filenameList\n #print \"pase\",len(self.filenameList)\n\n def __selectDataForTimes(self, online=False):\n\n if not(self.status):\n return None\n #----------------\n self.__getFilenameList()\n #----------------\n if not(online):\n if not(self.all):\n self.__getTimeFromData()\n if len(self.filenameList)>0:\n self.status=1\n 
self.filenameList.sort()\n else:\n self.status=0\n return None\n else:\n if self.set != None:\n\n filename=getFileFromSet(self.path,self.ext,self.set)\n\n if self.flag_nextfile==True:\n self.dirnameList=[filename]\n fullfilename=self.path+\"/\"+filename\n self.filenameList=[fullfilename]\n self.filename_next_set=int(filename[6:16])+10\n\n self.flag_nextfile=False\n else:\n print(filename)\n print(\"PRIMERA CONDICION\")\n #if self.filename_next_set== int(filename[6:16]):\n print(\"TODO BIEN\")\n\n if filename == None:\n raise ValueError(\"corregir\")\n\n self.dirnameList=[filename]\n fullfilename=self.path+\"/\"+filename\n self.filenameList=[fullfilename]\n self.filename_next_set=int(filename[6:16])+10\n print(\"Setting next file\",self.filename_next_set)\n self.set=int(filename[6:16])\n if True:\n pass\n else:\n print(\"ESTOY AQUI PORQUE NO EXISTE EL SIGUIENTE ARCHIVO\")\n\n else:\n filename =getlastFileFromPath(self.path,self.ext)\n\n if self.flag_nextfile==True:\n self.dirnameList=[filename]\n fullfilename=self.path+\"/\"+filename\n self.filenameList=[self.filenameList[-1]]\n self.filename_next_set=int(filename[6:16])+10\n\n self.flag_nextfile=False\n else:\n filename=getFileFromSet(self.path,self.ext,self.set)\n print(filename)\n print(\"PRIMERA CONDICION\")\n #if self.filename_next_set== int(filename[6:16]):\n print(\"TODO BIEN\")\n\n if filename == None:\n raise ValueError(\"corregir\")\n\n self.dirnameList=[filename]\n fullfilename=self.path+\"/\"+filename\n self.filenameList=[fullfilename]\n self.filename_next_set=int(filename[6:16])+10\n print(\"Setting next file\",self.filename_next_set)\n self.set=int(filename[6:16])\n if True:\n pass\n else:\n print(\"ESTOY AQUI PORQUE NO EXISTE EL SIGUIENTE ARCHIVO\")\n\n\n\n def searchFilesOffLine(self,\n path,\n startDate,\n endDate,\n ext,\n startTime=datetime.time(0,0,0),\n endTime=datetime.time(23,59,59),\n walk=True):\n\n self.__setParameters(path, startDate, endDate, startTime, endTime, walk)\n\n 
self.__checkPath()\n\n self.__findDataForDates()\n #print self.dirnameList\n\n self.__selectDataForTimes()\n\n for i in range(len(self.filenameList)):\n print(\"%s\"% (self.filenameList[i]))\n\n return\n\n def searchFilesOnLine(self,\n path,\n expLabel= \"\",\n ext=None,\n startDate=None,\n endDate=None,\n walk=True,\n set=None):\n\n\n startDate = datetime.datetime.utcnow().date()\n endDate = datetime.datetime.utcnow().date()\n\n self.__setParameters(path=path,startDate=startDate,endDate=endDate,walk=walk)\n\n self.__checkPath()\n\n fullpath=path\n print(\"%s folder was found: \" %(fullpath ))\n\n if set == None:\n self.set=None\n filename =getlastFileFromPath(fullpath,ext)\n startDate= datetime.datetime.utcnow().date\n endDate= datetime.datetime.utcnow().date()\n#\n else:\n filename= getFileFromSet(fullpath,ext,set)\n startDate=None\n endDate=None\n#\n if not (filename):\n return None,None,None,None,None\n #print \"%s file was found\" %(filename)\n\n#\n# dir_hf_filename= self.path+\"/\"+filename\n# fp= h5py.File(dir_hf_filename,'r')\n# hipoc=fp['t'].value\n# fp.close()\n# date_time=datetime.datetime.utcfromtimestamp(hipoc)\n#\n# year =int(date_time[0:4])\n# month=int(date_time[5:7])\n# dom =int(date_time[8:10])\n# set= int(filename[4:10])\n# self.set=set-1\n #self.dirnameList=[filename]\n filenameList= fullpath+\"/\"+filename\n self.dirnameList=[filename]\n self.filenameList=[filenameList]\n self.flag_nextfile=True\n\n #self.__findDataForDates(online=True)\n #self.dirnameList=[self.dirnameList[-1]]\n #print self.dirnameList\n #self.__selectDataForTimes(online=True)\n #return fullpath,filename,year,month,dom,set\n return\n\n def __setNextFile(self,online=False):\n \"\"\"\n \"\"\"\n if not(online):\n newFile = self.__setNextFileOffline()\n else:\n newFile = self.__setNextFileOnline()\n\n if not(newFile):\n return 0\n return 1\n\n def __setNextFileOffline(self):\n \"\"\"\n \"\"\"\n idFile= self.fileIndex\n while(True):\n idFile += 1\n if not (idFile < 
len(self.filenameList)):\n self.flagNoMoreFiles = 1\n print(\"No more Files\")\n return 0\n filename = self.filenameList[idFile]\n hfFilePointer =h5py.File(filename,'r')\n\n epoc=hfFilePointer['t'].value\n #this_time=datetime.datetime(year,month,dom,hour,min,sec)\n break\n\n self.flagIsNewFile = 1\n self.fileIndex = idFile\n self.filename = filename\n\n self.hfFilePointer = hfFilePointer\n hfFilePointer.close()\n self.__t0=epoc\n print(\"Setting the file: %s\"%self.filename)\n\n return 1\n\n def __setNextFileOnline(self):\n \"\"\"\n \"\"\"\n print(\"SOY NONE\",self.set)\n if self.set==None:\n pass\n else:\n self.set +=10\n\n filename = self.filenameList[0]#fullfilename\n if self.filename_online != None:\n self.__selectDataForTimes(online=True)\n filename = self.filenameList[0]\n while self.filename_online == filename:\n print('waiting %d seconds to get a new file...'%(self.__waitForNewFile))\n time.sleep(self.__waitForNewFile)\n #self.__findDataForDates(online=True)\n self.set=self.filename_next_set\n self.__selectDataForTimes(online=True)\n filename = self.filenameList[0]\n sizeoffile=os.path.getsize(filename)\n\n #print filename\n sizeoffile=os.path.getsize(filename)\n if sizeoffile<1670240:\n print(\"%s is not the rigth size\"%filename)\n delay=50\n print('waiting %d seconds for delay...'%(delay))\n time.sleep(delay)\n sizeoffile=os.path.getsize(filename)\n if sizeoffile<1670240:\n delay=50\n print('waiting %d more seconds for delay...'%(delay))\n time.sleep(delay)\n\n sizeoffile=os.path.getsize(filename)\n if sizeoffile<1670240:\n delay=50\n print('waiting %d more seconds for delay...'%(delay))\n time.sleep(delay)\n\n try:\n hfFilePointer=h5py.File(filename,'r')\n\n except:\n print(\"Error reading file %s\"%filename)\n\n self.filename_online=filename\n epoc=hfFilePointer['t'].value\n\n self.hfFilePointer=hfFilePointer\n hfFilePointer.close()\n self.__t0=epoc\n\n\n self.flagIsNewFile = 1\n self.filename = filename\n\n print(\"Setting the file: 
%s\"%self.filename)\n return 1\n\n def __getExpParameters(self):\n if not(self.status):\n return None\n\n def setup(self,\n path = None,\n startDate = None,\n endDate = None,\n startTime = datetime.time(0,0,0),\n endTime = datetime.time(23,59,59),\n set = None,\n expLabel = \"\",\n ext = None,\n all=0,\n timezone=0,\n online = False,\n delay = 60,\n walk = True):\n '''\n In this method we should set all initial parameters.\n\n '''\n if path==None:\n raise ValueError(\"The path is not valid\")\n\n if ext==None:\n ext = self.ext\n\n self.timezone= timezone\n self.online= online\n self.all=all\n #if set==None:\n\n #print set\n if not(online):\n print(\"Searching files in offline mode...\")\n\n self.searchFilesOffLine(path, startDate, endDate, ext, startTime, endTime, walk)\n else:\n print(\"Searching files in online mode...\")\n self.searchFilesOnLine(path, walk,ext,set=set)\n if set==None:\n pass\n else:\n self.set=set-10\n\n# for nTries in range(self.nTries):\n#\n# fullpath,file,year,month,day,set = self.searchFilesOnLine(path=path,expLabel=expLabel,ext=ext, walk=walk,set=set)\n#\n# if fullpath:\n# break\n# print '\\tWaiting %0.2f sec for an valid file in %s: try %02d ...' 
% (self.delay, path, nTries+1)\n# time.sleep(self.delay)\n# if not(fullpath):\n# print \"There ins't valid files in %s\" % path\n# return None\n\n\n if not(self.filenameList):\n print(\"There is no files into the folder: %s\"%(path))\n sys.exit(-1)\n\n self.__getExpParameters()\n\n\n self.fileIndex = -1\n\n self.__setNextFile(online)\n\n self.__readMetadata()\n\n self.__setLocalVariables()\n\n self.__setHeaderDO()\n #self.profileIndex_offset= 0\n\n #self.profileIndex = self.profileIndex_offset\n\n self.isConfig = True\n\n def __readMetadata(self):\n self.__readHeader()\n\n\n def __setLocalVariables(self):\n\n self.datablock = numpy.zeros((self.nChannels, self.nHeights,self.nProfiles), dtype = numpy.complex)\n #\n\n\n\n self.profileIndex = 9999\n\n\n def __setHeaderDO(self):\n\n\n self.dataOut.radarControllerHeaderObj = RadarControllerHeader()\n\n self.dataOut.systemHeaderObj = SystemHeader()\n\n\n #---------------------------------------------------------\n self.dataOut.systemHeaderObj.nProfiles=100\n self.dataOut.systemHeaderObj.nSamples=1000\n\n\n SAMPLING_STRUCTURE=[('h0', '= self.nProfiles:\n return 1\n\n return 0\n\n def readNextBlock(self):\n if not(self.__setNewBlock()):\n return 0\n\n if not(self.readBlock()):\n return 0\n\n return 1\n\n def __setNewBlock(self):\n\n if self.hfFilePointer==None:\n return 0\n\n if self.flagIsNewFile:\n return 1\n\n if self.profileIndex < self.nProfiles:\n return 1\n\n self.__setNextFile(self.online)\n\n return 1\n\n\n\n def readBlock(self):\n fp=h5py.File(self.filename,'r')\n #Puntero que apunta al archivo hdf5\n ch0=(fp['ch0']).value #Primer canal (100,1000)--(perfiles,alturas)\n ch1=(fp['ch1']).value #Segundo canal (100,1000)--(perfiles,alturas)\n fp.close()\n ch0= ch0.swapaxes(0,1) #Primer canal (100,1000)--(alturas,perfiles)\n ch1= ch1.swapaxes(0,1) #Segundo canal (100,1000)--(alturas,perfiles)\n self.datablock = numpy.array([ch0,ch1])\n self.flagIsNewFile=0\n\n self.profileIndex=0\n\n return 1\n\n def getData(self):\n if 
self.flagNoMoreFiles:\n self.dataOut.flagNoData = True\n return 0\n\n if self.__hasNotDataInBuffer():\n if not(self.readNextBlock()):\n self.dataOut.flagNodata=True\n return 0\n\n ##############################\n ##############################\n self.dataOut.data = self.datablock[:,:,self.profileIndex]\n self.dataOut.utctime = self.__t0 + self.dataOut.ippSeconds*self.profileIndex\n self.dataOut.profileIndex= self.profileIndex\n self.dataOut.flagNoData=False\n self.profileIndex +=1\n\n return self.dataOut.data\n\n\n def run(self, **kwargs):\n '''\n This method will be called many times so here you should put all your code\n '''\n\n if not self.isConfig:\n self.setup(**kwargs)\n self.isConfig = True\n self.getData()","sub_path":"schainpy/model/io/jroIO_hf.py","file_name":"jroIO_hf.py","file_ext":"py","file_size_in_byte":25125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"468577730","text":"class TreeNode:\n def __init__(self, val):\n self.val = val\n self.left = None\n self.right = None\n\n\nclass Solution:\n def hasPathSum(self, root, sum):\n stack = [(root, sum)]\n while stack:\n node, value = stack.pop()\n if node:\n if not node.left and not node.right and node.val == value:\n return True\n stack.append((node.left, value - node.val))\n stack.append((node.right, value - node.val))\n else:\n continue\n return False\n\n\nif __name__ == \"__main__\":\n root = TreeNode(1)\n root.left = TreeNode(2)\n root.left.left = TreeNode(3)\n root.left.right = TreeNode(4)\n root.right = TreeNode(5)\n root.right.right = TreeNode(6)\n print(Solution().hasPathSum(root, 6))\n","sub_path":"other/datstr/v3/6Trees/probs/pathSum.py","file_name":"pathSum.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"566376767","text":"from subprocess import call\n\n\n# module_name = 'task_rnn.py'\n# module_name = 'task_lstm.py'\n# module_name = 
'task_origin_lstm.py'\nmodule_name = 'task_msu.py'\n\ndelays = [2]\n\n# lrs = [0.02, 0.02, 0.05, 0.05, 0.1, 0.1]\nlrs = [0.01]\n# lrs = [0.01]\n\n# batch_sizes = [1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3]\nbatch_sizes = [1, 1, 1, 1]\n# batch_sizes += [1] * 2 + [2] * 3 + 3 * [3]\n\nshift_memory = False\n\ni = 0\nfor delay in delays:\n for lr in lrs:\n for bs in batch_sizes:\n i += 1\n command = ['python', module_name, '--int_para_1', '{}'.format(delay),\n '--lr', '{}'.format(lr), '--batch_size', '{}'.format(bs),\n '--suffix', '({})'.format(i)]\n if shift_memory: command += ['--bool_para_1']\n call(command)\n print()\n\n","sub_path":"98-TOY/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"185436025","text":"# opt_strat\n# Created by steven on 8/3/19.\n##Integrate PyFolio at some point or see below\n##https://github.com/quantopian/empyrical\n###http://www.vantharp.com/tharp-concepts/sqn.asp\n####SQN = SquareRoot(NumberTrades) * Average(TradesProfit) / StdDev(TradesProfit)\n\nimport backtrader as bt\nimport pandas as pd\nfrom datetime import datetime\nfrom strategies import *\nfrom ccxt_datahandler import ccxt_datahandler\nimport matplotlib.pyplot as plt\n\ndef printSQN(analyzer):\n\tsqn = round(analyzer.sqn,2)\n\tprint('SQN: {}'.format(sqn))\ndef runstrat(pair, strat, period_1, period_2,period_3):\n\tplt.switch_backend('Qt5Agg')\n\t# Create a cerebro entity\n\tcerebro = bt.Cerebro(optreturn=False)\n\t\n\t# Add a strategy\n\tcerebro.optstrategy(strat, ema1=period_1,ema2=period_2,sma1=period_3)\n\t\n\t##add analyzer\n\tcerebro.addanalyzer(bt.analyzers.SQN, _name=\"sqn\")\n\n\t\n\t# Get a pandas dataframe and add to cerebro\n\tcerebro.adddata(bt.feeds.PandasData(dataname=ccxt_datahandler(pair, 'poloniex', '1d')))\n\tstartcash=10000\n\tcerebro.broker.setcash(startcash)\n\t# Run over everything\n\topt_runs = cerebro.run()\n\n\tfinal_results_list = 
[]\n\tfor run in opt_runs:\n\t\tfor strategy in run:\n\t\t\t####can add other analyzers here for sort by just go back to backtrader analyzers\n\t\t\tSQN_val = round(strategy.analyzers.sqn.get_analysis().sqn,2)\n\n\t\t\tvalue = round(strategy.broker.get_value(),2)\n\t\t\tPnL = round(value - startcash,2)\n\t\t\tperiod1 = strategy.params.ema1\n\t\t\tperiod2 = strategy.params.ema2\n\t\t\tperiod3 = strategy.params.sma1\n\t\t\t\n\t\t\tfinal_results_list.append([period1,period2,period3,PnL,SQN_val])\n\tby_PnL = sorted(final_results_list, key=lambda x: x[3], reverse=True)\n\t#Print results\n\tby_SQN = sorted(final_results_list, key=lambda x: x[4], reverse=True)\n\tprint('Results: Ordered by Profit:')\n\tresult = by_PnL[0]\n\tprint('Period 1: {}, Period 2: {}, Period 3: {},PnL: {},SQN: {}'.format(result[0], result[1],result[2],result[3],result[4]))\n\tresult = by_SQN[0]\n\tprint('Results: Ordered by SQN:')\n\tprint('Period 1: {}, Period 2: {}, Period 3: {},PnL: {},SQN: {}'.format(result[0], result[1],result[2],result[3],result[4]))\nif __name__ == '__main__':\n\t\n\t###need to feed the type of strat that will be run\n\tprint('BTC EMA')\n\trunstrat('BTC/USDT',emaCross, period_1=range(3,15), period_2=range(15,25),period_3=[50,100,150,200])\n\tprint('BTC SMA')\n\trunstrat('BTC/USDT',smaCross, period_1=range(3,15), period_2=range(15,25),period_3=[50,100,150,200])\n\tprint('ETH EMA')\n\trunstrat('ETH/USDT',emaCross, period_1=range(3,15), period_2=range(15,25),period_3=[50,100,150,200])\n\tprint('ETH SMA')\n\trunstrat('ETH/USDT',smaCross, period_1=range(3,15), period_2=range(15,25),period_3=[50,100,150,200])","sub_path":"opt_strats.py","file_name":"opt_strats.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"593937513","text":"import os\nimport sys\nimport json\nimport yaml\nimport pickle\n\n\n__all__ = ['save_object', 'load_pickle_obj', 'load_json_obj', 'load_config', 
'Recorder']\n\n\ndef save_object(fname, obj, mode = 'pickle'):\n # the function is used to save some data in class or object in .pkl file\n # or json file\n if not isinstance(fname, str):\n raise TypeError(fname, \"is not string object, it can't be the saving name of file.\")\n\n if mode == 'pickle':\n if not fname.endswith('.pkl'):\n fname += '.pkl'\n\n with open(fname, 'wb') as out_file:\n pickle.dump(obj, out_file)\n\n elif mode == 'json':\n if not fname.endswith('.json'):\n fname += '.json'\n\n obj = json.dumps(obj)\n with open(fname, 'w') as out_file:\n out_file.write(obj)\n\n out_file.close()\n return None\n\ndef load_pickle_obj(fname):\n # the function is used to read the data in .pkl file\n if not fname.endswith('.pkl'):\n raise RuntimeError(fname, 'is not a pickle file.')\n\n with open(fname, 'rb') as in_file:\n return pickle.load(in_file)\n\ndef load_json_obj(fname):\n # the functionis used to read the data in .json file\n if not fname.endswith('.json'):\n raise RuntimeError(fname, 'is not a json file.')\n\n with open(fname, 'r') as in_file:\n return json.loads(in_file.read())\n\ndef load_config(fname):\n f = open(fname)\n content = yaml.load(f, Loader = yaml.FullLoader)\n f.close()\n return content\n\n\nclass Recorder(object):\n # Recoder of recording whole history in traninig.\n def __init__(self, record_column):\n self.record_column = record_column\n self.length_check = len(record_column)\n self.data = []\n\n def from_old_file(self, file_name):\n f = open(file_name, 'r')\n lines = f.readlines()\n f.close()\n\n for line in lines:\n elements = line.replace('\\n', '').split(',')\n temp_list = []\n assert len(elements) == self.length_check\n for obj in elements:\n temp_list.append(obj)\n\n self.data.append(obj)\n\n return None\n\n def insert(self, new_data):\n if len(new_data) != self.length_check:\n raise IndexError('Input data length is not equal to init record length.')\n\n insertion = []\n for obj in new_data:\n insertion.append(str(obj))\n\n 
self.data.append(insertion)\n return None\n\n def write(self, path, file_name, file_type = '.csv'):\n print('Start writing recording file ...')\n lines = self._build_file()\n f = open(os.path.join(path, file_name) + file_type, 'w')\n f.writelines(lines)\n f.close()\n print('Recoder writing done.')\n return None\n\n def _build_file(self):\n lines = ['']\n for i in range(len(self.record_column)):\n if i == len(self.record_column) - 1:\n lines[0] = lines[0] + self.record_column[i] + '\\n'\n else:\n lines[0] = lines[0] + self.record_column[i] + ','\n\n for i in range(len(self.data)):\n new_lines = ''\n for j in range(len(self.data[i])):\n if j == len(self.data[i]) - 1:\n new_lines = new_lines + self.data[i][j] + '\\n'\n else:\n new_lines = new_lines + self.data[i][j] + ','\n\n lines.append(new_lines)\n\n return lines\n\n\n","sub_path":"HW2/lib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"323058998","text":"import numpy as np\nimport cv2 as cv\n\n\ncar_cascade = cv.CascadeClassifier(\"E:\\FYP\\haar-cascades\\cars6.xml\")\nobject_detector = cv.createBackgroundSubtractorMOG2()\n\ncap = cv.VideoCapture(r\"E:\\FYP\\videos\\video2.avi\")\ncount = 0\nkernel = np.ones((5,5),np.uint8)\n\n\n\nwhile True:\n ret, frame = cap.read()\n frame = cv.resize(frame, (720,480))\n height, width = frame.shape[0:2]\n frame[0:50,0:width] = [25,9,51]\n cv.putText(frame,'Vehicle Count: ', (10,40), cv.FONT_HERSHEY_TRIPLEX, 1.5, (255,255,255), 2)\n cv.line(frame,(0,height-400), (width, height-400), (0,255,0), 2)\n cv.line(frame,(0,height-200), (width, height-200), (255,0,0), 2)\n\n mask = object_detector.apply(frame)\n erosion = cv.erode(mask,kernel,iterations = 1)\n\n gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n cars = car_cascade.detectMultiScale(gray,1.3, 5)\n \n for (x,y,w,h) in cars:\n carCy = int(y+h/2)\n lineCy = height-400\n\n if (carCylineCy-6):\n count = 
count+1\n cv.line(frame,(0,height-200), (width, height-200), (60,255,60), 2)\n\n\n cv.rectangle(frame, (x,y), (x+w, y+h), (222,107,0), 2)\n cv.putText(frame, \"Car\", (x,y-10), cv.FONT_HERSHEY_COMPLEX, 0.5, (0,255,0), 2)\n cv.putText(frame, str(count), (500,30), cv.FONT_HERSHEY_COMPLEX, 1.5, (255,255,255), 2)\n \n\n cv.imshow(\"Detection\", frame)\n cv.imshow(\"Threshold\", mask)\n cv.imshow(\"Morphological Analysis\", erosion)\n k = cv.waitKey(100) & 0xFF\n if k ==97:\n break\n\ncap.release()\ncv.destroyAllWindows()","sub_path":"Vehicle_Detection_using Haar Cascade Classifier/vehicle_detection.py","file_name":"vehicle_detection.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"195872341","text":"import pickle\nfrom builtins import print\nfrom pickle import UnpicklingError\n\nimport cv2\nimport os\nimport random\nimport time\nimport numpy\n\nfrom Fruit import Fruit\n\nnumpy.set_printoptions(threshold=numpy.nan)\n\n# # # # # # # # # # # # #Nacitanie a zapisanie obrazkov do picklov # # # # # # # # # # # # # # # # # # # #\n\ntime_start = time.clock()\n\nlistOfDir = os.listdir('C:/Users/frantisek.ff/Downloads/Fruit-Images-Dataset-master/Fruit-Images-Dataset-master/Test')\nprint(listOfDir)\nfruit = [i.split()[0] for i in listOfDir] # necham len prve slovo\nprint(fruit)\ndistinct_fruit = sorted(set(fruit)) # zoradeny a unikatny set ovocia\nprint(distinct_fruit)\nlistZeros = [0]*len(distinct_fruit)\nlistChars = list() * len(distinct_fruit)\nnumberingOfFruit = numpy.arange(1, len(distinct_fruit)+1)\nprint(listZeros)\ntemporarylist = []\nmapOfFruit = dict(zip(distinct_fruit, listZeros))\nmapOfFruitNumbering = dict(zip(distinct_fruit, numberingOfFruit))\nprint(mapOfFruitNumbering)\n\n\ndef loadAllImgandDump(rootDir):\n\n for dirName, subdirList, fileList in os.walk(rootDir):\n print('Found directory: %s' % dirName)\n onlyDirName = dirName.split('\\\\')[-1]\n onlyDirName = 
onlyDirName.split(' ')[0]\n\n if onlyDirName in mapOfFruit.keys():\n filename = onlyDirName\n\n if mapOfFruit[onlyDirName] > 0:\n file = open(filename, 'rb')\n for x in range(0, mapOfFruit[onlyDirName]):\n temporarylist.append(pickle.load(file))\n file.close()\n\n file = open(filename, 'wb')\n for x in range(0, mapOfFruit[onlyDirName]):\n pickle.dump(temporarylist[x], file, pickle.HIGHEST_PROTOCOL)\n\n mapOfFruit[onlyDirName] += len(fileList)\n\n for fname in fileList:\n image = cv2.imread(dirName + '/' + fname)\n image = (image / 255 - 0.5)\n\n tempFruit = Fruit()\n tempFruit.name = fname\n tempFruit.kind = mapOfFruitNumbering[onlyDirName]\n tempFruit.rgb = image\n\n pickle.dump(tempFruit, file)\n file.close()\n temporarylist = []\n\n print('Found subdirectory: %s' % subdirList)\n print(len(fileList))\n\nrootDir = 'C:/Users/frantisek.ff/Downloads/Fruit-Images-Dataset-master/Fruit-Images-Dataset-master/Test'\nloadAllImgandDump(rootDir)\n\nlowest_fruit = min(mapOfFruit.values())\nnameFruit = open(\"nameFruit\", 'w')\nnameFruit.write(str(mapOfFruit.keys()))\nnameFruit.close()\n#\nprint('Najmensi pocet ovocia je %s' % lowest_fruit)\nprint(mapOfFruit)\nprint('Pocet ovoci je %s' % len(mapOfFruit))\n\n\n###########################################################################\n\n############################## Nacitanie z picklov a zobrazenie #############################################\n\ntemporarylist = []\n\ninfile = open('Apple','rb')\n\nwhile 1:\n try:\n temporarylist.append(pickle.load(infile))\n except (EOFError, UnpicklingError):\n break\n\ninfile.close()\n\ntime_elapsed = (time.clock() - time_start)\nprint(time_elapsed)\n\ndef showImageFromPickle(windowName, data, xWindow, yWindow):\n cv2.namedWindow(windowName) # Create a named window\n cv2.moveWindow(windowName, xWindow,yWindow) # Move it to (40,30)\n cv2.imshow(windowName, data + 0.5)\n\nshowImageFromPickle(\"Obr 1 \", temporarylist[526].rgb, 50, 50)\nshowImageFromPickle(\"Obr 2 \", temporarylist[256].rgb, 
100, 50)\nshowImageFromPickle(\"Obr 3 \", temporarylist[56].rgb, 50, 100)\nshowImageFromPickle(\"Obr 4 \", temporarylist[806].rgb, 50, 100)\n\ncv2.waitKey(0)\n\n\n\n\n\n\n","sub_path":"Zadanie2/picklenie.py","file_name":"picklenie.py","file_ext":"py","file_size_in_byte":3583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"385937336","text":"import numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.metrics import confusion_matrix, classification_report, accuracy_score\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.decomposition import PCA\nfrom matplotlib import cm as cm\nimport operator\nfrom warnings import filterwarnings\nfilterwarnings(\"ignore\")\n\nlabel=[]\nfor i in range(62):\n label.append(\"X\"+str(i))\n i +=1\n\ndata = pd.read_csv(\"sonar_all_data_2.csv\", names=label)\nprint(data.shape)\nX = data.drop([\"X60\", \"X61\"] ,axis=1)\ny = data[\"X60\"]\nprint(data[\"X61\"].value_counts())\n#sns.countplot(data=data, x=\"X61\")\n#plt.show()\n\nsc= StandardScaler()\nX_train,X_test, y_train, y_test = train_test_split(X,y, test_size = 0.3, random_state = 0)\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\ndct={}\naccuracy =[]\ncomponents = []\ncomp = range(1,61)\nfor trial in comp:\n pca = PCA(n_components = trial)\n X_train_pca = pca.fit_transform(X_train)\n X_test_pca = pca.transform(X_test)\n\n mlpc = MLPClassifier(hidden_layer_sizes=(1000), activation =\"logistic\", max_iter=20000, alpha=0.00001, solver=\"adam\", tol=0.0001)\n mlpc.fit(X_train_pca, y_train)\n y_predect = mlpc.predict(X_test_pca)\n print(\"accuracy : {:.2f} and number of components is : {:.2f}\" .format(accuracy_score(y_test, y_predect),trial))\n dct[trial]= accuracy_score(y_test, y_predect)\n accuracy.append(accuracy_score(y_test, y_predect))\n 
components.append(trial)\n trial +=1\n #cm = confusion_matrix(y_test, y_predect)\n #print(cm)\n #sns.heatmap(cm, center=True)\n #plt.show()\n\n\nprint(max(dct, key=dct.get))\nprint(\"%0.2f\" %dct[max(dct, key=dct.get)])\nplt.plot(components,accuracy)\nplt.show()\n#print(accuracy)\n","sub_path":"Project 2/project2/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"274747541","text":"import sys\r\nfrom PyQt5.QtCore import *\r\nfrom PyQt5.QtGui import *\r\nfrom PyQt5.QtWidgets import *\r\n\r\nclass MainWidget(QWidget):\r\n def __init__(self):\r\n super(MainWidget, self).__init__()\r\n self.initUI()\r\n # self.center()\r\n self.contact = ['','','','','','','','']\r\n self.contacts = [self.contact]\r\n\r\n def initUI(self):\r\n grid = QGridLayout()\r\n self.setLayout(grid)\r\n\r\n\r\n #initializing the groupbox\r\n groupBoxMenu = QGroupBox('&Choose option')\r\n #initializing the subgrid\r\n gridMenu = QGridLayout()\r\n self.setLayout(gridMenu)\r\n #btn\r\n LookupBtn = QPushButton(\"Look up\")\r\n LookupBtn.clicked.connect(self.lookup)\r\n AddBtn = QPushButton(\"Add\")\r\n AddBtn.clicked.connect(self.add)\r\n QuitBtn = QPushButton(\"Quit\")\r\n QuitBtn.clicked.connect(QCoreApplication.instance().quit)\r\n #positioning\r\n gridMenu.addWidget(LookupBtn,0,0)\r\n gridMenu.addWidget(AddBtn,0,1)\r\n gridMenu.addWidget(QuitBtn,0,2)\r\n #making the groupbox\r\n groupBoxMenu.setLayout(gridMenu)\r\n BoxMenuLayout = QGridLayout()\r\n BoxMenuLayout.addWidget(groupBoxMenu)\r\n\r\n\r\n # .... 
2nd groupbox.........\r\n groupBoxShow = QGroupBox('&Entries')\r\n gridShow = QGridLayout()\r\n self.setLayout(gridShow)\r\n self.fname = QLineEdit()\r\n self.lname = QLineEdit()\r\n self.street = QLineEdit()\r\n self.city = QLineEdit()\r\n self.state = QLineEdit()\r\n self.zip = QLineEdit()\r\n self.home = QLineEdit()\r\n self.mobile = QLineEdit()\r\n labelfname = QLabel(\"First Name\")\r\n labellname = QLabel(\"Last Name\")\r\n labelstreet = QLabel(\"Street\")\r\n labelcity = QLabel(\"City\")\r\n labelstate = QLabel(\"State\")\r\n labelzip = QLabel(\"ZIP\")\r\n labelhome = QLabel(\"Home\")\r\n labelmobile = QLabel(\"Mobile\")\r\n\r\n gridShow.addWidget(labelfname,0,0)\r\n gridShow.addWidget(self.fname,0,1)\r\n gridShow.addWidget(labellname,0,2)\r\n gridShow.addWidget(self.lname,0,3)\r\n gridShow.addWidget(labelstreet,1,0)\r\n gridShow.addWidget(self.street,1,1)\r\n gridShow.addWidget(labelcity,1,2)\r\n gridShow.addWidget(self.city,1,3)\r\n gridShow.addWidget(labelstate,1,4)\r\n gridShow.addWidget(self.state,1,5)\r\n gridShow.addWidget(labelzip,1,6)\r\n gridShow.addWidget(self.zip,1,7)\r\n gridShow.addWidget(labelhome,2,0)\r\n gridShow.addWidget(self.home,2,1)\r\n gridShow.addWidget(labelmobile,2,2)\r\n gridShow.addWidget(self.mobile,2,3)\r\n\r\n groupBoxShow.setLayout(gridShow)\r\n BoxMenuLayout = QGridLayout()\r\n BoxMenuLayout.addWidget(groupBoxShow)\r\n\r\n # .........Big GrouBox ..................\r\n grid.addWidget(groupBoxMenu, 1, 0)\r\n grid.addWidget(groupBoxShow, 0, 0)\r\n self.show()\r\n\r\n def center(self):\r\n qr = self.frameGeometry()\r\n cp = QDesktopWidget().availableGeometry().center()\r\n qr.moveCenter(cp)\r\n self.move(qr.topLeft())\r\n\r\n def lookup(self):\r\n lastname = self.lname.text()\r\n found = False\r\n rightcontact = []\r\n while (found == False):\r\n for contact in self.contacts:\r\n if (lastname == contact[1]):\r\n rightcontact = contact\r\n found = True\r\n\r\n if (found == True):\r\n self.fname.setText(rightcontact[0])\r\n 
self.street.setText(rightcontact[2])\r\n self.city.setText(rightcontact[3])\r\n self.state.setText(rightcontact[4])\r\n self.zip.setText(rightcontact[5])\r\n self.home.setText(rightcontact[6])\r\n self.mobile.setText(rightcontact[7])\r\n else:\r\n print(\"no such contact\")\r\n\r\n def add(self):\r\n Fname = self.fname.text()\r\n Lname = self.lname.text()\r\n St = self.street.text()\r\n City = self.city.text()\r\n State = self.state.text()\r\n Zip = self.zip.text()\r\n Home = self.home.text()\r\n Mobile = self.mobile.text()\r\n print(Fname,Lname,St,City,State,Zip,Home,Mobile)\r\n self.contact = [Fname,Lname,St,City,State,Zip,Home,Mobile]\r\n self.contacts.append(self.contact)\r\n print(self.contacts)\r\n\r\n\r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n ex = MainWidget()\r\n sys.exit(app.exec_())","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":4437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"435175684","text":"from django.conf.urls import include, url\nfrom . 
import views\n\nurlpatterns = [\n url(r'^(?P[\\d]+/)?$', views.ProjectDetail.as_view(), name='project_detail'),\n url(r'^(?P[\\d]+/)?criar/$', views.ProjectCreate.as_view(), name='project_create'),\n url(r'^(?P[\\d]+/)?atualizar/$', views.ProjectUpdate.as_view(), name='project_update'),\n url(r'^(?P[\\d]+/)?areas/$', views.AreasList.as_view(), name='areas_list'),\n url(r'^(?P[\\d]+/)?areas/(?P[\\w]+)/$', views.AreaDetail.as_view(), name='area_detail'),\n url(r'^(?P[\\d]+/)?areas/(?P[\\w]+)/atualizar/$', views.AreaUpdate.as_view(), name='area_update'),\n url(r'^(?P[\\d]+/)?comentarios/$', views.CommentsList.as_view(), name='comments_list'),\n url(r'^(?P[\\d]+/)?comentarios/(?P[\\d]+)/resposta/criar/$', views.CommentAnswerCreate.as_view(), name='comments_answer_create'),\n url(r'^(?P[\\d]+/)?mensagens/$', views.ChatList.as_view(), name='chat_list'),\n url(r'^(?P[\\d]+/)?mensagens/create/$', views.ChatCreate.as_view(), name='chat_create'),\n url(r'^(?P[\\d]+/)?convites/criar/$', views.InviteCreate.as_view(), name='invite_create'),\n url(r'^(?P[\\d]+/)?convites/(?P[\\d]+)/apagar/$', views.InviteDelete.as_view(), name='invite_delete'),\n]\n","sub_path":"api/project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"412947147","text":"import requests\nfrom django.conf import settings\nfrom locations.models import Location\n\ndef locu_search(query):\n locality = query.replace(' ', '%20')\n url = 'https://api.locu.com/v1_0/venue/search/?api_key={}&locality={}'.format(settings.LOCU_API_KEY, locality)\n r = requests.get(url)\n data = r.json()\n locations = [(location['name'], location['id']) for location in data['objects']]\n return locations\n\ndef locu_detail(locu_id):\n url = 'https://api.locu.com/v1_0/venue/'\n url += '{}/?api_key={}'.format(locu_id, settings.LOCU_API_KEY)\n r = requests.get(url)\n data = r.json()\n details = [(detail['lat'], 
detail['long']) for detail in data['objects']]\n return details\n","sub_path":"locator/locu.py","file_name":"locu.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"480119968","text":"\n\ndef get_output_shape(inpts, fs, s, p, k):\n \"\"\"Calculates the output shape of a convolution\n\n :inpts input shape (h, w, d)\n :fs filter shape (h, w, d )\n :s stride (h, w)\n :p padding (h, w)\n :k number of filters\n \"\"\"\n new_height = (inpts[0] - fs[0] + 2 * p[0]) / s[0] + 1\n new_width = (inpts[1] - fs[1] + 2 * p[1]) / s[1] + 1\n return (new_height, new_width, k)\n\n\n# This would correspond to the following code:\n\ninp = tf.placeholder(tf.float32, (None, 32, 32, 3))\n# (height, width, input_depth, output_depth)\nfilter_weights = tf.Variable(tf.truncated_normal((8, 8, 3, 20)))\nfilter_bias = tf.Variable(tf.zeros(20))\nstrides = [1, 2, 2, 1] # (batch, height, width, depth)\npadding = 'SAME'\nconv = tf.nn.conv2d(inp, filter_weights, strides, padding) + filter_bias\n\n#\n# In summary TensorFlow uses the following equation for 'SAME' vs 'PADDING'\n#\n# SAME Padding, the output height and width are computed as:\n#\n# out_height = ceil(float(in_height) / float(strides1))\n#\n# out_width = ceil(float(in_width) / float(strides[2]))\n#\n# VALID Padding, the output height and width are computed as:\n#\n# out_height = ceil(float(in_height - filter_height + 1) / float(strides1))\n#\n# out_width = ceil(float(in_width - filter_width + 1) / float(strides[2]))\n","sub_path":"tensorflow/quizzes/convolutional_nets/convolution_output_shape.py","file_name":"convolution_output_shape.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"583106475","text":"\"\"\" Post-process ZnO G0W0 results.\n\"\"\"\nimport os\nimport matplotlib.pyplot as plt\nfrom matplotlib.figure import Figure\nfrom matplotlib.axes 
import Axes\nimport numpy as np\nfrom typing import Tuple\n\nfrom src.conversions import ha_to_ev\nfrom src.parsers import band_gaps\n\n\ndef band_gaps_vs_q(root: str, q_points: list) -> dict:\n \"\"\" Get fundamental and direct Gamma gaps as a function of q.\n\n :param path: Path to file.\n :param q_points: List of q-points.\n :return: Fundamental gap\n \"\"\"\n fundamental = np.empty(shape=len(q_points))\n\n for iq, q_point in enumerate(q_points):\n q_str = \"\".join(str(x) for x in q_point)\n path = os.path.join(root, q_str)\n gaps: dict = band_gaps(path)\n fundamental[iq] = gaps['fundamental']\n\n return {'fundamental': fundamental}\n\n\ndef initialise_gamma_gap_plot() -> Tuple[Figure, Axes]:\n fig, ax = plt.subplots()\n ax.set_title('Convergence in Gamma-Gamma Gap of ZnO')\n fig.set_size_inches(14, 10)\n plt.rcParams.update({'font.size': 16})\n ax.set_xlabel('q-points', fontsize=16)\n ax.set_ylabel('Quasiparticle Band Gap (eV)', fontsize=16)\n ax.set_ylim(1.8, 3.0)\n return fig, ax\n\n\nif __name__ == \"__main__\":\n # Notebook Results directory\n root = 'GW_results/results/zno'\n subdirectory = '_percent_empty'\n\n q_points = [[2, 2, 1], [4, 4, 3], [6, 6, 4], [8, 8, 5]]\n total_q_points = [np.prod(q_point) for q_point in q_points]\n n_empties = [25, 50, 100]\n\n fig, ax = initialise_gamma_gap_plot()\n for n_empty in n_empties:\n gaps = band_gaps_vs_q(os.path.join(root, str(n_empty) + subdirectory), q_points)\n ax.plot(total_q_points, gaps['fundamental'] * ha_to_ev, 'o', markersize=10, label=str(n_empty))\n\n ax.legend(title='Number of Emtpy States (%)')\n plt.show()\n","sub_path":"pycharm_projects/wp_benchmarks/GW_analysis/zno.py","file_name":"zno.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"305867128","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport 
djangocms_text_ckeditor.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('djangocms_nearby', '0003_auto_20170725_0603'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='activity',\n name='info_content',\n field=djangocms_text_ckeditor.fields.HTMLField(verbose_name='Description', help_text='Description of activity', blank=True, default=''),\n ),\n migrations.AddField(\n model_name='activity',\n name='title',\n field=models.CharField(verbose_name='Title', help_text='Title', max_length=150, default=''),\n preserve_default=False,\n ),\n ]\n","sub_path":"djangocms_nearby/migrations/0004_auto_20170725_0606.py","file_name":"0004_auto_20170725_0606.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"494657533","text":"import mantid.simpleapi as mantid\nfrom mantid.api import WorkspaceFactory\nfrom mantid.api import AnalysisDataService\nfrom mantid.kernel import DateAndTime\n\nfrom checkpoint import DataCheckpoint\nfrom mantid_workspace_checkpoint import MantidWorkspaceCheckpoint\nfrom mantid_workspace_transition import MantidWorkspaceTransition\n\n\nclass CreateMantidWorkspaceFromEventsTransition(MantidWorkspaceTransition):\n def __init__(self):\n self._log_data = {}\n self._number_of_spectra = self._get_number_of_spectra_from_instrument()\n super(CreateMantidWorkspaceFromEventsTransition, self).__init__(parents=[])\n\n def set_log_data(self, data):\n self._log_data = dict(data)\n\n def process(self, event_data, pulse_time):\n update = DataCheckpoint()\n update._data = (event_data, pulse_time)\n self.trigger_update({'no-parent':update})\n\n def _do_transition(self, data):\n event_data, pulse_time = data[0].data\n ws = WorkspaceFactory.Instance().create(\"EventWorkspace\", self._number_of_spectra, 1, 1);\n AnalysisDataService.Instance().addOrReplace('POWDIFF_test', ws)\n ws = AnalysisDataService['POWDIFF_test']\n 
mantid.LoadInstrument(Workspace=ws, Filename='data/POWDIFF_Definition.xml', RewriteSpectraMap=True)\n ws.getAxis(0).setUnit('tof')\n for i in event_data:\n ws.getEventList(int(i[0])).addEventQuickly(float(i[1]), DateAndTime(pulse_time))\n self._add_log_data(ws, pulse_time)\n return ws\n\n def _add_log_data(self, ws, pulse_time):\n mantid.AddSampleLog(ws, LogName='start_time', LogText=str(DateAndTime(pulse_time)))\n for key, value in self._log_data.items():\n mantid.AddTimeSeriesLog(ws, Name=key, Time=str(DateAndTime(pulse_time)), Value=value)\n\n def _get_number_of_spectra_from_instrument(self):\n # We create a helper workspace to obtain the number of detectors.\n ws = mantid.CreateSimulationWorkspace(Instrument='data/POWDIFF_Definition.xml', BinParams='1,0.5,2')\n return ws.getNumberHistograms()\n","sub_path":"backend/create_mantid_workspace_from_events_transition.py","file_name":"create_mantid_workspace_from_events_transition.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"534006894","text":"import random\nimport numpy as np\nimport torch\n\nclass game:\n def __init__(self, size, nConnect):\n\n self.size = size\n self.nConnect = nConnect\n\n def reset(self):\n self.state = torch.zeros((self.size, self.size))\n return self.state, False\n\n def show_state(self):\n print(\"Current board position:\")\n print(self.state.int().numpy(), \"\\n\")\n\n def valid_actions(self):\n return self.state == 0\n\n def valid_action_indices(self):\n return self.valid_actions().nonzero()\n\n def is_valid(self, coord):\n x, y = coord\n return self.state[x, y] == 0\n\n def move(self, coord, alternate_players=True):\n x, y = coord\n if self.is_valid(coord):\n self.state[x, y] = 1\n else:\n raise ValueError(\"move not valid\")\n\n if self.is_winning_move(coord):\n return self.state, +1, True # state, reward, done\n\n if len(self.valid_action_indices()) == 0:\n return self.state, +0, 
True\n\n\n if alternate_players: self.state *= -1\n return self.state, 0, False\n\n def is_winning_move(self, coord):\n x, y = coord\n for dx in [0,1]:\n for dy in [0,1]:\n if not dx==dy==0:\n nConnected = 0\n for depth in range(-self.nConnect-1, self.nConnect):\n new_coord = np.array([x, y]) + np.array([dx, dy])*depth\n if (0 <= new_coord).all() and (new_coord < self.size).all():\n if self.state[tuple(new_coord)] == 1:\n nConnected += 1\n if nConnected == self.nConnect:\n return True\n else:\n nConnected = 0\n return False\n\n\ngame = game(5, nConnect = 0)\nstate, done = game.reset()\ngame.show_state()\nwhile not done:\n poss_actions = game.valid_action_indices()\n action = poss_actions[random.randint(0, len(poss_actions)-1)]\n new_state, reward, done = game.move(action)\n state = new_state\n game.show_state()\n if reward:\n print(\"Won!\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"98054470","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import, print_function, division\n\nfrom collections import namedtuple\nfrom contextlib import contextmanager\nfrom io import StringIO\nfrom os import path\nimport csv\nimport json\nimport os\nimport tempfile\nimport unittest\n\nimport csvdiff\nfrom csvdiff import patch, records\n\nfrom click.testing import CliRunner\n\n\nclass TestCsvdiff(unittest.TestCase):\n def setUp(self):\n csvdiff.DEBUG = True\n self.examples = path.join(path.dirname(__file__), 'examples')\n self.a_file = path.join(self.examples, 'a.csv')\n self.b_file = path.join(self.examples, 'b.csv')\n self.diff_file = path.join(self.examples, 'diff.json')\n self.bad_diff_file = path.join(self.examples, 'bad_diff.json')\n self.runner = CliRunner()\n\n def csvdiff_cmd(self, *args):\n RunResult = namedtuple('RunResult', 'exit_code diff 
output')\n t = tempfile.NamedTemporaryFile(delete=True)\n result = self.runner.invoke(csvdiff.csvdiff_cmd,\n ('--output', t.name) + args)\n diff = None\n if path.exists(t.name) and os.stat(t.name).st_size:\n with open(t.name) as istream:\n diff = json.load(istream)\n\n return RunResult(result.exit_code, diff, result.output)\n\n def csvdiff_summary_cmd(self, *args):\n RunResult = namedtuple('RunResult', 'exit_code summary output')\n t = tempfile.NamedTemporaryFile(delete=True)\n test_args = ('--output', t.name, '--style', 'summary')\n result = self.runner.invoke(csvdiff.csvdiff_cmd,\n test_args + args)\n with open(t.name, 'r') as istream:\n summary = istream.read()\n\n return RunResult(result.exit_code, summary, result.output)\n\n def patch_cmd(self, *args):\n RunResult = namedtuple('RunResult', 'exit_code records output')\n t = tempfile.NamedTemporaryFile(delete=True)\n result = self.runner.invoke(csvdiff.csvpatch_cmd,\n ('--output', t.name) + args)\n recs = None\n if path.exists(t.name) and os.stat(t.name).st_size:\n with open(t.name) as istream:\n recs = list(csv.DictReader(istream))\n\n return RunResult(result.exit_code, recs, result.output)\n\n def test_summarize(self):\n lhs = [\n {'name': 'a', 'sheep': '7'},\n {'name': 'b', 'sheep': '12'},\n {'name': 'c', 'sheep': '0'},\n ]\n rhs = [\n {'name': 'a', 'sheep': '7'},\n {'name': 'c', 'sheep': '2'},\n {'name': 'd', 'sheep': '8'},\n ]\n diff = csvdiff.diff_records(lhs, rhs, ['name'])\n assert patch.is_valid(diff)\n assert not patch.is_typed(diff)\n o = StringIO()\n csvdiff._summarize_diff(diff, len(lhs), stream=o)\n self.assertEqual(\n o.getvalue(),\n \"1 rows removed (33.3%)\\n\"\n \"1 rows added (33.3%)\\n\"\n \"1 rows changed (33.3%)\\n\"\n )\n\n def test_summarize_cmd(self):\n lhs = [\n {'name': 'a', 'sheep': '7'},\n {'name': 'b', 'sheep': '12'},\n {'name': 'c', 'sheep': '0'},\n ]\n rhs = [\n {'name': 'a', 'sheep': '7'},\n {'name': 'c', 'sheep': '2'},\n {'name': 'd', 'sheep': '8'},\n ]\n with tmp_csv_files(lhs, 
rhs) as (lhs_file, rhs_file):\n result = self.csvdiff_summary_cmd('name', lhs_file, rhs_file)\n self.assertEquals(result.exit_code, 1)\n self.assertEquals(\n result.summary,\n \"1 rows removed (33.3%)\\n\"\n \"1 rows added (33.3%)\\n\"\n \"1 rows changed (33.3%)\\n\"\n )\n\n def test_summarize_identical(self):\n lhs = [\n {'name': 'a', 'sheep': '7'},\n {'name': 'b', 'sheep': '12'},\n {'name': 'c', 'sheep': '0'},\n ]\n diff = csvdiff.diff_records(lhs, lhs, ['name'])\n assert patch.is_valid(diff)\n assert not patch.is_typed(diff)\n o = StringIO()\n csvdiff._summarize_diff(diff, len(lhs), stream=o)\n self.assertEqual(\n o.getvalue(),\n 'files are identical\\n'\n )\n\n def test_csvdiff_fails_without_enough_arguments(self):\n result = self.csvdiff_cmd()\n self.assertEquals(result.exit_code, 2)\n\n result = self.csvdiff_cmd(self.a_file, self.b_file)\n self.assertEquals(result.exit_code, 2)\n\n def test_csvdiff_fails_without_valid_key(self):\n result = self.csvdiff_cmd('abcd', self.a_file, self.b_file)\n assert result.exit_code != 0\n\n def test_diff_command_valid_usage_with_difference(self):\n result = self.csvdiff_cmd('id', self.a_file, self.b_file)\n self.assertEqual(result.exit_code, 1)\n diff = result.diff\n patch.validate(diff)\n assert patch.is_valid(diff)\n\n expected = {\n '_index': ['id'],\n 'added': [{'id': '5', 'name': 'mira', 'amount': '81'}],\n 'removed': [{'id': '2', 'name': 'eva', 'amount': '63'}],\n 'changed': [\n {'key': ['1'],\n 'fields': {'amount': {'from': '20', 'to': '23'}}},\n {'key': ['6'],\n 'fields': {'amount': {'from': '10', 'to': '13'}}},\n ],\n }\n\n self.assertPatchesEqual(diff, expected)\n\n def test_diff_records_str_values(self):\n lhs = [\n {'name': 'a', 'sheep': '7'},\n {'name': 'b', 'sheep': '12'},\n {'name': 'c', 'sheep': '0'},\n ]\n rhs = [\n {'name': 'a', 'sheep': '7'},\n {'name': 'c', 'sheep': '2'},\n {'name': 'd', 'sheep': '8'},\n ]\n\n diff = csvdiff.diff_records(lhs, rhs, ['name'])\n assert patch.is_valid(diff)\n assert not 
patch.is_typed(diff)\n\n # check the contents of the diff\n self.assertEqual(diff['added'], [\n {'name': 'd', 'sheep': '8'}\n ])\n self.assertEqual(diff['removed'], [\n {'name': 'b', 'sheep': '12'}\n ])\n self.assertEqual(diff['changed'], [\n {'key': ['c'],\n 'fields': {'sheep': {'from': '0', 'to': '2'}}}\n ])\n\n # check that we can apply the diff\n patched = csvdiff.patch_records(diff, lhs)\n self.assertRecordsEqual(rhs, patched)\n\n def test_diff_records_nonstr_values(self):\n lhs = [\n {'name': 'a', 'sheep': 7},\n {'name': 'b', 'sheep': 12},\n {'name': 'c', 'sheep': 0},\n ]\n rhs = [\n {'name': 'a', 'sheep': 7},\n {'name': 'c', 'sheep': 2},\n {'name': 'd', 'sheep': 8},\n ]\n\n diff = csvdiff.diff_records(lhs, rhs, ['name'])\n assert patch.is_valid(diff)\n assert patch.is_typed(diff)\n\n self.assertEqual(diff['added'], [\n {'name': 'd', 'sheep': 8}\n ])\n self.assertEqual(diff['removed'], [\n {'name': 'b', 'sheep': 12}\n ])\n self.assertEqual(diff['changed'], [\n {'key': ['c'],\n 'fields': {'sheep': {'from': 0, 'to': 2}}}\n ])\n\n # check that we can apply the diff\n patched = csvdiff.patch_records(diff, lhs)\n self.assertRecordsEqual(rhs, patched)\n\n def test_diff_records_multikey(self):\n lhs = [\n {'name': 'a', 'type': 1, 'sheep': 7},\n {'name': 'b', 'type': 1, 'sheep': 12},\n {'name': 'c', 'type': 1, 'sheep': 0},\n ]\n rhs = [\n {'name': 'a', 'type': 1, 'sheep': 7},\n {'name': 'c', 'type': 1, 'sheep': 2},\n {'name': 'd', 'type': 1, 'sheep': 8},\n ]\n\n diff = csvdiff.diff_records(lhs, rhs, ['name', 'type'])\n assert patch.is_valid(diff)\n assert patch.is_typed(diff)\n\n self.assertEqual(diff['added'], [\n {'name': 'd', 'sheep': 8, 'type': 1}\n ])\n self.assertEqual(diff['removed'], [\n {'name': 'b', 'sheep': 12, 'type': 1}\n ])\n self.assertEqual(diff['changed'], [\n {'key': ['c', 1],\n 'fields': {'sheep': {'from': 0, 'to': 2}}}\n ])\n\n # check that we can apply the diff\n patched = csvdiff.patch_records(diff, lhs)\n self.assertRecordsEqual(rhs, 
patched)\n\n def test_patch_schema_is_valid(self):\n assert not patch.is_valid({})\n\n def test_patch_cmd_valid_args(self):\n result = self.patch_cmd('-i', self.diff_file, self.a_file)\n self.assertEqual(result.exit_code, 0)\n\n with open(self.b_file) as istream:\n expected = list(csv.DictReader(istream))\n\n self.assertRecordsEqual(result.records, expected)\n\n def test_patch_cmd_fails_when_json_is_invalid(self):\n result = self.patch_cmd('-i', self.a_file, self.a_file)\n self.assertEqual(result.exit_code, 2)\n assert 'ERROR' in result.output\n\n def test_patch_cmd_fails_when_json_doesnt_match_schema(self):\n result = self.patch_cmd('-i', self.bad_diff_file, self.a_file)\n self.assertEqual(result.exit_code, 2)\n assert 'ERROR' in result.output\n\n def test_patch_add(self):\n orig = [\n {'name': 'a', 'type': '1', 'sheep': '7'},\n ]\n diff = {\n '_index': ['name'],\n 'added': [{'name': 'b', 'type': '1', 'sheep': '9'}],\n 'changed': [],\n 'removed': [],\n }\n expected = [\n {'name': 'a', 'type': '1', 'sheep': '7'},\n {'name': 'b', 'type': '1', 'sheep': '9'},\n ]\n self.assertRecordsEqual(patch.apply(diff, orig), expected)\n\n def test_patch_remove(self):\n orig = [\n {'name': 'a', 'type': '1', 'sheep': '7'},\n ]\n diff = {\n '_index': ['name'],\n 'added': [],\n 'changed': [],\n 'removed': [{'name': 'a', 'type': '1', 'sheep': '7'}],\n }\n expected = []\n self.assertRecordsEqual(patch.apply(diff, orig), expected)\n\n def test_patch_change(self):\n orig = [\n {'name': 'a', 'type': '1', 'sheep': '7'},\n ]\n diff = {\n '_index': ['name'],\n 'added': [],\n 'changed': [{'key': ['a'], 'fields': {'sheep': {'from': '7',\n 'to': '8'}}}],\n 'removed': [],\n }\n expected = [\n {'name': 'a', 'type': '1', 'sheep': '8'},\n ]\n self.assertRecordsEqual(patch.apply(diff, orig), expected)\n\n def assertPatchesEqual(self, lhs, rhs):\n self.assertEqual(lhs['_index'], rhs['_index'])\n self.assertRecordsEqual(lhs['added'], rhs['added'])\n self.assertRecordsEqual(lhs['removed'], 
rhs['removed'])\n self.assertChangesEqual(lhs['changed'], rhs['changed'])\n\n def assertRecordsEqual(self, lhs, rhs):\n self.assertEqual(records.sort(lhs), records.sort(rhs))\n\n def assertChangesEqual(self, lhs, rhs):\n self.assertEqual(sorted(lhs, key=lambda r: r['key']),\n sorted(rhs, key=lambda r: r['key']))\n\n def tearDown(self):\n pass\n\n\n@contextmanager\ndef tmp_csv_files(*args):\n files = []\n for arg in args:\n t = tempfile.NamedTemporaryFile()\n save_as_csv(arg, t.name)\n files.append(t)\n\n yield [f.name for f in files]\n\n\ndef save_as_csv(records, filename):\n with open(filename, 'w') as ostream:\n header = sorted(records[0].keys())\n\n writer = csv.DictWriter(ostream, header)\n\n writer.writerow(dict(zip(header, header)))\n for record in records:\n writer.writerow(record)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_csvdiff.py","file_name":"test_csvdiff.py","file_ext":"py","file_size_in_byte":11778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"543839412","text":"#!/usr/bin/env python3\n\nimport networkx as nx\nfrom itertools import combinations\n\ntest1 = '########################\\n#...............b.C.D.f#\\n#.######################\\n#.....@.a.B.c.d.A.e.F.g#\\n########################'\ntest2 = '#################\\n#i.G..c...e..H.p#\\n########.########\\n#j.A..b...f..D.o#\\n########@########\\n#k.E..a...g..B.n#\\n########.########\\n#l.F..d...h..C.m#\\n#################'\ntest3 = '########################\\n#@..............ac.GI.b#\\n###d#e#f################\\n###A#B#C################\\n###g#h#i################\\n########################'\n\nENTRANCE = '@'\nPASSAGE = '.'\nWALL = '#'\n\ndef is_key(what):\n return what.islower()\n\ndef is_door(what):\n return what.isupper()\n\ndef phash(path):\n return tuple(path[-1:] + sorted(path[:-1]))\n\ndef ghash(graph):\n return tuple(sorted(graph.nodes()))\n\nclass Day18:\n def __init__(self, data):\n self.map = [[c for 
c in r] for r in data.split('\\n')]\n self.events = {}\n for r in range(len(self.map)):\n for c in range(len(self.map[r])):\n what = self.map[r][c]\n if is_key(what):\n self.events[what] = (r, c)\n elif is_door(what):\n self.events[what] = (r, c)\n\n def _entrances(self):\n entrances = []\n for r in range(len(self.map)):\n for c in range(len(self.map[r])):\n what = self.map[r][c]\n if what == ENTRANCE:\n entrances.append((r, c))\n return entrances\n\n def _neighbors(self, pos):\n opened = [pos]\n closed = []\n neighbors = {}\n step = 0\n while True:\n step += 1\n temp = opened\n opened = []\n for pos in temp:\n closed.append(pos)\n u = (pos[0] - 1, pos[1])\n d = (pos[0] + 1, pos[1])\n l = (pos[0], pos[1] - 1)\n r = (pos[0], pos[1] + 1)\n for direction in (u, d, l, r):\n if direction in opened:\n continue\n if direction in closed:\n continue\n what = self.map[direction[0]][direction[1]]\n if what in (PASSAGE, ENTRANCE):\n opened.append(direction)\n elif is_door(what) or is_key(what):\n neighbors[what] = step\n if not opened:\n return neighbors\n\n def _edges(self, start):\n edges = {}\n edges[ENTRANCE] = self._neighbors(start)\n\n opened = list(edges[ENTRANCE])\n closed = []\n while True:\n temp = opened\n opened = []\n for e in temp:\n closed.append(e)\n pos = self.events[e]\n edges[e] = self._neighbors(pos)\n for n in edges[e]:\n if n in opened:\n continue\n if n in closed:\n continue\n opened.append(n)\n if not opened:\n return edges\n\n def _graph(self, start):\n edges = self._edges(start)\n graph = nx.Graph()\n for e1 in edges:\n for e2 in edges[e1]:\n length = edges[e1][e2]\n graph.add_edge(e1, e2, length = length)\n return graph\n\n def collect(self, graphs):\n first = [ENTRANCE]\n lasts = {ghash(graph): ENTRANCE for graph in graphs}\n opened = {phash(first): (0, first, lasts)}\n all_keys = [e for e in self.events if is_key(e)]\n\n count = 0\n while True:\n print(f'{count}/{len(all_keys)}', end='\\r')\n count += 1\n\n temp = opened\n opened = {}\n for h in 
temp:\n length, path, lasts = temp[h]\n for next_key in all_keys:\n # if the key was collected\n if next_key in path:\n continue\n # which graph is the key in\n for graph in graphs:\n if next_key in graph:\n last = lasts[ghash(graph)]\n # path to new key\n shortest_path = nx.shortest_path(graph, last, next_key, weight = 'length')\n shortest_len = nx.shortest_path_length(graph, last, next_key, weight = 'length')\n need_keys = [str.lower(x) for x in shortest_path[1:-1]]\n # if reachable\n if set(need_keys).issubset(set(path)):\n new_path = path + [next_key]\n new_length = length + shortest_len\n new_hash = phash(new_path)\n new_lasts = lasts.copy()\n new_lasts[ghash(graph)] = next_key\n # save best path\n if new_hash in opened:\n if new_length < opened[new_hash][0]:\n opened[new_hash] = (new_length, new_path, new_lasts)\n else:\n opened[new_hash] = (new_length, new_path, new_lasts)\n # if all keys were collected\n if not opened:\n return temp[min(temp, key=lambda h: temp[h][0])][:2]\n\n def part1(self):\n entrances = self._entrances()\n graph = self._graph(entrances[0])\n length, path = self.collect([graph])\n print(path)\n return length\n\n def part2(self):\n entrances = self._entrances()\n r, c = entrances[0]\n self.map[r-1][c-1:c+2] = list('@#@')\n self.map[r ][c-1:c+2] = list('###')\n self.map[r+1][c-1:c+2] = list('@#@')\n\n entrances = self._entrances()\n graphs = [self._graph(entrance) for entrance in entrances]\n length, path = self.collect(graphs)\n print(path)\n return length\n\ndef main():\n data = open('day18.txt').read().strip()\n day18 = Day18(data)\n print(f'Part 1: {day18.part1()}')\n print(f'Part 2: {day18.part2()}')\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"2019/day18.py","file_name":"day18.py","file_ext":"py","file_size_in_byte":6309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"156190646","text":"def fibonacci(n): #function to print out the nth element in the fibonacci 
series#\n \"\"\"Return the nth element in the fibonacci series.\"\"\"\n lst = []\n lst.insert(0,0)\n lst.insert(1,1)\n for i in range(2,n+1):\n value = lst[i-1] + lst[i-2]\n lst.insert(i,value)\n \n print (\"The nth fibonacci number is: \" + str(lst[n]))\n\n\ndef lucas(n): #function to print out the nth element in the lucas numbers series#\n \"\"\"Return the nth element in the lucas sesies\"\"\"\n lst = []\n lst.insert(0,2)\n lst.insert(1,1)\n for i in range(2,n+1):\n value = lst[i-1] + lst[i-2]\n lst.insert(i,value)\n\n print (\"The nth lucas number is: \" + str(lst[n]))\n\ndef sum_series(n,first=0,second=1): #generalized function to return the nth in the series that starts with numbers specified in the first and second optional parameters# \n \"\"\"Return the nth element in the series whose first two elements are specified by the optional parameters\"\"\"\n lst = []\n lst.insert(0,first)\n lst.insert(1,second)\n for i in range(2,n+1):\n value = lst[i-1] + lst[i-2]\n lst.insert(i,value)\n\n print (\"The nth number in the series is: \" + str(lst[n]))\n\n\n\nif __name__ == \"__main__\":\n fibonacci(10) #calling the fibonacci function to return the 10th element in the series#\n\n lucas(10) #calling the lucas function to retrun the 10th element in the series#\n\n sum_series(10) #calling the generalized function, sum_series without the optional parameters to generate the fibonacci number#\n\n sum_series(10,2,1) #calling the generalized function, sum_series to generate the lucas number#\n\n sum_series(10,3,2) #calling the generalized function, sum_series to generate the nth number in a random series#\n","sub_path":"students/madhu/session02/series.py","file_name":"series.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"633779998","text":"import datetime\nfrom pprint import pprint\nimport logging\nfrom kiteconnect import 
KiteTicker\n\nlogging.basicConfig(level=logging.DEBUG)\n\n\n# Initialise\nkws = KiteTicker(\"z830r462fkvxo1y4\", \"9CkdF8zaw34oAKw94e286028HGLij029\")\n\n\n\ndef on_ticks(ws, ticks):\n for x in ticks:\n print(x['last_price'])\n \n \n \n # Callback to receive ticks.\n #logging.debug(\"Ticks: {}\".format(ticks))\n #pprint(ticks)\n #print(\"\\n\")\n\ndef on_connect(ws, response):\n # Callback on successful connect.\n # Subscribe to a list of instrument_tokens (RELIANCE and ACC here).\n ws.subscribe([884737])\n\n # Set RELIANCE to tick in `full` mode.\n ws.set_mode(ws.MODE_LTP, [884737])\n\ndef on_close(ws, code, reason):\n # On connection close stop the event loop.\n # Reconnection will not happen after executing `ws.stop()`\n ws.stop()\n\n# Assign the callbacks.\nkws.on_ticks = on_ticks\nkws.on_connect = on_connect\nkws.on_close = on_close\n\n# Infinite loop on the main thread. Nothing after this will run.\n# You have to use the pre-defined callbacks to manage subscriptions.\nkws.connect()\n\n\n\n\n#[{'instrument_token': 884737,\n # 'last_price': 125.8,\n #'mode': 'ltp',\n #'tradable': True}]\n\n\n\n","sub_path":"Zerodha-20200304T123230Z-001/Zerodha/GettingLiveData.py","file_name":"GettingLiveData.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"528750161","text":"#https://leetcode.com/problems/combination-sum/\n\nclass Solution:\n\tresult = []\n\n\tdef combinationSumCal(self, candidate, candidate_index, target, current):\n\t\tif target == 0:\n\t\t\tself.result.append(current)\n\t\telif target < 0:\n\t\t\treturn\n\n\t\tfor cand in candidate[candidate_index:]:\n\t\t\tcurrent.append(cand)\n\t\t\tself.combinationSumCal(candidate, candidate_index, target-cand, current.copy())\n\t\t\tcurrent.pop()\n\t\t\tcandidate_index += 1\n\n\n\tdef combinationSum(self, candidate, target):\n\t\tself.result = []\n\t\tself.combinationSumCal(candidate, 0, target, [])\n\t\treturn 
self.result\n\n\nif __name__ == '__main__':\n\tcandidate = [2,3,5]\n\ttarget = 8\n\n\ts = Solution()\n\tprint(s.combinationSum(candidate, target))\n","sub_path":"problems/combination_sum.py","file_name":"combination_sum.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"316186938","text":"import os\nimport sys\nimport shutil\nimport librosa\nimport mir_eval\nimport numpy as np\nfrom sklearn import metrics\n\nfrom utils.utilities import (read_lst, read_config, write_lst, mkdir, int16_to_float32)\nfrom conf.feature import *\n\n\ndef load_audio_pair(est_path, ref_path, sample_rate=SAMPLE_RATE):\n\tmax_len = -1\n\tests = []\n\tfor path in est_path:\n\t\test, _ = librosa.load(path, sr=sample_rate, mono=True)\n\t\tests.append(est)\n\t\tif est.shape[-1] > max_len:\n\t\t\tmax_len = est.shape[-1]\n\n\trefs = []\n\tfor path in ref_path:\n\t\tref, _ = librosa.load(path, sr=sample_rate, mono=True)\n\t\trefs.append(ref)\n\n\tref = np.zeros([len(refs), max_len])\n\tfor i in range(len(refs)):\n\t\tref[i, : refs[i].shape[-1]] = refs[i]\n\n\test = np.zeros([len(refs), max_len])\n\tfor i in range(len(refs)):\n\t\test[i, : ests[i].shape[-1]] = ests[i]\n\treturn est, ref\n\ndef frame_roll_from_path(path, max_frame=-1, frames_per_second=100, notes_num=88):\n\tsegments = read_lst(path)\n\tsegments = [seg.rstrip().split('\\t') for seg in segments]\t\n\tif max_frame == -1:\n\t\tmax_frame = int(float(segments[-1][1]) * frames_per_second + 1)\n\tframe_roll = np.zeros([max_frame, notes_num + 1])\n\tframe_roll[:, notes_num] = 1\n\tfor seg in segments:\n\t\tst = int(float(seg[0]) * frames_per_second)\n\t\ted = int(float(seg[1]) * frames_per_second + 1)\n\t\tif st >= max_frame:\n\t\t\tbreak\n\t\tif ed > max_frame:\n\t\t\ted = max_frame\n\t\tframe_roll[st : ed, int(float(seg[2]))] = 1\n\t\tframe_roll[st : ed, notes_num] = 0\n\t\tif ed == max_frame:\n\t\t\tbreak\n\treturn frame_roll, 
max_frame\n\t\n\ndef measure_for_transcription(est_path, ref_path, mode='frame'):\n\tif mode == \"onset\":\n\t\test_intervals, est_pitches = mir_eval.io.load_valued_intervals(est_path)\n\t\tref_intervals, ref_pitches = mir_eval.io.load_valued_intervals(ref_path)\n\t\tprecision, recall, f_measure, _ = mir_eval.transcription.precision_recall_f1_overlap(\n\t\t\tref_intervals, ref_pitches, est_intervals, est_pitches)\n\telse:\n\t\tref_frame_roll, max_frame = frame_roll_from_path(ref_path)\n\t\test_frame_roll, _ = frame_roll_from_path(est_path, max_frame)\n\t\tpre = metrics.average_precision_score(ref_frame_roll, est_frame_roll, average='micro')\n\t\tprecision = recall = f_measure = pre\n\n\treturn precision, recall, f_measure\n\n\ndef measure_for_separation(est_path, ref_path, sample_rate=SAMPLE_RATE):\n\n\tif type(est_path) is str:\n\t\test, ref = load_audio_pair(est_path, ref_path, sample_rate)\t\n\telse:\n\t\test = est_path\n\t\tref = ref_path\n\t(sdr, sir, sar, perm) = mir_eval.separation.bss_eval_sources(ref, est, compute_permutation=True)\n\n\treturn sdr, sir, sar\n\n\ndef evaluate_separation(samples):\n\tscore = {}\n\tsample_scores = []\n\n\tfor sample in samples:\n\t\tfor instr in sample:\n\t\t\tif instr not in score:\n\t\t\t\tscore[instr] = []\n\n\tfor sample in samples:\n\t\tsample_score = {}\n\t\tfor instr in sample:\n\t\t\tpairs = sample[instr]['separation']\n\t\t\tfor pair in pairs:\n\t\t\t\test, _ = librosa.load(pair[0], sr=SAMPLE_RATE, mono=True)\n\t\t\t\tref, _ = librosa.load(pair[1], sr=SAMPLE_RATE, mono=True)\n\t\t\t\tsdr, sir, sar = measure_for_separation(est, ref)\n\t\t\t\tscore[instr].append(sdr)\n\t\t\t\tsample_score[instr] = sdr[0]\n\t\tsample_scores.append(sample_score)\n\n\treturn sample_scores\n\n\ndef evaluate_transcription(samples):\n\tscore = {}\n\tfor sample in samples:\n\t\tfor instr in sample:\n\t\t\tif instr not in score:\n\t\t\t\tscore[instr] = []\n\tres = []\n\tsample_scores = []\n\tfor sample in samples:\n\t\tsample_score = 
{}\n\t\tfor instr in sample:\n\t\t\tpairs = sample[instr]['transcription']\n\t\t\tfor pair in pairs:\n\t\t\t\tf1, pre, recall = measure_for_transcription(pair[0], pair[1])\n\t\t\t\tres.append([instr, f1, pre, recall])\n\t\t\t\tscore[instr].append(f1)\n\t\t\t\tsample_score[instr] = f1\n\n\t\tsample_scores.append(sample_score)\t\t\n\treturn sample_scores\n\n\nif __name__=='__main__':\n\test_path = ['evaluation/separation/test/AuSep_1_vn_27_King_est_1_.wav', 'evaluation/separation/test/AuSep_2_fl_30_Fugue_est_1_.wav']\n\tref_path = ['evaluation/separation/test/AuSep_1_vn_27_King_ref_1_.wav', 'evaluation/separation/test/AuSep_2_fl_30_Fugue_ref_1_.wav']\n\tsdr, sir, sar = separation_evaluation(est_path, ref_path)\n\tprint(sdr)\n\n","sub_path":"src/inference/compute_measure.py","file_name":"compute_measure.py","file_ext":"py","file_size_in_byte":3997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"110060573","text":"urls_dict = \\\n { 'covid' : {\n 'url': 'https://moz.gov.ua/article/news/operativna-'+\n 'informacija-pro-poshirennja-koronavirusnoi-infekcii-2019-cov19',\n 'parser': 'CovidHTMLParser'\n },\n 'currency': {\n 'url': 'https://minfin.com.ua/ua/currency/',\n 'parser': 'CurrencyHTMLParser'\n },\n 'inflation': {\n 'url': 'https://index.minfin.com.ua/ua/economy/index/inflation/',\n 'parser': 'InflationHTMLParser'\n }\n }","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"415997325","text":"from __future__ import print_function\nfrom random import random\nimport sys\nsys.path.append('..')\nfrom dorna2 import dorna\n\n\ndef main(ip, port):\n robot = dorna()\n robot.connect(ip, port)\n\n # go home\n print(\"go toward home\")\n arg = {\"rel\": 0, \"id\": robot.rand_id(), \"j0\": 1, \"j1\": 1, \"j2\": 0, \"j3\": 0, \"j4\": 0}\n robot.jmove(**arg)\n\n # come 200 mm back 
toward x\n print(\"go back in x direction\")\n arg = {\"rel\": 1, \"id\": robot.rand_id(), \"x\": -200, \"vel\": 1000, \"accel\": 20000, \"jerk\": 40000}\n robot.lmove(**arg)\n robot.wait(id=arg[\"id\"])\n\n # random points\n i = 0\n while True:\n arg = {\"rel\": 0, \"id\": i + 1, \"x\": 300 + 1 * random(), \"y\": -100 + 1 * random(), \"z\": 206.404 - 100 + 1 * random()}\n print(\"command\", i, \" arg: \", arg)\n robot.lmove(**arg)\n robot.wait(id=arg[\"id\"])\n i += 1\n robot.ws.close()\n\nif __name__ == '__main__':\n main(\"127.0.0.1\", 443)\n","sub_path":"example/random_line.py","file_name":"random_line.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"634696677","text":"#-------------------------------------------------#\n# Title: Working with Dictionaries\n# Dev: GKim\n# Date: February 6, 2019\n# ChangeLog: (Who, When, What)\n# RRoot, 11/02/2016, Created starting template\n# GKim, 2-06-19, started text file read back to python dictionary\n# GKim, 2-07-19, completed working script for Assignment 5\n# GKim, 2-08-19, I added other options to give to the user in the form of yes or no\n# and to account for choices other than yes or no or 0-4.\n# GKim, 2-9-19, edited code for comments and to account for user error, added a conditional\n# if the user misspells has numbers, spacing issues when they are trying to remove a task\n# GKim, 2-10-19, found a potential user error in choice \"2\". When adding a task i gave the user\n# an option to make another choice of going to back to menu, but didn't account for \"no\" and if the user \n# wanted to save data from option and leave. Found section of global variable, because a \"break\" does not \n# work in a function that is in a loop. 
\n# GKim, 2-10-19, choice \"3\" was not working properly and had to change range(len(lstTable))to range(len(lstTable)-1, -1, -1)\n# This made sure I iterated through my list properly \n#-------------------------------------------------#\n\n# This is to slow print statements \nimport time\n# Global varibale which to be accessable in the program\nglobal active\n# Set it to True, as a conditon to keep script active (running)\nactive = True\n# Empty list to write our text file into\nlstTable = []\nchoice = None\n\n# Load data from a text file\ntext_file = open(\"C:\\_PythonClass\\Module05\\ToDo1.txt\",\"r\")\nfor line in text_file.readlines():# to read through each line in text file.\n row = line.split(',')\n newDict = {\"Task\":row[0],\"Priority\":row[1].replace(\"\\n\",\"\")} # .replace(\"\\n\",\"\") gets rid of that character\n lstTable.append(newDict) # adds each dictionary to lstTable\ntext_file.close()\n\n# ------------------------------------------------------------------------ #\n# Used functions to make my starting script less bulky\ndef title():\n title = [\"This is the...\", \"PY\", \"DO\",\"LIST!!!\", \"(Also known as the To Do List)\"]\n for word in title:\n print(\"\\t\\t\\t\", word,\"\\n\")\n time.sleep(1.25)\n\n# Created function to show data in lstTable\ndef show_table():\n print(\"\\nUp-to-date Data in table:\\n\")\n for dict in lstTable:\n print(dict)\n print() \n# Shows the data in raw form to user\ndef show_data():\n print()\n print(\"#\" * 7, \"The current items TO DO are\", \"#\" * 7)\n for dict in lstTable:\n print(\"{}({})\".format(dict[\"Task\"],dict[\"Priority\"]))\n time.sleep(.25)\n print(\"#\" * 43)\n print()\n# Shows menu\ndef menu():\n print (\"\"\"\n PYDO Menu of Options\n 0) Exit Program\n 1) Show To Do List\n 2) Add a New Task.\n 3) Remove an existing Task.\n 4) Save Data to File\n \"\"\")\n time.sleep(.75)\n# Save data function with exit out of script\ndef save_data():\n global active \n save = input(\"\\nWould you like to save data 
to a text file (y/n)?\")\n if save.lower() == \"y\" or save.lower() == \"yes\":\n text_file = open(\"C:\\_PythonClass\\Module05\\ToDo1.txt\",\"w\")\n # Iterates through the list\n for dict in lstTable:\n #.get(), gets the value of the key .join (',') joins the 2 values with a comma\n text_file.write(\",\".join([dict.get('Task'),dict.get('Priority')+ \"\\n\"]))\n text_file.close()\n show_data()\n print(\"\\nYour data has been saved!\")\n time.sleep(1)\n active = False\n # Gave the user an out if they selected the wrong choice\n elif save.lower() == \"n\" or save.lower() == \"no\":\n active = False\n else:\n print(\"Not a proper entry, exiting PyDo List\")\n active = False\n \n\n# ------------------------------------------------------------------------------ #\n# Prints title at start of script \ntitle()\n# Start of Script\n# set choice = to None to have a blank starting point for the loop\nwhile choice == None:\n# Display a menu of choices to the user\n menu()\n choice = input(\"\\nWhich option would you like to perform? [0 to 4] - \")\n # Exits script\n if choice == \"0\":\n print(\"\\nYour are leaving the PYDO LIST APP!\")\n time.sleep(1)\n break\n \n # Show the current data in the table\n elif choice == \"1\":\n show_data()\n response = input(\"Would you like to make another choice (y/n)? \")\n # A quick exit if the data is all the user wanted to see\n if response.lower() == \"n\" or response.lower() == \"no\":\n break\n else:\n choice = None\n \n # Add a new Task to the list/Table\n elif choice == \"2\":\n task1 = input(\"What is the Task? \")\n priority1 = input(\"What is the Priority (low|high)? 
\")\n tpDict = {\"Task\":task1, \"Priority\":priority1}\n lstTable.append(tpDict)\n show_data()\n show_table()\n response1 = input(\"Would you like to make another choice (y/n) \")\n # I gave user an option to enter choice or exit out of script\n if response1.lower() == \"n\" or response1.lower() == \"no\":\n # function of save data \n save_data()\n # If active is False it will break out of loop\n if active == False:\n break\n elif response1.lower() == \"y\" or response1.lower() == \"yes\":\n choice = None\n # This line was to account for anything other than yes or no and leads the user back to the main menu\n else:\n print(\"Not a proper response! You are being redirected to the main menu\")\n time.sleep(1) \n choice = None \n # Remove a Task from the list/Table\n elif choice == \"3\":\n show_data()\n delete = input(\"What task would you like to remove? \")\n # ran a for loop to iterate each dictionary in the list. \n for dict in range(len(lstTable)-1, -1,-1):\n # .get(\"Task\") checks each Task value to delete(user input to remove Task)\n if lstTable[dict].get('Task',None) == delete:\n del lstTable[dict]\n show_data()\n show_table()\n print(\"The Task of {}, has been removed\".format(delete))\n time.sleep(1)\n # Send user right back to main menu\n break\n # Notifies user of error and takes them back to main menu\n else:\n print(\"\\nPlease use proper caps, spacing and no numbers Else item not found\")\n time.sleep(1)\n \n # Save tasks to the ToDo.txt file\n elif choice == \"4\":\n save = input(\"\\nWould you like to save data to a text file (y/n)?\")\n if save.lower() == \"y\" or save.lower() == \"yes\":\n text_file = open(\"C:\\_PythonClass\\Module05\\ToDo1.txt\",\"w\")\n # Iterates through the list\n for dict in lstTable:\n #.get(), gets the value of the key .join (',') joins the 2 values with a comma\n text_file.write(\",\".join([dict.get('Task'),dict.get('Priority')+ \"\\n\"]))\n text_file.close()\n show_data()\n print(\"\\nYour data has been saved!\")\n 
time.sleep(1)\n # Gave the user an out if they selected the wrong choice\n elif save.lower() == \"n\" or save.lower() == \"no\":\n backTo = input(\"\\nWould you like to go back to the main menu? \")\n # Added more escape clauses for user error\n if backTo.lower() == \"n\" or backTo.lower() == \"no\":\n break\n elif backTo.lower() == \"y\" or backTo.lower() == \"yes\":\n choice = None\n else:\n print(\"That is not a proper response. You are being redirected to the Main Menu!\")\n time.sleep(1)\n choice = None \n choice = None\n # For any number other than 0-4 or even any letter will be displayed this message and divert back to main menu\n else:\n print(\"\\nTHAT IS NOT A PROPER CHOICE!!. Please choose 0-4: \")\n time.sleep(1.5)\n choice = None\n \n# Resetting active to keep script active(running). Active is out of the loop and does not reset \nactive = True \n# End of script\nprint(\"\\nThank you for Stopping By!\")\ntime.sleep(.75)\ninput(\"\\nPress any key to Exit\") \ntime.sleep(.75)","sub_path":"Module05/ToDo1.py","file_name":"ToDo1.py","file_ext":"py","file_size_in_byte":8319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"633845590","text":"from zc.buildout.buildout import Buildout\nfrom z3c.checkversions import base\n\n\nclass Checker(base.Checker):\n \"\"\"checker class for a buildout\n \"\"\"\n def get_versions(self):\n buildout = Buildout(self.filename, '')\n\n # set the index URL from the buildout if not already provided\n buildout_index = buildout['buildout'].get('index')\n if not self.__custom_url:\n self._set_index_url(buildout_index)\n\n print(u\"Checking buildout file %s\" % self.filename)\n return buildout['versions']\n\n\n","sub_path":"z3c.checkversions/tags/0.1/z3c/checkversions/buildout.py","file_name":"buildout.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"470416787","text":"from 
pynhl.event import Event\nfrom pynhl.player import Player\nfrom pynhl.shift import Shift\nimport pynhl.helpers as helpers\nimport bisect, datetime\n\n\nclass Game:\n # Game will have Players who will have shifts and each shift can have event(s)\n\n # Edit for reading a JSON input or a CSV one\n def __init__(self, game_json, shift_json):\n # Basic game information provided by the API\n self.game_json = game_json\n self.shift_json = shift_json\n self.game_id = self.game_json['gameData']['game']['pk']\n self.game_season = self.game_json['gameData']['game']['season']\n self.away_team = self.game_json['gameData']['teams']['away']['triCode']\n self.home_team = self.game_json['gameData']['teams']['home']['triCode']\n self.away_goalie = set()\n self.home_goalie = set()\n # Home - Away normalized\n self.final_score = f\"{self.game_json['liveData']['plays']['allPlays'][-1]['about']['goals']['home']}-\" \\\n f\"{self.game_json['liveData']['plays']['allPlays'][-1]['about']['goals']['away']}\"\n\n self.players = self.assign_shifts_to_players()\n self.events_in_game = self.retrieve_events_in_game()\n\n # remove unnecessary data in memory before prolonged processing\n self.cleanup()\n\n # Extra functionality that doesn't require game/shift json\n self.add_strength_players_to_event()\n\n # Determine how much time each player played with every other player\n for p_i, player in enumerate(self.players):\n for other_player in {k: v for k, v in self.players.items() if k != player}:\n if self.players[other_player].team == self.players[player].team:\n # Check to see if the player has been added from this or a previous game\n if player in self.players[other_player].ice_time_with_players:\n # Check to see if this game has been done already\n if self.game_id in self.players[other_player].ice_time_with_players[player]:\n # Skip this player, they have already been calculated\n continue\n # Calculate the time between the two players in this function\n 
self.get_time_together_between_two_players(self.players[player], self.players[other_player])\n\n # Testing function\n for player in self.players:\n for o in self.players[player].ice_time_with_players:\n x = self.players[player].ice_time_with_players[o][self.game_id]\n s_x = [helpers.seconds_to_minutes(y) for y in x.values()]\n s_xx = helpers.seconds_to_minutes(sum([int(x) for x in x.values()]))\n a = 5\n\n def __str__(self):\n return f\"{self.home_team} vs.{self.away_team} Final Score:{self.final_score}\"\n\n def __repr__(self):\n return self.__str__()\n\n def cleanup(self):\n self.game_json = None\n self.shift_json = None\n\n def add_goalie(self, player_object):\n \"\"\"\n If a player is a goalie, adds it to home/away_goalie variable\n \"\"\"\n if player_object.team == self.home_team:\n self.home_goalie.add(player_object.name)\n else:\n self.away_goalie.add(player_object.name)\n return self\n\n def retrieve_players_in_game(self, active_players):\n \"\"\"\n Parse self.json_data for PLAYERS in the game\n Update self.players_in_game to be a list of [Player objects]\n \"\"\"\n players_dict = {}\n all_players = self.game_json[\"gameData\"][\"players\"]\n for player_id in all_players:\n temp = Player(all_players[player_id])\n if temp.name in active_players:\n players_dict[temp.name] = temp\n if 'G' in temp.position:\n self.add_goalie(temp)\n return players_dict\n\n def retrieve_shifts_from_game(self):\n \"\"\"\n Fetch shift information and generate a Shift object for each shift in the game\n \"\"\"\n shifts = []\n sorted_ins = bisect.insort\n for shift in self.shift_json['data']:\n temp_shift = Shift(self.game_id, self.home_team, shift)\n if temp_shift.duration != 0:\n # Maintains sorted order based off __eq__ inside shift object\n sorted_ins(shifts, temp_shift)\n return shifts\n\n def assign_shifts_to_players(self):\n \"\"\"\n Assigns shifts from each period in the game to the player object\n Shifts are separated by [GameID][Period] = [Shifts in the period, in that 
game]\n \"\"\"\n shifts = self.retrieve_shifts_from_game()\n players = self.retrieve_players_in_game(active_players=set([s.player for s in shifts]))\n for shift in shifts:\n if self.game_id not in players[shift.player].shifts:\n players[shift.player].shifts[self.game_id] = []\n players[shift.player].shifts[self.game_id].append(shift)\n return players\n\n def retrieve_events_in_game(self):\n \"\"\"\n Function to retrieve all events, and their necessary information to the class object\n \"\"\"\n # All events from the input JSON data\n events = self.game_json['liveData']['plays']['allPlays']\n events_in_game = []\n add_events = bisect.insort\n for curr_event in events:\n type_of_event = curr_event['result']['event']\n if type_of_event in helpers.TRACKED_EVENTS:\n temp_event = Event(curr_event, self.home_team, self.away_team)\n add_events(events_in_game, temp_event)\n return events_in_game\n\n def add_strength_players_to_event(self):\n \"\"\"\n Function to find the players who are on ice for each event in the game\n \"\"\"\n # TODO: When a penalty occurs, the play receiving the penalty should be included\n goalies = self.home_goalie.union(self.away_goalie)\n for i, event_to_parse in enumerate(self.events_in_game):\n for player in self.players:\n if player not in goalies:\n event_to_parse.get_players_for_event(self.players[player].shifts[self.game_id])\n # Based off players on the ice, determine the strength (5v5, 6v5 etc)\n event_to_parse.determine_event_state(self.home_team, self.away_team)\n event_to_parse.calculate_time_since_shot(self.events_in_game[:i])\n return self\n\n def get_period_range_in_events_list(self, period_to_find):\n temp = {}\n if period_to_find not in temp:\n per_index = bisect.bisect_left(self.events_in_game, period_to_find)\n per_end = bisect.bisect_left(self.events_in_game, period_to_find + 1)\n return (per_index, per_end)\n\n def get_time_together_between_two_players(self, player, other):\n \"\"\"\n For each player in the game, find their 
teammates & opposition for\n every second they are on the ice in that game\n \"\"\"\n for p_shift in player.shifts[self.game_id]:\n i = bisect.bisect_left(other.shifts[self.game_id], p_shift)\n if i == len(other.shifts[self.game_id]):\n i -= 1\n closest_shift = other.shifts[self.game_id][i]\n period_ranges = {}\n if helpers.do_shifts_overlap(p_shift, closest_shift):\n time_shared, time_lb, time_ub = helpers.get_time_shared(p_shift, closest_shift)\n if time_shared > 0:\n if p_shift.period not in period_ranges:\n period_ranges[p_shift.period] = self.get_period_range_in_events_list(p_shift.period)\n values, swapped = self.separate_time_shared_by_strengths(time_lb, time_ub, time_shared,\n period_ranges[p_shift.period][0],\n period_ranges[p_shift.period][1])\n # TODO: Think of better location for this function, ugly\n if player.team == self.away_team:\n player.add_shared_toi(self.game_id, other.name, swapped)\n else:\n player.add_shared_toi(self.game_id, other.name, values)\n if other.team == self.away_team:\n other.add_shared_toi(self.game_id, player.name, swapped)\n else:\n other.add_shared_toi(self.game_id, player.name, values)\n\n def separate_time_shared_by_strengths(self, lb, ub, time_shared, low_i, high_i):\n \"\"\"\n Splits the time shared between two players on a shift (time_shared) by the intervals found in\n self.strength_intervals\n\n lb / ub refer to the interval shared by the two players, a time between 00:00 and 19:59\n low_i/high_i refer to the start and end index for a given period in the game\n \"\"\"\n\n start = bisect.bisect_left(self.events_in_game, lb, low_i, high_i)\n if start > 0:\n start -= 1\n end = bisect.bisect_left(self.events_in_game, ub, low_i, high_i)\n if end < len(self.events_in_game):\n end += 1\n #\n prev_strength = self.events_in_game[start].strength\n pen_start = None\n strengths_during_shift = {prev_strength: 0} # strength:seconds\n '''\n Need to accurately account for penalty time\n Penalties last TWO to FOUR minutes\n '''\n for 
event in self.events_in_game[start:end]:\n # Break immediately, don't care about any events anymore\n if event.time > ub:\n break\n # Add strength to dict\n if \"Penalty\" in event.type_of_event:\n pen_start = event.time\n pen_end = helpers.add_minutes_to_time(pen_start, (event.penalty_duration // 60))\n if event.strength not in strengths_during_shift:\n strengths_during_shift[event.strength] = 0\n if event.time < lb:\n # Gets the strength just before the beginning of the shift\n prev_strength = event.strength\n # All the work done in here\n elif event.time < ub:\n diff = helpers.subtract_two_time_objects(lb, event.time)\n # Set the new lower bound\n lb = event.time\n # Add the difference and on to the next\n '''\n Handle the case when their shifts and a penalty was on during the ice\n 10:34 -> 12:34 , shift ends a little bit after that\n If there are no events after 12:34, what to do?\n Get remaining total time (already done that)\n Take that number, subtract it from pen_end and lb\n That becomes the PP numbers\n Remaining time is the remaining time\n '''\n strengths_during_shift[event.strength] += diff\n a = 5\n\n # Generate the remaining time here\n strengths_during_shift[prev_strength] += time_shared - sum(strengths_during_shift.values())\n # Returns the home v away and away v home versions for the case of teammate / teammate and teammate / opposition\n return {k: v for k, v in strengths_during_shift.items() if v != 0}, {f\"{k[2]}{k[1]}{k[0]}\": v for k, v in\n strengths_during_shift.items()}\n","sub_path":"pynhl/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":11560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"461356661","text":"import asyncio\nfrom prometheus_client import CollectorRegistry, Gauge, push_to_gateway\nfrom subprocess import Popen, PIPE\nimport re\nimport logging\n\n\nclass Worker:\n logging.basicConfig(filename='log.log', level=logging.DEBUG, format='%(asctime)s 
-%(levelname)s -%(message)s')\n registry = CollectorRegistry()\n \"\"\"Основной луп\"\"\"\n loop = asyncio.get_event_loop()\n THREAD_COUNT = 0\n host_config = [' ']\n task_add = []\n task_remove = [' ']\n\n \"\"\" \n объявление метрик типа Guage для Prometheus\n включает в себя набор лейблов \n \"\"\"\n metric_delay = Gauge('ping_pkt_latency_avg', 'h', ['ip', 'group', 'name'], registry=registry)\n metric_loss = Gauge('ping_pkt_lost', 'l', ['ip', 'group', 'name'], registry=registry)\n metric_received_pkt = Gauge('ping_match_received', 'packet received', ['ip', 'group', 'name', 'transmitted'], registry=registry)\n metric_delay_min = Gauge('ping_pkt_latency_min', 'l', ['ip', 'group', 'name'], registry=registry)\n metric_delay_max = Gauge('ping_pkt_latency_max', 'l', ['ip', 'group', 'name'], registry=registry)\n\n def __init__(self, gateway_host, job, max_thread, png_param, delay_ping, delay_collect, delay_parse):\n \"\"\"\n Инициализация переменных при создании объекта\n :param gateway_host: - адрес сервера прометеуса \n :param job: - job метрики\n :param max_thread: - задает максимальное кол-во потоков\n :param png_param: - параметры пинга\n :param delay_ping: - время задержки задачи пинга\n :param delay_parse: - время задержки задачи парса файла с хостам\n \"\"\"\n self.gateway_host = gateway_host\n self.job = job\n self.SEMAPHORE = asyncio.Semaphore(int(max_thread))\n\n self.DELAY_COLLECT = int(delay_collect)\n\n self.png_normal = png_param[0]\n self.png_fast = png_param[1]\n self.png_large = png_param[2]\n self.png_killing = png_param[3]\n\n self.delay_ping = int(delay_ping)\n self.delay_parse = int(delay_parse)\n\n\n def load_config(self):\n \"\"\"\n Парсит файл с задачами распределяя новые или удаленные задачи \n в соответствующие массивы self.task_remove\\self.task_add\n !!!!большой костыль в жопе, надо оптимизировать!!!!\n :return: - ничего не возвращает\n \"\"\"\n self.task_remove = [' ']\n self.task_add = []\n data = []\n with open('tsk.txt', 'r') as file:\n 
for row in file.readlines():\n if len(row) > 2:\n data.append(row.strip().split())\n\n for old_task in self.host_config:\n must_be_remove = True\n for update_task in data:\n if old_task == update_task:\n must_be_remove = False\n if must_be_remove:\n self.task_remove.append(old_task)\n self.host_config.remove(old_task)\n\n for new_task in data:\n must_be_add = True\n for old in self.host_config:\n if old == new_task:\n must_be_add = False\n if must_be_add:\n self.task_add.append(new_task)\n self.host_config.append(new_task)\n print('LOAD_CONFIG:%s' % self.host_config)\n logging.info(u'LOAD_CONFIG:%s' % self.host_config)\n\n @asyncio.coroutine\n def start_config(self):\n \"\"\"\n Стартовая функция в event_loop. \n Через заданный промежуток времени запускает функцию парчинга файла задач load_config()\n Добавляет задачи в основной луп с помощью массива новых задач task_add[]\n :return: - ничего не возвращает\n \"\"\"\n while True:\n self.load_config()\n for config in self.task_add:\n try:\n self.loop.create_task(self.ping_host(config[0], config[1], config[2], config[3]))\n except:\n \"\"\" При возникновении несостыковки строки конфигурации шлет нас подальше,\n не добавляя новую задачу\n \"\"\"\n print('Wrong config line %s'%config)\n logging.error(u'Wrong config line %s'%config)\n yield from asyncio.sleep(self.delay_parse)\n\n @asyncio.coroutine\n def ping_host(self, ip, parameters, group, name):\n \"\"\"\n Г��оздь программы. 
\n :param ip: - айпи хоста для пинга\n :param parameters: - параметры пинга\n :param group: - определяет группу для лейбла метрики\n :param name: - определяет имя для лейбла метрики\n :return: - ничего не возвращает\n \n Выполняет поставленную задачу пинга, если пул потоков свободен, \n в противном случае отдает управление , засыпает на 0 сек.\n \n При выполнения пинга, внезависимости от результата засыпает на заданное время.\n \n Если обнаруживает себя в массиве task_remove - удаляет себя из стека задач.\n \"\"\"\n\n if parameters == 'large':\n parameters = self.png_large\n elif parameters == 'fast':\n parameters = self.png_fast\n elif parameters == 'killing':\n parameters = self.png_killing\n else:\n parameters = self.png_normal\n\n while True:\n data = dict(\n pkt_transmitted=pkt_out[0],\n pkt_received=pkt_out[1],\n pkt_loss=pkt_out[2],\n rtt_min=ltn_out[0],\n rtt_avg=ltn_out[1],\n rtt_max=ltn_out[2], )\n\n flag = False\n for line in self.task_remove:\n if ip == line[0] and parameters == line[1] and group == line[2]:\n flag = True\n\n if not flag:\n for i in range(self.delay_ping):\n with(yield from self.SEMAPHORE):\n print('start ping ip %s' % ip)\n \"\"\"Если пул свободен, начинаем выполнение и отдаем управление в главный луп\"\"\"\n #logging.info(u'start ping ip %s' % ip)\n line = ('sudo ping' + ' ' + ip + ' ' + parameters)\n cmd = Popen(line.split(' '), stdout=PIPE)\n yield from asyncio.sleep(0)\n\n output = cmd.communicate()[0]\n output = str(output)\n try:\n packet_regex = re.compile(r'(\\d+) packets transmitted, (\\d+) (?:packets )?received, (\\d+\\.?\\d*)% packet loss')\n latency_regex = re.compile(r'(\\d+.\\d+)/(\\d+.\\d+)/(\\d+.\\d+)/(\\d+.\\d+)')\n packet_match = packet_regex.search(output)\n latency_match = latency_regex.search(output)\n\n pkt_out = packet_match.groups()\n ltn_out = latency_match.groups()\n data_new = dict(\n pkt_transmitted=pkt_out[0],\n pkt_received=pkt_out[1],\n pkt_loss=pkt_out[2],\n rtt_min=ltn_out[0],\n rtt_avg=ltn_out[1],\n 
rtt_max=ltn_out[2], )\n data = self.inc_dict(data, data_new)\n except:\n print('Error parse ping response')\n\n data = self.middle_result(self.delay_ping)\n\n if data:\n self.metric_delay_max.labels(ip, group, name).set(data['rtt_max'])\n self.metric_delay_min.labels(ip, group, name).set(data['rtt_min'])\n self.metric_delay.labels(ip, group, name).set(data['rtt_avg'])\n self.metric_loss.labels(ip, group, name).set(data['pkt_loss'])\n\n self.metric_received_pkt.labels(ip, group, name, data['pkt_transmitted']).set(data['pkt_received'])\n try:\n push_to_gateway('graph.arhat.ua:9091', job='ping', registry=self.registry)\n print('Push %s done' % ip)\n except:\n print('Error connect to push gate')\n logging.error(u'Error connect to pushgate')\n else:\n self.metric_delay.labels(ip, group, name).set(0)\n self.metric_loss.labels(ip, group, name).set(0)\n self.metric_delay_max.labels(ip, group, name).set(0)\n self.metric_delay_min.labels(ip, group, name).set(0)\n self.metric_received_pkt.labels(ip, group, name, 0).set(0)\n\n print('some trable with ping')\n logging.warning(u'Some trable with ping %s' % ip)\n \"\"\"Когда задача выполнена засыпаем на заданное время и отдаем управление в главный луп\"\"\"\n yield from asyncio.sleep(int(self.DELAY_COLLECT))\n else:\n self.metric_delay.labels(ip, group, name).set(0)\n self.metric_loss.labels(ip, group, name).set(0)\n self.metric_delay_max.labels(ip, group, name).set(0)\n self.metric_delay_min.labels(ip, group, name).set(0)\n self.metric_received_pkt.labels(ip, group, name, 0).set(0)\n\n task = asyncio.Task.current_task()\n task.cancel()\n print('Cancel %s' % ip)\n logging.warning(u'Cancel %s' % ip)\n yield from asyncio.sleep(0)\n\n def inc_dict(self, data, data_new):\n for param in data:\n data[param] += data_new[param]\n return data\n\n def middle_result(self, data, num_range):\n for param in data:\n data[param] = data[param]/num_range\n return 
data","sub_path":"worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":10892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"204401524","text":"#-*-coding:utf-8-*-\n'''\nDpCas-Light\n|||| |||| |||| || ||||\n|| || || || || || |||| || ||\n|| || || || || || || || ||\n|| || || || || ||====|| ||||\n|| || |||| || || ||======|| ||\n|| || || || || || || || ||\n|||| || |||| || || ||||\n\n/------------------ HandPose_X ------------------/\n'''\n# date:2021-03-09\n# Author: Eric.Lee\n# function: pipline\n\nimport cv2\nimport numpy as np\nfrom hand_keypoints.handpose_x import handpose_x_model,draw_bd_handpose_c\nimport math\nfrom cores.tracking_utils import tracking_bbox\nfrom cores.hand_pnp import get_hand_pose\nimport numpy as np\n'''\n 求解二维向量的角度\n'''\ndef vector_2d_angle(v1,v2):\n v1_x=v1[0]\n v1_y=v1[1]\n v2_x=v2[0]\n v2_y=v2[1]\n try:\n angle_=math.degrees(math.acos((v1_x*v2_x+v1_y*v2_y)/(((v1_x**2+v1_y**2)**0.5)*((v2_x**2+v2_y**2)**0.5))))\n except:\n angle_ =65535.\n if angle_ > 180.:\n angle_ = 65535.\n return angle_\n'''\n 获取对应手相关向量的二维角度\n'''\ndef hand_angle(hand_,x=0,y=0):\n angle_list = []\n #---------------------------- thumb 大拇指角度\n angle_ = vector_2d_angle(\n ((int(hand_['0']['x']+x)- int(hand_['2']['x']+x)),(int(hand_['0']['y']+y)-int(hand_['2']['y']+y))),\n ((int(hand_['3']['x']+x)- int(hand_['4']['x']+x)),(int(hand_['3']['y']+y)- int(hand_['4']['y']+y)))\n )\n angle_list.append(angle_)\n #---------------------------- index 食指角度\n angle_ = vector_2d_angle(\n ((int(hand_['0']['x']+x)-int(hand_['6']['x']+x)),(int(hand_['0']['y']+y)- int(hand_['6']['y']+y))),\n ((int(hand_['7']['x']+x)- int(hand_['8']['x']+x)),(int(hand_['7']['y']+y)- int(hand_['8']['y']+y)))\n )\n angle_list.append(angle_)\n #---------------------------- middle 中指角度\n angle_ = vector_2d_angle(\n ((int(hand_['0']['x']+x)- int(hand_['10']['x']+x)),(int(hand_['0']['y']+y)- int(hand_['10']['y']+y))),\n 
((int(hand_['11']['x']+x)- int(hand_['12']['x']+x)),(int(hand_['11']['y']+y)- int(hand_['12']['y']+y)))\n )\n angle_list.append(angle_)\n #---------------------------- ring 无名指角度\n angle_ = vector_2d_angle(\n ((int(hand_['0']['x']+x)- int(hand_['14']['x']+x)),(int(hand_['0']['y']+y)- int(hand_['14']['y']+y))),\n ((int(hand_['15']['x']+x)- int(hand_['16']['x']+x)),(int(hand_['15']['y']+y)- int(hand_['16']['y']+y)))\n )\n angle_list.append(angle_)\n #---------------------------- pink 小拇指角度\n angle_ = vector_2d_angle(\n ((int(hand_['0']['x']+x)- int(hand_['18']['x']+x)),(int(hand_['0']['y']+y)- int(hand_['18']['y']+y))),\n ((int(hand_['19']['x']+x)- int(hand_['20']['x']+x)),(int(hand_['19']['y']+y)- int(hand_['20']['y']+y)))\n )\n angle_list.append(angle_)\n\n return angle_list\n'''\n # 二维约束的方法定义手势,由于受限于没有大量的静态手势数据集原因\n # fist five gun love one six three thumbup yeah\n # finger id: thumb index middle ring pink\n'''\ndef h_gesture(img,angle_list):\n thr_angle = 65.\n thr_angle_thumb = 53.\n thr_angle_s = 49.\n gesture_str = None\n if 65535. 
not in angle_list:\n if (angle_list[0]>thr_angle_thumb) and (angle_list[1]>thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle):\n gesture_str = \"fist\"\n elif (angle_list[0]thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle):\n gesture_str = \"gun\"\n elif (angle_list[0]thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]5) and (angle_list[1]thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle):\n gesture_str = \"one\"\n elif (angle_list[0]thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]thr_angle_thumb) and (angle_list[1]thr_angle):\n gesture_str = \"three\"\n elif (angle_list[0]thr_angle) and (angle_list[2]>thr_angle) and (angle_list[3]>thr_angle) and (angle_list[4]>thr_angle):\n gesture_str = \"thumbUp\"\n elif (angle_list[0]>thr_angle_thumb) and (angle_list[1]thr_angle) and (angle_list[4]>thr_angle):\n gesture_str = \"yeah\"\n\n return gesture_str\n\n#-------------------------------------\n'''\n 手部跟踪算法:采用边界框的IOU方式\n'''\ndef hand_tracking(data,hands_dict,track_index):\n if data is None:\n hands_dict = {}\n track_index = 0\n hands_dict,track_index = tracking_bbox(data,hands_dict,track_index) # 目标跟踪\n return hands_dict,track_index\n\n#-------------------------------------\n'''\n DpCas-Light\n /------------------ HandPose_X ------------------/\n 1) 手的21关键点回归检测\n 2) 食指和大拇指的捏和放开判断,即点击(click)判断\n'''\ndef handpose_track_keypoints21_pipeline(img,hands_dict,hands_click_dict,track_index,algo_img = None,handpose_model = None,gesture_model = None, icon=None,vis = False,dst_thr = 35,angle_thr = 16.):\n\n hands_list = []\n\n if algo_img is not None:\n\n for idx,id_ in enumerate(sorted(hands_dict.keys(), key=lambda x:x, reverse=False)):\n\n x_min,y_min,x_max,y_max,score,iou_,cnt_,ui_cnt = hands_dict[id_]\n\n cv2.putText(img, 'ID {}'.format(id_), (int(x_min+2),int(y_min+15)),cv2.FONT_HERSHEY_COMPLEX, 0.45, (255, 0, 0),5)\n cv2.putText(img, 'ID 
{}'.format(id_), (int(x_min+2),int(y_min+15)),cv2.FONT_HERSHEY_COMPLEX, 0.45, (173,255,73))\n\n\n # x_min,y_min,x_max,y_max,score = bbox\n w_ = max(abs(x_max-x_min),abs(y_max-y_min))\n if w_< 60:\n continue\n w_ = w_*1.26\n\n x_mid = (x_max+x_min)/2\n y_mid = (y_max+y_min)/2\n\n x1,y1,x2,y2 = int(x_mid-w_/2),int(y_mid-w_/2),int(x_mid+w_/2),int(y_mid+w_/2)\n\n x1 = np.clip(x1,0,img.shape[1]-1)\n x2 = np.clip(x2,0,img.shape[1]-1)\n\n y1 = np.clip(y1,0,img.shape[0]-1)\n y2 = np.clip(y2,0,img.shape[0]-1)\n\n bbox_ = x1,y1,x2,y2\n gesture_name = None\n pts_ = handpose_model.predict(algo_img[y1:y2,x1:x2,:])\n\n plam_list = []\n pts_hand = {}\n for ptk in range(int(pts_.shape[0]/2)):\n xh = (pts_[ptk*2+0]*float(x2-x1))\n yh = (pts_[ptk*2+1]*float(y2-y1))\n pts_hand[str(ptk)] = {\n \"x\":xh,\n \"y\":yh,\n }\n if ptk in [0,1,5,9,13,17]:\n plam_list.append((xh+x1,yh+y1))\n if ptk == 0: #手掌根部\n hand_root_ = int(xh+x1),int(yh+y1)\n if ptk == 4: # 大拇指\n thumb_ = int(xh+x1),int(yh+y1)\n if ptk == 8: # 食指\n index_ = int(xh+x1),int(yh+y1)\n if vis:\n if ptk == 0:# 绘制腕关节点\n cv2.circle(img, (int(xh+x1),int(yh+y1)), 9, (250,60,255),-1)\n cv2.circle(img, (int(xh+x1),int(yh+y1)), 5, (20,180,255),-1)\n cv2.circle(img, (int(xh+x1),int(yh+y1)), 4, (255,50,60),-1)\n cv2.circle(img, (int(xh+x1),int(yh+y1)), 3, (25,160,255),-1)\n\n # 计算食指和大拇指中心坐标\n choose_pt = (int((index_[0]+thumb_[0])/2),int((index_[1]+thumb_[1])/2))\n # 计算掌心\n plam_list = np.array(plam_list)\n plam_center = (np.mean(plam_list[:,0]),np.mean(plam_list[:,1]))\n\n # 绘制掌心坐标圆\n if vis:\n cv2.circle(img, (int(plam_center[0]),int(plam_center[1])), 12, (25,160,255),9)\n cv2.circle(img, (int(plam_center[0]),int(plam_center[1])), 12, (255,190,30),2)\n\n # 计算食指大拇指的距离\n dst = np.sqrt(np.square(thumb_[0]-index_[0]) +np.square(thumb_[1]-index_[1]))\n # 计算大拇指和手指相对��掌根部的角度:\n angle_ = vector_2d_angle((thumb_[0]-hand_root_[0],thumb_[1]-hand_root_[1]),(index_[0]-hand_root_[0],index_[1]-hand_root_[1]))\n # 判断手的点击click状态,即大拇指和食指是否捏合\n 
click_state = False\n if dst0) and ((y2_-y1_)>0):\n img_reco_crop = cv2.resize(algo_img[y1_:y2_,x1_:x2_,:], (130,130)) #待识别区域块\n print(\"------------------------>>> start object_recognize_model \")\n max_index,label_msg,score_ = object_recognize_model.predict(img_reco_crop)\n reco_msg = {\"index\":max_index,\"label_msg\":label_msg,\"score\":score_}\n # print(\" audio_recognize function ->> reco_msg : \",reco_msg)\n info_dict[\"reco_msg\"] = reco_msg\n if img_reco_crop is not None: # 绘制识别区域在左下角\n h,w,_ = img.shape\n img[(h-131):(h-1),(w-131):(w-1),:] = img_reco_crop\n cv2.rectangle(img, (w-131,h-131), (w-1,h-1), (225,66,66), 5)\n #-----------------------------------------\n\n info_dict[\"double_en_pts\"] = True\n\n cv2.rectangle(img, (x1_,y1_), (x2_,y2_), (225,255,62), 5)\n cv2.rectangle(img, (x1_,y1_), (x2_,y2_), (100,180,255), 2)\n cv2.putText(img, ' recognize{}'.format(\"\"), (x1_,y1_),cv2.FONT_HERSHEY_COMPLEX, 0.65, (255, 0, 0),5)\n cv2.putText(img, ' recognize{}'.format(\"\"), (x1_,y1_),cv2.FONT_HERSHEY_COMPLEX, 0.65, (0,33,255),1)\n\n else:\n\n info_dict[\"double_en_pts\"] = False\n return img_reco_crop,reco_msg\n\n'''\n 判断各手的click状态是否稳定(点击稳定充电环),即click是否持续一定阈值\n 注意:charge_cycle_step 充电步长越大,触发时间越短\n'''\ndef judge_click_stabel(img,handpose_list,charge_cycle_step = 32):\n flag_click_stable = True\n for i in range(len(handpose_list)):\n _,_,_,dict_ = handpose_list[i]\n id_ = dict_[\"id\"]\n click_cnt_ = dict_[\"click_cnt\"]\n pt_ = dict_[\"choose_pt\"]\n if click_cnt_ > 0:\n # print(\"double_en_pts --->>> id : {}, click_cnt : <{}> , pt : {}\".format(id_,click_cnt_,pt_))\n # 绘制稳定充电环\n # 充电环时间控制\n charge_cycle_step = charge_cycle_step # 充电步长越大,触发时间越短\n fill_cnt = int(click_cnt_*charge_cycle_step)\n if fill_cnt < 360:\n cv2.ellipse(img,pt_,(16,16),0,0,fill_cnt,(255,255,0),2)\n else:\n cv2.ellipse(img,pt_,(16,16),0,0,fill_cnt,(0,150,255),4)\n # 充电环未充满,置为 False\n if fill_cnt<360:\n flag_click_stable = False\n else:\n flag_click_stable = False\n return 
flag_click_stable\n\n'''\n 绘制手click 状态时的大拇指和食指中心坐标点轨迹\n'''\ndef draw_click_lines(img,gesture_lines_dict,vis = False):\n # 绘制点击使能后的轨迹\n if vis :\n for id_ in gesture_lines_dict.keys():\n if len(gesture_lines_dict[id_][\"pts\"]) >=2:\n for i in range(len(gesture_lines_dict[id_][\"pts\"])-1):\n pt1 = gesture_lines_dict[id_][\"pts\"][i]\n pt2 = gesture_lines_dict[id_][\"pts\"][i+1]\n cv2.line(img,pt1,pt2,gesture_lines_dict[id_][\"line_color\"],2,cv2.LINE_AA)\n","sub_path":"lib/hand_lib/cores/handpose_fuction.py","file_name":"handpose_fuction.py","file_ext":"py","file_size_in_byte":15769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"12785067","text":"import pickle\nimport h5py\nimport numpy as np\nfrom PIL import Image\nfrom util import tile_raster_images\n\nfrom TfRBM import RBM\n\n\ndef create_outputter(filename, batch_size, num_features):\n f = h5py.File(filename, 'w')\n dset = f.create_dataset('output',\n shape=(0, batch_size, num_features),\n chunks=(1, batch_size, num_features),\n maxshape=(None, batch_size, num_features),\n dtype='f')\n\n def write(batch):\n dset.resize(len(dset) + 1, axis=0)\n dset[len(dset) - 1, :] = batch\n\n return write\n\n\ndef pretrain_mnist(x_train, model_path):\n def batch_gen(X):\n indices = list(range(len(X)))\n while True:\n np.random.shuffle(indices)\n for i in indices:\n yield X[i]\n\n def hdf5_generator(filename):\n with h5py.File(filename, 'r') as f:\n dset = f['output']\n num_rows = len(dset)\n indices = list(range(num_rows))\n print('Number of rows {}'.format(num_rows))\n while True:\n np.random.shuffle(indices)\n for i in indices:\n data = dset[i]\n yield data\n\n def mnist_callback(epoch, n_w):\n image = Image.fromarray(\n tile_raster_images(\n X=n_w.T,\n img_shape=(28, 28),\n tile_shape=(25, 20),\n tile_spacing=(1, 1)\n )\n )\n image.save(\"mnist/output/rbm_{}.png\".format(epoch))\n\n\n params = {\n 'activation': 'sigmoid',\n 'epsilonw': 0.1,\n 'epsilonvb': 0.1,\n 
'epsilonhb': 0.1,\n 'weightcost': 0.0002,\n 'initialmomentum': 0.5,\n 'finalmomentum': 0.9,\n 'maxepoch': 20\n }\n numbatches = len(x_train)\n model = RBM(batch_gen(x_train), numbatches, 784, 1000, params, create_outputter('mnist/data/rbm1_output.hdf5', 100, 1000), callbacks=[mnist_callback])\n pickle.dump(model, open(model_path + '/layer1.pkl', 'wb'))\n\n model = RBM(hdf5_generator('mnist/data/rbm1_output.hdf5'), numbatches, 1000, 500, params, create_outputter('mnist/data/rbm2_output.hdf5', 100, 500))\n pickle.dump(model, open(model_path + '/layer2.pkl', 'wb'))\n\n model = RBM(hdf5_generator('mnist/data/rbm2_output.hdf5'), numbatches, 500, 250, params, create_outputter('mnist/data/rbm3_output.hdf5', 100, 250))\n pickle.dump(model, open(model_path + '/layer3.pkl', 'wb'))\n\n params = {\n 'activation': 'linear',\n 'noise': 'gaussian',\n 'epsilonw': 0.001,\n 'epsilonvb': 0.001,\n 'epsilonhb': 0.001,\n 'weightcost': 0.0002,\n 'initialmomentum': 0.5,\n 'finalmomentum': 0.9,\n 'maxepoch': 50\n }\n model = RBM(hdf5_generator('mnist/data/rbm3_output.hdf5'), numbatches, 250, 2, params, create_outputter('mnist/data/rbm4_output.hdf5', 100, 2))\n pickle.dump(model, open(model_path + '/layer4.pkl', 'wb'))\n\n\n","sub_path":"mnist/pretrain.py","file_name":"pretrain.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"145875760","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'''\\\nUpdate local hosts use google hosts from github.\nPrecondition:\n Install Python3: https://www.python.org/downloads/\n Use administrator or root permission to run this script.\nUsage: [sudo] python update_host.py\n'''\n__author__ = 'jinqinghua@gmail.com'\n__date__ = '2017/03/03'\n\nimport argparse\nimport sys\nimport time\nfrom urllib.request import urlopen\n\nnow = time.strftime('%Y-%m-%d %H:%M:%S')\nb_now = now.encode('utf-8')\n\n# 
https://github.com/racaljk/hosts/wiki/%E5%90%84%E5%B9%B3%E5%8F%B0-hosts-%E6%96%87%E4%BB%B6%E4%BD%8D%E7%BD%AE\n# https://docs.python.org/3/library/sys.html#sys.platform\nsystem_hosts_pathes = {\n 'win32': r'C:\\Windows\\System32\\drivers\\etc\\HOSTS',\n 'linux': '/etc/hosts',\n 'darwin': '/etc/hosts',\n}\n\nhosts_url = 'https://raw.githubusercontent.com/racaljk/hosts/master/hosts'\nhosts = b'''\\\n# Local hosts\n127.0.0.1 localhost\n255.255.255.255 broadcasthost\n::1 localhost \n\n# My hosts\n127.0.0.1 nexus\n\n# Google hosts update at %s\n\n%s\n\n# Google hosts update at %s\n'''\n\ndef get_hosts_path():\n '''get local hosts path by system'''\n for system, hosts_path in system_hosts_pathes.items():\n if sys.platform.startswith(system):\n return hosts_path\n\ndef get_remote_hosts():\n '''get google hosts from github'''\n print('Start to google hosts from github...')\n remote_hosts = urlopen(hosts_url).read()\n #print(remote_hosts.decode('utf-8'))\n print('done.')\n return remote_hosts\n\ndef update_local_hosts():\n '''write hosts to local'''\n hosts_path = get_hosts_path()\n hosts_remote = get_remote_hosts()\n\n print('Start to update local hosts: %s...' 
%(hosts_path))\n with open(hosts_path, 'wb+') as hosts_local:\n hosts_local.write(hosts %(b_now, hosts_remote, b_now))\n print('done.')\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Update google hosts')\n parser.parse_args()\n\n update_local_hosts()\n print('Life is short, Python more.')\n","sub_path":"hosts/update_hosts.py","file_name":"update_hosts.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"35423278","text":"\"\"\"Provide piped, no disk-IO, BAM preparation for variant calling.\nHandles independent analysis of chromosome regions, allowing parallel\nruns of this step.\n\"\"\"\nimport os\nimport subprocess\n\nfrom bcbio import broad, utils\nfrom bcbio.distributed.transaction import file_transaction\nfrom bcbio.pipeline import shared\nfrom bcbio.variation import realign\n\ndef _region_to_gatk(region):\n chrom, start, end = region\n return \"%s:%s-%s\" % (chrom, start + 1, end)\n\ndef _gatk_extract_reads_cl(data, region, tmp_dir):\n \"\"\"Use GATK to extract reads from full BAM file, recalibrating if configured.\n \"\"\"\n broad_runner = broad.runner_from_config(data[\"config\"])\n algorithm = data[\"config\"][\"algorithm\"]\n args = [\"-T\", \"PrintReads\",\n \"-L\", _region_to_gatk(region),\n \"-R\", data[\"sam_ref\"],\n \"-I\", data[\"work_bam\"]]\n recal_config = algorithm.get(\"recalibrate\", True)\n if recal_config in [\"gatk\", True]:\n args += [\"-BQSR\", data[\"prep_recal\"]]\n elif recal_config:\n raise NotImplementedError(\"Recalibration method %s\" % recal_config)\n return broad_runner.cl_gatk(args, tmp_dir)\n\ndef _piped_input_cl(data, region, tmp_dir, out_base_file, realign_param):\n \"\"\"Retrieve the commandline for streaming input into preparation step.\n If marking duplicates, this requires writing an intermediate file since\n MarkDuplicates uses multiple passed on an input.\n \"\"\"\n broad_runner = 
broad.runner_from_config(data[\"config\"])\n cl = _gatk_extract_reads_cl(data, region, tmp_dir)\n algorithm = data[\"config\"][\"algorithm\"]\n if algorithm.get(\"mark_duplicates\", True):\n sel_file = \"%s-select%s\" % os.path.splitext(out_base_file)\n if not utils.file_exists(sel_file):\n with file_transaction(sel_file) as tx_out_file:\n cl += [\"-o\", tx_out_file]\n subprocess.check_call(cl)\n dup_metrics = \"%s-dup.dup_metrics\" % os.path.splitext(out_base_file)[0]\n compression = \"5\" if realign_param == \"gatk\" else \"0\"\n cl = broad_runner.cl_picard(\"MarkDuplicates\",\n [(\"INPUT\", sel_file),\n (\"OUTPUT\", \"/dev/stdout\"),\n (\"METRICS_FILE\", dup_metrics),\n (\"PROGRAM_RECORD_ID\", \"null\"),\n (\"COMPRESSION_LEVEL\", compression),\n (\"TMP_DIR\", tmp_dir)])\n else:\n sel_file = data[\"work_bam\"]\n broad_runner.run_fn(\"picard_index\", sel_file)\n return sel_file, \" \".join(cl)\n\ndef _piped_realign_gatk(data, region, cl, out_base_file, tmp_dir):\n \"\"\"Perform realignment with GATK, using input commandline.\n GATK requires writing to disk and indexing before realignment.\n \"\"\"\n broad_runner = broad.runner_from_config(data[\"config\"])\n pa_bam = \"%s-prealign%s\" % os.path.splitext(out_base_file)\n if not utils.file_exists(pa_bam):\n with file_transaction(pa_bam) as tx_out_file:\n subprocess.check_call(\"{cl} > {tx_out_file}\".format(**locals()), shell=True)\n broad_runner.run_fn(\"picard_index\", pa_bam)\n recal_file = realign.gatk_realigner_targets(broad_runner, pa_bam, data[\"sam_ref\"],\n dbsnp=shared.configured_ref_file(\"dbsnp\", data[\"config\"], data[\"sam_ref\"]),\n region=_region_to_gatk(region))\n recal_cl = realign.gatk_indel_realignment_cl(broad_runner, pa_bam, data[\"sam_ref\"],\n recal_file, tmp_dir, region=_region_to_gatk(region))\n return pa_bam, \" \".join(recal_cl)\n\ndef _cleanup_tempfiles(data, tmp_files):\n for tmp_file in tmp_files:\n if tmp_file and tmp_file != data[\"work_bam\"]:\n for ext in [\".bam\", \".bam.bai\", 
\".bai\"]:\n fname = \"%s%s\" % (os.path.splitext(tmp_file)[0], ext)\n if os.path.exists(fname):\n os.remove(fname)\n\ndef _piped_bamprep_region(data, region, out_file, tmp_dir):\n \"\"\"Do work of preparing BAM input file on the selected region.\n \"\"\"\n broad_runner = broad.runner_from_config(data[\"config\"])\n algorithm = data[\"config\"][\"algorithm\"]\n realign_param = algorithm.get(\"realign\", \"gatk\")\n realign_param = \"gatk\" if realign_param is True else realign_param\n cur_bam, cl = _piped_input_cl(data, region, tmp_dir, out_file, realign_param)\n if not realign_param:\n prerecal_bam = None\n elif realign_param == \"gatk\":\n prerecal_bam, cl = _piped_realign_gatk(data, region, cl, out_file, tmp_dir)\n else:\n raise NotImplementedError(\"Realignment method: %s\" % realign_param)\n with file_transaction(out_file) as tx_out_file:\n out_flag = \"-o\" if realign_param == \"gatk\" else \">\"\n subprocess.check_call(\"{cl} {out_flag} {tx_out_file}\".format(**locals()), shell=True)\n _cleanup_tempfiles(data, [cur_bam, prerecal_bam])\n\ndef piped_bamprep(data, region=None, out_file=None):\n \"\"\"Perform full BAM preparation using pipes to avoid intermediate disk IO.\n\n Handles de-duplication, recalibration and realignment of original BAMs.\n \"\"\"\n if region[0] == \"nochrom\":\n prep_bam = shared.write_nochr_reads(data[\"work_bam\"], out_file)\n elif region[0] == \"noanalysis\":\n prep_bam = shared.write_noanalysis_reads(data[\"work_bam\"], region[1], out_file)\n else:\n if not utils.file_exists(out_file):\n with utils.curdir_tmpdir() as tmp_dir:\n _piped_bamprep_region(data, region, out_file, tmp_dir)\n prep_bam = out_file\n data[\"work_bam\"] = prep_bam\n data[\"regions\"][\"current\"] = region\n return [data]\n","sub_path":"bcbio/variation/bamprep.py","file_name":"bamprep.py","file_ext":"py","file_size_in_byte":5662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"308682212","text":"import 
json\nimport boto3\n\nfrom django.utils import timezone\nfrom django.conf import settings\n\ns3_client = boto3.client('s3',\n aws_access_key_id=settings.S3_ACCESS_KEY_ID,\n aws_secret_access_key=settings.S3_SECRET_ACCESS_KEY)\n\n\ndef notify(handler, site_domain=\"\", **kwargs):\n '''\n This func uses an S3 Bucket to create a notification task file\n that should be handled by AWS Lambda func\n\n :param handler: string - this is the name of handler func on AWS Lambda side.\n :param site_domain: string to add to an URLs in data on AWS Lambda side\n :param kwargs: dict of serializable data\n :return: None\n '''\n\n data = kwargs or dict()\n if site_domain:\n data[\"site_domain\"] = site_domain\n data[\"handler\"] = handler\n\n handler = f\"a_initialized___{handler}___{timezone.now().timestamp()}\"\n s3_client.put_object(\n Body=json.dumps(data),\n Bucket=f\"mt-lambda-notifications-{settings.ENV_TYPE}\",\n Key=handler)\n","sub_path":"proj_common/notifications/notifier.py","file_name":"notifier.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"491441898","text":"__author__ = 'Axel V'\n\nimport json\nimport os\nfrom random import choice\nfrom progressbar import *\n\ndef main():\n json_lst = [x for x in os.listdir('.') if x[-5:] == '.json' and x != 'data.json']\n maxLength = len(json_lst)\n pbar = ProgressBar(maxval=maxLength, widgets=[Percentage(), ' ', Bar(), ' ', SimpleProgress(), ' | ', ETA()]).start()\n\n while len(json_lst) != 1:\n x = choice(json_lst)\n y = choice(json_lst)\n\n if y != x and x:\n combine(x, y)\n os.remove(json_lst[json_lst.index(y)])\n\n json_lst = [x for x in os.listdir('.') if x[-5:] == '.json' and x != 'data.json']\n pbar.update(maxLength - len(json_lst))\n\n if 'data.json' in os.listdir('.'):\n os.remove('data.json')\n os.rename(json_lst[0], 'data.json')\n pbar.finish()\n\ndef combine(x_path, y_path):\n x = json.loads(open(x_path, 'r').read())\n y 
= json.loads(open(y_path, 'r').read())\n z = merge(x, y)\n write_json(z, x_path)\n\ndef merge(a, b, path=None):\n \"merges b into a\"\n if path is None:\n path = []\n for key in b:\n if key in a:\n if isinstance(a[key], dict) and isinstance(b[key], dict):\n merge(a[key], b[key], path + [str(key)])\n elif a[key] == b[key]:\n pass\n else:\n print('Conflict at %s' % '.'.join(path + [str(key)]))\n a[key] = b[key]\n #raise Exception('Conflict at %s' % '.'.join(path + [str(key)]))\n else:\n a[key] = b[key]\n return a\n\ndef write_json(dct, outfile):\n with open(outfile, 'w') as f:\n json.dump(dct, f, indent=4, sort_keys=True)\n\nmain()\n","sub_path":"Semantic Web/Final Project/combiner.py","file_name":"combiner.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"201774986","text":"# codeing: utf-8\n\"\"\"\nCopyright (c) 2019 The MITRE Corporation.\n\"\"\"\n\n__all__ = ['BertTransVAE', 'PureTransformerVAE']\n\nimport math\nimport os\nimport numpy as np\n\nimport gluonnlp as nlp\nimport mxnet as mx\nfrom mxnet import gluon\nfrom mxnet.gluon import nn\nfrom mxnet.gluon.block import HybridBlock, Block\nfrom gluonnlp.model import TransformerEncoderCell\nfrom gluonnlp.loss import LabelSmoothing\nfrom tmnt.distributions import LogisticGaussianLatentDistribution, GaussianLatentDistribution\nfrom tmnt.distributions import GaussianUnitVarLatentDistribution, HyperSphericalLatentDistribution\n\nclass PureTransformerVAE(Block):\n\n def __init__(self, vocabulary, emb_dim, latent_distrib='vmf', num_units=512, hidden_size=512, num_heads=4,\n n_latent=256, max_sent_len=64, transformer_layers=6, label_smoothing_epsilon=0.0,\n kappa = 100.0,\n batch_size=16, kld=0.1, wd_temp=0.01,\n ctx = mx.cpu(),\n prefix=None, params=None):\n super(PureTransformerVAE, self).__init__(prefix=prefix, params=params)\n self.kld_wt = kld\n self.n_latent = n_latent\n self.model_ctx = ctx\n self.max_sent_len = 
max_sent_len\n self.vocabulary = vocabulary\n self.batch_size = batch_size\n self.wd_embed_dim = emb_dim\n self.vocab_size = len(vocabulary.idx_to_token)\n self.latent_distrib = latent_distrib\n self.num_units = num_units\n self.hidden_size = hidden_size \n self.num_heads = num_heads\n self.transformer_layers = transformer_layers\n self.label_smoothing_epsilon = label_smoothing_epsilon\n self.kappa = kappa\n with self.name_scope():\n if latent_distrib == 'logistic_gaussian':\n self.latent_dist = LogisticGaussianLatentDistribution(n_latent, ctx, dr=0.0)\n elif latent_distrib == 'vmf':\n self.latent_dist = HyperSphericalLatentDistribution(n_latent, kappa=kappa, ctx=self.model_ctx, dr=0.0)\n elif latent_distrib == 'gaussian':\n self.latent_dist = GaussianLatentDistribution(n_latent, ctx, dr=0.0)\n elif latent_distrib == 'gaussian_unitvar':\n self.latent_dist = GaussianUnitVarLatentDistribution(n_latent, ctx, dr=0.0, var=0.05)\n else:\n raise Exception(\"Invalid distribution ==> {}\".format(latent_distrib))\n self.embedding = nn.Embedding(self.vocab_size, self.wd_embed_dim)\n self.encoder = TransformerEncoder(self.wd_embed_dim, self.num_units, hidden_size=hidden_size, num_heads=num_heads,\n n_layers=transformer_layers, n_latent=n_latent, sent_size = max_sent_len,\n batch_size = batch_size, ctx = ctx)\n self.decoder = TransformerDecoder(wd_embed_dim=self.wd_embed_dim, num_units=self.num_units, hidden_size=hidden_size, num_heads=num_heads,\n n_layers=transformer_layers, n_latent=n_latent, sent_size = max_sent_len,\n batch_size = batch_size, ctx = ctx)\n #self.out_embedding = gluon.nn.Embedding(input_dim=self.vocab_size, output_dim=self.wd_embed_dim)\n self.inv_embed = InverseEmbed(batch_size, max_sent_len, self.wd_embed_dim, temp=wd_temp, ctx=self.model_ctx, params = self.embedding.params)\n self.ce_loss_fn = mx.gluon.loss.SoftmaxCrossEntropyLoss(axis=-1, from_logits=True, sparse_label=True)\n #self.label_smoothing = LabelSmoothing(epsilon=label_smoothing_epsilon, 
units=self.vocab_size)\n self.embedding.initialize(mx.init.Xavier(magnitude=2.34), ctx=ctx)\n #self.out_embedding.initialize(mx.init.Uniform(0.1), ctx=ctx) \n #self.inv_embed.initialize(mx.init.Xavier(magnitude=2.34), ctx=ctx)\n if self.vocabulary.embedding:\n #self.out_embedding.weight.set_data(self.vocabulary.embedding.idx_to_vec)\n self.embedding.weight.set_data(self.vocabulary.embedding.idx_to_vec)\n \n\n def __call__(self, wp_toks):\n return super(PureTransformerVAE, self).__call__(wp_toks)\n\n def set_kl_weight(self, epoch, max_epochs):\n burn_in = int(max_epochs / 10)\n eps = 1e-6\n if epoch > burn_in:\n self.kld_wt = ((epoch - burn_in) / (max_epochs - burn_in)) + eps\n else:\n self.kld_wt = eps\n return self.kld_wt\n\n def encode(self, toks):\n embedded = self.embedding(toks)\n enc = self.encoder(embedded)\n return self.latent_dist.mu_encoder(enc)\n\n def forward(self, toks):\n embedded = self.embedding(toks)\n enc = self.encoder(embedded)\n z, KL = self.latent_dist(enc, self.batch_size)\n y = self.decoder(z)\n prob_logits = self.inv_embed(y)\n log_prob = mx.nd.log_softmax(prob_logits)\n recon_loss = self.ce_loss_fn(log_prob, toks)\n kl_loss = (KL * self.kld_wt)\n loss = recon_loss + kl_loss\n return loss, recon_loss, kl_loss, log_prob\n \n\nclass BertTransVAE(Block):\n def __init__(self, bert_base, latent_distrib='vmf',\n wd_embed_dim=300, num_units=512, n_latent=256, max_sent_len=64, transformer_layers=6,\n kappa = 100.0,\n batch_size=16, kld=0.1, wd_temp=0.01, ctx = mx.cpu(),\n increasing=True, decreasing=False,\n prefix=None, params=None):\n super(BertTransVAE, self).__init__(prefix=prefix, params=params)\n self.kld_wt = kld\n self.bert = bert_base\n self.n_latent = n_latent\n self.model_ctx = ctx\n self.max_sent_len = max_sent_len\n self.batch_size = batch_size\n self.wd_embed_dim = wd_embed_dim\n self.latent_distrib = latent_distrib\n with self.name_scope():\n if latent_distrib == 'logistic_gaussian':\n self.latent_dist = 
LogisticGaussianLatentDistribution(n_latent, ctx, dr=0.0)\n elif latent_distrib == 'vmf':\n self.latent_dist = HyperSphericalLatentDistribution(n_latent, kappa=kappa, dr=0.0, ctx=self.model_ctx)\n elif latent_distrib == 'gaussian':\n self.latent_dist = GaussianLatentDistribution(n_latent, ctx, dr=0.0)\n elif latent_distrib == 'gaussian_unitvar':\n self.latent_dist = GaussianUnitVarLatentDistribution(n_latent, ctx, dr=0.0)\n else:\n raise Exception(\"Invalid distribution ==> {}\".format(latent_distrib))\n self.decoder = TransformerDecoder(wd_embed_dim=wd_embed_dim, num_units=num_units,\n n_layers=transformer_layers, n_latent=n_latent, sent_size = max_sent_len,\n batch_size = batch_size, ctx = ctx)\n self.vocab_size = self.bert.word_embed[0].params.get('weight').shape[0]\n self.out_embedding = gluon.nn.Embedding(input_dim=self.vocab_size, output_dim=wd_embed_dim, weight_initializer=mx.init.Uniform(0.1))\n self.inv_embed = InverseEmbed(batch_size, max_sent_len, self.wd_embed_dim, temp=wd_temp, ctx=self.model_ctx, params = self.out_embedding.params)\n self.ce_loss_fn = mx.gluon.loss.SoftmaxCrossEntropyLoss(axis=-1, from_logits=True)\n\n def __call__(self, wp_toks, tok_types, valid_length=None):\n return super(BertTransVAE, self).__call__(wp_toks, tok_types, valid_length)\n\n def set_kl_weight(self, epoch, max_epochs):\n burn_in = int(max_epochs / 10)\n eps = 1e-6\n if epoch > burn_in:\n self.kld_wt = ((epoch - burn_in) / (max_epochs - burn_in)) + eps\n else:\n self.kld_wt = eps\n return self.kld_wt\n\n\n def forward(self, wp_toks, tok_types, valid_length=None):\n _, pooler_out_bert = self.bert(wp_toks, tok_types, valid_length)\n \n z, KL = self.latent_dist(pooler_out_bert, self.batch_size)\n y = self.decoder(z)\n \n ## does a matrix mult: rec_y = (32, 64, 300), shape mm = (32, 300, 25002)\n prob_logits = self.inv_embed(y)\n log_prob = mx.nd.log_softmax(prob_logits)\n recon_loss = self.ce_loss_fn(log_prob, wp_toks)\n kl_loss = (KL * self.kld_wt)\n loss = recon_loss + 
kl_loss\n return loss, recon_loss, kl_loss, log_prob\n \n\nclass TransformerDecoder(HybridBlock):\n def __init__(self, wd_embed_dim, num_units, hidden_size=512, num_heads=4, n_layers=6, n_latent=256, sent_size = 30,\n scale_embed=True, batch_size=8, ctx=mx.cpu()):\n super(TransformerDecoder, self).__init__()\n self._batch_size = batch_size\n self._sent_size = sent_size\n self._n_latent = n_latent\n self._wd_embed_dim = wd_embed_dim\n self._num_units = num_units\n with self.name_scope():\n self.projection = nn.Dense(in_units = n_latent, units = num_units)\n self.trans_block = TransformerBlock(\n attention_cell = 'multi_head',\n num_layers = n_layers,\n units = num_units, \n hidden_size = hidden_size,\n max_length = sent_size,\n num_heads = num_heads,\n scaled = True,\n dropout = 0.0,\n use_residual=True, output_attention=False,\n scale_embed=scale_embed,\n ctx = ctx)\n self.out_projection = nn.Dense(in_units = num_units, units = wd_embed_dim, flatten=False)\n\n def __call__(self, x):\n return super(TransformerDecoder, self).__call__(x)\n\n\n def hybrid_forward(self, F, x):\n ## x is shape (N, n_latent)\n x = self.projection(x) ## Map n_latent ==> wd_embed_dim\n x = F.expand_dims(x, 1) ## (N, 1, wd_embed_dim)\n x = F.broadcast_to(x, (self._batch_size, self._sent_size, self._num_units))\n y, _ = self.trans_block(x)\n yp = self.out_projection(y)\n return yp\n\n\nclass TransformerEncoder(HybridBlock):\n def __init__(self, wd_embed_dim, num_units, hidden_size=512, num_heads=4, n_layers=6, n_latent=256, sent_size = 30,\n scale_embed=True, batch_size=8, ctx=mx.cpu()):\n super(TransformerEncoder, self).__init__()\n self._sent_size = sent_size\n self._n_latent = n_latent\n with self.name_scope():\n self.in_projection = nn.Dense(in_units = wd_embed_dim, units = num_units, flatten=False)\n self.trans_block = TransformerBlock(\n attention_cell = 'multi_head',\n num_layers = n_layers,\n units = num_units, \n hidden_size = hidden_size,\n max_length = sent_size,\n num_heads = 
num_heads,\n scaled = True,\n dropout = 0.0,\n use_residual=True, output_attention=False,\n scale_embed=scale_embed,\n ctx = ctx)\n self.projection = nn.Dense(in_units = num_units, units = n_latent)\n\n def __call__(self, x):\n return super(TransformerEncoder, self).__call__(x)\n\n\n def hybrid_forward(self, F, x):\n x = self.in_projection(x)\n y, _ = self.trans_block(x)\n first = y[:,0,:]\n encoding = self.projection(first)\n return encoding\n \n\ndef _position_encoding_init(max_length, dim):\n \"\"\"Init the sinusoid position encoding table \"\"\"\n position_enc = np.arange(max_length).reshape((-1, 1)) \\\n / (np.power(10000, (2. / dim) * np.arange(dim).reshape((1, -1))))\n # Apply the cosine to even columns and sin to odds.\n position_enc[:, 0::2] = np.sin(position_enc[:, 0::2]) # dim 2i\n position_enc[:, 1::2] = np.cos(position_enc[:, 1::2]) # dim 2i+1\n return position_enc\n\ndef _get_layer_norm(use_bert, units):\n return nn.LayerNorm(in_channels=units)\n \n \nclass TransformerBlock(HybridBlock):\n \"\"\"Transformer Encoder used as Decoder for BERT => Transformer Encoder/Decoder\n\n Parameters\n ----------\n attention_cell : AttentionCell or str, default 'multi_head'\n Arguments of the attention cell.\n Can be 'multi_head', 'scaled_luong', 'scaled_dot', 'dot', 'cosine', 'normed_mlp', 'mlp'\n num_layers : int\n Number of attention layers.\n units : int\n Number of units for the output.\n hidden_size : int\n number of units in the hidden layer of position-wise feed-forward networks\n max_length : int\n Maximum length of the input sequence\n num_heads : int\n Number of heads in multi-head attention\n scaled : bool\n Whether to scale the softmax input by the sqrt of the input dimension\n in multi-head attention\n dropout : float\n Dropout probability of the attention probabilities.\n use_residual : bool\n output_attention: bool, default False\n Whether to output the attention weights\n output_all_encodings: bool, default False\n Whether to output encodings of all 
encoder's cells, or only the last one\n weight_initializer : str or Initializer\n Initializer for the input weights matrix, used for the linear\n transformation of the inputs.\n bias_initializer : str or Initializer\n Initializer for the bias vector.\n positional_weight: str, default 'sinusoidal'\n Type of positional embedding. Can be 'sinusoidal', 'learned'.\n If set to 'sinusoidal', the embedding is initialized as sinusoidal values and keep constant.\n use_bert_encoder : bool, default False\n Whether to use BERTEncoderCell and BERTLayerNorm. Set to True for pre-trained BERT model\n use_layer_norm_before_dropout: bool, default False\n Before passing embeddings to attention cells, whether to perform `layernorm -> dropout` or\n `dropout -> layernorm`. Set to True for pre-trained BERT models.\n scale_embed : bool, default True\n Scale the input embeddings by sqrt(embed_size). Set to False for pre-trained BERT models.\n prefix : str, default 'rnn_'\n Prefix for name of `Block`s\n (and name of weight if params is `None`).\n params : Parameter or None\n Container for weight sharing between cells.\n Created if `None`.\n \"\"\"\n def __init__(self, attention_cell='multi_head', num_layers=2,\n units=512, hidden_size=2048, max_length=50,\n num_heads=4, scaled=True, dropout=0.0,\n use_residual=True, output_attention=False, output_all_encodings=False,\n weight_initializer=None, bias_initializer='zeros',\n positional_weight='sinusoidal', use_bert_encoder=False,\n use_layer_norm_before_dropout=False, scale_embed=True, ctx=mx.cpu(),\n prefix=None, params=None):\n super(TransformerBlock, self).__init__(prefix=prefix, params=params)\n assert units % num_heads == 0,\\\n 'In TransformerEncoder, The units should be divided exactly ' \\\n 'by the number of heads. 
Received units={}, num_heads={}' \\\n .format(units, num_heads)\n self._ctx = ctx\n self._num_layers = num_layers\n self._max_length = max_length\n self._num_heads = num_heads\n self._units = units\n self._hidden_size = hidden_size\n self._output_attention = output_attention\n self._output_all_encodings = output_all_encodings\n self._dropout = dropout\n self._use_residual = use_residual\n self._scaled = scaled\n self._use_layer_norm_before_dropout = use_layer_norm_before_dropout\n self._scale_embed = scale_embed\n with self.name_scope():\n self.dropout_layer = nn.Dropout(dropout)\n self.layer_norm = _get_layer_norm(use_bert_encoder, units)\n self.position_weight = self._get_positional(positional_weight, max_length, units,\n weight_initializer)\n self.transformer_cells = nn.HybridSequential()\n for i in range(num_layers):\n cell = self._get_encoder_cell(use_bert_encoder, units, hidden_size, num_heads,\n attention_cell, weight_initializer, bias_initializer,\n dropout, use_residual, scaled, output_attention, i)\n self.transformer_cells.add(cell)\n\n \n def _get_positional(self, weight_type, max_length, units, initializer):\n if weight_type == 'sinusoidal':\n encoding = _position_encoding_init(max_length, units)\n position_weight = self.params.get_constant('const', encoding)\n elif weight_type == 'learned':\n position_weight = self.params.get('position_weight', shape=(max_length, units),\n init=initializer)\n else:\n raise ValueError('Unexpected value for argument position_weight: %s'%(position_weight))\n return position_weight\n\n def _get_encoder_cell(self, use_bert, units, hidden_size, num_heads, attention_cell,\n weight_initializer, bias_initializer, dropout, use_residual,\n scaled, output_attention, i):\n return TransformerEncoderCell(units=units, hidden_size=hidden_size,\n num_heads=num_heads, attention_cell=attention_cell,\n weight_initializer=weight_initializer,\n bias_initializer=bias_initializer,\n dropout=dropout, use_residual=use_residual,\n scaled=scaled, 
output_attention=output_attention,\n prefix='transformer%d_'%i)\n\n def __call__(self, inputs, states=None): #pylint: disable=arguments-differ\n return super(TransformerBlock, self).__call__(inputs, states)\n \n\n def forward(self, inputs, states=None): # pylint: disable=arguments-differ\n length = inputs.shape[1]\n if self._scale_embed:\n inputs = inputs * math.sqrt(inputs.shape[-1])\n steps = mx.nd.arange(length, ctx=inputs.context)\n if states is None:\n states = [steps]\n else:\n states.append(steps)\n step_output, additional_outputs = super(TransformerBlock, self).forward(inputs, states)\n return step_output, additional_outputs\n\n\n def hybrid_forward(self, F, inputs, states=None, position_weight=None):\n # pylint: disable=arguments-differ\n if states is not None:\n steps = states[-1]\n positional_embed = F.Embedding(steps, position_weight, self._max_length, self._units)\n inputs = F.broadcast_add(inputs, F.expand_dims(positional_embed, axis=0))\n if self._use_layer_norm_before_dropout:\n inputs = self.layer_norm(inputs)\n inputs = self.dropout_layer(inputs)\n else:\n inputs = self.dropout_layer(inputs)\n inputs = self.layer_norm(inputs)\n outputs = inputs\n\n all_encodings_outputs = []\n additional_outputs = []\n #batch_size = inputs.shape[0]\n \n for i,cell in enumerate(self.transformer_cells):\n outputs, attention_weights = cell(inputs, None)\n inputs = outputs\n if self._output_all_encodings:\n all_encodings_outputs.append(outputs)\n\n if self._output_attention:\n additional_outputs.append(attention_weights)\n\n if self._output_all_encodings:\n return all_encodings_outputs, additional_outputs\n else:\n return outputs, additional_outputs\n \n\n \nclass InverseEmbed(HybridBlock):\n def __init__(self, batch_size, sent_len, emb_size, temp=0.01, ctx=mx.cpu(), prefix=None, params=None):\n self.batch_size = batch_size\n self.max_sent_len = sent_len\n self.emb_size = emb_size\n self.temp = temp\n self.model_ctx = ctx\n super(InverseEmbed, 
self).__init__(prefix=prefix, params=params)\n\n\n def __call__(self, x):\n return super(InverseEmbed, self).__call__(x)\n\n\n def set_temp(self, epoch, max_epochs):\n # temp ranges from 1.01 to 0.01\n self.temp = (max_epochs - epoch) / max_epochs + 0.01\n return self.temp\n \n\n def hybrid_forward(self, F, x):\n if F is mx.ndarray:\n psym = self.params.get('weight').data()\n else:\n psym = self.params.get('weight').var()\n ## Must ensure that both the embedding weights and the inputs here are normalized\n w = F.expand_dims(F.transpose(psym), 0)\n eps_arr = mx.nd.full((1, 1), 1e-12, ctx=self.model_ctx) \n w_norm = F.norm(w, axis=1, keepdims=True)\n w_norm = F.broadcast_div(w, F.broadcast_maximum(w_norm, eps_arr))\n\n x_norm = F.norm(x, axis=-1, keepdims=True)\n x_norm = F.broadcast_div(x, F.broadcast_maximum(x_norm, eps_arr))\n\n mm = F.broadcast_axes(w_norm, axis=0, size=self.batch_size)\n prob_logits = F.linalg_gemm2(x_norm, mm) / self.temp ## temperature param that should be an argument\n return prob_logits\n \n","sub_path":"tmnt/seq_vae/trans_seq_models.py","file_name":"trans_seq_models.py","file_ext":"py","file_size_in_byte":21063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"518568255","text":"'''\nCopyright 2017-present, Airbnb Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n'''\n\n# command: nosetests -v -s test/unit/\n# specific test: nosetests -v -s test/unit/file.py:TestStreamPayload.test_name\n\nimport 
base64\nimport json\n\nfrom nose.tools import assert_equal, assert_not_equal\n\nimport stream_alert.rule_processor.classifier as sa_classifier\n\nfrom stream_alert.rule_processor.classifier import StreamPayload, StreamClassifier\nfrom stream_alert.rule_processor.pre_parsers import StreamPreParsers\nfrom stream_alert.rule_processor.config import load_config\n\n\nclass TestStreamPayload(object):\n \"\"\"Test class for StreamPayload\"\"\"\n def __init__(self):\n self.config = {}\n\n @classmethod\n def setup_class(cls):\n \"\"\"Setup the class before any methods\"\"\"\n cls.env = {\n 'lambda_region': 'us-east-1',\n 'account_id': '123456789012',\n 'lambda_function_name': 'test_kinesis_stream',\n 'lambda_alias': 'production'\n }\n\n @classmethod\n def teardown_class(cls):\n \"\"\"Teardown the class after all methods\"\"\"\n cls.env = None\n\n def setup(self):\n \"\"\"Setup before each method\"\"\"\n self.config = load_config('test/unit/conf')\n\n def teardown(self):\n \"\"\"Teardown after each method\"\"\"\n self.config = None\n\n @staticmethod\n def pre_parse_kinesis(payload):\n \"\"\"Call through to the pre_parse_kinesis on StreamPreParsers\"\"\"\n return StreamPreParsers.pre_parse_kinesis(payload.raw_record)\n\n @staticmethod\n def make_kinesis_record(kinesis_stream, kinesis_data):\n \"\"\"Helper for creating the kinesis payload\"\"\"\n raw_record = {\n 'eventSource': 'aws:kinesis',\n 'eventSourceARN': 'arn:aws:kinesis:us-east-1:123456789012:stream/{}'\n .format(kinesis_stream),\n 'kinesis': {\n 'data': base64.b64encode(kinesis_data)\n }\n }\n return raw_record\n\n def payload_generator(self, kinesis_stream, kinesis_data):\n \"\"\"Given raw data, return a payload object\"\"\"\n kinesis_record = self.make_kinesis_record(kinesis_stream=kinesis_stream,\n kinesis_data=kinesis_data)\n\n return StreamPayload(raw_record=kinesis_record)\n\n def test_refresh_record(self):\n \"\"\"Payload Record Refresh\"\"\"\n kinesis_data = json.dumps({\n 'key3': 'key3data',\n 'key2': 
'key2data',\n 'key1': 'key1data'\n })\n payload = self.payload_generator(kinesis_stream='test_kinesis_stream',\n kinesis_data=kinesis_data)\n\n # generate a second record\n new_kinesis_data = json.dumps({\n 'key4': 'key4data',\n 'key5': 'key5data',\n 'key6': 'key6data'\n })\n second_record = self.make_kinesis_record(kinesis_stream='test_kinesis_stream',\n kinesis_data=new_kinesis_data)\n payload.refresh_record(second_record)\n\n # check newly loaded record\n assert_equal(payload.raw_record, second_record)\n\n def test_map_source_1(self):\n \"\"\"Payload Source Mapping 1\"\"\"\n data_encoded = base64.b64encode('test_map_source data')\n payload = self.payload_generator(kinesis_stream='test_kinesis_stream',\n kinesis_data=data_encoded)\n\n classifier = StreamClassifier(config=self.config)\n classifier.map_source(payload)\n\n test_kinesis_stream_logs = {\n 'test_log_type_json',\n 'test_log_type_json_2',\n 'test_log_type_json_nested',\n 'test_log_type_json_nested_with_data',\n 'test_log_type_csv',\n 'test_log_type_csv_nested',\n 'test_log_type_kv_auditd'\n }\n metadata = classifier._log_metadata()\n\n # service, entity, metadata test\n assert_equal(payload.service, 'kinesis')\n assert_equal(payload.entity, 'test_kinesis_stream')\n assert_equal(set(metadata.keys()), test_kinesis_stream_logs)\n\n def test_map_source_2(self):\n \"\"\"Payload Source Mapping 2\"\"\"\n data_encoded = base64.b64encode('test_map_source_data_2')\n payload = self.payload_generator(kinesis_stream='test_stream_2',\n kinesis_data=data_encoded)\n\n classifier = StreamClassifier(config=self.config)\n classifier.map_source(payload)\n\n test_stream_2_logs = {\n 'test_multiple_schemas:01',\n 'test_multiple_schemas:02',\n 'test_log_type_json_2',\n 'test_log_type_json_nested_osquery',\n 'test_log_type_syslog'\n }\n metadata = classifier._log_metadata()\n\n # service, entity, metadata test\n assert_equal(payload.service, 'kinesis')\n assert_equal(payload.entity, 'test_stream_2')\n 
assert_equal(set(metadata.keys()), test_stream_2_logs)\n\n def test_multiple_schema_matching(self):\n \"\"\"Test Matching Multiple Schemas with Log Patterns\"\"\"\n kinesis_data = json.dumps({\n 'name': 'file added test',\n 'identifier': 'host4.this.test',\n 'time': 'Jan 01 2017',\n 'type': 'lol_file_added_event_test',\n 'message': 'bad_001.txt was added'\n })\n # Make sure support for multiple schema matching is ON\n sa_classifier.SUPPORT_MULTIPLE_SCHEMA_MATCHING = True\n\n payload = self.payload_generator(kinesis_stream='test_stream_2',\n kinesis_data=kinesis_data)\n classifier = StreamClassifier(config=self.config)\n classifier.map_source(payload)\n\n data = self.pre_parse_kinesis(payload)\n valid_parses = classifier._process_log_schemas(payload, data)\n\n assert_equal(len(valid_parses), 2)\n assert_equal(valid_parses[0].log_name, 'test_multiple_schemas:01')\n assert_equal(valid_parses[1].log_name, 'test_multiple_schemas:02')\n valid_parse = classifier._check_valid_parse(valid_parses)\n\n assert_equal(valid_parse.log_name, 'test_multiple_schemas:01')\n\n def test_single_schema_matching(self):\n \"\"\"Test Matching One Schema with Log Patterns\"\"\"\n kinesis_data = json.dumps({\n 'name': 'file removal test',\n 'identifier': 'host4.this.test.also',\n 'time': 'Jan 01 2017',\n 'type': 'file_removed_event_test',\n 'message': 'bad_001.txt was removed'\n })\n # Make sure support for multiple schema matching is OFF\n sa_classifier.SUPPORT_MULTIPLE_SCHEMA_MATCHING = False\n\n payload = self.payload_generator(kinesis_stream='test_stream_2',\n kinesis_data=kinesis_data)\n classifier = StreamClassifier(config=self.config)\n classifier.map_source(payload)\n\n data = self.pre_parse_kinesis(payload)\n valid_parses = classifier._process_log_schemas(payload, data)\n\n assert_equal(len(valid_parses), 1)\n valid_parse = classifier._check_valid_parse(valid_parses)\n assert_equal(valid_parse.log_name, 'test_multiple_schemas:02')\n\n def 
test_classify_record_kinesis_json_optional(self):\n \"\"\"Payload Classify JSON - optional fields\"\"\"\n kinesis_data = json.dumps({\n 'key1': [\n {\n 'test': 1,\n 'test2': 2\n },\n {\n 'test3': 3,\n 'test4': 4\n }\n ],\n 'key2': 'more sample data',\n 'key3': '1',\n 'key10': {\n 'test-field': 1,\n 'test-field2': 2\n }\n })\n payload = self.payload_generator(kinesis_stream='test_kinesis_stream',\n kinesis_data=kinesis_data)\n classifier = StreamClassifier(config=self.config)\n classifier.map_source(payload)\n\n # pre parse and classify\n data = self.pre_parse_kinesis(payload)\n classifier.classify_record(payload, data)\n\n # valid record test\n assert_equal(payload.valid, True)\n assert_equal(type(payload.records[0]), dict)\n\n # log type test\n assert_equal(payload.log_source, 'test_log_type_json')\n\n # payload type test\n assert_equal(payload.type, 'json')\n assert_not_equal(payload.type, 'csv')\n\n # record value tests\n assert_equal(len(payload.records[0]['key1']), 2)\n assert_equal(payload.records[0]['key3'], 1)\n assert_equal(payload.records[0]['key1'][1]['test4'], 4)\n\n # optional field tests\n assert_equal(payload.records[0]['key11'], 0.0)\n assert_equal(payload.records[0]['key9'], False)\n assert_equal(len(payload.records[0]['key10']), 2)\n\n # record type tests\n assert_equal(type(payload.records[0]['key1']), list)\n assert_equal(type(payload.records[0]['key2']), str)\n assert_equal(type(payload.records[0]['key3']), int)\n\n def test_classify_record_kinesis_json(self):\n \"\"\"Payload Classify JSON - boolean, float, integer types\"\"\"\n kinesis_data = json.dumps({\n 'key4': 'true',\n 'key5': '10.001',\n 'key6': '10',\n 'key7': False\n })\n payload = self.payload_generator(kinesis_stream='test_kinesis_stream',\n kinesis_data=kinesis_data)\n classifier = StreamClassifier(config=self.config)\n classifier.map_source(payload)\n\n # pre parse and classify\n data = self.pre_parse_kinesis(payload)\n classifier.classify_record(payload, data)\n\n # valid record 
test\n assert_equal(payload.valid, True)\n assert_equal(type(payload.records[0]), dict)\n\n # log type test\n assert_equal(payload.log_source, 'test_log_type_json_2')\n\n # payload type test\n assert_equal(payload.type, 'json')\n assert_not_equal(payload.type, 'csv')\n\n # record type test\n assert_equal(payload.records[0]['key4'], True)\n assert_equal(payload.records[0]['key5'], 10.001)\n assert_equal(payload.records[0]['key6'], 10)\n assert_equal(payload.records[0]['key7'], False)\n\n def test_classify_record_kinesis_nested_json(self):\n \"\"\"Payload Classify Nested JSON\"\"\"\n kinesis_data = json.dumps({\n 'date': 'Jan 01 2017',\n 'unixtime': '1485556524',\n 'host': 'my-host-name',\n 'data': {\n 'key1': 'test',\n 'key2': 'one'\n }\n })\n payload = self.payload_generator(kinesis_stream='test_kinesis_stream',\n kinesis_data=kinesis_data)\n classifier = StreamClassifier(config=self.config)\n classifier.map_source(payload)\n\n data = self.pre_parse_kinesis(payload)\n classifier.classify_record(payload, data)\n\n # valid record test\n assert_equal(payload.valid, True)\n assert_equal(type(payload.records[0]), dict)\n\n # log type test\n assert_equal(payload.log_source, 'test_log_type_json_nested')\n\n # payload type test\n assert_equal(payload.type, 'json')\n assert_not_equal(payload.type, 'csv')\n\n # record type test\n assert_equal(type(payload.records[0]['date']), str)\n assert_equal(type(payload.records[0]['unixtime']), int)\n assert_equal(type(payload.records[0]['data']), dict)\n\n # record value test\n assert_equal(payload.records[0]['date'], 'Jan 01 2017')\n assert_equal(payload.records[0]['data']['key1'], 'test')\n\n def test_classify_record_kinesis_nested_json_osquery(self):\n \"\"\"Payload Classify JSON osquery\"\"\"\n kinesis_data = json.dumps({\n 'name': 'testquery',\n 'hostIdentifier': 'host1.test.prod',\n 'calendarTime': 'Jan 01 2017',\n 'unixTime': '1485556524',\n 'columns': {\n 'key1': 'test',\n 'key2': 'one'\n },\n 'action': 'added',\n 
'decorations': {\n 'role': 'web-server',\n 'env': 'production',\n 'cluster': 'eu-east',\n 'number': '100'\n }\n })\n payload = self.payload_generator(kinesis_stream='test_stream_2',\n kinesis_data=kinesis_data)\n\n classifier = StreamClassifier(config=self.config)\n classifier.map_source(payload)\n\n data = self.pre_parse_kinesis(payload)\n classifier.classify_record(payload, data)\n\n # valid record test\n assert_equal(payload.valid, True)\n assert_equal(type(payload.records[0]), dict)\n\n # log type test\n assert_equal(payload.log_source, 'test_log_type_json_nested_osquery')\n\n # payload type test\n assert_equal(payload.type, 'json')\n assert_not_equal(payload.type, 'csv')\n\n # record type test\n assert_equal(type(payload.records[0]['hostIdentifier']), str)\n assert_equal(type(payload.records[0]['unixTime']), int)\n assert_equal(type(payload.records[0]['columns']), dict)\n assert_equal(type(payload.records[0]['decorations']), dict)\n\n # record value test\n assert_equal(payload.records[0]['unixTime'], 1485556524)\n assert_equal(payload.records[0]['columns']['key1'], 'test')\n assert_equal(payload.records[0]['decorations']['cluster'], 'eu-east')\n assert_equal(payload.records[0]['decorations']['number'], 100)\n assert_equal(payload.records[0]['log_type'], '')\n\n def test_classify_record_kinesis_nested_json_missing_subkey_fields(self):\n \"\"\"Payload Classify Nested JSON Missing Subkeys\"\"\"\n kinesis_data = json.dumps({\n 'name': 'testquery',\n 'hostIdentifier': 'host1.test.prod',\n 'calendarTime': 'Jan 01 2017',\n 'unixTime': '12321412321',\n 'columns': {\n 'key1': 'test',\n 'key2': 'one'\n },\n 'action': 'added',\n 'decorations': {\n 'role': 'web-server',\n 'env': 'production',\n # 'cluster': 'eu-east',\n 'number': '100'\n }\n })\n payload = self.payload_generator(kinesis_stream='test_stream_2',\n kinesis_data=kinesis_data)\n\n classifier = StreamClassifier(config=self.config)\n classifier.map_source(payload)\n\n data = self.pre_parse_kinesis(payload)\n 
classifier.classify_record(payload, data)\n\n # invalid record test\n assert_equal(payload.valid, False)\n assert_equal(payload.records, None)\n\n def test_classify_record_kinesis_nested_json_with_data(self):\n \"\"\"Payload Classify Nested JSON Generic\"\"\"\n kinesis_data = json.dumps({\n 'date': 'Jan 01 2017',\n 'unixtime': '1485556524',\n 'host': 'host1',\n 'application': 'myapp',\n 'environment': 'development',\n 'data': {\n 'category': 'test',\n 'type': '1',\n 'source': 'dev-app-1'\n }\n })\n payload = self.payload_generator(kinesis_stream='test_kinesis_stream',\n kinesis_data=kinesis_data)\n\n classifier = StreamClassifier(config=self.config)\n classifier.map_source(payload)\n\n data = self.pre_parse_kinesis(payload)\n classifier.classify_record(payload, data)\n\n # valid record test\n assert_equal(payload.valid, True)\n assert_equal(type(payload.records[0]), dict)\n\n # log type test\n assert_equal(payload.log_source, 'test_log_type_json_nested_with_data')\n\n # payload type test\n assert_equal(payload.type, 'json')\n assert_not_equal(payload.type, 'csv')\n\n # record type test\n assert_equal(type(payload.records[0]['date']), str)\n assert_equal(type(payload.records[0]['unixtime']), int)\n assert_equal(type(payload.records[0]['data']), dict)\n assert_equal(type(payload.records[0]['data']['type']), int)\n assert_equal(type(payload.records[0]['data']['category']), str)\n\n # record value test\n assert_equal(payload.records[0]['date'], 'Jan 01 2017')\n assert_equal(payload.records[0]['data']['source'], 'dev-app-1')\n\n def test_classify_record_kinesis_csv(self):\n \"\"\"Payload Classify CSV\"\"\"\n csv_data = 'jan102017,0100,host1,thisis some data with keyword1 in it'\n payload = self.payload_generator(kinesis_stream='test_kinesis_stream',\n kinesis_data=csv_data)\n\n classifier = StreamClassifier(config=self.config)\n classifier.map_source(payload)\n\n data = self.pre_parse_kinesis(payload)\n classifier.classify_record(payload, data)\n\n # valid record test\n 
assert_equal(payload.valid, True)\n assert_equal(type(payload.records[0]), dict)\n\n # record value tests\n assert_equal(payload.records[0]['message'],\n 'thisis some data with keyword1 in it')\n assert_equal(payload.records[0]['host'], 'host1')\n\n # type test\n assert_equal(payload.type, 'csv')\n assert_not_equal(payload.type, 'json')\n\n # log source test\n assert_equal(payload.log_source, 'test_log_type_csv')\n\n def test_classify_record_kinesis_csv_nested(self):\n \"\"\"Payload Classify Nested CSV\"\"\"\n csv_nested_data = (\n '\"Jan 10 2017\",\"1485635414\",\"host1.prod.test\",\"Corp\",'\n '\"chef,web-server,1,10,success\"'\n )\n payload = self.payload_generator(kinesis_stream='test_kinesis_stream',\n kinesis_data=csv_nested_data)\n\n classifier = StreamClassifier(config=self.config)\n classifier.map_source(payload)\n\n data = self.pre_parse_kinesis(payload)\n classifier.classify_record(payload, data)\n\n # valid record test\n assert_equal(payload.valid, True)\n assert_equal(type(payload.records[0]), dict)\n\n # record value tests\n assert_equal(payload.records[0]['date'], 'Jan 10 2017')\n assert_equal(payload.records[0]['host'], 'host1.prod.test')\n assert_equal(payload.records[0]['time'], 1485635414)\n assert_equal(payload.records[0]['message']['role'], 'web-server')\n assert_equal(payload.records[0]['message']['cluster_size'], 10)\n\n # type test\n assert_equal(payload.type, 'csv')\n assert_not_equal(payload.type, 'json')\n\n # log source test\n assert_equal(payload.log_source, 'test_log_type_csv_nested')\n\n def test_classify_record_kinesis_kv(self):\n \"\"\"Payload Classify KV\"\"\"\n auditd_test_data = (\n 'type=SYSCALL msg=audit(1364481363.243:24287): '\n 'arch=c000003e syscall=2 success=no exit=-13 a0=7fffd19c5592 a1=0 '\n 'a2=7fffd19c4b50 a3=a items=1 ppid=2686 pid=3538 auid=500 uid=500 '\n 'gid=500 euid=500 suid=500 fsuid=500 egid=500 sgid=500 fsgid=500 tty=pts0 '\n 'ses=1 comm=\"cat\" exe=\"/bin/cat\" '\n 
'subj=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 '\n 'key=\"sshd_config\" type=CWD msg=audit(1364481363.243:24287): '\n 'cwd=\"/home/shadowman\" type=PATH '\n 'msg=audit(1364481363.243:24287): item=0 name=\"/etc/ssh/sshd_config\" '\n 'inode=409248 dev=fd:00 mode=0100600 ouid=0 ogid=0 '\n 'rdev=00:00 obj=system_u:object_r:etc_t:s0'\n )\n\n payload = self.payload_generator(kinesis_stream='test_kinesis_stream',\n kinesis_data=auditd_test_data)\n\n classifier = StreamClassifier(config=self.config)\n classifier.map_source(payload)\n\n data = self.pre_parse_kinesis(payload)\n classifier.classify_record(payload, data)\n\n # valid record test\n assert_equal(payload.valid, True)\n assert_equal(type(payload.records[0]), dict)\n\n # record value tests\n assert_equal(payload.records[0]['type'], 'SYSCALL')\n assert_equal(payload.records[0]['suid'], 500)\n assert_equal(payload.records[0]['pid'], 3538)\n assert_equal(payload.records[0]['type_3'], 'PATH')\n\n # type test\n assert_equal(payload.type, 'kv')\n assert_not_equal(payload.type, 'csv')\n assert_not_equal(payload.type, 'json')\n\n def test_classify_record_syslog(self):\n \"\"\"Payload Classify Syslog\"\"\"\n test_data_1 = (\n 'Jan 26 19:35:33 vagrant-ubuntu-trusty-64 '\n 'sudo: pam_unix(sudo:session): '\n 'session opened for user root by (uid=0)'\n )\n test_data_2 = (\n \"Jan 26 12:28:06 macbook004154test authd[122]: \"\n \"Succeeded authorizing right 'com.apple.trust-settings.admin' \"\n \"by client '/usr/sbin/ocspd' [11835] for authorization created by\"\n \" '/usr/bin/security' [21322] (3,0)\"\n )\n\n fixtures = {'test_1': test_data_1, 'test_2': test_data_2}\n for name, syslog_message in fixtures.iteritems():\n payload = self.payload_generator(kinesis_stream='test_stream_2',\n kinesis_data=syslog_message)\n\n classifier = StreamClassifier(config=self.config)\n classifier.map_source(payload)\n\n data = self.pre_parse_kinesis(payload)\n classifier.classify_record(payload, data)\n\n # valid record test\n 
assert_equal(payload.valid, True)\n assert_equal(type(payload.records[0]), dict)\n\n # type test\n assert_equal(payload.type, 'syslog')\n assert_not_equal(payload.type, 'csv')\n assert_not_equal(payload.type, 'json')\n assert_not_equal(payload.type, 'kv')\n\n # record value tests\n if name == 'test_1':\n assert_equal(payload.records[0]['host'], 'vagrant-ubuntu-trusty-64')\n assert_equal(payload.records[0]['application'], 'sudo')\n assert_equal(payload.records[0]['message'], 'pam_unix(sudo:session):'\n ' session opened for user'\n ' root by (uid=0)')\n elif name == 'test_2':\n assert_equal(payload.records[0]['host'], 'macbook004154test')\n assert_equal(payload.records[0]['application'], 'authd')\n","sub_path":"test/unit/stream_alert_rule_processor/test_classifier.py","file_name":"test_classifier.py","file_ext":"py","file_size_in_byte":23189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"189859962","text":"'''\nEscreva um programa que gere um número aleatório entre 0 e 5 e peça para o usuário tentar descobrir qual foi o número gerado.\nO programa deverá escrever na tela se o usuário venceu ou perdeu.\n'''\n\nfrom random import randrange\nnumero = int(randrange(0,5))\ntentativa = int(input('Hit the number! [0 - 5]'))\nif tentativa == numero:\n print('Congratulations! You re right!')\nelse:\n print('Nooop!')","sub_path":"Curso em Vídeo/Exercícios/ex028.py","file_name":"ex028.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"12891207","text":"# -*- coding: utf-8 -*-\n# code for console Encoding difference. 
Dont' mind on it \nimport sys\nimport imp\nimport random\nimp.reload(sys)\ntry: sys.setdefaultencoding('UTF8')\nexcept Exception as E: pass\n\ntry:\n import unittest2 as unittest\nexcept ImportError:\n import unittest\nfrom popbill import *\n\nclass FaxServiceTestCase(unittest.TestCase):\n @classmethod\n def setUpClass(self):\n self.faxService = FaxService('TESTER','r1bp+HzSDrMkSS8921B8Dyrn83Y/yDcOnru2OBTT2Z8=')\n self.faxService.IsTest = True\n self.testCorpNum = \"1234567890\"\n self.testUserID = \"testkorea\"\n\n def test_01_getURL(self):\n url = self.faxService.getURL(self.testCorpNum,self.testUserID,\"BOX\")\n print(url)\n self.assertEqual(url[:5],\"https\",\"https로시작\")\n\n def test_02_getUnitCost(self):\n unitCost = self.faxService.getUnitCost(self.testCorpNum)\n self.assertGreaterEqual(unitCost,0,\"단가는 0 이상.\")\n\n def test_03_sendFaxMulti(self):\n receivers = []\n\n filepath = [\"test.jpeg\", \"test2.jpeg\"]\n\n for x in range(0, 5):\n receivers.append(FaxReceiver(receiveNum=\"00011112222\",receiveName=\"수신자명칭\"))\n\n receiptNum = self.faxService.sendFax_multi( \n self.testCorpNum,\n \"070-7510-3710\",\n receivers,\n filepath\n )\n self.assertIsNotNone(receiptNum,\"접수번호 확인완료\")\n\n result = self.faxService.getFaxResult(self.testCorpNum, receiptNum, self.testUserID)\n\n print(result[0].sendState)\n print(result[0].convState)\n print(result[0].sendResult)\n\n def test_04_reserveCancel(self):\n\n receivers = FaxReceiver(receiveNum=\"000111222\",receiveName=\"수신자명\")\n filepath = \"test2.jpeg\"\n\n reserveDT = '20150325200000'\n receiptNum = self.faxService.sendFax_multi( \n self.testCorpNum,\n \"070-7510-3711\",\n receivers,\n filepath,\n reserveDT\n )\n self.assertIsNotNone(receiptNum,\"접수번호 확인완료\")\n print(receiptNum)\n\n try:\n result = self.faxService.cancelReserve(self.testCorpNum, \"015032513303900002\")\n except PopbillException as PE:\n print(PE.message)\n\n\n def test_05_sendFax(self):\n\n receiptNum = self.faxService.sendFax(self.testCorpNum, 
'07075103710', '070123','수신자명', 'test2.jpeg')\n\n self.assertIsNotNone(receiptNum,\" 접수번호 확인완료\")\n \nif __name__ == '__main__':\n unittest.main()","sub_path":"faxtests.py","file_name":"faxtests.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"197608255","text":"# Copyright (C) 2020 Google Inc.\n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 \n\n\"\"\"\n Helper module for saved_search related tests.\n Helps setup testing environment.\n\"\"\"\n\nimport json\n\nfrom ggrc.rbac import SystemWideRoles\nfrom ggrc_basic_permissions.models import Role, UserRole\n\n\ndef setup_user_role(user):\n \"\"\"\n Setup Administrator role for test user.\n \"\"\"\n role = Role.query.filter(\n Role.name == SystemWideRoles.ADMINISTRATOR,\n ).first()\n\n user_role = UserRole()\n user_role.role = role\n user_role.person = user\n\n return user_role\n\n\ndef get_client_and_headers(app, user):\n \"\"\"\n Get request headers and test client instance.\n \"\"\"\n\n client = app.test_client()\n\n headers = {\n \"Content-Type\": \"application/json\",\n \"X-Requested-By\": \"GGRC\",\n \"X-ggrc-user\": json.dumps({\n \"name\": user.name,\n \"email\": user.email,\n })\n }\n\n return client, headers\n","sub_path":"test/integration/ggrc/views/saved_searches/initializers.py","file_name":"initializers.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"137537269","text":"class Command(object):\n \"\"\"\n Base class for PIC and SGH commands.\n\n Attr params [int]: Message content filled with zeroes.\n Attr values [int]: Full message values (including start and end).\n Attr data bytearray: Full message ready to be sent trough a socket.\n \"\"\"\n\n START = [0x29] # Message start values.\n END = [0, 0] # Message end values.\n\n def __init__(self, node_len, params):\n \"\"\"\n Param node_len int: 
Message content length.\n Param params [int]: Message content.\n \"\"\"\n self.params = params[:node_len] + [0] * (node_len - len(params))\n self.values = self.START + self.params + self.END\n self.data = bytearray(self.values)\n","sub_path":"connections/commands/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"334791737","text":"from argparse import ArgumentParser\nfrom glob import glob\nimport os\n\nimport ROOT\n\n# Define some C++ functions that will be dynamically compiled and used later.\n_CPPCODE = '''\n bool positioninfiducialvolume(const TLorentzVector &pos) {\n auto x = pos.X();\n auto y = pos.Y();\n auto z = pos.Z();\n return (abs(x) < 874.51\n && abs(y - 55.) < 874.51\n && 136.875 < z && z < 447.375);\n}\n\n bool infiducialvolume(T2K::Reco *reco) {\n auto &pos = reco->FrontPosition;\n return positioninfiducialvolume(pos);\n }\n'''\n\n# parse command line\ndef parsecml():\n parser = ArgumentParser(description=\"Make an flat ntuple from an oaAnalysis/eventAnalysis file with a T2KDataFrame.\")\n parser.add_argument(\"filelist\", nargs=\"+\", help=\"List of input files.\", type=str)\n return parser.parse_args()\n\ndef getfilelist(flist):\n # return a sorted list of unique normalised absolute file names matching the input patterns\n return list(sorted(set(os.path.abspath(f) for p in flist for f in glob(p))))\n\ndef tostdvector(input, cls=str):\n # convert input iterable to type std::vector required by some ROOT methods\n result = ROOT.std.vector(cls)()\n for x in input:\n result.push_back(x)\n return result\n\ndef main():\n args = parsecml()\n # create data T2KDataFrame\n flist = tostdvector(getfilelist(args.filelist))\n df = ROOT.T2K.MakeT2KDataFrame(flist, # input file list\n False, # switch off truth\n tostdvector([\"ReconDir/Global\"]), # list of trees to include, only include Global\n )\n # Compile to C++ code defined 
above\n ROOT.gInterpreter.Declare(_CPPCODE)\n # Define analysis\n anal = (\n # Select all global tracks in the fiducial volume, sort then by momentum (highest first)\n df.Define(\"tracks_in_fv\", \"\"\"\n return ROOT::VecOps::Sort(Filter(t2kreco, infiducialvolume), \n [](T2K::Reco *lhs, T2K::Reco *rhs) -> bool { return lhs->FrontMomentum > rhs->FrontMomentum; });\"\"\")\n # Only save events with at least 1 track in the fiducial volume\n .Filter(\"tracks_in_fv.size()>0\")\n # Define variables to write out\n .Define(\"leading_track_momentum\", \"tracks_in_fv.at(0)->FrontMomentum\")\n .Define(\"leading_track_position\", \"tracks_in_fv.at(0)->FrontPosition\")\n )\n # Write selected events and variables to a ROOT file\n anal.Snapshot(\"exntuple\", \"exntuple.root\",\n # variables to write to disk\n tostdvector([\"leading_track_momentum\", \"leading_track_position\"])\n )\n return\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"pyt2k/examples/exntuple.py","file_name":"exntuple.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"377782295","text":"import socket\nfrom sys import stdout\nfrom time import time\n\n# set the server's IP address and port\nip = \"localhost\"\nport = 1337\nONE = 0.1\nZERO= 0.025\ncovert_bin = \"\"\n\n# create the socker\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n# connect to server\ns.connect((ip, port))\n\n# recieve data until EOF\ndata = s.recv(4096).decode().rstrip(\"\\n\")\n\nprint(\"Received characters with delays:\")\n\nwhile (data != \"EOF\"):\n stdout.write(str(data))\n stdout.flush()\n t0 = time()\n data = s.recv(4096).decode().rstrip(\"\\n\")\n t1 = time()\n delta = round(t1 - t0, 3)\n if (delta > ZERO):\n print(\"\\tOne: \\t\" + str(delta))\n covert_bin += \"1\"\n else:\n print(\"\\tZero: \\t\" + str(delta))\n covert_bin += \"0\"\n\nprint(\"Binary received: \" + str(covert_bin))\n#question = input(\"Do you wish to 
show hidden msg? Y/N\\n\")\n#if (question.upper() == 'N'):\n# print(\"Goodbye\")\n # exit()\nprint(\"\\nConvert 8 byte binary to character:\")\n# close the connection to the server\ns.close()\n\ncovert = \"\"\ni = 0\nwhile (i < len(covert_bin)):\n b = covert_bin[i:i+8]\n print(\"bytes: \\t\" + str(b))\n n = int(\"0b{}\".format(b),2)\n print(\"int: \" + str(n))\n try:\n covert += chr(n)\n print(\"char: \\t\" + chr(n))\n except:\n covert += \"?\"\n i += 8\n print(\"\")\nprint(\"Covert message: \" + covert)\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"338298953","text":"import logging\n\n\nlog = logging.getLogger(__name__)\n\n\ndef prepare_pancakes(pancakes_pile):\n log.info('Pancakes pile: %s' % pancakes_pile)\n\n maneuvers = 0\n n = len(pancakes_pile)\n ideal_pile = '+' * n\n\n while pancakes_pile != ideal_pile:\n log.debug('Pancake pile is not ready to serve: %s' % pancakes_pile)\n\n # Lookup pankace pile from the bottom\n for i, pancake in enumerate(pancakes_pile[::-1]):\n if pancake == '-':\n lowest_incorrect_pancake = n - i - 1\n log.debug('Lowest incorrect pancake index: %d'\n % lowest_incorrect_pancake)\n break\n else:\n log.info('Serving pancakes. It required %d maneuvers' % maneuvers)\n return maneuvers\n\n pancakes_pile = _flip_top_pancakes(\n pancakes_pile, lowest_incorrect_pancake)\n\n maneuvers += 1\n\n log.info('Serving pancakes. 
It required %d maneuvers' % maneuvers)\n return maneuvers\n\n\ndef _flip_top_pancakes(pancakes_pile, n):\n top_stack, rest = pancakes_pile[:n+1], pancakes_pile[n+1:]\n log.debug('Flipping first pancakes [%s](%s)' % (top_stack, rest))\n stack_flipped = [_flip(pancake) for pancake in top_stack]\n stack_after_maneuver = ''.join(stack_flipped)\n pancakes_pile = stack_after_maneuver + pancakes_pile[n+1:]\n log.debug('Flipping result: %s' % pancakes_pile)\n return str(pancakes_pile)\n\n\ndef _flip(pancake):\n if pancake == '+':\n return '-'\n elif pancake == '-':\n return '+'\n else:\n raise ValueError(pancake)\n\n\nif __name__ == '__main__':\n\n logging.basicConfig(level='INFO')\n\n T = int(input()) # read a line with a single integer (input size)\n for i in range(1, T + 1):\n log.info(50 * '-' + ' CASE {:>d}'.format(i))\n S = input()\n print(\"Case #{}: {}\".format(i, prepare_pancakes(S)))\n","sub_path":"codes/CodeJamCrawler/16_0_2/cachitas/revenge_of_the_pancakes.py","file_name":"revenge_of_the_pancakes.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"241941424","text":"import os\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom terminaltables import AsciiTable\n\nfrom utils.stats import (\n non_max_suppression, xywh2xyxy, \n get_batch_statistics, ap_per_class, load_classe_names)\n\n\n@torch.no_grad()\ndef val(model, dataloader, epoch, opt, val_logger, visualizer=None):\n labels = []\n sample_matrics = []\n for i, (images, targets) in enumerate(dataloader):\n if targets.size(0) == 0:\n continue\n \n batches_done = len(dataloader) * epoch + i\n if not opt.no_cuda:\n model = model.to(opt.device)\n images = Variable(images.to(opt.device))\n if targets is not None:\n targets = Variable(targets.to(opt.device), requires_grad=False)\n \n labels += targets[:, 1].tolist()\n targets[:, 2:] = xywh2xyxy(targets[:, 2:])\n targets[:, 
2:] *= opt.image_size\n\n detections = model.forward(images)\n detections = non_max_suppression(detections, opt.conf_thres, opt.nms_thres)\n sample_matrics += get_batch_statistics(detections, targets.cpu(), iou_threshold=0.5)\n\n if visualizer is not None and not opt.no_vis_preds:\n visualizer.plot_predictions(images.cpu(), detections, env='main') # plot prediction\n if visualizer is not None and not opt.no_vis_gt:\n visualizer.plot_ground_truth(images.cpu(), targets.cpu(), env='main') # plot ground truth\n \n true_positives, pred_scores, pred_labels = [np.concatenate(x, 0) for x in list(zip(*sample_matrics))]\n precision, recall, AP, f1, ap_class = ap_per_class(true_positives, pred_scores, pred_labels, labels)\n \n # logging\n metric_table_data = [\n ['Metrics', 'Value'], ['precision', precision.mean()], ['recall', recall.mean()], \n ['f1', f1.mean()], ['mAP', AP.mean()]]\n \n metric_table = AsciiTable(\n metric_table_data,\n title='[Epoch {:d}/{:d}'.format(epoch, opt.num_epochs))\n print('{}\\n\\n\\n'.format(metric_table.table))\n \n class_names = load_classe_names(opt.classname_path)\n for i, c in enumerate(ap_class):\n metric_table_data += [['AP-{}'.format(class_names[c]), AP[i]]]\n metric_table.table_data = metric_table_data\n val_logger.write('{}\\n\\n\\n'.format(metric_table.table))\n\n if visualizer is not None:\n vis.plot_metrics(metric_table_data, batches_done, env='main')","sub_path":"val.py","file_name":"val.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"73249805","text":"import matplotlib.pyplot as plt\nimport tensorflow as tf \nfrom tensorflow import keras \nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Activation, Conv2D, MaxPooling2D, Flatten, Dropout, BatchNormalization\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.callbacks import EarlyStopping\nfrom 
tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom sklearn.metrics import precision_recall_curve, roc_curve, accuracy_score, confusion_matrix, precision_score, recall_score\nfrom sklearn.decomposition import PCA\nfrom sklearn.model_selection import train_test_split\nimport seaborn as sns \nplt.style.use('fivethirtyeight')\nimport pickle \nimport os \nimport numpy as np\nimport cv2\nfrom os import listdir\nfrom matplotlib import image\nfrom PIL import Image\nfrom numpy import array\nfrom keras import backend as K\nimport keras.backend.tensorflow_backend as tfback\nfrom keras.utils import np_utils\n\n\n##if you can not use GPU\ndef _get_available_gpus():\n \"\"\"Get a list of available gpu devices (formatted as strings).\n\n # Returns\n A list of available GPU devices.\n \"\"\"\n #global _LOCAL_DEVICES\n if tfback._LOCAL_DEVICES is None:\n devices = tf.config.list_logical_devices()\n tfback._LOCAL_DEVICES = [x.name for x in devices]\n return [x for x in tfback._LOCAL_DEVICES if 'device:gpu' in x.lower()]\n\ntfback._get_available_gpus = _get_available_gpus\ntfback._get_available_gpus()\ntf.config.list_logical_devices()\n\n\n##change data format\n##K.set_image_data_format('channels_first') ##(1,200,200)\nK.set_image_data_format('channels_last') ##(200,200,1)\n\n\n## load all images in a directory -- method 1\n##loaded_images = list()\n##for filename in listdir('/Users/RunzeLeng/Desktop/Project/BP-Lung-Xray/TypeA/'):\n # load image\n## img_data = image.imread('/Users/RunzeLeng/Desktop/Project/BP-Lung-Xray/TypeA/' + filename)\n # store loaded image\n## loaded_images.append(img_data)\n## print('> loaded %s %s' % (filename, img_data.shape))\n \n \n## load all images in a directory -- method 2\nlabels = ['NORMAL','TypeA', 'TypeB', 'TypeC']\nimg_size = 200\ndef get_training_data(data_dir):\n data = [] \n for label in labels: \n path = os.path.join(data_dir, label)\n class_num = labels.index(label)\n for img in os.listdir(path):\n try:\n img_arr = 
cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)\n resized_arr = cv2.resize(img_arr, (img_size, img_size))\n data.append([resized_arr, class_num])\n except Exception as e:\n print(e)\n return np.array(data)\n\ndataimg = get_training_data('/Users/RunzeLeng/Desktop/Project/BP-Lung-Xray/')\ndataimg.shape #(2389, 2) 每一组都少一个\n\n\n##look of current data\narr=dataimg[0][0]\narr\narr.shape\narr=dataimg[0]\narr\narr.shape\n\nimg = Image.fromarray(arr[0])\nimg.show()\n\n\n##formatting data\nX = []\ny = []\n\nfor feature, label in dataimg:\n X.append(feature)\n y.append(label)\n \n\n## resize data for deep learning \n##X = np.array(X).reshape(-1, 1, img_size, img_size)##(1,200,200)\nX = np.array(X).reshape(-1, img_size, img_size, 1)##(200,200,1)\ny = np.array(y)\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=32)\n\nX_train = X_train / 255\nX_test = X_test / 255\n\ny_train = np_utils.to_categorical(y_train)\ny_test = np_utils.to_categorical(y_test)\n\nnum_classes = y_test.shape[1]\n\n##look of current data\nX_train.shape\nX_test.shape\ny_train.shape\ny_test.shape\nnum_classes\n\n\n##good for balancing out disproportions in the dataset\ndatagen = ImageDataGenerator(\n featurewise_center=False, \n samplewise_center=False, \n featurewise_std_normalization=False, \n samplewise_std_normalization=False, \n zca_whitening=False, \n rotation_range=60, \n zoom_range = 0.1, \n width_shift_range=0.1, \n height_shift_range=0.1, \n horizontal_flip=True, \n vertical_flip=True) \ndatagen.fit(X_train)\n\n\n\n##model 2\ndef larger_model2():\n model2 = Sequential()\n model2.add(Conv2D(256, (3, 3), input_shape=(200, 200,1), padding='same'))\n model2.add(Activation('relu'))\n model2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\n model2.add(BatchNormalization(axis=1))\n\n model2.add(Conv2D(64, (3, 3), padding='same'))\n model2.add(Activation('relu'))\n model2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\n 
model2.add(BatchNormalization(axis=1))\n\n model2.add(Conv2D(16, (3, 3), padding='same'))\n model2.add(Activation('relu'))\n model2.add(MaxPooling2D(pool_size=(2, 2), padding='same'))\n model2.add(BatchNormalization(axis=1))\n\n model2.add(Flatten()) # this converts our 3D feature maps to 1D feature vectors\n\n model2.add(Dropout(0.5))\n model2.add(Dense(64))\n model2.add(Activation('relu'))\n\n model2.add(Dropout(0.5))\n model2.add(Dense(4))\n model2.add(Activation('softmax'))\n\n adam = Adam(learning_rate=0.0001)\n model2.compile(loss='categorical_crossentropy',optimizer=adam,metrics=['acc'])\n return model2\n\nmodel2=larger_model2()\nmodel2.summary()\n\n\n\n## model 1\ndef larger_model():\n # create model\n model = Sequential()\n ## model.add(Conv2D(20, (5, 5), input_shape=(1, 200, 200), activation='relu'))\n model.add(Conv2D(20, (5, 5), input_shape=(200, 200, 1), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n \n model.add(Conv2D(50, (5, 5), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n \n model.add(Conv2D(70, (7, 7), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.5))\n \n model.add(Flatten())\n model.add(Dense(100, activation='relu'))\n model.add(Dense(50, activation='relu'))\n model.add(Dense(num_classes, activation='softmax'))\n # Compile model\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model\n\nmodel = larger_model()\nmodel.summary()\n\n\n## early_stop\nearly_stop = EarlyStopping(patience=3, monitor='val_loss')\n\n\n## train and test model 1\n##model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=1, batch_size=50 ,verbose=1)\nmodel.fit(datagen.flow(X_train, y_train, batch_size=20), validation_data=(X_test, y_test), epochs=20,callbacks=[early_stop], verbose=1)\n\nscores = model.evaluate(X_test, y_test, verbose=1)\nprint(\"Large CNN Error: %.2f%%\" % (100-scores[1]*100))\nprint(\"Accuracy: %.2f%%\" % 
(scores[1]*100))\n\n\n\n## train and test model 2\nmodel2.fit(datagen.flow(X_train, y_train, batch_size=20), validation_data=(X_test, y_test), epochs=20,callbacks=[early_stop], verbose=1)\n\n\n\n##model 1 threhold adjustment\npred = model.predict(X_train)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n \n \n \n \n ","sub_path":"Lung_X_Ray.py","file_name":"Lung_X_Ray.py","file_ext":"py","file_size_in_byte":6789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"537368543","text":"# 给定两个字符串形式的非负整数 num1 和num2 ,计算它们的和。\n#\n# 注意:\n#\n#\n# num1 和num2 的长度都小于 5100.\n# num1 和num2 都只包含数字 0-9.\n# num1 和num2 都不包含任何前导零。\n# 你不能使用任何內建 BigInteger 库, 也不能直接将输入的字符串转换为整数形式。\n#\n# Related Topics 字符串\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nclass Solution:\n def addStrings(self, num1: str, num2: str) -> str:\n if len(num2) > len(num1):\n return self.addStrings(num2, num1)\n l, l1 = len(num1), len(num2)\n jinwei = 0\n res = [str(0)] * (l + 1)\n for i in range(l - 1, -1, -1):\n tmp = num2[i - (l - l1)] if i - (l - l1) >= 0 else 0\n digit = int(num1[i]) + int(tmp) + jinwei\n jinwei = 1 if digit >= 10 else 0\n res[i + 1] = str(digit % 10)\n res[i] = str(jinwei)\n return ''.join(res) if res[0] != '0' else ''.join(res[1:])\n\n\n# leetcode submit region end(Prohibit modification and deletion)\n\nres = Solution().addStrings('1', '1')\nprint(res)\n","sub_path":"Week_03/[415]字符串相加.py","file_name":"[415]字符串相加.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"255662233","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 13 11:17:37 2020\n\n@author: g_dic\n\"\"\"\n\n\nimport pandas as pd\nimport numpy as np\nimport h5py\n\n\nfilePath =r\"C:\\Users\\g_dic\\Documents\\Ian_S\\george_simple_export.txt\"\ndf = pd.read_csv(filePath, sep='\\t', skiprows=[0], 
encoding='UTF-16')\n\ncolNames = list(df.columns)\nprint(colNames) \n\nfilePath2 =r\"C:\\Users\\g_dic\\Documents\\Ian_S\\george_STORM_export.txt\"\ndf2 = pd.read_csv(filePath2, sep='\\t')\n\ncolNames2 = list(df2.columns)\nprint(colNames2) \n\ndf.head(2)","sub_path":"importPandaTable.py","file_name":"importPandaTable.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"216898336","text":"import matplotlib\nimport matplotlib.pyplot as plt\nimport math\nimport numpy as np\nfrom mpl_toolkits.axisartist.axislines import SubplotZero\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.ticker as mticker\n\n\ndef draw(graphs: list, stepsize: float = 0.001, axes={\"x\": {\"name\": \"Re(z)\"}, \"y\": {\n \"name\": \"Im(z)\"}, },\n linewidth: int = 3, points: list = []):\n\n fig = plt.figure()\n # Eruieren ob der Graph 3d oder 2d wird. 3D wenn mind. 1 Graph ein z_out parameter besitzt\n three_dimensional = True if True in [\n True for graph in graphs if \"z_out\" in graph] else False\n\n if(three_dimensional):\n ax = fig.gca(projection='3d')\n ax.set_zlabel(axes[\"z\"][\"name\"]\n if \"z\" in axes and \"name\" in axes[\"z\"] else \"z\", fontsize=15)\n else:\n ax = fig.add_subplot(111)\n # Beschriftung der Achsen einfügen und deren Position anpassen\n ax.set_xlabel(\n axes[\"x\"][\"name\"] if \"x\" in axes and \"name\" in axes[\"x\"] else \"x\", fontsize=15)\n ax.xaxis.set_label_coords(1, 0.45)\n ax.set_ylabel(axes[\"y\"][\"name\"] if \"y\" in axes and \"name\" in axes[\"y\"] else \"y\",\n rotation='horizontal', fontsize=15)\n ax.yaxis.set_label_coords(0.5, 1.02)\n ax.xaxis.labelpad = 30\n ax.yaxis.labelpad = 30\n if(three_dimensional):\n ax.zaxis.labelpad = 30\n\n # Skala Beschriftungen formatieren\n if(\"x\" in axes and \"format\" in axes[\"x\"]):\n plt.gca().xaxis.set_major_formatter(\n mticker.FormatStrFormatter(axes[\"x\"][\"format\"]))\n if(\"y\" in axes and \"format\" 
in axes[\"y\"]):\n plt.gca().yaxis.set_major_formatter(\n mticker.FormatStrFormatter(axes[\"y\"][\"format\"]))\n if(\"z\" in axes and \"format\" in axes[\"z\"] and three_dimensional):\n plt.gca().zaxis.set_major_formatter(\n mticker.FormatStrFormatter(axes[\"z\"][\"format\"]))\n # Größer der Skala Striche anpassen\n ax.tick_params(axis='x', labelsize=10, pad=10)\n ax.tick_params(axis='y', labelsize=10, pad=20)\n ax.tick_params(axis='z', labelsize=10, pad=20,\n ) if three_dimensional else 0\n\n # Wenn 2D Graph, Koordinatenachsen anpassen (Zu Ursprung verschieben) und Pfeile an Koordinatenachsen zeichnen\n if(not three_dimensional):\n # Koordinatensystem anpassen --> 2 Achsen die sich in Ursprung treffen\n ax.spines['left'].set_position('zero')\n ax.spines['bottom'].set_position('zero')\n ax.spines['left'].set_linewidth(2)\n ax.spines['bottom'].set_linewidth(2)\n ax.spines['right'].set_color('none')\n ax.spines['top'].set_color('none')\n ax.plot((1), (0), ls=\"\", marker=\">\", ms=10, color=\"k\",\n transform=ax.get_yaxis_transform(), clip_on=False)\n ax.plot((0), (1), ls=\"\", marker=\"^\", ms=10, color=\"k\",\n transform=ax.get_xaxis_transform(), clip_on=False)\n\n # Jeden Graph zeichnen\n for graph in graphs:\n x, y, z = [], [], []\n for i in [x * stepsize for x in range(\n int(graph[\"interval\"][0] * (1 / stepsize)),\n int(graph[\"interval\"][1] * (1 / stepsize)))]:\n x.append(graph[\"x_out\"](i))\n y.append(graph[\"y_out\"](i))\n\n # Wenn 3D: z-Achse befüllen und Scatterplot zeichnen\n if(three_dimensional):\n z.append(graph[\"z_out\"](\n i) if \"z_out\" in graph else 0) # Wenn der Graph keine z-Achse besitzt wird jeder z-wert mit 0 befüllt\n if(three_dimensional):\n ax.scatter(x, y, z, s=linewidth, zorder=1, color=graph[\"color\"])\n else:\n ax.plot(x, y, linewidth=linewidth, color=graph[\"color\"])\n\n # Jeden Punkt zeichnen\n for point in points:\n # Float werte, die auf 10 Nachkommastellen ihren ganzzahlig gerundeten Werten entsprechen werden gerundet, da 
von einer Abweichung bei der Computer bedingten ungenauigkeit ausgegangen wird\n for i in [\"x\", \"y\", \"z\"]:\n point[i] = (int(point[i]) if round(point[i], 10) == int(\n point[i]) else point[i]) if i in point else 0\n\n if(three_dimensional):\n ax.scatter(point[\"x\"], point[\"y\"], point[\"z\"],\n s=point[\"size\"] if \"size\" in point else linewidth * 2,\n zorder=3,\n color=point[\"color\"] if \"color\" in point else \"C0\")\n else:\n ax.plot(point[\"x\"], point[\"y\"],\n color=point[\"color\"] if \"color\" in point else \"C0\", marker=\"o\",\n linewidth=point[\"size\"] if \"size\" in point else linewidth * 2)\n\n if(\"annotation\" in point and point[\"annotation\"] == True):\n # Ergebnisse mit mehr als 3 Nachkommastellen werden zur Darstellung gerundet und als z.B. \"3.141...\"\" angegeben\n x_annotation = point[\"x\"] if round(\n point[\"x\"], 3) == point[\"x\"] else str(round(point[\"x\"], 3)) + \"...\"\n y_annotation = point[\"y\"] if round(\n point[\"y\"], 3) == point[\"y\"] else str(round(point[\"y\"], 3)) + \"...\"\n\n ax.annotate(f\"({x_annotation}|{y_annotation})\",\n (point[\"x\"], point[\"y\"]), fontsize=15)\n\n if(\"lines\" in point and point[\"lines\"] == True and not three_dimensional):\n plt.plot([0, point[\"x\"]], [point[\"x\"], point[\"y\"]],\n linestyle=\"dotted\", linewidth=2, color=point[\"color\"] if \"color\" in point else \"C0\")\n plt.plot([point[\"x\"], point[\"x\"]], [0, point[\"y\"]],\n linestyle=\"dotted\", linewidth=2, color=point[\"color\"] if \"color\" in point else \"C0\")\n # plt.savefig(\"Output/test.svg\")\n plt.show()\n\n\ngolden_ratio = (1 + math.sqrt(5)) / 2\n\n\ndef binet(n):\n return complex((golden_ratio**n - (1 / -golden_ratio)**n) / math.sqrt(5))\n\n\ndraw(graphs=[\n {\"interval\": [-10, 10],\n \"x_out\": lambda x: x,\n \"y_out\": lambda x: binet(x).real,\n \"z_out\": lambda x: binet(x).imag,\n \"color\": \"#1A74B2\"}\n],\n stepsize=0.01,\n axes={\n \"x\": {\"name\": \"n\"},\n \"y\": {\"name\": \"Re(z)\"},\n \"z\": 
{\"name\": \"Im(z)\", \"format\": '%.1fi'}\n},\n points=[\n {\"x\": x,\n \"y\": binet(x).real,\n \"z\": binet(x).imag,\n \"color\": \"red\",\n \"lines\": False,\n \"annotation\": False,\n \"size\": 20}\n for x in range(-9, 9)\n]\n)\n","sub_path":"fibo_visualization.py","file_name":"fibo_visualization.py","file_ext":"py","file_size_in_byte":6293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"43396300","text":"\"\"\"Setting up the logger\n\"\"\"\nimport os\nimport logging\n\ndef set_up_logger(path_log_file):\n \"\"\"Create logger\n Arguments\n --------\n path_log_file : str\n Path to logger file\n Info\n -----\n The logging level can be changed depending on mode\n Note\n ----\n logger.debug('debug message')\n logger.warn('warn message')\n logger.error('error message')\n logger.critical('critical message')\n \"\"\"\n # Create logging file if not existing\n if not os.path.isfile(path_log_file):\n open(path_log_file, 'w').close()\n\n # Set logger level\n logging.basicConfig(\n filename=path_log_file,\n filemode='w', #'a, w'\n level=logging.DEBUG, #INFO, DEBUG, ERROR, CRITICAL\n format=('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))\n\n # Necessary to add loggers in visual studio console\n ##logging.getLogger().addHandler(logging.StreamHandler())\n\n # Turn on/off logger\n #logging.disable = False\n #logging.disable(logging.CRITICAL)\n","sub_path":"energy_demand/basic/logger_setup.py","file_name":"logger_setup.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"458077814","text":"from django.shortcuts import render, redirect\nfrom . models import Video\nfrom . 
forms import VideoForm\n\n\ndef view_index(request):\n retrieved_videos = Video.objects.order_by('-datetime_added')\n context = {'videos_list': retrieved_videos}\n return render(request, 'videorequest/index.html', context)\n\n\ndef view_vrform(request):\n if request.method == 'POST':\n form = VideoForm(request.POST)\n if form.is_valid():\n new_request = Video(\n videotitle=request.POST['videoname'],\n videodescription=request.POST['videodescription']\n )\n new_request.save()\n return redirect('/videorequest/')\n else:\n form = VideoForm()\n context = {'form': form}\n return render(request, 'videorequest/vrform.html', context)","sub_path":"05_project/mywebsite/videorequest/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"282505046","text":"import visitor as visitor\nfrom AST import *\nimport AST_CIL\n\nclass Build_CIL:\n def __init__(self, ast, sem):\n self.end_line = {}\n self.idCount = 0\n self.astCIL = AST_CIL.Program()\n self.local_variables = [Var('self', 'SELF_TYPE')]\n self.local_variables_map = [(0,0)]\n self.local_variables_original = {}\n self.local_variables_name = {}\n self.constructor = {}\n self.classmethods = {}\n self.BFS(sem.graph, sem.classmethods_original)\n self.visit(ast, self.astCIL)\n\n def BFS(self, graph, class_methods):\n self.classmethods[('Object', 'abort')] = 'function_Object_abort'\n self.classmethods[('Object', 'type_name')] = 'function_Object_type_name'\n self.classmethods[('Object', 'copy')] = 'function_Object_copy'\n self.classmethods[('IO', 'out_string')] = 'function_IO_out_string'\n self.classmethods[('IO', 'out_int')] = 'function_IO_out_int'\n self.classmethods[('IO', 'in_string')] = 'function_IO_in_string'\n self.classmethods[('IO', 'in_int')] = 'function_IO_in_int'\n self.classmethods[('String', 'length')] = 'function_String_length'\n self.classmethods[('String', 'concat')] = 'function_String_concat'\n 
self.classmethods[('String', 'substr')] = 'function_String_substr'\n\n l = ['Object']\n while len(l) > 0:\n temp = l.pop(0)\n if not graph.__contains__(temp): continue\n for _class in graph[temp]:\n l.append(_class)\n for function in class_methods[temp]:\n self.classmethods[(_class, function)] = self.classmethods[(temp, function)] \n\n\n def get_local(self):\n dest = 'local_' + str(self.idCount)\n self.idCount += 1\n return dest\n\n @visitor.on('node')\n def visit(self, node, nodeCIL):\n pass\n\n @visitor.when(Program)\n def visit(self, program, programCIL):\n\n #añadir IO, Object, Int, String, Bool\n _type_IO = AST_CIL.Type('IO')\n func = 'function' + '_' + 'IO' + '_' + 'out_string'\n _type_IO.methods['out_string'] = func\n self.classmethods[('IO', 'out_string')] = func\n f = AST_CIL.Function(func)\n f.params.append('x')\n f.instructions.append(AST_CIL.Print('x')) \n self.astCIL.code_section.append(f)\n programCIL.type_section.append(_type_IO)\n\n for c in program.classes:\n self.visit(c, programCIL)\n\n @visitor.when(Class)\n def visit(self, _class, programCIL):\n #clase que estoy visitando\n self.current_class = _class.name\n\n #crear el tipo correspondiente\n _type = AST_CIL.Type(_class.name)\n #annadir el codigo de la clase\n for m in _class.methods:\n self.visit(m, _type)\n\n programCIL.type_section.append(_type)\n self.current_class = None\n\n @visitor.when(Method)\n def visit(self, method, typeCIL):\n \n self.current_method = method.id\n\n func = 'function' + '_' + self.current_class + '_' + method.id\n typeCIL.methods[method.id] = func\n\n self.classmethods[(self.current_class, method.id)] = func\n\n f = AST_CIL.Function(func)\n\n for arg in method.parameters:\n f.params.append(arg.id)\n\n result = self.visit(method.expression, f)\n f.instructions.append(AST_CIL.Return(result))\n \n self.astCIL.code_section.append(f)\n\n self.local_variables.clear()\n self.local_variables_map.clear()\n self.local_variables.append(Var('self','SELF_TYPE'))\n 
self.local_variables_map.append((0,0))\n self.current_method = None \n \n @visitor.when(String)\n def visit(self, string, functionCIL):\n #crear tag\n #annadir a data\n tag = 's' + str(len(self.astCIL.data_section))\n\n n = len(string.value)\n\n if string.value[n-1] == '\\n':\n s = string.value.replace(\"\\n\",'\\\\n\\\"')\n s = '\\\"' + s\n else: s = '\"' + s + '\"'\n\n self.astCIL.data_section[s] = tag\n d = self.get_local()\n intr = AST_CIL.Load(d, tag)\n functionCIL.localvars.append(d)\n functionCIL.instructions.append(intr)\n return d\n\n @visitor.when(Dispatch)\n def visit(self, dispatch, functionCIL):\n dest = 'local_' + str(self.idCount)\n self.idCount += 1\n\n for item in dispatch.parameters: #e(e1,e2,...,en)\n result = self.visit(item, functionCIL)\n functionCIL.instructions.append(AST_CIL.Arg(result))\n\n intr = AST_CIL.Call(dest, self.classmethods[(self.current_class, dispatch.func_id)])\n functionCIL.localvars.append(dest)\n functionCIL.instructions.append(intr)\n\n return dest\n","sub_path":"src/cool_to_cil.py","file_name":"cool_to_cil.py","file_ext":"py","file_size_in_byte":4829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"573062308","text":"# !/usr/bin/env python\n# -*- coding: utf-8 -*\n'''\ncreated by will\n'''\nfrom django import template\n\nregister = template.Library()\n\n\n@register.simple_tag(takes_context=True)\ndef has_def(context, var):\n result = var in context and context[var] is not None\n return result\n","sub_path":"metamap_django/metamap/templatetags/ifdef.py","file_name":"ifdef.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"544659721","text":"import torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision.transforms.transforms import Compose, Normalize, Resize, ToTensor, RandomHorizontalFlip, RandomCrop\nfrom tqdm.auto import tqdm\nimport time\nfrom models 
import EncoderDecodertrain18\nfrom dataset import GetDataset, CapsCollate\nfrom vocab import Vocabulary\nimport matplotlib.pyplot as plt\nimport Levenshtein\nimport json\nfrom utils import get_best_model\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\ntqdm.pandas()\n\n\ndef show_image(img, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n\n # unnormalize\n img[0] = img[0] * 0.229\n img[1] = img[1] * 0.224\n img[2] = img[2] * 0.225\n img[0] += 0.485\n img[1] += 0.456\n img[2] += 0.406\n\n img = img.numpy().transpose((1, 2, 0))\n\n plt.imshow(img)\n if title is not None:\n plt.title(title)\n plt.pause(0.001) # pause a bit so that plots are updated\n\n\nif __name__ == '__main__':\n dataset_name = 'data'\n\n transform = Compose([\n # RandomHorizontalFlip(),\n # Resize((256, 256), PIL.Image.BICUBIC),\n ToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ])\n\n vocab = Vocabulary(5)\n with open('tmp_files/word_2_index.json', 'r') as f:\n load_dict = json.load(f)\n vocab.stoi = load_dict\n vocab.itos = {v: k for k, v in vocab.stoi.items()}\n\n train_dataset = GetDataset(transform, dataset_name + '/label/train_label.csv', 'train', vocab)\n test_dataset = GetDataset(transform, dataset_name + '/label/test_label.csv', 'test', vocab)\n\n pad_idx = vocab.stoi[\"\"]\n\n test_dataloader = DataLoader(\n dataset=test_dataset,\n batch_size=256,\n shuffle=False,\n num_workers=10,\n collate_fn=CapsCollate(pad_idx=pad_idx, batch_first=True),\n )\n\n MODEL_PATH = get_best_model('model_files')\n model_state = torch.load(MODEL_PATH)\n model = EncoderDecodertrain18(\n embed_size=model_state['embed_size'],\n vocab_size=model_state['vocab_size'],\n attention_dim=model_state['attention_dim'],\n encoder_dim=model_state['encoder_dim'],\n decoder_dim=model_state['decoder_dim']\n ).to(device)\n model.load_state_dict(model_state['state_dict'])\n model = model.to(device)\n\n f = open('result/' + dataset_name + '.txt', 'w')\n\n count = 0\n 
print(len(test_dataset))\n model.eval()\n with torch.no_grad():\n for i, (image, captions) in enumerate(test_dataloader):\n img, target = image.to(device), captions.to(device)\n features = model.encoder(img)\n caps = model.decoder.generate_caption_batch(features, stoi=vocab.stoi, itos=vocab.itos)\n gen_captions = vocab.tensor_to_captions(caps)\n targets = vocab.tensor_to_captions(target)\n\n same_list = [x for _, x in enumerate(gen_captions) if x == targets[_]]\n count += len(same_list)\n print('Process: [{}/{}]\\tAcc {}'.format(i, len(test_dataloader), len(same_list) / len(target)))\n\n for _, item in enumerate(gen_captions):\n f.write(item + '\\n')\n f.close()\n","sub_path":"Stereochemistry/with/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":3141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"131203798","text":"import zarr\nimport numcodecs\nfrom skimage.data import astronaut\n\n# choose chunks s.t. 
we do have overhanging edge-chunks\nCHUNKS = (100, 100, 1)\nSTR_TO_COMPRESSOR = {'gzip': numcodecs.GZip(),\n 'blosc': numcodecs.Blosc(),\n 'zlib': numcodecs.Zlib()}\n\n\ndef generate_zarr_format(compressors=['gzip', 'blosc', 'zlib', None]):\n path = '../data/zarr.zr'\n im = astronaut()\n\n f = zarr.open(path)\n for compressor in compressors:\n name = compressor if compressor is not None else 'raw'\n compressor_impl = STR_TO_COMPRESSOR[compressor] if compressor is not None else None\n f.create_dataset(name, data=im, chunks=CHUNKS,\n compressor=compressor_impl)\n\n\n# this needs PR https://github.com/zarr-developers/zarr/pull/309\ndef generate_n5_format(compressors=['gzip', None]):\n path = '../data/zarr.n5'\n im = astronaut()\n\n f = zarr.open(path)\n for compressor in compressors:\n name = compressor if compressor is not None else 'raw'\n compressor_impl = STR_TO_COMPRESSOR[compressor] if compressor is not None else None\n f.create_dataset(name, data=im, chunks=CHUNKS,\n compressor=compressor_impl)\n\n\nif __name__ == '__main__':\n generate_zarr_format()\n generate_n5_format()\n","sub_path":"generate_data/generate_zarr.py","file_name":"generate_zarr.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"206566624","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 24 11:41:37 2020\n\n@author: sheifazera\n\"\"\"\n\nimport numpy as np\nfrom pyomo.environ import *\n'''\n min cR*xu + cZ*yu + dR*xl0 + dZ*yl0 \n s.t. 
AR*xu + AZ*yu + BR*xl0 + BZ* yl0 <= r\n'''\n\nmU = 2\nnL= 2 \n\nnR = 1\n\nnZ = 1\n\nmR = 1\n\nmZ = 1\n \nAR_array = np.array([0, 6]).reshape(2,1).tolist()\nAR={}\nfor i in range(0,2):\n for j in range(0,1):\n AR[(i+1,j+1)]=AR_array[i][j]\n \n \nAZ_array = np.array([7,9]).reshape(2,1).tolist()\nAZ={}\nfor i in range(0,2):\n for j in range(0,1):\n AZ[(i+1,j+1)]=AZ_array[i][j]\n\nBR_array = np.array([5, 10]).reshape(2,1).tolist()\nBR={}\nfor i in range(0,2):\n for j in range(0,1):\n BR[(i+1,j+1)]=BR_array[i][j]\n\nBZ_array = np.array([7,2]).reshape(2,1).tolist()\nBZ={}\nfor i in range(0,2):\n for j in range(0,1):\n BZ[(i+1,j+1)]=BZ_array[i][j]\n\nr_array = np.array([62, 117]).reshape(2,1).tolist()\nr={}\nfor i in range(0,2):\n r[i+1]=r_array[i][0] \n \ncR_array = np.array([20]).reshape(1,1).tolist()\ncR={}\nfor i in range(0,1):\n cR[i+1]=cR_array[i][0]\n \ncZ_array = np.array([-38]).reshape(1,1).tolist()\ncZ={}\nfor i in range(0,1):\n cZ[i+1]=cZ_array[i][0]\ndR_array = np.array([1]).reshape(1,1).tolist()\ndR={}\nfor i in range(0,1):\n dR[i+1]=dR_array[i][0]\n \ndZ_array = np.array([42]).reshape(1,1).tolist()\ndZ={}\nfor i in range(0,1):\n dZ[i+1]=dZ_array[i][0]\n \n \n#Turn it into a Pyomo model:\n\n\nParent=ConcreteModel()\n\nParent.mUset=RangeSet(1,mU)\nParent.nLset=RangeSet(1,nL)\nParent.nRset=RangeSet(1,nR)\nParent.nZset=RangeSet(1,nZ)\nParent.mRset=RangeSet(1,mR)\nParent.mZset=RangeSet(1,mZ)\n\nParent.r=Param(Parent.mUset,initialize=r,default=0,mutable=True)\nParent.cR=Param(Parent.mRset,initialize=cR,default=0,mutable=True)\nParent.cZ=Param(Parent.mZset,initialize=cZ,default=0,mutable=True)\nParent.dR=Param(Parent.nRset,initialize=dR,default=0,mutable=True)\nParent.dZ=Param(Parent.nZset,initialize=dZ,default=0,mutable=True)\n\n\nParent.AR=Param(Parent.mUset,Parent.mRset,initialize=AR, default=0,mutable=True)\nParent.AZ=Param(Parent.mUset,Parent.mZset,initialize=AZ, default=0,mutable=True)\nParent.BR=Param(Parent.mUset,Parent.nRset,initialize=BR, 
default=0,mutable=True)\nParent.BZ=Param(Parent.mUset,Parent.nZset,initialize=BZ, default=0,mutable=True)\n\nParent.zero=Param(initialize=0, mutable=True) \n\n\nParent.Master=Block()\n\nParent.Master.Y=Param(Any,within=NonNegativeIntegers,mutable=True) #Growing Parameter\nParent.Master.xu=Var(Parent.mRset,within=NonNegativeReals)\nParent.Master.yu=Var(Parent.mZset,within=NonNegativeIntegers)\nParent.Master.xl0=Var(Parent.nRset,within=NonNegativeReals)\nParent.Master.yl0=Var(Parent.nZset,within=NonNegativeIntegers)\n\n\ndef Master_obj(Master):\n value=(sum(Parent.cR[j]*Parent.Master.xu[j] for j in Parent.mRset)+\n sum(Parent.cZ[j]*Parent.Master.yu[j] for j in Parent.mZset)+\n sum(Parent.dR[j]*Parent.Master.xl0[j] for j in Parent.nRset)+\n sum(Parent.dZ[j]*Parent.Master.yl0[j] for j in Parent.nZset))\n return value\n\nParent.Master.Theta_star=Objective(rule=Master_obj,sense=minimize)\n \ndef Master_c1(Master,i):\n value=(sum(Parent.AR[(i,j)]*Parent.Master.xu[j] for j in Parent.mRset)+\n sum(Parent.AZ[(i,j)]*Parent.Master.yu[j] for j in Parent.mZset)+\n sum(Parent.BR[(i,j)]*Parent.Master.xl0[j] for j in Parent.nRset)+\n sum(Parent.BZ[(i,j)]*Parent.Master.yl0[j] for j in Parent.nZset))\n return value - Parent.r[i] <= Parent.zero\nParent.Master.c1=Constraint(Parent.mUset,rule=Master_c1) #(12) ","sub_path":"SingleLevelMatrixRepresentation.py","file_name":"SingleLevelMatrixRepresentation.py","file_ext":"py","file_size_in_byte":3532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"146990237","text":"from distutils.core import setup\nfrom distutils.extension import Extension\nimport numpy as np\nimport sys\n\nUSE_CYTHON = True\nCOMPILE_CYTHON = False\n\nif USE_CYTHON:\n ext = '.pyx' if COMPILE_CYTHON else '.c'\n\n extensions = [Extension('basis_model', ['intprim/basis_model' + ext]),\n Extension('bayesian_interaction_primitives', ['intprim/bayesian_interaction_primitives' + ext])]\n\n if COMPILE_CYTHON:\n from 
Cython.Build import cythonize\n extensions = cythonize(extensions)\n\n setup(\n name = 'intprim',\n version = '1.0',\n description = 'Interaction Primitives library from the Interactive Robotics Lab at Arizona State University',\n url = 'https://github.com/ir-lab/interaction-primitives',\n author = 'Joseph Campbell',\n author_email = 'jacampb1@asu.edu',\n license = 'MIT',\n packages = ['intprim', 'intprim.examples'],\n ext_modules = extensions,\n include_dirs=[np.get_include()]\n )\nelse:\n setup(\n name='intprim',\n version='1.0',\n description='Interaction Primitives library from the Interactive Robotics Lab at Arizona State University',\n url='https://github.com/ir-lab/interaction-primitives',\n author='Joseph Campbell',\n author_email='jacampb1@asu.edu',\n license='MIT',\n packages=['intprim', 'intprim.examples']\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"536060904","text":"# -*- coding: utf-8 -*-\n'''\nCopyright (c) 2016, Virginia Tech\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification, are permitted provided that the\n following conditions are met:\n1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following\ndisclaimer.\n2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following\ndisclaimer in the documentation and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\nINCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\nWHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nThe views and conclusions contained in the software and documentation are those of the authors and should not be\ninterpreted as representing official policies, either expressed or implied, of the FreeBSD Project.\n\nThis material was prepared as an account of work sponsored by an agency of the United States Government. Neither the\nUnited States Government nor the United States Department of Energy, nor Virginia Tech, nor any of their employees,\nnor any jurisdiction or organization that has cooperated in the development of these materials, makes any warranty,\nexpress or implied, or assumes any legal liability or responsibility for the accuracy, completeness, or usefulness or\nany information, apparatus, product, software, or process disclosed, or represents that its use would not infringe\nprivately owned rights.\n\nReference herein to any specific commercial product, process, or service by trade name, trademark, manufacturer, or\notherwise does not necessarily constitute or imply its endorsement, recommendation, favoring by the United States\nGovernment or any agency thereof, or Virginia Tech - Advanced Research Institute. 
The views and opinions of authors\nexpressed herein do not necessarily state or reflect those of the United States Government or any agency thereof.\n\nVIRGINIA TECH – ADVANCED RESEARCH INSTITUTE\nunder Contract DE-EE0006352\n\n#__author__ = \"Aditya Nugur\"\n#__credits__ = \"\"\n#__version__ = \"3.5\"\n#__maintainer__ = \"BEMOSS Team\"\n#__email__ = \"aribemoss@gmail.com\"\n#__website__ = \"www.bemoss.org\"\n#__created__ = \"2016-12-05 12:04:50\"\n#__lastUpdated__ = \"2016-12-05 11:23:33\"\n'''\n\n'''This API class is for an agent that want to discover/communicate/monitor/\neGauge Powermeter '''\n\nfrom DeviceAPI.BaseAPI import baseAPI\nfrom bemoss_lib.utils.BEMOSS_ONTOLOGY import BEMOSS_ONTOLOGY\nimport requests\nimport urllib\nimport settings\ndebug = True\nfrom HTMLParser import HTMLParser\nimport time\nfrom xml.dom import minidom\nattributes={}\nattributesfreq={}\nattributespow={}\nattributesrea={}\nattributesv={}\nattributesi={}\ndb_table_supported_devices = settings.DATABASES['default']['TABLE_supported_devices']\nattributest={}\nattributespa={}\nattributesh={}\nattributesee={}\nattributesi={}\nattributess={}\n\n\n\n\nclass API(baseAPI):\n def __init__(self, **kwargs):\n super(API, self).__init__(**kwargs)\n self.device_supports_auto = True\n #self.address=address\n self._debug = False\n\n def API_info(self):\n\n return [{'device_model': \"EG30x787x\", 'vendor_name': \"Egauge\", 'communication': 'WiFi',\n 'device_type_id': 5, 'api_name': 'API_eGaugePM', 'html_template': 'powermeter/powermeter.html',\n 'agent_type': 'BasicAgent', 'identifiable': False, 'authorizable' : False, 'is_cloud_device': False,\n 'schedule_weekday_period': 4, 'schedule_weekend_period': 4, 'allow_schedule_period_delete': True,\n 'chart_template': 'charts/charts_powermeter.html'},\n ]\n\n def dashboard_view(self):\n return {\"top\": None,\n \"center\": {\"type\": \"number\", \"value\": \"power\", \"unit\": 'W'},\n \"bottom\": None, \"image\":\"powermeter.png\"}\n\n def ontology(self):\n 
return {\"Total Usage\":BEMOSS_ONTOLOGY.ENERGY,\"PowerSum\":BEMOSS_ONTOLOGY.POWER,\"PowerA\":BEMOSS_ONTOLOGY.POWER_L1,\"PowerB\":BEMOSS_ONTOLOGY.POWER_L2,\"PowerC\":BEMOSS_ONTOLOGY.POWER_L3,\n \"VoltageAvg\":BEMOSS_ONTOLOGY.VOLTAGE,\"VoltA\":BEMOSS_ONTOLOGY.VOLTAGE_L1,\"VoltB\":BEMOSS_ONTOLOGY.VOLTAGE_L2,\"VoltC\":BEMOSS_ONTOLOGY.VOLTAGE_L3,\n \"Freq\":BEMOSS_ONTOLOGY.FREQUENCY,\"PowerFactorAvg\":BEMOSS_ONTOLOGY.POWERFACTOR,\"PowerFactorA\":BEMOSS_ONTOLOGY.POWERFACTOR_L1,\n \"PowerFactorB\":BEMOSS_ONTOLOGY.POWERFACTOR_L2,\"PowerFactorC\":BEMOSS_ONTOLOGY.POWERFACTOR_L3,\"PowerSumReactive\":BEMOSS_ONTOLOGY.REACTIVE_POWER,\n \"CurrentA\":BEMOSS_ONTOLOGY.CURRENT_L1,\"CurrentB\":BEMOSS_ONTOLOGY.CURRENT_L2,\"CurrentC\":BEMOSS_ONTOLOGY.CURRENT_L3 }\n\n def discover(self,address,model,token=None):\n\n responses = list()\n try:\n\n address='http://'+address+'/cgi-bin/egauge?inst'\n r = requests.get(\n url=address,\n params=\"tot\"\n )\n device_data = self.getDeviceInfoXML(r.content)\n number_of_power_meters=len ([1 for s in device_data.keys() if s.startswith('PowerSum')]) #counting number of power meters; single power for each meter\n\n for i in range(1,number_of_power_meters+1):\n mac=address[13:]\n mac=mac.split(\".\")[0]+'_'+str(i)\n\n responses.append({'address': address+'_'+str(i), 'mac': mac,\n 'model': model, 'vendor': 'Egauge'})\n\n return responses\n except Exception as e:\n #print e\n return responses\n\n def getDataFromDevice(self):\n full_address = self.config[\"address\"]\n #remove the device Id appended at the end of the address by the discovery function\n split_address = full_address.split('_')\n meter_id = split_address[-1]\n correct_address = '_'.join(split_address[:-1])\n\n r = requests.get(\n url=correct_address,\n params=\"tot\"\n )\n\n if r.status_code == 200: # 200 means successfully\n devicedata=self.getDeviceInfoXML(r.content,meter_id=meter_id)\n\n #need to calculate: VoltageAvg, PowerFactorA, PowerFactorB, PowerFactorC, PowerFactorAvg, 
PowerSumReactive\n devicedata['VoltageAvg'] = (devicedata['VoltA'] + devicedata['VoltB'] + devicedata['VoltC'])/3.0\n\n PowerSumReactive = 0\n for mId in ['A','B','C']:\n ApparentPower = devicedata['Volt'+mId] * devicedata['Current'+mId]\n RealPower = devicedata['Power'+mId]\n PowerSumReactive += (ApparentPower**2-RealPower**2)**0.5 if ApparentPower> RealPower else 0\n devicedata['PowerFactor'+mId] = abs(RealPower/ApparentPower)\n\n devicedata['PowerFactorAvg'] = (devicedata['PowerFactorA'] + devicedata['PowerFactorB'] + devicedata['PowerFactorC'])/3.0\n devicedata['PowerSumReactive'] = PowerSumReactive\n\n return devicedata\n else:\n raise Exception('Bad status code response from server')\n\n\n\n def getDeviceInfoXML(self, data,meter_id=None):\n # Use the dom module to load xml data into a dictionary\n try:\n #print data\n xmldata = minidom.parseString(data)\n registers = xmldata.getElementsByTagName('r')\n device_data = dict()\n for reg in registers:\n val = reg.getElementsByTagName('i')[0]\n name = reg.attributes['n'].value\n\n if meter_id: #if filtering by meter_id is desired\n split_name = name.split('_')\n if len(split_name)>1 and split_name[-1] != meter_id: #if the register is for specific meter_id (it has a\n # underscore ID in its name, and if the meter_id doesn't match passed meter id, ignore this register\n continue\n name = split_name[0]\n\n device_data[name] = float(val.firstChild.nodeValue)\n\n return device_data\n except Exception as e:\n raise\n\n\ndef main():\n Power_meter = API(model='Egauge',type='PowerMeter',api='API_eGaugePM',address='http://egauge32207.local/cgi-bin/egauge?inst_1')\n print(Power_meter.getDeviceStatus())\n Power_meter.discover(address='egauge32207.local',model='eGauge')\n print(\"dsfih\")\n #Power_meter.discover('egauge32207.egaug.es',\"egauge\")\n\nif __name__ == \"__main__\": 
main()\n","sub_path":"DeviceAPI/eGauge.py","file_name":"eGauge.py","file_ext":"py","file_size_in_byte":9034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"28040718","text":"\"\"\"\n Module contains factory class for WebDrivers.\n\"\"\"\n\nfrom selenium_generator.base.exceptions import MissingDriverConfiguration, UnsupportedDriver\nfrom selenium_generator.base.singleton import singleton\nfrom selenium_generator.factories.drivers.local_driver import LocalDriver\nfrom selenium_generator.factories.drivers.remote_driver import RemoteDriver\nfrom selenium_generator.parsers.config_parser import ConfigParser\n\nDEFAULT_LOCAL_DRIVERS = [\"chrome\", \"firefox\"]\n\"\"\"List of names of allowed local drivers. In other words driver which are by default supported \nwith :class:`selenium_generator.factories.drivers.local_driver.LocalDriver`\n\"\"\"\n\nDEFAULT_DRIVER_CLASSES = [LocalDriver, RemoteDriver]\n\"\"\"List of class which :class:`DriverFactory` uses for creating of required driver.\n\nList of default classes:\n :class:`selenium_generator.factories.drivers.local_driver.LocalDriver`\n :class:`selenium_generator.factories.drivers.remote_driver.RemoteDriver`\n\"\"\"\n\n\n@singleton\nclass DriverFactory:\n \"\"\"Class creates instance of Selenium WebDriver for specified driver.\n\n Args:\n driver_classes (list(BaseDriver)): List of classes for running individual types of drivers.\n allowed_local_drivers (str): List of names of allowed local drivers.\n\n Attributes:\n driver_classes (list(BaseDriver)): List of classes for running individual types of drivers.\n allowed_local_drivers (str): List of names of allowed local drivers.\n drivers_config (dict): Loaded drivers object from configuration.\n \"\"\"\n def __init__(self,\n driver_classes=DEFAULT_DRIVER_CLASSES,\n allowed_local_drivers=DEFAULT_LOCAL_DRIVERS):\n\n self.driver_classes = driver_classes\n self.drivers_config = ConfigParser().get_drivers_config()\n 
self.allowed_local_drivers = allowed_local_drivers\n\n def run(self, driver_name):\n \"\"\"Method create instance of required webdriver based on driver_name and its configuration.\n\n Args:\n driver_name (str): Name of a driver.\n\n Returns:\n Webdriver: Instance of a required webdriver.\n \"\"\"\n self._verify_drivers(driver_name)\n for driver in self.driver_classes:\n if driver.check_type(self.drivers_config[driver_name]['remote']):\n return driver(driver_name, self.drivers_config[driver_name]).run()\n\n def _verify_drivers(self, driver_name):\n \"\"\"Method verify if local driver is allowed in allowed_local_drivers and if any type of driver is configured in\n configuration.\n\n Args:\n driver_name (str): Name of a driver.\n\n Raises:\n UnsupportedDriver: If driver_name is not in allowed_local_drivers.\n MissingDriverConfiguration: If configuration of a driver is missing in configuration.\n \"\"\"\n if driver_name not in self.allowed_local_drivers:\n raise UnsupportedDriver(self.allowed_local_drivers)\n\n if driver_name not in self.drivers_config:\n raise MissingDriverConfiguration(driver_name)\n","sub_path":"selenium_generator/factories/drivers/driver_factory.py","file_name":"driver_factory.py","file_ext":"py","file_size_in_byte":3071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"4502368","text":"import os\ndef soldier(path,file,formate):\n os.chdir(path)\n i=1\n files=os.listdir(path)\n with open(file) as f:\n filelist=f.read().split(\"\\n\")\n\n for file in files:\n if file not in filelist:\n os.rename(file,file.capitalize())\n\n if os.path.splittext(file)[1]==format:\n os.rename(file,f\"{i}.{format}\")\n i+=1\n\nsoldier(r\"C:\\Users\\dell\\Desktop\\HTML\\images\",r\"C:\\Users\\dell\\Desktop\\HTML\\images\",\".png\")\n\n\n","sub_path":"oh soldier prettify.py","file_name":"oh soldier 
prettify.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"187990687","text":"from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.db.models import Q\nfrom django.shortcuts import render\nfrom django.views.generic import View\n\nfrom article.models import Article\n\n\nclass SearchView(View):\n\n def get(self, request, *args, **kwargs):\n query = request.GET.get('text')\n contacts = None\n if query:\n title_ = Q(title__icontains=query)\n content_ = Q(content__icontains=query)\n qs = Article.objects.select_related('category__parent', 'dj_ver', 'py_ver').filter(title_ | content_)\n paginator = Paginator(qs, 6)\n\n page = request.GET.get('page')\n try:\n contacts = paginator.page(page)\n except PageNotAnInteger:\n contacts = paginator.page(1)\n except EmptyPage:\n contacts = paginator.page(paginator.num_pages)\n context = {'object_list': contacts}\n return render(request, 'search/search_result.html', context)\n","sub_path":"search/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"654310747","text":"import datetime\nfrom ..utils import common, time_utils, translation\nfrom enum import Enum\nfrom dataclasses import dataclass\n\n_ = translation.translator_fx()\n\n\"\"\"Minimum allowed temperature.\"\"\"\nMIN_TEMPERATURE = 5.0\n\n\"\"\"Maximum allowed temperature.\"\"\"\nMAX_TEMPERATURE = 30.0\n\n\"\"\"Minimum allowed hysteresis.\"\"\"\nMIN_HYSTERESIS = 0.1\n\n\"\"\"Maximum allowed hysteresis.\"\"\"\nMAX_HYSTERESIS = 5.0\n\n\"\"\"Minimum allowed heating duration.\"\"\"\nMIN_HEATING_DURATION = datetime.timedelta(minutes=15)\n\n\"\"\"Maximum allowed heating duration.\"\"\"\nMAX_HEATING_DURATION = datetime.timedelta(hours=12)\n\n\nclass RequestType(Enum):\n \"\"\"Heating can be turned on via manual request or via a 
scheduled job.\"\"\"\n MANUAL = 1\n SCHEDULED = 2\n\n\n@dataclass\nclass HeatingTask(object):\n \"\"\"Encapsulates all information required to perform a heating task.\"\"\"\n request_type: RequestType\n user: str\n duration: datetime.timedelta = None\n temperature_target: float = None\n temperature_hysteresis: float = 0.5\n started_at: datetime.datetime = None\n ends_at: datetime.datetime = None\n\n @property\n def residual_heating_time(self):\n \"\"\"Returns None or datetime.timedelta\"\"\"\n if self.ends_at is None:\n return None\n now = time_utils.dt_now_utc()\n if self.ends_at > now:\n return self.ends_at - now\n return datetime.timedelta(days=0, hours=0, minutes=0, seconds=0)\n\n # @property\n # def is_timed(self):\n # \"\"\"Returns True if this task has a duration (and thus, an ending time).\"\"\"\n # return self.duration is not None\n\n @property\n def timed_out(self):\n \"\"\"\n Returns True if this task timed out (end time was reached). In all\n other cases, it returns False.\"\"\"\n if self.ends_at is None:\n return False\n return time_utils.dt_now_utc() >= self.ends_at\n\n def start(self):\n \"\"\"\n Marks this task as started and computes the ending time if we have\n a configured duration.\n \"\"\"\n self.started_at = time_utils.dt_now_utc()\n if self.duration is not None:\n self.ends_at = self.started_at + self.duration\n\n @property\n def needs_feedback_controller(self):\n return self.temperature_target is not None\n\n def validate(self):\n \"\"\"Performs a sanity check of this HeatingTask.\"\"\"\n # Check if target temperature is within allowed range\n if self.temperature_target is not None and \\\n (self.temperature_target < MIN_TEMPERATURE or self.temperature_target > MAX_TEMPERATURE):\n return common.make_response(False,\n message=_('Temperature must be between %(t_min).1f and %(t_max).1f.',\n t_min=MIN_TEMPERATURE,\n t_max=MAX_TEMPERATURE))\n # Check if hysteresis is within allowed range\n if self.temperature_hysteresis is not None and \\\n 
(self.temperature_hysteresis < MIN_HYSTERESIS or self.temperature_hysteresis > MAX_HYSTERESIS):\n return common.make_response(False,\n message=_('Hysteresis must be between %(h_min).1f and %(h_max).1f.',\n h_min=MIN_HYSTERESIS,\n h_max=MAX_HYSTERESIS))\n # Check if duration is within allowed range\n if self.duration is not None and \\\n (self.duration < MIN_HEATING_DURATION or self.duration > MAX_HEATING_DURATION):\n return common.make_response(False,\n message=_('Duration must be between %(d_min)s and %(d_max)s.',\n d_min=time_utils.format_timedelta(MIN_HEATING_DURATION),\n d_max=time_utils.format_timedelta(MAX_HEATING_DURATION)))\n # Check if a valid user name has been set\n if self.user is None or len(self.user.strip()) == 0:\n return common.make_response(False,\n message=_('User name cannot be empty.'))\n # A scheduled job must have a duration\n if self.request_type == RequestType.SCHEDULED and self.duration is None:\n return common.make_response(False,\n message=_('A scheduled heating job must have a duration.'))\n\n return common.make_response(True, message=_('Heating job is valid.'))\n","sub_path":"src/helheimr_heating/helheimr_heating/heating/heating_task.py","file_name":"heating_task.py","file_ext":"py","file_size_in_byte":4546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"563139582","text":"import os\nfrom django.shortcuts import render, reverse\nfrom django.http import FileResponse, Http404\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.conf import settings\nfrom xfzauth.decorators import xfz_login_required\nfrom utils import restful\nfrom .models import Payinfo, PayinfoOrder\n\n\ndef index(request):\n payinfos = Payinfo.objects.filter(is_deleted=False)\n context = {'payinfos': payinfos}\n return render(request, 'payinfo/payinfo.html', context)\n\n\n@xfz_login_required\ndef payinfo_order(request):\n payinfo_id = request.GET.get('payinfo_id')\n payinfo = 
Payinfo.objects.get(pk=payinfo_id)\n order = PayinfoOrder.objects.create(payinfo=payinfo, buyer=request.user,\n amount=payinfo.price)\n notify_url = request.build_absolute_uri(reverse('payinfo:notify_view'))\n return_url = request.build_absolute_uri(reverse('payinfo:index'))\n context = {\n 'goods': payinfo,\n 'order': order,\n 'notify_url': notify_url,\n 'return_url': return_url,\n }\n return render(request, 'course/course_order.html', context)\n\n\n@csrf_exempt\ndef notify_view(request):\n orderid = request.POST.get('orderid')\n PayinfoOrder.objects.filter(pk=orderid, is_deleted=False).update(status=2)\n return restful.ok()\n\n\n@xfz_login_required\ndef download(request):\n payinfo_id = request.GET.get('payinfo_id')\n order = PayinfoOrder.objects.filter(\n payinfo_id=payinfo_id, buyer=request.user, status=2,\n is_deleted=False).first()\n if order:\n path = order.payinfo.path\n fp = open(os.path.join(settings.MEDIA_ROOT, path), 'rb')\n response = FileResponse(fp)\n response['Content-Type'] = 'application/pdf'\n response['Content-Disposition'] =\\\n f'attachment;filename={path.split(\"/\")[-1]}'\n return response\n else:\n return Http404\n","sub_path":"apps/payinfo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"47390350","text":"\"\"\"Tools and arithmetics for monomials of distributed polynomials.\"\"\"\n\nimport collections\n\nfrom ..core import Mul, S, sympify\nfrom ..printing.defaults import DefaultPrinting\nfrom .polyutils import dict_from_expr\n\n\n__all__ = 'Monomial', 'itermonomials'\n\n\ndef itermonomials(variables, degree):\n r\"\"\"\n Generate a set of monomials of the given total degree or less.\n\n Examples\n ========\n\n >>> set(itermonomials([x, y], 2))\n {1, x, x**2, y, y**2, x*y}\n\n \"\"\"\n if not variables:\n yield S.One\n else:\n x, tail = variables[0], variables[1:]\n\n for i in range(degree + 1):\n for m in 
itermonomials(tail, degree - i):\n yield m * x**i\n\n\nclass Monomial(tuple, DefaultPrinting):\n \"\"\"Class representing a monomial, i.e. a product of powers.\"\"\"\n\n def __new__(cls, monom, gens=None):\n \"\"\"Create and return the Monomial object.\"\"\"\n if not isinstance(monom, collections.abc.Iterable):\n rep, gens = dict_from_expr(sympify(monom), gens=gens)\n if len(rep) == 1 and list(rep.values())[0] == 1:\n monom = list(rep)[0]\n else:\n raise ValueError(\"Expected a monomial got %s\" % monom)\n\n obj = super().__new__(cls, map(int, monom))\n obj.gens = gens\n\n return obj\n\n @classmethod\n def _get_val(cls, other):\n if not isinstance(other, cls):\n try:\n other = cls(other)\n except (ValueError, TypeError):\n return\n return other\n\n def as_expr(self, *gens):\n \"\"\"Convert a monomial instance to a Diofant expression.\"\"\"\n gens = gens or self.gens\n\n if not gens:\n raise ValueError(\"can't convert %s to an expression without generators\" % str(self))\n\n return Mul(*[gen**exp for gen, exp in zip(gens, self)])\n\n def __mul__(self, other):\n \"\"\"Return self*other.\"\"\"\n other = self._get_val(other)\n if other is not None:\n return self.__class__((a + b for a, b in zip(self, other)), self.gens)\n else:\n return NotImplemented\n\n def __truediv__(self, other):\n \"\"\"Return self/other.\"\"\"\n other = self._get_val(other)\n if other is not None:\n return self.__class__((a - b for a, b in zip(self, other)), self.gens)\n else:\n return NotImplemented\n\n def divides(self, other):\n \"\"\"Check if self divides other.\"\"\"\n other, orig = self._get_val(other), other\n if other is not None:\n return all(a <= b for a, b in zip(self, other))\n else:\n raise TypeError(\"an instance of Monomial expected, got %s\" % orig)\n\n def __pow__(self, other):\n \"\"\"Return pow(self, other).\"\"\"\n n = int(other)\n\n if n >= 0:\n return self.__class__((a * n for a in self), self.gens)\n else:\n raise ValueError(\"a non-negative integer expected, got %s\" % 
other)\n\n def gcd(self, other):\n \"\"\"Greatest common divisor of monomials.\"\"\"\n other, orig = self._get_val(other), other\n if other is not None:\n return self.__class__((min(a, b) for a, b in zip(self, other)), self.gens)\n else:\n raise TypeError(\"an instance of Monomial expected, got %s\" % orig)\n\n def lcm(self, other):\n \"\"\"Least common multiple of monomials.\"\"\"\n other, orig = self._get_val(other), other\n if other is not None:\n return self.__class__((max(a, b) for a, b in zip(self, other)), self.gens)\n else:\n raise TypeError(\"an instance of Monomial expected, got %s\" % orig)\n","sub_path":"diofant/polys/monomials.py","file_name":"monomials.py","file_ext":"py","file_size_in_byte":3635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"287187865","text":"from utilities.serializable import Serializable\n\n\nclass Photo(Serializable):\n\n valid_extensions = [\"jpg\", \"JPG\", \"png\", \"gif\"]\n\n def __init__(self, file, path, baseurl):\n self.file = file\n self.url = \"{0}/{1}/{2}\".format(baseurl, path, file)\n self.extension = file.split('.')[-1]\n\n def is_valid_photo(self):\n if self.extension in Photo.valid_extensions:\n return True","sub_path":"models/photo.py","file_name":"photo.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"195726970","text":"# def get_formated_name(first_name, last_name):\n# \"\"\"Return a full anme, neatly formated.\"\"\"\n# full_name = f\"{first_name} {last_name}\"\n# return full_name.title()\n\n# musician = get_formated_name('jimi', 'hendrix')\n# print(musician)\n\n\n### middle name as optional argument so parameter goes to last place \ndef get_formated_name(first_name, last_name, middle_name=''):\n \"\"\"Return a full anme, neatly formated.\"\"\"\n # python interprets non-empty string as true, so if we enter middle name it will be true, if not else is 
executed\n if middle_name:\n full_name = f\"{first_name} {middle_name} {last_name}\"\n else:\n full_name = f\"{first_name} {last_name}\"\n return full_name.title()\n\nmusician = get_formated_name('jimi', 'hendrix')\nprint(musician)\nmusician = get_formated_name('jimi', 'hendrix', 'lee')\nprint(musician)\n\n","sub_path":"chapter_8_functions/formated_name_return_value.py","file_name":"formated_name_return_value.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"562698703","text":"from selenium.webdriver.common.by import By\n\nfrom woniuboss.lib.woniuboss_gui.login.decrypt import Decrypt\nfrom woniuboss.lib.woniuboss_gui.student_manage.course_schedule import CourseSchedule\nimport time\nfrom woniuboss.tools.woniuboss_gui.service import Service\nimport unittest\nfrom woniuboss.tools.woniuboss_gui.utility import Utility\nfrom parameterized import parameterized\ntest_config_info = Utility.get_json('..\\\\..\\\\conf\\\\woniuboss_gui\\\\testdata_stu_manage.conf')\nquery_teacher_info = Utility.get_excel_to_tuple(test_config_info[19])\nadd_curriculum_info = Utility.get_excel_to_tuple(test_config_info[20])\nuser_info=Utility.get_excel_to_user(test_config_info[19])\nmodify_curriculum_info = Utility.get_excel_to_tuple(test_config_info[21])\n\nclass CourseScheduleTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.driver = Service.get_driver(user_info)\n cls.course_schedule = CourseSchedule(cls.driver)\n Service.miss_login(cls.driver, user_info)\n Decrypt(cls.driver).do_decrypt(user_info['secondpwd'])\n\n @classmethod\n def tearDownClass(cls):\n cls.driver.quit()\n\n @parameterized.expand(query_teacher_info)\n def test_a_query_info(self,teacher,expect):\n ele_text = self.course_schedule.do_query_teacher_info(teacher)\n sum_record = ele_text.split('总共 ')[1].split(' 条')[0]\n sql = 'select count(*) from curriculum_schedule where work_id = WNCD005'\n db_result = 
Utility.query_one(user_info,sql)\n if db_result[0] == int(sum_record):\n actual = 'query teacher schedule pass'\n else:\n actual = 'query teacher schedule fail'\n self.assertEqual(expect,actual)\n\n @parameterized.expand(add_curriculum_info)\n def test_b_add_course_schedule(cls,s_time,e_time,status1,classroom1,class1,orientation1,course1,\n status2,classroom2,class2,orientation2,course2,\n status3,classroom3,class3,orientation3,course3,\n status4,classroom4,class4,orientation4,course4,\n status5,classroom5,class5,orientation5,course5,\n status6,classroom6,class6,orientation6,course6,\n status7,classroom7,class7,orientation7,course7,\n status8,classroom8,class8,orientation8,course8,\n status9,classroom9,class9,orientation9,course9,expect):\n\n sql ='select count(*) from curriculum_schedule'\n\n db_result1 = Utility.query_one(user_info,sql)\n attr = cls.course_schedule.do_add_curriculum(s_time,e_time,status1,classroom1,class1,orientation1,course1,\n status2,classroom2,class2,orientation2,course2,\n status3,classroom3,class3,orientation3,course3,\n status4,classroom4,class4,orientation4,course4,\n status5,classroom5,class5,orientation5,course5,\n status6,classroom6,class6,orientation6,course6,\n status7,classroom7,class7,orientation7,course7,\n status8,classroom8,class8,orientation8,course8,\n status9,classroom9,class9,orientation9,course9)\n print(attr)\n db_result2 = Utility.query_one(user_info,sql)\n num = db_result2[0] - db_result1[0]\n time.sleep(2)\n if Service.is_element_present(cls.driver,By.CSS_SELECTOR,'body > div.bootbox.modal.fade.mydialog.in > div > '\n 'div > div.modal-footer > button'):\n actual = 'test add course schedule fail'\n cls.driver.refresh()\n else:\n if 'none' in attr and num ==9:\n actual = 'test add course schedule pass'\n\n else:\n actual = 'test add course schedule fail'\n cls.assertEqual(expect,actual)\n\n @parameterized.expand(modify_curriculum_info)\n def 
test_c_modify_course_schedule(cls,s_time,e_time,status,orientation,course,class_num,classroom,expect):\n attr = cls.course_schedule.do_modify_curriculum(s_time,e_time,status,orientation,course,class_num,classroom)\n time.sleep(2)\n if 'none' in attr:\n actual = 'test modify course schedule pass'\n else:\n actual = 'test modify course schedule fail'\n cls.assertEqual(expect,actual)","sub_path":"jichengdaima/woniuboss/bin/woniuboss_gui/student_manage_test/i_curriculum_manage_test.py","file_name":"i_curriculum_manage_test.py","file_ext":"py","file_size_in_byte":4425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"471385756","text":"import numpy as np\nfrom timemachine.integrator import langevin_coefficients\n\nclass System():\n\n def __init__(self, x0, v0, gradients, integrator):\n # fully contained class that allows simulations to be run forward\n # and backward\n self.x0 = x0\n self.v0 = v0\n self.gradients = gradients\n self.integrator = integrator\n\n\nclass Integrator():\n\n def __init__(self, steps, dt, temperature, friction, masses, lamb, seed):\n\n minimization_steps = 2000\n\n ca, cbs, ccs = langevin_coefficients(\n temperature,\n dt,\n friction,\n masses\n )\n\n complete_cas = np.ones(steps)*ca\n complete_dts = np.concatenate([\n np.linspace(0, dt, minimization_steps),\n np.ones(steps-minimization_steps)*dt\n ])\n\n self.dts = complete_dts\n self.cas = complete_cas\n self.cbs = -cbs\n self.ccs = ccs\n self.lambs = np.zeros(steps) + lamb\n self.seed = seed","sub_path":"fe/system.py","file_name":"system.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"176585992","text":"import gc\n\n\nclass X:\n def __del__(self):\n print(self, \"is dead.\")\n\n\ngc.disable() # 关闭 gc\n\na = X()\nb = X()\n\n# 构建循环引用\na.x = b\nb.x = a\n\ndel a\ndel b\n\n# 此时并未执行 __del__ ,说明清理机制失效\n\ngc.enable()\nprint(\"gc is 
enabled.\")\n\ngc.collect() # 主动回收\n","sub_path":"Python3学习笔记/GarbageCollect.py","file_name":"GarbageCollect.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"371014345","text":"\"\"\"\nCopyright ©2021. The Regents of the University of California (Regents). All Rights Reserved. Permission to use, copy, modify, and distribute this software and its documentation for educational, research, and not-for-profit purposes, without fee and without a signed licensing agreement, is hereby granted, provided that the above copyright notice, this paragraph and the following two paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-7201, otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.\n\nIN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nREGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED HEREUNDER IS PROVIDED \"AS IS\". REGENTS HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.\n\nScript for generating FTP data quality plots. This script is not related to the HOV-analysis project. 
Written by the Connected Corridor staff\n\"\"\"\nimport sys\nimport boto3\nfrom io import BytesIO\nimport gzip\nimport matplotlib as mpl\nfrom matplotlib import pyplot\nimport numpy as np\nimport pandas as pd\nimport os\n\nend_date = input(\"Enter the end date (example: 2020-10-31): \")\nnum_days = 7\n\nbdates = pd.date_range(end=end_date, periods=num_days).strftime('%Y%m%d')\nbdates = [i for i in bdates]\naccess_key_id = os.environ.get('AWS_ACCESS_KEY_ID', None)\nsecret_access_key = os.environ.get('AWS_SECRET_ACCESS_KEY', None)\n\n# create output directory\noutdir = \"results_data_quality/\"\nif not os.path.exists(outdir):\n os.makedirs(outdir)\n\ndef unzipper(date, time):\n # print(date+'/30sec_'+date+time+'.txt.gz')\n obj = s3res.Object('pems-raw-30sec-data',\n date + '/30sec_' + date + time + '.txt.gz')\n n = obj.get()['Body'].read()\n gzipfile = BytesIO(n)\n gzipfile = gzip.GzipFile(fileobj=gzipfile)\n content = gzipfile.read()\n return content\n\n\ndef makedict(data):\n sp = data.decode().split('\\n')\n name = sp[0]\n empty = {}\n for i in range(1, len(sp)):\n newsplit = sp[i].split(',')\n if newsplit[0] in combinedset:\n ind = newsplit[0]\n val = newsplit[1:]\n empty[ind] = val\n return [name, empty]\n\n\ndef makelist(data):\n sp = data.decode().split('\\n')\n return np.array([i[0:6] for i in sp[1:]])\n\n\ndef sectionalizer(data, mint):\n points = mint * 2\n sectional = []\n for sensor in data:\n vdsarray = np.array([])\n for i in range(len(times) // (points)):\n try:\n if 1 in sensor[i * points:(i + 1) * points]:\n vdsarray = np.append(vdsarray, 1)\n else:\n vdsarray = np.append(vdsarray, 0)\n except:\n if 1 in sensor[i * points:]:\n vdsarray = np.append(vdsarray, 1)\n else:\n vdsarray = np.append(vdsarray, 0)\n if np.array_equal(sectional, []):\n sectional = np.array([vdsarray])\n else:\n sectional = np.append(sectional, [vdsarray], axis=0)\n\n sectionaltimes = len(sectional[0])\n sectionalsum = 0\n for sublist in sectional:\n sectionalsum += 
sum(sublist)\n sectionalmissingpercent = (1 - sectionalsum / (\n (len(combinedset) * sectionaltimes))) * 100\n\n def sectionalchart(data, periods, filename):\n cmap = mpl.colors.ListedColormap(['red', 'green'])\n bounds = [-1, 0.5, 2]\n norm = mpl.colors.BoundaryNorm(bounds, cmap.N)\n pyplot.figure(figsize=(15, 5))\n pyplot.imshow(data, interpolation='nearest', cmap=cmap, norm=norm,\n aspect='auto')\n pyplot.xticks(range(0, 2880 // periods, 120 // periods),\n [str(x) + ':00' for x in range(0, 24)])\n pyplot.yticks(\n [len(twooneoh) - 1, +len(twooneoh) + len(onethreefour) - 1,\n len(twooneoh) + len(onethreefour) + len(sixohfive) - 1],\n ['210', '134', '605'])\n pyplot.suptitle(str(mint) + '-minute VDS availability for ' + bdate[\n 0:4] + '-' + bdate[\n 4:6] + '-' + bdate[\n 6:8])\n pyplot.title('Missing Percent: ' + str(sectionalmissingpercent))\n pyplot.xlabel('Time (PST)')\n pyplot.ylabel('VDS grouped by highway')\n # img.set_size_inches(10,5)\n pyplot.savefig(filename)\n\n sectionalchart(sectional, points,\n \"results_data_quality/\" + bdate + '_' + str(mint) + 'min_missing_chart.png')\n\n\ndef createchart(data, filename):\n cmap = mpl.colors.ListedColormap(['red', 'green'])\n bounds = [-1, 0.5, 2]\n norm = mpl.colors.BoundaryNorm(bounds, cmap.N)\n pyplot.figure(figsize=(15, 5))\n pyplot.imshow(data, interpolation='nearest', cmap=cmap, norm=norm,\n aspect='auto')\n pyplot.xticks(range(0, 2880, 120), [str(x) + ':00' for x in range(0, 24)])\n pyplot.yticks([len(twooneoh) - 1, +len(twooneoh) + len(onethreefour) - 1,\n len(twooneoh) + len(onethreefour) + len(sixohfive) - 1],\n ['210', '134', '605'])\n pyplot.suptitle(\n 'VDS availability for ' + bdate[0:4] + '-' + bdate[4:6] + '-' + bdate[\n 6:8])\n pyplot.title('Missing Percent: ' + str(missingpercent))\n pyplot.xlabel('Time (PST)')\n pyplot.ylabel('VDS grouped by highway')\n # img.set_size_inches(10,5)\n pyplot.savefig(filename)\n\n\nget_last_modified = lambda obj: int(obj['LastModified'].timestamp())\ns3 = 
boto3.client('s3')\ns3res = boto3.resource('s3',\n aws_access_key_id=access_key_id,\n aws_secret_access_key=secret_access_key)\nobjs = s3.list_objects_v2(Bucket='vds-sensor-codes-bucket')['Contents']\nlast_added = [obj['Key'] for obj in sorted(objs, key=get_last_modified)][-1]\ns3_clientobj = s3.get_object(Bucket='vds-sensor-codes-bucket', Key=last_added)\ndictionaryfromVDS = eval(s3_clientobj['Body'].read().decode('utf-8'))\n\ntwooneoh = np.array(list(map(str, dictionaryfromVDS['210'])))\nonethreefour = np.array(list(map(str, dictionaryfromVDS['134'])))\nsixohfive = np.array(list(map(str, dictionaryfromVDS['605'])))\ncombined = np.concatenate((twooneoh, onethreefour, sixohfive))\n\ntwooneohset = set(twooneoh)\nonethreefourset = set(onethreefour)\nsixohfiveset = set(sixohfive)\ncombinedset = set(combined)\n\n# tenset=set(list(map(str,dictionaryfromVDS['10'])))\n\n# bdate = str(sys.argv[1])\n\n\nfor bdate in bdates:\n try:\n minutes = int(sys.argv[2])\n except:\n minutes = 5\n\n times = []\n for i in np.array(\n ['00', '01', '02', '03', '04', '05', '06', '07', '08', '09'] + list(\n map(str, range(10, 24)))):\n for j in np.array(\n ['00', '01', '02', '03', '04', '05', '06', '07', '08', '09'] + list(\n map(str, range(10, 60)))):\n for k in np.array(['00', '30']):\n times = np.append(times, i + j + k)\n\n missing = 0\n missingfiles = 0\n\n twooneohmissing = 0\n onethreefourmissing = 0\n sixohfivemissing = 0\n # tenmissing=0\n\n missinglist = []\n\n visual = np.array([combined])\n\n for time in times:\n try:\n data = unzipper(bdate, time)\n except:\n print('missingfile')\n missingfiles += 1\n missing = missing + len(combinedset)\n twooneohmissing = twooneohmissing + len(twooneohset)\n onethreefourmissing = onethreefourmissing + len(onethreefourset)\n sixohfivemissing = sixohfivemissing + len(sixohfiveset)\n\n visual = np.append(visual, [[False] * len(combined)], axis=0)\n continue\n\n listdata = makelist(data)\n smallset = set(listdata)\n visual = np.append(visual, [[x 
in listdata for x in combined]], axis=0)\n\n diff = combinedset.difference(smallset)\n twooneohdiff = twooneohset.difference(smallset)\n onethreefourdiff = onethreefourset.difference(smallset)\n sixohfivediff = sixohfiveset.difference(smallset)\n # tendiff=tenset.difference(smallset)\n\n missinglist = np.append(missinglist, list(diff))\n\n missing = missing + len(diff)\n twooneohmissing = twooneohmissing + len(twooneohdiff)\n onethreefourmissing = onethreefourmissing + len(onethreefourdiff)\n sixohfivemissing = sixohfivemissing + len(sixohfivediff)\n # tenmissing=tenmissing+len(tendiff)\n\n missingcounts = {}\n for i in missinglist:\n if not i in missingcounts:\n missingcounts[i] = 1\n else:\n missingcounts[i] += 1\n\n missingpercent = missing / (len(combinedset) * len(times)) * 100\n twooneohpercent = twooneohmissing / (len(twooneohset) * len(times)) * 100\n onethreefourpercent = onethreefourmissing / (\n len(onethreefourset) * len(times)) * 100\n sixohfivepercent = sixohfivemissing / (len(sixohfiveset) * len(times)) * 100\n # tenpercent=tenmissing/(len(tenset)*len(times))*100\n\n\n print(\"210-MissingPercent: \" + str(twooneohpercent))\n print(\"134-MissingPercent: \" + str(onethreefourpercent))\n print(\"605-MissingPercent: \" + str(sixohfivepercent))\n # print(\"TEN-MissingPercent: \"+str(tenpercent))\n print(\"All-MissingPercent: \" + str(missingpercent))\n print(\"MissingFiles: \" + str(missingfiles))\n print(\"MissingCounts: \" + str(missingcounts))\n\n visual2 = 1 * (visual[1:] == 'True').T\n visualforcsv = np.insert(visual.T, 0,\n np.append(['VDS'], times[0:visual.T.shape[1] - 1]),\n axis=0)\n np.savetxt(bdate + '_missing.csv', visualforcsv, delimiter=',', header=bdate,\n comments='', fmt='%s')\n\n print(visual2)\n\n createchart(visual2, \"results_data_quality/\" + bdate + '_missing_chart.png')\n sectionalizer(visual2, 1)\n sectionalizer(visual2, 2)\n sectionalizer(visual2, 3)\n sectionalizer(visual2, 4)\n sectionalizer(visual2, 5)\n 
pyplot.close()\n\n\n\n","sub_path":"hov_degradation/preprocess/data_quality.py","file_name":"data_quality.py","file_ext":"py","file_size_in_byte":10671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"332141582","text":"import pymorphy2\nimport re\nimport operator\n#from sortedcontainers import SortedDict\n\n#x = {1: 2, 3: 4, 4: 3, 2: 1, 0: 0}\n#sorted_x = sorted(x.items(), key=operator.itemgetter(1))\n\nmorph = pymorphy2.MorphAnalyzer()\nv = []\n\nd = {}\nlast_d = {}\nmy_file = open(\"all_of_news.txt\", \"r\")\ndata = my_file.readlines() # read ALL the lines!\nprint(data[0])\nmy_file.close()\n\nfor j in range(len(data)):\n str1 = data[j].split()\n for i in str1: #data[0]:\n result = i\n if i[0].isupper():\n p = morph.parse(i)[0]\n l = p.tag.POS\n p = p.normal_form\n result = re.sub(r',' , '', p)\n result = re.sub(r'»', '', result)\n result = re.sub(r'«', '', result)\n #result = re.sub(r'.', '', result)\n\n if ((l == 'NOUN') or (l == None)):\n #if ((l == 'VERB') or (l == None) or (l == 'INFN') or (l == 'PRTS') or (l == 'PRTF') or (l == 'GRND')):\n #if (l == 'ADJF' or l == 'ADJS' or l == 'ADVB' or l == None):'''\n if d.get(result) == None:\n d[result] = 1\n else:\n d[result] += 1\n\n print(j, '\\n')\n\nfor w in sorted(d, key=d.get, reverse=True):\n #print(w, ' ', d[w])\n if (d[w] >= 10):\n last_d[w] = d[w]\n\nnew_file = open(\"frequences_all_names.txt\", \"a\")\nfor w in sorted(last_d, key=d.get, reverse=True):\n print(w, ' ', d[w])\n new_file.write(w)\n new_file.write(' ')\n new_file.write(str(d[w]))\n new_file.write('\\n')\n","sub_path":"frequencies_names.py","file_name":"frequencies_names.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"63506222","text":"def my_func(a :int, b :int) -> int:\n print(a)\n return a + b \n\nc = my_func(\"asdas\",\"asdas\")\nprint(c)\n\ndef myUtf(a = 1 ,b = 1):\n print(\"Default 
value func\",a,b)\n\nmyUtf()\nmyUtf(10,20)\nmyUtf(b = 10)\n\n\n","sub_path":"task10.py","file_name":"task10.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"617042867","text":"from setuptools import setup, find_packages\nimport os\n\ndef read(*rnames):\n return open(os.path.join(os.path.dirname(__file__), *rnames)).read()\n\nversion = '0.1'\n\nlong_description = (\n read('README.txt')\n + '\\n' +\n read('CHANGES.txt')\n + '\\n' +\n 'Download\\n'\n '========\\n'\n )\n\ntests_require=['zope.testing']\n\n\nsetup(name='plonesocial.twitter.anywhere',\n version=version,\n description=\"Twitter @Anywhere integration to Plone\",\n long_description=long_description,\n # Get more strings from http://www.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Web Environment',\n 'Framework :: Plone',\n 'Framework :: Zope2',\n 'Intended Audience :: Developers',\n 'Intended Audience :: End Users/Desktop',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: Site Management',\n ],\n keywords='plone plonesocial twitter anywhere socialnetworks socialweb web tweets follow cms',\n author='Christian Scholz',\n author_email='cs@comlounge.net',\n url='http://comlounge.net/anywhere',\n license='MIT',\n packages=find_packages(exclude=['ez_setup']),\n namespace_packages=['plonesocial', 'plonesocial.twitter'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'setuptools',\n 'simplejson',\n 'uuid',\n # -*- Extra requirements: -*-\n ],\n entry_points=\"\"\"\n [distutils.setup_keywords]\n paster_plugins = setuptools.dist:assert_string_list\n\n [egg_info.writers]\n paster_plugins.txt = 
setuptools.command.egg_info:write_arg\n\n [z3c.autoinclude.plugin]\n target = plone\n \"\"\",\n paster_plugins = [\"ZopeSkel\"],\n )\n","sub_path":"pypi_install_script/plonesocial.twitter.anywhere-0.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"550484378","text":"import argparse\nfrom tasks import Tasks\nfrom meetings import Meetings\n\ndef tasks(parsed_args):\n Tasks().taskoverdue()\n Tasks().taskthreedays()\n Tasks().taskonday()\n\ndef meeting_threedays(parsed_args):\n Meetings().meetingsthreedays()\n\ndef meeting_today(parsed_args):\n Meetings().meetingstoday()\n\nparser=argparse.ArgumentParser()\n\nparser.add_argument('--tasks',\n dest='action',\n action='store_const',\n const=tasks,\n help='reminder for tasks')\n\nparser.add_argument('--meetings-threedays',\n dest='action',\n action='store_const',\n const=meeting_threedays,\n help='reminder for meetings into three days')\n\nparser.add_argument('--meetings-today',\n dest='action',\n action='store_const',\n const=meeting_today,\n help= 'reminder for meetings today')\n\nparsed_args = parser.parse_args()\n\nif parsed_args.action is None:\n parser.parse_args(['-h'])\nparsed_args.action(parsed_args)\n","sub_path":"reminder/smw-reminder.py","file_name":"smw-reminder.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"316705105","text":"from django.contrib import admin\nfrom .models import ScholarCycle, Alumni\n\n# Register your models here.\n\n\nclass AlumniAdmin(admin.ModelAdmin):\n readonly_fields = ('cycle', 'folio', 'register_date', 'modified_date', )\n fieldsets = (\n\n ('Alumno', {\n 'fields': (\n 'agreement',\n ('child_name', 'child_familynames'),\n 'child_dob',\n 'child_curp_id',\n 'child_gender',\n 'child_schedule',\n 'child_group',\n 'child_blood_type',\n 'child_photo',\n 
'child_observations',\n 'emergency_telephone_number',\n 'emergency_contact',\n 'doctor',\n 'hospital')}),\n\n ('Padres', {\n 'classes': ('collapse',),\n 'fields': (\n 'house_telephone',\n ('street_name', 'street_exterior_number', 'street_interior_number'),\n 'area_colonia',\n 'zip_code',\n 'description',\n 'mapa',\n ('father_name', 'father_familynames'),\n ('father_telephone_work', 'father_telephone_extension'),\n 'father_cell_phone',\n 'father_email',\n 'father_occupation',\n 'father_photo',\n ('father_ife_id', 'father_ife_file'),\n 'father_curp_id',\n ('mother_name', 'mother_familynames'),\n ('mother_telephone_work', 'mother_telephone_extension'),\n 'mother_cell_phone',\n 'mother_email',\n 'mother_occupation',\n 'mother_photo',\n ('mother_ife_id', 'mother_ife_file'),\n 'mother_curp_id')}),\n\n ('Persona Autorizada', {\n 'classes': ('collapse',),\n 'fields': (\n ('authorized1_name', 'authorized1_familynames'),\n 'authorized1_parentezco',\n 'authorized1_cell_phone',\n 'authorized1_telephone',\n ('authorized2_name', 'authorized2_familynames'),\n 'authorized2_parentezco',\n 'authorized2_cell_phone',\n 'authorized2_telephone')}),\n\n ('Pagos y Convenios', {\n 'classes': ('collapse',),\n 'fields': (\n 'august_payed',\n 'august_annotations',\n 'september_payed',\n 'september_annotations',\n 'october_payed',\n 'october_annotations',\n 'november_payed',\n 'november_annotations',\n 'december_payed',\n 'december_annotations',\n 'january_payed',\n 'january_annotations',\n 'february_payed',\n 'february_annotations',\n 'march_payed',\n 'march_annotations',\n 'april_payed',\n 'april_annotations',\n 'may_payed',\n 'may_annotations',\n 'june_payed',\n 'june_annotations',\n ('register_payed', 'register_notes'),\n ('insurance_payed', 'insurance_notes'),\n ('uniform_payed', 'uniform_notes'),\n ('supplies_payed', 'supplies_notes'),\n ('events_payed', 'events_notes'),\n ('otros_payed', 'otros_notes'))}),\n\n ('Documentos', {\n 'classes': ('collapse',),\n 'fields': (\n 
'birth_certificate',\n 'immunization_card',\n 'curp_card')}))\n\n\nadmin.site.register(ScholarCycle)\nadmin.site.register(Alumni, AlumniAdmin)\n","sub_path":"alumni/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":3740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"150641837","text":"from person import Person, Manager\n\nbob = Person('bob')\nsue = Person('Sue Allen Miskeee', job='dev', pay=1000000)\nbill = Manager('Bill McNeal', 50000)\narth = Person('Art Vandalay', 'Architect', 5000000)\n\nimport shelve\ndb = shelve.open('persondb')\nfor obj in (bob, sue, bill, arth):\n\tdb[obj.name] = obj\ndb.close()\n","sub_path":"OO/makedb.py","file_name":"makedb.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"334245661","text":"# Zmodyfikować kod z zadania poprzedniego w taki sposób, aby komunikaty wyświetlane na ekranie utworzyć metodą interpolacji stringów\n\nfirst_name = 'Grzegorz'\nlast_name = 'Brzeczeszczykiewicz'\nbalance = 20000.5\n\nname = f'{first_name} {last_name}'\n\nmessage = f'Witaj, {name}. Twój stan konta to {balance} PLN w zmiennej message.'\n\nprint(message)\n","sub_path":"lab1/live_coding/lc_2.py","file_name":"lc_2.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"433979856","text":"from django.conf.urls import url\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.index),\n url(r'^register$', views.register),\n url(r'^success$', views.success),\n url(r'^login$', views.login),\n url(r'^logout$', views.logout),\n url(r'^post_secret$', views.post_secret),\n url(r'^like/(?P\\d+)$', views.like),\n url(r'^del_like/(?P\\d+)$', views.del_like),\n url(r'^most_liked$', views.most_liked),\n url(r'^.*$', views.nonsense)\n]\n","sub_path":"Python/Django/Django_Level_2/dojo_secrets/apps/secrets/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"315946616","text":"import os\nimport re\nimport sys\nimport glob\nimport tqdm\nimport imageio\nimport functools\nimport argparse\nimport numpy as np\nimport tensorflow as tf\n\nfrom PIL import Image, ImageEnhance, ImageFilter\n\nsys.path.extend([\"./lmnet\", \"/dlk/python/dlk\"])\n\nimport matplotlib.pyplot as plt\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--target\", type=str, required=True)\nparser.add_argument(\"--load_dir\", type=str,\n default=\"./dataset/\")\nparser.add_argument(\"--save_dir\", type=str,\n default=\"./dataset/\")\nparser.add_argument(\"--max_size\", type=int, default=None)\nparser.add_argument(\"--debug\", action=\"store_true\")\nargs = parser.parse_args()\n\n\ndef _open_image_file(file_name, dtype=np.uint8):\n return np.array(Image.open(file_name), dtype=dtype)\n\n\n@functools.lru_cache(maxsize=None)\ndef _open_flo_file(file_name, dtype=np.float32):\n with open(file_name, \"rb\") as f:\n magic = np.fromfile(f, np.float32, count=1)\n assert 202021.25 == magic, \\\n \"Magic number incorrect. 
Invalid .flo file\"\n width = np.fromfile(f, np.int32, count=1)[0]\n height = np.fromfile(f, np.int32, count=1)[0]\n data = np.fromfile(f, dtype, count=2 * width * height)\n return np.resize(data, (height, width, 2))\n\n\ndef _open_pfm_file(file_name, dtype=np.float32):\n color, width, height, scale, endian = None, None, None, None, None\n with open(file_name, \"rb\") as f:\n # loading header information\n header = f.readline().rstrip().decode(\"utf-8\")\n assert header == \"PF\" or header == \"Pf\", \"Not a PFM file.\"\n color = (header == \"PF\")\n\n # loading wdth and height information\n dim_match = re.match(r\"^(\\d+)\\s(\\d+)\\s$\", f.readline().decode(\"utf-8\"))\n assert dim_match is not None, \"Malformed PFM header.\"\n width, height = map(int, dim_match.groups())\n\n scale = float(f.readline().rstrip().decode(\"utf-8\"))\n if scale < 0:\n endian = \"<\"\n scale = -scale\n else:\n endian = \">\"\n data = np.fromfile(f, endian + \"f\").astype(dtype)\n shape = (height, width, 3) if color else (height, width)\n return np.reshape(data, shape)[..., :2]\n\n\ndef create_tf_exapmle(image_a_path, image_b_path, flow_path):\n image_a = imageio.imread(image_a_path).astype(np.float32)\n image_b = imageio.imread(image_b_path).astype(np.float32)\n image_a = image_a[..., [2, 1, 0]]\n image_b = image_b[..., [2, 1, 0]]\n image_a = image_a / 255.0\n image_b = image_b / 255.0\n root_dir, ext = os.path.splitext(flow_path)\n if ext == \".flo\":\n flow = open_flo_file(flow_path)\n elif ext == \".pfm\":\n flow = open_pfm_file(flow_path)\n if args.debug:\n pil_image = Image.fromarray(np.uint8(image))\n enhancer = ImageEnhance.Brightness(pil_image)\n processed_image = enhancer.enhance(factor)\n return np.array(processed_image)\n\n print(flow[..., 0].min(), flow[..., 0].max())\n print(flow[..., 1].min(), flow[..., 1].max())\n plt.figure(1)\n plt.subplot(2, 2, 1)\n plt.imshow(flow[..., 0], vmin=-30, vmax=30)\n plt.subplot(2, 2, 2)\n plt.imshow(flow[..., 1], vmin=-30, vmax=30)\n 
plt.subplot(2, 2, 3)\n plt.imshow(image_a)\n plt.subplot(2, 2, 4)\n plt.imshow(image_b)\n plt.show()\n\n def bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n return tf.train.Example(features=tf.train.Features(feature={\n \"image_a\": bytes_feature(image_a.tobytes()),\n \"image_b\": bytes_feature(image_b.tobytes()),\n \"flow\": bytes_feature(flow.tobytes())\n })\n )\n\n\ndef write_tfrecord(file_list, save_name):\n write_opts = tf.python_io.TFRecordOptions(\n tf.python_io.TFRecordCompressionType.ZLIB)\n with tf.python_io.TFRecordWriter(save_name, options=write_opts) as writer:\n # with tf.python_io.TFRecordWriter(save_name) as writer:\n pbar = tqdm.tqdm(file_list[:args.max_size])\n for image_a_path, image_b_path, flow_path in pbar:\n pbar.set_description(\"loading {}\".format(\n os.path.basename(flow_path)))\n ex = create_tf_exapmle(image_a_path, image_b_path, flow_path)\n # print(ex)\n writer.write(ex.SerializeToString())\n\n\ndef build_flying_chairs():\n data_dir = \"{}/FlyingChairs\".format(args.load_dir)\n train_val_split = np.loadtxt(\n \"{}/FlyingChairs_train_val.txt\".format(data_dir))\n train_idxs = np.flatnonzero(train_val_split == 1)\n valid_idxs = np.flatnonzero(train_val_split == 2)\n\n training_list = []\n for index in train_idxs:\n prefix = \"{}/data/{:05d}\".format(data_dir, index + 1)\n image_a_path = \"{}_img1.ppm\".format(prefix)\n image_b_path = \"{}_img2.ppm\".format(prefix)\n flow_path = \"{}_flow.flo\".format(prefix)\n training_list.append([image_a_path, image_b_path, flow_path])\n save_name = \"{}/train.tfrecord\".format(data_dir)\n write_tfrecord(training_list, save_name)\n\n validation_list = []\n for index in valid_idxs:\n prefix = \"{}/data/{:05d}\".format(data_dir, index + 1)\n image_a_path = \"{}_img1.ppm\".format(prefix)\n image_b_path = \"{}_img2.ppm\".format(prefix)\n flow_path = \"{}_flow.flo\".format(prefix)\n validation_list.append([image_a_path, image_b_path, flow_path])\n save_name 
= \"{}/valid.tfrecord\".format(data_dir)\n write_tfrecord(validation_list, save_name)\n\n\ndef build_chairs_sdhom():\n data_dir = \"{}/ChairsSDHom/\".format(args.load_dir)\n\n image_a_list = sorted(glob.glob(\"{}/data/train/t0/*.png\".format(data_dir)))\n image_b_list = sorted(glob.glob(\"{}/data/train/t1/*.png\".format(data_dir)))\n flow_list = sorted(glob.glob(\"{}/data/train/flow/*.pfm\".format(data_dir)))\n training_list = list(zip(image_a_list, image_b_list, flow_list))\n save_name = \"{}/train.tfrecord\".format(data_dir)\n # print(training_list)\n write_tfrecord(training_list, save_name)\n\n image_a_list = sorted(glob.glob(\"{}/data/test/t0/*.png\".format(data_dir)))\n image_b_list = sorted(glob.glob(\"{}/data/test/t1/*.png\".format(data_dir)))\n flow_list = sorted(glob.glob(\"{}/data/test/flow/*.pfm\".format(data_dir)))\n validation_list = list(zip(image_a_list, image_b_list, flow_list))\n # print(validation_list)\n save_name = \"{}/test.tfrecord\".format(data_dir)\n write_tfrecord(validation_list, save_name)\n\n\nif __name__ == \"__main__\":\n os.makedirs(args.save_dir, exist_ok=True)\n eval(\"build_{}\".format(args.target))()\n","sub_path":"script/dataset_debug/builder.py","file_name":"builder.py","file_ext":"py","file_size_in_byte":6562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"173604772","text":"import requests\nfrom bs4 import BeautifulSoup\n\nclass Imformation:\n def __init__(self, subject, date):\n self.subject = subject\n self.date = date\n def __str__(self):\n return \"주제: \" + self.subject + \" 는 \" + self.date +\"까지입니다.\"\ndef get_subjects():\n subjects = []\n req = requests.get(\"https://uwins.ulsan.ac.kr/JBGJ/A/MIA01S.aspx?MenuID=MIA01S!1\")\n html = req.text\n soup = BeautifulSoup(html, 'html.parser')\n divs = soup.findAll('td', {\"class\": \"textWrap Left\"})\n for div in divs:\n links = div.findAll('a')\n for link in links:\n subject = link.text\n subjects.append(subject)\n return 
subjects\nsubjects = get_subjects()\nprint(subjects)\nconversations = []\ni = 1\nfor sub in subjects:\n print('(', i, '/', len(subjects), ')', sub)\n req = requests.get('https://uwins.ulsan.ac.kr/JBGJ/A/MIA02S.aspx?MenuID=MIA01S!1&seq=7770&keyfield=1&keyword=¤t_page=1')\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"504511572","text":"import argparse\nimport os\nimport tensorflow as tf # Deep Learning library\nimport numpy as np # Handle matrices\nfrom builtins import input\n\nfrom trainer.helpers import discount_rewards, prepro\nfrom agents.tools.wrappers import AutoReset, FrameHistory\nfrom collections import deque# Ordered collection with ends\nfrom trainer.game import Game\nimport random\n\nimport matplotlib.pyplot as plt # Display graphs\n\nimport gym #can remove (with call to gym)\n\n# Game H\nACTIONS = [\"right\", \"down\", \"left\", \"up\", \"toggle\"] # (CONVERTION TO ONE HOT)\nR = 6 #200 # WONT WORK ON CNN! if < 8\nC = 7 #250\nOBSERVATION_DIM = R * C * 2 + 5 # give ingredient map (RxC), map of slices (RxC) ....cursor position (2x1) x slice_mode (1) and your constraints L, H (2)\n\nMEMORY_CAPACITY = 100000 #(OK)\nROLLOUT_SIZE = 50 #10000 #(OK)\n\n# MEMORY stores tuples:\n# (observation, label, reward)\nMEMORY = deque([], maxlen=MEMORY_CAPACITY) #NEEDS TO BE CHECKED!!\n\n\n### MODEL HYPERPARAMETERS\n#state_size = [110, 84, 4] # NEEDS TO BE CHECKED !!! Our input is a stack of 4 frames hence 110x84x4 (Width, height, channels)\nstate_size = [OBSERVATION_DIM] #[R, C, 2] # NEEDS TO BE CHECKED !!! ([1, OBSERVATION_DIM]) ? 
[None, OBSERVATION_DIM]\naction_size = len(ACTIONS) #(OK) 5 possible actions\nlearning_rate = 0.00025 #(OK) Alpha (aka learning rate)\n\n### TRAINING HYPERPARAMETERS\ntotal_episodes = 6000 #(OK) Total episodes for training (6000 EPOCHS)\nmax_steps = 50 #(OK) Max possible steps in an episode [ROLLOUT_SIZE]\nbatch_size = 64 #instead of 64 #10000 # NEEDS TO BE CHECKED !!! Batch size 64?\n\n# Exploration parameters for epsilon greedy strategy\nexplore_start = 1.0 # (OK)exploration probability at start\nexplore_stop = 0.01 # (OK)minimum exploration probability\ndecay_rate = 0.00001 # (OK)exponential decay rate for exploration prob\n\n# Q learning hyperparameters\ngamma = 0.9 # (OK)Discounting rate\n\n### MEMORY HYPERPARAMETERS\npretrain_length = batch_size # NEEDS TO BE CHECKED !!! Number of experiences stored in the Memory when initialized for the first time\nmemory_size = MEMORY_CAPACITY # (OK)Number of experiences the Memory can keep [MEMORY_CAPACITY]\n\n### PREPROCESSING HYPERPARAMETERS\n#stack_size = 4 # NEEDS TO BE CHECKED Number of frames stacked\n\n### MODIFY THIS TO FALSE IF YOU JUST WANT TO SEE THE TRAINED AGENT\ntraining = True\n\n## TURN THIS TO TRUE IF YOU WANT TO RENDER THE ENVIRONMENT\nepisode_render = True #(OK) use args.render (default=False)\n\ndef preprocess(state_dict):\n state = np.concatenate((\n np.array(state_dict['ingredients_map']).ravel(),\n np.array(state_dict['slices_map']).ravel(),\n np.array(state_dict['cursor_position']).ravel(),\n [state_dict['slice_mode'],\n state_dict['min_each_ingredient_per_slice'],\n state_dict['max_ingredients_per_slice']],\n ))\n return state.astype(np.float).ravel()\n\nclass DQNetwork:\n def __init__(self, state_size, action_size, learning_rate, name='DQNetwork'):\n self.state_size = state_size #(OK)\n self.action_size = action_size #(OK)\n self.learning_rate = learning_rate #(OK) args.\n\n with tf.variable_scope(name):\n # We create the placeholders\n # *state_size means that we take each elements of state_size 
in tuple hence is like if we wrote\n \t### Replacing [None, *state_size] by [1, batch_size, *state_size] NOPE needs [None, *state_size for predict_action (1 value)]\n self.inputs_ = tf.placeholder(tf.float32, [None, *state_size], name=\"inputs\")\n #self.inputs_ = tf.placeholder(tf.float32, [1, batch_size, *state_size], name=\"inputs\")\n self.actions_ = tf.placeholder(tf.float32, [None, self.action_size], name=\"actions_\")\n\n # Remember that target_Q is the R(s,a) + ymax Qhat(s', a')\n self.target_Q = tf.placeholder(tf.float32, [None], name=\"target\")\n\n\n ### INIT self.flatten to our flatten state!!! (no CNN for now)\n self.flatten = self.inputs_\n print(\"==========Yo==========\")\n print(self.flatten)\n\n # append 5 node features at the end (cursor 2x1, L, H)\n self.fc = tf.layers.dense(inputs = self.flatten,\n units = 512,\n activation = tf.nn.elu,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n name=\"fc1\")\n\n self.output = tf.layers.dense(inputs = self.fc,\n kernel_initializer=tf.contrib.layers.xavier_initializer(),\n units = self.action_size,\n activation=None)\n\n\n\n # Q is our predicted Q value.\n self.Q = tf.reduce_sum(tf.multiply(self.output, self.actions_))\n\n # The loss is the difference between our predicted Q_values and the Q_target\n # Sum(Qtarget - Q)^2\n self.loss = tf.reduce_mean(tf.square(self.target_Q - self.Q))\n\n self.optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)\n\n\n# Reset the graph\ntf.reset_default_graph()\n\n# Instantiate the DQNetwork\nDQNetwork = DQNetwork(state_size, action_size, learning_rate)\n\n\n\nclass Memory():\n def __init__(self, max_size):\n self.buffer = deque(maxlen = max_size)\n #MEMORY = deque([], maxlen=MEMORY_CAPACITY) #NEEDS TO BE CHECKED!!\n #MEMORY is buffer....\n def add(self, experience):\n self.buffer.append(experience)\n\n def sample(self, batch_size):\n buffer_size = len(self.buffer)\n index = np.random.choice(np.arange(buffer_size),\n size = batch_size,\n 
replace = False)\n\n return [self.buffer[i] for i in index] # similar to the generator....\n\n\n\n# Instantiate memory\nmemory = Memory(max_size = memory_size)\nfor i in range(pretrain_length):\n # If it's the first step\n if i == 0:\n\n game = Game({'max_steps':max_steps}) # initialize game from game.py // not 10000\n h = random.randint(1, R * C + 1)\n l = random.randint(1, h // 2 + 1)\n pizza_lines = [''.join([random.choice(\"MT\") for _ in range(C)]) for _ in range(R)]\n pizza_lines = [\"TMMMTTT\",\"MMMMTMM\", \"TTMTTMT\", \"TMMTMMM\", \"TTTTTTM\", \"TTTTTTM\"]\n pizza_config = { 'pizza_lines': pizza_lines, 'r': R, 'c': C, 'l': l, 'h': h }\n _state = preprocess(game.init(pizza_config)[0]) #np.zeros(OBSERVATION_DIM) #get only first value of tuple\n\n # Get the next_state, the rewards, done by taking a random action\n choice = random.randint(1,len(possible_actions))-1\n action = possible_actions[choice] #as one-hot\n #translate _action into 1 to 5 action for the game...\n _action = ACTIONS[np.argmax(action)]\n next_state, _reward, _done, _ = game.step(_action) #next_state is _state in Game agent\n _next_state = preprocess(next_state)\n\n if episode_render: # NEEDS TO BE CHECKED args.render:\n game.render()\n\n\n # If the episode is finished (we maxed out the number of frames)\n if _done:\n # We finished the episode\n _next_state = np.zeros(_state.shape) # _state is flattened with cursor, L and H appended\n\n # Add experience to memory (push action one-hot encoded instead of _action label e.g.'right')\n memory.add((_state, action, _reward, _next_state, _done))\n\n # Start a new episode\n game = Game({'max_steps':max_steps}) # initialize game from game.py not\n h = random.randint(1, R * C + 1)\n l = random.randint(1, h // 2 + 1)\n pizza_lines = [''.join([random.choice(\"MT\") for _ in range(C)]) for _ in range(R)]\n pizza_lines = [\"TMMMTTT\",\"MMMMTMM\", \"TTMTTMT\", \"TMMTMMM\", \"TTTTTTM\", \"TTTTTTM\"]\n pizza_config = { 'pizza_lines': pizza_lines, 'r': R, 'c': 
C, 'l': l, 'h': h }\n _state = preprocess(game.init(pizza_config)[0]) #np.zeros(OBSERVATION_DIM) #get only first value of tuple\n ### Watch-out, _observation will be flattened and won't conserve Rc needed for CNN\n\n else:\n # Add experience to memory (push action one-hot encoded instead of _action label e.g.'right')\n memory.add((_state, action, _reward, _next_state, _done))\n\n # Our new state is now the next_state\n _state = _next_state\n\n\n# Setup TensorBoard Writer #NEEDS TO BE CHECKED\nwriter = tf.summary.FileWriter(\"./tensorboard/dqn/1\")\n\nfor var in tf.trainable_variables():\n tf.summary.histogram(var.op.name, var)\n tf.summary.scalar('{}_max'.format(var.op.name), tf.reduce_max(var))\n tf.summary.scalar('{}_min'.format(var.op.name), tf.reduce_min(var))\n\n#tf.summary.scalar('rollout_reward', rollout_reward)\ntf.summary.scalar('loss', DQNetwork.loss)\n\n\n\ndef predict_action(explore_start, explore_stop, decay_rate, decay_step, state, actions):\n ## EPSILON GREEDY STRATEGY\n # Choose action a from state s using epsilon greedy.\n ## First we randomize a number\n exp_exp_tradeoff = np.random.rand()\n\n # Here we'll use an improved version of our epsilon greedy strategy used in Q-learning notebook\n explore_probability = explore_stop + (explore_start - explore_stop) * np.exp(-decay_rate * decay_step)\n\n if (explore_probability > exp_exp_tradeoff):\n # Make a random action (exploration)\n choice = random.randint(1,len(possible_actions))-1\n action = possible_actions[choice]\n\n else:\n # Get action from Q-network (exploitation)\n # Estimate the Qs values state\n Qs = sess.run(DQNetwork.output, feed_dict = {DQNetwork.inputs_: state.reshape((1, *state.shape))})\n\n # Take the biggest Q value (= the best action)\n choice = np.argmax(Qs)\n action = possible_actions[choice]\n\n return action, explore_probability\n\n# Saver will help us to save our model\nsaver = tf.train.Saver()\n\nif training == True:\n with tf.Session() as sess:\n # Initialize the variables\n 
sess.run(tf.global_variables_initializer())\n\n # Initialize the decay rate (that will use to reduce epsilon)\n decay_step = 0\n rewards_list = []\n for episode in range(total_episodes):\n # Set step to 0\n step = 0\n\n # Initialize the rewards of the episode\n episode_rewards = []\n\n # Make a new episode and observe the first state\n # Start a new episode\n game = Game({'max_steps':50}) # initialize game from game.py\n h = random.randint(1, R * C + 1)\n l = random.randint(1, h // 2 + 1)\n pizza_lines = [''.join([random.choice(\"MT\") for _ in range(C)]) for _ in range(R)]\n pizza_lines = [\"TMMMTTT\",\"MMMMTMM\", \"TTMTTMT\", \"TMMTMMM\", \"TTTTTTM\", \"TTTTTTM\"]\n pizza_config = { 'pizza_lines': pizza_lines, 'r': R, 'c': C, 'l': l, 'h': h }\n _state = preprocess(game.init(pizza_config)[0])\n\n while step < max_steps:\n step += 1\n\n #Increase decay_step\n decay_step +=1\n\n # Predict the action to take and take it\n action, explore_probability = predict_action(explore_start, explore_stop, decay_rate, decay_step, _state, possible_actions)\n #action is one-hot... 
so translate _action into 1 to 5 action for the game...\n _action = ACTIONS[np.argmax(action)]\n\n #Perform the action and get the next_state, reward, and done information\n next_state, _reward, _done, _ = game.step(_action) #next_state is _state in Game agent\n _next_state = preprocess(next_state)\n\n # Add the reward to total reward\n episode_rewards.append(_reward)\n\n # If the game is finished\n if _done:\n # The episode ends so no next state\n _next_state = np.zeros(_state.shape) # _state is flattened with cursor, L and H appended\n\n # Set step = max_steps to end the episode\n step = max_steps\n\n # Get the total reward of the episode\n total_reward = np.sum(episode_rewards)\n if episode % 100 == 0:\n print('Episode: {}'.format(episode),\n 'Total reward: {}'.format(total_reward),\n 'Explore P: {:.4f}'.format(explore_probability),\n 'Training Loss {:.4f}'.format(loss))\n\n rewards_list.append((episode, total_reward))\n\n # Store transition in memory D\n memory.add((_state, action, _reward, _next_state, _done))\n\n if episode_render and episode % 100 == 0:\n game.render()\n else:\n # Add experience to memory\n memory.add((_state, action, _reward, _next_state, _done))\n\n # st+1 is now our current state\n _state = _next_state\n\n\n ### LEARNING PART\n # Obtain random mini-batch from memory\n batch = memory.sample(batch_size)\n # reshaping states by using squeeze....\n states_mb = np.array([each[0] for each in batch], ndmin=3)\n states_mb = np.squeeze(states_mb, axis=0)\n actions_mb = np.array([each[1] for each in batch])\n\n rewards_mb = np.array([each[2] for each in batch])\n # reshaping next_states by using squeeze....\n next_states_mb = np.array([each[3] for each in batch], ndmin=3)\n next_states_mb = np.squeeze(next_states_mb, axis=0)\n\n dones_mb = np.array([each[4] for each in batch])\n target_Qs_batch = []\n\n # Get Q values for next_state /!\\ --- Why shape of DQNetwork.inputs_ = (1, 64, 89??)\n Qs_next_state = sess.run(DQNetwork.output, feed_dict = 
{DQNetwork.inputs_: next_states_mb})\n\n # Set Q_target = r if the episode ends at s+1, otherwise set Q_target = r + gamma*maxQ(s', a')\n for i in range(0, len(batch)):\n terminal = dones_mb[i]\n\n # If we are in a terminal state, only equals reward\n if terminal:\n target_Qs_batch.append(rewards_mb[i])\n\n else:\n target = rewards_mb[i] + gamma * np.max(Qs_next_state[i])\n target_Qs_batch.append(target)\n\n\n targets_mb = np.array([each for each in target_Qs_batch])\n loss, _ = sess.run([DQNetwork.loss, DQNetwork.optimizer],\n feed_dict={DQNetwork.inputs_: states_mb,\n DQNetwork.target_Q: targets_mb,\n DQNetwork.actions_: actions_mb})\n\n # Write TF Summaries\n summary = sess.run(write_op, feed_dict={DQNetwork.inputs_: states_mb,\n DQNetwork.target_Q: targets_mb,\n DQNetwork.actions_: actions_mb})\n writer.add_summary(summary, episode)\n writer.flush()\n\n # Save model every 5 episodes\n if episode % 100 == 0:\n save_path = saver.save(sess, \"./models/model.ckpt\")\n print(\"Model Saved\")\n\nwith tf.Session() as sess:\n total_test_rewards = []\n\n # Load the model\n saver.restore(sess, \"./models/model.ckpt\")\n\n for episode in range(1):\n total_rewards = 0\n\n game = Game({'max_steps':10000}) # initialize game from game.py\n h = random.randint(1, R * C + 1)\n l = random.randint(1, h // 2 + 1)\n pizza_lines = [''.join([random.choice(\"MT\") for _ in range(C)]) for _ in range(R)]\n pizza_lines = [\"TMMMTTT\",\"MMMMTMM\", \"TTMTTMT\", \"TMMTMMM\", \"TTTTTTM\", \"TTTTTTM\"]\n pizza_config = { 'pizza_lines': pizza_lines, 'r': R, 'c': C, 'l': l, 'h': h }\n _state = preprocess(game.init(pizza_config)[0]) #np.zeros(OBSERVATION_DIM) #get only first value of tuple\n\n print(\"****************************************************\")\n print(\"EPISODE \", episode)\n\n while True:\n _state = _state.reshape((1, *state_size))\n # Get action from Q-network\n # Estimate the Qs values state\n Qs = sess.run(DQNetwork.output, feed_dict = {DQNetwork.inputs_: _state})\n\n # Take the 
biggest Q value (= the best action)\n choice = np.argmax(Qs)\n action = possible_actions[choice] #as one-hot\n #translate _action into 1 to 5 action for the game...\n _action = ACTIONS[np.argmax(action)]\n print(_action)\n\n #Perform the action and get the next_state, reward, and done information\n next_state, _reward, _done, _ = game.step(_action) #next_state is _state in Game agent\n _next_state = preprocess(next_state)\n game.render()\n\n total_rewards += _reward\n\n if _done:\n print (\"Score\", total_rewards)\n total_test_rewards.append(total_rewards)\n break\n\n _state = _next_state\n\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser('pizza trainer')\n\tparser.add_argument(\n\t\t'--n-epoch',\n\t\ttype=int,\n\t\tdefault=total_episodes)\n\tparser.add_argument(\n\t\t'--batch-size',\n\t\ttype=int,\n\t\tdefault=batch_size)\n\tparser.add_argument(\n\t\t'--output-dir',\n\t\ttype=str,\n\t\tdefault='/tmp/pizza_output')\n\tparser.add_argument(\n\t\t'--job-dir',\n\t\ttype=str,\n\t\tdefault='/tmp/pizza_output')\n\n\tparser.add_argument(\n\t\t'--restore',\n\t\tdefault=False,\n\t\taction='store_true')\n\tparser.add_argument(\n\t\t'--render',\n\t\tdefault=False,\n\t\taction='store_true')\n\tparser.add_argument(\n\t\t'--save-checkpoint-steps',\n\t\ttype=int,\n\t\tdefault=1)\n\n\tparser.add_argument(\n\t\t'--learning-rate',\n\t\ttype=float,\n\t\tdefault=learning_rate)\n\tparser.add_argument(\n\t\t'--decay',\n\t\ttype=float,\n\t\tdefault=decay_rate)\n\tparser.add_argument(\n\t\t'--gamma',\n\t\ttype=float,\n\t\tdefault=gamma)\n\tparser.add_argument(\n\t\t'--laziness',\n\t\ttype=float,\n\t\tdefault=0.01)\n\tparser.add_argument(\n\t\t'--hidden-dim',\n\t\ttype=int,\n\t\tdefault=200)\n\n\targs = parser.parse_args()\n\n\t# save all checkpoints\n\t#args.max_to_keep = args.n_epoch // 
args.save-checkpoint-steps\n\n\tmain(args)\n","sub_path":"pizza_ml_0/old_trainer/task_cp.py","file_name":"task_cp.py","file_ext":"py","file_size_in_byte":18704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"136960067","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression\n\n\n# Now assume there is a mediator that carries the causal effect of sodium on blood pressure\n# Age is a confounder and effects of all other variables are negligible.\ndef generate_data(n=1000, seed=0, mediator_coef=1.05, sodium_coef=0.8,\n binary_treatment=True, binary_cutoff=3.5):\n np.random.seed(seed)\n age = np.random.normal(65, 5, n)\n sodium = age / 18 + np.random.normal(size=n)\n if binary_treatment:\n if binary_cutoff is None:\n binary_cutoff = sodium.mean()\n sodium = (sodium > binary_cutoff).astype(int)\n sodium_mediator = sodium_coef * sodium + np.random.normal(size=n)\n blood_pressure = mediator_coef * sodium_mediator + 2 * age + np.random.normal(size=n)\n # proteinuria = alpha1 * sodium + alpha2 * blood_pressure + np.random.normal(size=n)\n # hypertension = (blood_pressure >= 140).astype(int) # not used, but could be used for binary outcomes\n return pd.DataFrame({'blood_pressure': blood_pressure, 'sodium': sodium,\n 'sodium_mediator': sodium_mediator, 'age': age})\n\n\ndef estimate_causal_effect(Xt, y, model=LinearRegression(),\n treatment_colname='sodium',\n mediator_colname='sodium_mediator',\n step_two_adjustment_variable='age',\n regression_coef=True):\n # 1. 
Estimate a causal effect of sodium intake on its mediator\n # Because Y is a collider on the backdoor path from sodium to the mediator, all backdoor paths are blocked.\n # Hence, the only association that flows from sodium intake to M is causal and we can use\n # a standard statistical estimation.\n\n treatment_mediator_model = model\n treatment_mediator_model.fit(Xt[[treatment_colname]], Xt[mediator_colname])\n if regression_coef:\n treatment_coef = treatment_mediator_model.coef_[0]\n else:\n pass\n # Xt1 = pd.DataFrame.copy() # create a dataframe with treatment values of 1\n # Xt1[Xt.columns[treatment_idx]] = 1\n # Xt0 = pd.DataFrame.copy(Xt) # # create a dataframe with treatment values of 0\n # Xt0[Xt.columns[treatment_idx]] = 0\n # return (model.predict(Xt1) - model.predict(Xt0)).mean()\n\n # 2. Estimate a causal effect of the mediator on blood pressure\n # Sodium intake blocks the only backdoor path from the mediator to the outcome.\n # Hence, we only need to adjust for it to estimate the causal effect.\n # But, it adjusting for age gives more accurate results.\n mediator_outcome_model = model\n mediator_outcome_model.fit(Xt[[step_two_adjustment_variable, mediator_colname]], y)\n if regression_coef:\n mediator_coef = mediator_outcome_model.coef_[1]\n else:\n pass\n # Xt1 = pd.DataFrame.copy(Xt) # create a dataframe with treatment values of 1\n # Xt1[Xt.columns[treatment_idx]] = 1\n # Xt0 = pd.DataFrame.copy(Xt) # # create a dataframe with treatment values of 0\n # Xt0[Xt.columns[treatment_idx]] = 0\n # return (model.predict(Xt1) - model.predict(Xt0)).mean()\n\n return treatment_coef * mediator_coef\n\n\nif __name__ == '__main__':\n mediator_coef, sodium_coef = 1.05, 0.8\n fit_intercept = False\n data = generate_data(n=1000, seed=0, mediator_coef=mediator_coef, sodium_coef=sodium_coef,\n binary_treatment=False)\n Xt = data[['sodium', 'sodium_mediator', 'age']]\n y = data['blood_pressure']\n print(f'True causal coefficient: {mediator_coef*sodium_coef}')\n\n 
est_causal_coef = estimate_causal_effect(Xt, y,\n model=LinearRegression(fit_intercept=fit_intercept),\n treatment_colname='sodium',\n mediator_colname='sodium_mediator',\n step_two_adjustment_variable='age',\n regression_coef=True)\n print(f'Causal coefficient estimated using frontdoor adjustment: {est_causal_coef}')\n\n\n ### Non-causal models\n print('Corr matrix')\n print(data.corr())\n # Pairwise non-causal model\n model = LinearRegression(fit_intercept=fit_intercept)\n model.fit(Xt[['sodium']], y)\n pairwise_model_coef = model.coef_[0]\n print(f'Pairwise model coefficient: {pairwise_model_coef}')\n\n # All-data non-causal model\n model = LinearRegression(fit_intercept=fit_intercept)\n model.fit(Xt, y)\n alldata_model_coef = model.coef_[0]\n print(f'All-data model coefficient: {alldata_model_coef}')\n\n","sub_path":"Causal Inference/fontdoor_adjustment_example.py","file_name":"fontdoor_adjustment_example.py","file_ext":"py","file_size_in_byte":4666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"320271024","text":"# Copyright 2015 The Chromium Authors. 
All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nfrom master.v8.v8_notifier import V8Notifier\n\n\ndef Update(config, active_master, c):\n c['status'].extend([\n V8Notifier(\n config,\n active_master,\n categories_steps={\n 'chromium': [\n 'update',\n 'runhooks',\n 'gn',\n 'compile',\n ],\n },\n sendToInterestedUsers=True,\n ),\n V8Notifier(\n config,\n active_master,\n categories_steps={\n 'clusterfuzz': [\n 'check clusterfuzz',\n 'runhooks',\n 'gn',\n 'compile',\n 'gsutil upload',\n ],\n },\n extraRecipients=[\n 'machenbach@chromium.org',\n 'v8-clusterfuzz-sheriff@chromium.org',\n ],\n ),\n V8Notifier(\n config,\n active_master,\n categories_steps={'release': []},\n extraRecipients=[\n 'hablich@chromium.org',\n 'machenbach@chromium.org',\n ],\n ),\n ])\n","sub_path":"masters/master.client.v8.fyi/mail_notifier_cfg.py","file_name":"mail_notifier_cfg.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"621990586","text":"\n\nfrom xai.brain.wordbase.adjectives._bilingual import _BILINGUAL\n\n#calss header\nclass _BILINGUALS(_BILINGUAL, ):\n\tdef __init__(self,): \n\t\t_BILINGUAL.__init__(self)\n\t\tself.name = \"BILINGUALS\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"bilingual\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_bilinguals.py","file_name":"_bilinguals.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"269737212","text":"#Given a list of prices where i is a price of a given stock at that day,\n#find the maximum profit that you can get from the list of prices.\n\nprices = [2,4,6,2,7,2,9,4,0,19,34,31,24,23]\n\nclass Solution:\n def maxProfit(prices) -> int:\n\n max_profit = 0\n\n for i in range(0,len(prices)):\n for j in range(i+1, len(prices)):\n min_price = 
prices[j] - prices[i]\n if min_price> max_profit:\n max_profit = min_price\n return max_profit\n\n print(maxProfit(prices))\n \n","sub_path":"bruteforce/maxProfit.py","file_name":"maxProfit.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"417556036","text":"# Auther : Heinz Samuelsson\n# Date : 2014-01-26\n# File : wrap.py\n# Reference : Game Programming, page 335.\n# Description : Wrap a sprite.\n# Python ver : 2.7.3 (gcc 4.6.3)\n\nimport pygame\npygame.init()\n\nscreen = pygame.display.set_mode((640,480))\n\nclass Ball(pygame.sprite.Sprite):\n\n def __init__(self,screen,background):\n\n pygame.sprite.Sprite.__init__(self)\n self.screen = screen\n self.background = background\n\n self.image = pygame.Surface((30,30))\n self.image.fill((255,255,255))\n pygame.draw.circle(self.image,(0,0,255),(15,15),15)\n self.rect = self.image.get_rect()\n\n self.rect.center = (320,240)\n self.dx = 5\n self.dy = 5\n\n def update(self):\n\n oldCenter = self.rect.center\n self.rect.centerx += self.dx\n self.rect.centery += self.dy\n pygame.draw.line(self.background,(0,0,0),oldCenter,self.rect.center)\n self.checkBounds()\n\n def checkBounds(self):\n\n if self.rect.centerx > self.screen.get_width():\n self.rect.centerx = 0\n\n if self.rect.centerx < 0:\n self.rect.centerx = self.screen.get_width()\n\n if self.rect.centery > self.screen.get_height():\n self.rect.centery = 0\n\n if self.rect.centery < 0:\n self.rect.centery = self.screen.get_height()\n \n\ndef main():\n screen = pygame.display.set_mode((640,480))\n pygame.display.set_caption(\"boundary checking - wrap\")\n\n background = pygame.Surface(screen.get_size())\n background.fill((255,255,255))\n screen.blit(background, (0,0))\n\n ball = Ball(screen,background)\n allSprites = pygame.sprite.Group(ball)\n \n clock = pygame.time.Clock()\n keepGoing = True\n\n while keepGoing:\n clock.tick(30)\n for event in pygame.event.get():\n if 
event.type == pygame.QUIT:\n keepGoing = False\n\n allSprites.clear(screen,background)\n allSprites.update()\n allSprites.draw(screen)\n\n pygame.display.flip()\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"pygame3/wrap.py","file_name":"wrap.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"77263787","text":"from django import forms\nfrom clients.models import Client\nfrom contacts.models import Contact\nfrom django.utils.translation import ugettext as _\nfrom django.forms import ModelForm\n \nclass ContactEditForm (ModelForm):\n owner \t\t= forms.ModelChoiceField(queryset=Client.objects.filter(isfolder=False), required=False)\n fio \t\t= forms.CharField(max_length=255, required=True, label=_('FIO'), widget=forms.TextInput(attrs={'maxlength': '100'}))\n email \t\t= forms.EmailField(required=False, label=_('Email'), widget=forms.TextInput(attrs={'maxlength': '100'}))\n birthday\t= forms.DateField(required=False, label=_('Birthday'))\n position\t= forms.CharField(max_length=100, required=False, label=_('Position'), widget=forms.TextInput(attrs={'maxlength': '100'}))\n phone\t\t= forms.CharField(max_length=100, required=False, label=_('Phone'), widget=forms.TextInput(attrs={'maxlength': '100'}))\n \n class Meta:\n model = Client\n fields = ['fio','email', 'birthday','position', 'phone']\n \n\nclass ClientEditForm (ModelForm):\n#\tisfolder \t= forms.BooleanField()\n# code \t\t= forms.CharField(max_length=9, required=True, label='Code', widget=forms.TextInput(attrs={'placeholder': 'code', 'readonly': 'readonly'}))\n parent = forms.ModelChoiceField(queryset=Client.objects.filter(isfolder=True), required=False, label=_('Parent'))\n code \t\t= forms.CharField(max_length=9, required=False, label=_('Code'), widget=forms.TextInput(attrs={'readonly': 'readonly'}))\n name \t\t= forms.CharField(max_length=255, required=False, label=_('Name'), 
widget=forms.TextInput(attrs={'maxlength': '100'}))\n iscompany \t= forms.BooleanField(label=_('Is company'), required=False)\n INN \t\t= forms.CharField(max_length=12, required=False, label=_('INN'))\n KPP \t\t= forms.CharField(max_length=9, required=False, label=_('KPP'))\n email \t\t= forms.EmailField(required=False, label=_('Email'))\n uradress\t= forms.CharField(max_length=150, required=False, label=_('Company Address'), widget=forms.TextInput(attrs={'maxlength': '100'}))\n factadress\t= forms.CharField(max_length=150, required=False, label=_('Post address'))\n phone\t\t= forms.CharField(max_length=30, required=False, label=_('Phone'), widget=forms.TextInput(attrs={'maxlength': '100'}))\n fax\t\t\t= forms.CharField(max_length=30, required=False, label=_('Fax'))\n site\t\t= forms.CharField(max_length=50, required=False, label=_('Site'))\n isfolder \t= forms.BooleanField(required=False, initial='', widget=forms.HiddenInput())\n \n class Meta:\n model = Client\n fields = ['parent','code', 'name', 'iscompany', 'INN', 'KPP', 'email', 'uradress', 'factadress', 'phone', 'fax', 'site']\n \nclass ClientFolderEditForm (ModelForm):\n parent = forms.ModelChoiceField(queryset=Client.objects.filter(isfolder=True), required=False)\n code \t\t= forms.CharField(max_length=9, required=False, label=_('Code'), widget=forms.TextInput(attrs={'readonly': 'readonly'}))\n name \t\t= forms.CharField(max_length=255, required=False, label=_('Name'), widget=forms.TextInput(attrs={'maxlength': '100'}))\n isfolder \t= forms.BooleanField(required=False, initial='true', widget=forms.HiddenInput())\n \n class Meta:\n model = Client\n fields = ['parent','code', 'name']","sub_path":"clients/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"497101106","text":"import mysql.connector\r\nimport datetime\r\nfrom flask import render_template\r\nfrom flask import 
request\r\nfrom app import app\r\n\r\ndef getConexao():\r\n\tconexao = mysql.connector.connect(\r\n\t\thost='localhost',\r\n\t\tuser='root',\r\n\t\tpassword='admin',\r\n\t\tdatabase='eventos'\r\n\t)\r\n\treturn conexao\r\n\r\n@app.route('/')\r\ndef teste():\r\n\treturn render_template('site.html')\r\n\r\n@app.route('/eventos/listar')\r\ndef eventos_listar():\r\n\tsql = \"select id, titulo, data from eventos\"\r\n\t\r\n\tconexao = getConexao()\r\n\tcursor = conexao.cursor()\r\n\t\r\n\tcursor.execute(sql)\r\n\teventos = cursor.fetchall()\r\n\t\r\n\tcursor.close()\r\n\tconexao.close()\r\n\t\r\n\treturn render_template('eventos/listar.html', eventos=eventos)\r\n\r\n@app.route('/eventos/editar/', methods=['GET', 'POST'])\r\ndef eventos_update(id):\r\n\tpass\r\n\r\n@app.route('/eventos/apagar/', methods=['GET', 'POST'])\r\ndef eventos_delete(id):\r\n\tpass\r\n\t\r\n@app.route('/eventos/inserir', methods=['GET', 'POST'])\r\ndef eventos_inserir(id=0):\r\n\ttitulo = request.form.get('titulo')\r\n\tdata = request.form.get('data')\r\n\r\n\tconexao = getConexao()\r\n\tcursor = conexao.cursor()\r\n\tsql = 'insert into eventos (titulo, data) values (%s, %s)'\r\n\tcursor.execute(sql, (titulo, data))\r\n\tid = cursor.lastrowid\r\n\tconexao.commit()\r\n\tcursor.close()\r\n\tconexao.close()\r\n\treturn 'Cadastro realizado com sucesso!'\r\n\r\n@app.route('/eventos/cadastrar', methods=['GET', 'POST'])\r\n@app.route('/eventos/cadastrar/', methods=['GET', 'POST'])\r\ndef eventos_form(id=None):\r\n\tevento = {'data': datetime.datetime.now()}\r\n\t# id = request.args.get('id')\r\n\tif id is not None:\r\n\t\tconexao = getConexao()\r\n\t\tcursor = conexao.cursor(dictionary=True)\r\n\t\t\r\n\t\tsql = 'select * from eventos where id=%s'\r\n\t\tcursor.execute(sql, (id, ))\r\n\t\tevento = cursor.fetchone()\r\n\t\t\r\n\t\tcursor.close()\r\n\t\tconexao.close()\r\n\treturn render_template('eventos/cadastrar.html', evento=evento)\r\n\r\n\r\n@app.route('/inscricoes/listar')\r\ndef 
inscricoes_listar():\r\n\tsql = 'select e.titulo, u.nome from eventos e join inscricoes i on e.id=i.eventos_id join usuarios u on u.id=i.usuarios_id'\r\n\t\r\n\tconexao = getConexao()\r\n\tcursor = conexao.cursor()\r\n\t\r\n\tcursor.execute(sql)\r\n\tinscricoes = cursor.fetchall()\r\n\t\r\n\tcursor.close()\r\n\tconexao.close()\r\n\t\r\n\treturn render_template('inscricoes/listar.html', inscricoes=inscricoes)\r\n\r\n@app.route('/inscricoes/inserir', methods=['GET', 'POST'])\r\ndef inscricoes_inserir(id=0):\r\n\tnome = request.form.get('nome')\r\n\temail = request.form.get('email')\r\n\tevento = request.form.get('evento')\r\n\r\n\tconexao = getConexao()\r\n\tcursor = conexao.cursor()\r\n\tsql = 'insert into usuarios (nome, email) values (%s, %s)'\r\n\tcursor.execute(sql, (nome, email))\r\n\tid = cursor.lastrowid\r\n\tsql = 'insert into inscricoes (usuarios_id, eventos_id) values (%s, %s)'\r\n\tcursor.execute(sql, (id, evento))\r\n\t\r\n\tconexao.commit()\r\n\tcursor.close()\r\n\tconexao.close()\r\n\treturn 'Cadastro realizado com sucesso!'\r\n\r\n@app.route('/inscricoes/editar/', methods=['GET', 'POST'])\r\n@app.route('/inscricoes/cadastrar', methods=['GET', 'POST'])\r\ndef inscricoes_form(id=0):\r\n\tsql = \"select id, titulo from eventos\"\r\n\t\r\n\tconexao = getConexao()\r\n\tcursor = conexao.cursor()\r\n\t\r\n\tcursor.execute(sql)\r\n\teventos = cursor.fetchall()\r\n\t\r\n\tcursor.close()\r\n\tconexao.close()\r\n\treturn render_template('inscricoes/cadastrar.html', eventos=eventos)\r\n\r\n@app.route('/submissoes/listar')\r\ndef submissoes_listar():\r\n\treturn render_template('submissoes/listar.html')\r\n\r\n@app.route('/submissoes/cadastrar', methods=['GET', 'POST'])\r\ndef submissoes_form():\r\n\tconexao = getConexao()\r\n\tcursor = conexao.cursor()\r\n\t\r\n\tsql = \"select id, titulo from eventos\"\r\n\tcursor.execute(sql)\r\n\teventos = cursor.fetchall()\r\n\tsql = \"select id, nome from usuarios\"\r\n\tcursor.execute(sql)\r\n\tusuarios = 
cursor.fetchall()\r\n\t\r\n\tcursor.close()\r\n\tconexao.close()\r\n\treturn render_template('submissoes/cadastrar.html', eventos=eventos, usuarios=usuarios)\r\n\r\n'''\r\nclass X:\r\n\tdef __init__(self, name):\r\n\t\tself.name = name;\r\n\t\tprint('/' + self.name + '/index2')\r\n\t\t\r\n\tdef index(self):\r\n\t\tprint(self.name)\r\n\t\t@app.route('/' + self.name + '/index2')\r\n\t\tdef x():\r\n\t\t\treturn 'oi, ' + self.name\r\n\t\t\r\nx = X('oi')\r\ny = X('tchau')\r\n'''\r\n","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":4103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"221599164","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n\tpath('',views.index, name='index'),\n path('posts/', views.index, name='posts'),\n path('home/', views.home, name='home'),\n path('downloads/', views.downloads, name='downloads'),\n path('about/', views.about, name='about'),\n]\n","sub_path":"mainApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"577411101","text":"import socket\nfrom threading import *\n\n\ndef accept_incoming_connections():\n \"\"\"Sets up handling for incoming clients.\"\"\"\n\n while True:\n client, client_address = server.accept()\n print(f'{client_address}:{client_address} has connected')\n client.send(bytes('Hi! Please, type your name and press enter: ',\n 'utf8'))\n addresses[client] = client_address\n Thread(target=handle_client, args=(client,)).start()\n\n\ndef handle_client(client):\n \"\"\"Handles a single client connection.\"\"\"\n\n name = client.recv(BUFSIZ).decode(\"utf8\")\n welcome = f'Welcome {name}! 
If you ever want to quit, ' \\\n f'type \"^quit\" to exit.\\n' \\\n f'If you want to get list of participants, type ^list.\\n' \\\n f'If you want to send private message,' \\\n f' type ^!name of participant and your message.'\n client.send(bytes(welcome, \"utf8\"))\n msg = f'{name} has joined the chat'\n broadcast(bytes(msg, 'utf8'))\n clients[client] = name\n\n while True:\n msg = client.recv(BUFSIZ)\n if msg == bytes('^quit', 'utf8'):\n client.send(bytes('^quit', 'utf8'))\n client.close()\n del clients[client]\n broadcast(bytes(f'{name} has left the chat', 'utf8'))\n break\n elif msg == bytes('^list', 'utf8'):\n participants(client)\n elif msg.startswith(bytes('^!', 'utf8')):\n private(client, msg)\n else:\n broadcast(msg, f'{name}: ')\n\n\ndef broadcast(msg, prefix=''):\n \"\"\"Broadcasts a message to all the clients.\"\"\"\n\n for sock in clients:\n sock.send(bytes(prefix, 'utf8') + msg)\n\n\ndef participants(client):\n \"\"\"Sending list of participants\"\"\"\n\n names = list(clients.values())\n participant = ', '.join(names)\n client.send(bytes(participant, 'utf8'))\n\n\ndef private(client, msg):\n \"\"\"Sending private messages\"\"\"\n\n for recipient, name in clients.items():\n if msg.startswith(bytes(f'^!{name}', 'utf8')):\n message = f'Message from {clients[client]}:{str(msg[(len(name)+2):], \"utf8\")}'\n recipient.send(bytes(message, 'utf8'))\n\n\nclients = {}\naddresses = {}\n\nIP_address = '0.0.0.0'\nPort = 1111\nBUFSIZ = 1024\n\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nserver.bind((IP_address, Port))\n\nif __name__ == '__main__':\n server.listen(5)\n print('Waiting for connection...')\n ACCEPT_THREAD = Thread(target=accept_incoming_connections)\n ACCEPT_THREAD.start()\n ACCEPT_THREAD.join()\n 
server.close()\n","sub_path":"09-networks-basics/hw/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"284376326","text":"from sys import stdin\nfrom itertools import permutations\n\nDEBUG = False\n\ndef process_input():\n\tshort_reads = set()\n\tnext_line = stdin.readline()\n\twhile next_line != '':\n\t\tshort_reads.add(next_line.strip())\n\t\tnext_line = stdin.readline()\n\n\treturn short_reads\n\n# filter out strings that are substrings of another\ndef filter_redundant(short_reads):\n\tfiltered = set()\n\tfor sr1 in short_reads:\n\t\tfor sr2 in short_reads:\n\t\t\tif sr1 != sr2 and sr1 in sr2:\n\t\t\t\tbreak\n\t\telse:\n\t\t\tfiltered.add(sr1)\n\n\treturn filtered\n\ndef calculate_overlap(left, right):\n\tmax_possible_overlap = min(len(left), len(right))\n\tfor i in range(max_possible_overlap, 0, -1):\n\t\tif right.startswith(left[-i:]):\n\t\t\treturn i\n\treturn 0\n\n# find the ordered pair of short reads that have the biggest overlap\ndef find_biggest_overlap(short_reads, overlap_cache):\n\tbest_overlap_amount = -1\n\tbest_pair = None\n\tfor (left, right) in permutations(short_reads, 2):\n\t\tif (left, right) not in overlap_cache:\n\t\t\toverlap_cache[(left, right)] = calculate_overlap(left, right)\n\t\t\t\n\t\toverlap = overlap_cache[(left, right)]\n\t\tif overlap > best_overlap_amount:\n\t\t\tbest_overlap_amount = overlap\n\t\t\tbest_pair = (left, right)\n\n\treturn best_pair, best_overlap_amount\n\t\n\ndef main():\n\tshort_reads = process_input()\n\tshort_reads = filter_redundant(short_reads)\n\tnum_iter = len(short_reads) - 1\n\toverlap_cache = {}\n\tfor _ in range(num_iter): \n\t\t(left, right), overlap_amount = find_biggest_overlap(short_reads, overlap_cache)\n\t\tmerged = left + right[overlap_amount:]\n\t\tshort_reads.discard(left)\n\t\tshort_reads.discard(right)\n\t\tshort_reads.add(merged)\n\n\tprint(merged)\n\ndef 
log(message):\n\tif DEBUG:\n\t\tprint(message)\n\nif __name__ == \"__main__\":\n\tmain()\n\n","sub_path":"sr_assembler.py","file_name":"sr_assembler.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"497816727","text":"#Author:Silvio Severino\n#Date:30/12/18\n\nimport shutil\nimport os\nimport cv2\nfrom imutils import paths\nimport random\n\nimport tensorflow as tf\nfrom tensorflow.python.keras.preprocessing.image import img_to_array\n\n\n\ndef split_test_into_subfolders(path):\n \n dest = 'sc5-Test-tensorflow'\n if not os.path.exists(dest):\n os.makedirs(dest)\n \n with open(path) as file:\n \n for line in file:\n file_name, class_name = line.split(';')\n class_name = class_name.replace('\\n','')\n \n subdest = dest + '/' + class_name\n if not os.path.exists(subdest):\n os.makedirs(subdest)\n shutil.copy('sc5-Test/'+file_name, subdest)\n \n file.close()\n\n\ndef split_test_into_family_class(path):\n \n dest = 'sc5-test-tensorflow'\n start = 'sc5-Test-tensorflow-tmp'\n if not os.path.exists(dest):\n os.makedirs(dest)\n \n with open(path) as file:\n \n for line in file:\n file_name, class_name = line.split(';')\n class_name = class_name.replace('\\n','')\n \n if os.path.exists(start+'/'+file_name):\n subdest = dest + '/' + class_name\n \n if not os.path.exists(subdest):\n os.makedirs(subdest)\n shutil.move(start+'/'+file_name,subdest)\n file.close() \n\n\n\ndef split_into_family_class(path):\n \n dest = 'sc5-tensorflow'\n if not os.path.exists(dest):\n os.makedirs(dest)\n \n with open(path) as file:\n \n for line in file:\n file_name, class_name = line.split(';')\n class_name = class_name.replace('\\n','')\n \n if os.path.exists('sc5/'+file_name):\n subdest = dest + '/' + class_name\n \n if not os.path.exists(subdest):\n os.makedirs(subdest)\n shutil.move('sc5/'+file_name,subdest)\n file.close() \n\n\n\ndef balance_train_and_test(source_train, source_test):\n \n for 
family in os.listdir(source_train):\n \n for classes in os.listdir(source_train+'/'+family):\n \n subfolder_train = source_train + '/' + family + '/' + classes\n subfolder_test = source_test + '/' + family + '/' + classes\n \n if not os.path.exists(subfolder_test):\n print(classes)\n os.mkdir(subfolder_test)\n pics_to_move = int(0.2*len(os.listdir(source_train + '/' + family + '/' + classes)))\n images = os.listdir(subfolder_train)\n \n for i in range(pics_to_move):\n image = subfolder_train + '/' + images[i]\n shutil.move(image, subfolder_test)\n\n\n\n#balance_train_and_test('sc5-tensorflow','sc5-test-tensorflow')\n#split_into_subfolders('sc5-Test/ground_truth.txt')\n#split_into_family_class('sc5/ground-truth-family.txt')\n#split_test_into_family_class('sc5-Test/ground-truth-family.txt')\n\n\n#depth:\n# -1 image directory\n# -2 boats classification\n# -3 family classification\n# -4 source path\ndef read_images(path, depth, height, width):\n data = []\n labels = []\n\n # grab the image paths and randomly shuffle them\n print(\"[INFO] loading images...\")\n imagePaths = sorted(list(paths.list_images(path)))\n random.seed(42)\n random.shuffle(imagePaths)\n\n # loop over the input images\n for imagePath in imagePaths:\n image = cv2.imread(imagePath)\n image = cv2.resize(image, (height, width))\n image = img_to_array(image)\n data.append(image)\n\n label = imagePath.split(os.path.sep)[-depth]\n labels.append(label)\n return data, labels\n\n\n\n#a, b = read_images(\"sc5-tensorflow\", 2, height, width)\n#b.shape\n\n","sub_path":"dataset_manipulation.py","file_name":"dataset_manipulation.py","file_ext":"py","file_size_in_byte":3825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"269036725","text":"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# 
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport argh\nimport os.path\nimport collections\nimport random\nimport re\nimport shutil\nimport socket\nimport sys\nimport tempfile\nimport time\nimport cloud_logging\nfrom tqdm import tqdm\nimport gzip\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import gfile\n\nimport go\nimport dual_net\nfrom gtp_wrapper import make_gtp_instance, MCTSPlayer\nimport preprocessing\nimport selfplay_mcts\nfrom utils import logged_timer as timer\nimport evaluation\nimport sgf_wrapper\nimport utils\n\nimport qmeas\nimport goparams\n\n# How many positions we should aggregate per 'chunk'.\nEXAMPLES_PER_RECORD = goparams.EXAMPLES_PER_RECORD\n\n# How many positions to draw from for our training window.\n# AGZ used the most recent 500k games, which, assuming 250 moves/game = 125M\n# WINDOW_SIZE = 125000000\n#WINDOW_SIZE = 500000\nWINDOW_SIZE = goparams.WINDOW_SIZE\n\n\ndef _ensure_dir_exists(directory):\n if directory.startswith('gs://'):\n return\n os.makedirs(directory, exist_ok=True)\n\n\ndef gtp(load_file: \"The path to the network model files\"=None,\n readouts: 'How many simulations to run per move'=10000,\n #readouts: 'How many simulations to run per move'=2000,\n cgos_mode: 'Whether to use CGOS time constraints'=False,\n verbose=1):\n engine = make_gtp_instance(load_file,\n readouts_per_move=readouts,\n verbosity=verbose,\n cgos_mode=cgos_mode)\n sys.stderr.write(\"GTP engine ready\\n\")\n sys.stderr.flush()\n while not engine.disconnect:\n inpt = input()\n # handle either single lines at a time\n # or multiple commands separated by '\\n'\n try:\n cmd_list = inpt.split(\"\\n\")\n 
except:\n cmd_list = [inpt]\n for cmd in cmd_list:\n engine_reply = engine.send(cmd)\n sys.stdout.write(engine_reply)\n sys.stdout.flush()\n\n\ndef bootstrap(\n working_dir: 'tf.estimator working directory. If not set, defaults to a random tmp dir'=None,\n model_save_path: 'Where to export the first bootstrapped generation'=None):\n qmeas.start_time('bootstrap')\n if working_dir is None:\n with tempfile.TemporaryDirectory() as working_dir:\n _ensure_dir_exists(working_dir)\n _ensure_dir_exists(os.path.dirname(model_save_path))\n dual_net.bootstrap(working_dir)\n dual_net.export_model(working_dir, model_save_path)\n else:\n _ensure_dir_exists(working_dir)\n _ensure_dir_exists(os.path.dirname(model_save_path))\n dual_net.bootstrap(working_dir)\n dual_net.export_model(working_dir, model_save_path)\n qmeas.stop_time('bootstrap')\n\n\ndef train(\n working_dir: 'tf.estimator working directory.',\n chunk_dir: 'Directory where gathered training chunks are.',\n model_save_path: 'Where to export the completed generation.',\n generation_num: 'Which generation you are training.'=0):\n qmeas.start_time('train')\n tf_records = sorted(gfile.Glob(os.path.join(chunk_dir, '*.tfrecord.zz')))\n tf_records = tf_records[-1 * (WINDOW_SIZE // EXAMPLES_PER_RECORD):]\n\n print(\"Training from:\", tf_records[0], \"to\", tf_records[-1])\n\n with timer(\"Training\"):\n dual_net.train(working_dir, tf_records, generation_num)\n dual_net.export_model(working_dir, model_save_path)\n qmeas.stop_time('train')\n\n\ndef validate(\n working_dir: 'tf.estimator working directory',\n *tf_record_dirs: 'Directories where holdout data are',\n checkpoint_name: 'Which checkpoint to evaluate (None=latest)'=None,\n validate_name: 'Name for validation set (i.e., selfplay or human)'=None):\n qmeas.start_time('validate')\n tf_records = []\n with timer(\"Building lists of holdout files\"):\n for record_dir in tf_record_dirs:\n tf_records.extend(gfile.Glob(os.path.join(record_dir, '*.zz')))\n\n first_record = 
os.path.basename(tf_records[0])\n last_record = os.path.basename(tf_records[-1])\n with timer(\"Validating from {} to {}\".format(first_record, last_record)):\n dual_net.validate(\n working_dir, tf_records, checkpoint_name=checkpoint_name,\n name=validate_name)\n qmeas.stop_time('validate')\n\n\ndef evaluate(\n black_model: 'The path to the model to play black',\n white_model: 'The path to the model to play white',\n output_dir: 'Where to write the evaluation results'='sgf/evaluate',\n readouts: 'How many readouts to make per move.'=200,\n games: 'the number of games to play'=20,\n verbose: 'How verbose the players should be (see selfplay)' = 1):\n qmeas.start_time('evaluate')\n _ensure_dir_exists(output_dir)\n\n with timer(\"Loading weights\"):\n black_net = dual_net.DualNetwork(black_model)\n white_net = dual_net.DualNetwork(white_model)\n\n winners = []\n with timer(\"%d games\" % games):\n winners = evaluation.play_match(\n black_net, white_net, games, readouts, output_dir, verbose)\n qmeas.stop_time('evaluate')\n white_count = 0\n for win in winners:\n if 'W' in win or 'w' in win:\n white_count += 1\n return white_count * 1.0 / games\n\n # qmeas.report_profiler()\n\ndef evaluate_evenly(\n black_model: 'The path to the model to play black',\n white_model: 'The path to the model to play white',\n output_dir: 'Where to write the evaluation results'='sgf/evaluate',\n readouts: 'How many readouts to make per move.'=200,\n games: 'the number of games to play'=20,\n verbose: 'How verbose the players should be (see selfplay)' = 1):\n ''' Returns the white win rate; playes 'games' number of games on both sides. '''\n try:\n result = (evaluate(black_model, white_model, output_dir, readouts, games, verbose) + (1 - evaluate(white_model, black_model, output_dir, readouts, games, verbose)))/ 2.0\n except TypeError:\n # It is remotely possible that in weird twist of fate results in a type\n # error... 
Possibly due to weird corner cases in the evaluation...\n # Our fall back will be to try agian.\n result = (evaluate(black_model, white_model, output_dir, readouts, games, verbose) + (1 - evaluate(white_model, black_model, output_dir, readouts, games, verbose)))/ 2.0\n # should this really happen twice, the world really doesn't\n # want this to be successful... and we will raise the error.\n # If this is being run by the main loop harness, then the\n # effect of raising here will be to keep the newest model and go back to\n # selfplay.\n return result\n\n\n\ndef selfplay(\n load_file: \"The path to the network model files\",\n output_dir: \"Where to write the games\"=\"data/selfplay\",\n holdout_dir: \"Where to write the games\"=\"data/holdout\",\n output_sgf: \"Where to write the sgfs\"=\"sgf/\",\n readouts: 'How many simulations to run per move'=100,\n verbose: '>=2 will print debug info, >=3 will print boards' = 1,\n resign_threshold: 'absolute value of threshold to resign at' = 0.95,\n holdout_pct: 'how many games to hold out for validation' = 0.05):\n qmeas.start_time('selfplay')\n clean_sgf = os.path.join(output_sgf, 'clean')\n full_sgf = os.path.join(output_sgf, 'full')\n _ensure_dir_exists(clean_sgf)\n _ensure_dir_exists(full_sgf)\n _ensure_dir_exists(output_dir)\n _ensure_dir_exists(holdout_dir)\n\n with timer(\"Loading weights from %s ... 
\" % load_file):\n network = dual_net.DualNetwork(load_file)\n\n with timer(\"Playing game\"):\n player = selfplay_mcts.play(\n network, readouts, resign_threshold, verbose)\n\n output_name = '{}-{}'.format(int(time.time() * 1000 * 1000), socket.gethostname())\n game_data = player.extract_data()\n with gfile.GFile(os.path.join(clean_sgf, '{}.sgf'.format(output_name)), 'w') as f:\n f.write(player.to_sgf(use_comments=False))\n with gfile.GFile(os.path.join(full_sgf, '{}.sgf'.format(output_name)), 'w') as f:\n f.write(player.to_sgf())\n\n tf_examples = preprocessing.make_dataset_from_selfplay(game_data)\n\n # Hold out 5% of games for evaluation.\n if random.random() < holdout_pct:\n fname = os.path.join(holdout_dir, \"{}.tfrecord.zz\".format(output_name))\n else:\n fname = os.path.join(output_dir, \"{}.tfrecord.zz\".format(output_name))\n\n preprocessing.write_tf_examples(fname, tf_examples)\n qmeas.stop_time('selfplay')\n\n\ndef selfplay_cache_model(\n network: \"The path to the network model files\",\n output_dir: \"Where to write the games\"=\"data/selfplay\",\n holdout_dir: \"Where to write the games\"=\"data/holdout\",\n output_sgf: \"Where to write the sgfs\"=\"sgf/\",\n readouts: 'How many simulations to run per move'=100,\n verbose: '>=2 will print debug info, >=3 will print boards' = 1,\n resign_threshold: 'absolute value of threshold to resign at' = 0.95,\n holdout_pct: 'how many games to hold out for validation' = 0.05):\n qmeas.start_time('selfplay')\n clean_sgf = os.path.join(output_sgf, 'clean')\n full_sgf = os.path.join(output_sgf, 'full')\n _ensure_dir_exists(clean_sgf)\n _ensure_dir_exists(full_sgf)\n _ensure_dir_exists(output_dir)\n _ensure_dir_exists(holdout_dir)\n\n with timer(\"Playing game\"):\n player = selfplay_mcts.play(\n network, readouts, resign_threshold, verbose)\n\n output_name = '{}-{}'.format(int(time.time() * 1000 * 1000), socket.gethostname())\n game_data = player.extract_data()\n with gfile.GFile(os.path.join(clean_sgf, 
'{}.sgf'.format(output_name)), 'w') as f:\n f.write(player.to_sgf(use_comments=False))\n with gfile.GFile(os.path.join(full_sgf, '{}.sgf'.format(output_name)), 'w') as f:\n f.write(player.to_sgf())\n\n tf_examples = preprocessing.make_dataset_from_selfplay(game_data)\n\n # Hold out 5% of games for evaluation.\n if random.random() < holdout_pct:\n fname = os.path.join(holdout_dir, \"{}.tfrecord.zz\".format(output_name))\n else:\n fname = os.path.join(output_dir, \"{}.tfrecord.zz\".format(output_name))\n\n preprocessing.write_tf_examples(fname, tf_examples)\n qmeas.stop_time('selfplay')\n\n\n\ndef gather(\n input_directory: 'where to look for games'='data/selfplay/',\n output_directory: 'where to put collected games'='data/training_chunks/',\n examples_per_record: 'how many tf.examples to gather in each chunk'=EXAMPLES_PER_RECORD):\n qmeas.start_time('gather')\n _ensure_dir_exists(output_directory)\n models = [model_dir.strip('/')\n for model_dir in sorted(gfile.ListDirectory(input_directory))[-50:]]\n with timer(\"Finding existing tfrecords...\"):\n model_gamedata = {\n model: gfile.Glob(\n os.path.join(input_directory, model, '*.tfrecord.zz'))\n for model in models\n }\n print(\"Found %d models\" % len(models))\n for model_name, record_files in sorted(model_gamedata.items()):\n print(\" %s: %s files\" % (model_name, len(record_files)))\n\n meta_file = os.path.join(output_directory, 'meta.txt')\n try:\n with gfile.GFile(meta_file, 'r') as f:\n already_processed = set(f.read().split())\n except tf.errors.NotFoundError:\n already_processed = set()\n\n num_already_processed = len(already_processed)\n\n for model_name, record_files in sorted(model_gamedata.items()):\n if set(record_files) <= already_processed:\n continue\n print(\"Gathering files for %s:\" % model_name)\n for i, example_batch in enumerate(\n tqdm(preprocessing.shuffle_tf_examples(examples_per_record, record_files))):\n output_record = os.path.join(output_directory,\n 
'{}-{}.tfrecord.zz'.format(model_name, str(i)))\n preprocessing.write_tf_examples(\n output_record, example_batch, serialize=False)\n already_processed.update(record_files)\n\n print(\"Processed %s new files\" %\n (len(already_processed) - num_already_processed))\n with gfile.GFile(meta_file, 'w') as f:\n f.write('\\n'.join(sorted(already_processed)))\n qmeas.stop_time('gather')\n\n\nparser = argparse.ArgumentParser()\nargh.add_commands(parser, [gtp, bootstrap, train,\n selfplay, gather, evaluate, validate])\n\nif __name__ == '__main__':\n cloud_logging.configure()\n argh.dispatch(parser)\n","sub_path":"reinforcement/tensorflow/minigo/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"106630583","text":"from flask import render_template\n\nfrom .FormController import FormController\nfrom app.api.models.pet.ppnp import PPnP\nfrom app.api.models.pet.attribute import Attribute\nfrom app.api.models.user import User\n\n\nclass MarketController(FormController):\n def get(self, phone):\n ppnp = PPnP.get(PPnP.owner == (User.get(User.phone == phone)))\n attributes = Attribute.select()\n best = list()\n accessories = list()\n s_and_c = list()\n for attr in attributes:\n if attr.is_best:\n best.append(attr)\n if attr.type == 'accessories':\n accessories.append(attr)\n elif attr.type == 'shoes' or attr.type == 'clothing':\n s_and_c.append(attr)\n return render_template('market.html', phone=phone, ppnp=ppnp, best=best, accessories=accessories, s_and_c=s_and_c)\n","sub_path":"app/api/controllers/market.py","file_name":"market.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"571387197","text":"from pyarcade.game_interface import GameInterface\nfrom pyarcade.minesweeper_builder import MinesweeperBoardBuilder\nfrom pyarcade.session_manager import 
SessionManager, MinesweeperSession\nfrom copy import deepcopy\n\n\nclass MinesweeperGame(GameInterface):\n \"\"\" A class representing MinesweeperGame game sessions. For now each instance of MinesweeperGame\n game will hold multiple sessions where there is only one player against the computer. Any instance has access to\n these sessions.\n\n Note:\n For now, MinesweeperGame will have a 9x9 board with only 10 mines.\n \"\"\"\n\n def __init__(self):\n self.session_manager = SessionManager.singleton()\n\n @staticmethod\n def print_board(board: dict) -> str:\n \"\"\"\n Note: This function is useful for debugging\n\n Args:\n board: The game board\n\n Returns: The game board with all cells revealed.\n \"\"\"\n reply = \"\"\n for key in board.keys():\n line = \"\"\n for cell in board[key]:\n if cell == 'mine':\n line += 'x '\n else:\n line += str(cell) + ' '\n line += '\\n'\n reply += line\n return reply\n\n @staticmethod\n def print_player_board(player_board: dict, board: dict) -> str:\n reply = \"\"\n for key in board.keys():\n line = \"\"\n for cell in range(len(board[key])):\n if isinstance(player_board[key][cell], str):\n line += 'F '\n elif player_board[key][cell]:\n line += '🬅 '\n else:\n line += str(board[key][cell]) + ' '\n line += '\\n'\n reply += line\n return reply\n\n @staticmethod\n def print_board_done(player_board: dict, board: dict) -> str:\n reply = \"\"\n for key in board.keys():\n line = \"\"\n for cell in range(len(board[key])):\n if isinstance(player_board[key][cell], str):\n line += 'F '\n elif isinstance(board[key][cell], str):\n line += 'x '\n elif player_board[key][cell]:\n line += '🬅 '\n else:\n line += str(board[key][cell]) + ' '\n line += '\\n'\n reply += line\n return reply\n\n @staticmethod\n def end_game(session: MinesweeperSession):\n \"\"\"\n Args:\n session: The session to be modified\n\n Returns: Nothing. 
Updates game state in place.\n Note: This contains some dead code from testing.\n \"\"\"\n session.set_to_done()\n print(\"You lose\\n\")\n MinesweeperGame.print_board(session.data[\"board\"])\n\n @staticmethod\n def unhide_cell(location: tuple, session: MinesweeperSession):\n \"\"\"\n Args:\n location: Location on the board of where to flag\n session: The session that is being modified\n\n Returns: Nothing. It modifies in place. If player unhides a mine then update game state to done.\n \"\"\"\n # Location stores in (row,column)\n if session.data[\"board\"][location[0]][location[1]] == 'mine':\n MinesweeperGame.end_game(session)\n else:\n session.data[\"player_board\"][location[0]][location[1]] = False\n session.data[\"cells_hidden\"] += -1\n\n @staticmethod\n def flag_cell(location: tuple, session: MinesweeperSession):\n \"\"\"\n Args:\n location: Location on the board of where to flag\n session: The session that is being modified\n\n Returns: Nothing. It modifies inplace.\n \"\"\"\n # Will not get bad input, so only cells that are hidden can be flagged\n if session.data[\"player_board\"][location[0]][location[1]] == 'flag':\n session.data[\"player_board\"][location[0]][location[1]] = True\n print(session.data[\"flags\"])\n session.data[\"flags\"] += 1\n print(session.data[\"flags\"])\n else:\n session.data[\"player_board\"][location[0]][location[1]] = 'flag'\n session.data[\"flags\"] += (-1)\n\n def create_game(self, request: dict) -> dict:\n \"\"\"\n Args:\n request: dictionary containing single key-value pair. The key is \"game_id\". The value\n is an integer denoting which game should be created\n\n Returns:\n reply: dictionary containing a single key-value pair. The key is \"session_id\". 
The value is a\n integer unique to all ongoing game sessions.\n \"\"\"\n\n board, mines = MinesweeperBoardBuilder.initialize_board()\n # Player board stores the state of whether or not cells are hidden or flagged.\n player_board = {}\n for length in range(MinesweeperBoardBuilder.EASY_SIZE):\n player_board[length] = [True] * MinesweeperBoardBuilder.EASY_SIZE\n new_game_session = {\"player_board\": player_board, \"board\": board,\n \"mines\": mines, \"cells_hidden\": MinesweeperBoardBuilder.EASY_SIZE ** 2,\n \"flags\": MinesweeperBoardBuilder.EASY_MINES}\n\n return self.session_manager.init_minesweeper_session(new_game_session)\n\n def read_game(self, request: dict) -> dict:\n \"\"\"\n Args:\n request: dictionary containing single key-value pair. The key is \"session_id\". The value is a\n integer unique to all ongoing game sessions.\n\n Returns:\n reply: dictionary containing a several key-value pairs that fully describe the game's state. They will be\n board which contains a string of the board, number of mines in this mode and the number flags the player has\n left to place as well as if the game is done or not. If the game was a loss, board contains x where the\n bombs were and correct flags. 
If it was a win, then board string is board with flags where the mines were.\n \"\"\"\n session = self.session_manager.get_session_by_id(request[\"session_id\"])\n data = session.data\n reply = deepcopy(data)\n reply.pop(\"cells_hidden\")\n if session.is_done():\n reply[\"board\"] = MinesweeperGame.print_board_done(reply[\"player_board\"], reply[\"board\"])\n else:\n reply[\"board\"] = MinesweeperGame.print_player_board(reply[\"player_board\"], reply[\"board\"])\n reply.pop(\"player_board\")\n reply[\"mines\"] = MinesweeperBoardBuilder.EASY_MINES\n reply[\"session_id\"] = request[\"session_id\"]\n reply[\"done\"] = session.is_done()\n\n return reply\n\n def update_game(self, request: dict) -> dict:\n \"\"\"\n Args:\n request: dictionary describing the \"move\" to be made in the game\n\n Returns:\n reply: dictionary describing the game's new state.\n \"\"\"\n session = self.session_manager.get_session_by_id(request[\"session_id\"])\n if \"unhide_cell\" in request:\n location = request[\"unhide_cell\"]\n MinesweeperGame.unhide_cell(location, session)\n request.pop(\"unhide_cell\")\n if \"flag_cell\" in request:\n location = request[\"flag_cell\"]\n MinesweeperGame.flag_cell(location, session)\n request.pop(\"flag_cell\")\n\n return self.read_game(request)\n\n def delete_game(self, request: dict) -> dict:\n \"\"\"\n Args:\n request: dictionary containing a single key-value pair. The key is \"session_id\". The value is a\n integer unique to all ongoing game sessions.\n\n Returns:\n reply: dictionary containing a single key-value pair. The key is \"session_id\". 
The value is a\n integer unique to all ongoing game sessions.\n \"\"\"\n\n return self.session_manager.delete_session(request[\"session_id\"])\n","sub_path":"pyarcade/pyarcade/minesweeper.py","file_name":"minesweeper.py","file_ext":"py","file_size_in_byte":7862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"608110112","text":"#!/usr/bin/env\tpython\nimport os\nimport sys\nimport string\nimport random\nimport re\nimport requests\n\nclass module_element(object):\n def __init__(self):\n self.require = [\n {\"name\": \"website\", \"value\": \"\", \"required\": \"yes\"}\n ]\n self.export = []\n self.export_file = \"\"\n self.id_list = []\n self.export_status = False\n self.meta = {\n 'author': 'Tristan Granier',\n 'type': 'website',\n 'result': \"rows\",\n 'export_primary': 'person',\n 'description': 'Search possible email address on selected page'\n }\n\n def get_options(self, name):\n for argument in self.require:\n if argument['name'] == name:\n return argument['value']\n\n def generate_unique_id(self, size=6, chars=string.ascii_uppercase + string.digits):\n start = 0\n unique_id = \"\"\n while start == 0:\n unique_id = ''.join(random.choice(chars) for _ in range(size))\n if unique_id not in self.id_list:\n self.id_list.append(unique_id)\n start = 1\n return unique_id\n\n def main(self):\n email_tpl = []\n email_checked = []\n headers = {\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',\n 'Accept-Encoding': 'none',\n 'Accept-Language': 'en-US,en;q=0.8',\n 'Connection': 'keep-alive'\n }\n website = self.get_options('website')\n loaded = 0\n if not \"://\" in website:\n website = \"http://\" + website\n try:\n req = requests.get(website, headers=headers)\n html = req.content\n loaded = 1\n except:\n return 
False\n\n emails = re.findall(r'[\\w\\.-]+@[\\w\\.-]+', html)\n if len(emails) > 0:\n for email in emails:\n if email not in email_checked:\n email_tpl.append({\n '_id': self.generate_unique_id(),\n 'email': str(email)\n })\n email_checked.append(email)\n self.export.append(email_tpl)\n\n","sub_path":"framework/engine/modules/get_email_website.py","file_name":"get_email_website.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"279744439","text":"\"\"\" Stackelberg \"\"\"\n\n# Dg, Dd = build_game_gradient([G_loss, D_loss], [G, D])\nDg = autograd.grad(G_loss, G.parameters(), create_graph=True)\nDd = autograd.grad(D_loss, D.parameters(), create_graph=True)\nDd_g = autograd.grad(G_loss, D.parameters(), create_graph=True) # Dy_f\n\nDD_reg = JacobianVectorProduct(Dd, list(D.parameters()), regularization)\n# leader_grad, q, x0 = compute_stackelberg_grad(G, Dg, Dd, Dd_g, DD_reg, x0, precise=precise, device=device)\nDg_vec = torch.cat([_.flatten() for _ in Dg]).view(-1, 1) # Dx_f_flat\nDd_g_vec = torch.cat([_.flatten() for _ in Dd_g]).view(-1, 1) # Dy_f_flat\n\nw, status = scipy.sparse.linalg.cg(DD_reg, Dd_g_vec.cpu().detach().numpy(), maxiter=5)\nq = torch.Tensor(JacobianVectorProduct(Dd, list(G.parameters()))(w)).view(-1, 1).to(device)\n\nleader_grad = Dg_vec - q\n\"\"\" Ours \"\"\"\nDx_f = autograd.grad(loss_G, list(G.parameters()), create_graph=True, retain_graph=True)\nDy_g = autograd.grad(loss_D, list(D.parameters()), create_graph=True, retain_graph=True)\nDy_f = autograd.grad(loss_G, list(D.parameters()), create_graph=True, retain_graph=True)\n\nHyy_g_reg_lo = JacobianVectorProduct(Dy_g, list(D.parameters()), self.hessian_reg_y)\n\nDy_g_flat = torch.cat([_.flatten() for _ in Dy_g]).view(-1, 1)\nDy_f_flat = torch.cat([_.flatten() for _ in Dy_f]).view(-1, 1)\nDy_f_flat_np = np.nan_to_num(Dy_f_flat.cpu().detach().numpy())\n\nHyy_g_reg_inv_Dy_f_flat, status = 
ssla.cg(Hyy_g_reg_lo, Dy_f_flat_np, maxiter=self.cg_maxiter)\n# print(\"Hyy_g_reg_inv_Dy_f_flat.shape\", Hyy_g_reg_inv_Dy_f_flat.shape)\n\nHxy_g_lo = JacobianVectorProduct(Dy_g, list(G.parameters()))\nHxy_g_Hyy_g_reg_inv_Dy_f_flat = torch.Tensor(Hxy_g_lo(Hyy_g_reg_inv_Dy_f_flat)).view(-1, 1).to(self.device)\nupdate_x = lr_x * (Dx_f_flat - Hxy_g_Hyy_g_reg_inv_Dy_f_flat)\n","sub_path":"Examine_Code.py","file_name":"Examine_Code.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"316855512","text":"# 백준 문제(2021.5.29)\n# 19698번) 농부 존은 소들을 위해 새로운 헛간을 지었다. \n# 소들의 리더인 암소 베시는 새로운 헛간에 입주할 소들을 추첨으로 정하기로 했다. \n# 소들은 그동안 모아 왔던 건초 더미를 담보로 추첨에 응모했다.\n# 이번 추첨에는 총 N마리의 소가 응모했다. 헛간은 크기의 직사각형이다. \n# 베시는 소 한 마리당 크기의 공간을 배정할 계획이다.\n# 베시는 헛간을 최대한 효율적으로 나누어 최대한 많은 소를 입주 시키려고 한다. \n# 헛간에는 최대 몇 마리의 소들이 입주할 수 있는지 출력하라. \n# 소들이 입주하는 공간의 각 변은 직사각형의 각 변에 대해 수평 혹은 수직이여야 한다.\n\nn, w, h, l = map(int, input().split())\nresult = (w//l)*(h//l)\n\nif(n <= result) :\n print(n)\nelse :\n print(result)","sub_path":"Bronze4/19698.py","file_name":"19698.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"364648895","text":"import functools\nimport pandas as pd\nimport time\n\n\n# timer wrapper function, used to find runtime of functions in testing\ndef timer(func):\n @functools.wraps(func)\n def wrapper_timer(*args, **kwargs):\n tic = time.perf_counter()\n value = func(*args, **kwargs)\n toc = time.perf_counter()\n elapsed_time = toc - tic\n print(f\"Elapsed time: {elapsed_time:0.4f} seconds\")\n return value\n\n return wrapper_timer\n\n\nratings_header = ['UserID', 'MovieID', 'Rating', 'Timestamp']\n\nratings_path: str = 'ml-1m/ratings.dat.xz'\nratings_prep_path: str = 'ml-1m/ratings.prep.dat'\n\n\n# load with python engine\n@timer\ndef load(file_name: str) -> pd.DataFrame:\n return pd.read_csv(ratings_path, 
sep='::', names=ratings_header, engine='python')\n\n\n# pre-processing step, replace '::' with ','\n# @timer\ndef prep(file_name: str, prep_file_name: str) -> None:\n with open(file_name) as source, open(prep_file_name, 'w') as target:\n for line in source:\n target.write(line.replace('::', ','))\n\n\n# load with pre-processing\n@timer\ndef load_with_prep(file_name: str, prep_file_name: str) -> pd.DataFrame:\n prep(file_name, prep_file_name)\n\n return pd.read_csv(prep_file_name, names=ratings_header) # , engine='c')\n\n\n@timer\ndef load_mem(file_name: str) -> pd.DataFrame:\n with open(file_name) as file:\n data = file.read()\n\n return pd.read_csv(data.replace('::', ','), names=ratings_header, engine='c')\n\n\n@timer\ndef load_alt(file_name: str) -> pd.DataFrame:\n return pd.read_table(file_name, sep=':', usecols=[0, 2, 4, 6], names=ratings_header, engine='c')\n\n\nload(ratings_path)\n\nload_with_prep(ratings_path, ratings_prep_path)\n\n# load_mem(ratings_path)\n\nload_alt(ratings_path)\n\n# print(df)\n","sub_path":"assignment3/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"115711399","text":"#views.py HvS asset app\nfrom flask import Flask, flash, redirect, render_template, request, session, url_for, g\nfrom functools import wraps\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom forms import AddAsset, RegisterForm, LoginForm\nfrom sqlalchemy.exc import IntegrityError\n\napp=Flask(__name__)\napp.config.from_object('config')\ndb = SQLAlchemy(app)\n\nfrom models import HvSAssets, User\n\ndef login_required(test):\n @wraps(test)\n def wrap (*args, **kwargs):\n if 'logged_in' in session:\n return test(*args, **kwargs)\n else:\n flash(\"you need to lgin first\")\n return redirect (url_for('login'))\n return wrap\n\n@app.route('/logout/')\ndef logout():\n session.pop('logged_in', None)\n session.pop('user_id', None)\n flash('You are logged 
out')\n return redirect(url_for('login'))\n\n# @app.route('/')\n# def home():\n# \"\"\"Render website's home page.\"\"\"\n# return render_template('login.html')\n\n\n@app.route ('/', methods=['GET', 'POST'])\ndef login():\n error=None\n if request.method=='POST':\n u=User.query.filter_by(name=request.form['name'],\n password=request.form['password']).first()\n if u is None:\n error='Invalid username or password'\n else:\n session['logged_in']=True\n session['user_id']=u.id\n flash('You are logged in')\n return redirect(url_for('assets'))\n return render_template(\"login.html\",\n form=LoginForm(request.form),\n error=error)\n\n@app.route ('/assets/')\n@login_required\ndef assets():\n open_assets = db.session.query(HvSAssets).filter_by(status=\"1\").order_by(HvSAssets.install_date.asc())\n closed_assets = db.session.query(HvSAssets).filter_by(status=\"0\").order_by(HvSAssets.install_date.asc())\n \n return render_template('assets.html', form=AddAsset(request.form), open_assets=open_assets, closed_assets=closed_assets)\n\n#@app.route ('/display//',)\n#def display(asset_id):\n# g.db=connect_db()\n# curr=g.db.execute('select asset_type, asset_descr, install_date, priority, status, asset_id from assets where asset_id='+str(asset_id))\n# display_assets=[dict(type=row[0], descr=row[1], date=row[2], priority=row[3], status=row[4], asset_id=row[5]) for row in curr.fetchall()] \n# g.db.close()\n# return \"ok\"\n# return render_template('dash2.html', display_assets=display_assets)\n\n\n\n#@app.route ('/dash/')\n#@login_required\n#def dash():\n# g.db=connect_db()\n# curr=g.db.execute('select asset_type, asset_descr, install_date, priority, status, asset_id from assets where status=1')\n# open_assets = [dict(type=row[0], descr=row[1], date=row[2], priority=row[3], status=row[4], asset_id=row[5]) for row in curr.fetchall()]\n# curr=g.db.execute('select asset_type, asset_descr, install_date, priority, status, asset_id from assets where asset_id=\"19\"')\n# 
closed_assets=[dict(type=row[0], descr=row[1], date=row[2], priority=row[3], status=row[4], asset_id=row[5]) for row in curr.fetchall()] \n# g.db.close()\n# return render_template('dash.html', open_assets=open_assets, display_assets=closed_assets)\n\n\n#add assets\n@app.route('/add/', methods=['GET', 'POST'])\n@login_required\ndef new_asset():\n \n form=AddAsset(request.form, csrf_enabled=False)\n \n if form.validate_on_submit():\n# return(form.asset_type.data)\n# else:\n# return \"not validated\"\n new_asset=HvSAssets(\n form.asset_type.data,\n form.asset_descr.data,\n form.install_date.data,\n form.priority.data,\n form.posted_date.data,\n '1',\n session['user_id']\n )\n \n db.session.add (new_asset)\n db.session.commit()\n flash('New entry successfully posted')\n else:\n flash_errors(form)\n return redirect(url_for('assets'))\n\n#change asset status\n@app.route('/complete//',)\n@login_required\ndef complete(asset_id):\n new_id=asset_id\n db.session.query(HvSAssets).filter_by(asset_id=new_id).update({\"status\":\"0\"})\n db.session.commit()\n flash('The asset status was changed')\n return redirect (url_for('assets'))\n\n#display asset detail\n#@app.route('/display//',)\n#@login_required\n#def display(asset_id):\n# g.db=connect_db()\n# curr=g.db.execute('select asset_type, asset_descr, install_date, priority, status, asset_id from assets where asset_id='+str(asset_id))\n# display_assets=[dict(type=row[0], descr=row[1], date=row[2], priority=row[3], status=row[4], asset_id=row[5]) for row in curr.fetchall()]\n# g.db.close()\n# flash('The asset status was changed')\n# return redirect (url_for('display', assets=display_assets))\n\n\n\n\n\n#Delete asset\n@app.route('/delete//',)\n@login_required\ndef delete_asset(asset_id):\n new_id=asset_id\n db.session.query(HvSAssets).filter_by(asset_id=new_id).delete()\n db.session.commit()\n flash('the asset was deleted')\n return redirect (url_for('assets'))\n\n@app.route ('/register/', methods=['GET', 'POST'])\ndef 
register():\n error= None\n form=RegisterForm(request.form, csrf_enabled=False)\n if form.validate_on_submit():\n\n new_user=User(\n form.name.data,\n form.email.data,\n form.password.data,\n )\n try:\n db.session.add(new_user)\n db.session.commit()\n flash('Thank you for registering. Please login')\n return redirect(url_for ('login'))\n except IntegrityError:\n error='That username and/or email already exists -- Please try again'\n return render_template('register.html', form=form, error=error)\n else:\n flash_errors(form)\n return render_template('register.html', form=form, error=error)\n\ndef flash_errors(form):\n for field, errors in form.errors.items():\n for error in errors:\n flash (u\"Error in the %s field -%s\" %(\n getattr(form,field).label.text,error), 'error')\n\n@app.errorhandler(500)\ndef internal_error(error):\n db.session.rollback()\n return render_template ('500.html'), 500\n\n@app.errorhandler(404)\ndef internal_error(error):\n return render_template ('404.html'), 404\n\n ","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"2071191","text":"import requests\nfrom bs4 import BeautifulSoup\nimport MySQLdb\nimport json\n\nurl = ['音乐网歌曲页url'.format(i) for i in range(1, 917)]\ndb = MySQLdb.connect(\"主机地址\", \"用户名\", \"密码\", \"数据库名\", charset='utf8')\ncursor = db.cursor()\nfor i in url:\n wb_data = requests.get(i)\n soup = BeautifulSoup(wb_data.text, 'lxml')\n divs = soup.select('.music-block')\n ids = []\n print(i)\n for j in divs:\n ids.append(j.get('data-music-id'))\n for k in ids:\n urls = '音乐信息url' + str(k)\n wb_data = requests.get(urls)\n t = json.loads(wb_data.text)\n ida = k\n music_name = t.get('data').get('tag_title')\n artist = t.get('data').get('artist')\n img_url = t.get('data').get('album_cover_300_url')\n file_url = t.get('data').get('file_url')\n sql = \"INSERT INTO musicinfo VALUES 
(%s,%s,%s,%s,%s)\"\n cursor.execute(sql, (ida, music_name, artist, img_url, file_url))\ndb.commit()\n\n","sub_path":"SiseMusicPy.py","file_name":"SiseMusicPy.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"127178491","text":"from tornado.tcpserver import TCPServer\nfrom tornado.ioloop import IOLoop\nfrom tornado.httpclient import AsyncHTTPClient\nfrom tornado.httpclient import HTTPRequest\nfrom tornado import gen \nfrom trollius import From\nfrom functools import partial\nimport logging\n\nlogging.basicConfig(level=logging.DEBUG, format='%(asctime)s'+logging.BASIC_FORMAT)\nlogger = logging.getLogger(__name__)\n\nclass UrlServer(TCPServer):\n def handle_stream(self, stream, address):\n logger.debug('new connection' + str(address))\n self._read_line(stream)\n\n def _read_line(self,stream):\n handle = partial(self._handle_read, stream)\n stream.read_until('\\n', handle)\n\n @gen.coroutine\n def _handle_read(self, stream, data_in):\n stream.write(data_in.upper())\n http_client = AsyncHTTPClient()\n try:\n response = yield From(http_client.fetch(\"http://\" + data_in.strip()))\n logger.debug(response)\n stream.write(bytes(response) + \"\\n\")\n except:\n logger.exception('hndle error')\n self._read_line(stream)\n\nif __name__ == '__main__':\n UrlServer().listen(8001)\n IOLoop.current().start()\n","sub_path":"tornado/torUrlServer1.py","file_name":"torUrlServer1.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"328870099","text":"\"\"\"\nThe MIT License (MIT)\n\nOriginal work Copyright (c) 2015 Rapptz\nModified work Copyright 2015 Jose Francisco Taas\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without 
limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\n\nimport curses\nimport sys\n\n##\n# Handles TUI using curses\n##\nclass DTInterface(object):\n\n def __init__(self, on_return_key):\n self.on_return_key = on_return_key\n self._init_curses()\n self.print_output('DISCORD TERMINAL CLIENT', colorpair = 4)\n self.print_output('Type /help to list available commands.')\n self.print_info('Welcome to Discord!')\n\n def update(self):\n try:\n self._getkey()\n except:\n self.close()\n sys.exit()\n\n def close(self):\n \"\"\"Restores terminal to default behaviour.\"\"\"\n curses.nocbreak()\n self.stdscr.keypad(False)\n curses.echo()\n curses.nl()\n curses.endwin()\n\n def clear(self):\n \"\"\"Clears output box\"\"\"\n self._output_box.erase()\n self._refresh()\n\n def print_output(self, message, newline = True, colorpair=1):\n \"\"\"Displays a message in the output box.\"\"\"\n if newline:\n message = message + '\\n'\n self._output_box.attron(curses.color_pair(colorpair))\n self._output_box.addstr(message)\n self._refresh()\n\n def print_info(self, message, colorpair=1):\n \"\"\"Displays a message in the info box.\"\"\"\n self._info_box.erase()\n self._refresh()\n\n 
self._info_box.attron(curses.color_pair(colorpair))\n self._info_box.addstr(message)\n self._refresh()\n\n def set_input_status(self, server, channel):\n \"\"\"Sets the information displayed in the input box\"\"\"\n self.input_status = '[' + server + '] '\n self.input_status += '<' + channel + '> '\n self._update_input_status()\n\n def _init_curses(self):\n\n # initialize curses\n self.stdscr = curses.initscr()\n \n # set custom color pairs\n # TODO check COLORS for number of\n # supported pairs\n curses.start_color()\n curses.use_default_colors()\n curses.init_pair(1,-1,-1)\n curses.init_pair(2,curses.COLOR_BLUE,-1)\n curses.init_pair(3,curses.COLOR_RED,-1)\n curses.init_pair(4,curses.COLOR_CYAN,-1)\n curses.init_pair(5,curses.COLOR_YELLOW,-1)\n curses.init_pair(6,curses.COLOR_MAGENTA,-1)\n curses.init_pair(7,curses.COLOR_GREEN,-1)\n curses.init_pair(8,curses.COLOR_BLUE,-1)\n curses.init_pair(9,curses.COLOR_BLUE,-1)\n\n # change terminal behaviour\n curses.noecho()\n curses.cbreak()\n curses.nonl()\n self.stdscr.keypad(True)\n\n # create windows\n self._output_box = curses.newwin(curses.LINES-3,curses.COLS,0,0)\n self._output_box.scrollok(True)\n self._output_box.idlok(1)\n self._info_box = curses.newwin(1,curses.COLS,curses.LINES-2,0)\n self._input_box = curses.newwin(1,curses.COLS,curses.LINES-1,0)\n self._input_buffer = ''\n self.set_input_status('none', 'none')\n\n\n def _getkey(self):\n c = self._input_box.getch()\n if c == curses.KEY_BACKSPACE or c == 127:\n yx = self._input_box.getyx()\n if len(self.input_status) != yx[1]:\n self._input_box.delch(yx[0], yx[1]-1)\n self._input_buffer = self._input_buffer[:-1]\n elif c == curses.KEY_ENTER or c == 13:\n if len(self._input_buffer) != 0:\n self._update_input_status() # delete line without removing channel\n self.on_return_key(self._input_buffer)\n self._input_buffer = ''\n else:\n self._input_buffer += chr(c)\n self._input_box.addstr(chr(c))\n self._refresh()\n\n\n def _refresh(self):\n 
self._output_box.refresh()\n self._info_box.refresh()\n self._input_box.refresh()\n\n def _update_input_status(self):\n self._input_box.deleteln()\n self._input_box.addstr(self._input_box.getyx()[0], 0, self.input_status)\n self._refresh()\n\n","sub_path":"discordt/dtinterface.py","file_name":"dtinterface.py","file_ext":"py","file_size_in_byte":5038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"357588607","text":"# UVA10812 - Beat the Spread! - Accepted\n# goo.gl/Bj6JEx\n# @author: Marcelo Adriano Amancio\n# 'If you're trying to create a company, it's like baking a cake. You have to have all the ingredients in the right proportion.' -- Elon Musk \n\n\nfrom sys import stdin\nfrom math import sqrt\n\nif __name__ == '__main__':\n\tn = int(stdin.readline())\n\tfor _ in range(n):\n\t\tp,m = map(int, stdin.readline().split())\n\t\ta = (p+m)//2\n\t\tb = (p-m)//2\n\t\tif a+b == p and a>=0 and b>=0:\n\t\t\tprint (a,b)\n\t\telse:\n\t\t\tprint('impossible')\n\n\t\t\n\t\t\n\t\n","sub_path":"10812/uva10812.py","file_name":"uva10812.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"314076630","text":"from tkinter import *\n\n\ndef main():\n\n target_file = \"TODO.txt\"\n\n def read():\n output_one.delete(1.0, END)\n outstring = \"\"\n linecount = 0\n with open(target_file) as file_read:\n for i in file_read.readlines():\n linecount += 1\n outstring += format(linecount, \"2d\") + \". 
\" + i\n output_one.insert(END, outstring)\n \n def run_add():\n with open(target_file, \"a\") as file_append:\n input_string = input_one.get() + \"\\n\"\n file_append.write(input_string)\n reset()\n\n def run_subtract():\n delete_file = \"del_\" + target_file\n delete_line = eval(input_two.get()) - 1\n outstring = \"\"\n\n with open(target_file, \"r\") as file_delete:\n list = file_delete.readlines()\n add_deleted_line = list.pop(delete_line)\n\n with open(delete_file, \"a\") as deleted:\n deleted.write(add_deleted_line + \"\\n\")\n\n with open(target_file, \"w\") as file_append:\n for i in list:\n outstring += i\n file_append.write(outstring)\n\n reset()\n\n def reset():\n output_one.delete(1.0, END)\n input_one.delete(0, END)\n input_two.delete(0, END)\n read()\n\n root = Tk()\n root.title(\"~~ To Do List ~~\")\n top_gui = Frame(root)\n top_gui.grid(row=1)\n bottom_gui = Frame(root)\n bottom_gui.grid(row=2)\n\n # Top GUI\n Label(top_gui, text=\"Enter text to be added:\", width=34).grid(row=1, column=1)\n runner = Button(top_gui, text=\"Run\", width=34, command=lambda: run_add())\n runner.grid(row=1, column=2)\n\n input_one = Entry(top_gui, width=80)\n input_one.grid(row=2, column=1, columnspan=2)\n\n Label(top_gui, text=\"Enter line to be removed:\", width=34).grid(row=3, column=1)\n Button(top_gui, text=\"Remove\", command=lambda: run_subtract(), width=34).grid(row=3, column=2)\n\n input_two = Entry(top_gui, width=38)\n input_two.grid(row=4, column=1)\n\n Button(top_gui, text=\"Reset\", command=lambda: reset(), width=34).grid(row=4, column=2)\n\n # Bottom GUI\n output_one = Text(bottom_gui, width=60, background=\"light gray\")\n output_one.grid(row=2, column=1)\n Button(bottom_gui, width=68, text=\"QUIT\", command=root.destroy, fg=\"red\").grid(row=3, column=1)\n\n read()\n\n root.mainloop()\n\n\nif __name__ == '__main__':\n 
main()","sub_path":"Learn_Python/Tkinter/GUI_Test3_ToDo/Adder_V2.py","file_name":"Adder_V2.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"474464667","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow.contrib import rnn\nfrom tensorflow.python.layers.core import Dense\nfrom tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors\nimport numpy as np\nimport os\nimport datetime\nimport pickle\nimport random\n\n\nfile_data = \"./data/sentence_full.tsv\"\nfile_model = \"./model/model.ckpt\"\nfile_dic_eng = \"./model/dic_eng.bin\"\nfile_rdic_eng = \"./model/rdic_eng.bin\"\nfile_dic_kor = \"./model/dic_kor.bin\"\nfile_rdic_kor = \"./model/rdic_kor.bin\"\nfile_data_list = \"./model/data_list.bin\"\nfile_data_idx_list = \"./model/data_idx_list.bin\"\nfile_max_len = \"./model/data_max_len.bin\"\ndir_summary = \"./model/summary/\"\n\npre_trained = 0\nmy_device = \"/cpu:0\"\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n\nif not os.path.exists(dir_summary):\n os.makedirs(dir_summary)\n\n\nif pre_trained == 0:\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n print(now)\n\n print(\"Load data file & make vocabulary...\")\n\n data_list = []\n total_eng = \"\"\n total_kor = \"\"\n with open(file_data, \"r\", encoding=\"utf8\") as tsv:\n for line in tsv:\n sep = line.split(\"\\t\")\n\n category = int(sep[0].replace(\"\\ufeff\", \"\"))\n sentence_english = sep[1].lower()\n sentence_english = sentence_english.replace(\"\\n\", \"\")\n sentence_korean = sep[2].lower()\n sentence_korean = sentence_korean.replace(\"\\n\", \"\")\n\n total_eng += sentence_english\n total_kor += sentence_korean\n data_list.append([list(sentence_english), list(sentence_korean), category])\n\n print(\"data_list example\")\n 
print(data_list[0])\n print(\"data_list size = %d\" % len(data_list))\n\n # make english char vocab\n symbols_eng = [\"\", \"\"]\n dic_eng = symbols_eng + list(set(total_eng))\n rdic_eng = {w: i for i, w in enumerate(dic_eng)}\n voc_size_eng = len(dic_eng)\n print(\"voc_size_eng size = %d\" % voc_size_eng)\n print(rdic_eng)\n\n # make korean char vocab\n symbols_kor = [\"\", \"\", \"\"]\n dic_kor = symbols_kor + list(set(total_kor))\n rdic_kor = {w: i for i, w in enumerate(dic_kor)}\n voc_size_kor = len(dic_kor)\n print(\"voc_size_kor size = %d\" % voc_size_kor)\n print(rdic_kor)\n\n data_idx_list = []\n eng_len_list = []\n kor_len_list = []\n for english, korean, category in data_list:\n idx_eng = []\n for eng in english:\n e = \"\"\n if eng in dic_eng:\n e = rdic_eng[eng]\n else:\n e = rdic_eng[\"\"]\n idx_eng.append(e)\n\n idx_kor = []\n for kor in korean:\n k = \"\"\n if kor in dic_kor:\n k = rdic_kor[kor]\n else:\n k = rdic_kor[\"\"]\n idx_kor.append(k)\n\n data_idx_list.append([idx_eng, idx_kor, category])\n eng_len_list.append(len(english))\n kor_len_list.append(len(korean))\n\n max_eng_len = max(eng_len_list)\n max_kor_len = max(kor_len_list)\n max_len = [max_eng_len, max_kor_len]\n print(\"max_eng_len = %d\" % max_eng_len)\n print(\"max_kor_len = %d\" % max_kor_len)\n print()\n\n # save dictionary\n with open(file_data_list, 'wb') as handle:\n pickle.dump(data_list, handle, protocol=pickle.HIGHEST_PROTOCOL)\n with open(file_data_idx_list, 'wb') as handle:\n pickle.dump(data_idx_list, handle, protocol=pickle.HIGHEST_PROTOCOL)\n with open(file_dic_eng, 'wb') as handle:\n pickle.dump(dic_eng, handle, protocol=pickle.HIGHEST_PROTOCOL)\n with open(file_rdic_eng, 'wb') as handle:\n pickle.dump(rdic_eng, handle, protocol=pickle.HIGHEST_PROTOCOL)\n with open(file_dic_kor, 'wb') as handle:\n pickle.dump(dic_kor, handle, protocol=pickle.HIGHEST_PROTOCOL)\n with open(file_rdic_kor, 'wb') as handle:\n pickle.dump(rdic_kor, handle, protocol=pickle.HIGHEST_PROTOCOL)\n 
with open(file_max_len, 'wb') as handle:\n pickle.dump(max_len, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\nelse:\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n print(now)\n print(\"Load vocabulary from model file...\")\n\n with open(file_data_list, 'rb') as handle:\n data_list = pickle.load(handle)\n with open(file_data_idx_list, 'rb') as handle:\n data_idx_list = pickle.load(handle)\n with open(file_rdic_eng, 'rb') as handle:\n rdic_eng = pickle.load(handle)\n with open(file_dic_eng, 'rb') as handle:\n dic_eng = pickle.load(handle)\n with open(file_rdic_kor, 'rb') as handle:\n rdic_kor = pickle.load(handle)\n with open(file_dic_kor, 'rb') as handle:\n dic_kor = pickle.load(handle)\n with open(file_max_len, 'rb') as handle:\n max_len = pickle.load(handle)\n\n print(\"data_list example\")\n print(data_list[0])\n print(\"data_list size = %d\" % len(data_list))\n\n voc_size_eng = len(dic_eng)\n print(\"voc_size_eng size = %d\" % voc_size_eng)\n voc_size_kor = len(dic_kor)\n print(\"voc_size_kor size = %d\" % voc_size_kor)\n\n max_eng_len = max_len[0]\n max_kor_len = max_len[1]\n print(\"max_eng_len = %d\" % max_eng_len)\n print(\"max_kor_len = %d\" % max_kor_len)\n print()\n\n\npadded_eng_len = max_eng_len + 1\npadded_kor_len = max_kor_len + 2\nprint(\"padded_eng_len = %d\" % padded_eng_len)\nprint(\"padded_kor_len = %d\" % padded_kor_len)\n\n# split data set\nSIZE_TEST_DATA = 100\nrandom.shuffle(data_idx_list)\ndata_idx_list_test = data_idx_list[:SIZE_TEST_DATA]\ndata_idx_list = data_idx_list[SIZE_TEST_DATA:]\nprint(\"dataset for train = %d\" % len(data_idx_list))\nprint(\"dataset for test = %d\" % len(data_idx_list_test))\nprint()\n\n\n'''''''''''''''''''''''''''''''''''''''''''''\nBATCH GENERATOR\n'''''''''''''''''''''''''''''''''''''''''''''\ndef generate_batch(size):\n assert size <= len(data_idx_list)\n\n data_x = np.zeros((size, padded_eng_len), dtype=np.int)\n data_y = np.zeros((size, padded_kor_len), dtype=np.int)\n data_t = 
np.zeros((size, padded_kor_len), dtype=np.int)\n len_x = np.zeros(size, dtype=np.int)\n len_y = np.zeros(size, dtype=np.int)\n len_p = np.zeros(size, dtype=np.int)\n\n index = np.random.choice(range(len(data_idx_list)), size, replace=False)\n for a in range(len(index)):\n idx = index[a]\n\n x = data_idx_list[idx][0]\n len_x[a] = len(x)\n\n y = data_idx_list[idx][1]\n len_y[a] = len(y)\n len_p[a] = padded_kor_len\n\n t = data_idx_list[idx][1]\n assert len(x) > 0\n assert len(y) > 0\n\n x = x + [rdic_eng[\"\"]] * (padded_eng_len - len(x))\n y = [rdic_kor[\"\"]] + y + [rdic_kor[\"\"]] * (padded_kor_len - len(y) - 1)\n t = t + [rdic_kor[\"\"]] * (padded_kor_len - len(t))\n assert len(x) == padded_eng_len\n assert len(y) == padded_kor_len\n assert len(t) == padded_kor_len\n assert y[-1] == rdic_kor[\"\"]\n assert t[-1] == rdic_kor[\"\"]\n\n data_x[a] = x\n data_y[a] = y\n data_t[a] = t\n\n return data_x, data_y, data_t, len_x, len_y, len_p\n\n\ndef generate_test_batch(size):\n assert size <= len(data_idx_list_test)\n\n data_x = np.zeros((size, padded_eng_len), dtype=np.int)\n data_y = np.zeros((size, padded_kor_len), dtype=np.int)\n data_t = np.zeros((size, padded_kor_len), dtype=np.int)\n len_x = np.zeros(size, dtype=np.int)\n len_y = np.zeros(size, dtype=np.int)\n len_p = np.zeros(size, dtype=np.int)\n\n index = np.random.choice(range(len(data_idx_list_test)), size, replace=False)\n for a in range(len(index)):\n idx = index[a]\n\n x = data_idx_list_test[idx][0]\n len_x[a] = len(x)\n\n y = data_idx_list_test[idx][1]\n len_y[a] = len(y)\n len_p[a] = padded_kor_len\n\n t = data_idx_list_test[idx][1]\n assert len(x) > 0\n assert len(y) > 0\n\n x = x + [rdic_eng[\"\"]] * (padded_eng_len - len(x))\n y = [rdic_kor[\"\"]] + y + [rdic_kor[\"\"]] * (padded_kor_len - len(y) - 1)\n t = t + [rdic_kor[\"\"]] * (padded_kor_len - len(t))\n assert len(x) == padded_eng_len\n assert len(y) == padded_kor_len\n assert len(t) == padded_kor_len\n assert y[-1] == rdic_kor[\"\"]\n assert 
t[-1] == rdic_kor[\"\"]\n\n data_x[a] = x\n data_y[a] = y\n data_t[a] = t\n\n return data_x, data_y, data_t, len_x, len_y, len_p\n\n\nwith tf.Graph().as_default():\n\n '''''''''''''''''''''''''''''''''''''''''''''\n BUILD NETWORK\n '''''''''''''''''''''''''''''''''''''''''''''\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n print(now)\n print(\"Build Graph...\")\n\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.allow_soft_placement = True\n\n with tf.Session(config=config) as sess:\n\n with tf.device(my_device):\n SIZE_EMBED_DIM = 100\n SIZE_RNN_LAYER = 2\n SIZE_RNN_STATE = 60\n SIZE_ATTN = 60\n LEARNING_RATE = 0.001\n\n with tf.name_scope(\"input_placeholders\"):\n enc_input = tf.placeholder(tf.int32, shape=[None, None], name=\"enc_input\")\n enc_seq_len = tf.placeholder(tf.int32, shape=[None, ], name=\"enc_seq_len\")\n dec_input = tf.placeholder(tf.int32, shape=[None, None], name=\"dec_input\")\n dec_seq_len = tf.placeholder(tf.int32, shape=[None, ], name=\"dec_seq_len\")\n dec_pad_len = tf.placeholder(tf.int32, shape=[None, ], name=\"dec_pad_len\")\n targets = tf.placeholder(tf.int32, shape=[None, None], name=\"targets\")\n batch_size = tf.placeholder(tf.int32, shape=[], name=\"batch_size\")\n keep_prob = tf.placeholder(tf.float32, name=\"keep_prob\")\n\n with tf.name_scope(\"word_embedding\"):\n embeddings_eng = tf.get_variable(\"embeddings_eng\", [voc_size_eng, SIZE_EMBED_DIM])\n embed_enc = tf.nn.embedding_lookup(embeddings_eng, enc_input, name=\"embed_enc\")\n embeddings_kor = tf.get_variable(\"embeddings_kor\", [voc_size_kor, SIZE_EMBED_DIM])\n embed_dec = tf.nn.embedding_lookup(embeddings_kor, dec_input, name=\"embed_dec\")\n\n with tf.variable_scope(\"encoder_layer\"):\n cell_encode = []\n for a in range(SIZE_RNN_LAYER):\n cell = rnn.BasicLSTMCell(SIZE_RNN_STATE)\n cell = rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)\n cell_encode.append(cell)\n multi_rnn_encode = rnn.MultiRNNCell(cell_encode, 
state_is_tuple=True)\n output_enc, state_enc = tf.nn.dynamic_rnn(multi_rnn_encode, embed_enc, sequence_length=enc_seq_len,\n dtype=tf.float32)\n\n with tf.variable_scope(\"decoder_attention\"):\n attn_luong = tf.contrib.seq2seq.LuongAttention(\n num_units=SIZE_ATTN,\n memory=output_enc,\n memory_sequence_length=enc_seq_len,\n name=\"attention_luong\")\n\n cell_decode = []\n for a in range(SIZE_RNN_LAYER):\n cell = rnn.BasicLSTMCell(SIZE_RNN_STATE)\n cell = rnn.DropoutWrapper(cell, output_keep_prob=keep_prob)\n cell_decode.append(cell)\n multi_rnn_decode = rnn.MultiRNNCell(cell_decode, state_is_tuple=True)\n\n dec_cell = tf.contrib.seq2seq.DynamicAttentionWrapper(\n cell=multi_rnn_decode,\n attention_mechanism=attn_luong,\n attention_size=SIZE_ATTN,\n name=\"attention_wrapper\")\n\n initial_state = tf.contrib.seq2seq.DynamicAttentionWrapperState(\n cell_state=state_enc,\n attention=_zero_state_tensors(SIZE_ATTN, batch_size, tf.float32))\n\n output_layer = Dense(voc_size_kor, kernel_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))\n\n # train mode\n with tf.variable_scope(\"decoder_layer\"):\n train_helper = tf.contrib.seq2seq.TrainingHelper(inputs=embed_dec,\n sequence_length=dec_pad_len,\n time_major=False)\n train_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, train_helper, initial_state, output_layer)\n\n output_train_dec, state_train_dec = tf.contrib.seq2seq.dynamic_decode(\n decoder=train_decoder,\n output_time_major=False,\n impute_finished=True,\n maximum_iterations=padded_kor_len)\n\n # predict mode\n with tf.variable_scope(\"decoder_layer\", reuse=True):\n start_tokens = tf.tile(tf.constant([rdic_kor[\"\"]], dtype=tf.int32), [batch_size], name=\"start_tokens\")\n end_token = rdic_kor[\"\"]\n\n test_helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(embedding=embeddings_kor,\n start_tokens=start_tokens,\n end_token=end_token)\n test_decoder = tf.contrib.seq2seq.BasicDecoder(dec_cell, test_helper, initial_state, output_layer)\n\n 
output_test_dec, state_test_dec = tf.contrib.seq2seq.dynamic_decode(\n decoder=test_decoder,\n output_time_major=False,\n impute_finished=True,\n maximum_iterations=padded_kor_len)\n\n with tf.name_scope(\"train_optimization\"):\n train_logits = tf.identity(output_train_dec.rnn_output, name=\"train_logits\")\n masks = tf.sequence_mask(dec_seq_len + 1, padded_kor_len, dtype=tf.float32, name=\"masks\")\n\n loss = tf.contrib.seq2seq.sequence_loss(\n logits=train_logits,\n targets=targets,\n weights=masks,\n name=\"loss\")\n train_op = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)\n\n with tf.name_scope(\"prediction\"):\n test_prediction = tf.identity(output_test_dec.sample_id, name=\"test_prediction\")\n\n loss_summary = tf.summary.scalar(\"loss\", loss)\n merged_summary = tf.summary.merge_all()\n train_summary_writer = tf.summary.FileWriter(dir_summary, sess.graph)\n\n '''''''''''''''''''''''''''''''''''''''''''''\n TRAIN PHASE\n '''''''''''''''''''''''''''''''''''''''''''''\n saver = tf.train.Saver()\n sess.run(tf.global_variables_initializer())\n\n if pre_trained == 2:\n print(\"Restore model file...\")\n saver.restore(sess, file_model)\n\n BATCHS = 300\n BATCHS_TEST = 10\n EPOCHS = 50\n STEPS = int(len(data_idx_list) / BATCHS)\n\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n print(now)\n print(\"Train start!!\")\n\n glob_step = 0\n for epoch in range(EPOCHS):\n for step in range(STEPS):\n data_x, data_y, dat_t, len_x, len_y, len_p = generate_batch(BATCHS)\n\n feed_dict = {\n enc_input: data_x,\n enc_seq_len: len_x,\n dec_input: data_y,\n dec_seq_len: len_y,\n dec_pad_len: len_p,\n targets: dat_t,\n batch_size: BATCHS,\n keep_prob: 0.75\n }\n\n _, batch_loss, batch_loss_summ = sess.run([train_op, loss, merged_summary], feed_dict)\n\n if glob_step % 10 == 0:\n train_summary_writer.add_summary(batch_loss_summ, glob_step)\n\n if glob_step % 50 == 0:\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n print(\"epoch[%03d] 
glob_step[%05d] - batch_loss:%.5f (%s)\" % (epoch, glob_step, batch_loss, now))\n\n saver.save(sess, file_model)\n\n # simple test\n if glob_step % 500 == 0:\n data_x, data_y, data_t, len_x, len_y, len_p = generate_test_batch(BATCHS_TEST)\n feed_dict = {\n enc_input: data_x,\n enc_seq_len: len_x,\n dec_input: data_y,\n dec_seq_len: len_y,\n dec_pad_len: len_p,\n targets: dat_t,\n batch_size: BATCHS_TEST,\n keep_prob: 1.0\n }\n\n prediction_train = sess.run([train_logits], feed_dict)\n prediction_train = prediction_train[0]\n prediction_train = np.argmax(prediction_train, axis=2)\n\n prediction_test = sess.run([test_prediction], feed_dict)\n prediction_test = prediction_test[0]\n print()\n print(\"-\" * 60)\n for a in range(len(data_x)):\n sen_english = [dic_eng[r] for r in data_x[a]]\n sen_english = \"\".join(sen_english)\n\n sen_korean = [dic_kor[s] for s in data_y[a]]\n sen_korean = \"\".join(sen_korean)\n\n sen_train = [dic_kor[p] for p in prediction_train[a]]\n sen_train = \"\".join(sen_train)\n\n sen_test = [dic_kor[p] for p in prediction_test[a]]\n sen_test = \"\".join(sen_test)\n\n print(\"test[%03d] ENG_INPUT : %s\" % (a, sen_english))\n print(\"test[%03d] KOR_OUTPUT : %s\" % (a, sen_korean))\n #print(\"test[%03d] KOR_TRAIN : %s\" % (a, sen_train))\n print(\"test[%03d] KOR_PREDICT: %s\" % (a, sen_test))\n print()\n print(\"-\" * 60)\n print()\n\n glob_step += 1\n\n print(\"Train finished!!\")\n print()\n","sub_path":"reading_nmt_tf1.1.py","file_name":"reading_nmt_tf1.1.py","file_ext":"py","file_size_in_byte":18322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"225378906","text":"\"\"\"\r\nimmune system simulator\r\nsimulates how the immune system interacts with foreign bodys\r\ncreated with Tkinter\r\nauthor: William Escobar Parra, Tim Goldsmith, Mathew Glaysher and Freddie Dugdale\r\n\"\"\"\r\nimport turtle\r\n#import tkinter\r\nimport tkinter.messagebox\r\nimport random\r\nimport threading\r\nimport 
math\r\nimport os\r\nimport webbrowser\r\nfrom tkinter import *\r\n \r\ndef introWin():\r\n webbrowser.open(\"Instructions.txt\")\r\n \r\n \r\ndef credit():\r\n webbrowser.open(\"Credits.txt\")\r\n\r\ndef closeWin(): \r\n startWin.quit()\r\n startWin.destroy()\r\n\r\n#turtle speed value is set \r\nturtleSpeed=1\r\n\r\ndef gameWin():\r\n \r\n #Set up screen/ window \r\n window = turtle.Screen()\r\n window.bgcolor(\"black\")\r\n window.tracer(2) #this will skip frames, allowing program to run slightly faster\r\n #window.bgpic(\"CS.gif\")\r\n window.title('Immune Rush')\r\n # here the turtle is created\r\n user= turtle.Turtle()\r\n user.shape('triangle')\r\n user.color('red')\r\n user.speed=0\r\n user.penup()\r\n \r\n #drawing border, preventing turtle from losing itself\r\n borderGuide=turtle.Turtle()\r\n borderGuide.color('white')\r\n borderGuide.penup()\r\n borderGuide.setpos(-350,-350)\r\n borderGuide.pendown()\r\n borderGuide.pensize(4)\r\n\r\n for x in range (4):\r\n borderGuide.forward(700)\r\n borderGuide.left(90)\r\n\r\n borderGuide.hideturtle()\r\n\r\n\r\n\r\n\r\n #wasd movement functions defined\r\n def turnLeft():\r\n user.setheading(user.heading()+30)\r\n print(user.heading()) \r\n def turnright():\r\n user.setheading(user.heading()-30)\r\n print(user.heading())\r\n def speedIncrease():\r\n global turtleSpeed\r\n turtleSpeed+=1\r\n\r\n def changeShape():\r\n #tried to make the user turtle cycle through shapes by pressing space, it changes once but never again\r\n if user.shape() ==\"circle\":\r\n user.shape(\"square\")\r\n \r\n elif user.shape()==\"triangle\":\r\n user.shape(\"circle\")\r\n \r\n elif user.shape()==\"square\":\r\n user.shape(\"triangle\")\r\n \r\n #keyboard bindings\r\n turtle.onkey(speedIncrease, 'w')\r\n\r\n turtle.onkeypress(turnLeft,\"a\")\r\n turtle.onkeypress(turnright,\"d\")\r\n turtle.onkey(changeShape,\"space\")\r\n turtle.listen()\r\n\r\n #cloning multiple turtles \r\n maxPathogens=20\r\n turtles=[]\r\n\r\n\r\n for count in 
range(maxPathogens):\r\n \r\n #pathogen creation\r\n turtles.append(turtle.Turtle()) \r\n turtles[count].speed(0)\r\n turtles[count].penup()\r\n turtles[count].setposition(random.randint(-350,350),random.randint(-350,350))\r\n turtles[count].color(\"purple\")\r\n turtles[count].shape(\"triangle\")\r\n #creating score board display\r\n userScore= 0\r\n\r\n\r\n\r\n #math calculation to calculate collision\r\n\r\n def collisionCheck(tur1,tur2):\r\n \r\n x= math.sqrt(math.pow(tur1.xcor()-tur2.xcor(),2))\r\n y= math.pow(tur1.ycor()-tur2.ycor(),2)\r\n #pythagoras theorem,change in x coordinate squared\r\n #plus the change in y squared\r\n d=x+y\r\n if d<19:\r\n return True\r\n else:\r\n return False\r\n\r\n \r\n while userScore < 5:\r\n user.forward(turtleSpeed)\r\n \r\n\r\n #setting up x,y coordinates for boundary checking\r\n if user.xcor()>350 or user.xcor()<-350:\r\n user.right(180)\r\n \r\n #attempted to get the user turtle to bouce off the boundary realistically\r\n #if user.heading()\r\n \r\n #user.right(360 - user.heading())\r\n #print(user.heading())\r\n #boundary checking the y coordinates\r\n if user.ycor()>350 or user.ycor()<-350:\r\n user.right(180)\r\n\r\n\r\n\r\n\r\n #allows the movement of multiple pathogens/turtles\r\n \r\n \r\n for count in range (maxPathogens):\r\n turtles[count].forward(2)\r\n \r\n #setting up x,y coordinates for boundary checking\r\n if turtles[count].xcor()>350 or turtles[count].xcor()<-350:\r\n turtles[count].right(180)\r\n \r\n #boundary checking the y coordinates\r\n if turtles[count].ycor()>350 or turtles[count].ycor()<-350:\r\n turtles[count].right(180)\r\n\r\n\r\n #if collision occurs, this will take turtle to a random spot\r\n if collisionCheck(user,turtles[count]):\r\n if (user.shape() == turtles[count].shape())==True:\r\n \r\n turtles[count].setposition(random.randint(-350,350),random.randint(-350,350))\r\n turtles[count].right(random.randint(0,360))\r\n if turtles[count].shape()==\"circle\":\r\n 
turtles[count].shape(\"square\")\r\n turtles[count].color(\"blue\")\r\n userScore = userScore + 2\r\n elif turtles[count].shape()==\"triangle\":\r\n turtles[count].shape(\"circle\")\r\n turtles[count].color(\"yellow\")\r\n userScore = userScore + 1\r\n elif turtles[count].shape()==\"square\":\r\n turtles[count].shape(\"triangle\")\r\n turtles[count].color(\"purple\")\r\n userScore = userScore + 3\r\n \r\n #show user score on screen using the borderGuide turtle\r\n borderGuide.undo()\r\n borderGuide.penup()\r\n borderGuide.hideturtle()\r\n borderGuide.setposition(-300,320)\r\n borderGuide.write(\"Current Score: \"+ str(userScore), move=False, align=\"left\", font=(\"comic\", 15, \"bold\"))\r\n window.title('You Win!!!')\r\n\r\ndef newWin(): \r\n #window.deiconify() #exposes game window \r\n startWin.withdraw() #hides start menu window \r\n gameWin()\r\n\r\nstartWin=Tk() #start menu window \r\nwindow=Toplevel() #game window \r\nwindow.withdraw() #temporally hides game window\r\n\r\nfirstB = Button(text='Immune Rush\\n------------\\nStart Game', command=gameWin) #opens game \r\nfirstB.pack()\r\n\r\nsecondB = Button(text='Instrictions', command=introWin) #opens instructions.txt file\r\nsecondB.pack()\r\n\r\nthirdB = Button(text='Credits', command=credit) #opens credits.txt file \r\nthirdB.pack() \r\n\r\nlastB = Button(text='Exit Game', command=closeWin) #Closes menu window \r\nlastB.pack()\r\n\r\nstartWin.mainloop()\r\n","sub_path":"immune system game w. start menu.py","file_name":"immune system game w. 
start menu.py","file_ext":"py","file_size_in_byte":6528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"224291352","text":"import torch\nfrom utils.model_utils import *\n#Class conditional loglikelihood!\n\ndef elbo_recon(prediction,target):\n error = (prediction - target).view(prediction.size(0), -1)\n error = error ** 2\n error = torch.sum(error, dim=-1)\n return error\n\ndef calculate_ELBO(model,real_images):\n with torch.no_grad():\n real_mu, real_logvar, z_real, rec = model(real_images)\n loss_rec = elbo_recon(rec,real_images)\n loss_kl = model.kl_loss(real_mu, real_logvar)\n ELBO = loss_rec+loss_kl\n return -ELBO.squeeze()\n\ndef estimate_loglikelihoods(dataloader_test, model,s=1000):\n _loglikelihood_estimates = []\n _elbo_estimates = []\n _class = []\n tensor_s = torch.tensor(s).float()\n with torch.no_grad():\n for iteration, (batch, c) in enumerate(tqdm.tqdm(dataloader_test)):\n _elbo = []\n for i in tqdm.trange(s):\n with autocast():\n ELBO = calculate_ELBO(model,batch.cuda())\n _elbo.append(ELBO)\n _elbo_estimates.append(ELBO)\n likelihood_est = torch.stack(_elbo,dim=1)\n # print(torch.logsumexp(likelihood_est,dim=1).cpu()-torch.log(tensor_s))\n _loglikelihood_estimates.append(torch.logsumexp(likelihood_est,dim=1).cpu()-torch.log(tensor_s))\n _class.append(c)\n _elbo_estimates = torch.cat(_elbo_estimates,dim=0)\n _class = torch.cat(_class,dim=0)\n _loglikelihood_estimates = torch.cat(_loglikelihood_estimates,dim=0)\n return _loglikelihood_estimates,_elbo_estimates,_class\n\ndef calculate_metrics(_loglikelihood_estimates,_elbo_estimates,_class):\n with torch.no_grad():\n loglikelihood_estimate = _loglikelihood_estimates.mean(0)\n ELBO = _elbo_estimates.mean()\n loglikelihood_estimate_A =_loglikelihood_estimates[~_class].mean(0)\n loglikelihood_estimate_B = _loglikelihood_estimates[_class].mean(0)\n ELBO_A = _elbo_estimates[~_class].mean()\n ELBO_B = _elbo_estimates[_class].mean()\n\n return 
loglikelihood_estimate.item(),ELBO.item(),loglikelihood_estimate_A.item(),loglikelihood_estimate_B.item(),ELBO_A.item(),ELBO_B.item()\n\n\n","sub_path":"utils/loglikelihood.py","file_name":"loglikelihood.py","file_ext":"py","file_size_in_byte":2169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"299625369","text":"f = open(\"label.txt\", \"r\")\r\nd = {}\r\nfor line in f:\r\n txt = line.split(\" \")[2]\r\n if (txt != \"U\"):\r\n if (d.get(txt) == None):\r\n d[txt] = 1\r\n else:\r\n d[txt] = d[txt] + 1\r\nprint(d)\r\nf.close()","sub_path":"label_count.py","file_name":"label_count.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"382646493","text":"import torch\nimport numpy as np\nimport torch.nn as nn\nimport torch.utils.data as data_utils\nimport ChessResNet\nfrom DoubleHeadDataset import DoubleHeadTrainingDataset\nimport h5py\n\n\n# inputs and outputs are numpy arrays. 
This method of checking accuracy only works with imported games.\n# if it's not imported, accuracy will never be 100%, so it will just output the trained network after 10,000 epochs.\ndef trainGPUNetwork(boards, policyOutputs, policyMag, valueOutputs, EPOCHS=1, BATCH_SIZE=1, LR=0.001,\n loadDirectory='none.pt', saveDirectory='network1.pt'):\n\n policyLossHistory = []\n valueLossHistory = []\n\n policyOutputs = torch.from_numpy(policyOutputs).double()\n valueOutputs = torch.from_numpy(valueOutputs).double()\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n print(device)\n\n data = DoubleHeadTrainingDataset(boards, policyOutputs, policyMag,valueOutputs)\n\n trainLoader = torch.utils.data.DataLoader(dataset=data, batch_size=BATCH_SIZE, shuffle=True)\n\n # this is a residual network\n model = ChessResNet.ResNetDoubleHead().double().cuda()\n model.cuda()\n optimizer = torch.optim.Adam(model.parameters(), lr=LR)\n\n try:\n checkpoint = torch.load(loadDirectory)\n model.load_state_dict(checkpoint['model_state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer_state_dict'])\n totalLoss = checkpoint['loss']\n\n except:\n print(\"Pretrained NN model not found!\")\n\n policyCrit = nn.PoissonNLLLoss()\n valueCrit = nn.MSELoss()\n\n total_step = len(trainLoader)\n\n trainNotFinished = True\n for epoch in range(EPOCHS):\n if trainNotFinished:\n for i, (images, labels1, labels2) in enumerate(trainLoader):\n images = images.to(device)\n policyLabels = labels1.to(device)\n valueLabels = labels2.to(device)\n\n # Forward pass\n outputPolicy, outputValue = model(images)\n\n policyLoss = policyCrit(outputPolicy, policyLabels) * 4000\n valueLoss = valueCrit(outputValue, valueLabels)\n totalLoss = policyLoss + valueLoss\n\n policyLossHistory.append(policyLoss.detach().cpu().numpy())\n valueLossHistory.append(valueLoss.detach().cpu().numpy())\n\n # Backward and optimize\n optimizer.zero_grad()\n totalLoss.backward()\n optimizer.step()\n\n if (i + 1) % 1 == 
0:\n print('Epoch [{}/{}], Step [{}/{}], Policy Loss: {:.4f}, Value Loss: {:.4f}'\n .format(epoch + 1, EPOCHS, i + 1, total_step, policyLoss.item() / 4, valueLoss.item()))\n if (i + 1) % 200 == 0:\n # find predicted labels\n values = np.exp((model(images)[0].data.detach().cpu().numpy()))\n print(\"MAX:\", np.amax(np.amax(values, axis=1)))\n print(\"MIN:\", np.amin(np.amin(values, axis=1)))\n\n _, predicted = torch.max(model(images)[0].data, 1)\n predicted = predicted.cpu().numpy()\n print(predicted)\n\n _, actual = torch.max(labels1.data, 1) # for poisson nll loss\n actual = actual.numpy()\n\n print(actual)\n\n print(\"Correct:\", (predicted == actual).sum())\n if (i + 1) % 400 == 0:\n # Save Model\n torch.save({\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'loss': totalLoss,\n }, saveDirectory)\n\n # Save Loss History\n outF = open(\"Network Logs/policyLossHistory.txt\", \"w\")\n for k in range(len(policyLossHistory)):\n # write line to output file\n outF.write(str(policyLossHistory[k]))\n outF.write(\"\\n\")\n outF.close()\n outF = open(\"Network Logs/valueLossHistory.txt\", \"w\")\n for l in range(len(valueLossHistory)):\n # write line to output file\n outF.write(str(valueLossHistory[l]))\n outF.write(\"\\n\")\n outF.close()\n\n\n print(\"Updated!\")\n # Save Model\n torch.save({\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'loss': totalLoss,\n }, saveDirectory)\n\n # Save Loss History\n outF = open(\"Network Logs/policyLossHistory.txt\", \"w\")\n for m in range(len(policyLossHistory)):\n # write line to output file\n outF.write(str(policyLossHistory[m]))\n outF.write(\"\\n\")\n outF.close()\n outF = open(\"Network Logs/valueLossHistory.txt\", \"w\")\n for n in range(len(valueLossHistory)):\n # write line to output file\n outF.write(str(valueLossHistory[n]))\n outF.write(\"\\n\")\n outF.close()\n\n\n\ntrain = True\nif train:\n\n with h5py.File(\"Training 
Data/StockfishInputs3[binaryConverted].h5\", 'r') as hf:\n boards = hf[\"Inputs\"][:]\n print(len(boards))\n with h5py.File(\"Training Data/StockfishOutputs3.h5\", 'r') as hf:\n policy = hf[\"Policy Outputs\"][:]\n policyMag = hf[\"Policy Magnitude Outputs\"][:]\n value = hf[\"Value Outputs\"][:]\n print(len(value))\n trainGPUNetwork(boards, policy, policyMag, value, loadDirectory=\"New Networks/[12x256_16_8]64fish.pt\",\n saveDirectory=\"New Networks/[12x256_16_8]64fish.pt\", EPOCHS=2,\n BATCH_SIZE=128, LR=0.001)\n\n","sub_path":"TrainNetworkGPU.py","file_name":"TrainNetworkGPU.py","file_ext":"py","file_size_in_byte":5776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"290131368","text":"# -*- coding: utf-8 -*-\n'''\n@author: a_kayerov\n'''\nfrom django.db.models import Sum\nfrom random import random\nimport openpyxl\nfrom openpyxl.styles import Font\n\nfrom medicament.oper_with_base import create_new_report, save_doc, get_name, get_period_namef, get_region_name\nfrom medicament.models import Doc2\nfrom _datetime import datetime\n\n\ndef create_report_form2(periodInt, datef):\n ''' Создание новых документов (в новом периоде)\n Возвращает True, если добавление записей прошло успешно\n В противном случае возвращает False\n copy_fields_formX - функция начального заполнения\n '''\n return create_new_report(2,Doc2,periodInt,datef, copy_fields_form2)\n\ndef save_doc_form2(request, type, id_doc, mode_comment):\n ''' Сохранить запись Document + комментарий с новой записью в комментрии с действием пользователя\n Установить собственый параментрв DOCx,set_fields_formx, is_valid_formx\n ''' \n return save_doc(Doc2,set_fields_form2, is_valid_form2, request, type, id_doc, mode_comment)\n\n\ndef copy_fields_form2(ds, dd):\n ''' Копирование полей - указать все поля для копирования \n Для каждой формы, \n ВЫЗЫВАЕТСЯ ТОЛЬКО ДЛЯ ДОКУМЕНТОВ В СОСТОЯНИИ ЗАВЕШЕНО- незаполненные и несогласаованные документы такой обработке не 
подлежат!\n '''\n dd.c2_1 = ds.c2_1\n dd.c2_2 = ds.c2_2\n dd.c2_3 = ds.c2_3\n dd.c2_3_1 = ds.c2_3_1\n dd.c2_3_2 = ds.c2_3_2\n dd.c2_3_3 = ds.c2_3_3\n dd.c2_4 = ds.c2_4\n dd.c2_4_1 = ds.c2_4_1\n dd.c2_4_2 = ds.c2_4_2\n dd.c2_4_3 = ds.c2_4_3\n dd.c2_5 = ds.c2_5\n dd.c2_5_1 = ds.c2_5_1\n dd.c2_5_2 = ds.c2_5_2\n dd.c2_5_3 = ds.c2_5_3\n dd.c2_5_4 = ds.c2_5_4\n dd.c2_5_5 = ds.c2_5_5\n dd.c2_5_6 = ds.c2_5_6\n dd.c2_5_7 = ds.c2_5_7\n dd.c2_5_8 = ds.c2_5_8\n dd.c2_5_9 = ds.c2_5_9\n dd.c2_5_10= ds.c2_5_10\n dd.c2_5_11= ds.c2_5_11\n dd.c2_5_12= ds.c2_5_12\n dd.c2_5_13= ds.c2_5_13\n dd.c2_5_14= ds.c2_5_14\n dd.c2_5_15= ds.c2_5_15\n dd.c2_5_16= ds.c2_5_16\n dd.c2_5_17= ds.c2_5_17\n dd.c2_5_18= ds.c2_5_18\n dd.c2_5_19= ds.c2_5_19\n dd.c2_5_20= ds.c2_5_20\n dd.c2_5_21= ds.c2_5_21\n dd.c2_5_22= ds.c2_5_22\n dd.c2_5_23= ds.c2_5_23\n dd.c2_5_24= ds.c2_5_24\n dd.c2_5_25= ds.c2_5_25\n dd.c2_5_26= ds.c2_5_26\n dd.c2_5_27= ds.c2_5_27\n dd.c2_5_28= ds.c2_5_28\n dd.c2_5_29= ds.c2_5_29\n dd.c2_5_30= ds.c2_5_30\n dd.c2_5_31= ds.c2_5_31\n dd.c2_5_32= ds.c2_5_32\n dd.c2_5_33= ds.c2_5_33\n dd.c2_5_34= ds.c2_5_34\n dd.c2_5_35= ds.c2_5_35\n dd.c2_5_36= ds.c2_5_36\n dd.c2_5_37= ds.c2_5_37\n dd.c2_5_38= ds.c2_5_38\n dd.c2_5_39= ds.c2_5_39\n dd.c2_5_40= ds.c2_5_40\n dd.c2_5_41= ds.c2_5_41\n dd.c2_6 = ds.c2_6\n dd.c2_7 = ds.c2_7\n dd.c2_8 = ds.c2_8\n dd.c2_9 = ds.c2_9\n dd.c2_10 = ds.c2_10\n dd.c2_11 = ds.c2_11\n dd.c2_12 = ds.c2_12\n dd.c2_13 = ds.c2_13\n dd.c2_13_1 = ds.c2_13_1\n dd.c2_13_2 = ds.c2_13_2\n dd.c2_13_3 = ds.c2_13_3\n dd.c2_13_4 = ds.c2_13_4\n dd.c2_13_5 = ds.c2_13_5\n dd.c2_13_6 = ds.c2_13_6\n dd.c2_13_7 = ds.c2_13_7\n dd.c2_13_8 = ds.c2_13_8\n dd.c2_13_9 = ds.c2_13_9\n dd.c2_13_10 = ds.c2_13_10\n dd.c2_13_11 = ds.c2_13_11\n dd.c2_13_12 = ds.c2_13_12\n dd.c2_13_13 = ds.c2_13_13\n dd.c2_13_14 = ds.c2_13_14\n dd.c2_13_15 = ds.c2_13_15\n dd.c2_13_16 = ds.c2_13_16\n dd.c2_13_17 = ds.c2_13_17\n dd.c2_13_18 = ds.c2_13_18\n dd.c2_13_19 = ds.c2_13_19\n dd.c2_13_20 = ds.c2_13_20\n dd.c2_13_21 
= ds.c2_13_21\n dd.c2_13_22 = ds.c2_13_22\n dd.c2_13_23 = ds.c2_13_23\n dd.c2_13_24 = ds.c2_13_24\n dd.c2_13_25 = ds.c2_13_25\n dd.c2_13_26 = ds.c2_13_26\n dd.c2_13_27 = ds.c2_13_27\n dd.c2_13_28 = ds.c2_13_28\n dd.c2_13_29 = ds.c2_13_29\n dd.c2_13_30 = ds.c2_13_30\n dd.c2_13_31 = ds.c2_13_31\n dd.c2_13_32 = ds.c2_13_32\n dd.c2_13_33 = ds.c2_13_33\n dd.c2_13_34 = ds.c2_13_34\n dd.c2_13_35 = ds.c2_13_35\n dd.c2_13_36 = ds.c2_13_36\n dd.c2_13_37 = ds.c2_13_37\n dd.c2_13_38 = ds.c2_13_38\n dd.c2_13_39 = ds.c2_13_39\n dd.c2_13_40 = ds.c2_13_40\n dd.c2_13_41 = ds.c2_13_41\n dd.c2_13_42 = ds.c2_13_42\n dd.c2_13_43 = ds.c2_13_43\n dd.c2_13_44 = ds.c2_13_44\n dd.c2_13_45 = ds.c2_13_45\n dd.c2_13_46 = ds.c2_13_46\n dd.c2_13_47 = ds.c2_13_47\n dd.c2_13_48 = ds.c2_13_48\n dd.c2_13_49 = ds.c2_13_49\n dd.c2_13_50 = ds.c2_13_50\n dd.c2_13_51 = ds.c2_13_51\n dd.c2_13_52 = ds.c2_13_52\n dd.c2_13_53 = ds.c2_13_53\n dd.c2_13_54 = ds.c2_13_54\n dd.c2_13_55 = ds.c2_13_55\n dd.c2_13_56 = ds.c2_13_56\n dd.c2_13_57 = ds.c2_13_57\n dd.c2_13_58 = ds.c2_13_58\n dd.c2_13_59 = ds.c2_13_59\n dd.c2_13_60 = ds.c2_13_60\n dd.c2_13_61 = ds.c2_13_61\n dd.c2_13_62 = ds.c2_13_62\n dd.c2_13_63 = ds.c2_13_63\n dd.c2_13_64 = ds.c2_13_64\n dd.c2_13_65 = ds.c2_13_65\n dd.c2_13_66 = ds.c2_13_66\n dd.c2_13_67 = ds.c2_13_67\n dd.c2_13_68 = ds.c2_13_68\n dd.c2_13_69 = ds.c2_13_69\n dd.c2_13_70 = ds.c2_13_70\n dd.c2_13_71 = ds.c2_13_71\n dd.c2_13_72 = ds.c2_13_72\n dd.c2_13_73 = ds.c2_13_73\n dd.c2_13_74 = ds.c2_13_74\n dd.c2_13_75 = ds.c2_13_75\n dd.c2_13_76 = ds.c2_13_76\n dd.c2_13_77 = ds.c2_13_77\n dd.c2_13_78 = ds.c2_13_78\n dd.c2_13_79 = ds.c2_13_79\n dd.c2_13_80 = ds.c2_13_80\n dd.c2_13_81 = ds.c2_13_81\n dd.c2_13_82 = ds.c2_13_82\n dd.c2_13_83 = ds.c2_13_83\n dd.c2_13_84 = ds.c2_13_84\n dd.c2_13_85 = ds.c2_13_85\n dd.c2_13_86 = ds.c2_13_86\n dd.c2_13_87 = ds.c2_13_87\n\t\n dd.c2_13_89 = ds.c2_13_89\n dd.c2_13_90 = ds.c2_13_90\n dd.c2_13_91 = ds.c2_13_91\n dd.c2_13_92 = ds.c2_13_92\n dd.c2_13_93 = 
ds.c2_13_93\n dd.c2_13_94 = ds.c2_13_94\n dd.c2_13_95 = ds.c2_13_95\n dd.c2_13_96 = ds.c2_13_96\n dd.c2_13_97 = ds.c2_13_97\n dd.c2_13_98 = ds.c2_13_98\n dd.c2_13_99 = ds.c2_13_99\n dd.c2_13_100= ds.c2_13_100\n dd.c2_13_101= ds.c2_13_101\n dd.c2_13_102= ds.c2_13_102\n dd.c2_13_103= ds.c2_13_103\n dd.c2_13_104= ds.c2_13_104\n dd.c2_13_105= ds.c2_13_105\n dd.c2_13_106= ds.c2_13_106\n dd.c2_13_107= ds.c2_13_107\n dd.c2_13_108= ds.c2_13_108\n dd.c2_13_109= ds.c2_13_109\n dd.c2_13_110= ds.c2_13_110\n dd.c2_13_111= ds.c2_13_111\n dd.c2_13_112= ds.c2_13_112\n dd.c2_13_113= ds.c2_13_113\n dd.c2_13_114= ds.c2_13_114\n dd.c2_13_115= ds.c2_13_115\n dd.c2_13_116= ds.c2_13_116\n dd.c2_13_117= ds.c2_13_117\n dd.c2_13_118= ds.c2_13_118\n dd.c2_13_119= ds.c2_13_119\n dd.c2_13_120= ds.c2_13_120\n dd.c2_13_121= ds.c2_13_121\n dd.c2_13_122= ds.c2_13_122\n dd.c2_13_123= ds.c2_13_123\n dd.c2_13_124= ds.c2_13_124\n dd.c2_14= ds.c2_14\n dd.c2_14_1 = ds.c2_14_1\n dd.c2_14_2 = ds.c2_14_2\n dd.c2_14_3 = ds.c2_14_3\n dd.c2_14_4 = ds.c2_14_4\n dd.c2_14_5 = ds.c2_14_5\n dd.c2_14_6 = ds.c2_14_6\n dd.c2_14_7 = ds.c2_14_7\n dd.c2_14_8 = ds.c2_14_8\n dd.c2_14_9 = ds.c2_14_9\n dd.c2_14_10 = ds.c2_14_10\n dd.c2_14_11 = ds.c2_14_11\n dd.c2_14_12 = ds.c2_14_12\n dd.c2_14_13 = ds.c2_14_13\n dd.c2_14_14 = ds.c2_14_14\n dd.c2_14_15 = ds.c2_14_15\n dd.c2_14_16 = ds.c2_14_16\n dd.c2_14_17 = ds.c2_14_17\n dd.c2_14_18 = ds.c2_14_18\n dd.c2_14_19 = ds.c2_14_19\n dd.c2_14_20 = ds.c2_14_20\n dd.c2_14_21 = ds.c2_14_21\n dd.c2_14_22 = ds.c2_14_22\n dd.c2_14_23 = ds.c2_14_23\n dd.c2_14_24 = ds.c2_14_24\n dd.c2_14_25 = ds.c2_14_25\n dd.c2_14_26 = ds.c2_14_26\n dd.c2_14_27 = ds.c2_14_27\n dd.c2_14_28 = ds.c2_14_28\n dd.c2_14_29 = ds.c2_14_29\n dd.c2_14_30 = ds.c2_14_30\n dd.c2_14_31 = ds.c2_14_31\n dd.c2_14_32 = ds.c2_14_32\n dd.c2_14_33 = ds.c2_14_33\n dd.c2_14_34 = ds.c2_14_34\n dd.c2_14_35 = ds.c2_14_35\n dd.c2_14_36 = ds.c2_14_36\n dd.c2_14_37 = ds.c2_14_37\n dd.c2_14_38 = ds.c2_14_38\n dd.c2_14_39 = 
ds.c2_14_39\n dd.c2_14_40 = ds.c2_14_40\n dd.c2_14_41 = ds.c2_14_41\n dd.c2_14_42 = ds.c2_14_42\n dd.c2_14_43 = ds.c2_14_43\n dd.c2_14_44 = ds.c2_14_44\n dd.c2_14_45 = ds.c2_14_45\n dd.c2_15 = ds.c2_15\n dd.c2_16 = ds.c2_16\n dd.c2_17 = ds.c2_17\n dd.c2_18 = ds.c2_18\n dd.c2_19 = ds.c2_19\n dd.c2_20 = ds.c2_20\n dd.c2_21 = ds.c2_21\n dd.c2_22 = ds.c2_22\n dd.c2_23 = ds.c2_23\n dd.c2_24 = ds.c2_24\n dd.c2_25 = ds.c2_25\n dd.c2_26 = ds.c2_26\n dd.c2_27 = ds.c2_27\n dd.c2_28 = ds.c2_28\n\t\n dd.c3_1 = ds.c3_1\n dd.c3_2 = ds.c3_2\n dd.c3_3 = ds.c3_3\n dd.c3_4 = ds.c3_4\n dd.c3_5 = ds.c3_5\n dd.c3_6 = ds.c3_6\n dd.c3_7 = ds.c3_7\n dd.c3_8 = ds.c3_8\n dd.c3_9 = ds.c3_9\n dd.c3_10 = ds.c3_10\n dd.c3_11 = ds.c3_11\n dd.c3_12 = ds.c3_12\n dd.c3_13 = ds.c3_13\n dd.c3_14 = ds.c3_14\n dd.c3_15 = ds.c3_15\n dd.c3_16 = ds.c3_16\n dd.c3_17 = ds.c3_17\n dd.c3_18 = ds.c3_18\n dd.c3_19 = ds.c3_19\n dd.c3_20 = ds.c3_20\n dd.c3_21 = ds.c3_21\n dd.c3_22 = ds.c3_22\n dd.c3_23 = ds.c3_23\n dd.c3_24 = ds.c3_24\n dd.c3_25 = ds.c3_25\n dd.c3_26 = ds.c3_26\n dd.c3_27 = ds.c3_27\n dd.c3_28 = ds.c3_28\n dd.c3_29 = ds.c3_29\n dd.c3_30 = ds.c3_30\n dd.c3_31 = ds.c3_31\n dd.c3_32 = ds.c3_32\n dd.c3_33 = ds.c3_33\n dd.c3_34 = ds.c3_34\n dd.c3_35 = ds.c3_35\n dd.c3_36 = ds.c3_36\n dd.c3_37 = ds.c3_37\n dd.c3_38 = ds.c3_38\n\t\n dd.c4_1 = ds.c4_1\n dd.c4_2 = ds.c4_2\n dd.c4_3 = ds.c4_3\n dd.c4_4 = ds.c4_4\n dd.c4_5 = ds.c4_5\n dd.c4_6 = ds.c4_6\n dd.c4_7 = ds.c4_7\n dd.c4_8 = ds.c4_8\n dd.c4_9 = ds.c4_9\n dd.c4_10 = ds.c4_10\n dd.c4_11 = ds.c4_11\n dd.c4_12 = ds.c4_12\n dd.c4_13 = ds.c4_13\n dd.c4_14 = ds.c4_14\n dd.c4_15 = ds.c4_15\n \n\ndef set_fields_form2(request, doc):\n ''' Заполнение полей модели данными формы . 
\n Для каждой формы\n '''\n doc.c2_1 = request.POST['c2_1'] \n doc.c2_2 = request.POST['c2_2'] \n doc.c2_3 = request.POST['c2_3'] \n doc.c2_3_1 = request.POST['c2_3_1'] \n doc.c2_3_2 = request.POST['c2_3_2'] \n doc.c2_3_3 = request.POST['c2_3_3'] \n doc.c2_4 = request.POST['c2_4'] \n doc.c2_4_1 = request.POST['c2_4_1'] \n doc.c2_4_2 = request.POST['c2_4_2'] \n doc.c2_4_3 = request.POST['c2_4_3'] \n doc.c2_5 = request.POST['c2_5'] \n doc.c2_5_1 = request.POST['c2_5_1'] \n doc.c2_5_2 = request.POST['c2_5_2'] \n doc.c2_5_3 = request.POST['c2_5_3'] \n doc.c2_5_4 = request.POST['c2_5_4'] \n doc.c2_5_5 = request.POST['c2_5_5'] \n doc.c2_5_6 = request.POST['c2_5_6'] \n doc.c2_5_7 = request.POST['c2_5_7'] \n doc.c2_5_8 = request.POST['c2_5_8'] \n doc.c2_5_9 = request.POST['c2_5_9'] \n doc.c2_5_10 = request.POST['c2_5_10'] \n doc.c2_5_11 = request.POST['c2_5_11'] \n doc.c2_5_12 = request.POST['c2_5_12'] \n doc.c2_5_13 = request.POST['c2_5_13'] \n doc.c2_5_14 = request.POST['c2_5_14'] \n doc.c2_5_15 = request.POST['c2_5_15'] \n doc.c2_5_16 = request.POST['c2_5_16'] \n doc.c2_5_17 = request.POST['c2_5_17'] \n doc.c2_5_18 = request.POST['c2_5_18'] \n doc.c2_5_19 = request.POST['c2_5_19'] \n doc.c2_5_20 = request.POST['c2_5_20'] \n doc.c2_5_21 = request.POST['c2_5_21'] \n doc.c2_5_22 = request.POST['c2_5_22'] \n doc.c2_5_23 = request.POST['c2_5_23'] \n doc.c2_5_24 = request.POST['c2_5_24'] \n doc.c2_5_25 = request.POST['c2_5_25'] \n doc.c2_5_26 = request.POST['c2_5_26'] \n doc.c2_5_27 = request.POST['c2_5_27'] \n doc.c2_5_28 = request.POST['c2_5_28'] \n doc.c2_5_29 = request.POST['c2_5_29'] \n doc.c2_5_30 = request.POST['c2_5_30'] \n doc.c2_5_31 = request.POST['c2_5_31'] \n doc.c2_5_32 = request.POST['c2_5_32'] \n doc.c2_5_33 = request.POST['c2_5_33'] \n doc.c2_5_34 = request.POST['c2_5_34'] \n doc.c2_5_35 = request.POST['c2_5_35'] \n doc.c2_5_36 = request.POST['c2_5_36'] \n doc.c2_5_37 = request.POST['c2_5_37'] \n doc.c2_5_38 = request.POST['c2_5_38'] \n doc.c2_5_39 = 
request.POST['c2_5_39'] \n doc.c2_5_40 = request.POST['c2_5_40'] \n doc.c2_5_41 = request.POST['c2_5_41'] \n doc.c2_6 = request.POST['c2_6'] \n doc.c2_7 = request.POST['c2_7'] \n doc.c2_8 = request.POST['c2_8'].replace(',', '.') \n doc.c2_9 = request.POST['c2_9'] \n doc.c2_10 = request.POST['c2_10'] \n doc.c2_11 = request.POST['c2_11'] \n doc.c2_12 = request.POST['c2_12'] \n doc.c2_13 = request.POST['c2_13'] \n doc.c2_13_1 = request.POST['c2_13_1'] \n doc.c2_13_2 = request.POST['c2_13_2'] \n doc.c2_13_3 = request.POST['c2_13_3'] \n doc.c2_13_4 = request.POST['c2_13_4'] \n doc.c2_13_5 = request.POST['c2_13_5'] \n doc.c2_13_6 = request.POST['c2_13_6'] \n doc.c2_13_7 = request.POST['c2_13_7'] \n doc.c2_13_8 = request.POST['c2_13_8'] \n doc.c2_13_9 = request.POST['c2_13_9'] \n doc.c2_13_10 = request.POST['c2_13_10'] \n doc.c2_13_11 = request.POST['c2_13_11'] \n doc.c2_13_12 = request.POST['c2_13_12'] \n doc.c2_13_13 = request.POST['c2_13_13'] \n doc.c2_13_14 = request.POST['c2_13_14'] \n doc.c2_13_15 = request.POST['c2_13_15'] \n doc.c2_13_16 = request.POST['c2_13_16'] \n doc.c2_13_17 = request.POST['c2_13_17'] \n doc.c2_13_18 = request.POST['c2_13_18'] \n doc.c2_13_19 = request.POST['c2_13_19'] \n doc.c2_13_20 = request.POST['c2_13_20'] \n doc.c2_13_21 = request.POST['c2_13_21'] \n doc.c2_13_22 = request.POST['c2_13_22'] \n doc.c2_13_23 = request.POST['c2_13_23'] \n doc.c2_13_24 = request.POST['c2_13_24'] \n doc.c2_13_25 = request.POST['c2_13_25'] \n doc.c2_13_26 = request.POST['c2_13_26'] \n doc.c2_13_27 = request.POST['c2_13_27'] \n doc.c2_13_28 = request.POST['c2_13_28'] \n doc.c2_13_29 = request.POST['c2_13_29'] \n doc.c2_13_30 = request.POST['c2_13_30'] \n doc.c2_13_31 = request.POST['c2_13_31'] \n doc.c2_13_32 = request.POST['c2_13_32'] \n doc.c2_13_33 = request.POST['c2_13_33'] \n doc.c2_13_34 = request.POST['c2_13_34'] \n doc.c2_13_35 = request.POST['c2_13_35'] \n doc.c2_13_36 = request.POST['c2_13_36'] \n doc.c2_13_37 = request.POST['c2_13_37'] \n 
doc.c2_13_38 = request.POST['c2_13_38'] \n doc.c2_13_39 = request.POST['c2_13_39'] \n doc.c2_13_40 = request.POST['c2_13_40'] \n doc.c2_13_41 = request.POST['c2_13_41'] \n doc.c2_13_42 = request.POST['c2_13_42'] \n doc.c2_13_43 = request.POST['c2_13_43'] \n doc.c2_13_44 = request.POST['c2_13_44'] \n doc.c2_13_45 = request.POST['c2_13_45'] \n doc.c2_13_46 = request.POST['c2_13_46'] \n doc.c2_13_47 = request.POST['c2_13_47'] \n doc.c2_13_48 = request.POST['c2_13_48'] \n doc.c2_13_49 = request.POST['c2_13_49'] \n doc.c2_13_50 = request.POST['c2_13_50'] \n doc.c2_13_51 = request.POST['c2_13_51'] \n doc.c2_13_52 = request.POST['c2_13_52'] \n doc.c2_13_53 = request.POST['c2_13_53'] \n doc.c2_13_54 = request.POST['c2_13_54'] \n doc.c2_13_55 = request.POST['c2_13_55'] \n doc.c2_13_56 = request.POST['c2_13_56'] \n doc.c2_13_57 = request.POST['c2_13_57'] \n doc.c2_13_58 = request.POST['c2_13_58'] \n doc.c2_13_59 = request.POST['c2_13_59'] \n doc.c2_13_60 = request.POST['c2_13_60'] \n doc.c2_13_61 = request.POST['c2_13_61'] \n doc.c2_13_62 = request.POST['c2_13_62'] \n doc.c2_13_63 = request.POST['c2_13_63'] \n doc.c2_13_64 = request.POST['c2_13_64'] \n doc.c2_13_65 = request.POST['c2_13_65'] \n doc.c2_13_66 = request.POST['c2_13_66'] \n doc.c2_13_67 = request.POST['c2_13_67'] \n doc.c2_13_68 = request.POST['c2_13_68'] \n doc.c2_13_69 = request.POST['c2_13_69'] \n doc.c2_13_70 = request.POST['c2_13_70'] \n doc.c2_13_71 = request.POST['c2_13_71'] \n doc.c2_13_72 = request.POST['c2_13_72'] \n doc.c2_13_73 = request.POST['c2_13_73'] \n doc.c2_13_74 = request.POST['c2_13_74'] \n doc.c2_13_75 = request.POST['c2_13_75'] \n doc.c2_13_76 = request.POST['c2_13_76'] \n doc.c2_13_77 = request.POST['c2_13_77'] \n doc.c2_13_78 = request.POST['c2_13_78'] \n doc.c2_13_79 = request.POST['c2_13_79'] \n doc.c2_13_80 = request.POST['c2_13_80'] \n doc.c2_13_81 = request.POST['c2_13_81'] \n doc.c2_13_82 = request.POST['c2_13_82'] \n doc.c2_13_83 = request.POST['c2_13_83'] \n doc.c2_13_84 = 
request.POST['c2_13_84'] \n doc.c2_13_85 = request.POST['c2_13_85'] \n doc.c2_13_86 = request.POST['c2_13_86'] \n doc.c2_13_87 = request.POST['c2_13_87'] \n\t\n doc.c2_13_89 = request.POST['c2_13_89'] \n doc.c2_13_90 = request.POST['c2_13_90'] \n doc.c2_13_91 = request.POST['c2_13_91'] \n doc.c2_13_92 = request.POST['c2_13_92'] \n doc.c2_13_93 = request.POST['c2_13_93'] \n doc.c2_13_94 = request.POST['c2_13_94'] \n doc.c2_13_95 = request.POST['c2_13_95'] \n doc.c2_13_96 = request.POST['c2_13_96'] \n doc.c2_13_97 = request.POST['c2_13_97'] \n doc.c2_13_98 = request.POST['c2_13_98'] \n doc.c2_13_99 = request.POST['c2_13_99'] \n doc.c2_13_100 = request.POST['c2_13_100'] \n doc.c2_13_101 = request.POST['c2_13_101'] \n doc.c2_13_102 = request.POST['c2_13_102'] \n doc.c2_13_103 = request.POST['c2_13_103'] \n doc.c2_13_104 = request.POST['c2_13_104'] \n doc.c2_13_105 = request.POST['c2_13_105'] \n doc.c2_13_106 = request.POST['c2_13_106'] \n doc.c2_13_107 = request.POST['c2_13_107'] \n doc.c2_13_108 = request.POST['c2_13_108'] \n doc.c2_13_109 = request.POST['c2_13_109'] \n doc.c2_13_110 = request.POST['c2_13_110'] \n doc.c2_13_111 = request.POST['c2_13_111'] \n doc.c2_13_112 = request.POST['c2_13_112'] \n doc.c2_13_113 = request.POST['c2_13_113'] \n doc.c2_13_114 = request.POST['c2_13_114'] \n doc.c2_13_115 = request.POST['c2_13_115'] \n doc.c2_13_116 = request.POST['c2_13_116'] \n doc.c2_13_117 = request.POST['c2_13_117'] \n doc.c2_13_118 = request.POST['c2_13_118'] \n doc.c2_13_119 = request.POST['c2_13_119'] \n doc.c2_13_120 = request.POST['c2_13_120'] \n doc.c2_13_121 = request.POST['c2_13_121'] \n doc.c2_13_122 = request.POST['c2_13_122'] \n doc.c2_13_123 = request.POST['c2_13_123'] \n doc.c2_13_124 = request.POST['c2_13_124'] \n doc.c2_14 = request.POST['c2_14'] \n doc.c2_14_1 = request.POST['c2_14_1'] \n doc.c2_14_2 = request.POST['c2_14_2'] \n doc.c2_14_3 = request.POST['c2_14_3'] \n doc.c2_14_4 = request.POST['c2_14_4'] \n doc.c2_14_5 = request.POST['c2_14_5'] 
\n doc.c2_14_6 = request.POST['c2_14_6'] \n doc.c2_14_7 = request.POST['c2_14_7'] \n doc.c2_14_8 = request.POST['c2_14_8'] \n doc.c2_14_9 = request.POST['c2_14_9'] \n doc.c2_14_10 = request.POST['c2_14_10'] \n doc.c2_14_11 = request.POST['c2_14_11'] \n doc.c2_14_12 = request.POST['c2_14_12'] \n doc.c2_14_13 = request.POST['c2_14_13'] \n doc.c2_14_14 = request.POST['c2_14_14'] \n doc.c2_14_14 = request.POST['c2_14_14'] \n doc.c2_14_15 = request.POST['c2_14_15'] \n doc.c2_14_16 = request.POST['c2_14_16'] \n doc.c2_14_17 = request.POST['c2_14_17'] \n doc.c2_14_18 = request.POST['c2_14_18'] \n doc.c2_14_19 = request.POST['c2_14_19'] \n doc.c2_14_20 = request.POST['c2_14_20'] \n doc.c2_14_21 = request.POST['c2_14_21'] \n doc.c2_14_22 = request.POST['c2_14_22'] \n doc.c2_14_23 = request.POST['c2_14_23'] \n doc.c2_14_24 = request.POST['c2_14_24'] \n doc.c2_14_25 = request.POST['c2_14_25'] \n doc.c2_14_26 = request.POST['c2_14_26'] \n doc.c2_14_27 = request.POST['c2_14_27'] \n doc.c2_14_28 = request.POST['c2_14_28'] \n doc.c2_14_29 = request.POST['c2_14_29'] \n doc.c2_14_30 = request.POST['c2_14_30'] \n doc.c2_14_31 = request.POST['c2_14_31'] \n doc.c2_14_32 = request.POST['c2_14_32'] \n doc.c2_14_33 = request.POST['c2_14_33'] \n doc.c2_14_34 = request.POST['c2_14_34'] \n doc.c2_14_35 = request.POST['c2_14_35'] \n doc.c2_14_36 = request.POST['c2_14_36'] \n doc.c2_14_37 = request.POST['c2_14_37'] \n doc.c2_14_38 = request.POST['c2_14_38'] \n doc.c2_14_39 = request.POST['c2_14_39'] \n doc.c2_14_40 = request.POST['c2_14_40'] \n doc.c2_14_41 = request.POST['c2_14_41'] \n doc.c2_14_42 = request.POST['c2_14_42'] \n doc.c2_14_43 = request.POST['c2_14_43'] \n doc.c2_14_44 = request.POST['c2_14_44'] \n doc.c2_14_45 = request.POST['c2_14_45'] \n doc.c2_15 = request.POST['c2_15'] \n doc.c2_16 = request.POST['c2_16'] \n doc.c2_17 = request.POST['c2_17'] \n doc.c2_18 = request.POST['c2_18'] \n doc.c2_19 = request.POST['c2_19'] \n doc.c2_20 = request.POST['c2_20'] \n doc.c2_21 = 
request.POST['c2_21'] \n doc.c2_22 = request.POST['c2_22'] \n doc.c2_23 = request.POST['c2_23'] \n doc.c2_24 = request.POST['c2_24'] \n doc.c2_25 = request.POST['c2_25'] \n doc.c2_26 = request.POST['c2_26'] \n doc.c2_27 = request.POST['c2_27'] \n doc.c2_28 = request.POST['c2_28'] \n\n doc.c3_1 = request.POST['c3_1'] \n doc.c3_2 = request.POST['c3_2'] \n doc.c3_3 = request.POST['c3_3'] \n doc.c3_4 = request.POST['c3_4'] \n doc.c3_5 = request.POST['c3_5'] \n doc.c3_6 = request.POST['c3_6'] \n doc.c3_7 = request.POST['c3_7'] \n doc.c3_8 = request.POST['c3_8'] \n doc.c3_9 = request.POST['c3_9'] \n doc.c3_10 = request.POST['c3_10'] \n doc.c3_11 = request.POST['c3_11'] \n doc.c3_12 = request.POST['c3_12'] \n doc.c3_13 = request.POST['c3_13'] \n doc.c3_14 = request.POST['c3_14'] \n doc.c3_15 = request.POST['c3_15'] \n doc.c3_16 = request.POST['c3_16'] \n doc.c3_17 = request.POST['c3_17'] \n doc.c3_18 = request.POST['c3_18'] \n doc.c3_19 = request.POST['c3_19'] \n doc.c3_20 = request.POST['c3_20'] \n doc.c3_21 = request.POST['c3_21'] \n doc.c3_22 = request.POST['c3_22'] \n doc.c3_23 = request.POST['c3_23'] \n doc.c3_24 = request.POST['c3_24'] \n doc.c3_25 = request.POST['c3_25'] \n doc.c3_26 = request.POST['c3_26'] \n doc.c3_27 = request.POST['c3_27'] \n doc.c3_28 = request.POST['c3_28'] \n doc.c3_29 = request.POST['c3_29'] \n doc.c3_30 = request.POST['c3_30'] \n doc.c3_31 = request.POST['c3_31'] \n doc.c3_32 = request.POST['c3_32'] \n doc.c3_33 = request.POST['c3_33'] \n doc.c3_34 = request.POST['c3_34'] \n doc.c3_35 = request.POST['c3_35'] \n doc.c3_36 = request.POST['c3_36'] \n doc.c3_37 = request.POST['c3_37'] \n doc.c3_38 = request.POST['c3_38'] \n doc.c4_1 = request.POST['c4_1'] \n doc.c4_2 = request.POST['c4_2'] \n doc.c4_3 = request.POST['c4_3'] \n doc.c4_4 = request.POST['c4_4'] \n doc.c4_5 = request.POST['c4_5'] \n doc.c4_6 = request.POST['c4_6'] \n doc.c4_7 = request.POST['c4_7'] \n doc.c4_8 = request.POST['c4_8'] \n doc.c4_9 = request.POST['c4_9'] \n doc.c4_10 
def is_valid_form2(doc, doc_prev):
    """Check that the totals on report form 2 are consistent with their component rows.

    Walks the form's cross-row validation rules in order and stops at the first
    violated one.

    Args:
        doc: report object whose ``c2_*`` fields hold the submitted values
            (stored as strings/numbers; every field is coerced with ``int()``).
        doc_prev: previous-period report; accepted for interface compatibility
            but not consulted by any of the current checks.

    Returns:
        A two-element list: ``[True, 'OK']`` when every rule holds, otherwise
        ``[False, <Russian message naming the violated rule>]``.
    """
    def _num(name):
        # Form values may arrive as strings; compare them numerically.
        return int(getattr(doc, name))

    def _span(prefix, *ranges):
        # Sum int(doc.<prefix>_<i>) over the given inclusive (start, stop) ranges,
        # in ascending order (mirrors the original left-to-right evaluation).
        total = 0
        for start, stop in ranges:
            for idx in range(start, stop + 1):
                total += _num('%s_%d' % (prefix, idx))
        return total

    # Section 2.3 / 2.4: totals must equal the sum of their sub-rows exactly.
    if _num('c2_3') != _span('c2_3', (1, 3)):
        return [False, 'Итого по строке 2.3 не равно сумме 2.3.1 - 2.3.3']
    if _num('c2_4') != _span('c2_4', (1, 3)):
        return [False, 'Итого по строке 2.4 не равно сумме 2.4.1 - 2.4.3']

    # Section 2.13: "of which" sub-rows may not exceed their parent row.
    if _num('c2_13_1') < _num('c2_13_2'):
        return [False, 'Значение в строке 2.13.1 меньше значения 2.13.2']
    if _num('c2_13_42') < _span('c2_13', (43, 44)):
        return [False, 'Итого по строке 2.13.42 меньше суммы 2.13.43 - 2.13.44']
    if _num('c2_13_65') < _span('c2_13', (66, 71)):
        return [False, 'Итого по строке 2.13.65 меньше суммы 2.13.66 - 2.13.71']
    if _num('c2_13_92') < _span('c2_13', (93, 96)):
        return [False, 'Итого по строке 2.13.92 меньше суммы 2.13.93 - 2.13.96']
    if _num('c2_13_105') < _num('c2_13_106'):
        return [False, 'Значение в строке 2.13.105 меньше значения 2.13.106']
    if _num('c2_13_119') < _span('c2_13', (120, 124)):
        return [False, 'Итого по строке 2.13.119 меньше суммы 2.13.120 - 2.13.124']

    # Grand total for 2.13: only top-level rows count; nested "of which" rows
    # (2.13.2, .43-.44, .65-.71, .93-.96, .106, .120-.124) are excluded.
    # NOTE(review): this sum reads c2_13_88, which the form-filling code and the
    # aggregate report appear to skip — confirm the field exists on the model.
    if _num('c2_13') != _span('c2_13', (1, 1), (3, 42), (45, 64), (72, 92),
                              (97, 105), (107, 119)):
        return [False, 'Итого по строке 2.13 не равно сумме 2.13.1; 2.13.3 - 2.13.42; 2.13.45 - 2.13.64; 2.13.72 - 2.13.92; 2.13.97 - 2.13.105; 2.13.107 - 2.13.119']

    # Section 2.14: same pattern as 2.13.
    if _num('c2_14_3') < _num('c2_14_4'):
        return [False, 'Значение в строке 2.14.3 меньше значения 2.14.4']
    # NOTE(review): the next two messages say "не равно" (not equal) while the
    # check is '<' (less than), matching the original code — confirm intent.
    if _num('c2_14_13') < _span('c2_14', (14, 19), (21, 33)):
        return [False, 'Итого по строке 2.14.13 не равно сумме 2.14.14 - 2.14.33']
    if _num('c2_14_41') < _span('c2_14', (42, 44)):
        return [False, 'Итого по строке 2.14.41 не равно сумме 2.14.42 - 2.14.44']
    if _num('c2_14') != _span('c2_14', (1, 3), (5, 13), (34, 41), (45, 45)):
        return [False, 'Итого по строке 2.14 не равно сумме 2.14.1 - 2.14.3; 2.14.5 - 2.14.41; 2.14.45']

    return [True, 'OK']
Sum('c2_13_1'),Sum('c2_13_2'),Sum('c2_13_3'),Sum('c2_13_4'),Sum('c2_13_5'),Sum('c2_13_6'),Sum('c2_13_7'),Sum('c2_13_8'),Sum('c2_13_9'), \\\n Sum('c2_13_10'), Sum('c2_13_11'),Sum('c2_13_12'),Sum('c2_13_13'),Sum('c2_13_14'),Sum('c2_13_15'),Sum('c2_13_16'),Sum('c2_13_17'),Sum('c2_13_18'),Sum('c2_13_19'), \\\n Sum('c2_13_20'), Sum('c2_13_21'),Sum('c2_13_22'),Sum('c2_13_23'),Sum('c2_13_24'),Sum('c2_13_25'),Sum('c2_13_26'),Sum('c2_13_27'),Sum('c2_13_28'),Sum('c2_13_29'), \\\n Sum('c2_13_30'), Sum('c2_13_31'),Sum('c2_13_32'),Sum('c2_13_33'),Sum('c2_13_34'),Sum('c2_13_35'),Sum('c2_13_36'),Sum('c2_13_37'),Sum('c2_13_38'),Sum('c2_13_39'), \\\n Sum('c2_13_40'), Sum('c2_13_41'),Sum('c2_13_42'),Sum('c2_13_43'),Sum('c2_13_44'),Sum('c2_13_45'),Sum('c2_13_46'),Sum('c2_13_47'),Sum('c2_13_48'),Sum('c2_13_49'), \\\n Sum('c2_13_50'), Sum('c2_13_51'),Sum('c2_13_52'),Sum('c2_13_53'),Sum('c2_13_54'),Sum('c2_13_55'),Sum('c2_13_56'),Sum('c2_13_57'),Sum('c2_13_58'),Sum('c2_13_59'), \\\n Sum('c2_13_60'), Sum('c2_13_61'),Sum('c2_13_62'),Sum('c2_13_63'),Sum('c2_13_64'),Sum('c2_13_65'),Sum('c2_13_66'),Sum('c2_13_67'),Sum('c2_13_68'),Sum('c2_13_69'), \\\n Sum('c2_13_70'), Sum('c2_13_71'),Sum('c2_13_72'),Sum('c2_13_73'),Sum('c2_13_74'),Sum('c2_13_75'),Sum('c2_13_76'),Sum('c2_13_77'),Sum('c2_13_78'),Sum('c2_13_79'), \\\n Sum('c2_13_80'), Sum('c2_13_81'),Sum('c2_13_82'),Sum('c2_13_83'),Sum('c2_13_84'),Sum('c2_13_85'),Sum('c2_13_86'),Sum('c2_13_87'), Sum('c2_13_89'), \\\n Sum('c2_13_90'), Sum('c2_13_91'),Sum('c2_13_92'),Sum('c2_13_93'),Sum('c2_13_94'),Sum('c2_13_95'),Sum('c2_13_96'),Sum('c2_13_97'),Sum('c2_13_98'),Sum('c2_13_99'), \\\n Sum('c2_13_100'), Sum('c2_13_101'),Sum('c2_13_102'),Sum('c2_13_103'),Sum('c2_13_104'),Sum('c2_13_105'),Sum('c2_13_106'),Sum('c2_13_107'),Sum('c2_13_108'),Sum('c2_13_109'), \\\n Sum('c2_13_110'), Sum('c2_13_111'),Sum('c2_13_112'),Sum('c2_13_113'),Sum('c2_13_114'),Sum('c2_13_115'),Sum('c2_13_116'),Sum('c2_13_117'),Sum('c2_13_118'),Sum('c2_13_119'), \\\n 
Sum('c2_13_120'), Sum('c2_13_121'),Sum('c2_13_122'),Sum('c2_13_123'),Sum('c2_13_124'), \\\n )\n aq1= doc.aggregate( Sum('c2_14'), Sum('c2_14_1'),Sum('c2_14_2'),Sum('c2_14_3'),Sum('c2_14_4'),Sum('c2_14_5'),Sum('c2_14_6'),Sum('c2_14_7'),Sum('c2_14_8'),Sum('c2_14_9'), \\\n Sum('c2_14_10'), Sum('c2_14_11'),Sum('c2_14_12'),Sum('c2_14_13'),Sum('c2_14_14'),Sum('c2_14_15'),Sum('c2_14_16'),Sum('c2_14_17'),Sum('c2_14_18'),Sum('c2_14_19'), \\\n Sum('c2_14_20'), Sum('c2_14_21'),Sum('c2_14_22'),Sum('c2_14_23'),Sum('c2_14_24'),Sum('c2_14_25'),Sum('c2_14_26'),Sum('c2_14_27'),Sum('c2_14_28'),Sum('c2_14_29'), \\\n Sum('c2_14_30'), Sum('c2_14_31'),Sum('c2_14_32'),Sum('c2_14_33'),Sum('c2_14_34'),Sum('c2_14_35'),Sum('c2_14_36'),Sum('c2_14_37'),Sum('c2_14_38'),Sum('c2_14_39'), \\\n Sum('c2_14_40'), Sum('c2_14_41'),Sum('c2_14_42'),Sum('c2_14_43'),Sum('c2_14_44'),Sum('c2_14_45'), \\\n Sum('c2_15'),Sum('c2_16'),Sum('c2_17'),Sum('c2_18'),Sum('c2_19'),Sum('c2_20'),Sum('c2_21'),Sum('c2_22'),Sum('c2_23'), \\\n Sum('c2_24'),Sum('c2_25'),Sum('c2_26'),Sum('c2_27'),Sum('c2_28'), \\\n Sum('c3_1'),Sum('c3_2'),Sum('c3_3'),Sum('c3_4'),Sum('c3_5'),Sum('c3_6'),Sum('c3_7'),Sum('c3_8'),Sum('c3_9'), \\\n Sum('c3_10'),Sum('c3_11'),Sum('c3_12'),Sum('c3_13'),Sum('c3_14'),Sum('c3_15'),Sum('c3_16'),Sum('c3_17'),Sum('c3_18'),Sum('c3_19'), \\\n Sum('c3_20'),Sum('c3_21'),Sum('c3_22'),Sum('c3_23'),Sum('c3_24'),Sum('c3_25'),Sum('c3_26'),Sum('c3_27'),Sum('c3_28'),Sum('c3_29'), \\\n Sum('c3_30'),Sum('c3_31'),Sum('c3_32'),Sum('c3_33'),Sum('c3_34'),Sum('c3_35'),Sum('c3_36'),Sum('c3_37'),Sum('c3_38'),\\\n Sum('c4_1'),Sum('c4_2'),Sum('c4_3'),Sum('c4_4'),Sum('c4_5'),Sum('c4_6'),Sum('c4_7'),Sum('c4_8'),Sum('c4_9'), \\\n Sum('c4_10'),Sum('c4_11'),Sum('c4_12'),Sum('c4_13'),Sum('c4_14'),Sum('c4_15') \\\n )\n \n s = [[\"2.1. Число врачей (без аспирантов, интернов и ординаторов), работающих в медицинских организациях субъекта Российской Федерации \", aq0['c2_1__sum']],\n [\"2.2. 
Число среднего медицинского персонала, работающего в медицинских организациях субъекта Российской Федерации\",aq0['c2_2__sum']],\n [\"2.3. Число участковых врачей (без аспирантов, интернов и ординаторов), оказывающих медицинскую помощь сельскому населению (выбрать из справочника), всего\",aq0['c2_3__sum']],\n [\"2.3.1 Участковый врач терапевт\",aq0['c2_3_1__sum']],\n [\"2.3.2 Участковый врач педиатр\",aq0['c2_3_2__sum']],\n [\"2.3.3 Врач общей практики\",aq0['c2_3_3__sum']],\n [\"2.4. Число среднего медицинского персонала, оказывающего медицинскую помощь сельскому населению (выбрать из справочника), всего\",aq0['c2_4__sum']],\n [\"2.4.1 Участковая медицинская сестра врача терапевта\",aq0['c2_4_1__sum']],\n [\"2.4.2 Участковая медицинская сестра врача педиатра\",aq0['c2_4_2__sum']],\n [\"2.4.3 Участковая медицинская сестра врача общей практики\",aq0['c2_4_3__sum']],\n [\"2.5. Число врачей клинических специальностей (выбрать из списка), всего\",aq0['c2_5__sum']],\n [\"2.5.1 акушеры-гинекологи\",aq0['c2_5_1__sum']],\n [\"2.5.2 аллергологи-иммунологи\",aq0['c2_5_2__sum']],\n [\"2.5.3 гастроэнтерологи\",aq0['c2_5_3__sum']],\n [\"2.5.4 гематологи\",aq0['c2_5_4__sum']],\n [\"2.5.5 гериатры\",aq0['c2_5_5__sum']],\n [\"2.5.6 дерматовенерологи\",aq0['c2_5_6__sum']],\n [\"2.5.7 диабетологи\",aq0['c2_5_7__sum']],\n [\"2.5.8 инфекционисты\",aq0['c2_5_8__sum']],\n [\"2.5.9 кардиологи\",aq0['c2_5_9__sum']],\n [\"2.5.10 кардиологи детские\",aq0['c2_5_10__sum']],\n [\"2.5.11 колопроктологи\",aq0['c2_5_11__sum']],\n [\"2.5.12 неврологи\",aq0['c2_5_12__sum']],\n [\"2.5.13 нейрохирурги\",aq0['c2_5_13__sum']],\n [\"2.5.14 неонатологи\",aq0['c2_5_14__sum']],\n [\"2.5.15 нефрологи\",aq0['c2_5_15__sum']],\n [\"2.5.16 общей практики (семейные)\",aq0['c2_5_16__sum']],\n [\"2.5.17 онкологи\",aq0['c2_5_17__sum']],\n [\"2.5.18 онкологи детские\",aq0['c2_5_18__sum']],\n [\"2.5.19 оториноларингологи\",aq0['c2_5_19__sum']],\n [\"2.5.20 офтальмологи\",aq0['c2_5_20__sum']],\n [\"2.5.21 
педиатры\",aq0['c2_5_21__sum']],\n [\"2.5.22 профпатологи\",aq0['c2_5_22__sum']],\n [\"2.5.23 no рентгенэндоваскулярным диагностике и лечению\",aq0['c2_5_23__sum']],\n [\"2.5.24 психиатры\",aq0['c2_5_24__sum']],\n [\"2.5.25 психиатры-наркологи\",aq0['c2_5_25__sum']], \n [\"2.5.26 пульмонологи\",aq0['c2_5_26__sum']],\n [\"2.5.27 ревматологи\",aq0['c2_5_27__sum']],\n [\"2.5.28 скорой медицинской помощи\",aq0['c2_5_28__sum']],\n [\"2.5.29 терапевты\",aq0['c2_5_29__sum']],\n [\"2.5.30 травматология и ортопеды\",aq0['c2_5_30__sum']],\n [\"2.5.31 урологи\",aq0['c2_5_31__sum']],\n [\"2.5.32 урологи-андрологи детские\",aq0['c2_5_32__sum']],\n [\"2.5.33 фтизиатры\",aq0['c2_5_33__sum']],\n [\"2.5.34 хирурги\",aq0['c2_5_34__sum']],\n [\"2.5.35 хирурги детские\",aq0['c2_5_35__sum']],\n [\"2.5.36 хирурги пластические\",aq0['c2_5_36__sum']],\n [\"2.5.37 хирурги сердечно-сосудистые\",aq0['c2_5_37__sum']],\n [\"2.5.38 хирурги торакальные\",aq0['c2_5_38__sum']],\n [\"2.5.39 хирурги челюстно-лицевые\",aq0['c2_5_39__sum']],\n [\"2.5.40 эндокринологи\",aq0['c2_5_40__sum']],\n [\"2.5.41 эндокринологи детские\",aq0['c2_5_41__sum']], \n [\"2.6. Число среднего медицинского персонала, работающих с врачами клинических специальностей\",aq0['c2_6__sum']],\n [\"2.7. Число врачей, работающих в медицинских организациях, оказывающих медицинскую помощь в амбулаторных условиях\",aq0['c2_7__sum']],\n [\"2.8. Число занятых врачебных должностей, оказывающих медицинскую помощь в амбулаторных условиях\",aq0['c2_8__sum']],\n [\"2.9. Число среднего медицинского персонала, работающих в медицинских организациях, оказывающих медицинскую помощь в амбулаторных условиях\",aq0['c2_9__sum']],\n [\"2.10. Число занятых должностей среднего медицинского персонала, оказывающих медицинскую помощь в амбулаторных условиях\",aq0['c2_10__sum']],\n [\"2.11. Число штатных должностей врачей, оказывающих медицинскую помощь в амбулаторных условиях\",aq0['c2_11__sum']],\n [\"2.12. 
Число штатных должностей среднего медицинского персонала, оказывающих медицинскую помощь в амбулаторных условиях\",aq0['c2_12__sum']],\n [\"2.13. Число врачей дефицитных для субъекта Российской Федерации должностей на конец отчетного периода (выбрать из справочника), всего\",aq0['c2_13__sum']],\n [\"2.13.1 акушеры – гинекологи\",aq0['c2_13_1__sum']],\n [\"2.13.2 - их них акушеры – гинекологи цехового врачебного участка\",aq0['c2_13_2__sum']],\n [\"2.13.3 аллергологи – иммунологи\",aq0['c2_13_3__sum']],\n [\"2.13.4 анестезиологи – реаниматологи\",aq0['c2_13_4__sum']],\n [\"2.13.5 бактериологи\",aq0['c2_13_5__sum']],\n [\"2.13.6 вирусологи\",aq0['c2_13_6__sum']],\n [\"2.13.7 врачи здравпунктов\",aq0['c2_13_7__sum']],\n [\"2.13.8 гастроэнтерологи\",aq0['c2_13_8__sum']], \n [\"2.13.9 гематологи\",aq0['c2_13_9__sum']],\n [\"2.13.10 генетики\",aq0['c2_13_10__sum']],\n [\"2.13.11 гериатры\",aq0['c2_13_11__sum']],\n [\"2.13.12 дезинфектологи\",aq0['c2_13_12__sum']],\n [\"2.13.13 дерматовенерологи\",aq0['c2_13_13__sum']],\n [\"2.13.14 диабетологи\",aq0['c2_13_14__sum']],\n [\"2.13.15 диетологи\",aq0['c2_13_15__sum']],\n [\"2.13.16 инфекционисты\",aq0['c2_13_16__sum']],\n [\"2.13.17 кардиологи\",aq0['c2_13_17__sum']],\n [\"2.13.18 кардиологи детские\",aq0['c2_13_18__sum']],\n [\"2.13.19 клинической лабораторной диагностики\",aq0['c2_13_19__sum']],\n [\"2.13.20 клинические микологи\",aq0['c2_13_20__sum']],\n [\"2.13.21 колопроктологи\",aq0['c2_13_21__sum']],\n [\"2.13.22 косметологи\",aq0['c2_13_22__sum']],\n [\"2.13.23 лаборанты\",aq0['c2_13_23__sum']],\n [\"2.13.24 лабораторные генетики\",aq0['c2_13_24__sum']], \n [\"2.13.25 лабораторные микологи\",aq0['c2_13_25__sum']],\n [\"2.13.26 мануальной терапии\",aq0['c2_13_26__sum']],\n [\"2.13.27 методисты\",aq0['c2_13_27__sum']],\n [\"2.13.28 неврологи\",aq0['c2_13_28__sum']],\n [\"2.13.29 нейрохирурги\",aq0['c2_13_29__sum']],\n [\"2.13.30 неонатолог\",aq0['c2_13_30__sum']],\n [\"2.13.31 нефрологи\",aq0['c2_13_31__sum']],\n 
[\"2.13.32 общей практики (семейные)\",aq0['c2_13_32__sum']],\n [\"2.13.33 онкологи\",aq0['c2_13_33__sum']],\n [\"2.13.34 онкологи детские\",aq0['c2_13_34__sum']],\n [\"2.13.35 ортодонты\",aq0['c2_13_35__sum']],\n [\"2.13.36 остеопаты\",aq0['c2_13_36__sum']],\n [\"2.13.37 оториноларингологи\",aq0['c2_13_37__sum']],\n [\"2.13.38 офтальмологи\",aq0['c2_13_38__sum']],\n [\"2.13.39 офтальмологи-протезисты\",aq0['c2_13_39__sum']],\n [\"2.13.40 паразитологи\",aq0['c2_13_40__sum']], \n [\"2.13.41 патологоанатомы\",aq0['c2_13_41__sum']],\n [\"2.13.42 педиатры – всего\",aq0['c2_13_42__sum']],\n [\"2.13.43 - из них: педиатры участковые (включая педиатров участковых приписных участков)\",aq0['c2_13_43__sum']],\n [\"2.13.44 - педиатры городские (районные)\",aq0['c2_13_44__sum']],\n [\"2.13.45 по авиационной и космической медицине\",aq0['c2_13_45__sum']],\n [\"2.13.46 по водолазной медицине\",aq0['c2_13_46__sum']],\n [\"2.13.47 по гигиене детей и подростков\",aq0['c2_13_47__sum']],\n [\"2.13.48 по гигиене питания\",aq0['c2_13_48__sum']],\n [\"2.13.49 по гигиене труда\",aq0['c2_13_49__sum']],\n [\"2.13.50 по гигиеническому воспитанию\",aq0['c2_13_50__sum']],\n [\"2.13.51 по коммунальной гигиене\",aq0['c2_13_51__sum']],\n [\"2.13.52 по лечебной физкультуре\",aq0['c2_13_52__sum']],\n [\"2.13.53 по медико-социальной экспертизе\",aq0['c2_13_53__sum']],\n [\"2.13.54 по медицинской профилактике\",aq0['c2_13_54__sum']],\n [\"2.13.55 по медицинской реабилитации\",aq0['c2_13_55__sum']],\n [\"2.13.56 по общей гигиене\",aq0['c2_13_56__sum']], \n [\"2.13.57 по паллиативной медицинской помощи\",aq0['c2_13_57__sum']],\n [\"2.13.58 по радиационной гигиене\",aq0['c2_13_58__sum']],\n [\"2.13.59 по рентгенэдоваскулярным диагностике и лечению\",aq0['c2_13_59__sum']],\n [\"2.13.60 по санитарно-гигиеническим лабораторным исследованиям\",aq0['c2_13_60__sum']],\n [\"2.13.61 по спортивной медицине\",aq0['c2_13_61__sum']],\n [\"2.13.62 приемного отделения\",aq0['c2_13_62__sum']],\n [\"2.13.63 
профпатологи\",aq0['c2_13_63__sum']],\n [\"2.13.64 психиатры\",aq0['c2_13_64__sum']],\n [\"2.13.65 - из них: участковые\",aq0['c2_13_65__sum']],\n [\"2.13.66 - психиатры детские\",aq0['c2_13_66__sum']],\n [\"2.13.67 - из них психиатры детские участковые\",aq0['c2_13_67__sum']],\n [\"2.13.68 - психиат��ы подростковые\",aq0['c2_13_68__sum']],\n [\"2.13.69 - из них психиатры подростковые участковые\",aq0['c2_13_69__sum']],\n [\"2.13.70 - психиатры-наркологи\",aq0['c2_13_70__sum']],\n [\"2.13.71 - из них психиатры-наркологи участковые\",aq0['c2_13_71__sum']],\n [\"2.13.72 психотерапевты\",aq0['c2_13_72__sum']],\n [\"2.13.73 пульмонологи\",aq0['c2_13_73__sum']], \n [\"2.13.74 радиологи\",aq0['c2_13_74__sum']],\n [\"2.13.75 радиотерапевты\",aq0['c2_13_75__sum']],\n [\"2.13.76 ревматологи\",aq0['c2_13_76__sum']],\n [\"2.13.77 рентгенологи\",aq0['c2_13_77__sum']],\n [\"2.13.78 рефлексотерапевты\",aq0['c2_13_78__sum']],\n [\"2.13.79 сексологи\",aq0['c2_13_79__sum']],\n [\"2.13.80 стажеры\",aq0['c2_13_80__sum']],\n [\"2.13.81 статистики\",aq0['c2_13_81__sum']],\n [\"2.13.82 стоматологи\",aq0['c2_13_82__sum']],\n [\"2.13.83 стоматологи детские\",aq0['c2_13_83__sum']],\n [\"2.13.84 стоматологи-ортопеды\",aq0['c2_13_84__sum']],\n [\"2.13.85 стоматологи-терапевты\",aq0['c2_13_85__sum']],\n [\"2.13.86 стоматологи-хирурги\",aq0['c2_13_86__sum']],\n [\"2.13.87 судебно-психиатрические эксперты\",aq0['c2_13_87__sum']],\n [\"2.13.89 судовые врачи\",aq0['c2_13_89__sum']],\n [\"2.13.90 сурдологи-оториноларингологи\",aq0['c2_13_90__sum']],\n [\"2.13.91 сурдологи-протезисты\",aq0['c2_13_91__sum']],\n [\"2.13.92 терапевты - всего\",aq0['c2_13_92__sum']],\n [\"2.13.93 - из них: терапевты участковые\",aq0['c2_13_93__sum']],\n [\"2.13.94 - терапевты участковые цеховых врачебных участков\",aq0['c2_13_94__sum']],\n [\"2.13.95 - терапевты амбулаторий\",aq0['c2_13_95__sum']],\n [\"2.13.96 - терапевты подростковые\",aq0['c2_13_96__sum']],\n [\"2.13.97 токсикологи\",aq0['c2_13_97__sum']],\n 
[\"2.13.98 травматологи - ортопеды\",aq0['c2_13_98__sum']],\n [\"2.13.99 трансфузиологи\",aq0['c2_13_99__sum']], \n [\"2.13.100 ультразвуковой диагностики\",aq0['c2_13_100__sum']],\n [\"2.13.101 урологи\",aq0['c2_13_101__sum']],\n [\"2.13.102 урологи-андрологи детские\",aq0['c2_13_102__sum']],\n [\"2.13.103 фармакологи клинические\",aq0['c2_13_103__sum']],\n [\"2.13.104 физиотерапевты\",aq0['c2_13_104__sum']],\n [\"2.13.105 фтизиатры\",aq0['c2_13_105__sum']],\n [\"2.13.106 - из них: фтизиатры участковые\",aq0['c2_13_106__sum']],\n [\"2.13.107 функциональной диагностики\",aq0['c2_13_107__sum']],\n [\"2.13.108 хирурги\",aq0['c2_13_108__sum']], \n [\"2.13.109 хирурги детские\",aq0['c2_13_109__sum']],\n [\"2.13.110 хирурги пластические\",aq0['c2_13_110__sum']],\n [\"2.13.111 хирурги сердечно-сосудистые\",aq0['c2_13_111__sum']],\n [\"2.13.112 хирурги торакальные\",aq0['c2_13_112__sum']],\n [\"2.13.113 хирурги челюстно-лицевые\",aq0['c2_13_113__sum']],\n [\"2.13.114 эндокринологи\",aq0['c2_13_114__sum']],\n [\"2.13.115 эндокринологи детские\",aq0['c2_13_115__sum']],\n [\"2.13.116 эндоскописты\",aq0['c2_13_116__sum']],\n [\"2.13.117 эпидемиологи\",aq0['c2_13_117__sum']], \n [\"2.13.118 прочие\",aq0['c2_13_118__sum']],\n [\"2.13.119 Специалисты с высшим немедицинским образованием – всего:\",aq0['c2_13_119__sum']],\n [\"2.13.120 - из них специалисты: биологи\",aq0['c2_13_120__sum']],\n [\"2.13.121 - психологи медицинские\",aq0['c2_13_121__sum']],\n [\"2.13.122 - инструкторы-методисты по лечебной физкультуре\",aq0['c2_13_122__sum']],\n [\"2.13.123 - судебные эксперты\",aq0['c2_13_123__sum']],\n [\"2.13.124 - химики-эксперты\",aq0['c2_13_124__sum']],\n [\"2.14.Число среднего медицинского персонала дефицитных для субъекта Российской Федерации должностей на конец отчетного периода (выб��ать из справочника), всего\",aq1['c2_14__sum']],\n [\"2.14.1 акушерки\",aq1['c2_14_1__sum']], \n [\"2.14.2 гигиенисты стоматологические\",aq1['c2_14_2__sum']],\n [\"2.14.3 
заведующие\",aq1['c2_14_3__sum']],\n [\"2.14.4 - из них: заведущие фельдшерско-акушерским пунктом\",aq1['c2_14_4__sum']],\n [\"2.14.5 зубные врачи\",aq1['c2_14_5__sum']],\n [\"2.14.6 зубные техники\",aq1['c2_14_6__sum']],\n [\"2.14.7 инструкторы-дезинфекторы\",aq1['c2_14_7__sum']],\n [\"2.14.8 инструкторы по гигиеническому воспитанию\",aq1['c2_14_8__sum']],\n [\"2.14.9 инструкторы по лечебной физкультуре\",aq1['c2_14_9__sum']],\n [\"2.14.10 инструкторы по трудовой терапии\",aq1['c2_14_10__sum']], \n [\"2.14.11 лаборанты\",aq1['c2_14_11__sum']],\n [\"2.14.12 медицинские лабораторные техники (фельдшеры-лаборанты)\",aq1['c2_14_12__sum']],\n [\"2.14.13 медицинские сестры\",aq1['c2_14_13__sum']],\n [\"2.14.14 - анестезисты\",aq1['c2_14_14__sum']],\n [\"2.14.15 - врачей общей практики (семейных врачей)\",aq1['c2_14_15__sum']],\n [\"2.14.16 - диетические\",aq1['c2_14_16__sum']],\n [\"2.14.17 - медико-социальной помощи\",aq1['c2_14_17__sum']],\n [\"2.14.18 - медицинская сестра (фельдшер) по приему вызовов скорой медицинской помощи и передаче их выездным бригадам скорой медицинской помощи\",aq1['c2_14_18__sum']],\n [\"2.14.19 - операционные\",aq1['c2_14_19__sum']], \n [\"2.14.20 - палатные (постовые)\",aq1['c2_14_20__sum']],\n [\"2.14.21 - патронажные\",aq1['c2_14_21__sum']],\n [\"2.14.22 - перевязочной\",aq1['c2_14_22__sum']],\n [\"2.14.23 - по косметологии\",aq1['c2_14_23__sum']],\n [\"2.14.24 - по массажу\",aq1['c2_14_24__sum']],\n [\"2.14.25 - приемного отделения\",aq1['c2_14_25__sum']],\n [\"2.14.26 - процедурной\",aq1['c2_14_26__sum']],\n [\"2.14.27 - по реабилитации\",aq1['c2_14_27__sum']],\n [\"2.14.28 - старшие\",aq1['c2_14_28__sum']], \n [\"2.14.29 - стерилизационной\",aq1['c2_14_29__sum']],\n [\"2.14.30 - участковые врачей-терапевтов участковых\",aq1['c2_14_30__sum']],\n [\"2.14.31 - участковые врачей-педиатров участковых\",aq1['c2_14_31__sum']],\n [\"2.14.32 - по физиотерапии\",aq1['c2_14_32__sum']],\n [\"2.14.33 - по функциональной 
диагностике\",aq1['c2_14_33__sum']],\n [\"2.14.34 медицинские дезинфекторы\",aq1['c2_14_34__sum']],\n [\"2.14.35 медицинские оптики-оптометристы\",aq1['c2_14_35__sum']],\n [\"2.14.36 медицинские регистраторы\",aq1['c2_14_36__sum']],\n [\"2.14.37 медицинские статистики\",aq1['c2_14_37__sum']], \n [\"2.14.38 медицинские технологи\",aq1['c2_14_38__sum']],\n [\"2.14.39 помощники врачей\",aq1['c2_14_39__sum']],\n [\"2.14.40 рентгенолаборанты\",aq1['c2_14_40__sum']],\n [\"2.14.41 фельдшеры\",aq1['c2_14_41__sum']], \n [\"2.14.42 - из них: фельдшеры скорой медицинской помощи\",aq1['c2_14_42__sum']],\n [\"2.14.43 - фельдшеры-наркологи\",aq1['c2_14_43__sum']],\n [\"2.14.44 - фельдшеры-водители скорой медицинской помощи\",aq1['c2_14_44__sum']],\n [\"2.14.45 прочий средний медицинский персонал\",aq1['c2_14_45__sum']],\n [\"2.15.Число участковых врачей терапевтов, работающих в медицинских организациях субъекта Российской Федерации\",aq1['c2_15__sum']],\n [\"2.16.Число участковых врачей педиатров, работающих в медицинских организациях субъекта Российской Федерации\",aq1['c2_16__sum']],\n [\"2.17.Число врачей общей практики (семейных врачей), работающих в медицинских организациях субъекта Российской Федерации\",aq1['c2_17__sum']],\n [\"2.18.Число участковых медицинских сестер участкового врача терапевта\",aq1['c2_18__sum']],\n [\"2.19.Число участковых медицинских сестер участкового врача педиатра\",aq1['c2_19__sum']], \n [\"2.20.Число участковых медицинских сестер врача общей практики (семейного врача)\",aq1['c2_20__sum']],\n [\"2.21.Число штатных должностей участковых врачей терапевтов\",aq1['c2_21__sum']],\n [\"2.22.Число штатных должностей участковых врачей педиатров\",aq1['c2_22__sum']],\n [\"2.23.Число штатных должностей врачей общей практики (семейного врача)\",aq1['c2_23__sum']], \n [\"2.24.Число штатных должностей участковых медицинских сестер участкового врача терапевта\",aq1['c2_24__sum']],\n [\"2.25.Число штатных должностей участковых медицинских сестер участкового 
врача педиатра\",aq1['c2_25__sum']],\n [\"2.26.Число штатных должностей участковых медицинских сестер врача общей практики (семейного врача)\",aq1['c2_26__sum']],\n [\"2.27.Число врачей, привлеченных на работу в медицинские организации субъекта Российской Федерации\",aq1['c2_27__sum']],\n [\"2.28.Число среднего медицинского персонала, привлеченного на работу в медицинские организации субъекта Российской Федерации\",aq1['c2_28__sum']],\n [\"3.1 Число врачей, получивших жилье из нуждающихся в улучшении жилищных условий, всего, из них\",aq1['c3_1__sum']],\n [\"3.2 - по социальному найму\",aq1['c3_2__sum']],\n [\"3.3 - служебное жилье\",aq1['c3_3__sum']],\n [\"3.4 - предоставление общежития\",aq1['c3_4__sum']], \n [\"3.5 Число врачей, приобретших жилье из нуждающихся в улучшении жилищных условий, всего, из них:\",aq1['c3_5__sum']],\n [\"3.6 - с использованием безвозмездной единовременной субсидии (выплаты) на компенсацию части стоимости приобретаемого (строящегося) жилья\",aq1['c3_6__sum']],\n [\"3.7 - с использованием безвозмездной единовременной субсидии (выплаты) на компенсацию части платежа по кредитному договору (договору займа)\",aq1['c3_7__sum']],\n [\"3.8 - на основе льготного кредитования\",aq1['c3_8__sum']], \n [\"3.9 Число врачей, нуждающихся в улучшении жилищных условий\",aq1['c3_9__sum']],\n [\"3.10 Число врачей, обеспеченных жильем, из числа врачей, привлеченных в субъект Российской Федерации\",aq1['c3_10__sum']],\n [\"3.11 Число врачей, нуждающихся в улучшении жилищных условий из числа врачей, привлеченных в субъект Российской Федерации\",aq1['c3_11__sum']],\n [\"3.12 Число среднего медицинского персонала, получившего жилье из нуждающихся в улучшении жилищных условий, всего, из них:\",aq1['c3_12__sum']],\n [\"3.13 - по социальному найму\",aq1['c3_13__sum']],\n [\"3.14 - служебное жилье\",aq1['c3_14__sum']],\n [\"3.15 - предоставление места в общежитии\",aq1['c3_15__sum']],\n [\"3.16 Число среднего медицинского персонала, приобретшего жилье из нуждающихся 
в улучшении жилищных условий, всего, из них:\",aq1['c3_16__sum']],\n [\"3.17 - с использованием безвозмездной единовременной субсидии (выплаты) на компенсацию части стоимости приобретаемого (строящегося) жилья\",aq1['c3_17__sum']], \n [\"3.18 - с исполь��ованием безвозмездной единовременной субсидии (выплаты) на компенсацию части платежа по кредитному договору (договору займа)\",aq1['c3_18__sum']],\n [\"3.19 - на основе льготного кредитования\",aq1['c3_19__sum']],\n [\"3.20 Число среднего медицинского персонала, нуждающихся в улучшении жилищных условий\",aq1['c3_20__sum']],\n [\"3.21 Число среднего медицинского персонала, обеспеченных жильем, из числа средних медицинских работников, привлеченных в субъект Российской Федерации\",aq1['c3_21__sum']], \n [\"3.22 Число среднего медицинского персонала, нуждающихся в улучшении жилищных условий из числа средних медицинских работников, привлеченных в субъект Российской Федерации\",aq1['c3_22__sum']],\n [\"3.23 Число врачей, получивших безвозмездно земельный участок для строительства (покупки) жилья, всего\",aq1['c3_23__sum']],\n [\"3.24 - в том числе, привлеченных в субъект Российской Федерации\",aq1['c3_24__sum']],\n [\"3.25 Число среднего медицинского персонала, получивших безвозмездно земельный участок для строительства (покупки) жилья, всего\",aq1['c3_25__sum']],\n [\"3.26 - в том числе, привлеченных в субъект Российской Федерации\",aq1['c3_26__sum']],\n [\"3.27 Число врачей, получивших компенсацию расходов за аренду жилого помещения\",aq1['c3_27__sum']],\n [\"3.28 Число среднего медицинского персонала, получивших компенсацию расходов за аренду жилого помещения\",aq1['c3_28__sum']],\n [\"3.29 Число врачей, получивших единовременные денежные выплаты при заключении трудового договора («подъмные»), с указанием программ – Земский доктор, иные\",aq1['c3_29__sum']],\n [\"3.30 Число среднего медицинского персонала, получивших единовременные денежные выплаты при заключении трудового договора («подъмные»), с указанием программ – 
Земский доктор, иные\",aq1['c3_30__sum']], \n [\"3.31 Число врачей, получивших ежемесячные денежные выплаты\",aq1['c3_31__sum']],\n [\"3.32 Число среднего медицинского персонала, получивших ежемесячные денежные выплаты\",aq1['c3_32__sum']],\n [\"3.33 Число студентов образовательных учреждений высшего профессионального образования, получивших доплаты к стипендиям\",aq1['c3_33__sum']],\n [\"3.34 Число студентов образовательных учреждений среднего профессионального образования, получивших доплаты к стипендиям\",aq1['c3_34__sum']], \n [\"3.35 Число врачей, получивших компенсацию расходов на оплату жилищно-коммунальных услуг\",aq1['c3_35__sum']],\n [\"3.36 Число среднего медицинского персонала, получивших компенсацию расходов на оплату жилищно-коммунальных услуг\",aq1['c3_36__sum']],\n [\"3.37 Число врачей, которые воспользовались внеочередным предоставлением места в дошкольной образовательной организации\",aq1['c3_37__sum']],\n [\"3.38 Число среднего медицинского персонала, которые воспользовались внеочередным предоставлением места в дошкольной образовательной организации\",aq1['c3_38__sum']],\n [\"4.1 Число врачей, прошедших обучение по программам дополнительного профессионального образования (повышение квалификации)\",aq1['c4_1__sum']],\n [\"4.2 Число врачей, прошедших обучение по программам дополнительного профессионального образования (профессиональная переподготовка)\",aq1['c4_2__sum']],\n [\"4.3 Число врачей, направленных на целевую подготовку в интернатуре\",aq1['c4_3__sum']], \n [\"4.4 Число врачей, направленных на целевую подготовку в ординатуре\",aq1['c4_4__sum']],\n [\"4.5 Число врачей, завершивших обучение в интернатуре, всего\",aq1['c4_5__sum']],\n [\"4.6 в том числе, завершивших обучение в целевой интернатуре\",aq1['c4_6__sum']],\n [\"4.7 Число врачей, завершивших обучение в ординатуре, всего\",aq1['c4_7__sum']], \n [\"4.8 в том числе, завершивших обучение в целевой ординатуре\",aq1['c4_8__sum']],\n [\"4.9 Число выпускников, трудоустроившихся в 
медицинских организациях государственной и муниципальной систем здравоохранения субъекта Российской Федерации после завершения обучения в целевой интернатуре\",aq1['c4_9__sum']],\n [\"4.10 Число выпускников, трудоустроившихся в медицинских организациях государственной и муниципальной систем здравоохранения субъекта Российской Федерации после завершения обучения в целевой ординатуре\",aq1['c4_10__sum']],\n [\"4.11 Число среднего медицинского персонала, направленного на целевую подготовку по программам среднего профессионального образования (базовая и углубленная подготовка)\",aq1['c4_11__sum']],\n [\"4.12 Число специалистов со средним медицинским и фармацевтическим образованием, прошедших обучение по программам дополнительного образования в образовательных учреждениях среднего профессионального и дополнительного профессионального образования\",aq1['c4_12__sum']],\n [\"4.13 Число бюджетных мест в образовательных учреждениях среднего профессионального образования, подведомственных субъекту Российской Федерации\",aq1['c4_13__sum']],\n [\"4.14 Число внебюджетных мест в образовательных учреждениях среднего профессионального образования, подведомственных субъекту Российской Федерации\",aq1['c4_14__sum']],\n [\"4.15 Число среднего медицинского персонала, трудоустроившихся в медицинских организациях государственной и муниципальной систем здравоохранения после завершения целевой подготовки по программам среднего профессионального образования (базовая и углубленная подготовка)\",aq1['c4_15__sum']],\n ]\n\n return s\n\ndef calc_valf3_form2(doc):\n ''' Возвращает значения в приложении 3.1 \n '''\n s = [[\"3.1 Число врачей, получивших жилье из нуждающихся в улучшении жилищных условий, всего, из них\", doc[0].p3_1],\n [\"3.2 - по социальному найму\", doc[0].p3_2],\n [\"3.3 - служебное жилье\", doc[0].p3_3],\n [\"3.4 - предоставление общежития\", doc[0].p3_4], \n [\"3.5 Число врачей, приобретших жилье из нуждающихся в улучшении жилищных условий, всего, из них:\", doc[0].p3_5],\n 
[\"3.6 - с использованием безвозмездной единовременной субсидии (выплаты) на компенсацию части стоимости приобретаемого (строящегося) жилья\", doc[0].p3_6],\n [\"3.7 - с использованием безвозмездной единовременной субсидии (выплаты) на компенсацию части платежа по кредитному договору (договору займа)\", doc[0].p3_7],\n [\"3.8 - на основе льготного кредитования\", doc[0].p3_8], \n [\"3.9 Число врачей, нуждающихся в улучшении жилищных условий\", doc[0].p3_9],\n [\"3.10 Число врачей, обеспеченных жильем, из числа врачей, привлеченных в субъект Российской Федерации\", doc[0].p3_10],\n [\"3.11 Число врачей, нуждающихся в улучшении жилищных условий из числа врачей, привлеченных в субъект Российской Федерации\",doc[0].p3_11],\n [\"3.12 Число среднего медицинского персонала, получившего жилье из нуждающихся в улучшении жилищных условий, всего, из них:\",doc[0].p3_12],\n [\"3.13 - по социальному найму\",doc[0].p3_13],\n [\"3.14 - служебное жилье\",doc[0].p3_14],\n [\"3.15 - предоставление места в общежитии\",doc[0].p3_15],\n [\"3.16 Число среднего медицинского персонала, приобретшего жилье из нуждающихся в улучшении жилищных условий, всего, из них:\",doc[0].p3_16],\n [\"3.17 - с использованием безвозмездной единовременной субсидии (выплаты) на компенсацию части стоимости приобретаемого (строящегося) жилья\",doc[0].p3_17], \n [\"3.18 - с использованием безвозмездной единовременной субсидии (выплаты) на компенсацию части платежа по кредитному договору (договору займа)\",doc[0].p3_18],\n [\"3.19 - на основе льготного кредитования\",doc[0].p3_19],\n [\"3.20 Число среднего медицинского персонала, нуждающихся в улучшении жилищных условий\",doc[0].p3_20],\n [\"3.21 Число среднего медицинского персонала, обеспеченных жильем, из числа средних медицинских работников, привлеченных в субъект Российской Федерации\",doc[0].p3_21], \n [\"3.22 Число среднего медицинского персонала, нуждающихся в улучшении жилищных условий из числа средних медицинских работников, привлеченных в субъект 
Российской Федерации\",doc[0].p3_22],\n [\"3.23 Число врачей, получивших безвозмездно земельный участок для строительства (покупки) жилья, всего\",doc[0].p3_23],\n [\"3.24 - в том числе, привлеченных в субъект Российской Федерации\",doc[0].p3_24],\n [\"3.25 Число среднего медицинского персонала, получивших безвозмездно земельный участок для строительства (покупки) жилья, всего\",doc[0].p3_25],\n [\"3.26 - в том числе, привлеченных в субъект Российской Федерации\",doc[0].p3_26],\n [\"3.27 Число врачей, получивших компенсацию расходов за аренду жилого помещения\",doc[0].p3_27],\n [\"3.28 Число среднего медицинского персонала, получивших компенсацию расходов за аренду жилого помещения\",doc[0].p3_28],\n [\"3.29 Число врачей, получивших единовременные денежные выплаты при заключении трудового договора («подъмные»), с указанием программ – Земский доктор, иные\",doc[0].p3_29],\n [\"3.30 Число среднего медицинского персонала, получивших единовременные денежные выплаты при заключении трудового договора («подъмные»), с указанием программ – Земский доктор, иные\",doc[0].p3_30], \n [\"3.31 Число врачей, ��олучивших ежемесячные денежные выплаты\",doc[0].p3_31],\n [\"3.32 Число среднего медицинского персонала, получивших ежемесячные денежные выплаты\",doc[0].p3_32],\n [\"3.33 Число студентов образовательных учреждений высшего профессионального образования, получивших доплаты к стипендиям\",doc[0].p3_33],\n [\"3.34 Число студентов образовательных учреждений среднего профессионального образования, получивших доплаты к стипендиям\",doc[0].p3_34], \n [\"3.35 Число врачей, получивших компенсацию расходов на оплату жилищно-коммунальных услуг\",doc[0].p3_35],\n [\"3.36 Число среднего медицинского персонала, получивших компенсацию расходов на оплату жилищно-коммунальных услуг\",doc[0].p3_36],\n [\"3.37 Число врачей, которые воспользовались внеочередным предоставлением места в дошкольной образовательной организации\",doc[0].p3_37],\n [\"3.38 Число среднего медицинского персонала, 
которые воспользовались внеочередным предоставлением места в дошкольной образовательной организации\",doc[0].p3_38]\n ]\n \n return s\n\n\n\n\n\ndef exp_to_excel_form2(doc, iperiod, iregion, mode, stat = None): # mode = 0 по региону или группе больниц mode = 1 - по конкретной больнице\n res = calc_sum_form2(doc)\n speriod = get_period_namef(iperiod)\n sregion = get_region_name(mode,doc,iregion)\n if mode == 1:\n name_file = get_name(\"/static/Form/Form2.xlsx\")\n else:\n name_file = get_name(\"/static/Form/Form2_All.xlsx\")\n\n wb = openpyxl.load_workbook(name_file)\n sheet = wb.active\n sheet['B2'] = speriod\n sheet['B1'] = sregion\n if mode==0:\n sheet['B310'] = \"Статистика по отчету\" \n sheet['B311'] = \"Организаций предоставляющих, Всего\"\n sheet['C311'] = stat.rec_all\n sheet['B312'] = \"Отобрано в отчет, Всего\"\n sheet['C312'] = stat.rec_fltr\n sheet['B313'] = \"Завершено\"\n sheet['C313'] = stat.rec_complete\n sheet['B314'] = \"Согласование\"\n sheet['C314'] = stat.rec_soglas\n sheet['B315'] = \"Корректировка\"\n sheet['C315'] = stat.rec_correct\n sheet['B316'] = \"Редактирование\"\n sheet['C316'] = stat.rec_edit\n\n\n startrow = 7 \n for i in range(0,296):\n srA = \"B\" + str(startrow + i)\n srB = \"C\" + str(startrow + i)\n sheet[srA] = res[i][0]\n sheet[srB] = res[i][1]\n# вывод только для конкретной МО для все не выводится \n if mode == 1:\n res = calc_valf3_form2(doc)\n startrow = 307 \n for i in range(0,38):\n srA = \"B\" + str(startrow + i)\n srB = \"C\" + str(startrow + i)\n sheet[srA] = res[i][0]\n sheet[srB] = res[i][1]\n \n sheet['A346'] = \"Выведено в системе Мед+ \" + str(datetime.now()) \n sheet['A346'].font = Font(size=5)\n else:\n sheet['A318'] = \"Выведено в системе Мед+ \" + str(datetime.now()) \n sheet['A318'].font = Font(size=5)\n \n # name_file = get_name(\"\\\\medicament\\\\Form\\\\rep\" + str(int(random()*100000000)) + \".xlsx\") \n name_file = get_name(\"/medicament/Form/rep\" + str(int(random()*100000000)) + \".xlsx\") \n 
wb.save(name_file)\n \n return name_file\n\ndef load_from_excel_form2(doc_id, filename):\n d = Doc2.objects.get(pk=doc_id)\n name_file = (\"C:\\\\MedPlus\\\\\" + filename)\n\n wb = openpyxl.load_workbook(name_file)\n sheet = wb.active\n startrow = 7 \n for i in range(0,296):\n c = \"C\" + str(startrow + i)\n if i==0:\n d.c2_1 = sheet[c].value\n elif i==1:\n d.c2_2 = sheet[c].value\n elif i==2:\n d.c2_3 = sheet[c].value\n elif i==3:\n d.c2_3_1 = sheet[c].value\n elif i==4:\n d.c2_3_2 = sheet[c].value\n elif i==5:\n d.c2_3_3 = sheet[c].value\n elif i==6:\n d.c2_4 = sheet[c].value\n elif i==7:\n d.c2_4_1 = sheet[c].value\n elif i==8:\n d.c2_4_2 = sheet[c].value\n elif i==9:\n d.c2_4_3 = sheet[c].value\n elif i==10:\n d.c2_5 = sheet[c].value\n elif i==11:\n d.c2_5_1 = sheet[c].value\n elif i==12:\n d.c2_5_2 = sheet[c].value\n elif i==13:\n d.c2_5_3 = sheet[c].value\n elif i==14:\n d.c2_5_4 = sheet[c].value\n elif i==15:\n d.c2_5_5 = sheet[c].value\n elif i==16:\n d.c2_5_6 = sheet[c].value\n elif i==17:\n d.c2_5_7 = sheet[c].value\n elif i==18:\n d.c2_5_8 = sheet[c].value\n elif i==19:\n d.c2_5_9 = sheet[c].value\n elif i==20:\n d.c2_5_10 = sheet[c].value\n elif i==21:\n d.c2_5_11 = sheet[c].value\n elif i==22:\n d.c2_5_12 = sheet[c].value\n elif i==23:\n d.c2_5_13 = sheet[c].value\n elif i==24:\n d.c2_5_14 = sheet[c].value\n elif i==25:\n d.c2_5_15 = sheet[c].value\n elif i==26:\n d.c2_5_16 = sheet[c].value\n elif i==27:\n d.c2_5_17 = sheet[c].value\n elif i==28:\n d.c2_5_18 = sheet[c].value\n elif i==29:\n d.c2_5_19 = sheet[c].value\n elif i==30:\n d.c2_5_20 = sheet[c].value\n elif i==31:\n d.c2_5_21 = sheet[c].value\n elif i==32:\n d.c2_5_22 = sheet[c].value\n elif i==33:\n d.c2_5_23 = sheet[c].value\n elif i==34:\n d.c2_5_24 = sheet[c].value\n elif i==35:\n d.c2_5_25 = sheet[c].value\n elif i==36:\n d.c2_5_26 = sheet[c].value\n elif i==37:\n d.c2_5_27 = sheet[c].value\n elif i==38:\n d.c2_5_28 = sheet[c].value\n elif i==39:\n d.c2_5_29 = sheet[c].value\n elif 
i==40:\n d.c2_5_30 = sheet[c].value\n elif i==41:\n d.c2_5_31 = sheet[c].value\n elif i==42:\n d.c2_5_32 = sheet[c].value\n elif i==43:\n d.c2_5_33 = sheet[c].value\n elif i==44:\n d.c2_5_34 = sheet[c].value\n elif i==45:\n d.c2_5_35 = sheet[c].value\n elif i==46:\n d.c2_5_36 = sheet[c].value\n elif i==47:\n d.c2_5_37 = sheet[c].value\n elif i==48:\n d.c2_5_38 = sheet[c].value\n elif i==49:\n d.c2_5_39 = sheet[c].value\n elif i==50:\n d.c2_5_40 = sheet[c].value\n elif i==51:\n d.c2_5_41 = sheet[c].value\n elif i==52:\n d.c2_6 = sheet[c].value\n elif i==53:\n d.c2_7 = sheet[c].value\n elif i==54:\n d.c2_8 = sheet[c].value\n elif i==55:\n d.c2_9 = sheet[c].value\n elif i==56:\n d.c2_10 = sheet[c].value\n elif i==57:\n d.c2_11 = sheet[c].value\n elif i==58:\n d.c2_12 = sheet[c].value\n elif i==59:\n d.c2_13 = sheet[c].value\n elif i==60:\n d.c2_13_1 = sheet[c].value\n elif i==61:\n d.c2_13_2 = sheet[c].value\n elif i==62:\n d.c2_13_3 = sheet[c].value\n elif i==63:\n d.c2_13_4 = sheet[c].value\n elif i==64:\n d.c2_13_5 = sheet[c].value\n elif i==65:\n d.c2_13_6 = sheet[c].value\n elif i==66:\n d.c2_13_7 = sheet[c].value\n elif i==67:\n d.c2_13_8 = sheet[c].value\n elif i==68:\n d.c2_13_9 = sheet[c].value\n elif i==69:\n d.c2_13_10 = sheet[c].value\n elif i==70:\n d.c2_13_11 = sheet[c].value\n elif i==71:\n d.c2_13_12 = sheet[c].value\n elif i==72:\n d.c2_13_13 = sheet[c].value\n elif i==73:\n d.c2_13_14 = sheet[c].value\n elif i==74:\n d.c2_13_15 = sheet[c].value\n elif i==75:\n d.c2_13_16 = sheet[c].value\n elif i==76:\n d.c2_13_17 = sheet[c].value\n elif i==77:\n d.c2_13_18 = sheet[c].value\n elif i==78:\n d.c2_13_19 = sheet[c].value\n elif i==79:\n d.c2_13_20 = sheet[c].value\n elif i==80:\n d.c2_13_21 = sheet[c].value\n elif i==81:\n d.c2_13_22 = sheet[c].value\n elif i==82:\n d.c2_13_23 = sheet[c].value \n elif i==83:\n d.c2_13_24 = sheet[c].value\n elif i==84:\n d.c2_13_25 = sheet[c].value\n elif i==85:\n d.c2_13_26 = sheet[c].value\n elif i==86:\n d.c2_13_27 = 
sheet[c].value\n elif i==87:\n d.c2_13_28 = sheet[c].value\n elif i==88:\n d.c2_13_29 = sheet[c].value\n elif i==89:\n d.c2_13_30 = sheet[c].value\n elif i==90:\n d.c2_13_31 = sheet[c].value\n elif i==91:\n d.c2_13_32 = sheet[c].value\n elif i==92:\n d.c2_13_33 = sheet[c].value\n elif i==93:\n d.c2_13_34 = sheet[c].value\n elif i==94:\n d.c2_13_35 = sheet[c].value\n elif i==95:\n d.c2_13_36 = sheet[c].value\n elif i==96:\n d.c2_13_37 = sheet[c].value\n elif i==97:\n d.c2_13_38 = sheet[c].value\n elif i==98:\n d.c2_13_39 = sheet[c].value\n elif i==99:\n d.c2_13_40 = sheet[c].value\n elif i==100:\n d.c2_13_41 = sheet[c].value\n elif i==101:\n d.c2_13_42 = sheet[c].value\n elif i==102:\n d.c2_13_43 = sheet[c].value\n elif i==103:\n d.c2_13_44 = sheet[c].value\n elif i==104:\n d.c2_13_45 = sheet[c].value\n elif i==105:\n d.c2_13_46 = sheet[c].value\n elif i==106:\n d.c2_13_47 = sheet[c].value \n elif i==107:\n d.c2_13_48 = sheet[c].value\n elif i==108:\n d.c2_13_49 = sheet[c].value\n elif i==109:\n d.c2_13_50 = sheet[c].value\n elif i==110:\n d.c2_13_51 = sheet[c].value\n elif i==111:\n d.c2_13_52 = sheet[c].value\n elif i==112:\n d.c2_13_53 = sheet[c].value\n elif i==113:\n d.c2_13_54 = sheet[c].value\n elif i==114:\n d.c2_13_55 = sheet[c].value\n elif i==115:\n d.c2_13_56 = sheet[c].value\n elif i==116:\n d.c2_13_57 = sheet[c].value\n elif i==117:\n d.c2_13_58 = sheet[c].value\n elif i==118:\n d.c2_13_59 = sheet[c].value\n elif i==119:\n d.c2_13_60 = sheet[c].value\n elif i==120:\n d.c2_13_61 = sheet[c].value\n elif i==121:\n d.c2_13_62 = sheet[c].value\n elif i==122:\n d.c2_13_63 = sheet[c].value\n elif i==123:\n d.c2_13_64 = sheet[c].value\n elif i==124:\n d.c2_13_65 = sheet[c].value\n elif i==125:\n d.c2_13_66 = sheet[c].value\n elif i==126:\n d.c2_13_67 = sheet[c].value\n elif i==127:\n d.c2_13_68 = sheet[c].value\n elif i==128:\n d.c2_13_69 = sheet[c].value\n elif i==129:\n d.c2_13_70 = sheet[c].value\n elif i==130:\n d.c2_13_71 = sheet[c].value \n elif 
i==131:\n d.c2_13_72 = sheet[c].value\n elif i==132:\n d.c2_13_73 = sheet[c].value\n elif i==133:\n d.c2_13_74 = sheet[c].value\n elif i==134:\n d.c2_13_75 = sheet[c].value\n elif i==135:\n d.c2_13_76 = sheet[c].value\n elif i==136:\n d.c2_13_77 = sheet[c].value\n elif i==137:\n d.c2_13_78 = sheet[c].value\n elif i==138:\n d.c2_13_79 = sheet[c].value\n elif i==139:\n d.c2_13_80 = sheet[c].value\n elif i==140:\n d.c2_13_81 = sheet[c].value\n elif i==141:\n d.c2_13_82 = sheet[c].value\n elif i==142:\n d.c2_13_83 = sheet[c].value\n elif i==143:\n d.c2_13_84 = sheet[c].value\n elif i==144:\n d.c2_13_85 = sheet[c].value\n elif i==145:\n d.c2_13_86 = sheet[c].value\n elif i==146:\n d.c2_13_87 = sheet[c].value\n elif i==147:\n d.c2_13_88 = sheet[c].value\n elif i==147:\n d.c2_13_89 = sheet[c].value\n elif i==148:\n d.c2_13_90 = sheet[c].value\n elif i==149:\n d.c2_13_91 = sheet[c].value\n elif i==150:\n d.c2_13_92 = sheet[c].value\n elif i==151:\n d.c2_13_93 = sheet[c].value\n elif i==152:\n d.c2_13_94 = sheet[c].value\n elif i==153:\n d.c2_13_95 = sheet[c].value \n elif i==154:\n d.c2_13_96 = sheet[c].value\n elif i==155:\n d.c2_13_97 = sheet[c].value\n elif i==156:\n d.c2_13_98 = sheet[c].value\n elif i==157:\n d.c2_13_99 = sheet[c].value\n elif i==158:\n d.c2_13_100 = sheet[c].value\n elif i==159:\n d.c2_13_101 = sheet[c].value\n elif i==160:\n d.c2_13_102= sheet[c].value\n elif i==161:\n d.c2_13_103 = sheet[c].value\n elif i==162:\n d.c2_13_104 = sheet[c].value\n elif i==163:\n d.c2_13_105 = sheet[c].value\n elif i==164:\n d.c2_13_106 = sheet[c].value\n elif i==165:\n d.c2_13_107 = sheet[c].value\n elif i==166:\n d.c2_13_108 = sheet[c].value\n elif i==167:\n d.c2_13_109 = sheet[c].value\n elif i==168:\n d.c2_13_110 = sheet[c].value\n elif i==169:\n d.c2_13_111 = sheet[c].value\n elif i==170:\n d.c2_13_112 = sheet[c].value\n elif i==171:\n d.c2_13_113 = sheet[c].value\n elif i==172:\n d.c2_13_114 = sheet[c].value\n elif i==173:\n d.c2_13_115 = sheet[c].value\n elif 
i==174:\n d.c2_13_116 = sheet[c].value\n elif i==175:\n d.c2_13_117 = sheet[c].value\n elif i==176:\n d.c2_13_118 = sheet[c].value\n elif i==177:\n d.c2_13_119 = sheet[c].value \n elif i==178:\n d.c2_13_120 = sheet[c].value\n elif i==179:\n d.c2_13_121 = sheet[c].value\n elif i==180:\n d.c2_13_122 = sheet[c].value\n elif i==181:\n d.c2_13_123 = sheet[c].value\n elif i==182:\n d.c2_13_124 = sheet[c].value\n elif i==183:\n d.c2_14 = sheet[c].value\n elif i==184:\n d.c2_14_1 = sheet[c].value\n elif i==185:\n d.c2_14_2 = sheet[c].value\n elif i==186:\n d.c2_14_3 = sheet[c].value\n elif i==187:\n d.c2_14_4 = sheet[c].value\n elif i==188:\n d.c2_14_5 = sheet[c].value\n elif i==189:\n d.c2_14_6 = sheet[c].value\n elif i==190:\n d.c2_14_7 = sheet[c].value\n elif i==191:\n d.c2_14_8 = sheet[c].value\n elif i==192:\n d.c2_14_9 = sheet[c].value\n elif i==193:\n d.c2_14_10 = sheet[c].value\n elif i==194:\n d.c2_14_11 = sheet[c].value\n elif i==195:\n d.c2_14_12 = sheet[c].value\n elif i==196:\n d.c2_14_13 = sheet[c].value\n elif i==197:\n d.c2_14_14 = sheet[c].value\n elif i==198:\n d.c2_14_15 = sheet[c].value\n elif i==199:\n d.c2_14_16 = sheet[c].value\n elif i==200:\n d.c2_14_17 = sheet[c].value\n elif i==201:\n d.c2_14_18 = sheet[c].value \n elif i==202:\n d.c2_14_19 = sheet[c].value\n elif i==203:\n d.c2_14_20 = sheet[c].value\n elif i==204:\n d.c2_14_21 = sheet[c].value\n elif i==205:\n d.c2_14_22 = sheet[c].value\n elif i==206:\n d.c2_14_23 = sheet[c].value\n elif i==207:\n d.c2_14_24 = sheet[c].value\n elif i==208:\n d.c2_14_25 = sheet[c].value\n elif i==209:\n d.c2_14_26 = sheet[c].value\n elif i==210:\n d.c2_14_27 = sheet[c].value\n elif i==211:\n d.c2_14_28 = sheet[c].value\n elif i==212:\n d.c2_14_29 = sheet[c].value\n elif i==213:\n d.c2_14_30 = sheet[c].value\n elif i==214:\n d.c2_14_31 = sheet[c].value\n elif i==215:\n d.c2_14_32 = sheet[c].value\n elif i==216:\n d.c2_14_33 = sheet[c].value\n elif i==217:\n d.c2_14_34 = sheet[c].value\n elif i==218:\n d.c2_14_35 
= sheet[c].value\n elif i==219:\n d.c2_14_36 = sheet[c].value\n elif i==220:\n d.c2_14_37 = sheet[c].value\n elif i==221:\n d.c2_14_38 = sheet[c].value\n elif i==222:\n d.c2_14_39 = sheet[c].value\n elif i==223:\n d.c2_14_40 = sheet[c].value\n elif i==224:\n d.c2_14_41 = sheet[c].value\n elif i==225:\n d.c2_14_42 = sheet[c].value \n elif i==226:\n d.c2_14_43 = sheet[c].value\n elif i==227:\n d.c2_14_44 = sheet[c].value\n elif i==228:\n d.c2_14_45 = sheet[c].value\n elif i==229:\n d.c2_15 = sheet[c].value\n elif i==230:\n d.c2_16 = sheet[c].value\n elif i==231:\n d.c2_17 = sheet[c].value\n elif i==232:\n d.c2_18 = sheet[c].value\n elif i==233:\n d.c2_19 = sheet[c].value\n elif i==234:\n d.c2_20 = sheet[c].value\n elif i==235:\n d.c2_21 = sheet[c].value\n elif i==236:\n d.c2_22 = sheet[c].value\n elif i==237:\n d.c2_23 = sheet[c].value\n elif i==238:\n d.c2_24 = sheet[c].value\n elif i==239:\n d.c2_25 = sheet[c].value\n elif i==240:\n d.c2_26 = sheet[c].value\n elif i==241:\n d.c2_27 = sheet[c].value\n elif i==242:\n d.c2_28 = sheet[c].value\n elif i==243:\n d.c3_1 = sheet[c].value\n elif i==244:\n d.c3_2 = sheet[c].value\n elif i==245:\n d.c3_3 = sheet[c].value\n elif i==246:\n d.c3_4 = sheet[c].value\n elif i==247:\n d.c3_5 = sheet[c].value\n elif i==248:\n d.c3_6 = sheet[c].value\n elif i==249:\n d.c3_7 = sheet[c].value \n elif i==250:\n d.c3_8 = sheet[c].value\n elif i==251:\n d.c3_9 = sheet[c].value\n elif i==252:\n d.c3_10 = sheet[c].value\n elif i==253:\n d.c3_11 = sheet[c].value\n elif i==254:\n d.c3_12 = sheet[c].value\n elif i==255:\n d.c3_13 = sheet[c].value\n elif i==256:\n d.c3_14 = sheet[c].value\n elif i==257:\n d.c3_15 = sheet[c].value\n elif i==258:\n d.c3_16 = sheet[c].value\n elif i==259:\n d.c3_17 = sheet[c].value\n elif i==260:\n d.c3_18 = sheet[c].value\n elif i==261:\n d.c3_19 = sheet[c].value\n elif i==262:\n d.c3_20 = sheet[c].value\n elif i==263:\n d.c3_21 = sheet[c].value\n elif i==264:\n d.c3_22 = sheet[c].value\n elif i==265:\n d.c3_23 = 
sheet[c].value\n elif i==266:\n d.c3_24 = sheet[c].value\n elif i==267:\n d.c3_25 = sheet[c].value\n elif i==268:\n d.c3_26 = sheet[c].value\n elif i==269:\n d.c3_27 = sheet[c].value\n elif i==270:\n d.c3_28 = sheet[c].value\n elif i==271:\n d.c3_29 = sheet[c].value\n elif i==272:\n d.c3_30 = sheet[c].value\n elif i==273:\n d.c3_31 = sheet[c].value \n elif i==274:\n d.c3_32 = sheet[c].value\n elif i==275:\n d.c3_33 = sheet[c].value\n elif i==276:\n d.c3_34 = sheet[c].value\n elif i==277:\n d.c3_35 = sheet[c].value\n elif i==278:\n d.c3_36 = sheet[c].value\n elif i==279:\n d.c3_37 = sheet[c].value\n elif i==280:\n d.c3_38 = sheet[c].value\n elif i==281:\n d.c4_1 = sheet[c].value\n elif i==282:\n d.c4_2 = sheet[c].value\n elif i==283:\n d.c4_3 = sheet[c].value\n elif i==284:\n d.c4_4 = sheet[c].value\n elif i==285:\n d.c4_5 = sheet[c].value\n elif i==286:\n d.c4_6 = sheet[c].value\n elif i==287:\n d.c4_7 = sheet[c].value\n elif i==288:\n d.c4_8 = sheet[c].value\n elif i==289:\n d.c4_9 = sheet[c].value\n elif i==290:\n d.c4_10 = sheet[c].value\n elif i==291:\n d.c4_11 = sheet[c].value\n elif i==292:\n d.c4_12 = sheet[c].value\n elif i==293:\n d.c4_13 = sheet[c].value\n elif i==294:\n d.c4_14 = sheet[c].value\n elif i==295:\n d.c4_15 = sheet[c].value\n\n startrow = 307 \n for i in range(0,38):\n c = \"C\" + str(startrow + i)\n for i in range(0,38):\n c = \"C\" + str(startrow + i)\n if i==0:\n d.p3_1 = sheet[c].value\n if i==1:\n d.p3_2 = sheet[c].value\n if i==2:\n d.p3_3 = sheet[c].value\n if i==3:\n d.p3_4 = sheet[c].value\n if i==4:\n d.p3_5 = sheet[c].value\n if i==5:\n d.p3_6 = sheet[c].value\n if i==6:\n d.p3_7 = sheet[c].value\n if i==7:\n d.p3_8 = sheet[c].value\n if i==8:\n d.p3_9 = sheet[c].value\n if i==9:\n d.p3_10 = sheet[c].value\n if i==10:\n d.p3_11 = sheet[c].value\n if i==11:\n d.p3_12 = sheet[c].value\n if i==12:\n d.p3_13 = sheet[c].value\n if i==13:\n d.p3_14 = sheet[c].value\n if i==14:\n d.p3_15 = sheet[c].value\n if i==15:\n d.p3_16 = 
sheet[c].value\n if i==16:\n d.p3_17 = sheet[c].value\n if i==17:\n d.p3_18 = sheet[c].value\n if i==18:\n d.p3_19 = sheet[c].value\n if i==19:\n d.p3_20 = sheet[c].value\n if i==20:\n d.p3_21 = sheet[c].value\n if i==21:\n d.p3_22 = sheet[c].value\n if i==22:\n d.p3_23 = sheet[c].value\n if i==23:\n d.p3_24 = sheet[c].value\n if i==24:\n d.p3_25 = sheet[c].value\n if i==25:\n d.p3_26 = sheet[c].value\n if i==26:\n d.p3_27 = sheet[c].value\n if i==27:\n d.p3_28 = sheet[c].value\n if i==28:\n d.p3_29 = sheet[c].value\n if i==29:\n d.p3_30 = sheet[c].value\n if i==30:\n d.p3_31 = sheet[c].value\n if i==31:\n d.p3_32 = sheet[c].value\n if i==32:\n d.p3_33 = sheet[c].value\n if i==33:\n d.p3_34 = sheet[c].value\n if i==34:\n d.p3_35 = sheet[c].value\n if i==35:\n d.p3_36 = sheet[c].value\n if i==36:\n d.p3_37 = sheet[c].value\n if i==37:\n d.p3_38 = sheet[c].value \n d.save() \n return\n\n \n","sub_path":"medicament/Form/form2.py","file_name":"form2.py","file_ext":"py","file_size_in_byte":102712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"414593779","text":"\"\"\"Test Scene for manager using pysdl2.\"\"\"\n\nimport unittest\n\nfrom dnf_game.scene_manager import Manager\nfrom dnf_game.scene_manager.scenes import base_scenes\nfrom dnf_game.scene_manager.layers import base_layers\n\n\nclass LayerMsgLogTest(unittest.TestCase):\n \"\"\"...\"\"\"\n\n def setUp(self):\n \"\"\"...\"\"\"\n print(\"\\n\", \"#\" * 30, \"\\n%s\" % __file__)\n\n def test_scene(self):\n \"\"\"...\"\"\"\n Manager(scene=SceneLayerMsgLogTest, test=True).execute()\n\n\nclass SceneLayerMsgLogTest(base_scenes.SceneMultiLayer):\n \"\"\"...\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"...\"\"\"\n super().__init__(**kwargs)\n msg_log = base_layers.MsgLog(parent=self)\n self.insert_layer(msg_log)\n msg_log.add(\"One\")\n msg_log.add(\"Two\")\n\n\nif __name__ == '__main__':\n 
unittest.main(verbosity=2)\n","sub_path":"tests/layer_msglog_test.py","file_name":"layer_msglog_test.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"425314497","text":"try:\n\tfrom PyQt5.QtGui import *\n\tfrom PyQt5.QtWidgets import *\n\tfrom PyQt5.QtCore import *\nexcept:\n\tfrom PyQt4.QtGui import *\n\tfrom PyQt4.QtCore import *\n\nfrom SQLConnection import *\n\nimport re\n\nfrom time import strftime \n\nimport smtplib\n\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\nclass NewCustomerWidget(QWidget):\n\t\"\"\"Docstring for NewCustomerWidget\"\"\"\n\tdef __init__(self, parent):\n\t\tsuper().__init__()\n\t\tself.connection = None\n\n\t\tself.parent = parent\n\n\t\tself.mainLayout = QHBoxLayout()\n\t\t\n\t\tself.leftWidget = QWidget()\n\t\tself.leftWidget.setFixedWidth(350)\n\n\t\tself.leftLayout = self.newCustomerLayout()\n\t\tself.mainLayout.setAlignment(Qt.AlignHCenter)\n\n\t\tself.leftWidget.setLayout(self.leftLayout)\n\n\t\tself.mainLayout.addWidget(self.leftWidget)\n\n\t\tself.setLayout(self.mainLayout)\n\n\tdef addConnection(self, connection):\n\t\tself.connection = connection\n\t\treturn True\n\n\n\tdef newCustomerLayout(self):\n\t\tself.addcustomerTitleText = QLabel(\"Add New customer\")\n\t\tself.addcustomerTitleText.setAlignment(Qt.AlignCenter)\n\t\tself.addcustomerTitleText.setStyleSheet(\"font-size:20px;\")\n\n\t\t\n\t\tself.counties = ['Please select...','Aberdeenshire', 'Angus', 'Argyll and Bute', 'Ayrshire', 'Ayrshire and Arran',\n\t\t\t\t 'Banffshire', 'Bedfordshire', 'Berkshire','Berwickshire', 'Buckinghamshire',\n\t\t\t\t 'Caithness', 'Cambridgeshire', 'Ceredigion', 'Cheshire', 'City of Bristol', 'City of Edinburgh',\n\t\t\t\t 'City of Glasgow', 'Clwyd', 'Cornwall', 'Cumbria', 'Denbighshire', 'Derbyshire', 'Devon', 'Dorset',\n\t\t\t\t 'Dumbartonshire', 'Dumfries','Durham', 'Dyfed', 'East Lothian', 'East 
Sussex', 'East Yorkshire', 'Essex',\n\t\t\t\t 'Ettrick and Lauderdale', 'Fife', 'Gloucestershire','Greater London', 'Greater Manchester', 'Gwent', 'Gwynedd',\n\t\t\t\t 'Hampshire', 'Herefordshire', 'Hertfordshire', 'Highlands', 'Inverness','Isle of Skye', 'Isle of Wight', 'Kent',\n\t\t\t\t 'Lanarkshire', 'Lancashire', 'Leicestershire', 'Lincolnshire', 'Merseyside', 'Mid Glamorgan','Morayshire', 'Norfolk',\n\t\t\t\t 'North Yorkshire', 'Northamptonshire', 'Northumberland', 'Nottinghamshire', 'Orkney', 'Oxfordshire', 'Perth and Kinross',\n\t\t\t\t 'Powys', 'Renfrewshire', 'Roxburgh', 'Shetland', 'Shropshire', 'Somerset', 'South Glamorgan', 'South Yorkshire', 'Staffordshire',\n\t\t\t\t 'Stirling and Falkirk', 'Suffolk', 'Surrey', 'Sutherland', 'Tweeddale', 'Tyne and Wear', 'Warwickshire', 'West Glamorgan',\n\t\t\t\t 'West Lothian', 'West Midlands', 'West Sussex', 'West Yorkshire', 'Western Isles', 'Wiltshire', 'Worcestershire']\n\n\t\tcustomerTitleLabel = QLabel('Title:*')\n\t\tcustomerFirstNameLabel = QLabel('First Name:*')\n\t\tcustomerSurnameLabel = QLabel('Surname:*')\n\t\tcustomerCompanyLabel = QLabel('Company:')\n\t\tcustomerStreetLabel = QLabel('Street:*')\n\t\tcustomerTownLabel = QLabel('Town/City:*')\n\t\tcustomerCountyLabel = QLabel('County:*')\n\t\tcustomerPostCodeLabel = QLabel('Post Code:*')\n\t\tcustomerMobileLabel = QLabel('Mobile Number:*')\n\t\tcustomerLandlineLabel = QLabel('Landline Number:*')\n\t\tcustomerEmailLabel = QLabel('Email:*')\n\t\tsmallPrint = QLabel('* required fields.')\n\t\tsmallPrint.setStyleSheet(\"font-size:11pt\")\n\n\t\tself.customerTitle = QComboBox()\n\t\tself.titles = [\"Please select...\",\"Mr\",\"Mrs\",\"Ms\",\"Miss\"]\n\t\tself.customerTitle.addItems(self.titles)\n\n\t\tj = self.customerTitle.model().index(0,0)\n\t\tself.customerTitle.model().setData(j, 0, Qt.UserRole-1)\n\n\t\tself.customerFirstName = QLineEdit()\n\t\tself.customerSurname = QLineEdit()\n\t\tself.customerStreet = QLineEdit()\n\t\tself.customerCompany = 
QLineEdit()\n\t\tself.customerTown = QLineEdit()\n\n\t\tself.customerCounty = QComboBox()\n\t\tself.customerCounty.addItems(self.counties)\n\n\t\tj = self.customerCounty.model().index(0,0)\n\t\tself.customerCounty.model().setData(j, 0, Qt.UserRole-1)\n\n\t\tself.customerPostCode = QLineEdit()\n\t\tself.customerMobile = QLineEdit()\n\t\tself.customerLandline = QLineEdit()\n\t\tself.customerEmail = QLineEdit()\n\t\t\n\t\tgrid = QGridLayout()\n\t\tgrid.setSpacing(10)\n\t\t\n\t\t\n\t\tgrid.addWidget(customerTitleLabel, 0, 0)\n\t\tgrid.addWidget(self.customerTitle, 0, 1)\n\n\t\tgrid.addWidget(customerFirstNameLabel, 1, 0)\n\t\tgrid.addWidget(self.customerFirstName, 1, 1)\n\n\t\tgrid.addWidget(customerSurnameLabel, 2, 0)\n\t\tgrid.addWidget(self.customerSurname, 2, 1)\n\n\t\tgrid.addWidget(customerCompanyLabel, 3, 0)\n\t\tgrid.addWidget(self.customerCompany, 3, 1)\t\t\n\n\t\tgrid.addWidget(customerStreetLabel, 4, 0)\n\t\tgrid.addWidget(self.customerStreet, 4, 1)\n\n\t\tgrid.addWidget(customerTownLabel, 5, 0)\n\t\tgrid.addWidget(self.customerTown, 5, 1)\n\n\t\tgrid.addWidget(customerCountyLabel, 6, 0)\n\t\tgrid.addWidget(self.customerCounty, 6, 1)\n\n\t\tgrid.addWidget(customerPostCodeLabel, 7, 0)\n\t\tgrid.addWidget(self.customerPostCode, 7, 1)\n\n\t\tgrid.addWidget(customerMobileLabel, 8, 0)\n\t\tgrid.addWidget(self.customerMobile, 8, 1)\n\n\t\tgrid.addWidget(customerLandlineLabel, 9, 0)\n\t\tgrid.addWidget(self.customerLandline, 9, 1)\n\n\t\tgrid.addWidget(customerEmailLabel, 10, 0)\n\t\tgrid.addWidget(self.customerEmail, 10, 1)\n\n\t\tgrid.addWidget(smallPrint, 11, 0)\n\n\t\tself.gridWidget = QGroupBox(\"New Customer\")\n\t\tself.gridWidget.setLayout(grid)\n\t\tself.gridWidget.setFixedHeight(400)\n\n\t\tself.verticalLayout = QVBoxLayout()\n\t\tself.verticalLayout.addWidget(self.addcustomerTitleText)\n\t\tself.verticalLayout.addStretch(1)\n\t\tself.verticalLayout.addWidget(self.gridWidget)\n\n\t\tself.cancelButton = 
QPushButton(\"Cancel\")\n\t\tself.cancelButton.setShortcut('Esc')\n\t\tself.cancelButton.setAutoDefault(False)\n\t\tself.cancelButton.setDefault(False)\n\n\t\tself.confirmButton = QPushButton(\"Confirm\")\n\t\tself.confirmButton.setShortcut('Return')\n\t\tself.confirmButton.setAutoDefault(True)\n\t\tself.confirmButton.setDefault(True)\n\n\t\tself.buttonLayout = QHBoxLayout()\n\t\tself.buttonLayout.addWidget(self.cancelButton)\n\t\tself.buttonLayout.addWidget(self.confirmButton)\n\t\tself.buttonWidget = QWidget()\n\t\tself.buttonWidget.setLayout(self.buttonLayout)\n\t\t\n\t\tself.verticalLayout.addWidget(self.buttonWidget)\n\t\tself.verticalLayout.addStretch(1)\n\n\t\t#connections\n\t\tself.customerFirstName.textChanged.connect(self.validateFirstName)\n\t\tself.customerSurname.textChanged.connect(self.validateSurname)\n\t\tself.customerCompany.textChanged.connect(self.validateCompany)\n\t\tself.customerStreet.textChanged.connect(self.validateStreet)\n\t\tself.customerTown.textChanged.connect(self.validateTown)\n\t\tself.customerPostCode.textChanged.connect(self.validatePostCode)\n\t\tself.customerEmail.textChanged.connect(self.validateEmail)\n\t\tself.customerMobile.textChanged.connect(self.validateMobile)\n\t\tself.customerLandline.textChanged.connect(self.validateUKLandline)\n\n\t\tself.confirmButton.clicked.connect(self.switchToPreviewWidget)\n\n\t\treturn self.verticalLayout\n\n\tdef previewNewCustomer(self):\n\t\tself.previewCustomerTitleText = QLabel(\"Preview New Customer\")\n\t\tself.previewCustomerTitleText.setAlignment(Qt.AlignCenter)\n\t\tself.previewCustomerTitleText.setStyleSheet(\"font-size:20px;\")\n\n\t\tcustomerTitleLabel = QLabel('Title:')\n\t\tcustomerFirstNameLabel = QLabel('First Name:')\n\t\tcustomerSurnameLabel = QLabel('Surname:')\n\t\tcustomerCompanyLabel = QLabel('Company:')\n\t\tcustomerStreetLabel = QLabel('Street:')\n\t\tcustomerTownLabel = QLabel('Town/City:')\n\t\tcustomerCountyLabel = QLabel('County:')\n\t\tcustomerPostCodeLabel = 
QLabel('Post Code:')\n\t\tcustomerMobileLabel = QLabel('Mobile Number:')\n\t\tcustomerLandlineLabel = QLabel('Landline Number:')\n\t\tcustomerEmailLabel = QLabel('Email:')\n\n\t\tTitle = self.customerTitle.currentText()\n\t\tFirstname = self.customerFirstName.text()\n\t\tSurname = self.customerSurname.text()\n\t\tCompany = self.customerCompany.text()\n\t\tStreet = self.customerStreet.text()\n\t\tTown = self.customerTown.text()\n\t\tCounty = self.customerCounty.currentText()\n\t\tPostCode = self.customerPostCode.text()\n\t\tMobile = self.customerMobile.text()\n\t\tLandline = self.customerLandline.text()\n\t\tEmail = self.customerEmail.text()\n\n\n\t\tif Title == 'Please select...':\n\t\t\tself.previewCustomerTitle = QLabel(\"\")\n\t\telse:\n\t\t\tself.previewCustomerTitle = QLabel(\"{0}\".format(Title))\n\n\t\tself.previewCustomerFirstName = QLabel('{0}'.format(Firstname))\n\t\tself.previewCustomerSurname = QLabel('{0}'.format(Surname))\n\t\tself.previewCustomerCompany = QLabel('{0}'.format(Company))\n\t\tself.previewCustomerStreet = QLabel('{0}'.format(Street))\n\t\tself.previewCustomerTown = QLabel('{0}'.format(Town))\n\n\t\tif County == 'Please select...':\n\t\t\tself.previewCustomerCounty = QLabel(\"\")\n\t\telse:\n\t\t\tself.previewCustomerCounty = QLabel(\"{0}\".format(County))\n\n\t\tself.previewCustomerPostCode = QLabel('{0}'.format(PostCode))\n\t\tself.previewCustomerMobile = QLabel('{0}'.format(Mobile))\n\t\tself.previewCustomerLandline = QLabel('{0}'.format(Landline))\n\t\tself.previewCustomerEmail = QLabel('{0}'.format(Email))\n\n\t\tgrid = QGridLayout()\n\n\t\tgrid.addWidget(customerTitleLabel, 0, 0)\n\t\tgrid.addWidget(self.previewCustomerTitle, 0, 1)\n\n\t\tgrid.addWidget(customerFirstNameLabel, 1, 0)\n\t\tgrid.addWidget(self.previewCustomerFirstName, 1, 1)\n\n\t\tgrid.addWidget(customerSurnameLabel, 2, 0)\n\t\tgrid.addWidget(self.previewCustomerSurname, 2, 1)\n\n\t\tgrid.addWidget(customerCompanyLabel, 3, 
0)\n\t\tgrid.addWidget(self.previewCustomerCompany, 3, 1)\t\t\n\n\t\tgrid.addWidget(customerStreetLabel, 4, 0)\n\t\tgrid.addWidget(self.previewCustomerStreet, 4, 1)\n\n\t\tgrid.addWidget(customerTownLabel, 5, 0)\n\t\tgrid.addWidget(self.previewCustomerTown, 5, 1)\n\n\t\tgrid.addWidget(customerCountyLabel, 6, 0)\n\t\tgrid.addWidget(self.previewCustomerCounty, 6, 1)\n\n\t\tgrid.addWidget(customerPostCodeLabel, 7, 0)\n\t\tgrid.addWidget(self.previewCustomerPostCode, 7, 1)\n\n\t\tgrid.addWidget(customerMobileLabel, 8, 0)\n\t\tgrid.addWidget(self.previewCustomerMobile, 8, 1)\n\n\t\tgrid.addWidget(customerLandlineLabel, 9, 0)\n\t\tgrid.addWidget(self.previewCustomerLandline, 9, 1)\n\n\t\tgrid.addWidget(customerEmailLabel, 10, 0)\n\t\tgrid.addWidget(self.previewCustomerEmail, 10, 1)\n\n\t\tself.gridWidget = QGroupBox(\"Preview\")\n\t\tself.gridWidget.setLayout(grid)\n\t\tself.gridWidget.setFixedHeight(400)\n\n\t\tself.previewVerticalLayout = QVBoxLayout()\n\t\tself.previewVerticalLayout.addWidget(self.previewCustomerTitleText)\n\t\tself.previewVerticalLayout.addStretch(1)\n\t\tself.previewVerticalLayout.addWidget(self.gridWidget)\n\n\t\tself.editButton = QPushButton(\"Edit\")\n\t\tself.editButton.setShortcut('Esc')\n\t\tself.editButton.setAutoDefault(False)\n\t\tself.editButton.setDefault(False)\n\n\t\tself.addButton = QPushButton(\"Add\")\n\t\tself.addButton.setShortcut('Return')\n\t\tself.addButton.setAutoDefault(True)\n\t\tself.addButton.setDefault(True)\n\n\t\tself.buttomLayout = QHBoxLayout()\n\t\tself.buttomLayout.addWidget(self.editButton)\n\t\tself.buttomLayout.addWidget(self.addButton)\n\t\tself.buttonWodget = 
QWidget()\n\t\tself.buttonWodget.setLayout(self.buttomLayout)\n\t\tself.buttonWodget.setFixedHeight(100)\n\t\t\n\t\tself.previewVerticalLayout.addWidget(self.buttonWodget)\n\t\tself.previewVerticalLayout.addStretch(1)\n\n\t\t#connections\n\t\tself.editButton.clicked.connect(self.editEntry)\n\t\tself.addButton.clicked.connect(self.validateAddcustomerForm)\n\n\t\treturn self.previewVerticalLayout\n\n\tdef editEntry(self):\n\t\tself.rightWidget.setEnabled(False)\n\t\tself.leftWidget.setEnabled(True)\n\n\tdef switchToPreviewWidget(self):\n\t\t#disable editing widget\n\t\tself.leftWidget.setEnabled(False)\n\n\t\t#reshresh preview widget\n\t\tif hasattr(self, 'rightWidget'):\n\t\t\tself.rightWidget.close()\n\t\t\tself.rightWidget = QWidget()\n\t\t\tself.rightWidget.setFixedWidth(350)\n\t\t\tself.mainLayout.addWidget(self.rightWidget)\n\n\t\t#set preview widget layout\n\t\tself.rightLayout = self.previewNewCustomer()\n\t\tself.rightWidget.setLayout(self.rightLayout)\n\n\tdef clearForm(self):\n\t\t\n\t\tself.customerTitle.setCurrentIndex(0)\n\t\tself.customerFirstName.clear()\n\t\tself.customerSurname.clear()\n\t\tself.customerCompany.clear()\n\t\tself.customerStreet.clear()\n\t\tself.customerTown.clear()\n\t\tself.customerCounty.setCurrentIndex(0)\n\t\tself.customerPostCode.clear()\n\t\tself.customerMobile.clear()\n\t\tself.customerLandline.clear()\n\t\tself.customerEmail.clear()\n\n\t\tself.customerFirstName.setStyleSheet(\"background-color:#FFF;\")\n\t\tself.customerSurname.setStyleSheet(\"background-color:#FFF;\")\n\t\tself.customerCompany.setStyleSheet(\"background-color:#FFF;\")\n\t\tself.customerStreet.setStyleSheet(\"background-color:#FFF;\")\n\t\tself.customerTown.setStyleSheet(\"background-color:#FFF;\")\n\t\tself.customerPostCode.setStyleSheet(\"background-color:#FFF;\")\n\t\tself.customerMobile.setStyleSheet(\"background-color:#FFF;\")\n\t\tself.customerLandline.setStyleSheet(\"background-color:#FFF;\")\n\t\tself.customerEmail.setStyleSheet(\"background-color:#F
FF;\")\n\n\t\tif hasattr(self, 'rightWidget'):\n\t\t\tself.rightWidget.close()\n\n\t\tself.rightWidget = QWidget()\n\t\tself.rightWidget.setFixedWidth(350)\n\t\tself.rightLayout = self.previewNewCustomer()\n\t\tself.rightWidget.setLayout(self.rightLayout)\n\n\t\tself.mainLayout.addWidget(self.rightWidget)\n\n\t\tself.rightWidget.setEnabled(False)\n\t\tself.leftWidget.setEnabled(True)\n\n\tdef validateTitle(self):\n\t\ttext = self.customerTitle.currentText()\n\n\t\tif text == \"Please select...\":\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True\n\n\tdef validateFirstName(self):\n\t\t\t\ttext = self.customerFirstName.text()\n\t\t\t\tlength = len(text)\n\n\t\t\t\tif length > 2:\n\t\t\t\t\t\t\t\tself.customerFirstName.setStyleSheet(\"background-color:#3CC046;\")\n\t\t\t\t\t\t\t\treturn True\n\t\t\t\telse:\n\t\t\t\t\t\t\t\tself.customerFirstName.setStyleSheet(\"background-color:#FF6166;\")\n\t\t\t\t\t\t\t\treturn False\n\n\tdef validateSurname(self):\n\t\t\ttext = self.customerSurname.text()\n\t\t\tlength = len(text)\n\n\t\t\tif length > 2:\n\t\t\t\t\t\t\tself.customerSurname.setStyleSheet(\"background-color:#3CC046;\")\n\t\t\t\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\t\t\t\tself.customerSurname.setStyleSheet(\"background-color:#FF6166;\")\n\t\t\t\t\t\t\treturn False\n\n\tdef validateCompany(self):\n\t\t\ttext = self.customerCompany.text()\n\t\t\tlength = len(text)\n\n\t\t\tif text == '-' or text =='':\n\t\t\t\t\t\t\treturn True\n\n\tdef validateStreet(self):\n\t\t\ttext = self.customerStreet.text()\n\t\t\tlength = len(text)\n\n\t\t\tif length > 5:\n\t\t\t\t\t\t\tself.customerStreet.setStyleSheet(\"background-color:#3CC046;\")\n\t\t\t\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\t\t\t\tself.customerStreet.setStyleSheet(\"background-color:#FF6166;\")\n\t\t\t\t\t\t\treturn False\n\n\n\tdef validateTown(self): \n\t\t\ttext = self.customerTown.text()\n\t\t\tlength = len(text)\n\n\t\t\tif length > 
3:\n\t\t\t\t\t\t\tself.customerTown.setStyleSheet(\"background-color:#3CC046;\")\n\t\t\t\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\t\t\t\tself.customerTown.setStyleSheet(\"background-color:#FF6166;\")\n\t\t\t\t\t\t\treturn False\n\n\tdef validateCounty(self):\n\t\t\ttext = self.customerCounty.currentText()\n\n\t\t\tif text == \"Please select...\":\n\t\t\t\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\t\t\t\treturn True\n\n\n\tdef validatePostCode(self):\n\t\n\t\ttext = self.customerPostCode.text()\n\n\t\tpostCodeRegEx = re.compile(\"^[A-Z]{1,2}[0-9][0-9A-Z]?\\s?[0-9][A-Z]{2}$\")\n\t\t# Source: http://stackoverflow.com/questions/164979/uk-postcode-regex-comprehensive\n\n\t\tmatch = postCodeRegEx.match(text.upper())\n\n\t\tif match:\n\t\t\tself.customerPostCode.setStyleSheet(\"background-color:#3CC046;\")\n\t\t\treturn True\n\t\telse:\n\t\t\tself.customerPostCode.setStyleSheet(\"background-color:#FF6166;\")\n\t\t\treturn False\n\n\tdef validateMobile(self):\n\t\ttext = self.customerMobile.text()\n\t\tlength = len(text)\n\n\t\tno_letters = re.search('^[a-z],[A-z]*$', text)\n\t\tvalid_mobile = re.search('^(((\\+44\\s?\\d{4}|\\(?0\\d{4}\\)?)\\s?\\d{3}\\s?\\d{3})|((\\+44\\s?\\d{3}|\\(?0\\d{3}\\)?)\\s?\\d{3}\\s?\\d{4})|((\\+44\\s?\\d{2}|\\(?0\\d{2}\\)?)\\s?\\d{4}\\s?\\d{4}))(\\s?\\#(\\d{4}|\\d{3}))?$', text)\n\t\t# Source: http://regexlib.com/UserPatterns.aspx?authorid=d95177b0-6014-4e73-a959-73f1663ae814\n\t\tif not no_letters and valid_mobile:\n\t\t\tvalid = True\n\t\telse:\n\t\t\tvalid = False\n\n\t\tif length >= 11 and valid == True:\n\t\t\tself.customerMobile.setStyleSheet(\"background-color:#3CC046;\")\n\t\t\treturn True\n\t\telse:\n\t\t\tself.customerMobile.setStyleSheet(\"background-color:#FF6166;\")\n\t\t\treturn False\n\n\n\tdef validateUKLandline(self):\n\t\ttext = self.customerLandline.text()\n\t\tlength = len(text)\n\n\t\tno_letters = re.search('^[a-z],[A-z]*$', text)\n\t\tvalid_landline_number = 
re.search('^((\\(?0\\d{4}\\)?\\s?\\d{3}\\s?\\d{3})|(\\(?0\\d{3}\\)?\\s?\\d{3}\\s?\\d{4})|(\\(?0\\d{2}\\)?\\s?\\d{4}\\s?\\d{4}))(\\s?\\#(\\d{4}|\\d{3}))?$', text)\n\t\t# Source: http://regexlib.com/UserPatterns.aspx?authorid=d95177b0-6014-4e73-a959-73f1663ae814\n\t\tif not no_letters and valid_landline_number:\n\t\t\tvalid = True\n\t\telse:\n\t\t\tvalid = False\n\n\t\tif length >= 11 and valid == True:\n\t\t\tself.customerLandline.setStyleSheet(\"background-color:#3CC046;\")\n\t\t\treturn True\n\t\telse:\n\t\t\tself.customerLandline.setStyleSheet(\"background-color:#FF6166;\")\n\t\t\treturn False\n\n\tdef validateEmail(self):\n\t\ttext = self.customerEmail.text()\n\n\t\temailRegEx = re.compile(\"^([a-zA-Z0-9_\\-\\.]+)@((\\[[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.)|(([a-zA-Z0-9\\-]+\\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\\]?)$\")\n\t\t# Source: http://regexlib.com/REDetails.aspx?regexp_id=26\n\n\t\tmatch = emailRegEx.match(text)\n\n\t\tif match:\n\t\t\tself.customerEmail.setStyleSheet(\"background-color:#3CC046;\")\n\t\t\treturn True\n\t\telse:\n\t\t\tself.customerEmail.setStyleSheet(\"background-color:#FF6166;\")\n\t\t\treturn False\n\n\tdef validateAddcustomerForm(self):\n\n\t\tcheckTitle = self.validateTitle()\n\t\tcheckFirstName = self.validateFirstName()\n\t\tcheckSurname = self.validateSurname()\n\t\tcheckCompany = self.validateCompany()\n\t\tcheckStreet = self.validateStreet()\n\t\tcheckTown = self.validateTown()\n\t\tcheckCounty = self.validateCounty()\n\t\tcheckPostCode = self.validatePostCode()\n\t\tcheckMobile = self.validateMobile()\n\t\tcheckLandline = self.validateUKLandline()\n\t\tcheckEmail = self.validateEmail()\n\n\t\terror_count = 0\n\n\t\tif checkTitle == False:\n\t\t\terror_count += 1\n\t\tif checkFirstName == False:\n\t\t\terror_count += 1\n\t\tif checkSurname == False:\n\t\t\terror_count += 1\n\t\tif checkCompany == False:\n\t\t\terror_count += 1\n\t\tif checkStreet == False:\n\t\t\terror_count += 1\n\t\tif checkTown == False:\n\t\t\terror_count += 
1\n\t\tif checkCounty == False:\n\t\t\terror_count += 1\n\t\tif checkPostCode == False:\n\t\t\terror_count += 1\n\t\tif checkMobile == False:\n\t\t\terror_count += 1\n\t\tif checkLandline == False:\n\t\t\terror_count += 1\n\t\tif checkEmail == False:\n\t\t\terror_count += 1\n\n\t\tif error_count > 0:\n\t\t\tself.error_message_dialog = QMessageBox()\n\t\t\tself.error_message_dialog.setFixedWidth(200)\n\t\t\tself.error_message_dialog.setWindowTitle(\"Input Error\")\n\t\t\tself.error_message_dialog.setText(\"Error! Some data entered is invalid \\n\"\n\t\t\t\t\t\t\t\t\t\t\t \"\\n\"\n\t\t\t\t\t\t\t\t\t\t\t \"Click the 'Show details' button for more information\")\n\t\t\tself.error_message_dialog.setDetailedText(\"The information entered is invalid \\n\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"Steps to take: \\n\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t\"\\n\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t\" 1. Make sure that valid post-codes and numbers are \\n\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t\" entered into the required fields. \\n\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t\" 2. The drop-down menus should NOT have \\n\"\n\t\t\t\t\t\t\t\t\t\t\t\t\t\" 'Please select...' as an option for data input. \\n\")\n\t\t\tself.error_message_dialog.setIcon(QMessageBox.Warning)\n\t\t\tself.okay_button = self.error_message_dialog.addButton(self.tr(\"Okay\"), QMessageBox.AcceptRole)\n\t\t\tself.error_message_dialog.setEscapeButton(self.okay_button)\n\t\t\tself.error_message_dialog.setDefaultButton(self.okay_button)\n\t\t\tself.okay_button.clicked.connect(self.editEntry)\n\t\t\tself.error_message_dialog.exec_()\n\t\telse: \n\t\t\tself.addCustomerToDatabase()\n\t\t\n\n\tdef addCustomerToDatabase(self):\n\n\t\tcounty = str(self.customerCounty.currentText())\n\t\ttitle = str(self.customerTitle.currentText())\n\n\t\tif self.customerCompany.text() == \"\":\n\t\t\tself.customerCompany == \"-\"\n\t\telse:\n\t\t\tself.customerCompany == self.customerCompany.text()\n\n\t\tself.record = \"{0}. 
{1} {2} {3}\".format(self.customerTitle.currentText(), self.customerFirstName.text(), self.customerSurname.text(), self.customerCompany.text())\n\n\t\tvalues = { \"Title\": title,\n\t\t\t\t \"FirstName\": self.customerFirstName.text(),\n\t\t\t\t \"LastName\": self.customerSurname.text(),\n\t\t\t\t \"Company\": self.customerCompany.text(),\n\t\t\t\t \"Street\": self.customerStreet.text(),\n\t\t\t\t \"Town\": self.customerTown.text(),\n\t\t\t\t \"County\": county,\n\t\t\t\t \"PostCode\": self.customerPostCode.text(),\n\t\t\t\t \"Mobile\": self.customerMobile.text(),\n\t\t\t\t \"Landline\": self.customerLandline.text(),\n\t\t\t\t \"Email\": self.customerEmail.text()}\n\n\t\tcustomerAdded = self.connection.addCustomer(values, self)\n\n\t\tif not customerAdded:\n\t\t\tself.error_message_dialog = QMessageBox()\n\t\t\tself.error_message_dialog.setFixedWidth(200)\n\t\t\tself.error_message_dialog.setWindowTitle(\"Input Error\")\n\t\t\tself.error_message_dialog.setText(\"Error! Failed to commit to database\\n\"\n\t\t\t\t\t\t\t\t\t\t\t \"\\n\"\n\t\t\t\t\t\t\t\t\t\t\t \"Click the 'Show details' button for more information\")\n\t\t\tself.error_message_dialog.setDetailedText(\"Database Error:\\n \\n \"\n\t\t\t\t\t\t\t\t\t\t\t \"{0}\".format(self.connection.error))\n\t\t\tself.error_message_dialog.setIcon(QMessageBox.Warning)\n\t\t\tself.okay_button = self.error_message_dialog.addButton(self.parent.tr(\"Okay\"), QMessageBox.AcceptRole)\n\t\t\tself.error_message_dialog.setEscapeButton(self.okay_button)\n\t\t\tself.error_message_dialog.setDefaultButton(self.okay_button)\n\t\t\tself.okay_button.clicked.connect(self.editEntry)\n\t\t\tself.error_message_dialog.exec_()\n\t\t\tself.parent.statusBar.showMessage(\"Customer {0} {1} unsuccessfully added to the database\".format(values[\"FirstName\"],values[\"LastName\"]))\n\t\t\tself.parent.switchToMainMenu()\n\t\telse:\n\t\t\tself.mssg = QMessageBox()\n\t\t\tself.mssg.setFixedWidth(200)\n\t\t\tself.mssg.setWindowTitle(\"Customer 
Added\")\n\t\t\tself.mssg.setText(\"Success! Record added for {0} {1}\".format(self.customerFirstName.text(), self.customerSurname.text()))\n\t\t\tself.mssg.setIcon(QMessageBox.Information)\n\t\t\tself.okay_button = self.mssg.addButton(self.parent.tr(\"Okay\"), QMessageBox.AcceptRole)\n\t\t\tself.mssg.setEscapeButton(self.okay_button)\n\t\t\tself.mssg.setDefaultButton(self.okay_button)\n\t\t\tself.mssg.exec_()\n\t\t\tself.parent.statusBar.showMessage(\"Customer {0} {1} successfully added to the database\".format(values[\"FirstName\"],values[\"LastName\"]))\n\t\t\tself.emaiLClient()\n\t\t\tself.clearForm()\n\t\t\tself.parent.switchToMainMenu()\n\n\n\tdef emaiLClient(self):\n\t\tpassword = self.connection.getEmailPassword()\n\t\t\n\t\tfromaddr = 'joelbutcher97@icloud.com'\n\t\ttoaddrs = 'inventory@cthree.org'\n\t\tmsg = \"\\r\\n\".join([\n\t\t\t \"From: joelbutcher97@icloud.com\",\n\t\t\t \"To: inventory@cthree.org\",\n\t\t\t \"Subject: {0} was Added to the Database\".format(self.record),\n\t\t\t \"A customer was added to the database. 
\\n \\nDetails:{0} \\n \\nDate: {1} \\nTime:{2}\".format(self.record, strftime(\"%Y-%m-%d\"), strftime(\"%H:%M:%S\")),\n\t\t\t \"\\n\"\n\t\t\t \"\\n\"\n\t\t\t \"This Email was sent as a precaution, if you or anyone you know of make this alteration, \\n please change the password for the system immediately.\"\n\t\t\t \"\\n\"\n\t\t\t \"Thank You\"\n\t\t\t ])\n\t\tusername = 'joelbutcher97@icloud.com'\n\t\tpassword = '{0}'.format(password)\n\t\tserver = smtplib.SMTP('smtp.mail.me.com:587')\n\t\tserver.ehlo()\n\t\tserver.starttls()\n\t\tserver.login(username,password)\n\t\tserver.sendmail(fromaddr, toaddrs, msg)\n\t\tserver.quit()\n","sub_path":"Implementation/GUI/NewCustomerWidget.py","file_name":"NewCustomerWidget.py","file_ext":"py","file_size_in_byte":22457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"97692934","text":"from datetime import datetime\nfrom dateutil.parser import parse\nfrom datetime import timedelta\nimport plotly.graph_objects as go\nimport plotly as py\nimport numpy as np\n\n\n\nclass PatternMatcher:\n def __init__(self, sharkData):\n self.sharkData = sharkData\n self.sharkList = []\n self.sharkTime = []\n self.SYN = []\n self.SYN_ACK = []\n self.ACK = []\n self.PSH_ACK = []\n self.FIN_ACK = []\n self.Connect = []\n self.BtwPsh = []\n self.Fin = []\n self.trace1 = go.Scatter()\n self.trace2 = go.Scatter()\n self.trace3 = go.Scatter()\n\n\n def cleanLists(self):\n with open(self.sharkData, \"r\") as f:\n for line in f:\n l = line.split(\",\")\n self.sharkList.append(l)\n del self.sharkList[0]\n\n\n def tcpFlagCollect(self):\n #self.sharkList within the lists index 9 is the tcp flag hexes\n for e in self.sharkList:\n #self.sharkTime.append(e[8][e[8].find(\"2019 \") + 5: e[8].find(\"EST\") - 1])\n\n if e[9] == '0x00000002\\n': \n self.SYN.append((\"SYN\", e[2], e[3], e[8]))\n if e[9] == '0x00000012\\n': \n self.SYN_ACK.append((\"SYN_ACK\", e[2], e[3], e[8]))\n if e[9] == '0x00000010\\n': \n 
self.ACK.append((\"ACK\", e[2], e[3], e[8]))\n if e[9] == '0x00000018\\n': \n self.PSH_ACK.append((\"PSH_ACK\", e[2], e[3], e[8]))\n if e[9] == '0x00000011\\n': \n self.FIN_ACK.append((\"FIN_ACK\", e[2], e[3], e[8]))\n\n #test.append(e[9])\n def Handshake(self):\n #Makes sure the whole syn, syn ack, ack process is in the data before adding\n TCP_CONNECT = []\n for i in self.SYN:\n for j in self.SYN_ACK:\n if j[1] == i[2] and j[2] == i[1]:\n for k in self.ACK:\n if k[1] == i[1] and k[2] == i[2]:\n TCP_CONNECT.append([i, j, k])\n\n #print(TCP_CONNECT)\n for i in TCP_CONNECT:\n t1 = i[0][3][i[0][3].find(\"2020 \") + 5: i[0][3].find(\"EST\") - 1]\n t2 = i[2][3][i[2][3].find(\"2020 \") + 5: i[2][3].find(\"EST\") - 1]\n t1 = parse(t1)\n t2 = parse(t2) #this makes things 2020 but whatever\n tdelta = t2 - t1\n if t2 >= t1: #should be anyway\n self.Connect.append(tdelta.total_seconds())\n tsum = sum(self.Connect)\n length = len(self.Connect)\n avg = tsum / length\n print(avg)\n\n def BetweenShakeAndPsh(self):\n \n TCP_CONNECT = []\n for i in self.ACK:\n for j in self.PSH_ACK:\n if i[1] == j[1] and i[2] == j[2]:\n TCP_CONNECT.append([i,j])\n\n #print(TCP_CONNECT)\n for i in TCP_CONNECT:\n t1 = i[0][3][i[0][3].find(\"2020 \") + 5: i[0][3].find(\"EST\") - 1]\n t2 = i[1][3][i[1][3].find(\"2020 \") + 5: i[1][3].find(\"EST\") - 1]\n t1 = parse(t1)\n t2 = parse(t2) #this makes things 2020 but whatever\n tdelta = t2 - t1\n if t2 >= t1: #should be anyway\n self.BtwPsh.append(tdelta.total_seconds())\n tsum = sum(self.BtwPsh)\n length = len(self.BtwPsh)\n avg = tsum / length\n print(avg)\n\n def Close(self):\n\n TCP_CONNECT = []\n for i in self.FIN_ACK:\n for j in self.FIN_ACK:\n if i[1] == j[2] and i[2] == j[1]:\n TCP_CONNECT.append([i,j])\n\n #print(TCP_CONNECT)\n for i in TCP_CONNECT:\n t1 = i[0][3][i[0][3].find(\"2020 \") + 5: i[0][3].find(\"EST\") - 1]\n t2 = i[1][3][i[1][3].find(\"2020 \") + 5: i[1][3].find(\"EST\") - 1]\n t1 = parse(t1)\n t2 = parse(t2) #this makes things 2020 
but whatever\n tdelta = t2 - t1\n if t2 >= t1: #should be anyway\n self.Fin.append(tdelta.total_seconds())\n tsum = sum(self.Fin)\n length = len(self.Fin)\n avg = tsum / length\n print(avg)\n \n def plotData(self):\n\n layout1 = go.Layout(\n title = 'Duration of Handshake',\n xaxis = dict(\n title = 'Time (seconds)'\n ),\n yaxis = dict(\n title = 'Frequency'\n ),\n )\n\n layout2 = go.Layout(\n title = 'Time Between Handshake and Push',\n xaxis = dict(\n title = 'Time (seconds)'\n ),\n yaxis = dict(\n title = 'Frequency'\n ),\n )\n\n layout3 = go.Layout(\n title = 'Duration to Close',\n xaxis = dict(\n title = 'Time (seconds)'\n ),\n yaxis = dict(\n title = 'Frequency'\n ),\n )\n fig1 = go.Figure(data=[go.Histogram(x=self.Connect)], layout = layout1)\n fig2 = go.Figure(data=[go.Histogram(x=self.BtwPsh)], layout = layout2)\n fig3 = go.Figure(data=[go.Histogram(x=self.Fin)], layout = layout3)\n \n\n py.offline.plot(fig1, filename='Handshake')\n py.offline.plot(fig2, filename='Between')\n py.offline.plot(fig3, filename='Close')\n\n def handshakeCDF(self):\n\n #sort data\n sortedConnect = np.sort(self.Connect)\n #calculate the proportional values of samples\n pConnect = 1. * np.arange(len(self.Connect)) / (len(self.Connect) - 1)\n \n self.trace1 = go.Scatter(\n x = sortedConnect,\n y = pConnect,\n mode = 'lines+markers',\n name = 'Handshake CDF',\n marker=dict(\n size = 8,\n ),\n line = dict(\n dash = 'dash'\n )\n ) \n\n layout = go.Layout(\n title = 'Handshake CDF',\n xaxis = dict(\n title = 'Values'\n ),\n yaxis = dict(\n title = 'Probability'\n ),\n )\n\n fig = go.Figure(data=[self.trace1], layout = layout)\n py.offline.plot(fig, filename='Handshake CDF')\n\n\n def betweenCDF(self):\n\n #sort data\n sortedBtw = np.sort(self.BtwPsh)\n #calculate the proportional values of samples\n pBtw = 1. 
* np.arange(len(self.BtwPsh)) / (len(self.BtwPsh) - 1)\n \n self.trace2 = go.Scatter(\n x = sortedBtw,\n y = pBtw,\n mode = 'lines+markers',\n name = 'Between Shake and Push CDF',\n marker=dict(\n size = 8,\n ),\n line = dict(\n dash = 'dash'\n )\n ) \n\n layout = go.Layout(\n title = 'Between Shake and Push CDF',\n xaxis = dict(\n title = 'Values'\n ),\n yaxis = dict(\n title = 'Probability'\n ),\n )\n\n fig = go.Figure(data=[self.trace2], layout = layout)\n py.offline.plot(fig, filename='Between Shake and Push CDF')\n\n\n def finCDF(self):\n\n #sort data\n sortedFin = np.sort(self.Fin)\n #calculate the proportional values of samples\n pFin = 1. * np.arange(len(self.Fin)) / (len(self.Fin) - 1)\n \n self.trace3 = go.Scatter(\n x = sortedFin,\n y = pFin,\n mode = 'lines+markers',\n name = 'Finish CDF',\n marker=dict(\n size = 8,\n ),\n line = dict(\n dash = 'dash'\n )\n ) \n\n layout = go.Layout(\n title = 'Finish CDF',\n xaxis = dict(\n title = 'Values'\n ),\n yaxis = dict(\n title = 'Probability'\n ),\n )\n fig = go.Figure(data=[self.trace3], layout = layout)\n py.offline.plot(fig, filename='Finish CDF')\n \n \n\nif __name__ == '__main__':\n print(\"Welcome to the raspi network plotter\")\n matcherArr = []\n sharkFile = input(\"Please enter tsharkData csv\\n\")\n pm = PatternMatcher(sharkFile)\n # sharkFile = input(\"Please enter tsharkData csv\\n\")\n # pm2 = PatternMatcher(sharkFile)\n # sharkFile = input(\"Please enter tsharkData csv\\n\")\n # pm3 = PatternMatcher(sharkFile)\n # sharkFile = input(\"Please enter tsharkData csv\\n\")\n # pm4 = PatternMatcher(sharkFile)\n\n matcherArr.append(pm)\n # matcherArr.append(pm2)\n # matcherArr.append(pm3)\n # matcherArr.append(pm4)\n \n for i in matcherArr:\n i.cleanLists()\n i.tcpFlagCollect()\n i.Handshake()\n i.BetweenShakeAndPsh()\n i.Close()\n i.plotData()\n# i.handshakeCDF()\n# i.betweenCDF()\n# i.finCDF()\n\n exit()\n\n layout = go.Layout(\n title = 'Handshake CDF',\n xaxis = dict(\n title = 'Values',\n range = 
[0, 3]\n ),\n yaxis = dict(\n title = 'Probability'\n ),\n )\n fig = go.Figure()\n for i in matcherArr:\n fig.add_trace(i.trace1)\n fig.update_layout(layout) \n #fig = go.Figure(data=[self.trace3], layout = layout)\n py.offline.plot(fig, filename='Joint Handshake CDF')\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"tcpPatterns/scripts/trafficMatcher.py","file_name":"trafficMatcher.py","file_ext":"py","file_size_in_byte":9220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"198770861","text":"#실습1 윤년 판단 프로그램\r\n\r\nwhile True :\r\n year=int(input(\"연도를 입력하세요:\"))\r\n \r\n #윤년 조건\r\n condition= (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0)\r\n\r\n #0을 누르면 프로그램 종료\r\n if(year==0):\r\n print(\"프로그램 종료\")\r\n break\r\n\r\n elif (year>0) and (condition):\r\n print(year, \"은 윤년입니다.\")\r\n \r\n elif (year>0) and (not condition):\r\n print(year, \"은 윤년이 아닙니다.\")\r\n \r\n else:\r\n print(\"잘못된 년도 입력입니다.\")\r\n \r\n","sub_path":"python_4day.py","file_name":"python_4day.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"572927560","text":"from tkinter import *\nimport cryptifier\nfrom collections import Counter\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n'''This program requires the matplotlib and numpy modules to be installed.'''\n\nclass encryptionGui:\n def __init__(self, master):\n # configuration\n master.configure(bg='peach puff')\n master.title('Cryptography Cave')\n master.option_add('*Font', 'Georgia 12') # font for all widgets\n master.option_add('*Background', 'honeydew3') # background of all\n master.option_add('*Label.Font', 'helvetica 14') # font for all\n master.geometry('800x500+980+180') # w,h,x,y (top left corner)\n #\n master.iconbitmap(r'padlock.ico')\n #\n # frame1 - header area\n #\n frame1 = Frame(master, bg='gray15')\n frame1.pack(fill='both', expand=True) # fills window\n #\n # frame2 - 
user input area\n #\n frame2 = Frame(master)\n #\n frame2.pack(fill='both', expand=True)\n #\n # frame 2a (subframe for frame 2)\n #\n frame2a = Frame(frame2, relief=GROOVE, borderwidth=1, bg='honeydew3')\n frame2a.pack(side=LEFT, fill=Y) # fill Y makes frames same height\n #\n label_2a_heading = Label(frame2a)\n label_2a_heading.config(borderwidth=0, text='Choose a Cipher:', bg='honeydew3')\n label_2a_heading.pack(padx=10, pady=10)\n #\n self.radio_2a_choice = StringVar(master) # variable to store radio button choice\n self.radio_2a_choice.set('Caesar') # sets to default value\n radio_2a_1 = Radiobutton(frame2a, text='Caesar', variable=self.radio_2a_choice, value='Caesar', bg='honeydew3')\n radio_2a_2 = Radiobutton(frame2a, text='Vernam', variable=self.radio_2a_choice, value='Vernam', bg='honeydew3')\n radio_2a_1.pack(anchor=W, padx=10, pady=10)\n radio_2a_2.pack(anchor=W, padx=10, pady=10)\n #\n # frame 2b \n #\n frame2b = Frame(frame2, relief=GROOVE, borderwidth=1, bg='honeydew3')\n frame2b.pack(side=LEFT, fill=Y)\n #\n label_2b_heading = Label(frame2b)\n label_2b_heading.config(borderwidth=0, text='Shift Number:', bg='honeydew3')\n label_2b_heading.pack(padx=10, pady=10)\n #\n self.length_spin = Spinbox(frame2b, width=6, from_=-26, to=26, bg='gray95')\n self.length_spin.pack(anchor=W, padx=10, pady=10)\n\n #\n # frame 2c \n #\n frame2c = Frame(frame2, relief=GROOVE, borderwidth=1, bg='honeydew3')\n frame2c.pack(side=LEFT, fill=Y)\n #\n label_2c_heading = Label(frame2c)\n label_2c_heading.config(borderwidth=0, text='Choose:', bg='honeydew3')\n label_2c_heading.pack(anchor=W, padx=10, pady=10, side=TOP)\n #\n self.radio_2c_choice = StringVar(master) # variable to store radio button choice\n self.radio_2c_choice.set('Encrypt') # sets to default value\n radio_2c_1 = Radiobutton(frame2c, text='Encrypt', variable=self.radio_2c_choice, value='Encrypt',\n bg='honeydew3')\n radio_2c_2 = Radiobutton(frame2c, text='Decrypt', variable=self.radio_2c_choice, 
value='Decrypt',\n bg='honeydew3')\n radio_2c_1.pack(anchor=W, padx=10, pady=10)\n radio_2c_2.pack(anchor=W, padx=10, pady=10)\n #\n btn_generate = Button(frame2)\n btn_generate.config(relief=RAISED, borderwidth=5, text='Cryptify!', command=self.criptify)\n btn_generate.pack(padx=10, pady=10, side=BOTTOM)\n #\n label_heading_input = Label(frame2)\n label_heading_input.config(text='Input:')\n label_heading_input.pack(side=LEFT, padx=10, pady=10, anchor=NW)\n #\n self.entry_input_var = StringVar(master)\n self.entry_input_var.set('')\n entry_input = Entry(frame2)\n entry_input.config(bd=1, relief=GROOVE, bg='gray93', textvariable=self.entry_input_var)\n entry_input.pack(side=LEFT, anchor=W, padx=10, pady=10)\n #\n icon = PhotoImage(file=\"cryptocave.gif\")\n lbl_icon = Label(frame1)\n lbl_icon.config(image=icon)\n lbl_icon.image = icon\n lbl_icon.pack(side=LEFT, padx=10, pady=10)\n lbl_icon.config(bg='gray15') # override default font\n lbl_icon.pack(side=LEFT, padx=10, pady=10)\n #\n btn_frequencyAnalysis = Button(frame1)\n btn_frequencyAnalysis.config(text='Frequency Analysis', font='helvetica 16 bold', activebackground='sea green',\n bg='gold2', command=self.freqAnalysis)\n btn_frequencyAnalysis.pack(side=RIGHT, padx=10, pady=10)\n #\n # frame3-output area\n #\n frame3 = Frame(master)\n frame3.pack(fill='both', expand=True)\n #\n label_3a = Label(frame3)\n label_3a.config(borderwidth=0, text='Here is your output: ', bg='honeydew3')\n label_3a.pack(side=LEFT)\n #\n self.output_var = StringVar(master)\n self.output_var.set('') # default to empty string\n entry_output = Entry(frame3)\n entry_output.config(borderwidth=1, relief=GROOVE, bg='light gray', textvariable=self.output_var)\n entry_output.pack(side=LEFT)\n #\n btn_quit = Button(frame3)\n btn_quit.config(relief=RAISED, borderwidth=5, text=' Quit ', command=master.destroy)\n btn_quit.pack(padx=10, pady=10, side=BOTTOM, anchor=SE)\n #\n label_symmetrical_heading = Button(frame3)\n 
label_symmetrical_heading.config(relief=RAISED, borderwidth=5, bg='honeydew3',\n text='The Two Sides of Encryption', command=self.typesOf_Encryption)\n label_symmetrical_heading.pack(padx=10, pady=10, side=BOTTOM, anchor=SE)\n\n def criptify(self):\n # get values from interface\n shiftNumber = self.length_spin.get()\n cipherChoice = self.radio_2a_choice.get()\n encrypt_or_decrypt = self.radio_2c_choice.get()\n inputEntry = self.entry_input_var.get()\n #\n cryptifier.cipherOutput(shiftNumber, cipherChoice, encrypt_or_decrypt, inputEntry)\n output = cryptifier.cipherOutput(shiftNumber, cipherChoice, encrypt_or_decrypt, inputEntry)\n self.output_var.set(output)\n\n def typesOf_Encryption(self):\n encryptionTypes = Toplevel()\n encryptionTypes.title('The Two Sides of Encryption')\n encryptionTypes.geometry('800x500+980+180')\n encryptionTypes.option_add('*Font', 'Georgia 12')\n encryptionTypes.option_add('*Background', 'gray15')\n encryptionTypes.option_add('*Label.Font', 'helvetica 14')\n encryptionTypes.config(bg='gray15')\n #\n # Frame1 - Header for Symmetrical + Definition + Examples\n #\n frame1 = Frame(encryptionTypes, bg='gray30')\n frame1.pack(fill=Y, expand=True, side=LEFT)\n #\n header_Symmetrical = Label(frame1)\n header_Symmetrical.config(borderwidth=0, text='Symmetrical', bg='gray30', fg='ghost white')\n header_Symmetrical.pack(anchor=N, side=TOP, padx=10, pady=10)\n #\n symmetricalDefinition = Label(frame1)\n symmetricalDefinition.config(borderwidth=0, wraplength=300, justify=LEFT,\n text='When we use a single key to encrypt/decrypt plaintext.', bg='gray30',\n fg='ghost white')\n symmetricalDefinition.pack(anchor=N, side=TOP, padx=10, pady=10)\n #\n symmetricalExamples = Label(frame1)\n symmetricalExamples.config(borderwidth=0, wraplength=160, justify=LEFT,\n text='Examples: -Caesar Cipher; -Vernam Cipher',\n bg='gray30', fg='ghost white')\n symmetricalExamples.pack(anchor=N, side=LEFT, padx=10, pady=10)\n #\n # Frame2 - Header for Asymmetrical + 
Definition + Examples\n #\n frame2 = Frame(encryptionTypes, bg='gray30')\n frame2.pack(fill=Y, expand=True, side=RIGHT)\n #\n header_Asymmetrical = Label(frame2)\n header_Asymmetrical.config(borderwidth=0, text='Asymmetrical', bg='gray30', fg='ghost white')\n header_Asymmetrical.pack(anchor=N, side=TOP, padx=10, pady=10)\n #\n asymmetricalDefinition = Label(frame2)\n asymmetricalDefinition.config(borderwidth=0, wraplength=300, justify=LEFT, fg='ghost white', bg='gray30',\n text=\"When keys come in pairs, a Public Key and a Private Key. Users can send secret messages by encrypting a message with the recipient's public key. Only the intended recipient can decrypt the message, since only that user should have access to the required key.\")\n asymmetricalDefinition.pack(anchor=N, side=TOP, padx=10, pady=10)\n #\n asymmetricalExamples = Label(frame2)\n asymmetricalExamples.config(borderwidth=0, wraplength=160, justify=LEFT,\n text='Examples: -RSA Encryption (This relies on the fact that it is very hard to factorise large composite numbers with few prime numbers)',\n bg='gray30', fg='ghost white')\n asymmetricalExamples.pack(anchor=N, side=LEFT, padx=10, pady=10)\n\n btn_close = Button(encryptionTypes, bg='honeydew3', relief=RAISED, borderwidth=5, text=' Close ',\n command=encryptionTypes.destroy)\n btn_close.pack(padx=10, pady=10, side=BOTTOM)\n\n def freqAnalysis(self):\n\n # fetches data from GUI\n shiftNumber = self.length_spin.get()\n cipherChoice = self.radio_2a_choice.get()\n encrypt_or_decrypt = self.radio_2c_choice.get()\n inputEntry = self.entry_input_var.get()\n\n # fetches output data from cryptifier\n cryptifier.cipherOutput(shiftNumber, cipherChoice, encrypt_or_decrypt, inputEntry)\n data = cryptifier.cipherOutput(shiftNumber, cipherChoice, encrypt_or_decrypt, inputEntry)\n\n # Creates the bar chart visually on the GUI\n characters = list(data)\n\n while data == '': # If the user enters nothing, this will run. 
it is a validation check\n try:\n plt.clf()\n labels, values = zip(*Counter(characters).items())\n indexes = np.arange(len(labels))\n width = 0.5\n\n plt.bar(indexes, values, width)\n plt.xticks(indexes + width * 0, labels)\n plt.title('Most frequent characters outputted')\n plt.ylabel('Number of uses')\n plt.xlabel('Characters')\n plt.figure('Frequency Analysis')\n plt.show()\n except ValueError:\n from tkinter import messagebox\n messagebox.showerror('Error', 'Please enter an input')\n break\n else: # create the graph if all else is good.\n plt.figure('Frequency Analysis')\n plt.clf()\n labels, values = zip(*Counter(characters).items())\n indexes = np.arange(len(labels))\n width = 0.5\n\n plt.bar(indexes, values, width)\n plt.xticks(indexes + width * 0, labels)\n plt.yticks()\n plt.title('Most frequent characters outputted')\n plt.ylabel('Number of uses')\n plt.xlabel('Characters')\n plt.show()\n\n\ndef main():\n root = Tk() # creates a tkinter root window\n app = encryptionGui(root) # puts GUI onto the root\n root.mainloop() # runs the event handler\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"encryptionT.py","file_name":"encryptionT.py","file_ext":"py","file_size_in_byte":11658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"225736996","text":"'''\n문제: 시리얼 번호\n다솜이는 기타를 많이 가지고 있다. 그리고 각각의 기타는 모두 다른 시리얼 번호를 가지고 있다. 다솜이는 기타를 빨리 찾아서 빨리 사람들에게 연주해주기 위해서 \n기타를 시리얼 번호 순서대로 정렬하고자 한다. 모든 시리얼 번호는 알파벳 대문자 (A-Z)와 숫자 (0-9)로 이루어져 있다.\n시리얼번호 A가 시리얼번호 B의 앞에 오는 경우는 다음과 같다.\n\n 1. A와 B의 길이가 다르면, 짧은 것이 먼저 온다.\n 2. 만약 서로 길이가 같다면, A의 모든 자리수의 합과 B의 모든 자리수의 합을 비교해서 작은 합을 가지는 것이 먼저온다. (숫자인 것만 더한다)\n 3. 만약 1,2번 둘 조건으로도 비교할 수 없으면, 사전순으로 비교한다. 숫자가 알파벳보다 사전순으로 작다.\n \n시리얼이 주어졌을 때, 정렬해서 출력하는 프로그램을 작성하시오.\n\n첫째 줄에 기타의 개수 N이 주어진다. N은 50보다 작거나 같다. 둘째 줄부터 N개의 줄에 시리얼 번호가 하나씩 주어진다. \n시리얼 번호의 길이는 최대 50이고, 알파벳 대문자 또는 숫자로만 이루어져 있다. 
시리얼 번호는 중복되지 않는다.\n'''\n\nimport sys\n\ndef sum_num(sirial):\n result = 0\n \n for word in sirial:\n if word.isdigit():\n result += int(word)\n \n return result\n\nn = int(sys.stdin.readline().rstrip())\ndata = [sys.stdin.readline().rstrip() for _ in range(n)]\n\ndata.sort(key = lambda sirial : (len(sirial), sum_num(sirial), sirial))\n\nfor sirial in data:\n print(sirial)","sub_path":"sort/SortPrac13.py","file_name":"SortPrac13.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"476520590","text":"from __future__ import division\n\nimport random\nimport warnings\nimport numpy as np\n\nfrom skimage.morphology import dilation,erosion\nfrom skimage.filters import gaussian\n\nclass Compose(object):\n \"\"\"Compose transforms\n\n Args:\n transforms (list): list of transformations to compose.\n input_size (tuple): input size of model in (z, y, x).\n keep_uncropped (bool): keep uncropped images and labels (default: False).\n keep_non_smooth (bool): return also the non-smoothed masks (default: False).\n \"\"\"\n def __init__(self, \n transforms, \n input_size = (8,196,196),\n keep_uncropped = False,\n keep_non_smoothed = False):\n self.transforms = transforms\n self.input_size = np.array(input_size)\n self.sample_size = self.input_size.copy()\n self.set_sample_params()\n self.keep_uncropped = keep_uncropped\n self.keep_non_smoothed = keep_non_smoothed\n\n def set_sample_params(self):\n for _, t in enumerate(self.transforms):\n self.sample_size = np.ceil(self.sample_size * t.sample_params['ratio']).astype(int)\n self.sample_size = self.sample_size + (2 * np.array(t.sample_params['add']))\n print('Sample size required for the augmentor:', self.sample_size)\n\n def smooth_edge(self, data):\n smoothed_label = data['label'].copy()\n\n for z in range(smoothed_label.shape[0]):\n temp = smoothed_label[z].copy()\n for idx in np.unique(temp):\n if idx != 0:\n binary = 
(temp==idx).astype(np.uint8)\n for _ in range(2):\n binary = dilation(binary)\n binary = gaussian(binary, sigma=2, preserve_range=True)\n binary = dilation(binary)\n binary = (binary > 0.8).astype(np.uint8)\n \n temp[np.where(temp==idx)]=0\n temp[np.where(binary==1)]=idx\n smoothed_label[z] = temp\n\n data['label'] = smoothed_label\n return data\n\n def crop(self, data):\n image, label = data['image'], data['label']\n\n if 'mask' in data and data['mask'] is not None:\n mask = data['mask']\n else:\n mask = None\n\n assert image.shape[-3:] == label.shape\n assert image.ndim == 3 or image.ndim == 4\n margin = (label.shape[1] - self.input_size[1]) // 2\n margin = int(margin)\n \n # whether need to crop z or not (missing section augmentation)\n if label.shape[0] > self.input_size[0]:\n z_low = np.random.choice(label.shape[0]-self.input_size[0]+1, 1)[0]\n else:\n z_low = 0\n z_high = z_low + self.input_size[0] \n z_low, z_high = int(z_low), int(z_high)\n\n if margin==0:\n if mask is None:\n return {'image': image, 'label': label}\n else:\n return {'image': image, 'label': label, 'mask': mask}\n else: \n low = margin\n high = margin + self.input_size[1]\n if image.ndim == 3:\n if self.keep_uncropped == True:\n if mask is None:\n return {'image': image[z_low:z_high, low:high, low:high],\n 'label': label[z_low:z_high, low:high, low:high],\n 'image_uncropped': image,\n 'label_uncropped': label}\n else:\n return {'image': image[z_low:z_high, low:high, low:high],\n 'label': label[z_low:z_high, low:high, low:high],\n 'mask': mask[z_low:z_high, low:high, low:high],\n 'image_uncropped': image,\n 'label_uncropped': label,\n 'mask_uncropped': mask}\n\n else:\n if mask is None:\n return {'image': image[z_low:z_high, low:high, low:high],\n 'label': label[z_low:z_high, low:high, low:high]}\n else:\n return {'image': image[z_low:z_high, low:high, low:high],\n 'label': label[z_low:z_high, low:high, low:high],\n 'mask': mask[z_low:z_high, low:high, low:high]}\n\n else:\n if 
self.keep_uncropped == True:\n if mask is not None:\n return {'image': image[:, z_low:z_high, low:high, low:high],\n 'label': label[z_low:z_high, low:high, low:high],\n 'mask': mask[z_low:z_high, low:high, low:high],\n 'image_uncropped': image,\n 'label_uncropped': label,\n 'mask_uncropped': mask}\n else:\n return {'image': image[:, z_low:z_high, low:high, low:high],\n 'label': label[z_low:z_high, low:high, low:high],\n 'image_uncropped': image,\n 'label_uncropped': label}\n else:\n if mask is not None:\n return {'image': image[:, z_low:z_high, low:high, low:high],\n 'label': label[z_low:z_high, low:high, low:high],\n 'mask': mask[z_low:z_high, low:high, low:high]}\n else:\n return {'image': image[:, z_low:z_high, low:high, low:high],\n 'label': label[z_low:z_high, low:high, low:high]}\n\n def __call__(self, data, random_state=None):\n data['image'] = data['image'].astype(np.float32)\n for t in reversed(self.transforms):\n if random.random() < t.p:\n data = t(data, random_state)\n\n # crop the data to input size\n if self.keep_uncropped:\n data['uncropped_image'] = data['image']\n data['uncropped_label'] = data['label']\n data['uncropped_mask'] = data['mask']\n data = self.crop(data)\n if self.keep_non_smoothed:\n data['non_smoothed'] = data['label']\n data = self.smooth_edge(data)\n return data","sub_path":"torch_connectomics/data/augmentation/composition.py","file_name":"composition.py","file_ext":"py","file_size_in_byte":6410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"89995233","text":"\n#打印商品列表\ndef goods_list():\n print(\"本店商品如下:\")\n print(\"======================\")\n for key, value in enumerate(goods, 1):\n print(key, value)\n print(\"======================\")\n\n#删除商品\ndef d():\n for key,value in enumerate(cart, 1):\n print(key, value)\n i = int(input(\"请输入您想删除商品的序列号\"))\n del cart[i-1]\n\ngoods = [\n {\"name\": \"电脑\", \"price\": 1999},\n {\"name\": \"鼠标\", \"price\": 10},\n {\"name\": \"游艇\", 
\"price\": 20},\n {\"name\": \"跑鞋\", \"price\": 998},\n]\ncart =[]\nmoney = int(input(\"请输入您卡内金额:\"))\nprint(\"您卡内余额为:\", money)\n\ngoods_list()\n\nflag = 1\n\nwhile flag == 1:\n print(\"请选择下列字母来进行下一步动作\")\n action = str(input(\"p:列出购物车内商品 w:结算 d:删除 n:继续购物 i:充值 m:查看余额 q:退出\"))\n if action == \"p\":\n print(cart)\n elif action == \"n\":\n goods_list()\n goods_num = int(input(\"请选择您要购买的物品:\"))\n cart.append(goods[goods_num - 1])\n elif action == \"w\":\n amount = 0\n for i in cart:\n amount += i[\"price\"]\n print(amount)\n if amount > money:\n print(\"您的余额不足,请选择充值或者删除部分商品\")\n\n elif action == \"d\":\n d()\n elif action == \"i\":\n i = int(input(\"请输入您想充值的金额:\"))\n money += i\n print(\"充值后您的余额为:\", money)\n elif action == \"m\":\n print(\"您的余额为:\", money)\n elif action == \"q\":\n flag = 0\n else:\n print(\"输入错误,请继续选择。\")\n\nprint(\"欢迎下次再来!\")","sub_path":"shop/shopping mall.py","file_name":"shopping mall.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"491359421","text":"# -*- coding:utf-8 -*-\n# @Author : 江湖一笑\n# @Time : 2020/6/26 9:11\n# @Software : Web-Autotest-Python\n# @Python_verison : 3.7\n'''\n使用ddt+unittest读取mysql数据\n'''\nimport unittest, ddt\nimport pymysql\nfrom mysql_data import MysqlData\nfrom mysql_init import MysqlInit\ndef get_mysql_data():\n MD = MysqlData(\n host='localhost',\n database='db1',\n port=3306,\n user='root',\n password='123',\n charset='utf8')\n test_data = MD.get_mysql_data()\n return test_data\n\n@ddt.ddt\nclass TestMysqlDdt(unittest.TestCase):\n\n @ddt.data( * get_mysql_data()) # 调用get_mysql_data函数\n @ddt.unpack\n def test_mysql(self, a, b):\n print(a, b)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"第七章-测试框架之数据驱动应用/数据驱动_mysql08/test_mysql_ddt.py","file_name":"test_mysql_ddt.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"278558453","text":"#!/usr/bin/env python \nfrom socket import *\n\n#данные сервера\nhost = 'localhost'\nport = 1604\naddr = (host,port)\n\ntcp_socket = socket(AF_INET, SOCK_STREAM)\ntcp_socket.bind(addr)\ntcp_socket.listen(1)\n\nwhile True:\n print('wait connection...')\n conn, addr = tcp_socket.accept()\n print('client addr: ', addr)\n data = conn.recv(1024)\n if not data:\n conn.close()\n break\n else:\n print(data)\n conn.send(b'Hello from server!')\n continue\n\ntcp_socket.close()\n\n","sub_path":"tests/soket.py","file_name":"soket.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"468586163","text":"# coding=utf-8\n\n\nimport sys\nfrom PyQt5 import QtWidgets\nfrom PyQt5 import uic\n\n\nclass Form(QtWidgets.QDialog):\n def __init__(self, parent=None):\n QtWidgets.QDialog.__init__(self, parent)\n self.ui = uic.loadUi(\"/VendingMachine.ui\", self)\n\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n w = Form()\n w.show()\n\n sys.exit(app.exec())","sub_path":"VendingMachine/VendingMachine.py","file_name":"VendingMachine.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"302101798","text":"# This script downloads daily historical weather data from wunderground,\n# for citibike analysis project.\n\n# Saves a modified file to sqlite3 database\n\nimport sqlite3\nimport pandas as pd\n\ndef get_wea_data_yearly(year,station):\n \"\"\"\n Get historical (daily) weather data from wunderground for specified year and station\n \"\"\"\n url = 'http://www.wunderground.com/history/airport/' + station + '/' + year + '/1/1/CustomHistory.html?dayend=31&monthend=12&yearend=' + year + '&req_city=NA&req_state=NA&req_statename=NA&format=1'\n dat = pd.read_csv(url,parse_dates=True)\n # Name of date column is tz, which varies so we can't hardwire name\n 
dat.iloc[:,0] = pd.to_datetime(dat.iloc[:,0])\n dat['date'] = dat.iloc[:,0]\n dat.set_index(dat.date, inplace=True)\n dat['yday'] = dat.date.dt.dayofyear\n dat['month'] = dat.date.dt.month\n dat['year'] = dat.date.dt.year\n dat.rename(columns=lambda x: x.replace(' ', '_'), inplace=True)\n dat['st_code'] = station\n vars_to_keep = ['date','st_code','Max_TemperatureF','Min_TemperatureF','Mean_TemperatureF','year','yday','month','PrecipitationIn','_CloudCover','_Max_Gust_SpeedMPH','_Events']\n dat = dat[vars_to_keep]\n dat.rename(columns={'Max_TemperatureF':'max_temp','Min_TemperatureF':'min_temp','Mean_TemperatureF':'mean_temp','PrecipitationIn':'precip_In','_CloudCover':\n 'cloud_cover','_Max_Gust_SpeedMPH':'max_gust_mph','_Events':'events'},inplace=True)\n\n return dat\n\n# Write to databse\ncon = sqlite3.connect(\"data/nyc_weather.db3\")\n\nyears = ['2013','2014','2015','2016','2017']\n# KGLA is code for LaGuardia airport\nfor year in years:\n dat = get_wea_data_yearly(year,'KLGA')\n dat.to_sql(\"temps\",con,if_exists='append',index=False)\n del dat\n","sub_path":"src/get_weather_data.py","file_name":"get_weather_data.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"253648446","text":"import argparse\nimport multiprocessing as mp\nimport random\nimport sys\nimport time\n\nimport torch\nfrom test_utils import print_device_info\n\nimport hivemind\nfrom hivemind import find_open_port\nfrom hivemind.server import layers\nfrom hivemind.utils.threading import increase_file_limit\n\n\ndef client_process(can_start, benchmarking_failed, port, num_experts, batch_size, hid_dim, num_batches, backprop=True):\n torch.set_num_threads(1)\n can_start.wait()\n experts = [hivemind.RemoteExpert(f\"expert{i}\", endpoint=f\"{hivemind.LOCALHOST}:{port}\") for i in range(num_experts)]\n\n try:\n dummy_batch = torch.randn(batch_size, hid_dim)\n for batch_i in range(num_batches):\n expert = 
random.choice(experts)\n out = expert(dummy_batch)\n if backprop:\n out.sum().backward()\n except BaseException as e:\n benchmarking_failed.set()\n raise e\n\n\ndef benchmark_throughput(num_experts=16, num_handlers=None, num_clients=128, num_batches_per_client=16,\n expert_cls='ffn', hid_dim=1024, batch_size=2048, max_batch_size=None, backprop=True,\n device=None, port=None):\n assert not hasattr(torch.cuda, 'is_initialized') or not torch.cuda.is_initialized() \\\n or torch.device(device) == torch.device('cpu')\n assert expert_cls in layers.name_to_block\n port = port or find_open_port()\n max_batch_size = max_batch_size or batch_size * 4\n num_handlers = max(1, num_handlers or num_clients // 2)\n benchmarking_failed = mp.Event()\n can_start = mp.Event()\n timestamps = dict(started=time.perf_counter())\n\n try:\n # start clients and await server\n # Note: client processes must be launched BEFORE touching gpu, even torch.cuda.is_available can cause trouble\n clients = [\n mp.Process(\n target=client_process, name=f'client_process-{i}',\n args=(can_start, benchmarking_failed, port, num_experts, batch_size,\n hid_dim, num_batches_per_client, backprop))\n for i in range(num_clients)]\n\n for client in clients:\n client.daemon = True\n client.start()\n\n timestamps['launched_clients'] = timestamps['began_launching_server'] = time.perf_counter()\n\n # start server\n device = device or ('cuda' if torch.cuda.is_available() else 'cpu')\n experts = {}\n for i in range(num_experts):\n expert = torch.jit.script(layers.name_to_block[expert_cls](hid_dim))\n experts[f'expert{i}'] = hivemind.ExpertBackend(name=f'expert{i}',\n expert=expert, opt=torch.optim.Adam(expert.parameters()),\n args_schema=(hivemind.BatchTensorDescriptor(hid_dim),),\n outputs_schema=hivemind.BatchTensorDescriptor(hid_dim),\n max_batch_size=max_batch_size,\n )\n timestamps['created_experts'] = time.perf_counter()\n server = hivemind.Server(None, experts, listen_on=f\"{hivemind.LOCALHOST}:{port}\",\n 
num_connection_handlers=num_handlers, device=device)\n server.start()\n server.ready.wait()\n timestamps['server_ready'] = time.perf_counter()\n can_start.set()\n\n for client in clients:\n client.join()\n timestamps['clients_finished'] = time.perf_counter()\n except BaseException as e:\n benchmarking_failed.set()\n raise e\n finally:\n for client in clients:\n if client.is_alive():\n client.terminate()\n server.shutdown()\n timestamps['server_shutdown_finished'] = time.perf_counter()\n server.join()\n\n sys.stdout.flush()\n sys.stderr.flush()\n time_between = lambda key1, key2: \\\n abs(timestamps[key2] - timestamps[key1]) if (key1 in timestamps and key2 in timestamps) else float('nan')\n total_examples = batch_size * num_clients * num_batches_per_client\n\n print('\\n' * 3)\n print(\"Benchmark finished, status:\" + [\"Success\", \"Failure\"][benchmarking_failed.is_set()])\n print(f\"Server parameters: num_experts={num_experts}, num_handlers={num_handlers}, max_batch_size={max_batch_size},\"\n f\" expert_cls={expert_cls}, hid_dim={hid_dim}, device={device}\")\n print(f\"Client parameters: num_clients={num_clients}, num_batches_per_client={num_batches_per_client}, \"\n f\"batch_size={batch_size}, backprop={backprop}\")\n\n print(\"Results: \")\n print(f\"\\tServer startup took {time_between('began_launching_server', 'server_ready') :.3f} s. \"\n f\"({time_between('began_launching_server', 'created_experts') :.3f} s. experts + \"\n f\"{time_between('created_experts', 'server_ready') :.3f} s. 
networking)\")\n print(f\"\\tProcessed {total_examples} examples in {time_between('server_ready', 'clients_finished') :.3f}\")\n print(f\"\\tThroughput for {'forward + backward' if backprop else 'forward'} passes: \"\n f\"{total_examples / time_between('server_ready', 'clients_finished') :.3f} samples / s.\")\n print(f\"\\tBenchmarking took {time_between('started', 'server_shutdown_finished') :.3f} s.\")\n if benchmarking_failed.is_set():\n print(\"Note: benchmark code failed, timing/memory results only indicate time till failure!\")\n print_device_info(device)\n print(flush=True)\n\n assert not benchmarking_failed.is_set()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--preset', type=str, default='default', required=False)\n parser.add_argument('--num_batches_per_client', type=int, default=16, required=False)\n args = parser.parse_args()\n\n if args.preset in ('default', 'ffn_forward_backward'):\n benchmark_throughput()\n elif args.preset == 'ffn_forward':\n benchmark_throughput(backprop=False, num_batches_per_client=args.num_batches_per_client)\n elif args.preset == 'ffn_small_batch':\n benchmark_throughput(backprop=False, num_experts=4, batch_size=32, max_batch_size=8192,\n num_batches_per_client=args.num_batches_per_client)\n elif args.preset == 'ffn_small_batch_512clients':\n benchmark_throughput(backprop=True, num_experts=1, batch_size=1, max_batch_size=8192,\n num_clients=512, num_batches_per_client=args.num_batches_per_client)\n elif args.preset == 'ffn_small_batch_512clients_32handlers':\n benchmark_throughput(backprop=True, num_experts=1, batch_size=1, max_batch_size=8192, num_handlers=32,\n num_clients=512, num_batches_per_client=args.num_batches_per_client)\n elif args.preset == 'ffn_massive':\n increase_file_limit()\n benchmark_throughput(backprop=False, num_clients=512, batch_size=512,\n max_batch_size=8192, num_batches_per_client=args.num_batches_per_client)\n elif args.preset == 'minimalistic':\n 
benchmark_throughput(num_experts=1, num_clients=1, num_handlers=1,\n num_batches_per_client=args.num_batches_per_client)\n elif args.preset == 'nop':\n benchmark_throughput(expert_cls='nop', backprop=False, num_batches_per_client=args.num_batches_per_client)\n else:\n raise ValueError(f\"No such benchmark preset: {args.preset}\")\n","sub_path":"tests/benchmark_throughput.py","file_name":"benchmark_throughput.py","file_ext":"py","file_size_in_byte":7504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"257786","text":"import shopping_basket as sb\n\nclass Test():\n\n \"\"\" Example test class based on example test parameters in git repo \"\"\"\n\n def __init__(self):\n # Initialise empty objects\n self.catalogue = sb.Catalogue()\n self.offers = sb.Offers()\n self.basket_1 = sb.Basket()\n self.basket_2 = sb.Basket()\n\n def fill_catalogue(self):\n \"\"\" Add products to catalogue, duplicate products should be rejected \"\"\"\n print('Populating catalogue...')\n try:\n self.catalogue.add_new_product('Baked Beans', 0.99)\n self.catalogue.add_new_product('Biscuits', 1.20)\n self.catalogue.add_new_product('Sardines', 1.89)\n self.catalogue.add_new_product('Shampoo (Small)', 2.00)\n self.catalogue.add_new_product('Shampoo (Medium)', 2.50)\n self.catalogue.add_new_product('Shampoo (Large)', 3.50)\n self.catalogue.add_new_product('Ice Cream', -1.00)\n self.catalogue.add_new_product('Biscuits', 1.20)\n except Exception as e:\n print(f'Exception adding product to catalogue: {e}')\n print(f'{len(self.catalogue.products)} items added to catalogue')\n\n def fill_offers(self):\n \"\"\" Add offers to offers_list \"\"\"\n print('Populating offers...')\n try:\n self.offers.add_new_offer('Offer1', 'Baked Beans', 3, 1)\n self.offers.add_new_offer('Offer2', 'Sardines', 1, 0.25)\n self.offers.add_new_offer('Offer2', 'Biscuits', 1, 0.25)\n except Exception as e:\n print(f'Exception adding offer to offer list: {e}')\n 
print(f'{len(self.offers.offers_list)} offers added to offers list')\n\n def fill_basket(self, basket: object, item_list: dict):\n \"\"\" Fill a specified basket with a list of items \"\"\"\n for item_name, quantity in item_list.items():\n try:\n basket.add_new_basket_item(item_name, quantity)\n except Exception as e:\n print(f'Exception adding item to basket: {e}')\n print(f'{len(basket.items)} items added to basket')\n\n def get_basket_discount(self, basket: object):\n \"\"\" Get the sub-total, disocunt and total amount for specified basket \"\"\"\n response = basket.calculate_basket_discount(self.catalogue, self.offers)\n return response\n\nif __name__ == '__main__':\n\n test = Test()\n\n # Populate catalogue\n test.fill_catalogue()\n\n # Populate offers\n test.fill_offers()\n\n # Test basket 1 discount\n print(f'Testing basket 1...')\n test.fill_basket(test.basket_1, {\n 'Baked Beans': 4,\n 'Biscuits': 1,\n 'Sardines': 0,\n })\n \n # Get and display basket 1 discount\n print('Requesting discount details for basket 1...')\n try:\n basket_1_discount = test.get_basket_discount(test.basket_1)\n print(basket_1_discount)\n except Exception as e:\n print(f'Exception getting basket discount details: {e}')\n\n # Test basket 2 discount\n print(f'Testing basket 2...')\n test.fill_basket(test.basket_2, {\n 'Baked Beans': 2,\n 'Biscuits': 1,\n 'Sardines': 2,\n })\n\n # Get and display basket 2 discount\n print('Requesting discount details for basket 2...')\n try:\n basket_2_discount = test.get_basket_discount(test.basket_2)\n print(basket_2_discount)\n except Exception as e:\n print(f'Exception getting basket discount details: {e}')\n\n\n","sub_path":"shopping_basket/shopping_example.py","file_name":"shopping_example.py","file_ext":"py","file_size_in_byte":3445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"550563880","text":"# coding: utf8\n# Christian Mañibo\n# 21/03/2018\n\n# 
Inicializaciones\nnumero=int(input(\"Introduce un número: \"))\nvalor=1\ncontador=0\nsalir=\"N\"\n\nwhile (salir==\"N\") :\n # Hago cosas\n if(numero%2==0): \n contador=contador+1\n \n # Activo indicador de salida si toca\n if(numero==valor): # Condición de salida\n salir=\"S\"\n\n valor=valor+1\n\nif (contador>2):\n print (numero , \"no es un numero primo\")\n\nelse:\n print (numero , \"es un numero primo\")\n","sub_path":"python2k18/Ejercicios/ejercicio-bucle-primo.py","file_name":"ejercicio-bucle-primo.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"512372073","text":"from django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import gettext as _\nfrom django.conf import settings\n\nfrom .utils import my_slugify\n\n\n\nclass BaseShopMixin(models.Model):\n date_create = models.DateTimeField(_('Дата создания'), default=timezone.now)\n\n def get_date_create(self):\n return self.date_create.strftime(settings.ADMIN_DATE_FORMAT)\n get_date_create.short_description = 'Дата создания'\n\n class Meta:\n abstract = True\n\nclass CurrencyBaseShopMixin(BaseShopMixin):\n CURRENCIES = (('USD', '$'),)\n currency = models.CharField(max_length=10, choices=CURRENCIES, default='USD')\n\n @property\n def c(self):\n return self.get_currency_display()\n\n\n class Meta:\n abstract = True\n\n\n\nclass ShopMixin(BaseShopMixin):\n\n stat = (('A', 'Active'),\n ('D', 'Deactive'))\n\n date_update = models.DateTimeField(_('Дата последнего изменения'), default=timezone.now)\n image = models.ImageField(upload_to='shop_media/', blank=True, null=True)\n status = models.CharField(choices=stat, max_length=20, default='A')\n description = models.TextField(blank=True, null=True)\n slug = models.SlugField(allow_unicode=True, max_length=65)\n name = models.CharField(max_length=124, unique=True)\n\n\n\n def save(self, *args, **kwargs):\n self.slug = 
my_slugify(self.name)\n super().save()\n\n def __str__(self):\n return self.name\n\n class Meta:\n abstract = True\n\n\n\n\n","sub_path":"django_shop/shop/apps/core/model_mixins.py","file_name":"model_mixins.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"445713921","text":"from plugin_system import Plugin\n\nplugin = Plugin(\"Пример плагина\",\n usage=[\"тест - пример плагина с аргументами\"])\n\n\n# Чтобы функция выполнялась переодически и без остановки смотри пример в friends.py\n\n# Функция выполняется при загрузке плагинов.\n# Обязательно принимает один аргумент: объект VkPlus\n@plugin.on_init()\nasync def init(vk):\n # Есть plugin.temp_data, хранит в себе\n # что-нибудь нужное вам\n if \"some_data\" not in plugin.temp_data:\n plugin.temp_data[\"some_data\"] = {}\n\n\n# Если необходимо выполнить какую-то длительную задачу(которую нелья переписать как корутину),\n# не блокируя исполнения команд другими пользователями - ипользуйте plugin.process_pool:\n# result = await loop.run_in_executor(plugin.process_pool, long_function, argument1, argument2)\n\n\n# Функция будет выполняться, когда пользователь введёт команду(ы),\n# указанные в аргументах on_command\n# Обязательно принимает 2 аргумента - команда и аргументы\n# Желательно первой командой указывать основную (она будет в списке команд)\n@plugin.on_command('тест', 'test')\nasync def command(msg, args):\n # msg.text - для полного текста сообщения без команды\n # msg.body - для полного текста сообщения\n # msg.user_id - id пользоватлея, который обратился к боту\n\n if msg.brief_attaches: # короткий список вложений\n attachments = await msg.full_attaches # полные вложения\n attaches = \"\\n\".join(str(attach) for attach in attachments)\n else:\n attaches = \"\"\n\n await msg.answer(f'Пример плагина.\\n' +\n f'Аргументы - {args}\\n' +\n (f'Вложения:\\n{attaches}' if attaches else 
\"\"))\n","sub_path":"plugins/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"330229820","text":"import hashlib\r\nimport os\r\nimport random\r\nimport socket\r\nimport unittest\r\nfrom multiprocessing import Barrier, Process\r\n\r\nfrom .tp2_utils.blocking_socket_transferer import BlockingSocketTransferer\r\n\r\n\r\ndef message_sender(barrier, port):\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n sock.bind(('', port))\r\n sock.listen(1)\r\n barrier.wait()\r\n c, addr = sock.accept()\r\n transferer = BlockingSocketTransferer(c)\r\n transferer.send_plain_text(\"Hola uacho\")\r\n c.close()\r\n\r\n\r\ndef file_sender(barrier, port, input_file):\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n sock.bind(('', port))\r\n sock.listen(1)\r\n barrier.wait()\r\n c, addr = sock.accept()\r\n transferer = BlockingSocketTransferer(c)\r\n transferer.send_file(input_file)\r\n transferer.close()\r\n\r\n\r\nclass TestBlockingSocketTransferer(unittest.TestCase):\r\n TEST_PORT = random.randint(8000, 9000)\r\n\r\n def setUp(self) -> None:\r\n try:\r\n from pytest_cov.embed import cleanup_on_sigterm\r\n except ImportError:\r\n pass\r\n else:\r\n cleanup_on_sigterm()\r\n self.barrier = Barrier(2)\r\n self.p = None\r\n\r\n def tearDown(self) -> None:\r\n self.p.join()\r\n TestBlockingSocketTransferer.TEST_PORT += 1\r\n\r\n def test_send_text(self):\r\n self.p = Process(target=message_sender, args=(self.barrier, TestBlockingSocketTransferer.TEST_PORT))\r\n self.p.start()\r\n self.barrier.wait()\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n sock.connect(('localhost', TestBlockingSocketTransferer.TEST_PORT))\r\n socket_transferer = BlockingSocketTransferer(sock)\r\n self.assertEqual(socket_transferer.receive_plain_text(), \"Hola uacho\")\r\n socket_transferer.close()\r\n\r\n def test_send_file(self):\r\n 
with open('/tmp/big_dummy_file_test', 'wb') as dummy_file:\r\n for i in range(100000):\r\n dummy_file.write((\"%d%d%d\" % (i, i, i)).encode('utf-8'))\r\n sha256 = hashlib.sha256()\r\n with open('/tmp/big_dummy_file_test', 'rb') as dummy_file:\r\n while True:\r\n data = dummy_file.read(2048)\r\n if not data:\r\n break\r\n sha256.update(data)\r\n original_hash = sha256.hexdigest()\r\n\r\n self.p = Process(target=file_sender, args=(self.barrier, TestBlockingSocketTransferer.TEST_PORT,\r\n '/tmp/big_dummy_file_test'))\r\n self.p.start()\r\n self.barrier.wait()\r\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n sock.connect(('localhost', TestBlockingSocketTransferer.TEST_PORT))\r\n socket_transferer = BlockingSocketTransferer(sock)\r\n with open('/tmp/big_dummy_file_test_out', 'wb') as write_file:\r\n socket_transferer.receive_file_data(write_file)\r\n sha256 = hashlib.sha256()\r\n with open('/tmp/big_dummy_file_test_out', 'rb') as dummy_file:\r\n while True:\r\n data = dummy_file.read(2048)\r\n if not data:\r\n break\r\n sha256.update(data)\r\n self.assertEqual(sha256.hexdigest(), original_hash)\r\n os.remove('/tmp/big_dummy_file_test')\r\n os.remove('/tmp/big_dummy_file_test_out')\r\n socket_transferer.close()\r\n","sub_path":"tp2_utils_package/test_blocking_socket_transferer.py","file_name":"test_blocking_socket_transferer.py","file_ext":"py","file_size_in_byte":3376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"492890780","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom django.http import HttpRequest\nfrom django.core.urlresolvers import reverse\nfrom django.test import TestCase\nfrom django.utils import timezone\nfrom core.views import today\nfrom .base import get_response_jsons_items_attributes, create_wordtext_and_worddetails, create_link\n\n\nclass TodayViewTest(TestCase):\n\n def setUp(self):\n self.today_date = timezone.datetime.today()\n self.yesterday_date = 
timezone.datetime.today() - timezone.timedelta(days=1)\n\n self.word1 = create_wordtext_and_worddetails(\n text='vehicle', source_name='WP', occured_num=3, datetime=self.today_date)\n self.word2 = create_wordtext_and_worddetails(\n text='gun', source_name='WP', occured_num=2, datetime=self.today_date)\n self.word3 = create_wordtext_and_worddetails(\n text='bike', source_name='WP', occured_num=1, datetime=self.yesterday_date)\n\n self.link1 = create_link(datetime=self.today_date, title='Police find gun in vehicle.',\n url_address='www.wp.pl/articles/011', source='WP')\n\n self.link2 = create_link(datetime=self.today_date, title=\"Is this the world's greatest sport-utility vehicle?\",\n url_address='www.wp.pl/articles/012', source='WP')\n\n self.link3 = create_link(datetime=self.yesterday_date, title=\"10 Reasons to Get on a Bike.\",\n url_address='www.wp.pl/articles/015', source='WP')\n\n self.link1.words.add(self.word1)\n self.link1.save()\n self.link1.words.add(self.word2)\n self.link1.save()\n self.link2.words.add(self.word1)\n self.link2.save()\n self.link3.words.add(self.word3)\n self.link3.save()\n\n def test_today_view_renders_home_and_index_template(self):\n url = reverse('views.today')\n response = self.client.get(url)\n\n self.assertTemplateUsed(response, 'home.html')\n self.assertTemplateUsed(response, 'index.html')\n\n def test_today_view_renders_words_occurred_only_today(self):\n request = HttpRequest()\n response = today(request=request)\n\n self.assertContains(response, self.word1.text)\n self.assertContains(response, self.word2.text)\n\n def test_todat_view_does_not_render_words_occurred_not_today(self):\n request = HttpRequest()\n response = today(request=request)\n\n self.assertNotContains(response, self.word3.text)\n","sub_path":"core/tests/views/test_today.py","file_name":"test_today.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"603938413","text":"'''\nTesting the optimiser on the suzuki dataset\n\nSome additional acquisition functions were added to the optimiser algorithm\nin the acq_func.py file, so to import these we need to point to\nthe location of said file.\n\n(Answer via https://stackoverflow.com/questions/4383571)\n'''\n\nimport sys\nsys.path.insert(1, '..')\n\n# Then, we'll introduce the imports necessary to run the optimiser itself.\n\nimport pandas as pd\nfrom edbo.utils import Data\nfrom edbo.bro import BO_express\nfrom gpytorch.priors import GammaPrior\n\n'''\nAnd finally, some standard libraries\nto help with collecting data on optimiser performance.\n'''\n\nimport random\nimport os.path\n\n# Next, we'll define constants specific to the collection of performance data.\n\n# Location of known yield results data\nRESULT_PATH = 'data/suzuki/experiment_index.csv'\n\n# Location used to store temp files and performance data\nFOLDER_PATH = \"test_bo_suzuki/\"\nDATA_PATH = FOLDER_PATH + \"data/\"\nTEMP_PATH = FOLDER_PATH + \"temp/\"\n\n# What we're trying to optimise\nTARGET = 'yield'\n\n# Seed used to ensure repeatability\nMASTER_SEED = 42\n\n# Number of times you want to run the optimisier on each 'configuration',\n# where a configuration is a combination of acquisition function,\n# batch size and a number of rounds.\nN_EXPERIMENTS = 50\n\n# For each test of the optimiser, the number of top values you'd like\n# to store in the performance data file.\nTOP_N = 1\n\n# The acquisition functions you'd like to test\nMETHODS = ['EI', 'TS', 'rand']\n\n\n# (batch_size, round) pairs. 
Idea is to approximate an experiment budget\n# of 50, though of course you can define your own pairs for different budgets.\nBATCH_ROUNDS = [\n (1, 50),\n (2, 25),\n (3, 17),\n (4, 12),\n (5, 10),\n (6, 8),\n (7, 7),\n (8, 6),\n (9, 5),\n (9, 6),\n (10, 5)\n]\n\nrandom.seed(MASTER_SEED) # Ensures repeatability\n\n# The seeds used to ensure the optimiser selects the same set of\n# Initial experiments for each configuration.\n# This generates an array of 50 integers from 0 to 10 ** 6 - 1.\n# Each time you run the optimiser, you select one of these integers.\n# The integer determines which initial experiments are chosen by the optimiser.\n# For the same integer and batch size, you're guaranteed the same\n# initial experiments. Of course, with different batch sizes,\n# the set of initial experiments will be different anyway.\nSEEDS = random.sample(range(10 ** 6), N_EXPERIMENTS)\n\n#############################\n#############################\n##### REACTION ENCODING #####\n#############################\n#############################\n\nprint(\"Starting Reaction Encoding!\")\n\n# Load DFT descriptor CSV files computed with auto-qchem using pandas\n# Instantiate a Data object\nelectrophiles = Data(pd.read_csv('data/suzuki/electrophile_dft.csv'))\nnucleophiles = Data(pd.read_csv('data/suzuki/nucleophile_dft.csv'))\nligands = Data(pd.read_csv('data/suzuki/ligand-random_dft.csv'))\nbases = Data(pd.read_csv('data/suzuki/base_dft.csv'))\nsolvents = Data(pd.read_csv('data/suzuki/solvent_dft.csv'))\nreactants = [electrophiles, nucleophiles, ligands, bases, solvents]\n\nprint(\"Loaded csv files...\")\n\n# Use Data.drop method to drop descriptors containing some unwanted keywords\n\nfor data in reactants:\n data.drop(\n [\n 'file_name',\n 'vibration',\n 'correlation',\n 'Rydberg',\n 'correction',\n 'atom_number',\n 'E-M_angle',\n 'MEAN',\n 'MAXG',\n 'STDEV'\n ]\n )\n\nprint(\"Dropped unnecessary data...\")\n\n# Parameters in reaction space\n\ncomponents = {\n 
'electrophile':'DFT',\n 'nucleophile':'DFT',\n 'ligand':'DFT',\n 'base':'DFT',\n 'solvent':'DFT'\n}\n\n\n# External descriptor matrices override specified encoding\n\ndft = {\n 'electrophile':electrophiles.data,\n 'nucleophile':nucleophiles.data,\n 'ligand':ligands.data,\n 'base':bases.data,\n 'solvent':solvents.data\n}\n\nencoding = {}\n\n############################\n############################\n#### Instantiating EDBO ####\n############################\n############################\n\n# Yield data is provided at RESULT_PATH\n# We read in the csv file, and create a python dictionary\n# The 'key' is the part of the line in the csv file just before the yield\n# The 'value' is then the yield, as a float\n# E.g. if the configuration was halide1,additive3,base4,ligand2,56.78\n# The dictionary entry would be \"halide1,additive3,base4,ligand2\": 56.78\n\nwith open(RESULT_PATH) as f:\n FULL_RESULT_DICT = {\n \",\".join(\n line.split(\",\")[1:-1]\n ): float(\n line.split(\",\")[-1][:-1]\n ) for line in f.readlines()[1:] # First line has headers,\n # so ignore it.\n }\n\n\n# This function is specifically for 'worst' initial selection\n# To use it, set worst=True in simulate_bo.\n# You'll need to manually alter the naming scheme.\n\ndef get_worst_percentile(full_result_dict, centile=10):\n # Get the bottom centile% of experiments from the results table.\n number_of_results = len(full_result_dict)\n sorted_by_yield = sorted(full_result_dict.items(), key=lambda item: item[1])\n bottom_centile = sorted_by_yield[:int(0.01 * centile * number_of_results)]\n return dict(bottom_centile)\n\n\ndef instantiate_bo(acquisition_func: str, batch_size: int):\n bo = BO_express(\n components,\n encoding=encoding,\n descriptor_matrices=dft,\n acquisition_function=acquisition_func,\n init_method='rand', # Allows control of initial experiments,\n # via seeds.\n batch_size=batch_size,\n target=TARGET\n )\n\n # The priors are set to the ones in the paper, for consistency.\n bo.lengthscale_prior 
= [GammaPrior(2.0, 0.2), 5.0]\n bo.outputscale_prior = [GammaPrior(5.0, 0.5), 8.0]\n bo.noise_prior = [GammaPrior(1.5, 0.5), 1.0]\n return bo\n\n'''\nWhen running a BO_express instance, the proposed experiments are output to a\ncsv file, with the experimenter meant to manually input the resulting yields\nfrom running those conditions. Of course, for our purposes this would ideally\nbe automatic - however, the BO_express instance doesn't support\na results table as input.\n\nThere is a parent class BO that does accept a results table as input, but\nit requires manual construction of the domain\nas well as manual numerical encoding, which is a hassle.\n\nInstead, we let the proposed experiments be output to a csv file,\nand then programmatically read in the csv file\nand fill in the resulting yield values from our known results table.\n\nIt's likely slower than the program having access to the results table\nin memory, but it works!\n'''\n\ndef fill_in_experiment_values(input_path):\n # Reading in values\n newfile = \"\"\n with open(input_path) as f:\n # In this case f is a csv file\n first_line = True\n for line in f:\n original_line = line\n if first_line:\n newfile += line\n first_line = False\n continue\n line = line.split(\",\")\n search_string = \",\".join(line[1:-1]) # Everything except the\n # input for the yield value, i.e. 
the combination.\n input_yield = FULL_RESULT_DICT.get(search_string, 0)\n # Here, the 'get' method returns 0 if the combination\n # is not listed in the yield results table.\n # In other files, this is unnecessary, since we construct\n # the reaction domain from the yield results table.\n # However, here we simply multiply together\n # the possible combinations of base, ligand etc to form\n # our reaction domain, which may lead to combinations\n # that don't have entries in the yield results table.\n line = \",\".join(\n original_line.split(\",\")[:-1]\n ) + \",\" + str(input_yield) + \"\\n\" # 'Filling in' the yield value.\n newfile += line\n with open(input_path, 'w') as f:\n f.write(newfile)\n\ndef write_prop_read_run(bo, export_path):\n 'Helper function for running a single round of optimisation.'\n bo.export_proposed(export_path)\n fill_in_experiment_values(export_path)\n bo.add_results(export_path)\n bo.run()\n\n\ndef get_max_yields(bo, num_rounds):\n results = pd.DataFrame(columns=bo.reaction.index_headers + [TARGET])\n for path in [\n TEMP_PATH\n ] + [\n TEMP_PATH + f'round{num}' for num in range(num_rounds)\n ]:\n results = pd.concat(\n [results, pd.read_csv(path + '.csv', index_col=0)],\n sort=False\n )\n return sorted(results[TARGET].tolist(), reverse=True)[:TOP_N]\n\ndef simulate_bo(seed, acquisition_func, batch_size, num_rounds, worst=False):\n bo = instantiate_bo(acquisition_func, batch_size)\n if worst:\n\n # Choose from bottom 10% of experiments\n worst_result_dict = get_worst_percentile(FULL_RESULT_DICT, centile=10)\n init_file = \",electrophile_SMILES_index,nucleophile_SMILES_index,ligand_SMILES_index,base_SMILES_index,solvent_SMILES_index,yield\\n\"\n count = 1\n random.seed(seed)\n for result in random.sample(list(worst_result_dict.keys()), batch_size):\n init_file += str(count) + \",\" + result + \",\" + str(\n worst_result_dict[result]\n ) + \"\\n\"\n count += 1\n with open(TEMP_PATH + 'init.csv', 'w') as f:\n f.write(init_file)\n 
bo.add_results(TEMP_PATH + 'init.csv')\n bo.run()\n else:\n bo.init_sample(seed=seed)\n print(bo.get_experiments())\n write_prop_read_run(bo, TEMP_PATH + 'init.csv')\n\n for num in range(num_rounds):\n print(f\"Starting round {num}\")\n write_prop_read_run(bo, TEMP_PATH + f\"round{num}.csv\")\n print(f\"Finished round {num}\")\n return get_max_yields(bo, num_rounds)\n\n\n# Key of acquisition functions\n# EI - expected improvement\n# TS - Thompson sampling\n# TS-EI - hybrid (custom implementation, 1 TS and n - 1 EI for batch size n)\n# EI-TS - hybrid (default implementation, 1 EI and n - 1 TS for batch size n)\n\nfor method in METHODS:\n for batch_size, num_rounds in BATCH_ROUNDS:\n print(\n f\"Testing bo with acquisition function {method}\",\n f\"\\n with a batch size of {batch_size}\",\n f\"\\n and doing {num_rounds} rounds\",\n )\n results_file = \"seed,maximum observed yield\" + \",\".join(\n str(num) for num in range(2, TOP_N + 1)\n ) + \"\\n\"\n name = f\"suzuki_{method}_{batch_size}_{batch_size * num_rounds}_{N_EXPERIMENTS}\"\n path = DATA_PATH + name + \".csv\"\n if os.path.isfile(path):\n # So we've already written data to it\n # No need to overwrite\n continue\n for index, seed in enumerate(SEEDS):\n print(f\"On number {index} of {N_EXPERIMENTS}\")\n result = simulate_bo(\n seed, method, batch_size, num_rounds\n )\n results_file += f\"{seed},{result}\\n\"\n\n with open(path, 'w') as f:\n f.write(results_file)\n","sub_path":"Shields paper/test_bo_suzuki.py","file_name":"test_bo_suzuki.py","file_ext":"py","file_size_in_byte":11019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"484792870","text":"import time\nfrom collections import OrderedDict\nimport torch\nimport torch.nn as nn\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom training.datasets.coco import get_loader\nfrom network.rtpose_vgg import get_model, use_vgg\n\n# Hyper-params\ndata_dir = '/data/coco/images'\nmask_dir = 
'/data/coco/'\njson_path = '/data/coco/COCO.json'\nopt = 'sgd'\nmomentum = 0.9\nweight_decay = 0.000\nnesterov = True\ninp_size = 368\nfeat_stride = 8\n\nmodel_path = './network/weight/'\n\n# Set Training parameters\nexp_name = 'original_rtpose'\nsave_dir = '/extra/tensorboy/models/{}'.format(exp_name)\n\nmax_epoch = 30\nlr_decay_epoch = {30, 60, 90, 120, 150, 180}\ninit_lr = 0.1\nlr_decay = 0.8\n\ngpus = [0,1, 2, 3]\nbatch_size = 20 * len(gpus)\nprint_freq = 20\n\ndef build_names():\n names = []\n\n for j in range(1, 7):\n for k in range(1, 3):\n names.append('loss_stage%d_L%d' % (j, k))\n return names\n\n\ndef get_loss(saved_for_loss, heat_temp, heat_weight,\n vec_temp, vec_weight):\n\n names = build_names()\n saved_for_log = OrderedDict()\n criterion = nn.MSELoss(size_average=True).cuda()\n total_loss = 0\n\n for j in range(6):\n pred1 = saved_for_loss[2 * j] * vec_weight\n \"\"\"\n print(\"pred1 sizes\")\n print(saved_for_loss[2*j].data.size())\n print(vec_weight.data.size())\n print(vec_temp.data.size())\n \"\"\"\n gt1 = vec_temp * vec_weight\n\n pred2 = saved_for_loss[2 * j + 1] * heat_weight\n gt2 = heat_weight * heat_temp\n \"\"\"\n print(\"pred2 sizes\")\n print(saved_for_loss[2*j+1].data.size())\n print(heat_weight.data.size())\n print(heat_temp.data.size())\n \"\"\"\n\n # Compute losses\n loss1 = criterion(pred1, gt1)\n loss2 = criterion(pred2, gt2) \n\n total_loss += loss1\n total_loss += loss2\n # print(total_loss)\n\n # Get value from Variable and save for log\n saved_for_log[names[2 * j]] = loss1.item()\n saved_for_log[names[2 * j + 1]] = loss2.item()\n\n saved_for_log['max_ht'] = torch.max(\n saved_for_loss[-1].data[:, 0:-1, :, :])\n saved_for_log['min_ht'] = torch.min(\n saved_for_loss[-1].data[:, 0:-1, :, :])\n saved_for_log['max_paf'] = torch.max(saved_for_loss[-2].data)\n saved_for_log['min_paf'] = torch.min(saved_for_loss[-2].data)\n\n return total_loss, saved_for_log\n \n\ndef train(train_loader, model, optimizer, epoch):\n batch_time = 
AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n\n # switch to train mode\n model.train()\n\n end = time.time()\n for i, (img, heatmap_target, heat_mask, paf_target, paf_mask) in enumerate(train_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n img = img.cuda()\n heatmap_target = heatmap_target.cuda()\n heat_mask = heat_mask.cuda()\n paf_target = paf_target.cuda()\n paf_mask = paf_mask.cuda()\n \n # compute output\n _,saved_for_loss = model(img)\n \n total_loss, saved_for_log = get_loss(saved_for_loss, heatmap_target, heat_mask,\n paf_target, paf_mask)\n \n losses.update(total_loss.item(), img.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n total_loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n if i % print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})'.format(\n epoch, i, len(train_loader), batch_time=batch_time,\n data_time=data_time, loss=losses))\n\n return losses.avg \n \ndef validate(val_loader, model, epoch):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n\n # switch to train mode\n model.train()\n\n end = time.time()\n for i, (img, heatmap_target, heat_mask, paf_target, paf_mask) in enumerate(val_loader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n img = img.cuda()\n heatmap_target = heatmap_target.cuda()\n heat_mask = heat_mask.cuda()\n paf_target = paf_target.cuda()\n paf_mask = paf_mask.cuda()\n \n # compute output\n _,saved_for_loss = model(img)\n \n total_loss, saved_for_log = get_loss(saved_for_loss, heatmap_target, heat_mask,\n paf_target, paf_mask)\n\n losses.update(total_loss.item(), img.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n total_loss.backward()\n 
optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time() \n return losses.avg\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\n\ndef adjust_learning_rate(optimizer, epoch):\n \"\"\"Sets the learning rate to the initial LR decayed by 10 every 30 epochs\"\"\"\n lr = args.lr * (0.1 ** (epoch // 30))\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\nprint(\"Loading dataset...\")\n# load data\ntrain_data = get_loader(json_path, data_dir,\n mask_dir, inp_size, feat_stride,\n 'vgg', batch_size,\n shuffle=True, training=True)\nprint('train dataset len: {}'.format(len(train_data.dataset)))\n\n# validation data\nvalid_data = get_loader(json_path, data_dir, mask_dir, inp_size,\n feat_stride, preprocess='vgg', training=False,\n batch_size=batch_size, shuffle=True)\nprint('val dataset len: {}'.format(len(valid_data.dataset)))\n\n# model\nmodel = get_model(trunk='vgg19')\nmodel = torch.nn.DataParallel(model).cuda()\n# load pretrained\nuse_vgg(model, model_path, 'vgg19')\n\n\n# Fix the VGG weights first, and then the weights will be released\nfor i in range(20):\n for param in model.module.model0[i].parameters():\n param.requires_grad = False\n\ntrainable_vars = [param for param in model.parameters() if param.requires_grad]\noptimizer = torch.optim.SGD(trainable_vars, lr=init_lr,\n momentum=momentum,\n weight_decay=weight_decay,\n nesterov=nesterov)\n \n \nfor epoch in range(5):\n #adjust_learning_rate(optimizer, epoch)\n\n # train for one epoch\n train_loss = train(train_data, model, optimizer, epoch)\n\n # evaluate on validation set\n val_loss = validate(valid_data, model, epoch) \n \nfor param in 
model.module.parameters():\n param.requires_grad = True\n\ntrainable_vars = [param for param in model.parameters() if param.requires_grad]\noptimizer = torch.optim.SGD(trainable_vars, lr=init_lr,\n momentum=momentum,\n weight_decay=weight_decay,\n nesterov=nesterov) \n \nlr_scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.8, patience=5, verbose=True, threshold=0.0001, threshold_mode='rel', cooldown=3, min_lr=0, eps=1e-08)\n\nfor epoch in range(300):\n #adjust_learning_rate(optimizer, epoch)\n\n # train for one epoch\n train_loss = train(train_data, model, optimizer, epoch)\n\n # evaluate on validation set\n val_loss = validate(valid_data, model, epoch) \n lr_scheduler.step(val_loss) \n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"397319397","text":"from __future__ import division\n\nimport numpy as np\nimport sklearn\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LogisticRegression\n\nfrom util.VR_algorithm import ZMQ_VR_agent\nfrom util.cost_func import *\nfrom util.read_data import *\nimport zmq\n\n\ndef sk_train(X, y, C=1):\n lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-7, max_iter=300)\n lr.fit(X, np.squeeze(y))\n return lr.coef_.T\n\nif __name__ == '__main__':\n N_agent = 2\n\n # X, y = read_cifar()\n\n # X, y = gen_lr_data(1000, 28*28)\n # norm = sklearn.preprocessing.Normalizer()\n # X = norm.fit_transform(X)\n X, y = read_mnist()\n\n # make data evenly distributed\n X = X[:-1] if X.shape[0]%2 else X\n y = y[:-1] if y.shape[0]%2 else y\n N = X.shape[0]\n print (X.shape, y.shape)\n\n C, N_epoch = 1, 1\n rho = 1/X.shape[0]/C\n w_star = sk_train(X,y,C)\n\n context = zmq.Context()\n socket = context.socket(zmq.PAIR)\n port = '6666'\n socket.bind(\"tcp://*:%s\" % port)\n print (\"Start listening.....\")\n\n # server get first half data\n agent = ZMQ_VR_agent(X[:N//2], 
y[:N//2], w_star, logistic_regression, [socket], rho = rho, name = 0)\n mu = 2.0\n max_ite = 40000\n kwargs = {'err_per_iter': 20, 'metric': 'MSD', 'using_sgd': 2}\n agent.train(mu, max_ite, 'SVRG', 'Diffusion', **kwargs)\n\n socket.close()\n","sub_path":"multi_VR_with_zmq_server.py","file_name":"multi_VR_with_zmq_server.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"523909657","text":"import numpy as np\nimport torch\n\n\ndef clip_by_norm(x, max_norm):\n norm = torch.norm(x, dim=-1, keepdim=True)\n max_norm_x = x / norm * max_norm\n replace = (norm < max_norm).float()\n x = max_norm_x * replace + x * (1-replace)\n return x\n\n\nclass FCLoss:\n def __init__(self, mu=0.1):\n\n self.device = torch.device('cuda')\n device = self.device\n \n self.transformation_matrix = torch.tensor(np.array([[0,0,0,0,0,-1,0,1,0], [0,0,1,0,0,0,-1,0,0], [0,-1,0,1,0,0,0,0,0]])).float().to(device)\n self.eye3 = torch.tensor(np.eye(3).reshape(1, 1, 3, 3)).float().to(device)\n self.eye6 = torch.tensor(np.eye(6).reshape(1,6,6)).float().to(device)\n \n self.eps = torch.tensor(0.01).float().to(device)\n self.mu = torch.tensor(mu).float().to(device)\n self.sqrt_sq_mu_1 = torch.sqrt(self.mu*self.mu+1)\n self.relu = torch.nn.ReLU()\n\n def l2_norm(self, x):\n if len(x.shape) == 3:\n return torch.sum(x*x, (1, 2))\n if len(x.shape) == 2:\n return torch.sum(x*x, (1))\n raise ValueError\n\n def x_to_G(self, x):\n \"\"\"\n x: B x N x 3\n G: B x 6 x 3N\n \"\"\"\n B = x.shape[0]\n N = x.shape[1]\n xi_cross = torch.matmul(x, self.transformation_matrix).reshape([B,N,3,3]).transpose(1, 2).reshape([B, 3, 3*N])\n I = self.eye3.repeat([B, N, 1, 1]).transpose(1,2).reshape([B, 3, 3*N])\n G = torch.stack([I, xi_cross], 1).reshape([B, 6, 3*N])\n return G\n\n def loss_8a(self, G):\n \"\"\"\n G: B x 6 x 3N\n \"\"\"\n Gt = G.transpose(1,2)\n\n temp = self.eps * self.eye6\n temp = torch.matmul(G, Gt) - temp\n eigval 
= torch.symeig(temp.cpu(), eigenvectors=True)[0].to(self.device)\n rnev = self.relu(-eigval)\n result = torch.sum(rnev * rnev, 1)\n return result\n \n def loss_8b(self, f, G):\n \"\"\"\n G: B x 6 x 3N\n f2: B x N x 3\n \"\"\"\n B = f.shape[0]\n N = f.shape[1]\n # f = f2 * mask.unsqueeze(-1)\n return self.relu(self.l2_norm(torch.matmul(G, f.reshape(B, 3*N, 1))))\n \n def loss_8c(self, normal, friction):\n \"\"\"\n normal: B x N x 3\n friction: B x N x 3\n x: B x N x 3\n \"\"\"\n normal = normal / torch.norm(normal, dim=-1, keepdim=True)\n friction = friction / torch.norm(friction, dim=-1, keepdim=True)\n left = torch.einsum('ijk, ijk->ij', normal, friction)\n right = 1 / self.sqrt_sq_mu_1\n diff = left - right\n return torch.sum(self.relu(-diff), 1)\n \n def fc_loss(self, x, normal):\n G = self.x_to_G(x)\n l8a = self.loss_8a(G)\n l8b = self.loss_8b(normal, G)\n # l8c = self.loss_8c(normal, friction)\n # l8d = self.dist_loss(obj_code, x)\n return l8b + l8a\n\nif __name__ == \"__main__\":\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n plt.ion()\n ax = plt.subplot(111, projection='3d')\n\n n = 5\n\n fc = FCLoss()\n x = torch.tensor(np.random.random([1, n, 3]), requires_grad=True).float().cuda()\n f = torch.tensor(np.random.random([1, n, 3]), requires_grad=True).float().cuda()\n f = f / torch.norm(f, dim=-1, keepdim=True)\n G = fc.x_to_G(x)\n\n while True:\n ax.cla()\n ax.quiver(np.zeros([n]), np.zeros([n]), np.zeros([n]), f.detach().cpu().numpy()[0,:,0], f.detach().cpu().numpy()[0,:,1], f.detach().cpu().numpy()[0,:,2], length=0.1, normalize=True, color='red')\n plt.pause(1e-5)\n\n l8b = fc.loss_8b(f, G)\n print(l8b)\n grad = torch.autograd.grad(l8b, f)[0]\n f = f - grad * 0.1\n f = f / torch.norm(f, dim=-1, keepdim=True)\n\n","sub_path":"VariationalFC/Losses.py","file_name":"Losses.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} 
+{"seq_id":"30836142","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 19 11:49:39 2015\n\n@author: Ingo Scholtes\n\"\"\"\n\nimport igraph \nimport collections\nimport datetime as dt\nimport time\n\nclass TemporalNetwork:\n \"\"\"A class representing a temporal network\"\"\"\n \n def __init__(self, tedges = None):\n \"\"\"Constructor generating an empty temporal network\"\"\"\n self.tedges = []\n self.nodes = []\n if tedges is not None:\n for e in tedges:\n self.tedges.append(e) \n for e in self.tedges:\n source = e[0]\n target = e[1]\n if source not in self.nodes:\n self.nodes.append(source)\n if target not in self.nodes:\n self.nodes.append(target) \n self.twopaths = []\n self.twopathsByNode = {}\n self.tpcount = -1\n \n \n def readFile(filename, sep=',', fformat=\"TEDGE\", timestampformat=\"%s\"):\n \"\"\" Reads time-stamped edges from TEDGE or TRIGRAM file. If fformat is TEDGES,\n the file is expected to contain lines in the format 'v,w,t' each line \n representing a directed time-stamped link from v to w at time t.\n Semantics of columns should be given in a header file indicating either \n 'node1,node2,time' or 'source,target,time' (in arbitrary order).\n If fformat is TRIGRAM the file is expected to contain lines in the format\n 'u,v,w' each line representing a time-respecting path (u,v) -> (v,w) consisting \n of two consecutive links (u,v) and (v,w). 
Timestamps can be integer numbers or\n string timestamps (in which case the timestampformat string is used for parsing)\n \"\"\"\n \n assert filename is not \"\"\n \n f = open(filename, 'r')\n tedges = []\n \n header = f.readline()\n header = header.split(sep)\n \n # Support for arbitrary column ordering\n time_ix = -1\n source_ix = -1\n target_ix = -1 \n for i in range(len(header)):\n if header[i] == 'node1' or header[i] == 'source':\n source_ix = i\n elif header[i] == 'node2' or header[i] == 'target':\n target_ix = i\n elif header[i] == 'time':\n time_ix = i\n \n # Read time-stamped edges\n line = f.readline()\n while not line is '':\n fields = line.rstrip().split(sep)\n if fformat ==\"TEDGE\":\n timestamp = fields[time_ix]\n if (timestamp.isdigit()):\n t = int(timestamp)\n else:\n x = dt.datetime.strptime(timestamp, \"%Y-%m-%d %H:%M\")\n t = int(time.mktime(x.timetuple()))\n\n tedge = (fields[source_ix], fields[target_ix], t)\n tedges.append(tedge)\n elif fformat ==\"TRIGRAM\":\n # TODO: Add support for trigram files\n pass\n line = f.readline()\n return TemporalNetwork(tedges)\n\n\n\n def addEdge(self, source, target, time):\n \"\"\"Adds a (directed) time-stamped edge to the temporal network\"\"\"\n \n self.tedges.append((source, target, time))\n if source not in self.nodes:\n self.nodes.append(source)\n if target not in self.nodes:\n self.nodes.append(target)\n \n self.tpcount = -1\n\n\n \n def vcount(self):\n \"\"\"Returns the number of vertices in the static network\"\"\"\n return len(self.nodes)\n\n \n def ecount(self):\n \"\"\"Returns the number of time-stamped edges\"\"\"\n return len(self.tedges)\n\n\n def extractTwoPaths(self, delta=1):\n \"\"\"Extracts all time-respecting paths of length two. 
The delta parameter indicates the maximum\n temporal distance below which two consecutive links will be considered as a time-respecting path.\n For (u,v,3) and (v,w,7) a time-respecting path (u,v)->(v,w) will be inferred for all delta < 4, \n while no time-respecting path will be inferred for all delta >= 4.\n \"\"\"\n \n self.twopaths = []\n \n # An index structure to quickly access tedges by time\n time = collections.OrderedDict()\n for e in self.tedges:\n if not e[2] in time:\n time[e[2]] = []\n time[e[2]].append(e)\n\n # An index structure to quickly access tedges by time and target/source\n targets = collections.OrderedDict()\n sources = collections.OrderedDict()\n for e in self.tedges:\n if not e[2] in targets:\n targets[e[2]] = dict()\n if not e[2] in sources:\n sources[e[2]] = dict()\n if not e[1] in targets[e[2]]:\n targets[e[2]][e[1]] = []\n if not e[0] in sources[e[2]]:\n sources[e[2]][e[0]] = []\n targets[e[2]][e[1]].append(e)\n sources[e[2]][e[0]].append(e)\n\n # Extract time-respecting paths of length two \n prev_t = -1\n for t in time:\n if prev_t ==-1: \n pass\n elif prev_t < t-delta:\n pass\n else:\n for v in targets[prev_t]:\n if v in sources[t]:\n for e_out in sources[t][v]:\n for e_in in targets[prev_t][v]:\n s = e_in[0]\n d = e_out[1]\n \n # TODO: Add support for weighted time-\n # stamped links\n pass\n indeg_v = len(targets[prev_t][v])\n outdeg_v = len(sources[t][v]) \n \n # Create a weighted two-path tuple\n # (s, v, d, weight)\n # representing (s,v) -> (v,d)\n two_path = (s,v,d, float(1)/(indeg_v*outdeg_v))\n self.twopaths.append(two_path)\n if not v in self.twopathsByNode:\n # Generate dictionary indexed by time stamps\n self.twopathsByNode[v] = {}\n if not t in self.twopathsByNode[v]:\n # Generate list taking all two paths through node v at time t\n self.twopathsByNode[v][t] = []\n self.twopathsByNode[v][t].append(two_path)\n prev_t = t\n \n # Update the count of two-paths\n self.tpcount = len(self.twopaths)\n\n\n \n def 
TwoPathCount(self):\n \"\"\"Returns the total number of time-respecting paths of length two which have\n been extracted from the time-stamped edge sequence.\"\"\"\n \n # If two-paths have not been extracted yet, do it now\n if self.tpcount == -1:\n self.extractTwoPaths()\n\n return self.tpcount\n \n \n \n def iGraphFirstOrder(self):\n \"\"\"Returns the first-order time-aggregated network\n corresponding to this temporal network. This network corresponds to \n a first-order Markov model reproducing the link statistics in the \n weighted, time-aggregated network.\"\"\"\n \n # If two-paths have not been extracted yet, do it now\n if self.tpcount == -1:\n self.extractTwoPaths()\n\n g1 = igraph.Graph(directed=True)\n \n for v in self.nodes:\n g1.add_vertex(str(v))\n\n # We first keep multiple (weighted) edges\n for tp in self.twopaths:\n g1.add_edge(str(tp[0]), str(tp[1]), weight=tp[3])\n g1.add_edge(str(tp[1]), str(tp[2]), weight=tp[3])\n \n # We then collapse them, while summing their weights\n g1 = g1.simplify(combine_edges=\"sum\")\n return g1\n\n\n \n def iGraphSecondOrder(self):\n \"\"\"Returns the second-order time-aggregated network\n corresponding to this temporal network. This network corresponds to \n a second-order Markov model reproducing both the link statistics and \n (first-order) order correlations in the underlying temporal network.\"\"\"\n\n if self.tpcount == -1:\n self.extractTwoPaths() \n \n g2 = igraph.Graph(directed=True)\n g2.vs[\"name\"] = []\n for tp in self.twopaths: \n n1 = str(tp[0])+\";\"+str(tp[1])\n n2 = str(tp[1])+\";\"+str(tp[2])\n if not n1 in g2.vs[\"name\"]:\n g2.add_vertex(name=n1)\n if not n2 in g2.vs[\"name\"]:\n g2.add_vertex(name=n2)\n if not g2.are_connected(n1, n2):\n g2.add_edge(n1, n2, weight=tp[3])\n return g2\n\n\n \n def iGraphSecondOrderNull(self):\n \"\"\"Returns a second-order null Markov model \n corresponding to the first-order aggregate network. 
This network\n is a second-order representation of the weighted time-aggregated network.\"\"\"\n\n g1 = self.iGraphFirstOrder()\n \n D = g1.strength(mode=\"OUT\", weights=g1.es[\"weight\"])\n \n g2n = igraph.Graph(directed=True)\n g2n.vs[\"name\"] = []\n \n for e_in in g1.es:\n for e_out in g1.es:\n if e_in.target == e_out.source:\n n1 = g1.vs[e_in.source][\"name\"]+\";\"+g1.vs[e_in.target][\"name\"]\n n2 = g1.vs[e_out.source][\"name\"]+\";\"+g1.vs[e_out.target][\"name\"]\n if n1 not in g2n.vs[\"name\"]:\n g2n.add_vertex(name=n1)\n if n2 not in g2n.vs[\"name\"]:\n g2n.add_vertex(name=n2)\n \n w = 0.5 * e_in[\"weight\"] * e_out[\"weight\"] / D[e_out.source]\n g2n.add_edge(n1, n2, weight = w)\n return g2n\n\n\n \n def exportMovie(self, filename, fps=5, dpi=100):\n \"\"\"Exports an animated movie showing the temporal\n evolution of the network\"\"\"\n \n import matplotlib.animation as anim\n import matplotlib.image as mpimg\n import matplotlib.pyplot as plt\n \n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_aspect('equal')\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n \n \n def next_img(n): \n g = igraph.Graph.Erdos_Renyi(n=5, m=7)\n igraph.plot(g, \"frame.png\")\n return plt.imshow(mpimg.imread(\"frame.png\"))\n \n ani = anim.FuncAnimation(fig, next_img, 300, interval=30)\n writer = anim.FFMpegWriter(fps=fps)\n ani.save(filename, writer=writer, dpi=dpi)","sub_path":"pyTempNet/TemporalNetwork.py","file_name":"TemporalNetwork.py","file_ext":"py","file_size_in_byte":10997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"550998337","text":"from panda3d.core import loadPrcFile\nloadPrcFile('./config/conf.prc')\n\nimport sys, os\n\n#Panda 3D Imports\nimport panda3d\nfrom panda3d.core import Filename\nfrom direct.showbase.ShowBase import ShowBase\n\n#Custom Functions\nfrom environment.position import quad_position\nfrom computer_vision.quadrotor_cv import computer_vision\nfrom 
computer_vision.camera_calibration import calibration\nfrom models.world_setup import world_setup, quad_setup\nfrom models.camera_control import camera_control\nfrom config.menu import menu\nfrom computer_vision.img_2_cv import get_image_cv\n\n\"\"\"\nINF209B − TÓPICOS ESPECIAIS EM PROCESSAMENTO DE SINAIS:\n\nVISAO COMPUTACIONAL\n\nPROJETO\n\nRA: 21201920754\nNOME: RAFAEL COSTA FERNANDES\nE−MAIL: COSTA.FERNANDES@UFABC.EDU.BR\n\nDESCRIÇÃO:\n Ambiente 3D de simulação do quadrirrotor. \n Dividido em tarefas, Camera Calibration e Drone position\n Se não haver arquivo de calibração de camera no caminho ./config/camera_calibration.npz, realiza a calibração da câmera tirando 70 fotos aleatórias do padrão xadrez\n \n A bandeira IMG_POS_DETER adiciona a tarefa de determinação de posição por visão computacional, utilizando um marco artificial que já está no ambiente 3D, na origem. \n A bandeira REAL_STATE_CONTROL determina se o algoritmo de controle será alimentado pelos valores REAIS do estado ou os valores estimados pelos sensores a bordo\n \n O algoritmo não precisa rodar em tempo real, a simulação está totalmente disvinculada da velocidade de renderização do ambiente 3D.\n Se a simulação 3D estiver muito lenta, recomendo mudar a resolução nativa no arquivo de configuração ./config/conf.prc\n Mudando a resolução PROVAVELMENTE será necessário recalibrar a câmera, mas sem problemas adicionais. \n \n Recomendado rodar em um computador com placa de vídeo. 
\n Em um i5-4460 com uma AMD R9-290:\n Taxa de FPS foi cerca de 95 FPS com a bandeira IMG_POS_DETER = False e REAL_STATE_CONTROL = True\n Taxa de FPS foi cerca de 35 FPS com a bandeira IMG_POS_DETER = True e REAL_STATE_CONTROL = False\n \n O algoritmo de detecção de pontos FAST ajudou na velocidade da detecção, mas a performance precisa ser melhorada \n (principalmente para frames em que o tabuleiro de xadrez aparece parcialmente)\n \n \n\"\"\"\n\n\n# POSITION AND ATTITUDE ESTIMATION BY IMAGE, ELSE BY MEMS SENSORS\nIMG_POS_DETER = True\n\n# REAL STATE CONTROL ELSE BY ESTIMATION METHODS\nREAL_CTRL = False\n\n# HOVER FLIGHT ELSE RANDOM INITIAL STATE\nHOVER = True\n\n# NUMBER OF EPISODE TIMESTEPS \nEPISODE_STEPS = 3000\n\n# TOTAL NUMBER OF EPISODES\nERROR_AQS_EPISODES = 40\n\n# PATH TO ERROR DATALOG\nERROR_PATH = './data/hover/' if HOVER else './data/random_initial_state/'\n\nmydir = os.path.abspath(sys.path[0])\n\nmydir = Filename.fromOsSpecific(mydir).getFullpath()\n\nclass MyApp(ShowBase):\n def __init__(self):\n \n ShowBase.__init__(self) \n render = self.render\n \n # CAMERA NEUTRAL POSITION\n self.cam_neutral_pos = panda3d.core.LPoint3f(5, 5, 7)\n\n self.img_buffer = get_image_cv(self)\n \n # MODELS SETUP\n world_setup(self, render, mydir)\n quad_setup(self, render, mydir)\n \n # CALIBRATION ALGORITHM\n self.camera_cal = calibration(self, self.img_buffer) \n \n if self.camera_cal.calibrated:\n self.run_setup()\n \n def run_setup(self):\n # DRONE POSITION\n self.drone = quad_position(self, self.quad_model, self.prop_models, EPISODE_STEPS, REAL_CTRL, IMG_POS_DETER, ERROR_AQS_EPISODES, ERROR_PATH, HOVER)\n \n # COMPUTER VISION\n self.cv = computer_vision(self, self.quad_model, self.drone.env, self.drone.sensor, self.drone, self.img_buffer, self.camera_cal, mydir, IMG_POS_DETER) \n \n # IN ENGINE MENU (REAL TIME CONTROLLER CHANGE)\n menu(self, self.drone, self.drone.sensor, self.cv)\n \n # CAMERA CONTROL\n camera_control(self, self.render) \n 
print(sys.modules.keys())\napp = MyApp()\napp.run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"183963536","text":"def classifica_idade(a):\n a = input('Qual é a sua idade? ')\n if a>=0 and a<=11:\n print('crianca')\n elif a >=12 and a <=17:\n print('adolescente')\n else:\n print('adulto')\n \n\n ","sub_path":"backup/user_267/ch4_2020_04_06_14_42_29_765172.py","file_name":"ch4_2020_04_06_14_42_29_765172.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"397230207","text":"#import sys\n#print sys.path\n#from display.default.result import ResultData\n#print \" ******\", dir(ResultData)\n#from display.default.detail import DetailData as DefaultDetailData\n\n#class DetailData(DefaultDetailData):\n# def __activate__(self, context):\n# DefaultDetailData.__activate__(self, context)\n# \n\nfrom com.googlecode.fascinator.common import JsonConfigHelper\n\nfrom java.io import InputStreamReader\n\nclass DetailData:\n def __activate__(self, context):\n self.services = context[\"Services\"]\n self.formData = context[\"formData\"]\n self.metadata = context[\"metadata\"]\n \n oid = self.metadata.get(\"id\")\n manifest = self.__readManifest(oid)\n self.__manifest = manifest.getJsonMap(\"manifest\")\n \n def getManifest(self):\n return self.__manifest\n \n def __readManifest(self, oid):\n object = self.services.getStorage().getObject(oid)\n sourceId = object.getSourceId()\n payload = object.getPayload(sourceId)\n payloadReader = InputStreamReader(payload.open(), \"UTF-8\")\n manifest = JsonConfigHelper(payloadReader)\n payloadReader.close()\n payload.close()\n object.close()\n return manifest\n 
\n","sub_path":"config/src/main/config/portal/default/mint/scripts/display/name-authority/detail.py","file_name":"detail.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"184234008","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndata_set = pd.read_csv('Salary_Data.csv')\n# : toma todas las filas\n# :-1 toma todas -1 , en este caso, toma todas\n# las columnas menos la ultima, ya que es la\n# variable independiente.\nX = data_set.iloc[:, :-1].values\nY = data_set.iloc[:, 1].values\n\n# Splitting dataset en training y test\nfrom sklearn.model_selection import train_test_split\n\n# se le envia la matriz X, y la matriz Y,\n# y el numero de valores que se le asignaran\n# al test_size, 0.2 representa el 20%\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=1 / 3, random_state=0)\n\n# Fitting SLR(Simple Linear Regression) to the training set\nfrom sklearn.linear_model import LinearRegression\n\nregressor = LinearRegression()\nregressor.fit(X_train, Y_train)\n\n# Prediciendo los valores de las matrices de train\n\ny_pred = regressor.predict(X_test) # se predicen los valores del test set\n\n# Graficando\n\nplt.scatter(X_test, Y_test)\nplt.plot(X_train, regressor.predict(X_train))\nplt.title('Salary Vs Experience Test set')\nplt.xlabel(\"User Experience\")\nplt.ylabel(\"Salary\")\nplt.show()\n","sub_path":"Simple_Linear_Regression/simple_linear_regression.py","file_name":"simple_linear_regression.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"415347070","text":"import click\nimport os\nimport random\nimport numpy as np\n\nfrom PIL import Image\nfrom os.path import join\nfrom itertools import islice\nfrom collections import defaultdict\n\nimport pokemon_data\nfrom config import grouped_ids\n\n\n# region Utils\n\ndef 
random_in(a, b):\n return random.random() * (b - a) + a\n\n\ndef tiled_sample(sequence):\n while True:\n if len(sequence) == 0:\n yield None\n else:\n for x in random.sample(sequence, len(sequence)):\n yield x\n\n# endregion\n\n\ndef load_pokemon_data():\n \"\"\"\n Load the complete pokemon dataset (with info about types), filtered using the whitelist\n :return:\n \"\"\"\n return [row for i, row in enumerate(pokemon_data.load()) if i+1 in whitelist_ids]\n\n\ndef get_crop_from_idx(renders, idx):\n \"\"\"\n Generate a random crop on the fly for the pokemon with the specified idx.\n :param renders: preallocated dataset\n :param idx: pokedex number of the pokemon\n :return: random generated crop\n \"\"\"\n if len(renders[idx]) == 0:\n return None\n\n data = random.sample(renders[idx], 1)[0]\n img: Image = data['img']\n w, h = img.size\n tw = random_in(200, 300)\n th = random_in(200, 300)\n pw = random_in(50, w - tw - 50)\n ph = random_in(50, h - th - 50)\n cropped = img.crop((pw, ph, pw+tw, ph+th))\n return np.array(cropped.resize((256, 256)))\n\n\ndef generator(renders):\n \"\"\"\n Return an endless generator of random crops that can be iterated indefinitely.\n The result for each iteration is a dict, with pokemon types as keys and crops as values.\n The crops are numpy array with shape (256, 256, 3) and dtype np.uint8\n :param renders: dataset of renders\n :return: a random crop generator\n \"\"\"\n ids_we_have = renders.keys()\n grouped_ids_we_have = {k: [i for i in ids if i in ids_we_have] for k, ids in grouped_ids.items()}\n\n types = [k for k, ids in grouped_ids_we_have.items() if len(ids) > 0]\n samplers = {t: tiled_sample(grouped_ids_we_have[t]) for t in types}\n ids_sampler = zip(*samplers.values())\n ids_sampler = (dict(zip(types, v)) for v in ids_sampler)\n\n for ids in ids_sampler:\n yield {t: get_crop_from_idx(renders, idx) for t, idx in ids.items()}\n\n\ndef load_renders(renders_path):\n \"\"\"\n Allocate the full renders dataset into memory\n :param 
renders_path: path to renders\n :return: complete dataset of renders\n \"\"\"\n images = defaultdict(list)\n\n subfolders = os.listdir(renders_path)\n for sf in subfolders:\n renders = os.listdir(join(renders_path, sf))\n for render in renders:\n render = join(renders_path, sf, render)\n img = Image.open(render)\n img.load() # required for png.split()\n no_alpha = Image.new(\"RGB\", img.size, (255, 255, 255))\n no_alpha.paste(img, mask=img.split()[3]) # 3 is the alpha channel\n\n images[int(sf)].append({'name': render, 'img': no_alpha})\n\n return images\n\n\ndef get_render_folders(renders_path):\n subfolders = os.listdir(renders_path)\n for sf in subfolders:\n renders = os.listdir(join(renders_path, sf))\n renders = [join(sf, render) for render in renders]\n yield int(sf), renders\n\n\ndef load_renders_iter(renders_path):\n \"\"\"\n Allocate the full renders dataset into memory\n :param renders_path: path to renders\n :return: complete dataset of renders\n \"\"\"\n subfolders = os.listdir(renders_path)\n for sf in subfolders:\n renders = os.listdir(join(renders_path, sf))\n for render in renders:\n render = join(renders_path, sf, render)\n img = Image.open(render)\n img.load() # required for png.split()\n no_alpha = Image.new(\"RGB\", img.size, (255, 255, 255))\n no_alpha.paste(img, mask=img.split()[3]) # 3 is the alpha channel\n\n yield int(sf), render, no_alpha\n\n\n@click.command()\n@click.argument('renders_path')\ndef test(renders_path):\n renders = load_renders(renders_path)\n gen = generator(renders)\n\n for vals in gen:\n for t, img in vals.items():\n im = Image.fromarray(img)\n im.show('Crop')\n input()\n\n\nif __name__ == '__main__':\n test()\n","sub_path":"python/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":4220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"476400450","text":"from __future__ import annotations\n\n\nfrom argparse import ArgumentParser\nfrom 
concurrent.futures import ThreadPoolExecutor\nfrom pebble import ProcessPool\nimport concurrent.futures as cf\nfrom functools import partial\nfrom pathlib import PosixPath\nimport os\nimport math\nimport itertools as it\nfrom glob import iglob, glob\nimport subprocess as sp\nfrom tqdm import tqdm\nfrom typing import Iterator, List, Iterable, Tuple, Union\n\nimport re\n\nfrom extractor import Extractor\n\n\nPath = Union[str, PosixPath]\n\n\ndef process(fname: str, max_length: int, max_wdith: int) -> List[str]:\n extractor = Extractor(max_path_length=8, max_path_width=2)\n try:\n paths = extractor.extract_paths(fname)\n except (ValueError, SyntaxError, RecursionError):\n return list()\n return list(paths)\n\n\ndef write_lines(fname: str, lines: Iterable[str]) -> None:\n with open(fname, \"a\", encoding=\"ISO-8859-1\") as stream:\n stream.writelines(map(mask_method_name, lines))\n\n\ndef mask_method_name(line: str) -> str:\n method_name, _, _ = line.partition(\" \")\n pattern = re.compile(re.escape(f\" {method_name},\"))\n return pattern.sub(\" METHOD_NAME,\", line)\n\n\ndef to_str_path(list_path: List[str]) -> str:\n return f\"{list_path[0]},{'|'.join(list_path[1:-1])},{list_path[-1]}\"\n\n\ndef make_posix_path(path: Path) -> PosixPath:\n return PosixPath(path) if isinstance(path, str) else path\n\n\ndef concatenate_path_conext_files(mined_dir_path: Path) -> None:\n mined_dir_path = make_posix_path(mined_dir_path)\n dtq = tqdm([\"train\", \"test\", \"val\"], desc=\"concatenating ast path conext files\")\n for _dir in dtq:\n file_dir = str(mined_dir_path / f\"{_dir}\")\n concate_sh = f\"cat {file_dir}/*.c2v > {file_dir}/path_contexts.csv\"\n sp.run(concate_sh, shell=True, check=True)\n\n for f in iglob(str(mined_dir_path / \"*/*.c2v\")):\n os.remove(f)\n\n print(\"Done concatenating all path_contexts from AST miner to a single file\")\n\n\ndef source_files(data_dir: str):\n for fname in iglob(f\"{data_dir}/*/**/[!setup]*.py\", recursive=True):\n if 
os.path.isfile(fname) and not fname.startswith(\"test\"):\n yield fname\n\n\ndef chunker(iterable, n, fillvalue=None):\n \"Collect data into fixed-length chunks or blocks\"\n # chunker('ABCDEFG', 3, 'x') --> ABC DEF Gxx\"\n args = [iter(iterable)] * n\n return it.zip_longest(*args, fillvalue=fillvalue)\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"-maxlen\", \"--max_path_length\", dest=\"max_path_length\", required=False, default=8)\n parser.add_argument(\"-maxwidth\", \"--max_path_width\", dest=\"max_path_width\", required=False, default=2)\n parser.add_argument(\"-workers\", \"--max_workers\", dest=\"max_workers\", required=False, default=None)\n parser.add_argument(\"-in_dir\", \"--in_dir\", dest=\"in_dir\", required=True)\n parser.add_argument(\"-out_dir\", \"--out_dir\", dest=\"out_dir\", required=True)\n # parser.add_argument(\"-file\", \"--file\", dest=\"file\", required=False)\n args = parser.parse_args()\n\n TIMEOUT = 60 * 10\n MAX_WORKERS = int(args.max_workers)\n MAX_LENGTH = args.max_path_length\n MAX_WIDTH = args.max_path_width\n REPOS = args.in_dir\n OUTPUT = args.out_dir\n\n writes = list()\n futures = list()\n with ProcessPool(max_workers=MAX_WORKERS) as pool, ThreadPoolExecutor(\n max_workers=1\n ) as writer:\n futures = {\n pool.schedule(process, args=[fname, MAX_LENGTH, MAX_WIDTH], timeout=TIMEOUT): fname\n for fname in source_files(REPOS)\n }\n\n for future in tqdm(cf.as_completed(futures), total=len(futures)):\n fname = futures[future]\n splitted = fname.split(\"/\")\n project = splitted[2]\n bin_ = splitted[1]\n c2v_file = f\"{OUTPUT}/{bin_}/{project}.c2v\"\n try:\n paths = future.result()\n except cf.TimeoutError:\n continue\n if paths:\n writes.append(writer.submit(partial(write_lines, c2v_file), paths))\n\n cf.wait(writes)\n\n 
concatenate_path_conext_files(mined_dir_path=OUTPUT)\n","sub_path":"python_extractor/extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":4112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"275741022","text":"from bs4 import BeautifulSoup\nimport re\n\n\n# Need to wget all the years I want.\n\ndef read_win_loss(filename):\n filename = 'uw/1950-schedule.html'\n with open(filename, 'r', encoding='utf-8') as tempfile:\n data = tempfile.read()\n indx = [m.start() for m in re.finditer('game_result', data)]\n ","sub_path":"blargh/Code/apple_cup_relevance/read_records.py","file_name":"read_records.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"253515931","text":"# IMPORTANT: CHANGE ALL ELEMENTS WITH '$'\n\nimport botogram\nimport sqlite3\nbot = botogram.create(\"$YOUR-API-KEY\")\ndat = sqlite3.connect('dat1.db', timeout=0)\nd = dat.cursor()\n# d.execute(\"DROP TABLE IF EXISTS request\")\n# d.execute(\"DROP TABLE IF EXISTS mess\")\n# d.execute(\"DROP TABLE IF EXISTS ids\")\n# d.execute(\"VACUUM\")\nd.execute(\"CREATE TABLE IF NOT EXISTS request (name TEXT, link TEXT, userid INTEGER PRIMARY KEY, username TEXT, nameuser TEXT, stage INTEGER DEFAULT 1, type TEXT, votes INTEGER DEFAULT 0, mesid INTEGER)\")\nd.execute(\"CREATE TABLE IF NOT EXISTS ids (id INTEGER PRIMARY KEY, type INTEGER)\")\nd.execute('CREATE TABLE IF NOT EXISTS mess (mesid INTEGER, user INTEGER PRIMARY KEY)')\ndat.commit()\n# type=0(global admin bot) type=1(staff group) type2=(log channel) type=3(public group) type=4(channel)\n# stage=0(ready to do a request) stage=1(just typed /request) stage=2(gave the name of the request) stage=3(he have to confirm all) stage=4(examinating) stage=5(voting) stage=6(approved) stage=7(soddisfacted and to delete)\nbot.before_help = ['Welcome to the Request Bot of @$channel']\nbot.after_help = ['Be sure 
to read the rules before requesting $something']\nbot.about = \"This Bot helps you to request $something to the $channel\"\nbot.owner = \"@$yourusername & https://github.com/Mamiglia/Requester-Bot\"\n\nreq = (input('Are requests open?[Y,N,X] ')).lower()\nif req == 'y':\n print('Ok the Bot is open to 20 requests')\n door = 20\nelif req == 'x':\n while True:\n n = input('How many requests? ')\n try:\n door = int(n)\n break\n except ValueError:\n print('Insert a valid Number')\nelse:\n print('the Bot isn\\'t open to any request')\n door = 0\nglobal door\n\n\ndef visualizer(ch):\n d.execute(\"SELECT * FROM request WHERE userid=?\", [ch.id])\n ex = d.fetchone()\n name = ex[0]\n lnk = ex[1]\n tosend = (\"Are you sure? Do you wanna request [%s](%s)?\" % (name, lnk))\n return tosend\n\n\ndef staffvis(userid):\n d.execute(\"SELECT * FROM request WHERE userid=?\", (int(userid), ))\n ex = d.fetchone()\n name = ex[0]\n lnk = ex[1]\n username = ex[3]\n nameuser = ex[4]\n ur = ex[6]\n if username is None:\n tosend = (\"#%s\\nThe user [%s](tg://user?id=%s) has requested:\\n%s\\n%s\" % (ur, nameuser, userid, name, lnk))\n else:\n tosend = (\"#%s\\nThe user @%s has requested:\\n%s\\n%s\" % (ur, username, name, lnk))\n return tosend\n\n\ndef verdict(userid, what):\n d.execute(\"SELECT * FROM request WHERE userid=?\", (userid, ))\n ex = d.fetchone()\n name = ex[0]\n lnk = ex[1]\n nameuser = ex[4]\n if what is True:\n tosend = (\"The request of the user [%s](tg://user?id=%s) has been accepted:\\n[%s](%s)\" % (nameuser, userid, name, lnk))\n elif what is False:\n tosend = (\"The request of the user [%s](tg://user?id=%s) has been refused:\\n[%s](%s)\" % (nameuser, userid, name, lnk))\n else:\n tosend = (\"The request of the user [%s](tg://user?id=%s) is being voted:\\n[%s](%s)\" % (nameuser, userid, name, lnk))\n return tosend\n\n\ndef checklink(lnk):\n # this checks the given link, you could not need this\n if \"https://google.com/\" in lnk:\n return True\n else:\n return 
False\n\n\ndef check2(userlist, usr):\n for row in userlist:\n if usr in row:\n return False\n break\n else:\n return True\n\n\ndef knowit(r):\n if r > 0:\n return 'Write /request or /$request2 to start'\n elif r <= 0:\n return 'Sorry, but at the moment the requests are closed'\n else:\n return 'Something went wrong, you found a motherfucking bug!'\n\n\ndef checkreq(prim, typ):\n if door > 0:\n try:\n d.execute(\"INSERT INTO request (userid, type) VALUES (?,?)\", (prim.id, typ))\n dat.commit()\n return True\n except sqlite3.IntegrityError:\n prim.send('You have already requested something, wait until that request is solved')\n return False\n else:\n prim.send('Sorry, but at the moment request are closed')\n\n\ndef checkperm(i):\n d.execute(\"SELECT id FROM ids WHERE type=0\")\n admins = d.fetchall()\n for adm in admins:\n if i in adm:\n return True\n break\n else:\n return False\n\n@bot.command(\"cleanreq\", hidden=True)\ndef cleanreq(message, chat):\n if checkperm(message.sender.id):\n chat.send('CLeaning requests...')\n d.execute(\"DROP TABLE IF EXISTS request\")\n d.execute(\"VACUUM\")\n d.execute(\"CREATE TABLE IF NOT EXISTS request (name TEXT, link TEXT, userid INTEGER PRIMARY KEY, username TEXT, nameuser TEXT, stage INTEGER DEFAULT 1, type TEXT, votes INTEGER DEFAULT 0, mesid INTEGER)\")\n chat.send('Done')\n dat.commit() \n \n \n@bot.command(\"cleanall\", hidden=True)\ndef cleanall(message, chat):\n if checkperm(message.sender.id):\n chat.send('Cleaning All..')\n d.execute(\"DROP TABLE IF EXISTS request\")\n d.execute(\"DROP TABLE IF EXISTS mess\")\n d.execute(\"DROP TABLE IF EXISTS ids\")\n d.execute(\"VACUUM\")\n d.execute(\"CREATE TABLE IF NOT EXISTS request (name TEXT, link TEXT, userid INTEGER, userid INTEGER PRIMARY KEY, username TEXT, nameuser TEXT, stage INTEGER DEFAULT 1, type TEXT, votes INTEGER DEFAULT 0, mesid INTEGER)\")\n d.execute(\"CREATE TABLE IF NOT EXISTS ids (id INTEGER PRIMARY KEY, type INTEGER)\")\n d.execute('CREATE TABLE IF NOT EXISTS 
mess (mesid INTEGER, user INTEGER PRIMARY KEY)')\n chat.send('Done! You have deleted all the proofs')\n dat.commit()\n\n\n@bot.command(\"door\", hidden=True)\ndef hhh(message, chat):\n if checkperm(message.sender.id):\n try:\n door = int(message.text[5::])\n chat.send('Ok, now i\\'m opened to %s requests' % (door))\n except ValueError:\n chat.send('Insert a valid number')\n\n\n@bot.command(\"easteregg\", hidden=True)\ndef oliodipalmas(chat, message):\n # $ Put at least one easter egg or you're dead to me\n chat.send('')\n\n\n@bot.command(\"newadmin\", hidden=True)\ndef newadmin(chat, message):\n # remember to set your password\n if \"$password\" in message.text:\n d.execute(\"INSERT INTO ids (id, type) VALUES (?,?)\", (message.sender.id, 0))\n chat.send(\"Welcome home my lord\")\n dat.commit()\n else:\n chat.send(\"Go away dirty peasant\")\n\n\n@bot.command(\"setstaff\", hidden=True)\ndef staff(chat, message):\n if chat.type == \"supergroup\":\n if checkperm(message.sender.id):\n d.execute(\"INSERT INTO ids (id, type) VALUES (?,?)\", (chat.id, 1))\n chat.send(\"Staff Group correctly set!\")\n dat.commit()\n else:\n chat.send(\"I need to receive the command from an important guy, not a console peasant as you\")\n else:\n chat.send(\"I should be in a Supergroup, wrong chat!\")\n\n\n@bot.command(\"setgroup\", hidden=True)\ndef setgroup(chat, message):\n if chat.type == \"supergroup\":\n if checkperm(message.sender.id):\n d.execute(\"INSERT INTO ids (id, type) VALUES (?,?)\", (chat.id, 3))\n chat.send(\"Group correctly set!\")\n dat.commit()\n else:\n chat.send(\"I need to receive the command from an important guy, not a console peasant as you\")\n else:\n chat.send(\"I should be in a Supergroup, wrong chat!\")\n\n\n@bot.command(\"setinfochannel\", hidden=True)\ndef setinfochannel(chat, message):\n if chat.type == \"supergroup\":\n if checkperm(message.sender.id):\n d.execute(\"INSERT INTO ids (id, type) VALUES (?,?)\", (chat.id, 2))\n chat.send(\"Log Channel correctly 
set!\")\n dat.commit()\n else:\n chat.send(\"I need to receive the command from an important guy, not a console peasant as you\")\n else:\n chat.send(\"I should be in a Supergroup, wrong chat!\")\n# one day this will be a Channel\n\n\n@bot.command(\"request\")\ndef first(message, chat):\n \"\"\"Do your request\"\"\"\n if chat.type == \"private\":\n if checkreq(chat, 'request') is True:\n bt = botogram.Buttons()\n bt[0].callback(\"Cancel\", \"zero\")\n chat.send(\"Send me the name of your $request\", attach=bt)\n\n\n@bot.command(\"$request2\")\ndef second(message, chat):\n '''A different type of request'''\n # instead of request2 put the other type of your request\n if chat.type == \"private\":\n if checkreq(chat, '$request2') is True:\n bt = botogram.Buttons()\n bt[0].callback(\"Cancel\", \"zero\")\n chat.send(\"Send me the name of your $request2\", attach=bt)\n\n\n@bot.process_message\ndef stager(chat, message):\n if chat.type == \"private\":\n d.execute(\"SELECT stage FROM request WHERE userid=?\", (chat.id, ))\n try:\n stage = int(d.fetchone()[0])\n if stage == 1:\n bt = botogram.Buttons()\n chat.send(\"Name received\")\n d.execute(\"UPDATE request SET stage=2, name=? 
WHERE userid=?\", (message.text, chat.id))\n bt[0].callback(\"Back\", \"rename\")\n bt[1].callback('There aren\\'t any links', 'void')\n chat.send(\"Now send me the link\", attach=bt)\n elif stage == 2:\n try:\n d.execute(\"SELECT link FROM request WHERE userid=?\", (chat.id, ))\n if d.fetchone()[0] is None:\n chat.send(\"Link received\")\n applink = str(message.parsed_text.filter(\"link\")[0])\n res = checklink(applink)\n else:\n applink = 'No link'\n res = True\n if res:\n bt = botogram.Buttons()\n d.execute(\"UPDATE request SET link=?, username=?, nameuser=?, userid=?, stage=3 WHERE userid=?\", (applink, message.sender.username, message.sender.name, message.sender.id, chat.id))\n bt[0].callback(\"Back\", \"relink\", str(message.sender.id))\n bt[1].callback(\"Cancel\", \"zero\", str(message.sender.id))\n bt[1].callback(\"Send\", 'confirm', str(message.sender.id))\n chat.send(visualizer(chat), syntax=\"markdown\", attach=bt)\n dat.commit()\n else:\n chat.send(\"The link is not correct!\\nSend now the correct one\")\n except IndexError:\n chat.send(\"The link is not correct!\\nSend now the correct one\")\n elif stage == 3:\n chat.send('Do you wanna confirm the request?')\n elif stage == 4:\n chat.send('We are examinating your request')\n elif stage == 5:\n chat.send('At the moment your request is being voted on @$yourpublicgroup')\n elif stage == 6:\n chat.send('Your request has been approved, give us the time to publish it!')\n else:\n chat.send(\"GREAT, A BUG\")\n except TypeError:\n chat.send(knowit(door))\n dat.commit()\n return True\n\n\n@bot.callback(\"rename\")\ndef rename(chat, message):\n d.execute(\"UPDATE request SET stage=1 WHERE userid=?\", (chat.id, ))\n dat.commit()\n chat.send(\"Send me the name!\")\n\n\n@bot.callback(\"relink\")\ndef relink(chat, message, data):\n d.execute(\"UPDATE request SET stage=2 WHERE userid=?\", (chat.id, ))\n dat.commit()\n message.edit(\"Send me the link!\")\n\n\n@bot.callback('void')\ndef void(message, chat):\n 
d.execute('UPDATE request SET link=? WHERE userid=?', ('void', chat.id))\n chat.send('Ok, so there aren\\'t any links, send me another message to complete your request')\n dat.commit()\n\n\n@bot.callback(\"zero\")\ndef zero(chat, message, data):\n d.execute(\"DELETE FROM request WHERE userid=?\", (chat.id, ))\n dat.commit()\n message.edit(\"Request cancelled\")\n\n\n@bot.callback(\"zero2\")\ndef zero2(chat, message, data):\n d.execute(\"DELETE FROM request WHERE userid=?\", (int(data), ))\n dat.commit()\n bot.chat(int(data)).send(\"Request Published!\")\n\n\n@bot.callback(\"confirm\")\ndef confirm(chat, message, data):\n door += -1\n bt = botogram.Buttons()\n d.execute('UPDATE request SET stage=4 WHERE userid=?', (chat.id, ))\n dat.commit()\n d.execute(\"SELECT id FROM ids WHERE type=1\")\n staffs = d.fetchone()\n bt[0].callback(\"Refuse\", \"refuse\", str(chat.id))\n bt[1].callback(\"Accept\", \"good\", str(chat.id))\n bt[2].callback(\"VOTE\", \"vote\", str(chat.id))\n message.edit(\"Your request is being examinated\")\n for group in staffs:\n bot.chat(group).send(staffvis(str(chat.id)), attach=bt)\n\n\n@bot.callback(\"refuse\")\ndef refuse(chat, message, data):\n message.edit(verdict(int(data), False))\n d.execute(\"DELETE FROM request WHERE userid=?\", (int(data), ))\n bot.chat(int(data)).send(\"Your request has been refused by the admins$\")\n # I strongly recommend you to customize this message\n dat.commit()\n\n\n@bot.callback(\"vote\")\ndef groupvote(chat, message, data):\n bot.chat(int(data)).send('Your request is being voted on @$yourpublicgroup')\n message.edit(verdict(int(data), 'vote'))\n d.execute('UPDATE request SET stage=5 WHERE userid=?', (int(data), ))\n dat.commit()\n d.execute(\"SELECT id FROM ids WHERE type=3\")\n groups = d.fetchone()\n bt = botogram.Buttons()\n bt[0].callback('start', 'startpoll', data)\n for group in groups:\n bot.chat(group).send(\"New Poll by %s\" % (data), attach=bt)\n\n\n@bot.callback(\"good\")\ndef good(chat, message, 
data):\n message.edit(verdict(int(data), True))\n bot.chat(int(data)).send(\"Your request has been approved, we will soon (but not too soon) publish it\")\n d.execute('UPDATE request SET stage=6 WHERE userid=?', (int(data), ))\n dat.commit()\n d.execute(\"SELECT id FROM ids WHERE type=2\")\n ch = d.fetchone()\n bt = botogram.Buttons()\n bt[0].callback('Published', 'zero2', data)\n for channel in ch:\n bot.chat(channel).send(staffvis(data), attach=bt)\n\n\n@bot.callback('op')\ndef operation(chat, message, query, data):\n d.execute('SELECT user FROM mess WHERE mesid=?', (message.message_id, ))\n users = d.fetchall()\n if users == [] or check2(users, query.sender.id):\n d.execute('SELECT votes, userid FROM request WHERE mesid=?', (message.message_id, ))\n ex = d.fetchone()\n vote = ex[0] + int(data)\n userid = ex[1]\n # $ You surely have to set the votes: how many votes in favor are needed for the request to be approved\n if vote == 20:\n d.execute('DELETE FROM mess WHERE mesid=?', (message.message_id, ))\n good(chat, message, userid)\n message.edit('The citizens have talked, We want this request!')\n elif vote == -20:\n message.edit('...this request is more negative than @mamiglia...')\n zero(chat, message, userid)\n d.execute('DELETE FROM mess WHERE mesid=?', (message.message_id, ))\n else:\n d.execute('UPDATE request SET votes=? WHERE mesid=?', (vote, message.message_id))\n d.execute('INSERT INTO mess VALUES (?,?)', (message.message_id, query.sender.id))\n dat.commit()\n query.notify('Vote added!')\n qtvis(chat, message, userid)\n else:\n query.notify('Already voted!')\n\n\n@bot.callback('startpoll')\ndef startpoll(chat, message, data):\n d.execute('UPDATE request SET mesid=? 
WHERE userid=?', (message.message_id, int(data)))\n dat.commit()\n qtvis(chat, message, data)\n\n\n@bot.callback('qtvis')\ndef qtvis(chat, message, data):\n d.execute(\"SELECT * FROM request WHERE userid=?\", (int(data), ))\n ex = d.fetchone()\n name = ex[0]\n lnk = ex[1]\n userid = int(data)\n nameuser = ex[4]\n ur = ex[6]\n vote = ex[7]\n bt = botogram.Buttons()\n bt[0].callback('+1', 'op', str(+1))\n bt[0].callback('-1', 'op', str(-1))\n message.edit('#%s\\nIn the opinion of [%s](tg://user?id=%s) will be really interesting if this $request would be published on the channel:\\n[%s](%s)\\nWhat does you think?\\nVotes: %s' % (ur, nameuser, userid, name, lnk, vote), preview=False, attach=bt)\n\n\nif __name__ == \"__main__\":\n bot.run()\n","sub_path":"RequesterBot.py","file_name":"RequesterBot.py","file_ext":"py","file_size_in_byte":16166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"358429743","text":"products=[\"mobile\",\"soap\",\"car\",\"pizza\"]\r\nprice=[10000,26,3000000,270]\r\na=input(\"Enter The Product Name:\")\r\ni=0\r\nwhile i<=2:\r\n while a==products[i]: \r\n print(\"price is \",price[i])\r\n break\r\n i=i+1\r\nx=input(\"bye user \") \r\n \r\n \r\n \r\n \r\n \r\n\r\n\r\n","sub_path":"WHILE products.py","file_name":"WHILE products.py","file_ext":"py","file_size_in_byte":339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"625598005","text":"# -*- coding: utf-8 -*-\nfrom django.core.management.base import BaseCommand, CommandError\nfrom sys import argv\nfrom optparse import make_option\nfrom catalog.models import TreeItem, Section, Item\n\ntry:\n from xml.etree.ElementTree import fromstring\n# python 2.4 fix\nexcept ImportError:\n from elementtree.ElementTree import fromstring\n\nclass Offer(dict):\n def __init__(self, xml_tree, *args, **kwds):\n super(Offer, self).__init__(*args, **kwds)\n self.xml_tree = xml_tree\n self['price'] = 
xml_tree.attrib['Цена'.decode('utf8')]\n self['quantity'] = xml_tree.attrib['Количество'.decode('utf8')]\n self['name'] = self._get_full_name()\n self['identifier'] = self._get_catalog_id()\n\n def _get_properties(self):\n result_data = {}\n property_values = self.xml_tree.findall('ЗначениеСвойства'.decode('utf8'))\n for property_value in property_values:\n key = property_value.attrib['ИдентификаторСвойства'.decode('utf8')]\n value = property_value.attrib['Значение'.decode('utf8')]\n result_data[key] = value\n return result_data\n\n def _get_full_name(self):\n properties = self._get_properties()\n return properties['ПолноеНаименование'.decode('utf8')]\n\n def _get_catalog_id(self):\n item_link = self.xml_tree.find('СсылкаНаТовар'.decode('utf8'))\n return item_link.attrib['ИдентификаторВКаталоге'.decode('utf8')]\n\n\nclass Command(BaseCommand):\n help = '''Export items from CommerceML format\n Usage: manage.py importcml filename.xml \n '''\n\n def handle(self, *args, **options):\n if len(args) == 0:\n raise CommandError(\"You should specify file to import\")\n filename = args[0]\n xml_file = open(filename, 'r')\n cml = xml_file.read()\n\n tree = fromstring(cml)\n offers = tree.getchildren()[1].findall('Предложение'.decode('utf-8'))\n for offer in offers:\n self.create_item(Offer(offer))\n\n def create_item(self, item_params):\n try:\n target_tree_item = TreeItem.objects.get(name=u'Импорт')\n except TreeItem.DoesNotExist:\n target_tree_item = TreeItem(parent_id=None, name=u'Импорт')\n target_tree_item.save()\n target_section = Section(is_meta_item=False)\n target_section.save()\n target_tree_item.section = target_section\n target_tree_item.save()\n \n new_tree_item = TreeItem(name = item_params['name'][:200],\n parent=target_tree_item)\n new_item = Item(\n price = item_params['price'],\n identifier = item_params['identifier'],\n quantity = item_params['quantity'],\n )\n new_tree_item.save()\n new_item.save()\n new_tree_item.item = new_item\n 
new_tree_item.save()\n","sub_path":"defaults/management/commands/importcml.py","file_name":"importcml.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"198141073","text":"import numpy as np\nimport cv2\nimport glob\nfrom moviepy.editor import VideoFileClip\nimport copy\n\n'''\nAdvanced Lane Finding Project\n\nThe goals / steps of this project are the following:\n\nCompute the camera calibration matrix and distortion coefficients given a set of chessboard images.\nApply a distortion correction to raw images.\nUse color transforms, gradients, etc., to create a thresholded binary image.\nApply a perspective transform to rectify binary image (“birds-eye view”).\nDetect lane pixels and fit to find the lane boundary.\nDetermine the curvature of the lane and vehicle position with respect to center.\nWarp the detected lane boundaries back onto the original image.\nOutput visual display of the lane boundaries and numerical estimation of lane curvature and vehicle position.\n'''\n\n'''\nPart1:\nread all chessboard images, detect the patterns and compute the camera calibration matrix\n'''\n\n\nclass Camera(object):\n \"\"\"\n A Class to handle camera operations (calibration etc)\n \"\"\"\n\n def __init__(self):\n self.debug = False # display or not the loaded picture\n self.intrinsic = None # camera matrix\n self.distortion = None # camera distortion\n self.img_shape = None\n # Arrays to store object points and image points from all the images.\n self.obj_points = [] # 3d points in real world space\n self.img_points = [] # 2d points in image plane.\n\n def read_chessboards(self, img_path):\n # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)\n objp = np.zeros((6 * 9, 3), np.float32)\n objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)\n images = glob.glob(img_path)\n # Step through the list and search for chessboard corners\n for fname in images:\n img = 
cv2.imread(fname)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n if self.img_shape is None:\n self.img_shape = gray.shape[::-1]\n # Find the chessboard corners\n ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)\n # If found, add object points, image points\n if ret == True:\n self.obj_points.append(objp)\n self.img_points.append(corners)\n if self.debug:\n # Draw and display the corners\n img = cv2.drawChessboardCorners(img, (9, 6), corners, ret)\n cv2.imshow('img', img)\n cv2.waitKey(500)\n\n def calibrate_camera(self, img_path):\n # Compute camera calibration using the chessboards\n ret, self.intrinsic, self.distortion, rvecs, tvecs = cv2.calibrateCamera(self.obj_points,\n self.img_points,\n self.img_shape,\n None,\n None)\n images = glob.glob(img_path)\n fname = images[0]\n img = cv2.imread(fname)\n dst = self.undistort(img)\n cv2.imwrite('output_images/undistorted_pattern.jpg', dst)\n cv2.imwrite('output_images/original_pattern.jpg', img)\n\n def undistort(self, img):\n if self.intrinsic is not None and self.distortion is not None:\n return cv2.undistort(img, self.intrinsic, self.distortion)\n else:\n print(\"Camera was not calibrated yet\")\n exit(1)\n\n\nclass FilterTools(object):\n \"\"\"\n A class containing different static functions offering filtering functionalities\n \"\"\"\n\n @staticmethod\n def mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):\n thresh_min = mag_thresh[0]\n thresh_max = mag_thresh[1]\n # Apply the following steps to img\n # 1) Convert to grayscale\n gray = img\n # 2) Take the gradient in x and y separately\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n # 3) Calculate the magnitude\n mag_sobel = np.sqrt(np.power(sobelx, 2) + np.power(sobely, 2))\n # 4) Scale to 8-bit (0 - 255) then convert to type = np.uint8\n scaled_sobel = np.uint8(255 * mag_sobel / np.max(mag_sobel))\n # 5) Create a mask of 1's where the scaled gradient 
magnitude\n # is > thresh_min and < thresh_max\n binary_output = np.zeros_like(scaled_sobel)\n binary_output[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1\n # 6) Return this mask as your binary_output image\n return binary_output\n\n @staticmethod\n def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi / 2)):\n thresh_min = thresh[0]\n thresh_max = thresh[1]\n # Apply the following steps to img\n # 1) Convert to grayscale\n gray = img\n # 2) Take the gradient in x and y separately\n sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n # 3) Take the absolute value of the x and y gradients\n abs_sobelx = np.abs(sobelx)\n abs_sobely = np.abs(sobely)\n # 4) Use np.arctan2(abs_sobely, abs_sobelx) to calculate the direction of the gradient\n dir_sobel = np.arctan2(abs_sobely, abs_sobelx)\n # 5) Create a binary mask where direction thresholds are met\n binary_output = np.zeros_like(dir_sobel)\n binary_output[(dir_sobel >= thresh_min) & (dir_sobel <= thresh_max)] = 1\n # 6) Return this mask as your binary_output image\n return binary_output\n\n @staticmethod\n def abs_sobel_thresh(img, orient='x', sobel_kernel=3, thresh=(0, 255)):\n # Apply the following steps to img\n # 1) Convert to grayscale\n thresh_min = thresh[0]\n thresh_max = thresh[1]\n gray = img\n # 2) Take the derivative in x or y given orient = 'x' or 'y'\n if orient == 'x':\n sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)\n else:\n sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)\n # 3) Take the absolute value of the derivative or gradient\n abs_sobel = np.absolute(sobel)\n # 4) Scale to 8-bit (0 - 255) then convert to type = np.uint8\n scaled_sobel = np.uint8(255 * abs_sobel / np.max(abs_sobel))\n # 5) Create a mask of 1's where the scaled gradient magnitude\n # is > thresh_min and < thresh_max\n binary_output = np.zeros_like(scaled_sobel)\n binary_output[(scaled_sobel >= thresh_min) 
& (scaled_sobel <= thresh_max)] = 1\n # 6) Return this mask as your binary_output image\n return binary_output\n\n @staticmethod\n def intensity_thresh(img, thresh=(0, 255)):\n thresh_min = thresh[0]\n thresh_max = thresh[1]\n binary_output = np.zeros_like(img)\n binary_output[(img >= thresh_min) & (img <= thresh_max)] = 1\n # 6) Return this mask as your binary_output image\n return binary_output\n\n @staticmethod\n def hls_select(img, thresh=(0, 255)):\n s_channel = img\n binary_output = np.zeros_like(s_channel)\n binary_output[(s_channel > thresh[0]) & (s_channel <= thresh[1])] = 1\n # 3) Return a binary image of threshold result\n return binary_output\n\n @staticmethod\n def hsv_select(img):\n # Extact yellow and white colors from image and store them in the variable combined\n hsv_image = img\n # define range of blue and white color in HSV (blue instead of yellow due to the BGR RGB flip)\n lower_blue = np.array([0, 0, 0])\n upper_blue = np.array([30, 225, 255])\n lower_white = np.array([90, 5, 210])\n upper_white = np.array([160, 30, 255])\n # cv2.imshow(\"hue\", hsv_image[:, :, 0])\n # cv2.imshow(\"saturation\", hsv_image[:, :, 1])\n # cv2.imshow(\"value\", hsv_image[:, :, 2])\n # Additional filtering to extract only yellow and white colors from image\n mask_yellow = cv2.inRange(hsv_image, lower_blue, upper_blue)\n mask_white = cv2.inRange(hsv_image, lower_white, upper_white)\n # combine yellow and white image\n combined = cv2.bitwise_or(mask_white, mask_yellow)\n # Dilate and erode, more iterations for erode to remove single points\n kernel = np.ones((3, 3), np.uint8)\n combined = cv2.dilate(combined, kernel, iterations=3)\n combined = cv2.erode(combined, kernel, iterations=2)\n return combined\n\n\ndef search_for_lines(binary_warped, out_img):\n \"\"\"\n use sliding windows and histogram to find fit lines to the detected white pixels\n :param binary_warped:\n :param out_img:\n :return:\n \"\"\"\n histogram = np.sum(binary_warped[binary_warped.shape[0] / 2:, 
:], axis=0)\n # Find the peak of the left and right halves of the histogram\n # These will be the starting point for the left and right lines\n midpoint = np.int(histogram.shape[0] / 2)\n leftx_base = np.argmax(histogram[:midpoint])\n rightx_base = np.argmax(histogram[midpoint:]) + midpoint\n # Choose the number of sliding windows\n nwindows = 9\n # Set height of windows\n window_height = np.int(binary_warped.shape[0] / nwindows)\n # Identify the x and y positions of all nonzero pixels in the image\n nonzero = binary_warped.nonzero()\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n # Current positions to be updated for each window\n leftx_current = leftx_base\n rightx_current = rightx_base\n # Set the width of the windows +/- margin\n margin = 100\n # Set minimum number of pixels found to recenter window\n minpix = 50\n # Create empty lists to receive left and right lane pixel indices\n left_lane_inds = []\n right_lane_inds = []\n # Step through the windows one by one\n for window in range(nwindows):\n # Identify window boundaries in x and y (and right and left)\n win_y_low = binary_warped.shape[0] - (window + 1) * window_height\n win_y_high = binary_warped.shape[0] - window * window_height\n win_xleft_low = leftx_current - margin\n win_xleft_high = leftx_current + margin\n win_xright_low = rightx_current - margin\n win_xright_high = rightx_current + margin\n # Draw the windows on the visualization image\n cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high), (0, 255, 0), 2)\n cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high), (0, 255, 0), 2)\n # Identify the nonzero pixels in x and y within the window\n good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (\n nonzerox < win_xleft_high)).nonzero()[0]\n good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (\n nonzerox < 
win_xright_high)).nonzero()[0]\n # Append these indices to the lists\n left_lane_inds.append(good_left_inds)\n right_lane_inds.append(good_right_inds)\n # If you found > minpix pixels, recenter next window on their mean position\n if len(good_left_inds) > minpix:\n leftx_current = np.int(np.mean(nonzerox[good_left_inds]))\n if len(good_right_inds) > minpix:\n rightx_current = np.int(np.mean(nonzerox[good_right_inds]))\n # Concatenate the arrays of indices\n left_lane_inds = np.concatenate(left_lane_inds)\n right_lane_inds = np.concatenate(right_lane_inds)\n\n # Extract left and right line pixel positions\n leftx = nonzerox[left_lane_inds]\n lefty = nonzeroy[left_lane_inds]\n rightx = nonzerox[right_lane_inds]\n righty = nonzeroy[right_lane_inds]\n\n # Fit a second order polynomial to each\n left_fit = np.polyfit(lefty, leftx, 2)\n right_fit = np.polyfit(righty, rightx, 2)\n return left_fit, right_fit, left_lane_inds, right_lane_inds\n\n\ndef open(image):\n \"\"\"\n Dilate and erode, more iterations for erode to remove single points\n :param image:\n :return: dilated then eroded image\n \"\"\"\n kernel = np.ones((3, 3), np.uint8)\n combined = cv2.dilate(image, kernel, iterations=3)\n combined = cv2.erode(combined, kernel, iterations=2)\n return combined\n\n\ndef predict_lines(binary_warped, left_fit, right_fit):\n \"\"\"\n Assume you now have a new warped binary image\n from the next frame of video (also called \"binary_warped\")\n It's now much easier to find line pixels!\n :param binary_warped:\n :param left_fit:\n :param right_fit:\n :return:\n \"\"\"\n\n nonzero = binary_warped.nonzero()\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n margin = 100\n left_lane_inds = ((nonzerox > (left_fit[0] * (nonzeroy ** 2) + left_fit[1] * nonzeroy + left_fit[2] - margin)) & (\n nonzerox < (left_fit[0] * (nonzeroy ** 2) + left_fit[1] * nonzeroy + left_fit[2] + margin)))\n right_lane_inds = (\n (nonzerox > (right_fit[0] * (nonzeroy ** 2) + right_fit[1] * 
nonzeroy + right_fit[2] - margin)) & (\n nonzerox < (right_fit[0] * (nonzeroy ** 2) + right_fit[1] * nonzeroy + right_fit[2] + margin)))\n\n # Again, extract left and right line pixel positions\n leftx = nonzerox[left_lane_inds]\n lefty = nonzeroy[left_lane_inds]\n rightx = nonzerox[right_lane_inds]\n righty = nonzeroy[right_lane_inds]\n # Fit a second order polynomial to each\n left_fit = np.polyfit(lefty, leftx, 2)\n right_fit = np.polyfit(righty, rightx, 2)\n return left_fit, right_fit, left_lane_inds, right_lane_inds\n # Generate x and y values for plotting\n # ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])\n # left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]\n # right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]\n\n\ndef good_lines(left_fit, right_fit):\n return True\n\n\ndef find_lines(binary_warped, image, Minv):\n \"\"\"\n Function that finds the lines in a warped patch\n either with no prior knowledge by running search_for_lines (sliding windows)\n or by using the previous fitted lines and doing slight updates (predict_lines)\n :param binary_warped:\n :param image:\n :param Minv:\n :return:\n \"\"\"\n global left_fit\n global right_fit\n global last_one_good\n nonzero = binary_warped.nonzero()\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n # Create an output image to draw on and visualize the result\n out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255\n if left_fit is None or right_fit is None or not last_one_good:\n new_left_fit, new_right_fit, left_lane_inds, right_lane_inds = search_for_lines(binary_warped, out_img)\n else:\n new_left_fit, new_right_fit, left_lane_inds, right_lane_inds = predict_lines(binary_warped, left_fit, right_fit)\n if good_lines(new_left_fit, new_right_fit):\n left_fit, right_fit = new_left_fit, new_right_fit\n else:\n last_one_good = False\n\n # Generate x and y values for plotting\n ploty = np.linspace(0, 
binary_warped.shape[0] - 1, binary_warped.shape[0])\n left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]\n right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]\n\n out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]\n out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]\n # plt.imshow(out_img)\n # plt.plot(left_fitx, ploty, color='yellow')\n # plt.plot(right_fitx, ploty, color='yellow')\n # plt.xlim(0, 800)\n # plt.ylim(800, 0)\n # plt.show()\n # Create an image to draw the lines on\n warp_zero = np.zeros_like(binary_warped).astype(np.uint8)\n color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n\n # Recast the x and y points into usable format for cv2.fillPoly()\n pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])\n pts = np.hstack((pts_left, pts_right))\n\n # Draw the lane onto the warped blank image\n cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))\n # Warp the blank back to original image space using inverse perspective matrix (Minv)\n newwarp = cv2.warpPerspective(color_warp, Minv, (image.shape[1], image.shape[0]))\n # Combine the result with the original image\n result = cv2.addWeighted(image, 1, newwarp, 0.3, 0)\n left_curv, right_curv = compute_curvature(left_fit, right_fit, binary_warped.shape[0])\n cv2.putText(result, \"LCurv: %s | RCurv: %s\" % (left_curv, right_curv), (50, 100), cv2.FONT_HERSHEY_SIMPLEX, 1.,\n (0, 0, 255), thickness=3)\n return result\n\n\ndef compute_curvature(left_fit, right_fit, y_eval):\n \"\"\"\n a function to compute lane curvature from the polynomial fits\n :param fit_left:\n :param fit_right:\n :return:\n \"\"\"\n left_curverad = ((1 + (2 * left_fit[0] * y_eval + left_fit[1]) ** 2) ** 1.5) / np.absolute(2 * left_fit[0])\n right_curverad = ((1 + (2 * right_fit[0] * y_eval + right_fit[1]) ** 2) ** 1.5) / np.absolute(2 * 
right_fit[0])\n return left_curverad, right_curverad\n\n\ndef filter(img):\n \"\"\"\n Use a combination of filters from FIlterTools to isolate lane lines in a binary image\n :param img:\n :return:\n \"\"\"\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # cv2.equalizeHist(gray, gray)\n s = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)[:, :, 2]\n # cv2.equalizeHist(s, s)\n # hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n # hsv_thresh = FilterTools.hsv_select(hsv)\n # cv2.imshow(\"hsv\", hsv_thresh*255)\n s_sobel_x = FilterTools.abs_sobel_thresh(s, 'x', 7, (20, 100))\n s_sobel_y = FilterTools.abs_sobel_thresh(s, 'y', 7, (10, 100))\n sobel_x = FilterTools.abs_sobel_thresh(gray, 'x', 7, (30, 100))\n sobel_y = FilterTools.abs_sobel_thresh(gray, 'y', 7, (30, 100))\n # mag_s_sobel = FilterTools.mag_thresh(s, 3, (10, 20))\n # mag_s_sobel = open(mag_s_sobel)\n # mag_sobel = FilterTools.mag_thresh(gray, 3, (10, 20))\n # mag_sobel = open(mag_s_sobel)\n # cv2.imshow(\"mag\", mag_s_sobel*255)\n # cv2.imshow(\"maggray\", mag_sobel*255)\n # cv2.imshow(\"sobel_x\", sobel_x*255)\n # cv2.imshow(\"sobel_y\", sobel_y*255)\n # cv2.imshow(\"s_sobel_x\", s_sobel_x*255)\n # cv2.imshow(\"s_sobel_y\", s_sobel_y*255)\n # cv2.imshow(\"gray\", gray)\n s_thresh = FilterTools.hls_select(s, (70, 255))\n # dir_sobel = FilterTools.dir_threshold(gray, 3, (0.75, 0.8))\n # s_dir_sobel = FilterTools.dir_threshold(s, 3, (0.75, 0.8))\n\n intensity = FilterTools.intensity_thresh(gray, (40, 255))\n combined = np.zeros_like(s)\n combined[intensity == 1 & (((s_sobel_x == 1) & (s_sobel_y == 1)) |\n ((sobel_x == 1) & (sobel_y == 1)) |\n (s_thresh == 1))] = 1\n return combined\n\n\ndef warp(filtered_img, original_img):\n \"\"\"\n undistort an image using the camera model then warp it to get the topdown view\n :param filtered_img:\n :param original_img:\n :return:\n \"\"\"\n global cam\n global transform\n global warped_shape\n undistorted = cam.undistort(filtered_img)\n undistorted_color = cam.undistort(original_img)\n 
warped = cv2.warpPerspective(undistorted, transform, tuple(warped_shape))\n return warped, undistorted_color\n\n\ndef display_image(img):\n \"\"\"\n simple image display\n :param img:\n :return:\n \"\"\"\n cv2.imshow(\"test\", img)\n cv2.waitKey(100)\n return img\n\n\ndef draw_roi(img, roi):\n \"\"\"\n draws a region of interest composed of n points\n :param img:\n :param roi:\n :return:\n \"\"\"\n pts = roi.reshape((-1, 1, 2))\n cv2.polylines(img, [pts.astype(np.int32)], True, (0, 255, 255))\n return img\n\n\ndef crop_edges(img, margin):\n \"\"\"\n sets edge pixels to zero to avoid noisy edges\n :param img:\n :param margin:\n :return:\n \"\"\"\n img[:, :margin] = 0\n img[:margin, :] = 0\n img[-margin:, :] = 0\n img[:, -margin:] = 0\n return img\n\n\ndef pipeline(img, debug=False):\n \"\"\"\n Main function, running the pipeline for road lane detection\n :param img:\n :return: image with road lane drawed on it\n \"\"\"\n global transform\n global roi\n roi_img = copy.deepcopy(img)\n roi_img = draw_roi(roi_img, roi)\n # Filter image to get combined set of features\n filtered = filter(img)\n # Get the birdeye view from top\n warped, undistorted_color = warp(filtered, img)\n # Remove edges\n cropped = crop_edges(warped, 10)\n # Find lane lines\n result = find_lines(cropped, undistorted_color, np.linalg.inv(transform))\n if debug:\n cv2.imwrite(\"output_images/filtered_image.jpg\", filtered * 255)\n cv2.imwrite(\"output_images/undistorted_image.jpg\", undistorted_color)\n cv2.imwrite(\"output_images/original_image.jpg\", img)\n cv2.imwrite(\"output_images/warped_cropped.jpg\", cropped * 255)\n cv2.imwrite(\"output_images/roi.jpg\", roi_img)\n warped_color, undistorted_color = warp(roi_img, img)\n cv2.imwrite(\"output_images/warped_color.jpg\", warped_color)\n # cv2.waitKey(0)\n return result\n\n\nleft_fit = None # plynomial fit for left lane\nright_fit = None # polynomial fit for right lane\nlast_one_good = False # Was a good polynomial fit found in last frame?\n\ncam = 
Camera() # create a camera instance\ncam.read_chessboards('camera_cal/calibration*.jpg')\ncam.calibrate_camera('camera_cal/calibration*.jpg')\n\n# Define a region of interest in the road image\nroi = np.float32([[100, 666], [490, 500], [840, 500], [1230, 666]])\n# Target bird eye view image shape\nwarped_shape = np.array([800, 800])\ntarget_roi = np.float32([[0, warped_shape[0]], [0, 0], [warped_shape[0], 0], warped_shape])\n# Get the perspective transform to apply to all road images\ntransform = cv2.getPerspectiveTransform(roi, target_roi)\n\n# Test on test images\nimages = glob.glob('test_images/*.jpg')\nfor idx, image in enumerate(images):\n img = cv2.imread(image)\n result = pipeline(img, debug=(idx == 0))\n cv2.imwrite(\"output_images/test%s_result.jpg\" % idx, result)\n\n# My code works only with the project video\nvideo = \"project_video.mp4\"\nclip1 = VideoFileClip(video)\nwhite_clip = clip1.fl_image(pipeline)\nwhite_clip.write_videofile(\"output_images/result_project_video.mp4\", audio=False)\n","sub_path":"find_lanes.py","file_name":"find_lanes.py","file_ext":"py","file_size_in_byte":22504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"424821545","text":"# -*- encoding: utf-8 -*-\n# @Time : 2017/12/20 17:49\n# @Author : mike.liu\n# @File : test_log01.py\n\nimport unittest\nimport time\nfrom selenium import webdriver\n\nclass testfour(unittest.TestCase):\n\n def setUp(self):\n '''测试固件setUp的代码,主要用于测试的前期准备工作'''\n self.driver = webdriver.Chrome()\n self.driver.maximize_window()\n self.driver.implicitly_wait(8)\n self.driver.get(\"https://qa1-erp.jfz.com\")\n\n def tearDown(self):\n '''测试结束之后的清理,一般是关闭浏览器'''\n\n # self.driver.quit()\n\n def test_login01(self):\n '''这里一定要以test开头'''\n\n self.driver.find_element_by_xpath(\"//*[@name='username']\").send_keys(\"defang2\")\n self.driver.find_element_by_xpath(\"//*[@name='password']\").send_keys(\"123\")\n 
self.driver.find_element_by_xpath(\"//*[@class='submit_wrap']\").click()\n self.assertEqual(self.driver.title,'深圳市金斧子网络科技有限公司-ERP',msg='访问erp失败')\n time.sleep(5)\n self.driver.find_element_by_xpath(\"//a[contains(.,'退出系统')]\")\n time.sleep(5)\n\n print(\"标题:\"+ self.driver.title)\n\n time.sleep(5)\n assert '德芳客服' in self.driver.page_source\n time.sleep(5)\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"第四课/testfour/testlog1/test_log01.py","file_name":"test_log01.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"605526080","text":"import tensorflow as tf\nimport numpy as np\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\n'''\n\ntfrecords作为tensorflow的数据io格式,可以解决大数据集爆内存问题,一边读入数据,��边\nfeed数据\n\n'''\ndef get_batches(filenamequeue, batch_size, nclass, shuffle):\n '''\n param:\n filenamequene:文件队列\n batch_size:\n nclass: 类别数\n shuffle:随机打乱,只对训练集\n return:\n image_batch, label_batch 返回tensor\n\n '''\n reader = tf.TFRecordReader()\n filename_queue = tf.train.string_input_producer([filenamequeue])\n _, serialized_example = reader.read(filename_queue)\n\n features = tf.parse_single_example(serialized_example,\n features={\n 'image_raw': tf.FixedLenFeature([],tf.string),\n 'label': tf.FixedLenFeature([], tf.int64),\n 'width': tf.FixedLenFeature([], tf.int64),\n 'height': tf.FixedLenFeature([], tf.int64)\n })\n\n image = tf.decode_raw(features['image_raw'], tf.uint8)\n label = tf.cast(features['label'], tf.int64)\n width = tf.cast(features['width'], tf.int64)\n height= tf.cast(features['height'], tf.int64)\n\n im_shape = tf.stack([28, 28, 1])\n image = tf.reshape(image,im_shape)\n label = tf.reshape(label,[1])\n min_after_dequeue = 10000\n 
capacity = min_after_dequeue + 3*batch_size\n if shuffle:\n image_batch,label_batch = tf.train.shuffle_batch(\n [image, label],\n batch_size = batch_size,\n capacity = capacity,\n min_after_dequeue = min_after_dequeue,\n num_threads = 4)\n\n else:\n image_batch, label_batch = tf.train.batch(\n [image, label],\n batch_size = batch_size,\n num_threads = 4,\n capacity = 1000)\n label_batch = tf.one_hot(label_batch, depth = nclass)\n label_batch = tf.cast(label_batch, dtype=tf.int32)\n label_batch = tf.reshape(label_batch, [batch_size, nclass])\n\n return image_batch, label_batch\n\ndef parse_example(serialized_example,nclass=10):\n '''\n\n param:\n serialized_example: 序列化的数据\n nclass: 类别数\n return:\n image,label 单样本的tensor\n '''\n features = tf.parse_single_example(serialized_example,\n features={\n 'image_raw': tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([], tf.int64),\n 'width': tf.FixedLenFeature([], tf.int64),\n 'height': tf.FixedLenFeature([], tf.int64)\n })\n\n image = tf.decode_raw(features['image_raw'], tf.uint8)\n label = tf.cast(features['label'], tf.int32)\n width = tf.cast(features['width'], tf.int64)\n height= tf.cast(features['height'], tf.int64)\n\n im_shape = tf.stack([28, 28, 1])\n image = tf.reshape(image, im_shape)\n label = tf.one_hot(label, depth=nclass)\n return image, label\n\n\ndef get_batches_v1(filenames, batchsize, nclass, shuffle=True):\n '''\n\n param:\n filenames: tfrecords文件路径\n batchsize:\n nclass:\n shuffle: 是否随机打乱\n return:\n 返回数据迭代器iterator,这里使用可初始化的iterator\n '''\n dataset = tf.data.TFRecordDataset([filenames])\n #这里使用lambda使得map支持带参数的函数作为输入参数\n dataset = dataset.map(lambda x: parse_example(x, nclass=10))\n if shuffle:\n # 这里不使用repeat,因为使用repeat函数,每个epoch的起止没有标志,难以界定epoch的起止\n dataset = dataset.shuffle(buffer_size=10000).batch(batchsize)\n else:\n dataset = dataset.batch(batchsize)\n\n iterator = dataset.make_initializable_iterator()\n return iterator\n\n\n\ndef write_tfrecords(filename, is_train):\n\n mnist = 
input_data.read_data_sets(\"./mnist_data/\",dtype=tf.uint8, one_hot=True)\n\n if is_train:\n images = mnist.train.images\n labels = mnist.train.labels\n pixels = images.shape[0]\n num_examples = mnist.train.num_examples\n else:\n images = mnist.test.images\n labels = mnist.test.labels\n pixels = images.shape[0]\n num_examples = mnist.test.num_examples\n image_height= 28\n image_width = 28\n\n writer = tf.python_io.TFRecordWriter(filename)\n for index in range(num_examples):\n image_raw = images[index].tostring()\n example = tf.train.Example(features=tf.train.Features(feature={\n 'label': _int64_feature(np.argmax(labels[index])),\n 'image_raw': _bytes_feature(image_raw),\n 'width': _int64_feature(image_width),\n 'height': _int64_feature(image_height)}))\n writer.write(example.SerializeToString())\n\n writer.close()\n\n\n\n","sub_path":"tf_iterator/train_and_val/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":5563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"491280481","text":"from pbpstats.resources.enhanced_pbp import (\n InvalidNumberOfStartersException,\n StartOfPeriod,\n)\nfrom pbpstats.resources.enhanced_pbp.data_nba.enhanced_pbp_item import (\n DataEnhancedPbpItem,\n)\n\n\nclass DataStartOfPeriod(StartOfPeriod, DataEnhancedPbpItem):\n \"\"\"\n Class for start of period events\n \"\"\"\n\n event_type = 12\n\n def __init__(self, *args):\n super().__init__(*args)\n\n def get_period_starters(self, file_directory=None):\n \"\"\"\n Gets player ids of players who started the period for each team\n If players can't be determined from parsing pbp, will try to\n find them by making API request to stats.nba.com boxscore filtered by time.\n\n :param str file_directory: directory in which overrides subdirectory exists\n containing period starter overrides when period starters can't be determined\n from parsing pbp events\n :returns: dict with list of player ids for each team\n with players 
on the floor at start of period\n :raises: :obj:`~pbpstats.resources.enhanced_pbp.start_of_period.InvalidNumberOfStartersException`:\n If all 5 players that start the period for a team can't be determined.\n \"\"\"\n try:\n return self._get_period_starters_from_period_events(file_directory)\n except InvalidNumberOfStartersException:\n # get starters from stats.nba.com boxcore data by time range\n return self._get_starters_from_boxscore_request()\n","sub_path":"pbpstats/resources/enhanced_pbp/data_nba/start_of_period.py","file_name":"start_of_period.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"280502884","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author:XuMing(xuming624@qq.com)\n@description:\n\"\"\"\n\nimport os\nimport sys\nimport time\nfrom codecs import open\nfrom random import sample\nfrom xml.dom import minidom\n\nsys.path.append(\"../..\")\nfrom pycorrector.corrector import Corrector\nfrom pycorrector.utils.io_utils import load_json, save_json\nfrom pycorrector.utils.io_utils import load_pkl\nfrom pycorrector.utils.math_utils import find_all_idx\n\npwd_path = os.path.abspath(os.path.dirname(__file__))\neval_data_path = os.path.join(pwd_path, '../data/eval_corpus.json')\nsighan_2015_path = os.path.join(pwd_path, '../data/cn/sighan_2015/test.tsv')\n\n\ndef get_bcmi_corpus(line, left_symbol='((', right_symbol='))'):\n \"\"\"\n 转换原始文本为encoder-decoder列表\n :param line: 王老师心((性))格温和,态度和爱((蔼)),教学有方,得到了许多人的好平((评))。\n :param left_symbol:\n :param right_symbol:\n :return: [\"王老师心格温和,态度和爱,教学有方,得到了许多人的好平。\" , \"王老师性格温和,态度和蔼,教学有方,得到了许多人的好评。\"]\n \"\"\"\n error_sentence = ''\n correct_sentence = ''\n details = []\n if left_symbol not in line or right_symbol not in line:\n return error_sentence, correct_sentence, details\n\n left_ids = find_all_idx(line, left_symbol)\n right_ids = find_all_idx(line, right_symbol)\n if len(left_ids) != len(right_ids):\n return 
error_sentence, correct_sentence, details\n begin = 0\n for left, right in zip(left_ids, right_ids):\n correct_len = right - left - len(left_symbol)\n correct_word = line[(left + len(left_symbol)):right]\n error_sentence += line[begin:left]\n correct_sentence += line[begin:(left - correct_len)] + correct_word\n begin = right + len(right_symbol)\n details.append(correct_word)\n error_sentence += line[begin:]\n correct_sentence += line[begin:]\n n_details = []\n for i in details:\n idx = correct_sentence.find(i)\n end_idx = idx + len(i)\n error_item = error_sentence[idx:end_idx]\n n_details.append([error_item, i, idx, end_idx])\n return error_sentence, correct_sentence, n_details\n\n\ndef build_bcmi_corpus(data_path, output_path):\n corpus = []\n with open(data_path, 'r', encoding='utf-8') as f:\n for line in f:\n line = line.strip()\n error_sentence, correct_sentence, details = get_bcmi_corpus(line)\n if not error_sentence:\n continue\n line_dict = {\"text\": error_sentence, \"correction\": correct_sentence, \"errors\": details}\n corpus.append(line_dict)\n save_json(corpus, output_path)\n\n\ndef eval_bcmi_data(data_path, verbose=False):\n total_count = 0\n right_count = 0\n right_result = dict()\n wrong_result = dict()\n rule_model = Corrector()\n with open(data_path, 'r', encoding='utf-8') as f:\n for line in f:\n line = line.strip()\n error_sentence, right_sentence, right_detail = get_bcmi_corpus(line)\n if not error_sentence:\n continue\n pred_sentence, pred_detail = rule_model.correct(error_sentence)\n total_count += 1\n if right_sentence == pred_sentence:\n right_count += 1\n right_result[error_sentence] = [right_sentence, pred_sentence]\n else:\n wrong_result[error_sentence] = [right_sentence, pred_sentence]\n if verbose:\n print('input sentence:', error_sentence)\n print('pred sentence:', pred_sentence, pred_detail)\n print('right sentence:', right_sentence, right_detail)\n if verbose:\n print('right count:', right_count, ';total_count:', total_count)\n 
right_rate = 0.0\n if total_count > 0:\n right_rate = right_count / total_count\n return right_rate, right_result, wrong_result\n\n\ndef build_sighan_corpus(data_path, output_path):\n corpus = []\n sighan_data = load_pkl(data_path)\n for error_sentence, error_details in sighan_data:\n ids = []\n error_word = ''\n right_word = ''\n if not error_details:\n continue\n for detail in error_details:\n idx = detail[0]\n error_word = detail[1]\n right_word = detail[2]\n begin_idx = idx - 1\n ids.append(begin_idx)\n correct_sentence = error_sentence.replace(error_word, right_word)\n details = []\n for i in ids:\n details.append([error_sentence[i], correct_sentence[i], i, i + 1])\n line_dict = {\"text\": error_sentence, \"correction\": correct_sentence, \"errors\": details}\n corpus.append(line_dict)\n save_json(corpus, output_path)\n\n\ndef build_cged_no_error_corpus(data_path, output_path, limit_size=500):\n corpus = []\n print('Parse data from %s' % data_path)\n dom_tree = minidom.parse(data_path)\n docs = dom_tree.documentElement.getElementsByTagName('DOC')\n count = 0\n for doc in docs:\n # Input the text\n text = doc.getElementsByTagName('TEXT')[0]. \\\n childNodes[0].data.strip()\n # Input the correct text\n correction = doc.getElementsByTagName('CORRECTION')[0]. 
\\\n childNodes[0].data.strip()\n\n if correction:\n count += 1\n line_dict = {\"text\": correction, \"correction\": correction, \"errors\": []}\n corpus.append(line_dict)\n if count > limit_size:\n break\n save_json(corpus, output_path)\n\n\ndef build_eval_corpus(output_eval_path=eval_data_path):\n \"\"\"\n 生成评估样本集,抽样分布可修改\n 当前已经生成评估集,可以修改代码生成自己的样本分布\n :param output_eval_path:\n :return: json file\n \"\"\"\n bcmi_path = os.path.join(pwd_path, '../data/cn/bcmi.txt')\n clp_path = os.path.join(pwd_path, '../data/cn/clp14_C1.pkl')\n sighan_path = os.path.join(pwd_path, '../data/cn/sighan15_A2.pkl')\n cged_path = os.path.join(pwd_path, '../data/cn/CGED/CGED16_HSK_TrainingSet.xml')\n\n char_error_path = os.path.join(pwd_path, './bcmi_corpus.json')\n build_bcmi_corpus(bcmi_path, char_error_path)\n char_errors = load_json(char_error_path)\n\n word_error_path = os.path.join(pwd_path, './sighan_corpus.json')\n build_sighan_corpus(sighan_path, word_error_path)\n word_errors = load_json(word_error_path)\n\n grammar_error_path = os.path.join(pwd_path, './clp_corpus.json')\n build_sighan_corpus(clp_path, grammar_error_path)\n grammar_errors = load_json(grammar_error_path)\n\n no_error_path = os.path.join(pwd_path, './noerror_corpus.json')\n build_cged_no_error_corpus(cged_path, no_error_path)\n no_errors = load_json(no_error_path)\n\n corpus = sample(char_errors, 100) + sample(word_errors, 100) + sample(grammar_errors, 100) + sample(no_errors, 200)\n save_json(corpus, output_eval_path)\n print(\"save eval corpus done\", output_eval_path)\n os.remove(char_error_path)\n os.remove(word_error_path)\n os.remove(grammar_error_path)\n os.remove(no_error_path)\n\n\ndef eval_corpus500_by_model(correct_fn, input_eval_path=eval_data_path, verbose=True):\n \"\"\"\n 句级评估结果,设定需要纠错为正样本,无需纠错为负样本\n Args:\n correct_fn:\n input_eval_path:\n output_eval_path:\n verbose:\n\n Returns:\n Acc, Recall, F1\n \"\"\"\n corpus = load_json(input_eval_path)\n TP = 0.0\n FP = 0.0\n FN = 0.0\n TN = 0.0\n 
total_num = 0\n start_time = time.time()\n for data_dict in corpus:\n src = data_dict.get('text', '')\n tgt = data_dict.get('correction', '')\n errors = data_dict.get('errors', [])\n\n # pred_detail: list(wrong, right, begin_idx, end_idx)\n tgt_pred, pred_detail = correct_fn(src)\n if verbose:\n print()\n print('input :', src)\n print('truth :', tgt, errors)\n print('predict:', tgt_pred, pred_detail)\n\n # 负样本\n if src == tgt:\n # 预测也为负\n if tgt == tgt_pred:\n TN += 1\n print('right')\n # 预测为正\n else:\n FP += 1\n print('wrong')\n # 正样本\n else:\n # 预测也为正\n if tgt == tgt_pred:\n TP += 1\n print('right')\n # 预测为负\n else:\n FN += 1\n print('wrong')\n total_num += 1\n spend_time = time.time() - start_time\n acc = (TP + TN) / total_num\n precision = TP / (TP + FP) if TP > 0 else 0.0\n recall = TP / (TP + FN) if TP > 0 else 0.0\n f1 = 2 * precision * recall / (precision + recall) if precision + recall != 0 else 0\n print(\n f'Sentence Level: acc:{acc:.4f}, precision:{precision:.4f}, recall:{recall:.4f}, f1:{f1:.4f}, '\n f'cost time:{spend_time:.2f} s, total num: {total_num}')\n return acc, precision, recall, f1\n\n\ndef eval_sighan2015_by_model(correct_fn, sighan_path=sighan_2015_path, verbose=True):\n \"\"\"\n SIGHAN句级评估结果,设定需要纠错为正样本,无需纠错为负样本\n Args:\n correct_fn:\n input_eval_path:\n output_eval_path:\n verbose:\n\n Returns:\n Acc, Recall, F1\n \"\"\"\n TP = 0.0\n FP = 0.0\n FN = 0.0\n TN = 0.0\n total_num = 0\n start_time = time.time()\n with open(sighan_path, 'r', encoding='utf-8') as f:\n for line in f:\n line = line.strip()\n if line.startswith('#'):\n continue\n parts = line.split('\\t')\n if len(parts) != 2:\n continue\n src = parts[0]\n tgt = parts[1]\n\n tgt_pred, pred_detail = correct_fn(src)\n if verbose:\n print()\n print('input :', src)\n print('truth :', tgt)\n print('predict:', tgt_pred, pred_detail)\n\n # 负样本\n if src == tgt:\n # 预测也为负\n if tgt == tgt_pred:\n TN += 1\n print('right')\n # 预测为正\n else:\n FP += 1\n print('wrong')\n # 正样本\n else:\n # 预测也为正\n 
if tgt == tgt_pred:\n TP += 1\n print('right')\n # 预测为负\n else:\n FN += 1\n print('wrong')\n total_num += 1\n spend_time = time.time() - start_time\n acc = (TP + TN) / total_num\n precision = TP / (TP + FP) if TP > 0 else 0.0\n recall = TP / (TP + FN) if TP > 0 else 0.0\n f1 = 2 * precision * recall / (precision + recall) if precision + recall != 0 else 0\n print(\n f'Sentence Level: acc:{acc:.4f}, precision:{precision:.4f}, recall:{recall:.4f}, f1:{f1:.4f}, '\n f'cost time:{spend_time:.2f} s, total num: {total_num}')\n return acc, precision, recall, f1\n\n\ndef eval_sighan2015_by_model_batch(correct_fn, sighan_path=sighan_2015_path, verbose=True):\n \"\"\"\n SIGHAN句级评估结果,设定需要纠错为正样本,无需纠错为负样本\n Args:\n correct_fn:\n sighan_path:\n verbose:\n\n Returns:\n Acc, Recall, F1\n \"\"\"\n TP = 0.0\n FP = 0.0\n FN = 0.0\n TN = 0.0\n total_num = 0\n start_time = time.time()\n srcs = []\n tgts = []\n with open(sighan_path, 'r', encoding='utf-8') as f:\n for line in f:\n line = line.strip()\n if line.startswith('#'):\n continue\n parts = line.split('\\t')\n if len(parts) != 2:\n continue\n src = parts[0]\n tgt = parts[1]\n\n srcs.append(src)\n tgts.append(tgt)\n\n res = correct_fn(srcs)\n for each_res, src, tgt in zip(res, srcs, tgts):\n if len(each_res) == 2:\n tgt_pred, pred_detail = each_res\n else:\n tgt_pred = each_res\n if verbose:\n print()\n print('input :', src)\n print('truth :', tgt)\n print('predict:', each_res)\n\n # 负样本\n if src == tgt:\n # 预测也为负\n if tgt == tgt_pred:\n TN += 1\n print('right')\n # 预测为正\n else:\n FP += 1\n print('wrong')\n # 正样本\n else:\n # 预测也为正\n if tgt == tgt_pred:\n TP += 1\n print('right')\n # 预测为负\n else:\n FN += 1\n print('wrong')\n total_num += 1\n\n spend_time = time.time() - start_time\n acc = (TP + TN) / total_num\n precision = TP / (TP + FP) if TP > 0 else 0.0\n recall = TP / (TP + FN) if TP > 0 else 0.0\n f1 = 2 * precision * recall / (precision + recall) if precision + recall != 0 else 0\n print(\n f'Sentence Level: acc:{acc:.4f}, 
precision:{precision:.4f}, recall:{recall:.4f}, f1:{f1:.4f}, '\n f'cost time:{spend_time:.2f} s, total num: {total_num}')\n return acc, precision, recall, f1\n\n\nif __name__ == \"__main__\":\n # 评估规则方法的纠错准召率\n # eval_corpus500_by_model(pycorrector.correct)\n\n # 评估macbert模型的纠错准召率\n from pycorrector.macbert.macbert_corrector import MacBertCorrector\n\n model = MacBertCorrector()\n eval_corpus500_by_model(model.macbert_correct)\n eval_sighan2015_by_model(model.macbert_correct)\n","sub_path":"pycorrector/utils/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":13747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"257266899","text":"#### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### ####\n#### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### ####\n\n### Import functions\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\nimport os\nimport torch\nimport torch.nn as nn\nfrom matplotlib import cm\n\nsys.path.append('')\n\nimport fct_facilities as fac\nimport fct_network as net\nimport fct_integrals as integ\n\n\nfac.SetPlotParams()\n\n\n#### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### ####\n#### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### ####\n### Set parameters\n\nP = 20\nN = 200\n\n\n#### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### ####\n#### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### ####\n### Theory \n\neta_values_th = np.linspace( 0, 0.4, 100 )\noffset_values_th = np.linspace( 1, 2, 4 )\n\nK_bar_th = np.zeros(( 2, 2, len(eta_values_th), len(offset_values_th) )) # First axis indicates t = 0 or t = T\nY_bar_th = np.zeros(( 2, 2, len(eta_values_th), len(offset_values_th) ))\n\nK_corr_th = np.zeros(( 2, 
len(eta_values_th), len(offset_values_th) ))\nY_corr_th = np.zeros(( 2, len(eta_values_th), len(offset_values_th) ))\n\n#\n\ndoCompute = 0\n\nif doCompute:\n\n\tfor j, offset in enumerate(offset_values_th):\n\n\t\tih = net.ComputeI(net.fh, net.gain_phi, offset)\n\t\til = net.ComputeI(net.fl, net.gain_phi, offset)\n\n\t\tfor i, eta in enumerate(eta_values_th):\n\n\t\t\t#### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### ####\n\t\t\t### t = 0 \n\n\t\t\t# Almost everyinteging remains 0\n\n\t\t\t#### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### ####\n\t\t\t### t = T \n\n\t\t\t# Compute coordinates\n\n\t\t\talpha = integ.PsiPrimeSq() + eta * (integ.PsiSq() - integ.Psi()**2)\n\t\t\tbeta = eta * P/2. * integ.Psi()**2 \n\n\t\t\tch = ( ih + beta/alpha*(ih - il) ) / (alpha + 2*beta) \n\t\t\tcl = ( il - beta/alpha*(ih - il) ) / (alpha + 2*beta)\n\n\t\t\t# Projections on axes\n\n\t\t\tK_bar_th[1,0,i,j] = ch * integ.PsiPrime()\n\t\t\tK_bar_th[1,1,i,j] = cl * integ.PsiPrime()\n\n\t\t\tY_bar_th[1,0,i,j] = ch * integ.PsiPrimeSq()\n\t\t\tY_bar_th[1,1,i,j] = cl * integ.PsiPrimeSq()\n\n\t\t\t# Compute dot products\n\n\t\t\tKnormA = N + integ.PsiPrimeSq() * ch**2\n\t\t\tKnormB = N + integ.PsiPrimeSq() * cl**2 \n\t\t\tKAA = integ.PsiPrime()**2 * ch**2 \n\t\t\tKBB = integ.PsiPrime()**2 * cl**2 \n\t\t\tKAB = integ.PsiPrime()**2 * ( ch * cl )\n\n\t\t\tYnormA = N * ( integ.PsiSq() - integ.Psi()**2 ) + integ.PsiPrimeFourth() * ch**2 \n\t\t\tYnormB = N * ( integ.PsiSq() - integ.Psi()**2 ) + integ.PsiPrimeFourth() * cl**2 \n\t\t\tYAA = integ.PsiPrimeSq()**2 * ch**2\n\t\t\tYBB = integ.PsiPrimeSq()**2 * cl**2 \n\t\t\tYAB = integ.PsiPrimeSq()**2 * ( ch * cl )\n\n\t\t\t# Activity measures\n\n\t\t\tK_corr_th[1,i,j] = KAB / ( np.sqrt(KnormA) * np.sqrt(KnormB) )\n\t\t\tY_corr_th[1,i,j] = YAB / ( np.sqrt(YnormA) * np.sqrt(YnormB) )\n\n\t# Store \n\n\tfac.Store(eta_values_th, 'eta_values_th.p', 
'Results/')\n\tfac.Store(offset_values_th, 'offset_values_th.p', 'Results/')\n\n\tfac.Store(K_bar_th, 'K_bar_th.p', 'Results/')\n\tfac.Store(Y_bar_th, 'Y_bar_th.p', 'Results/')\n\n\tfac.Store(K_corr_th, 'K_corr_th.p', 'Results/')\n\tfac.Store(Y_corr_th, 'Y_corr_th.p', 'Results/')\n\n\nelse:\n\n\t# Retrieve\n\n\teta_values_th = fac.Retrieve('eta_values_th.p', 'Results/')\n\toffset_values_th = fac.Retrieve('offset_values_th.p', 'Results/')\n\n\tK_bar_th = fac.Retrieve('K_bar_th.p', 'Results/')\n\tY_bar_th = fac.Retrieve('Y_bar_th.p', 'Results/')\n\n\tK_corr_th = fac.Retrieve('K_corr_th.p', 'Results/')\n\tY_corr_th = fac.Retrieve('Y_corr_th.p', 'Results/')\n\n\n#### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### ####\n#### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### ####\n### Plots\n\ncm_subsection = np.linspace(0.2, 0.8, len(offset_values_th)) \npalette = [ cm.Greys(x) for x in cm_subsection ]\n\n#\n\nfg = plt.figure()\nax0 = plt.axes(frameon=True)\n\t\n# line, = plt.plot(eta_values_th, Y_corr_th[0,:,0], color = '#FFD012', label = 'pre-')\n# line, = plt.plot(eta_values_th, Y_corr_th[1,:,0], '0.8', label = 'post-')\n\nfor j, offset in enumerate(offset_values_th):\n\tline, = plt.plot(eta_values_th, Y_corr_th[1,:,j], color = palette[j])\n\nplt.xlabel(r'Learning rates ratio $\\eta_w/\\eta_u$')\nplt.ylabel(r'Category correlation')\n\nplt.xlim(0, 0.4)\nplt.xticks([0, 0.2, 0.4])\n\nplt.ylim(-0.2, 0.2)\nplt.yticks([-0.2, 0, 0.2])\n\nplt.legend(frameon = False)\n\nax0.spines['top'].set_visible(False)\nax0.spines['right'].set_visible(False)\nax0.yaxis.set_ticks_position('left')\nax0.xaxis.set_ticks_position('bottom')\nplt.locator_params(nbins=4)\n\nplt.savefig('Y_corr.pdf')\nplt.show()\n\n#### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### #### 
####\n\nsys.exit(0)\n","sub_path":"SimpleCategorization/Correlations/eta/compute_theory.py","file_name":"compute_theory.py","file_ext":"py","file_size_in_byte":4888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"138222777","text":"'''\nWin32处理模块,主要用来处理win32窗口、win32元素等\n'''\n\nfrom . import clipboard, element, image, screenshot, window\n\nfrom .._core.retry import Retry as _Retry\nfrom .._core.validation import valid as _valid, ValidPattern as _ValidPattern\nfrom .._core.uidriver import execute as _execute\nfrom ..selector import Selector, _get_selector_by_name\nfrom ..errors import UIAError, UIAErrorCode\nfrom .window import Win32Window\nfrom .element import Win32Element\n\nimport typing\nimport time\n\nimport os, typing, base64, time, json, win32api, win32gui, winerror, win32event, win32process, ctypes\n\n\ndef get(title=None, class_name=None, use_wildcard=False, *, timeout=5) -> Win32Window:\n \"\"\"\n 根据指定的标题或类名获取窗口对象, 默认是使用模糊匹配, 如果`use_wildcard`为`True`则使用通配符匹配\n * @param title, 指定的窗口标题\n * @param class_name, 指定的窗口类名\n * @param use_wildcard, 是否使用通配符进行匹配, 默认值为`False`\n * @param timeout, 获取窗口对象超时时间, 默认值是5s, 如果超过改时间还未找到窗口在抛出 `UIAError` 的异常\n * 等于 0, 不等待\n * 大于 0, 按时间等待\n * 等于 -1, 一直等待\n * @return `Win32Window`, 获取到的窗口对象\n \"\"\"\n\n if title is None and class_name is None:\n raise ValueError('标题或类型名至少指定一个')\n for _ in _Retry(timeout=timeout, interval=0.5, error_message='获取窗口失败'):\n try:\n wid = _invoke('GetWindowByTitleAndClassName', {\n 'title': title, 'className': class_name, 'useWildcard': use_wildcard})\n window = _create_window(wid)\n return window\n except UIAError as e:\n if e.code == UIAErrorCode.NoSuchWindow:\n pass\n else:\n raise e\n\n\ndef get_by_handle(handle=None, *, timeout=5) -> Win32Window:\n \"\"\"\n 根据指定的窗口句柄获取窗口对象\n * @param handle, 指定的窗口句柄获, 是一个十六进制的数, 如 0x14210c\n * @param timeout, 获取窗口对象超时时间, 默认值是5s, 如果超过改时间还未找到窗口在抛出 `UIAError` 的异常\n * 等于 0, 不等待\n * 大于 0, 按时间等待\n * 等于 -1, 一直等待\n * 
@return `Win32Window`, 获取到的窗口对象\n \"\"\"\n\n _valid('handle', handle, (_ValidPattern.Type, int))\n for _ in _Retry(timeout=timeout, interval=0.5, error_message='获取窗口失败'):\n try:\n wid = _invoke('GetWindowByHandle', {'handle': handle})\n window = _create_window(wid)\n return window\n except UIAError as e:\n if e.code == UIAErrorCode.NoSuchWindow:\n pass\n else:\n raise e\n\n\ndef get_by_selector(selector=None, *, timeout=5) -> Win32Window:\n \"\"\"\n 查找并返回与选择器匹配的窗口对象\n * @param selector, 要查找的选择器, 支持以下格式: \n * 选择器名称, `str`类型\n * 选择器对象, `Selector`类型\n * @param timeout, 获取窗口对象超时时间, 默认值是5s, 如果超过改时间还未找到窗口在抛出 `UIAError` 的异常\n * 等于 0, 不等待\n * 大于 0, 按时间等待\n * 等于 -1, 一直等待\n * @return `Win32Window`, ���回与选择器配的窗口对象\n \"\"\"\n\n if isinstance(selector, str):\n selector = _get_selector_by_name(selector)\n _valid('selector', selector, (_ValidPattern.Type, Selector))\n for _ in _Retry(timeout=timeout, interval=0.5, error_message='获取窗口失败'):\n try:\n wid = _invoke('GetWindowBySelector', {'selector': selector.value})\n window = _create_window(wid)\n return window\n except UIAError as e:\n if e.code == UIAErrorCode.NoSuchWindow:\n pass\n else:\n raise e\n\n\ndef get_desktop() -> Win32Window:\n \"\"\"\n 获取桌面对象\n * @return `Win32Window`, 获取到的桌面对象\n \"\"\"\n\n for _ in _Retry(5, interval=0.5, error_message='获取窗口失败'):\n try:\n wid = _invoke('GetDesktopWindow')\n window = _create_window(wid)\n return window\n except UIAError as e:\n if e.code == UIAErrorCode.NoSuchWindow:\n pass\n else:\n raise e\n\n\ndef minimize_all():\n \"\"\"\n 最小化全部窗口\n \"\"\"\n\n _invoke(\"MinimizeAllWindows\")\n\n\ndef mouse_move(point_x: int, point_y: int, relative_to='screen', move_speed='instant', delay_after=1):\n \"\"\"\n 移动鼠标到指定位置\n * @param point_x, 指定坐标的横坐标, 为整数\n * @param point_y, 指定坐标的纵坐标, 为整数\n * @param relative_to, 指定坐标位置的相对对象, 默认相对于桌面移动\n * `'screen'`, 相对于桌面\n * `'window'`, 相对于当前打开(激活)的窗口\n * `'position'`, 相对于当前数据所在的位置\n * @param move_speed, 移动鼠标到指定坐标的速度, 默认瞬间移动到目标坐标\n * `'instant'`, 瞬间\n * `'fast'`, 
快速\n * `'middle'`, 中速\n * `'slow'`, 慢速\n * @param delay_after, 执行成功后延迟时间, 默认延迟1s\n \"\"\"\n\n _invoke(\"MouseMove\", {'pointX': point_x, 'pointY': point_y,\n 'relativeTo': relative_to, 'moveSpeed': move_speed})\n if delay_after > 0:\n time.sleep(delay_after)\n\n\ndef mouse_move_by_anchor(rectangle, anchor=None, relative_to='screen', move_speed='instant', delay_after=1):\n \"\"\"\n 移动鼠标到指定位置\n * @param rectangle, 需要悬浮的目标矩形范围,格式为 (x, y, width, height)\n * @param anchor, 锚点位置,默认值为 ('middleCenter', 0, 0),表示悬浮在范围的中心,不偏移\n * @param relative_to, 指定坐标位置的相对对象, 默认相对于桌面移动\n * `'screen'`, 相对于桌面\n * `'window'`, 相对于当前打开(激活)的窗口\n * `'position'`, 相对于当前数据所在的位置\n * @param move_speed, 移动鼠标到指定坐标的速度, 默认瞬间移动到目标坐标\n * `'instant'`, 瞬间\n * `'fast'`, 快速\n * `'middle'`, 中速\n * `'slow'`, 慢速\n * @param delay_after, 执行成功后延迟时间, 默认延迟1s\n \"\"\"\n \n sudoku_part, offset_x, offset_y = ('middleCenter', 0, 0) if anchor is None else anchor\n x, y, width, height = rectangle\n\n _invoke('MouseMoveByAnchor', {\n 'rectangle': {'x': x, 'y': y, 'width': width, 'height': height},\n 'sudokuPart': sudoku_part,\n 'offsetX': offset_x,\n 'offsetY': offset_y,\n\n 'relativeTo': relative_to,\n 'moveSpeed': move_speed\n }\n )\n\n if delay_after > 0:\n time.sleep(delay_after)\n\n\ndef send_keys(keys='', send_key_delay=50, hardware_driver_input=False, delay_after=1):\n \"\"\"\n 给当前激活窗口发送文本: 对于默认输入方式: !、#、+、^、{、} 这六个特殊符号,请用{}包括起来, 如{#}、{{}等; 对于驱动输入方式: 只支持键盘上的可见字符, 不能用此方式发送特殊按键, 比如Tab、Ctr、Enter、Shift...\n * @param keys, 键盘按键\n * @param hardware_driver_input, 是否通过硬件驱动的方式输入\n * @param send_key_delay, 两次按键之间的时间间隔,默认为50ms\n * @param delay_after, 执行成功后延迟时间, 默认延迟1s\n \"\"\"\n\n _invoke(\"SendKeys\", {'keys': keys, \"hardwareDriverInput\": hardware_driver_input, \"sendKeyDelay\": send_key_delay})\n if delay_after > 0:\n time.sleep(delay_after)\n\n\ndef mouse_click(button='left', click_type='click', hardware_driver_click=False, keys='none', delay_after=1):\n \"\"\"\n 模拟鼠标点击, 如鼠标左键单击、左键双击等\n * @param button, 要点击的鼠标按键, 默认为左键\n * 
`'left'`, 鼠标左键\n * `'right'`, 鼠标右键\n * @param click_type, 鼠标按键的点击方式, 如单击、双击等, 默认为单击\n * `'click'`, 鼠标单击\n * `'dbclick'`, 鼠标双击\n * `'down'`, 鼠标按键按下\n * `'up'`, 鼠标按键弹起\n * @param hardware_driver_click, 是否通过硬件驱动的方式点击\n * @param keys, 点击鼠标按钮时的键盘辅助按钮,可以为空,默认为空\n * `'none'`, 无键盘辅助按钮\n * `'alt'`, 使用`alt`键作为辅助按钮\n * `'ctrl'`, 使用 `ctrl`键作为辅助按钮\n * `'shift'`, 使用`shift`键作为辅助按钮\n * `'win'`, 使用win(窗口)键作为辅助按钮\n * @param delay_after, 执行成功后延迟时间, 默认延迟1s\n \"\"\"\n\n _invoke('MouseClick', {'button': button,\n 'clickType': click_type, \"hardwareDriverClick\": hardware_driver_click, 'keys': keys})\n if delay_after > 0:\n time.sleep(delay_after)\n\n\ndef mouse_click_by_anchor(rectangle, anchor=None, button='left', click_type='click', keys='none', hardware_driver_click=False, delay_after=1, move_mouse=True):\n \"\"\"\n 模拟鼠标点击指定位置, 如鼠标左键单击、左键双击等\n * @param rectangle, 需要点击的目标矩形范围,格式为 (x, y, width, height)\n * @param anchor, 锚点位置,默认值为 ('middleCenter', 0, 0),表示点击范围的中心,不偏移\n * @param button, 要点击的鼠标按键, 默认为左键\n * `'left'`, 鼠标左键\n * `'right'`, 鼠标右键\n * @param click_type, 鼠标按键的点击方式, 如单击、双击等, 默认为单击\n * `'click'`, 鼠标单击\n * `'dbclick'`, 鼠标双击\n * `'down'`, 鼠标按键按下\n * `'up'`, 鼠标按键弹起\n * @param hardware_driver_click, 是否通过硬件驱动的方式点击\n * @param keys, 点击鼠标按钮时的键盘辅助按钮,可以为空,默认为空\n * `'none'`, 无键盘辅助按钮\n * `'alt'`, 使用`alt`键作为辅助按钮\n * `'ctrl'`, 使用 `ctrl`键作为辅助按钮\n * `'shift'`, 使用`shift`键作为辅助按钮\n * `'win'`, 使用win(窗口)键作为辅助按钮\n * @param delay_after, 执行成功后延迟时间, 默认延迟1s\n * @param move_mouse, 是否显示鼠标移动轨迹, 默认为`True`,显示鼠标移动轨迹\n \"\"\"\n\n sudoku_part, offset_x, offset_y = ('middleCenter', 0, 0) if anchor is None else anchor\n x, y, width, height = rectangle\n\n _invoke('MouseClickByAnchor', {\n 'rectangle': {'x': x, 'y': y, 'width': width, 'height': height},\n 'sudokuPart': sudoku_part,\n 'offsetX': offset_x,\n 'offsetY': offset_y,\n \n 'button': button,\n 'clickType': click_type, \n 'keys': keys,\n \"hardwareDriverClick\": hardware_driver_click,\n\n 'moveMouse': move_mouse\n }\n )\n\n if delay_after > 0:\n 
time.sleep(delay_after)\n\n\ndef mouse_wheel(wheel_direction='down', wheel_times=1, keys='none', delay_after=1):\n \"\"\"\n 模拟鼠标滚轮滚动, 默认往下滚动\n * @param wheel_direction, 鼠标滚轮滚动方向, 默认往下滚动\n * `'up'`, 往上滚动鼠标滚轮\n * `'down'`, 往下滚动鼠标滚轮\n * @param wheel_times, 滚动鼠标滚轮次数,默认滚动1次\n * @param keys, 滚动鼠标滚轮时的键盘辅助按钮,可以为空,默认为空\n * `'none'`, 无键盘辅助按钮\n * `'alt'`, 使用`alt`键作为辅助按钮\n * `'ctrl'`, 使用 `ctrl`键作为辅助按钮\n * `'shift'`, 使用`shift`键作为辅助按钮\n * `'win'`, 使用win(窗口)键作为辅助按钮\n * @param delay_after, 执行成功后延迟时间, 默认延迟1s\n \"\"\"\n\n _invoke('MouseWheel', {'wheelDirection': wheel_direction,\n 'wheelTimes': wheel_times, 'keys': keys})\n if delay_after > 0:\n time.sleep(delay_after)\n\n\ndef get_mouse_position(relative_to='screen') -> typing.Tuple:\n \"\"\"\n 获取鼠标相对于桌面/当前选中(激活)的窗的坐标位置\n * @param relative_to, 鼠标滚轮滚动方向, 默认往下滚动\n * `'screen'`, 相对于桌面的位置\n * `'window'`, 相对于当前选中(激活)窗口的位置\n * @return `tuple`, 返回一组坐标值\n \"\"\"\n position = _invoke(\"GetMousePosition\", {'relativeTo': relative_to})\n return (position['x'], position['y'])\n\n\ndef exists(window) -> bool:\n '''\n 判断窗口是否存在\n * @param window, Win32Window窗口对象\n * @return `bool`, 返回窗口的存在窗台, 存在返回`True`, 否则返回`False`\n '''\n\n if window is None:\n return False\n if not isinstance(window, Win32Window):\n return False\n return _invoke('Exists', {'hWnd': window.hWnd})\n\n\ndef get_selected_text(**args) -> str:\n '''\n 获取当前激活窗口选中的文本\n * @return `str`, 返回当前激活窗口选中的文本\n ''' \n return _invoke('GetSelectedText')\n\n\ndef _invoke(action, args=None):\n return _execute(f'Win32.{action}', args)\n\n\ndef _create_window(wid) -> Win32Window:\n return Win32Window('Win32Window', wid)\n\n\ndef lock_screen() -> typing.NoReturn:\n '''\n 屏幕锁屏\n '''\n ctypes.windll.user32.LockWorkStation()\n\n\ndef _credential_provider_running():\n mutex_name = 'shadowbot_credential_provider_running'\n mutex = win32event.CreateMutex(None, False, mutex_name)\n is_running = (winerror.ERROR_ALREADY_EXISTS == win32api.GetLastError())\n win32api.CloseHandle(mutex)\n return is_running\n\ndef 
unlock_screen(user_name, password) -> typing.NoReturn:\n '''\n 屏幕解屏\n * @param user_name, 用户名\n * @param password, 密码\n '''\n\n path = ctypes.create_unicode_buffer(1024)\n ctypes.windll.Kernel32.GetEnvironmentVariableW('ProgramData', path, 1024)\n path = os.path.join(path.value, r'ShadowBot\\support\\CredentialProvider')\n\n #检查是否已经安装 credential provider \n versiontxt_path = os.path.join(path, r'Version.txt')\n if not os.path.exists(versiontxt_path):\n raise ValueError('屏幕解锁服务尚未安装,请在“托盘右键菜单>设置>工具>屏幕解锁服务”中安装。')\n with open(versiontxt_path, 'r') as f:\n version = f.readline()\n if version != '1.0.0.1':\n raise ValueError('屏幕解锁服务版本过低,请在“托盘右键菜单>设置>工具>屏幕解锁服务”中重新安装。')\n\n #程序是否正在运行\n if not _credential_provider_running():\n return\n \n #写入用户名和密码 \n rdptxt_path = os.path.join(path, r'Credential.txt')\n with open(rdptxt_path, 'w') as f:\n password_base64 = base64.b64encode(password.encode('ANSI')).decode()\n #写入文件时,python 会将 \\n 自动转换成 \\r\\n\n text = f'{user_name}\\n{password_base64}'\n f.write(text)\n\n #等待解屏\n for i in range(5): \n time.sleep(1)\n\n #是否登陆成功\n if not _credential_provider_running():\n return\n \n raise ValueError('屏幕解屏失败!')\n\ndef _is_inputlang_valid(window):\n thread_id, _ = win32process.GetWindowThreadProcessId(window)\n hkl = win32api.GetKeyboardLayout(thread_id)\n\n name = ctypes.create_unicode_buffer(100)\n ctypes.windll.Imm32.ImmGetDescriptionW(hkl, name, 100)\n\n return '必应 Bing 输入法' not in name.value and 'QQ五笔输入法' not in name.value\n\ndef set_ime(lang) -> typing.NoReturn:\n '''\n 设置当前激活窗口的输入法\n * @param lang, 取值为“chinese”或者“english”\n '''\n\n foreground_window = win32gui.GetForegroundWindow()\n ime_window = ctypes.windll.Imm32.ImmGetDefaultIMEWnd(foreground_window)\n if not _is_inputlang_valid(ime_window):\n input_locale_identifier_name = '00000804' if lang == \"chinese\" else '08040804'\n input_locale_identifier = win32api.LoadKeyboardLayout(input_locale_identifier_name, 1)\n win32api.SendMessage(foreground_window, 0x50, 0, 
input_locale_identifier)\n else:\n lparam = 1 if lang == \"chinese\" else 0\n win32api.SendMessage(ime_window, 0x0283, 6, lparam)\n\n\ndef get_ime() -> str:\n '''\n 获取当前激活窗口的输入法中英文状态\n * @return `str`, 返回 unknow:位置;english:当前输入法是英文输入;chinese:当前输入法是中文输入\n '''\n foreground_window = win32gui.GetForegroundWindow()\n ime_window = ctypes.windll.Imm32.ImmGetDefaultIMEWnd(foreground_window)\n if _is_inputlang_valid(ime_window):\n if 1 == win32api.SendMessage(ime_window, 0x0283, 5, 0):\n return 'chinese'\n else:\n return 'english'\n else:\n return 'unknow'","sub_path":"RPA/组件/ShadowBot/3.8.9/xbot/win32/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":17264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"449279988","text":"from sys import stdin\n\n\n\"\"\"Nombre: Brayan David Vera Leyton\nfecha: 22/feb/2019\ncodigo: 8918691\n\"\"\"\n\ndef tab(p,r,M):\n N=len(p)\n tab=[[0 for _ in range(M+1)]for _ in range(N+1)]\n n,m=1,0\n while n!=N+1:\n if m==M+1:\n n,m=n+1,0\n else:\n tab[n][m]=tab[n-1][m]\n if p[n-1]<=m:\n tab[n][m]=max(tab[n][m],1+tab[n-1][min(m-p[n-1],r[n-1])])\n m+=1\n return tab[N][M]\n\ndef main():\n n=int(stdin.readline().strip())\n M=0\n while n!=0:\n p=[0 for i in range(n)]\n r=[0 for i in range(n)]\n for i in range(0,n):\n tok = stdin.readline().strip().split()\n x=int(tok[0])\n y=int(tok[1])\n p[i]=x\n r[i]=y\n M=max(M,x+y)\n p.reverse()\n r.reverse()\n print(tab(p,r,M))\n n=int(stdin.readline().strip())\nmain()\n\n\n\n\n\n\n\n\n","sub_path":"parcial1 ADA/boxes.py","file_name":"boxes.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"54201010","text":"from __future__ import print_function\nimport time\nimport os\nimport time\nfrom math import pi\n\nfrom pygecko.multiprocessing import geckopy\nfrom pygecko.transport import Pub, Sub\nfrom pygecko.transport import zmqTCP, zmqUDS\nfrom 
pygecko.transport import GeckoCore\nfrom pygecko.multiprocessing import GeckoSimpleProcess\n\nfrom pygecko import FileJson, FileYaml\nfrom pygecko import Quaternion\nfrom pygecko import Vector\nfrom pygecko import IMU\nfrom pygecko import Twist\n# from pygecko import Wrench\nfrom pygecko import Pose\n# from pygecko import Joystick\nfrom pygecko import Image, image2msg, msg2image\n# from pygecko import PoseStamped\n# from pygecko import Lidar\n\n# Fake cv2 things for testing\nimport pygecko.fake.fake_camera as pcv2\n\n\ndef test_images():\n img = pcv2.cvImage(6, 4)\n msg = image2msg(img)\n print(msg)\n ret = msg2image(msg)\n print(ret)\n assert img.all() == ret.all()\n\n\ndef test_compressed_images():\n img = pcv2.cvImage(6, 4)\n msg = image2msg(img, compressed=True)\n print(msg)\n ret = msg2image(msg)\n print(ret)\n assert img.all() == ret.all()\n\n\n\ndef file_func(Obj, fname):\n data = {'a':1, 'bob':[1,2,3,4], 'c':\"hello cowboy\", 'd': {'a':pi}}\n f = Obj()\n f.set(data)\n f.write(fname)\n d = f.read(fname)\n assert d == data\n os.remove(fname)\n\n\ndef test_json():\n file_func(FileJson, 'test.json')\n\n\ndef test_yaml():\n file_func(FileYaml, 'test.yml')\n\n\ndef test_rate():\n rate = geckopy.Rate(10)\n start = time.time()\n for _ in range(10):\n rate.sleep()\n stop = time.time()\n # print(stop - start)\n assert (stop - start) + 0.05 > 1.0\n\n\n# # tcp setup\n# tcp_pub = zmqTCP('localhost', 9998)\n# tcp_sub = zmqTCP('localhost', 9999)\n#\n# # unix domain setup\n# uds_ifile = zmqUDS('/tmp/uds_ifile')\n# uds_ofile = zmqUDS('/tmp/uds_ofile')\n\n\ndef msg_zmq():\n # start message hub\n core = GeckoCore()\n core.start()\n\n msg1 = IMU(\n Vector(1,2,3),\n Vector(11,12,13),\n Vector(21,22,23))\n\n msg2 = Twist(\n Vector(1,2,3),\n Vector(11,12,13))\n\n msg3 = Pose(\n Vector(1,2,3),\n Quaternion(1,2,3,4))\n\n def publisher(**kwargs):\n geckopy.init_node(**kwargs)\n p = geckopy.Publisher(topics=['test'])\n time.sleep(1) # need this!!\n\n for msg in [msg1,msg2,msg3]:\n 
p.pub('test', msg)\n time.sleep(0.01)\n\n args = {'host': 'localhost'}\n p = GeckoSimpleProcess()\n p.start(func=publisher, name='publisher', kwargs=args)\n\n # subscriber\n s = Sub(topics=['test'])\n s.connect(sub_addr)\n\n for msg in [msg1,msg2,msg3]:\n t, m = s.recv()\n assert t == b'test' # FIXME: fix stupid binary string crap!\n assert msg == m\n\n core.join(0.1)\n time.sleep(1) # if I run these too fast, I get errors on bind()\n\n\n# def test_msg_zmq_uds():\n# msg_zmq(uds_ifile, uds_ofile)\n\n\ndef test_msg_zmq_tcp():\n msg_zmq()\n\n\ndef py_zmq():\n # start message hub\n core = GeckoCore()\n core.start()\n\n def publisher(**kwargs):\n geckopy.init_node(**kwargs)\n p = geckopy.Publisher(topics=['test'])\n time.sleep(1)\n msg = {'a':1, 'b':[1,2,3], 'c':'hello cowboy'}\n p.pub('test', msg) # topic msg\n\n args = {'host': \"localhost\"}\n p = GeckoSimpleProcess()\n p.start(func=publisher, name='publisher', kwargs=args)\n\n # subscriber\n s = Sub(topics=['test'])\n s.connect(sub_addr)\n t, msg = s.recv()\n\n assert t == b'test'\n assert msg == {'a':1, 'b':[1,2,3], 'c':'hello cowboy'}\n\n core.join(0.1)\n time.sleep(1) # if I run these too fast, I get errors on bind()\n\n\ndef test_py_zmq_tcp():\n py_zmq()\n\n\n# def test_py_zmq_uds():\n# py_zmq(uds_ifile, uds_ofile)\n\n\n# def py_geckpy(pub_addr, sub_addr):\n# # start message hub\n# core = GeckoCore(pub_addr, sub_addr)\n# core.start()\n#\n# geckopy = GeckoPy()\n#\n# def callback(t, msg):\n# assert t == b'test'\n# assert msg == {'a':1, 'b':[1,2,3], 'c':'hello cowboy'}\n#\n# def callback(**kwargs):\n# addr = kwargs.get('addr')\n# p = Pub()\n# p.connect(addr)\n# time.sleep(1)\n# msg = {'a':1, 'b':[1,2,3], 'c':'hello cowboy'}\n# p.pub('test', msg) # topic msg\n#\n#\n#\n# args = {'addr': pub_addr}\n# p = GeckoSimpleProcess()\n# p.start(func=publisher, name='publisher', kwargs=args)\n#\n# # subscriber\n# s = Sub(topics=['test'])\n# s.connect(sub_addr)\n# t, msg = s.recv()\n#\n# print(t, msg)\n#\n# assert t == 
b'test'\n# assert msg == {'a':1, 'b':[1,2,3], 'c':'hello cowboy'}\n#\n# p.join(0.1)\n# core.join(0.1)\n#\n# def test_py_geckopy_tcp():\n# py_geckpy(tcp_pub, tcp_sub)\n#\n#\n# def test_py_geckopy_uds():\n# py_geckpy(uds_ifile, uds_ofile)\n","sub_path":"tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":4740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"284009154","text":"#!/usr/bin/python\n# -*-coding: utf-8 -*-\n\n# search from 987654321\n# remove the even number and the number can be divided by 3\n\nfrom itertools import permutations\nfrom math import sqrt\n\n\ndef isprime(number):\n for i in range(2, int(sqrt(number))+1):\n if number % i == 0:\n return False\n return True\n\nodd_list = [1, 3, 5, 7]\nresult = 0\nfor last in odd_list:\n rest_odd = [i for i in odd_list if i != last]\n for number in permutations([2, 4, 6, 8]+rest_odd, 7):\n n = int(''.join([str(i) for i in number])+str(last))\n if (sum(number)+last) % 3 == 0:\n continue\n if isprime(n) and n > result:\n result = n\n\nprint(result)\n","sub_path":"Project-Euler/src/41.py","file_name":"41.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"84272846","text":"import markdown\nimport csv\nimport datetime\n\nfrom django.contrib import messages\nfrom django.core.exceptions import ValidationError\nfrom django.db.models import Q\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render, redirect\nfrom django.views.generic import ListView, FormView, DetailView\nfrom django.http import HttpResponse\nfrom announcements.models import Announcement\nfrom content.models import Page, Event\nfrom .forms import SessionForm, LoginForm\nfrom .models import Date, UserSession, User\n\n\ndef index(req):\n today = datetime.datetime.today()\n\n pages = Page.objects.filter(published=True).all()\n date = 
Date.objects.filter(date__gte=today).order_by('date').first()\n event = Event.objects.filter(type='social', date__gte=today).order_by('date').first()\n announcements = Announcement.objects.order_by('-date').all()\n for announcement in announcements:\n announcement.html = markdown.markdown(announcement.text)\n\n return render(req, 'pages/index.html', {\n 'd': date,\n 'a': announcements,\n 'p': pages,\n 'e': event\n })\n\n\nclass BookView(ListView):\n template_name = 'pages/sessions.html'\n context_object_name = 'date_list'\n\n def get_queryset(self):\n today = datetime.datetime.today()\n return Date.objects.filter(date__gte=today).order_by('date')\n\n\nclass DateView(DetailView):\n template_name = 'pages/dates.html'\n\n def get(self, context, **response_kwargs):\n date = Date.objects.get(date=response_kwargs['d'])\n\n if not date:\n return HttpResponseRedirect('/sessions')\n\n return render(self.request, self.template_name, {\n 'date': date,\n })\n\n\nclass DateBookingView(FormView):\n template_name = 'pages/booking.html'\n form_class = SessionForm\n\n def get(self, context, **response_kwargs):\n date = Date.objects.get(date=response_kwargs['d'])\n self.form_class.declared_fields['activity'].initial = 'sailing'\n\n if not date:\n return HttpResponseRedirect('/sessions')\n\n # Check that time is valid for this day.\n\n return render(self.request, self.template_name, {\n 'form': self.form_class,\n 'time': \"11.00\",\n 'date': date.to_human(),\n })\n\n def form_valid(self, form):\n session = UserSession(\n user=User.objects.all().filter(Q(name=form.data['name']) | Q(kent_id=form.data['name'])).get(),\n date=Date.objects.filter(date=self.kwargs['d']).get(),\n time=self.kwargs['t'],\n activity=str(form.data['activity'])[0].upper()\n )\n\n try:\n session.validate_unique()\n session.save()\n session.send_confirmation()\n messages.success(self.request, 'Thanks for booking! 
We\\'ll send you a reminder the day before.')\n return redirect('/')\n except ValidationError:\n messages.error(self.request, 'Looks like you\\'re already booked for that session.')\n return redirect('/')\n\n def form_invalid(self, form):\n messages.error(self.request, 'It appears you haven\\'t signed up before. Please could you give us some details?')\n return redirect('/signup')\n\n\nclass DateTimeBookingView(DateBookingView):\n def get(self, context, **response_kwargs):\n date = Date.objects.get(date=response_kwargs['d'])\n time = response_kwargs['t'] if response_kwargs['t'] in date.get_times() else \"11.00\"\n\n self.form_class.declared_fields['activity'].initial = 'sailing'\n\n if not date:\n return HttpResponseRedirect('/sessions')\n\n return render(self.request, self.template_name, {\n 'form': self.form_class,\n 'date': date.to_human(),\n 'time': time\n })\n\n\nclass DateTimeActivityBookingView(DateTimeBookingView):\n def get(self, context, **response_kwargs):\n date = Date.objects.get(date=response_kwargs['d'])\n time = response_kwargs['t'] if response_kwargs['t'] in date.get_times() else \"11.00\"\n activity = response_kwargs['a']\n\n self.form_class.declared_fields['activity'].initial = activity\n\n if not date:\n return HttpResponseRedirect('/sessions')\n\n return render(self.request, self.template_name, {\n 'form': self.form_class,\n 'date': date.to_human(),\n 'time': time,\n 'activity': activity\n })\n\n\nclass Redirect(DetailView):\n def get(self, *context, **kwargs):\n return redirect('/')\n\n\nclass BookingSuccess(Redirect):\n def get(self, *context, **kwargs):\n messages.success(self.request, 'Thanks for booking! 
We\\'ll send you a reminder the day before.')\n return redirect('/')\n\nclass ExportView(DetailView):\n def get(self, request, *args, **kwargs):\n return self.render_to_response(request)\n\n def render_to_response(self, context, **response_kwargs):\n today = datetime.datetime.today()\n date = Date.objects.filter(date__gte=today).order_by('date').first()\n sessions = UserSession.objects.filter(date=date).iterator()\n\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"ukc-' + str(date.date) + '.csv\"'\n\n writer = csv.writer(response)\n\n writer.writerow(['Shown up', 'Name', 'Kent ID', 'Time', 'Activity',])\n for session in sessions:\n writer.writerow(['', session.user.name, session.user.kent_id, session.time, session.activity])\n\n return response\n\n\nclass LoginView(DetailView):\n template_name = 'pages/login.html'\n form_class = LoginForm\n\n def get(self, request, *args, **kwargs):\n return render(self.request, self.template_name, {\n 'form': self.form_class,\n })\n\n def post(self, request, *args, **kwargs):\n try:\n user = User.objects.all().filter(Q(kent_id=request.POST['name'])).get()\n return redirect('/user/' + user.kent_id)\n\n except User.DoesNotExist:\n messages.error(self.request, 'Can\\'t find any bookings for that Kent ID / Username.')\n return redirect('/')\n\n\nclass UserView(DetailView):\n template_name = 'pages/user.html'\n\n def get(self, request, *args, **kwargs):\n today = datetime.datetime.today()\n\n return render(self.request, self.template_name, {\n 'id': kwargs['u'],\n 'sessions': UserSession.objects.filter(user__kent_id=kwargs['u'], date__date__gte=today),\n })\n\n\nclass UnbookView(DetailView):\n def get(self, request, *args, **kwargs):\n messages.error(self.request, 'Something went wrong. We\\'re sorry. 
We tried, we really did')\n return redirect('/')\n\n def post(self, request, *args, **kwargs):\n try:\n session = UserSession.objects.filter(user__kent_id=kwargs['u'], date__date=kwargs['d'], time=kwargs['t'], activity=kwargs['a']).get()\n session.delete()\n messages.success(self.request, 'No problem, we\\'ve unbooked you from that slot!')\n return redirect('/')\n except UserSession.DoesNotExist:\n messages.error(self.request, 'We couldn\\'t unbook you for some reason. '\n 'Try contacting us via Facebook and we\\'ll sort it.')\n return redirect('/')\n\n\n","sub_path":"tasters/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"134034319","text":"#!/usr/bin/env python3\n\n# -------------------------------------------------- #\n#\n# Author: Ryne Burden\n#\n# Description: \n# - This script can be called on it's own, however it's usually called from data_gather.py\n# \n# Standalone usage example: \n# - python3 team_scraper.py Chicago\\ Bears 2020 8 OR ./team_scraper.py Chicago\\ Bears 2020 8\n# \n# - The script call above will scrape data for the 2020 Chicago Bears through week 8. 
Use 17 to scrape a whole season \n#\n# Tested on Python 3.8.1\n#\n# -------------------------------------------------- #\n\nfrom bs4 import BeautifulSoup as bs\nimport requests\nimport sys\nimport os\nimport teams\n\n# Global list of dictionaries with PFR data-stat tags to search with Beautiful Soup\nstatList = [\n {'data-stat':\"opp\"}, # [0] - opposing team\n {'data-stat':\"first_down_off\"}, # [1] - offensive first downs gained\n {'data-stat':\"pass_yds_off\"}, # [2] - offensive passing yards gained per game\n {'data-stat':\"rush_yds_off\"}, # [3] - offensive rushing yards gained per game\n {'data-stat':\"to_off\"}, # [4] - offensive giveaways \n {'data-stat':\"first_down_def\"}, # [5] - defensive first downs allowed per game\n {'data-stat':\"pass_yds_def\"}, # [6] - defensive passing yards allowed per game\n {'data-stat':\"rush_yds_def\"}, # [7] - defensive rushing yards allowed per game\n {'data-stat':\"to_def\"}, # [8] defensive takeaways\n {'data-stat':\"game_outcome\"} # [9] 1 for win - 0 for loss\n]\n\ndef main():\n # Take arguments from the script call\n # This method makes it easier to automate the process later\n team = sys.argv[1]\n year = sys.argv[2]\n week = sys.argv[3]\n \n # Formulate URL based on given team and season\n # This will pull up the basic stat boxscores for each game\n # teams.py is a file I made that correlates a team's whole name with their PFR abbreviation\n seasonUrl = \"https://www.pro-football-reference.com/teams/\" + teams.URL[team] + \"/\" + str(year) + \".htm\"\n \n # request the HTML from URL\n pageRequest = requests.get(seasonUrl)\n \n # Turn the HTML document into a Beautiful Soup object\n seasonToScrape = bs(pageRequest.text, \"lxml\")\n \n # Find the game stat table\n currentSeason = seasonToScrape.find(id=\"games\")\n\n # Get rid of everything but the game stats that are located in the table body\n currentSeason = currentSeason.tbody\n\n # Type case team as a string to make sure it is processed correctly\n # This may be 
unecessary but I did it anyway - it worked\n team = str(team)\n\n # Opens a new txt file for the team's stats\n seasonToOutput = open(\"Data/Stats/\" + str(year) + \"/\" + team.replace(\" \", \"_\") + \"_\" + str(year) + \".txt\", \"w\")\n\n # priming variable\n # Pro Football Reference uses HTML tables, so this script looks for table rows (tr) of data\n currentWeek = currentSeason.find(\"tr\")\n\n # This loop goes through each game in the user given range\n for week in range (0, int(week)):\n for stat in statList:\n \n # Find the next stat in the statList dictionary list\n currentStat = currentWeek.find(attrs = stat)\n # Get the text from the HTML tag\n currentStat = currentStat.get_text()\n\n # This list will hold stats to reference while scraping\n stats = []\n\n #Conditionals for uniform data formatting\n if currentStat != \"\":\n # Write a 1 to file if the given week's game was won\n if currentStat == \"W\":\n seasonToOutput.write(\"1.000 \")\n stats.append(\"1\")\n # Write a 0 to file for a loss or tie\n elif currentStat == \"L\" or currentStat == \"T\":\n if currentStat == \"L\":\n seasonToOutput.write(\"0.000 \")\n stats.append(\"0\")\n #elif currentStat== \"T\":\n #seasonToOutput.write(\"0.500 \")\n #stats.append(\"0.5\")\n # Each elif statement below ensure the given stat is output with 4 figures\n # This makes the .txt files much easier to read\n elif len(currentStat) == 1:\n seasonToOutput.write(currentStat + \".000 \")\n stats.append(currentStat)\n \n elif len(currentStat) == 2:\n seasonToOutput.write(currentStat + \".00 \")\n stats.append(currentStat)\n \n elif len(currentStat) == 3:\n seasonToOutput.write(currentStat + \".0 \")\n stats.append(currentStat)\n\n # Write a 0.000 if currentStat is blank - used for bye weeks\n else: \n seasonToOutput.write(\"0.000 \")\n\n # Reset current Stat after find\n currentStat = None\n\n # Clear stat list before moving on to the next week\n stats.clear()\n\n # Write a newline to the file to separate weeks\n if 
currentStat != \"Bye Week\":\n seasonToOutput.write(\"\\n\")\n\n # Cycle currentWeek to the next table row\n currentWeek = currentWeek.find_next_sibling(\"tr\")\n\n # Close the output file to end\n seasonToOutput.close()\n\nmain()\n","sub_path":"team_scraper.py","file_name":"team_scraper.py","file_ext":"py","file_size_in_byte":5253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"159279267","text":"from django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('contact.html/', views.contact, name='contact'),\n path('about.html/', views.about, name='about'),\n path('blog.html/', views.blog, name='blog'),\n path('appointment.html/', views.appointment, name='appointment')\n]\n","sub_path":"website/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"490116083","text":"# coding=SJIS\n\nfrom Rate.BaseRate import BaseRate\nfrom Utility.memoize import memoized\nfrom Utility.Util import Util, Settings\nfrom Utility.CommutationTable import CommutationTable\n\nclass WLRate(BaseRate):\n u\"\"\"\n 終身保険のレートを表すクラス\n \"\"\" \n\n @memoized\n def CT(s, kiso):\n if (kiso == 0):\n CIR = s.asm.get_float_value(\"CIR_P\")\n else:\n CIR = s.asm.get_float_value(\"CIR_V\")\n return CommutationTable(CIR, s.qx())\n\n # 予定発生率\n @memoized \n def qx(s):\n if (s.sex == \"M\") : \n return s.asm.get_float_list(\"qx_m_SLT1996\")\n else:\n return s.asm.get_float_list(\"qx_f_SLT1996\")\n\n # 予定事業費\n @memoized\n def Gamma(s):\n return 0.0\n \n @memoized\n def Alpha(s):\n return 0.015\n\n @memoized\n def Beta(s):\n return 0.1\n\n @memoized\n def ZillAlpha(s):\n return s.Alpha()\n\n # レート\n @memoized\n def GrossP_Rate(s):\n return s.BaseP_Rate() *12\n\n @memoized\n def BaseP_Rate(s):\n # monthly rate\n kiso = 0\n numer = ( (s.CT(kiso).Mx(s.x) - s.CT(kiso).Mx(s.x + s.n)) / 
s.CT(kiso).Dx(s.x) \n + s.Alpha() \n + s.CT(kiso).Annuity_xn(s.x, s.n) * s.Gamma()\n )\n denom = (1 - s.Beta()) * s.CT(kiso).Annuity_xn_k(s.x, s.m,12) * 12\n return numer / denom\n\n @memoized\n def CV_Rate(s, t):\n kiso = 0;\n if (t < 10) : return max(s.NetV_Rate(t, kiso) - s.ZillAlpha() * (10.0 - t) / 10.0, 0.0)\n return s.NetV_Rate(t, kiso);\n\n @memoized\n def NetP_Rate(s, kiso):\n numer = ((s.CT(kiso).Mx(s.x) - s.CT(kiso).Mx(s.x + s.n)) / s.CT(kiso).Dx(s.x) \n + s.CT(kiso).Annuity_xn(s.x, s.n) * s.Gamma()\n )\n denom = s.CT(kiso).Annuity_xn(s.x, s.m);\n return numer / denom;\n\n @memoized\n def NetV_Rate(s, t, kiso):\n if (t > s.n) : return 0.0\n if (s.CT(kiso).Dx(s.x + t) <= 0.0) : return 0.0\n return ( (s.CT(kiso).Mx(s.x + t) - s.CT(kiso).Mx(s.x + s.n)) / s.CT(kiso).Dx(s.x + t) \n + s.CT(kiso).Annuity_xn(s.x + t, s.n - t) * s.Gamma() \n - s.CT(kiso).Annuity_xn(s.x + t, s.m - t) * s.NetP_Rate(kiso)\n )\n @memoized\n def ZillV_Rate(s, t, kiso, z):\n zm = min(s.m, z);\n if (t < zm): \n return ( s.NetV_Rate(t, kiso) \n - s.ZillAlpha() * s.CT(kiso).Annuity_xn(s.x + t, zm - t) \n / s.CT(kiso).Annuity_xn(s.x, zm)\n )\n return s.NetV_Rate(t, kiso);\n\n","sub_path":"1.Projection/Rate/WLRate.py","file_name":"WLRate.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"97188736","text":"from ml_model import ModelInit\nfrom picture_detection import get_picture_detection\nfrom video_detection import get_video_detection\n\nmodel_structure_name = 'model_5conv'\npicture_size = 100\nnumber_of_classes = 4\nactivation_model = \"softmax\" # prawdopodobieństwo 1 podzielone na ilość klas\n# activation_model = \"sigmoid\" # procent prawdopodobieństwa\n\nmodel_init = ModelInit(picture_size, number_of_classes, activation_model, model_structure_name)\nmodel = model_init.model\n\n# Picture detection\nget_picture_detection(model, model_init.activation_model, model_init.number_of_classes)\n\n# 
Video detection\n# get_video_detection(model)\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"617880879","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 14 13:44:06 2016\n\n@author: r_luzar\n\"\"\"\n\nimport lvm_read\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import signal\n\n\ndef get_data(podatki, stolpec):\n sila = np.zeros(len(podatki))\n for i in range(len(podatki)):\n sila[i] = podatki[i][stolpec]\n return sila\n\n\ndef st_obr(obr, dvarphi):\n count = 0\n for i in range(len(obr)):\n# print(dvarphi[obr][i])\n if dvarphi[obr][i] > 3.5:\n count += 1\n# print(count)\n return count\n\n\nlvm = lvm_read.read('filtriran_39.lvm', read_from_pickle=False)\npodatki = lvm[0]['data']\ntime = get_data(podatki, 0)\nAx = get_data(podatki, 1)\nBx = get_data(podatki, 2)\n#dvarphi = get_data(podatki, 3)\n\n# %%\nlvm2 = lvm_read.read('nefiltriran_39.lvm', read_from_pickle=False)\npodatki2 = lvm2[0]['data']\ndvarphi = get_data(podatki2, 3)\n\n#obr = signal.find_peaks_cwt(dvarphi, np.arange(1, 30))\n#frekvenca = st_obr(obr, dvarphi) # merili smo 1 s --> Hz\nfrekvenca = 49\n\n# 0.1 V/N\npretvorba = -10 / 3.165\nAx = Ax * pretvorba\nBx = Bx * pretvorba\ninterval = int(len(time)/frekvenca)\n\nAxBx = Ax + Bx\n\n#plt.plot(time, Ax)\n#plt.plot(time[interval:3*interval], AxBx[interval:3*interval],label=r\"$A_x + B_x$\")\n#plt.plot(time[interval:3*interval], Ax[interval:3*interval],label=r\"$A_x$\")\n#plt.plot(time[interval:3*interval], Bx[interval:3*interval],label=r\"$B_x$\")\n#plt.plot(time[interval:2*interval], dvarphi[interval:2*interval],label=r\"$\\dvarphi$\")\n#plt.plot(time, AxBx,label=r\"$A_x + B_x$\")\n#plt.plot(time, Ax,label=r\"$A_x$\")\n#plt.plot(time, Bx,label=r\"$B_x$\")\n#plt.plot(time, dvarphi)\n\n#plt.plot(time[162:162+200], dvarphi[162:162+200])\nxx = np.linspace(0, 
2*np.pi,200)\n#plt.plot(time[162:162+200], AxBx[162:162+200],label=r\"$A_x + B_x$\")\n#plt.plot(time[162:162+200], Ax[162:162+200],label=r\"$A_x$\")\n#plt.plot(time[162:162+200], Bx[162:162+200],label=r\"$B_x$\")\nplt.plot(xx, AxBx[162:162+200],label=r\"$A_x + B_x$\")\nplt.plot(xx, Ax[162:162+200],label=r\"$A_x$\")\nplt.plot(xx, Bx[162:162+200],label=r\"$B_x$\")\n\n#plt.figure(1)\n#plt.subplot(211)\n#plt.plot(time[interval:4*interval], AxBx[interval:4*interval])\n#plt.plot(time[interval:4*interval], Ax[interval:4*interval])\n#plt.plot(time[interval:4*interval], Bx[interval:4*interval])\n#plt.ylabel('Sila [N]')\n#plt.grid()\n#plt.subplot(212)\n#plt.plot(time[interval:4*interval], dvarphi[interval:4*interval])\n\nplt.xlabel(r'$\\varphi $[rad]')\n#plt.xlabel('Cas [s]')\n#plt.ylabel('Napetost [V]')\nplt.ylabel('Sila [N]')\nplt.legend()\nplt.grid()\nplt.show()\n\n\n\n\n\n\n\n\n","sub_path":"BalansirnaNaprava/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"275649332","text":"description = ''\r\ncomponent = []\r\npriors = []\r\nbugs = []\r\nname_of_test = []\r\nfunction_in_test = []\r\nresult_of_test = []\r\ninitialtests = \"\"\r\nstart_test = 0\r\nnew_component = []\r\ncomponentNumber = 0\r\n\r\ncountSingle = 0\r\ncountDouble = 0\r\ncountTriplet = 0\r\ncountOldBugs = 0\r\ncountDoubleBugs = 0\r\ncountTripletBugs = 0\r\ndoubleComponents = []\r\ntripletComponents = []\r\n\r\n# dominators\r\n# fileName = 'CVE-2017-7623'\r\n# fileName = 'CVE-2017-7939'\r\n# fileName = 'CVE-2017-9204'\r\n# fileName = 'CVE-2017-9205'\r\n# fileName = 'CVE-2017-9206'\r\n# fileName = 'CVE-2017-9207'\r\n\r\n# xrefs\r\n# fileName = 'CVE-2015-8784'\r\n# fileName = 'CVE-2016-8691'\r\n# fileName = 'CVE-2016-8692'\r\n# fileName = 'CVE-2016-8887'\r\n# fileName = 'CVE-2016-10250'\r\n# fileName = 'CVE-2016-10251'\r\n# fileName = 'CVE-2016-10271'\r\n# fileName = 
'CVE-2016-10272'\r\n# fileName = 'CVE-2017-7452'\r\n# fileName = 'CVE-2017-7453'\r\n# fileName = 'CVE-2017-7454'\r\n# fileName = 'CVE-2017-7623'\r\n# fileName = 'CVE-2017-7939'\r\n# fileName = 'CVE-2017-7962'\r\n# fileName = 'CVE-2017-9204'\r\n# fileName = 'CVE-2017-9205'\r\n# fileName = 'CVE-2017-9206'\r\nfileName = 'CVE-2017-9207'\r\n\r\n\r\npathLittle = '../../../Desktop/matrices/dominators/'\r\npathBig = '../../../Desktop/matrices/xrefs/'\r\n\r\n# with open(pathLittle + fileName + '.txt') as f:\r\nwith open(pathBig + fileName + '.txt') as f:\r\n lines = f.read().splitlines()\r\n i=0\r\n while i < len(lines):\r\n if(lines[i] == '[Description]'):\r\n description = lines[i+1]\r\n elif(lines[i] == '[Components names]'):\r\n pre_component = lines[i+1].split('), (')\r\n for p in pre_component:\r\n new_p = p.split(', ')\r\n component.append(str(new_p[1].split(\"'\")[1]))\r\n countSingle = len(component)\r\n componentNumber = len(component)\r\n for k in range(len(component)):\r\n tmp = (k, component[k])\r\n new_component.append(tmp)\r\n elif(lines[i] == '[Priors]'):\r\n pre_priors = lines[i+1].split(', ')\r\n for p in range(len(pre_priors)):\r\n if(p==0):\r\n priors.append(float(pre_priors[p].split('[')[1]))\r\n elif(p==len(pre_priors)-1):\r\n priors.append(float(pre_priors[p].split(']')[0]))\r\n else:\r\n priors.append(float(pre_priors[p]))\r\n elif(lines[i]=='[Bugs]'):\r\n pre_bugs = lines[i+1].split(', ')\r\n for p in range(len(pre_bugs)):\r\n if(p==0):\r\n bugs.append(int(pre_bugs[p].split('[')[1]))\r\n elif(p==len(pre_bugs)-1):\r\n bugs.append(int(pre_bugs[p].split(']')[0]))\r\n else:\r\n bugs.append(int(pre_bugs[p]))\r\n countOldBugs = countOldBugs + 1\r\n elif(lines[i]=='[InitialTests]'):\r\n initialtests = lines[i+1]\r\n elif(lines[i]=='[TestDetails]'):\r\n i=i+1\r\n break\r\n\r\n i = i+1\r\n \r\n while i < len(lines):\r\n pre_line = lines[i].split(';[')\r\n line = pre_line[1].split('];')\r\n name_of_test.append(pre_line[0])\r\n pre_functions = line[0].split(', 
')\r\n functions = []\r\n for p in pre_functions:\r\n if p != '':\r\n functions.append(int(p))\r\n function_in_test.append(functions)\r\n result_of_test.append(int(line[1]))\r\n i=i+1\r\n while(start_test\n#\n# This file is part of cppreference-doc\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see http://www.gnu.org/licenses/.\n\n# This program downloads all files that have been forgotten by httrack\n# Currently these files are images referred from the CSS files\n\nimport fnmatch\nimport re\nimport os\n\n# find all css files\ncss_files = []\nfor root, dirnames, filenames in os.walk('reference'):\n for filename in fnmatch.filter(filenames, '*.css'):\n css_files.append(os.path.join(root, filename))\n\nfor fn in css_files:\n f = open(fn, \"r\")\n text = f.read()\n f.close()\n\n p_text = text\n\n # spaces within paths NOT supported\n # matches only those URLS starting with http://$(lang).cppreference.com\n text = re.sub('\\s*', '', text)\n matches = re.findall(':url\\(([^\\'\"][^\\(]*[^\\'\"])\\)', text)\n matches += re.findall(':url\\(\\'([^\\']*)\\'\\)', text)\n matches += re.findall(':url\\(\"([^\\']*)\"\\)', text)\n\n for match in matches:\n p_match = match\n\n # strip query string\n match = re.sub('\\?[^?]*$', '', match)\n\n if (not re.match('https?://', match)):\n continue\n match = re.sub('^https?://', '', match)\n if (not re.match('[^.]*\\.cppreference\\.com', match)):\n continue\n\n start = 
os.path.abspath(os.path.dirname(fn))\n dest = os.path.abspath('reference/' + match)\n\n os.system('rm -f \"' + dest + '\"')\n os.system('wget \"' + p_match + '\" -O \"' + dest + '\"')\n relpath = os.path.relpath(dest, start)\n\n p_text = p_text.replace(p_match, relpath)\n\n f = open(fn, \"w\")\n f.write(p_text)\n f.close()\n","sub_path":"httrack-workarounds.py","file_name":"httrack-workarounds.py","file_ext":"py","file_size_in_byte":2274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"234048926","text":"#Welcome message\nprint (\"Bienvenido a la super calculadora de intereses Acerenzomatic 0.2.beta en Python - Cortesía de Sergio Bianco\")\n\n#Function definition\n\ndef ratecalculator (f_monto, f_cuotas) :\n if f_cuotas == 1:\n x = f_monto\n print (f\"El importe a cobrar es el mismo al ingresado (${x}). Me ejecutaste al pedo\")\n elif f_cuotas == 2:\n x = f_monto + (f_monto*9/100)\n y = f_monto*9/100\n print (f\"Los intereses ascienden a ${y}, aplicándose una tasa del 4,5% mensual. El importe final a cobrar es ${x}\")\n elif f_cuotas > 2:\n x = f_monto + (f_monto*6.7*f_cuotas/100)\n y = f_monto*6.7*f_cuotas/100\n print (f\"Los intereses ascienden a ${y}, aplicándose una tasa del 6,7% mensual. El importe final a cobrar es ${x}\"),\n else:\n print (\"No ingresaste un número de cuotas válido. O le pifiaste o reinventaste la economía\")\n\n#Making calculator reusable\n\nrepeat = \"si\"\nwhile repeat == \"si\":\n #Asking user for input\n monto = input(\"Ingrese el monto \")\n print (f\"El monto ingresado es ${monto}\")\n cuotas = input(\"Ingrese numero de cuotas \")\n print (f\"La cantidad de cuotas es {cuotas}\")\n\n#Input gets converted to float - If error, calculator shuts down\n\n try:\n f_monto = float(monto)\n f_cuotas = float(cuotas)\n except:\n print(\"Tenés que meter numeritos, no letras, mamerto. 
Ahora me cierro\")\n quit()\n\n#Calculator\n\n calculator = ratecalculator(f_monto, f_cuotas)\n\n#JobDone\n\n print (\"Trabajo finalizado. A mimir hasta recibir nueva orden\")\n\n#Asking if user wants to reuse the Calculator\n\n repeat = input(\"¿Desea realizar otro cálculo, querido? (si/no)\")\n\nprint (\"Hasta la vista, baby\")\n","sub_path":"calculadora.py","file_name":"calculadora.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"353897953","text":"#!/usr/bin/python\n\nimport argparse\nimport sys\nfrom xml.etree import cElementTree as ET\n\nimport osc.conf\nimport osc.core\nfrom osclib.conf import Config\nfrom osclib.stagingapi import StagingAPI\n\n\ndef devel_projects_get(apiurl, project):\n \"\"\"\n Returns a sorted list of devel projects for a given project.\n\n Loads all packages for a given project, checks them for a devel link and\n keeps a list of unique devel projects.\n \"\"\"\n devel_projects = {}\n\n url = osc.core.makeurl(apiurl, ['search', 'package'], \"match=[@project='%s']\" % project)\n root = ET.parse(osc.core.http_GET(url)).getroot()\n for package in root.findall('package'):\n devel = package.find('devel')\n if devel is not None:\n devel_projects[devel.attrib['project']] = True\n\n return sorted(devel_projects)\n\ndef main(args):\n osc.conf.get_config(override_apiurl=args.apiurl)\n osc.conf.config['debug'] = args.debug\n\n devel_projects = devel_projects_get(osc.conf.config['apiurl'], args.project)\n if len(devel_projects) == 0:\n print('no devel projects found')\n else:\n out = '\\n'.join(devel_projects)\n print(out)\n\n if args.write:\n Config(args.project)\n api = StagingAPI(osc.conf.config['apiurl'], args.project)\n api.save_file_content('%s:Staging' % api.project, 'dashboard', 'devel_projects', out)\n\n\nif __name__ == '__main__':\n description = 'Print out devel projects for a given project (like openSUSE:Factory).'\n parser = 
argparse.ArgumentParser(description=description)\n parser.add_argument('-A', '--apiurl', metavar='URL', help='API URL')\n parser.add_argument('-d', '--debug', action='store_true', help='print info useful for debuging')\n parser.add_argument('-p', '--project', default='openSUSE:Factory', metavar='PROJECT', help='project for which to list devel projects')\n parser.add_argument('-w', '--write', action='store_true', help='write to dashboard container package')\n args = parser.parse_args()\n\n sys.exit(main(args))\n","sub_path":"devel-project-list.py","file_name":"devel-project-list.py","file_ext":"py","file_size_in_byte":2035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"598649543","text":"import datetime\nimport calendar\nimport graphene\nfrom django.utils.timezone import utc\nfrom graphene_django import DjangoObjectType\n\nimport food_api\nfrom food_api.models import Recipe, DailyRecipe, ShoppingListItem, IngredientRecipe\nfrom food_api.schemas.users.schema import UserType\nfrom django.contrib.auth.models import User\nfrom django.db.models import Q\nimport random\n\n\nclass RecipeType(DjangoObjectType):\n class Meta:\n model = Recipe\n filter_fields = ['id']\n\n\nclass IngredientRecipeType(DjangoObjectType):\n class Meta:\n model = IngredientRecipe\n filter_fields = ['recipe', 'id']\n\n\nclass DailyRecipeType(DjangoObjectType):\n class Meta:\n model = DailyRecipe\n\n\nclass ShoppingListItemType(DjangoObjectType):\n class Meta:\n model = ShoppingListItem\n\n\nclass AddRecipe(graphene.Mutation):\n daily_recipe = graphene.Field(DailyRecipeType)\n\n class Arguments:\n recipe_id = graphene.String(required=True)\n user_email = graphene.String(required=True)\n date = graphene.Date()\n\n def mutate(self, info, recipe_id, user_email, date):\n recipe = Recipe.objects.get(id=recipe_id)\n user = User.objects.get(email=user_email)\n\n existing = DailyRecipe.objects.filter(user=user, day=date).exists()\n if existing:\n 
daily_recipe = DailyRecipe.objects.filter(user=user, day=date).get()\n daily_recipe.recipe = recipe\n else:\n daily_recipe = DailyRecipe(\n recipe=recipe,\n user=user,\n day=date\n )\n daily_recipe.save()\n # shopping_list_item = ShoppingListItem(user=user, ingredients=)\n return AddRecipe(daily_recipe=daily_recipe)\n\n\nclass DeleteShoppingListItem(graphene.Mutation):\n result = graphene.String()\n\n class Arguments:\n item_id = graphene.Int(required=True)\n\n def mutate(self, info, item_id):\n item = ShoppingListItem.objects.get(id=item_id)\n item.delete()\n return DeleteShoppingListItem(result='done')\n\n\nclass Query(graphene.ObjectType):\n recipe = graphene.Field(RecipeType, id=graphene.Int())\n week_menu = graphene.List(DailyRecipeType)\n todays_recipe = graphene.List(IngredientRecipeType)\n random_recipe = graphene.Field(RecipeType)\n set_initial_shoppingList = graphene.String()\n shopping_list = graphene.List(ShoppingListItemType)\n\n def resolve_todays_recipe(self, info):\n user = info.context.user\n if user.is_anonymous:\n raise Exception('Not logged!')\n todays_recipe = DailyRecipe.objects.get(user_id=user, day=datetime.datetime.now().date()).recipe\n ingredient_recipe = IngredientRecipe.objects.filter(recipe=todays_recipe)\n return ingredient_recipe\n\n def resolve_week_menu(self, info):\n user = info.context.user\n if user.is_anonymous:\n raise Exception('Not logged!')\n first_day_of_week = datetime.datetime.today() - datetime.timedelta(\n days=datetime.datetime.today().isoweekday() - 1 % 7)\n menu = DailyRecipe.objects.filter(Q(day__gte=first_day_of_week), Q(user=user))[:7]\n if len(menu) < 7:\n raise Exception(\"Menu not defined for this week\")\n return menu\n\n def resolve_random_recipe(self, info):\n user = info.context.user\n if user.is_anonymous:\n raise Exception('Not logged!')\n number_of_recipes = Recipe.objects.all().count()\n random_id = random.randint(1, number_of_recipes)\n tries = 0\n while tries <= 20:\n try:\n recipe = 
Recipe.objects.get(id=random_id)\n break\n except food_api.models.Recipe.DoesNotExist:\n tries += 1\n if tries == 20:\n return Exception('An error occurred while trying to find a recipe')\n return recipe\n\n def resolve_recipe(self, info, **kwargs):\n user = info.context.user\n if user.is_anonymous:\n raise Exception('Not logged!')\n return Recipe.objects.get(id=kwargs['id'])\n\n def resolve_set_initial_shoppingList(self, info):\n user = info.context.user\n if user.is_anonymous:\n raise Exception('Not logged!')\n ShoppingListItem.objects.filter(user=user).delete()\n first_day_of_week = datetime.datetime.today() - datetime.timedelta(\n days=datetime.datetime.today().isoweekday() - 1 % 7)\n menu = DailyRecipe.objects.filter(Q(day__gte=first_day_of_week), Q(user=user))[:7]\n ingredient_dict = {}\n for daily_recipe in menu:\n todays_recipe = daily_recipe.recipe\n ingredient_recipe = IngredientRecipe.objects.filter(recipe=todays_recipe)\n\n for ingredient_recipe_item in ingredient_recipe:\n if ingredient_recipe_item.ingredient.name in ingredient_dict:\n if ingredient_recipe_item.quantity is not None:\n ingredient_dict[ingredient_recipe_item.ingredient.name] += ingredient_recipe_item.quantity\n else:\n ingredient_dict[ingredient_recipe_item.ingredient.name] = ingredient_recipe_item.quantity\n for ingredient, quantity in ingredient_dict.items():\n if 'sel' not in ingredient.lower() and 'poivre' not in ingredient.lower():\n shopping_list_item = ShoppingListItem(user=user)\n shopping_list_item.ingredient = ingredient\n if quantity is not None:\n shopping_list_item.quantity = round(quantity, 3)\n shopping_list_item.save()\n return 'Done'\n\n def resolve_shopping_list(self, info):\n user = info.context.user\n if user.is_anonymous:\n raise Exception('Not logged!')\n return ShoppingListItem.objects.filter(user=user)\n\n\nclass Mutation(graphene.ObjectType):\n add_recipe = AddRecipe.Field()\n remove_shopping_list_item = 
DeleteShoppingListItem.Field()\n","sub_path":"Food/food_api/schemas/recipes/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":5985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"385809956","text":"# place `import` statement at top of the program\nimport random\n\n# don't modify this code or variable `n` may not be available\nn = int(input())\n\n# put your code here\nrandom.seed(n)\nrange_one = random.randint(-100, 100)\nn = range_one\nprint(n)\n","sub_path":"Problems/Not exactly random/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"482105959","text":"# Date: 23-01-2021\n# Pie Chart\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ny = np.array([35, 25, 25, 15])\nmy_labels = [\"apples\", \"bananas\", \"cherries\", \"dates\"]\nmy_explode = [0.2, 0, 0, 0]\n\nplt.pie(y, labels=my_labels, explode=my_explode, shadow=True)\nplt.show()\n","sub_path":"python program/pie_chart.py","file_name":"pie_chart.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"221937424","text":"#/usr/bin/python\nimport numpy as np\nimport pylab as pb\n\n#ETa\neta_sseb = np.genfromtxt(\"/home/yann/DATA/stats/eta_sseb.csv\", delimiter=\",\", usecols = (6), autostrip=True)\neta_sebal = np.genfromtxt(\"/home/yann/DATA/stats/eta_sebal.csv\", delimiter=\",\", usecols = (6), autostrip=True)\nta = np.genfromtxt(\"/home/yann/DATA/stats/ta.csv\", delimiter=\",\", usecols = (6), autostrip=True)\neto_pm = np.genfromtxt(\"/home/yann/DATA/stats/eto_pm.csv\", delimiter=\",\", usecols = (6), autostrip=True)\net_potd = np.genfromtxt(\"/home/yann/DATA/stats/et_potd.csv\", delimiter=\",\", usecols = (6), autostrip=True)\n\neta_sseb[eta_sseb>10] = np.nan\neta_sebal[eta_sebal>10] = np.nan\nta[ta>5] = 
np.nan\neto_pm[eto_pm>10] = np.nan\net_potd[et_potd>10] = np.nan\n\n#Plot ET\npb.figure(num=1,figsize = (11,5),dpi=80)\nt = np.arange(len(eta_sseb))\npb.plot(t, eta_sseb, 'b-', label=\"ETa SSEB\")\npb.plot(t, eta_sebal, 'g-', label=\"ETa SEBAL\")\npb.plot(t, ta, 'r-', label=\"Ta\")\npb.plot(t, eto_pm, 'y-', label=\"ETo PM\")\npb.plot(t, et_potd, 'm-', label=\"ET Potential\")\npb.legend(loc=(.75, 0.1))\npb.xlabel('Time (days)')\npb.ylabel('ETo/ETa/Ta/ETpot (mm/d)')\npb.title('Daily Evapotranspiration')\npb.grid(True)\npb.savefig('ET.png', dpi=300)\n\n\n# Evapfr\nevapfr_sseb = np.genfromtxt(\"/home/yann/DATA/stats/evapfr_sseb.csv\", delimiter=\",\", usecols = (6), autostrip=True)\nevapfr_sebal = np.genfromtxt(\"/home/yann/DATA/stats/evapfr_sebal.csv\", delimiter=\",\", usecols = (6), autostrip=True)\n#evapfr_metric = np.genfromtxt(\"/home/yann/DATA/stats/evapfr_metric.csv\", delimiter=\",\", usecols = (6), autostrip=True)\n\nevapfr_sseb[evapfr_sseb>1] = np.nan\nevapfr_sebal[evapfr_sebal>1] = np.nan\n#evapfr_metric[evapfr_metric>1] = np.nan\n\n# Plot Evapfr\npb.figure(num=2,figsize = (11,5),dpi=80)\nt = np.arange(len(evapfr_sseb))\n\npb.plot(t, evapfr_sseb, 'b-', label=\"SSEB\")\npb.plot(t, evapfr_sebal, 'g-', label=\"SEBAL\")\n#pb.plot(t, evapfr_metric, 'r-', label=\"METRIC\")\npb.legend(loc=(.75, 0.1))\npb.xlabel('Time (days from 2000.03.01)')\npb.ylabel('Evaporative Fraction (-)')\npb.title('Daily Evaporative Fraction')\npb.grid(True)\npb.savefig('Evapfr.png', dpi=300)\npb.show()\n\n","sub_path":"country/Cambodia/06_graphs.py","file_name":"06_graphs.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"334458272","text":"\"\"\"Adoption Agency application.\"\"\"\n\nfrom flask import Flask, redirect, render_template, request, flash\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom models import db, connect_db, Pet\nfrom forms import AddPetForm, EditPetForm\n\napp 
= Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:2118@localhost/adopt'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_ECHO'] = True\napp.config['SECRET_KEY'] = 'asdfghjkl'\napp.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False\ndebug = DebugToolbarExtension(app)\n\nconnect_db(app)\ndb.create_all()\n\n@app.route('/')\ndef homepage():\n \"\"\"List pets\"\"\"\n\n pets = Pet.query.all()\n return render_template('index.html', pets=pets)\n\n@app.route('/add', methods=['GET', 'POST'])\ndef add_pet():\n \"\"\"Add a pet\"\"\"\n\n form = AddPetForm()\n\n if form.validate_on_submit():\n new_pet = Pet(name=form.name.data, species=form.species.data, photo_url=form.photo_url.data, age=form.age.data, notes=form.notes.data)\n\n db.session.add(new_pet)\n db.session.commit()\n\n flash(f\"Added a {new_pet.species} named {new_pet.name}.\")\n return redirect('/')\n\n else:\n return render_template('add_pet_form.html', form=form)\n\n@app.route(f'/', methods=['GET', 'POST'])\ndef edit_pet(id):\n \"\"\"Edit a pet\"\"\"\n\n pet = Pet.query.get_or_404(id)\n form = EditPetForm(obj=pet)\n\n if form.validate_on_submit():\n pet.photo_url = form.photo_url.data\n pet.notes = form.notes.data\n pet.available = form.available.data\n\n db.session.commit()\n\n flash(f\"Updated {pet.name}\")\n\n return redirect('/')\n else:\n return render_template('edit_pet_form.html', form=form, pet=pet)\n\n@app.route(f\"//delete\", methods=['POST'])\ndef delete_tag(id):\n \"\"\"Delete a pet\"\"\"\n \n pet = Pet.query.get_or_404(id)\n\n db.session.delete(pet)\n db.session.commit()\n\n return redirect(f'/')\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"214799341","text":"# Import\nimport os\nimport time\nimport random\nimport sqlite3\nimport pandas\n\n# Custom Import\n# from Map_Gen import Biome\n\nfrom Map_Gen import 
Biome\nfrom Map_Gen import SubBiome\n\n# GLOBAL VARIABLES\n# Map Width and Height\n# I am thinking maybe start with a 50 x 50 map but when all is said and done\n# 1000 x 1000 will be the map size\nmap_width = 100\nmap_height = 100\nsub_map_width = 10\nsub_map_height = 10\nmap_seed = []\n\n# coords\nx = 1\ny = 1\n\n\n# Clears print screen\ndef clear():\n if os.name == 'nt':\n _ = os.system('cls')\n\n else:\n _ = os.system('clear')\n\n\n# This is the blueprint of the tiles or biomes\nclass Tile:\n def __init__(self, coords_x, coords_y, abriv, name, rarity, difficulty,\n enterable, exit_x, exit_y, discovered=False):\n self.coords_x = coords_x\n self.coords_y = coords_y\n self.abriv = abriv\n self.name = name\n self.rarity = rarity # How often you need to be spawned\n self.difficulty = difficulty\n self.enterable = enterable # True or False\n self.floor = 0 # in SQL it is tile_level\n self.exit_x = exit_x\n self.exit_y = exit_y\n self.discovered = discovered\n\n\nclass SubTile:\n def __init__(self, from_x, from_y, coords_x, coords_y, biome_name, place_name, difficulty):\n self.from_x = from_x\n self.from_y = from_y\n self.coords_x = coords_x\n self.coords_y = coords_y\n self.biome_name = biome_name\n self.place_name = place_name\n self.difficulty = difficulty\n self.floor = 1 # in SQL it is tile_level\n self.discovered = False\n\n\n# Finds a random biome to be made in a tile\ndef get_random_biome():\n biome_keys = Biome.world_biomes.keys()\n list_of_biome_abriv = []\n\n for bio in biome_keys:\n list_of_biome_abriv.append(bio)\n\n return random.choice(list_of_biome_abriv)\n\n\ndef get_random_sub_biome(key):\n biome_keys = SubBiome.sub_biome[key].keys()\n list_of_sub_biome_abriv = []\n\n for bio in biome_keys:\n list_of_sub_biome_abriv.append(bio)\n\n return random.choice(list_of_sub_biome_abriv)\n\n\ndef set_exit_coords_x():\n return random.randint(0, sub_map_width)\n\n\ndef set_exit_coords_y():\n return random.randint(0, sub_map_height)\n\n\n# right now there is only 
Common Uncommon and Rare, But maybe i will add Epic as well\ndef rare_control():\n while True:\n r_biome = get_random_biome()\n if Biome.world_biomes[r_biome]['rarity'] == 'Rare':\n num = random.randint(0, 100)\n\n if num == 50:\n return r_biome\n\n elif Biome.world_biomes[r_biome]['rarity'] == 'Uncommon':\n num = random.randint(0, 20)\n\n if num == 10:\n return r_biome\n\n elif Biome.world_biomes[r_biome]['rarity'] == 'Common':\n return r_biome\n\n\n# Gets a random number to set biome difficulty\ndef ran_diff():\n return random.randint(1, 10)\n\n\n# Map is placed into a class here\ndef map_builder():\n loading_dots = 0\n clear()\n print('Loading.')\n for spot in range(0, (map_width * map_height)):\n # spot is index and coord_x and coord_y is finding the (x, y) coordinates from index (AKA spot)\n if spot in [10, 100, 500, 1000, 2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000]:\n loading_dots += 1\n clear()\n print('-------------------')\n print('Loading' + '.' * loading_dots)\n print('-------------------')\n\n coord_y = int((spot / map_width))\n coord_x = spot - (map_width * coord_y)\n biome = rare_control()\n map_seed.append(biome)\n biome_name = Biome.world_biomes[biome]['name']\n biome_rarity = Biome.world_biomes[biome]['rarity']\n biome_layers = Biome.world_biomes[biome]['enterable']\n # coords_x, coords_y, name, rarity, difficulty, enterable=False, discovered=False\n # coords_x, coords_y, name, rarity, difficulty, enterable, exit_x, exit_y, discovered=False\n insert_tile(\n Tile(coord_x, coord_y, biome, biome_name, biome_rarity, ran_diff(), biome_layers, set_exit_coords_x(),\n set_exit_coords_y()))\n if biome_layers:\n for subspot in range(0, (sub_map_width * sub_map_height)):\n sub_coord_y = int((subspot / sub_map_width))\n sub_coord_x = subspot - (sub_map_width * sub_coord_y)\n # print(f\"X: {coord_x}\")\n # print(f\"Y: {coord_y}\")\n # print(f\"SUB X: {sub_coord_x}\")\n # print(f\"SUB Y: {sub_coord_y}\")\n # input(\"Enter\")\n try:\n sub_biome_name = 
SubBiome.sub_biome[biome][get_random_sub_biome(biome)]['name']\n\n except:\n continue\n insert_subtile(\n SubTile(coord_x, coord_y, sub_coord_x, sub_coord_y, biome_name, sub_biome_name, ran_diff()))\n\n\n# mapfile is the varible name of the file that the map is stored in\nmapfile = 'Worldmap.db'\n\n# Conn connects to the database\nconn = sqlite3.connect(mapfile)\n\n# c is what lets you write\nc = conn.cursor()\n\n\ndef neat_layout():\n df1 = pandas.read_sql_query(\"SELECT * FROM tile ;\", conn)\n df2 = pandas.read_sql_query(\"SELECT * FROM subTile ;\", conn)\n print(df1)\n print(\"\\n--------------------------------------------------------------------------------------------------\\n\")\n print(df2)\n\n\ndef make_map_datebase():\n with conn:\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS tile (coords_x int, \n coords_y int, \n abriv text, \n place_name text, \n rarity text, \n difficulty int, \n enterable boolean, \n tile_level int, \n exit_x int, \n exit_y int, \n discovered boolean)\"\"\")\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS subTile (from_x int, \n from_y int, \n coords_x int, \n coords_y int, \n biome_name text, \n place_name text, \n difficulty int, \n tile_level int, \n discovered boolean)\"\"\")\n\n\ndef insert_tile(tile):\n with conn:\n c.execute(\n \"INSERT INTO tile VALUES (:coords_x, \"\n \":coords_y, \"\n \":abriv, \"\n \":place_name, \"\n \":rarity, \"\n \":difficulty, \"\n \":enterable, \"\n \":tile_level, \"\n \":exit_x, \"\n \":exit_y, \"\n \":discovered)\",\n {'coords_x': tile.coords_x, 'coords_y': tile.coords_y, 'abriv': tile.abriv, 'place_name': tile.name,\n 'rarity': tile.rarity, 'difficulty': tile.difficulty, 'enterable': tile.enterable,\n 'tile_level': tile.floor, 'exit_x': tile.exit_x, 'exit_y': tile.exit_y, 'discovered': tile.discovered})\n\n\ndef insert_subtile(subtile):\n with conn:\n c.execute(\n \"INSERT INTO subTile VALUES (:from_x, \"\n \":from_y, \"\n \":coords_x, \"\n \":coords_y, \"\n \":biome_name, \"\n \":place_name, 
\"\n \":difficulty, \"\n \":tile_level, \"\n \":discovered)\",\n {'from_x': subtile.from_x, 'from_y': subtile.from_y, 'coords_x': subtile.coords_x,\n 'coords_y': subtile.coords_y, 'biome_name': subtile.biome_name, 'place_name': subtile.place_name,\n 'difficulty': subtile.difficulty, 'tile_level': subtile.floor, 'discovered': subtile.discovered})\n\n\ndef get_tile(x, y):\n c.execute(\"SELECT * FROM tile WHERE coords_x=:coords_x AND coords_y=:coords_y;\", {'coords_x': x, 'coords_y': y})\n return c.fetchall()\n\n\ndef get_subTile(from_x, from_y, x, y):\n c.execute(\n \"SELECT * FROM subTile WHERE from_x=:from_x AND from_y=:from_y AND coords_x=:coords_x AND coords_y=:coords_y;\",\n {'from_x': from_x, 'from_y': from_y, 'coords_x': x, 'coords_y': y})\n return c.fetchall()\n\n\ndef get_all():\n c.execute(\"SELECT * FROM map;\")\n return c.fetchall()\n\n\ndef update_tile(x, y, discovered):\n with conn:\n c.execute(\"\"\"UPDATE tile SET discovered=:discovered WHERE coords_x=:coords_x AND coords_y=:coords_y\"\"\",\n {'coords_x': x, 'coords_y': y, 'discovered': discovered})\n\n\ndef update_subTile(from_x, from_y, x, y, discovered):\n with conn:\n c.execute(\n \"\"\"UPDATE subTile SET discovered=:discovered WHERE \n from_x=:from_x AND from_y=:from_y AND coords_x=:coords_x AND coords_y=:coords_y\"\"\",\n {'from_x': from_x, 'from_y': from_y, 'coords_x': x, 'coords_y': y, 'discovered': discovered})\n","sub_path":"Rpg/Map_Gen/Engine.py","file_name":"Engine.py","file_ext":"py","file_size_in_byte":8612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"77403180","text":"from django.contrib import admin\nfrom rango.models import Category, Page\nfrom rango. 
models import UserProfile\n\nadmin.site.register(UserProfile)\n\nclass PageAdmin(admin.ModelAdmin):\n\t# sets up the following fields as columns in rango/pages\n\tlist_display = ['title', 'category','url']\n\t# Allows you to display fields within Fieldsets when you select a specific page\n\t# And allows you to change vertical ordering of the following fields\n\t# fieldsets = [\n\t# \t(None,\t\t {'fields': ['title']}),\n\t# \t('Category', {'fields': ['category']}),\n\t# \t('URL',\t\t {'fields': ['url']}),\n\t# \t('Views',\t {'fields': ['views']}),\n\t# ]\n\tfieldsets = [\n\t\t(None, {'fields': ['title',\n\t\t\t\t\t\t 'category',\n\t\t\t\t\t\t 'url',\n\t\t\t\t\t\t 'views']}),\n\t]\n\nclass CategoryAdmin(admin.ModelAdmin):\n\tprepopulated_fields = {'slug':('name',)}\n\nadmin.site.register(Category, CategoryAdmin)\nadmin.site.register(Page, PageAdmin)","sub_path":"rango/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"471150002","text":"from .sim import BaseSimulator, EnsembleSimulator\nfrom . 
import adcirc_utils as au\nimport netCDF4 as nc\nimport os\nfrom glob import glob\n\nSECS_IN_DAY = 24*3600\n\nclass SegmentedSimulator(BaseSimulator):\n \"\"\"Runs a single ADCIRC simulation - with stops for custom logic\n \"\"\"\n\n def setup_job(self):\n super().setup_job()\n self.init_fort15(self.job_config[\"job_dir\"])\n\n def run_job(self):\n self.sim_days = 0.0\n self.steps = 0\n while not self.done():\n self.sim_days += self.interval\n self.run_segment()\n self.steps += 1\n\n def add_commandline_args(self, parser):\n parser.add_argument(\"--run-days\", required=True, type=float)\n parser.add_argument(\"--interval-hours\", type=float)\n\n def done(self):\n # add fudge factor to account for roundoff error\n return self.sim_days >= self.get_arg(\"run_days\") - 1e-6\n\n def run_segment(self):\n super().run_job()\n\n def make_preprocess_command(self, run, run_dir):\n fort15 = run_dir+\"/fort.15\"\n if self.steps > 0:\n # fix the fort.15 files\n hotstart_file = self.get_last_hotstart(run_dir)\n with nc.Dataset(hotstart_file) as ds:\n hotstart_days = self._get_hotstart_days(ds)\n \n\n new_rndy = self.interval + hotstart_days\n dt = au.snatch_fort_params(fort15, [\"DT\"])[\"DT\"]\n new_params = self.get_new_rndy_params(new_rndy, dt=dt)\n new_params[\"IHOT\"] = \"567\" if hotstart_file.endswith(\"67.nc\") else \"568\"\n au.fix_fort_params(fort15, new_params)\n return au.fix_all_fort15_params_cmd(run_dir, new_params)\n\n else:\n return super().make_preprocess_command(run, run_dir)\n\n def get_hotstart_params(self, fort15):\n params = au.snatch_fort_params(fort15, [\"DT\", \"NHSINC\", \"IHOT\"])\n ihot = params[\"IHOT\"].strip()\n dt = float(params[\"DT\"])\n nhsinc = int(params[\"NHSINC\"].split()[-1])\n return {\"interval\": dt*nhsinc/SECS_IN_DAY, \"ihot\": ihot, \"dt\": dt}\n\n def init_fort15(self, run_dir):\n fort15 = run_dir + \"/fort.15\"\n hot_params = self.get_hotstart_params(fort15)\n ihot, fort15_interval = hot_params[\"ihot\"], hot_params[\"interval\"]\n 
interval_hours = self.get_arg(\"interval_hours\")\n if interval_hours is not None:\n self.interval = interval_hours / 24\n else:\n self.interval = fort15_interval\n # check to see if we have an existing hotstart file\n new_params = {}\n if ihot.endswith(\"67\") or ihot.endswith(\"68\"):\n with nc.Dataset(run_dir + \"/fort.\"+ihot[-2:]+\".nc\") as ds:\n base_date = ds[\"time\"].base_date.split(\"!\")[0]\n new_rndy = self.interval + self._get_hotstart_days(ds)\n new_params[\"BASE_DATE\"] = base_date\n else:\n new_rndy = self.interval\n \n new_params.update(self.get_new_rndy_params(new_rndy, dt=hot_params[\"dt\"]))\n au.fix_fort_params(fort15, new_params)\n\n\n def get_new_rndy_params(self, rndy, dt):\n \"\"\"Fix the rndy and hsinc before updating fort.15\n \"\"\"\n\n # add a little bit to ensure the simulation will generate a hotstart file\n # round to match the format expected by ADCIRC (too many digits results in an error)\n return {\n \"RND\": round(rndy + 5e-3, 2),\n \"NHSINC\": \"5 \" + str(int(float(rndy) * SECS_IN_DAY / float(dt)))\n }\n\n def get_last_hotstart(self, run_dir=None):\n \"\"\"Return the most recent hotstart file\n \"\"\"\n if run_dir is None:\n run_dir = self.job_config['job_dir']\n # determine which hotstart file is more recent\n files = [run_dir+\"/fort.67.nc\", run_dir+\"/fort.68.nc\"]\n return max(files, key=os.path.getmtime)\n \n def _get_hotstart_days(self, ds):\n return ds[\"time\"][0] / (24 * 3600)\n\n\nclass SegmentedEnsembleSimulator(SegmentedSimulator, EnsembleSimulator):\n\n \"\"\"A class for performing ensemble simulations in which members of the ensemble need to be hotstarted repeatedly.\n \"\"\"\n\n def setup_job(self):\n EnsembleSimulator.setup_job(self)\n for run_dir in self.run_dirs:\n self.init_fort15(run_dir)\n\n def run_segment(self):\n # remove pylauncher temporary directories\n job_dir = self.job_config['job_dir']\n for d in glob(f\"{job_dir}/pylauncher_tmp*\"):\n self._run_command(f\"rm -r {d}\")\n 
EnsembleSimulator.run_job(self)\n","sub_path":"src/ch_sim/hotstart.py","file_name":"hotstart.py","file_ext":"py","file_size_in_byte":4562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"62610370","text":"\ncountries = [\"Australia\",\"Austria\",\"Belarus\",\"Canada\",\"China\",\"Croatia\",\\\n \"Czech Republic\",\"Finland\",\"France\",\"Germany\",\"Great Britain\",\"Italy\",\\\n \"Japan\",\"Kazakhstan\",\"Latvia\",\"Netherlands\",\"Norway\",\"Poland\",\"Russia\",\\\n \"Slovakia\",\"Slovenia\",\"South Korea\",\"Sweden\",\"Switzerland\",\"Ukraine\",\"United States\"\n ]\n\ngold = [0,4,5,10,3,0,2,1,4,8,1,0,1,0,0,8,11,4,13,1,2,3,2,6,1,9]\nsilv = [2,8,0,10,4,1,4,3,4,6,1,2,4,0,2,7,5,1,11,0,2,3,7,3,0,7]\nbron = [1,5,1,5,2,0,2,1,7,5,2,6,3,1,2,9,10,1,9,0,4,2,6,2,1,12]\n\ntotals = []\n\nprint(totals)\nfor i in range(len(countries)):\n medals = gold[i] + silv[i] + bron[i] \n totals.append( (medals, countries[i]) ) \n\n\n#totals = [(total[i], countries[i]) for i in range(len(total))] \n\n\n#for i in range(len(gold)):\n# a = countries[i]\n# total[i].append(a)\n\ntotals.sort()\ntotals.reverse()\n\n\n#top5 = totals[:5]\n#print(top5)\n\n\n\"\"\"\nfor p in top5:\n medals, country = p\n print(medals, country)\n\"\"\"\n\n#for medals, country in top5:\n# print(medals, country)\n\ntable = []\nfor i in range(len(countries)):\n table.append((gold[i], silv[i], bron[i], countries[i]))\n\nprint(\"\\n\")\n\ntable.sort()\ntop5 = table[-5:]\ntop5.reverse()\n\nfor g, s, b, country in top5:\n print(country, g, s, b)\n\n\ndef no_medals(countries, al, bl):\n result = []\n for i in range(len(countries)):\n if al[i] == 0 and bl[i] == 0:\n result.append(countries[i])\n return result\n\nonly_gold = no_medals(countries, silv, bron)\nonly_silv = no_medals(countries, gold, bron)\nonly_bron = no_medals(countries, gold, silv)\nonly_one = only_gold + only_silv + only_bron\n\nprint(only_one)\n\nmedals = []\n\nfor i in range(len(countries)):\n 
medals.append((countries[i], gold[i], silv[i], bron[i]))\n\ndef print_totals():\n for country, g, s, b in medals:\n print(country + \":\", g + s + b)\n\ndef print_totals2():\n for item in medals:\n print(item[0] + \":\", sum(item[1:]))\n\nprint_totals()\nprint(\"\\n\")\nprint_totals2()\n\n#마지막 4분 소화못함\n\n\"\"\"\nprint(countries[3])\nprint(len(countries))\nprint(countries[-1])\n\n\nkorea = [ 'Korea', 'KR', (1,2,3) ]\nprint(len(korea))\nprint(korea[2])\nprint(max(gold))\n\nnobles = [\"helium\", \"none\", \"argon\"]\nprint(nobles)\nnobles[1] = \"neon\"\nprint(nobles)\n\nfor country in countries[-5:]:\n print(country)\n\nfor i in range (len(gold)):\n if (gold[i] == max(gold)):\n print(i)\n print(countries[i])\n\nprint(list(range(5)))\n\nfor i in range(21,26):\n print(countries[i])\n print(gold[i])\n\nfor i in range((len(bron))):\n print(i)\n\"\"\"\n\n\n","sub_path":"0008.lec14.py","file_name":"0008.lec14.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"623211948","text":"\n\n#Ejercicio 5\n\nhora = 14\nminutos = 15\nsegundos = 45\n\ntiempo_para_medianoche = 84600 - ((hora*60*60) + (minutos*60) + segundos)\ntiempo_pasado_de_medianoche = (hora*60*60) + (minutos*60) + segundos\n\nprint(tiempo_para_medianoche, \"segundos para medianoche\")\nprint(tiempo_pasado_de_medianoche, \"segundos desde la última medianoche\")","sub_path":"laboratorio/cuaderno-1/ejercicio5.py","file_name":"ejercicio5.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"482810033","text":"import ctypes\nimport os.path\nfrom sys import platform\n\n\n__all__ = ['perform_network_assignment_DTALite']\n\n\nif platform.startswith('win32'):\n _dll_file = os.path.join(os.path.dirname(__file__), 'bin/DTALite.dll')\nelif platform.startswith('linux'):\n _dll_file = os.path.join(os.path.dirname(__file__), 
'bin/DTALite.so')\nelif platform.startswith('darwin'):\n _dll_file = os.path.join(os.path.dirname(__file__), 'bin/DTALite.dylib')\nelse:\n raise Exception('Please build the shared library compatible to your OS\\\n using source files')\n\n_dtalite_engine = ctypes.cdll.LoadLibrary(_dll_file)\n\n_dtalite_engine.network_assignment.argtypes = [ctypes.c_int,\n ctypes.c_int,\n ctypes.c_int]\n\n\ndef perform_network_assignment_DTALite(assignment_mode,\n iter_num,\n column_update_num):\n \"\"\" python interface to call DTALite (precompiled as shared library)\n\n WARNING\n -------\n MAKE SURE TO BACKUP agent.csv and link_performance.csv if you have\n called perform_network_assignment() before. Otherwise, they will be\n overwritten by results generated by DTALite.\n\n Parameters\n ----------\n assignment_mode\n 0: Link based UE, only produces link performance file without agent\n path file\n 1: Path based UE, produces link performance file and agent path file\n 2: UE + dynamic traffic assignment and simulation , produces link\n performance file and agent path file\n 3: ODME\n iter_num\n number of assignment iterations to be performed before optimizing\n column pool\n column_update_iter\n number of iterations to be performed on optimizing column pool\n\n Outputs\n -------\n agent.csv\n link_performance.csv\n\n Notes\n -----\n The parameters are exactly the same as do_network_assignment()\n The outputs are exactly the same as output_link_performance()\n \"\"\"\n # make sure assignment_mode is right\n assert(assignment_mode in [0, 1, 2, 3])\n # make sure iteration numbers are both non-negative\n assert(iter_num>=0)\n assert(column_update_num>=0)\n\n print('\\nDTALite run starts')\n\n _dtalite_engine.network_assignment(assignment_mode,\n iter_num,\n column_update_num)\n\n print('\\nDTALite run 
completes')","sub_path":"path4gmns/dtaapi.py","file_name":"dtaapi.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"51408832","text":"from keras.models import Sequential\nfrom keras.layers import Conv2D\nfrom keras.layers import MaxPooling2D\nfrom keras.layers import Flatten\nfrom keras.layers import Dense\n\nimport tensorflow as tf\nimport numpy as np\n\nimport keras.backend as K\n\nfrom keras.models import Model\n\nSIZE = 224\n\nfrom keras import layers\n\n\n#contitional\nq=True\nif q==True: #we have quantization ==> float16\n import keras.backend as K\n K.set_floatx('float16')\n from bn import BatchNormalizationF16\n\ndef conv(x, channels, kernel, stride, name, use_bias=True, padding='SAME'):\n x = layers.Conv2D(filters=channels, kernel_size=kernel, strides=stride, use_bias=use_bias, padding=padding, name=name)(x)\n print(x.get_shape(), name, K.dtype(x))\n return x\n\ndef batch_norm(x, name):\n if (q==False): # float(32)\n return layers.BatchNormalization(epsilon=1e-04, center=True, scale=True)(x)\n else: #quatized version (float 16)\n # return x\n return BatchNormalizationF16(name = name)(x)\ndef relu(x, name):\n x = layers.Activation('relu', name=name)(x)\n print(x.get_shape(), name, K.dtype(x))\n return x\n\ndef concat(arrs, axis, name):\n x= layers.concatenate(arrs, axis, name=name) #3 is the channel \n print(x.get_shape(), name, K.dtype(x))\n return x\n \n\ndef maxpool2d(x, kernel, stride, name, padding='SAME'):\n # x = tf.nn.max_pool2d(x, kernel, stride, padding)\n x = layers.MaxPooling2D( (kernel,kernel), (stride,stride), padding)(x)\n print(x.get_shape(), name, K.dtype(x))\n return x\n\ndef avgpool2d(x, kernel, stride,name, padding='SAME'):\n x = layers.GlobalAveragePooling2D()(x)\n print(x.get_shape(), name, K.dtype(x))\n return x\n\ndef fc(x, units, name, use_bias=True):\n x = layers.Dense(units, activation='softmax', name=name)(x) \n print(x.get_shape(), 
name, K.dtype(x))\n return x\n\n#########################define network\ndef my_net_16():\n img_input = layers.Input(shape=[SIZE, SIZE, 3])\n\n Convolution1 = conv(img_input, 32, 3, 2, 'Conv2D', False, padding='VALID')\n\n Scale1 = batch_norm(Convolution1, \"BatchNorm1\" )\n Scale1 = relu(Scale1, 'relu1')\n\n Convolution2 = conv(Scale1, 64, 3, 1, 'Conv2D_1', False, padding='SAME')\n Scale2 = batch_norm(Convolution2,'BatchNorm2' ) #BN+Scale\n Scale2 = relu(Scale2, 'relu2')\n\n Pooling1 = maxpool2d(Scale2, 2, 2, 'pool1')#pool1 (x, kernel, stride, padding='SAME'):\n\n Convolution3 = conv(Pooling1, 64, 3, 1, 'Conv2D_2', False, padding='SAME') #Conv2D_2\n Scale3 = batch_norm(Convolution3, 'BatchNorm3' ) #BN+Scale\n Scale3 = relu(Scale3, 'm1_b1_relu1')\n\n Convolution4 = conv(Pooling1, 48, 1, 1, 'Conv2D_3', False, padding='VALID') #Conv2D_3\n Scale4 = batch_norm(Convolution4, \"BatchNorm4\" ) #BN+Scale\n Scale4 = relu(Scale4, 'm1_b2_relu1')\n\n Scale4 = layers.ZeroPadding2D((2,2))(Scale4)\n Convolution5 = conv(Scale4, 64, 5, 1, 'Conv2D_4', False, padding='VALID') #Conv2D_4 ###pad=2!!! 
\n Scale5 = batch_norm(Convolution5, 'BatchNorm5' ) #BN+Scale\n Scale5 = relu(Scale5, 'm1_b2_relu2') \n\n Eltwise1 = layers.add([Scale3,Scale5], name='Eltwise1')\n concat1 = concat([Eltwise1, Pooling1], axis=3, name='concat1')\n\n Convolution6 = conv(concat1, 96, 3, 2,'Conv2D_5', False, padding='VALID') #Conv2D_5\n Scale6 = batch_norm(Convolution6, 'BatchNorm6' ) #BN+Scale\n Scale6 = relu(Scale6, 'm2_b1_relu1')\n\n Convolution7 = conv(concat1, 64, 1, 1, 'Conv2D_6', False, padding='VALID') #Conv2D_6\n Scale7 = batch_norm(Convolution7, 'BatchNorm7' ) #BN+Scale\n Scale7 = relu(Scale7, 'm2_b2_relu1')\n\n Convolution8 = conv(Scale7, 96, 3, 1, 'Conv2D_7',False, padding='SAME') #Conv2D_7\n Scale8 = batch_norm(Convolution8, 'BatchNorm8' ) #BN+Scale\n Scale8 = relu(Scale8, 'm2_b2_relu2')\n\n Convolution9 = conv(Scale8, 96, 3, 2, 'Conv2D_8', False, padding='VALID') #Conv2D_8\n Scale9 = batch_norm(Convolution9, 'BatchNorm9' ) #BN+Scale\n Scale9 = relu(Scale9, 'm2_b2_relu2_1')\n\n Pooling2 = maxpool2d(concat1 , 4, 2, 'm2_b3_pool1', 'VALID')#m2_b3_pool1\n Pooling3 = maxpool2d(Pooling1, 4, 2, 'm2_b4_pool2', 'VALID')#m2_b4_pool2\n\n\n concat2 = concat([Pooling3, Scale6, Scale9, Pooling2], 3, 'concat2') #??concat dim??\n\n Convolution10 = conv(concat2, 64, 3, 2, 'Conv2D_9', False, padding='VALID') #Conv2D_9\n Scale10 = batch_norm(Convolution10, 'BatchNorm10' ) #BN+Scale\n Scale10 = relu(Scale10, 'relu6') #relu6\n\n Pooling4 = avgpool2d(Scale10, 4, 2, 'pool3')#pool3\n\n BiasAdd = fc(Pooling4,2, 'MatMul')\n\n model = Model(img_input, BiasAdd)\n\n return model\n\n\n# x=my_net_16()\n# x.summary()","sub_path":"mine_f16.py","file_name":"mine_f16.py","file_ext":"py","file_size_in_byte":4579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"592686828","text":"def findMax(tree, P):\r\n temp = tree[P]\r\n L = (P * 2)\r\n R = (P * 2) + 1\r\n if (tree[L] > tree[R]):\r\n if (tree[L] > tree[P]):\r\n # L.key > P.key\r\n return L\r\n if 
(tree[R] > tree[P]):\r\n # R.Key > P.key\r\n return R\r\n else:\r\n return P\r\n\r\n\r\ndef buildHeap(array, n):\r\n tree = [0]\r\n for i in range(1, n+1):\r\n print(tree)\r\n tree = heapInsertion(tree, len(tree), array[i])\r\n\r\n\r\ndef heapInsertion(tree, n, value):\r\n tree.append(value)\r\n print(tree)\r\n for j in range(n//2, 1-1, -1):\r\n now = j\r\n while(max(j, j*2, j*2+1) != now):\r\n temp = tree[max(j, j*2, j*2+1)]\r\n tree[max(j, j*2, j*2+1)] = tree[j]\r\n tree[j] = temp\r\n return tree\r\n\r\n\r\n\r\nbuildHeap([0, 4, 234, 2, 3, 1, 2, 5, 6, 2, 3], 10)","sub_path":"HeapSort02.py","file_name":"HeapSort02.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"123070851","text":"# Given an array of roses. roses[i] means rose i will bloom on day roses[i]. Also given an int k, which is the minimum number of adjacent bloom roses required for a bouquet, and an int n, which is the number of bouquets we need. Return the earliest day that we can get n bouquets of roses.\r\n#\r\n# Example:\r\n# Input: roses = [1, 2, 4, 9, 3, 4, 1], k = 2, n = 2\r\n# Output: 4\r\n# Explanation:\r\n# day 1: [b, n, n, n, n, n, b]\r\n# The first and the last rose bloom.\r\n#\r\n# day 2: [b, b, n, n, n, n, b]\r\n# The second rose blooms. Here the first two bloom roses make a bouquet.\r\n#\r\n# day 3: [b, b, n, n, b, n, b]\r\n#\r\n# day 4: [b, b, b, n, b, b, b]\r\n# Here the last three bloom roses make a bouquet, meeting the required n = 2 bouquets of bloom roses. 
So return day 4.\r\n\r\n#DOES NOT WORK\r\n#HALF WRITTEN\r\n#-ADITYA\r\ndef mindays(roses, n, k):\r\n bloomed = ['n'] * len(roses)\r\n days = max(roses)\r\n for i in range(days):\r\n for j in range(len(roses)):\r\n if i == roses[j]:\r\n bloomed[j] = 'b'\r\n for l in range(k):\r\n if bloomed[j+l] == 'b':\r\n break\r\n\r\n\r\nroses = [1, 2, 4, 9, 3, 4, 1]\r\nn = 2\r\nk = 2\r\nmindays(roses, n, k)\r\n","sub_path":"rose_bouquet.py","file_name":"rose_bouquet.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"398627422","text":"# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"WALS model input data, training and predict functions.\"\"\"\n\nimport datetime\nimport numpy as np\nimport os\nimport pandas as pd\nfrom scipy.sparse import coo_matrix\nimport sh\nimport tensorflow as tf\n\nimport wals\n\n# ratio of train set size to test set size\nTEST_SET_RATIO = 10\n\n# default hyperparameters\nDEFAULT_PARAMS = {\n 'weights': True,\n 'latent_factors': 5,\n 'num_iters': 20,\n 'regularization': 0.07,\n 'unobs_weight': 0.01,\n 'wt_type': 0,\n 'feature_wt_factor': 130.0,\n 'feature_wt_exp': 0.08,\n 'delimiter': '\\t'\n}\n\n# parameters optimized with hypertuning for the MovieLens data set\nOPTIMIZED_PARAMS = {\n 'latent_factors': 34,\n 'regularization': 9.83,\n 'unobs_weight': 0.001,\n 'feature_wt_factor': 189.8,\n}\n\n# 
parameters optimized with hypertuning for the included web views data set\nOPTIMIZED_PARAMS_WEB = {\n 'latent_factors': 30,\n 'regularization': 7.27,\n 'unobs_weight': 0.01,\n 'feature_wt_exp': 5.05,\n}\n\n\ndef create_test_and_train_sets(args, input_file, data_type='ratings'):\n \"\"\"Create test and train sets, for different input data types.\n\n Args:\n args: input args for job\n input_file: path to csv data file\n data_type: 'ratings': MovieLens style ratings matrix\n 'web_views': Google Analytics time-on-page data\n\n Returns:\n array of user IDs for each row of the ratings matrix\n array of item IDs for each column of the rating matrix\n sparse coo_matrix for training\n sparse coo_matrix for test\n\n Raises:\n ValueError: if invalid data_type is supplied\n \"\"\"\n if data_type == 'ratings':\n return _ratings_train_and_test(args['headers'], args['delimiter'],\n input_file)\n elif data_type == 'web_views':\n return _page_views_train_and_test(input_file)\n else:\n raise ValueError('data_type arg value %s not supported.' % data_type)\n\n\ndef _ratings_train_and_test(use_headers, delimiter, input_file):\n \"\"\"Load data set. Assumes Movielens header, format etc.\n\n MovieLens data starts with user_id=1. The max user id is close to\n the number of users, but there may be missing user_id's or item ids\n (i.e. movies). 
For our sparse matrices we need to map the user/item ids\n down to a zero-based set of indices, without missing values.\n\n Args:\n use_headers: (boolean) true = headers, false = no headers\n delimiter: (string) delimiter to use for csv\n input_file: path to csv data file\n\n Returns:\n array of user IDs for each row of the ratings matrix\n array of item IDs for each column of the rating matrix\n sparse coo_matrix for training\n sparse coo_matrix for test\n \"\"\"\n headers = ['user_id', 'item_id', 'rating', 'timestamp']\n header_row = 0 if use_headers else None\n ratings_df = pd.read_csv(input_file,\n sep=delimiter,\n names=headers,\n header=header_row,\n dtype={\n 'user_id': np.int32,\n 'item_id': np.int32,\n 'rating': np.float32,\n 'timestamp': np.int32,\n })\n\n np_users = ratings_df.user_id.as_matrix()\n np_items = ratings_df.item_id.as_matrix()\n unique_users = np.unique(np_users)\n unique_items = np.unique(np_items)\n\n n_users = unique_users.shape[0]\n n_items = unique_items.shape[0]\n\n # make indexes for users and items if necessary\n max_user = unique_users[-1]\n max_item = unique_items[-1]\n if n_users != max_user or n_items != max_item:\n # make an array of 0-indexed unique user ids corresponding to the dataset\n # stack of user ids\n z = np.zeros(max_user+1, dtype=int)\n z[unique_users] = np.arange(n_users)\n u_r = z[np_users]\n\n # make an array of 0-indexed unique item ids corresponding to the dataset\n # stack of item ids\n z = np.zeros(max_item+1, dtype=int)\n z[unique_items] = np.arange(n_items)\n i_r = z[np_items]\n\n # construct the ratings set from the three stacks\n np_ratings = ratings_df.rating.as_matrix()\n ratings = np.zeros((np_ratings.shape[0], 3), dtype=object)\n ratings[:, 0] = u_r\n ratings[:, 1] = i_r\n ratings[:, 2] = np_ratings\n else:\n ratings = ratings_df.as_matrix(['user_id', 'item_id', 'rating'])\n # deal with 1-based user indices\n ratings[:, 0] -= 1\n ratings[:, 1] -= 1\n\n tr_sparse, test_sparse = 
_create_sparse_train_and_test(ratings,\n n_users, n_items)\n\n return ratings[:, 0], ratings[:, 1], tr_sparse, test_sparse\n\n\ndef _page_views_train_and_test(input_file):\n \"\"\"Load page views dataset, and create train and set sparse matrices.\n\n Assumes 'clientId', 'contentId', and 'timeOnPage' columns.\n\n Args:\n input_file: path to csv data file\n\n Returns:\n array of user IDs for each row of the ratings matrix\n array of item IDs for each column of the rating matrix\n sparse coo_matrix for training\n sparse coo_matrix for test\n \"\"\"\n views_df = pd.read_csv(input_file, sep=',', header=0)\n\n df_items = pd.DataFrame({'contentId': views_df.contentId.unique()})\n df_sorted_items = df_items.sort_values('contentId').reset_index()\n pds_items = df_sorted_items.contentId\n\n # preprocess data. df.groupby.agg sorts clientId and contentId\n df_user_items = views_df.groupby(['clientId', 'contentId']\n ).agg({'timeOnPage': 'sum'})\n\n # create a list of (userId, itemId, timeOnPage) ratings, where userId and\n # clientId are 0-indexed\n current_u = -1\n ux = -1\n pv_ratings = []\n user_ux = []\n for timeonpg in df_user_items.itertuples():\n user = timeonpg[0][0]\n item = timeonpg[0][1]\n\n # as we go, build a (sorted) list of user ids\n if user != current_u:\n user_ux.append(user)\n ux += 1\n current_u = user\n\n # this search makes the preprocessing time O(r * i log(i)),\n # r = # ratings, i = # items\n ix = pds_items.searchsorted(item)\n if not isinstance(ix,int): ix = ix[0]\n pv_ratings.append((ux, ix, timeonpg[1]))\n\n # convert ratings list and user map to np array\n pv_ratings = np.asarray(pv_ratings)\n user_ux = np.asarray(user_ux)\n\n # create train and test sets\n tr_sparse, test_sparse = _create_sparse_train_and_test(pv_ratings,\n ux + 1,\n df_items.size)\n\n return user_ux, pds_items.as_matrix(), tr_sparse, test_sparse\n\n\ndef _create_sparse_train_and_test(ratings, n_users, n_items):\n \"\"\"Given ratings, create sparse matrices for train and test 
sets.\n\n Args:\n ratings: list of ratings tuples (u, i, r)\n n_users: number of users\n n_items: number of items\n\n Returns:\n train, test sparse matrices in scipy coo_matrix format.\n \"\"\"\n # pick a random test set of entries, sorted ascending\n test_set_size = len(ratings) / TEST_SET_RATIO\n test_set_idx = np.random.choice(xrange(len(ratings)),\n size=test_set_size, replace=False)\n test_set_idx = sorted(test_set_idx)\n\n # sift ratings into train and test sets\n ts_ratings = ratings[test_set_idx]\n tr_ratings = np.delete(ratings, test_set_idx, axis=0)\n\n # create training and test matrices as coo_matrix's\n u_tr, i_tr, r_tr = zip(*tr_ratings)\n tr_sparse = coo_matrix((r_tr, (u_tr, i_tr)), shape=(n_users, n_items))\n\n u_ts, i_ts, r_ts = zip(*ts_ratings)\n test_sparse = coo_matrix((r_ts, (u_ts, i_ts)), shape=(n_users, n_items))\n\n return tr_sparse, test_sparse\n\n\ndef train_model(args, tr_sparse):\n \"\"\"Instantiate WALS model and use \"simple_train\" to factorize the matrix.\n\n Args:\n args: training args containing hyperparams\n tr_sparse: sparse training matrix\n\n Returns:\n the row and column factors in numpy format.\n \"\"\"\n dim = args['latent_factors']\n num_iters = args['num_iters']\n reg = args['regularization']\n unobs = args['unobs_weight']\n wt_type = args['wt_type']\n feature_wt_exp = args['feature_wt_exp']\n obs_wt = args['feature_wt_factor']\n\n tf.logging.info('Train Start: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now()))\n\n # generate model\n input_tensor, row_factor, col_factor, model = wals.wals_model(tr_sparse,\n dim,\n reg,\n unobs,\n args['weights'],\n wt_type,\n feature_wt_exp,\n obs_wt)\n\n # factorize matrix\n session = wals.simple_train(model, input_tensor, num_iters)\n\n tf.logging.info('Train Finish: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now()))\n\n # evaluate output factor matrices\n output_row = row_factor.eval(session=session)\n output_col = col_factor.eval(session=session)\n\n # close the training 
session now that we've evaluated the output\n session.close()\n\n return output_row, output_col\n\n\ndef save_model(args, user_map, item_map, row_factor, col_factor):\n \"\"\"Save the user map, item map, row factor and column factor matrices in numpy format.\n\n These matrices together constitute the \"recommendation model.\"\n\n Args:\n args: input args to training job\n user_map: user map numpy array\n item_map: item map numpy array\n row_factor: row_factor numpy array\n col_factor: col_factor numpy array\n \"\"\"\n model_dir = os.path.join(args['output_dir'], 'model')\n\n # if our output directory is a GCS bucket, write model files to /tmp,\n # then copy to GCS\n gs_model_dir = None\n if model_dir.startswith('gs://'):\n gs_model_dir = model_dir\n model_dir = '/tmp/{0}'.format(args['job_name'])\n\n os.makedirs(model_dir)\n np.save(os.path.join(model_dir, 'user'), user_map)\n np.save(os.path.join(model_dir, 'item'), item_map)\n np.save(os.path.join(model_dir, 'row'), row_factor)\n np.save(os.path.join(model_dir, 'col'), col_factor)\n\n if gs_model_dir:\n sh.gsutil('cp', '-r', os.path.join(model_dir, '*'), gs_model_dir)\n\n\ndef generate_recommendations(user_idx, user_rated, row_factor, col_factor, k):\n \"\"\"Generate recommendations for a user.\n\n Args:\n user_idx: the row index of the user in the ratings matrix,\n\n user_rated: the list of item indexes (column indexes in the ratings matrix)\n previously rated by that user (which will be excluded from the\n recommendations)\n\n row_factor: the row factors of the recommendation model\n col_factor: the column factors of the recommendation model\n\n k: number of recommendations requested\n\n Returns:\n list of k item indexes with the predicted highest rating, excluding\n those that the user has already rated\n \"\"\"\n\n # bounds checking for args\n assert (row_factor.shape[0] - len(user_rated)) >= k\n\n # retrieve user factor\n user_f = row_factor[user_idx]\n\n # dot product of item factors with user factor gives 
predicted ratings\n pred_ratings = col_factor.dot(user_f)\n\n # find candidate recommended item indexes sorted by predicted rating\n k_r = k + len(user_rated)\n candidate_items = np.argsort(pred_ratings)[-k_r:]\n\n # remove previously rated items and take top k\n recommended_items = [i for i in candidate_items if i not in user_rated]\n recommended_items = recommended_items[-k:]\n\n # flip to sort highest rated first\n recommended_items.reverse()\n\n return recommended_items\n\n","sub_path":"courses/machine_learning/deepdive/10_recommend/endtoend/wals_ml_engine/trainer/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":12160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"67063696","text":"#!/usr/bin/env python\n'''\nSubbuilder of the FieldCage\n'''\n\nimport gegede.builder\nfrom gegede import Quantity as Q\nfrom gegede import units\n\nclass FieldCageBuilder(gegede.builder.Builder):\n '''\n Build the field cage\n '''\n # ^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^\n def configure(self,\n barWidth = None,\n barHeight = None,\n horizontalBarLength = None,\n verticalBarLength = None,\n material = None,\n APAFrameSize = None,\n APAGap_y = None,\n APAGap_z = None,\n **kwds):\n self.barWidth = barWidth \n self.barHeight = barHeight \n self.horizontalBarLength = horizontalBarLength\n self.verticalBarLength = verticalBarLength \n self.material = material\n self.APAFrameSize = APAFrameSize\n self.APAGap_y = APAGap_y \n self.APAGap_z = APAGap_z\n\n # ^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^~^\n def construct(self, geom):\n\n ii = 0\n lenHorizontalBar = (25*self.APAFrameSize[2]) + (24*self.APAGap_z)\n lenVerticalBar = (2 *self.APAFrameSize[1]) + (1 *self.APAGap_y)\n CageBox = geom.shapes.Box('CageSegment',\n dx=0.5*(Q(\"6cm\")*250),\n dy=0.5*Q('100m'),\n dz=0.5*Q('100m'))\n cage_lv = geom.structure.Volume('volFieldCage_'+str(ii), 
material=self.material, shape=CageBox)\n self.add_volume(cage_lv)\n\n # Make the horizontal beam\n HorizontalBox = geom.shapes.Box('FieldCageHorizontalSegment',\n dx=0.5*self.barWidth,\n dy=0.5*self.barHeight,\n dz=0.5*lenHorizontalBar)\n Horizontal_lv = geom.structure.Volume('vol_FieldCageHorizontalSegment', material=self.material, shape=HorizontalBox)\n\n # Make the vertical beam\n VerticalBox = geom.shapes.Box('FieldCageVerticalSegment',\n dx=0.5*self.barWidth,\n dy=0.5*(lenVerticalBar-(2*self.barHeight)),\n dz=0.5*self.barHeight)\n Vertical_lv = geom.structure.Volume('vol_FieldCageVerticalSegment', material=self.material, shape=VerticalBox)\n \n posHorizontal = [Q(\"3cm\"), 0.5*(lenVerticalBar-self.barHeight), Q(\"0m\")]\n posVertical = [Q(\"3cm\"), Q(\"0m\"), 0.5*(lenHorizontalBar-self.barHeight)]\n\n for i in range(123):\n # Define the positions of the bar\n topBeamPos = geom.structure.Position('TopBeamPos_'+str(i) ,\n posHorizontal[0]+(i*Q(\"6cm\")), posHorizontal[1], posHorizontal[2])\n botBeamPos = geom.structure.Position('BottomBeamPos_'+str(i) ,\n posHorizontal[0]+(i*Q(\"6cm\")), -posHorizontal[1], posHorizontal[2])\n lefBeamPos = geom.structure.Position('LeftBeamPos_'+str(i) ,\n posVertical[0]+(i*Q(\"6cm\")), posVertical[1], posVertical[2])\n rigBeamPos = geom.structure.Position('RightBeamPos_'+str(i) ,\n posVertical[0]+(i*Q(\"6cm\")), posVertical[1], -posVertical[2])\n\n topBeamNeg = geom.structure.Position('TopBeamPos_-'+str(i) ,\n -posHorizontal[0]-(i*Q(\"6cm\")), posHorizontal[1], posHorizontal[2])\n botBeamNeg = geom.structure.Position('BottomBeamPos_-'+str(i) ,\n -posHorizontal[0]-(i*Q(\"6cm\")), -posHorizontal[1], posHorizontal[2])\n lefBeamNeg = geom.structure.Position('LeftBeamPos_-'+str(i) ,\n -posVertical[0]-(i*Q(\"6cm\")), posVertical[1], posVertical[2])\n rigBeamNeg = geom.structure.Position('RightBeamPos_-'+str(i) ,\n -posVertical[0]-(i*Q(\"6cm\")), posVertical[1], -posVertical[2])\n\n \n # Define the placements of the volumes\n 
Placement_topBeamPos = geom.structure.Placement('placeTopBeam_'+str(i) , volume=Horizontal_lv, pos=topBeamPos)\n Placement_botBeamPos = geom.structure.Placement('placeBottomBeam_'+str(i) , volume=Horizontal_lv, pos=botBeamPos)\n Placement_lefBeamPos = geom.structure.Placement('placeLeftBeam_'+str(i) , volume=Vertical_lv , pos=lefBeamPos)\n Placement_rigBeamPos = geom.structure.Placement('placeRightBeam_'+str(i) , volume=Vertical_lv , pos=rigBeamPos)\n\n Placement_topBeamNeg = geom.structure.Placement('placeTopBeam_-'+str(i) , volume=Horizontal_lv, pos=topBeamNeg)\n Placement_botBeamNeg = geom.structure.Placement('placeBottomBeam_-'+str(i) , volume=Horizontal_lv, pos=botBeamNeg)\n Placement_lefBeamNeg = geom.structure.Placement('placeLeftBeam_-'+str(i) , volume=Vertical_lv , pos=lefBeamNeg)\n Placement_rigBeamNeg = geom.structure.Placement('placeRightBeam_-'+str(i) , volume=Vertical_lv , pos=rigBeamNeg)\n\n cage_lv.placements.append(Placement_topBeamPos.name)\n cage_lv.placements.append(Placement_botBeamPos.name)\n cage_lv.placements.append(Placement_lefBeamPos.name)\n cage_lv.placements.append(Placement_rigBeamPos.name)\n\n cage_lv.placements.append(Placement_topBeamNeg.name)\n cage_lv.placements.append(Placement_botBeamNeg.name)\n cage_lv.placements.append(Placement_lefBeamNeg.name)\n cage_lv.placements.append(Placement_rigBeamNeg.name)\n \n","sub_path":"python/duneggd/TestArena/FieldCage.py","file_name":"FieldCage.py","file_ext":"py","file_size_in_byte":5970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"69677680","text":"import sys\nimport os\nimport ctypes\nimport threading\nimport time\nimport warnings\n\ndef shut_up(*args, **kwargs):\n return\nwarnings.simplefilter = shut_up\n\n\ndef check_dependencies():\n\n try:\n import PySide2\n except ImportError:\n sys.stderr.write(\"Cannot find the PySide2 package. 
You may install it via pip:\\n\" +\n \" pip install pyside2\\n\")\n return False\n\n try:\n import qtconsole\n except ImportError:\n sys.stderr.write(\"Cannot find the qtconsole package. You may install it via pip:\\n\" +\n \" pip install qtconsole\\n\")\n return False\n\n return True\n\n\ndef set_app_user_model_id():\n # Explicitly call SetCurrentProcessExplicitAppUserModelID() so the taskbar icon is displayed correctly.\n\n if sys.platform == 'win32':\n winver = sys.getwindowsversion()\n if winver.major >= 5:\n myappid = u'angr-management'\n ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)\n\n\ndef start_management(filepath=None, use_daemon=False):\n\n if not check_dependencies():\n sys.exit(1)\n\n set_app_user_model_id()\n\n from PySide2.QtWidgets import QApplication, QSplashScreen, QMessageBox\n from PySide2.QtGui import QFontDatabase, QPixmap, QIcon\n from PySide2.QtCore import Qt\n\n from .config import FONT_LOCATION, IMG_LOCATION\n\n app = QApplication(sys.argv)\n app.setApplicationDisplayName(\"angr management\")\n app.setApplicationName(\"angr management\")\n\n # URL scheme\n from .logic.url_scheme import AngrUrlScheme\n scheme = AngrUrlScheme()\n registered, _ = scheme.is_url_scheme_registered()\n if not registered:\n btn = QMessageBox.question(None, \"Setting up angr URL scheme\",\n \"angr URL scheme allows \\\"deep linking\\\" from browsers and other applications by registering the \"\n \"angr:// protocol to the current user. Do you want to register it? 
You may unregister at any \"\n \"time in Preferences.\",\n defaultButton=QMessageBox.Yes)\n if btn == QMessageBox.Yes:\n try:\n AngrUrlScheme().register_url_scheme()\n except (ValueError, FileNotFoundError) as ex:\n QMessageBox.warning(None, \"Error in registering angr URL scheme\",\n \"Failed to register the angr URL scheme.\\n\"\n \"The following exception occurred:\\n\"\n + str(ex))\n\n # Make + display splash screen\n splashscreen_location = os.path.join(IMG_LOCATION, 'angr-splash.png')\n splash_pixmap = QPixmap(splashscreen_location)\n splash = QSplashScreen(splash_pixmap, Qt.WindowStaysOnTopHint)\n icon_location = os.path.join(IMG_LOCATION, 'angr.png')\n splash.setWindowIcon(QIcon(icon_location))\n splash.setWindowFlags(Qt.WindowStaysOnTopHint | Qt.FramelessWindowHint)\n splash.setEnabled(False)\n splash.show()\n time.sleep(0.05)\n app.processEvents()\n\n from .logic import GlobalInfo\n from .ui.css import CSS\n from .ui.main_window import MainWindow\n from .daemon import daemon_exists, run_daemon_process, daemon_conn\n from .daemon.client import ClientService\n\n # Load fonts\n QFontDatabase.addApplicationFont(os.path.join(FONT_LOCATION, \"SourceCodePro-Regular.ttf\"))\n\n GlobalInfo.gui_thread = threading.get_ident()\n\n # apply the CSS\n app.setStyleSheet(CSS.global_css())\n\n if use_daemon:\n # connect to daemon (if there is one)\n if not daemon_exists():\n print(\"[+] Starting a new daemon.\")\n run_daemon_process()\n time.sleep(0.2)\n else:\n print(\"[+] Connecting to an existing angr management daemon.\")\n\n while True:\n try:\n GlobalInfo.daemon_conn = daemon_conn(service=ClientService)\n except ConnectionRefusedError:\n print(\"[-] Connection failed... 
try again.\")\n time.sleep(0.4)\n continue\n print(\"[+] Connected to daemon.\")\n break\n\n from rpyc import BgServingThread\n th = BgServingThread(GlobalInfo.daemon_conn)\n\n file_to_open = filepath if filepath else sys.argv[1] if len(sys.argv) > 1 else None\n main_window = MainWindow()\n splash.finish(main_window)\n\n if file_to_open is not None:\n main_window.load_file(file_to_open)\n\n app.exec_()\n\n\ndef main():\n import argparse\n\n parser = argparse.ArgumentParser(description=\"angr management\")\n parser.add_argument(\"-s\", \"--script\", type=str, help=\"run a python script in the (commandline) angr environment\")\n parser.add_argument(\"-i\", \"--interactive\", action='store_true', help=\"interactive (ipython) mode\")\n parser.add_argument(\"-n\", \"--no-gui\", action='store_true', help=\"run in headless mode\")\n parser.add_argument('-d', \"--with-daemon\", action='store_true', help=\"use angr with the daemon. this allows angr \"\n \"to handle angr:// URLs. it will \"\n \"automatically start a daemon if there isn't \"\n \"already one running.\")\n parser.add_argument(\"-D\", \"--daemon\", action='store_true', help=\"start a daemon to handle angr:// URLs.\")\n parser.add_argument(\"-u\", \"--url\", type=str, help=\"(internal) handle angr:// URLs. 
the daemon must be running.\")\n parser.add_argument(\"binary\", nargs=\"?\", help=\"the binary to open (for the GUI)\")\n\n args = parser.parse_args()\n\n if args.daemon:\n from .daemon import start_daemon\n start_daemon()\n return\n elif args.url:\n from .daemon import daemon_exists, handle_url, run_daemon_process, daemon_conn\n if not daemon_exists():\n run_daemon_process()\n time.sleep(1)\n\n action = handle_url(args.url, act=False)\n action.act(daemon_conn())\n return\n if args.script:\n import runpy\n script_globals = runpy.run_path(args.script)\n if args.interactive:\n if args.script:\n print(\"Your script's globals() dict is available in the `script_globals` variable.\")\n import IPython\n IPython.embed(banner1=\"\")\n if not args.no_gui:\n start_management(args.binary)\n\nif __name__ == '__main__':\n main()\n","sub_path":"angrmanagement/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":6407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"188434076","text":"\"\"\"\nRelays sit below an announcer, or another relay, and simply repeat what\nthey receive over PUB/SUB.\n\"\"\"\n# Logging has to be configured first before we do anything.\nimport logging\nfrom threading import Thread\n\nlogger = logging.getLogger(__name__)\nimport zlib\n\nimport gevent\nimport simplejson\nimport zmq.green as zmq\nfrom bottle import get, run as bottle_run\nfrom eddn.conf.Settings import Settings\n\nfrom gevent import monkey\nmonkey.patch_all()\n\nfrom eddn.StatsCollector import StatsCollector\n\nstatsCollector = StatsCollector()\nstatsCollector.start()\n\n\n@get('/stats/')\ndef stats():\n return simplejson.dumps(statsCollector.getSummary())\n\n\nclass Relay(Thread):\n\n def run(self):\n \"\"\"\n Fires up the relay process.\n \"\"\"\n # These form the connection to the Gateway daemon(s) upstream.\n context = zmq.Context()\n\n receiver = context.socket(zmq.SUB)\n receiver.setsockopt(zmq.SUBSCRIBE, '')\n 
for binding in Settings.RELAY_RECEIVER_BINDINGS:\n # Relays bind upstream to an Announcer, or another Relay.\n receiver.connect(binding)\n\n sender = context.socket(zmq.PUB)\n for binding in Settings.RELAY_SENDER_BINDINGS:\n # End users, or other relays, may attach here.\n sender.bind(binding)\n\n def relay_worker(message):\n \"\"\"\n This is the worker function that re-sends the incoming messages out\n to any subscribers.\n :param str message: A JSON string to re-broadcast.\n \"\"\"\n # if is_message_duped(message):\n # We've already seen this message recently. Discard it.\n # return\n\n if Settings.RELAY_DECOMPRESS_MESSAGES:\n message = zlib.decompress(message)\n\n sender.send(message)\n statsCollector.tally(\"outbound\")\n\n logger.info(\"Relay is now listening for order data.\")\n\n while True:\n # For each incoming message, spawn a greenlet using the relay_worker\n # function.\n inboundMessage = receiver.recv()\n statsCollector.tally(\"inbound\")\n gevent.spawn(relay_worker, inboundMessage)\n\n\ndef main():\n r = Relay()\n r.start()\n bottle_run(host='0.0.0.0', port=9090, server='gevent')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/eddn/Relay.py","file_name":"Relay.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"271125681","text":"#! 
/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nclass Solution(object):\n def countAndSay(self, n):\n \"\"\"\n :type n: int\n :rtype: str\n \"\"\"\n if n <= 1:\n return \"1\"\n\n pre_str = \"1\"\n for i in range(2, n + 1):\n # Get the ith count-and-say sequence by scan pre_str\n length = len(pre_str)\n current_str = \"\"\n index = 0\n\n # Count and say the pre_str\n while index < length:\n char = pre_str[index]\n repeat = 0\n pos = repeat + index + 1\n while pos < length and pre_str[pos] == char:\n repeat += 1\n pos = repeat + index + 1\n\n if repeat:\n current_str = current_str + str(repeat + 1) + char\n else:\n current_str = current_str + \"1\" + char\n index = index + repeat + 1\n\n pre_str = current_str\n\n return pre_str\n","sub_path":"Week05/38.py","file_name":"38.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"572130973","text":"\"\"\"\nJSON encoder for double values\n\"\"\"\n\n__author__ = 'VMware, Inc.'\n__copyright__ = 'Copyright 2015, 2017 VMware, Inc. All rights reserved. -- VMware Confidential' # pylint: disable=line-too-long\n\n\nimport decimal\nimport json\nimport math\nimport six\n\nfrom vmware.vapi.exception import CoreException\nfrom vmware.vapi.lib.log import get_vapi_logger\nfrom vmware.vapi.l10n.runtime import message_factory\n\nlogger = get_vapi_logger(__name__)\n\n\ndef canonicalize_double(obj):\n \"\"\"\n Canonicalize double based on XML schema double canonical format\n\n The exponent must be indicated by \"E\". Leading zeroes and the\n preceding optional \"+\" sign are prohibited in the exponent. If the\n exponent is zero, it must be indicated by \"E0\". For the mantissa, the\n preceding optional \"+\" sign is prohibited and the decimal point is\n required. 
Leading and trailing zeroes are prohibited subject to the\n following: number representations must be normalized such that there\n is a single digit which is non-zero to the left of the decimal point\n and at least a single digit to the right of the decimal point unless\n the value being represented is zero. The canonical representation\n for zero is 0.0E0\n http://www.w3.org/TR/xmlschema-2/#double\n\n :type obj: :class:`decimal.Decimal`\n :param obj: Decimal object to be canonicalized\n :rtype: :class:`str`\n :return: Canonical string representation of the decimal\n \"\"\"\n # NaN and INF must not be used\n if obj.is_infinite() or obj.is_nan():\n msg = message_factory.get_message(\n 'vapi.decimal.canonicalization')\n logger.debug(msg)\n raise CoreException(msg)\n\n str_val = str(obj)\n\n # Extract sign of mantissa\n neg_sign = ''\n if str_val.startswith('-'):\n neg_sign = '-'\n str_val = str_val[1:]\n\n # Extract mantissa and exponent\n mantissa, exponent = None, None\n if 'E' in str_val:\n mantissa, exponent = str_val.split('E')\n else:\n mantissa = str_val\n\n # decimal class uses context objects that governs precision\n # for arithmetic operations. Setting the precision to the\n # length of mantissa string. 
To canonicalize the mantissa,\n # we do a division operation, precision maybe lost if we\n # don't set .prec\n decimal.getcontext().prec = len(mantissa)\n\n mantissa = decimal.Decimal(mantissa)\n exponent = int(exponent) if exponent is not None else 0\n\n # There MUST be a single non zero digit on the left of the decimal point\n # (unless a zero is represented)\n num_digits = 0\n if mantissa:\n num_digits = int(math.log10(mantissa))\n exponent = exponent + num_digits\n\n if num_digits < 0:\n # If the number is of the form 0.[0]+[1-9]+\n num_digits *= -1\n # since there MUST be a single non zero digit on the left of decimal\n # point, we have to multiple by an extra 10\n num_digits += 1\n exponent -= 1\n mantissa = mantissa * int(math.pow(10, num_digits))\n else:\n # If the number is of the form [1-9]+.[0-9]+\n mantissa = mantissa / int(math.pow(10, num_digits))\n\n if mantissa < 1:\n # If the original number is of the form 0.[1-9]+ then, num_digits\n # would have been 0, multile by extra 10 to get it to\n # canonical form\n mantissa *= 10\n exponent -= 1\n\n # There MUST be at least single digit on the right of the decimal point\n mantissa = str(mantissa)\n if '.' 
not in mantissa:\n mantissa = '%s.0' % mantissa\n\n # If there are any trailing zeros, strip them off\n left, right = mantissa.split('.')\n first = right[0]\n remaining = right[1:].rstrip('0')\n mantissa = '%s.%s%s' % (left, first, remaining)\n\n return '%s%sE%s' % (neg_sign, mantissa, exponent)\n\n\nclass DecimalEncoder(json.JSONEncoder):\n \"\"\"\n Class that adds capability of encoding decimal\n in JSON\n \"\"\"\n # In 2.x version of json library, _iterencode should be overriden\n # to have special encoding for objects\n def _iterencode(self, obj, markers=None):\n \"\"\"\n Overriding the decimal encoding for the default encoder\n \"\"\"\n if isinstance(obj, decimal.Decimal):\n return (six.text_type(obj) for obj in [obj])\n return super(DecimalEncoder, self)._iterencode(obj, markers) # pylint: disable=W0212\n\n # In 3.x version of json library, default() should be overrriden\n # to have special encoding for objects\n def default(self, obj): # pylint: disable=E0202\n if isinstance(obj, decimal.Decimal):\n return (six.text_type(obj) for obj in [obj])\n return json.JSONEncoder.default(self, obj)\n","sub_path":"alexa-program.bak/vmware/vapi/lib/jsonlib.py","file_name":"jsonlib.py","file_ext":"py","file_size_in_byte":4733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"541459165","text":"class Config:\n def __init__(self):\n # LEARNING\n self.BATCH_SIZE = 256 # None for offline\n self.EPOCHS = 3\n\n # LAYER\n\n # sigmoid\n # softmax\n # relu\n # none\n self.FIRST_LAYER_WITH_ACTIVATION = False\n self.LAST_LAYER_ACTIVATION = 'sigmoid'\n self.LAYER_ACTIVATION = 'sigmoid'\n self.LAYER_BIAS = True\n self.LAYER_WEIGHTS_RANGE = 1\n # momentum\n # adagrad\n # adadelta\n # adam, alpha = .001\n # none\n self.LAYER_OPTIMIZATION_METHOD = 'adadelta'\n\n # OPTIMIZATION\n self.MOMENTUM_GAMMA = .2\n\n self.ADADELTA_GAMMA = .2\n\n self.ADAM_BETA1 = .9\n self.ADAM_BETA2 = .999\n\n # NETWORK\n\n # mse\n # cross\n self.NETWORK_LOSS 
= 'cross' # cross requires softmax activation on last layer\n self.NETWORK_ALPHA = .7\n\n # EXPERIMENTS\n self.TEST_NUMBER = 1\n\n # RANDOMIZATION\n # None for random\n self.RANDOM_SEED = None\n\n # EXPERIMENTS\n self.RESULTS_DIR = 'results'\n\n self.ASSIGMENT2_HIDDEN_SIZE = 64\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"412685379","text":"from django.db import models\nfrom dotamatch.history import MatchDetails\nfrom dotamatch.teams import Teams\nfrom datetime import datetime, timedelta\nfrom django.utils import timezone\nimport http.client\nfrom dotamatch.livegame import LiveGame as LG\nfrom dotamatch.heroes import Heroes\nimport time\nimport requests\nimport random\n\n\n# Create your models here.\n\ndef getTeamLogoUrl(id):\n b = \"/ISteamRemoteStorage/GetUGCFileDetails/v1/?key=09C6440CF707C6CBA2551E42E5279611&appid=570&ugcid=\" + str(id)\n \"\"\"conn = http.client.HTTPConnection(\"api.steampowered.com\")\n r = conn.request(\"GET\", b)\n o = conn.getresponse().read()\"\"\"\n r = requests.get(\"http://api.steampowered.com\" + b)\n o = r.content\n if \"url\\\": \\\"\" in str(o):\n return str(o).split(\"url\\\": \\\"\")[1].split(\"\\\"\")[0]\n else:\n return \"/static/matches/images/noimage.png\"\n\ndef getHeroSmallPortrait(id):\n h = Heroes(\"09C6440CF707C6CBA2551E42E5279611\")\n heroes = h.heroes()\n name = heroes[id].name[14:]\n return \"http://cdn.dota2.com/apps/dota2/images/heroes/\" + name + \"_sb.png\"\n\n\ndef getHeroLargePortrait(id):\n h = Heroes(\"09C6440CF707C6CBA2551E42E5279611\")\n heroes = h.heroes()\n name = heroes[id].name[14:]\n return \"http://cdn.dota2.com/apps/dota2/images/heroes/\" + name + \"_lg.png\"\n\ndef getHeroVertPortrait(id):\n h = Heroes(\"09C6440CF707C6CBA2551E42E5279611\")\n heroes = h.heroes()\n name = heroes[id].name[14:]\n return \"http://cdn.dota2.com/apps/dota2/images/heroes/\" + 
name + \"_vert.jpg\"\n \n \n\nclass LiveGame(models.Model):\n lastUpdate = models.IntegerField(default=0)\n liveGame = models.BooleanField(default=False)\n \n tracking = models.IntegerField(default=1950817)\n \n \n gameInfo = models.TextField(blank=True)\n \n def update(self):\n if self.lastUpdate + 20 < int(time.time()):\n self.lastUpdate = int(time.time())\n else:\n return False\n \"\"\"if self.lastUpdate + timedelta(0, 30) < timezone.now():\n self.lastUpdate = timezone.now()\n else:\n return False\"\"\"\n l = LG(\"09C6440CF707C6CBA2551E42E5279611\")\n game = l.livegamebyID(self.tracking)\n if game == False:\n self.liveGame = False\n return\n else:\n self.liveGame = True\n self.gameInfo = str(game)\n \n def get_casters_list(self):\n casters = []\n for player in eval(self.gameInfo)[\"players\"]:\n if player[\"team\"] == 2:\n casters.append(player[\"name\"])\n return casters\n def get_current_spectators(self):\n return eval(self.gameInfo)[\"spectators\"]\n def get_radiant_team_name(self):\n return eval(self.gameInfo)[\"radiant_team\"][\"team_name\"]\n def get_dire_team_name(self):\n return eval(self.gameInfo)[\"dire_team\"][\"team_name\"]\n \n def get_radiant_team_logo(self):\n return getTeamLogoUrl(eval(self.gameInfo)[\"radiant_team\"][\"team_logo\"])\n def get_dire_team_logo(self):\n return getTeamLogoUrl(eval(self.gameInfo)[\"dire_team\"][\"team_logo\"])\n \n def get_tracked_team_logo(self):\n return getTeamLogoUrl(self.tracking)\n \n def get_dire_heroes_portraits(self):\n e = eval(self.gameInfo)\n data = e[\"scoreboard\"][\"dire\"]\n heroes = []\n for i in data[\"players\"]:\n heroes.append(getHeroSmallPortrait(i[\"hero_id\"]))\n return heroes\n \n def get_radiant_heroes_portraits(self):\n e = eval(self.gameInfo)\n data = e[\"scoreboard\"][\"radiant\"]\n heroes = []\n for i in data[\"players\"]:\n heroes.append(getHeroSmallPortrait(i[\"hero_id\"]))\n return heroes\n \n def get_all_heroes(self):\n e = eval(self.gameInfo)\n data = e[\"scoreboard\"][\"radiant\"]\n 
radiantheroes = []\n for i in data[\"players\"]:\n name = \"\"\n for player in e[\"players\"]:\n if player[\"account_id\"] == i[\"account_id\"]:\n name = player[\"name\"]\n radiantheroes.append([getHeroVertPortrait(i[\"hero_id\"]),\n i[\"kills\"],\n i[\"death\"],\n i[\"assists\"],\n name,\n ])\n \n data = e[\"scoreboard\"][\"dire\"]\n direheroes = []\n for i in data[\"players\"]:\n name = \"\"\n for player in e[\"players\"]:\n if player[\"account_id\"] == i[\"account_id\"]:\n name = player[\"name\"]\n direheroes.append([getHeroVertPortrait(i[\"hero_id\"]),\n i[\"kills\"],\n i[\"death\"],\n i[\"assists\"],\n name,\n ])\n return zip(radiantheroes, direheroes)\n \n def get_small_portrait(self, id):\n return getHeroSmallPortrait(id)\n \n def get_radiant_bans(self):\n e = eval(self.gameInfo)\n data = e[\"scoreboard\"][\"radiant\"][\"bans\"]\n outList = []\n for hero in data:\n outList.append(self.get_small_portrait(hero[\"hero_id\"]))\n return outList\n\n def get_dire_bans(self):\n e = eval(self.gameInfo)\n data = e[\"scoreboard\"][\"dire\"][\"bans\"]\n outList = []\n for hero in data:\n outList.append(self.get_small_portrait(hero[\"hero_id\"]))\n return outList\n \n \n \n \n\nclass League(models.Model):\n leagueid = models.IntegerField(default=0)\n leaguename = models.CharField(blank=True, max_length=50)\n leagueurl = models.CharField(blank=True, max_length=100)\n leaguepicture = models.CharField(blank=True, max_length=100)\n \n tgposition = models.IntegerField(default=1)\n \n finished = models.BooleanField(default=False)\n current = models.BooleanField(default=False)\n \n def label_from_instance(self, obj):\n return self.leaguename\n \n def __unicode__(self):\n return self.leaguename\n \n def __str__(self):\n return self.leaguename\n\nclass Match(models.Model):\n league = models.ForeignKey(League, default=1)\n \n matchfinished = models.BooleanField(default=False, verbose_name=\"Match finished?\")\n gamedatetime = models.DateTimeField(default=datetime(1990, 1, 1), 
verbose_name=\"Game time\")\n tbd = models.BooleanField(default=False)\n\n \n tgteam = models.IntegerField(default=1950817, verbose_name=\"Team 1 ID\")\n opponentteam = models.IntegerField(default=0, verbose_name=\"Team 2 ID\")\n \n tg = None\n opponent = None\n \n tgteamlogo = models.CharField(max_length=200)\n opponentteamlogo = models.CharField(max_length=200)\n \n tgteamname = models.CharField(max_length=50, default=\"Team Genysis\", verbose_name=\"Team 1 Name\")\n opponentteamname = models.CharField(max_length=50, default=\"Opponent\", verbose_name=\"Opponent Name\")\n \n tgteamtag = models.CharField(max_length=20, default=\"TG\")\n opponentteamtag = models.CharField(max_length=20, default=\"OPP\")\n \n gameIDs = models.CharField(max_length=200, blank=True)\n \n tgscore = models.IntegerField(default=0)\n opponentscore = models.IntegerField(default=0)\n \n hidden = models.BooleanField(default=False)\n \n \n gIDs = []\n \n def get_time(self):\n if self.tbd:\n return \"TBD\"\n else:\n return self.gamedatetime\n \n def get_league_picture(self):\n return self.league.leaguepicture\n \n def has_league(self):\n if self.league.leagueid != 0:\n return True\n else:\n return False\n \n def game_ids_as_list(self):\n if self.gameIDs.strip(\" \") == \"\":\n return []\n return self.gameIDs.split(\" \")\n\n def get_carousel_backgrounds(self):\n l = [\"matches/images/radiant_ancient.jpg\",\n \"matches/images/radiant_t2.jpg\",\n \"matches/images/radiant_t1.jpg\",\n \"matches/images/radiant_mid.jpg\",\n ]\n return l\n \n def get_random_carousel_background(self):\n l = [\"matches/images/radiant_ancient.jpg\",\n \"matches/images/radiant_t2.jpg\",\n \"matches/images/radiant_t1.jpg\",\n \"matches/images/radiant_mid.jpg\",\n ]\n return random.choice(l)\n \n def save(self, *args, **kwargs):\n if self.tgteam != 0 and self.opponentteam != 0:\n t = Teams(\"09C6440CF707C6CBA2551E42E5279611\")\n tg = t.teams(start_at_team_id=self.tgteam, teams_requested=1)[0]\n opp = 
t.teams(start_at_team_id=self.opponentteam, teams_requested=1)[0]\n \n self.tgteamname = tg.name\n self.opponentteamname = opp.name\n \n self.tgteamtag = tg.tag\n self.opponentteamtag = opp.tag\n \n \n \n \"\"\"\"a = \"/ISteamRemoteStorage/GetUGCFileDetails/v1/?key=09C6440CF707C6CBA2551E42E5279611&appid=570&ugcid=\" + str(tg.logo)\n conn = http.client.HTTPConnection(\"api.steampowered.com\")\n r = conn.request(\"GET\", a)\n o = conn.getresponse().read()\n self.tgteamlogo = str(o).split(\"url\\\": \\\"\")[1].split(\"\\\"\")[0]\"\"\"\n \n self.tgteamlogo = getTeamLogoUrl(tg.logo)\n self.opponentteamlogo = getTeamLogoUrl(opp.logo)\n \n \n \"\"\"\"b = \"/ISteamRemoteStorage/GetUGCFileDetails/v1/?key=09C6440CF707C6CBA2551E42E5279611&appid=570&ugcid=\" + str(opp.logo)\n conn = http.client.HTTPConnection(\"api.steampowered.com\")\n r = conn.request(\"GET\", b)\n o = conn.getresponse().read()\n if \"url\\\": \\\"\" in str(o):\n self.opponentteamlogo = str(o).split(\"url\\\": \\\"\")[1].split(\"\\\"\")[0]\n else:\n #self.opponentteamlogo = \"http://cdn.dotabuff.net/assets/teams/default-62a2a9f1b745e3654901a7e044d75322.jpg\"\n self.opponentteamlogo = \"/static/matches/images/noimage.png\"\"\"\n \n if self.matchfinished:\n if len(self.gameIDs.strip()) > 0:\n self.gIDs = []\n for n in self.gameIDs.split(\" \"):\n self.gIDs.append(int(n))\n tgs = 0\n opps = 0\n details = MatchDetails(\"09C6440CF707C6CBA2551E42E5279611\")\n for n in self.gIDs:\n game = details.match(n)\n radiantwin = game.radiant_win\n if (radiantwin and game.radiant_team_id == self.tgteam) or (not radiantwin and game.dire_team_id == self.tgteam):\n tgs += 1\n else:\n opps += 1\n self.tgscore = tgs\n self.opponentscore = opps\n \n \n super(Match, self).save(*args, **kwargs)\n \n \n \n \n \n \n \n #===========================================================================\n # opponent = models.CharField(max_length=75)\n # opponentlink = models.CharField(max_length=75, default=\" \")\n # opponentlogo = 
models.ImageField(upload_to='static/matches/images/', default=\"matches/images/noimage.png\")\n # gametime = models.DateTimeField()\n # tgscore = models.IntegerField(default=0)\n # opponentscore = models.IntegerField(default=0)\n # game1link = models.CharField(max_length=100, default=\" \")\n # game1win = models.BooleanField(default=False)\n # \n # game2link = models.CharField(max_length=100, default=\" \")\n # game2win = models.BooleanField(default=False)\n # \n # game3link = models.CharField(max_length=100, default=\" \")\n # game3win = models.BooleanField(default=False)\n # \n # game4link = models.CharField(max_length=100, default=\" \")\n # game4win = models.BooleanField(default=False)\n # \n # game5link = models.CharField(max_length=100, default=\" \")\n # game5win = models.BooleanField(default=False)\n # \n # matchfinished = models.BooleanField(default=False)\n #===========================================================================","sub_path":"matches/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":12130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"569935522","text":"#!/usr/bin/python3\n# coding=utf-8\n\nimport threading\n\ng_num = 0\n\ndef num_add():\n global g_num\n\n i = 0\n #for i in range(1000000):\n while i < 1000000:\n\n flag = mutex.acquire(False) # 获得互斥锁\n\n if flag:\n g_num += 1\n mutex.release()\n i += 1\n pass\n else:\n print('线程%s, 没有获得互斥锁'%threading.current_thread().name)\n\nmutex = threading.Lock() # 创建互斥锁\n\ndef main():\n\n t1 = threading.Thread(target = num_add)\n\n t2 = threading.Thread(target = num_add)\n\n t1.start()\n t2.start()\n\n t1.join()\n t2.join()\n\n print(g_num)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"j07day/互斥锁非阻塞.py","file_name":"互斥锁非阻塞.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"635799818","text":"#!/bin/python\nimport 
pygame\nimport sys\nimport ast\nimport os\nfrom utils import *\nfrom levels.tutorial_level import Tutorial_level\nfrom characters.tracy import Tracy\nfrom characters.biggie import Biggie\nfrom levels.platforms import Platform\nfrom camera import Camera\n\nclass Control(object):\n\n\tdef __init__(self, screen):\n\t\t# instanciate players and their size\n\t\tself.player = Tracy(100, SCREEN_HEIGHT/2)\n\t\tself.AI = Biggie(0, SCREEN_HEIGHT/2)\n\n\t\t# screen\n\t\tself.screen = screen\n\n\t\t# create sprite grouping for active sprites\n\t\tself.active_sprites = pygame.sprite.Group()\n\t\tself.active_sprites.add(self.player)\n\t\tself.active_sprites.add(self.AI)\n\n\t\t# which is active\n\t\tself.ACTIVE = self.player\n\n\t\t# create level and list of levels\n\t\tself.lvl_list = [Tutorial_level(self.player, self.AI)]\n\t\tself.lvl_num = 0\n\t\tself.lvl_current = self.lvl_list[self.lvl_num]\n\t\tself.player.level = self.lvl_current\n\t\tself.AI.level = self.lvl_current\n\n\t\t# some initialization for game loop\n\t\tself.done = False\n\t\tself.clock=pygame.time.Clock()\n\n\t\t# instantiate camera\n\t\tself.camera = Camera()\n\n\tdef save(self):\n\t\twith open('save/save.txt', 'w') as f:\n\t\t\tsaveStr = str(lvl_no) + ' ' + str(self.player.abilities) + ' ' + str(self.player.abilities)\n\t\t\tf.write(saveStr)\n\n\tdef load(self):\n\t\tif os.path.exists('save/save.txt'):\n\t\t\twith open('save/save.txt', 'r') as f:\n\n\t\t\t\ttext = f.read()\n\n\t\t\t\tif text != '':\n\t\t\t\t\ttext = text.split(' ')\n\t\t\t\t\ttext[-1] = text[-1].rstrip('\\n')\n\t\t\n\t\t\t\t\t# read in level\n\t\t\t\t\tself.lvl_no = text[0]\n\t\t\t\t\tself.current_lvl = self.lvl_list[self.lvl_num]\n\t\t\t\t\tself.player.level = self.lvl_current\n\t\t\t\t\tself.AI.level = self.lvl_current\n\t\t\t\t\tif len(text) > 1:\n\t\t\t\t\t\t# read in abilities\n\t\t\t\t\t\tself.player.abilities = ast.literal_eval(text[1])\n\t\t\t\t\t\tself.AI.abilities = ast.literal_eval(text[2])\n\n\t\t# reset player 
position\n\t\tself.player.rect.x = 50\n\t\tself.AI.rect.x = 0\t\n\t\t\t\n\tdef processEvents(self):\n\t\t# loop events\n\t\tfor event in pygame.event.get()\t:\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\tself.done = True\n\t\t\t# key pressed \n\t\t\tif event.type == pygame.KEYDOWN:\n\t\t\t\tif event.key == pygame.K_LEFT:\n\t\t\t\t\tself.player.move_left()\n\t\t\t\tif event.key == pygame.K_RIGHT:\n\t\t\t\t\tself.player.move_right()\n\t\t\t\tif event.key == pygame.K_SPACE:\n\t\t\t\t\tself.player.jump()\n\t\t\t\telse:\n\t\t\t\t\t# check for ability key\n\t\t\t\t\tk = self.player.checkAbility(event.key)\n\t\t\t\t\tif k is not None:\n\t\t\t\t\t\tk.cast(self)\n\t\t\t# key released\n\t\t\tif event.type == pygame.KEYUP:\n\t\t\t\tif event.key == pygame.K_LEFT and self.player.delta_x < 0:\n\t\t\t\t\tself.player.stop()\n\t\t\t\tif event.key == pygame.K_RIGHT and self.player.delta_x > 0:\n\t\t\t\t\tself.player.stop()\n\t\t\t\n\n\tdef update(self):\n\t\t# update camera\n\t\tself.camera.update(self.player)\t\t\t\n\n\t\t#update player\n\t\tself.active_sprites.update()\n\t\tif self.player.dead:\n\t\t\tself.load()\n\t\t\tself.player.dead = False\n\t\t\t\n\t\t# update level\n\t\tself.lvl_current.update()\n\n\t\t# check if we've moved onto the next area\n\t\tif self.player.rect.right > LEVEL_WIDTH and self.lvl_num < len(self.lvl_list)-1:\n\t\t\t# save checkpoint\n\t\t\tself.save()\n\t\t\tself.player.rect.x = 50\n\t\t\tself.AI.rect.x = 0\n\t\t\tself.lvl_num += 1\n\t\t\tself.lvl_current = self.lvl_list[self.lvl_num]\n\t\t\tself.player.level = self.lvl_current\n\t\t\tself.AI.level = self.lvl_current\n\n\t\t# go to previous area\n\t\telif self.player.rect.left < 0 and self.lvl_num > 0:\n\t\t\tself.player.rect.x = LEVEL_WIDTH-100\n\t\t\tself.player.rect.x = LEVEL_WIDTH\n\t\t\tself.lvl_num -= 1\n\t\t\tself.lvl_current = self.lvl_list[lvl_num]\n\t\t\tself.player.level = self.lvl_current\n\t\t\tself.AI.level = self.lvl_current\n\n\tdef draw(self):\n\t\t# 
draw\n\t\tself.lvl_current.draw(self.screen, self.camera)\n\n\t\t# draw players\n\t\tfor char in self.active_sprites:\n\t\t\tself.screen.blit(char.image, self.camera.applyCam(char))\n\n\tdef main_loop(self):\n\t\twhile not self.done:\n\t\t\t\n\t\t\t# input, update, draw\n\t\t\tself.processEvents()\n\t\t\tself.update()\n\t\t\tself.draw()\n\t\t\t\n\t\t\t#FPS\t\t\n\t\t\tself.clock.tick(60)\n\t\t\t\n\t\t\t#refresh screen\n\t\t\tpygame.display.flip()\n\ndef main():\n\n\t# initialize pygame\n\tpygame.init()\n\n\t# initialize screen\n\tsize = [SCREEN_WIDTH, SCREEN_HEIGHT]\n\tscreen = pygame.display.set_mode(size)\n\tpygame.display.set_caption(\"This is a game... or is it\")\n\n\tControl(screen).main_loop()\n\tpygame.quit()\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"1"} +{"seq_id":"159672236","text":"import tkinter\nfrom database import query, cursor\nfrom os import path\nfrom PIL import Image, ImageTk\n\n\n\nclass Display:\n def __init__(self):\n self.word_input = \"\"\n self.film_searched = \"\"\n self.image_label = \"\"\n self.title_film = \"\"\n\n self.root = tkinter.Tk()\n self.root.geometry(\"1024x512+600+300\")\n\n self.input = tkinter.Entry(self.root, textvariable=self.word_input)\n self.input.grid(row=0, column=0)\n\n self.button = tkinter.Button(self.root, text=\"Chercher\", command=self.search)\n self.button.grid(row=1, column=0)\n\n self.root.mainloop()\n\n def search(self):\n # print(\"%s\" % (self.input.get()))\n\n cursor.execute(query, {\"title_to_find\": \"%\"+self.input.get()+\"%\"})\n\n while True:\n self.film_searched = cursor.fetchone()\n if self.film_searched == None:\n break\n else:\n # print(films['title'])\n # self.list_films_searched.append(films['title'])\n result = tkinter.Label(self.root, text=self.film_searched[\"title\"])\n result.film = self.film_searched\n 
result.bind(\"